From python-checkins at python.org Thu Oct 1 00:54:53 2015 From: python-checkins at python.org (victor.stinner) Date: Wed, 30 Sep 2015 22:54:53 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Fix_regrtest_--coverage_on?= =?utf-8?q?_Windows?= Message-ID: <20150930225451.115507.12471@psf.io> https://hg.python.org/cpython/rev/0f10e0b3e76d changeset: 98443:0f10e0b3e76d user: Victor Stinner date: Thu Oct 01 00:53:09 2015 +0200 summary: Fix regrtest --coverage on Windows Issue #25260: Fix ``python -m test --coverage`` on Windows. Remove the list of ignored directories. files: Lib/test/libregrtest/main.py | 5 +---- Lib/test/test_regrtest.py | 2 -- Misc/NEWS | 3 +++ 3 files changed, 4 insertions(+), 6 deletions(-) diff --git a/Lib/test/libregrtest/main.py b/Lib/test/libregrtest/main.py --- a/Lib/test/libregrtest/main.py +++ b/Lib/test/libregrtest/main.py @@ -272,10 +272,7 @@ def run_tests_sequential(self): if self.ns.trace: import trace - self.tracer = trace.Trace(ignoredirs=[sys.base_prefix, - sys.base_exec_prefix, - tempfile.gettempdir()], - trace=False, count=True) + self.tracer = trace.Trace(trace=False, count=True) save_modules = sys.modules.keys() diff --git a/Lib/test/test_regrtest.py b/Lib/test/test_regrtest.py --- a/Lib/test/test_regrtest.py +++ b/Lib/test/test_regrtest.py @@ -605,8 +605,6 @@ % (self.TESTNAME_REGEX, len(tests))) self.check_line(output, regex) - @unittest.skipIf(sys.platform == 'win32', - "FIXME: coverage doesn't work on Windows") def test_coverage(self): # test --coverage test = self.create_test() diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -157,6 +157,9 @@ Tests ----- +- Issue #25260: Fix ``python -m test --coverage`` on Windows. Remove the + list of ignored directories. + - PCbuild\rt.bat now accepts an unlimited number of arguments to pass along to regrtest.py. Previously there was a limit of 9. 
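For context on the change above: regrtest's --coverage mode is built on the stdlib trace module, and the fix simply stops passing an ignoredirs list that broke on Windows. A minimal sketch of the same Trace() pattern follows; the demo() function and the scratch output directory are invented for illustration.

    import tempfile
    import trace

    def demo():
        total = 0
        for i in range(3):
            total += i
        return total

    # Count executed lines without echoing them as they run; this mirrors the
    # simplified Trace() call regrtest now uses on every platform.
    tracer = trace.Trace(trace=False, count=True)
    tracer.runfunc(demo)

    # Write per-module *.cover files (line counts) into a scratch directory.
    tracer.results().write_results(show_missing=True, coverdir=tempfile.mkdtemp())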
-- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Thu Oct 1 08:15:08 2015 From: python-checkins at python.org (raymond.hettinger) Date: Thu, 01 Oct 2015 06:15:08 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Add_fast_paths_to_deque=5F?= =?utf-8?q?init=28=29_for_the_common_cases?= Message-ID: <20151001061507.115366.64922@psf.io> https://hg.python.org/cpython/rev/5352badd200e changeset: 98444:5352badd200e user: Raymond Hettinger date: Wed Sep 30 23:15:02 2015 -0700 summary: Add fast paths to deque_init() for the common cases files: Modules/_collectionsmodule.c | 13 ++++++++++--- 1 files changed, 10 insertions(+), 3 deletions(-) diff --git a/Modules/_collectionsmodule.c b/Modules/_collectionsmodule.c --- a/Modules/_collectionsmodule.c +++ b/Modules/_collectionsmodule.c @@ -1456,8 +1456,14 @@ Py_ssize_t maxlen = -1; char *kwlist[] = {"iterable", "maxlen", 0}; - if (!PyArg_ParseTupleAndKeywords(args, kwdargs, "|OO:deque", kwlist, &iterable, &maxlenobj)) - return -1; + if (kwdargs == NULL) { + if (!PyArg_UnpackTuple(args, "deque()", 0, 2, &iterable, &maxlenobj)) + return -1; + } else { + if (!PyArg_ParseTupleAndKeywords(args, kwdargs, "|OO:deque", kwlist, + &iterable, &maxlenobj)) + return -1; + } if (maxlenobj != NULL && maxlenobj != Py_None) { maxlen = PyLong_AsSsize_t(maxlenobj); if (maxlen == -1 && PyErr_Occurred()) @@ -1468,7 +1474,8 @@ } } deque->maxlen = maxlen; - deque_clear(deque); + if (Py_SIZE(deque) > 0) + deque_clear(deque); if (iterable != NULL) { PyObject *rv = deque_extend(deque, iterable); if (rv == NULL) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Thu Oct 1 08:44:41 2015 From: python-checkins at python.org (victor.stinner) Date: Thu, 01 Oct 2015 06:44:41 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Fix_=5FPyTime=5FAsTimevalS?= =?utf-8?q?truct=5Fimpl=28=29_on_OpenBSD?= Message-ID: <20151001064441.82658.27044@psf.io> https://hg.python.org/cpython/rev/33a5ab6c578a changeset: 98445:33a5ab6c578a user: Victor Stinner date: Thu Oct 01 08:44:03 2015 +0200 summary: Fix _PyTime_AsTimevalStruct_impl() on OpenBSD On the x86 OpenBSD 5.8 buildbot, the integer overflow check is ignored. Copy the tv_sec variable into a Py_time_t variable instead of "simply" casting it to Py_time_t, to fix the integer overflow check. 
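Relating to the deque_init() change earlier in this digest: the new fast path is taken when the constructor receives no keyword arguments, which covers the usual call shapes. A small pure-Python illustration of those call shapes (not the C implementation itself):

    from collections import deque

    # No keyword arguments: these constructor calls hit the new
    # PyArg_UnpackTuple() fast path.
    empty = deque()
    filled = deque(range(5))
    bounded = deque(range(5), 3)
    print(bounded)                  # deque([2, 3, 4], maxlen=3)

    # Keyword arguments are still parsed by PyArg_ParseTupleAndKeywords().
    bounded_kw = deque(range(5), maxlen=3)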
files: Python/pytime.c | 5 +++-- 1 files changed, 3 insertions(+), 2 deletions(-) diff --git a/Python/pytime.c b/Python/pytime.c --- a/Python/pytime.c +++ b/Python/pytime.c @@ -454,7 +454,7 @@ _PyTime_AsTimevalStruct_impl(_PyTime_t t, struct timeval *tv, _PyTime_round_t round, int raise) { - _PyTime_t secs; + _PyTime_t secs, secs2; int us; int res; @@ -467,7 +467,8 @@ #endif tv->tv_usec = us; - if (res < 0 || (_PyTime_t)tv->tv_sec != secs) { + secs2 = (_PyTime_t)tv->tv_sec; + if (res < 0 || secs2 != secs) { if (raise) error_time_t_overflow(); return -1; -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Thu Oct 1 08:49:12 2015 From: python-checkins at python.org (andrew.svetlov) Date: Thu, 01 Oct 2015 06:49:12 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_Merge_3=2E4_-=3E_3=2E5?= Message-ID: <20151001064912.94107.11455@psf.io> https://hg.python.org/cpython/rev/d7d18ef3e05c changeset: 98447:d7d18ef3e05c branch: 3.5 parent: 98439:0eb26a4d5ffa parent: 98446:9a10055e12fa user: Andrew Svetlov date: Thu Oct 01 09:48:36 2015 +0300 summary: Merge 3.4 -> 3.5 files: Doc/library/asyncio-eventloop.rst | 6 +++--- 1 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Doc/library/asyncio-eventloop.rst b/Doc/library/asyncio-eventloop.rst --- a/Doc/library/asyncio-eventloop.rst +++ b/Doc/library/asyncio-eventloop.rst @@ -586,14 +586,14 @@ pool of processes). By default, an event loop uses a thread pool executor (:class:`~concurrent.futures.ThreadPoolExecutor`). -.. coroutinemethod:: BaseEventLoop.run_in_executor(executor, callback, \*args) +.. coroutinemethod:: BaseEventLoop.run_in_executor(executor, func, \*args) - Arrange for a callback to be called in the specified executor. + Arrange for a *func* to be called in the specified executor. The *executor* argument should be an :class:`~concurrent.futures.Executor` instance. The default executor is used if *executor* is ``None``. - :ref:`Use functools.partial to pass keywords to the callback + :ref:`Use functools.partial to pass keywords to the *func* `. This method is a :ref:`coroutine `. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Thu Oct 1 08:49:12 2015 From: python-checkins at python.org (andrew.svetlov) Date: Thu, 01 Oct 2015 06:49:12 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E4=29=3A_Reflect_parame?= =?utf-8?q?ter_name_change_in_the_doc?= Message-ID: <20151001064912.11694.9265@psf.io> https://hg.python.org/cpython/rev/9a10055e12fa changeset: 98446:9a10055e12fa branch: 3.4 parent: 98438:2652c1798f7d user: Andrew Svetlov date: Thu Oct 01 09:48:08 2015 +0300 summary: Reflect parameter name change in the doc files: Doc/library/asyncio-eventloop.rst | 6 +++--- 1 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Doc/library/asyncio-eventloop.rst b/Doc/library/asyncio-eventloop.rst --- a/Doc/library/asyncio-eventloop.rst +++ b/Doc/library/asyncio-eventloop.rst @@ -582,14 +582,14 @@ pool of processes). By default, an event loop uses a thread pool executor (:class:`~concurrent.futures.ThreadPoolExecutor`). -.. coroutinemethod:: BaseEventLoop.run_in_executor(executor, callback, \*args) +.. coroutinemethod:: BaseEventLoop.run_in_executor(executor, func, \*args) - Arrange for a callback to be called in the specified executor. + Arrange for a *func* to be called in the specified executor. The *executor* argument should be an :class:`~concurrent.futures.Executor` instance. 
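The asyncio documentation change above renames the callback parameter to *func*; keyword arguments still have to be bound with functools.partial before the call. A short usage sketch, where the blocking fetch() function is invented for the example:

    import asyncio
    import functools

    def fetch(url, timeout=10):
        # stand-in for a blocking call such as urllib.request.urlopen()
        return 'fetched %s with a %s second timeout' % (url, timeout)

    @asyncio.coroutine
    def main(loop):
        # run_in_executor() forwards only positional arguments to func, so
        # keyword arguments are bound up front with functools.partial.
        blocking_call = functools.partial(fetch, 'https://example.com', timeout=5)
        result = yield from loop.run_in_executor(None, blocking_call)
        print(result)

    loop = asyncio.get_event_loop()
    loop.run_until_complete(main(loop))
    loop.close()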
The default executor is used if *executor* is ``None``. - :ref:`Use functools.partial to pass keywords to the callback + :ref:`Use functools.partial to pass keywords to the *func* `. This method is a :ref:`coroutine `. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Thu Oct 1 08:49:13 2015 From: python-checkins at python.org (andrew.svetlov) Date: Thu, 01 Oct 2015 06:49:13 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Merge_3=2E5_-=3E_default?= Message-ID: <20151001064912.3648.54297@psf.io> https://hg.python.org/cpython/rev/1465b18ef4fc changeset: 98448:1465b18ef4fc parent: 98445:33a5ab6c578a parent: 98447:d7d18ef3e05c user: Andrew Svetlov date: Thu Oct 01 09:49:03 2015 +0300 summary: Merge 3.5 -> default files: Doc/library/asyncio-eventloop.rst | 6 +++--- 1 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Doc/library/asyncio-eventloop.rst b/Doc/library/asyncio-eventloop.rst --- a/Doc/library/asyncio-eventloop.rst +++ b/Doc/library/asyncio-eventloop.rst @@ -586,14 +586,14 @@ pool of processes). By default, an event loop uses a thread pool executor (:class:`~concurrent.futures.ThreadPoolExecutor`). -.. coroutinemethod:: BaseEventLoop.run_in_executor(executor, callback, \*args) +.. coroutinemethod:: BaseEventLoop.run_in_executor(executor, func, \*args) - Arrange for a callback to be called in the specified executor. + Arrange for a *func* to be called in the specified executor. The *executor* argument should be an :class:`~concurrent.futures.Executor` instance. The default executor is used if *executor* is ``None``. - :ref:`Use functools.partial to pass keywords to the callback + :ref:`Use functools.partial to pass keywords to the *func* `. This method is a :ref:`coroutine `. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Thu Oct 1 08:57:34 2015 From: python-checkins at python.org (victor.stinner) Date: Thu, 01 Oct 2015 06:57:34 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_=28Merge_3=2E5=29_Issue_=2325274=3A_test=5Frecursionlimi?= =?utf-8?q?t=5Frecovery=28=29_of_test=5Fsys_now_checks?= Message-ID: <20151001065734.98358.78229@psf.io> https://hg.python.org/cpython/rev/bae0912dd160 changeset: 98451:bae0912dd160 parent: 98448:1465b18ef4fc parent: 98450:898a9a959927 user: Victor Stinner date: Thu Oct 01 08:56:54 2015 +0200 summary: (Merge 3.5) Issue #25274: test_recursionlimit_recovery() of test_sys now checks sys.gettrace() when the test is executed, not when the module is loaded. sys.settrace() may be after after the test is loaded. 
files: Lib/test/test_sys.py | 5 +++-- 1 files changed, 3 insertions(+), 2 deletions(-) diff --git a/Lib/test/test_sys.py b/Lib/test/test_sys.py --- a/Lib/test/test_sys.py +++ b/Lib/test/test_sys.py @@ -197,9 +197,10 @@ self.assertEqual(sys.getrecursionlimit(), 10000) sys.setrecursionlimit(oldlimit) - @unittest.skipIf(hasattr(sys, 'gettrace') and sys.gettrace(), - 'fatal error if run with a trace function') def test_recursionlimit_recovery(self): + if hasattr(sys, 'gettrace') and sys.gettrace(): + self.skipTest('fatal error if run with a trace function') + # NOTE: this test is slightly fragile in that it depends on the current # recursion count when executing the test being low enough so as to # trigger the recursion recovery detection in the _Py_MakeEndRecCheck -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Thu Oct 1 08:57:34 2015 From: python-checkins at python.org (victor.stinner) Date: Thu, 01 Oct 2015 06:57:34 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_=28Merge_3=2E4=29_Issue_=2325274=3A_test=5Frecursionlimit=5Fre?= =?utf-8?q?covery=28=29_of_test=5Fsys_now_checks?= Message-ID: <20151001065734.3664.80792@psf.io> https://hg.python.org/cpython/rev/898a9a959927 changeset: 98450:898a9a959927 branch: 3.5 parent: 98447:d7d18ef3e05c parent: 98449:60c4fd84ef92 user: Victor Stinner date: Thu Oct 01 08:56:27 2015 +0200 summary: (Merge 3.4) Issue #25274: test_recursionlimit_recovery() of test_sys now checks sys.gettrace() when the test is executed, not when the module is loaded. sys.settrace() may be after after the test is loaded. files: Lib/test/test_sys.py | 5 +++-- 1 files changed, 3 insertions(+), 2 deletions(-) diff --git a/Lib/test/test_sys.py b/Lib/test/test_sys.py --- a/Lib/test/test_sys.py +++ b/Lib/test/test_sys.py @@ -197,9 +197,10 @@ self.assertEqual(sys.getrecursionlimit(), 10000) sys.setrecursionlimit(oldlimit) - @unittest.skipIf(hasattr(sys, 'gettrace') and sys.gettrace(), - 'fatal error if run with a trace function') def test_recursionlimit_recovery(self): + if hasattr(sys, 'gettrace') and sys.gettrace(): + self.skipTest('fatal error if run with a trace function') + # NOTE: this test is slightly fragile in that it depends on the current # recursion count when executing the test being low enough so as to # trigger the recursion recovery detection in the _Py_MakeEndRecCheck -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Thu Oct 1 08:57:34 2015 From: python-checkins at python.org (victor.stinner) Date: Thu, 01 Oct 2015 06:57:34 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzI1Mjc0?= =?utf-8?q?=3A_test=5Frecursionlimit=5Frecovery=28=29_of_test=5Fsys_now_ch?= =?utf-8?q?ecks?= Message-ID: <20151001065734.9957.26964@psf.io> https://hg.python.org/cpython/rev/60c4fd84ef92 changeset: 98449:60c4fd84ef92 branch: 3.4 parent: 98446:9a10055e12fa user: Victor Stinner date: Thu Oct 01 08:55:33 2015 +0200 summary: Issue #25274: test_recursionlimit_recovery() of test_sys now checks sys.gettrace() when the test is executed, not when the module is loaded. sys.settrace() may be after after the test is loaded. 
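The point of the test_sys change above is that a @unittest.skipIf condition is evaluated once, when the module is imported, while sys.settrace() may be called later; checking inside the test body defers the decision to run time. A minimal sketch with a hypothetical test class and placeholder body:

    import sys
    import unittest

    class RecursionLimitTests(unittest.TestCase):
        def test_recursionlimit_recovery(self):
            # Checked each time the test runs, so a trace function installed
            # after this module was imported is still detected.
            if hasattr(sys, 'gettrace') and sys.gettrace():
                self.skipTest('fatal error if run with a trace function')
            self.assertGreater(sys.getrecursionlimit(), 0)  # placeholder body

    if __name__ == '__main__':
        unittest.main()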
files: Lib/test/test_sys.py | 5 +++-- 1 files changed, 3 insertions(+), 2 deletions(-) diff --git a/Lib/test/test_sys.py b/Lib/test/test_sys.py --- a/Lib/test/test_sys.py +++ b/Lib/test/test_sys.py @@ -197,9 +197,10 @@ self.assertEqual(sys.getrecursionlimit(), 10000) sys.setrecursionlimit(oldlimit) - @unittest.skipIf(hasattr(sys, 'gettrace') and sys.gettrace(), - 'fatal error if run with a trace function') def test_recursionlimit_recovery(self): + if hasattr(sys, 'gettrace') and sys.gettrace(): + self.skipTest('fatal error if run with a trace function') + # NOTE: this test is slightly fragile in that it depends on the current # recursion count when executing the test being low enough so as to # trigger the recursion recovery detection in the _Py_MakeEndRecCheck -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Thu Oct 1 09:51:52 2015 From: python-checkins at python.org (victor.stinner) Date: Thu, 01 Oct 2015 07:51:52 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?b?KTogTWVyZ2UgMy41?= Message-ID: <20151001075152.115214.35513@psf.io> https://hg.python.org/cpython/rev/710ef035ee44 changeset: 98453:710ef035ee44 parent: 98451:bae0912dd160 parent: 98452:835085cc28cd user: Victor Stinner date: Thu Oct 01 09:51:02 2015 +0200 summary: Merge 3.5 files: Python/random.c | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Python/random.c b/Python/random.c --- a/Python/random.c +++ b/Python/random.c @@ -73,7 +73,7 @@ } #elif defined(HAVE_GETENTROPY) && !defined(sun) -#define PY_GETENTROPY +#define PY_GETENTROPY 1 /* Fill buffer with size pseudo-random bytes generated by getentropy(). Return 0 on success, or raise an exception and return -1 on error. @@ -112,7 +112,7 @@ #else #if defined(HAVE_GETRANDOM) || defined(HAVE_GETRANDOM_SYSCALL) -#define PY_GETRANDOM +#define PY_GETRANDOM 1 static int py_getrandom(void *buffer, Py_ssize_t size, int raise) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Thu Oct 1 09:51:52 2015 From: python-checkins at python.org (victor.stinner) Date: Thu, 01 Oct 2015 07:51:52 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy41KTogSXNzdWUgIzI1MDAz?= =?utf-8?q?=3A_On_Solaris_11=2E3_or_newer=2C_os=2Eurandom=28=29_now_uses_t?= =?utf-8?q?he_getrandom=28=29?= Message-ID: <20151001075152.115050.90181@psf.io> https://hg.python.org/cpython/rev/835085cc28cd changeset: 98452:835085cc28cd branch: 3.5 parent: 98450:898a9a959927 user: Victor Stinner date: Thu Oct 01 09:47:30 2015 +0200 summary: Issue #25003: On Solaris 11.3 or newer, os.urandom() now uses the getrandom() function instead of the getentropy() function. The getentropy() function is blocking to generate very good quality entropy, os.urandom() doesn't need such high-quality entropy. 
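The test_os change in the diff that follows skips its file-descriptor checks whenever the interpreter was built with any of the getentropy()/getrandom() flags, since os.urandom() then needs no file descriptor. Roughly, those build flags can be inspected like this on a built interpreter:

    import os
    import sysconfig

    no_fd_needed = any(
        sysconfig.get_config_var(name) == 1
        for name in ('HAVE_GETENTROPY', 'HAVE_GETRANDOM', 'HAVE_GETRANDOM_SYSCALL'))
    print('os.urandom() can work without a file descriptor:', no_fd_needed)
    print('sample:', os.urandom(8))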
files: Lib/test/test_os.py | 16 ++++++---- Misc/NEWS | 5 +++ Python/random.c | 49 ++++++++++++++++++++++---------- configure | 43 ++++++++++++++++++++++++++-- configure.ac | 31 ++++++++++++++++++-- pyconfig.h.in | 3 ++ 6 files changed, 119 insertions(+), 28 deletions(-) diff --git a/Lib/test/test_os.py b/Lib/test/test_os.py --- a/Lib/test/test_os.py +++ b/Lib/test/test_os.py @@ -1226,13 +1226,15 @@ self.assertNotEqual(data1, data2) -HAVE_GETENTROPY = (sysconfig.get_config_var('HAVE_GETENTROPY') == 1) -HAVE_GETRANDOM = (sysconfig.get_config_var('HAVE_GETRANDOM_SYSCALL') == 1) - - at unittest.skipIf(HAVE_GETENTROPY, - "getentropy() does not use a file descriptor") - at unittest.skipIf(HAVE_GETRANDOM, - "getrandom() does not use a file descriptor") +# os.urandom() doesn't use a file descriptor when it is implemented with the +# getentropy() function, the getrandom() function or the getrandom() syscall +OS_URANDOM_DONT_USE_FD = ( + sysconfig.get_config_var('HAVE_GETENTROPY') == 1 + or sysconfig.get_config_var('HAVE_GETRANDOM') == 1 + or sysconfig.get_config_var('HAVE_GETRANDOM_SYSCALL') == 1) + + at unittest.skipIf(OS_URANDOM_DONT_USE_FD , + "os.random() does not use a file descriptor") class URandomFDTests(unittest.TestCase): @unittest.skipUnless(resource, "test requires the resource module") def test_urandom_failure(self): diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -11,6 +11,11 @@ Core and Builtins ----------------- +- Issue #25003: On Solaris 11.3 or newer, os.urandom() now uses the + getrandom() function instead of the getentropy() function. The getentropy() + function is blocking to generate very good quality entropy, os.urandom() + doesn't need such high-quality entropy. + - Issue #25182: The stdprinter (used as sys.stderr before the io module is imported at startup) now uses the backslashreplace error handler. diff --git a/Python/random.c b/Python/random.c --- a/Python/random.c +++ b/Python/random.c @@ -6,7 +6,9 @@ # ifdef HAVE_SYS_STAT_H # include # endif -# ifdef HAVE_GETRANDOM_SYSCALL +# ifdef HAVE_GETRANDOM +# include +# elif defined(HAVE_GETRANDOM_SYSCALL) # include # endif #endif @@ -70,7 +72,9 @@ return 0; } -#elif HAVE_GETENTROPY +#elif defined(HAVE_GETENTROPY) && !defined(sun) +#define PY_GETENTROPY 1 + /* Fill buffer with size pseudo-random bytes generated by getentropy(). Return 0 on success, or raise an exception and return -1 on error. @@ -105,16 +109,19 @@ return 0; } -#else /* !HAVE_GETENTROPY */ +#else -#ifdef HAVE_GETRANDOM_SYSCALL +#if defined(HAVE_GETRANDOM) || defined(HAVE_GETRANDOM_SYSCALL) +#define PY_GETRANDOM 1 + static int py_getrandom(void *buffer, Py_ssize_t size, int raise) { - /* is getrandom() supported by the running kernel? - * need Linux kernel 3.17 or later */ + /* Is getrandom() supported by the running kernel? + * Need Linux kernel 3.17 or newer, or Solaris 11.3 or newer */ static int getrandom_works = 1; - /* Use /dev/urandom, block if the kernel has no entropy */ + /* Use non-blocking /dev/urandom device. On Linux at boot, the getrandom() + * syscall blocks until /dev/urandom is initialized with enough entropy. 
*/ const int flags = 0; int n; @@ -124,7 +131,18 @@ while (0 < size) { errno = 0; - /* Use syscall() because the libc doesn't expose getrandom() yet, see: +#ifdef HAVE_GETRANDOM + if (raise) { + Py_BEGIN_ALLOW_THREADS + n = getrandom(buffer, size, flags); + Py_END_ALLOW_THREADS + } + else { + n = getrandom(buffer, size, flags); + } +#else + /* On Linux, use the syscall() function because the GNU libc doesn't + * expose the Linux getrandom() syscall yet. See: * https://sourceware.org/bugzilla/show_bug.cgi?id=17252 */ if (raise) { Py_BEGIN_ALLOW_THREADS @@ -134,6 +152,7 @@ else { n = syscall(SYS_getrandom, buffer, size, flags); } +#endif if (n < 0) { if (errno == ENOSYS) { @@ -182,7 +201,7 @@ assert (0 < size); -#ifdef HAVE_GETRANDOM_SYSCALL +#ifdef PY_GETRANDOM if (py_getrandom(buffer, size, 0) == 1) return; /* getrandom() is not supported by the running kernel, fall back @@ -218,14 +237,14 @@ int fd; Py_ssize_t n; struct _Py_stat_struct st; -#ifdef HAVE_GETRANDOM_SYSCALL +#ifdef PY_GETRANDOM int res; #endif if (size <= 0) return 0; -#ifdef HAVE_GETRANDOM_SYSCALL +#ifdef PY_GETRANDOM res = py_getrandom(buffer, size, 1); if (res < 0) return -1; @@ -304,7 +323,7 @@ } } -#endif /* HAVE_GETENTROPY */ +#endif /* Fill buffer with pseudo-random bytes generated by a linear congruent generator (LCG): @@ -345,7 +364,7 @@ #ifdef MS_WINDOWS return win32_urandom((unsigned char *)buffer, size, 1); -#elif HAVE_GETENTROPY +#elif defined(PY_GETENTROPY) return py_getentropy(buffer, size, 0); #else return dev_urandom_python((char*)buffer, size); @@ -392,7 +411,7 @@ else { #ifdef MS_WINDOWS (void)win32_urandom(secret, secret_size, 0); -#elif HAVE_GETENTROPY +#elif defined(PY_GETENTROPY) (void)py_getentropy(secret, secret_size, 1); #else dev_urandom_noraise(secret, secret_size); @@ -408,7 +427,7 @@ CryptReleaseContext(hCryptProv, 0); hCryptProv = 0; } -#elif HAVE_GETENTROPY +#elif defined(PY_GETENTROPY) /* nothing to clean */ #else dev_urandom_close(); diff --git a/configure b/configure --- a/configure +++ b/configure @@ -16085,11 +16085,11 @@ #include int main() { + char buffer[1]; + const size_t buflen = sizeof(buffer); const int flags = 0; - char buffer[1]; - int n; /* ignore the result, Python checks for ENOSYS at runtime */ - (void)syscall(SYS_getrandom, buffer, sizeof(buffer), flags); + (void)syscall(SYS_getrandom, buffer, buflen, flags); return 0; } @@ -16111,6 +16111,43 @@ fi +# check if the getrandom() function is available +# the test was written for the Solaris function of +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for the getrandom() function" >&5 +$as_echo_n "checking for the getrandom() function... " >&6; } +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + + + #include + + int main() { + char buffer[1]; + const size_t buflen = sizeof(buffer); + const int flags = 0; + /* ignore the result, Python checks for ENOSYS at runtime */ + (void)getrandom(buffer, buflen, flags); + return 0; + } + + +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + have_getrandom=yes +else + have_getrandom=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $have_getrandom" >&5 +$as_echo "$have_getrandom" >&6; } + +if test "$have_getrandom" = yes; then + +$as_echo "#define HAVE_GETRANDOM 1" >>confdefs.h + +fi + # generate output files ac_config_files="$ac_config_files Makefile.pre Modules/Setup.config Misc/python.pc Misc/python-config.sh" diff --git a/configure.ac b/configure.ac --- a/configure.ac +++ b/configure.ac @@ -5154,11 +5154,11 @@ #include int main() { + char buffer[1]; + const size_t buflen = sizeof(buffer); const int flags = 0; - char buffer[1]; - int n; /* ignore the result, Python checks for ENOSYS at runtime */ - (void)syscall(SYS_getrandom, buffer, sizeof(buffer), flags); + (void)syscall(SYS_getrandom, buffer, buflen, flags); return 0; } ]]) @@ -5170,6 +5170,31 @@ [Define to 1 if the Linux getrandom() syscall is available]) fi +# check if the getrandom() function is available +# the test was written for the Solaris function of +AC_MSG_CHECKING(for the getrandom() function) +AC_LINK_IFELSE( +[ + AC_LANG_SOURCE([[ + #include + + int main() { + char buffer[1]; + const size_t buflen = sizeof(buffer); + const int flags = 0; + /* ignore the result, Python checks for ENOSYS at runtime */ + (void)getrandom(buffer, buflen, flags); + return 0; + } + ]]) +],[have_getrandom=yes],[have_getrandom=no]) +AC_MSG_RESULT($have_getrandom) + +if test "$have_getrandom" = yes; then + AC_DEFINE(HAVE_GETRANDOM, 1, + [Define to 1 if the getrandom() function is available]) +fi + # generate output files AC_CONFIG_FILES(Makefile.pre Modules/Setup.config Misc/python.pc Misc/python-config.sh) AC_CONFIG_FILES([Modules/ld_so_aix], [chmod +x Modules/ld_so_aix]) diff --git a/pyconfig.h.in b/pyconfig.h.in --- a/pyconfig.h.in +++ b/pyconfig.h.in @@ -395,6 +395,9 @@ /* Define to 1 if you have the `getpwent' function. */ #undef HAVE_GETPWENT +/* Define to 1 if the getrandom() function is available */ +#undef HAVE_GETRANDOM + /* Define to 1 if the Linux getrandom() syscall is available */ #undef HAVE_GETRANDOM_SYSCALL -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Thu Oct 1 10:02:00 2015 From: python-checkins at python.org (victor.stinner) Date: Thu, 01 Oct 2015 08:02:00 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogSXNzdWUgIzI1MDAz?= =?utf-8?q?=3A_os=2Eurandom=28=29_doesn=27t_use_getentropy=28=29_on_Solari?= =?utf-8?q?s_because?= Message-ID: <20151001080200.3652.15402@psf.io> https://hg.python.org/cpython/rev/202c827f86df changeset: 98454:202c827f86df branch: 2.7 parent: 98411:8274fc521e69 user: Victor Stinner date: Thu Oct 01 09:57:26 2015 +0200 summary: Issue #25003: os.urandom() doesn't use getentropy() on Solaris because getentropy() is blocking, whereas os.urandom() should not block. getentropy() is supported since Solaris 11.3. 
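When neither getrandom() nor getentropy() is usable, CPython falls back to reading /dev/urandom, as the random.c diffs in this thread show. A rough Python equivalent of that fallback path, for illustration only (Unix-only, and without the persistent file-descriptor caching the C code does):

    import binascii

    def dev_urandom(size):
        with open('/dev/urandom', 'rb', buffering=0) as f:
            data = f.read(size)
        if len(data) != size:
            raise OSError('short read from /dev/urandom')
        return data

    print(binascii.hexlify(dev_urandom(16)))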
files: Misc/NEWS | 4 ++++ Python/random.c | 12 ++++++++---- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,6 +10,10 @@ Core and Builtins ----------------- +- Issue #25003: os.urandom() doesn't use getentropy() on Solaris because + getentropy() is blocking, whereas os.urandom() should not block. getentropy() + is supported since Solaris 11.3. + - Issue #21167: NAN operations are now handled correctly when python is compiled with ICC even if -fp-model strict is not specified. diff --git a/Python/random.c b/Python/random.c --- a/Python/random.c +++ b/Python/random.c @@ -93,7 +93,11 @@ return 0; } -#elif HAVE_GETENTROPY +/* Issue #25003: Don' use getentropy() on Solaris (available since + * Solaris 11.3), it is blocking whereas os.urandom() should not block. */ +#elif defined(HAVE_GETENTROPY) && !defined(sun) +#define PY_GETENTROPY 1 + /* Fill buffer with size pseudo-random bytes generated by getentropy(). Return 0 on success, or raise an exception and return -1 on error. If fatal is nonzero, call Py_FatalError() instead of raising an exception @@ -333,7 +337,7 @@ #ifdef MS_WINDOWS return win32_urandom((unsigned char *)buffer, size, 1); -#elif HAVE_GETENTROPY +#elif defined(PY_GETENTROPY) return py_getentropy(buffer, size, 0); #else # ifdef __VMS @@ -396,7 +400,7 @@ (void)win32_urandom((unsigned char *)secret, secret_size, 0); #elif __VMS vms_urandom((unsigned char *)secret, secret_size, 0); -#elif HAVE_GETENTROPY +#elif defined(PY_GETENTROPY) (void)py_getentropy(secret, secret_size, 1); #else dev_urandom_noraise(secret, secret_size); @@ -412,7 +416,7 @@ CryptReleaseContext(hCryptProv, 0); hCryptProv = 0; } -#elif HAVE_GETENTROPY +#elif defined(PY_GETENTROPY) /* nothing to clean */ #else dev_urandom_close(); -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Thu Oct 1 10:02:01 2015 From: python-checkins at python.org (victor.stinner) Date: Thu, 01 Oct 2015 08:02:01 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?b?KTogTWVyZ2UgMy41?= Message-ID: <20151001080201.115149.13299@psf.io> https://hg.python.org/cpython/rev/e885f3f00256 changeset: 98457:e885f3f00256 parent: 98453:710ef035ee44 parent: 98456:8165c7460596 user: Victor Stinner date: Thu Oct 01 10:01:31 2015 +0200 summary: Merge 3.5 files: Python/random.c | 2 ++ 1 files changed, 2 insertions(+), 0 deletions(-) diff --git a/Python/random.c b/Python/random.c --- a/Python/random.c +++ b/Python/random.c @@ -111,6 +111,8 @@ #else +/* Issue #25003: Don' use getentropy() on Solaris (available since + * Solaris 11.3), it is blocking whereas os.urandom() should not block. */ #if defined(HAVE_GETRANDOM) || defined(HAVE_GETRANDOM_SYSCALL) #define PY_GETRANDOM 1 -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Thu Oct 1 10:02:01 2015 From: python-checkins at python.org (victor.stinner) Date: Thu, 01 Oct 2015 08:02:01 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzI1MDAz?= =?utf-8?q?=3A_os=2Eurandom=28=29_doesn=27t_use_getentropy=28=29_on_Solari?= =?utf-8?q?s_because?= Message-ID: <20151001080200.82662.7817@psf.io> https://hg.python.org/cpython/rev/83dc79eeaf7f changeset: 98455:83dc79eeaf7f branch: 3.4 parent: 98449:60c4fd84ef92 user: Victor Stinner date: Thu Oct 01 09:59:32 2015 +0200 summary: Issue #25003: os.urandom() doesn't use getentropy() on Solaris because getentropy() is blocking, whereas os.urandom() should not block. 
getentropy() is supported since Solaris 11.3. files: Misc/NEWS | 4 ++++ Python/random.c | 12 ++++++++---- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,6 +10,10 @@ Core and Builtins ----------------- +- Issue #25003: os.urandom() doesn't use getentropy() on Solaris because + getentropy() is blocking, whereas os.urandom() should not block. getentropy() + is supported since Solaris 11.3. + - Issue #25182: The stdprinter (used as sys.stderr before the io module is imported at startup) now uses the backslashreplace error handler. diff --git a/Python/random.c b/Python/random.c --- a/Python/random.c +++ b/Python/random.c @@ -67,7 +67,11 @@ return 0; } -#elif HAVE_GETENTROPY +/* Issue #25003: Don' use getentropy() on Solaris (available since + * Solaris 11.3), it is blocking whereas os.urandom() should not block. */ +#elif defined(HAVE_GETENTROPY) && !defined(sun) +#define PY_GETENTROPY 1 + /* Fill buffer with size pseudo-random bytes generated by getentropy(). Return 0 on success, or raise an exception and return -1 on error. @@ -275,7 +279,7 @@ #ifdef MS_WINDOWS return win32_urandom((unsigned char *)buffer, size, 1); -#elif HAVE_GETENTROPY +#elif defined(PY_GETENTROPY) return py_getentropy(buffer, size, 0); #else return dev_urandom_python((char*)buffer, size); @@ -322,7 +326,7 @@ else { #ifdef MS_WINDOWS (void)win32_urandom(secret, secret_size, 0); -#elif HAVE_GETENTROPY +#elif defined(PY_GETENTROPY) (void)py_getentropy(secret, secret_size, 1); #else dev_urandom_noraise(secret, secret_size); @@ -338,7 +342,7 @@ CryptReleaseContext(hCryptProv, 0); hCryptProv = 0; } -#elif HAVE_GETENTROPY +#elif defined(PY_GETENTROPY) /* nothing to clean */ #else dev_urandom_close(); -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Thu Oct 1 10:02:01 2015 From: python-checkins at python.org (victor.stinner) Date: Thu, 01 Oct 2015 08:02:01 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_Merge_3=2E4_=28os=2Eurandom=29?= Message-ID: <20151001080200.81627.19030@psf.io> https://hg.python.org/cpython/rev/8165c7460596 changeset: 98456:8165c7460596 branch: 3.5 parent: 98452:835085cc28cd parent: 98455:83dc79eeaf7f user: Victor Stinner date: Thu Oct 01 10:00:23 2015 +0200 summary: Merge 3.4 (os.urandom) files: Python/random.c | 2 ++ 1 files changed, 2 insertions(+), 0 deletions(-) diff --git a/Python/random.c b/Python/random.c --- a/Python/random.c +++ b/Python/random.c @@ -111,6 +111,8 @@ #else +/* Issue #25003: Don' use getentropy() on Solaris (available since + * Solaris 11.3), it is blocking whereas os.urandom() should not block. 
*/ #if defined(HAVE_GETRANDOM) || defined(HAVE_GETRANDOM_SYSCALL) #define PY_GETRANDOM 1 -- Repository URL: https://hg.python.org/cpython From solipsis at pitrou.net Thu Oct 1 10:45:15 2015 From: solipsis at pitrou.net (solipsis at pitrou.net) Date: Thu, 01 Oct 2015 08:45:15 +0000 Subject: [Python-checkins] Daily reference leaks (0f10e0b3e76d): sum=17877 Message-ID: <20151001084515.94137.1702@psf.io> results for 0f10e0b3e76d on branch "default" -------------------------------------------- test_capi leaked [1598, 1598, 1598] references, sum=4794 test_capi leaked [387, 389, 389] memory blocks, sum=1165 test_functools leaked [0, 2, 2] memory blocks, sum=4 test_threading leaked [3196, 3196, 3196] references, sum=9588 test_threading leaked [774, 776, 776] memory blocks, sum=2326 Command line was: ['./python', '-m', 'test.regrtest', '-uall', '-R', '3:3:/home/psf-users/antoine/refleaks/reflogZKhs_L', '--timeout', '7200'] From python-checkins at python.org Thu Oct 1 10:57:04 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Thu, 01 Oct 2015 08:57:04 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2325280=3A_Import_trace_messages_emitted_in_verbo?= =?utf-8?q?se_=28-v=29_mode_are_no?= Message-ID: <20151001085704.94127.94122@psf.io> https://hg.python.org/cpython/rev/e377d568928b changeset: 98460:e377d568928b parent: 98457:e885f3f00256 parent: 98459:10c13441bf8d user: Serhiy Storchaka date: Thu Oct 01 11:55:52 2015 +0300 summary: Issue #25280: Import trace messages emitted in verbose (-v) mode are no longer formatted twice. files: Lib/importlib/_bootstrap_external.py | 8 ++++---- Misc/NEWS | 3 +++ 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/Lib/importlib/_bootstrap_external.py b/Lib/importlib/_bootstrap_external.py --- a/Lib/importlib/_bootstrap_external.py +++ b/Lib/importlib/_bootstrap_external.py @@ -429,15 +429,15 @@ raw_size = data[8:12] if magic != MAGIC_NUMBER: message = 'bad magic number in {!r}: {!r}'.format(name, magic) - _bootstrap._verbose_message(message) + _bootstrap._verbose_message('{}', message) raise ImportError(message, **exc_details) elif len(raw_timestamp) != 4: message = 'reached EOF while reading timestamp in {!r}'.format(name) - _bootstrap._verbose_message(message) + _bootstrap._verbose_message('{}', message) raise EOFError(message) elif len(raw_size) != 4: message = 'reached EOF while reading size of source in {!r}'.format(name) - _bootstrap._verbose_message(message) + _bootstrap._verbose_message('{}', message) raise EOFError(message) if source_stats is not None: try: @@ -447,7 +447,7 @@ else: if _r_long(raw_timestamp) != source_mtime: message = 'bytecode is stale for {!r}'.format(name) - _bootstrap._verbose_message(message) + _bootstrap._verbose_message('{}', message) raise ImportError(message, **exc_details) try: source_size = source_stats['size'] & 0xFFFFFFFF diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,6 +10,9 @@ Core and Builtins ----------------- +- Issue #25280: Import trace messages emitted in verbose (-v) mode are no + longer formatted twice. + - Issue #25227: Optimize ASCII and latin1 encoders with the ``surrogateescape`` error handler: the encoders are now up to 3 times as fast. Initial patch written by Serhiy Storchaka. 
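The issue #25280 fix above matters because the already-formatted message was being passed as the format string to the str.format()-based _verbose_message() helper; if the message happens to contain a brace (easy when it embeds the repr of arbitrary bytes or a file name), formatting it a second time fails. A standalone illustration with a made-up message:

    # A fully formatted message that happens to contain a brace, e.g. from
    # the repr of a bad magic number.
    message = "bad magic number in 'pkg': {!r}".format(b'{\r\n')

    try:
        # Old behaviour: the finished message was used as a format string again.
        message.format()
    except ValueError as exc:
        print('second formatting pass failed:', exc)

    # New behaviour: the message is only ever an argument to a '{}' template.
    print('{}'.format(message))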
-- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Thu Oct 1 10:57:04 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Thu, 01 Oct 2015 08:57:04 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_Issue_=2325280=3A_Import_trace_messages_emitted_in_verbose_=28?= =?utf-8?q?-v=29_mode_are_no?= Message-ID: <20151001085704.11706.18971@psf.io> https://hg.python.org/cpython/rev/10c13441bf8d changeset: 98459:10c13441bf8d branch: 3.5 parent: 98456:8165c7460596 parent: 98458:da42b38f7470 user: Serhiy Storchaka date: Thu Oct 01 11:40:22 2015 +0300 summary: Issue #25280: Import trace messages emitted in verbose (-v) mode are no longer formatted twice. files: Lib/importlib/_bootstrap_external.py | 8 +- Misc/NEWS | 3 + Python/importlib_external.h | 113 +++++++------- 3 files changed, 64 insertions(+), 60 deletions(-) diff --git a/Lib/importlib/_bootstrap_external.py b/Lib/importlib/_bootstrap_external.py --- a/Lib/importlib/_bootstrap_external.py +++ b/Lib/importlib/_bootstrap_external.py @@ -437,15 +437,15 @@ raw_size = data[8:12] if magic != MAGIC_NUMBER: message = 'bad magic number in {!r}: {!r}'.format(name, magic) - _verbose_message(message) + _verbose_message('{}', message) raise ImportError(message, **exc_details) elif len(raw_timestamp) != 4: message = 'reached EOF while reading timestamp in {!r}'.format(name) - _verbose_message(message) + _verbose_message('{}', message) raise EOFError(message) elif len(raw_size) != 4: message = 'reached EOF while reading size of source in {!r}'.format(name) - _verbose_message(message) + _verbose_message('{}', message) raise EOFError(message) if source_stats is not None: try: @@ -455,7 +455,7 @@ else: if _r_long(raw_timestamp) != source_mtime: message = 'bytecode is stale for {!r}'.format(name) - _verbose_message(message) + _verbose_message('{}', message) raise ImportError(message, **exc_details) try: source_size = source_stats['size'] & 0xFFFFFFFF diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -11,6 +11,9 @@ Core and Builtins ----------------- +- Issue #25280: Import trace messages emitted in verbose (-v) mode are no + longer formatted twice. + - Issue #25003: On Solaris 11.3 or newer, os.urandom() now uses the getrandom() function instead of the getentropy() function. 
The getentropy() function is blocking to generate very good quality entropy, os.urandom() diff --git a/Python/importlib_external.h b/Python/importlib_external.h --- a/Python/importlib_external.h +++ b/Python/importlib_external.h @@ -635,7 +635,7 @@ 95,102,105,110,100,95,109,111,100,117,108,101,95,115,104,105, 109,143,1,0,0,115,10,0,0,0,0,10,21,1,24,1, 6,1,29,1,114,130,0,0,0,99,4,0,0,0,0,0, - 0,0,11,0,0,0,19,0,0,0,67,0,0,0,115,228, + 0,0,11,0,0,0,19,0,0,0,67,0,0,0,115,240, 1,0,0,105,0,0,125,4,0,124,2,0,100,1,0,107, 9,0,114,31,0,124,2,0,124,4,0,100,2,0,60,110, 6,0,100,3,0,125,2,0,124,3,0,100,1,0,107,9, @@ -643,58 +643,59 @@ 0,100,1,0,100,5,0,133,2,0,25,125,5,0,124,0, 0,100,5,0,100,6,0,133,2,0,25,125,6,0,124,0, 0,100,6,0,100,7,0,133,2,0,25,125,7,0,124,5, - 0,116,0,0,107,3,0,114,165,0,100,8,0,106,1,0, - 124,2,0,124,5,0,131,2,0,125,8,0,116,2,0,124, - 8,0,131,1,0,1,116,3,0,124,8,0,124,4,0,141, - 1,0,130,1,0,110,113,0,116,4,0,124,6,0,131,1, - 0,100,5,0,107,3,0,114,223,0,100,9,0,106,1,0, - 124,2,0,131,1,0,125,8,0,116,2,0,124,8,0,131, - 1,0,1,116,5,0,124,8,0,131,1,0,130,1,0,110, - 55,0,116,4,0,124,7,0,131,1,0,100,5,0,107,3, - 0,114,22,1,100,10,0,106,1,0,124,2,0,131,1,0, - 125,8,0,116,2,0,124,8,0,131,1,0,1,116,5,0, - 124,8,0,131,1,0,130,1,0,124,1,0,100,1,0,107, - 9,0,114,214,1,121,20,0,116,6,0,124,1,0,100,11, - 0,25,131,1,0,125,9,0,87,110,18,0,4,116,7,0, - 107,10,0,114,74,1,1,1,1,89,110,59,0,88,116,8, - 0,124,6,0,131,1,0,124,9,0,107,3,0,114,133,1, - 100,12,0,106,1,0,124,2,0,131,1,0,125,8,0,116, - 2,0,124,8,0,131,1,0,1,116,3,0,124,8,0,124, - 4,0,141,1,0,130,1,0,121,18,0,124,1,0,100,13, - 0,25,100,14,0,64,125,10,0,87,110,18,0,4,116,7, - 0,107,10,0,114,171,1,1,1,1,89,110,43,0,88,116, - 8,0,124,7,0,131,1,0,124,10,0,107,3,0,114,214, - 1,116,3,0,100,12,0,106,1,0,124,2,0,131,1,0, - 124,4,0,141,1,0,130,1,0,124,0,0,100,7,0,100, - 1,0,133,2,0,25,83,41,15,97,122,1,0,0,86,97, - 108,105,100,97,116,101,32,116,104,101,32,104,101,97,100,101, - 114,32,111,102,32,116,104,101,32,112,97,115,115,101,100,45, - 105,110,32,98,121,116,101,99,111,100,101,32,97,103,97,105, - 110,115,116,32,115,111,117,114,99,101,95,115,116,97,116,115, - 32,40,105,102,10,32,32,32,32,103,105,118,101,110,41,32, - 97,110,100,32,114,101,116,117,114,110,105,110,103,32,116,104, - 101,32,98,121,116,101,99,111,100,101,32,116,104,97,116,32, - 99,97,110,32,98,101,32,99,111,109,112,105,108,101,100,32, - 98,121,32,99,111,109,112,105,108,101,40,41,46,10,10,32, - 32,32,32,65,108,108,32,111,116,104,101,114,32,97,114,103, - 117,109,101,110,116,115,32,97,114,101,32,117,115,101,100,32, - 116,111,32,101,110,104,97,110,99,101,32,101,114,114,111,114, - 32,114,101,112,111,114,116,105,110,103,46,10,10,32,32,32, - 32,73,109,112,111,114,116,69,114,114,111,114,32,105,115,32, - 114,97,105,115,101,100,32,119,104,101,110,32,116,104,101,32, - 109,97,103,105,99,32,110,117,109,98,101,114,32,105,115,32, - 105,110,99,111,114,114,101,99,116,32,111,114,32,116,104,101, - 32,98,121,116,101,99,111,100,101,32,105,115,10,32,32,32, - 32,102,111,117,110,100,32,116,111,32,98,101,32,115,116,97, - 108,101,46,32,69,79,70,69,114,114,111,114,32,105,115,32, - 114,97,105,115,101,100,32,119,104,101,110,32,116,104,101,32, - 100,97,116,97,32,105,115,32,102,111,117,110,100,32,116,111, - 32,98,101,10,32,32,32,32,116,114,117,110,99,97,116,101, - 100,46,10,10,32,32,32,32,78,114,106,0,0,0,122,10, - 60,98,121,116,101,99,111,100,101,62,114,35,0,0,0,114, - 12,0,0,0,233,8,0,0,0,233,12,0,0,0,122,30, - 98,97,100,32,109,97,103,105,99,32,110,117,109,98,101,114, - 
32,105,110,32,123,33,114,125,58,32,123,33,114,125,122,43, + 0,116,0,0,107,3,0,114,168,0,100,8,0,106,1,0, + 124,2,0,124,5,0,131,2,0,125,8,0,116,2,0,100, + 9,0,124,8,0,131,2,0,1,116,3,0,124,8,0,124, + 4,0,141,1,0,130,1,0,110,119,0,116,4,0,124,6, + 0,131,1,0,100,5,0,107,3,0,114,229,0,100,10,0, + 106,1,0,124,2,0,131,1,0,125,8,0,116,2,0,100, + 9,0,124,8,0,131,2,0,1,116,5,0,124,8,0,131, + 1,0,130,1,0,110,58,0,116,4,0,124,7,0,131,1, + 0,100,5,0,107,3,0,114,31,1,100,11,0,106,1,0, + 124,2,0,131,1,0,125,8,0,116,2,0,100,9,0,124, + 8,0,131,2,0,1,116,5,0,124,8,0,131,1,0,130, + 1,0,124,1,0,100,1,0,107,9,0,114,226,1,121,20, + 0,116,6,0,124,1,0,100,12,0,25,131,1,0,125,9, + 0,87,110,18,0,4,116,7,0,107,10,0,114,83,1,1, + 1,1,89,110,62,0,88,116,8,0,124,6,0,131,1,0, + 124,9,0,107,3,0,114,145,1,100,13,0,106,1,0,124, + 2,0,131,1,0,125,8,0,116,2,0,100,9,0,124,8, + 0,131,2,0,1,116,3,0,124,8,0,124,4,0,141,1, + 0,130,1,0,121,18,0,124,1,0,100,14,0,25,100,15, + 0,64,125,10,0,87,110,18,0,4,116,7,0,107,10,0, + 114,183,1,1,1,1,89,110,43,0,88,116,8,0,124,7, + 0,131,1,0,124,10,0,107,3,0,114,226,1,116,3,0, + 100,13,0,106,1,0,124,2,0,131,1,0,124,4,0,141, + 1,0,130,1,0,124,0,0,100,7,0,100,1,0,133,2, + 0,25,83,41,16,97,122,1,0,0,86,97,108,105,100,97, + 116,101,32,116,104,101,32,104,101,97,100,101,114,32,111,102, + 32,116,104,101,32,112,97,115,115,101,100,45,105,110,32,98, + 121,116,101,99,111,100,101,32,97,103,97,105,110,115,116,32, + 115,111,117,114,99,101,95,115,116,97,116,115,32,40,105,102, + 10,32,32,32,32,103,105,118,101,110,41,32,97,110,100,32, + 114,101,116,117,114,110,105,110,103,32,116,104,101,32,98,121, + 116,101,99,111,100,101,32,116,104,97,116,32,99,97,110,32, + 98,101,32,99,111,109,112,105,108,101,100,32,98,121,32,99, + 111,109,112,105,108,101,40,41,46,10,10,32,32,32,32,65, + 108,108,32,111,116,104,101,114,32,97,114,103,117,109,101,110, + 116,115,32,97,114,101,32,117,115,101,100,32,116,111,32,101, + 110,104,97,110,99,101,32,101,114,114,111,114,32,114,101,112, + 111,114,116,105,110,103,46,10,10,32,32,32,32,73,109,112, + 111,114,116,69,114,114,111,114,32,105,115,32,114,97,105,115, + 101,100,32,119,104,101,110,32,116,104,101,32,109,97,103,105, + 99,32,110,117,109,98,101,114,32,105,115,32,105,110,99,111, + 114,114,101,99,116,32,111,114,32,116,104,101,32,98,121,116, + 101,99,111,100,101,32,105,115,10,32,32,32,32,102,111,117, + 110,100,32,116,111,32,98,101,32,115,116,97,108,101,46,32, + 69,79,70,69,114,114,111,114,32,105,115,32,114,97,105,115, + 101,100,32,119,104,101,110,32,116,104,101,32,100,97,116,97, + 32,105,115,32,102,111,117,110,100,32,116,111,32,98,101,10, + 32,32,32,32,116,114,117,110,99,97,116,101,100,46,10,10, + 32,32,32,32,78,114,106,0,0,0,122,10,60,98,121,116, + 101,99,111,100,101,62,114,35,0,0,0,114,12,0,0,0, + 233,8,0,0,0,233,12,0,0,0,122,30,98,97,100,32, + 109,97,103,105,99,32,110,117,109,98,101,114,32,105,110,32, + 123,33,114,125,58,32,123,33,114,125,122,2,123,125,122,43, 114,101,97,99,104,101,100,32,69,79,70,32,119,104,105,108, 101,32,114,101,97,100,105,110,103,32,116,105,109,101,115,116, 97,109,112,32,105,110,32,123,33,114,125,122,48,114,101,97, @@ -719,9 +720,9 @@ 95,118,97,108,105,100,97,116,101,95,98,121,116,101,99,111, 100,101,95,104,101,97,100,101,114,160,1,0,0,115,76,0, 0,0,0,11,6,1,12,1,13,3,6,1,12,1,10,1, - 16,1,16,1,16,1,12,1,18,1,10,1,18,1,18,1, - 15,1,10,1,15,1,18,1,15,1,10,1,12,1,12,1, - 3,1,20,1,13,1,5,2,18,1,15,1,10,1,15,1, + 16,1,16,1,16,1,12,1,18,1,13,1,18,1,18,1, + 15,1,13,1,15,1,18,1,15,1,13,1,12,1,12,1, + 3,1,20,1,13,1,5,2,18,1,15,1,13,1,15,1, 
3,1,18,1,13,1,5,2,18,1,15,1,9,1,114,141, 0,0,0,99,4,0,0,0,0,0,0,0,5,0,0,0, 6,0,0,0,67,0,0,0,115,112,0,0,0,116,0,0, -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Thu Oct 1 10:57:05 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Thu, 01 Oct 2015 08:57:05 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzI1Mjgw?= =?utf-8?q?=3A_Import_trace_messages_emitted_in_verbose_=28-v=29_mode_are_?= =?utf-8?q?no?= Message-ID: <20151001085703.11710.40499@psf.io> https://hg.python.org/cpython/rev/da42b38f7470 changeset: 98458:da42b38f7470 branch: 3.4 parent: 98455:83dc79eeaf7f user: Serhiy Storchaka date: Thu Oct 01 11:08:50 2015 +0300 summary: Issue #25280: Import trace messages emitted in verbose (-v) mode are no longer formatted twice. files: Lib/importlib/_bootstrap.py | 8 +- Misc/NEWS | 3 + Python/importlib.h | 115 ++++++++++++----------- 3 files changed, 65 insertions(+), 61 deletions(-) diff --git a/Lib/importlib/_bootstrap.py b/Lib/importlib/_bootstrap.py --- a/Lib/importlib/_bootstrap.py +++ b/Lib/importlib/_bootstrap.py @@ -620,15 +620,15 @@ raw_size = data[8:12] if magic != MAGIC_NUMBER: message = 'bad magic number in {!r}: {!r}'.format(name, magic) - _verbose_message(message) + _verbose_message('{}', message) raise ImportError(message, **exc_details) elif len(raw_timestamp) != 4: message = 'reached EOF while reading timestamp in {!r}'.format(name) - _verbose_message(message) + _verbose_message('{}', message) raise EOFError(message) elif len(raw_size) != 4: message = 'reached EOF while reading size of source in {!r}'.format(name) - _verbose_message(message) + _verbose_message('{}', message) raise EOFError(message) if source_stats is not None: try: @@ -638,7 +638,7 @@ else: if _r_long(raw_timestamp) != source_mtime: message = 'bytecode is stale for {!r}'.format(name) - _verbose_message(message) + _verbose_message('{}', message) raise ImportError(message, **exc_details) try: source_size = source_stats['size'] & 0xFFFFFFFF diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,6 +10,9 @@ Core and Builtins ----------------- +- Issue #25280: Import trace messages emitted in verbose (-v) mode are no + longer formatted twice. + - Issue #25003: os.urandom() doesn't use getentropy() on Solaris because getentropy() is blocking, whereas os.urandom() should not block. getentropy() is supported since Solaris 11.3. diff --git a/Python/importlib.h b/Python/importlib.h --- a/Python/importlib.h +++ b/Python/importlib.h [stripped] -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Thu Oct 1 12:28:49 2015 From: python-checkins at python.org (vinay.sajip) Date: Thu, 01 Oct 2015 10:28:49 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogQ2xvc2VzICMyNTE4?= =?utf-8?q?5=3A_Use_UTF-8_encoding_when_reading_pyvenv=2Ecfg=2E?= Message-ID: <20151001102849.98350.41256@psf.io> https://hg.python.org/cpython/rev/d927c6cae05f changeset: 98461:d927c6cae05f branch: 3.4 parent: 98458:da42b38f7470 user: Vinay Sajip date: Thu Oct 01 11:27:00 2015 +0100 summary: Closes #25185: Use UTF-8 encoding when reading pyvenv.cfg. 
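The site.py fix shown next matters because the venv module writes pyvenv.cfg as UTF-8, so a home path containing non-ASCII characters could fail to parse under a different locale encoding. A self-contained sketch of the read side; the regular expression is a simplified stand-in for site.CONFIG_LINE and the sample path is invented:

    import os
    import re
    import tempfile

    # venv writes pyvenv.cfg as UTF-8, so read it back with the same encoding
    # regardless of the locale.
    cfg = os.path.join(tempfile.mkdtemp(), 'pyvenv.cfg')
    with open(cfg, 'w', encoding='utf-8') as f:
        f.write('home = /home/r\u00e9mi/cpython\n'
                'include-system-site-packages = false\n')

    config_line = re.compile(r'(?P<key>[^=\s]+)\s*=\s*(?P<value>.*)')
    with open(cfg, encoding='utf-8') as f:          # the fix: explicit encoding
        for line in f:
            m = config_line.match(line.strip())
            if m:
                print(m.group('key'), '=', m.group('value'))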
files: Lib/site.py | 4 +++- 1 files changed, 3 insertions(+), 1 deletions(-) diff --git a/Lib/site.py b/Lib/site.py --- a/Lib/site.py +++ b/Lib/site.py @@ -472,7 +472,9 @@ config_line = re.compile(CONFIG_LINE) virtual_conf = candidate_confs[0] system_site = "true" - with open(virtual_conf) as f: + # Issue 25185: Use UTF-8, as that's what the venv module uses when + # writing the file. + with open(virtual_conf, encoding='utf-8') as f: for line in f: line = line.strip() m = config_line.match(line) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Thu Oct 1 12:28:51 2015 From: python-checkins at python.org (vinay.sajip) Date: Thu, 01 Oct 2015 10:28:51 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Closes_=2325185=3A_merged_fix_from_3=2E5=2E?= Message-ID: <20151001102849.98376.95963@psf.io> https://hg.python.org/cpython/rev/69dd42cef190 changeset: 98463:69dd42cef190 parent: 98460:e377d568928b parent: 98462:eaf9220bdee3 user: Vinay Sajip date: Thu Oct 01 11:28:34 2015 +0100 summary: Closes #25185: merged fix from 3.5. files: Lib/site.py | 4 +++- 1 files changed, 3 insertions(+), 1 deletions(-) diff --git a/Lib/site.py b/Lib/site.py --- a/Lib/site.py +++ b/Lib/site.py @@ -465,7 +465,9 @@ config_line = re.compile(CONFIG_LINE) virtual_conf = candidate_confs[0] system_site = "true" - with open(virtual_conf) as f: + # Issue 25185: Use UTF-8, as that's what the venv module uses when + # writing the file. + with open(virtual_conf, encoding='utf-8') as f: for line in f: line = line.strip() m = config_line.match(line) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Thu Oct 1 12:28:51 2015 From: python-checkins at python.org (vinay.sajip) Date: Thu, 01 Oct 2015 10:28:51 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_Closes_=2325185=3A_merged_fix_from_3=2E4=2E?= Message-ID: <20151001102849.81633.31210@psf.io> https://hg.python.org/cpython/rev/eaf9220bdee3 changeset: 98462:eaf9220bdee3 branch: 3.5 parent: 98459:10c13441bf8d parent: 98461:d927c6cae05f user: Vinay Sajip date: Thu Oct 01 11:27:57 2015 +0100 summary: Closes #25185: merged fix from 3.4. files: Lib/site.py | 4 +++- 1 files changed, 3 insertions(+), 1 deletions(-) diff --git a/Lib/site.py b/Lib/site.py --- a/Lib/site.py +++ b/Lib/site.py @@ -465,7 +465,9 @@ config_line = re.compile(CONFIG_LINE) virtual_conf = candidate_confs[0] system_site = "true" - with open(virtual_conf) as f: + # Issue 25185: Use UTF-8, as that's what the venv module uses when + # writing the file. + with open(virtual_conf, encoding='utf-8') as f: for line in f: line = line.strip() m = config_line.match(line) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Thu Oct 1 13:17:14 2015 From: python-checkins at python.org (victor.stinner) Date: Thu, 01 Oct 2015 11:17:14 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2325277=3A_Set_a_ti?= =?utf-8?q?meout_of_10_minutes_in_test=5Feintr_using_faulthandler_to?= Message-ID: <20151001111713.98374.93926@psf.io> https://hg.python.org/cpython/rev/10efb1797e7b changeset: 98464:10efb1797e7b user: Victor Stinner date: Thu Oct 01 13:16:43 2015 +0200 summary: Issue #25277: Set a timeout of 10 minutes in test_eintr using faulthandler to try to debug a hang on the FreeBSD 9 buildbot. Run also eintr_tester.py with python "-u" command line option to try to get the full output on hang/crash. 
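A trimmed-down version of the watchdog pattern the eintr_tester change adds, with the timeout shortened from the real 10 * 60 seconds so the sketch finishes quickly:

    import faulthandler
    import time

    # Arm the watchdog: if the process is still alive after the timeout, dump
    # the traceback of every thread and exit.
    faulthandler.dump_traceback_later(5, exit=True)
    try:
        time.sleep(1)        # stands in for the signal-heavy test body
    finally:
        faulthandler.cancel_dump_traceback_later()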
files: Lib/test/eintrdata/eintr_tester.py | 5 +++++ Lib/test/test_eintr.py | 3 ++- 2 files changed, 7 insertions(+), 1 deletions(-) diff --git a/Lib/test/eintrdata/eintr_tester.py b/Lib/test/eintrdata/eintr_tester.py --- a/Lib/test/eintrdata/eintr_tester.py +++ b/Lib/test/eintrdata/eintr_tester.py @@ -9,6 +9,7 @@ """ import contextlib +import faulthandler import io import os import select @@ -50,6 +51,9 @@ signal.setitimer(signal.ITIMER_REAL, cls.signal_delay, cls.signal_period) + # Issue #25277: Use faulthandler to try to debug a hang on FreeBSD + faulthandler.dump_traceback_later(10 * 60, exit=True) + @classmethod def stop_alarm(cls): signal.setitimer(signal.ITIMER_REAL, 0, 0) @@ -58,6 +62,7 @@ def tearDownClass(cls): cls.stop_alarm() signal.signal(signal.SIGALRM, cls.orig_handler) + faulthandler.cancel_dump_traceback_later() @classmethod def _sleep(cls): diff --git a/Lib/test/test_eintr.py b/Lib/test/test_eintr.py --- a/Lib/test/test_eintr.py +++ b/Lib/test/test_eintr.py @@ -16,7 +16,8 @@ # Run the tester in a sub-process, to make sure there is only one # thread (for reliable signal delivery). tester = support.findfile("eintr_tester.py", subdir="eintrdata") - script_helper.assert_python_ok(tester) + # use -u to try to get the full output if the test hangs or crash + script_helper.assert_python_ok("-u", tester) if __name__ == "__main__": -- Repository URL: https://hg.python.org/cpython From lp_benchmark_robot at intel.com Thu Oct 1 16:47:42 2015 From: lp_benchmark_robot at intel.com (lp_benchmark_robot at intel.com) Date: Thu, 1 Oct 2015 15:47:42 +0100 Subject: [Python-checkins] Benchmark Results for Python Default 2015-10-01 Message-ID: <284e6abe-30cf-455f-8d9e-0581dcb6180b@irsmsx153.ger.corp.intel.com> Results for project python_default-nightly, build date 2015-10-01 03:02:02 commit: 0f10e0b3e76d2ec4958d1260eef91055a096d990 revision date: 2015-09-30 22:53:09 +0000 environment: Haswell-EP cpu: Intel(R) Xeon(R) CPU E5-2699 v3 @ 2.30GHz 2x18 cores, stepping 2, LLC 45 MB mem: 128 GB os: CentOS 7.1 kernel: Linux 3.10.0-229.4.2.el7.x86_64 Baseline results were generated using release v3.4.3, with hash b4cbecbc0781e89a309d03b60a1f75f8499250e6 from 2015-02-25 12:15:33+00:00 ------------------------------------------------------------------------------------------ benchmark relative change since change since current rev with std_dev* last run v3.4.3 regrtest PGO ------------------------------------------------------------------------------------------ :-) django_v2 0.23373% 1.27877% 8.62931% 16.74727% :-| pybench 0.14534% -0.15742% -1.72834% 8.99633% :-( regex_v8 2.70946% 1.05918% -3.98469% 3.95760% :-| nbody 0.13576% 0.92277% 0.13154% 7.82985% :-| json_dump_v2 0.20029% 0.43084% -1.07688% 9.96645% :-| normal_startup 0.73680% -0.17347% 0.43557% 4.99884% ------------------------------------------------------------------------------------------ Note: Benchmark results are measured in seconds. * Relative Standard Deviation (Standard Deviation/Average) Our lab does a nightly source pull and build of the Python project and measures performance changes against the previous stable version and the previous nightly measurement. This is provided as a service to the community so that quality issues with current hardware can be identified quickly. Intel technologies' features and benefits depend on system configuration and may require enabled hardware, software or service activation. Performance varies depending on system configuration. 
From lp_benchmark_robot at intel.com Thu Oct 1 16:48:09 2015 From: lp_benchmark_robot at intel.com (lp_benchmark_robot at intel.com) Date: Thu, 1 Oct 2015 15:48:09 +0100 Subject: [Python-checkins] Benchmark Results for Python 2.7 2015-10-01 Message-ID: <1da2a71b-25ce-47da-b41e-10cbe2cdb2d9@irsmsx153.ger.corp.intel.com> No new revisions. Here are the previous results: Results for project python_2.7-nightly, build date 2015-10-01 03:44:50 commit: 8274fc521e69cb6baf2932ffd3d81bef4a45b5e8 revision date: 2015-09-29 20:51:27 +0000 environment: Haswell-EP cpu: Intel(R) Xeon(R) CPU E5-2699 v3 @ 2.30GHz 2x18 cores, stepping 2, LLC 45 MB mem: 128 GB os: CentOS 7.1 kernel: Linux 3.10.0-229.4.2.el7.x86_64 Baseline results were generated using release v2.7.10, with hash 15c95b7d81dcf821daade360741e00714667653f from 2015-05-23 16:02:14+00:00 ------------------------------------------------------------------------------------------ benchmark relative change since change since current rev with std_dev* last run v2.7.10 regrtest PGO ------------------------------------------------------------------------------------------ :-) django_v2 0.37820% -0.19226% 4.02348% 9.24847% :-) pybench 0.18635% -0.18920% 6.61514% 7.24446% :-| regex_v8 1.06512% -0.76123% -1.89789% 8.19914% :-) nbody 0.11606% -0.51994% 8.62419% 3.90676% :-) json_dump_v2 0.28493% -0.97989% 3.44541% 12.26374% :-| normal_startup 1.57922% 0.15100% -1.28399% 2.33192% :-| ssbench 0.43283% 0.56654% 1.58202% 2.52947% ------------------------------------------------------------------------------------------ Note: Benchmark results for ssbench are measured in requests/second while all other are measured in seconds. * Relative Standard Deviation (Standard Deviation/Average) Our lab does a nightly source pull and build of the Python project and measures performance changes against the previous stable version and the previous nightly measurement. This is provided as a service to the community so that quality issues with current hardware can be identified quickly. Intel technologies' features and benefits depend on system configuration and may require enabled hardware, software or service activation. Performance varies depending on system configuration. From python-checkins at python.org Thu Oct 1 21:39:51 2015 From: python-checkins at python.org (vinay.sajip) Date: Thu, 01 Oct 2015 19:39:51 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogRml4ZXMgIzI1MDk3?= =?utf-8?q?=3A_Windows_test_is_skipped_if_there_are_insufficient_privilege?= =?utf-8?q?s=2C?= Message-ID: <20151001193949.81647.28965@psf.io> https://hg.python.org/cpython/rev/72c57c120c19 changeset: 98465:72c57c120c19 branch: 3.4 parent: 98461:d927c6cae05f user: Vinay Sajip date: Thu Oct 01 20:37:54 2015 +0100 summary: Fixes #25097: Windows test is skipped if there are insufficient privileges, rather than failing. 
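The same idea in a portable form: when a test cannot acquire a privileged resource, it converts the failure into a skip at run time rather than reporting an error. The file path below is a made-up stand-in for the Windows event log that NTEventLogHandler registers itself with:

    import unittest

    class EventLogHandlerTest(unittest.TestCase):
        def test_handler_needs_privileges(self):
            try:
                # Hypothetical privileged resource.
                handle = open('/var/log/privileged.log', 'a')
            except PermissionError:
                raise unittest.SkipTest('Insufficient privileges to run test')
            handle.close()

    if __name__ == '__main__':
        unittest.main()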
files: Lib/test/test_logging.py | 20 +++++++++++--------- 1 files changed, 11 insertions(+), 9 deletions(-) diff --git a/Lib/test/test_logging.py b/Lib/test/test_logging.py --- a/Lib/test/test_logging.py +++ b/Lib/test/test_logging.py @@ -65,14 +65,10 @@ except ImportError: threading = None try: - import win32evtlog + import win32evtlog, win32evtlogutil, pywintypes except ImportError: - win32evtlog = None -try: - import win32evtlogutil -except ImportError: - win32evtlogutil = None - win32evtlog = None + win32evtlog = win32evtlogutil = pywintypes = None + try: import zlib except ImportError: @@ -4098,13 +4094,19 @@ setattr(TimedRotatingFileHandlerTest, "test_compute_rollover_%s" % when, test_compute_rollover) - at unittest.skipUnless(win32evtlog, 'win32evtlog/win32evtlogutil required for this test.') + at unittest.skipUnless(win32evtlog, 'win32evtlog/win32evtlogutil/pywintypes required for this test.') class NTEventLogHandlerTest(BaseTest): def test_basic(self): logtype = 'Application' elh = win32evtlog.OpenEventLog(None, logtype) num_recs = win32evtlog.GetNumberOfEventLogRecords(elh) - h = logging.handlers.NTEventLogHandler('test_logging') + + try: + h = logging.handlers.NTEventLogHandler('test_logging') + except pywintypes.error as e: + if e[0] == 5: # access denied + raise unittest.SkipTest('Insufficient privileges to run test') + r = logging.makeLogRecord({'msg': 'Test Log Message'}) h.handle(r) h.close() -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Thu Oct 1 21:39:51 2015 From: python-checkins at python.org (vinay.sajip) Date: Thu, 01 Oct 2015 19:39:51 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_Fixes_=2325097=3A_Merged_fi_from_3=2E4=2E?= Message-ID: <20151001193949.82638.92770@psf.io> https://hg.python.org/cpython/rev/b54528d8d8c3 changeset: 98466:b54528d8d8c3 branch: 3.5 parent: 98462:eaf9220bdee3 parent: 98465:72c57c120c19 user: Vinay Sajip date: Thu Oct 01 20:38:53 2015 +0100 summary: Fixes #25097: Merged fi from 3.4. 
files: Lib/test/test_logging.py | 20 +++++++++++--------- 1 files changed, 11 insertions(+), 9 deletions(-) diff --git a/Lib/test/test_logging.py b/Lib/test/test_logging.py --- a/Lib/test/test_logging.py +++ b/Lib/test/test_logging.py @@ -58,14 +58,10 @@ except ImportError: threading = None try: - import win32evtlog + import win32evtlog, win32evtlogutil, pywintypes except ImportError: - win32evtlog = None -try: - import win32evtlogutil -except ImportError: - win32evtlogutil = None - win32evtlog = None + win32evtlog = win32evtlogutil = pywintypes = None + try: import zlib except ImportError: @@ -4128,13 +4124,19 @@ setattr(TimedRotatingFileHandlerTest, "test_compute_rollover_%s" % when, test_compute_rollover) - at unittest.skipUnless(win32evtlog, 'win32evtlog/win32evtlogutil required for this test.') + at unittest.skipUnless(win32evtlog, 'win32evtlog/win32evtlogutil/pywintypes required for this test.') class NTEventLogHandlerTest(BaseTest): def test_basic(self): logtype = 'Application' elh = win32evtlog.OpenEventLog(None, logtype) num_recs = win32evtlog.GetNumberOfEventLogRecords(elh) - h = logging.handlers.NTEventLogHandler('test_logging') + + try: + h = logging.handlers.NTEventLogHandler('test_logging') + except pywintypes.error as e: + if e[0] == 5: # access denied + raise unittest.SkipTest('Insufficient privileges to run test') + r = logging.makeLogRecord({'msg': 'Test Log Message'}) h.handle(r) h.close() -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Thu Oct 1 21:39:52 2015 From: python-checkins at python.org (vinay.sajip) Date: Thu, 01 Oct 2015 19:39:52 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Fixes_=2325097=3A_Merged_fix_from_3=2E5=2E?= Message-ID: <20151001193949.9951.91082@psf.io> https://hg.python.org/cpython/rev/757baaedc043 changeset: 98467:757baaedc043 parent: 98464:10efb1797e7b parent: 98466:b54528d8d8c3 user: Vinay Sajip date: Thu Oct 01 20:39:30 2015 +0100 summary: Fixes #25097: Merged fix from 3.5. 
files: Lib/test/test_logging.py | 20 +++++++++++--------- 1 files changed, 11 insertions(+), 9 deletions(-) diff --git a/Lib/test/test_logging.py b/Lib/test/test_logging.py --- a/Lib/test/test_logging.py +++ b/Lib/test/test_logging.py @@ -58,14 +58,10 @@ except ImportError: threading = None try: - import win32evtlog + import win32evtlog, win32evtlogutil, pywintypes except ImportError: - win32evtlog = None -try: - import win32evtlogutil -except ImportError: - win32evtlogutil = None - win32evtlog = None + win32evtlog = win32evtlogutil = pywintypes = None + try: import zlib except ImportError: @@ -4128,13 +4124,19 @@ setattr(TimedRotatingFileHandlerTest, "test_compute_rollover_%s" % when, test_compute_rollover) - at unittest.skipUnless(win32evtlog, 'win32evtlog/win32evtlogutil required for this test.') + at unittest.skipUnless(win32evtlog, 'win32evtlog/win32evtlogutil/pywintypes required for this test.') class NTEventLogHandlerTest(BaseTest): def test_basic(self): logtype = 'Application' elh = win32evtlog.OpenEventLog(None, logtype) num_recs = win32evtlog.GetNumberOfEventLogRecords(elh) - h = logging.handlers.NTEventLogHandler('test_logging') + + try: + h = logging.handlers.NTEventLogHandler('test_logging') + except pywintypes.error as e: + if e[0] == 5: # access denied + raise unittest.SkipTest('Insufficient privileges to run test') + r = logging.makeLogRecord({'msg': 'Test Log Message'}) h.handle(r) h.close() -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Thu Oct 1 21:55:18 2015 From: python-checkins at python.org (vinay.sajip) Date: Thu, 01 Oct 2015 19:55:18 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Closes_=2324884=3A_refacto?= =?utf-8?q?red_WatchedFileHandler_file_reopening_into_a_separate?= Message-ID: <20151001195501.94115.96069@psf.io> https://hg.python.org/cpython/rev/6d61b057c375 changeset: 98468:6d61b057c375 user: Vinay Sajip date: Thu Oct 01 20:54:41 2015 +0100 summary: Closes #24884: refactored WatchedFileHandler file reopening into a separate method, based on a suggestion and patch by Marian Horban. files: Doc/library/logging.handlers.rst | 12 +++++++++--- Lib/logging/handlers.py | 15 ++++++++++++--- 2 files changed, 21 insertions(+), 6 deletions(-) diff --git a/Doc/library/logging.handlers.rst b/Doc/library/logging.handlers.rst --- a/Doc/library/logging.handlers.rst +++ b/Doc/library/logging.handlers.rst @@ -162,11 +162,17 @@ first call to :meth:`emit`. By default, the file grows indefinitely. + .. method:: reopenIfNeeded() + + Checks to see if the file has changed. If it has, the existing stream is + flushed and closed and the file opened again, typically as a precursor to + outputting the record to the file. + + .. method:: emit(record) - Outputs the record to the file, but first checks to see if the file has - changed. If it has, the existing stream is flushed and closed and the - file opened again, before outputting the record to the file. + Outputs the record to the file, but first calls :meth:`reopenIfNeeded` to + reopen the file if it has changed. .. _base-rotating-handler: diff --git a/Lib/logging/handlers.py b/Lib/logging/handlers.py --- a/Lib/logging/handlers.py +++ b/Lib/logging/handlers.py @@ -440,11 +440,11 @@ sres = os.fstat(self.stream.fileno()) self.dev, self.ino = sres[ST_DEV], sres[ST_INO] - def emit(self, record): + def reopenIfNeeded(self): """ - Emit a record. + Reopen log file if needed. 
- First check if the underlying file has changed, and if it + Checks if the underlying file has changed, and if it has, close the old stream and reopen the file to get the current stream. """ @@ -467,6 +467,15 @@ # open a new file handle and get new stat info from that fd self.stream = self._open() self._statstream() + + def emit(self, record): + """ + Emit a record. + + If underlying file has changed, reopen the file before emitting the + record to it. + """ + self.reopenIfNeeded() logging.FileHandler.emit(self, record) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Thu Oct 1 23:20:08 2015 From: python-checkins at python.org (victor.stinner) Date: Thu, 01 Oct 2015 21:20:08 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Make_=5FPyUnicode=5FTransl?= =?utf-8?q?ateCharmap=28=29_symbol_private?= Message-ID: <20151001212007.31179.19998@psf.io> https://hg.python.org/cpython/rev/3bcf60b12094 changeset: 98471:3bcf60b12094 user: Victor Stinner date: Thu Oct 01 22:07:32 2015 +0200 summary: Make _PyUnicode_TranslateCharmap() symbol private unicodeobject.h exposes PyUnicode_TranslateCharmap() and PyUnicode_Translate(). files: Objects/unicodeobject.c | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Objects/unicodeobject.c b/Objects/unicodeobject.c --- a/Objects/unicodeobject.c +++ b/Objects/unicodeobject.c @@ -8683,7 +8683,7 @@ return res; } -PyObject * +static PyObject * _PyUnicode_TranslateCharmap(PyObject *input, PyObject *mapping, const char *errors) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Thu Oct 1 23:20:08 2015 From: python-checkins at python.org (victor.stinner) Date: Thu, 01 Oct 2015 21:20:08 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Update_importlib=5Fexterna?= =?utf-8?b?bC5o?= Message-ID: <20151001212007.16569.68394@psf.io> https://hg.python.org/cpython/rev/d3c6c1ff35aa changeset: 98470:d3c6c1ff35aa user: Victor Stinner date: Thu Oct 01 22:06:54 2015 +0200 summary: Update importlib_external.h files: Python/importlib_external.h | 113 ++++++++++++----------- 1 files changed, 57 insertions(+), 56 deletions(-) diff --git a/Python/importlib_external.h b/Python/importlib_external.h --- a/Python/importlib_external.h +++ b/Python/importlib_external.h @@ -613,7 +613,7 @@ 95,102,105,110,100,95,109,111,100,117,108,101,95,115,104,105, 109,135,1,0,0,115,10,0,0,0,0,10,21,1,24,1, 6,1,29,1,114,123,0,0,0,99,4,0,0,0,0,0, - 0,0,11,0,0,0,19,0,0,0,67,0,0,0,115,240, + 0,0,11,0,0,0,19,0,0,0,67,0,0,0,115,252, 1,0,0,105,0,0,125,4,0,124,2,0,100,1,0,107, 9,0,114,31,0,124,2,0,124,4,0,100,2,0,60,110, 6,0,100,3,0,125,2,0,124,3,0,100,1,0,107,9, @@ -621,59 +621,60 @@ 0,100,1,0,100,5,0,133,2,0,25,125,5,0,124,0, 0,100,5,0,100,6,0,133,2,0,25,125,6,0,124,0, 0,100,6,0,100,7,0,133,2,0,25,125,7,0,124,5, - 0,116,0,0,107,3,0,114,168,0,100,8,0,106,1,0, + 0,116,0,0,107,3,0,114,171,0,100,8,0,106,1,0, 124,2,0,124,5,0,131,2,0,125,8,0,116,2,0,106, - 3,0,124,8,0,131,1,0,1,116,4,0,124,8,0,124, - 4,0,141,1,0,130,1,0,110,119,0,116,5,0,124,6, - 0,131,1,0,100,5,0,107,3,0,114,229,0,100,9,0, - 106,1,0,124,2,0,131,1,0,125,8,0,116,2,0,106, - 3,0,124,8,0,131,1,0,1,116,6,0,124,8,0,131, - 1,0,130,1,0,110,58,0,116,5,0,124,7,0,131,1, - 0,100,5,0,107,3,0,114,31,1,100,10,0,106,1,0, - 124,2,0,131,1,0,125,8,0,116,2,0,106,3,0,124, - 8,0,131,1,0,1,116,6,0,124,8,0,131,1,0,130, - 1,0,124,1,0,100,1,0,107,9,0,114,226,1,121,20, - 0,116,7,0,124,1,0,100,11,0,25,131,1,0,125,9, - 0,87,110,18,0,4,116,8,0,107,10,0,114,83,1,1, - 
1,1,89,110,62,0,88,116,9,0,124,6,0,131,1,0, - 124,9,0,107,3,0,114,145,1,100,12,0,106,1,0,124, - 2,0,131,1,0,125,8,0,116,2,0,106,3,0,124,8, - 0,131,1,0,1,116,4,0,124,8,0,124,4,0,141,1, - 0,130,1,0,121,18,0,124,1,0,100,13,0,25,100,14, - 0,64,125,10,0,87,110,18,0,4,116,8,0,107,10,0, - 114,183,1,1,1,1,89,110,43,0,88,116,9,0,124,7, - 0,131,1,0,124,10,0,107,3,0,114,226,1,116,4,0, - 100,12,0,106,1,0,124,2,0,131,1,0,124,4,0,141, - 1,0,130,1,0,124,0,0,100,7,0,100,1,0,133,2, - 0,25,83,41,15,97,122,1,0,0,86,97,108,105,100,97, - 116,101,32,116,104,101,32,104,101,97,100,101,114,32,111,102, - 32,116,104,101,32,112,97,115,115,101,100,45,105,110,32,98, - 121,116,101,99,111,100,101,32,97,103,97,105,110,115,116,32, - 115,111,117,114,99,101,95,115,116,97,116,115,32,40,105,102, - 10,32,32,32,32,103,105,118,101,110,41,32,97,110,100,32, - 114,101,116,117,114,110,105,110,103,32,116,104,101,32,98,121, - 116,101,99,111,100,101,32,116,104,97,116,32,99,97,110,32, - 98,101,32,99,111,109,112,105,108,101,100,32,98,121,32,99, - 111,109,112,105,108,101,40,41,46,10,10,32,32,32,32,65, - 108,108,32,111,116,104,101,114,32,97,114,103,117,109,101,110, - 116,115,32,97,114,101,32,117,115,101,100,32,116,111,32,101, - 110,104,97,110,99,101,32,101,114,114,111,114,32,114,101,112, - 111,114,116,105,110,103,46,10,10,32,32,32,32,73,109,112, - 111,114,116,69,114,114,111,114,32,105,115,32,114,97,105,115, - 101,100,32,119,104,101,110,32,116,104,101,32,109,97,103,105, - 99,32,110,117,109,98,101,114,32,105,115,32,105,110,99,111, - 114,114,101,99,116,32,111,114,32,116,104,101,32,98,121,116, - 101,99,111,100,101,32,105,115,10,32,32,32,32,102,111,117, - 110,100,32,116,111,32,98,101,32,115,116,97,108,101,46,32, - 69,79,70,69,114,114,111,114,32,105,115,32,114,97,105,115, - 101,100,32,119,104,101,110,32,116,104,101,32,100,97,116,97, - 32,105,115,32,102,111,117,110,100,32,116,111,32,98,101,10, - 32,32,32,32,116,114,117,110,99,97,116,101,100,46,10,10, - 32,32,32,32,78,114,98,0,0,0,122,10,60,98,121,116, - 101,99,111,100,101,62,114,35,0,0,0,114,12,0,0,0, - 233,8,0,0,0,233,12,0,0,0,122,30,98,97,100,32, - 109,97,103,105,99,32,110,117,109,98,101,114,32,105,110,32, - 123,33,114,125,58,32,123,33,114,125,122,43,114,101,97,99, + 3,0,100,9,0,124,8,0,131,2,0,1,116,4,0,124, + 8,0,124,4,0,141,1,0,130,1,0,110,125,0,116,5, + 0,124,6,0,131,1,0,100,5,0,107,3,0,114,235,0, + 100,10,0,106,1,0,124,2,0,131,1,0,125,8,0,116, + 2,0,106,3,0,100,9,0,124,8,0,131,2,0,1,116, + 6,0,124,8,0,131,1,0,130,1,0,110,61,0,116,5, + 0,124,7,0,131,1,0,100,5,0,107,3,0,114,40,1, + 100,11,0,106,1,0,124,2,0,131,1,0,125,8,0,116, + 2,0,106,3,0,100,9,0,124,8,0,131,2,0,1,116, + 6,0,124,8,0,131,1,0,130,1,0,124,1,0,100,1, + 0,107,9,0,114,238,1,121,20,0,116,7,0,124,1,0, + 100,12,0,25,131,1,0,125,9,0,87,110,18,0,4,116, + 8,0,107,10,0,114,92,1,1,1,1,89,110,65,0,88, + 116,9,0,124,6,0,131,1,0,124,9,0,107,3,0,114, + 157,1,100,13,0,106,1,0,124,2,0,131,1,0,125,8, + 0,116,2,0,106,3,0,100,9,0,124,8,0,131,2,0, + 1,116,4,0,124,8,0,124,4,0,141,1,0,130,1,0, + 121,18,0,124,1,0,100,14,0,25,100,15,0,64,125,10, + 0,87,110,18,0,4,116,8,0,107,10,0,114,195,1,1, + 1,1,89,110,43,0,88,116,9,0,124,7,0,131,1,0, + 124,10,0,107,3,0,114,238,1,116,4,0,100,13,0,106, + 1,0,124,2,0,131,1,0,124,4,0,141,1,0,130,1, + 0,124,0,0,100,7,0,100,1,0,133,2,0,25,83,41, + 16,97,122,1,0,0,86,97,108,105,100,97,116,101,32,116, + 104,101,32,104,101,97,100,101,114,32,111,102,32,116,104,101, + 32,112,97,115,115,101,100,45,105,110,32,98,121,116,101,99, + 111,100,101,32,97,103,97,105,110,115,116,32,115,111,117,114, + 
99,101,95,115,116,97,116,115,32,40,105,102,10,32,32,32, + 32,103,105,118,101,110,41,32,97,110,100,32,114,101,116,117, + 114,110,105,110,103,32,116,104,101,32,98,121,116,101,99,111, + 100,101,32,116,104,97,116,32,99,97,110,32,98,101,32,99, + 111,109,112,105,108,101,100,32,98,121,32,99,111,109,112,105, + 108,101,40,41,46,10,10,32,32,32,32,65,108,108,32,111, + 116,104,101,114,32,97,114,103,117,109,101,110,116,115,32,97, + 114,101,32,117,115,101,100,32,116,111,32,101,110,104,97,110, + 99,101,32,101,114,114,111,114,32,114,101,112,111,114,116,105, + 110,103,46,10,10,32,32,32,32,73,109,112,111,114,116,69, + 114,114,111,114,32,105,115,32,114,97,105,115,101,100,32,119, + 104,101,110,32,116,104,101,32,109,97,103,105,99,32,110,117, + 109,98,101,114,32,105,115,32,105,110,99,111,114,114,101,99, + 116,32,111,114,32,116,104,101,32,98,121,116,101,99,111,100, + 101,32,105,115,10,32,32,32,32,102,111,117,110,100,32,116, + 111,32,98,101,32,115,116,97,108,101,46,32,69,79,70,69, + 114,114,111,114,32,105,115,32,114,97,105,115,101,100,32,119, + 104,101,110,32,116,104,101,32,100,97,116,97,32,105,115,32, + 102,111,117,110,100,32,116,111,32,98,101,10,32,32,32,32, + 116,114,117,110,99,97,116,101,100,46,10,10,32,32,32,32, + 78,114,98,0,0,0,122,10,60,98,121,116,101,99,111,100, + 101,62,114,35,0,0,0,114,12,0,0,0,233,8,0,0, + 0,233,12,0,0,0,122,30,98,97,100,32,109,97,103,105, + 99,32,110,117,109,98,101,114,32,105,110,32,123,33,114,125, + 58,32,123,33,114,125,122,2,123,125,122,43,114,101,97,99, 104,101,100,32,69,79,70,32,119,104,105,108,101,32,114,101, 97,100,105,110,103,32,116,105,109,101,115,116,97,109,112,32, 105,110,32,123,33,114,125,122,48,114,101,97,99,104,101,100, @@ -699,9 +700,9 @@ 97,108,105,100,97,116,101,95,98,121,116,101,99,111,100,101, 95,104,101,97,100,101,114,152,1,0,0,115,76,0,0,0, 0,11,6,1,12,1,13,3,6,1,12,1,10,1,16,1, - 16,1,16,1,12,1,18,1,13,1,18,1,18,1,15,1, - 13,1,15,1,18,1,15,1,13,1,12,1,12,1,3,1, - 20,1,13,1,5,2,18,1,15,1,13,1,15,1,3,1, + 16,1,16,1,12,1,18,1,16,1,18,1,18,1,15,1, + 16,1,15,1,18,1,15,1,16,1,12,1,12,1,3,1, + 20,1,13,1,5,2,18,1,15,1,16,1,15,1,3,1, 18,1,13,1,5,2,18,1,15,1,9,1,114,135,0,0, 0,99,4,0,0,0,0,0,0,0,5,0,0,0,6,0, 0,0,67,0,0,0,115,115,0,0,0,116,0,0,106,1, -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Thu Oct 1 23:20:08 2015 From: python-checkins at python.org (victor.stinner) Date: Thu, 01 Oct 2015 21:20:08 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2325267=3A_The_UTF-?= =?utf-8?q?8_encoder_is_now_up_to_75_times_as_fast_for_error?= Message-ID: <20151001212007.9949.17787@psf.io> https://hg.python.org/cpython/rev/2b5357b38366 changeset: 98469:2b5357b38366 user: Victor Stinner date: Thu Oct 01 21:54:51 2015 +0200 summary: Issue #25267: The UTF-8 encoder is now up to 75 times as fast for error handlers: ``ignore``, ``replace``, ``surrogateescape``, ``surrogatepass``. Patch co-written with Serhiy Storchaka. files: Doc/whatsnew/3.6.rst | 3 + Lib/test/test_codecs.py | 37 ++++- Misc/NEWS | 4 + Objects/stringlib/codecs.h | 149 ++++++++++++++++-------- Objects/unicodeobject.c | 7 +- 5 files changed, 136 insertions(+), 64 deletions(-) diff --git a/Doc/whatsnew/3.6.rst b/Doc/whatsnew/3.6.rst --- a/Doc/whatsnew/3.6.rst +++ b/Doc/whatsnew/3.6.rst @@ -120,6 +120,9 @@ * The ASCII and the Latin1 encoders are now up to 3 times as fast for the error error ``surrogateescape``. +* The UTF-8 encoder is now up to 75 times as fast for error handlers: + ``ignore``, ``replace``, ``surrogateescape``, ``surrogatepass``. 
+ Build and C API Changes ======================= diff --git a/Lib/test/test_codecs.py b/Lib/test/test_codecs.py --- a/Lib/test/test_codecs.py +++ b/Lib/test/test_codecs.py @@ -361,6 +361,12 @@ self.assertEqual("[\uDC80]".encode(self.encoding, "replace"), "[?]".encode(self.encoding)) + # sequential surrogate characters + self.assertEqual("[\uD800\uDC80]".encode(self.encoding, "ignore"), + "[]".encode(self.encoding)) + self.assertEqual("[\uD800\uDC80]".encode(self.encoding, "replace"), + "[??]".encode(self.encoding)) + bom = "".encode(self.encoding) for before, after in [("\U00010fff", "A"), ("[", "]"), ("A", "\U00010fff")]: @@ -753,6 +759,7 @@ encoding = "utf-8" ill_formed_sequence = b"\xed\xb2\x80" ill_formed_sequence_replace = "\ufffd" * 3 + BOM = b'' def test_partial(self): self.check_partial( @@ -785,23 +792,32 @@ super().test_lone_surrogates() # not sure if this is making sense for # UTF-16 and UTF-32 - self.assertEqual("[\uDC80]".encode('utf-8', "surrogateescape"), - b'[\x80]') + self.assertEqual("[\uDC80]".encode(self.encoding, "surrogateescape"), + self.BOM + b'[\x80]') + + with self.assertRaises(UnicodeEncodeError) as cm: + "[\uDC80\uD800\uDFFF]".encode(self.encoding, "surrogateescape") + exc = cm.exception + self.assertEqual(exc.object[exc.start:exc.end], '\uD800\uDFFF') def test_surrogatepass_handler(self): - self.assertEqual("abc\ud800def".encode("utf-8", "surrogatepass"), - b"abc\xed\xa0\x80def") - self.assertEqual(b"abc\xed\xa0\x80def".decode("utf-8", "surrogatepass"), + self.assertEqual("abc\ud800def".encode(self.encoding, "surrogatepass"), + self.BOM + b"abc\xed\xa0\x80def") + self.assertEqual("\U00010fff\uD800".encode(self.encoding, "surrogatepass"), + self.BOM + b"\xf0\x90\xbf\xbf\xed\xa0\x80") + self.assertEqual("[\uD800\uDC80]".encode(self.encoding, "surrogatepass"), + self.BOM + b'[\xed\xa0\x80\xed\xb2\x80]') + + self.assertEqual(b"abc\xed\xa0\x80def".decode(self.encoding, "surrogatepass"), "abc\ud800def") - self.assertEqual("\U00010fff\uD800".encode("utf-8", "surrogatepass"), - b"\xf0\x90\xbf\xbf\xed\xa0\x80") - self.assertEqual(b"\xf0\x90\xbf\xbf\xed\xa0\x80".decode("utf-8", "surrogatepass"), + self.assertEqual(b"\xf0\x90\xbf\xbf\xed\xa0\x80".decode(self.encoding, "surrogatepass"), "\U00010fff\uD800") + self.assertTrue(codecs.lookup_error("surrogatepass")) with self.assertRaises(UnicodeDecodeError): - b"abc\xed\xa0".decode("utf-8", "surrogatepass") + b"abc\xed\xa0".decode(self.encoding, "surrogatepass") with self.assertRaises(UnicodeDecodeError): - b"abc\xed\xa0z".decode("utf-8", "surrogatepass") + b"abc\xed\xa0z".decode(self.encoding, "surrogatepass") @unittest.skipUnless(sys.platform == 'win32', @@ -1008,6 +1024,7 @@ class UTF8SigTest(UTF8Test, unittest.TestCase): encoding = "utf-8-sig" + BOM = codecs.BOM_UTF8 def test_partial(self): self.check_partial( diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,6 +10,10 @@ Core and Builtins ----------------- +- Issue #25267: The UTF-8 encoder is now up to 75 times as fast for error + handlers: ``ignore``, ``replace``, ``surrogateescape``, ``surrogatepass``. + Patch co-written with Serhiy Storchaka. + - Issue #25280: Import trace messages emitted in verbose (-v) mode are no longer formatted twice. 
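For reference, a minimal sketch of what the four error handlers named in this changeset do when the UTF-8 encoder meets a lone surrogate; the expected bytes are taken from the assertions added to test_codecs.py above (the changeset only makes these paths faster, it does not change their results):

    # Lone surrogates are rejected by the default (strict) handler, so the
    # result below is entirely determined by the error handler in use.
    assert "[\udc80]".encode("utf-8", "ignore") == b"[]"
    assert "[\udc80]".encode("utf-8", "replace") == b"[?]"
    assert "[\udc80]".encode("utf-8", "surrogateescape") == b"[\x80]"
    assert "abc\ud800def".encode("utf-8", "surrogatepass") == b"abc\xed\xa0\x80def"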
diff --git a/Objects/stringlib/codecs.h b/Objects/stringlib/codecs.h --- a/Objects/stringlib/codecs.h +++ b/Objects/stringlib/codecs.h @@ -268,9 +268,10 @@ Py_ssize_t nallocated; /* number of result bytes allocated */ Py_ssize_t nneeded; /* number of result bytes needed */ #if STRINGLIB_SIZEOF_CHAR > 1 - PyObject *errorHandler = NULL; + PyObject *error_handler_obj = NULL; PyObject *exc = NULL; PyObject *rep = NULL; + _Py_error_handler error_handler = _Py_ERROR_UNKNOWN; #endif #if STRINGLIB_SIZEOF_CHAR == 1 const Py_ssize_t max_char_size = 2; @@ -326,72 +327,116 @@ } #if STRINGLIB_SIZEOF_CHAR > 1 else if (Py_UNICODE_IS_SURROGATE(ch)) { - Py_ssize_t newpos; - Py_ssize_t repsize, k, startpos; + Py_ssize_t startpos, endpos, newpos; + Py_ssize_t repsize, k; + if (error_handler == _Py_ERROR_UNKNOWN) + error_handler = get_error_handler(errors); + startpos = i-1; - rep = unicode_encode_call_errorhandler( - errors, &errorHandler, "utf-8", "surrogates not allowed", - unicode, &exc, startpos, startpos+1, &newpos); - if (!rep) - goto error; + endpos = startpos+1; - if (PyBytes_Check(rep)) - repsize = PyBytes_GET_SIZE(rep); - else - repsize = PyUnicode_GET_LENGTH(rep); + while ((endpos < size) && Py_UNICODE_IS_SURROGATE(data[endpos])) + endpos++; - if (repsize > max_char_size) { - Py_ssize_t offset; + switch (error_handler) + { + case _Py_ERROR_REPLACE: + memset(p, '?', endpos - startpos); + p += (endpos - startpos); + /* fall through the ignore handler */ + case _Py_ERROR_IGNORE: + i += (endpos - startpos - 1); + break; - if (result == NULL) - offset = p - stackbuf; + + case _Py_ERROR_SURROGATEPASS: + for (k=startpos; k> 12)); + *p++ = (char)(0x80 | ((ch >> 6) & 0x3f)); + *p++ = (char)(0x80 | (ch & 0x3f)); + } + i += (endpos - startpos - 1); + break; + + case _Py_ERROR_SURROGATEESCAPE: + for (k=startpos; k= endpos) { + i += (endpos - startpos - 1); + break; + } + startpos = k; + assert(startpos < endpos); + /* fall through the default handler */ + + default: + rep = unicode_encode_call_errorhandler( + errors, &error_handler_obj, "utf-8", "surrogates not allowed", + unicode, &exc, startpos, endpos, &newpos); + if (!rep) + goto error; + + if (PyBytes_Check(rep)) + repsize = PyBytes_GET_SIZE(rep); else - offset = p - PyBytes_AS_STRING(result); + repsize = PyUnicode_GET_LENGTH(rep); - if (nallocated > PY_SSIZE_T_MAX - repsize + max_char_size) { - /* integer overflow */ - PyErr_NoMemory(); - goto error; + if (repsize > max_char_size) { + Py_ssize_t offset; + + if (result == NULL) + offset = p - stackbuf; + else + offset = p - PyBytes_AS_STRING(result); + + if (nallocated > PY_SSIZE_T_MAX - repsize + max_char_size) { + /* integer overflow */ + PyErr_NoMemory(); + goto error; + } + nallocated += repsize - max_char_size; + if (result != NULL) { + if (_PyBytes_Resize(&result, nallocated) < 0) + goto error; + } else { + result = PyBytes_FromStringAndSize(NULL, nallocated); + if (result == NULL) + goto error; + Py_MEMCPY(PyBytes_AS_STRING(result), stackbuf, offset); + } + p = PyBytes_AS_STRING(result) + offset; } - nallocated += repsize - max_char_size; - if (result != NULL) { - if (_PyBytes_Resize(&result, nallocated) < 0) + + if (PyBytes_Check(rep)) { + memcpy(p, PyBytes_AS_STRING(rep), repsize); + p += repsize; + } + else { + /* rep is unicode */ + if (PyUnicode_READY(rep) < 0) goto error; - } else { - result = PyBytes_FromStringAndSize(NULL, nallocated); - if (result == NULL) - goto error; - Py_MEMCPY(PyBytes_AS_STRING(result), stackbuf, offset); - } - p = PyBytes_AS_STRING(result) + offset; - } - if 
(PyBytes_Check(rep)) { - char *prep = PyBytes_AS_STRING(rep); - for(k = repsize; k > 0; k--) - *p++ = *prep++; - } else /* rep is unicode */ { - enum PyUnicode_Kind repkind; - void *repdata; - - if (PyUnicode_READY(rep) < 0) - goto error; - repkind = PyUnicode_KIND(rep); - repdata = PyUnicode_DATA(rep); - - for(k=0; k 2 @@ -430,7 +475,7 @@ } #if STRINGLIB_SIZEOF_CHAR > 1 - Py_XDECREF(errorHandler); + Py_XDECREF(error_handler_obj); Py_XDECREF(exc); #endif return result; @@ -438,7 +483,7 @@ #if STRINGLIB_SIZEOF_CHAR > 1 error: Py_XDECREF(rep); - Py_XDECREF(errorHandler); + Py_XDECREF(error_handler_obj); Py_XDECREF(exc); Py_XDECREF(result); return NULL; diff --git a/Objects/unicodeobject.c b/Objects/unicodeobject.c --- a/Objects/unicodeobject.c +++ b/Objects/unicodeobject.c @@ -297,6 +297,7 @@ _Py_ERROR_UNKNOWN=0, _Py_ERROR_STRICT, _Py_ERROR_SURROGATEESCAPE, + _Py_ERROR_SURROGATEPASS, _Py_ERROR_REPLACE, _Py_ERROR_IGNORE, _Py_ERROR_XMLCHARREFREPLACE, @@ -312,6 +313,8 @@ return _Py_ERROR_STRICT; if (strcmp(errors, "surrogateescape") == 0) return _Py_ERROR_SURROGATEESCAPE; + if (strcmp(errors, "surrogatepass") == 0) + return _Py_ERROR_SURROGATEPASS; if (strcmp(errors, "ignore") == 0) return _Py_ERROR_IGNORE; if (strcmp(errors, "replace") == 0) @@ -6479,8 +6482,8 @@ goto onError; case _Py_ERROR_REPLACE: - while (collstart++ < collend) - *str++ = '?'; + memset(str, '?', collend - collstart); + str += (collend - collstart); /* fall through ignore error handler */ case _Py_ERROR_IGNORE: pos = collend; -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Fri Oct 2 00:20:41 2015 From: python-checkins at python.org (steve.dower) Date: Thu, 01 Oct 2015 22:20:41 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy41KTogSXNzdWUgIzI1MTY1?= =?utf-8?q?=3A_Windows_uninstallation_should_not_remove_launcher_if_other?= Message-ID: <20151001222040.82642.8890@psf.io> https://hg.python.org/cpython/rev/a2d30dfa46a7 changeset: 98473:a2d30dfa46a7 branch: 3.5 user: Steve Dower date: Thu Oct 01 15:19:39 2015 -0700 summary: Issue #25165: Windows uninstallation should not remove launcher if other versions remain files: Misc/NEWS | 5 ++++- Tools/msi/common.wxs | 6 ++++-- Tools/msi/launcher/launcher.wixproj | 1 + Tools/msi/launcher/launcher.wxs | 7 +++++++ 4 files changed, 16 insertions(+), 3 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -244,7 +244,10 @@ Windows ------- -- Issues #25112: py.exe launcher is missing icons +- Issue #25165: Windows uninstallation should not remove launcher if other + versions remain + +- Issue #25112: py.exe launcher is missing icons - Issue #25102: Windows installer does not precompile for -O or -OO. 
diff --git a/Tools/msi/common.wxs b/Tools/msi/common.wxs --- a/Tools/msi/common.wxs +++ b/Tools/msi/common.wxs @@ -22,17 +22,19 @@ + + Installed OR NOT MISSING_CORE + Installed OR NOT DOWNGRADE - Installed OR NOT MISSING_CORE Installed OR TARGETDIR OR Suppress_TARGETDIR_Check - UPGRADE + UPGRADE diff --git a/Tools/msi/launcher/launcher.wixproj b/Tools/msi/launcher/launcher.wixproj --- a/Tools/msi/launcher/launcher.wixproj +++ b/Tools/msi/launcher/launcher.wixproj @@ -5,6 +5,7 @@ 2.0 launcher Package + SkipMissingCore=1;$(DefineConstants) diff --git a/Tools/msi/launcher/launcher.wxs b/Tools/msi/launcher/launcher.wxs --- a/Tools/msi/launcher/launcher.wxs +++ b/Tools/msi/launcher/launcher.wxs @@ -26,6 +26,13 @@ NOT Installed AND NOT ALLUSERS=1 NOT Installed AND ALLUSERS=1 + + UPGRADE or REMOVE_OLD_LAUNCHER + + + + + -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Fri Oct 2 00:20:41 2015 From: python-checkins at python.org (steve.dower) Date: Thu, 01 Oct 2015 22:20:41 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E5=29=3A_Improves_suppo?= =?utf-8?q?rt_for_building_unofficial_versions_of_the_Windows_installer=2E?= Message-ID: <20151001222040.82662.89218@psf.io> https://hg.python.org/cpython/rev/26439091dc71 changeset: 98472:26439091dc71 branch: 3.5 parent: 98466:b54528d8d8c3 user: Steve Dower date: Thu Oct 01 15:18:53 2015 -0700 summary: Improves support for building unofficial versions of the Windows installer. files: Tools/msi/buildrelease.bat | 41 ++++++--- Tools/msi/bundle/bundle.targets | 7 +- Tools/msi/bundle/bundle.wxs | 2 + Tools/msi/bundle/packagegroups/launcher.wxs | 4 + Tools/msi/bundle/packagegroups/packageinstall.wxs | 26 ++++++ Tools/msi/bundle/packagegroups/pip.wxs | 25 ++++++ Tools/msi/bundle/packagegroups/postinstall.wxs | 19 ---- Tools/msi/msi.props | 14 ++- 8 files changed, 98 insertions(+), 40 deletions(-) diff --git a/Tools/msi/buildrelease.bat b/Tools/msi/buildrelease.bat --- a/Tools/msi/buildrelease.bat +++ b/Tools/msi/buildrelease.bat @@ -4,17 +4,28 @@ rem This script is intended for building official releases of Python. rem To use it to build alternative releases, you should clone this file rem and modify the following three URIs. + +rem These two will ensure that your release can be installed +rem alongside an official Python release, by modifying the GUIDs used +rem for all components. rem -rem The first two will ensure that your release can be installed -rem alongside an official Python release, while the second specifies -rem the URL that will be used to download installation files. The -rem files available from this URL *will* conflict with your installer. -rem Trust me, you don't want them, even if it seems like a good idea. +rem The following substitutions will be applied to the release URI: +rem Variable Description Example +rem {arch} architecture amd64, win32 +set RELEASE_URI=http://www.python.org/{arch} -set RELEASE_URI_X86=http://www.python.org/win32 -set RELEASE_URI_X64=http://www.python.org/amd64 -set DOWNLOAD_URL_BASE=https://www.python.org/ftp/python -set DOWNLOAD_URL= +rem This is the URL that will be used to download installation files. +rem The files available from the default URL *will* conflict with your +rem installer. Trust me, you don't want them, even if it seems like a +rem good idea. 
+rem +rem The following substitutions will be applied to the download URL: +rem Variable Description Example +rem {version} version number 3.5.0 +rem {arch} architecture amd64, win32 +rem {releasename} release name a1, b2, rc3 (or blank for final) +rem {msi} MSI filename core.msi +set DOWNLOAD_URL=https://www.python.org/ftp/python/{version}/{arch}{releasename}/{msi} set D=%~dp0 set PCBUILD=%D%..\..\PCBuild\ @@ -90,14 +101,12 @@ set BUILD_PLAT=Win32 set OUTDIR_PLAT=win32 set OBJDIR_PLAT=x86 - set RELEASE_URI=%RELEASE_URI_X86% ) ELSE ( call "%PCBUILD%env.bat" x86_amd64 set BUILD=%PCBUILD%amd64\ set BUILD_PLAT=x64 set OUTDIR_PLAT=amd64 set OBJDIR_PLAT=x64 - set RELEASE_URI=%RELEASE_URI_X64% ) if exist "%BUILD%en-us" ( @@ -157,10 +166,16 @@ echo --build (-b) Incrementally build Python rather than rebuilding echo --skip-build (-B) Do not build Python (just do the installers) echo --skip-doc (-D) Do not build documentation -echo --download Specify the full download URL for MSIs (should include {2}) +echo --download Specify the full download URL for MSIs echo --test Specify the test directory to run the installer tests echo -h Display this help information echo. echo If no architecture is specified, all architectures will be built. echo If --test is not specified, the installer tests are not run. -echo. \ No newline at end of file +echo. +echo The following substitutions will be applied to the download URL: +echo Variable Description Example +echo {version} version number 3.5.0 +echo {arch} architecture amd64, win32 +echo {releasename} release name a1, b2, rc3 (or blank for final) +echo {msi} MSI filename core.msi diff --git a/Tools/msi/bundle/bundle.targets b/Tools/msi/bundle/bundle.targets --- a/Tools/msi/bundle/bundle.targets +++ b/Tools/msi/bundle/bundle.targets @@ -16,8 +16,9 @@ $(OutputPath)en-us\ $(OutputPath) - $(DownloadUrlBase.TrimEnd(`/`))/$(MajorVersionNumber).$(MinorVersionNumber).$(MicroVersionNumber)/$(ArchName)$(ReleaseLevelName)/ - $(DefineConstants);DownloadUrl=$(DownloadUrl){2} + + $(DownloadUrlBase.TrimEnd(`/`))/{version}/{arch}{releasename}/{msi} + $(DefineConstants);DownloadUrl=$(DownloadUrl.Replace(`{version}`, `$(MajorVersionNumber).$(MinorVersionNumber).$(MicroVersionNumber)`).Replace(`{arch}`, `$(ArchName)`).Replace(`{releasename}`, `$(ReleaseName)`).Replace(`{msi}`, `{2}`)) $(DefineConstants);DownloadUrl={2} @@ -88,7 +89,7 @@ - diff --git a/Tools/msi/bundle/bundle.wxs b/Tools/msi/bundle/bundle.wxs --- a/Tools/msi/bundle/bundle.wxs +++ b/Tools/msi/bundle/bundle.wxs @@ -85,6 +85,8 @@ + + diff --git a/Tools/msi/bundle/packagegroups/launcher.wxs b/Tools/msi/bundle/packagegroups/launcher.wxs --- a/Tools/msi/bundle/packagegroups/launcher.wxs +++ b/Tools/msi/bundle/packagegroups/launcher.wxs @@ -9,6 +9,8 @@ DownloadUrl="$(var.DownloadUrl)" ForcePerMachine="yes" EnableFeatureSelection="yes" + Permanent="yes" + Visible="yes" InstallCondition="(InstallAllUsers or InstallLauncherAllUsers) and Include_launcher" /> diff --git a/Tools/msi/bundle/packagegroups/packageinstall.wxs b/Tools/msi/bundle/packagegroups/packageinstall.wxs new file mode 100644 --- /dev/null +++ b/Tools/msi/bundle/packagegroups/packageinstall.wxs @@ -0,0 +1,26 @@ + + + + + + + + \ No newline at end of file diff --git a/Tools/msi/bundle/packagegroups/pip.wxs b/Tools/msi/bundle/packagegroups/pip.wxs new file mode 100644 --- /dev/null +++ b/Tools/msi/bundle/packagegroups/pip.wxs @@ -0,0 +1,25 @@ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/Tools/msi/bundle/packagegroups/postinstall.wxs 
b/Tools/msi/bundle/packagegroups/postinstall.wxs --- a/Tools/msi/bundle/packagegroups/postinstall.wxs +++ b/Tools/msi/bundle/packagegroups/postinstall.wxs @@ -2,25 +2,6 @@ - - - - - - - - - Release x86 perUser + + + + + + - $(ComputerName) + $(ComputerName)/$(ArchName)/ $(ReleaseUri)/ - - @@ -150,7 +154,7 @@ <_Uuids>@(_Uuid->'("%(Identity)", "$(MajorVersionNumber).$(MinorVersionNumber)/%(Uri)")',',') - <_GenerateCommand>import uuid; print('\n'.join('{}={}'.format(i, uuid.uuid5(uuid.UUID('c8d9733e-a70c-43ff-ab0c-e26456f11083'), '$(ReleaseUri)' + j)) for i,j in [$(_Uuids.Replace(`"`,`'`))])) + <_GenerateCommand>import uuid; print('\n'.join('{}={}'.format(i, uuid.uuid5(uuid.UUID('c8d9733e-a70c-43ff-ab0c-e26456f11083'), '$(ReleaseUri.Replace(`{arch}`, `$(ArchName)`))' + j)) for i,j in [$(_Uuids.Replace(`"`,`'`))])) https://hg.python.org/cpython/rev/c98cc9f7e2c5 changeset: 98474:c98cc9f7e2c5 parent: 98471:3bcf60b12094 parent: 98473:a2d30dfa46a7 user: Steve Dower date: Thu Oct 01 15:20:11 2015 -0700 summary: Merge from 3.5 files: Misc/NEWS | 5 +- Tools/msi/buildrelease.bat | 41 ++++++--- Tools/msi/bundle/bundle.targets | 7 +- Tools/msi/bundle/bundle.wxs | 2 + Tools/msi/bundle/packagegroups/launcher.wxs | 4 + Tools/msi/bundle/packagegroups/packageinstall.wxs | 26 ++++++ Tools/msi/bundle/packagegroups/pip.wxs | 25 ++++++ Tools/msi/bundle/packagegroups/postinstall.wxs | 19 ---- Tools/msi/common.wxs | 6 +- Tools/msi/launcher/launcher.wixproj | 1 + Tools/msi/launcher/launcher.wxs | 7 + Tools/msi/msi.props | 14 ++- 12 files changed, 114 insertions(+), 43 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -358,7 +358,10 @@ Windows ------- -- Issues #25112: py.exe launcher is missing icons +- Issue #25165: Windows uninstallation should not remove launcher if other + versions remain + +- Issue #25112: py.exe launcher is missing icons - Issue #25102: Windows installer does not precompile for -O or -OO. diff --git a/Tools/msi/buildrelease.bat b/Tools/msi/buildrelease.bat --- a/Tools/msi/buildrelease.bat +++ b/Tools/msi/buildrelease.bat @@ -4,17 +4,28 @@ rem This script is intended for building official releases of Python. rem To use it to build alternative releases, you should clone this file rem and modify the following three URIs. + +rem These two will ensure that your release can be installed +rem alongside an official Python release, by modifying the GUIDs used +rem for all components. rem -rem The first two will ensure that your release can be installed -rem alongside an official Python release, while the second specifies -rem the URL that will be used to download installation files. The -rem files available from this URL *will* conflict with your installer. -rem Trust me, you don't want them, even if it seems like a good idea. +rem The following substitutions will be applied to the release URI: +rem Variable Description Example +rem {arch} architecture amd64, win32 +set RELEASE_URI=http://www.python.org/{arch} -set RELEASE_URI_X86=http://www.python.org/win32 -set RELEASE_URI_X64=http://www.python.org/amd64 -set DOWNLOAD_URL_BASE=https://www.python.org/ftp/python -set DOWNLOAD_URL= +rem This is the URL that will be used to download installation files. +rem The files available from the default URL *will* conflict with your +rem installer. Trust me, you don't want them, even if it seems like a +rem good idea. 
+rem +rem The following substitutions will be applied to the download URL: +rem Variable Description Example +rem {version} version number 3.5.0 +rem {arch} architecture amd64, win32 +rem {releasename} release name a1, b2, rc3 (or blank for final) +rem {msi} MSI filename core.msi +set DOWNLOAD_URL=https://www.python.org/ftp/python/{version}/{arch}{releasename}/{msi} set D=%~dp0 set PCBUILD=%D%..\..\PCBuild\ @@ -90,14 +101,12 @@ set BUILD_PLAT=Win32 set OUTDIR_PLAT=win32 set OBJDIR_PLAT=x86 - set RELEASE_URI=%RELEASE_URI_X86% ) ELSE ( call "%PCBUILD%env.bat" x86_amd64 set BUILD=%PCBUILD%amd64\ set BUILD_PLAT=x64 set OUTDIR_PLAT=amd64 set OBJDIR_PLAT=x64 - set RELEASE_URI=%RELEASE_URI_X64% ) if exist "%BUILD%en-us" ( @@ -157,10 +166,16 @@ echo --build (-b) Incrementally build Python rather than rebuilding echo --skip-build (-B) Do not build Python (just do the installers) echo --skip-doc (-D) Do not build documentation -echo --download Specify the full download URL for MSIs (should include {2}) +echo --download Specify the full download URL for MSIs echo --test Specify the test directory to run the installer tests echo -h Display this help information echo. echo If no architecture is specified, all architectures will be built. echo If --test is not specified, the installer tests are not run. -echo. \ No newline at end of file +echo. +echo The following substitutions will be applied to the download URL: +echo Variable Description Example +echo {version} version number 3.5.0 +echo {arch} architecture amd64, win32 +echo {releasename} release name a1, b2, rc3 (or blank for final) +echo {msi} MSI filename core.msi diff --git a/Tools/msi/bundle/bundle.targets b/Tools/msi/bundle/bundle.targets --- a/Tools/msi/bundle/bundle.targets +++ b/Tools/msi/bundle/bundle.targets @@ -16,8 +16,9 @@ $(OutputPath)en-us\ $(OutputPath) - $(DownloadUrlBase.TrimEnd(`/`))/$(MajorVersionNumber).$(MinorVersionNumber).$(MicroVersionNumber)/$(ArchName)$(ReleaseLevelName)/ - $(DefineConstants);DownloadUrl=$(DownloadUrl){2} + + $(DownloadUrlBase.TrimEnd(`/`))/{version}/{arch}{releasename}/{msi} + $(DefineConstants);DownloadUrl=$(DownloadUrl.Replace(`{version}`, `$(MajorVersionNumber).$(MinorVersionNumber).$(MicroVersionNumber)`).Replace(`{arch}`, `$(ArchName)`).Replace(`{releasename}`, `$(ReleaseName)`).Replace(`{msi}`, `{2}`)) $(DefineConstants);DownloadUrl={2} @@ -88,7 +89,7 @@ - diff --git a/Tools/msi/bundle/bundle.wxs b/Tools/msi/bundle/bundle.wxs --- a/Tools/msi/bundle/bundle.wxs +++ b/Tools/msi/bundle/bundle.wxs @@ -85,6 +85,8 @@ + + diff --git a/Tools/msi/bundle/packagegroups/launcher.wxs b/Tools/msi/bundle/packagegroups/launcher.wxs --- a/Tools/msi/bundle/packagegroups/launcher.wxs +++ b/Tools/msi/bundle/packagegroups/launcher.wxs @@ -9,6 +9,8 @@ DownloadUrl="$(var.DownloadUrl)" ForcePerMachine="yes" EnableFeatureSelection="yes" + Permanent="yes" + Visible="yes" InstallCondition="(InstallAllUsers or InstallLauncherAllUsers) and Include_launcher" /> diff --git a/Tools/msi/bundle/packagegroups/packageinstall.wxs b/Tools/msi/bundle/packagegroups/packageinstall.wxs new file mode 100644 --- /dev/null +++ b/Tools/msi/bundle/packagegroups/packageinstall.wxs @@ -0,0 +1,26 @@ + + + + + + + + \ No newline at end of file diff --git a/Tools/msi/bundle/packagegroups/pip.wxs b/Tools/msi/bundle/packagegroups/pip.wxs new file mode 100644 --- /dev/null +++ b/Tools/msi/bundle/packagegroups/pip.wxs @@ -0,0 +1,25 @@ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/Tools/msi/bundle/packagegroups/postinstall.wxs 
b/Tools/msi/bundle/packagegroups/postinstall.wxs --- a/Tools/msi/bundle/packagegroups/postinstall.wxs +++ b/Tools/msi/bundle/packagegroups/postinstall.wxs @@ -2,25 +2,6 @@ - - - - - - - - - + + Installed OR NOT MISSING_CORE + Installed OR NOT DOWNGRADE - Installed OR NOT MISSING_CORE Installed OR TARGETDIR OR Suppress_TARGETDIR_Check - UPGRADE + UPGRADE diff --git a/Tools/msi/launcher/launcher.wixproj b/Tools/msi/launcher/launcher.wixproj --- a/Tools/msi/launcher/launcher.wixproj +++ b/Tools/msi/launcher/launcher.wixproj @@ -5,6 +5,7 @@ 2.0 launcher Package + SkipMissingCore=1;$(DefineConstants) diff --git a/Tools/msi/launcher/launcher.wxs b/Tools/msi/launcher/launcher.wxs --- a/Tools/msi/launcher/launcher.wxs +++ b/Tools/msi/launcher/launcher.wxs @@ -26,6 +26,13 @@ NOT Installed AND NOT ALLUSERS=1 NOT Installed AND ALLUSERS=1 + + UPGRADE or REMOVE_OLD_LAUNCHER + + + + + diff --git a/Tools/msi/msi.props b/Tools/msi/msi.props --- a/Tools/msi/msi.props +++ b/Tools/msi/msi.props @@ -11,6 +11,12 @@ Release x86 perUser + + + + + + - $(ComputerName) + $(ComputerName)/$(ArchName)/ $(ReleaseUri)/ - - @@ -150,7 +154,7 @@ <_Uuids>@(_Uuid->'("%(Identity)", "$(MajorVersionNumber).$(MinorVersionNumber)/%(Uri)")',',') - <_GenerateCommand>import uuid; print('\n'.join('{}={}'.format(i, uuid.uuid5(uuid.UUID('c8d9733e-a70c-43ff-ab0c-e26456f11083'), '$(ReleaseUri)' + j)) for i,j in [$(_Uuids.Replace(`"`,`'`))])) + <_GenerateCommand>import uuid; print('\n'.join('{}={}'.format(i, uuid.uuid5(uuid.UUID('c8d9733e-a70c-43ff-ab0c-e26456f11083'), '$(ReleaseUri.Replace(`{arch}`, `$(ArchName)`))' + j)) for i,j in [$(_Uuids.Replace(`"`,`'`))])) https://hg.python.org/peps/rev/742700a03e91 changeset: 6105:742700a03e91 user: Ned Deily date: Thu Oct 01 22:21:48 2015 -0400 summary: Update PEP 494 with a detailed Python 3.6 release schedule proposal files: pep-0494.txt | 34 +++++++++++++++++++++++++++------- 1 files changed, 27 insertions(+), 7 deletions(-) diff --git a/pep-0494.txt b/pep-0494.txt --- a/pep-0494.txt +++ b/pep-0494.txt @@ -19,7 +19,7 @@ .. Small features may be added up to the first beta release. Bugs may be fixed until the final release, - which is planned for September 2015. + which is planned for December 2016. Release Manager and Crew @@ -31,17 +31,37 @@ - Documentation: Georg Brandl +3.6 Lifespan +============ + +3.6 will receive bugfix updates approximately every 3-6 months for +approximately 18 months. After the release of 3.7.0 final, a final +3.6 bugfix update will be released. After that, it is expected that +security updates (source only) will be released until 5 years after +the release of 3.6 final, so until approximately December 2021. + + Release Schedule ================ -The releases: +3.6.0 schedule +-------------- -- 3.6.0 alpha 1: TBD -- 3.6.0 beta 1: TBD -- 3.6.0 candidate 1: TBD -- 3.6.0 final: TBD (late 2016?) +- 3.6 development begins: 2015-05-24 +- 3.6.0 alpha 1: 2016-05-15 +- 3.6.0 alpha 2: 2016-06-12 +- 3.6.0 alpha 3: 2016-07-10 +- 3.6.0 alpha 4: 2016-08-07 +- 3.6.0 beta 1: 2016-09-07 -(Beta 1 is also "feature freeze"--no new features beyond this point.) +(No new features beyond this point.) 
+ +- 3.6.0 beta 2: 2016-10-02 +- 3.6.0 beta 3: 2016-10-30 +- 3.6.0 beta 4: 2016-11-20 +- 3.6.0 candidate 1: 2016-12-04 +- 3.6.0 candidate 2 (if needed): 2016-12-11 +- 3.6.0 final: 2016-12-16 Features for 3.6 -- Repository URL: https://hg.python.org/peps From solipsis at pitrou.net Fri Oct 2 10:45:07 2015 From: solipsis at pitrou.net (solipsis at pitrou.net) Date: Fri, 02 Oct 2015 08:45:07 +0000 Subject: [Python-checkins] Daily reference leaks (c98cc9f7e2c5): sum=17880 Message-ID: <20151002084506.31177.3204@psf.io> results for c98cc9f7e2c5 on branch "default" -------------------------------------------- test_asyncio leaked [0, 3, 0] memory blocks, sum=3 test_capi leaked [1598, 1598, 1598] references, sum=4794 test_capi leaked [387, 389, 389] memory blocks, sum=1165 test_functools leaked [0, 2, 2] memory blocks, sum=4 test_threading leaked [3196, 3196, 3196] references, sum=9588 test_threading leaked [774, 776, 776] memory blocks, sum=2326 Command line was: ['./python', '-m', 'test.regrtest', '-uall', '-R', '3:3:/home/psf-users/antoine/refleaks/reflogTFm6JI', '--timeout', '7200'] From python-checkins at python.org Fri Oct 2 11:51:07 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Fri, 02 Oct 2015 09:51:07 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy41KTogSXNzdWUgIzI0NDgz?= =?utf-8?q?=3A_C_implementation_of_functools=2Elru=5Fcache=28=29_now_calcu?= =?utf-8?q?lates_key=27s?= Message-ID: <20151002095107.31179.4139@psf.io> https://hg.python.org/cpython/rev/3f4c319a822f changeset: 98475:3f4c319a822f branch: 3.5 parent: 98473:a2d30dfa46a7 user: Serhiy Storchaka date: Fri Oct 02 12:47:11 2015 +0300 summary: Issue #24483: C implementation of functools.lru_cache() now calculates key's hash only once. files: Include/dictobject.h | 4 ++ Misc/NEWS | 3 ++ Modules/_functoolsmodule.c | 26 ++++++++++++++---- Objects/dictobject.c | 37 ++++++++++++++++++++++++++ 4 files changed, 64 insertions(+), 6 deletions(-) diff --git a/Include/dictobject.h b/Include/dictobject.h --- a/Include/dictobject.h +++ b/Include/dictobject.h @@ -72,6 +72,10 @@ PyObject *item, Py_hash_t hash); #endif PyAPI_FUNC(int) PyDict_DelItem(PyObject *mp, PyObject *key); +#ifndef Py_LIMITED_API +PyAPI_FUNC(int) _PyDict_DelItem_KnownHash(PyObject *mp, PyObject *key, + Py_hash_t hash); +#endif PyAPI_FUNC(void) PyDict_Clear(PyObject *mp); PyAPI_FUNC(int) PyDict_Next( PyObject *mp, Py_ssize_t *pos, PyObject **key, PyObject **value); diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -32,6 +32,9 @@ Library ------- +- Issue #24483: C implementation of functools.lru_cache() now calculates key's + hash only once. + - Issue #22958: Constructor and update method of weakref.WeakValueDictionary now accept the self and the dict keyword arguments. 
diff --git a/Modules/_functoolsmodule.c b/Modules/_functoolsmodule.c --- a/Modules/_functoolsmodule.c +++ b/Modules/_functoolsmodule.c @@ -601,6 +601,7 @@ typedef struct lru_list_elem { PyObject_HEAD struct lru_list_elem *prev, *next; /* borrowed links */ + Py_hash_t hash; PyObject *key, *result; } lru_list_elem; @@ -762,10 +763,14 @@ infinite_lru_cache_wrapper(lru_cache_object *self, PyObject *args, PyObject *kwds) { PyObject *result; + Py_hash_t hash; PyObject *key = lru_cache_make_key(args, kwds, self->typed); if (!key) return NULL; - result = PyDict_GetItemWithError(self->cache, key); + hash = PyObject_Hash(key); + if (hash == -1) + return NULL; + result = _PyDict_GetItem_KnownHash(self->cache, key, hash); if (result) { Py_INCREF(result); self->hits++; @@ -781,7 +786,7 @@ Py_DECREF(key); return NULL; } - if (PyDict_SetItem(self->cache, key, result) < 0) { + if (_PyDict_SetItem_KnownHash(self->cache, key, result, hash) < 0) { Py_DECREF(result); Py_DECREF(key); return NULL; @@ -813,11 +818,15 @@ { lru_list_elem *link; PyObject *key, *result; + Py_hash_t hash; key = lru_cache_make_key(args, kwds, self->typed); if (!key) return NULL; - link = (lru_list_elem *)PyDict_GetItemWithError(self->cache, key); + hash = PyObject_Hash(key); + if (hash == -1) + return NULL; + link = (lru_list_elem *)_PyDict_GetItem_KnownHash(self->cache, key, hash); if (link) { lru_cache_extricate_link(link); lru_cache_append_link(self, link); @@ -845,7 +854,8 @@ /* Remove it from the cache. The cache dict holds one reference to the link, and the linked list holds yet one reference to it. */ - if (PyDict_DelItem(self->cache, link->key) < 0) { + if (_PyDict_DelItem_KnownHash(self->cache, link->key, + link->hash) < 0) { lru_cache_append_link(self, link); Py_DECREF(key); Py_DECREF(result); @@ -859,9 +869,11 @@ oldkey = link->key; oldresult = link->result; + link->hash = hash; link->key = key; link->result = result; - if (PyDict_SetItem(self->cache, key, (PyObject *)link) < 0) { + if (_PyDict_SetItem_KnownHash(self->cache, key, (PyObject *)link, + hash) < 0) { Py_DECREF(link); Py_DECREF(oldkey); Py_DECREF(oldresult); @@ -881,10 +893,12 @@ return NULL; } + link->hash = hash; link->key = key; link->result = result; _PyObject_GC_TRACK(link); - if (PyDict_SetItem(self->cache, key, (PyObject *)link) < 0) { + if (_PyDict_SetItem_KnownHash(self->cache, key, (PyObject *)link, + hash) < 0) { Py_DECREF(link); return NULL; } diff --git a/Objects/dictobject.c b/Objects/dictobject.c --- a/Objects/dictobject.c +++ b/Objects/dictobject.c @@ -1242,6 +1242,7 @@ } assert(key); assert(value); + assert(hash != -1); mp = (PyDictObject *)op; /* insertdict() handles any resizing that might be necessary */ @@ -1290,6 +1291,42 @@ return 0; } +int +_PyDict_DelItem_KnownHash(PyObject *op, PyObject *key, Py_hash_t hash) +{ + PyDictObject *mp; + PyDictKeyEntry *ep; + PyObject *old_key, *old_value; + PyObject **value_addr; + + if (!PyDict_Check(op)) { + PyErr_BadInternalCall(); + return -1; + } + assert(key); + assert(hash != -1); + mp = (PyDictObject *)op; + ep = (mp->ma_keys->dk_lookup)(mp, key, hash, &value_addr); + if (ep == NULL) + return -1; + if (*value_addr == NULL) { + _PyErr_SetKeyError(key); + return -1; + } + old_value = *value_addr; + *value_addr = NULL; + mp->ma_used--; + if (!_PyDict_HasSplitTable(mp)) { + ENSURE_ALLOWS_DELETIONS(mp); + old_key = ep->me_key; + Py_INCREF(dummy); + ep->me_key = dummy; + Py_DECREF(old_key); + } + Py_DECREF(old_value); + return 0; +} + void PyDict_Clear(PyObject *op) { -- Repository URL: 
https://hg.python.org/cpython From python-checkins at python.org Fri Oct 2 11:51:08 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Fri, 02 Oct 2015 09:51:08 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2324483=3A_C_implementation_of_functools=2Elru=5F?= =?utf-8?q?cache=28=29_now_calculates_key=27s?= Message-ID: <20151002095107.11704.92168@psf.io> https://hg.python.org/cpython/rev/5758b85627c9 changeset: 98476:5758b85627c9 parent: 98474:c98cc9f7e2c5 parent: 98475:3f4c319a822f user: Serhiy Storchaka date: Fri Oct 02 12:47:59 2015 +0300 summary: Issue #24483: C implementation of functools.lru_cache() now calculates key's hash only once. files: Include/dictobject.h | 4 ++ Misc/NEWS | 3 ++ Modules/_functoolsmodule.c | 26 ++++++++++++++---- Objects/dictobject.c | 37 ++++++++++++++++++++++++++ 4 files changed, 64 insertions(+), 6 deletions(-) diff --git a/Include/dictobject.h b/Include/dictobject.h --- a/Include/dictobject.h +++ b/Include/dictobject.h @@ -72,6 +72,10 @@ PyObject *item, Py_hash_t hash); #endif PyAPI_FUNC(int) PyDict_DelItem(PyObject *mp, PyObject *key); +#ifndef Py_LIMITED_API +PyAPI_FUNC(int) _PyDict_DelItem_KnownHash(PyObject *mp, PyObject *key, + Py_hash_t hash); +#endif PyAPI_FUNC(void) PyDict_Clear(PyObject *mp); PyAPI_FUNC(int) PyDict_Next( PyObject *mp, Py_ssize_t *pos, PyObject **key, PyObject **value); diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -203,6 +203,9 @@ Library ------- +- Issue #24483: C implementation of functools.lru_cache() now calculates key's + hash only once. + - Issue #22958: Constructor and update method of weakref.WeakValueDictionary now accept the self and the dict keyword arguments. diff --git a/Modules/_functoolsmodule.c b/Modules/_functoolsmodule.c --- a/Modules/_functoolsmodule.c +++ b/Modules/_functoolsmodule.c @@ -601,6 +601,7 @@ typedef struct lru_list_elem { PyObject_HEAD struct lru_list_elem *prev, *next; /* borrowed links */ + Py_hash_t hash; PyObject *key, *result; } lru_list_elem; @@ -762,10 +763,14 @@ infinite_lru_cache_wrapper(lru_cache_object *self, PyObject *args, PyObject *kwds) { PyObject *result; + Py_hash_t hash; PyObject *key = lru_cache_make_key(args, kwds, self->typed); if (!key) return NULL; - result = PyDict_GetItemWithError(self->cache, key); + hash = PyObject_Hash(key); + if (hash == -1) + return NULL; + result = _PyDict_GetItem_KnownHash(self->cache, key, hash); if (result) { Py_INCREF(result); self->hits++; @@ -781,7 +786,7 @@ Py_DECREF(key); return NULL; } - if (PyDict_SetItem(self->cache, key, result) < 0) { + if (_PyDict_SetItem_KnownHash(self->cache, key, result, hash) < 0) { Py_DECREF(result); Py_DECREF(key); return NULL; @@ -813,11 +818,15 @@ { lru_list_elem *link; PyObject *key, *result; + Py_hash_t hash; key = lru_cache_make_key(args, kwds, self->typed); if (!key) return NULL; - link = (lru_list_elem *)PyDict_GetItemWithError(self->cache, key); + hash = PyObject_Hash(key); + if (hash == -1) + return NULL; + link = (lru_list_elem *)_PyDict_GetItem_KnownHash(self->cache, key, hash); if (link) { lru_cache_extricate_link(link); lru_cache_append_link(self, link); @@ -845,7 +854,8 @@ /* Remove it from the cache. The cache dict holds one reference to the link, and the linked list holds yet one reference to it. 
*/ - if (PyDict_DelItem(self->cache, link->key) < 0) { + if (_PyDict_DelItem_KnownHash(self->cache, link->key, + link->hash) < 0) { lru_cache_append_link(self, link); Py_DECREF(key); Py_DECREF(result); @@ -859,9 +869,11 @@ oldkey = link->key; oldresult = link->result; + link->hash = hash; link->key = key; link->result = result; - if (PyDict_SetItem(self->cache, key, (PyObject *)link) < 0) { + if (_PyDict_SetItem_KnownHash(self->cache, key, (PyObject *)link, + hash) < 0) { Py_DECREF(link); Py_DECREF(oldkey); Py_DECREF(oldresult); @@ -881,10 +893,12 @@ return NULL; } + link->hash = hash; link->key = key; link->result = result; _PyObject_GC_TRACK(link); - if (PyDict_SetItem(self->cache, key, (PyObject *)link) < 0) { + if (_PyDict_SetItem_KnownHash(self->cache, key, (PyObject *)link, + hash) < 0) { Py_DECREF(link); return NULL; } diff --git a/Objects/dictobject.c b/Objects/dictobject.c --- a/Objects/dictobject.c +++ b/Objects/dictobject.c @@ -1242,6 +1242,7 @@ } assert(key); assert(value); + assert(hash != -1); mp = (PyDictObject *)op; /* insertdict() handles any resizing that might be necessary */ @@ -1290,6 +1291,42 @@ return 0; } +int +_PyDict_DelItem_KnownHash(PyObject *op, PyObject *key, Py_hash_t hash) +{ + PyDictObject *mp; + PyDictKeyEntry *ep; + PyObject *old_key, *old_value; + PyObject **value_addr; + + if (!PyDict_Check(op)) { + PyErr_BadInternalCall(); + return -1; + } + assert(key); + assert(hash != -1); + mp = (PyDictObject *)op; + ep = (mp->ma_keys->dk_lookup)(mp, key, hash, &value_addr); + if (ep == NULL) + return -1; + if (*value_addr == NULL) { + _PyErr_SetKeyError(key); + return -1; + } + old_value = *value_addr; + *value_addr = NULL; + mp->ma_used--; + if (!_PyDict_HasSplitTable(mp)) { + ENSURE_ALLOWS_DELETIONS(mp); + old_key = ep->me_key; + Py_INCREF(dummy); + ep->me_key = dummy; + Py_DECREF(old_key); + } + Py_DECREF(old_value); + return 0; +} + void PyDict_Clear(PyObject *op) { -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Fri Oct 2 12:15:42 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Fri, 02 Oct 2015 10:15:42 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzI0ODQ4?= =?utf-8?q?=3A_Fixed_bugs_in_UTF-7_decoding_of_misformed_data=3A?= Message-ID: <20151002101541.31197.99675@psf.io> https://hg.python.org/cpython/rev/3c13567ea642 changeset: 98477:3c13567ea642 branch: 3.4 parent: 98465:72c57c120c19 user: Serhiy Storchaka date: Fri Oct 02 13:07:28 2015 +0300 summary: Issue #24848: Fixed bugs in UTF-7 decoding of misformed data: 1. Non-ASCII bytes were accepted after shift sequence. 2. A low surrogate could be emitted in case of error in high surrogate. 
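A brief sketch of the UTF-7 behaviour exercised by the tests in the diff below; the literal values are copied from the test_codecs.py assertions, and the last line shows the post-fix handling of a malformed shift sequence (assumed to run on a build containing this change):

    # Well-formed UTF-7: '+' opens a modified-base64 section and '+-' encodes '+'.
    assert "\u20ac".encode("utf-7") == b"+IKw-"           # U+20AC EURO SIGN
    assert "a+b".encode("utf-7") == b"a+-b"
    assert b"+2AHcoA-".decode("utf-7") == "\U000104A0"    # surrogate pair in the stream
    # Issue #24848: a stray non-ASCII byte inside a shift sequence is now
    # replaced instead of being accepted.
    assert b"a+2AE\xffb".decode("utf-7", "replace") == "a\ufffdb"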
files: Lib/test/test_codecs.py | 60 +++++++++++++++++++++++++++- Lib/test/test_unicode.py | 3 +- Misc/NEWS | 2 + Objects/unicodeobject.c | 21 +++++---- 4 files changed, 75 insertions(+), 11 deletions(-) diff --git a/Lib/test/test_codecs.py b/Lib/test/test_codecs.py --- a/Lib/test/test_codecs.py +++ b/Lib/test/test_codecs.py @@ -898,6 +898,32 @@ class UTF7Test(ReadTest, unittest.TestCase): encoding = "utf-7" + def test_ascii(self): + # Set D (directly encoded characters) + set_d = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ' + 'abcdefghijklmnopqrstuvwxyz' + '0123456789' + '\'(),-./:?') + self.assertEqual(set_d.encode(self.encoding), set_d.encode('ascii')) + self.assertEqual(set_d.encode('ascii').decode(self.encoding), set_d) + # Set O (optional direct characters) + set_o = ' !"#$%&*;<=>@[]^_`{|}' + self.assertEqual(set_o.encode(self.encoding), set_o.encode('ascii')) + self.assertEqual(set_o.encode('ascii').decode(self.encoding), set_o) + # + + self.assertEqual('a+b'.encode(self.encoding), b'a+-b') + self.assertEqual(b'a+-b'.decode(self.encoding), 'a+b') + # White spaces + ws = ' \t\n\r' + self.assertEqual(ws.encode(self.encoding), ws.encode('ascii')) + self.assertEqual(ws.encode('ascii').decode(self.encoding), ws) + # Other ASCII characters + other_ascii = ''.join(sorted(set(bytes(range(0x80)).decode()) - + set(set_d + set_o + '+' + ws))) + self.assertEqual(other_ascii.encode(self.encoding), + b'+AAAAAQACAAMABAAFAAYABwAIAAsADAAOAA8AEAARABIAEwAU' + b'ABUAFgAXABgAGQAaABsAHAAdAB4AHwBcAH4Afw-') + def test_partial(self): self.check_partial( 'a+-b\x00c\x80d\u0100e\U00010000f', @@ -939,7 +965,9 @@ def test_errors(self): tests = [ + (b'\xffb', '\ufffdb'), (b'a\xffb', 'a\ufffdb'), + (b'a\xff\xffb', 'a\ufffd\ufffdb'), (b'a+IK', 'a\ufffd'), (b'a+IK-b', 'a\ufffdb'), (b'a+IK,b', 'a\ufffdb'), @@ -955,6 +983,8 @@ (b'a+//,+IKw-b', 'a\ufffd\u20acb'), (b'a+///,+IKw-b', 'a\uffff\ufffd\u20acb'), (b'a+////,+IKw-b', 'a\uffff\ufffd\u20acb'), + (b'a+IKw-b\xff', 'a\u20acb\ufffd'), + (b'a+IKw\xffb', 'a\u20ac\ufffdb'), ] for raw, expected in tests: with self.subTest(raw=raw): @@ -966,8 +996,36 @@ self.assertEqual('\U000104A0'.encode(self.encoding), b'+2AHcoA-') self.assertEqual('\ud801\udca0'.encode(self.encoding), b'+2AHcoA-') self.assertEqual(b'+2AHcoA-'.decode(self.encoding), '\U000104A0') + self.assertEqual(b'+2AHcoA'.decode(self.encoding), '\U000104A0') + self.assertEqual('\u20ac\U000104A0'.encode(self.encoding), b'+IKzYAdyg-') + self.assertEqual(b'+IKzYAdyg-'.decode(self.encoding), '\u20ac\U000104A0') + self.assertEqual(b'+IKzYAdyg'.decode(self.encoding), '\u20ac\U000104A0') + self.assertEqual('\u20ac\u20ac\U000104A0'.encode(self.encoding), + b'+IKwgrNgB3KA-') + self.assertEqual(b'+IKwgrNgB3KA-'.decode(self.encoding), + '\u20ac\u20ac\U000104A0') + self.assertEqual(b'+IKwgrNgB3KA'.decode(self.encoding), + '\u20ac\u20ac\U000104A0') - test_lone_surrogates = None + def test_lone_surrogates(self): + tests = [ + (b'a+2AE-b', 'a\ud801b'), + (b'a+2AE\xffb', 'a\ufffdb'), + (b'a+2AE', 'a\ufffd'), + (b'a+2AEA-b', 'a\ufffdb'), + (b'a+2AH-b', 'a\ufffdb'), + (b'a+IKzYAQ-b', 'a\u20ac\ud801b'), + (b'a+IKzYAQ\xffb', 'a\u20ac\ufffdb'), + (b'a+IKzYAQA-b', 'a\u20ac\ufffdb'), + (b'a+IKzYAd-b', 'a\u20ac\ufffdb'), + (b'a+IKwgrNgB-b', 'a\u20ac\u20ac\ud801b'), + (b'a+IKwgrNgB\xffb', 'a\u20ac\u20ac\ufffdb'), + (b'a+IKwgrNgB', 'a\u20ac\u20ac\ufffd'), + (b'a+IKwgrNgBA-b', 'a\u20ac\u20ac\ufffdb'), + ] + for raw, expected in tests: + with self.subTest(raw=raw): + self.assertEqual(raw.decode('utf-7', 'replace'), expected) class 
UTF16ExTest(unittest.TestCase): diff --git a/Lib/test/test_unicode.py b/Lib/test/test_unicode.py --- a/Lib/test/test_unicode.py +++ b/Lib/test/test_unicode.py @@ -1524,7 +1524,7 @@ self.assertEqual(b'+2AHab9ze-'.decode('utf-7'), '\uD801\U000abcde') # Issue #2242: crash on some Windows/MSVC versions - self.assertEqual(b'+\xc1'.decode('utf-7'), '\xc1') + self.assertEqual(b'+\xc1'.decode('utf-7', 'ignore'), '') # Direct encoded characters set_d = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'(),-./:?" @@ -1966,6 +1966,7 @@ self.assertRaises(UnicodeError, str, b'Andr\202 x', 'ascii', 'strict') self.assertEqual(str(b'Andr\202 x', 'ascii', 'ignore'), "Andr x") self.assertEqual(str(b'Andr\202 x', 'ascii', 'replace'), 'Andr\uFFFD x') + self.assertEqual(str(b'\202 x', 'ascii', 'replace'), '\uFFFD x') # Error handling (unknown character names) self.assertEqual(b"\\N{foo}xx".decode("unicode-escape", "ignore"), "xx") diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,6 +10,8 @@ Core and Builtins ----------------- +- Issue #24848: Fixed a number of bugs in UTF-7 decoding of misformed data. + - Issue #25280: Import trace messages emitted in verbose (-v) mode are no longer formatted twice. diff --git a/Objects/unicodeobject.c b/Objects/unicodeobject.c --- a/Objects/unicodeobject.c +++ b/Objects/unicodeobject.c @@ -4381,31 +4381,31 @@ } else { /* now leaving a base-64 section */ inShift = 0; - s++; - if (surrogate) { - if (_PyUnicodeWriter_WriteCharInline(&writer, surrogate) < 0) - goto onError; - surrogate = 0; - } if (base64bits > 0) { /* left-over bits */ if (base64bits >= 6) { /* We've seen at least one base-64 character */ + s++; errmsg = "partial character in shift sequence"; goto utf7Error; } else { /* Some bits remain; they should be zero */ if (base64buffer != 0) { + s++; errmsg = "non-zero padding bits in shift sequence"; goto utf7Error; } } } - if (ch != '-') { + if (surrogate && DECODE_DIRECT(ch)) { + if (_PyUnicodeWriter_WriteCharInline(&writer, surrogate) < 0) + goto onError; + } + surrogate = 0; + if (ch == '-') { /* '-' is absorbed; other terminating characters are preserved */ - if (_PyUnicodeWriter_WriteCharInline(&writer, ch) < 0) - goto onError; + s++; } } } @@ -4419,6 +4419,7 @@ } else { /* begin base64-encoded section */ inShift = 1; + surrogate = 0; shiftOutStart = writer.pos; base64bits = 0; base64buffer = 0; @@ -4450,6 +4451,7 @@ if (inShift && !consumed) { /* in shift sequence, no more to follow */ /* if we're in an inconsistent state, that's an error */ + inShift = 0; if (surrogate || (base64bits >= 6) || (base64bits > 0 && base64buffer != 0)) { @@ -13337,6 +13339,7 @@ if (maxchar > writer->maxchar || writer->readonly) { /* resize + widen */ + maxchar = Py_MAX(maxchar, writer->maxchar); newbuffer = PyUnicode_New(newlen, maxchar); if (newbuffer == NULL) return -1; -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Fri Oct 2 12:15:42 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Fri, 02 Oct 2015 10:15:42 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_Issue_=2324848=3A_Fixed_bugs_in_UTF-7_decoding_of_misformed_da?= =?utf-8?b?dGE6?= Message-ID: <20151002101541.28569.90897@psf.io> https://hg.python.org/cpython/rev/a61fa2b08f87 changeset: 98478:a61fa2b08f87 branch: 3.5 parent: 98475:3f4c319a822f parent: 98477:3c13567ea642 user: Serhiy Storchaka date: Fri Oct 02 13:13:14 2015 +0300 summary: Issue #24848: Fixed bugs in UTF-7 decoding of misformed 
data: 1. Non-ASCII bytes were accepted after shift sequence. 2. A low surrogate could be emitted in case of error in high surrogate. 3. In some circumstances the '\xfd' character was produced instead of the replacement character '\ufffd' (due to a bug in _PyUnicodeWriter). files: Lib/test/test_codecs.py | 62 +++++++++++++++++++++++++++- Lib/test/test_unicode.py | 3 +- Misc/NEWS | 2 + Objects/unicodeobject.c | 21 +++++---- 4 files changed, 76 insertions(+), 12 deletions(-) diff --git a/Lib/test/test_codecs.py b/Lib/test/test_codecs.py --- a/Lib/test/test_codecs.py +++ b/Lib/test/test_codecs.py @@ -903,6 +903,32 @@ class UTF7Test(ReadTest, unittest.TestCase): encoding = "utf-7" + def test_ascii(self): + # Set D (directly encoded characters) + set_d = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ' + 'abcdefghijklmnopqrstuvwxyz' + '0123456789' + '\'(),-./:?') + self.assertEqual(set_d.encode(self.encoding), set_d.encode('ascii')) + self.assertEqual(set_d.encode('ascii').decode(self.encoding), set_d) + # Set O (optional direct characters) + set_o = ' !"#$%&*;<=>@[]^_`{|}' + self.assertEqual(set_o.encode(self.encoding), set_o.encode('ascii')) + self.assertEqual(set_o.encode('ascii').decode(self.encoding), set_o) + # + + self.assertEqual('a+b'.encode(self.encoding), b'a+-b') + self.assertEqual(b'a+-b'.decode(self.encoding), 'a+b') + # White spaces + ws = ' \t\n\r' + self.assertEqual(ws.encode(self.encoding), ws.encode('ascii')) + self.assertEqual(ws.encode('ascii').decode(self.encoding), ws) + # Other ASCII characters + other_ascii = ''.join(sorted(set(bytes(range(0x80)).decode()) - + set(set_d + set_o + '+' + ws))) + self.assertEqual(other_ascii.encode(self.encoding), + b'+AAAAAQACAAMABAAFAAYABwAIAAsADAAOAA8AEAARABIAEwAU' + b'ABUAFgAXABgAGQAaABsAHAAdAB4AHwBcAH4Afw-') + def test_partial(self): self.check_partial( 'a+-b\x00c\x80d\u0100e\U00010000f', @@ -944,7 +970,9 @@ def test_errors(self): tests = [ + (b'\xffb', '\ufffdb'), (b'a\xffb', 'a\ufffdb'), + (b'a\xff\xffb', 'a\ufffd\ufffdb'), (b'a+IK', 'a\ufffd'), (b'a+IK-b', 'a\ufffdb'), (b'a+IK,b', 'a\ufffdb'), @@ -960,6 +988,8 @@ (b'a+//,+IKw-b', 'a\ufffd\u20acb'), (b'a+///,+IKw-b', 'a\uffff\ufffd\u20acb'), (b'a+////,+IKw-b', 'a\uffff\ufffd\u20acb'), + (b'a+IKw-b\xff', 'a\u20acb\ufffd'), + (b'a+IKw\xffb', 'a\u20ac\ufffdb'), ] for raw, expected in tests: with self.subTest(raw=raw): @@ -971,8 +1001,36 @@ self.assertEqual('\U000104A0'.encode(self.encoding), b'+2AHcoA-') self.assertEqual('\ud801\udca0'.encode(self.encoding), b'+2AHcoA-') self.assertEqual(b'+2AHcoA-'.decode(self.encoding), '\U000104A0') - - test_lone_surrogates = None + self.assertEqual(b'+2AHcoA'.decode(self.encoding), '\U000104A0') + self.assertEqual('\u20ac\U000104A0'.encode(self.encoding), b'+IKzYAdyg-') + self.assertEqual(b'+IKzYAdyg-'.decode(self.encoding), '\u20ac\U000104A0') + self.assertEqual(b'+IKzYAdyg'.decode(self.encoding), '\u20ac\U000104A0') + self.assertEqual('\u20ac\u20ac\U000104A0'.encode(self.encoding), + b'+IKwgrNgB3KA-') + self.assertEqual(b'+IKwgrNgB3KA-'.decode(self.encoding), + '\u20ac\u20ac\U000104A0') + self.assertEqual(b'+IKwgrNgB3KA'.decode(self.encoding), + '\u20ac\u20ac\U000104A0') + + def test_lone_surrogates(self): + tests = [ + (b'a+2AE-b', 'a\ud801b'), + (b'a+2AE\xffb', 'a\ufffdb'), + (b'a+2AE', 'a\ufffd'), + (b'a+2AEA-b', 'a\ufffdb'), + (b'a+2AH-b', 'a\ufffdb'), + (b'a+IKzYAQ-b', 'a\u20ac\ud801b'), + (b'a+IKzYAQ\xffb', 'a\u20ac\ufffdb'), + (b'a+IKzYAQA-b', 'a\u20ac\ufffdb'), + (b'a+IKzYAd-b', 'a\u20ac\ufffdb'), + (b'a+IKwgrNgB-b', 'a\u20ac\u20ac\ud801b'), + 
(b'a+IKwgrNgB\xffb', 'a\u20ac\u20ac\ufffdb'), + (b'a+IKwgrNgB', 'a\u20ac\u20ac\ufffd'), + (b'a+IKwgrNgBA-b', 'a\u20ac\u20ac\ufffdb'), + ] + for raw, expected in tests: + with self.subTest(raw=raw): + self.assertEqual(raw.decode('utf-7', 'replace'), expected) class UTF16ExTest(unittest.TestCase): diff --git a/Lib/test/test_unicode.py b/Lib/test/test_unicode.py --- a/Lib/test/test_unicode.py +++ b/Lib/test/test_unicode.py @@ -1553,7 +1553,7 @@ self.assertEqual(b'+2AHab9ze-'.decode('utf-7'), '\uD801\U000abcde') # Issue #2242: crash on some Windows/MSVC versions - self.assertEqual(b'+\xc1'.decode('utf-7'), '\xc1') + self.assertEqual(b'+\xc1'.decode('utf-7', 'ignore'), '') # Direct encoded characters set_d = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'(),-./:?" @@ -1995,6 +1995,7 @@ self.assertRaises(UnicodeError, str, b'Andr\202 x', 'ascii', 'strict') self.assertEqual(str(b'Andr\202 x', 'ascii', 'ignore'), "Andr x") self.assertEqual(str(b'Andr\202 x', 'ascii', 'replace'), 'Andr\uFFFD x') + self.assertEqual(str(b'\202 x', 'ascii', 'replace'), '\uFFFD x') # Error handling (unknown character names) self.assertEqual(b"\\N{foo}xx".decode("unicode-escape", "ignore"), "xx") diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -11,6 +11,8 @@ Core and Builtins ----------------- +- Issue #24848: Fixed a number of bugs in UTF-7 decoding of misformed data. + - Issue #25280: Import trace messages emitted in verbose (-v) mode are no longer formatted twice. diff --git a/Objects/unicodeobject.c b/Objects/unicodeobject.c --- a/Objects/unicodeobject.c +++ b/Objects/unicodeobject.c @@ -4330,31 +4330,31 @@ } else { /* now leaving a base-64 section */ inShift = 0; - s++; - if (surrogate) { - if (_PyUnicodeWriter_WriteCharInline(&writer, surrogate) < 0) - goto onError; - surrogate = 0; - } if (base64bits > 0) { /* left-over bits */ if (base64bits >= 6) { /* We've seen at least one base-64 character */ + s++; errmsg = "partial character in shift sequence"; goto utf7Error; } else { /* Some bits remain; they should be zero */ if (base64buffer != 0) { + s++; errmsg = "non-zero padding bits in shift sequence"; goto utf7Error; } } } - if (ch != '-') { + if (surrogate && DECODE_DIRECT(ch)) { + if (_PyUnicodeWriter_WriteCharInline(&writer, surrogate) < 0) + goto onError; + } + surrogate = 0; + if (ch == '-') { /* '-' is absorbed; other terminating characters are preserved */ - if (_PyUnicodeWriter_WriteCharInline(&writer, ch) < 0) - goto onError; + s++; } } } @@ -4368,6 +4368,7 @@ } else { /* begin base64-encoded section */ inShift = 1; + surrogate = 0; shiftOutStart = writer.pos; base64bits = 0; base64buffer = 0; @@ -4399,6 +4400,7 @@ if (inShift && !consumed) { /* in shift sequence, no more to follow */ /* if we're in an inconsistent state, that's an error */ + inShift = 0; if (surrogate || (base64bits >= 6) || (base64bits > 0 && base64buffer != 0)) { @@ -13291,6 +13293,7 @@ if (maxchar > writer->maxchar || writer->readonly) { /* resize + widen */ + maxchar = Py_MAX(maxchar, writer->maxchar); newbuffer = PyUnicode_New(newlen, maxchar); if (newbuffer == NULL) return -1; -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Fri Oct 2 12:15:41 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Fri, 02 Oct 2015 10:15:41 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2324848=3A_Fixed_bugs_in_UTF-7_decoding_of_misfor?= =?utf-8?q?med_data=3A?= Message-ID: <20151002101541.18061.93576@psf.io> 
https://hg.python.org/cpython/rev/037253b7cd6d changeset: 98479:037253b7cd6d parent: 98476:5758b85627c9 parent: 98478:a61fa2b08f87 user: Serhiy Storchaka date: Fri Oct 02 13:14:03 2015 +0300 summary: Issue #24848: Fixed bugs in UTF-7 decoding of misformed data: 1. Non-ASCII bytes were accepted after shift sequence. 2. A low surrogate could be emitted in case of error in high surrogate. 3. In some circumstances the '\xfd' character was produced instead of the replacement character '\ufffd' (due to a bug in _PyUnicodeWriter). files: Lib/test/test_codecs.py | 62 +++++++++++++++++++++++++++- Lib/test/test_unicode.py | 3 +- Misc/NEWS | 2 + Objects/unicodeobject.c | 21 +++++---- 4 files changed, 76 insertions(+), 12 deletions(-) diff --git a/Lib/test/test_codecs.py b/Lib/test/test_codecs.py --- a/Lib/test/test_codecs.py +++ b/Lib/test/test_codecs.py @@ -926,6 +926,32 @@ class UTF7Test(ReadTest, unittest.TestCase): encoding = "utf-7" + def test_ascii(self): + # Set D (directly encoded characters) + set_d = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ' + 'abcdefghijklmnopqrstuvwxyz' + '0123456789' + '\'(),-./:?') + self.assertEqual(set_d.encode(self.encoding), set_d.encode('ascii')) + self.assertEqual(set_d.encode('ascii').decode(self.encoding), set_d) + # Set O (optional direct characters) + set_o = ' !"#$%&*;<=>@[]^_`{|}' + self.assertEqual(set_o.encode(self.encoding), set_o.encode('ascii')) + self.assertEqual(set_o.encode('ascii').decode(self.encoding), set_o) + # + + self.assertEqual('a+b'.encode(self.encoding), b'a+-b') + self.assertEqual(b'a+-b'.decode(self.encoding), 'a+b') + # White spaces + ws = ' \t\n\r' + self.assertEqual(ws.encode(self.encoding), ws.encode('ascii')) + self.assertEqual(ws.encode('ascii').decode(self.encoding), ws) + # Other ASCII characters + other_ascii = ''.join(sorted(set(bytes(range(0x80)).decode()) - + set(set_d + set_o + '+' + ws))) + self.assertEqual(other_ascii.encode(self.encoding), + b'+AAAAAQACAAMABAAFAAYABwAIAAsADAAOAA8AEAARABIAEwAU' + b'ABUAFgAXABgAGQAaABsAHAAdAB4AHwBcAH4Afw-') + def test_partial(self): self.check_partial( 'a+-b\x00c\x80d\u0100e\U00010000f', @@ -967,7 +993,9 @@ def test_errors(self): tests = [ + (b'\xffb', '\ufffdb'), (b'a\xffb', 'a\ufffdb'), + (b'a\xff\xffb', 'a\ufffd\ufffdb'), (b'a+IK', 'a\ufffd'), (b'a+IK-b', 'a\ufffdb'), (b'a+IK,b', 'a\ufffdb'), @@ -983,6 +1011,8 @@ (b'a+//,+IKw-b', 'a\ufffd\u20acb'), (b'a+///,+IKw-b', 'a\uffff\ufffd\u20acb'), (b'a+////,+IKw-b', 'a\uffff\ufffd\u20acb'), + (b'a+IKw-b\xff', 'a\u20acb\ufffd'), + (b'a+IKw\xffb', 'a\u20ac\ufffdb'), ] for raw, expected in tests: with self.subTest(raw=raw): @@ -994,8 +1024,36 @@ self.assertEqual('\U000104A0'.encode(self.encoding), b'+2AHcoA-') self.assertEqual('\ud801\udca0'.encode(self.encoding), b'+2AHcoA-') self.assertEqual(b'+2AHcoA-'.decode(self.encoding), '\U000104A0') - - test_lone_surrogates = None + self.assertEqual(b'+2AHcoA'.decode(self.encoding), '\U000104A0') + self.assertEqual('\u20ac\U000104A0'.encode(self.encoding), b'+IKzYAdyg-') + self.assertEqual(b'+IKzYAdyg-'.decode(self.encoding), '\u20ac\U000104A0') + self.assertEqual(b'+IKzYAdyg'.decode(self.encoding), '\u20ac\U000104A0') + self.assertEqual('\u20ac\u20ac\U000104A0'.encode(self.encoding), + b'+IKwgrNgB3KA-') + self.assertEqual(b'+IKwgrNgB3KA-'.decode(self.encoding), + '\u20ac\u20ac\U000104A0') + self.assertEqual(b'+IKwgrNgB3KA'.decode(self.encoding), + '\u20ac\u20ac\U000104A0') + + def test_lone_surrogates(self): + tests = [ + (b'a+2AE-b', 'a\ud801b'), + (b'a+2AE\xffb', 'a\ufffdb'), + (b'a+2AE', 'a\ufffd'), + 
(b'a+2AEA-b', 'a\ufffdb'), + (b'a+2AH-b', 'a\ufffdb'), + (b'a+IKzYAQ-b', 'a\u20ac\ud801b'), + (b'a+IKzYAQ\xffb', 'a\u20ac\ufffdb'), + (b'a+IKzYAQA-b', 'a\u20ac\ufffdb'), + (b'a+IKzYAd-b', 'a\u20ac\ufffdb'), + (b'a+IKwgrNgB-b', 'a\u20ac\u20ac\ud801b'), + (b'a+IKwgrNgB\xffb', 'a\u20ac\u20ac\ufffdb'), + (b'a+IKwgrNgB', 'a\u20ac\u20ac\ufffd'), + (b'a+IKwgrNgBA-b', 'a\u20ac\u20ac\ufffdb'), + ] + for raw, expected in tests: + with self.subTest(raw=raw): + self.assertEqual(raw.decode('utf-7', 'replace'), expected) class UTF16ExTest(unittest.TestCase): diff --git a/Lib/test/test_unicode.py b/Lib/test/test_unicode.py --- a/Lib/test/test_unicode.py +++ b/Lib/test/test_unicode.py @@ -1553,7 +1553,7 @@ self.assertEqual(b'+2AHab9ze-'.decode('utf-7'), '\uD801\U000abcde') # Issue #2242: crash on some Windows/MSVC versions - self.assertEqual(b'+\xc1'.decode('utf-7'), '\xc1') + self.assertEqual(b'+\xc1'.decode('utf-7', 'ignore'), '') # Direct encoded characters set_d = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'(),-./:?" @@ -1995,6 +1995,7 @@ self.assertRaises(UnicodeError, str, b'Andr\202 x', 'ascii', 'strict') self.assertEqual(str(b'Andr\202 x', 'ascii', 'ignore'), "Andr x") self.assertEqual(str(b'Andr\202 x', 'ascii', 'replace'), 'Andr\uFFFD x') + self.assertEqual(str(b'\202 x', 'ascii', 'replace'), '\uFFFD x') # Error handling (unknown character names) self.assertEqual(b"\\N{foo}xx".decode("unicode-escape", "ignore"), "xx") diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,6 +10,8 @@ Core and Builtins ----------------- +- Issue #24848: Fixed a number of bugs in UTF-7 decoding of misformed data. + - Issue #25267: The UTF-8 encoder is now up to 75 times as fast for error handlers: ``ignore``, ``replace``, ``surrogateescape``, ``surrogatepass``. Patch co-written with Serhiy Storchaka. 
diff --git a/Objects/unicodeobject.c b/Objects/unicodeobject.c --- a/Objects/unicodeobject.c +++ b/Objects/unicodeobject.c @@ -4360,31 +4360,31 @@ } else { /* now leaving a base-64 section */ inShift = 0; - s++; - if (surrogate) { - if (_PyUnicodeWriter_WriteCharInline(&writer, surrogate) < 0) - goto onError; - surrogate = 0; - } if (base64bits > 0) { /* left-over bits */ if (base64bits >= 6) { /* We've seen at least one base-64 character */ + s++; errmsg = "partial character in shift sequence"; goto utf7Error; } else { /* Some bits remain; they should be zero */ if (base64buffer != 0) { + s++; errmsg = "non-zero padding bits in shift sequence"; goto utf7Error; } } } - if (ch != '-') { + if (surrogate && DECODE_DIRECT(ch)) { + if (_PyUnicodeWriter_WriteCharInline(&writer, surrogate) < 0) + goto onError; + } + surrogate = 0; + if (ch == '-') { /* '-' is absorbed; other terminating characters are preserved */ - if (_PyUnicodeWriter_WriteCharInline(&writer, ch) < 0) - goto onError; + s++; } } } @@ -4398,6 +4398,7 @@ } else { /* begin base64-encoded section */ inShift = 1; + surrogate = 0; shiftOutStart = writer.pos; base64bits = 0; base64buffer = 0; @@ -4429,6 +4430,7 @@ if (inShift && !consumed) { /* in shift sequence, no more to follow */ /* if we're in an inconsistent state, that's an error */ + inShift = 0; if (surrogate || (base64bits >= 6) || (base64bits > 0 && base64buffer != 0)) { @@ -13366,6 +13368,7 @@ if (maxchar > writer->maxchar || writer->readonly) { /* resize + widen */ + maxchar = Py_MAX(maxchar, writer->maxchar); newbuffer = PyUnicode_New(newlen, maxchar); if (newbuffer == NULL) return -1; -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Fri Oct 2 12:15:42 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Fri, 02 Oct 2015 10:15:42 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogSXNzdWUgIzI0ODQ4?= =?utf-8?q?=3A_Fixed_bugs_in_UTF-7_decoding_of_misformed_data=3A?= Message-ID: <20151002101541.11702.73372@psf.io> https://hg.python.org/cpython/rev/c6eaa722e2c1 changeset: 98480:c6eaa722e2c1 branch: 2.7 parent: 98454:202c827f86df user: Serhiy Storchaka date: Fri Oct 02 13:14:53 2015 +0300 summary: Issue #24848: Fixed bugs in UTF-7 decoding of misformed data: 1. Non-ASCII bytes were accepted after shift sequence. 2. A low surrogate could be emitted in case of error in high surrogate. 
files: Lib/test/test_codecs.py | 59 ++++++++++++++++++++++++++++ Lib/test/test_unicode.py | 1 + Misc/NEWS | 2 + Objects/unicodeobject.c | 16 ++++--- 4 files changed, 71 insertions(+), 7 deletions(-) diff --git a/Lib/test/test_codecs.py b/Lib/test/test_codecs.py --- a/Lib/test/test_codecs.py +++ b/Lib/test/test_codecs.py @@ -642,6 +642,32 @@ class UTF7Test(ReadTest): encoding = "utf-7" + def test_ascii(self): + # Set D (directly encoded characters) + set_d = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ' + 'abcdefghijklmnopqrstuvwxyz' + '0123456789' + '\'(),-./:?') + self.assertEqual(set_d.encode(self.encoding), set_d) + self.assertEqual(set_d.decode(self.encoding), set_d) + # Set O (optional direct characters) + set_o = ' !"#$%&*;<=>@[]^_`{|}' + self.assertEqual(set_o.encode(self.encoding), set_o) + self.assertEqual(set_o.decode(self.encoding), set_o) + # + + self.assertEqual(u'a+b'.encode(self.encoding), 'a+-b') + self.assertEqual('a+-b'.decode(self.encoding), u'a+b') + # White spaces + ws = ' \t\n\r' + self.assertEqual(ws.encode(self.encoding), ws) + self.assertEqual(ws.decode(self.encoding), ws) + # Other ASCII characters + other_ascii = ''.join(sorted(set(chr(i) for i in range(0x80)) - + set(set_d + set_o + '+' + ws))) + self.assertEqual(other_ascii.encode(self.encoding), + '+AAAAAQACAAMABAAFAAYABwAIAAsADAAOAA8AEAARABIAEwAU' + 'ABUAFgAXABgAGQAaABsAHAAdAB4AHwBcAH4Afw-') + def test_partial(self): self.check_partial( u"a+-b", @@ -656,7 +682,9 @@ def test_errors(self): tests = [ + ('\xffb', u'\ufffdb'), ('a\xffb', u'a\ufffdb'), + ('a\xff\xffb', u'a\ufffd\ufffdb'), ('a+IK', u'a\ufffd'), ('a+IK-b', u'a\ufffdb'), ('a+IK,b', u'a\ufffdb'), @@ -672,6 +700,8 @@ ('a+//,+IKw-b', u'a\ufffd\u20acb'), ('a+///,+IKw-b', u'a\uffff\ufffd\u20acb'), ('a+////,+IKw-b', u'a\uffff\ufffd\u20acb'), + ('a+IKw-b\xff', u'a\u20acb\ufffd'), + ('a+IKw\xffb', u'a\u20ac\ufffdb'), ] for raw, expected in tests: self.assertRaises(UnicodeDecodeError, codecs.utf_7_decode, @@ -682,6 +712,35 @@ self.assertEqual(u'\U000104A0'.encode(self.encoding), '+2AHcoA-') self.assertEqual(u'\ud801\udca0'.encode(self.encoding), '+2AHcoA-') self.assertEqual('+2AHcoA-'.decode(self.encoding), u'\U000104A0') + self.assertEqual('+2AHcoA'.decode(self.encoding), u'\U000104A0') + self.assertEqual(u'\u20ac\U000104A0'.encode(self.encoding), '+IKzYAdyg-') + self.assertEqual('+IKzYAdyg-'.decode(self.encoding), u'\u20ac\U000104A0') + self.assertEqual('+IKzYAdyg'.decode(self.encoding), u'\u20ac\U000104A0') + self.assertEqual(u'\u20ac\u20ac\U000104A0'.encode(self.encoding), + '+IKwgrNgB3KA-') + self.assertEqual('+IKwgrNgB3KA-'.decode(self.encoding), + u'\u20ac\u20ac\U000104A0') + self.assertEqual('+IKwgrNgB3KA'.decode(self.encoding), + u'\u20ac\u20ac\U000104A0') + + def test_lone_surrogates(self): + tests = [ + ('a+2AE-b', u'a\ud801b'), + ('a+2AE\xffb', u'a\ufffdb'), + ('a+2AE', u'a\ufffd'), + ('a+2AEA-b', u'a\ufffdb'), + ('a+2AH-b', u'a\ufffdb'), + ('a+IKzYAQ-b', u'a\u20ac\ud801b'), + ('a+IKzYAQ\xffb', u'a\u20ac\ufffdb'), + ('a+IKzYAQA-b', u'a\u20ac\ufffdb'), + ('a+IKzYAd-b', u'a\u20ac\ufffdb'), + ('a+IKwgrNgB-b', u'a\u20ac\u20ac\ud801b'), + ('a+IKwgrNgB\xffb', u'a\u20ac\u20ac\ufffdb'), + ('a+IKwgrNgB', u'a\u20ac\u20ac\ufffd'), + ('a+IKwgrNgBA-b', u'a\u20ac\u20ac\ufffdb'), + ] + for raw, expected in tests: + self.assertEqual(raw.decode('utf-7', 'replace'), expected) class UTF16ExTest(unittest.TestCase): diff --git a/Lib/test/test_unicode.py b/Lib/test/test_unicode.py --- a/Lib/test/test_unicode.py +++ b/Lib/test/test_unicode.py @@ -1036,6 +1036,7 @@ 
self.assertRaises(UnicodeError, unicode, 'Andr\202 x', 'ascii','strict') self.assertEqual(unicode('Andr\202 x','ascii','ignore'), u"Andr x") self.assertEqual(unicode('Andr\202 x','ascii','replace'), u'Andr\uFFFD x') + self.assertEqual(unicode('\202 x', 'ascii', 'replace'), u'\uFFFD x') self.assertEqual(u'abcde'.decode('ascii', 'ignore'), u'abcde'.decode('ascii', errors='ignore')) self.assertEqual(u'abcde'.decode('ascii', 'replace'), diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,6 +10,8 @@ Core and Builtins ----------------- +- Issue #24848: Fixed a number of bugs in UTF-7 decoding of misformed data. + - Issue #25003: os.urandom() doesn't use getentropy() on Solaris because getentropy() is blocking, whereas os.urandom() should not block. getentropy() is supported since Solaris 11.3. diff --git a/Objects/unicodeobject.c b/Objects/unicodeobject.c --- a/Objects/unicodeobject.c +++ b/Objects/unicodeobject.c @@ -1716,29 +1716,29 @@ } else { /* now leaving a base-64 section */ inShift = 0; - s++; - if (surrogate) { - *p++ = surrogate; - surrogate = 0; - } if (base64bits > 0) { /* left-over bits */ if (base64bits >= 6) { /* We've seen at least one base-64 character */ + s++; errmsg = "partial character in shift sequence"; goto utf7Error; } else { /* Some bits remain; they should be zero */ if (base64buffer != 0) { + s++; errmsg = "non-zero padding bits in shift sequence"; goto utf7Error; } } } - if (ch != '-') { + if (surrogate && DECODE_DIRECT(ch)) + *p++ = surrogate; + surrogate = 0; + if (ch == '-') { /* '-' is absorbed; other terminating characters are preserved */ - *p++ = ch; + s++; } } } @@ -1751,6 +1751,7 @@ } else { /* begin base64-encoded section */ inShift = 1; + surrogate = 0; shiftOutStart = p; base64bits = 0; base64buffer = 0; @@ -1782,6 +1783,7 @@ if (inShift && !consumed) { /* in shift sequence, no more to follow */ /* if we're in an inconsistent state, that's an error */ + inShift = 0; if (surrogate || (base64bits >= 6) || (base64bits > 0 && base64buffer != 0)) { -- Repository URL: https://hg.python.org/cpython From lp_benchmark_robot at intel.com Fri Oct 2 17:05:13 2015 From: lp_benchmark_robot at intel.com (lp_benchmark_robot at intel.com) Date: Fri, 2 Oct 2015 16:05:13 +0100 Subject: [Python-checkins] Benchmark Results for Python Default 2015-10-02 Message-ID: <201915f0-c6e4-44a7-8467-ee204c9e99ee@irsmsx151.ger.corp.intel.com> Results for project python_default-nightly, build date 2015-10-02 03:02:00 commit: c98cc9f7e2c51bb4c5469754b5a8896c7c6520e1 revision date: 2015-10-01 22:20:11 +0000 environment: Haswell-EP cpu: Intel(R) Xeon(R) CPU E5-2699 v3 @ 2.30GHz 2x18 cores, stepping 2, LLC 45 MB mem: 128 GB os: CentOS 7.1 kernel: Linux 3.10.0-229.4.2.el7.x86_64 Baseline results were generated using release v3.4.3, with hash b4cbecbc0781e89a309d03b60a1f75f8499250e6 from 2015-02-25 12:15:33+00:00 ------------------------------------------------------------------------------------------ benchmark relative change since change since current rev with std_dev* last run v3.4.3 regrtest PGO ------------------------------------------------------------------------------------------ :-) django_v2 0.48178% -1.74654% 7.03348% 17.29170% :-| pybench 0.15028% 0.09730% -1.62936% 8.80656% :-( regex_v8 2.74047% -0.00311% -3.98793% 6.26954% :-| nbody 0.48580% -0.20683% -0.07502% 10.24049% :-| json_dump_v2 0.27845% 0.17579% -0.89919% 11.38912% :-| normal_startup 0.65205% 0.02698% 0.25297% 4.87967% 
------------------------------------------------------------------------------------------ Note: Benchmark results are measured in seconds. * Relative Standard Deviation (Standard Deviation/Average) Our lab does a nightly source pull and build of the Python project and measures performance changes against the previous stable version and the previous nightly measurement. This is provided as a service to the community so that quality issues with current hardware can be identified quickly. Intel technologies' features and benefits depend on system configuration and may require enabled hardware, software or service activation. Performance varies depending on system configuration. From lp_benchmark_robot at intel.com Fri Oct 2 17:05:47 2015 From: lp_benchmark_robot at intel.com (lp_benchmark_robot at intel.com) Date: Fri, 2 Oct 2015 16:05:47 +0100 Subject: [Python-checkins] Benchmark Results for Python 2.7 2015-10-02 Message-ID: <9b1adbb6-1000-4cc1-90e2-52c6cc6cc9b6@irsmsx151.ger.corp.intel.com> Results for project python_2.7-nightly, build date 2015-10-02 07:24:36 commit: 202c827f86df311af57d47cde6c39b86b8df3155 revision date: 2015-10-01 07:57:26 +0000 environment: Haswell-EP cpu: Intel(R) Xeon(R) CPU E5-2699 v3 @ 2.30GHz 2x18 cores, stepping 2, LLC 45 MB mem: 128 GB os: CentOS 7.1 kernel: Linux 3.10.0-229.4.2.el7.x86_64 Baseline results were generated using release v2.7.10, with hash 15c95b7d81dcf821daade360741e00714667653f from 2015-05-23 16:02:14+00:00 ------------------------------------------------------------------------------------------ benchmark relative change since change since current rev with std_dev* last run v2.7.10 regrtest PGO ------------------------------------------------------------------------------------------ :-) django_v2 0.11813% 0.53079% 4.53292% 10.61265% :-) pybench 0.16320% -0.52636% 6.12360% 7.31444% :-| regex_v8 0.98661% 0.06033% -1.83641% 6.58988% :-) nbody 0.13567% -0.09577% 8.53668% 4.48159% :-) json_dump_v2 0.28424% 0.18774% 3.62669% 13.46654% :-| normal_startup 1.80744% -0.37050% -1.65924% 1.98921% :-| ssbench 0.60809% -0.53691% 1.03661% 2.63734% ------------------------------------------------------------------------------------------ Note: Benchmark results for ssbench are measured in requests/second while all other are measured in seconds. * Relative Standard Deviation (Standard Deviation/Average) Our lab does a nightly source pull and build of the Python project and measures performance changes against the previous stable version and the previous nightly measurement. This is provided as a service to the community so that quality issues with current hardware can be identified quickly. Intel technologies' features and benefits depend on system configuration and may require enabled hardware, software or service activation. Performance varies depending on system configuration. From python-checkins at python.org Fri Oct 2 18:25:55 2015 From: python-checkins at python.org (berker.peksag) Date: Fri, 02 Oct 2015 16:25:55 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2325290=3A_Fix_typo_in_csv=2Ereader=28=29_docstri?= =?utf-8?q?ng?= Message-ID: <20151002162555.8654.48715@psf.io> https://hg.python.org/cpython/rev/7a3073921687 changeset: 98483:7a3073921687 parent: 98479:037253b7cd6d parent: 98482:3b565295eba0 user: Berker Peksag date: Fri Oct 02 19:26:14 2015 +0300 summary: Issue #25290: Fix typo in csv.reader() docstring Patch by Johannes Niediek. 
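Unrelated to the one-character fix itself, a small usage sketch (illustrative data, not from the patch) of the behaviour the docstring describes, namely that a returned row can span multiple input lines when a quoted field contains a newline:

    import csv, io

    data = io.StringIO('name,comment\nguido,"line one\nline two"\n')
    for row in csv.reader(data):
        print(row)
    # ['name', 'comment']
    # ['guido', 'line one\nline two']   <- one row, two physical input lines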
files: Modules/_csv.c | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Modules/_csv.c b/Modules/_csv.c --- a/Modules/_csv.c +++ b/Modules/_csv.c @@ -1562,7 +1562,7 @@ "provided by the dialect.\n" "\n" "The returned object is an iterator. Each iteration returns a row\n" -"of the CSV file (which can span multiple input lines):\n"); +"of the CSV file (which can span multiple input lines).\n"); PyDoc_STRVAR(csv_writer_doc, " csv_writer = csv.writer(fileobj [, dialect='excel']\n" -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Fri Oct 2 18:25:55 2015 From: python-checkins at python.org (berker.peksag) Date: Fri, 02 Oct 2015 16:25:55 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_Issue_=2325290=3A_Fix_typo_in_csv=2Ereader=28=29_docstring?= Message-ID: <20151002162554.11700.31496@psf.io> https://hg.python.org/cpython/rev/3b565295eba0 changeset: 98482:3b565295eba0 branch: 3.5 parent: 98478:a61fa2b08f87 parent: 98481:3940f480ea16 user: Berker Peksag date: Fri Oct 02 19:25:53 2015 +0300 summary: Issue #25290: Fix typo in csv.reader() docstring Patch by Johannes Niediek. files: Modules/_csv.c | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Modules/_csv.c b/Modules/_csv.c --- a/Modules/_csv.c +++ b/Modules/_csv.c @@ -1562,7 +1562,7 @@ "provided by the dialect.\n" "\n" "The returned object is an iterator. Each iteration returns a row\n" -"of the CSV file (which can span multiple input lines):\n"); +"of the CSV file (which can span multiple input lines).\n"); PyDoc_STRVAR(csv_writer_doc, " csv_writer = csv.writer(fileobj [, dialect='excel']\n" -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Fri Oct 2 18:25:54 2015 From: python-checkins at python.org (berker.peksag) Date: Fri, 02 Oct 2015 16:25:54 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzI1Mjkw?= =?utf-8?q?=3A_Fix_typo_in_csv=2Ereader=28=29_docstring?= Message-ID: <20151002162554.93032.12609@psf.io> https://hg.python.org/cpython/rev/3940f480ea16 changeset: 98481:3940f480ea16 branch: 3.4 parent: 98477:3c13567ea642 user: Berker Peksag date: Fri Oct 02 19:25:32 2015 +0300 summary: Issue #25290: Fix typo in csv.reader() docstring Patch by Johannes Niediek. files: Modules/_csv.c | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Modules/_csv.c b/Modules/_csv.c --- a/Modules/_csv.c +++ b/Modules/_csv.c @@ -1563,7 +1563,7 @@ "provided by the dialect.\n" "\n" "The returned object is an iterator. Each iteration returns a row\n" -"of the CSV file (which can span multiple input lines):\n"); +"of the CSV file (which can span multiple input lines).\n"); PyDoc_STRVAR(csv_writer_doc, " csv_writer = csv.writer(fileobj [, dialect='excel']\n" -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Fri Oct 2 18:30:05 2015 From: python-checkins at python.org (berker.peksag) Date: Fri, 02 Oct 2015 16:30:05 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogSXNzdWUgIzI1Mjkw?= =?utf-8?q?=3A_Fix_typo_in_csv=2Ereader=28=29_docstring?= Message-ID: <20151002163005.93038.44749@psf.io> https://hg.python.org/cpython/rev/ceff1babf66e changeset: 98484:ceff1babf66e branch: 2.7 parent: 98480:c6eaa722e2c1 user: Berker Peksag date: Fri Oct 02 19:30:21 2015 +0300 summary: Issue #25290: Fix typo in csv.reader() docstring Patch by Johannes Niediek. 
files: Modules/_csv.c | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Modules/_csv.c b/Modules/_csv.c --- a/Modules/_csv.c +++ b/Modules/_csv.c @@ -1520,7 +1520,7 @@ "provided by the dialect.\n" "\n" "The returned object is an iterator. Each iteration returns a row\n" -"of the CSV file (which can span multiple input lines):\n"); +"of the CSV file (which can span multiple input lines).\n"); PyDoc_STRVAR(csv_writer_doc, " csv_writer = csv.writer(fileobj [, dialect='excel']\n" -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Fri Oct 2 19:24:25 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Fri, 02 Oct 2015 17:24:25 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=282=2E7=29=3A_Removed_the_?= =?utf-8?q?=22b=22_string_prefix_to_make_test=5Fxpickle_compatible_with_Py?= =?utf-8?b?dGhvbiAyLjUu?= Message-ID: <20151002172423.95970.47257@psf.io> https://hg.python.org/cpython/rev/8bbc51f97078 changeset: 98485:8bbc51f97078 branch: 2.7 user: Serhiy Storchaka date: Fri Oct 02 20:23:46 2015 +0300 summary: Removed the "b" string prefix to make test_xpickle compatible with Python 2.5. files: Lib/test/pickletester.py | 66 ++++++++++++++-------------- 1 files changed, 33 insertions(+), 33 deletions(-) diff --git a/Lib/test/pickletester.py b/Lib/test/pickletester.py --- a/Lib/test/pickletester.py +++ b/Lib/test/pickletester.py @@ -498,10 +498,10 @@ self.assertRaises(ValueError, self.loads, buf) def test_correctly_quoted_string(self): - goodpickles = [(b"S''\n.", ''), - (b'S""\n.', ''), - (b'S"\\n"\n.', '\n'), - (b"S'\\n'\n.", '\n')] + goodpickles = [("S''\n.", ''), + ('S""\n.', ''), + ('S"\\n"\n.', '\n'), + ("S'\\n'\n.", '\n')] for p, expected in goodpickles: self.assertEqual(self.loads(p), expected) @@ -521,10 +521,10 @@ 21: b BUILD 22: . STOP """ - pickle0 = (b"(i__main__\n" - b"X\n" - b"p0\n" - b"(dp1\nb.").replace(b'X', xname) + pickle0 = ("(i__main__\n" + "X\n" + "p0\n" + "(dp1\nb.").replace('X', xname) self.assert_is_copy(X(*args), self.loads(pickle0)) # Protocol 1 (binary mode pickle) @@ -539,12 +539,12 @@ 21: b BUILD 22: . STOP """ - pickle1 = (b'(c__main__\n' - b'X\n' - b'q\x00oq\x01}q\x02b.').replace(b'X', xname) + pickle1 = ('(c__main__\n' + 'X\n' + 'q\x00oq\x01}q\x02b.').replace('X', xname) self.assert_is_copy(X(*args), self.loads(pickle1)) - # Protocol 2 (pickle2 = b'\x80\x02' + pickle1) + # Protocol 2 (pickle2 = '\x80\x02' + pickle1) """ 0: \x80 PROTO 2 2: ( MARK @@ -557,63 +557,63 @@ 23: b BUILD 24: . 
STOP """ - pickle2 = (b'\x80\x02(c__main__\n' - b'X\n' - b'q\x00oq\x01}q\x02b.').replace(b'X', xname) + pickle2 = ('\x80\x02(c__main__\n' + 'X\n' + 'q\x00oq\x01}q\x02b.').replace('X', xname) self.assert_is_copy(X(*args), self.loads(pickle2)) def test_pop_empty_stack(self): # Test issue7455 - s = b'0' + s = '0' self.assertRaises((cPickle.UnpicklingError, IndexError), self.loads, s) def test_load_str(self): # From Python 2: pickle.dumps('a\x00\xa0', protocol=0) - self.assertEqual(self.loads(b"S'a\\x00\\xa0'\n."), b'a\x00\xa0') + self.assertEqual(self.loads("S'a\\x00\\xa0'\n."), 'a\x00\xa0') # From Python 2: pickle.dumps('a\x00\xa0', protocol=1) - self.assertEqual(self.loads(b'U\x03a\x00\xa0.'), b'a\x00\xa0') + self.assertEqual(self.loads('U\x03a\x00\xa0.'), 'a\x00\xa0') # From Python 2: pickle.dumps('a\x00\xa0', protocol=2) - self.assertEqual(self.loads(b'\x80\x02U\x03a\x00\xa0.'), b'a\x00\xa0') + self.assertEqual(self.loads('\x80\x02U\x03a\x00\xa0.'), 'a\x00\xa0') def test_load_unicode(self): # From Python 2: pickle.dumps(u'?', protocol=0) - self.assertEqual(self.loads(b'V\\u03c0\n.'), u'?') + self.assertEqual(self.loads('V\\u03c0\n.'), u'?') # From Python 2: pickle.dumps(u'?', protocol=1) - self.assertEqual(self.loads(b'X\x02\x00\x00\x00\xcf\x80.'), u'?') + self.assertEqual(self.loads('X\x02\x00\x00\x00\xcf\x80.'), u'?') # From Python 2: pickle.dumps(u'?', protocol=2) - self.assertEqual(self.loads(b'\x80\x02X\x02\x00\x00\x00\xcf\x80.'), u'?') + self.assertEqual(self.loads('\x80\x02X\x02\x00\x00\x00\xcf\x80.'), u'?') def test_constants(self): - self.assertIsNone(self.loads(b'N.')) - self.assertIs(self.loads(b'\x88.'), True) - self.assertIs(self.loads(b'\x89.'), False) - self.assertIs(self.loads(b'I01\n.'), True) - self.assertIs(self.loads(b'I00\n.'), False) + self.assertIsNone(self.loads('N.')) + self.assertIs(self.loads('\x88.'), True) + self.assertIs(self.loads('\x89.'), False) + self.assertIs(self.loads('I01\n.'), True) + self.assertIs(self.loads('I00\n.'), False) def test_misc_get(self): - self.assertRaises(self.error, self.loads, b'g0\np0\n') - self.assertRaises(self.error, self.loads, b'h\x00q\x00') + self.assertRaises(self.error, self.loads, 'g0\np0\n') + self.assertRaises(self.error, self.loads, 'h\x00q\x00') def test_get(self): - pickled = b'((lp100000\ng100000\nt.' + pickled = '((lp100000\ng100000\nt.' unpickled = self.loads(pickled) self.assertEqual(unpickled, ([],)*2) self.assertIs(unpickled[0], unpickled[1]) def test_binget(self): - pickled = b'(]q\xffh\xfft.' + pickled = '(]q\xffh\xfft.' unpickled = self.loads(pickled) self.assertEqual(unpickled, ([],)*2) self.assertIs(unpickled[0], unpickled[1]) def test_long_binget(self): - pickled = b'(]r\x00\x00\x01\x00j\x00\x00\x01\x00t.' + pickled = '(]r\x00\x00\x01\x00j\x00\x00\x01\x00t.' unpickled = self.loads(pickled) self.assertEqual(unpickled, ([],)*2) self.assertIs(unpickled[0], unpickled[1]) def test_dup(self): - pickled = b'((l2t.' + pickled = '((l2t.' 
unpickled = self.loads(pickled) self.assertEqual(unpickled, ([],)*2) self.assertIs(unpickled[0], unpickled[1]) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Fri Oct 2 20:45:02 2015 From: python-checkins at python.org (brett.cannon) Date: Fri, 02 Oct 2015 18:45:02 +0000 Subject: [Python-checkins] =?utf-8?q?benchmarks=3A_Issue_=2325266=3A_the_m?= =?utf-8?q?ako_benchmark_does_not_work_in_Python_3=2E6=2E?= Message-ID: <20151002184500.95982.90038@psf.io> https://hg.python.org/benchmarks/rev/85edb638dce6 changeset: 222:85edb638dce6 user: Brett Cannon date: Fri Oct 02 11:44:56 2015 -0700 summary: Issue #25266: the mako benchmark does not work in Python 3.6. Thanks to Florin Popa of Intel for the patch. files: perf.py | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/perf.py b/perf.py --- a/perf.py +++ b/perf.py @@ -1594,7 +1594,7 @@ bm_env = BuildEnv({"PYTHONPATH": mako_path}, options.inherit_env) return MeasureGeneric(python, options, bm_path, bm_env, iteration_scaling=5) - at VersionRange() + at VersionRange(None, '3.5') def BM_mako(*args, **kwargs): return SimpleBenchmark(MeasureMako, *args, **kwargs) -- Repository URL: https://hg.python.org/benchmarks From python-checkins at python.org Fri Oct 2 21:10:08 2015 From: python-checkins at python.org (yury.selivanov) Date: Fri, 02 Oct 2015 19:10:08 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_asyncio=3A_Make_ensure=5Ffuture=28=29_accept_all_kinds_of_awai?= =?utf-8?q?tables=2E?= Message-ID: <20151002191007.11714.24234@psf.io> https://hg.python.org/cpython/rev/901537ff7f80 changeset: 98487:901537ff7f80 branch: 3.5 parent: 98482:3b565295eba0 parent: 98486:b40a61e79893 user: Yury Selivanov date: Fri Oct 02 15:05:59 2015 -0400 summary: asyncio: Make ensure_future() accept all kinds of awaitables. files: Doc/whatsnew/3.5.rst | 7 ++++++ Lib/asyncio/tasks.py | 16 +++++++++++++- Lib/test/test_asyncio/test_tasks.py | 18 +++++++++++++++++ Misc/NEWS | 2 + 4 files changed, 41 insertions(+), 2 deletions(-) diff --git a/Doc/whatsnew/3.5.rst b/Doc/whatsnew/3.5.rst --- a/Doc/whatsnew/3.5.rst +++ b/Doc/whatsnew/3.5.rst @@ -803,6 +803,13 @@ :class:`asyncio.Queue` class. (Contributed by Victor Stinner.) +Updates in 3.5.1: + +* The :func:`~asyncio.ensure_future` function and all functions that + use it, such as :meth:`loop.run_until_complete() `, + now accept all kinds of :term:`awaitable objects `. + (Contributed by Yury Selivanov.) + bz2 --- diff --git a/Lib/asyncio/tasks.py b/Lib/asyncio/tasks.py --- a/Lib/asyncio/tasks.py +++ b/Lib/asyncio/tasks.py @@ -512,7 +512,7 @@ def ensure_future(coro_or_future, *, loop=None): - """Wrap a coroutine in a future. + """Wrap a coroutine or an awaitable in a future. If the argument is a Future, it is returned directly. """ @@ -527,8 +527,20 @@ if task._source_traceback: del task._source_traceback[-1] return task + elif compat.PY35 and inspect.isawaitable(coro_or_future): + return ensure_future(_wrap_awaitable(coro_or_future), loop=loop) else: - raise TypeError('A Future or coroutine is required') + raise TypeError('A Future, a coroutine or an awaitable is required') + + + at coroutine +def _wrap_awaitable(awaitable): + """Helper for asyncio.ensure_future(). + + Wraps awaitable (an object with __await__) into a coroutine + that will later be wrapped in a Task by ensure_future(). 
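Concretely, any object that implements __await__ can now be handed to ensure_future() and, through it, to run_until_complete(); the following is a condensed version of the new test added below, assuming an asyncio with this change applied:

    import asyncio

    class Aw:
        # a bare awaitable: not a Future, not a coroutine object
        def __init__(self, coro):
            self.coro = coro
        def __await__(self):
            return (yield from self.coro)

    @asyncio.coroutine
    def coro():
        return 'ok'

    loop = asyncio.new_event_loop()
    fut = asyncio.ensure_future(Aw(coro()), loop=loop)  # previously raised TypeError
    print(loop.run_until_complete(fut))                 # 'ok'
    loop.close()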
+ """ + return (yield from awaitable.__await__()) class _GatheringFuture(futures.Future): diff --git a/Lib/test/test_asyncio/test_tasks.py b/Lib/test/test_asyncio/test_tasks.py --- a/Lib/test/test_asyncio/test_tasks.py +++ b/Lib/test/test_asyncio/test_tasks.py @@ -153,6 +153,24 @@ t = asyncio.ensure_future(t_orig, loop=self.loop) self.assertIs(t, t_orig) + @unittest.skipUnless(PY35, 'need python 3.5 or later') + def test_ensure_future_awaitable(self): + class Aw: + def __init__(self, coro): + self.coro = coro + def __await__(self): + return (yield from self.coro) + + @asyncio.coroutine + def coro(): + return 'ok' + + loop = asyncio.new_event_loop() + self.set_event_loop(loop) + fut = asyncio.ensure_future(Aw(coro()), loop=loop) + loop.run_until_complete(fut) + assert fut.result() == 'ok' + def test_ensure_future_neither(self): with self.assertRaises(TypeError): asyncio.ensure_future('ok') diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -152,6 +152,8 @@ - Issue #23572: Fixed functools.singledispatch on classes with falsy metaclasses. Patch by Ethan Furman. +- asyncio: ensure_future() now accepts awaitable objects. + IDLE ---- -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Fri Oct 2 21:10:10 2015 From: python-checkins at python.org (yury.selivanov) Date: Fri, 02 Oct 2015 19:10:10 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_asyncio=3A_Make_ensure=5Ffuture=28=29_accept_all_kinds_o?= =?utf-8?q?f_awaitables=2E_=28Merge_3=2E5=29?= Message-ID: <20151002191007.81384.14531@psf.io> https://hg.python.org/cpython/rev/4d699bf00be0 changeset: 98488:4d699bf00be0 parent: 98483:7a3073921687 parent: 98487:901537ff7f80 user: Yury Selivanov date: Fri Oct 02 15:09:51 2015 -0400 summary: asyncio: Make ensure_future() accept all kinds of awaitables. (Merge 3.5) files: Doc/whatsnew/3.5.rst | 7 ++++++ Lib/asyncio/tasks.py | 16 +++++++++++++- Lib/test/test_asyncio/test_tasks.py | 18 +++++++++++++++++ 3 files changed, 39 insertions(+), 2 deletions(-) diff --git a/Doc/whatsnew/3.5.rst b/Doc/whatsnew/3.5.rst --- a/Doc/whatsnew/3.5.rst +++ b/Doc/whatsnew/3.5.rst @@ -803,6 +803,13 @@ :class:`asyncio.Queue` class. (Contributed by Victor Stinner.) +Updates in 3.5.1: + +* The :func:`~asyncio.ensure_future` function and all functions that + use it, such as :meth:`loop.run_until_complete() `, + now accept all kinds of :term:`awaitable objects `. + (Contributed by Yury Selivanov.) + bz2 --- diff --git a/Lib/asyncio/tasks.py b/Lib/asyncio/tasks.py --- a/Lib/asyncio/tasks.py +++ b/Lib/asyncio/tasks.py @@ -512,7 +512,7 @@ def ensure_future(coro_or_future, *, loop=None): - """Wrap a coroutine in a future. + """Wrap a coroutine or an awaitable in a future. If the argument is a Future, it is returned directly. """ @@ -527,8 +527,20 @@ if task._source_traceback: del task._source_traceback[-1] return task + elif compat.PY35 and inspect.isawaitable(coro_or_future): + return ensure_future(_wrap_awaitable(coro_or_future), loop=loop) else: - raise TypeError('A Future or coroutine is required') + raise TypeError('A Future, a coroutine or an awaitable is required') + + + at coroutine +def _wrap_awaitable(awaitable): + """Helper for asyncio.ensure_future(). + + Wraps awaitable (an object with __await__) into a coroutine + that will later be wrapped in a Task by ensure_future(). 
+ """ + return (yield from awaitable.__await__()) class _GatheringFuture(futures.Future): diff --git a/Lib/test/test_asyncio/test_tasks.py b/Lib/test/test_asyncio/test_tasks.py --- a/Lib/test/test_asyncio/test_tasks.py +++ b/Lib/test/test_asyncio/test_tasks.py @@ -153,6 +153,24 @@ t = asyncio.ensure_future(t_orig, loop=self.loop) self.assertIs(t, t_orig) + @unittest.skipUnless(PY35, 'need python 3.5 or later') + def test_ensure_future_awaitable(self): + class Aw: + def __init__(self, coro): + self.coro = coro + def __await__(self): + return (yield from self.coro) + + @asyncio.coroutine + def coro(): + return 'ok' + + loop = asyncio.new_event_loop() + self.set_event_loop(loop) + fut = asyncio.ensure_future(Aw(coro()), loop=loop) + loop.run_until_complete(fut) + assert fut.result() == 'ok' + def test_ensure_future_neither(self): with self.assertRaises(TypeError): asyncio.ensure_future('ok') -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Fri Oct 2 21:10:13 2015 From: python-checkins at python.org (yury.selivanov) Date: Fri, 02 Oct 2015 19:10:13 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogYXN5bmNpbzogZW5z?= =?utf-8?q?ure=5Ffuture=28=29_now_understands_awaitables?= Message-ID: <20151002191007.81372.29588@psf.io> https://hg.python.org/cpython/rev/b40a61e79893 changeset: 98486:b40a61e79893 branch: 3.4 parent: 98481:3940f480ea16 user: Yury Selivanov date: Fri Oct 02 15:00:19 2015 -0400 summary: asyncio: ensure_future() now understands awaitables files: Lib/asyncio/tasks.py | 16 +++++++++++++- Lib/test/test_asyncio/test_tasks.py | 18 +++++++++++++++++ 2 files changed, 32 insertions(+), 2 deletions(-) diff --git a/Lib/asyncio/tasks.py b/Lib/asyncio/tasks.py --- a/Lib/asyncio/tasks.py +++ b/Lib/asyncio/tasks.py @@ -512,7 +512,7 @@ def ensure_future(coro_or_future, *, loop=None): - """Wrap a coroutine in a future. + """Wrap a coroutine or an awaitable in a future. If the argument is a Future, it is returned directly. """ @@ -527,8 +527,20 @@ if task._source_traceback: del task._source_traceback[-1] return task + elif compat.PY35 and inspect.isawaitable(coro_or_future): + return ensure_future(_wrap_awaitable(coro_or_future), loop=loop) else: - raise TypeError('A Future or coroutine is required') + raise TypeError('A Future, a coroutine or an awaitable is required') + + + at coroutine +def _wrap_awaitable(awaitable): + """Helper for asyncio.ensure_future(). + + Wraps awaitable (an object with __await__) into a coroutine + that will later be wrapped in a Task by ensure_future(). 
+ """ + return (yield from awaitable.__await__()) class _GatheringFuture(futures.Future): diff --git a/Lib/test/test_asyncio/test_tasks.py b/Lib/test/test_asyncio/test_tasks.py --- a/Lib/test/test_asyncio/test_tasks.py +++ b/Lib/test/test_asyncio/test_tasks.py @@ -153,6 +153,24 @@ t = asyncio.ensure_future(t_orig, loop=self.loop) self.assertIs(t, t_orig) + @unittest.skipUnless(PY35, 'need python 3.5 or later') + def test_ensure_future_awaitable(self): + class Aw: + def __init__(self, coro): + self.coro = coro + def __await__(self): + return (yield from self.coro) + + @asyncio.coroutine + def coro(): + return 'ok' + + loop = asyncio.new_event_loop() + self.set_event_loop(loop) + fut = asyncio.ensure_future(Aw(coro()), loop=loop) + loop.run_until_complete(fut) + assert fut.result() == 'ok' + def test_ensure_future_neither(self): with self.assertRaises(TypeError): asyncio.ensure_future('ok') -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Fri Oct 2 22:28:53 2015 From: python-checkins at python.org (berker.peksag) Date: Fri, 02 Oct 2015 20:28:53 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Merge_from_3=2E5?= Message-ID: <20151002202851.93026.12716@psf.io> https://hg.python.org/cpython/rev/45640def1e3d changeset: 98490:45640def1e3d parent: 98488:4d699bf00be0 parent: 98489:4eb809fa1130 user: Berker Peksag date: Fri Oct 02 23:29:13 2015 +0300 summary: Merge from 3.5 Hg: -- files: Doc/tools/susp-ignored.csv | 10 +++++----- 1 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Doc/tools/susp-ignored.csv b/Doc/tools/susp-ignored.csv --- a/Doc/tools/susp-ignored.csv +++ b/Doc/tools/susp-ignored.csv @@ -290,8 +290,8 @@ library/stdtypes,,::,>>> m[::2].tolist() library/sys,1115,`,# ``wrapper`` creates a ``wrap(coro)`` coroutine: tutorial/venv,77,:c7b9645a6f35,"Python 3.4.3+ (3.4:c7b9645a6f35+, May 22 2015, 09:31:25)" -whatsnew/3.5,965,:root,'WARNING:root:warning\n' -whatsnew/3.5,965,:warning,'WARNING:root:warning\n' -whatsnew/3.5,1292,::,>>> addr6 = ipaddress.IPv6Address('::1') -whatsnew/3.5,1354,:root,ERROR:root:exception -whatsnew/3.5,1354,:exception,ERROR:root:exception +whatsnew/3.5,,:root,'WARNING:root:warning\n' +whatsnew/3.5,,:warning,'WARNING:root:warning\n' +whatsnew/3.5,,::,>>> addr6 = ipaddress.IPv6Address('::1') +whatsnew/3.5,,:root,ERROR:root:exception +whatsnew/3.5,,:exception,ERROR:root:exception -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Fri Oct 2 22:28:53 2015 From: python-checkins at python.org (berker.peksag) Date: Fri, 02 Oct 2015 20:28:53 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E5=29=3A_Tweak_susp-ign?= =?utf-8?q?ored=2Ecsv_to_make_buildbots_happy?= Message-ID: <20151002202851.78898.34660@psf.io> https://hg.python.org/cpython/rev/4eb809fa1130 changeset: 98489:4eb809fa1130 branch: 3.5 parent: 98487:901537ff7f80 user: Berker Peksag date: Fri Oct 02 23:28:45 2015 +0300 summary: Tweak susp-ignored.csv to make buildbots happy files: Doc/tools/susp-ignored.csv | 10 +++++----- 1 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Doc/tools/susp-ignored.csv b/Doc/tools/susp-ignored.csv --- a/Doc/tools/susp-ignored.csv +++ b/Doc/tools/susp-ignored.csv @@ -290,8 +290,8 @@ library/stdtypes,,::,>>> m[::2].tolist() library/sys,1115,`,# ``wrapper`` creates a ``wrap(coro)`` coroutine: tutorial/venv,77,:c7b9645a6f35,"Python 3.4.3+ (3.4:c7b9645a6f35+, May 22 2015, 09:31:25)" -whatsnew/3.5,965,:root,'WARNING:root:warning\n' 
-whatsnew/3.5,965,:warning,'WARNING:root:warning\n' -whatsnew/3.5,1292,::,>>> addr6 = ipaddress.IPv6Address('::1') -whatsnew/3.5,1354,:root,ERROR:root:exception -whatsnew/3.5,1354,:exception,ERROR:root:exception +whatsnew/3.5,,:root,'WARNING:root:warning\n' +whatsnew/3.5,,:warning,'WARNING:root:warning\n' +whatsnew/3.5,,::,>>> addr6 = ipaddress.IPv6Address('::1') +whatsnew/3.5,,:root,ERROR:root:exception +whatsnew/3.5,,:exception,ERROR:root:exception -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Fri Oct 2 23:02:30 2015 From: python-checkins at python.org (victor.stinner) Date: Fri, 02 Oct 2015 21:02:30 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2325287=3A_Don=27t_?= =?utf-8?q?add_crypt=2EMETHOD=5FCRYPT_to_crypt=2Emethods_if_it=27s_not?= Message-ID: <20151002210230.11706.11619@psf.io> https://hg.python.org/cpython/rev/4da7edbf78d4 changeset: 98491:4da7edbf78d4 user: Victor Stinner date: Fri Oct 02 23:00:39 2015 +0200 summary: Issue #25287: Don't add crypt.METHOD_CRYPT to crypt.methods if it's not supported. Check if it is supported, it may not be supported on OpenBSD for example. files: Doc/library/crypt.rst | 2 +- Lib/crypt.py | 3 +-- Misc/NEWS | 4 ++++ 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/Doc/library/crypt.rst b/Doc/library/crypt.rst --- a/Doc/library/crypt.rst +++ b/Doc/library/crypt.rst @@ -64,7 +64,7 @@ A list of available password hashing algorithms, as ``crypt.METHOD_*`` objects. This list is sorted from strongest to - weakest, and is guaranteed to have at least ``crypt.METHOD_CRYPT``. + weakest. Module Functions diff --git a/Lib/crypt.py b/Lib/crypt.py --- a/Lib/crypt.py +++ b/Lib/crypt.py @@ -54,9 +54,8 @@ METHOD_SHA512 = _Method('SHA512', '6', 16, 106) methods = [] -for _method in (METHOD_SHA512, METHOD_SHA256, METHOD_MD5): +for _method in (METHOD_SHA512, METHOD_SHA256, METHOD_MD5, METHOD_CRYPT): _result = crypt('', _method) if _result and len(_result) == _method.total_size: methods.append(_method) -methods.append(METHOD_CRYPT) del _result, _method diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -40,6 +40,10 @@ Library ------- +- Issue #25287: Don't add crypt.METHOD_CRYPT to crypt.methods if it's not + supported. Check if it is supported, it may not be supported on OpenBSD for + example. + - Issue #23600: Default implementation of tzinfo.fromutc() was returning wrong results in some cases. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 3 00:47:42 2015 From: python-checkins at python.org (victor.stinner) Date: Fri, 02 Oct 2015 22:47:42 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2318174=3A_=22pytho?= =?utf-8?q?n_-m_test_--huntrleaks_=2E=2E=2E=22_now_also_checks_for_leak_of?= Message-ID: <20151002224742.93030.91404@psf.io> https://hg.python.org/cpython/rev/72129c767c92 changeset: 98492:72129c767c92 user: Victor Stinner date: Sat Oct 03 00:20:56 2015 +0200 summary: Issue #18174: "python -m test --huntrleaks ..." now also checks for leak of file descriptors. Patch written by Richard Oudkerk. 
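The core of the patch is a new fd_count() helper in Lib/test/libregrtest/refleak.py, shown in the diff below. Here is a trimmed-down sketch of the same idea; note that the real code derives the upper bound from os.sysconf("SC_OPEN_MAX") whereas this sketch takes it as a keyword argument, and the os.devnull lines at the end are only an illustration of the kind of leak --huntrleaks now reports:

    import errno
    import os
    import sys

    def fd_count(maxfd=256):
        """Count the open file descriptors of the current process."""
        if sys.platform.startswith(('linux', 'freebsd')):
            try:
                # Fast path: one directory entry per open descriptor.
                return len(os.listdir('/proc/self/fd'))
            except FileNotFoundError:
                pass
        count = 0
        for fd in range(maxfd):
            try:
                fd2 = os.dup(fd)   # dup() avoids the I/O that fstat() may need
            except OSError as e:
                if e.errno != errno.EBADF:
                    raise
            else:
                os.close(fd2)
                count += 1
        return count

    before = fd_count()
    leaked = open(os.devnull)      # simulate a leaked descriptor
    assert fd_count() == before + 1
    leaked.close()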
files: Lib/test/libregrtest/refleak.py | 47 ++++++++++++++- Lib/test/test_regrtest.py | 63 ++++++++++++++++---- Misc/NEWS | 3 + 3 files changed, 94 insertions(+), 19 deletions(-) diff --git a/Lib/test/libregrtest/refleak.py b/Lib/test/libregrtest/refleak.py --- a/Lib/test/libregrtest/refleak.py +++ b/Lib/test/libregrtest/refleak.py @@ -1,3 +1,4 @@ +import errno import os import re import sys @@ -6,6 +7,36 @@ from test import support +try: + MAXFD = os.sysconf("SC_OPEN_MAX") +except Exception: + MAXFD = 256 + + +def fd_count(): + """Count the number of open file descriptors""" + if sys.platform.startswith(('linux', 'freebsd')): + try: + names = os.listdir("/proc/self/fd") + return len(names) + except FileNotFoundError: + pass + + count = 0 + for fd in range(MAXFD): + try: + # Prefer dup() over fstat(). fstat() can require input/output + # whereas dup() doesn't. + fd2 = os.dup(fd) + except OSError as e: + if e.errno != errno.EBADF: + raise + else: + os.close(fd2) + count += 1 + return count + + def dash_R(the_module, test, indirect_test, huntrleaks): """Run a test multiple times, looking for reference leaks. @@ -42,20 +73,25 @@ repcount = nwarmup + ntracked rc_deltas = [0] * repcount alloc_deltas = [0] * repcount + fd_deltas = [0] * repcount print("beginning", repcount, "repetitions", file=sys.stderr) print(("1234567890"*(repcount//10 + 1))[:repcount], file=sys.stderr, flush=True) # initialize variables to make pyflakes quiet - rc_before = alloc_before = 0 + rc_before = alloc_before = fd_before = 0 for i in range(repcount): indirect_test() - alloc_after, rc_after = dash_R_cleanup(fs, ps, pic, zdc, abcs) + alloc_after, rc_after, fd_after = dash_R_cleanup(fs, ps, pic, zdc, + abcs) print('.', end='', flush=True) if i >= nwarmup: rc_deltas[i] = rc_after - rc_before alloc_deltas[i] = alloc_after - alloc_before - alloc_before, rc_before = alloc_after, rc_after + fd_deltas[i] = fd_after - fd_before + alloc_before = alloc_after + rc_before = rc_after + fd_before = fd_after print(file=sys.stderr) # These checkers return False on success, True on failure def check_rc_deltas(deltas): @@ -71,7 +107,8 @@ failed = False for deltas, item_name, checker in [ (rc_deltas, 'references', check_rc_deltas), - (alloc_deltas, 'memory blocks', check_alloc_deltas)]: + (alloc_deltas, 'memory blocks', check_alloc_deltas), + (fd_deltas, 'file descriptors', check_rc_deltas)]: if checker(deltas): msg = '%s leaked %s %s, sum=%s' % ( test, deltas[nwarmup:], item_name, sum(deltas)) @@ -151,7 +188,7 @@ func1 = sys.getallocatedblocks func2 = sys.gettotalrefcount gc.collect() - return func1(), func2() + return func1(), func2(), fd_count() def warm_caches(): diff --git a/Lib/test/test_regrtest.py b/Lib/test/test_regrtest.py --- a/Lib/test/test_regrtest.py +++ b/Lib/test/test_regrtest.py @@ -383,27 +383,32 @@ self.assertTrue(0 <= randseed <= 10000000, randseed) return randseed - def run_command(self, args, input=None, exitcode=0): + def run_command(self, args, input=None, exitcode=0, **kw): if not input: input = '' + if 'stderr' not in kw: + kw['stderr'] = subprocess.PIPE proc = subprocess.run(args, universal_newlines=True, input=input, stdout=subprocess.PIPE, - stderr=subprocess.PIPE) + **kw) if proc.returncode != exitcode: - self.fail("Command %s failed with exit code %s\n" - "\n" - "stdout:\n" - "---\n" - "%s\n" - "---\n" - "\n" - "stderr:\n" - "---\n" - "%s" - "---\n" - % (str(args), proc.returncode, proc.stdout, proc.stderr)) + msg = ("Command %s failed with exit code %s\n" + "\n" + "stdout:\n" + "---\n" + "%s\n" + "---\n" + 
% (str(args), proc.returncode, proc.stdout)) + if proc.stderr: + msg += ("\n" + "stderr:\n" + "---\n" + "%s" + "---\n" + % proc.stderr) + self.fail(msg) return proc @@ -637,6 +642,36 @@ output = self.run_tests('--forever', test, exitcode=1) self.check_executed_tests(output, [test]*3, failed=test) + def test_huntrleaks_fd_leak(self): + # test --huntrleaks for file descriptor leak + code = textwrap.dedent(""" + import os + import unittest + + class FDLeakTest(unittest.TestCase): + def test_leak(self): + fd = os.open(__file__, os.O_RDONLY) + # bug: never cloes the file descriptor + """) + test = self.create_test(code=code) + + filename = 'reflog.txt' + self.addCleanup(support.unlink, filename) + output = self.run_tests('--huntrleaks', '3:3:', test, + exitcode=1, + stderr=subprocess.STDOUT) + self.check_executed_tests(output, [test], failed=test) + + line = 'beginning 6 repetitions\n123456\n......\n' + self.check_line(output, re.escape(line)) + + line2 = '%s leaked [1, 1, 1] file descriptors, sum=3\n' % test + self.check_line(output, re.escape(line2)) + + with open(filename) as fp: + reflog = fp.read() + self.assertEqual(reflog, line2) + if __name__ == '__main__': unittest.main() diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -170,6 +170,9 @@ Tests ----- +- Issue #18174: ``python -m test --huntrleaks ...`` now also checks for leak of + file descriptors. Patch written by Richard Oudkerk. + - Issue #25260: Fix ``python -m test --coverage`` on Windows. Remove the list of ignored directories. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 3 00:47:42 2015 From: python-checkins at python.org (victor.stinner) Date: Fri, 02 Oct 2015 22:47:42 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2322806=3A_Add_=60?= =?utf-8?q?=60python_-m_test_--list-tests=60=60_command_to_list_tests=2E?= Message-ID: <20151002224742.93024.92929@psf.io> https://hg.python.org/cpython/rev/1005573e6a74 changeset: 98493:1005573e6a74 user: Victor Stinner date: Sat Oct 03 00:21:12 2015 +0200 summary: Issue #22806: Add ``python -m test --list-tests`` command to list tests. 
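Illustrative usage sketch (the test names are only examples): the option writes the selected test names to stdout, one per line, without executing them, which is what the new test_list_tests in the diff below asserts.

    import subprocess, sys

    # Illustrative example: list the tests that would run, without executing them.
    proc = subprocess.run([sys.executable, '-m', 'test', '--list-tests',
                           'test_os', 'test_sys'],
                          stdout=subprocess.PIPE, universal_newlines=True)
    print(proc.stdout.rstrip().splitlines())   # e.g. ['test_os', 'test_sys']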
files: Lib/test/libregrtest/cmdline.py | 11 +-- Lib/test/libregrtest/main.py | 55 +++++++++++++------- Lib/test/test_regrtest.py | 7 ++ Misc/NEWS | 2 + 4 files changed, 49 insertions(+), 26 deletions(-) diff --git a/Lib/test/libregrtest/cmdline.py b/Lib/test/libregrtest/cmdline.py --- a/Lib/test/libregrtest/cmdline.py +++ b/Lib/test/libregrtest/cmdline.py @@ -1,5 +1,4 @@ import argparse -import faulthandler import os from test import support @@ -234,6 +233,9 @@ group.add_argument('-F', '--forever', action='store_true', help='run the specified tests in a loop, until an ' 'error happens') + group.add_argument('--list-tests', action='store_true', + help="only write the name of tests that will be run, " + "don't execute them") parser.add_argument('args', nargs=argparse.REMAINDER, help=argparse.SUPPRESS) @@ -301,12 +303,7 @@ if ns.quiet: ns.verbose = 0 if ns.timeout is not None: - if hasattr(faulthandler, 'dump_traceback_later'): - if ns.timeout <= 0: - ns.timeout = None - else: - print("Warning: The timeout option requires " - "faulthandler.dump_traceback_later") + if ns.timeout <= 0: ns.timeout = None if ns.use_mp is not None: if ns.use_mp <= 0: diff --git a/Lib/test/libregrtest/main.py b/Lib/test/libregrtest/main.py --- a/Lib/test/libregrtest/main.py +++ b/Lib/test/libregrtest/main.py @@ -1,3 +1,4 @@ +import faulthandler import os import platform import random @@ -110,8 +111,13 @@ def parse_args(self, kwargs): ns = _parse_args(sys.argv[1:], **kwargs) + if ns.timeout and not hasattr(faulthandler, 'dump_traceback_later'): + print("Warning: The timeout option requires " + "faulthandler.dump_traceback_later", file=sys.stderr) + ns.timeout = None + if ns.threshold is not None and gc is None: - print('No GC available, ignore --threshold.') + print('No GC available, ignore --threshold.', file=sys.stderr) ns.threshold = None if ns.findleaks: @@ -122,7 +128,8 @@ pass #gc.set_debug(gc.DEBUG_SAVEALL) else: - print('No GC available, disabling --findleaks') + print('No GC available, disabling --findleaks', + file=sys.stderr) ns.findleaks = False # Strip .py extensions. @@ -163,20 +170,6 @@ nottests.add(arg) self.ns.args = [] - # For a partial run, we do not need to clutter the output. - if (self.ns.verbose - or self.ns.header - or not (self.ns.quiet or self.ns.single - or self.tests or self.ns.args)): - # Print basic platform information - print("==", platform.python_implementation(), *sys.version.split()) - print("== ", platform.platform(aliased=True), - "%s-endian" % sys.byteorder) - print("== ", "hash algorithm:", sys.hash_info.algorithm, - "64bit" if sys.maxsize > 2**32 else "32bit") - print("== ", os.getcwd()) - print("Testing with flags:", sys.flags) - # if testdir is set, then we are not running the python tests suite, so # don't add default tests to be executed or skipped (pass empty values) if self.ns.testdir: @@ -199,15 +192,18 @@ del self.selected[:self.selected.index(self.ns.start)] except ValueError: print("Couldn't find starting test (%s), using all tests" - % self.ns.start) + % self.ns.start, file=sys.stderr) if self.ns.randomize: if self.ns.random_seed is None: self.ns.random_seed = random.randrange(10000000) random.seed(self.ns.random_seed) - print("Using random seed", self.ns.random_seed) random.shuffle(self.selected) + def list_tests(self): + for name in self.selected: + print(name) + def rerun_failed_tests(self): self.ns.verbose = True self.ns.failfast = False @@ -315,6 +311,23 @@ return def run_tests(self): + # For a partial run, we do not need to clutter the output. 
+ if (self.ns.verbose + or self.ns.header + or not (self.ns.quiet or self.ns.single + or self.tests or self.ns.args)): + # Print basic platform information + print("==", platform.python_implementation(), *sys.version.split()) + print("== ", platform.platform(aliased=True), + "%s-endian" % sys.byteorder) + print("== ", "hash algorithm:", sys.hash_info.algorithm, + "64bit" if sys.maxsize > 2**32 else "32bit") + print("== ", os.getcwd()) + print("Testing with flags:", sys.flags) + + if self.ns.randomize: + print("Using random seed", self.ns.random_seed) + if self.ns.forever: self.tests = self._test_forever(list(self.selected)) self.test_count = '' @@ -359,8 +372,12 @@ setup_tests(self.ns) self.find_tests(tests) + + if self.ns.list_tests: + self.list_tests() + sys.exit(0) + self.run_tests() - self.display_result() if self.ns.verbose2 and self.bad: diff --git a/Lib/test/test_regrtest.py b/Lib/test/test_regrtest.py --- a/Lib/test/test_regrtest.py +++ b/Lib/test/test_regrtest.py @@ -672,6 +672,13 @@ reflog = fp.read() self.assertEqual(reflog, line2) + def test_list_tests(self): + # test --list-tests + tests = [self.create_test() for i in range(5)] + output = self.run_tests('--list-tests', *tests) + self.assertEqual(output.rstrip().splitlines(), + tests) + if __name__ == '__main__': unittest.main() diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -170,6 +170,8 @@ Tests ----- +- Issue #22806: Add ``python -m test --list-tests`` command to list tests. + - Issue #18174: ``python -m test --huntrleaks ...`` now also checks for leak of file descriptors. Patch written by Richard Oudkerk. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 3 01:20:55 2015 From: python-checkins at python.org (brett.cannon) Date: Fri, 02 Oct 2015 23:20:55 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy41KTogSXNzdWUgIzI1MTg4?= =?utf-8?q?=3A_Add_a_-P/--pgo_flag_to_regrtest_to_silence_error_output=2E?= Message-ID: <20151002232054.70970.32628@psf.io> https://hg.python.org/cpython/rev/fb90425017e3 changeset: 98494:fb90425017e3 branch: 3.5 parent: 98489:4eb809fa1130 user: Brett Cannon date: Fri Oct 02 16:16:44 2015 -0700 summary: Issue #25188: Add a -P/--pgo flag to regrtest to silence error output. Since PGO building doesn't care about test failures, silence them when running the test suite for performance reasons. Initial patch by Alecsandru Patrascu of Intel. 
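The mechanism is a simple guard threaded through regrtest; roughly (a minimal sketch with a hypothetical function name, not the actual code):

    import sys

    def report_failure(test, msg, pgo=False):
        # Hypothetical helper, simplified from the patch below: during PGO
        # training the profile data is what matters, not the test results,
        # so per-test failure output is suppressed.
        if not pgo:
            print("test", test, "failed --", msg, file=sys.stderr)

With the flag in place, the Makefile's PROFILE_TASK (see the Makefile.pre.in hunk below) can drop the blanket ">/dev/null 2>&1" redirection and run "-m test.regrtest --pgo" instead.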
files: Lib/test/regrtest.py | 84 +++++++++++++++++++------------ Makefile.pre.in | 2 +- 2 files changed, 51 insertions(+), 35 deletions(-) diff --git a/Lib/test/regrtest.py b/Lib/test/regrtest.py --- a/Lib/test/regrtest.py +++ b/Lib/test/regrtest.py @@ -322,6 +322,8 @@ group.add_argument('-F', '--forever', action='store_true', help='run the specified tests in a loop, until an ' 'error happens') + group.add_argument('-P', '--pgo', dest='pgo', action='store_true', + help='enable Profile Guided Optimization training') parser.add_argument('args', nargs=argparse.REMAINDER, help=argparse.SUPPRESS) @@ -361,7 +363,7 @@ findleaks=False, use_resources=None, trace=False, coverdir='coverage', runleaks=False, huntrleaks=False, verbose2=False, print_slow=False, random_seed=None, use_mp=None, verbose3=False, forever=False, - header=False, failfast=False, match_tests=None) + header=False, failfast=False, match_tests=None, pgo=False) for k, v in kwargs.items(): if not hasattr(ns, k): raise TypeError('%r is an invalid keyword argument ' @@ -435,14 +437,16 @@ from subprocess import Popen, PIPE base_cmd = ([sys.executable] + support.args_from_interpreter_flags() + ['-X', 'faulthandler', '-m', 'test.regrtest']) - + # required to spawn a new process with PGO flag on/off + if ns.pgo: + base_cmd = base_cmd + ['--pgo'] slaveargs = ( (testname, ns.verbose, ns.quiet), dict(huntrleaks=ns.huntrleaks, use_resources=ns.use_resources, output_on_failure=ns.verbose3, timeout=ns.timeout, failfast=ns.failfast, - match_tests=ns.match_tests)) + match_tests=ns.match_tests, pgo=ns.pgo)) # Running the child from the same working directory as regrtest's original # invocation ensures that TEMPDIR for the child is the same when # sysconfig.is_python_build() is true. See issue 15300. @@ -596,13 +600,14 @@ ns.args = [] # For a partial run, we do not need to clutter the output. 
- if ns.verbose or ns.header or not (ns.quiet or ns.single or tests or ns.args): + if (ns.verbose or ns.header or + not (ns.pgo or ns.quiet or ns.single or tests or ns.args)): # Print basic platform information print("==", platform.python_implementation(), *sys.version.split()) print("== ", platform.platform(aliased=True), - "%s-endian" % sys.byteorder) + "%s-endian" % sys.byteorder) print("== ", "hash algorithm:", sys.hash_info.algorithm, - "64bit" if sys.maxsize > 2**32 else "32bit") + "64bit" if sys.maxsize > 2**32 else "32bit") print("== ", os.getcwd()) print("Testing with flags:", sys.flags) @@ -722,13 +727,16 @@ continue accumulate_result(test, result) if not ns.quiet: - fmt = "[{1:{0}}{2}/{3}] {4}" if bad else "[{1:{0}}{2}] {4}" + if bad and not ns.pgo: + fmt = "[{1:{0}}{2}/{3}] {4}" + else: + fmt = "[{1:{0}}{2}] {4}" print(fmt.format( test_count_width, test_index, test_count, len(bad), test)) if stdout: print(stdout) - if stderr: + if stderr and not ns.pgo: print(stderr, file=sys.stderr) sys.stdout.flush() sys.stderr.flush() @@ -745,7 +753,10 @@ else: for test_index, test in enumerate(tests, 1): if not ns.quiet: - fmt = "[{1:{0}}{2}/{3}] {4}" if bad else "[{1:{0}}{2}] {4}" + if bad and not ns.pgo: + fmt = "[{1:{0}}{2}/{3}] {4}" + else: + fmt = "[{1:{0}}{2}] {4}" print(fmt.format( test_count_width, test_index, test_count, len(bad), test)) sys.stdout.flush() @@ -760,7 +771,7 @@ ns.huntrleaks, output_on_failure=ns.verbose3, timeout=ns.timeout, failfast=ns.failfast, - match_tests=ns.match_tests) + match_tests=ns.match_tests, pgo=ns.pgo) accumulate_result(test, result) except KeyboardInterrupt: interrupted = True @@ -779,14 +790,14 @@ if module not in save_modules and module.startswith("test."): support.unload(module) - if interrupted: + if interrupted and not ns.pgo: # print a newline after ^C print() print("Test suite interrupted by signal SIGINT.") omitted = set(selected) - set(good) - set(bad) - set(skipped) print(count(len(omitted), "test"), "omitted:") printlist(omitted) - if good and not ns.quiet: + if good and not ns.quiet and not ns.pgo: if not bad and not skipped and not interrupted and len(good) > 1: print("All", end=' ') print(count(len(good), "test"), "OK.") @@ -795,26 +806,27 @@ print("10 slowest tests:") for time, test in test_times[:10]: print("%s: %.1fs" % (test, time)) - if bad: + if bad and not ns.pgo: print(count(len(bad), "test"), "failed:") printlist(bad) - if environment_changed: + if environment_changed and not ns.pgo: print("{} altered the execution environment:".format( count(len(environment_changed), "test"))) printlist(environment_changed) - if skipped and not ns.quiet: + if skipped and not ns.quiet and not ns.pgo: print(count(len(skipped), "test"), "skipped:") printlist(skipped) if ns.verbose2 and bad: print("Re-running failed tests in verbose mode") for test in bad[:]: - print("Re-running test %r in verbose mode" % test) + if not ns.pgo: + print("Re-running test %r in verbose mode" % test) sys.stdout.flush() try: ns.verbose = True ok = runtest(test, True, ns.quiet, ns.huntrleaks, - timeout=ns.timeout) + timeout=ns.timeout, pgo=ns.pgo) except KeyboardInterrupt: # print a newline separate from the ^C print() @@ -913,7 +925,7 @@ def runtest(test, verbose, quiet, huntrleaks=False, use_resources=None, output_on_failure=False, failfast=False, match_tests=None, - timeout=None): + timeout=None, *, pgo=False): """Run a single test. 
test -- the name of the test @@ -926,6 +938,8 @@ timeout -- dump the traceback and exit if a test takes more than timeout seconds failfast, match_tests -- See regrtest command-line flags for these. + pgo -- if true, do not print unnecessary info when running the test + for Profile Guided Optimization build Returns the tuple result, test_time, where result is one of the constants: INTERRUPTED KeyboardInterrupt when run under -j @@ -935,7 +949,6 @@ FAILED test failed PASSED test passed """ - if use_resources is not None: support.use_resources = use_resources use_timeout = (timeout is not None) @@ -965,8 +978,8 @@ sys.stdout = stream sys.stderr = stream result = runtest_inner(test, verbose, quiet, huntrleaks, - display_failure=False) - if result[0] == FAILED: + display_failure=False, pgo=pgo) + if result[0] == FAILED and not pgo: output = stream.getvalue() orig_stderr.write(output) orig_stderr.flush() @@ -976,7 +989,7 @@ else: support.verbose = verbose # Tell tests to be moderately quiet result = runtest_inner(test, verbose, quiet, huntrleaks, - display_failure=not verbose) + display_failure=not verbose, pgo=pgo) return result finally: if use_timeout: @@ -1008,10 +1021,11 @@ changed = False - def __init__(self, testname, verbose=0, quiet=False): + def __init__(self, testname, verbose=0, quiet=False, *, pgo=False): self.testname = testname self.verbose = verbose self.quiet = quiet + self.pgo = pgo # To add things to save and restore, add a name XXX to the resources list # and add corresponding get_XXX/restore_XXX functions. get_XXX should @@ -1240,11 +1254,11 @@ if current != original: self.changed = True restore(original) - if not self.quiet: + if not self.quiet and not self.pgo: print("Warning -- {} was modified by {}".format( name, self.testname), file=sys.stderr) - if self.verbose > 1: + if self.verbose > 1 and not self.pgo: print(" Before: {}\n After: {} ".format( original, current), file=sys.stderr) @@ -1252,7 +1266,7 @@ def runtest_inner(test, verbose, quiet, - huntrleaks=False, display_failure=True): + huntrleaks=False, display_failure=True, pgo=False): support.unload(test) test_time = 0.0 @@ -1263,7 +1277,7 @@ else: # Always import it from the test package abstest = 'test.' 
+ test - with saved_test_environment(test, verbose, quiet) as environment: + with saved_test_environment(test, verbose, quiet, pgo=pgo) as environment: start_time = time.time() the_module = importlib.import_module(abstest) # If the test has a test_main, that will run the appropriate @@ -1283,27 +1297,29 @@ refleak = dash_R(the_module, test, test_runner, huntrleaks) test_time = time.time() - start_time except support.ResourceDenied as msg: - if not quiet: + if not quiet and not pgo: print(test, "skipped --", msg) sys.stdout.flush() return RESOURCE_DENIED, test_time except unittest.SkipTest as msg: - if not quiet: + if not quiet and not pgo: print(test, "skipped --", msg) sys.stdout.flush() return SKIPPED, test_time except KeyboardInterrupt: raise except support.TestFailed as msg: - if display_failure: - print("test", test, "failed --", msg, file=sys.stderr) - else: - print("test", test, "failed", file=sys.stderr) + if not pgo: + if display_failure: + print("test", test, "failed --", msg, file=sys.stderr) + else: + print("test", test, "failed", file=sys.stderr) sys.stderr.flush() return FAILED, test_time except: msg = traceback.format_exc() - print("test", test, "crashed --", msg, file=sys.stderr) + if not pgo: + print("test", test, "crashed --", msg, file=sys.stderr) sys.stderr.flush() return FAILED, test_time else: diff --git a/Makefile.pre.in b/Makefile.pre.in --- a/Makefile.pre.in +++ b/Makefile.pre.in @@ -231,7 +231,7 @@ TCLTK_LIBS= @TCLTK_LIBS@ # The task to run while instrument when building the profile-opt target -PROFILE_TASK=-m test.regrtest >/dev/null 2>&1 +PROFILE_TASK=-m test.regrtest --pgo # report files for gcov / lcov coverage report COVERAGE_INFO= $(abs_builddir)/coverage.info -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 3 01:20:58 2015 From: python-checkins at python.org (brett.cannon) Date: Fri, 02 Oct 2015 23:20:58 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Merge_from_3=2E5_for_issue_=2325188=2E?= Message-ID: <20151002232054.31193.88005@psf.io> https://hg.python.org/cpython/rev/c1ecb258003b changeset: 98495:c1ecb258003b parent: 98493:1005573e6a74 parent: 98494:fb90425017e3 user: Brett Cannon date: Fri Oct 02 16:20:49 2015 -0700 summary: Merge from 3.5 for issue #25188. 
files: Lib/test/libregrtest/cmdline.py | 6 +++- Lib/test/libregrtest/main.py | 12 ++++++- Lib/test/libregrtest/runtest.py | 28 ++++++++++------- Lib/test/libregrtest/runtest_mp.py | 11 ++++-- Lib/test/libregrtest/save_env.py | 5 +- Makefile.pre.in | 2 +- Misc/NEWS | 4 ++ 7 files changed, 47 insertions(+), 21 deletions(-) diff --git a/Lib/test/libregrtest/cmdline.py b/Lib/test/libregrtest/cmdline.py --- a/Lib/test/libregrtest/cmdline.py +++ b/Lib/test/libregrtest/cmdline.py @@ -236,6 +236,8 @@ group.add_argument('--list-tests', action='store_true', help="only write the name of tests that will be run, " "don't execute them") + group.add_argument('-P', '--pgo', dest='pgo', action='store_true', + help='enable Profile Guided Optimization training') parser.add_argument('args', nargs=argparse.REMAINDER, help=argparse.SUPPRESS) @@ -279,7 +281,7 @@ findleaks=False, use_resources=None, trace=False, coverdir='coverage', runleaks=False, huntrleaks=False, verbose2=False, print_slow=False, random_seed=None, use_mp=None, verbose3=False, forever=False, - header=False, failfast=False, match_tests=None) + header=False, failfast=False, match_tests=None, pgo=False) for k, v in kwargs.items(): if not hasattr(ns, k): raise TypeError('%r is an invalid keyword argument ' @@ -299,6 +301,8 @@ parser.error("-l and -j don't go together!") if ns.failfast and not (ns.verbose or ns.verbose3): parser.error("-G/--failfast needs either -v or -W") + if ns.pgo and (ns.verbose or ns.verbose2 or ns.verbose3): + parser.error("--pgo/-v don't go together!") if ns.quiet: ns.verbose = 0 diff --git a/Lib/test/libregrtest/main.py b/Lib/test/libregrtest/main.py --- a/Lib/test/libregrtest/main.py +++ b/Lib/test/libregrtest/main.py @@ -103,7 +103,10 @@ def display_progress(self, test_index, test): if self.ns.quiet: return - fmt = "[{1:{0}}{2}/{3}] {4}" if self.bad else "[{1:{0}}{2}] {4}" + if self.bad and not self.ns.pgo: + fmt = "[{1:{0}}{2}/{3}] {4}" + else: + fmt = "[{1:{0}}{2}] {4}" print(fmt.format(self.test_count_width, test_index, self.test_count, len(self.bad), test), flush=True) @@ -238,6 +241,11 @@ print(count(len(omitted), "test"), "omitted:") printlist(omitted) + # If running the test suite for PGO then no one cares about + # results. + if self.ns.pgo: + return + if self.good and not self.ns.quiet: if (not self.bad and not self.skipped @@ -314,7 +322,7 @@ # For a partial run, we do not need to clutter the output. if (self.ns.verbose or self.ns.header - or not (self.ns.quiet or self.ns.single + or not (self.ns.pgo or self.ns.quiet or self.ns.single or self.tests or self.ns.args)): # Print basic platform information print("==", platform.python_implementation(), *sys.version.split()) diff --git a/Lib/test/libregrtest/runtest.py b/Lib/test/libregrtest/runtest.py --- a/Lib/test/libregrtest/runtest.py +++ b/Lib/test/libregrtest/runtest.py @@ -65,6 +65,7 @@ timeout -- dump the traceback and exit if a test takes more than timeout seconds failfast, match_tests -- See regrtest command-line flags for these. 
+ pgo -- if true, suppress any info irrelevant to a generating a PGO build Returns the tuple result, test_time, where result is one of the constants: INTERRUPTED KeyboardInterrupt when run under -j @@ -82,6 +83,7 @@ failfast = ns.failfast match_tests = ns.match_tests timeout = ns.timeout + pgo = ns.pgo use_timeout = (timeout is not None) if use_timeout: @@ -110,7 +112,7 @@ sys.stdout = stream sys.stderr = stream result = runtest_inner(test, verbose, quiet, huntrleaks, - display_failure=False) + display_failure=False, pgo=pgo) if result[0] == FAILED: output = stream.getvalue() orig_stderr.write(output) @@ -121,7 +123,7 @@ else: support.verbose = verbose # Tell tests to be moderately quiet result = runtest_inner(test, verbose, quiet, huntrleaks, - display_failure=not verbose) + display_failure=not verbose, pgo=pgo) return result finally: if use_timeout: @@ -131,7 +133,7 @@ def runtest_inner(test, verbose, quiet, - huntrleaks=False, display_failure=True): + huntrleaks=False, display_failure=True, *, pgo=False): support.unload(test) test_time = 0.0 @@ -142,7 +144,7 @@ else: # Always import it from the test package abstest = 'test.' + test - with saved_test_environment(test, verbose, quiet) as environment: + with saved_test_environment(test, verbose, quiet, pgo=pgo) as environment: start_time = time.time() the_module = importlib.import_module(abstest) # If the test has a test_main, that will run the appropriate @@ -162,24 +164,28 @@ refleak = dash_R(the_module, test, test_runner, huntrleaks) test_time = time.time() - start_time except support.ResourceDenied as msg: - if not quiet: + if not quiet and not pgo: print(test, "skipped --", msg, flush=True) return RESOURCE_DENIED, test_time except unittest.SkipTest as msg: - if not quiet: + if not quiet and not pgo: print(test, "skipped --", msg, flush=True) return SKIPPED, test_time except KeyboardInterrupt: raise except support.TestFailed as msg: - if display_failure: - print("test", test, "failed --", msg, file=sys.stderr, flush=True) - else: - print("test", test, "failed", file=sys.stderr, flush=True) + if not pgo: + if display_failure: + print("test", test, "failed --", msg, file=sys.stderr, + flush=True) + else: + print("test", test, "failed", file=sys.stderr, flush=True) return FAILED, test_time except: msg = traceback.format_exc() - print("test", test, "crashed --", msg, file=sys.stderr, flush=True) + if not pgo: + print("test", test, "crashed --", msg, file=sys.stderr, + flush=True) return FAILED, test_time else: if refleak: diff --git a/Lib/test/libregrtest/runtest_mp.py b/Lib/test/libregrtest/runtest_mp.py --- a/Lib/test/libregrtest/runtest_mp.py +++ b/Lib/test/libregrtest/runtest_mp.py @@ -42,6 +42,8 @@ '-X', 'faulthandler', '-m', 'test.regrtest', '--slaveargs', slaveargs] + if ns.pgo: + cmd += ['--pgo'] # Running the child from the same working directory as regrtest's original # invocation ensures that TEMPDIR for the child is the same when @@ -175,7 +177,7 @@ item = output.get(timeout=timeout) except queue.Empty: running = get_running(workers) - if running: + if running and not regrtest.ns.pgo: print('running: %s' % ', '.join(running)) continue @@ -189,17 +191,18 @@ text = test ok, test_time = result if (ok not in (CHILD_ERROR, INTERRUPTED) - and test_time >= PROGRESS_MIN_TIME): + and test_time >= PROGRESS_MIN_TIME + and not regrtest.ns.pgo): text += ' (%.0f sec)' % test_time running = get_running(workers) - if running: + if running and not regrtest.ns.pgo: text += ' -- running: %s' % ', '.join(running) 
regrtest.display_progress(test_index, text) # Copy stdout and stderr from the child process if stdout: print(stdout, flush=True) - if stderr: + if stderr and not regrtest.ns.pgo: print(stderr, file=sys.stderr, flush=True) if result[0] == INTERRUPTED: diff --git a/Lib/test/libregrtest/save_env.py b/Lib/test/libregrtest/save_env.py --- a/Lib/test/libregrtest/save_env.py +++ b/Lib/test/libregrtest/save_env.py @@ -41,10 +41,11 @@ changed = False - def __init__(self, testname, verbose=0, quiet=False): + def __init__(self, testname, verbose=0, quiet=False, *, pgo=False): self.testname = testname self.verbose = verbose self.quiet = quiet + self.pgo = pgo # To add things to save and restore, add a name XXX to the resources list # and add corresponding get_XXX/restore_XXX functions. get_XXX should @@ -273,7 +274,7 @@ if current != original: self.changed = True restore(original) - if not self.quiet: + if not self.quiet and not self.pgo: print("Warning -- {} was modified by {}".format( name, self.testname), file=sys.stderr) diff --git a/Makefile.pre.in b/Makefile.pre.in --- a/Makefile.pre.in +++ b/Makefile.pre.in @@ -231,7 +231,7 @@ TCLTK_LIBS= @TCLTK_LIBS@ # The task to run while instrument when building the profile-opt target -PROFILE_TASK=-m test.regrtest >/dev/null 2>&1 +PROFILE_TASK=-m test.regrtest --pgo # report files for gcov / lcov coverage report COVERAGE_INFO= $(abs_builddir)/coverage.info diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -170,6 +170,10 @@ Tests ----- +- Issue #25188: Add -P/--pgo to test.regrtest to suppress error output when + running the test suite for the purposes of a PGO build. Initial patch by + Alecsandru Patrascu. + - Issue #22806: Add ``python -m test --list-tests`` command to list tests. - Issue #18174: ``python -m test --huntrleaks ...`` now also checks for leak of -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 3 01:22:42 2015 From: python-checkins at python.org (brett.cannon) Date: Fri, 02 Oct 2015 23:22:42 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogSXNzdWUgIzI1MTg4?= =?utf-8?q?=3A_Add_-P/--pgo_to_test=2Eregrtest_for_PGO_building=2E?= Message-ID: <20151002232241.31199.2016@psf.io> https://hg.python.org/cpython/rev/136ad559fa4f changeset: 98496:136ad559fa4f branch: 2.7 parent: 98485:8bbc51f97078 user: Brett Cannon date: Fri Oct 02 16:21:34 2015 -0700 summary: Issue #25188: Add -P/--pgo to test.regrtest for PGO building. Initial patch by Alecsandru Patrascu of Intel. files: Lib/test/regrtest.py | 83 +++++++++++++++++++------------ Makefile.pre.in | 2 +- 2 files changed, 51 insertions(+), 34 deletions(-) diff --git a/Lib/test/regrtest.py b/Lib/test/regrtest.py --- a/Lib/test/regrtest.py +++ b/Lib/test/regrtest.py @@ -57,6 +57,7 @@ -t/--threshold THRESHOLD -- call gc.set_threshold(THRESHOLD) -F/--forever -- run the specified tests in a loop, until an error happens +-P/--pgo -- enable Profile Guided Optimization training Additional Option Details: @@ -240,7 +241,7 @@ findleaks=False, use_resources=None, trace=False, coverdir='coverage', runleaks=False, huntrleaks=False, verbose2=False, print_slow=False, random_seed=None, use_mp=None, verbose3=False, forever=False, - header=False): + header=False, pgo=False): """Execute a test suite. 
This also parses command-line options and modifies its behavior @@ -266,12 +267,12 @@ test_support.record_original_stdout(sys.stdout) try: - opts, args = getopt.getopt(sys.argv[1:], 'hvqxsSrf:lu:t:TD:NLR:FwWM:j:', + opts, args = getopt.getopt(sys.argv[1:], 'hvqxsSrf:lu:t:TD:NLR:FwWM:j:P', ['help', 'verbose', 'verbose2', 'verbose3', 'quiet', 'exclude', 'single', 'slow', 'randomize', 'fromfile=', 'findleaks', 'use=', 'threshold=', 'trace', 'coverdir=', 'nocoverdir', 'runleaks', 'huntrleaks=', 'memlimit=', 'randseed=', - 'multiprocess=', 'slaveargs=', 'forever', 'header']) + 'multiprocess=', 'slaveargs=', 'forever', 'header', 'pgo']) except getopt.error, msg: usage(2, msg) @@ -366,6 +367,8 @@ print # Force a newline (just in case) print json.dumps(result) sys.exit(0) + elif o in ('-P', '--pgo'): + pgo = True else: print >>sys.stderr, ("No handler for option {}. Please " "report this as a bug at http://bugs.python.org.").format(o) @@ -431,13 +434,14 @@ # For a partial run, we do not need to clutter the output. if verbose or header or not (quiet or single or tests or args): - # Print basic platform information - print "==", platform.python_implementation(), \ - " ".join(sys.version.split()) - print "== ", platform.platform(aliased=True), \ - "%s-endian" % sys.byteorder - print "== ", os.getcwd() - print "Testing with flags:", sys.flags + if not pgo: + # Print basic platform information + print "==", platform.python_implementation(), \ + " ".join(sys.version.split()) + print "== ", platform.platform(aliased=True), \ + "%s-endian" % sys.byteorder + print "== ", os.getcwd() + print "Testing with flags:", sys.flags alltests = findtests(testdir, stdtests, nottests) selected = tests or args or alltests @@ -510,6 +514,9 @@ pending = tests_and_args() opt_args = test_support.args_from_interpreter_flags() base_cmd = [sys.executable] + opt_args + ['-m', 'test.regrtest'] + # required to spawn a new process with PGO flag on/off + if pgo: + base_cmd = base_cmd + ['--pgo'] def work(): # A worker thread. try: @@ -519,6 +526,9 @@ except StopIteration: output.put((None, None, None, None)) return + # required to permit running tests with PGO flag on/off + if pgo: + args_tuple[1]['pgo']=pgo # -E is needed by some tests, e.g. test_import popen = Popen(base_cmd + ['--slaveargs', json.dumps(args_tuple)], stdout=PIPE, stderr=PIPE, @@ -550,7 +560,7 @@ continue if stdout: print stdout - if stderr: + if stderr and not pgo: print >>sys.stderr, stderr sys.stdout.flush() sys.stderr.flush() @@ -570,7 +580,7 @@ for worker in workers: worker.join() else: - for test_index, test in enumerate(tests, 1): + for test_index, test in enumerate(tests, 1): if not quiet: fmt = "[{1:{0}}{2}/{3}] {4}" if bad else "[{1:{0}}{2}] {4}" print(fmt.format( @@ -583,11 +593,12 @@ globals=globals(), locals=vars()) else: try: - result = runtest(test, verbose, quiet, huntrleaks) + result = runtest(test, verbose, quiet, huntrleaks, None, pgo) accumulate_result(test, result) if verbose3 and result[0] == FAILED: - print "Re-running test %r in verbose mode" % test - runtest(test, True, quiet, huntrleaks) + if not pgo: + print "Re-running test %r in verbose mode" % test + runtest(test, True, quiet, huntrleaks, None, pgo) except KeyboardInterrupt: interrupted = True break @@ -607,14 +618,14 @@ if module not in save_modules and module.startswith("test."): test_support.unload(module) - if interrupted: + if interrupted and not pgo: # print a newline after ^C print print "Test suite interrupted by signal SIGINT." 
omitted = set(selected) - set(good) - set(bad) - set(skipped) print count(len(omitted), "test"), "omitted:" printlist(omitted) - if good and not quiet: + if good and not quiet and not pgo: if not bad and not skipped and not interrupted and len(good) > 1: print "All", print count(len(good), "test"), "OK." @@ -623,14 +634,14 @@ print "10 slowest tests:" for time, test in test_times[:10]: print "%s: %.1fs" % (test, time) - if bad: + if bad and not pgo: print count(len(bad), "test"), "failed:" printlist(bad) - if environment_changed: + if environment_changed and not pgo: print "{} altered the execution environment:".format( count(len(environment_changed), "test")) printlist(environment_changed) - if skipped and not quiet: + if skipped and not quiet and not pgo: print count(len(skipped), "test"), "skipped:" printlist(skipped) @@ -655,7 +666,7 @@ sys.stdout.flush() try: test_support.verbose = True - ok = runtest(test, True, quiet, huntrleaks) + ok = runtest(test, True, quiet, huntrleaks, None, pgo) except KeyboardInterrupt: # print a newline separate from the ^C print @@ -716,7 +727,7 @@ return stdtests + sorted(tests) def runtest(test, verbose, quiet, - huntrleaks=False, use_resources=None): + huntrleaks=False, use_resources=None, pgo=False): """Run a single test. test -- the name of the test @@ -725,6 +736,9 @@ test_times -- a list of (time, test_name) pairs huntrleaks -- run multiple times to test for leaks; requires a debug build; a triple corresponding to -R's three arguments + pgo -- if true, do not print unnecessary info when running the test + for Profile Guided Optimization build + Returns one of the test result constants: INTERRUPTED KeyboardInterrupt when run under -j RESOURCE_DENIED test skipped because resource denied @@ -738,7 +752,7 @@ if use_resources is not None: test_support.use_resources = use_resources try: - return runtest_inner(test, verbose, quiet, huntrleaks) + return runtest_inner(test, verbose, quiet, huntrleaks, pgo) finally: cleanup_test_droppings(test, verbose) @@ -767,10 +781,11 @@ changed = False - def __init__(self, testname, verbose=0, quiet=False): + def __init__(self, testname, verbose=0, quiet=False, pgo=False): self.testname = testname self.verbose = verbose self.quiet = quiet + self.pgo = pgo # To add things to save and restore, add a name XXX to the resources list # and add corresponding get_XXX/restore_XXX functions. get_XXX should @@ -884,11 +899,11 @@ if current != original: self.changed = True restore(original) - if not self.quiet: + if not self.quiet and not self.pgo: print >>sys.stderr, ( "Warning -- {} was modified by {}".format( name, self.testname)) - if self.verbose > 1: + if self.verbose > 1 and not self.pgo: print >>sys.stderr, ( " Before: {}\n After: {} ".format( original, current)) @@ -899,7 +914,7 @@ return False -def runtest_inner(test, verbose, quiet, huntrleaks=False): +def runtest_inner(test, verbose, quiet, huntrleaks=False, pgo=False): test_support.unload(test) if verbose: capture_stdout = None @@ -918,7 +933,7 @@ else: # Always import it from the test package abstest = 'test.' 
+ test - with saved_test_environment(test, verbose, quiet) as environment: + with saved_test_environment(test, verbose, quiet, pgo) as environment: start_time = time.time() the_package = __import__(abstest, globals(), locals(), []) the_module = getattr(the_package, test) @@ -935,26 +950,28 @@ finally: sys.stdout = save_stdout except test_support.ResourceDenied, msg: - if not quiet: + if not quiet and not pgo: print test, "skipped --", msg sys.stdout.flush() return RESOURCE_DENIED, test_time except unittest.SkipTest, msg: - if not quiet: + if not quiet and not pgo: print test, "skipped --", msg sys.stdout.flush() return SKIPPED, test_time except KeyboardInterrupt: raise except test_support.TestFailed, msg: - print >>sys.stderr, "test", test, "failed --", msg + if not pgo: + print >>sys.stderr, "test", test, "failed --", msg sys.stderr.flush() return FAILED, test_time except: type, value = sys.exc_info()[:2] - print >>sys.stderr, "test", test, "crashed --", str(type) + ":", value + if not pgo: + print >>sys.stderr, "test", test, "crashed --", str(type) + ":", value sys.stderr.flush() - if verbose: + if verbose and not pgo: traceback.print_exc(file=sys.stderr) sys.stderr.flush() return FAILED, test_time diff --git a/Makefile.pre.in b/Makefile.pre.in --- a/Makefile.pre.in +++ b/Makefile.pre.in @@ -209,7 +209,7 @@ TCLTK_LIBS= @TCLTK_LIBS@ # The task to run while instrument when building the profile-opt target -PROFILE_TASK=-m test.regrtest >/dev/null 2>&1 +PROFILE_TASK=-m test.regrtest --pgo # === Definitions added by makesetup === -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 3 01:22:43 2015 From: python-checkins at python.org (brett.cannon) Date: Fri, 02 Oct 2015 23:22:43 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=282=2E7=29=3A_Fix_indentatio?= =?utf-8?q?n?= Message-ID: <20151002232241.55474.47324@psf.io> https://hg.python.org/cpython/rev/30c143a705dd changeset: 98497:30c143a705dd branch: 2.7 user: Brett Cannon date: Fri Oct 02 16:22:32 2015 -0700 summary: Fix indentation files: Lib/test/regrtest.py | 8 ++++---- 1 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Lib/test/regrtest.py b/Lib/test/regrtest.py --- a/Lib/test/regrtest.py +++ b/Lib/test/regrtest.py @@ -368,7 +368,7 @@ print json.dumps(result) sys.exit(0) elif o in ('-P', '--pgo'): - pgo = True + pgo = True else: print >>sys.stderr, ("No handler for option {}. 
Please " "report this as a bug at http://bugs.python.org.").format(o) @@ -580,7 +580,7 @@ for worker in workers: worker.join() else: - for test_index, test in enumerate(tests, 1): + for test_index, test in enumerate(tests, 1): if not quiet: fmt = "[{1:{0}}{2}/{3}] {4}" if bad else "[{1:{0}}{2}] {4}" print(fmt.format( @@ -736,9 +736,9 @@ test_times -- a list of (time, test_name) pairs huntrleaks -- run multiple times to test for leaks; requires a debug build; a triple corresponding to -R's three arguments - pgo -- if true, do not print unnecessary info when running the test + pgo -- if true, do not print unnecessary info when running the test for Profile Guided Optimization build - + Returns one of the test result constants: INTERRUPTED KeyboardInterrupt when run under -j RESOURCE_DENIED test skipped because resource denied -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 3 01:59:53 2015 From: python-checkins at python.org (victor.stinner) Date: Fri, 02 Oct 2015 23:59:53 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Fix_=5FPyUnicodeWriter=5FP?= =?utf-8?q?repareKind=28=29?= Message-ID: <20151002235952.7246.8479@psf.io> https://hg.python.org/cpython/rev/3f6566a49c13 changeset: 98498:3f6566a49c13 parent: 98495:c1ecb258003b user: Victor Stinner date: Sat Oct 03 01:55:51 2015 +0200 summary: Fix _PyUnicodeWriter_PrepareKind() Initialize kind to 0 (PyUnicode_WCHAR_KIND) to ensure that _PyUnicodeWriter_PrepareKind() handles correctly read-only buffer: copy the buffer. files: Objects/unicodeobject.c | 25 ++++++++++++++++++------- 1 files changed, 18 insertions(+), 7 deletions(-) diff --git a/Objects/unicodeobject.c b/Objects/unicodeobject.c --- a/Objects/unicodeobject.c +++ b/Objects/unicodeobject.c @@ -13294,27 +13294,38 @@ Py_LOCAL_INLINE(void) _PyUnicodeWriter_Update(_PyUnicodeWriter *writer) { - if (!writer->readonly) + writer->maxchar = PyUnicode_MAX_CHAR_VALUE(writer->buffer); + writer->data = PyUnicode_DATA(writer->buffer); + + if (!writer->readonly) { + writer->kind = PyUnicode_KIND(writer->buffer); writer->size = PyUnicode_GET_LENGTH(writer->buffer); + } else { + /* use a value smaller than PyUnicode_1BYTE_KIND() so + _PyUnicodeWriter_PrepareKind() will copy the buffer. */ + writer->kind = PyUnicode_WCHAR_KIND; + assert(writer->kind <= PyUnicode_1BYTE_KIND); + /* Copy-on-write mode: set buffer size to 0 so * _PyUnicodeWriter_Prepare() will copy (and enlarge) the buffer on * next write. */ writer->size = 0; } - writer->maxchar = PyUnicode_MAX_CHAR_VALUE(writer->buffer); - writer->data = PyUnicode_DATA(writer->buffer); - writer->kind = PyUnicode_KIND(writer->buffer); } void _PyUnicodeWriter_Init(_PyUnicodeWriter *writer) { memset(writer, 0, sizeof(*writer)); -#ifdef Py_DEBUG - writer->kind = 5; /* invalid kind */ -#endif + + /* ASCII is the bare minimum */ writer->min_char = 127; + + /* use a value smaller than PyUnicode_1BYTE_KIND() so + _PyUnicodeWriter_PrepareKind() will copy the buffer. 
*/ + writer->kind = PyUnicode_WCHAR_KIND; + assert(writer->kind <= PyUnicode_1BYTE_KIND); } int -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 3 02:22:17 2015 From: python-checkins at python.org (victor.stinner) Date: Sat, 03 Oct 2015 00:22:17 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2318174=3A_Fix_test?= =?utf-8?q?=5Fregrtest_when_Python_is_compiled_in_release_mode?= Message-ID: <20151003002216.487.34486@psf.io> https://hg.python.org/cpython/rev/ec2ef7525fa5 changeset: 98499:ec2ef7525fa5 user: Victor Stinner date: Sat Oct 03 02:21:35 2015 +0200 summary: Issue #18174: Fix test_regrtest when Python is compiled in release mode files: Lib/test/test_regrtest.py | 1 + 1 files changed, 1 insertions(+), 0 deletions(-) diff --git a/Lib/test/test_regrtest.py b/Lib/test/test_regrtest.py --- a/Lib/test/test_regrtest.py +++ b/Lib/test/test_regrtest.py @@ -642,6 +642,7 @@ output = self.run_tests('--forever', test, exitcode=1) self.check_executed_tests(output, [test]*3, failed=test) + @unittest.skipUnless(Py_DEBUG, 'need a debug build') def test_huntrleaks_fd_leak(self): # test --huntrleaks for file descriptor leak code = textwrap.dedent(""" -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 3 04:13:21 2015 From: python-checkins at python.org (terry.reedy) Date: Sat, 03 Oct 2015 02:13:21 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Merge_with_3=2E5?= Message-ID: <20151003021321.128838.36169@psf.io> https://hg.python.org/cpython/rev/fd904e1e4f29 changeset: 98503:fd904e1e4f29 parent: 98499:ec2ef7525fa5 parent: 98502:ff025cf824d0 user: Terry Jan Reedy date: Fri Oct 02 22:12:57 2015 -0400 summary: Merge with 3.5 files: Lib/idlelib/configDialog.py | 30 +++++++++++++----------- 1 files changed, 16 insertions(+), 14 deletions(-) diff --git a/Lib/idlelib/configDialog.py b/Lib/idlelib/configDialog.py --- a/Lib/idlelib/configDialog.py +++ b/Lib/idlelib/configDialog.py @@ -43,19 +43,20 @@ #The first value of the tuple is the sample area tag name. #The second value is the display name list sort index. 
self.themeElements={ - 'Normal Text':('normal', '00'), - 'Python Keywords':('keyword', '01'), - 'Python Definitions':('definition', '02'), - 'Python Builtins':('builtin', '03'), - 'Python Comments':('comment', '04'), - 'Python Strings':('string', '05'), - 'Selected Text':('hilite', '06'), - 'Found Text':('hit', '07'), - 'Cursor':('cursor', '08'), - 'Error Text':('error', '09'), - 'Shell Normal Text':('console', '10'), - 'Shell Stdout Text':('stdout', '11'), - 'Shell Stderr Text':('stderr', '12'), + 'Normal Text': ('normal', '00'), + 'Python Keywords': ('keyword', '01'), + 'Python Definitions': ('definition', '02'), + 'Python Builtins': ('builtin', '03'), + 'Python Comments': ('comment', '04'), + 'Python Strings': ('string', '05'), + 'Selected Text': ('hilite', '06'), + 'Found Text': ('hit', '07'), + 'Cursor': ('cursor', '08'), + 'Editor Breakpoint': ('break', '09'), + 'Shell Normal Text': ('console', '10'), + 'Shell Error Text': ('error', '11'), + 'Shell Stdout Text': ('stdout', '12'), + 'Shell Stderr Text': ('stderr', '13'), } self.ResetChangedItems() #load initial values in changed items dict self.CreateWidgets() @@ -219,7 +220,8 @@ ("'selected'", 'hilite'), ('\n var2 = ', 'normal'), ("'found'", 'hit'), ('\n var3 = ', 'normal'), ('list', 'builtin'), ('(', 'normal'), - ('None', 'keyword'), (')\n\n', 'normal'), + ('None', 'keyword'), (')\n', 'normal'), + (' breakpoint("line")', 'break'), ('\n\n', 'normal'), (' error ', 'error'), (' ', 'normal'), ('cursor |', 'cursor'), ('\n ', 'normal'), ('shell', 'console'), (' ', 'normal'), -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 3 04:13:21 2015 From: python-checkins at python.org (terry.reedy) Date: Sat, 03 Oct 2015 02:13:21 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogSXNzdWUgIzI0ODIw?= =?utf-8?q?=3A_Users_can_now_set_breakpoint_colors_in_Settings_-=3E?= Message-ID: <20151003021321.479.42994@psf.io> https://hg.python.org/cpython/rev/e67da755d614 changeset: 98500:e67da755d614 branch: 2.7 parent: 98497:30c143a705dd user: Terry Jan Reedy date: Fri Oct 02 22:12:09 2015 -0400 summary: Issue #24820: Users can now set breakpoint colors in Settings -> Custom Highlighting. Original patch by Mark Roseman. files: Lib/idlelib/configDialog.py | 30 +++++++++++++----------- 1 files changed, 16 insertions(+), 14 deletions(-) diff --git a/Lib/idlelib/configDialog.py b/Lib/idlelib/configDialog.py --- a/Lib/idlelib/configDialog.py +++ b/Lib/idlelib/configDialog.py @@ -41,19 +41,20 @@ #The first value of the tuple is the sample area tag name. #The second value is the display name list sort index. 
self.themeElements={ - 'Normal Text':('normal', '00'), - 'Python Keywords':('keyword', '01'), - 'Python Definitions':('definition', '02'), - 'Python Builtins':('builtin', '03'), - 'Python Comments':('comment', '04'), - 'Python Strings':('string', '05'), - 'Selected Text':('hilite', '06'), - 'Found Text':('hit', '07'), - 'Cursor':('cursor', '08'), - 'Error Text':('error', '09'), - 'Shell Normal Text':('console', '10'), - 'Shell Stdout Text':('stdout', '11'), - 'Shell Stderr Text':('stderr', '12'), + 'Normal Text': ('normal', '00'), + 'Python Keywords': ('keyword', '01'), + 'Python Definitions': ('definition', '02'), + 'Python Builtins': ('builtin', '03'), + 'Python Comments': ('comment', '04'), + 'Python Strings': ('string', '05'), + 'Selected Text': ('hilite', '06'), + 'Found Text': ('hit', '07'), + 'Cursor': ('cursor', '08'), + 'Editor Breakpoint': ('break', '09'), + 'Shell Normal Text': ('console', '10'), + 'Shell Error Text': ('error', '11'), + 'Shell Stdout Text': ('stdout', '12'), + 'Shell Stderr Text': ('stderr', '13'), } self.ResetChangedItems() #load initial values in changed items dict self.CreateWidgets() @@ -217,7 +218,8 @@ ("'selected'", 'hilite'), ('\n var2 = ', 'normal'), ("'found'", 'hit'), ('\n var3 = ', 'normal'), ('list', 'builtin'), ('(', 'normal'), - ('None', 'builtin'), (')\n\n', 'normal'), + ('None', 'builtin'), (')\n', 'normal'), + (' breakpoint("line")', 'break'), ('\n\n', 'normal'), (' error ', 'error'), (' ', 'normal'), ('cursor |', 'cursor'), ('\n ', 'normal'), ('shell', 'console'), (' ', 'normal'), -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 3 04:13:21 2015 From: python-checkins at python.org (terry.reedy) Date: Sat, 03 Oct 2015 02:13:21 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzI0ODIw?= =?utf-8?q?=3A_Users_can_now_set_breakpoint_colors_in_Settings_-=3E?= Message-ID: <20151003021321.477.93039@psf.io> https://hg.python.org/cpython/rev/d874a6157223 changeset: 98501:d874a6157223 branch: 3.4 parent: 98486:b40a61e79893 user: Terry Jan Reedy date: Fri Oct 02 22:12:17 2015 -0400 summary: Issue #24820: Users can now set breakpoint colors in Settings -> Custom Highlighting. Original patch by Mark Roseman. files: Lib/idlelib/configDialog.py | 30 +++++++++++++----------- 1 files changed, 16 insertions(+), 14 deletions(-) diff --git a/Lib/idlelib/configDialog.py b/Lib/idlelib/configDialog.py --- a/Lib/idlelib/configDialog.py +++ b/Lib/idlelib/configDialog.py @@ -43,19 +43,20 @@ #The first value of the tuple is the sample area tag name. #The second value is the display name list sort index. 
self.themeElements={ - 'Normal Text':('normal', '00'), - 'Python Keywords':('keyword', '01'), - 'Python Definitions':('definition', '02'), - 'Python Builtins':('builtin', '03'), - 'Python Comments':('comment', '04'), - 'Python Strings':('string', '05'), - 'Selected Text':('hilite', '06'), - 'Found Text':('hit', '07'), - 'Cursor':('cursor', '08'), - 'Error Text':('error', '09'), - 'Shell Normal Text':('console', '10'), - 'Shell Stdout Text':('stdout', '11'), - 'Shell Stderr Text':('stderr', '12'), + 'Normal Text': ('normal', '00'), + 'Python Keywords': ('keyword', '01'), + 'Python Definitions': ('definition', '02'), + 'Python Builtins': ('builtin', '03'), + 'Python Comments': ('comment', '04'), + 'Python Strings': ('string', '05'), + 'Selected Text': ('hilite', '06'), + 'Found Text': ('hit', '07'), + 'Cursor': ('cursor', '08'), + 'Editor Breakpoint': ('break', '09'), + 'Shell Normal Text': ('console', '10'), + 'Shell Error Text': ('error', '11'), + 'Shell Stdout Text': ('stdout', '12'), + 'Shell Stderr Text': ('stderr', '13'), } self.ResetChangedItems() #load initial values in changed items dict self.CreateWidgets() @@ -219,7 +220,8 @@ ("'selected'", 'hilite'), ('\n var2 = ', 'normal'), ("'found'", 'hit'), ('\n var3 = ', 'normal'), ('list', 'builtin'), ('(', 'normal'), - ('None', 'keyword'), (')\n\n', 'normal'), + ('None', 'keyword'), (')\n', 'normal'), + (' breakpoint("line")', 'break'), ('\n\n', 'normal'), (' error ', 'error'), (' ', 'normal'), ('cursor |', 'cursor'), ('\n ', 'normal'), ('shell', 'console'), (' ', 'normal'), -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 3 04:13:22 2015 From: python-checkins at python.org (terry.reedy) Date: Sat, 03 Oct 2015 02:13:22 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_Merge_with_3=2E4?= Message-ID: <20151003021321.7248.21269@psf.io> https://hg.python.org/cpython/rev/ff025cf824d0 changeset: 98502:ff025cf824d0 branch: 3.5 parent: 98494:fb90425017e3 parent: 98501:d874a6157223 user: Terry Jan Reedy date: Fri Oct 02 22:12:39 2015 -0400 summary: Merge with 3.4 files: Lib/idlelib/configDialog.py | 30 +++++++++++++----------- 1 files changed, 16 insertions(+), 14 deletions(-) diff --git a/Lib/idlelib/configDialog.py b/Lib/idlelib/configDialog.py --- a/Lib/idlelib/configDialog.py +++ b/Lib/idlelib/configDialog.py @@ -43,19 +43,20 @@ #The first value of the tuple is the sample area tag name. #The second value is the display name list sort index. 
self.themeElements={ - 'Normal Text':('normal', '00'), - 'Python Keywords':('keyword', '01'), - 'Python Definitions':('definition', '02'), - 'Python Builtins':('builtin', '03'), - 'Python Comments':('comment', '04'), - 'Python Strings':('string', '05'), - 'Selected Text':('hilite', '06'), - 'Found Text':('hit', '07'), - 'Cursor':('cursor', '08'), - 'Error Text':('error', '09'), - 'Shell Normal Text':('console', '10'), - 'Shell Stdout Text':('stdout', '11'), - 'Shell Stderr Text':('stderr', '12'), + 'Normal Text': ('normal', '00'), + 'Python Keywords': ('keyword', '01'), + 'Python Definitions': ('definition', '02'), + 'Python Builtins': ('builtin', '03'), + 'Python Comments': ('comment', '04'), + 'Python Strings': ('string', '05'), + 'Selected Text': ('hilite', '06'), + 'Found Text': ('hit', '07'), + 'Cursor': ('cursor', '08'), + 'Editor Breakpoint': ('break', '09'), + 'Shell Normal Text': ('console', '10'), + 'Shell Error Text': ('error', '11'), + 'Shell Stdout Text': ('stdout', '12'), + 'Shell Stderr Text': ('stderr', '13'), } self.ResetChangedItems() #load initial values in changed items dict self.CreateWidgets() @@ -219,7 +220,8 @@ ("'selected'", 'hilite'), ('\n var2 = ', 'normal'), ("'found'", 'hit'), ('\n var3 = ', 'normal'), ('list', 'builtin'), ('(', 'normal'), - ('None', 'keyword'), (')\n\n', 'normal'), + ('None', 'keyword'), (')\n', 'normal'), + (' breakpoint("line")', 'break'), ('\n\n', 'normal'), (' error ', 'error'), (' ', 'normal'), ('cursor |', 'cursor'), ('\n ', 'normal'), ('shell', 'console'), (' ', 'normal'), -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 3 05:26:20 2015 From: python-checkins at python.org (terry.reedy) Date: Sat, 03 Oct 2015 03:26:20 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzI1MjI0?= =?utf-8?q?=3A_README=2Etxt_is_now_an_idlelib_index_for_IDLE_developers_an?= =?utf-8?q?d?= Message-ID: <20151003032620.7262.61790@psf.io> https://hg.python.org/cpython/rev/bb1a8d3dd4a1 changeset: 98505:bb1a8d3dd4a1 branch: 3.4 parent: 98501:d874a6157223 user: Terry Jan Reedy date: Fri Oct 02 23:22:59 2015 -0400 summary: Issue #25224: README.txt is now an idlelib index for IDLE developers and curious users. The previous user content is now in the IDLE doc and is redundant. IDLE now means 'Integrated Development and Learning Environment'. files: Doc/library/idle.rst | 2 +- Lib/idlelib/README.txt | 249 ++++++++++++++++++++++++---- Lib/idlelib/help.html | 4 +- 3 files changed, 212 insertions(+), 43 deletions(-) diff --git a/Doc/library/idle.rst b/Doc/library/idle.rst --- a/Doc/library/idle.rst +++ b/Doc/library/idle.rst @@ -10,7 +10,7 @@ .. moduleauthor:: Guido van Rossum -IDLE is the Python IDE built with the :mod:`tkinter` GUI toolkit. +IDLE is Python's Integrated Development and Learning Environment. IDLE has the following features: diff --git a/Lib/idlelib/README.txt b/Lib/idlelib/README.txt --- a/Lib/idlelib/README.txt +++ b/Lib/idlelib/README.txt @@ -1,60 +1,229 @@ -IDLE is Python's Tkinter-based Integrated DeveLopment Environment. +README.txt: an index to idlelib files and the IDLE menu. -IDLE emphasizes a lightweight, clean design with a simple user interface. -Although it is suitable for beginners, even advanced users will find that -IDLE has everything they really need to develop pure Python code. +IDLE is Python?s Integrated Development and Learning +Environment. The user documentation is part of the Library Reference and +is available in IDLE by selecting Help => IDLE Help. 
This README documents +idlelib for IDLE developers and curious users. -IDLE features a multi-window text editor with multiple undo, Python colorizing, -and many other capabilities, e.g. smart indent, call tips, and autocompletion. +IDLELIB FILES lists files alphabetically by category, +with a short description of each. -The editor has comprehensive search functions, including searching through -multiple files. Class browsers and path browsers provide fast access to -code objects from a top level viewpoint without dealing with code folding. +IDLE MENU show the menu tree, annotated with the module +or module object that implements the corresponding function. -There is a Python Shell window which features colorizing and command recall. +This file is descriptive, not prescriptive, and may have errors +and omissions and lag behind changes in idlelib. -IDLE executes Python code in a separate process, which is restarted for each -Run (F5) initiated from an editor window. The environment can also be -restarted from the Shell window without restarting IDLE. -This enhancement has often been requested, and is now finally available. The -magic "reload/import *" incantations are no longer required when editing and -testing a module two or three steps down the import chain. +IDLELIB FILES +Implemetation files not in IDLE MENU are marked (nim). +Deprecated files and objects are listed separately as the end. -(Personal firewall software may warn about the connection IDLE makes to its -subprocess using this computer's internal loopback interface. This connection -is not visible on any external interface and no data is sent to or received -from the Internet.) +Startup +------- +__init__.py # import, does nothing +__main__.py # -m, starts IDLE +idle.bat +idle.py +idle.pyw -It is possible to interrupt tightly looping user code, even on Windows. +Implementation +-------------- +AutoComplete.py # Complete attribute names or filenames. +AutoCompleteWindow.py # Display completions. +AutoExpand.py # Expand word with previous word in file. +Bindings.py # Define most of IDLE menu. +CallTipWindow.py # Display calltip. +CallTips.py # Create calltip text. +ClassBrowser.py # Create module browser window. +CodeContext.py # Show compound statement headers otherwise not visible. +ColorDelegator.py # Colorize text (nim). +Debugger.py # Debug code run from editor; show window. +Delegator.py # Define base class for delegators (nim). +EditorWindow.py # Define most of editor and utility functions. +FileList.py # Open files and manage list of open windows (nim). +FormatParagraph.py# Re-wrap multiline strings and comments. +GrepDialog.py # Find all occurrences of pattern in multiple files. +HyperParser.py # Parse code around a given index. +IOBinding.py # Open, read, and write files +IdleHistory.py # Get previous or next user input in shell (nim) +MultiCall.py # Wrap tk widget to allow multiple calls per event (nim). +MultiStatusBar.py # Define status bar for windows (nim). +ObjectBrowser.py # Define class used in StackViewer (nim). +OutputWindow.py # Create window for grep output. +ParenMatch.py # Match fenceposts: (), [], and {}. +PathBrowser.py # Create path browser window. +Percolator.py # Manage delegator stack (nim). +PyParse.py # Give information on code indentation +PyShell.py # Start IDLE, manage shell, complete editor window +RemoteDebugger.py # Debug code run in remote process. +RemoteObjectBrowser.py # Communicate objects between processes with rpc (nim). +ReplaceDialog.py # Search and replace pattern in text. 
+RstripExtension.py# Strip trailing whitespace +ScriptBinding.py # Check and run user code. +ScrolledList.py # Define ScrolledList widget for IDLE (nim). +SearchDialog.py # Search for pattern in text. +SearchDialogBase.py # Define base for search, replace, and grep dialogs. +SearchEngine.py # Define engine for all 3 search dialogs. +StackViewer.py # View stack after exception. +TreeWidget.py # Define tree widger, used in browsers (nim). +UndoDelegator.py # Manage undo stack. +WidgetRedirector.py # Intercept widget subcommands (for percolator) (nim). +WindowList.py # Manage window list and define listed top level. +ZoomHeight.py # Zoom window to full height of screen. +aboutDialog.py # Display About IDLE dialog. +configDialog.py # Display user configuration dialogs. +configHandler.py # Load, fetch, and save configuration (nim). +configHelpSourceEdit.py # Specify help source. +configSectionNameDialog.py # Spefify user config section name +dynOptionMenuWidget.py # define mutable OptionMenu widget (nim). +help.py # Display IDLE's html doc. +keybindingDialog.py # Change keybindings. +macosxSupport.py # Help IDLE run on Macs (nim). +rpc.py # Commuicate between idle and user processes (nim). +run.py # Manage user code execution subprocess. +tabbedpages.py # Define tabbed pages widget (nim). +textView.py # Define read-only text widget (nim). -Applications which cannot support subprocesses and/or sockets can still run -IDLE in a single process. +Configuration +------------- +config-extensions.def # Defaults for extensions +config-highlight.def # Defaults for colorizing +config-keys.def # Defaults for key bindings +config-main.def # Defai;ts fpr font and geneal -IDLE has an integrated debugger with stepping, persistent breakpoints, and call -stack visibility. +Text +---- +CREDITS.txt # not maintained, displayed by About IDLE +HISTORY.txt # NEWS up to July 2001 +NEWS.txt # commits, displayed by About IDLE +README.txt # this file, displeyed by About IDLE +TODO.txt # needs review +extend.txt # about writing extensions +help.html # copy of idle.html in docs, displayed by IDLE Help -There is a GUI configuration manager which makes it easy to select fonts, -colors, keybindings, and startup options. This facility includes a feature -which allows the user to specify additional help sources, either locally or on -the web. +Subdirectories +-------------- +Icons # small image files +idle_test # files for human test and automated unit tests -IDLE is coded in 100% pure Python, using the Tkinter GUI toolkit (Tk/Tcl) -and is cross-platform, working on Unix, Mac, and Windows. +Unused and Deprecated files and objects (nim) +--------------------------------------------- +EditorWindow.py: Helpdialog and helpDialog +ToolTip.py: unused. +help.txt +idlever.py -IDLE accepts command line arguments. Try idle -h to see the options. +IDLE MENUS +Top level items and most submenu items are defined in Bindings. +Extenstions add submenu items when active. The names given are +found, quoted, in one of these modules, paired with a '<>'. +Each pseudoevent is bound to an event handler. Some event handlers +call another function that does the actual work. The annotations below +are intended to at least give the module where the actual work is done. -If you find bugs or have suggestions or patches, let us know about -them by using the Python issue tracker: +File # IOBindig except as noted + New File + Open... 
# IOBinding.open + Open Module + Recent Files + Class Browser # Class Browser + Path Browser # Path Browser + --- + Save # IDBinding.save + Save As... # IOBinding.save_as + Save Copy As... # IOBindling.save_a_copy + --- + Print Window # IOBinding.print_window + --- + Close + Exit -http://bugs.python.org +Edit + Undo # undoDelegator + Redo # undoDelegator + --- + Cut + Copy + Paste + Select All + --- # Next 5 items use SearchEngine; dialogs use SearchDialogBase + Find # Search Dialog + Find Again + Find Selection + Find in Files... # GrepDialog + Replace... # ReplaceDialog + Go to Line + Show Completions # AutoComplete extension and AutoCompleteWidow (&HP) + Expand Word # AutoExpand extension + Show call tip # Calltips extension and CalltipWindow (& Hyperparser) + Show surrounding parens # ParenMatch (& Hyperparser) -For further details and links, read the Help files and check the IDLE home -page at +Shell # PyShell + View Last Restart # PyShell.? + Restart Shell # PyShell.? -http://www.python.org/idle/ +Debug (Shell only) + Go to File/Line + Debugger # Debugger, RemoteDebugger + Stack Viewer # StackViewer + Auto-open Stack Viewer # StackViewer -There is a mail list for IDLE: idle-dev at python.org. You can join at +Format (Editor only) + Indent Region + Dedent Region + Comment Out Region + Uncomment Region + Tabify Region + Untabify Region + Toggle Tabs + New Indent Width + Format Paragraph # FormatParagraph extension + --- + Strip tailing whitespace # RstripExtension extension -http://mail.python.org/mailman/listinfo/idle-dev +Run (Editor only) + Python Shell # PyShell + --- + Check Module # ScriptBinding + Run Module # ScriptBinding + +Options + Configure IDLE # configDialog + (tabs in the dialog) + Font tab # onfig-main.def + Highlight tab # configSectionNameDialog, config-highlight.def + Keys tab # keybindingDialog, configSectionNameDialog, onfig-keus.def + General tab # configHelpSourceEdit, config-main.def + Configure Extensions # configDialog + Xyz tab # xyz.py, config-extensions.def + --- + Code Context (editor only) # CodeContext extension + +Window + Zoomheight # ZoomHeight extension + --- + # WindowList + +Help + About IDLE # aboutDialog + --- + IDLE Help # help + Python Doc + Turtle Demo + --- + + + (right click) +Defined in EditorWindow, PyShell, Output + Cut + Copy + Paste + --- + Go to file/line (shell and output only) + Set Breakpoint (editor only) + Clear Breakpoint (editor only) + Defined in Debugger + Go to source line + Show stack frame diff --git a/Lib/idlelib/help.html b/Lib/idlelib/help.html --- a/Lib/idlelib/help.html +++ b/Lib/idlelib/help.html @@ -75,7 +75,7 @@

25.5. IDLE??

-

IDLE is the Python IDE built with the tkinter GUI toolkit.

+

IDLE is Python’s Integrated Development and Learning Environment.

IDLE has the following features:

  • coded in 100% pure Python, using the tkinter GUI toolkit
  • @@ -699,7 +699,7 @@ The Python Software Foundation is a non-profit corporation. Please donate.
    - Last updated on Sep 29, 2015. + Last updated on Oct 02, 2015. Found a bug?
    Created using Sphinx 1.2.3. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 3 05:26:20 2015 From: python-checkins at python.org (terry.reedy) Date: Sat, 03 Oct 2015 03:26:20 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogSXNzdWUgIzI1MjI0?= =?utf-8?q?=3A_README=2Etxt_is_now_an_idlelib_index_for_IDLE_developers_an?= =?utf-8?q?d?= Message-ID: <20151003032620.128840.59379@psf.io> https://hg.python.org/cpython/rev/4e62989e3688 changeset: 98504:4e62989e3688 branch: 2.7 parent: 98500:e67da755d614 user: Terry Jan Reedy date: Fri Oct 02 23:22:54 2015 -0400 summary: Issue #25224: README.txt is now an idlelib index for IDLE developers and curious users. The previous user content is now in the IDLE doc and is redundant. IDLE now means 'Integrated Development and Learning Environment'. files: Doc/library/idle.rst | 2 +- Lib/idlelib/README.txt | 250 ++++++++++++++++++++++++---- Lib/idlelib/help.html | 4 +- 3 files changed, 211 insertions(+), 45 deletions(-) diff --git a/Doc/library/idle.rst b/Doc/library/idle.rst --- a/Doc/library/idle.rst +++ b/Doc/library/idle.rst @@ -10,7 +10,7 @@ .. moduleauthor:: Guido van Rossum -IDLE is the Python IDE built with the :mod:`tkinter` GUI toolkit. +IDLE is Python's Integrated Development and Learning Environment. IDLE has the following features: diff --git a/Lib/idlelib/README.txt b/Lib/idlelib/README.txt --- a/Lib/idlelib/README.txt +++ b/Lib/idlelib/README.txt @@ -1,63 +1,229 @@ -IDLE is Python's Tkinter-based Integrated DeveLopment Environment. +README.txt: an index to idlelib files and the IDLE menu. -IDLE emphasizes a lightweight, clean design with a simple user interface. -Although it is suitable for beginners, even advanced users will find that -IDLE has everything they really need to develop pure Python code. +IDLE is Python?s Integrated Development and Learning +Environment. The user documentation is part of the Library Reference and +is available in IDLE by selecting Help => IDLE Help. This README documents +idlelib for IDLE developers and curious users. -IDLE features a multi-window text editor with multiple undo, Python colorizing, -and many other capabilities, e.g. smart indent, call tips, and autocompletion. +IDLELIB FILES lists files alphabetically by category, +with a short description of each. -The editor has comprehensive search functions, including searching through -multiple files. Class browsers and path browsers provide fast access to -code objects from a top level viewpoint without dealing with code folding. +IDLE MENU show the menu tree, annotated with the module +or module object that implements the corresponding function. -There is a Python Shell window which features colorizing and command recall. +This file is descriptive, not prescriptive, and may have errors +and omissions and lag behind changes in idlelib. -IDLE executes Python code in a separate process, which is restarted for each -Run (F5) initiated from an editor window. The environment can also be -restarted from the Shell window without restarting IDLE. -This enhancement has often been requested, and is now finally available. The -magic "reload/import *" incantations are no longer required when editing and -testing a module two or three steps down the import chain. +IDLELIB FILES +Implemetation files not in IDLE MENU are marked (nim). +Deprecated files and objects are listed separately as the end. 
-(Personal firewall software may warn about the connection IDLE makes to its -subprocess using this computer's internal loopback interface. This connection -is not visible on any external interface and no data is sent to or received -from the Internet.) +Startup +------- +__init__.py # import, does nothing +__main__.py # -m, starts IDLE +idle.bat +idle.py +idle.pyw -It is possible to interrupt tightly looping user code, even on Windows. +Implementation +-------------- +AutoComplete.py # Complete attribute names or filenames. +AutoCompleteWindow.py # Display completions. +AutoExpand.py # Expand word with previous word in file. +Bindings.py # Define most of IDLE menu. +CallTipWindow.py # Display calltip. +CallTips.py # Create calltip text. +ClassBrowser.py # Create module browser window. +CodeContext.py # Show compound statement headers otherwise not visible. +ColorDelegator.py # Colorize text (nim). +Debugger.py # Debug code run from editor; show window. +Delegator.py # Define base class for delegators (nim). +EditorWindow.py # Define most of editor and utility functions. +FileList.py # Open files and manage list of open windows (nim). +FormatParagraph.py# Re-wrap multiline strings and comments. +GrepDialog.py # Find all occurrences of pattern in multiple files. +HyperParser.py # Parse code around a given index. +IOBinding.py # Open, read, and write files +IdleHistory.py # Get previous or next user input in shell (nim) +MultiCall.py # Wrap tk widget to allow multiple calls per event (nim). +MultiStatusBar.py # Define status bar for windows (nim). +ObjectBrowser.py # Define class used in StackViewer (nim). +OutputWindow.py # Create window for grep output. +ParenMatch.py # Match fenceposts: (), [], and {}. +PathBrowser.py # Create path browser window. +Percolator.py # Manage delegator stack (nim). +PyParse.py # Give information on code indentation +PyShell.py # Start IDLE, manage shell, complete editor window +RemoteDebugger.py # Debug code run in remote process. +RemoteObjectBrowser.py # Communicate objects between processes with rpc (nim). +ReplaceDialog.py # Search and replace pattern in text. +RstripExtension.py# Strip trailing whitespace +ScriptBinding.py # Check and run user code. +ScrolledList.py # Define ScrolledList widget for IDLE (nim). +SearchDialog.py # Search for pattern in text. +SearchDialogBase.py # Define base for search, replace, and grep dialogs. +SearchEngine.py # Define engine for all 3 search dialogs. +StackViewer.py # View stack after exception. +TreeWidget.py # Define tree widger, used in browsers (nim). +UndoDelegator.py # Manage undo stack. +WidgetRedirector.py # Intercept widget subcommands (for percolator) (nim). +WindowList.py # Manage window list and define listed top level. +ZoomHeight.py # Zoom window to full height of screen. +aboutDialog.py # Display About IDLE dialog. +configDialog.py # Display user configuration dialogs. +configHandler.py # Load, fetch, and save configuration (nim). +configHelpSourceEdit.py # Specify help source. +configSectionNameDialog.py # Spefify user config section name +dynOptionMenuWidget.py # define mutable OptionMenu widget (nim). +help.py # Display IDLE's html doc. +keybindingDialog.py # Change keybindings. +macosxSupport.py # Help IDLE run on Macs (nim). +rpc.py # Commuicate between idle and user processes (nim). +run.py # Manage user code execution subprocess. +tabbedpages.py # Define tabbed pages widget (nim). +textView.py # Define read-only text widget (nim). 
-Applications which cannot support subprocesses and/or sockets can still run -IDLE in a single process. +Configuration +------------- +config-extensions.def # Defaults for extensions +config-highlight.def # Defaults for colorizing +config-keys.def # Defaults for key bindings +config-main.def # Defai;ts fpr font and geneal -IDLE has an integrated debugger with stepping, persistent breakpoints, and call -stack visibility. +Text +---- +CREDITS.txt # not maintained, displayed by About IDLE +HISTORY.txt # NEWS up to July 2001 +NEWS.txt # commits, displayed by About IDLE +README.txt # this file, displeyed by About IDLE +TODO.txt # needs review +extend.txt # about writing extensions +help.html # copy of idle.html in docs, displayed by IDLE Help -There is a GUI configuration manager which makes it easy to select fonts, -colors, keybindings, and startup options. This facility includes a feature -which allows the user to specify additional help sources, either locally or on -the web. +Subdirectories +-------------- +Icons # small image files +idle_test # files for human test and automated unit tests -IDLE is coded in 100% pure Python, using the Tkinter GUI toolkit (Tk/Tcl) -and is cross-platform, working on Unix, Mac, and Windows. +Unused and Deprecated files and objects (nim) +--------------------------------------------- +EditorWindow.py: Helpdialog and helpDialog +ToolTip.py: unused. +help.txt +idlever.py -IDLE accepts command line arguments. Try idle -h to see the options. +IDLE MENUS +Top level items and most submenu items are defined in Bindings. +Extenstions add submenu items when active. The names given are +found, quoted, in one of these modules, paired with a '<>'. +Each pseudoevent is bound to an event handler. Some event handlers +call another function that does the actual work. The annotations below +are intended to at least give the module where the actual work is done. -If you find bugs or have suggestions, let us know about them by using the -Python Bug Tracker: +File # IOBindig except as noted + New File + Open... # IOBinding.open + Open Module + Recent Files + Class Browser # Class Browser + Path Browser # Path Browser + --- + Save # IDBinding.save + Save As... # IOBinding.save_as + Save Copy As... # IOBindling.save_a_copy + --- + Print Window # IOBinding.print_window + --- + Close + Exit -http://sourceforge.net/projects/python +Edit + Undo # undoDelegator + Redo # undoDelegator + --- + Cut + Copy + Paste + Select All + --- # Next 5 items use SearchEngine; dialogs use SearchDialogBase + Find # Search Dialog + Find Again + Find Selection + Find in Files... # GrepDialog + Replace... # ReplaceDialog + Go to Line + Show Completions # AutoComplete extension and AutoCompleteWidow (&HP) + Expand Word # AutoExpand extension + Show call tip # Calltips extension and CalltipWindow (& Hyperparser) + Show surrounding parens # ParenMatch (& Hyperparser) -Patches are always appreciated at the Python Patch Tracker, and change -requests should be posted to the RFE Tracker. +Shell # PyShell + View Last Restart # PyShell.? + Restart Shell # PyShell.? 
-For further details and links, read the Help files and check the IDLE home -page at +Debug (Shell only) + Go to File/Line + Debugger # Debugger, RemoteDebugger + Stack Viewer # StackViewer + Auto-open Stack Viewer # StackViewer -http://www.python.org/idle/ +Format (Editor only) + Indent Region + Dedent Region + Comment Out Region + Uncomment Region + Tabify Region + Untabify Region + Toggle Tabs + New Indent Width + Format Paragraph # FormatParagraph extension + --- + Strip tailing whitespace # RstripExtension extension -There is a mail list for IDLE: idle-dev at python.org. You can join at +Run (Editor only) + Python Shell # PyShell + --- + Check Module # ScriptBinding + Run Module # ScriptBinding -http://mail.python.org/mailman/listinfo/idle-dev +Options + Configure IDLE # configDialog + (tabs in the dialog) + Font tab # onfig-main.def + Highlight tab # configSectionNameDialog, config-highlight.def + Keys tab # keybindingDialog, configSectionNameDialog, onfig-keus.def + General tab # configHelpSourceEdit, config-main.def + Configure Extensions # configDialog + Xyz tab # xyz.py, config-extensions.def + --- + Code Context (editor only) # CodeContext extension + +Window + Zoomheight # ZoomHeight extension + --- + # WindowList + +Help + About IDLE # aboutDialog + --- + IDLE Help # help + Python Doc + Turtle Demo + --- + + + (right click) +Defined in EditorWindow, PyShell, Output + Cut + Copy + Paste + --- + Go to file/line (shell and output only) + Set Breakpoint (editor only) + Clear Breakpoint (editor only) + Defined in Debugger + Go to source line + Show stack frame diff --git a/Lib/idlelib/help.html b/Lib/idlelib/help.html --- a/Lib/idlelib/help.html +++ b/Lib/idlelib/help.html @@ -75,7 +75,7 @@

    24.6. IDLE??

    -

    IDLE is the Python IDE built with the tkinter GUI toolkit.

    +

IDLE is Python’s Integrated Development and Learning Environment.

    IDLE has the following features:

    • coded in 100% pure Python, using the tkinter GUI toolkit
    • @@ -699,7 +699,7 @@ The Python Software Foundation is a non-profit corporation. Please donate.
      - Last updated on Sep 29, 2015. + Last updated on Oct 02, 2015. Found a bug?
      Created using Sphinx 1.2.3. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 3 05:26:21 2015 From: python-checkins at python.org (terry.reedy) Date: Sat, 03 Oct 2015 03:26:21 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_Merge_with_3=2E4?= Message-ID: <20151003032621.2685.39202@psf.io> https://hg.python.org/cpython/rev/227f959f43e7 changeset: 98506:227f959f43e7 branch: 3.5 parent: 98502:ff025cf824d0 parent: 98505:bb1a8d3dd4a1 user: Terry Jan Reedy date: Fri Oct 02 23:25:14 2015 -0400 summary: Merge with 3.4 files: Doc/library/idle.rst | 2 +- Lib/idlelib/README.txt | 249 ++++++++++++++++++++++++---- Lib/idlelib/help.html | 4 +- 3 files changed, 212 insertions(+), 43 deletions(-) diff --git a/Doc/library/idle.rst b/Doc/library/idle.rst --- a/Doc/library/idle.rst +++ b/Doc/library/idle.rst @@ -10,7 +10,7 @@ .. moduleauthor:: Guido van Rossum -IDLE is the Python IDE built with the :mod:`tkinter` GUI toolkit. +IDLE is Python's Integrated Development and Learning Environment. IDLE has the following features: diff --git a/Lib/idlelib/README.txt b/Lib/idlelib/README.txt --- a/Lib/idlelib/README.txt +++ b/Lib/idlelib/README.txt @@ -1,60 +1,229 @@ -IDLE is Python's Tkinter-based Integrated DeveLopment Environment. +README.txt: an index to idlelib files and the IDLE menu. -IDLE emphasizes a lightweight, clean design with a simple user interface. -Although it is suitable for beginners, even advanced users will find that -IDLE has everything they really need to develop pure Python code. +IDLE is Python?s Integrated Development and Learning +Environment. The user documentation is part of the Library Reference and +is available in IDLE by selecting Help => IDLE Help. This README documents +idlelib for IDLE developers and curious users. -IDLE features a multi-window text editor with multiple undo, Python colorizing, -and many other capabilities, e.g. smart indent, call tips, and autocompletion. +IDLELIB FILES lists files alphabetically by category, +with a short description of each. -The editor has comprehensive search functions, including searching through -multiple files. Class browsers and path browsers provide fast access to -code objects from a top level viewpoint without dealing with code folding. +IDLE MENU show the menu tree, annotated with the module +or module object that implements the corresponding function. -There is a Python Shell window which features colorizing and command recall. +This file is descriptive, not prescriptive, and may have errors +and omissions and lag behind changes in idlelib. -IDLE executes Python code in a separate process, which is restarted for each -Run (F5) initiated from an editor window. The environment can also be -restarted from the Shell window without restarting IDLE. -This enhancement has often been requested, and is now finally available. The -magic "reload/import *" incantations are no longer required when editing and -testing a module two or three steps down the import chain. +IDLELIB FILES +Implemetation files not in IDLE MENU are marked (nim). +Deprecated files and objects are listed separately as the end. -(Personal firewall software may warn about the connection IDLE makes to its -subprocess using this computer's internal loopback interface. This connection -is not visible on any external interface and no data is sent to or received -from the Internet.) 
+Startup +------- +__init__.py # import, does nothing +__main__.py # -m, starts IDLE +idle.bat +idle.py +idle.pyw -It is possible to interrupt tightly looping user code, even on Windows. +Implementation +-------------- +AutoComplete.py # Complete attribute names or filenames. +AutoCompleteWindow.py # Display completions. +AutoExpand.py # Expand word with previous word in file. +Bindings.py # Define most of IDLE menu. +CallTipWindow.py # Display calltip. +CallTips.py # Create calltip text. +ClassBrowser.py # Create module browser window. +CodeContext.py # Show compound statement headers otherwise not visible. +ColorDelegator.py # Colorize text (nim). +Debugger.py # Debug code run from editor; show window. +Delegator.py # Define base class for delegators (nim). +EditorWindow.py # Define most of editor and utility functions. +FileList.py # Open files and manage list of open windows (nim). +FormatParagraph.py# Re-wrap multiline strings and comments. +GrepDialog.py # Find all occurrences of pattern in multiple files. +HyperParser.py # Parse code around a given index. +IOBinding.py # Open, read, and write files +IdleHistory.py # Get previous or next user input in shell (nim) +MultiCall.py # Wrap tk widget to allow multiple calls per event (nim). +MultiStatusBar.py # Define status bar for windows (nim). +ObjectBrowser.py # Define class used in StackViewer (nim). +OutputWindow.py # Create window for grep output. +ParenMatch.py # Match fenceposts: (), [], and {}. +PathBrowser.py # Create path browser window. +Percolator.py # Manage delegator stack (nim). +PyParse.py # Give information on code indentation +PyShell.py # Start IDLE, manage shell, complete editor window +RemoteDebugger.py # Debug code run in remote process. +RemoteObjectBrowser.py # Communicate objects between processes with rpc (nim). +ReplaceDialog.py # Search and replace pattern in text. +RstripExtension.py# Strip trailing whitespace +ScriptBinding.py # Check and run user code. +ScrolledList.py # Define ScrolledList widget for IDLE (nim). +SearchDialog.py # Search for pattern in text. +SearchDialogBase.py # Define base for search, replace, and grep dialogs. +SearchEngine.py # Define engine for all 3 search dialogs. +StackViewer.py # View stack after exception. +TreeWidget.py # Define tree widger, used in browsers (nim). +UndoDelegator.py # Manage undo stack. +WidgetRedirector.py # Intercept widget subcommands (for percolator) (nim). +WindowList.py # Manage window list and define listed top level. +ZoomHeight.py # Zoom window to full height of screen. +aboutDialog.py # Display About IDLE dialog. +configDialog.py # Display user configuration dialogs. +configHandler.py # Load, fetch, and save configuration (nim). +configHelpSourceEdit.py # Specify help source. +configSectionNameDialog.py # Spefify user config section name +dynOptionMenuWidget.py # define mutable OptionMenu widget (nim). +help.py # Display IDLE's html doc. +keybindingDialog.py # Change keybindings. +macosxSupport.py # Help IDLE run on Macs (nim). +rpc.py # Commuicate between idle and user processes (nim). +run.py # Manage user code execution subprocess. +tabbedpages.py # Define tabbed pages widget (nim). +textView.py # Define read-only text widget (nim). -Applications which cannot support subprocesses and/or sockets can still run -IDLE in a single process. 
+Configuration +------------- +config-extensions.def # Defaults for extensions +config-highlight.def # Defaults for colorizing +config-keys.def # Defaults for key bindings +config-main.def # Defai;ts fpr font and geneal -IDLE has an integrated debugger with stepping, persistent breakpoints, and call -stack visibility. +Text +---- +CREDITS.txt # not maintained, displayed by About IDLE +HISTORY.txt # NEWS up to July 2001 +NEWS.txt # commits, displayed by About IDLE +README.txt # this file, displeyed by About IDLE +TODO.txt # needs review +extend.txt # about writing extensions +help.html # copy of idle.html in docs, displayed by IDLE Help -There is a GUI configuration manager which makes it easy to select fonts, -colors, keybindings, and startup options. This facility includes a feature -which allows the user to specify additional help sources, either locally or on -the web. +Subdirectories +-------------- +Icons # small image files +idle_test # files for human test and automated unit tests -IDLE is coded in 100% pure Python, using the Tkinter GUI toolkit (Tk/Tcl) -and is cross-platform, working on Unix, Mac, and Windows. +Unused and Deprecated files and objects (nim) +--------------------------------------------- +EditorWindow.py: Helpdialog and helpDialog +ToolTip.py: unused. +help.txt +idlever.py -IDLE accepts command line arguments. Try idle -h to see the options. +IDLE MENUS +Top level items and most submenu items are defined in Bindings. +Extenstions add submenu items when active. The names given are +found, quoted, in one of these modules, paired with a '<>'. +Each pseudoevent is bound to an event handler. Some event handlers +call another function that does the actual work. The annotations below +are intended to at least give the module where the actual work is done. -If you find bugs or have suggestions or patches, let us know about -them by using the Python issue tracker: +File # IOBindig except as noted + New File + Open... # IOBinding.open + Open Module + Recent Files + Class Browser # Class Browser + Path Browser # Path Browser + --- + Save # IDBinding.save + Save As... # IOBinding.save_as + Save Copy As... # IOBindling.save_a_copy + --- + Print Window # IOBinding.print_window + --- + Close + Exit -http://bugs.python.org +Edit + Undo # undoDelegator + Redo # undoDelegator + --- + Cut + Copy + Paste + Select All + --- # Next 5 items use SearchEngine; dialogs use SearchDialogBase + Find # Search Dialog + Find Again + Find Selection + Find in Files... # GrepDialog + Replace... # ReplaceDialog + Go to Line + Show Completions # AutoComplete extension and AutoCompleteWidow (&HP) + Expand Word # AutoExpand extension + Show call tip # Calltips extension and CalltipWindow (& Hyperparser) + Show surrounding parens # ParenMatch (& Hyperparser) -For further details and links, read the Help files and check the IDLE home -page at +Shell # PyShell + View Last Restart # PyShell.? + Restart Shell # PyShell.? -http://www.python.org/idle/ +Debug (Shell only) + Go to File/Line + Debugger # Debugger, RemoteDebugger + Stack Viewer # StackViewer + Auto-open Stack Viewer # StackViewer -There is a mail list for IDLE: idle-dev at python.org. 
You can join at +Format (Editor only) + Indent Region + Dedent Region + Comment Out Region + Uncomment Region + Tabify Region + Untabify Region + Toggle Tabs + New Indent Width + Format Paragraph # FormatParagraph extension + --- + Strip tailing whitespace # RstripExtension extension -http://mail.python.org/mailman/listinfo/idle-dev +Run (Editor only) + Python Shell # PyShell + --- + Check Module # ScriptBinding + Run Module # ScriptBinding + +Options + Configure IDLE # configDialog + (tabs in the dialog) + Font tab # onfig-main.def + Highlight tab # configSectionNameDialog, config-highlight.def + Keys tab # keybindingDialog, configSectionNameDialog, onfig-keus.def + General tab # configHelpSourceEdit, config-main.def + Configure Extensions # configDialog + Xyz tab # xyz.py, config-extensions.def + --- + Code Context (editor only) # CodeContext extension + +Window + Zoomheight # ZoomHeight extension + --- + # WindowList + +Help + About IDLE # aboutDialog + --- + IDLE Help # help + Python Doc + Turtle Demo + --- + + + (right click) +Defined in EditorWindow, PyShell, Output + Cut + Copy + Paste + --- + Go to file/line (shell and output only) + Set Breakpoint (editor only) + Clear Breakpoint (editor only) + Defined in Debugger + Go to source line + Show stack frame diff --git a/Lib/idlelib/help.html b/Lib/idlelib/help.html --- a/Lib/idlelib/help.html +++ b/Lib/idlelib/help.html @@ -75,7 +75,7 @@

      25.5. IDLE??

      -

      IDLE is the Python IDE built with the tkinter GUI toolkit.

      +

IDLE is Python’s Integrated Development and Learning Environment.

      IDLE has the following features:

      • coded in 100% pure Python, using the tkinter GUI toolkit
      • @@ -699,7 +699,7 @@ The Python Software Foundation is a non-profit corporation. Please donate.
        - Last updated on Sep 29, 2015. + Last updated on Oct 02, 2015. Found a bug?
        Created using Sphinx 1.2.3. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 3 05:26:21 2015 From: python-checkins at python.org (terry.reedy) Date: Sat, 03 Oct 2015 03:26:21 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Merge_with_3=2E5?= Message-ID: <20151003032621.3287.38436@psf.io> https://hg.python.org/cpython/rev/4202abcf5702 changeset: 98507:4202abcf5702 parent: 98503:fd904e1e4f29 parent: 98506:227f959f43e7 user: Terry Jan Reedy date: Fri Oct 02 23:25:54 2015 -0400 summary: Merge with 3.5 files: Doc/library/idle.rst | 2 +- Lib/idlelib/README.txt | 249 ++++++++++++++++++++++++---- Lib/idlelib/help.html | 4 +- 3 files changed, 212 insertions(+), 43 deletions(-) diff --git a/Doc/library/idle.rst b/Doc/library/idle.rst --- a/Doc/library/idle.rst +++ b/Doc/library/idle.rst @@ -10,7 +10,7 @@ .. moduleauthor:: Guido van Rossum -IDLE is the Python IDE built with the :mod:`tkinter` GUI toolkit. +IDLE is Python's Integrated Development and Learning Environment. IDLE has the following features: diff --git a/Lib/idlelib/README.txt b/Lib/idlelib/README.txt --- a/Lib/idlelib/README.txt +++ b/Lib/idlelib/README.txt @@ -1,60 +1,229 @@ -IDLE is Python's Tkinter-based Integrated DeveLopment Environment. +README.txt: an index to idlelib files and the IDLE menu. -IDLE emphasizes a lightweight, clean design with a simple user interface. -Although it is suitable for beginners, even advanced users will find that -IDLE has everything they really need to develop pure Python code. +IDLE is Python?s Integrated Development and Learning +Environment. The user documentation is part of the Library Reference and +is available in IDLE by selecting Help => IDLE Help. This README documents +idlelib for IDLE developers and curious users. -IDLE features a multi-window text editor with multiple undo, Python colorizing, -and many other capabilities, e.g. smart indent, call tips, and autocompletion. +IDLELIB FILES lists files alphabetically by category, +with a short description of each. -The editor has comprehensive search functions, including searching through -multiple files. Class browsers and path browsers provide fast access to -code objects from a top level viewpoint without dealing with code folding. +IDLE MENU show the menu tree, annotated with the module +or module object that implements the corresponding function. -There is a Python Shell window which features colorizing and command recall. +This file is descriptive, not prescriptive, and may have errors +and omissions and lag behind changes in idlelib. -IDLE executes Python code in a separate process, which is restarted for each -Run (F5) initiated from an editor window. The environment can also be -restarted from the Shell window without restarting IDLE. -This enhancement has often been requested, and is now finally available. The -magic "reload/import *" incantations are no longer required when editing and -testing a module two or three steps down the import chain. +IDLELIB FILES +Implemetation files not in IDLE MENU are marked (nim). +Deprecated files and objects are listed separately as the end. -(Personal firewall software may warn about the connection IDLE makes to its -subprocess using this computer's internal loopback interface. This connection -is not visible on any external interface and no data is sent to or received -from the Internet.) 
+Startup +------- +__init__.py # import, does nothing +__main__.py # -m, starts IDLE +idle.bat +idle.py +idle.pyw -It is possible to interrupt tightly looping user code, even on Windows. +Implementation +-------------- +AutoComplete.py # Complete attribute names or filenames. +AutoCompleteWindow.py # Display completions. +AutoExpand.py # Expand word with previous word in file. +Bindings.py # Define most of IDLE menu. +CallTipWindow.py # Display calltip. +CallTips.py # Create calltip text. +ClassBrowser.py # Create module browser window. +CodeContext.py # Show compound statement headers otherwise not visible. +ColorDelegator.py # Colorize text (nim). +Debugger.py # Debug code run from editor; show window. +Delegator.py # Define base class for delegators (nim). +EditorWindow.py # Define most of editor and utility functions. +FileList.py # Open files and manage list of open windows (nim). +FormatParagraph.py# Re-wrap multiline strings and comments. +GrepDialog.py # Find all occurrences of pattern in multiple files. +HyperParser.py # Parse code around a given index. +IOBinding.py # Open, read, and write files +IdleHistory.py # Get previous or next user input in shell (nim) +MultiCall.py # Wrap tk widget to allow multiple calls per event (nim). +MultiStatusBar.py # Define status bar for windows (nim). +ObjectBrowser.py # Define class used in StackViewer (nim). +OutputWindow.py # Create window for grep output. +ParenMatch.py # Match fenceposts: (), [], and {}. +PathBrowser.py # Create path browser window. +Percolator.py # Manage delegator stack (nim). +PyParse.py # Give information on code indentation +PyShell.py # Start IDLE, manage shell, complete editor window +RemoteDebugger.py # Debug code run in remote process. +RemoteObjectBrowser.py # Communicate objects between processes with rpc (nim). +ReplaceDialog.py # Search and replace pattern in text. +RstripExtension.py# Strip trailing whitespace +ScriptBinding.py # Check and run user code. +ScrolledList.py # Define ScrolledList widget for IDLE (nim). +SearchDialog.py # Search for pattern in text. +SearchDialogBase.py # Define base for search, replace, and grep dialogs. +SearchEngine.py # Define engine for all 3 search dialogs. +StackViewer.py # View stack after exception. +TreeWidget.py # Define tree widger, used in browsers (nim). +UndoDelegator.py # Manage undo stack. +WidgetRedirector.py # Intercept widget subcommands (for percolator) (nim). +WindowList.py # Manage window list and define listed top level. +ZoomHeight.py # Zoom window to full height of screen. +aboutDialog.py # Display About IDLE dialog. +configDialog.py # Display user configuration dialogs. +configHandler.py # Load, fetch, and save configuration (nim). +configHelpSourceEdit.py # Specify help source. +configSectionNameDialog.py # Spefify user config section name +dynOptionMenuWidget.py # define mutable OptionMenu widget (nim). +help.py # Display IDLE's html doc. +keybindingDialog.py # Change keybindings. +macosxSupport.py # Help IDLE run on Macs (nim). +rpc.py # Commuicate between idle and user processes (nim). +run.py # Manage user code execution subprocess. +tabbedpages.py # Define tabbed pages widget (nim). +textView.py # Define read-only text widget (nim). -Applications which cannot support subprocesses and/or sockets can still run -IDLE in a single process. 
+Configuration +------------- +config-extensions.def # Defaults for extensions +config-highlight.def # Defaults for colorizing +config-keys.def # Defaults for key bindings +config-main.def # Defai;ts fpr font and geneal -IDLE has an integrated debugger with stepping, persistent breakpoints, and call -stack visibility. +Text +---- +CREDITS.txt # not maintained, displayed by About IDLE +HISTORY.txt # NEWS up to July 2001 +NEWS.txt # commits, displayed by About IDLE +README.txt # this file, displeyed by About IDLE +TODO.txt # needs review +extend.txt # about writing extensions +help.html # copy of idle.html in docs, displayed by IDLE Help -There is a GUI configuration manager which makes it easy to select fonts, -colors, keybindings, and startup options. This facility includes a feature -which allows the user to specify additional help sources, either locally or on -the web. +Subdirectories +-------------- +Icons # small image files +idle_test # files for human test and automated unit tests -IDLE is coded in 100% pure Python, using the Tkinter GUI toolkit (Tk/Tcl) -and is cross-platform, working on Unix, Mac, and Windows. +Unused and Deprecated files and objects (nim) +--------------------------------------------- +EditorWindow.py: Helpdialog and helpDialog +ToolTip.py: unused. +help.txt +idlever.py -IDLE accepts command line arguments. Try idle -h to see the options. +IDLE MENUS +Top level items and most submenu items are defined in Bindings. +Extenstions add submenu items when active. The names given are +found, quoted, in one of these modules, paired with a '<>'. +Each pseudoevent is bound to an event handler. Some event handlers +call another function that does the actual work. The annotations below +are intended to at least give the module where the actual work is done. -If you find bugs or have suggestions or patches, let us know about -them by using the Python issue tracker: +File # IOBindig except as noted + New File + Open... # IOBinding.open + Open Module + Recent Files + Class Browser # Class Browser + Path Browser # Path Browser + --- + Save # IDBinding.save + Save As... # IOBinding.save_as + Save Copy As... # IOBindling.save_a_copy + --- + Print Window # IOBinding.print_window + --- + Close + Exit -http://bugs.python.org +Edit + Undo # undoDelegator + Redo # undoDelegator + --- + Cut + Copy + Paste + Select All + --- # Next 5 items use SearchEngine; dialogs use SearchDialogBase + Find # Search Dialog + Find Again + Find Selection + Find in Files... # GrepDialog + Replace... # ReplaceDialog + Go to Line + Show Completions # AutoComplete extension and AutoCompleteWidow (&HP) + Expand Word # AutoExpand extension + Show call tip # Calltips extension and CalltipWindow (& Hyperparser) + Show surrounding parens # ParenMatch (& Hyperparser) -For further details and links, read the Help files and check the IDLE home -page at +Shell # PyShell + View Last Restart # PyShell.? + Restart Shell # PyShell.? -http://www.python.org/idle/ +Debug (Shell only) + Go to File/Line + Debugger # Debugger, RemoteDebugger + Stack Viewer # StackViewer + Auto-open Stack Viewer # StackViewer -There is a mail list for IDLE: idle-dev at python.org. 
You can join at +Format (Editor only) + Indent Region + Dedent Region + Comment Out Region + Uncomment Region + Tabify Region + Untabify Region + Toggle Tabs + New Indent Width + Format Paragraph # FormatParagraph extension + --- + Strip tailing whitespace # RstripExtension extension -http://mail.python.org/mailman/listinfo/idle-dev +Run (Editor only) + Python Shell # PyShell + --- + Check Module # ScriptBinding + Run Module # ScriptBinding + +Options + Configure IDLE # configDialog + (tabs in the dialog) + Font tab # onfig-main.def + Highlight tab # configSectionNameDialog, config-highlight.def + Keys tab # keybindingDialog, configSectionNameDialog, onfig-keus.def + General tab # configHelpSourceEdit, config-main.def + Configure Extensions # configDialog + Xyz tab # xyz.py, config-extensions.def + --- + Code Context (editor only) # CodeContext extension + +Window + Zoomheight # ZoomHeight extension + --- + # WindowList + +Help + About IDLE # aboutDialog + --- + IDLE Help # help + Python Doc + Turtle Demo + --- + + + (right click) +Defined in EditorWindow, PyShell, Output + Cut + Copy + Paste + --- + Go to file/line (shell and output only) + Set Breakpoint (editor only) + Clear Breakpoint (editor only) + Defined in Debugger + Go to source line + Show stack frame diff --git a/Lib/idlelib/help.html b/Lib/idlelib/help.html --- a/Lib/idlelib/help.html +++ b/Lib/idlelib/help.html @@ -75,7 +75,7 @@

        25.5. IDLE??

        -

        IDLE is the Python IDE built with the tkinter GUI toolkit.

        +

IDLE is Python’s Integrated Development and Learning Environment.

        IDLE has the following features:

        • coded in 100% pure Python, using the tkinter GUI toolkit
        • @@ -699,7 +699,7 @@ The Python Software Foundation is a non-profit corporation. Please donate.
          - Last updated on Sep 29, 2015. + Last updated on Oct 02, 2015. Found a bug?
          Created using Sphinx 1.2.3. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 3 08:17:40 2015 From: python-checkins at python.org (raymond.hettinger) Date: Sat, 03 Oct 2015 06:17:40 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Factor_out_common_iterator?= =?utf-8?q?_finalization_code?= Message-ID: <20151003061740.128842.49976@psf.io> https://hg.python.org/cpython/rev/8c21f32c5882 changeset: 98508:8c21f32c5882 user: Raymond Hettinger date: Fri Oct 02 23:17:33 2015 -0700 summary: Factor out common iterator finalization code files: Modules/_collectionsmodule.c | 45 ++++++++++------------- 1 files changed, 20 insertions(+), 25 deletions(-) diff --git a/Modules/_collectionsmodule.c b/Modules/_collectionsmodule.c --- a/Modules/_collectionsmodule.c +++ b/Modules/_collectionsmodule.c @@ -350,21 +350,34 @@ PyDoc_STRVAR(appendleft_doc, "Add an element to the left side of the deque."); +static PyObject* +finalize_iterator(PyObject *it) +{ + if (PyErr_Occurred()) { + if (PyErr_ExceptionMatches(PyExc_StopIteration)) + PyErr_Clear(); + else { + Py_DECREF(it); + return NULL; + } + } + Py_DECREF(it); + Py_RETURN_NONE; +} /* Run an iterator to exhaustion. Shortcut for the extend/extendleft methods when maxlen == 0. */ static PyObject* consume_iterator(PyObject *it) { + PyObject *(*iternext)(PyObject *); PyObject *item; - while ((item = PyIter_Next(it)) != NULL) { + iternext = *Py_TYPE(it)->tp_iternext; + while ((item = iternext(it)) != NULL) { Py_DECREF(item); } - Py_DECREF(it); - if (PyErr_Occurred()) - return NULL; - Py_RETURN_NONE; + return finalize_iterator(it); } static PyObject * @@ -423,16 +436,7 @@ if (trim) deque_trim_left(deque); } - if (PyErr_Occurred()) { - if (PyErr_ExceptionMatches(PyExc_StopIteration)) - PyErr_Clear(); - else { - Py_DECREF(it); - return NULL; - } - } - Py_DECREF(it); - Py_RETURN_NONE; + return finalize_iterator(it); } PyDoc_STRVAR(extend_doc, @@ -494,16 +498,7 @@ if (trim) deque_trim_right(deque); } - if (PyErr_Occurred()) { - if (PyErr_ExceptionMatches(PyExc_StopIteration)) - PyErr_Clear(); - else { - Py_DECREF(it); - return NULL; - } - } - Py_DECREF(it); - Py_RETURN_NONE; + return finalize_iterator(it); } PyDoc_STRVAR(extendleft_doc, -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 3 08:44:33 2015 From: python-checkins at python.org (martin.panter) Date: Sat, 03 Oct 2015 06:44:33 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzI1MjMy?= =?utf-8?q?=3A_Fix_CGIRequestHandler=27s_splitting_of_URL_query?= Message-ID: <20151003064433.464.15192@psf.io> https://hg.python.org/cpython/rev/969afbf501af changeset: 98509:969afbf501af branch: 3.4 parent: 98505:bb1a8d3dd4a1 user: Martin Panter date: Sat Oct 03 05:38:07 2015 +0000 summary: Issue #25232: Fix CGIRequestHandler's splitting of URL query Patch from Xiang Zhang. files: Lib/http/server.py | 6 +----- Lib/test/test_httpservers.py | 24 ++++++++++++++++++++++++ Misc/ACKS | 1 + Misc/NEWS | 3 +++ 4 files changed, 29 insertions(+), 5 deletions(-) diff --git a/Lib/http/server.py b/Lib/http/server.py --- a/Lib/http/server.py +++ b/Lib/http/server.py @@ -1032,11 +1032,7 @@ break # find an explicit query string, if present. - i = rest.rfind('?') - if i >= 0: - rest, query = rest[:i], rest[i+1:] - else: - query = '' + rest, _, query = rest.partition('?') # dissect the part after the directory name into a script name & # a possible additional path, to be stored in PATH_INFO. 
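For illustration only (this sketch is not part of the changeset): the server.py hunk above swaps the old last-question-mark split (rfind) for str.partition, which cuts at the first '?'. Using the same URL as the new test, the difference is:

    # Standalone sketch: split a CGI URL at the last vs. the first '?'
    # when the query string itself contains a '?'.
    rest = '/cgi-bin/file4.py?a=b?c=d'

    # Old behaviour (rfind): split at the last '?'; 'a=b' is folded into the
    # path part and the query is truncated to 'c=d'.
    i = rest.rfind('?')
    if i >= 0:
        old_rest, old_query = rest[:i], rest[i+1:]
    else:
        old_rest, old_query = rest, ''

    # New behaviour (partition): split at the first '?'; the query survives intact.
    new_rest, _, new_query = rest.partition('?')

    print(old_query)   # c=d
    print(new_query)   # a=b?c=d  (what test_query_with_multiple_question_mark expects)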
diff --git a/Lib/test/test_httpservers.py b/Lib/test/test_httpservers.py --- a/Lib/test/test_httpservers.py +++ b/Lib/test/test_httpservers.py @@ -366,6 +366,16 @@ form.getfirst("bacon"))) """ +cgi_file4 = """\ +#!%s +import os + +print("Content-type: text/html") +print() + +print(os.environ["%s"]) +""" + @unittest.skipIf(hasattr(os, 'geteuid') and os.geteuid() == 0, "This test can't be run reliably as root (issue #13308).") @@ -387,6 +397,7 @@ self.file1_path = None self.file2_path = None self.file3_path = None + self.file4_path = None # The shebang line should be pure ASCII: use symlink if possible. # See issue #7668. @@ -425,6 +436,11 @@ file3.write(cgi_file1 % self.pythonexe) os.chmod(self.file3_path, 0o777) + self.file4_path = os.path.join(self.cgi_dir, 'file4.py') + with open(self.file4_path, 'w', encoding='utf-8') as file4: + file4.write(cgi_file4 % (self.pythonexe, 'QUERY_STRING')) + os.chmod(self.file4_path, 0o777) + os.chdir(self.parent_dir) def tearDown(self): @@ -440,6 +456,8 @@ os.remove(self.file2_path) if self.file3_path: os.remove(self.file3_path) + if self.file4_path: + os.remove(self.file4_path) os.rmdir(self.cgi_child_dir) os.rmdir(self.cgi_dir) os.rmdir(self.parent_dir) @@ -541,6 +559,12 @@ self.assertEqual((b'Hello World' + self.linesep, 'text/html', 200), (res.read(), res.getheader('Content-type'), res.status)) + def test_query_with_multiple_question_mark(self): + res = self.request('/cgi-bin/file4.py?a=b?c=d') + self.assertEqual( + (b'a=b?c=d' + self.linesep, 'text/html', 200), + (res.read(), res.getheader('Content-type'), res.status)) + class SocketlessRequestHandler(SimpleHTTPRequestHandler): def __init__(self): diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -1531,6 +1531,7 @@ Daniel Wozniak Heiko Wundram Doug Wyatt +Xiang Zhang Robert Xiao Florent Xicluna Hirokazu Yamamoto diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -90,6 +90,9 @@ Library ------- +- Issue #25232: Fix CGIRequestHandler to split the query from the URL at the + first question mark (?) rather than the last. Patch from Xiang Zhang. + - Issue #22958: Constructor and update method of weakref.WeakValueDictionary now accept the self and the dict keyword arguments. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 3 08:44:33 2015 From: python-checkins at python.org (martin.panter) Date: Sat, 03 Oct 2015 06:44:33 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzI0NjU3?= =?utf-8?q?=3A_Prevent_CGIRequestHandler_from_collapsing_the_URL_query?= Message-ID: <20151003064433.128856.78507@psf.io> https://hg.python.org/cpython/rev/634fe6a90e0c changeset: 98510:634fe6a90e0c branch: 3.4 user: Martin Panter date: Sat Oct 03 05:55:46 2015 +0000 summary: Issue #24657: Prevent CGIRequestHandler from collapsing the URL query Initial patch from Xiang Zhang. Also fix out-of-date _url_collapse_path() doc string. files: Lib/http/server.py | 13 +++++++++---- Lib/test/test_httpservers.py | 7 +++++++ Misc/NEWS | 3 +++ 3 files changed, 19 insertions(+), 4 deletions(-) diff --git a/Lib/http/server.py b/Lib/http/server.py --- a/Lib/http/server.py +++ b/Lib/http/server.py @@ -887,13 +887,15 @@ The utility of this function is limited to is_cgi method and helps preventing some security attacks. - Returns: A tuple of (head, tail) where tail is everything after the final / - and head is everything before it. Head will always start with a '/' and, - if it contains anything else, never have a trailing '/'. 
+ Returns: The reconstituted URL, which will always start with a '/'. Raises: IndexError if too many '..' occur within the path. """ + # Query component should not be involved. + path, _, query = path.partition('?') + path = urllib.parse.unquote(path) + # Similar to os.path.split(os.path.normpath(path)) but specific to URL # path semantics rather than local operating system semantics. path_parts = path.split('/') @@ -914,6 +916,9 @@ else: tail_part = '' + if query: + tail_part = '?'.join((tail_part, query)) + splitpath = ('/' + '/'.join(head_parts), tail_part) collapsed_path = "/".join(splitpath) @@ -995,7 +1000,7 @@ (and the next character is a '/' or the end of the string). """ - collapsed_path = _url_collapse_path(urllib.parse.unquote(self.path)) + collapsed_path = _url_collapse_path(self.path) dir_sep = collapsed_path.find('/', 1) head, tail = collapsed_path[:dir_sep], collapsed_path[dir_sep+1:] if head in self.cgi_directories: diff --git a/Lib/test/test_httpservers.py b/Lib/test/test_httpservers.py --- a/Lib/test/test_httpservers.py +++ b/Lib/test/test_httpservers.py @@ -565,6 +565,13 @@ (b'a=b?c=d' + self.linesep, 'text/html', 200), (res.read(), res.getheader('Content-type'), res.status)) + def test_query_with_continuous_slashes(self): + res = self.request('/cgi-bin/file4.py?k=aa%2F%2Fbb&//q//p//=//a//b//') + self.assertEqual( + (b'k=aa%2F%2Fbb&//q//p//=//a//b//' + self.linesep, + 'text/html', 200), + (res.read(), res.getheader('Content-type'), res.status)) + class SocketlessRequestHandler(SimpleHTTPRequestHandler): def __init__(self): diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -93,6 +93,9 @@ - Issue #25232: Fix CGIRequestHandler to split the query from the URL at the first question mark (?) rather than the last. Patch from Xiang Zhang. +- Issue #24657: Prevent CGIRequestHandler from collapsing slashes in the + query part of the URL as if it were a path. Patch from Xiang Zhang. + - Issue #22958: Constructor and update method of weakref.WeakValueDictionary now accept the self and the dict keyword arguments. 
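The change above reorders _url_collapse_path so that the query component is detached before the path is unquoted and normalised, then reattached verbatim; percent-encoded or doubled slashes inside the query are therefore no longer treated as path separators. A simplified standalone sketch of that idea (the helper name is made up, and unlike the real function it neither resolves '..' components nor raises IndexError):

    import urllib.parse

    def collapse_path_keep_query(path):
        # Strip the query first, unquote and normalise only the path,
        # then put the query back untouched.
        path, _, query = path.partition('?')
        path = urllib.parse.unquote(path)
        parts = [p for p in path.split('/') if p not in ('', '.')]
        collapsed = '/' + '/'.join(parts)
        return collapsed + ('?' + query if query else '')

    print(collapse_path_keep_query(
        '/cgi-bin/file4.py?k=aa%2F%2Fbb&//q//p//=//a//b//'))
    # -> /cgi-bin/file4.py?k=aa%2F%2Fbb&//q//p//=//a//b//  (query preserved)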
-- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 3 08:44:33 2015 From: python-checkins at python.org (martin.panter) Date: Sat, 03 Oct 2015 06:44:33 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy41KTogSXNzdWVzICMyNTIz?= =?utf-8?q?2=2C_=2324657=3A_Use_new_enum_status_to_match_rest_of_tests?= Message-ID: <20151003064433.18388.7153@psf.io> https://hg.python.org/cpython/rev/88918f2a54df changeset: 98512:88918f2a54df branch: 3.5 user: Martin Panter date: Sat Oct 03 06:07:22 2015 +0000 summary: Issues #25232, #24657: Use new enum status to match rest of tests files: Lib/test/test_httpservers.py | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Lib/test/test_httpservers.py b/Lib/test/test_httpservers.py --- a/Lib/test/test_httpservers.py +++ b/Lib/test/test_httpservers.py @@ -627,14 +627,14 @@ def test_query_with_multiple_question_mark(self): res = self.request('/cgi-bin/file4.py?a=b?c=d') self.assertEqual( - (b'a=b?c=d' + self.linesep, 'text/html', 200), + (b'a=b?c=d' + self.linesep, 'text/html', HTTPStatus.OK), (res.read(), res.getheader('Content-type'), res.status)) def test_query_with_continuous_slashes(self): res = self.request('/cgi-bin/file4.py?k=aa%2F%2Fbb&//q//p//=//a//b//') self.assertEqual( (b'k=aa%2F%2Fbb&//q//p//=//a//b//' + self.linesep, - 'text/html', 200), + 'text/html', HTTPStatus.OK), (res.read(), res.getheader('Content-type'), res.status)) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 3 08:44:33 2015 From: python-checkins at python.org (martin.panter) Date: Sat, 03 Oct 2015 06:44:33 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_Issues_=2325232=2C_=2324657=3A_Merge_two_CGI_server_fixes_from?= =?utf-8?q?_3=2E4_into_3=2E5?= Message-ID: <20151003064433.70981.68303@psf.io> https://hg.python.org/cpython/rev/ba1e3c112e42 changeset: 98511:ba1e3c112e42 branch: 3.5 parent: 98506:227f959f43e7 parent: 98510:634fe6a90e0c user: Martin Panter date: Sat Oct 03 06:03:25 2015 +0000 summary: Issues #25232, #24657: Merge two CGI server fixes from 3.4 into 3.5 files: Lib/http/server.py | 19 +++++++------ Lib/test/test_httpservers.py | 31 ++++++++++++++++++++++++ Misc/ACKS | 1 + Misc/NEWS | 6 ++++ 4 files changed, 48 insertions(+), 9 deletions(-) diff --git a/Lib/http/server.py b/Lib/http/server.py --- a/Lib/http/server.py +++ b/Lib/http/server.py @@ -837,13 +837,15 @@ The utility of this function is limited to is_cgi method and helps preventing some security attacks. - Returns: A tuple of (head, tail) where tail is everything after the final / - and head is everything before it. Head will always start with a '/' and, - if it contains anything else, never have a trailing '/'. + Returns: The reconstituted URL, which will always start with a '/'. Raises: IndexError if too many '..' occur within the path. """ + # Query component should not be involved. + path, _, query = path.partition('?') + path = urllib.parse.unquote(path) + # Similar to os.path.split(os.path.normpath(path)) but specific to URL # path semantics rather than local operating system semantics. path_parts = path.split('/') @@ -864,6 +866,9 @@ else: tail_part = '' + if query: + tail_part = '?'.join((tail_part, query)) + splitpath = ('/' + '/'.join(head_parts), tail_part) collapsed_path = "/".join(splitpath) @@ -947,7 +952,7 @@ (and the next character is a '/' or the end of the string). 
""" - collapsed_path = _url_collapse_path(urllib.parse.unquote(self.path)) + collapsed_path = _url_collapse_path(self.path) dir_sep = collapsed_path.find('/', 1) head, tail = collapsed_path[:dir_sep], collapsed_path[dir_sep+1:] if head in self.cgi_directories: @@ -984,11 +989,7 @@ break # find an explicit query string, if present. - i = rest.rfind('?') - if i >= 0: - rest, query = rest[:i], rest[i+1:] - else: - query = '' + rest, _, query = rest.partition('?') # dissect the part after the directory name into a script name & # a possible additional path, to be stored in PATH_INFO. diff --git a/Lib/test/test_httpservers.py b/Lib/test/test_httpservers.py --- a/Lib/test/test_httpservers.py +++ b/Lib/test/test_httpservers.py @@ -425,6 +425,16 @@ form.getfirst("bacon"))) """ +cgi_file4 = """\ +#!%s +import os + +print("Content-type: text/html") +print() + +print(os.environ["%s"]) +""" + @unittest.skipIf(hasattr(os, 'geteuid') and os.geteuid() == 0, "This test can't be run reliably as root (issue #13308).") @@ -446,6 +456,7 @@ self.file1_path = None self.file2_path = None self.file3_path = None + self.file4_path = None # The shebang line should be pure ASCII: use symlink if possible. # See issue #7668. @@ -484,6 +495,11 @@ file3.write(cgi_file1 % self.pythonexe) os.chmod(self.file3_path, 0o777) + self.file4_path = os.path.join(self.cgi_dir, 'file4.py') + with open(self.file4_path, 'w', encoding='utf-8') as file4: + file4.write(cgi_file4 % (self.pythonexe, 'QUERY_STRING')) + os.chmod(self.file4_path, 0o777) + os.chdir(self.parent_dir) def tearDown(self): @@ -499,6 +515,8 @@ os.remove(self.file2_path) if self.file3_path: os.remove(self.file3_path) + if self.file4_path: + os.remove(self.file4_path) os.rmdir(self.cgi_child_dir) os.rmdir(self.cgi_dir) os.rmdir(self.parent_dir) @@ -606,6 +624,19 @@ (b'Hello World' + self.linesep, 'text/html', HTTPStatus.OK), (res.read(), res.getheader('Content-type'), res.status)) + def test_query_with_multiple_question_mark(self): + res = self.request('/cgi-bin/file4.py?a=b?c=d') + self.assertEqual( + (b'a=b?c=d' + self.linesep, 'text/html', 200), + (res.read(), res.getheader('Content-type'), res.status)) + + def test_query_with_continuous_slashes(self): + res = self.request('/cgi-bin/file4.py?k=aa%2F%2Fbb&//q//p//=//a//b//') + self.assertEqual( + (b'k=aa%2F%2Fbb&//q//p//=//a//b//' + self.linesep, + 'text/html', 200), + (res.read(), res.getheader('Content-type'), res.status)) + class SocketlessRequestHandler(SimpleHTTPRequestHandler): def __init__(self): diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -1580,6 +1580,7 @@ Wei Wu Heiko Wundram Doug Wyatt +Xiang Zhang Robert Xiao Florent Xicluna Hirokazu Yamamoto diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -34,6 +34,12 @@ Library ------- +- Issue #25232: Fix CGIRequestHandler to split the query from the URL at the + first question mark (?) rather than the last. Patch from Xiang Zhang. + +- Issue #24657: Prevent CGIRequestHandler from collapsing slashes in the + query part of the URL as if it were a path. Patch from Xiang Zhang. + - Issue #24483: C implementation of functools.lru_cache() now calculates key's hash only once. 
-- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 3 08:44:34 2015 From: python-checkins at python.org (martin.panter) Date: Sat, 03 Oct 2015 06:44:34 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Issues_=2325232=2C_=2324657=3A_Merge_two_CGI_server_fixe?= =?utf-8?q?s_from_3=2E5?= Message-ID: <20151003064433.7246.58921@psf.io> https://hg.python.org/cpython/rev/0f03023d4318 changeset: 98513:0f03023d4318 parent: 98508:8c21f32c5882 parent: 98512:88918f2a54df user: Martin Panter date: Sat Oct 03 06:43:19 2015 +0000 summary: Issues #25232, #24657: Merge two CGI server fixes from 3.5 files: Lib/http/server.py | 19 +++++++------ Lib/test/test_httpservers.py | 31 ++++++++++++++++++++++++ Misc/ACKS | 1 + Misc/NEWS | 6 ++++ 4 files changed, 48 insertions(+), 9 deletions(-) diff --git a/Lib/http/server.py b/Lib/http/server.py --- a/Lib/http/server.py +++ b/Lib/http/server.py @@ -837,13 +837,15 @@ The utility of this function is limited to is_cgi method and helps preventing some security attacks. - Returns: A tuple of (head, tail) where tail is everything after the final / - and head is everything before it. Head will always start with a '/' and, - if it contains anything else, never have a trailing '/'. + Returns: The reconstituted URL, which will always start with a '/'. Raises: IndexError if too many '..' occur within the path. """ + # Query component should not be involved. + path, _, query = path.partition('?') + path = urllib.parse.unquote(path) + # Similar to os.path.split(os.path.normpath(path)) but specific to URL # path semantics rather than local operating system semantics. path_parts = path.split('/') @@ -864,6 +866,9 @@ else: tail_part = '' + if query: + tail_part = '?'.join((tail_part, query)) + splitpath = ('/' + '/'.join(head_parts), tail_part) collapsed_path = "/".join(splitpath) @@ -947,7 +952,7 @@ (and the next character is a '/' or the end of the string). """ - collapsed_path = _url_collapse_path(urllib.parse.unquote(self.path)) + collapsed_path = _url_collapse_path(self.path) dir_sep = collapsed_path.find('/', 1) head, tail = collapsed_path[:dir_sep], collapsed_path[dir_sep+1:] if head in self.cgi_directories: @@ -984,11 +989,7 @@ break # find an explicit query string, if present. - i = rest.rfind('?') - if i >= 0: - rest, query = rest[:i], rest[i+1:] - else: - query = '' + rest, _, query = rest.partition('?') # dissect the part after the directory name into a script name & # a possible additional path, to be stored in PATH_INFO. diff --git a/Lib/test/test_httpservers.py b/Lib/test/test_httpservers.py --- a/Lib/test/test_httpservers.py +++ b/Lib/test/test_httpservers.py @@ -425,6 +425,16 @@ form.getfirst("bacon"))) """ +cgi_file4 = """\ +#!%s +import os + +print("Content-type: text/html") +print() + +print(os.environ["%s"]) +""" + @unittest.skipIf(hasattr(os, 'geteuid') and os.geteuid() == 0, "This test can't be run reliably as root (issue #13308).") @@ -446,6 +456,7 @@ self.file1_path = None self.file2_path = None self.file3_path = None + self.file4_path = None # The shebang line should be pure ASCII: use symlink if possible. # See issue #7668. 
@@ -484,6 +495,11 @@ file3.write(cgi_file1 % self.pythonexe) os.chmod(self.file3_path, 0o777) + self.file4_path = os.path.join(self.cgi_dir, 'file4.py') + with open(self.file4_path, 'w', encoding='utf-8') as file4: + file4.write(cgi_file4 % (self.pythonexe, 'QUERY_STRING')) + os.chmod(self.file4_path, 0o777) + os.chdir(self.parent_dir) def tearDown(self): @@ -499,6 +515,8 @@ os.remove(self.file2_path) if self.file3_path: os.remove(self.file3_path) + if self.file4_path: + os.remove(self.file4_path) os.rmdir(self.cgi_child_dir) os.rmdir(self.cgi_dir) os.rmdir(self.parent_dir) @@ -606,6 +624,19 @@ (b'Hello World' + self.linesep, 'text/html', HTTPStatus.OK), (res.read(), res.getheader('Content-type'), res.status)) + def test_query_with_multiple_question_mark(self): + res = self.request('/cgi-bin/file4.py?a=b?c=d') + self.assertEqual( + (b'a=b?c=d' + self.linesep, 'text/html', HTTPStatus.OK), + (res.read(), res.getheader('Content-type'), res.status)) + + def test_query_with_continuous_slashes(self): + res = self.request('/cgi-bin/file4.py?k=aa%2F%2Fbb&//q//p//=//a//b//') + self.assertEqual( + (b'k=aa%2F%2Fbb&//q//p//=//a//b//' + self.linesep, + 'text/html', HTTPStatus.OK), + (res.read(), res.getheader('Content-type'), res.status)) + class SocketlessRequestHandler(SimpleHTTPRequestHandler): def __init__(self): diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -1582,6 +1582,7 @@ Wei Wu Heiko Wundram Doug Wyatt +Xiang Zhang Robert Xiao Florent Xicluna Hirokazu Yamamoto diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -218,6 +218,12 @@ Library ------- +- Issue #25232: Fix CGIRequestHandler to split the query from the URL at the + first question mark (?) rather than the last. Patch from Xiang Zhang. + +- Issue #24657: Prevent CGIRequestHandler from collapsing slashes in the + query part of the URL as if it were a path. Patch from Xiang Zhang. + - Issue #24483: C implementation of functools.lru_cache() now calculates key's hash only once. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 3 08:44:34 2015 From: python-checkins at python.org (martin.panter) Date: Sat, 03 Oct 2015 06:44:34 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issues_=2325232=2C_=232465?= =?utf-8?q?7=3A_Add_NEWS_to_3=2E6=2E0a1_section?= Message-ID: <20151003064434.55474.95508@psf.io> https://hg.python.org/cpython/rev/3c006ee38287 changeset: 98514:3c006ee38287 user: Martin Panter date: Sat Oct 03 06:25:43 2015 +0000 summary: Issues #25232, #24657: Add NEWS to 3.6.0a1 section files: Misc/NEWS | 6 ++++++ 1 files changed, 6 insertions(+), 0 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -40,6 +40,12 @@ Library ------- +- Issue #25232: Fix CGIRequestHandler to split the query from the URL at the + first question mark (?) rather than the last. Patch from Xiang Zhang. + +- Issue #24657: Prevent CGIRequestHandler from collapsing slashes in the + query part of the URL as if it were a path. Patch from Xiang Zhang. + - Issue #25287: Don't add crypt.METHOD_CRYPT to crypt.methods if it's not supported. Check if it is supported, it may not be supported on OpenBSD for example. 
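As a small follow-on illustration (the value mirrors the new test, and the splitting code is the unchanged part of is_cgi shown in the diff above): once the query survives _url_collapse_path, is_cgi still only looks at everything up to the second '/' to pick the CGI directory, and the script name plus the untouched query end up in the tail.

    collapsed_path = '/cgi-bin/file4.py?a=b?c=d'
    dir_sep = collapsed_path.find('/', 1)
    head, tail = collapsed_path[:dir_sep], collapsed_path[dir_sep + 1:]
    print(head)  # /cgi-bin
    print(tail)  # file4.py?a=b?c=d
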
-- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 3 09:27:37 2015 From: python-checkins at python.org (martin.panter) Date: Sat, 03 Oct 2015 07:27:37 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogSXNzdWUgIzI1MjMy?= =?utf-8?q?=3A_Fix_CGIRequestHandler=27s_splitting_of_URL_query?= Message-ID: <20151003072737.128828.54306@psf.io> https://hg.python.org/cpython/rev/b12b30dc8617 changeset: 98515:b12b30dc8617 branch: 2.7 parent: 98504:4e62989e3688 user: Martin Panter date: Sat Oct 03 05:38:07 2015 +0000 summary: Issue #25232: Fix CGIRequestHandler's splitting of URL query Patch from Xiang Zhang. files: Lib/CGIHTTPServer.py | 6 +----- Lib/test/test_httpservers.py | 22 ++++++++++++++++++++++ Misc/ACKS | 1 + Misc/NEWS | 3 +++ 4 files changed, 27 insertions(+), 5 deletions(-) diff --git a/Lib/CGIHTTPServer.py b/Lib/CGIHTTPServer.py --- a/Lib/CGIHTTPServer.py +++ b/Lib/CGIHTTPServer.py @@ -120,11 +120,7 @@ break # find an explicit query string, if present. - i = rest.rfind('?') - if i >= 0: - rest, query = rest[:i], rest[i+1:] - else: - query = '' + rest, _, query = rest.partition('?') # dissect the part after the directory name into a script name & # a possible additional path, to be stored in PATH_INFO. diff --git a/Lib/test/test_httpservers.py b/Lib/test/test_httpservers.py --- a/Lib/test/test_httpservers.py +++ b/Lib/test/test_httpservers.py @@ -381,6 +381,16 @@ form.getfirst("bacon")) """ +cgi_file4 = """\ +#!%s +import os + +print("Content-type: text/html") +print() + +print(os.environ["%s"]) +""" + @unittest.skipIf(hasattr(os, 'geteuid') and os.geteuid() == 0, "This test can't be run reliably as root (issue #13308).") @@ -424,6 +434,11 @@ file3.write(cgi_file1 % self.pythonexe) os.chmod(self.file3_path, 0777) + self.file4_path = os.path.join(self.cgi_dir, 'file4.py') + with open(self.file4_path, 'w') as file4: + file4.write(cgi_file4 % (self.pythonexe, 'QUERY_STRING')) + os.chmod(self.file4_path, 0o777) + self.cwd = os.getcwd() os.chdir(self.parent_dir) @@ -436,6 +451,7 @@ os.remove(self.file1_path) os.remove(self.file2_path) os.remove(self.file3_path) + os.remove(self.file4_path) os.rmdir(self.cgi_child_dir) os.rmdir(self.cgi_dir) os.rmdir(self.parent_dir) @@ -536,6 +552,12 @@ self.assertEqual((b'Hello World\n', 'text/html', 200), (res.read(), res.getheader('Content-type'), res.status)) + def test_query_with_multiple_question_mark(self): + res = self.request('/cgi-bin/file4.py?a=b?c=d') + self.assertEqual( + (b'a=b?c=d\n', 'text/html', 200), + (res.read(), res.getheader('Content-type'), res.status)) + class SimpleHTTPRequestHandlerTestCase(unittest.TestCase): """ Test url parsing """ diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -1505,6 +1505,7 @@ Daniel Wozniak Heiko Wundram Doug Wyatt +Xiang Zhang Robert Xiao Florent Xicluna Hirokazu Yamamoto diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -43,6 +43,9 @@ Library ------- +- Issue #25232: Fix CGIRequestHandler to split the query from the URL at the + first question mark (?) rather than the last. Patch from Xiang Zhang. + - Issue #22958: Constructor and update method of weakref.WeakValueDictionary now accept the self keyword argument. 
-- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 3 09:27:37 2015 From: python-checkins at python.org (martin.panter) Date: Sat, 03 Oct 2015 07:27:37 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogSXNzdWUgIzI0NjU3?= =?utf-8?q?=3A_Prevent_CGIRequestHandler_from_collapsing_the_URL_query?= Message-ID: <20151003072737.453.9704@psf.io> https://hg.python.org/cpython/rev/a4302005f9a2 changeset: 98516:a4302005f9a2 branch: 2.7 user: Martin Panter date: Sat Oct 03 05:55:46 2015 +0000 summary: Issue #24657: Prevent CGIRequestHandler from collapsing the URL query Initial patch from Xiang Zhang. Also fix out-of-date _url_collapse_path() doc string. files: Lib/CGIHTTPServer.py | 13 +++++++++---- Lib/test/test_httpservers.py | 7 +++++++ Misc/NEWS | 3 +++ 3 files changed, 19 insertions(+), 4 deletions(-) diff --git a/Lib/CGIHTTPServer.py b/Lib/CGIHTTPServer.py --- a/Lib/CGIHTTPServer.py +++ b/Lib/CGIHTTPServer.py @@ -84,7 +84,7 @@ path begins with one of the strings in self.cgi_directories (and the next character is a '/' or the end of the string). """ - collapsed_path = _url_collapse_path(urllib.unquote(self.path)) + collapsed_path = _url_collapse_path(self.path) dir_sep = collapsed_path.find('/', 1) head, tail = collapsed_path[:dir_sep], collapsed_path[dir_sep+1:] if head in self.cgi_directories: @@ -304,13 +304,15 @@ The utility of this function is limited to is_cgi method and helps preventing some security attacks. - Returns: A tuple of (head, tail) where tail is everything after the final / - and head is everything before it. Head will always start with a '/' and, - if it contains anything else, never have a trailing '/'. + Returns: The reconstituted URL, which will always start with a '/'. Raises: IndexError if too many '..' occur within the path. """ + # Query component should not be involved. + path, _, query = path.partition('?') + path = urllib.unquote(path) + # Similar to os.path.split(os.path.normpath(path)) but specific to URL # path semantics rather than local operating system semantics. path_parts = path.split('/') @@ -331,6 +333,9 @@ else: tail_part = '' + if query: + tail_part = '?'.join((tail_part, query)) + splitpath = ('/' + '/'.join(head_parts), tail_part) collapsed_path = "/".join(splitpath) diff --git a/Lib/test/test_httpservers.py b/Lib/test/test_httpservers.py --- a/Lib/test/test_httpservers.py +++ b/Lib/test/test_httpservers.py @@ -558,6 +558,13 @@ (b'a=b?c=d\n', 'text/html', 200), (res.read(), res.getheader('Content-type'), res.status)) + def test_query_with_continuous_slashes(self): + res = self.request('/cgi-bin/file4.py?k=aa%2F%2Fbb&//q//p//=//a//b//') + self.assertEqual( + (b'k=aa%2F%2Fbb&//q//p//=//a//b//\n', + 'text/html', 200), + (res.read(), res.getheader('Content-type'), res.status)) + class SimpleHTTPRequestHandlerTestCase(unittest.TestCase): """ Test url parsing """ diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -46,6 +46,9 @@ - Issue #25232: Fix CGIRequestHandler to split the query from the URL at the first question mark (?) rather than the last. Patch from Xiang Zhang. +- Issue #24657: Prevent CGIRequestHandler from collapsing slashes in the + query part of the URL as if it were a path. Patch from Xiang Zhang. + - Issue #22958: Constructor and update method of weakref.WeakValueDictionary now accept the self keyword argument. 
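The same idea in isolation, as a standalone sketch rather than the library code itself (written with the Python 3 spelling; the 2.7 patch above does the same thing with urllib.unquote, and posixpath.normpath stands in here for the real _url_collapse_path helper): detach the query before unquoting and collapsing, then reattach it, so '%2F' and '//' runs inside the query are no longer rewritten as if they were path separators.

    import posixpath
    import urllib.parse

    def collapse_keeping_query(url):
        path, _, query = url.partition('?')
        path = urllib.parse.unquote(path)
        collapsed = posixpath.normpath(path)  # rough stand-in for _url_collapse_path
        return collapsed + ('?' + query if query else '')

    print(collapse_keeping_query('/cgi-bin/file4.py?k=aa%2F%2Fbb&//q//p//=//a//b//'))
    # -> /cgi-bin/file4.py?k=aa%2F%2Fbb&//q//p//=//a//b//

Before the fix, the whole URL was unquoted and collapsed in one go, so QUERY_STRING reached the script with its slashes rewritten, which is what the new test_query_with_continuous_slashes test checks against.
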
-- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 3 10:02:22 2015 From: python-checkins at python.org (martin.panter) Date: Sat, 03 Oct 2015 08:02:22 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzE2NzAx?= =?utf-8?q?=3A_Document_+=3D_and_*=3D_for_mutable_sequences?= Message-ID: <20151003080222.128844.82741@psf.io> https://hg.python.org/cpython/rev/f83db23bec7f changeset: 98518:f83db23bec7f branch: 3.4 parent: 98510:634fe6a90e0c user: Martin Panter date: Sat Oct 03 07:46:04 2015 +0000 summary: Issue #16701: Document += and *= for mutable sequences files: Doc/library/stdtypes.rst | 14 ++++++++++++-- 1 files changed, 12 insertions(+), 2 deletions(-) diff --git a/Doc/library/stdtypes.rst b/Doc/library/stdtypes.rst --- a/Doc/library/stdtypes.rst +++ b/Doc/library/stdtypes.rst @@ -1063,10 +1063,14 @@ | ``s.copy()`` | creates a shallow copy of ``s``| \(5) | | | (same as ``s[:]``) | | +------------------------------+--------------------------------+---------------------+ -| ``s.extend(t)`` | extends *s* with the | | -| | contents of *t* (same as | | +| ``s.extend(t)`` or | extends *s* with the | | +| ``s += t`` | contents of *t* (for the | | +| | most part the same as | | | | ``s[len(s):len(s)] = t``) | | +------------------------------+--------------------------------+---------------------+ +| ``s *= n`` | updates *s* with its contents | \(6) | +| | repeated *n* times | | ++------------------------------+--------------------------------+---------------------+ | ``s.insert(i, x)`` | inserts *x* into *s* at the | | | | index given by *i* | | | | (same as ``s[i:i] = [x]``) | | @@ -1107,6 +1111,12 @@ .. versionadded:: 3.3 :meth:`clear` and :meth:`!copy` methods. +(6) + The value *n* is an integer, or an object implementing + :meth:`~object.__index__`. Zero and negative values of *n* clear + the sequence. Items in the sequence are not copied; they are referenced + multiple times, as explained for ``s * n`` under :ref:`typesseq-common`. + .. 
_typesseq-list: -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 3 10:02:22 2015 From: python-checkins at python.org (martin.panter) Date: Sat, 03 Oct 2015 08:02:22 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogSXNzdWUgIzE2NzAx?= =?utf-8?q?=3A_Document_+=3D_and_*=3D_for_mutable_sequences?= Message-ID: <20151003080222.2677.42413@psf.io> https://hg.python.org/cpython/rev/ec373d762213 changeset: 98517:ec373d762213 branch: 2.7 user: Martin Panter date: Sat Oct 03 07:37:22 2015 +0000 summary: Issue #16701: Document += and *= for mutable sequences files: Doc/library/stdtypes.rst | 13 +++++++++++-- 1 files changed, 11 insertions(+), 2 deletions(-) diff --git a/Doc/library/stdtypes.rst b/Doc/library/stdtypes.rst --- a/Doc/library/stdtypes.rst +++ b/Doc/library/stdtypes.rst @@ -1613,8 +1613,11 @@ | ``s.append(x)`` | same as ``s[len(s):len(s)] = | \(2) | | | [x]`` | | +------------------------------+--------------------------------+---------------------+ -| ``s.extend(x)`` | same as ``s[len(s):len(s)] = | \(3) | -| | x`` | | +| ``s.extend(x)`` or | for the most part the same as | \(3) | +| ``s += t`` | ``s[len(s):len(s)] = x`` | | ++------------------------------+--------------------------------+---------------------+ +| ``s *= n`` | updates *s* with its contents | \(11) | +| | repeated *n* times | | +------------------------------+--------------------------------+---------------------+ | ``s.count(x)`` | return number of *i*'s for | | | | which ``s[i] == x`` | | @@ -1720,6 +1723,12 @@ :exc:`ValueError` if it can detect that the list has been mutated during a sort. +(11) + The value *n* is an integer, or an object implementing + :meth:`~object.__index__`. Zero and negative values of *n* clear + the sequence. Items in the sequence are not copied; they are referenced + multiple times, as explained for ``s * n`` under :ref:`typesseq`. + .. 
_types-set: -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 3 10:02:23 2015 From: python-checkins at python.org (martin.panter) Date: Sat, 03 Oct 2015 08:02:23 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_Issue_=2316701=3A_Merge_sequence_docs_from_3=2E4_into_3=2E5?= Message-ID: <20151003080222.70990.91299@psf.io> https://hg.python.org/cpython/rev/6e43a3833293 changeset: 98519:6e43a3833293 branch: 3.5 parent: 98512:88918f2a54df parent: 98518:f83db23bec7f user: Martin Panter date: Sat Oct 03 07:53:49 2015 +0000 summary: Issue #16701: Merge sequence docs from 3.4 into 3.5 files: Doc/library/stdtypes.rst | 14 ++++++++++++-- 1 files changed, 12 insertions(+), 2 deletions(-) diff --git a/Doc/library/stdtypes.rst b/Doc/library/stdtypes.rst --- a/Doc/library/stdtypes.rst +++ b/Doc/library/stdtypes.rst @@ -1063,10 +1063,14 @@ | ``s.copy()`` | creates a shallow copy of ``s``| \(5) | | | (same as ``s[:]``) | | +------------------------------+--------------------------------+---------------------+ -| ``s.extend(t)`` | extends *s* with the | | -| | contents of *t* (same as | | +| ``s.extend(t)`` or | extends *s* with the | | +| ``s += t`` | contents of *t* (for the | | +| | most part the same as | | | | ``s[len(s):len(s)] = t``) | | +------------------------------+--------------------------------+---------------------+ +| ``s *= n`` | updates *s* with its contents | \(6) | +| | repeated *n* times | | ++------------------------------+--------------------------------+---------------------+ | ``s.insert(i, x)`` | inserts *x* into *s* at the | | | | index given by *i* | | | | (same as ``s[i:i] = [x]``) | | @@ -1107,6 +1111,12 @@ .. versionadded:: 3.3 :meth:`clear` and :meth:`!copy` methods. +(6) + The value *n* is an integer, or an object implementing + :meth:`~object.__index__`. Zero and negative values of *n* clear + the sequence. Items in the sequence are not copied; they are referenced + multiple times, as explained for ``s * n`` under :ref:`typesseq-common`. + .. 
_typesseq-list: -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 3 10:02:23 2015 From: python-checkins at python.org (martin.panter) Date: Sat, 03 Oct 2015 08:02:23 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2316701=3A_Merge_sequence_docs_from_3=2E5?= Message-ID: <20151003080222.55460.3299@psf.io> https://hg.python.org/cpython/rev/a92466bf16cc changeset: 98520:a92466bf16cc parent: 98514:3c006ee38287 parent: 98519:6e43a3833293 user: Martin Panter date: Sat Oct 03 07:54:08 2015 +0000 summary: Issue #16701: Merge sequence docs from 3.5 files: Doc/library/stdtypes.rst | 14 ++++++++++++-- 1 files changed, 12 insertions(+), 2 deletions(-) diff --git a/Doc/library/stdtypes.rst b/Doc/library/stdtypes.rst --- a/Doc/library/stdtypes.rst +++ b/Doc/library/stdtypes.rst @@ -1063,10 +1063,14 @@ | ``s.copy()`` | creates a shallow copy of ``s``| \(5) | | | (same as ``s[:]``) | | +------------------------------+--------------------------------+---------------------+ -| ``s.extend(t)`` | extends *s* with the | | -| | contents of *t* (same as | | +| ``s.extend(t)`` or | extends *s* with the | | +| ``s += t`` | contents of *t* (for the | | +| | most part the same as | | | | ``s[len(s):len(s)] = t``) | | +------------------------------+--------------------------------+---------------------+ +| ``s *= n`` | updates *s* with its contents | \(6) | +| | repeated *n* times | | ++------------------------------+--------------------------------+---------------------+ | ``s.insert(i, x)`` | inserts *x* into *s* at the | | | | index given by *i* | | | | (same as ``s[i:i] = [x]``) | | @@ -1107,6 +1111,12 @@ .. versionadded:: 3.3 :meth:`clear` and :meth:`!copy` methods. +(6) + The value *n* is an integer, or an object implementing + :meth:`~object.__index__`. Zero and negative values of *n* clear + the sequence. Items in the sequence are not copied; they are referenced + multiple times, as explained for ``s * n`` under :ref:`typesseq-common`. + .. 
_typesseq-list: -- Repository URL: https://hg.python.org/cpython From solipsis at pitrou.net Sat Oct 3 10:44:16 2015 From: solipsis at pitrou.net (solipsis at pitrou.net) Date: Sat, 03 Oct 2015 08:44:16 +0000 Subject: [Python-checkins] Daily reference leaks (4202abcf5702): sum=61491 Message-ID: <20151003084416.3279.19397@psf.io> results for 4202abcf5702 on branch "default" -------------------------------------------- test_capi leaked [5410, 5410, 5410] references, sum=16230 test_capi leaked [1421, 1423, 1423] memory blocks, sum=4267 test_functools leaked [0, 2, 2] memory blocks, sum=4 test_threading leaked [10820, 10820, 10820] references, sum=32460 test_threading leaked [2842, 2844, 2844] memory blocks, sum=8530 Command line was: ['./python', '-m', 'test.regrtest', '-uall', '-R', '3:3:/home/psf-users/antoine/refleaks/reflogP1RQ4e', '--timeout', '7200'] From python-checkins at python.org Sat Oct 3 17:35:55 2015 From: python-checkins at python.org (guido.van.rossum) Date: Sat, 03 Oct 2015 15:35:55 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_Issue_=2325304=3A_Add_asyncio=2Erun=5Fcoroutine=5Fthreadsafe?= =?utf-8?q?=28=29=2E_By_Vincent_Michel=2E_=28Merge?= Message-ID: <20151003153555.20759.28452@psf.io> https://hg.python.org/cpython/rev/e0db10d8c95e changeset: 98522:e0db10d8c95e branch: 3.5 parent: 98519:6e43a3833293 parent: 98521:25e05b3e1869 user: Guido van Rossum date: Sat Oct 03 08:34:34 2015 -0700 summary: Issue #25304: Add asyncio.run_coroutine_threadsafe(). By Vincent Michel. (Merge 3.4->3.5.) files: Lib/asyncio/futures.py | 74 +++++++++++--- Lib/asyncio/tasks.py | 18 +++- Lib/test/test_asyncio/test_futures.py | 2 - Lib/test/test_asyncio/test_tasks.py | 67 +++++++++++++ Misc/ACKS | 1 + Misc/NEWS | 4 + 6 files changed, 147 insertions(+), 19 deletions(-) diff --git a/Lib/asyncio/futures.py b/Lib/asyncio/futures.py --- a/Lib/asyncio/futures.py +++ b/Lib/asyncio/futures.py @@ -390,22 +390,64 @@ __await__ = __iter__ # make compatible with 'await' expression -def wrap_future(fut, *, loop=None): +def _set_concurrent_future_state(concurrent, source): + """Copy state from a future to a concurrent.futures.Future.""" + assert source.done() + if source.cancelled(): + concurrent.cancel() + if not concurrent.set_running_or_notify_cancel(): + return + exception = source.exception() + if exception is not None: + concurrent.set_exception(exception) + else: + result = source.result() + concurrent.set_result(result) + + +def _chain_future(source, destination): + """Chain two futures so that when one completes, so does the other. + + The result (or exception) of source will be copied to destination. + If destination is cancelled, source gets cancelled too. + Compatible with both asyncio.Future and concurrent.futures.Future. 
+ """ + if not isinstance(source, (Future, concurrent.futures.Future)): + raise TypeError('A future is required for source argument') + if not isinstance(destination, (Future, concurrent.futures.Future)): + raise TypeError('A future is required for destination argument') + source_loop = source._loop if isinstance(source, Future) else None + dest_loop = destination._loop if isinstance(destination, Future) else None + + def _set_state(future, other): + if isinstance(future, Future): + future._copy_state(other) + else: + _set_concurrent_future_state(future, other) + + def _call_check_cancel(destination): + if destination.cancelled(): + if source_loop is None or source_loop is dest_loop: + source.cancel() + else: + source_loop.call_soon_threadsafe(source.cancel) + + def _call_set_state(source): + if dest_loop is None or dest_loop is source_loop: + _set_state(destination, source) + else: + dest_loop.call_soon_threadsafe(_set_state, destination, source) + + destination.add_done_callback(_call_check_cancel) + source.add_done_callback(_call_set_state) + + +def wrap_future(future, *, loop=None): """Wrap concurrent.futures.Future object.""" - if isinstance(fut, Future): - return fut - assert isinstance(fut, concurrent.futures.Future), \ - 'concurrent.futures.Future is expected, got {!r}'.format(fut) - if loop is None: - loop = events.get_event_loop() + if isinstance(future, Future): + return future + assert isinstance(future, concurrent.futures.Future), \ + 'concurrent.futures.Future is expected, got {!r}'.format(future) new_future = Future(loop=loop) - - def _check_cancel_other(f): - if f.cancelled(): - fut.cancel() - - new_future.add_done_callback(_check_cancel_other) - fut.add_done_callback( - lambda future: loop.call_soon_threadsafe( - new_future._copy_state, future)) + _chain_future(future, new_future) return new_future diff --git a/Lib/asyncio/tasks.py b/Lib/asyncio/tasks.py --- a/Lib/asyncio/tasks.py +++ b/Lib/asyncio/tasks.py @@ -3,7 +3,7 @@ __all__ = ['Task', 'FIRST_COMPLETED', 'FIRST_EXCEPTION', 'ALL_COMPLETED', 'wait', 'wait_for', 'as_completed', 'sleep', 'async', - 'gather', 'shield', 'ensure_future', + 'gather', 'shield', 'ensure_future', 'run_coroutine_threadsafe', ] import concurrent.futures @@ -692,3 +692,19 @@ inner.add_done_callback(_done_callback) return outer + + +def run_coroutine_threadsafe(coro, loop): + """Submit a coroutine object to a given event loop. + + Return a concurrent.futures.Future to access the result. + """ + if not coroutines.iscoroutine(coro): + raise TypeError('A coroutine object is required') + future = concurrent.futures.Future() + + def callback(): + futures._chain_future(ensure_future(coro, loop=loop), future) + + loop.call_soon_threadsafe(callback) + return future diff --git a/Lib/test/test_asyncio/test_futures.py b/Lib/test/test_asyncio/test_futures.py --- a/Lib/test/test_asyncio/test_futures.py +++ b/Lib/test/test_asyncio/test_futures.py @@ -174,8 +174,6 @@ '') def test_copy_state(self): - # Test the internal _copy_state method since it's being directly - # invoked in other modules. 
f = asyncio.Future(loop=self.loop) f.set_result(10) diff --git a/Lib/test/test_asyncio/test_tasks.py b/Lib/test/test_asyncio/test_tasks.py --- a/Lib/test/test_asyncio/test_tasks.py +++ b/Lib/test/test_asyncio/test_tasks.py @@ -2100,5 +2100,72 @@ self.assertIsInstance(f.exception(), RuntimeError) +class RunCoroutineThreadsafeTests(test_utils.TestCase): + """Test case for futures.submit_to_loop.""" + + def setUp(self): + self.loop = self.new_test_loop(self.time_gen) + + def time_gen(self): + """Handle the timer.""" + yield 0 # second + yield 1 # second + + @asyncio.coroutine + def add(self, a, b, fail=False, cancel=False): + """Wait 1 second and return a + b.""" + yield from asyncio.sleep(1, loop=self.loop) + if fail: + raise RuntimeError("Fail!") + if cancel: + asyncio.tasks.Task.current_task(self.loop).cancel() + yield + return a + b + + def target(self, fail=False, cancel=False, timeout=None): + """Run add coroutine in the event loop.""" + coro = self.add(1, 2, fail=fail, cancel=cancel) + future = asyncio.run_coroutine_threadsafe(coro, self.loop) + try: + return future.result(timeout) + finally: + future.done() or future.cancel() + + def test_run_coroutine_threadsafe(self): + """Test coroutine submission from a thread to an event loop.""" + future = self.loop.run_in_executor(None, self.target) + result = self.loop.run_until_complete(future) + self.assertEqual(result, 3) + + def test_run_coroutine_threadsafe_with_exception(self): + """Test coroutine submission from a thread to an event loop + when an exception is raised.""" + future = self.loop.run_in_executor(None, self.target, True) + with self.assertRaises(RuntimeError) as exc_context: + self.loop.run_until_complete(future) + self.assertIn("Fail!", exc_context.exception.args) + + def test_run_coroutine_threadsafe_with_timeout(self): + """Test coroutine submission from a thread to an event loop + when a timeout is raised.""" + callback = lambda: self.target(timeout=0) + future = self.loop.run_in_executor(None, callback) + with self.assertRaises(asyncio.TimeoutError): + self.loop.run_until_complete(future) + # Clear the time generator and tasks + test_utils.run_briefly(self.loop) + # Check that there's no pending task (add has been cancelled) + for task in asyncio.Task.all_tasks(self.loop): + self.assertTrue(task.done()) + + def test_run_coroutine_threadsafe_task_cancelled(self): + """Test coroutine submission from a tread to an event loop + when the task is cancelled.""" + callback = lambda: self.target(cancel=True) + future = self.loop.run_in_executor(None, callback) + with self.assertRaises(asyncio.CancelledError): + self.loop.run_until_complete(future) + + if __name__ == '__main__': unittest.main() diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -957,6 +957,7 @@ Trent Mick Jason Michalski Franck Michea +Vincent Michel Tom Middleton Thomas Miedema Stan Mihai diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -34,6 +34,10 @@ Library ------- +- Issue #25304: Add asyncio.run_coroutine_threadsafe(). This lets you + submit a coroutine to a loop from another thread, returning a + concurrent.futures.Future. By Vincent Michel. + - Issue #25232: Fix CGIRequestHandler to split the query from the URL at the first question mark (?) rather than the last. Patch from Xiang Zhang. 
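A minimal usage sketch for the new API (illustrative only, not taken from the patch; it assumes a Python where this change has landed, and the add() coroutine and thread setup are made up for the example): the event loop runs in a worker thread and a coroutine is submitted to it from the calling thread, which then blocks on the returned concurrent.futures.Future.

    import asyncio
    import threading

    @asyncio.coroutine
    def add(a, b):
        yield from asyncio.sleep(0.1)
        return a + b

    def run_loop(loop):
        asyncio.set_event_loop(loop)  # let asyncio.sleep() find this loop
        loop.run_forever()

    loop = asyncio.new_event_loop()
    worker = threading.Thread(target=run_loop, args=(loop,))
    worker.start()

    # Called from the submitting thread: schedule the coroutine on the
    # loop's thread and wait on the concurrent.futures.Future it returns.
    future = asyncio.run_coroutine_threadsafe(add(1, 2), loop)
    print(future.result(timeout=5))  # -> 3

    loop.call_soon_threadsafe(loop.stop)
    worker.join()
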
-- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 3 17:35:55 2015 From: python-checkins at python.org (guido.van.rossum) Date: Sat, 03 Oct 2015 15:35:55 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2325304=3A_Add_asyncio=2Erun=5Fcoroutine=5Fthread?= =?utf-8?q?safe=28=29=2E_By_Vincent_Michel=2E_=28Merge?= Message-ID: <20151003153555.20769.35597@psf.io> https://hg.python.org/cpython/rev/69829a7fccde changeset: 98523:69829a7fccde parent: 98520:a92466bf16cc parent: 98522:e0db10d8c95e user: Guido van Rossum date: Sat Oct 03 08:35:28 2015 -0700 summary: Issue #25304: Add asyncio.run_coroutine_threadsafe(). By Vincent Michel. (Merge 3.5->3.6.) files: Lib/asyncio/futures.py | 74 +++++++++++--- Lib/asyncio/tasks.py | 18 +++- Lib/test/test_asyncio/test_futures.py | 2 - Lib/test/test_asyncio/test_tasks.py | 67 +++++++++++++ Misc/ACKS | 1 + Misc/NEWS | 4 + 6 files changed, 147 insertions(+), 19 deletions(-) diff --git a/Lib/asyncio/futures.py b/Lib/asyncio/futures.py --- a/Lib/asyncio/futures.py +++ b/Lib/asyncio/futures.py @@ -390,22 +390,64 @@ __await__ = __iter__ # make compatible with 'await' expression -def wrap_future(fut, *, loop=None): +def _set_concurrent_future_state(concurrent, source): + """Copy state from a future to a concurrent.futures.Future.""" + assert source.done() + if source.cancelled(): + concurrent.cancel() + if not concurrent.set_running_or_notify_cancel(): + return + exception = source.exception() + if exception is not None: + concurrent.set_exception(exception) + else: + result = source.result() + concurrent.set_result(result) + + +def _chain_future(source, destination): + """Chain two futures so that when one completes, so does the other. + + The result (or exception) of source will be copied to destination. + If destination is cancelled, source gets cancelled too. + Compatible with both asyncio.Future and concurrent.futures.Future. 
+ """ + if not isinstance(source, (Future, concurrent.futures.Future)): + raise TypeError('A future is required for source argument') + if not isinstance(destination, (Future, concurrent.futures.Future)): + raise TypeError('A future is required for destination argument') + source_loop = source._loop if isinstance(source, Future) else None + dest_loop = destination._loop if isinstance(destination, Future) else None + + def _set_state(future, other): + if isinstance(future, Future): + future._copy_state(other) + else: + _set_concurrent_future_state(future, other) + + def _call_check_cancel(destination): + if destination.cancelled(): + if source_loop is None or source_loop is dest_loop: + source.cancel() + else: + source_loop.call_soon_threadsafe(source.cancel) + + def _call_set_state(source): + if dest_loop is None or dest_loop is source_loop: + _set_state(destination, source) + else: + dest_loop.call_soon_threadsafe(_set_state, destination, source) + + destination.add_done_callback(_call_check_cancel) + source.add_done_callback(_call_set_state) + + +def wrap_future(future, *, loop=None): """Wrap concurrent.futures.Future object.""" - if isinstance(fut, Future): - return fut - assert isinstance(fut, concurrent.futures.Future), \ - 'concurrent.futures.Future is expected, got {!r}'.format(fut) - if loop is None: - loop = events.get_event_loop() + if isinstance(future, Future): + return future + assert isinstance(future, concurrent.futures.Future), \ + 'concurrent.futures.Future is expected, got {!r}'.format(future) new_future = Future(loop=loop) - - def _check_cancel_other(f): - if f.cancelled(): - fut.cancel() - - new_future.add_done_callback(_check_cancel_other) - fut.add_done_callback( - lambda future: loop.call_soon_threadsafe( - new_future._copy_state, future)) + _chain_future(future, new_future) return new_future diff --git a/Lib/asyncio/tasks.py b/Lib/asyncio/tasks.py --- a/Lib/asyncio/tasks.py +++ b/Lib/asyncio/tasks.py @@ -3,7 +3,7 @@ __all__ = ['Task', 'FIRST_COMPLETED', 'FIRST_EXCEPTION', 'ALL_COMPLETED', 'wait', 'wait_for', 'as_completed', 'sleep', 'async', - 'gather', 'shield', 'ensure_future', + 'gather', 'shield', 'ensure_future', 'run_coroutine_threadsafe', ] import concurrent.futures @@ -692,3 +692,19 @@ inner.add_done_callback(_done_callback) return outer + + +def run_coroutine_threadsafe(coro, loop): + """Submit a coroutine object to a given event loop. + + Return a concurrent.futures.Future to access the result. + """ + if not coroutines.iscoroutine(coro): + raise TypeError('A coroutine object is required') + future = concurrent.futures.Future() + + def callback(): + futures._chain_future(ensure_future(coro, loop=loop), future) + + loop.call_soon_threadsafe(callback) + return future diff --git a/Lib/test/test_asyncio/test_futures.py b/Lib/test/test_asyncio/test_futures.py --- a/Lib/test/test_asyncio/test_futures.py +++ b/Lib/test/test_asyncio/test_futures.py @@ -174,8 +174,6 @@ '') def test_copy_state(self): - # Test the internal _copy_state method since it's being directly - # invoked in other modules. 
f = asyncio.Future(loop=self.loop) f.set_result(10) diff --git a/Lib/test/test_asyncio/test_tasks.py b/Lib/test/test_asyncio/test_tasks.py --- a/Lib/test/test_asyncio/test_tasks.py +++ b/Lib/test/test_asyncio/test_tasks.py @@ -2100,5 +2100,72 @@ self.assertIsInstance(f.exception(), RuntimeError) +class RunCoroutineThreadsafeTests(test_utils.TestCase): + """Test case for futures.submit_to_loop.""" + + def setUp(self): + self.loop = self.new_test_loop(self.time_gen) + + def time_gen(self): + """Handle the timer.""" + yield 0 # second + yield 1 # second + + @asyncio.coroutine + def add(self, a, b, fail=False, cancel=False): + """Wait 1 second and return a + b.""" + yield from asyncio.sleep(1, loop=self.loop) + if fail: + raise RuntimeError("Fail!") + if cancel: + asyncio.tasks.Task.current_task(self.loop).cancel() + yield + return a + b + + def target(self, fail=False, cancel=False, timeout=None): + """Run add coroutine in the event loop.""" + coro = self.add(1, 2, fail=fail, cancel=cancel) + future = asyncio.run_coroutine_threadsafe(coro, self.loop) + try: + return future.result(timeout) + finally: + future.done() or future.cancel() + + def test_run_coroutine_threadsafe(self): + """Test coroutine submission from a thread to an event loop.""" + future = self.loop.run_in_executor(None, self.target) + result = self.loop.run_until_complete(future) + self.assertEqual(result, 3) + + def test_run_coroutine_threadsafe_with_exception(self): + """Test coroutine submission from a thread to an event loop + when an exception is raised.""" + future = self.loop.run_in_executor(None, self.target, True) + with self.assertRaises(RuntimeError) as exc_context: + self.loop.run_until_complete(future) + self.assertIn("Fail!", exc_context.exception.args) + + def test_run_coroutine_threadsafe_with_timeout(self): + """Test coroutine submission from a thread to an event loop + when a timeout is raised.""" + callback = lambda: self.target(timeout=0) + future = self.loop.run_in_executor(None, callback) + with self.assertRaises(asyncio.TimeoutError): + self.loop.run_until_complete(future) + # Clear the time generator and tasks + test_utils.run_briefly(self.loop) + # Check that there's no pending task (add has been cancelled) + for task in asyncio.Task.all_tasks(self.loop): + self.assertTrue(task.done()) + + def test_run_coroutine_threadsafe_task_cancelled(self): + """Test coroutine submission from a tread to an event loop + when the task is cancelled.""" + callback = lambda: self.target(cancel=True) + future = self.loop.run_in_executor(None, callback) + with self.assertRaises(asyncio.CancelledError): + self.loop.run_until_complete(future) + + if __name__ == '__main__': unittest.main() diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -958,6 +958,7 @@ Trent Mick Jason Michalski Franck Michea +Vincent Michel Tom Middleton Thomas Miedema Stan Mihai diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -40,6 +40,10 @@ Library ------- +- Issue #25304: Add asyncio.run_coroutine_threadsafe(). This lets you + submit a coroutine to a loop from another thread, returning a + concurrent.futures.Future. By Vincent Michel. + - Issue #25232: Fix CGIRequestHandler to split the query from the URL at the first question mark (?) rather than the last. Patch from Xiang Zhang. 
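The same patch also reworks wrap_future() on top of the new _chain_future() helper. A short sketch of what that gives you (illustrative; the blocking function and executor are made up for the example): a concurrent.futures.Future, for instance one from a thread pool, exposed as an asyncio future that the loop can wait on.

    import asyncio
    import concurrent.futures

    def blocking_work():
        return 42

    loop = asyncio.get_event_loop()
    with concurrent.futures.ThreadPoolExecutor(max_workers=1) as pool:
        cf = pool.submit(blocking_work)
        result = loop.run_until_complete(asyncio.wrap_future(cf, loop=loop))
    print(result)  # -> 42

In both helpers, cancelling the returned future is propagated to the wrapped one by _chain_future's _call_check_cancel callback, so the two futures stay in step.
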
-- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 3 17:35:56 2015 From: python-checkins at python.org (guido.van.rossum) Date: Sat, 03 Oct 2015 15:35:56 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzI1MzA0?= =?utf-8?q?=3A_Add_asyncio=2Erun=5Fcoroutine=5Fthreadsafe=28=29=2E_By_Vinc?= =?utf-8?q?ent_Michel=2E?= Message-ID: <20151003153555.20757.60480@psf.io> https://hg.python.org/cpython/rev/25e05b3e1869 changeset: 98521:25e05b3e1869 branch: 3.4 parent: 98518:f83db23bec7f user: Guido van Rossum date: Sat Oct 03 08:31:42 2015 -0700 summary: Issue #25304: Add asyncio.run_coroutine_threadsafe(). By Vincent Michel. files: Lib/asyncio/futures.py | 74 +++++++++++--- Lib/asyncio/tasks.py | 18 +++- Lib/test/test_asyncio/test_futures.py | 2 - Lib/test/test_asyncio/test_tasks.py | 67 +++++++++++++ Misc/ACKS | 1 + Misc/NEWS | 4 + 6 files changed, 147 insertions(+), 19 deletions(-) diff --git a/Lib/asyncio/futures.py b/Lib/asyncio/futures.py --- a/Lib/asyncio/futures.py +++ b/Lib/asyncio/futures.py @@ -390,22 +390,64 @@ __await__ = __iter__ # make compatible with 'await' expression -def wrap_future(fut, *, loop=None): +def _set_concurrent_future_state(concurrent, source): + """Copy state from a future to a concurrent.futures.Future.""" + assert source.done() + if source.cancelled(): + concurrent.cancel() + if not concurrent.set_running_or_notify_cancel(): + return + exception = source.exception() + if exception is not None: + concurrent.set_exception(exception) + else: + result = source.result() + concurrent.set_result(result) + + +def _chain_future(source, destination): + """Chain two futures so that when one completes, so does the other. + + The result (or exception) of source will be copied to destination. + If destination is cancelled, source gets cancelled too. + Compatible with both asyncio.Future and concurrent.futures.Future. 
+ """ + if not isinstance(source, (Future, concurrent.futures.Future)): + raise TypeError('A future is required for source argument') + if not isinstance(destination, (Future, concurrent.futures.Future)): + raise TypeError('A future is required for destination argument') + source_loop = source._loop if isinstance(source, Future) else None + dest_loop = destination._loop if isinstance(destination, Future) else None + + def _set_state(future, other): + if isinstance(future, Future): + future._copy_state(other) + else: + _set_concurrent_future_state(future, other) + + def _call_check_cancel(destination): + if destination.cancelled(): + if source_loop is None or source_loop is dest_loop: + source.cancel() + else: + source_loop.call_soon_threadsafe(source.cancel) + + def _call_set_state(source): + if dest_loop is None or dest_loop is source_loop: + _set_state(destination, source) + else: + dest_loop.call_soon_threadsafe(_set_state, destination, source) + + destination.add_done_callback(_call_check_cancel) + source.add_done_callback(_call_set_state) + + +def wrap_future(future, *, loop=None): """Wrap concurrent.futures.Future object.""" - if isinstance(fut, Future): - return fut - assert isinstance(fut, concurrent.futures.Future), \ - 'concurrent.futures.Future is expected, got {!r}'.format(fut) - if loop is None: - loop = events.get_event_loop() + if isinstance(future, Future): + return future + assert isinstance(future, concurrent.futures.Future), \ + 'concurrent.futures.Future is expected, got {!r}'.format(future) new_future = Future(loop=loop) - - def _check_cancel_other(f): - if f.cancelled(): - fut.cancel() - - new_future.add_done_callback(_check_cancel_other) - fut.add_done_callback( - lambda future: loop.call_soon_threadsafe( - new_future._copy_state, future)) + _chain_future(future, new_future) return new_future diff --git a/Lib/asyncio/tasks.py b/Lib/asyncio/tasks.py --- a/Lib/asyncio/tasks.py +++ b/Lib/asyncio/tasks.py @@ -3,7 +3,7 @@ __all__ = ['Task', 'FIRST_COMPLETED', 'FIRST_EXCEPTION', 'ALL_COMPLETED', 'wait', 'wait_for', 'as_completed', 'sleep', 'async', - 'gather', 'shield', 'ensure_future', + 'gather', 'shield', 'ensure_future', 'run_coroutine_threadsafe', ] import concurrent.futures @@ -692,3 +692,19 @@ inner.add_done_callback(_done_callback) return outer + + +def run_coroutine_threadsafe(coro, loop): + """Submit a coroutine object to a given event loop. + + Return a concurrent.futures.Future to access the result. + """ + if not coroutines.iscoroutine(coro): + raise TypeError('A coroutine object is required') + future = concurrent.futures.Future() + + def callback(): + futures._chain_future(ensure_future(coro, loop=loop), future) + + loop.call_soon_threadsafe(callback) + return future diff --git a/Lib/test/test_asyncio/test_futures.py b/Lib/test/test_asyncio/test_futures.py --- a/Lib/test/test_asyncio/test_futures.py +++ b/Lib/test/test_asyncio/test_futures.py @@ -174,8 +174,6 @@ '') def test_copy_state(self): - # Test the internal _copy_state method since it's being directly - # invoked in other modules. 
f = asyncio.Future(loop=self.loop) f.set_result(10) diff --git a/Lib/test/test_asyncio/test_tasks.py b/Lib/test/test_asyncio/test_tasks.py --- a/Lib/test/test_asyncio/test_tasks.py +++ b/Lib/test/test_asyncio/test_tasks.py @@ -2100,5 +2100,72 @@ self.assertIsInstance(f.exception(), RuntimeError) +class RunCoroutineThreadsafeTests(test_utils.TestCase): + """Test case for futures.submit_to_loop.""" + + def setUp(self): + self.loop = self.new_test_loop(self.time_gen) + + def time_gen(self): + """Handle the timer.""" + yield 0 # second + yield 1 # second + + @asyncio.coroutine + def add(self, a, b, fail=False, cancel=False): + """Wait 1 second and return a + b.""" + yield from asyncio.sleep(1, loop=self.loop) + if fail: + raise RuntimeError("Fail!") + if cancel: + asyncio.tasks.Task.current_task(self.loop).cancel() + yield + return a + b + + def target(self, fail=False, cancel=False, timeout=None): + """Run add coroutine in the event loop.""" + coro = self.add(1, 2, fail=fail, cancel=cancel) + future = asyncio.run_coroutine_threadsafe(coro, self.loop) + try: + return future.result(timeout) + finally: + future.done() or future.cancel() + + def test_run_coroutine_threadsafe(self): + """Test coroutine submission from a thread to an event loop.""" + future = self.loop.run_in_executor(None, self.target) + result = self.loop.run_until_complete(future) + self.assertEqual(result, 3) + + def test_run_coroutine_threadsafe_with_exception(self): + """Test coroutine submission from a thread to an event loop + when an exception is raised.""" + future = self.loop.run_in_executor(None, self.target, True) + with self.assertRaises(RuntimeError) as exc_context: + self.loop.run_until_complete(future) + self.assertIn("Fail!", exc_context.exception.args) + + def test_run_coroutine_threadsafe_with_timeout(self): + """Test coroutine submission from a thread to an event loop + when a timeout is raised.""" + callback = lambda: self.target(timeout=0) + future = self.loop.run_in_executor(None, callback) + with self.assertRaises(asyncio.TimeoutError): + self.loop.run_until_complete(future) + # Clear the time generator and tasks + test_utils.run_briefly(self.loop) + # Check that there's no pending task (add has been cancelled) + for task in asyncio.Task.all_tasks(self.loop): + self.assertTrue(task.done()) + + def test_run_coroutine_threadsafe_task_cancelled(self): + """Test coroutine submission from a tread to an event loop + when the task is cancelled.""" + callback = lambda: self.target(cancel=True) + future = self.loop.run_in_executor(None, callback) + with self.assertRaises(asyncio.CancelledError): + self.loop.run_until_complete(future) + + if __name__ == '__main__': unittest.main() diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -929,6 +929,7 @@ Trent Mick Jason Michalski Franck Michea +Vincent Michel Tom Middleton Thomas Miedema Stan Mihai diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -90,6 +90,10 @@ Library ------- +- Issue #25304: Add asyncio.run_coroutine_threadsafe(). This lets you + submit a coroutine to a loop from another thread, returning a + concurrent.futures.Future. By Vincent Michel. + - Issue #25232: Fix CGIRequestHandler to split the query from the URL at the first question mark (?) rather than the last. Patch from Xiang Zhang. 
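One last illustrative sketch of the behaviour the new RunCoroutineThreadsafeTests exercise (the explicit loop= argument matches the asyncio of this era): because the returned object is a concurrent.futures.Future, waiting, timeouts and cancellation use the concurrent.futures API, and cancelling that future is chained back to the task running inside the loop.

    import asyncio
    import concurrent.futures
    import threading

    loop = asyncio.new_event_loop()
    threading.Thread(target=loop.run_forever, daemon=True).start()

    fut = asyncio.run_coroutine_threadsafe(asyncio.sleep(60, loop=loop), loop)
    try:
        fut.result(timeout=0.1)  # deliberately too short
    except concurrent.futures.TimeoutError:
        fut.cancel()  # cancels the still-sleeping task inside the loop

    loop.call_soon_threadsafe(loop.stop)
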
-- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 3 21:22:48 2015 From: python-checkins at python.org (victor.stinner) Date: Sat, 03 Oct 2015 19:22:48 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2325306=3A_Skip_tes?= =?utf-8?q?t=5Fhuntrleaks=5Ffd=5Fleak=28=29_of_test=5Fregrtest_until_the_b?= =?utf-8?q?ug_is?= Message-ID: <20151003192247.97700.36068@psf.io> https://hg.python.org/cpython/rev/fd915645627a changeset: 98524:fd915645627a user: Victor Stinner date: Sat Oct 03 21:20:41 2015 +0200 summary: Issue #25306: Skip test_huntrleaks_fd_leak() of test_regrtest until the bug is fixed. files: Lib/test/test_regrtest.py | 2 ++ 1 files changed, 2 insertions(+), 0 deletions(-) diff --git a/Lib/test/test_regrtest.py b/Lib/test/test_regrtest.py --- a/Lib/test/test_regrtest.py +++ b/Lib/test/test_regrtest.py @@ -643,6 +643,8 @@ self.check_executed_tests(output, [test]*3, failed=test) @unittest.skipUnless(Py_DEBUG, 'need a debug build') + # Issue #25306: the test hangs sometimes on Windows + @unittest.skipIf(sys.platform == 'win32', 'test broken on Windows') def test_huntrleaks_fd_leak(self): # test --huntrleaks for file descriptor leak code = textwrap.dedent(""" -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 3 21:41:01 2015 From: python-checkins at python.org (victor.stinner) Date: Sat, 03 Oct 2015 19:41:01 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2325306=3A_Try_to_f?= =?utf-8?q?ix_test=5Fhuntrleaks=5Ffd=5Fleak=28=29_on_Windows?= Message-ID: <20151003194101.18376.99223@psf.io> https://hg.python.org/cpython/rev/850efcc9155c changeset: 98525:850efcc9155c user: Victor Stinner date: Sat Oct 03 21:40:21 2015 +0200 summary: Issue #25306: Try to fix test_huntrleaks_fd_leak() on Windows Issue #25306: Disable popup and logs to stderr on assertion failures in MSCRT. 
files: Lib/test/test_regrtest.py | 14 ++++++++++++-- 1 files changed, 12 insertions(+), 2 deletions(-) diff --git a/Lib/test/test_regrtest.py b/Lib/test/test_regrtest.py --- a/Lib/test/test_regrtest.py +++ b/Lib/test/test_regrtest.py @@ -643,14 +643,24 @@ self.check_executed_tests(output, [test]*3, failed=test) @unittest.skipUnless(Py_DEBUG, 'need a debug build') - # Issue #25306: the test hangs sometimes on Windows - @unittest.skipIf(sys.platform == 'win32', 'test broken on Windows') def test_huntrleaks_fd_leak(self): # test --huntrleaks for file descriptor leak code = textwrap.dedent(""" import os import unittest + # Issue #25306: Disable popups and logs to stderr on assertion + # failures in MSCRT + try: + import msvcrt + msvcrt.CrtSetReportMode + except (ImportError, AttributeError): + # no Windows, o release build + pass + else: + for m in [msvcrt.CRT_WARN, msvcrt.CRT_ERROR, msvcrt.CRT_ASSERT]: + msvcrt.CrtSetReportMode(m, 0) + class FDLeakTest(unittest.TestCase): def test_leak(self): fd = os.open(__file__, os.O_RDONLY) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Oct 4 05:03:38 2015 From: python-checkins at python.org (terry.reedy) Date: Sun, 04 Oct 2015 03:03:38 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Issue_=239232=3A_Escape_rst_markup_char_in_NEWS_entry_to?= =?utf-8?q?_avoid_Sphinx_warning=2E?= Message-ID: <20151004030338.97722.10650@psf.io> https://hg.python.org/cpython/rev/6db349fac3ec changeset: 98527:6db349fac3ec parent: 98525:850efcc9155c parent: 98526:20e0906a808e user: Terry Jan Reedy date: Sat Oct 03 23:03:15 2015 -0400 summary: Issue #9232: Escape rst markup char in NEWS entry to avoid Sphinx warning. files: Misc/NEWS | 6 +++--- 1 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -29,7 +29,7 @@ doesn't need such high-quality entropy. - Issue #9232: Modify Python's grammar to allow trailing commas in the - argument list of a function declaration. For example, "def f(*, a = + argument list of a function declaration. For example, "def f(\*, a = 3,): pass" is now legal. Patch from Mark Dickinson. - Issue #24965: Implement PEP 498 "Literal String Interpolation". This @@ -555,8 +555,8 @@ - Issue #17527: Add PATCH to wsgiref.validator. Patch from Luca Sbardella. -- Issue #24791: Fix grammar regression for call syntax: 'g(*a or b)'. - +- Issue #24791: Fix grammar regression for call syntax: 'g(\*a or b)'. +p IDLE ---- -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Oct 4 05:03:38 2015 From: python-checkins at python.org (terry.reedy) Date: Sun, 04 Oct 2015 03:03:38 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy41KTogSXNzdWUgIzI0Nzkx?= =?utf-8?q?=3A_Escape_rst_markup_char_in_NEWS_entry_to_avoid_Sphinx_warnin?= =?utf-8?q?g=2E?= Message-ID: <20151004030338.2673.92117@psf.io> https://hg.python.org/cpython/rev/20e0906a808e changeset: 98526:20e0906a808e branch: 3.5 parent: 98522:e0db10d8c95e user: Terry Jan Reedy date: Sat Oct 03 23:01:46 2015 -0400 summary: Issue #24791: Escape rst markup char in NEWS entry to avoid Sphinx warning. files: Misc/NEWS | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -423,7 +423,7 @@ - Issue #17527: Add PATCH to wsgiref.validator. Patch from Luca Sbardella. -- Issue #24791: Fix grammar regression for call syntax: 'g(*a or b)'. 
+- Issue #24791: Fix grammar regression for call syntax: 'g(\*a or b)'. IDLE ---- -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Oct 4 06:32:03 2015 From: python-checkins at python.org (terry.reedy) Date: Sun, 04 Oct 2015 04:32:03 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzI0ODIw?= =?utf-8?q?=3A_Add_=27IDLE_Dark=27_text_color_theme=2C_warning=2C_and_solu?= =?utf-8?q?tion=2E?= Message-ID: <20151004043203.7248.13110@psf.io> https://hg.python.org/cpython/rev/1de01a63f360 changeset: 98529:1de01a63f360 branch: 3.4 parent: 98521:25e05b3e1869 user: Terry Jan Reedy date: Sun Oct 04 00:31:05 2015 -0400 summary: Issue #24820: Add 'IDLE Dark' text color theme, warning, and solution. files: Lib/idlelib/config-highlight.def | 29 ++++++++++++++++++++ Lib/idlelib/configDialog.py | 14 +++++++++ 2 files changed, 43 insertions(+), 0 deletions(-) diff --git a/Lib/idlelib/config-highlight.def b/Lib/idlelib/config-highlight.def --- a/Lib/idlelib/config-highlight.def +++ b/Lib/idlelib/config-highlight.def @@ -62,3 +62,32 @@ stderr-background= #ffffff console-foreground= #770000 console-background= #ffffff + +[IDLE Dark] +comment-foreground = #dd0000 +console-foreground = #ff4d4d +error-foreground = #FFFFFF +hilite-background = #7e7e7e +string-foreground = #02ff02 +stderr-background = #002240 +stderr-foreground = #ffb3b3 +console-background = #002240 +hit-background = #fbfbfb +string-background = #002240 +normal-background = #002240 +hilite-foreground = #FFFFFF +keyword-foreground = #ff8000 +error-background = #c86464 +keyword-background = #002240 +builtin-background = #002240 +break-background = #808000 +builtin-foreground = #ff00ff +definition-foreground = #5e5eff +stdout-foreground = #c2d1fa +definition-background = #002240 +normal-foreground = #FFFFFF +cursor-foreground = #ffffff +stdout-background = #002240 +hit-foreground = #002240 +comment-background = #002240 +break-foreground = #FFFFFF diff --git a/Lib/idlelib/configDialog.py b/Lib/idlelib/configDialog.py --- a/Lib/idlelib/configDialog.py +++ b/Lib/idlelib/configDialog.py @@ -507,6 +507,20 @@ def VarChanged_builtinTheme(self, *params): value = self.builtinTheme.get() + if value == 'IDLE Dark': + tkMessageBox.showwarning( + title="The 'IDLE Dark' Text Color Theme", + message="IDLE Dark is new in October, 2015. Trying to " + "run earlier versions of IDLE with it selected " + "will disable colorizing, or worse.\n\n" + "If you might ever run an earlier release of IDLE, " + "then before exiting this version, " + "either switch to another theme or " + "hit the 'Save as New Custom Theme' button. 
" + "The latter requires a new name, such as " + "'Custom Dark', but the custom theme will work " + "with any IDLE release, and can be modified.", + parent=self) self.AddChangedItem('main', 'Theme', 'name', value) self.PaintThemeSample() -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Oct 4 06:32:03 2015 From: python-checkins at python.org (terry.reedy) Date: Sun, 04 Oct 2015 04:32:03 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Merge_with_3=2E5?= Message-ID: <20151004043203.3275.40945@psf.io> https://hg.python.org/cpython/rev/254cd94b653b changeset: 98531:254cd94b653b parent: 98527:6db349fac3ec parent: 98530:16e3fe295243 user: Terry Jan Reedy date: Sun Oct 04 00:31:36 2015 -0400 summary: Merge with 3.5 files: Lib/idlelib/config-highlight.def | 29 ++++++++++++++++++++ Lib/idlelib/configDialog.py | 14 +++++++++ 2 files changed, 43 insertions(+), 0 deletions(-) diff --git a/Lib/idlelib/config-highlight.def b/Lib/idlelib/config-highlight.def --- a/Lib/idlelib/config-highlight.def +++ b/Lib/idlelib/config-highlight.def @@ -62,3 +62,32 @@ stderr-background= #ffffff console-foreground= #770000 console-background= #ffffff + +[IDLE Dark] +comment-foreground = #dd0000 +console-foreground = #ff4d4d +error-foreground = #FFFFFF +hilite-background = #7e7e7e +string-foreground = #02ff02 +stderr-background = #002240 +stderr-foreground = #ffb3b3 +console-background = #002240 +hit-background = #fbfbfb +string-background = #002240 +normal-background = #002240 +hilite-foreground = #FFFFFF +keyword-foreground = #ff8000 +error-background = #c86464 +keyword-background = #002240 +builtin-background = #002240 +break-background = #808000 +builtin-foreground = #ff00ff +definition-foreground = #5e5eff +stdout-foreground = #c2d1fa +definition-background = #002240 +normal-foreground = #FFFFFF +cursor-foreground = #ffffff +stdout-background = #002240 +hit-foreground = #002240 +comment-background = #002240 +break-foreground = #FFFFFF diff --git a/Lib/idlelib/configDialog.py b/Lib/idlelib/configDialog.py --- a/Lib/idlelib/configDialog.py +++ b/Lib/idlelib/configDialog.py @@ -507,6 +507,20 @@ def VarChanged_builtinTheme(self, *params): value = self.builtinTheme.get() + if value == 'IDLE Dark': + tkMessageBox.showwarning( + title="The 'IDLE Dark' Text Color Theme", + message="IDLE Dark is new in October, 2015. Trying to " + "run earlier versions of IDLE with it selected " + "will disable colorizing, or worse.\n\n" + "If you might ever run an earlier release of IDLE, " + "then before exiting this version, " + "either switch to another theme or " + "hit the 'Save as New Custom Theme' button. 
" + "The latter requires a new name, such as " + "'Custom Dark', but the custom theme will work " + "with any IDLE release, and can be modified.", + parent=self) self.AddChangedItem('main', 'Theme', 'name', value) self.PaintThemeSample() -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Oct 4 06:32:03 2015 From: python-checkins at python.org (terry.reedy) Date: Sun, 04 Oct 2015 04:32:03 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_Merge_with_3=2E4?= Message-ID: <20151004043203.7266.77280@psf.io> https://hg.python.org/cpython/rev/16e3fe295243 changeset: 98530:16e3fe295243 branch: 3.5 parent: 98526:20e0906a808e parent: 98529:1de01a63f360 user: Terry Jan Reedy date: Sun Oct 04 00:31:23 2015 -0400 summary: Merge with 3.4 files: Lib/idlelib/config-highlight.def | 29 ++++++++++++++++++++ Lib/idlelib/configDialog.py | 14 +++++++++ 2 files changed, 43 insertions(+), 0 deletions(-) diff --git a/Lib/idlelib/config-highlight.def b/Lib/idlelib/config-highlight.def --- a/Lib/idlelib/config-highlight.def +++ b/Lib/idlelib/config-highlight.def @@ -62,3 +62,32 @@ stderr-background= #ffffff console-foreground= #770000 console-background= #ffffff + +[IDLE Dark] +comment-foreground = #dd0000 +console-foreground = #ff4d4d +error-foreground = #FFFFFF +hilite-background = #7e7e7e +string-foreground = #02ff02 +stderr-background = #002240 +stderr-foreground = #ffb3b3 +console-background = #002240 +hit-background = #fbfbfb +string-background = #002240 +normal-background = #002240 +hilite-foreground = #FFFFFF +keyword-foreground = #ff8000 +error-background = #c86464 +keyword-background = #002240 +builtin-background = #002240 +break-background = #808000 +builtin-foreground = #ff00ff +definition-foreground = #5e5eff +stdout-foreground = #c2d1fa +definition-background = #002240 +normal-foreground = #FFFFFF +cursor-foreground = #ffffff +stdout-background = #002240 +hit-foreground = #002240 +comment-background = #002240 +break-foreground = #FFFFFF diff --git a/Lib/idlelib/configDialog.py b/Lib/idlelib/configDialog.py --- a/Lib/idlelib/configDialog.py +++ b/Lib/idlelib/configDialog.py @@ -507,6 +507,20 @@ def VarChanged_builtinTheme(self, *params): value = self.builtinTheme.get() + if value == 'IDLE Dark': + tkMessageBox.showwarning( + title="The 'IDLE Dark' Text Color Theme", + message="IDLE Dark is new in October, 2015. Trying to " + "run earlier versions of IDLE with it selected " + "will disable colorizing, or worse.\n\n" + "If you might ever run an earlier release of IDLE, " + "then before exiting this version, " + "either switch to another theme or " + "hit the 'Save as New Custom Theme' button. 
" + "The latter requires a new name, such as " + "'Custom Dark', but the custom theme will work " + "with any IDLE release, and can be modified.", + parent=self) self.AddChangedItem('main', 'Theme', 'name', value) self.PaintThemeSample() -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Oct 4 06:32:04 2015 From: python-checkins at python.org (terry.reedy) Date: Sun, 04 Oct 2015 04:32:04 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogSXNzdWUgIzI0ODIw?= =?utf-8?q?=3A_Add_=27IDLE_Dark=27_text_color_theme=2C_warning=2C_and_solu?= =?utf-8?q?tion=2E?= Message-ID: <20151004043203.55472.96354@psf.io> https://hg.python.org/cpython/rev/afa95f032de1 changeset: 98528:afa95f032de1 branch: 2.7 parent: 98517:ec373d762213 user: Terry Jan Reedy date: Sun Oct 04 00:30:59 2015 -0400 summary: Issue #24820: Add 'IDLE Dark' text color theme, warning, and solution. files: Lib/idlelib/config-highlight.def | 29 ++++++++++++++++++++ Lib/idlelib/configDialog.py | 14 +++++++++ 2 files changed, 43 insertions(+), 0 deletions(-) diff --git a/Lib/idlelib/config-highlight.def b/Lib/idlelib/config-highlight.def --- a/Lib/idlelib/config-highlight.def +++ b/Lib/idlelib/config-highlight.def @@ -62,3 +62,32 @@ stderr-background= #ffffff console-foreground= #770000 console-background= #ffffff + +[IDLE Dark] +comment-foreground = #dd0000 +console-foreground = #ff4d4d +error-foreground = #FFFFFF +hilite-background = #7e7e7e +string-foreground = #02ff02 +stderr-background = #002240 +stderr-foreground = #ffb3b3 +console-background = #002240 +hit-background = #fbfbfb +string-background = #002240 +normal-background = #002240 +hilite-foreground = #FFFFFF +keyword-foreground = #ff8000 +error-background = #c86464 +keyword-background = #002240 +builtin-background = #002240 +break-background = #808000 +builtin-foreground = #ff00ff +definition-foreground = #5e5eff +stdout-foreground = #c2d1fa +definition-background = #002240 +normal-foreground = #FFFFFF +cursor-foreground = #ffffff +stdout-background = #002240 +hit-foreground = #002240 +comment-background = #002240 +break-foreground = #FFFFFF diff --git a/Lib/idlelib/configDialog.py b/Lib/idlelib/configDialog.py --- a/Lib/idlelib/configDialog.py +++ b/Lib/idlelib/configDialog.py @@ -524,6 +524,20 @@ def VarChanged_builtinTheme(self, *params): value = self.builtinTheme.get() + if value == 'IDLE Dark': + tkMessageBox.showwarning( + title="The 'IDLE Dark' Text Color Theme", + message="IDLE Dark is new in October, 2015. Trying to " + "run earlier versions of IDLE with it selected " + "will disable colorizing, or worse.\n\n" + "If you might ever run an earlier release of IDLE, " + "then before exiting this version, " + "either switch to another theme or " + "hit the 'Save as New Custom Theme' button. 
" + "The latter requires a new name, such as " + "'Custom Dark', but the custom theme will work " + "with any IDLE release, and can be modified.", + parent=self) self.AddChangedItem('main', 'Theme', 'name', value) self.PaintThemeSample() -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Oct 4 07:20:01 2015 From: python-checkins at python.org (terry.reedy) Date: Sun, 04 Oct 2015 05:20:01 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogSXNzdWUgIzI0ODIw?= =?utf-8?q?=3A_Update_IDLE_NEWS_items=2E?= Message-ID: <20151004052001.55465.21539@psf.io> https://hg.python.org/cpython/rev/739cc9ca55cd changeset: 98532:739cc9ca55cd branch: 2.7 parent: 98528:afa95f032de1 user: Terry Jan Reedy date: Sun Oct 04 01:14:45 2015 -0400 summary: Issue #24820: Update IDLE NEWS items. files: Lib/idlelib/NEWS.txt | 19 +++++++++++++++++++ Misc/NEWS | 19 +++++++++++++++++++ 2 files changed, 38 insertions(+), 0 deletions(-) diff --git a/Lib/idlelib/NEWS.txt b/Lib/idlelib/NEWS.txt --- a/Lib/idlelib/NEWS.txt +++ b/Lib/idlelib/NEWS.txt @@ -2,6 +2,25 @@ ========================= *Release date: +- Issue #24820: IDLE now has an 'IDLE Dark' built-in text color theme. + It is more or less IDLE Classic inverted, with a cobalt blue background. + Strings, comments, keywords, ... are still green, red, orange, ... . + Selecting it displays the following warning and solution. + "IDLE Dark is new in October, 2015. Trying to run earlier versions + of IDLE with it selected will disable colorizing, or worse. + If you might ever run an earlier release of IDLE, then before + exiting this version, either switch to another theme or hit the + 'Save as New Custom Theme' button. The latter requires a new name, + such as 'Custom Dark', but the custom theme will work with any IDLE + release, and can be modified." + +- Issue #25224: README.txt is now an idlelib index for IDLE developers and + curious users. The previous user content is now in the IDLE doc and is + redundant. IDLE now means 'Integrated Development and Learning Environment'. + +- Issue #24820: Users can now set breakpoint colors in + Settings -> Custom Highlighting. Original patch by Mark Roseman. + - Issue #24972: Inactive selection background now matches active selection background, as configured by user, on all systems. Found items are now always highlighted on Windows. Initial patch by Mark Roseman. diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -177,6 +177,25 @@ IDLE ---- +- Issue #24820: IDLE now has an 'IDLE Dark' built-in text color theme. + It is more or less IDLE Classic inverted, with a cobalt blue background. + Strings, comments, keywords, ... are still green, red, orange, ... . + Selecting it displays the following warning and solution. + "IDLE Dark is new in October, 2015. Trying to run earlier versions + of IDLE with it selected will disable colorizing, or worse. + If you might ever run an earlier release of IDLE, then before + exiting this version, either switch to another theme or hit the + 'Save as New Custom Theme' button. The latter requires a new name, + such as 'Custom Dark', but the custom theme will work with any IDLE + release, and can be modified." + +- Issue #25224: README.txt is now an idlelib index for IDLE developers and + curious users. The previous user content is now in the IDLE doc and is + redundant. IDLE now means 'Integrated Development and Learning Environment'. + +- Issue #24820: Users can now set breakpoint colors in + Settings -> Custom Highlighting. 
Original patch by Mark Roseman. + - Issue #24972: Inactive selection background now matches active selection background, as configured by user, on all systems. Found items are now always highlighted on Windows. Initial patch by Mark Roseman. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Oct 4 07:20:01 2015 From: python-checkins at python.org (terry.reedy) Date: Sun, 04 Oct 2015 05:20:01 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_Issue_=2324820=3A_Update_IDLE_NEWS_items=2E?= Message-ID: <20151004052001.2689.53755@psf.io> https://hg.python.org/cpython/rev/89a1e03b4639 changeset: 98534:89a1e03b4639 branch: 3.5 parent: 98530:16e3fe295243 parent: 98533:233974dfda03 user: Terry Jan Reedy date: Sun Oct 04 01:17:13 2015 -0400 summary: Issue #24820: Update IDLE NEWS items. files: Lib/idlelib/NEWS.txt | 19 +++++++++++++++++++ Misc/NEWS | 19 +++++++++++++++++++ 2 files changed, 38 insertions(+), 0 deletions(-) diff --git a/Lib/idlelib/NEWS.txt b/Lib/idlelib/NEWS.txt --- a/Lib/idlelib/NEWS.txt +++ b/Lib/idlelib/NEWS.txt @@ -2,6 +2,25 @@ ========================= *Release date: 2015-09-13* +- Issue #24820: IDLE now has an 'IDLE Dark' built-in text color theme. + It is more or less IDLE Classic inverted, with a cobalt blue background. + Strings, comments, keywords, ... are still green, red, orange, ... . + Selecting it displays the following warning and solution. + "IDLE Dark is new in October, 2015. Trying to run earlier versions + of IDLE with it selected will disable colorizing, or worse. + If you might ever run an earlier release of IDLE, then before + exiting this version, either switch to another theme or hit the + 'Save as New Custom Theme' button. The latter requires a new name, + such as 'Custom Dark', but the custom theme will work with any IDLE + release, and can be modified." + +- Issue #25224: README.txt is now an idlelib index for IDLE developers and + curious users. The previous user content is now in the IDLE doc and is + redundant. IDLE now means 'Integrated Development and Learning Environment'. + +- Issue #24820: Users can now set breakpoint colors in + Settings -> Custom Highlighting. Original patch by Mark Roseman. + - Issue #24972: Inactive selection background now matches active selection background, as configured by user, on all systems. Found items are now always highlighted on Windows. Initial patch by Mark Roseman. diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -167,6 +167,25 @@ IDLE ---- +- Issue #24820: IDLE now has an 'IDLE Dark' built-in text color theme. + It is more or less IDLE Classic inverted, with a cobalt blue background. + Strings, comments, keywords, ... are still green, red, orange, ... . + Selecting it displays the following warning and solution. + "IDLE Dark is new in October, 2015. Trying to run earlier versions + of IDLE with it selected will disable colorizing, or worse. + If you might ever run an earlier release of IDLE, then before + exiting this version, either switch to another theme or hit the + 'Save as New Custom Theme' button. The latter requires a new name, + such as 'Custom Dark', but the custom theme will work with any IDLE + release, and can be modified." + +- Issue #25224: README.txt is now an idlelib index for IDLE developers and + curious users. The previous user content is now in the IDLE doc and is + redundant. IDLE now means 'Integrated Development and Learning Environment'. 
+ +- Issue #24820: Users can now set breakpoint colors in + Settings -> Custom Highlighting. Original patch by Mark Roseman. + - Issue #24972: Inactive selection background now matches active selection background, as configured by user, on all systems. Found items are now always highlighted on Windows. Initial patch by Mark Roseman. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Oct 4 07:20:01 2015 From: python-checkins at python.org (terry.reedy) Date: Sun, 04 Oct 2015 05:20:01 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?b?KTogbWVyZ2UgMy41?= Message-ID: <20151004052001.20777.20260@psf.io> https://hg.python.org/cpython/rev/f51921883f50 changeset: 98535:f51921883f50 parent: 98531:254cd94b653b parent: 98534:89a1e03b4639 user: Terry Jan Reedy date: Sun Oct 04 01:19:36 2015 -0400 summary: merge 3.5 files: Lib/idlelib/NEWS.txt | 19 +++++++++++++++++++ Misc/NEWS | 21 ++++++++++++++++++++- 2 files changed, 39 insertions(+), 1 deletions(-) diff --git a/Lib/idlelib/NEWS.txt b/Lib/idlelib/NEWS.txt --- a/Lib/idlelib/NEWS.txt +++ b/Lib/idlelib/NEWS.txt @@ -2,6 +2,25 @@ =========================== *Release date: 2017?* +- Issue #24820: IDLE now has an 'IDLE Dark' built-in text color theme. + It is more or less IDLE Classic inverted, with a cobalt blue background. + Strings, comments, keywords, ... are still green, red, orange, ... . + Selecting it displays the following warning and solution. + "IDLE Dark is new in October, 2015. Trying to run earlier versions + of IDLE with it selected will disable colorizing, or worse. + If you might ever run an earlier release of IDLE, then before + exiting this version, either switch to another theme or hit the + 'Save as New Custom Theme' button. The latter requires a new name, + such as 'Custom Dark', but the custom theme will work with any IDLE + release, and can be modified." + +- Issue #25224: README.txt is now an idlelib index for IDLE developers and + curious users. The previous user content is now in the IDLE doc and is + redundant. IDLE now means 'Integrated Development and Learning Environment'. + +- Issue #24820: Users can now set breakpoint colors in + Settings -> Custom Highlighting. Original patch by Mark Roseman. + - Issue #24972: Inactive selection background now matches active selection background, as configured by user, on all systems. Found items are now always highlighted on Windows. Initial patch by Mark Roseman. diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -119,6 +119,25 @@ IDLE ---- +- Issue #24820: IDLE now has an 'IDLE Dark' built-in text color theme. + It is more or less IDLE Classic inverted, with a cobalt blue background. + Strings, comments, keywords, ... are still green, red, orange, ... . + Selecting it displays the following warning and solution. + "IDLE Dark is new in October, 2015. Trying to run earlier versions + of IDLE with it selected will disable colorizing, or worse. + If you might ever run an earlier release of IDLE, then before + exiting this version, either switch to another theme or hit the + 'Save as New Custom Theme' button. The latter requires a new name, + such as 'Custom Dark', but the custom theme will work with any IDLE + release, and can be modified." + +- Issue #25224: README.txt is now an idlelib index for IDLE developers and + curious users. The previous user content is now in the IDLE doc and is + redundant. IDLE now means 'Integrated Development and Learning Environment'. 
+ +- Issue #24820: Users can now set breakpoint colors in + Settings -> Custom Highlighting. Original patch by Mark Roseman. + - Issue #24972: Inactive selection background now matches active selection background, as configured by user, on all systems. Found items are now always highlighted on Windows. Initial patch by Mark Roseman. @@ -556,7 +575,7 @@ - Issue #17527: Add PATCH to wsgiref.validator. Patch from Luca Sbardella. - Issue #24791: Fix grammar regression for call syntax: 'g(\*a or b)'. -p + IDLE ---- -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Oct 4 07:20:02 2015 From: python-checkins at python.org (terry.reedy) Date: Sun, 04 Oct 2015 05:20:02 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzI0ODIw?= =?utf-8?q?=3A_Update_IDLE_NEWS_items=2E?= Message-ID: <20151004052001.20765.35169@psf.io> https://hg.python.org/cpython/rev/233974dfda03 changeset: 98533:233974dfda03 branch: 3.4 parent: 98529:1de01a63f360 user: Terry Jan Reedy date: Sun Oct 04 01:14:51 2015 -0400 summary: Issue #24820: Update IDLE NEWS items. files: Lib/idlelib/NEWS.txt | 19 +++++++++++++++++++ Misc/NEWS | 19 +++++++++++++++++++ 2 files changed, 38 insertions(+), 0 deletions(-) diff --git a/Lib/idlelib/NEWS.txt b/Lib/idlelib/NEWS.txt --- a/Lib/idlelib/NEWS.txt +++ b/Lib/idlelib/NEWS.txt @@ -2,6 +2,25 @@ ========================= *Release date: 2015-??-??* +- Issue #24820: IDLE now has an 'IDLE Dark' built-in text color theme. + It is more or less IDLE Classic inverted, with a cobalt blue background. + Strings, comments, keywords, ... are still green, red, orange, ... . + Selecting it displays the following warning and solution. + "IDLE Dark is new in October, 2015. Trying to run earlier versions + of IDLE with it selected will disable colorizing, or worse. + If you might ever run an earlier release of IDLE, then before + exiting this version, either switch to another theme or hit the + 'Save as New Custom Theme' button. The latter requires a new name, + such as 'Custom Dark', but the custom theme will work with any IDLE + release, and can be modified." + +- Issue #25224: README.txt is now an idlelib index for IDLE developers and + curious users. The previous user content is now in the IDLE doc and is + redundant. IDLE now means 'Integrated Development and Learning Environment'. + +- Issue #24820: Users can now set breakpoint colors in + Settings -> Custom Highlighting. Original patch by Mark Roseman. + - Issue #24972: Inactive selection background now matches active selection background, as configured by user, on all systems. Found items are now always highlighted on Windows. Initial patch by Mark Roseman. diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -492,6 +492,25 @@ IDLE ---- +- Issue #24820: IDLE now has an 'IDLE Dark' built-in text color theme. + It is more or less IDLE Classic inverted, with a cobalt blue background. + Strings, comments, keywords, ... are still green, red, orange, ... . + Selecting it displays the following warning and solution. + "IDLE Dark is new in October, 2015. Trying to run earlier versions + of IDLE with it selected will disable colorizing, or worse. + If you might ever run an earlier release of IDLE, then before + exiting this version, either switch to another theme or hit the + 'Save as New Custom Theme' button. The latter requires a new name, + such as 'Custom Dark', but the custom theme will work with any IDLE + release, and can be modified." 
+ +- Issue #25224: README.txt is now an idlelib index for IDLE developers and + curious users. The previous user content is now in the IDLE doc and is + redundant. IDLE now means 'Integrated Development and Learning Environment'. + +- Issue #24820: Users can now set breakpoint colors in + Settings -> Custom Highlighting. Original patch by Mark Roseman. + - Issue #24972: Inactive selection background now matches active selection background, as configured by user, on all systems. Found items are now always highlighted on Windows. Initial patch by Mark Roseman. -- Repository URL: https://hg.python.org/cpython From solipsis at pitrou.net Sun Oct 4 10:46:22 2015 From: solipsis at pitrou.net (solipsis at pitrou.net) Date: Sun, 04 Oct 2015 08:46:22 +0000 Subject: [Python-checkins] Daily reference leaks (f51921883f50): sum=61494 Message-ID: <20151004084622.20765.66976@psf.io> results for f51921883f50 on branch "default" -------------------------------------------- test_asyncio leaked [3, 0, 0] memory blocks, sum=3 test_capi leaked [5410, 5410, 5410] references, sum=16230 test_capi leaked [1421, 1423, 1423] memory blocks, sum=4267 test_functools leaked [0, 2, 2] memory blocks, sum=4 test_threading leaked [10820, 10820, 10820] references, sum=32460 test_threading leaked [2842, 2844, 2844] memory blocks, sum=8530 Command line was: ['./python', '-m', 'test.regrtest', '-uall', '-R', '3:3:/home/psf-users/antoine/refleaks/reflogAfUcYa', '--timeout', '7200'] From python-checkins at python.org Sun Oct 4 12:53:15 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Sun, 04 Oct 2015 10:53:15 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=282=2E7=29=3A_Make_error_rep?= =?utf-8?q?ort_in_test=5Fcodecs_more_informative=2E?= Message-ID: <20151004105315.128830.66026@psf.io> https://hg.python.org/cpython/rev/45a04eadefd6 changeset: 98536:45a04eadefd6 branch: 2.7 parent: 98532:739cc9ca55cd user: Serhiy Storchaka date: Sun Oct 04 13:52:40 2015 +0300 summary: Make error report in test_codecs more informative. 
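The pattern applied in the diff below is worth a note: Python 2.7's unittest has no subTest(), so to make a multi-case loop report which input failed, each check is wrapped in a try/except that prints the offending bytes and re-raises, keeping the original traceback. A stripped-down sketch of the same idea (the single test case is taken from the utf-16-le data in the diff):

    tests = [(b'\x00\xdcA\x00', u'\ufffdA')]    # lone low surrogate, then 'A'
    for raw, expected in tests:
        try:
            assert raw.decode('utf-16le', 'replace') == expected
        except:
            print 'raw=%r' % raw                # Python 2 print statement, as on this branch
            raise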
files: Lib/test/test_codecs.py | 36 ++++++++++++++++++++-------- 1 files changed, 26 insertions(+), 10 deletions(-) diff --git a/Lib/test/test_codecs.py b/Lib/test/test_codecs.py --- a/Lib/test/test_codecs.py +++ b/Lib/test/test_codecs.py @@ -573,9 +573,13 @@ (b'\x00\xdcA\x00', u'\ufffdA'), ] for raw, expected in tests: - self.assertRaises(UnicodeDecodeError, codecs.utf_16_le_decode, - raw, 'strict', True) - self.assertEqual(raw.decode('utf-16le', 'replace'), expected) + try: + with self.assertRaises(UnicodeDecodeError): + codecs.utf_16_le_decode(raw, 'strict', True) + self.assertEqual(raw.decode('utf-16le', 'replace'), expected) + except: + print 'raw=%r' % raw + raise class UTF16BETest(ReadTest): encoding = "utf-16-be" @@ -610,9 +614,13 @@ (b'\xdc\x00\x00A', u'\ufffdA'), ] for raw, expected in tests: - self.assertRaises(UnicodeDecodeError, codecs.utf_16_be_decode, - raw, 'strict', True) - self.assertEqual(raw.decode('utf-16be', 'replace'), expected) + try: + with self.assertRaises(UnicodeDecodeError): + codecs.utf_16_be_decode(raw, 'strict', True) + self.assertEqual(raw.decode('utf-16be', 'replace'), expected) + except: + print 'raw=%r' % raw + raise class UTF8Test(ReadTest): encoding = "utf-8" @@ -704,9 +712,13 @@ ('a+IKw\xffb', u'a\u20ac\ufffdb'), ] for raw, expected in tests: - self.assertRaises(UnicodeDecodeError, codecs.utf_7_decode, - raw, 'strict', True) - self.assertEqual(raw.decode('utf-7', 'replace'), expected) + try: + with self.assertRaises(UnicodeDecodeError): + codecs.utf_7_decode(raw, 'strict', True) + self.assertEqual(raw.decode('utf-7', 'replace'), expected) + except: + print 'raw=%r' % raw + raise def test_nonbmp(self): self.assertEqual(u'\U000104A0'.encode(self.encoding), '+2AHcoA-') @@ -740,7 +752,11 @@ ('a+IKwgrNgBA-b', u'a\u20ac\u20ac\ufffdb'), ] for raw, expected in tests: - self.assertEqual(raw.decode('utf-7', 'replace'), expected) + try: + self.assertEqual(raw.decode('utf-7', 'replace'), expected) + except: + print 'raw=%r' % raw + raise class UTF16ExTest(unittest.TestCase): -- Repository URL: https://hg.python.org/cpython From solipsis at pitrou.net Mon Oct 5 10:48:11 2015 From: solipsis at pitrou.net (solipsis at pitrou.net) Date: Mon, 05 Oct 2015 08:48:11 +0000 Subject: [Python-checkins] Daily reference leaks (f51921883f50): sum=61491 Message-ID: <20151005084810.7258.29002@psf.io> results for f51921883f50 on branch "default" -------------------------------------------- test_capi leaked [5410, 5410, 5410] references, sum=16230 test_capi leaked [1421, 1423, 1423] memory blocks, sum=4267 test_functools leaked [0, 2, 2] memory blocks, sum=4 test_threading leaked [10820, 10820, 10820] references, sum=32460 test_threading leaked [2842, 2844, 2844] memory blocks, sum=8530 Command line was: ['./python', '-m', 'test.regrtest', '-uall', '-R', '3:3:/home/psf-users/antoine/refleaks/reflogZ7hcsa', '--timeout', '7200'] From python-checkins at python.org Mon Oct 5 13:44:01 2015 From: python-checkins at python.org (victor.stinner) Date: Mon, 05 Oct 2015 11:44:01 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2325301=3A_The_UTF-?= =?utf-8?q?8_decoder_is_now_up_to_15_times_as_fast_for_error?= Message-ID: <20151005114359.97718.6957@psf.io> https://hg.python.org/cpython/rev/3152e4038d97 changeset: 98537:3152e4038d97 parent: 98535:f51921883f50 user: Victor Stinner date: Mon Oct 05 13:43:50 2015 +0200 summary: Issue #25301: The UTF-8 decoder is now up to 15 times as fast for error handlers: ``ignore``, ``replace`` and ``surrogateescape``. 
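For readers skimming the diff that follows, a small sketch (not part of the changeset) of how the error handlers named in the summary treat invalid UTF-8; the byte string and expected results are the ones used by the new test_decode_error cases:

    data = b'[\x80\xff]'                    # two bytes that are invalid in UTF-8
    assert data.decode('utf-8', 'ignore') == '[]'
    assert data.decode('utf-8', 'replace') == '[\ufffd\ufffd]'
    assert data.decode('utf-8', 'surrogateescape') == '[\udc80\udcff]'
    assert data.decode('utf-8', 'backslashreplace') == '[\\x80\\xff]'
    # surrogateescape is lossless: encoding with the same handler restores the bytes
    assert '[\udc80\udcff]'.encode('utf-8', 'surrogateescape') == data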
files: Doc/whatsnew/3.6.rst | 3 + Lib/test/test_codecs.py | 12 +++++++ Misc/NEWS | 3 + Objects/unicodeobject.c | 48 +++++++++++++++++++++++----- 4 files changed, 57 insertions(+), 9 deletions(-) diff --git a/Doc/whatsnew/3.6.rst b/Doc/whatsnew/3.6.rst --- a/Doc/whatsnew/3.6.rst +++ b/Doc/whatsnew/3.6.rst @@ -123,6 +123,9 @@ * The UTF-8 encoder is now up to 75 times as fast for error handlers: ``ignore``, ``replace``, ``surrogateescape``, ``surrogatepass``. +* The UTF-8 decoder is now up to 15 times as fast for error handlers: + ``ignore``, ``replace`` and ``surrogateescape``. + Build and C API Changes ======================= diff --git a/Lib/test/test_codecs.py b/Lib/test/test_codecs.py --- a/Lib/test/test_codecs.py +++ b/Lib/test/test_codecs.py @@ -788,6 +788,18 @@ self.check_state_handling_decode(self.encoding, u, u.encode(self.encoding)) + def test_decode_error(self): + for data, error_handler, expected in ( + (b'[\x80\xff]', 'ignore', '[]'), + (b'[\x80\xff]', 'replace', '[\ufffd\ufffd]'), + (b'[\x80\xff]', 'surrogateescape', '[\udc80\udcff]'), + (b'[\x80\xff]', 'backslashreplace', '[\\x80\\xff]'), + ): + with self.subTest(data=data, error_handler=error_handler, + expected=expected): + self.assertEqual(data.decode(self.encoding, error_handler), + expected) + def test_lone_surrogates(self): super().test_lone_surrogates() # not sure if this is making sense for diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,6 +10,9 @@ Core and Builtins ----------------- +* Issue #25301: The UTF-8 decoder is now up to 15 times as fast for error + handlers: ``ignore``, ``replace`` and ``surrogateescape``. + - Issue #24848: Fixed a number of bugs in UTF-7 decoding of misformed data. - Issue #25267: The UTF-8 encoder is now up to 75 times as fast for error diff --git a/Objects/unicodeobject.c b/Objects/unicodeobject.c --- a/Objects/unicodeobject.c +++ b/Objects/unicodeobject.c @@ -4714,8 +4714,9 @@ Py_ssize_t startinpos; Py_ssize_t endinpos; const char *errmsg = ""; - PyObject *errorHandler = NULL; + PyObject *error_handler_obj = NULL; PyObject *exc = NULL; + _Py_error_handler error_handler = _Py_ERROR_UNKNOWN; if (size == 0) { if (consumed) @@ -4740,6 +4741,7 @@ while (s < end) { Py_UCS4 ch; int kind = writer.kind; + if (kind == PyUnicode_1BYTE_KIND) { if (PyUnicode_IS_ASCII(writer.buffer)) ch = asciilib_utf8_decode(&s, end, writer.data, &writer.pos); @@ -4778,24 +4780,52 @@ continue; } - if (unicode_decode_call_errorhandler_writer( - errors, &errorHandler, - "utf-8", errmsg, - &starts, &end, &startinpos, &endinpos, &exc, &s, - &writer)) - goto onError; + if (error_handler == _Py_ERROR_UNKNOWN) + error_handler = get_error_handler(errors); + + switch (error_handler) { + case _Py_ERROR_IGNORE: + s += (endinpos - startinpos); + break; + + case _Py_ERROR_REPLACE: + if (_PyUnicodeWriter_WriteCharInline(&writer, 0xfffd) < 0) + goto onError; + s += (endinpos - startinpos); + break; + + case _Py_ERROR_SURROGATEESCAPE: + if (_PyUnicodeWriter_PrepareKind(&writer, PyUnicode_2BYTE_KIND) < 0) + goto onError; + for (Py_ssize_t i=startinpos; i https://hg.python.org/cpython/rev/5b9ffea7e7c3 changeset: 98538:5b9ffea7e7c3 user: Victor Stinner date: Mon Oct 05 13:49:26 2015 +0200 summary: Issue #25301: Fix compatibility with ISO C90 files: Objects/unicodeobject.c | 6 +++++- 1 files changed, 5 insertions(+), 1 deletions(-) diff --git a/Objects/unicodeobject.c b/Objects/unicodeobject.c --- a/Objects/unicodeobject.c +++ b/Objects/unicodeobject.c @@ -4795,9 +4795,12 @@ break; case 
_Py_ERROR_SURROGATEESCAPE: + { + Py_ssize_t i; + if (_PyUnicodeWriter_PrepareKind(&writer, PyUnicode_2BYTE_KIND) < 0) goto onError; - for (Py_ssize_t i=startinpos; i Results for project python_default-nightly, build date 2015-10-05 03:02:04 commit: f51921883f50be87405c81030bf01e6a29211c5e revision date: 2015-10-04 05:19:36 +0000 environment: Haswell-EP cpu: Intel(R) Xeon(R) CPU E5-2699 v3 @ 2.30GHz 2x18 cores, stepping 2, LLC 45 MB mem: 128 GB os: CentOS 7.1 kernel: Linux 3.10.0-229.4.2.el7.x86_64 Baseline results were generated using release v3.4.3, with hash b4cbecbc0781e89a309d03b60a1f75f8499250e6 from 2015-02-25 12:15:33+00:00 ------------------------------------------------------------------------------------------ benchmark relative change since change since current rev with std_dev* last run v3.4.3 regrtest PGO ------------------------------------------------------------------------------------------ :-) django_v2 0.46435% -0.26186% 7.78646% 16.61379% :-| pybench 0.13488% 0.02614% -1.91868% 8.48274% :-( regex_v8 2.58012% 0.03680% -5.19978% 0.37598% :-| nbody 0.13849% 0.77933% -0.26838% 8.96712% :-| json_dump_v2 0.35644% 1.12972% -1.09199% 10.21158% :-| normal_startup 0.76398% 0.35423% 0.50319% 5.00412% ------------------------------------------------------------------------------------------ Note: Benchmark results are measured in seconds. * Relative Standard Deviation (Standard Deviation/Average) Our lab does a nightly source pull and build of the Python project and measures performance changes against the previous stable version and the previous nightly measurement. This is provided as a service to the community so that quality issues with current hardware can be identified quickly. Intel technologies' features and benefits depend on system configuration and may require enabled hardware, software or service activation. Performance varies depending on system configuration. From lp_benchmark_robot at intel.com Mon Oct 5 14:44:12 2015 From: lp_benchmark_robot at intel.com (lp_benchmark_robot at intel.com) Date: Mon, 5 Oct 2015 13:44:12 +0100 Subject: [Python-checkins] Benchmark Results for Python 2.7 2015-10-05 Message-ID: <030f3e1a-f52f-427e-9f24-1fb43d4a8726@irsmsx102.ger.corp.intel.com> No new revisions. 
Here are the previous results: Results for project python_2.7-nightly, build date 2015-10-05 08:13:47 commit: 45a04eadefd6ed13c110059375d2932f6b0d7490 revision date: 2015-10-04 10:52:40 +0000 environment: Haswell-EP cpu: Intel(R) Xeon(R) CPU E5-2699 v3 @ 2.30GHz 2x18 cores, stepping 2, LLC 45 MB mem: 128 GB os: CentOS 7.1 kernel: Linux 3.10.0-229.4.2.el7.x86_64 Baseline results were generated using release v2.7.10, with hash 15c95b7d81dcf821daade360741e00714667653f from 2015-05-23 16:02:14+00:00 ------------------------------------------------------------------------------------------ benchmark relative change since change since current rev with std_dev* last run v2.7.10 regrtest PGO ------------------------------------------------------------------------------------------ :-) django_v2 0.11335% 0.94969% 5.43955% 8.53025% :-) pybench 0.16785% 0.69147% 6.77273% 6.45979% :-| regex_v8 1.07392% 0.01169% -1.82451% 7.81632% :-) nbody 0.15352% 0.15454% 8.67803% 3.87488% :-) json_dump_v2 0.21320% -0.26509% 3.37121% 12.71810% :-( normal_startup 1.91283% -0.47298% -2.14007% 3.41653% :-| ssbench 0.10483% 0.27749% 1.31698% 1.08129% ------------------------------------------------------------------------------------------ Note: Benchmark results for ssbench are measured in requests/second while all other are measured in seconds. * Relative Standard Deviation (Standard Deviation/Average) Our lab does a nightly source pull and build of the Python project and measures performance changes against the previous stable version and the previous nightly measurement. This is provided as a service to the community so that quality issues with current hardware can be identified quickly. Intel technologies' features and benefits depend on system configuration and may require enabled hardware, software or service activation. Performance varies depending on system configuration. From python-checkins at python.org Mon Oct 5 18:30:47 2015 From: python-checkins at python.org (guido.van.rossum) Date: Mon, 05 Oct 2015 16:30:47 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzIzOTcy?= =?utf-8?q?=3A_updates_to_asyncio_datagram_API=2E_By_Chris_Laws=2E?= Message-ID: <20151005163044.7250.52332@psf.io> https://hg.python.org/cpython/rev/5e7e9b131904 changeset: 98539:5e7e9b131904 branch: 3.4 parent: 98533:233974dfda03 user: Guido van Rossum date: Mon Oct 05 09:15:28 2015 -0700 summary: Issue #23972: updates to asyncio datagram API. By Chris Laws. files: Doc/library/asyncio-eventloop.rst | 46 ++- Lib/asyncio/base_events.py | 160 ++++++--- Lib/asyncio/events.py | 40 ++- Lib/test/test_asyncio/test_base_events.py | 140 ++++++++- Lib/test/test_asyncio/test_events.py | 52 +++ Misc/ACKS | 1 + Misc/NEWS | 6 + 7 files changed, 378 insertions(+), 67 deletions(-) diff --git a/Doc/library/asyncio-eventloop.rst b/Doc/library/asyncio-eventloop.rst --- a/Doc/library/asyncio-eventloop.rst +++ b/Doc/library/asyncio-eventloop.rst @@ -283,17 +283,50 @@ (:class:`StreamReader`, :class:`StreamWriter`) instead of a protocol. -.. coroutinemethod:: BaseEventLoop.create_datagram_endpoint(protocol_factory, local_addr=None, remote_addr=None, \*, family=0, proto=0, flags=0) +.. 
coroutinemethod:: BaseEventLoop.create_datagram_endpoint(protocol_factory, local_addr=None, remote_addr=None, \*, family=0, proto=0, flags=0, reuse_address=None, reuse_port=None, allow_broadcast=None, sock=None) Create datagram connection: socket family :py:data:`~socket.AF_INET` or :py:data:`~socket.AF_INET6` depending on *host* (or *family* if specified), - socket type :py:data:`~socket.SOCK_DGRAM`. + socket type :py:data:`~socket.SOCK_DGRAM`. *protocol_factory* must be a + callable returning a :ref:`protocol ` instance. This method is a :ref:`coroutine ` which will try to establish the connection in the background. When successful, the coroutine returns a ``(transport, protocol)`` pair. - See the :meth:`BaseEventLoop.create_connection` method for parameters. + Options changing how the connection is created: + + * *local_addr*, if given, is a ``(local_host, local_port)`` tuple used + to bind the socket to locally. The *local_host* and *local_port* + are looked up using :meth:`getaddrinfo`. + + * *remote_addr*, if given, is a ``(remote_host, remote_port)`` tuple used + to connect the socket to a remote address. The *remote_host* and + *remote_port* are looked up using :meth:`getaddrinfo`. + + * *family*, *proto*, *flags* are the optional address family, protocol + and flags to be passed through to :meth:`getaddrinfo` for *host* + resolution. If given, these should all be integers from the + corresponding :mod:`socket` module constants. + + * *reuse_address* tells the kernel to reuse a local socket in + TIME_WAIT state, without waiting for its natural timeout to + expire. If not specified will automatically be set to True on + UNIX. + + * *reuse_port* tells the kernel to allow this endpoint to be bound to the + same port as other existing endpoints are bound to, so long as they all + set this flag when being created. This option is not supported on Windows + and some UNIX's. If the :py:data:`~socket.SO_REUSEPORT` constant is not + defined then this capability is unsupported. + + * *allow_broadcast* tells the kernel to allow this endpoint to send + messages to the broadcast address. + + * *sock* can optionally be specified in order to use a preexisting, + already connected, :class:`socket.socket` object to be used by the + transport. If specified, *local_addr* and *remote_addr* should be omitted + (must be :const:`None`). On Windows with :class:`ProactorEventLoop`, this method is not supported. @@ -320,7 +353,7 @@ Creating listening connections ------------------------------ -.. coroutinemethod:: BaseEventLoop.create_server(protocol_factory, host=None, port=None, \*, family=socket.AF_UNSPEC, flags=socket.AI_PASSIVE, sock=None, backlog=100, ssl=None, reuse_address=None) +.. coroutinemethod:: BaseEventLoop.create_server(protocol_factory, host=None, port=None, \*, family=socket.AF_UNSPEC, flags=socket.AI_PASSIVE, sock=None, backlog=100, ssl=None, reuse_address=None, reuse_port=None) Create a TCP server (socket type :data:`~socket.SOCK_STREAM`) bound to *host* and *port*. @@ -359,6 +392,11 @@ expire. If not specified will automatically be set to True on UNIX. + * *reuse_port* tells the kernel to allow this endpoint to be bound to the + same port as other existing endpoints are bound to, so long as they all + set this flag when being created. This option is not supported on + Windows. + This method is a :ref:`coroutine `. On Windows with :class:`ProactorEventLoop`, SSL/TLS is not supported. 
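A minimal usage sketch of the datagram options documented above (not part of the changeset; the protocol class, address and port are invented for illustration):

    import asyncio
    import socket

    class EchoServerProtocol(asyncio.DatagramProtocol):
        def connection_made(self, transport):
            self.transport = transport

        def datagram_received(self, data, addr):
            self.transport.sendto(data, addr)        # echo the datagram back

    loop = asyncio.get_event_loop()
    coro = loop.create_datagram_endpoint(
        EchoServerProtocol,
        local_addr=('0.0.0.0', 9999),
        reuse_address=True,
        reuse_port=hasattr(socket, 'SO_REUSEPORT'),  # only where the platform supports it
        allow_broadcast=True)
    transport, protocol = loop.run_until_complete(coro)
    try:
        loop.run_forever()
    finally:
        transport.close()
        loop.close()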
diff --git a/Lib/asyncio/base_events.py b/Lib/asyncio/base_events.py --- a/Lib/asyncio/base_events.py +++ b/Lib/asyncio/base_events.py @@ -700,75 +700,109 @@ @coroutine def create_datagram_endpoint(self, protocol_factory, local_addr=None, remote_addr=None, *, - family=0, proto=0, flags=0): + family=0, proto=0, flags=0, + reuse_address=None, reuse_port=None, + allow_broadcast=None, sock=None): """Create datagram connection.""" - if not (local_addr or remote_addr): - if family == 0: - raise ValueError('unexpected address family') - addr_pairs_info = (((family, proto), (None, None)),) + if sock is not None: + if (local_addr or remote_addr or + family or proto or flags or + reuse_address or reuse_port or allow_broadcast): + # show the problematic kwargs in exception msg + opts = dict(local_addr=local_addr, remote_addr=remote_addr, + family=family, proto=proto, flags=flags, + reuse_address=reuse_address, reuse_port=reuse_port, + allow_broadcast=allow_broadcast) + problems = ', '.join( + '{}={}'.format(k, v) for k, v in opts.items() if v) + raise ValueError( + 'socket modifier keyword arguments can not be used ' + 'when sock is specified. ({})'.format(problems)) + sock.setblocking(False) + r_addr = None else: - # join address by (family, protocol) - addr_infos = collections.OrderedDict() - for idx, addr in ((0, local_addr), (1, remote_addr)): - if addr is not None: - assert isinstance(addr, tuple) and len(addr) == 2, ( - '2-tuple is expected') + if not (local_addr or remote_addr): + if family == 0: + raise ValueError('unexpected address family') + addr_pairs_info = (((family, proto), (None, None)),) + else: + # join address by (family, protocol) + addr_infos = collections.OrderedDict() + for idx, addr in ((0, local_addr), (1, remote_addr)): + if addr is not None: + assert isinstance(addr, tuple) and len(addr) == 2, ( + '2-tuple is expected') - infos = yield from self.getaddrinfo( - *addr, family=family, type=socket.SOCK_DGRAM, - proto=proto, flags=flags) - if not infos: - raise OSError('getaddrinfo() returned empty list') + infos = yield from self.getaddrinfo( + *addr, family=family, type=socket.SOCK_DGRAM, + proto=proto, flags=flags) + if not infos: + raise OSError('getaddrinfo() returned empty list') - for fam, _, pro, _, address in infos: - key = (fam, pro) - if key not in addr_infos: - addr_infos[key] = [None, None] - addr_infos[key][idx] = address + for fam, _, pro, _, address in infos: + key = (fam, pro) + if key not in addr_infos: + addr_infos[key] = [None, None] + addr_infos[key][idx] = address - # each addr has to have info for each (family, proto) pair - addr_pairs_info = [ - (key, addr_pair) for key, addr_pair in addr_infos.items() - if not ((local_addr and addr_pair[0] is None) or - (remote_addr and addr_pair[1] is None))] + # each addr has to have info for each (family, proto) pair + addr_pairs_info = [ + (key, addr_pair) for key, addr_pair in addr_infos.items() + if not ((local_addr and addr_pair[0] is None) or + (remote_addr and addr_pair[1] is None))] - if not addr_pairs_info: - raise ValueError('can not get address information') + if not addr_pairs_info: + raise ValueError('can not get address information') - exceptions = [] + exceptions = [] - for ((family, proto), - (local_address, remote_address)) in addr_pairs_info: - sock = None - r_addr = None - try: - sock = socket.socket( - family=family, type=socket.SOCK_DGRAM, proto=proto) - sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - sock.setblocking(False) + if reuse_address is None: + reuse_address = os.name == 
'posix' and sys.platform != 'cygwin' - if local_addr: - sock.bind(local_address) - if remote_addr: - yield from self.sock_connect(sock, remote_address) - r_addr = remote_address - except OSError as exc: - if sock is not None: - sock.close() - exceptions.append(exc) - except: - if sock is not None: - sock.close() - raise + for ((family, proto), + (local_address, remote_address)) in addr_pairs_info: + sock = None + r_addr = None + try: + sock = socket.socket( + family=family, type=socket.SOCK_DGRAM, proto=proto) + if reuse_address: + sock.setsockopt( + socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + if reuse_port: + if not hasattr(socket, 'SO_REUSEPORT'): + raise ValueError( + 'reuse_port not supported by socket module') + else: + sock.setsockopt( + socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) + if allow_broadcast: + sock.setsockopt( + socket.SOL_SOCKET, socket.SO_BROADCAST, 1) + sock.setblocking(False) + + if local_addr: + sock.bind(local_address) + if remote_addr: + yield from self.sock_connect(sock, remote_address) + r_addr = remote_address + except OSError as exc: + if sock is not None: + sock.close() + exceptions.append(exc) + except: + if sock is not None: + sock.close() + raise + else: + break else: - break - else: - raise exceptions[0] + raise exceptions[0] protocol = protocol_factory() waiter = futures.Future(loop=self) - transport = self._make_datagram_transport(sock, protocol, r_addr, - waiter) + transport = self._make_datagram_transport( + sock, protocol, r_addr, waiter) if self._debug: if local_addr: logger.info("Datagram endpoint local_addr=%r remote_addr=%r " @@ -804,7 +838,8 @@ sock=None, backlog=100, ssl=None, - reuse_address=None): + reuse_address=None, + reuse_port=None): """Create a TCP server. The host parameter can be a string, in that case the TCP server is bound @@ -857,8 +892,15 @@ continue sockets.append(sock) if reuse_address: - sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, - True) + sock.setsockopt( + socket.SOL_SOCKET, socket.SO_REUSEADDR, True) + if reuse_port: + if not hasattr(socket, 'SO_REUSEPORT'): + raise ValueError( + 'reuse_port not supported by socket module') + else: + sock.setsockopt( + socket.SOL_SOCKET, socket.SO_REUSEPORT, True) # Disable IPv4/IPv6 dual stack support (enabled by # default on Linux) which makes a single socket # listen on both address families. diff --git a/Lib/asyncio/events.py b/Lib/asyncio/events.py --- a/Lib/asyncio/events.py +++ b/Lib/asyncio/events.py @@ -297,7 +297,8 @@ def create_server(self, protocol_factory, host=None, port=None, *, family=socket.AF_UNSPEC, flags=socket.AI_PASSIVE, - sock=None, backlog=100, ssl=None, reuse_address=None): + sock=None, backlog=100, ssl=None, reuse_address=None, + reuse_port=None): """A coroutine which creates a TCP server bound to host and port. The return value is a Server object which can be used to stop @@ -327,6 +328,11 @@ TIME_WAIT state, without waiting for its natural timeout to expire. If not specified will automatically be set to True on UNIX. + + reuse_port tells the kernel to allow this endpoint to be bound to + the same port as other existing endpoints are bound to, so long as + they all set this flag when being created. This option is not + supported on Windows. 
""" raise NotImplementedError @@ -358,7 +364,37 @@ def create_datagram_endpoint(self, protocol_factory, local_addr=None, remote_addr=None, *, - family=0, proto=0, flags=0): + family=0, proto=0, flags=0, + reuse_address=None, reuse_port=None, + allow_broadcast=None, sock=None): + """A coroutine which creates a datagram endpoint. + + This method will try to establish the endpoint in the background. + When successful, the coroutine returns a (transport, protocol) pair. + + protocol_factory must be a callable returning a protocol instance. + + socket family AF_INET or socket.AF_INET6 depending on host (or + family if specified), socket type SOCK_DGRAM. + + reuse_address tells the kernel to reuse a local socket in + TIME_WAIT state, without waiting for its natural timeout to + expire. If not specified it will automatically be set to True on + UNIX. + + reuse_port tells the kernel to allow this endpoint to be bound to + the same port as other existing endpoints are bound to, so long as + they all set this flag when being created. This option is not + supported on Windows and some UNIX's. If the + :py:data:`~socket.SO_REUSEPORT` constant is not defined then this + capability is unsupported. + + allow_broadcast tells the kernel to allow this endpoint to send + messages to the broadcast address. + + sock can optionally be specified in order to use a preexisting + socket object. + """ raise NotImplementedError # Pipes and subprocesses. diff --git a/Lib/test/test_asyncio/test_base_events.py b/Lib/test/test_asyncio/test_base_events.py --- a/Lib/test/test_asyncio/test_base_events.py +++ b/Lib/test/test_asyncio/test_base_events.py @@ -3,6 +3,7 @@ import errno import logging import math +import os import socket import sys import threading @@ -790,11 +791,11 @@ class MyDatagramProto(asyncio.DatagramProtocol): done = None - def __init__(self, create_future=False): + def __init__(self, create_future=False, loop=None): self.state = 'INITIAL' self.nbytes = 0 if create_future: - self.done = asyncio.Future() + self.done = asyncio.Future(loop=loop) def connection_made(self, transport): self.transport = transport @@ -1100,6 +1101,19 @@ self.assertRaises(OSError, self.loop.run_until_complete, f) @mock.patch('asyncio.base_events.socket') + def test_create_server_nosoreuseport(self, m_socket): + m_socket.getaddrinfo = socket.getaddrinfo + m_socket.SOCK_STREAM = socket.SOCK_STREAM + m_socket.SOL_SOCKET = socket.SOL_SOCKET + del m_socket.SO_REUSEPORT + m_socket.socket.return_value = mock.Mock() + + f = self.loop.create_server( + MyProto, '0.0.0.0', 0, reuse_port=True) + + self.assertRaises(ValueError, self.loop.run_until_complete, f) + + @mock.patch('asyncio.base_events.socket') def test_create_server_cant_bind(self, m_socket): class Err(OSError): @@ -1199,6 +1213,128 @@ self.assertRaises(Err, self.loop.run_until_complete, fut) self.assertTrue(m_sock.close.called) + def test_create_datagram_endpoint_sock(self): + sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + fut = self.loop.create_datagram_endpoint( + lambda: MyDatagramProto(create_future=True, loop=self.loop), + sock=sock) + transport, protocol = self.loop.run_until_complete(fut) + transport.close() + self.loop.run_until_complete(protocol.done) + self.assertEqual('CLOSED', protocol.state) + + def test_create_datagram_endpoint_sock_sockopts(self): + fut = self.loop.create_datagram_endpoint( + MyDatagramProto, local_addr=('127.0.0.1', 0), sock=object()) + self.assertRaises(ValueError, self.loop.run_until_complete, fut) + + fut = 
self.loop.create_datagram_endpoint( + MyDatagramProto, remote_addr=('127.0.0.1', 0), sock=object()) + self.assertRaises(ValueError, self.loop.run_until_complete, fut) + + fut = self.loop.create_datagram_endpoint( + MyDatagramProto, family=1, sock=object()) + self.assertRaises(ValueError, self.loop.run_until_complete, fut) + + fut = self.loop.create_datagram_endpoint( + MyDatagramProto, proto=1, sock=object()) + self.assertRaises(ValueError, self.loop.run_until_complete, fut) + + fut = self.loop.create_datagram_endpoint( + MyDatagramProto, flags=1, sock=object()) + self.assertRaises(ValueError, self.loop.run_until_complete, fut) + + fut = self.loop.create_datagram_endpoint( + MyDatagramProto, reuse_address=True, sock=object()) + self.assertRaises(ValueError, self.loop.run_until_complete, fut) + + fut = self.loop.create_datagram_endpoint( + MyDatagramProto, reuse_port=True, sock=object()) + self.assertRaises(ValueError, self.loop.run_until_complete, fut) + + fut = self.loop.create_datagram_endpoint( + MyDatagramProto, allow_broadcast=True, sock=object()) + self.assertRaises(ValueError, self.loop.run_until_complete, fut) + + def test_create_datagram_endpoint_sockopts(self): + # Socket options should not be applied unless asked for. + # SO_REUSEADDR defaults to on for UNIX. + # SO_REUSEPORT is not available on all platforms. + + coro = self.loop.create_datagram_endpoint( + lambda: MyDatagramProto(create_future=True, loop=self.loop), + local_addr=('127.0.0.1', 0)) + transport, protocol = self.loop.run_until_complete(coro) + sock = transport.get_extra_info('socket') + + reuse_address_default_on = ( + os.name == 'posix' and sys.platform != 'cygwin') + reuseport_supported = hasattr(socket, 'SO_REUSEPORT') + + if reuse_address_default_on: + self.assertTrue( + sock.getsockopt( + socket.SOL_SOCKET, socket.SO_REUSEADDR)) + else: + self.assertFalse( + sock.getsockopt( + socket.SOL_SOCKET, socket.SO_REUSEADDR)) + if reuseport_supported: + self.assertFalse( + sock.getsockopt( + socket.SOL_SOCKET, socket.SO_REUSEPORT)) + self.assertFalse( + sock.getsockopt( + socket.SOL_SOCKET, socket.SO_BROADCAST)) + + transport.close() + self.loop.run_until_complete(protocol.done) + self.assertEqual('CLOSED', protocol.state) + + coro = self.loop.create_datagram_endpoint( + lambda: MyDatagramProto(create_future=True, loop=self.loop), + local_addr=('127.0.0.1', 0), + reuse_address=True, + reuse_port=reuseport_supported, + allow_broadcast=True) + transport, protocol = self.loop.run_until_complete(coro) + sock = transport.get_extra_info('socket') + + self.assertTrue( + sock.getsockopt( + socket.SOL_SOCKET, socket.SO_REUSEADDR)) + if reuseport_supported: + self.assertTrue( + sock.getsockopt( + socket.SOL_SOCKET, socket.SO_REUSEPORT)) + else: + self.assertFalse( + sock.getsockopt( + socket.SOL_SOCKET, socket.SO_REUSEPORT)) + self.assertTrue( + sock.getsockopt( + socket.SOL_SOCKET, socket.SO_BROADCAST)) + + transport.close() + self.loop.run_until_complete(protocol.done) + self.assertEqual('CLOSED', protocol.state) + + @mock.patch('asyncio.base_events.socket') + def test_create_datagram_endpoint_nosoreuseport(self, m_socket): + m_socket.getaddrinfo = socket.getaddrinfo + m_socket.SOCK_DGRAM = socket.SOCK_DGRAM + m_socket.SOL_SOCKET = socket.SOL_SOCKET + del m_socket.SO_REUSEPORT + m_socket.socket.return_value = mock.Mock() + + coro = self.loop.create_datagram_endpoint( + lambda: MyDatagramProto(loop=self.loop), + local_addr=('127.0.0.1', 0), + reuse_address=False, + reuse_port=True) + + self.assertRaises(ValueError, 
self.loop.run_until_complete, coro) + def test_accept_connection_retry(self): sock = mock.Mock() sock.accept.side_effect = BlockingIOError() diff --git a/Lib/test/test_asyncio/test_events.py b/Lib/test/test_asyncio/test_events.py --- a/Lib/test/test_asyncio/test_events.py +++ b/Lib/test/test_asyncio/test_events.py @@ -814,6 +814,32 @@ # close server server.close() + @unittest.skipUnless(hasattr(socket, 'SO_REUSEPORT'), 'No SO_REUSEPORT') + def test_create_server_reuse_port(self): + proto = MyProto(self.loop) + f = self.loop.create_server( + lambda: proto, '0.0.0.0', 0) + server = self.loop.run_until_complete(f) + self.assertEqual(len(server.sockets), 1) + sock = server.sockets[0] + self.assertFalse( + sock.getsockopt( + socket.SOL_SOCKET, socket.SO_REUSEPORT)) + server.close() + + test_utils.run_briefly(self.loop) + + proto = MyProto(self.loop) + f = self.loop.create_server( + lambda: proto, '0.0.0.0', 0, reuse_port=True) + server = self.loop.run_until_complete(f) + self.assertEqual(len(server.sockets), 1) + sock = server.sockets[0] + self.assertTrue( + sock.getsockopt( + socket.SOL_SOCKET, socket.SO_REUSEPORT)) + server.close() + def _make_unix_server(self, factory, **kwargs): path = test_utils.gen_unix_socket_path() self.addCleanup(lambda: os.path.exists(path) and os.unlink(path)) @@ -1264,6 +1290,32 @@ self.assertEqual('CLOSED', client.state) server.transport.close() + def test_create_datagram_endpoint_sock(self): + sock = None + local_address = ('127.0.0.1', 0) + infos = self.loop.run_until_complete( + self.loop.getaddrinfo( + *local_address, type=socket.SOCK_DGRAM)) + for family, type, proto, cname, address in infos: + try: + sock = socket.socket(family=family, type=type, proto=proto) + sock.setblocking(False) + sock.bind(address) + except: + pass + else: + break + else: + assert False, 'Can not create socket.' + + f = self.loop.create_connection( + lambda: MyDatagramProto(loop=self.loop), sock=sock) + tr, pr = self.loop.run_until_complete(f) + self.assertIsInstance(tr, asyncio.Transport) + self.assertIsInstance(pr, MyDatagramProto) + tr.close() + self.loop.run_until_complete(pr.done) + def test_internal_fds(self): loop = self.create_event_loop() if not isinstance(loop, selector_events.BaseSelectorEventLoop): diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -789,6 +789,7 @@ Simon Law Julia Lawall Chris Lawrence +Chris Laws Brian Leair Mathieu Leduc-Hamel Amandine Lee diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -90,6 +90,12 @@ Library ------- +- Issue #23972: Updates asyncio datagram create method allowing reuseport + and reuseaddr socket options to be set prior to binding the socket. + Mirroring the existing asyncio create_server method the reuseaddr option + for datagram sockets defaults to True if the O/S is 'posix' (except if the + platform is Cygwin). Patch by Chris Laws. + - Issue #25304: Add asyncio.run_coroutine_threadsafe(). This lets you submit a coroutine to a loop from another thread, returning a concurrent.futures.Future. By Vincent Michel. 
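As an aside on the Issue #25304 entry just above, a minimal sketch of calling the new API from another thread (the coroutine and timeout values are invented for illustration; the generator style keeps it valid on 3.4):

    import asyncio
    import threading

    @asyncio.coroutine
    def compute():
        yield from asyncio.sleep(0.1)
        return 42

    loop = asyncio.new_event_loop()

    def run_loop():
        asyncio.set_event_loop(loop)    # make it this thread's default loop
        loop.run_forever()

    threading.Thread(target=run_loop, daemon=True).start()

    # From a thread that is *not* running the loop:
    fut = asyncio.run_coroutine_threadsafe(compute(), loop)
    print(fut.result(timeout=5))        # concurrent.futures.Future -> prints 42
    loop.call_soon_threadsafe(loop.stop)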
-- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Oct 5 18:30:47 2015 From: python-checkins at python.org (guido.van.rossum) Date: Mon, 05 Oct 2015 16:30:47 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_Issue_=2323972=3A_updates_to_asyncio_datagram_API=2E_By_Chris_?= =?utf-8?b?TGF3cy4gKE1lcmdlIDMuNC0+My41Lik=?= Message-ID: <20151005163044.3273.59596@psf.io> https://hg.python.org/cpython/rev/ba956289fe66 changeset: 98540:ba956289fe66 branch: 3.5 parent: 98534:89a1e03b4639 parent: 98539:5e7e9b131904 user: Guido van Rossum date: Mon Oct 05 09:19:11 2015 -0700 summary: Issue #23972: updates to asyncio datagram API. By Chris Laws. (Merge 3.4->3.5.) files: Doc/library/asyncio-eventloop.rst | 46 ++- Lib/asyncio/base_events.py | 160 ++++++--- Lib/asyncio/events.py | 40 ++- Lib/test/test_asyncio/test_base_events.py | 140 ++++++++- Lib/test/test_asyncio/test_events.py | 52 +++ Misc/ACKS | 1 + Misc/NEWS | 6 + 7 files changed, 378 insertions(+), 67 deletions(-) diff --git a/Doc/library/asyncio-eventloop.rst b/Doc/library/asyncio-eventloop.rst --- a/Doc/library/asyncio-eventloop.rst +++ b/Doc/library/asyncio-eventloop.rst @@ -285,17 +285,50 @@ (:class:`StreamReader`, :class:`StreamWriter`) instead of a protocol. -.. coroutinemethod:: BaseEventLoop.create_datagram_endpoint(protocol_factory, local_addr=None, remote_addr=None, \*, family=0, proto=0, flags=0) +.. coroutinemethod:: BaseEventLoop.create_datagram_endpoint(protocol_factory, local_addr=None, remote_addr=None, \*, family=0, proto=0, flags=0, reuse_address=None, reuse_port=None, allow_broadcast=None, sock=None) Create datagram connection: socket family :py:data:`~socket.AF_INET` or :py:data:`~socket.AF_INET6` depending on *host* (or *family* if specified), - socket type :py:data:`~socket.SOCK_DGRAM`. + socket type :py:data:`~socket.SOCK_DGRAM`. *protocol_factory* must be a + callable returning a :ref:`protocol ` instance. This method is a :ref:`coroutine ` which will try to establish the connection in the background. When successful, the coroutine returns a ``(transport, protocol)`` pair. - See the :meth:`BaseEventLoop.create_connection` method for parameters. + Options changing how the connection is created: + + * *local_addr*, if given, is a ``(local_host, local_port)`` tuple used + to bind the socket to locally. The *local_host* and *local_port* + are looked up using :meth:`getaddrinfo`. + + * *remote_addr*, if given, is a ``(remote_host, remote_port)`` tuple used + to connect the socket to a remote address. The *remote_host* and + *remote_port* are looked up using :meth:`getaddrinfo`. + + * *family*, *proto*, *flags* are the optional address family, protocol + and flags to be passed through to :meth:`getaddrinfo` for *host* + resolution. If given, these should all be integers from the + corresponding :mod:`socket` module constants. + + * *reuse_address* tells the kernel to reuse a local socket in + TIME_WAIT state, without waiting for its natural timeout to + expire. If not specified will automatically be set to True on + UNIX. + + * *reuse_port* tells the kernel to allow this endpoint to be bound to the + same port as other existing endpoints are bound to, so long as they all + set this flag when being created. This option is not supported on Windows + and some UNIX's. If the :py:data:`~socket.SO_REUSEPORT` constant is not + defined then this capability is unsupported. 
+ + * *allow_broadcast* tells the kernel to allow this endpoint to send + messages to the broadcast address. + + * *sock* can optionally be specified in order to use a preexisting, + already connected, :class:`socket.socket` object to be used by the + transport. If specified, *local_addr* and *remote_addr* should be omitted + (must be :const:`None`). On Windows with :class:`ProactorEventLoop`, this method is not supported. @@ -322,7 +355,7 @@ Creating listening connections ------------------------------ -.. coroutinemethod:: BaseEventLoop.create_server(protocol_factory, host=None, port=None, \*, family=socket.AF_UNSPEC, flags=socket.AI_PASSIVE, sock=None, backlog=100, ssl=None, reuse_address=None) +.. coroutinemethod:: BaseEventLoop.create_server(protocol_factory, host=None, port=None, \*, family=socket.AF_UNSPEC, flags=socket.AI_PASSIVE, sock=None, backlog=100, ssl=None, reuse_address=None, reuse_port=None) Create a TCP server (socket type :data:`~socket.SOCK_STREAM`) bound to *host* and *port*. @@ -361,6 +394,11 @@ expire. If not specified will automatically be set to True on UNIX. + * *reuse_port* tells the kernel to allow this endpoint to be bound to the + same port as other existing endpoints are bound to, so long as they all + set this flag when being created. This option is not supported on + Windows. + This method is a :ref:`coroutine `. .. versionchanged:: 3.5 diff --git a/Lib/asyncio/base_events.py b/Lib/asyncio/base_events.py --- a/Lib/asyncio/base_events.py +++ b/Lib/asyncio/base_events.py @@ -700,75 +700,109 @@ @coroutine def create_datagram_endpoint(self, protocol_factory, local_addr=None, remote_addr=None, *, - family=0, proto=0, flags=0): + family=0, proto=0, flags=0, + reuse_address=None, reuse_port=None, + allow_broadcast=None, sock=None): """Create datagram connection.""" - if not (local_addr or remote_addr): - if family == 0: - raise ValueError('unexpected address family') - addr_pairs_info = (((family, proto), (None, None)),) + if sock is not None: + if (local_addr or remote_addr or + family or proto or flags or + reuse_address or reuse_port or allow_broadcast): + # show the problematic kwargs in exception msg + opts = dict(local_addr=local_addr, remote_addr=remote_addr, + family=family, proto=proto, flags=flags, + reuse_address=reuse_address, reuse_port=reuse_port, + allow_broadcast=allow_broadcast) + problems = ', '.join( + '{}={}'.format(k, v) for k, v in opts.items() if v) + raise ValueError( + 'socket modifier keyword arguments can not be used ' + 'when sock is specified. 
({})'.format(problems)) + sock.setblocking(False) + r_addr = None else: - # join address by (family, protocol) - addr_infos = collections.OrderedDict() - for idx, addr in ((0, local_addr), (1, remote_addr)): - if addr is not None: - assert isinstance(addr, tuple) and len(addr) == 2, ( - '2-tuple is expected') + if not (local_addr or remote_addr): + if family == 0: + raise ValueError('unexpected address family') + addr_pairs_info = (((family, proto), (None, None)),) + else: + # join address by (family, protocol) + addr_infos = collections.OrderedDict() + for idx, addr in ((0, local_addr), (1, remote_addr)): + if addr is not None: + assert isinstance(addr, tuple) and len(addr) == 2, ( + '2-tuple is expected') - infos = yield from self.getaddrinfo( - *addr, family=family, type=socket.SOCK_DGRAM, - proto=proto, flags=flags) - if not infos: - raise OSError('getaddrinfo() returned empty list') + infos = yield from self.getaddrinfo( + *addr, family=family, type=socket.SOCK_DGRAM, + proto=proto, flags=flags) + if not infos: + raise OSError('getaddrinfo() returned empty list') - for fam, _, pro, _, address in infos: - key = (fam, pro) - if key not in addr_infos: - addr_infos[key] = [None, None] - addr_infos[key][idx] = address + for fam, _, pro, _, address in infos: + key = (fam, pro) + if key not in addr_infos: + addr_infos[key] = [None, None] + addr_infos[key][idx] = address - # each addr has to have info for each (family, proto) pair - addr_pairs_info = [ - (key, addr_pair) for key, addr_pair in addr_infos.items() - if not ((local_addr and addr_pair[0] is None) or - (remote_addr and addr_pair[1] is None))] + # each addr has to have info for each (family, proto) pair + addr_pairs_info = [ + (key, addr_pair) for key, addr_pair in addr_infos.items() + if not ((local_addr and addr_pair[0] is None) or + (remote_addr and addr_pair[1] is None))] - if not addr_pairs_info: - raise ValueError('can not get address information') + if not addr_pairs_info: + raise ValueError('can not get address information') - exceptions = [] + exceptions = [] - for ((family, proto), - (local_address, remote_address)) in addr_pairs_info: - sock = None - r_addr = None - try: - sock = socket.socket( - family=family, type=socket.SOCK_DGRAM, proto=proto) - sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - sock.setblocking(False) + if reuse_address is None: + reuse_address = os.name == 'posix' and sys.platform != 'cygwin' - if local_addr: - sock.bind(local_address) - if remote_addr: - yield from self.sock_connect(sock, remote_address) - r_addr = remote_address - except OSError as exc: - if sock is not None: - sock.close() - exceptions.append(exc) - except: - if sock is not None: - sock.close() - raise + for ((family, proto), + (local_address, remote_address)) in addr_pairs_info: + sock = None + r_addr = None + try: + sock = socket.socket( + family=family, type=socket.SOCK_DGRAM, proto=proto) + if reuse_address: + sock.setsockopt( + socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + if reuse_port: + if not hasattr(socket, 'SO_REUSEPORT'): + raise ValueError( + 'reuse_port not supported by socket module') + else: + sock.setsockopt( + socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) + if allow_broadcast: + sock.setsockopt( + socket.SOL_SOCKET, socket.SO_BROADCAST, 1) + sock.setblocking(False) + + if local_addr: + sock.bind(local_address) + if remote_addr: + yield from self.sock_connect(sock, remote_address) + r_addr = remote_address + except OSError as exc: + if sock is not None: + sock.close() + exceptions.append(exc) + except: + if 
sock is not None: + sock.close() + raise + else: + break else: - break - else: - raise exceptions[0] + raise exceptions[0] protocol = protocol_factory() waiter = futures.Future(loop=self) - transport = self._make_datagram_transport(sock, protocol, r_addr, - waiter) + transport = self._make_datagram_transport( + sock, protocol, r_addr, waiter) if self._debug: if local_addr: logger.info("Datagram endpoint local_addr=%r remote_addr=%r " @@ -804,7 +838,8 @@ sock=None, backlog=100, ssl=None, - reuse_address=None): + reuse_address=None, + reuse_port=None): """Create a TCP server. The host parameter can be a string, in that case the TCP server is bound @@ -857,8 +892,15 @@ continue sockets.append(sock) if reuse_address: - sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, - True) + sock.setsockopt( + socket.SOL_SOCKET, socket.SO_REUSEADDR, True) + if reuse_port: + if not hasattr(socket, 'SO_REUSEPORT'): + raise ValueError( + 'reuse_port not supported by socket module') + else: + sock.setsockopt( + socket.SOL_SOCKET, socket.SO_REUSEPORT, True) # Disable IPv4/IPv6 dual stack support (enabled by # default on Linux) which makes a single socket # listen on both address families. diff --git a/Lib/asyncio/events.py b/Lib/asyncio/events.py --- a/Lib/asyncio/events.py +++ b/Lib/asyncio/events.py @@ -297,7 +297,8 @@ def create_server(self, protocol_factory, host=None, port=None, *, family=socket.AF_UNSPEC, flags=socket.AI_PASSIVE, - sock=None, backlog=100, ssl=None, reuse_address=None): + sock=None, backlog=100, ssl=None, reuse_address=None, + reuse_port=None): """A coroutine which creates a TCP server bound to host and port. The return value is a Server object which can be used to stop @@ -327,6 +328,11 @@ TIME_WAIT state, without waiting for its natural timeout to expire. If not specified will automatically be set to True on UNIX. + + reuse_port tells the kernel to allow this endpoint to be bound to + the same port as other existing endpoints are bound to, so long as + they all set this flag when being created. This option is not + supported on Windows. """ raise NotImplementedError @@ -358,7 +364,37 @@ def create_datagram_endpoint(self, protocol_factory, local_addr=None, remote_addr=None, *, - family=0, proto=0, flags=0): + family=0, proto=0, flags=0, + reuse_address=None, reuse_port=None, + allow_broadcast=None, sock=None): + """A coroutine which creates a datagram endpoint. + + This method will try to establish the endpoint in the background. + When successful, the coroutine returns a (transport, protocol) pair. + + protocol_factory must be a callable returning a protocol instance. + + socket family AF_INET or socket.AF_INET6 depending on host (or + family if specified), socket type SOCK_DGRAM. + + reuse_address tells the kernel to reuse a local socket in + TIME_WAIT state, without waiting for its natural timeout to + expire. If not specified it will automatically be set to True on + UNIX. + + reuse_port tells the kernel to allow this endpoint to be bound to + the same port as other existing endpoints are bound to, so long as + they all set this flag when being created. This option is not + supported on Windows and some UNIX's. If the + :py:data:`~socket.SO_REUSEPORT` constant is not defined then this + capability is unsupported. + + allow_broadcast tells the kernel to allow this endpoint to send + messages to the broadcast address. + + sock can optionally be specified in order to use a preexisting + socket object. + """ raise NotImplementedError # Pipes and subprocesses. 
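
The docstring above describes the new sock argument; a minimal sketch of handing a
pre-configured socket to the loop (illustrative only; the loopback address is an
assumption):

    import asyncio
    import socket

    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.setblocking(False)
    sock.bind(('127.0.0.1', 0))   # pick any free port

    loop = asyncio.get_event_loop()
    # When sock= is given, the other socket-modifier keywords must stay unset,
    # otherwise create_datagram_endpoint() raises ValueError.
    transport, protocol = loop.run_until_complete(
        loop.create_datagram_endpoint(asyncio.DatagramProtocol, sock=sock))
    transport.close()
    loop.close()
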
diff --git a/Lib/test/test_asyncio/test_base_events.py b/Lib/test/test_asyncio/test_base_events.py --- a/Lib/test/test_asyncio/test_base_events.py +++ b/Lib/test/test_asyncio/test_base_events.py @@ -3,6 +3,7 @@ import errno import logging import math +import os import socket import sys import threading @@ -790,11 +791,11 @@ class MyDatagramProto(asyncio.DatagramProtocol): done = None - def __init__(self, create_future=False): + def __init__(self, create_future=False, loop=None): self.state = 'INITIAL' self.nbytes = 0 if create_future: - self.done = asyncio.Future() + self.done = asyncio.Future(loop=loop) def connection_made(self, transport): self.transport = transport @@ -1100,6 +1101,19 @@ self.assertRaises(OSError, self.loop.run_until_complete, f) @mock.patch('asyncio.base_events.socket') + def test_create_server_nosoreuseport(self, m_socket): + m_socket.getaddrinfo = socket.getaddrinfo + m_socket.SOCK_STREAM = socket.SOCK_STREAM + m_socket.SOL_SOCKET = socket.SOL_SOCKET + del m_socket.SO_REUSEPORT + m_socket.socket.return_value = mock.Mock() + + f = self.loop.create_server( + MyProto, '0.0.0.0', 0, reuse_port=True) + + self.assertRaises(ValueError, self.loop.run_until_complete, f) + + @mock.patch('asyncio.base_events.socket') def test_create_server_cant_bind(self, m_socket): class Err(OSError): @@ -1199,6 +1213,128 @@ self.assertRaises(Err, self.loop.run_until_complete, fut) self.assertTrue(m_sock.close.called) + def test_create_datagram_endpoint_sock(self): + sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + fut = self.loop.create_datagram_endpoint( + lambda: MyDatagramProto(create_future=True, loop=self.loop), + sock=sock) + transport, protocol = self.loop.run_until_complete(fut) + transport.close() + self.loop.run_until_complete(protocol.done) + self.assertEqual('CLOSED', protocol.state) + + def test_create_datagram_endpoint_sock_sockopts(self): + fut = self.loop.create_datagram_endpoint( + MyDatagramProto, local_addr=('127.0.0.1', 0), sock=object()) + self.assertRaises(ValueError, self.loop.run_until_complete, fut) + + fut = self.loop.create_datagram_endpoint( + MyDatagramProto, remote_addr=('127.0.0.1', 0), sock=object()) + self.assertRaises(ValueError, self.loop.run_until_complete, fut) + + fut = self.loop.create_datagram_endpoint( + MyDatagramProto, family=1, sock=object()) + self.assertRaises(ValueError, self.loop.run_until_complete, fut) + + fut = self.loop.create_datagram_endpoint( + MyDatagramProto, proto=1, sock=object()) + self.assertRaises(ValueError, self.loop.run_until_complete, fut) + + fut = self.loop.create_datagram_endpoint( + MyDatagramProto, flags=1, sock=object()) + self.assertRaises(ValueError, self.loop.run_until_complete, fut) + + fut = self.loop.create_datagram_endpoint( + MyDatagramProto, reuse_address=True, sock=object()) + self.assertRaises(ValueError, self.loop.run_until_complete, fut) + + fut = self.loop.create_datagram_endpoint( + MyDatagramProto, reuse_port=True, sock=object()) + self.assertRaises(ValueError, self.loop.run_until_complete, fut) + + fut = self.loop.create_datagram_endpoint( + MyDatagramProto, allow_broadcast=True, sock=object()) + self.assertRaises(ValueError, self.loop.run_until_complete, fut) + + def test_create_datagram_endpoint_sockopts(self): + # Socket options should not be applied unless asked for. + # SO_REUSEADDR defaults to on for UNIX. + # SO_REUSEPORT is not available on all platforms. 
+ + coro = self.loop.create_datagram_endpoint( + lambda: MyDatagramProto(create_future=True, loop=self.loop), + local_addr=('127.0.0.1', 0)) + transport, protocol = self.loop.run_until_complete(coro) + sock = transport.get_extra_info('socket') + + reuse_address_default_on = ( + os.name == 'posix' and sys.platform != 'cygwin') + reuseport_supported = hasattr(socket, 'SO_REUSEPORT') + + if reuse_address_default_on: + self.assertTrue( + sock.getsockopt( + socket.SOL_SOCKET, socket.SO_REUSEADDR)) + else: + self.assertFalse( + sock.getsockopt( + socket.SOL_SOCKET, socket.SO_REUSEADDR)) + if reuseport_supported: + self.assertFalse( + sock.getsockopt( + socket.SOL_SOCKET, socket.SO_REUSEPORT)) + self.assertFalse( + sock.getsockopt( + socket.SOL_SOCKET, socket.SO_BROADCAST)) + + transport.close() + self.loop.run_until_complete(protocol.done) + self.assertEqual('CLOSED', protocol.state) + + coro = self.loop.create_datagram_endpoint( + lambda: MyDatagramProto(create_future=True, loop=self.loop), + local_addr=('127.0.0.1', 0), + reuse_address=True, + reuse_port=reuseport_supported, + allow_broadcast=True) + transport, protocol = self.loop.run_until_complete(coro) + sock = transport.get_extra_info('socket') + + self.assertTrue( + sock.getsockopt( + socket.SOL_SOCKET, socket.SO_REUSEADDR)) + if reuseport_supported: + self.assertTrue( + sock.getsockopt( + socket.SOL_SOCKET, socket.SO_REUSEPORT)) + else: + self.assertFalse( + sock.getsockopt( + socket.SOL_SOCKET, socket.SO_REUSEPORT)) + self.assertTrue( + sock.getsockopt( + socket.SOL_SOCKET, socket.SO_BROADCAST)) + + transport.close() + self.loop.run_until_complete(protocol.done) + self.assertEqual('CLOSED', protocol.state) + + @mock.patch('asyncio.base_events.socket') + def test_create_datagram_endpoint_nosoreuseport(self, m_socket): + m_socket.getaddrinfo = socket.getaddrinfo + m_socket.SOCK_DGRAM = socket.SOCK_DGRAM + m_socket.SOL_SOCKET = socket.SOL_SOCKET + del m_socket.SO_REUSEPORT + m_socket.socket.return_value = mock.Mock() + + coro = self.loop.create_datagram_endpoint( + lambda: MyDatagramProto(loop=self.loop), + local_addr=('127.0.0.1', 0), + reuse_address=False, + reuse_port=True) + + self.assertRaises(ValueError, self.loop.run_until_complete, coro) + def test_accept_connection_retry(self): sock = mock.Mock() sock.accept.side_effect = BlockingIOError() diff --git a/Lib/test/test_asyncio/test_events.py b/Lib/test/test_asyncio/test_events.py --- a/Lib/test/test_asyncio/test_events.py +++ b/Lib/test/test_asyncio/test_events.py @@ -814,6 +814,32 @@ # close server server.close() + @unittest.skipUnless(hasattr(socket, 'SO_REUSEPORT'), 'No SO_REUSEPORT') + def test_create_server_reuse_port(self): + proto = MyProto(self.loop) + f = self.loop.create_server( + lambda: proto, '0.0.0.0', 0) + server = self.loop.run_until_complete(f) + self.assertEqual(len(server.sockets), 1) + sock = server.sockets[0] + self.assertFalse( + sock.getsockopt( + socket.SOL_SOCKET, socket.SO_REUSEPORT)) + server.close() + + test_utils.run_briefly(self.loop) + + proto = MyProto(self.loop) + f = self.loop.create_server( + lambda: proto, '0.0.0.0', 0, reuse_port=True) + server = self.loop.run_until_complete(f) + self.assertEqual(len(server.sockets), 1) + sock = server.sockets[0] + self.assertTrue( + sock.getsockopt( + socket.SOL_SOCKET, socket.SO_REUSEPORT)) + server.close() + def _make_unix_server(self, factory, **kwargs): path = test_utils.gen_unix_socket_path() self.addCleanup(lambda: os.path.exists(path) and os.unlink(path)) @@ -1264,6 +1290,32 @@ self.assertEqual('CLOSED', 
client.state) server.transport.close() + def test_create_datagram_endpoint_sock(self): + sock = None + local_address = ('127.0.0.1', 0) + infos = self.loop.run_until_complete( + self.loop.getaddrinfo( + *local_address, type=socket.SOCK_DGRAM)) + for family, type, proto, cname, address in infos: + try: + sock = socket.socket(family=family, type=type, proto=proto) + sock.setblocking(False) + sock.bind(address) + except: + pass + else: + break + else: + assert False, 'Can not create socket.' + + f = self.loop.create_connection( + lambda: MyDatagramProto(loop=self.loop), sock=sock) + tr, pr = self.loop.run_until_complete(f) + self.assertIsInstance(tr, asyncio.Transport) + self.assertIsInstance(pr, MyDatagramProto) + tr.close() + self.loop.run_until_complete(pr.done) + def test_internal_fds(self): loop = self.create_event_loop() if not isinstance(loop, selector_events.BaseSelectorEventLoop): diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -813,6 +813,7 @@ Julia Lawall Chris Lawrence Mark Lawrence +Chris Laws Brian Leair Mathieu Leduc-Hamel Amandine Lee diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -34,6 +34,12 @@ Library ------- +- Issue #23972: Updates asyncio datagram create method allowing reuseport + and reuseaddr socket options to be set prior to binding the socket. + Mirroring the existing asyncio create_server method the reuseaddr option + for datagram sockets defaults to True if the O/S is 'posix' (except if the + platform is Cygwin). Patch by Chris Laws. + - Issue #25304: Add asyncio.run_coroutine_threadsafe(). This lets you submit a coroutine to a loop from another thread, returning a concurrent.futures.Future. By Vincent Michel. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Oct 5 18:30:49 2015 From: python-checkins at python.org (guido.van.rossum) Date: Mon, 05 Oct 2015 16:30:49 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2323972=3A_updates_to_asyncio_datagram_API=2E_By_?= =?utf-8?b?Q2hyaXMgTGF3cy4gKE1lcmdlIDMuNS0+My42Lik=?= Message-ID: <20151005163045.128852.41658@psf.io> https://hg.python.org/cpython/rev/c0f1f882737c changeset: 98541:c0f1f882737c parent: 98538:5b9ffea7e7c3 parent: 98540:ba956289fe66 user: Guido van Rossum date: Mon Oct 05 09:29:32 2015 -0700 summary: Issue #23972: updates to asyncio datagram API. By Chris Laws. (Merge 3.5->3.6.) files: Doc/library/asyncio-eventloop.rst | 46 ++- Lib/asyncio/base_events.py | 160 ++++++--- Lib/asyncio/events.py | 40 ++- Lib/test/test_asyncio/test_base_events.py | 140 ++++++++- Lib/test/test_asyncio/test_events.py | 52 +++ Misc/ACKS | 1 + Misc/NEWS | 6 + 7 files changed, 378 insertions(+), 67 deletions(-) diff --git a/Doc/library/asyncio-eventloop.rst b/Doc/library/asyncio-eventloop.rst --- a/Doc/library/asyncio-eventloop.rst +++ b/Doc/library/asyncio-eventloop.rst @@ -285,17 +285,50 @@ (:class:`StreamReader`, :class:`StreamWriter`) instead of a protocol. -.. coroutinemethod:: BaseEventLoop.create_datagram_endpoint(protocol_factory, local_addr=None, remote_addr=None, \*, family=0, proto=0, flags=0) +.. 
coroutinemethod:: BaseEventLoop.create_datagram_endpoint(protocol_factory, local_addr=None, remote_addr=None, \*, family=0, proto=0, flags=0, reuse_address=None, reuse_port=None, allow_broadcast=None, sock=None) Create datagram connection: socket family :py:data:`~socket.AF_INET` or :py:data:`~socket.AF_INET6` depending on *host* (or *family* if specified), - socket type :py:data:`~socket.SOCK_DGRAM`. + socket type :py:data:`~socket.SOCK_DGRAM`. *protocol_factory* must be a + callable returning a :ref:`protocol ` instance. This method is a :ref:`coroutine ` which will try to establish the connection in the background. When successful, the coroutine returns a ``(transport, protocol)`` pair. - See the :meth:`BaseEventLoop.create_connection` method for parameters. + Options changing how the connection is created: + + * *local_addr*, if given, is a ``(local_host, local_port)`` tuple used + to bind the socket to locally. The *local_host* and *local_port* + are looked up using :meth:`getaddrinfo`. + + * *remote_addr*, if given, is a ``(remote_host, remote_port)`` tuple used + to connect the socket to a remote address. The *remote_host* and + *remote_port* are looked up using :meth:`getaddrinfo`. + + * *family*, *proto*, *flags* are the optional address family, protocol + and flags to be passed through to :meth:`getaddrinfo` for *host* + resolution. If given, these should all be integers from the + corresponding :mod:`socket` module constants. + + * *reuse_address* tells the kernel to reuse a local socket in + TIME_WAIT state, without waiting for its natural timeout to + expire. If not specified will automatically be set to True on + UNIX. + + * *reuse_port* tells the kernel to allow this endpoint to be bound to the + same port as other existing endpoints are bound to, so long as they all + set this flag when being created. This option is not supported on Windows + and some UNIX's. If the :py:data:`~socket.SO_REUSEPORT` constant is not + defined then this capability is unsupported. + + * *allow_broadcast* tells the kernel to allow this endpoint to send + messages to the broadcast address. + + * *sock* can optionally be specified in order to use a preexisting, + already connected, :class:`socket.socket` object to be used by the + transport. If specified, *local_addr* and *remote_addr* should be omitted + (must be :const:`None`). On Windows with :class:`ProactorEventLoop`, this method is not supported. @@ -322,7 +355,7 @@ Creating listening connections ------------------------------ -.. coroutinemethod:: BaseEventLoop.create_server(protocol_factory, host=None, port=None, \*, family=socket.AF_UNSPEC, flags=socket.AI_PASSIVE, sock=None, backlog=100, ssl=None, reuse_address=None) +.. coroutinemethod:: BaseEventLoop.create_server(protocol_factory, host=None, port=None, \*, family=socket.AF_UNSPEC, flags=socket.AI_PASSIVE, sock=None, backlog=100, ssl=None, reuse_address=None, reuse_port=None) Create a TCP server (socket type :data:`~socket.SOCK_STREAM`) bound to *host* and *port*. @@ -361,6 +394,11 @@ expire. If not specified will automatically be set to True on UNIX. + * *reuse_port* tells the kernel to allow this endpoint to be bound to the + same port as other existing endpoints are bound to, so long as they all + set this flag when being created. This option is not supported on + Windows. + This method is a :ref:`coroutine `. .. 
versionchanged:: 3.5 diff --git a/Lib/asyncio/base_events.py b/Lib/asyncio/base_events.py --- a/Lib/asyncio/base_events.py +++ b/Lib/asyncio/base_events.py @@ -700,75 +700,109 @@ @coroutine def create_datagram_endpoint(self, protocol_factory, local_addr=None, remote_addr=None, *, - family=0, proto=0, flags=0): + family=0, proto=0, flags=0, + reuse_address=None, reuse_port=None, + allow_broadcast=None, sock=None): """Create datagram connection.""" - if not (local_addr or remote_addr): - if family == 0: - raise ValueError('unexpected address family') - addr_pairs_info = (((family, proto), (None, None)),) + if sock is not None: + if (local_addr or remote_addr or + family or proto or flags or + reuse_address or reuse_port or allow_broadcast): + # show the problematic kwargs in exception msg + opts = dict(local_addr=local_addr, remote_addr=remote_addr, + family=family, proto=proto, flags=flags, + reuse_address=reuse_address, reuse_port=reuse_port, + allow_broadcast=allow_broadcast) + problems = ', '.join( + '{}={}'.format(k, v) for k, v in opts.items() if v) + raise ValueError( + 'socket modifier keyword arguments can not be used ' + 'when sock is specified. ({})'.format(problems)) + sock.setblocking(False) + r_addr = None else: - # join address by (family, protocol) - addr_infos = collections.OrderedDict() - for idx, addr in ((0, local_addr), (1, remote_addr)): - if addr is not None: - assert isinstance(addr, tuple) and len(addr) == 2, ( - '2-tuple is expected') + if not (local_addr or remote_addr): + if family == 0: + raise ValueError('unexpected address family') + addr_pairs_info = (((family, proto), (None, None)),) + else: + # join address by (family, protocol) + addr_infos = collections.OrderedDict() + for idx, addr in ((0, local_addr), (1, remote_addr)): + if addr is not None: + assert isinstance(addr, tuple) and len(addr) == 2, ( + '2-tuple is expected') - infos = yield from self.getaddrinfo( - *addr, family=family, type=socket.SOCK_DGRAM, - proto=proto, flags=flags) - if not infos: - raise OSError('getaddrinfo() returned empty list') + infos = yield from self.getaddrinfo( + *addr, family=family, type=socket.SOCK_DGRAM, + proto=proto, flags=flags) + if not infos: + raise OSError('getaddrinfo() returned empty list') - for fam, _, pro, _, address in infos: - key = (fam, pro) - if key not in addr_infos: - addr_infos[key] = [None, None] - addr_infos[key][idx] = address + for fam, _, pro, _, address in infos: + key = (fam, pro) + if key not in addr_infos: + addr_infos[key] = [None, None] + addr_infos[key][idx] = address - # each addr has to have info for each (family, proto) pair - addr_pairs_info = [ - (key, addr_pair) for key, addr_pair in addr_infos.items() - if not ((local_addr and addr_pair[0] is None) or - (remote_addr and addr_pair[1] is None))] + # each addr has to have info for each (family, proto) pair + addr_pairs_info = [ + (key, addr_pair) for key, addr_pair in addr_infos.items() + if not ((local_addr and addr_pair[0] is None) or + (remote_addr and addr_pair[1] is None))] - if not addr_pairs_info: - raise ValueError('can not get address information') + if not addr_pairs_info: + raise ValueError('can not get address information') - exceptions = [] + exceptions = [] - for ((family, proto), - (local_address, remote_address)) in addr_pairs_info: - sock = None - r_addr = None - try: - sock = socket.socket( - family=family, type=socket.SOCK_DGRAM, proto=proto) - sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - sock.setblocking(False) + if reuse_address is None: + 
reuse_address = os.name == 'posix' and sys.platform != 'cygwin' - if local_addr: - sock.bind(local_address) - if remote_addr: - yield from self.sock_connect(sock, remote_address) - r_addr = remote_address - except OSError as exc: - if sock is not None: - sock.close() - exceptions.append(exc) - except: - if sock is not None: - sock.close() - raise + for ((family, proto), + (local_address, remote_address)) in addr_pairs_info: + sock = None + r_addr = None + try: + sock = socket.socket( + family=family, type=socket.SOCK_DGRAM, proto=proto) + if reuse_address: + sock.setsockopt( + socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + if reuse_port: + if not hasattr(socket, 'SO_REUSEPORT'): + raise ValueError( + 'reuse_port not supported by socket module') + else: + sock.setsockopt( + socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) + if allow_broadcast: + sock.setsockopt( + socket.SOL_SOCKET, socket.SO_BROADCAST, 1) + sock.setblocking(False) + + if local_addr: + sock.bind(local_address) + if remote_addr: + yield from self.sock_connect(sock, remote_address) + r_addr = remote_address + except OSError as exc: + if sock is not None: + sock.close() + exceptions.append(exc) + except: + if sock is not None: + sock.close() + raise + else: + break else: - break - else: - raise exceptions[0] + raise exceptions[0] protocol = protocol_factory() waiter = futures.Future(loop=self) - transport = self._make_datagram_transport(sock, protocol, r_addr, - waiter) + transport = self._make_datagram_transport( + sock, protocol, r_addr, waiter) if self._debug: if local_addr: logger.info("Datagram endpoint local_addr=%r remote_addr=%r " @@ -804,7 +838,8 @@ sock=None, backlog=100, ssl=None, - reuse_address=None): + reuse_address=None, + reuse_port=None): """Create a TCP server. The host parameter can be a string, in that case the TCP server is bound @@ -857,8 +892,15 @@ continue sockets.append(sock) if reuse_address: - sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, - True) + sock.setsockopt( + socket.SOL_SOCKET, socket.SO_REUSEADDR, True) + if reuse_port: + if not hasattr(socket, 'SO_REUSEPORT'): + raise ValueError( + 'reuse_port not supported by socket module') + else: + sock.setsockopt( + socket.SOL_SOCKET, socket.SO_REUSEPORT, True) # Disable IPv4/IPv6 dual stack support (enabled by # default on Linux) which makes a single socket # listen on both address families. diff --git a/Lib/asyncio/events.py b/Lib/asyncio/events.py --- a/Lib/asyncio/events.py +++ b/Lib/asyncio/events.py @@ -297,7 +297,8 @@ def create_server(self, protocol_factory, host=None, port=None, *, family=socket.AF_UNSPEC, flags=socket.AI_PASSIVE, - sock=None, backlog=100, ssl=None, reuse_address=None): + sock=None, backlog=100, ssl=None, reuse_address=None, + reuse_port=None): """A coroutine which creates a TCP server bound to host and port. The return value is a Server object which can be used to stop @@ -327,6 +328,11 @@ TIME_WAIT state, without waiting for its natural timeout to expire. If not specified will automatically be set to True on UNIX. + + reuse_port tells the kernel to allow this endpoint to be bound to + the same port as other existing endpoints are bound to, so long as + they all set this flag when being created. This option is not + supported on Windows. 
""" raise NotImplementedError @@ -358,7 +364,37 @@ def create_datagram_endpoint(self, protocol_factory, local_addr=None, remote_addr=None, *, - family=0, proto=0, flags=0): + family=0, proto=0, flags=0, + reuse_address=None, reuse_port=None, + allow_broadcast=None, sock=None): + """A coroutine which creates a datagram endpoint. + + This method will try to establish the endpoint in the background. + When successful, the coroutine returns a (transport, protocol) pair. + + protocol_factory must be a callable returning a protocol instance. + + socket family AF_INET or socket.AF_INET6 depending on host (or + family if specified), socket type SOCK_DGRAM. + + reuse_address tells the kernel to reuse a local socket in + TIME_WAIT state, without waiting for its natural timeout to + expire. If not specified it will automatically be set to True on + UNIX. + + reuse_port tells the kernel to allow this endpoint to be bound to + the same port as other existing endpoints are bound to, so long as + they all set this flag when being created. This option is not + supported on Windows and some UNIX's. If the + :py:data:`~socket.SO_REUSEPORT` constant is not defined then this + capability is unsupported. + + allow_broadcast tells the kernel to allow this endpoint to send + messages to the broadcast address. + + sock can optionally be specified in order to use a preexisting + socket object. + """ raise NotImplementedError # Pipes and subprocesses. diff --git a/Lib/test/test_asyncio/test_base_events.py b/Lib/test/test_asyncio/test_base_events.py --- a/Lib/test/test_asyncio/test_base_events.py +++ b/Lib/test/test_asyncio/test_base_events.py @@ -3,6 +3,7 @@ import errno import logging import math +import os import socket import sys import threading @@ -790,11 +791,11 @@ class MyDatagramProto(asyncio.DatagramProtocol): done = None - def __init__(self, create_future=False): + def __init__(self, create_future=False, loop=None): self.state = 'INITIAL' self.nbytes = 0 if create_future: - self.done = asyncio.Future() + self.done = asyncio.Future(loop=loop) def connection_made(self, transport): self.transport = transport @@ -1100,6 +1101,19 @@ self.assertRaises(OSError, self.loop.run_until_complete, f) @mock.patch('asyncio.base_events.socket') + def test_create_server_nosoreuseport(self, m_socket): + m_socket.getaddrinfo = socket.getaddrinfo + m_socket.SOCK_STREAM = socket.SOCK_STREAM + m_socket.SOL_SOCKET = socket.SOL_SOCKET + del m_socket.SO_REUSEPORT + m_socket.socket.return_value = mock.Mock() + + f = self.loop.create_server( + MyProto, '0.0.0.0', 0, reuse_port=True) + + self.assertRaises(ValueError, self.loop.run_until_complete, f) + + @mock.patch('asyncio.base_events.socket') def test_create_server_cant_bind(self, m_socket): class Err(OSError): @@ -1199,6 +1213,128 @@ self.assertRaises(Err, self.loop.run_until_complete, fut) self.assertTrue(m_sock.close.called) + def test_create_datagram_endpoint_sock(self): + sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + fut = self.loop.create_datagram_endpoint( + lambda: MyDatagramProto(create_future=True, loop=self.loop), + sock=sock) + transport, protocol = self.loop.run_until_complete(fut) + transport.close() + self.loop.run_until_complete(protocol.done) + self.assertEqual('CLOSED', protocol.state) + + def test_create_datagram_endpoint_sock_sockopts(self): + fut = self.loop.create_datagram_endpoint( + MyDatagramProto, local_addr=('127.0.0.1', 0), sock=object()) + self.assertRaises(ValueError, self.loop.run_until_complete, fut) + + fut = 
self.loop.create_datagram_endpoint( + MyDatagramProto, remote_addr=('127.0.0.1', 0), sock=object()) + self.assertRaises(ValueError, self.loop.run_until_complete, fut) + + fut = self.loop.create_datagram_endpoint( + MyDatagramProto, family=1, sock=object()) + self.assertRaises(ValueError, self.loop.run_until_complete, fut) + + fut = self.loop.create_datagram_endpoint( + MyDatagramProto, proto=1, sock=object()) + self.assertRaises(ValueError, self.loop.run_until_complete, fut) + + fut = self.loop.create_datagram_endpoint( + MyDatagramProto, flags=1, sock=object()) + self.assertRaises(ValueError, self.loop.run_until_complete, fut) + + fut = self.loop.create_datagram_endpoint( + MyDatagramProto, reuse_address=True, sock=object()) + self.assertRaises(ValueError, self.loop.run_until_complete, fut) + + fut = self.loop.create_datagram_endpoint( + MyDatagramProto, reuse_port=True, sock=object()) + self.assertRaises(ValueError, self.loop.run_until_complete, fut) + + fut = self.loop.create_datagram_endpoint( + MyDatagramProto, allow_broadcast=True, sock=object()) + self.assertRaises(ValueError, self.loop.run_until_complete, fut) + + def test_create_datagram_endpoint_sockopts(self): + # Socket options should not be applied unless asked for. + # SO_REUSEADDR defaults to on for UNIX. + # SO_REUSEPORT is not available on all platforms. + + coro = self.loop.create_datagram_endpoint( + lambda: MyDatagramProto(create_future=True, loop=self.loop), + local_addr=('127.0.0.1', 0)) + transport, protocol = self.loop.run_until_complete(coro) + sock = transport.get_extra_info('socket') + + reuse_address_default_on = ( + os.name == 'posix' and sys.platform != 'cygwin') + reuseport_supported = hasattr(socket, 'SO_REUSEPORT') + + if reuse_address_default_on: + self.assertTrue( + sock.getsockopt( + socket.SOL_SOCKET, socket.SO_REUSEADDR)) + else: + self.assertFalse( + sock.getsockopt( + socket.SOL_SOCKET, socket.SO_REUSEADDR)) + if reuseport_supported: + self.assertFalse( + sock.getsockopt( + socket.SOL_SOCKET, socket.SO_REUSEPORT)) + self.assertFalse( + sock.getsockopt( + socket.SOL_SOCKET, socket.SO_BROADCAST)) + + transport.close() + self.loop.run_until_complete(protocol.done) + self.assertEqual('CLOSED', protocol.state) + + coro = self.loop.create_datagram_endpoint( + lambda: MyDatagramProto(create_future=True, loop=self.loop), + local_addr=('127.0.0.1', 0), + reuse_address=True, + reuse_port=reuseport_supported, + allow_broadcast=True) + transport, protocol = self.loop.run_until_complete(coro) + sock = transport.get_extra_info('socket') + + self.assertTrue( + sock.getsockopt( + socket.SOL_SOCKET, socket.SO_REUSEADDR)) + if reuseport_supported: + self.assertTrue( + sock.getsockopt( + socket.SOL_SOCKET, socket.SO_REUSEPORT)) + else: + self.assertFalse( + sock.getsockopt( + socket.SOL_SOCKET, socket.SO_REUSEPORT)) + self.assertTrue( + sock.getsockopt( + socket.SOL_SOCKET, socket.SO_BROADCAST)) + + transport.close() + self.loop.run_until_complete(protocol.done) + self.assertEqual('CLOSED', protocol.state) + + @mock.patch('asyncio.base_events.socket') + def test_create_datagram_endpoint_nosoreuseport(self, m_socket): + m_socket.getaddrinfo = socket.getaddrinfo + m_socket.SOCK_DGRAM = socket.SOCK_DGRAM + m_socket.SOL_SOCKET = socket.SOL_SOCKET + del m_socket.SO_REUSEPORT + m_socket.socket.return_value = mock.Mock() + + coro = self.loop.create_datagram_endpoint( + lambda: MyDatagramProto(loop=self.loop), + local_addr=('127.0.0.1', 0), + reuse_address=False, + reuse_port=True) + + self.assertRaises(ValueError, 
self.loop.run_until_complete, coro) + def test_accept_connection_retry(self): sock = mock.Mock() sock.accept.side_effect = BlockingIOError() diff --git a/Lib/test/test_asyncio/test_events.py b/Lib/test/test_asyncio/test_events.py --- a/Lib/test/test_asyncio/test_events.py +++ b/Lib/test/test_asyncio/test_events.py @@ -814,6 +814,32 @@ # close server server.close() + @unittest.skipUnless(hasattr(socket, 'SO_REUSEPORT'), 'No SO_REUSEPORT') + def test_create_server_reuse_port(self): + proto = MyProto(self.loop) + f = self.loop.create_server( + lambda: proto, '0.0.0.0', 0) + server = self.loop.run_until_complete(f) + self.assertEqual(len(server.sockets), 1) + sock = server.sockets[0] + self.assertFalse( + sock.getsockopt( + socket.SOL_SOCKET, socket.SO_REUSEPORT)) + server.close() + + test_utils.run_briefly(self.loop) + + proto = MyProto(self.loop) + f = self.loop.create_server( + lambda: proto, '0.0.0.0', 0, reuse_port=True) + server = self.loop.run_until_complete(f) + self.assertEqual(len(server.sockets), 1) + sock = server.sockets[0] + self.assertTrue( + sock.getsockopt( + socket.SOL_SOCKET, socket.SO_REUSEPORT)) + server.close() + def _make_unix_server(self, factory, **kwargs): path = test_utils.gen_unix_socket_path() self.addCleanup(lambda: os.path.exists(path) and os.unlink(path)) @@ -1264,6 +1290,32 @@ self.assertEqual('CLOSED', client.state) server.transport.close() + def test_create_datagram_endpoint_sock(self): + sock = None + local_address = ('127.0.0.1', 0) + infos = self.loop.run_until_complete( + self.loop.getaddrinfo( + *local_address, type=socket.SOCK_DGRAM)) + for family, type, proto, cname, address in infos: + try: + sock = socket.socket(family=family, type=type, proto=proto) + sock.setblocking(False) + sock.bind(address) + except: + pass + else: + break + else: + assert False, 'Can not create socket.' + + f = self.loop.create_connection( + lambda: MyDatagramProto(loop=self.loop), sock=sock) + tr, pr = self.loop.run_until_complete(f) + self.assertIsInstance(tr, asyncio.Transport) + self.assertIsInstance(pr, MyDatagramProto) + tr.close() + self.loop.run_until_complete(pr.done) + def test_internal_fds(self): loop = self.create_event_loop() if not isinstance(loop, selector_events.BaseSelectorEventLoop): diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -813,6 +813,7 @@ Julia Lawall Chris Lawrence Mark Lawrence +Chris Laws Brian Leair Mathieu Leduc-Hamel Amandine Lee diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -43,6 +43,12 @@ Library ------- +- Issue #23972: Updates asyncio datagram create method allowing reuseport + and reuseaddr socket options to be set prior to binding the socket. + Mirroring the existing asyncio create_server method the reuseaddr option + for datagram sockets defaults to True if the O/S is 'posix' (except if the + platform is Cygwin). Patch by Chris Laws. + - Issue #25304: Add asyncio.run_coroutine_threadsafe(). This lets you submit a coroutine to a loop from another thread, returning a concurrent.futures.Future. By Vincent Michel. 
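
As a companion sketch for the server side of this change (illustrative only; the port
number and the use of asyncio.Protocol as the factory are assumptions), two listeners
can share a port where SO_REUSEPORT exists:

    import asyncio
    import socket

    loop = asyncio.get_event_loop()
    if hasattr(socket, 'SO_REUSEPORT'):
        # Both servers bind the same address/port thanks to reuse_port=True.
        servers = []
        for _ in range(2):
            coro = loop.create_server(
                asyncio.Protocol, '127.0.0.1', 8888, reuse_port=True)
            servers.append(loop.run_until_complete(coro))
        for server in servers:
            server.close()
            loop.run_until_complete(server.wait_closed())
    loop.close()
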
-- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Oct 5 19:35:44 2015 From: python-checkins at python.org (steve.dower) Date: Mon, 05 Oct 2015 17:35:44 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2325316=3A_distutils_raises_OSError_instead_of_Di?= =?utf-8?q?stutilsPlatformError_when?= Message-ID: <20151005173544.3273.18324@psf.io> https://hg.python.org/cpython/rev/07161dd8a078 changeset: 98543:07161dd8a078 parent: 98541:c0f1f882737c parent: 98542:a2016b29762c user: Steve Dower date: Mon Oct 05 10:35:19 2015 -0700 summary: Issue #25316: distutils raises OSError instead of DistutilsPlatformError when MSVC is not installed. files: Lib/distutils/_msvccompiler.py | 18 ++++++++++-------- Misc/NEWS | 3 +++ 2 files changed, 13 insertions(+), 8 deletions(-) diff --git a/Lib/distutils/_msvccompiler.py b/Lib/distutils/_msvccompiler.py --- a/Lib/distutils/_msvccompiler.py +++ b/Lib/distutils/_msvccompiler.py @@ -28,15 +28,17 @@ from itertools import count def _find_vcvarsall(plat_spec): - with winreg.OpenKeyEx( - winreg.HKEY_LOCAL_MACHINE, - r"Software\Microsoft\VisualStudio\SxS\VC7", - access=winreg.KEY_READ | winreg.KEY_WOW64_32KEY - ) as key: - if not key: - log.debug("Visual C++ is not registered") - return None, None + try: + key = winreg.OpenKeyEx( + winreg.HKEY_LOCAL_MACHINE, + r"Software\Microsoft\VisualStudio\SxS\VC7", + access=winreg.KEY_READ | winreg.KEY_WOW64_32KEY + ) + except OSError: + log.debug("Visual C++ is not registered") + return None, None + with key: best_version = 0 best_dir = None for i in count(): diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -43,6 +43,9 @@ Library ------- +- Issue #25316: distutils raises OSError instead of DistutilsPlatformError + when MSVC is not installed. + - Issue #23972: Updates asyncio datagram create method allowing reuseport and reuseaddr socket options to be set prior to binding the socket. Mirroring the existing asyncio create_server method the reuseaddr option -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Oct 5 19:35:44 2015 From: python-checkins at python.org (steve.dower) Date: Mon, 05 Oct 2015 17:35:44 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy41KTogSXNzdWUgIzI1MzE2?= =?utf-8?q?=3A_distutils_raises_OSError_instead_of_DistutilsPlatformError_?= =?utf-8?q?when?= Message-ID: <20151005173544.20783.81966@psf.io> https://hg.python.org/cpython/rev/a2016b29762c changeset: 98542:a2016b29762c branch: 3.5 parent: 98540:ba956289fe66 user: Steve Dower date: Mon Oct 05 10:35:00 2015 -0700 summary: Issue #25316: distutils raises OSError instead of DistutilsPlatformError when MSVC is not installed. 
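
The fix below hinges on the fact that winreg.OpenKeyEx() signals a missing key by
raising OSError rather than returning a false value; a standalone sketch of that
probing pattern (the helper name is an assumption, not part of the patch):

    import winreg

    def vc_dir_registered():
        # OpenKeyEx raises OSError (FileNotFoundError) when the key is absent,
        # so a plain "if not key" test after the call can never fire.
        try:
            key = winreg.OpenKeyEx(
                winreg.HKEY_LOCAL_MACHINE,
                r"Software\Microsoft\VisualStudio\SxS\VC7",
                access=winreg.KEY_READ | winreg.KEY_WOW64_32KEY)
        except OSError:
            return False
        with key:
            return True
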
files: Lib/distutils/_msvccompiler.py | 18 ++++++++++-------- Misc/NEWS | 3 +++ 2 files changed, 13 insertions(+), 8 deletions(-) diff --git a/Lib/distutils/_msvccompiler.py b/Lib/distutils/_msvccompiler.py --- a/Lib/distutils/_msvccompiler.py +++ b/Lib/distutils/_msvccompiler.py @@ -28,15 +28,17 @@ from itertools import count def _find_vcvarsall(plat_spec): - with winreg.OpenKeyEx( - winreg.HKEY_LOCAL_MACHINE, - r"Software\Microsoft\VisualStudio\SxS\VC7", - access=winreg.KEY_READ | winreg.KEY_WOW64_32KEY - ) as key: - if not key: - log.debug("Visual C++ is not registered") - return None, None + try: + key = winreg.OpenKeyEx( + winreg.HKEY_LOCAL_MACHINE, + r"Software\Microsoft\VisualStudio\SxS\VC7", + access=winreg.KEY_READ | winreg.KEY_WOW64_32KEY + ) + except OSError: + log.debug("Visual C++ is not registered") + return None, None + with key: best_version = 0 best_dir = None for i in count(): diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -34,6 +34,9 @@ Library ------- +- Issue #25316: distutils raises OSError instead of DistutilsPlatformError + when MSVC is not installed. + - Issue #23972: Updates asyncio datagram create method allowing reuseport and reuseaddr socket options to be set prior to binding the socket. Mirroring the existing asyncio create_server method the reuseaddr option -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Oct 6 01:27:05 2015 From: python-checkins at python.org (guido.van.rossum) Date: Mon, 05 Oct 2015 23:27:05 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Docs_and_one_small_improvement_for_issue_=2325304=2C_by_?= =?utf-8?q?Vincent_Michel=2E_=28Merge?= Message-ID: <20151005232704.20771.8010@psf.io> https://hg.python.org/cpython/rev/cba27498a2f7 changeset: 98546:cba27498a2f7 parent: 98543:07161dd8a078 parent: 98545:28fcd7f13613 user: Guido van Rossum date: Mon Oct 05 16:26:00 2015 -0700 summary: Docs and one small improvement for issue #25304, by Vincent Michel. (Merge 3.5->3.6.) files: Doc/library/asyncio-dev.rst | 12 +++- Doc/library/asyncio-task.rst | 39 +++++++++++++++++ Lib/asyncio/tasks.py | 7 ++- Lib/test/test_asyncio/test_tasks.py | 21 +++++++++ 4 files changed, 75 insertions(+), 4 deletions(-) diff --git a/Doc/library/asyncio-dev.rst b/Doc/library/asyncio-dev.rst --- a/Doc/library/asyncio-dev.rst +++ b/Doc/library/asyncio-dev.rst @@ -96,10 +96,9 @@ and the event loop executes the next task. To schedule a callback from a different thread, the -:meth:`BaseEventLoop.call_soon_threadsafe` method should be used. Example to -schedule a coroutine from a different thread:: +:meth:`BaseEventLoop.call_soon_threadsafe` method should be used. Example:: - loop.call_soon_threadsafe(asyncio.ensure_future, coro_func()) + loop.call_soon_threadsafe(callback, *args) Most asyncio objects are not thread safe. You should only worry if you access objects outside the event loop. For example, to cancel a future, don't call @@ -110,6 +109,13 @@ To handle signals and to execute subprocesses, the event loop must be run in the main thread. +To schedule a coroutine object from a different thread, the +:func:`run_coroutine_threadsafe` function should be used. 
It returns a +:class:`concurrent.futures.Future` to access the result:: + + future = asyncio.run_coroutine_threadsafe(coro_func(), loop) + result = future.result(timeout) # Wait for the result with a timeout + The :meth:`BaseEventLoop.run_in_executor` method can be used with a thread pool executor to execute a callback in different thread to not block the thread of the event loop. diff --git a/Doc/library/asyncio-task.rst b/Doc/library/asyncio-task.rst --- a/Doc/library/asyncio-task.rst +++ b/Doc/library/asyncio-task.rst @@ -683,3 +683,42 @@ .. versionchanged:: 3.4.3 If the wait is cancelled, the future *fut* is now also cancelled. + +.. function:: run_coroutine_threadsafe(coro, loop) + + Submit a :ref:`coroutine object ` to a given event loop. + + Return a :class:`concurrent.futures.Future` to access the result. + + This function is meant to be called from a different thread than the one + where the event loop is running. Usage:: + + # Create a coroutine + coro = asyncio.sleep(1, result=3) + # Submit the coroutine to a given loop + future = asyncio.run_coroutine_threadsafe(coro, loop) + # Wait for the result with an optional timeout argument + assert future.result(timeout) == 3 + + If an exception is raised in the coroutine, the returned future will be + notified. It can also be used to cancel the task in the event loop:: + + try: + result = future.result(timeout) + except asyncio.TimeoutError: + print('The coroutine took too long, cancelling the task...') + future.cancel() + except Exception as exc: + print('The coroutine raised an exception: {!r}'.format(exc)) + else: + print('The coroutine returned: {!r}'.format(result)) + + See the :ref:`concurrency and multithreading ` + section of the documentation. + + .. note:: + + Unlike the functions above, :func:`run_coroutine_threadsafe` requires the + *loop* argument to be passed explicitely. + + .. 
versionadded:: 3.4.4 diff --git a/Lib/asyncio/tasks.py b/Lib/asyncio/tasks.py --- a/Lib/asyncio/tasks.py +++ b/Lib/asyncio/tasks.py @@ -704,7 +704,12 @@ future = concurrent.futures.Future() def callback(): - futures._chain_future(ensure_future(coro, loop=loop), future) + try: + futures._chain_future(ensure_future(coro, loop=loop), future) + except Exception as exc: + if future.set_running_or_notify_cancel(): + future.set_exception(exc) + raise loop.call_soon_threadsafe(callback) return future diff --git a/Lib/test/test_asyncio/test_tasks.py b/Lib/test/test_asyncio/test_tasks.py --- a/Lib/test/test_asyncio/test_tasks.py +++ b/Lib/test/test_asyncio/test_tasks.py @@ -2166,6 +2166,27 @@ with self.assertRaises(asyncio.CancelledError): self.loop.run_until_complete(future) + def test_run_coroutine_threadsafe_task_factory_exception(self): + """Test coroutine submission from a tread to an event loop + when the task factory raise an exception.""" + # Clear the time generator + asyncio.ensure_future(self.add(1, 2), loop=self.loop) + # Schedule the target + future = self.loop.run_in_executor(None, self.target) + # Set corrupted task factory + self.loop.set_task_factory(lambda loop, coro: wrong_name) + # Set exception handler + callback = test_utils.MockCallback() + self.loop.set_exception_handler(callback) + # Run event loop + with self.assertRaises(NameError) as exc_context: + self.loop.run_until_complete(future) + # Check exceptions + self.assertIn('wrong_name', exc_context.exception.args[0]) + self.assertEqual(len(callback.call_args_list), 1) + (loop, context), kwargs = callback.call_args + self.assertEqual(context['exception'], exc_context.exception) + if __name__ == '__main__': unittest.main() -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Oct 6 01:27:05 2015 From: python-checkins at python.org (guido.van.rossum) Date: Mon, 05 Oct 2015 23:27:05 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_Docs_and_one_small_improvement_for_issue_=2325304=2C_by_Vincen?= =?utf-8?q?t_Michel=2E_=28Merge?= Message-ID: <20151005232704.457.50658@psf.io> https://hg.python.org/cpython/rev/28fcd7f13613 changeset: 98545:28fcd7f13613 branch: 3.5 parent: 98542:a2016b29762c parent: 98544:54c77fdcdb2e user: Guido van Rossum date: Mon Oct 05 16:23:13 2015 -0700 summary: Docs and one small improvement for issue #25304, by Vincent Michel. (Merge 3.4->3.5.) files: Doc/library/asyncio-dev.rst | 12 +++- Doc/library/asyncio-task.rst | 39 +++++++++++++++++ Lib/asyncio/tasks.py | 7 ++- Lib/test/test_asyncio/test_tasks.py | 21 +++++++++ 4 files changed, 75 insertions(+), 4 deletions(-) diff --git a/Doc/library/asyncio-dev.rst b/Doc/library/asyncio-dev.rst --- a/Doc/library/asyncio-dev.rst +++ b/Doc/library/asyncio-dev.rst @@ -96,10 +96,9 @@ and the event loop executes the next task. To schedule a callback from a different thread, the -:meth:`BaseEventLoop.call_soon_threadsafe` method should be used. Example to -schedule a coroutine from a different thread:: +:meth:`BaseEventLoop.call_soon_threadsafe` method should be used. Example:: - loop.call_soon_threadsafe(asyncio.ensure_future, coro_func()) + loop.call_soon_threadsafe(callback, *args) Most asyncio objects are not thread safe. You should only worry if you access objects outside the event loop. For example, to cancel a future, don't call @@ -110,6 +109,13 @@ To handle signals and to execute subprocesses, the event loop must be run in the main thread. 
+To schedule a coroutine object from a different thread, the +:func:`run_coroutine_threadsafe` function should be used. It returns a +:class:`concurrent.futures.Future` to access the result:: + + future = asyncio.run_coroutine_threadsafe(coro_func(), loop) + result = future.result(timeout) # Wait for the result with a timeout + The :meth:`BaseEventLoop.run_in_executor` method can be used with a thread pool executor to execute a callback in different thread to not block the thread of the event loop. diff --git a/Doc/library/asyncio-task.rst b/Doc/library/asyncio-task.rst --- a/Doc/library/asyncio-task.rst +++ b/Doc/library/asyncio-task.rst @@ -683,3 +683,42 @@ .. versionchanged:: 3.4.3 If the wait is cancelled, the future *fut* is now also cancelled. + +.. function:: run_coroutine_threadsafe(coro, loop) + + Submit a :ref:`coroutine object ` to a given event loop. + + Return a :class:`concurrent.futures.Future` to access the result. + + This function is meant to be called from a different thread than the one + where the event loop is running. Usage:: + + # Create a coroutine + coro = asyncio.sleep(1, result=3) + # Submit the coroutine to a given loop + future = asyncio.run_coroutine_threadsafe(coro, loop) + # Wait for the result with an optional timeout argument + assert future.result(timeout) == 3 + + If an exception is raised in the coroutine, the returned future will be + notified. It can also be used to cancel the task in the event loop:: + + try: + result = future.result(timeout) + except asyncio.TimeoutError: + print('The coroutine took too long, cancelling the task...') + future.cancel() + except Exception as exc: + print('The coroutine raised an exception: {!r}'.format(exc)) + else: + print('The coroutine returned: {!r}'.format(result)) + + See the :ref:`concurrency and multithreading ` + section of the documentation. + + .. note:: + + Unlike the functions above, :func:`run_coroutine_threadsafe` requires the + *loop* argument to be passed explicitely. + + .. 
versionadded:: 3.4.4 diff --git a/Lib/asyncio/tasks.py b/Lib/asyncio/tasks.py --- a/Lib/asyncio/tasks.py +++ b/Lib/asyncio/tasks.py @@ -704,7 +704,12 @@ future = concurrent.futures.Future() def callback(): - futures._chain_future(ensure_future(coro, loop=loop), future) + try: + futures._chain_future(ensure_future(coro, loop=loop), future) + except Exception as exc: + if future.set_running_or_notify_cancel(): + future.set_exception(exc) + raise loop.call_soon_threadsafe(callback) return future diff --git a/Lib/test/test_asyncio/test_tasks.py b/Lib/test/test_asyncio/test_tasks.py --- a/Lib/test/test_asyncio/test_tasks.py +++ b/Lib/test/test_asyncio/test_tasks.py @@ -2166,6 +2166,27 @@ with self.assertRaises(asyncio.CancelledError): self.loop.run_until_complete(future) + def test_run_coroutine_threadsafe_task_factory_exception(self): + """Test coroutine submission from a tread to an event loop + when the task factory raise an exception.""" + # Clear the time generator + asyncio.ensure_future(self.add(1, 2), loop=self.loop) + # Schedule the target + future = self.loop.run_in_executor(None, self.target) + # Set corrupted task factory + self.loop.set_task_factory(lambda loop, coro: wrong_name) + # Set exception handler + callback = test_utils.MockCallback() + self.loop.set_exception_handler(callback) + # Run event loop + with self.assertRaises(NameError) as exc_context: + self.loop.run_until_complete(future) + # Check exceptions + self.assertIn('wrong_name', exc_context.exception.args[0]) + self.assertEqual(len(callback.call_args_list), 1) + (loop, context), kwargs = callback.call_args + self.assertEqual(context['exception'], exc_context.exception) + if __name__ == '__main__': unittest.main() -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Oct 6 01:27:05 2015 From: python-checkins at python.org (guido.van.rossum) Date: Mon, 05 Oct 2015 23:27:05 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E4=29=3A_Docs_and_one_s?= =?utf-8?q?mall_improvement_for_issue_=2325304=2C_by_Vincent_Michel=2E?= Message-ID: <20151005232704.2677.14135@psf.io> https://hg.python.org/cpython/rev/54c77fdcdb2e changeset: 98544:54c77fdcdb2e branch: 3.4 parent: 98539:5e7e9b131904 user: Guido van Rossum date: Mon Oct 05 16:20:00 2015 -0700 summary: Docs and one small improvement for issue #25304, by Vincent Michel. files: Doc/library/asyncio-dev.rst | 12 +++- Doc/library/asyncio-task.rst | 39 +++++++++++++++++ Lib/asyncio/tasks.py | 7 ++- Lib/test/test_asyncio/test_tasks.py | 21 +++++++++ 4 files changed, 75 insertions(+), 4 deletions(-) diff --git a/Doc/library/asyncio-dev.rst b/Doc/library/asyncio-dev.rst --- a/Doc/library/asyncio-dev.rst +++ b/Doc/library/asyncio-dev.rst @@ -96,10 +96,9 @@ and the event loop executes the next task. To schedule a callback from a different thread, the -:meth:`BaseEventLoop.call_soon_threadsafe` method should be used. Example to -schedule a coroutine from a different thread:: +:meth:`BaseEventLoop.call_soon_threadsafe` method should be used. Example:: - loop.call_soon_threadsafe(asyncio.async, coro_func()) + loop.call_soon_threadsafe(callback, *args) Most asyncio objects are not thread safe. You should only worry if you access objects outside the event loop. For example, to cancel a future, don't call @@ -107,6 +106,13 @@ loop.call_soon_threadsafe(fut.cancel) +To schedule a coroutine object from a different thread, the +:func:`run_coroutine_threadsafe` function should be used. 
It returns a +:class:`concurrent.futures.Future` to access the result:: + + future = asyncio.run_coroutine_threadsafe(coro_func(), loop) + result = future.result(timeout) # Wait for the result with a timeout + To handle signals and to execute subprocesses, the event loop must be run in the main thread. diff --git a/Doc/library/asyncio-task.rst b/Doc/library/asyncio-task.rst --- a/Doc/library/asyncio-task.rst +++ b/Doc/library/asyncio-task.rst @@ -661,3 +661,42 @@ .. versionchanged:: 3.4.3 If the wait is cancelled, the future *fut* is now also cancelled. + +.. function:: run_coroutine_threadsafe(coro, loop) + + Submit a :ref:`coroutine object ` to a given event loop. + + Return a :class:`concurrent.futures.Future` to access the result. + + This function is meant to be called from a different thread than the one + where the event loop is running. Usage:: + + # Create a coroutine + coro = asyncio.sleep(1, result=3) + # Submit the coroutine to a given loop + future = asyncio.run_coroutine_threadsafe(coro, loop) + # Wait for the result with an optional timeout argument + assert future.result(timeout) == 3 + + If an exception is raised in the coroutine, the returned future will be + notified. It can also be used to cancel the task in the event loop:: + + try: + result = future.result(timeout) + except asyncio.TimeoutError: + print('The coroutine took too long, cancelling the task...') + future.cancel() + except Exception as exc: + print('The coroutine raised an exception: {!r}'.format(exc)) + else: + print('The coroutine returned: {!r}'.format(result)) + + See the :ref:`concurrency and multithreading ` + section of the documentation. + + .. note:: + + Unlike the functions above, :func:`run_coroutine_threadsafe` requires the + *loop* argument to be passed explicitely. + + .. 
versionadded:: 3.4.4 diff --git a/Lib/asyncio/tasks.py b/Lib/asyncio/tasks.py --- a/Lib/asyncio/tasks.py +++ b/Lib/asyncio/tasks.py @@ -704,7 +704,12 @@ future = concurrent.futures.Future() def callback(): - futures._chain_future(ensure_future(coro, loop=loop), future) + try: + futures._chain_future(ensure_future(coro, loop=loop), future) + except Exception as exc: + if future.set_running_or_notify_cancel(): + future.set_exception(exc) + raise loop.call_soon_threadsafe(callback) return future diff --git a/Lib/test/test_asyncio/test_tasks.py b/Lib/test/test_asyncio/test_tasks.py --- a/Lib/test/test_asyncio/test_tasks.py +++ b/Lib/test/test_asyncio/test_tasks.py @@ -2166,6 +2166,27 @@ with self.assertRaises(asyncio.CancelledError): self.loop.run_until_complete(future) + def test_run_coroutine_threadsafe_task_factory_exception(self): + """Test coroutine submission from a tread to an event loop + when the task factory raise an exception.""" + # Clear the time generator + asyncio.ensure_future(self.add(1, 2), loop=self.loop) + # Schedule the target + future = self.loop.run_in_executor(None, self.target) + # Set corrupted task factory + self.loop.set_task_factory(lambda loop, coro: wrong_name) + # Set exception handler + callback = test_utils.MockCallback() + self.loop.set_exception_handler(callback) + # Run event loop + with self.assertRaises(NameError) as exc_context: + self.loop.run_until_complete(future) + # Check exceptions + self.assertIn('wrong_name', exc_context.exception.args[0]) + self.assertEqual(len(callback.call_args_list), 1) + (loop, context), kwargs = callback.call_args + self.assertEqual(context['exception'], exc_context.exception) + if __name__ == '__main__': unittest.main() -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Oct 6 04:52:41 2015 From: python-checkins at python.org (raymond.hettinger) Date: Tue, 06 Oct 2015 02:52:41 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Eliminate_unnecessary_test?= Message-ID: <20151006025241.464.75509@psf.io> https://hg.python.org/cpython/rev/d00c0544880a changeset: 98547:d00c0544880a user: Raymond Hettinger date: Mon Oct 05 22:52:37 2015 -0400 summary: Eliminate unnecessary test files: Modules/_collectionsmodule.c | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Modules/_collectionsmodule.c b/Modules/_collectionsmodule.c --- a/Modules/_collectionsmodule.c +++ b/Modules/_collectionsmodule.c @@ -530,7 +530,7 @@ return NULL; new_deque->maxlen = old_deque->maxlen; /* Fast path for the deque_repeat() common case where len(deque) == 1 */ - if (Py_SIZE(deque) == 1 && new_deque->maxlen != 0) { + if (Py_SIZE(deque) == 1) { PyObject *item = old_deque->leftblock->data[old_deque->leftindex]; rv = deque_append(new_deque, item); } else { -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Oct 6 07:01:38 2015 From: python-checkins at python.org (benjamin.peterson) Date: Tue, 06 Oct 2015 05:01:38 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=282=2E7=29=3A_reinitialize_a?= =?utf-8?q?n_Event=27s_Condition_with_a_regular_lock_=28closes_=2325319=29?= Message-ID: <20151006050136.451.13572@psf.io> https://hg.python.org/cpython/rev/69a26f0800b3 changeset: 98548:69a26f0800b3 branch: 2.7 parent: 98536:45a04eadefd6 user: Benjamin Peterson date: Mon Oct 05 21:56:22 2015 -0700 summary: reinitialize an Event's Condition with a regular lock (closes #25319) files: Lib/test/lock_tests.py | 8 ++++++++ Lib/threading.py | 2 +- Misc/ACKS | 1 + Misc/NEWS | 3 +++ 4 
files changed, 13 insertions(+), 1 deletions(-) diff --git a/Lib/test/lock_tests.py b/Lib/test/lock_tests.py --- a/Lib/test/lock_tests.py +++ b/Lib/test/lock_tests.py @@ -305,6 +305,14 @@ for r, dt in results2: self.assertTrue(r) + def test_reset_internal_locks(self): + evt = self.eventtype() + old_lock = evt._Event__cond._Condition__lock + evt._reset_internal_locks() + new_lock = evt._Event__cond._Condition__lock + self.assertIsNot(new_lock, old_lock) + self.assertIs(type(new_lock), type(old_lock)) + class ConditionTests(BaseTestCase): """ diff --git a/Lib/threading.py b/Lib/threading.py --- a/Lib/threading.py +++ b/Lib/threading.py @@ -565,7 +565,7 @@ def _reset_internal_locks(self): # private! called by Thread._reset_internal_locks by _after_fork() - self.__cond.__init__() + self.__cond.__init__(Lock()) def isSet(self): 'Return true if and only if the internal flag is true.' diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -1290,6 +1290,7 @@ Rafal Smotrzyk Eric Snow Dirk Soede +Nir Soffer Paul Sokolovsky Evgeny Sologubov Cody Somerville diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -43,6 +43,9 @@ Library ------- +- Issue #25319: When threading.Event is reinitialized, the underlying condition + should use a regular lock rather than a recursive lock. + - Issue #25232: Fix CGIRequestHandler to split the query from the URL at the first question mark (?) rather than the last. Patch from Xiang Zhang. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Oct 6 07:01:37 2015 From: python-checkins at python.org (benjamin.peterson) Date: Tue, 06 Oct 2015 05:01:37 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E4=29=3A_reinitialize_a?= =?utf-8?q?n_Event=27s_Condition_with_a_regular_lock_=28closes_=2325319=29?= Message-ID: <20151006050136.3291.68053@psf.io> https://hg.python.org/cpython/rev/6108d30dde21 changeset: 98549:6108d30dde21 branch: 3.4 parent: 98544:54c77fdcdb2e user: Benjamin Peterson date: Mon Oct 05 21:56:22 2015 -0700 summary: reinitialize an Event's Condition with a regular lock (closes #25319) files: Lib/test/lock_tests.py | 8 ++++++++ Lib/threading.py | 2 +- Misc/ACKS | 1 + Misc/NEWS | 3 +++ 4 files changed, 13 insertions(+), 1 deletions(-) diff --git a/Lib/test/lock_tests.py b/Lib/test/lock_tests.py --- a/Lib/test/lock_tests.py +++ b/Lib/test/lock_tests.py @@ -388,6 +388,14 @@ b.wait_for_finished() self.assertEqual(results, [True] * N) + def test_reset_internal_locks(self): + evt = self.eventtype() + old_lock = evt._cond._lock + evt._reset_internal_locks() + new_lock = evt._cond._lock + self.assertIsNot(new_lock, old_lock) + self.assertIs(type(new_lock), type(old_lock)) + class ConditionTests(BaseTestCase): """ diff --git a/Lib/threading.py b/Lib/threading.py --- a/Lib/threading.py +++ b/Lib/threading.py @@ -496,7 +496,7 @@ def _reset_internal_locks(self): # private! called by Thread._reset_internal_locks by _after_fork() - self._cond.__init__() + self._cond.__init__(Lock()) def is_set(self): """Return true if and only if the internal flag is true.""" diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -1315,6 +1315,7 @@ Rafal Smotrzyk Eric Snow Dirk Soede +Nir Soffer Paul Sokolovsky Evgeny Sologubov Cody Somerville diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -100,6 +100,9 @@ submit a coroutine to a loop from another thread, returning a concurrent.futures.Future. By Vincent Michel. 
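A minimal usage sketch of the new function described in the entry above, assuming the event loop is kept running in a dedicated worker thread; the thread setup, the names and the one-second sleep are illustrative only and not taken from the patch::

    import asyncio
    import threading

    loop = asyncio.new_event_loop()

    def run_loop():
        # Run the loop forever in its own thread until stop() is scheduled.
        asyncio.set_event_loop(loop)
        loop.run_forever()

    worker = threading.Thread(target=run_loop)
    worker.start()

    # asyncio.sleep(1, result=3) simply completes after one second and returns 3.
    future = asyncio.run_coroutine_threadsafe(asyncio.sleep(1, result=3), loop)
    assert future.result(timeout=5) == 3  # blocks this thread, not the event loop

    # Shut the loop down from the submitting thread.
    loop.call_soon_threadsafe(loop.stop)
    worker.join()
    loop.close()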
+- Issue #25319: When threading.Event is reinitialized, the underlying condition + should use a regular lock rather than a recursive lock. + - Issue #25232: Fix CGIRequestHandler to split the query from the URL at the first question mark (?) rather than the last. Patch from Xiang Zhang. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Oct 6 07:01:52 2015 From: python-checkins at python.org (benjamin.peterson) Date: Tue, 06 Oct 2015 05:01:52 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?b?KTogbWVyZ2UgMy41ICgjMjUzMTkp?= Message-ID: <20151006050137.55460.52904@psf.io> https://hg.python.org/cpython/rev/41f29bbf520d changeset: 98551:41f29bbf520d parent: 98547:d00c0544880a parent: 98550:3719e842a7b1 user: Benjamin Peterson date: Mon Oct 05 22:01:29 2015 -0700 summary: merge 3.5 (#25319) files: Lib/test/lock_tests.py | 8 ++++++++ Lib/threading.py | 2 +- Misc/ACKS | 1 + Misc/NEWS | 3 +++ 4 files changed, 13 insertions(+), 1 deletions(-) diff --git a/Lib/test/lock_tests.py b/Lib/test/lock_tests.py --- a/Lib/test/lock_tests.py +++ b/Lib/test/lock_tests.py @@ -394,6 +394,14 @@ b.wait_for_finished() self.assertEqual(results, [True] * N) + def test_reset_internal_locks(self): + evt = self.eventtype() + old_lock = evt._cond._lock + evt._reset_internal_locks() + new_lock = evt._cond._lock + self.assertIsNot(new_lock, old_lock) + self.assertIs(type(new_lock), type(old_lock)) + class ConditionTests(BaseTestCase): """ diff --git a/Lib/threading.py b/Lib/threading.py --- a/Lib/threading.py +++ b/Lib/threading.py @@ -499,7 +499,7 @@ def _reset_internal_locks(self): # private! called by Thread._reset_internal_locks by _after_fork() - self._cond.__init__() + self._cond.__init__(Lock()) def is_set(self): """Return true if and only if the internal flag is true.""" diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -1353,6 +1353,7 @@ Rafal Smotrzyk Eric Snow Dirk Soede +Nir Soffer Paul Sokolovsky Evgeny Sologubov Cody Somerville diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -128,6 +128,9 @@ - Issue #13248: Remove deprecated inspect.getargspec and inspect.getmoduleinfo functions. +- Issue #25319: When threading.Event is reinitialized, the underlying condition + should use a regular lock rather than a recursive lock. 
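The behaviour described by the #25319 entry can be demonstrated directly; the sketch below pokes at the same private attributes as the new test, so it is illustrative rather than supported API::

    import threading

    evt = threading.Event()
    old_lock = evt._cond._lock      # private; the Condition's underlying lock
    evt._reset_internal_locks()     # private; normally called after fork()
    new_lock = evt._cond._lock

    # With the fix, the rebuilt Condition wraps a plain lock of the same type
    # as before, instead of silently switching to an RLock.
    assert new_lock is not old_lock
    assert type(new_lock) is type(old_lock)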
+ IDLE ---- -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Oct 6 07:01:52 2015 From: python-checkins at python.org (benjamin.peterson) Date: Tue, 06 Oct 2015 05:01:52 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_merge_3=2E4_=28=2325319=29?= Message-ID: <20151006050137.2667.40281@psf.io> https://hg.python.org/cpython/rev/3719e842a7b1 changeset: 98550:3719e842a7b1 branch: 3.5 parent: 98545:28fcd7f13613 parent: 98549:6108d30dde21 user: Benjamin Peterson date: Mon Oct 05 22:00:33 2015 -0700 summary: merge 3.4 (#25319) files: Lib/test/lock_tests.py | 8 ++++++++ Lib/threading.py | 2 +- Misc/ACKS | 1 + Misc/NEWS | 3 +++ 4 files changed, 13 insertions(+), 1 deletions(-) diff --git a/Lib/test/lock_tests.py b/Lib/test/lock_tests.py --- a/Lib/test/lock_tests.py +++ b/Lib/test/lock_tests.py @@ -394,6 +394,14 @@ b.wait_for_finished() self.assertEqual(results, [True] * N) + def test_reset_internal_locks(self): + evt = self.eventtype() + old_lock = evt._cond._lock + evt._reset_internal_locks() + new_lock = evt._cond._lock + self.assertIsNot(new_lock, old_lock) + self.assertIs(type(new_lock), type(old_lock)) + class ConditionTests(BaseTestCase): """ diff --git a/Lib/threading.py b/Lib/threading.py --- a/Lib/threading.py +++ b/Lib/threading.py @@ -499,7 +499,7 @@ def _reset_internal_locks(self): # private! called by Thread._reset_internal_locks by _after_fork() - self._cond.__init__() + self._cond.__init__(Lock()) def is_set(self): """Return true if and only if the internal flag is true.""" diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -1352,6 +1352,7 @@ Rafal Smotrzyk Eric Snow Dirk Soede +Nir Soffer Paul Sokolovsky Evgeny Sologubov Cody Somerville diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -150,6 +150,9 @@ - Issue #24881: Fixed setting binary mode in Python implementation of FileIO on Windows and Cygwin. Patch from Akira Li. +- Issue #25319: When threading.Event is reinitialized, the underlying condition + should use a regular lock rather than a recursive lock. + - Issue #21112: Fix regression in unittest.expectedFailure on subclasses. Patch from Berker Peksag. 
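The #21112 entry mentioned above concerns a decorated test method inherited by a subclass; a minimal illustration of that case, not taken from the patch::

    import unittest

    class Base(unittest.TestCase):
        @unittest.expectedFailure
        def test_fail(self):
            self.assertEqual(1, 2)   # fails, but the failure is expected

    class Derived(Base):
        pass                         # inherits test_fail unchanged

    if __name__ == '__main__':
        # Both Base.test_fail and Derived.test_fail should be reported as
        # expected failures, not as errors or plain failures.
        unittest.main()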
-- Repository URL: https://hg.python.org/cpython From solipsis at pitrou.net Tue Oct 6 10:45:15 2015 From: solipsis at pitrou.net (solipsis at pitrou.net) Date: Tue, 06 Oct 2015 08:45:15 +0000 Subject: [Python-checkins] Daily reference leaks (41f29bbf520d): sum=61491 Message-ID: <20151006084514.3293.31633@psf.io> results for 41f29bbf520d on branch "default" -------------------------------------------- test_capi leaked [5410, 5410, 5410] references, sum=16230 test_capi leaked [1421, 1423, 1423] memory blocks, sum=4267 test_functools leaked [0, 2, 2] memory blocks, sum=4 test_threading leaked [10820, 10820, 10820] references, sum=32460 test_threading leaked [2842, 2844, 2844] memory blocks, sum=8530 Command line was: ['./python', '-m', 'test.regrtest', '-uall', '-R', '3:3:/home/psf-users/antoine/refleaks/reflogHMljtJ', '--timeout', '7200'] From python-checkins at python.org Tue Oct 6 14:11:21 2015 From: python-checkins at python.org (steven.daprano) Date: Tue, 06 Oct 2015 12:11:21 +0000 Subject: [Python-checkins] =?utf-8?q?peps_=28merge_default_-=3E_default=29?= =?utf-8?q?=3A_add_discussion_on_defaults=2C_naming_conventions=2C_possibl?= =?utf-8?q?e_implementations?= Message-ID: <20151006121121.2685.95754@psf.io> https://hg.python.org/peps/rev/ce879f833f82 changeset: 6108:ce879f833f82 parent: 6105:742700a03e91 parent: 6107:850897ef2790 user: Steven D'Aprano date: Tue Oct 06 23:10:55 2015 +1100 summary: add discussion on defaults, naming conventions, possible implementations files: pep-0506.txt | 140 +++++++++++++++++++++++++++++--------- 1 files changed, 107 insertions(+), 33 deletions(-) diff --git a/pep-0506.txt b/pep-0506.txt --- a/pep-0506.txt +++ b/pep-0506.txt @@ -50,7 +50,7 @@ and expressed some concern [1]_ about the use of MT for generating sensitive information such as passwords, secure tokens, session keys and similar. -Although the documentation for the random module explicitly states that +Although the documentation for the ``random`` module explicitly states that the default is not suitable for security purposes [2]_, it is strongly believed that this warning may be missed, ignored or misunderstood by many Python developers. In particular: @@ -58,7 +58,7 @@ * developers may not have read the documentation and consequently not seen the warning; -* they may not realise that their specific use of it has security +* they may not realise that their specific use of the module has security implications; or * not realising that there could be a problem, they have copied code @@ -140,20 +140,71 @@ the ``random`` module to support these uses, ``SystemRandom`` will be sufficient. -Some illustrative implementations have been given by Nick Coghlan [10]_. -This idea has also been discussed on the issue tracker for the -"cryptography" module [11]_. +Some illustrative implementations have been given by Nick Coghlan [10]_ +and a minimalist API by Tim Peters [11]_. This idea has also been discussed +on the issue tracker for the "cryptography" module [12]_. 
The following +pseudo-code can be taken as a possible starting point for the real +implementation:: + + from random import SystemRandom + from hmac import compare_digest as equal + + _sysrand = SystemRandom() + + randrange = _sysrand.randrange + randint = _sysrand.randint + randbits = _sysrand.getrandbits + choice = _sysrand.choice + + def randbelow(exclusive_upper_bound): + return _sysrand._randbelow(exclusive_upper_bound) + + DEFAULT_ENTROPY = 32 # bytes + + def token_bytes(nbytes=None): + if nbytes is None: + nbytes = DEFAULT_ENTROPY + return os.urandom(nbytes) + + def token_hex(nbytes=None): + return binascii.hexlify(token_bytes(nbytes)).decode('ascii') + + def token_url(nbytes=None): + tok = token_bytes(nbytes) + return base64.urlsafe_b64encode(tok).rstrip(b'=').decode('ascii') + The ``secrets`` module itself will be pure Python, and other Python implementations can easily make use of it unchanged, or adapt it as necessary. +Default arguments +~~~~~~~~~~~~~~~~~ + +One difficult question is "How many bytes should my token be?". We can +help with this question by providing a default amount of entropy for the +"token_*" functions. If the ``nbytes`` argument is None or not given, the +default entropy will be used. This default value should be large enough +to be expected to be secure for medium-security uses, but is expected to +change in the future, possibly even in a maintenance release [13]_. + +Naming conventions +~~~~~~~~~~~~~~~~~~ + +One question is the naming conventions used in the module [14]_, whether to +use C-like naming conventions such as "randrange" or more Pythonic names +such as "random_range". + +Functions which are simply bound methods of the private ``SystemRandom`` +instance (e.g. ``randrange``), or a thin wrapper around such, should keep +the familiar names. Those which are something new (such as the various +``token_*`` functions) will use more Pythonic names. Alternatives ============ One alternative is to change the default PRNG provided by the ``random`` -module [12]_. This received considerable scepticism and outright opposition: +module [15]_. This received considerable scepticism and outright opposition: * There is fear that a CSPRNG may be slower than the current PRNG (which in the case of MT is already quite slow). @@ -172,13 +223,13 @@ * Demonstrated attacks against MT are typically against PHP applications. It is believed that PHP's version of MT is a significantly softer target - than Python's version, due to a poor seeding technique [13]_. Consequently, + than Python's version, due to a poor seeding technique [16]_. Consequently, without a proven attack against Python applications, many people object to a backwards-incompatible change. Nick Coghlan made an earlier suggestion for a globally configurable PRNG -which uses the system CSPRNG by default [14]_, but has since hinted that he -may withdraw it in favour of this proposal [15]_. +which uses the system CSPRNG by default [17]_, but has since withdrawn it +in favour of this proposal. Comparison To Other Languages @@ -186,7 +237,7 @@ * PHP - PHP includes a function ``uniqid`` [16]_ which by default returns a + PHP includes a function ``uniqid`` [18]_ which by default returns a thirteen character string based on the current time in microseconds. Translated into Python syntax, it has the following signature:: @@ -197,7 +248,7 @@ applications use it for that purpose (citation needed). PHP 5.3 and better also includes a function ``openssl_random_pseudo_bytes`` - [17]_. 
Translated into Python syntax, it has roughly the following + [19]_. Translated into Python syntax, it has roughly the following signature:: def openssl_random_pseudo_bytes(length:int)->Tuple[str, bool] @@ -209,16 +260,16 @@ * Javascript - Based on a rather cursory search [18]_, there doesn't appear to be any + Based on a rather cursory search [20]_, there do not appear to be any well-known standard functions for producing strong random values in Javascript, although there may be good quality third-party libraries. Standard Javascript doesn't seem to include an interface to the system CSPRNG either, and people have extensively written about the - weaknesses of Javascript's ``Math.random`` [19]_. + weaknesses of Javascript's ``Math.random`` [21]_. * Ruby - The Ruby standard library includes a module ``SecureRandom`` [20]_ + The Ruby standard library includes a module ``SecureRandom`` [22]_ which includes the following methods: * base64 - returns a Base64 encoded random string. @@ -240,12 +291,15 @@ There was a proposal to add a "random.safe" submodule, quoting the Zen of Python "Namespaces are one honking great idea" koan. However, the -author of the Zen, Tim Peters, has come out against this idea [21]_, and +author of the Zen, Tim Peters, has come out against this idea [23]_, and recommends a top-level module. In discussion on the python-ideas mailing list so far, the name "secrets" has received some approval, and no strong opposition. +There is already an existing third-party module with the same name [24]_, +but it appears to be unused and abandoned. + Frequently Asked Questions ========================== @@ -255,9 +309,9 @@ A: The consensus among security professionals is that MT is not safe in security contexts. It is not difficult to reconstruct the internal - state of MT [22]_ [23]_ and so predict all past and future values. There + state of MT [25]_ [26]_ and so predict all past and future values. There are a number of known, practical attacks on systems using MT for - randomness [24]_. + randomness [27]_. While there are currently no known direct attacks on applications written in Python due to the use of MT, there is widespread agreement @@ -268,7 +322,7 @@ A: No. This is a "batteries included" solution, not a full-featured "nuclear reactor". It is intended to mitigate against some basic security errors, not be a solution to all security-related issues. To - quote Nick Coghlan referring to his earlier proposal [25]_:: + quote Nick Coghlan referring to his earlier proposal [28]_:: "...folks really are better off learning to use things like cryptography.io for security sensitive software, so this change @@ -276,6 +330,14 @@ non-trivial proportion of the millions of current and future Python developers won't do that." +* Q: What about a password generator? + + A: The consensus is that the requirements for password generators are too + variable for it to be a good match for the standard library [29]_. No + password generator will be included in the initial release of the + module, instead it will be given in the documentation as a recipe (? la + the recipes in the ``itertools`` module) [30]_. + References ========== @@ -305,38 +367,50 @@ .. [10] https://mail.python.org/pipermail/python-ideas/2015-September/036271.html -.. [11] https://github.com/pyca/cryptography/issues/2347 +.. [11] https://mail.python.org/pipermail/python-ideas/2015-September/036350.html -.. [12] Link needed. +.. [12] https://github.com/pyca/cryptography/issues/2347 -.. 
[13] By default PHP seeds the MT PRNG with the time (citation needed), +.. [13] https://mail.python.org/pipermail/python-ideas/2015-September/036517.html + https://mail.python.org/pipermail/python-ideas/2015-September/036515.html + +.. [14] https://mail.python.org/pipermail/python-ideas/2015-September/036474.html + +.. [15] Link needed. + +.. [16] By default PHP seeds the MT PRNG with the time (citation needed), which is exploitable by attackers, while Python seeds the PRNG with output from the system CSPRNG, which is believed to be much harder to exploit. -.. [14] http://legacy.python.org/dev/peps/pep-0504/ +.. [17] http://legacy.python.org/dev/peps/pep-0504/ -.. [15] https://mail.python.org/pipermail/python-ideas/2015-September/036243.html +.. [18] http://php.net/manual/en/function.uniqid.php -.. [16] http://php.net/manual/en/function.uniqid.php +.. [19] http://php.net/manual/en/function.openssl-random-pseudo-bytes.php -.. [17] http://php.net/manual/en/function.openssl-random-pseudo-bytes.php +.. [20] Volunteers and patches are welcome. -.. [18] Volunteers and patches are welcome. +.. [21] http://ifsec.blogspot.fr/2012/05/cross-domain-mathrandom-prediction.html -.. [19] http://ifsec.blogspot.fr/2012/05/cross-domain-mathrandom-prediction.html +.. [22] http://ruby-doc.org/stdlib-2.1.2/libdoc/securerandom/rdoc/SecureRandom.html -.. [20] http://ruby-doc.org/stdlib-2.1.2/libdoc/securerandom/rdoc/SecureRandom.html +.. [23] https://mail.python.org/pipermail/python-ideas/2015-September/036254.html -.. [21] https://mail.python.org/pipermail/python-ideas/2015-September/036254.html +.. [24] https://pypi.python.org/pypi/secrets -.. [22] https://jazzy.id.au/2010/09/22/cracking_random_number_generators_part_3.html +.. [25] https://jazzy.id.au/2010/09/22/cracking_random_number_generators_part_3.html -.. [23] https://mail.python.org/pipermail/python-ideas/2015-September/036077.html +.. [26] https://mail.python.org/pipermail/python-ideas/2015-September/036077.html -.. [24] https://media.blackhat.com/bh-us-12/Briefings/Argyros/BH_US_12_Argyros_PRNG_WP.pdf +.. [27] https://media.blackhat.com/bh-us-12/Briefings/Argyros/BH_US_12_Argyros_PRNG_WP.pdf -.. [25] https://mail.python.org/pipermail/python-ideas/2015-September/036157.html +.. [28] https://mail.python.org/pipermail/python-ideas/2015-September/036157.html + +.. [29] https://mail.python.org/pipermail/python-ideas/2015-September/036476.html + https://mail.python.org/pipermail/python-ideas/2015-September/036478.html + +.. [30] https://mail.python.org/pipermail/python-ideas/2015-September/036488.html Copyright -- Repository URL: https://hg.python.org/peps From python-checkins at python.org Tue Oct 6 14:11:23 2015 From: python-checkins at python.org (steven.daprano) Date: Tue, 06 Oct 2015 12:11:23 +0000 Subject: [Python-checkins] =?utf-8?q?peps=3A_Add_default_entropy=2E?= Message-ID: <20151006121120.18380.93612@psf.io> https://hg.python.org/peps/rev/850897ef2790 changeset: 6107:850897ef2790 user: Steven D'Aprano date: Mon Oct 05 03:11:17 2015 +1100 summary: Add default entropy. 
files: pep-0506.txt | 96 ++++++++++++++++++++------------------- 1 files changed, 50 insertions(+), 46 deletions(-) diff --git a/pep-0506.txt b/pep-0506.txt --- a/pep-0506.txt +++ b/pep-0506.txt @@ -159,14 +159,19 @@ def randbelow(exclusive_upper_bound): return _sysrand._randbelow(exclusive_upper_bound) - def token_bytes(nbytes=32): + DEFAULT_ENTROPY = 32 # bytes + + def token_bytes(nbytes=None): + if nbytes is None: + nbytes = DEFAULT_ENTROPY return os.urandom(nbytes) - def token_hex(nbytes=32): + def token_hex(nbytes=None): return binascii.hexlify(token_bytes(nbytes)).decode('ascii') - def token_url(nbytes=32): - return base64.urlsafe_b64encode(token_bytes(nbytes)).decode('ascii') + def token_url(nbytes=None): + tok = token_bytes(nbytes) + return base64.urlsafe_b64encode(tok).rstrip(b'=').decode('ascii') The ``secrets`` module itself will be pure Python, and other Python @@ -176,18 +181,17 @@ Default arguments ~~~~~~~~~~~~~~~~~ -One difficult question is "How many bytes should my token be?" We can help -with this question by giving the "token_*" functions a sensible default for -the ``nbytes`` argument. This default value should be large enough to be -expected to be secure for medium-security uses [xxx]_. - -It is expected that future versions will need to increase those default -values, possibly even during +One difficult question is "How many bytes should my token be?". We can +help with this question by providing a default amount of entropy for the +"token_*" functions. If the ``nbytes`` argument is None or not given, the +default entropy will be used. This default value should be large enough +to be expected to be secure for medium-security uses, but is expected to +change in the future, possibly even in a maintenance release [13]_. Naming conventions ~~~~~~~~~~~~~~~~~~ -One question is the naming conventions used in the module [13]_, whether to +One question is the naming conventions used in the module [14]_, whether to use C-like naming conventions such as "randrange" or more Pythonic names such as "random_range". @@ -200,7 +204,7 @@ ============ One alternative is to change the default PRNG provided by the ``random`` -module [14]_. This received considerable scepticism and outright opposition: +module [15]_. This received considerable scepticism and outright opposition: * There is fear that a CSPRNG may be slower than the current PRNG (which in the case of MT is already quite slow). @@ -219,12 +223,12 @@ * Demonstrated attacks against MT are typically against PHP applications. It is believed that PHP's version of MT is a significantly softer target - than Python's version, due to a poor seeding technique [15]_. Consequently, + than Python's version, due to a poor seeding technique [16]_. Consequently, without a proven attack against Python applications, many people object to a backwards-incompatible change. Nick Coghlan made an earlier suggestion for a globally configurable PRNG -which uses the system CSPRNG by default [16]_, but has since withdrawn it +which uses the system CSPRNG by default [17]_, but has since withdrawn it in favour of this proposal. @@ -233,7 +237,7 @@ * PHP - PHP includes a function ``uniqid`` [17]_ which by default returns a + PHP includes a function ``uniqid`` [18]_ which by default returns a thirteen character string based on the current time in microseconds. Translated into Python syntax, it has the following signature:: @@ -244,7 +248,7 @@ applications use it for that purpose (citation needed). 
PHP 5.3 and better also includes a function ``openssl_random_pseudo_bytes`` - [18]_. Translated into Python syntax, it has roughly the following + [19]_. Translated into Python syntax, it has roughly the following signature:: def openssl_random_pseudo_bytes(length:int)->Tuple[str, bool] @@ -256,16 +260,16 @@ * Javascript - Based on a rather cursory search [19]_, there do not appear to be any + Based on a rather cursory search [20]_, there do not appear to be any well-known standard functions for producing strong random values in Javascript, although there may be good quality third-party libraries. Standard Javascript doesn't seem to include an interface to the system CSPRNG either, and people have extensively written about the - weaknesses of Javascript's ``Math.random`` [20]_. + weaknesses of Javascript's ``Math.random`` [21]_. * Ruby - The Ruby standard library includes a module ``SecureRandom`` [21]_ + The Ruby standard library includes a module ``SecureRandom`` [22]_ which includes the following methods: * base64 - returns a Base64 encoded random string. @@ -287,13 +291,13 @@ There was a proposal to add a "random.safe" submodule, quoting the Zen of Python "Namespaces are one honking great idea" koan. However, the -author of the Zen, Tim Peters, has come out against this idea [22]_, and +author of the Zen, Tim Peters, has come out against this idea [23]_, and recommends a top-level module. In discussion on the python-ideas mailing list so far, the name "secrets" has received some approval, and no strong opposition. -There is already an existing third-party module with the same name [23]_, +There is already an existing third-party module with the same name [24]_, but it appears to be unused and abandoned. @@ -305,9 +309,9 @@ A: The consensus among security professionals is that MT is not safe in security contexts. It is not difficult to reconstruct the internal - state of MT [24]_ [25]_ and so predict all past and future values. There + state of MT [25]_ [26]_ and so predict all past and future values. There are a number of known, practical attacks on systems using MT for - randomness [26]_. + randomness [27]_. While there are currently no known direct attacks on applications written in Python due to the use of MT, there is widespread agreement @@ -318,7 +322,7 @@ A: No. This is a "batteries included" solution, not a full-featured "nuclear reactor". It is intended to mitigate against some basic security errors, not be a solution to all security-related issues. To - quote Nick Coghlan referring to his earlier proposal [27]_:: + quote Nick Coghlan referring to his earlier proposal [28]_:: "...folks really are better off learning to use things like cryptography.io for security sensitive software, so this change @@ -329,10 +333,10 @@ * Q: What about a password generator? A: The consensus is that the requirements for password generators are too - variable for it to be a good match for the standard library [28]_. No + variable for it to be a good match for the standard library [29]_. No password generator will be included in the initial release of the module, instead it will be given in the documentation as a recipe (? la - the recipes in the ``itertools`` module) [29]_. + the recipes in the ``itertools`` module) [30]_. References @@ -367,46 +371,46 @@ .. [12] https://github.com/pyca/cryptography/issues/2347 -.. [xx] See discussion thread starting with - https://mail.python.org/pipermail/python-ideas/2015-September/036509.html +.. 
[13] https://mail.python.org/pipermail/python-ideas/2015-September/036517.html + https://mail.python.org/pipermail/python-ideas/2015-September/036515.html -.. [13] https://mail.python.org/pipermail/python-ideas/2015-September/036474.html +.. [14] https://mail.python.org/pipermail/python-ideas/2015-September/036474.html -.. [14] Link needed. +.. [15] Link needed. -.. [15] By default PHP seeds the MT PRNG with the time (citation needed), +.. [16] By default PHP seeds the MT PRNG with the time (citation needed), which is exploitable by attackers, while Python seeds the PRNG with output from the system CSPRNG, which is believed to be much harder to exploit. -.. [16] http://legacy.python.org/dev/peps/pep-0504/ +.. [17] http://legacy.python.org/dev/peps/pep-0504/ -.. [17] http://php.net/manual/en/function.uniqid.php +.. [18] http://php.net/manual/en/function.uniqid.php -.. [18] http://php.net/manual/en/function.openssl-random-pseudo-bytes.php +.. [19] http://php.net/manual/en/function.openssl-random-pseudo-bytes.php -.. [19] Volunteers and patches are welcome. +.. [20] Volunteers and patches are welcome. -.. [20] http://ifsec.blogspot.fr/2012/05/cross-domain-mathrandom-prediction.html +.. [21] http://ifsec.blogspot.fr/2012/05/cross-domain-mathrandom-prediction.html -.. [21] http://ruby-doc.org/stdlib-2.1.2/libdoc/securerandom/rdoc/SecureRandom.html +.. [22] http://ruby-doc.org/stdlib-2.1.2/libdoc/securerandom/rdoc/SecureRandom.html -.. [22] https://mail.python.org/pipermail/python-ideas/2015-September/036254.html +.. [23] https://mail.python.org/pipermail/python-ideas/2015-September/036254.html -.. [23] https://pypi.python.org/pypi/secrets +.. [24] https://pypi.python.org/pypi/secrets -.. [24] https://jazzy.id.au/2010/09/22/cracking_random_number_generators_part_3.html +.. [25] https://jazzy.id.au/2010/09/22/cracking_random_number_generators_part_3.html -.. [25] https://mail.python.org/pipermail/python-ideas/2015-September/036077.html +.. [26] https://mail.python.org/pipermail/python-ideas/2015-September/036077.html -.. [26] https://media.blackhat.com/bh-us-12/Briefings/Argyros/BH_US_12_Argyros_PRNG_WP.pdf +.. [27] https://media.blackhat.com/bh-us-12/Briefings/Argyros/BH_US_12_Argyros_PRNG_WP.pdf -.. [27] https://mail.python.org/pipermail/python-ideas/2015-September/036157.html +.. [28] https://mail.python.org/pipermail/python-ideas/2015-September/036157.html -.. [28] https://mail.python.org/pipermail/python-ideas/2015-September/036476.html +.. [29] https://mail.python.org/pipermail/python-ideas/2015-September/036476.html https://mail.python.org/pipermail/python-ideas/2015-September/036478.html -.. [29] https://mail.python.org/pipermail/python-ideas/2015-September/036488.html +.. [30] https://mail.python.org/pipermail/python-ideas/2015-September/036488.html Copyright -- Repository URL: https://hg.python.org/peps From python-checkins at python.org Tue Oct 6 14:11:23 2015 From: python-checkins at python.org (steven.daprano) Date: Tue, 06 Oct 2015 12:11:23 +0000 Subject: [Python-checkins] =?utf-8?q?peps=3A_Add_sample_code=2C_default_ar?= =?utf-8?q?guments=2C_naming_conventions=2E?= Message-ID: <20151006121120.457.51643@psf.io> https://hg.python.org/peps/rev/6de7d0d8d201 changeset: 6106:6de7d0d8d201 parent: 6102:c7712f920a8f user: Steven D'Aprano date: Sat Oct 03 21:42:05 2015 +1000 summary: Add sample code, default arguments, naming conventions. 
files: pep-0506.txt | 136 +++++++++++++++++++++++++++++--------- 1 files changed, 103 insertions(+), 33 deletions(-) diff --git a/pep-0506.txt b/pep-0506.txt --- a/pep-0506.txt +++ b/pep-0506.txt @@ -50,7 +50,7 @@ and expressed some concern [1]_ about the use of MT for generating sensitive information such as passwords, secure tokens, session keys and similar. -Although the documentation for the random module explicitly states that +Although the documentation for the ``random`` module explicitly states that the default is not suitable for security purposes [2]_, it is strongly believed that this warning may be missed, ignored or misunderstood by many Python developers. In particular: @@ -58,7 +58,7 @@ * developers may not have read the documentation and consequently not seen the warning; -* they may not realise that their specific use of it has security +* they may not realise that their specific use of the module has security implications; or * not realising that there could be a problem, they have copied code @@ -140,20 +140,67 @@ the ``random`` module to support these uses, ``SystemRandom`` will be sufficient. -Some illustrative implementations have been given by Nick Coghlan [10]_. -This idea has also been discussed on the issue tracker for the -"cryptography" module [11]_. +Some illustrative implementations have been given by Nick Coghlan [10]_ +and a minimalist API by Tim Peters [11]_. This idea has also been discussed +on the issue tracker for the "cryptography" module [12]_. The following +pseudo-code can be taken as a possible starting point for the real +implementation:: + + from random import SystemRandom + from hmac import compare_digest as equal + + _sysrand = SystemRandom() + + randrange = _sysrand.randrange + randint = _sysrand.randint + randbits = _sysrand.getrandbits + choice = _sysrand.choice + + def randbelow(exclusive_upper_bound): + return _sysrand._randbelow(exclusive_upper_bound) + + def token_bytes(nbytes=32): + return os.urandom(nbytes) + + def token_hex(nbytes=32): + return binascii.hexlify(token_bytes(nbytes)).decode('ascii') + + def token_url(nbytes=32): + return base64.urlsafe_b64encode(token_bytes(nbytes)).decode('ascii') + The ``secrets`` module itself will be pure Python, and other Python implementations can easily make use of it unchanged, or adapt it as necessary. +Default arguments +~~~~~~~~~~~~~~~~~ + +One difficult question is "How many bytes should my token be?" We can help +with this question by giving the "token_*" functions a sensible default for +the ``nbytes`` argument. This default value should be large enough to be +expected to be secure for medium-security uses [xxx]_. + +It is expected that future versions will need to increase those default +values, possibly even during + +Naming conventions +~~~~~~~~~~~~~~~~~~ + +One question is the naming conventions used in the module [13]_, whether to +use C-like naming conventions such as "randrange" or more Pythonic names +such as "random_range". + +Functions which are simply bound methods of the private ``SystemRandom`` +instance (e.g. ``randrange``), or a thin wrapper around such, should keep +the familiar names. Those which are something new (such as the various +``token_*`` functions) will use more Pythonic names. Alternatives ============ One alternative is to change the default PRNG provided by the ``random`` -module [12]_. This received considerable scepticism and outright opposition: +module [14]_. 
This received considerable scepticism and outright opposition: * There is fear that a CSPRNG may be slower than the current PRNG (which in the case of MT is already quite slow). @@ -172,13 +219,13 @@ * Demonstrated attacks against MT are typically against PHP applications. It is believed that PHP's version of MT is a significantly softer target - than Python's version, due to a poor seeding technique [13]_. Consequently, + than Python's version, due to a poor seeding technique [15]_. Consequently, without a proven attack against Python applications, many people object to a backwards-incompatible change. Nick Coghlan made an earlier suggestion for a globally configurable PRNG -which uses the system CSPRNG by default [14]_, but has since hinted that he -may withdraw it in favour of this proposal [15]_. +which uses the system CSPRNG by default [16]_, but has since withdrawn it +in favour of this proposal. Comparison To Other Languages @@ -186,7 +233,7 @@ * PHP - PHP includes a function ``uniqid`` [16]_ which by default returns a + PHP includes a function ``uniqid`` [17]_ which by default returns a thirteen character string based on the current time in microseconds. Translated into Python syntax, it has the following signature:: @@ -197,7 +244,7 @@ applications use it for that purpose (citation needed). PHP 5.3 and better also includes a function ``openssl_random_pseudo_bytes`` - [17]_. Translated into Python syntax, it has roughly the following + [18]_. Translated into Python syntax, it has roughly the following signature:: def openssl_random_pseudo_bytes(length:int)->Tuple[str, bool] @@ -209,16 +256,16 @@ * Javascript - Based on a rather cursory search [18]_, there doesn't appear to be any + Based on a rather cursory search [19]_, there do not appear to be any well-known standard functions for producing strong random values in Javascript, although there may be good quality third-party libraries. Standard Javascript doesn't seem to include an interface to the system CSPRNG either, and people have extensively written about the - weaknesses of Javascript's ``Math.random`` [19]_. + weaknesses of Javascript's ``Math.random`` [20]_. * Ruby - The Ruby standard library includes a module ``SecureRandom`` [20]_ + The Ruby standard library includes a module ``SecureRandom`` [21]_ which includes the following methods: * base64 - returns a Base64 encoded random string. @@ -240,12 +287,15 @@ There was a proposal to add a "random.safe" submodule, quoting the Zen of Python "Namespaces are one honking great idea" koan. However, the -author of the Zen, Tim Peters, has come out against this idea [21]_, and +author of the Zen, Tim Peters, has come out against this idea [22]_, and recommends a top-level module. In discussion on the python-ideas mailing list so far, the name "secrets" has received some approval, and no strong opposition. +There is already an existing third-party module with the same name [23]_, +but it appears to be unused and abandoned. + Frequently Asked Questions ========================== @@ -255,9 +305,9 @@ A: The consensus among security professionals is that MT is not safe in security contexts. It is not difficult to reconstruct the internal - state of MT [22]_ [23]_ and so predict all past and future values. There + state of MT [24]_ [25]_ and so predict all past and future values. There are a number of known, practical attacks on systems using MT for - randomness [24]_. + randomness [26]_. 
While there are currently no known direct attacks on applications written in Python due to the use of MT, there is widespread agreement @@ -268,7 +318,7 @@ A: No. This is a "batteries included" solution, not a full-featured "nuclear reactor". It is intended to mitigate against some basic security errors, not be a solution to all security-related issues. To - quote Nick Coghlan referring to his earlier proposal [25]_:: + quote Nick Coghlan referring to his earlier proposal [27]_:: "...folks really are better off learning to use things like cryptography.io for security sensitive software, so this change @@ -276,6 +326,14 @@ non-trivial proportion of the millions of current and future Python developers won't do that." +* Q: What about a password generator? + + A: The consensus is that the requirements for password generators are too + variable for it to be a good match for the standard library [28]_. No + password generator will be included in the initial release of the + module, instead it will be given in the documentation as a recipe (? la + the recipes in the ``itertools`` module) [29]_. + References ========== @@ -305,38 +363,50 @@ .. [10] https://mail.python.org/pipermail/python-ideas/2015-September/036271.html -.. [11] https://github.com/pyca/cryptography/issues/2347 +.. [11] https://mail.python.org/pipermail/python-ideas/2015-September/036350.html -.. [12] Link needed. +.. [12] https://github.com/pyca/cryptography/issues/2347 -.. [13] By default PHP seeds the MT PRNG with the time (citation needed), +.. [xx] See discussion thread starting with + https://mail.python.org/pipermail/python-ideas/2015-September/036509.html + +.. [13] https://mail.python.org/pipermail/python-ideas/2015-September/036474.html + +.. [14] Link needed. + +.. [15] By default PHP seeds the MT PRNG with the time (citation needed), which is exploitable by attackers, while Python seeds the PRNG with output from the system CSPRNG, which is believed to be much harder to exploit. -.. [14] http://legacy.python.org/dev/peps/pep-0504/ +.. [16] http://legacy.python.org/dev/peps/pep-0504/ -.. [15] https://mail.python.org/pipermail/python-ideas/2015-September/036243.html +.. [17] http://php.net/manual/en/function.uniqid.php -.. [16] http://php.net/manual/en/function.uniqid.php +.. [18] http://php.net/manual/en/function.openssl-random-pseudo-bytes.php -.. [17] http://php.net/manual/en/function.openssl-random-pseudo-bytes.php +.. [19] Volunteers and patches are welcome. -.. [18] Volunteers and patches are welcome. +.. [20] http://ifsec.blogspot.fr/2012/05/cross-domain-mathrandom-prediction.html -.. [19] http://ifsec.blogspot.fr/2012/05/cross-domain-mathrandom-prediction.html +.. [21] http://ruby-doc.org/stdlib-2.1.2/libdoc/securerandom/rdoc/SecureRandom.html -.. [20] http://ruby-doc.org/stdlib-2.1.2/libdoc/securerandom/rdoc/SecureRandom.html +.. [22] https://mail.python.org/pipermail/python-ideas/2015-September/036254.html -.. [21] https://mail.python.org/pipermail/python-ideas/2015-September/036254.html +.. [23] https://pypi.python.org/pypi/secrets -.. [22] https://jazzy.id.au/2010/09/22/cracking_random_number_generators_part_3.html +.. [24] https://jazzy.id.au/2010/09/22/cracking_random_number_generators_part_3.html -.. [23] https://mail.python.org/pipermail/python-ideas/2015-September/036077.html +.. [25] https://mail.python.org/pipermail/python-ideas/2015-September/036077.html -.. [24] https://media.blackhat.com/bh-us-12/Briefings/Argyros/BH_US_12_Argyros_PRNG_WP.pdf +.. 
[26] https://media.blackhat.com/bh-us-12/Briefings/Argyros/BH_US_12_Argyros_PRNG_WP.pdf -.. [25] https://mail.python.org/pipermail/python-ideas/2015-September/036157.html +.. [27] https://mail.python.org/pipermail/python-ideas/2015-September/036157.html + +.. [28] https://mail.python.org/pipermail/python-ideas/2015-September/036476.html + https://mail.python.org/pipermail/python-ideas/2015-September/036478.html + +.. [29] https://mail.python.org/pipermail/python-ideas/2015-September/036488.html Copyright -- Repository URL: https://hg.python.org/peps From lp_benchmark_robot at intel.com Tue Oct 6 17:14:55 2015 From: lp_benchmark_robot at intel.com (lp_benchmark_robot at intel.com) Date: Tue, 6 Oct 2015 16:14:55 +0100 Subject: [Python-checkins] Benchmark Results for Python 2.7 2015-10-06 Message-ID: <1a9c8028-b466-4540-830c-53f3ddb078b0@irsmsx152.ger.corp.intel.com> No new revisions. Here are the previous results: Results for project python_2.7-nightly, build date 2015-10-06 03:44:26 commit: 45a04eadefd6ed13c110059375d2932f6b0d7490 revision date: 2015-10-04 10:52:40 +0000 environment: Haswell-EP cpu: Intel(R) Xeon(R) CPU E5-2699 v3 @ 2.30GHz 2x18 cores, stepping 2, LLC 45 MB mem: 128 GB os: CentOS 7.1 kernel: Linux 3.10.0-229.4.2.el7.x86_64 Baseline results were generated using release v2.7.10, with hash 15c95b7d81dcf821daade360741e00714667653f from 2015-05-23 16:02:14+00:00 ------------------------------------------------------------------------------------------ benchmark relative change since change since current rev with std_dev* last run v2.7.10 regrtest PGO ------------------------------------------------------------------------------------------ :-) django_v2 0.11335% 0.94969% 5.43955% 8.53025% :-) pybench 0.16785% 0.69147% 6.77273% 6.45979% :-| regex_v8 1.07392% 0.01169% -1.82451% 7.81632% :-) nbody 0.15352% 0.15454% 8.67803% 3.87488% :-) json_dump_v2 0.21320% -0.26509% 3.37121% 12.71810% :-( normal_startup 1.91283% -0.47298% -2.14007% 3.41653% :-| ssbench 0.10483% 0.27749% 1.31698% 1.08129% ------------------------------------------------------------------------------------------ Note: Benchmark results for ssbench are measured in requests/second while all other are measured in seconds. * Relative Standard Deviation (Standard Deviation/Average) Our lab does a nightly source pull and build of the Python project and measures performance changes against the previous stable version and the previous nightly measurement. This is provided as a service to the community so that quality issues with current hardware can be identified quickly. Intel technologies' features and benefits depend on system configuration and may require enabled hardware, software or service activation. Performance varies depending on system configuration. 
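Picking up the ``secrets`` pseudo-code from the PEP 506 diffs above, a short usage sketch of the proposed API; the module does not exist yet at this point, so the import assumes the pseudo-code has been saved locally as ``secrets.py``, and the token sizes are illustrative::

    import secrets
    from hmac import compare_digest

    # URL-safe token of the default size, e.g. for a password-reset link.
    reset_token = secrets.token_url()

    # Explicit 16-byte hex token, e.g. for a session id.
    session_id = secrets.token_hex(16)

    # Values drawn from the system CSPRNG rather than the Mersenne Twister.
    colour = secrets.choice(['red', 'green', 'blue'])
    n = secrets.randbelow(10)        # 0 <= n < 10

    # Compare a stored token against user input in constant time.
    def check_token(stored, supplied):
        return compare_digest(stored, supplied)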
From lp_benchmark_robot at intel.com Tue Oct 6 17:14:29 2015 From: lp_benchmark_robot at intel.com (lp_benchmark_robot at intel.com) Date: Tue, 6 Oct 2015 16:14:29 +0100 Subject: [Python-checkins] Benchmark Results for Python Default 2015-10-06 Message-ID: <7e5ab4d6-44a2-4bb4-aa87-747d5dc3813a@irsmsx152.ger.corp.intel.com> Results for project python_default-nightly, build date 2015-10-06 03:02:01 commit: d00c0544880a15dd134cdd268accc1a7e32dfc71 revision date: 2015-10-06 02:52:37 +0000 environment: Haswell-EP cpu: Intel(R) Xeon(R) CPU E5-2699 v3 @ 2.30GHz 2x18 cores, stepping 2, LLC 45 MB mem: 128 GB os: CentOS 7.1 kernel: Linux 3.10.0-229.4.2.el7.x86_64 Baseline results were generated using release v3.4.3, with hash b4cbecbc0781e89a309d03b60a1f75f8499250e6 from 2015-02-25 12:15:33+00:00 ------------------------------------------------------------------------------------------ benchmark relative change since change since current rev with std_dev* last run v3.4.3 regrtest PGO ------------------------------------------------------------------------------------------ :-) django_v2 0.27960% 1.26479% 8.95277% 14.81902% :-( pybench 0.10000% -0.08965% -2.01005% 7.82206% :-( regex_v8 2.64818% -0.03498% -5.23658% 8.22164% :-| nbody 0.38417% -0.90068% -1.17148% 9.92874% :-| json_dump_v2 0.29148% 0.88046% -0.20192% 11.46522% :-| normal_startup 0.97431% -0.18504% 0.36361% 5.51143% ------------------------------------------------------------------------------------------ Note: Benchmark results are measured in seconds. * Relative Standard Deviation (Standard Deviation/Average) Our lab does a nightly source pull and build of the Python project and measures performance changes against the previous stable version and the previous nightly measurement. This is provided as a service to the community so that quality issues with current hardware can be identified quickly. Intel technologies' features and benefits depend on system configuration and may require enabled hardware, software or service activation. Performance varies depending on system configuration. From python-checkins at python.org Tue Oct 6 17:25:53 2015 From: python-checkins at python.org (guido.van.rossum) Date: Tue, 06 Oct 2015 15:25:53 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzIzOTcy?= =?utf-8?q?=3A_Fix_tests_for_Windows_and_Debian=2E?= Message-ID: <20151006152550.20771.48320@psf.io> https://hg.python.org/cpython/rev/aebbf205ef6f changeset: 98552:aebbf205ef6f branch: 3.4 parent: 98549:6108d30dde21 user: Guido van Rossum date: Tue Oct 06 08:24:10 2015 -0700 summary: Issue #23972: Fix tests for Windows and Debian. 
files: Lib/test/test_asyncio/test_base_events.py | 5 +---- 1 files changed, 1 insertions(+), 4 deletions(-) diff --git a/Lib/test/test_asyncio/test_base_events.py b/Lib/test/test_asyncio/test_base_events.py --- a/Lib/test/test_asyncio/test_base_events.py +++ b/Lib/test/test_asyncio/test_base_events.py @@ -1215,6 +1215,7 @@ def test_create_datagram_endpoint_sock(self): sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + sock.bind(('127.0.0.1', 0)) fut = self.loop.create_datagram_endpoint( lambda: MyDatagramProto(create_future=True, loop=self.loop), sock=sock) @@ -1307,10 +1308,6 @@ self.assertTrue( sock.getsockopt( socket.SOL_SOCKET, socket.SO_REUSEPORT)) - else: - self.assertFalse( - sock.getsockopt( - socket.SOL_SOCKET, socket.SO_REUSEPORT)) self.assertTrue( sock.getsockopt( socket.SOL_SOCKET, socket.SO_BROADCAST)) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Oct 6 17:25:53 2015 From: python-checkins at python.org (guido.van.rossum) Date: Tue, 06 Oct 2015 15:25:53 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2323972=3A_Fix_tests_for_Windows_and_Debian=2E_?= =?utf-8?b?KE1lcmdlIDMuNS0+My42KQ==?= Message-ID: <20151006152550.3289.90769@psf.io> https://hg.python.org/cpython/rev/3e2218a4e629 changeset: 98554:3e2218a4e629 parent: 98551:41f29bbf520d parent: 98553:4d643c5df2a5 user: Guido van Rossum date: Tue Oct 06 08:25:21 2015 -0700 summary: Issue #23972: Fix tests for Windows and Debian. (Merge 3.5->3.6) files: Lib/test/test_asyncio/test_base_events.py | 5 +---- 1 files changed, 1 insertions(+), 4 deletions(-) diff --git a/Lib/test/test_asyncio/test_base_events.py b/Lib/test/test_asyncio/test_base_events.py --- a/Lib/test/test_asyncio/test_base_events.py +++ b/Lib/test/test_asyncio/test_base_events.py @@ -1215,6 +1215,7 @@ def test_create_datagram_endpoint_sock(self): sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + sock.bind(('127.0.0.1', 0)) fut = self.loop.create_datagram_endpoint( lambda: MyDatagramProto(create_future=True, loop=self.loop), sock=sock) @@ -1307,10 +1308,6 @@ self.assertTrue( sock.getsockopt( socket.SOL_SOCKET, socket.SO_REUSEPORT)) - else: - self.assertFalse( - sock.getsockopt( - socket.SOL_SOCKET, socket.SO_REUSEPORT)) self.assertTrue( sock.getsockopt( socket.SOL_SOCKET, socket.SO_BROADCAST)) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Oct 6 17:25:52 2015 From: python-checkins at python.org (guido.van.rossum) Date: Tue, 06 Oct 2015 15:25:52 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_Issue_=2323972=3A_Fix_tests_for_Windows_and_Debian=2E_=28Merge?= =?utf-8?b?IDMuNC0+My41KQ==?= Message-ID: <20151006152550.18370.19374@psf.io> https://hg.python.org/cpython/rev/4d643c5df2a5 changeset: 98553:4d643c5df2a5 branch: 3.5 parent: 98550:3719e842a7b1 parent: 98552:aebbf205ef6f user: Guido van Rossum date: Tue Oct 06 08:24:44 2015 -0700 summary: Issue #23972: Fix tests for Windows and Debian. 
(Merge 3.4->3.5) files: Lib/test/test_asyncio/test_base_events.py | 5 +---- 1 files changed, 1 insertions(+), 4 deletions(-) diff --git a/Lib/test/test_asyncio/test_base_events.py b/Lib/test/test_asyncio/test_base_events.py --- a/Lib/test/test_asyncio/test_base_events.py +++ b/Lib/test/test_asyncio/test_base_events.py @@ -1215,6 +1215,7 @@ def test_create_datagram_endpoint_sock(self): sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + sock.bind(('127.0.0.1', 0)) fut = self.loop.create_datagram_endpoint( lambda: MyDatagramProto(create_future=True, loop=self.loop), sock=sock) @@ -1307,10 +1308,6 @@ self.assertTrue( sock.getsockopt( socket.SOL_SOCKET, socket.SO_REUSEPORT)) - else: - self.assertFalse( - sock.getsockopt( - socket.SOL_SOCKET, socket.SO_REUSEPORT)) self.assertTrue( sock.getsockopt( socket.SOL_SOCKET, socket.SO_BROADCAST)) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Oct 6 17:53:47 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Tue, 06 Oct 2015 15:53:47 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogSXNzdWUgIzI1MzE3?= =?utf-8?q?=3A_Converted_doctests_in_test=5Ftokenize_to_unittests=2E?= Message-ID: <20151006155346.70990.58390@psf.io> https://hg.python.org/cpython/rev/7b2af8ee6dfa changeset: 98555:7b2af8ee6dfa branch: 2.7 parent: 98548:69a26f0800b3 user: Serhiy Storchaka date: Tue Oct 06 18:13:38 2015 +0300 summary: Issue #25317: Converted doctests in test_tokenize to unittests. files: Lib/test/test_tokenize.py | 544 ++++++++++++++----------- 1 files changed, 294 insertions(+), 250 deletions(-) diff --git a/Lib/test/test_tokenize.py b/Lib/test/test_tokenize.py --- a/Lib/test/test_tokenize.py +++ b/Lib/test/test_tokenize.py @@ -1,20 +1,42 @@ -doctests = """ -Tests for the tokenize module. +from test import test_support +from tokenize import (untokenize, generate_tokens, NUMBER, NAME, OP, + STRING, ENDMARKER, tok_name, Untokenizer, tokenize) +from StringIO import StringIO +import os +from unittest import TestCase - >>> import glob, random, sys -The tests can be really simple. Given a small fragment of source -code, print out a table with tokens. The ENDMARKER is omitted for -brevity. +class TokenizeTest(TestCase): + # Tests for the tokenize module. - >>> dump_tokens("1 + 1") + # The tests can be really simple. Given a small fragment of source + # code, print out a table with tokens. The ENDMARKER is omitted for + # brevity. + + def check_tokenize(self, s, expected): + # Format the tokens in s in a table format. + # The ENDMARKER is omitted. + result = [] + f = StringIO(s) + for type, token, start, end, line in generate_tokens(f.readline): + if type == ENDMARKER: + break + type = tok_name[type] + result.append(" %(type)-10.10s %(token)-13.13r %(start)s %(end)s" % + locals()) + self.assertEqual(result, + expected.rstrip().splitlines()) + + + def test_basic(self): + self.check_tokenize("1 + 1", """\ NUMBER '1' (1, 0) (1, 1) OP '+' (1, 2) (1, 3) NUMBER '1' (1, 4) (1, 5) - - >>> dump_tokens("if False:\\n" - ... " # NL\\n" - ... " True = False # NEWLINE\\n") + """) + self.check_tokenize("if False:\n" + " # NL\n" + " True = False # NEWLINE\n", """\ NAME 'if' (1, 0) (1, 2) NAME 'False' (1, 3) (1, 8) OP ':' (1, 8) (1, 9) @@ -28,122 +50,48 @@ COMMENT '# NEWLINE' (3, 17) (3, 26) NEWLINE '\\n' (3, 26) (3, 27) DEDENT '' (4, 0) (4, 0) + """) - >>> indent_error_file = \""" - ... def k(x): - ... x += 2 - ... x += 5 - ... 
\""" + indent_error_file = """\ +def k(x): + x += 2 + x += 5 +""" + with self.assertRaisesRegexp(IndentationError, + "unindent does not match any " + "outer indentation level"): + for tok in generate_tokens(StringIO(indent_error_file).readline): + pass - >>> for tok in generate_tokens(StringIO(indent_error_file).readline): pass - Traceback (most recent call last): - ... - IndentationError: unindent does not match any outer indentation level - -Test roundtrip for `untokenize`. `f` is an open file or a string. The source -code in f is tokenized, converted back to source code via tokenize.untokenize(), -and tokenized again from the latter. The test fails if the second tokenization -doesn't match the first. - - >>> def roundtrip(f): - ... if isinstance(f, str): f = StringIO(f) - ... token_list = list(generate_tokens(f.readline)) - ... f.close() - ... tokens1 = [tok[:2] for tok in token_list] - ... new_text = untokenize(tokens1) - ... readline = iter(new_text.splitlines(1)).next - ... tokens2 = [tok[:2] for tok in generate_tokens(readline)] - ... return tokens1 == tokens2 - ... - -There are some standard formatting practices that are easy to get right. - - >>> roundtrip("if x == 1:\\n" - ... " print x\\n") - True - - >>> roundtrip("# This is a comment\\n# This also") - True - -Some people use different formatting conventions, which makes -untokenize a little trickier. Note that this test involves trailing -whitespace after the colon. Note that we use hex escapes to make the -two trailing blanks apperant in the expected output. - - >>> roundtrip("if x == 1 : \\n" - ... " print x\\n") - True - - >>> f = test_support.findfile("tokenize_tests" + os.extsep + "txt") - >>> roundtrip(open(f)) - True - - >>> roundtrip("if x == 1:\\n" - ... " # A comment by itself.\\n" - ... " print x # Comment here, too.\\n" - ... " # Another comment.\\n" - ... "after_if = True\\n") - True - - >>> roundtrip("if (x # The comments need to go in the right place\\n" - ... " == 1):\\n" - ... " print 'x==1'\\n") - True - - >>> roundtrip("class Test: # A comment here\\n" - ... " # A comment with weird indent\\n" - ... " after_com = 5\\n" - ... " def x(m): return m*5 # a one liner\\n" - ... " def y(m): # A whitespace after the colon\\n" - ... " return y*4 # 3-space indent\\n") - True - -Some error-handling code - - >>> roundtrip("try: import somemodule\\n" - ... "except ImportError: # comment\\n" - ... " print 'Can not import' # comment2\\n" - ... "else: print 'Loaded'\\n") - True - -Balancing continuation - - >>> roundtrip("a = (3,4, \\n" - ... "5,6)\\n" - ... "y = [3, 4,\\n" - ... "5]\\n" - ... "z = {'a': 5,\\n" - ... "'b':15, 'c':True}\\n" - ... "x = len(y) + 5 - a[\\n" - ... "3] - a[2]\\n" - ... "+ len(z) - z[\\n" - ... 
"'b']\\n") - True - -Ordinary integers and binary operators - - >>> dump_tokens("0xff <= 255") + def test_int(self): + # Ordinary integers and binary operators + self.check_tokenize("0xff <= 255", """\ NUMBER '0xff' (1, 0) (1, 4) OP '<=' (1, 5) (1, 7) NUMBER '255' (1, 8) (1, 11) - >>> dump_tokens("0b10 <= 255") + """) + self.check_tokenize("0b10 <= 255", """\ NUMBER '0b10' (1, 0) (1, 4) OP '<=' (1, 5) (1, 7) NUMBER '255' (1, 8) (1, 11) - >>> dump_tokens("0o123 <= 0123") + """) + self.check_tokenize("0o123 <= 0123", """\ NUMBER '0o123' (1, 0) (1, 5) OP '<=' (1, 6) (1, 8) NUMBER '0123' (1, 9) (1, 13) - >>> dump_tokens("01234567 > ~0x15") + """) + self.check_tokenize("01234567 > ~0x15", """\ NUMBER '01234567' (1, 0) (1, 8) OP '>' (1, 9) (1, 10) OP '~' (1, 11) (1, 12) NUMBER '0x15' (1, 12) (1, 16) - >>> dump_tokens("2134568 != 01231515") + """) + self.check_tokenize("2134568 != 01231515", """\ NUMBER '2134568' (1, 0) (1, 7) OP '!=' (1, 8) (1, 10) NUMBER '01231515' (1, 11) (1, 19) - >>> dump_tokens("(-124561-1) & 0200000000") + """) + self.check_tokenize("(-124561-1) & 0200000000", """\ OP '(' (1, 0) (1, 1) OP '-' (1, 1) (1, 2) NUMBER '124561' (1, 2) (1, 8) @@ -152,78 +100,93 @@ OP ')' (1, 10) (1, 11) OP '&' (1, 12) (1, 13) NUMBER '0200000000' (1, 14) (1, 24) - >>> dump_tokens("0xdeadbeef != -1") + """) + self.check_tokenize("0xdeadbeef != -1", """\ NUMBER '0xdeadbeef' (1, 0) (1, 10) OP '!=' (1, 11) (1, 13) OP '-' (1, 14) (1, 15) NUMBER '1' (1, 15) (1, 16) - >>> dump_tokens("0xdeadc0de & 012345") + """) + self.check_tokenize("0xdeadc0de & 012345", """\ NUMBER '0xdeadc0de' (1, 0) (1, 10) OP '&' (1, 11) (1, 12) NUMBER '012345' (1, 13) (1, 19) - >>> dump_tokens("0xFF & 0x15 | 1234") + """) + self.check_tokenize("0xFF & 0x15 | 1234", """\ NUMBER '0xFF' (1, 0) (1, 4) OP '&' (1, 5) (1, 6) NUMBER '0x15' (1, 7) (1, 11) OP '|' (1, 12) (1, 13) NUMBER '1234' (1, 14) (1, 18) + """) -Long integers - - >>> dump_tokens("x = 0L") + def test_long(self): + # Long integers + self.check_tokenize("x = 0L", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '0L' (1, 4) (1, 6) - >>> dump_tokens("x = 0xfffffffffff") + """) + self.check_tokenize("x = 0xfffffffffff", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '0xffffffffff (1, 4) (1, 17) - >>> dump_tokens("x = 123141242151251616110l") + """) + self.check_tokenize("x = 123141242151251616110l", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '123141242151 (1, 4) (1, 26) - >>> dump_tokens("x = -15921590215012591L") + """) + self.check_tokenize("x = -15921590215012591L", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) OP '-' (1, 4) (1, 5) NUMBER '159215902150 (1, 5) (1, 23) + """) -Floating point numbers - - >>> dump_tokens("x = 3.14159") + def test_float(self): + # Floating point numbers + self.check_tokenize("x = 3.14159", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '3.14159' (1, 4) (1, 11) - >>> dump_tokens("x = 314159.") + """) + self.check_tokenize("x = 314159.", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '314159.' 
(1, 4) (1, 11) - >>> dump_tokens("x = .314159") + """) + self.check_tokenize("x = .314159", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '.314159' (1, 4) (1, 11) - >>> dump_tokens("x = 3e14159") + """) + self.check_tokenize("x = 3e14159", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '3e14159' (1, 4) (1, 11) - >>> dump_tokens("x = 3E123") + """) + self.check_tokenize("x = 3E123", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '3E123' (1, 4) (1, 9) - >>> dump_tokens("x+y = 3e-1230") + """) + self.check_tokenize("x+y = 3e-1230", """\ NAME 'x' (1, 0) (1, 1) OP '+' (1, 1) (1, 2) NAME 'y' (1, 2) (1, 3) OP '=' (1, 4) (1, 5) NUMBER '3e-1230' (1, 6) (1, 13) - >>> dump_tokens("x = 3.14e159") + """) + self.check_tokenize("x = 3.14e159", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '3.14e159' (1, 4) (1, 12) + """) -String literals - - >>> dump_tokens("x = ''; y = \\\"\\\"") + def test_string(self): + # String literals + self.check_tokenize("x = ''; y = \"\"", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) STRING "''" (1, 4) (1, 6) @@ -231,7 +194,8 @@ NAME 'y' (1, 8) (1, 9) OP '=' (1, 10) (1, 11) STRING '""' (1, 12) (1, 14) - >>> dump_tokens("x = '\\\"'; y = \\\"'\\\"") + """) + self.check_tokenize("x = '\"'; y = \"'\"", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) STRING '\\'"\\'' (1, 4) (1, 7) @@ -239,25 +203,29 @@ NAME 'y' (1, 9) (1, 10) OP '=' (1, 11) (1, 12) STRING '"\\'"' (1, 13) (1, 16) - >>> dump_tokens("x = \\\"doesn't \\\"shrink\\\", does it\\\"") + """) + self.check_tokenize("x = \"doesn't \"shrink\", does it\"", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) STRING '"doesn\\'t "' (1, 4) (1, 14) NAME 'shrink' (1, 14) (1, 20) STRING '", does it"' (1, 20) (1, 31) - >>> dump_tokens("x = u'abc' + U'ABC'") + """) + self.check_tokenize("x = u'abc' + U'ABC'", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) STRING "u'abc'" (1, 4) (1, 10) OP '+' (1, 11) (1, 12) STRING "U'ABC'" (1, 13) (1, 19) - >>> dump_tokens('y = u"ABC" + U"ABC"') + """) + self.check_tokenize('y = u"ABC" + U"ABC"', """\ NAME 'y' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) STRING 'u"ABC"' (1, 4) (1, 10) OP '+' (1, 11) (1, 12) STRING 'U"ABC"' (1, 13) (1, 19) - >>> dump_tokens("x = ur'abc' + Ur'ABC' + uR'ABC' + UR'ABC'") + """) + self.check_tokenize("x = ur'abc' + Ur'ABC' + uR'ABC' + UR'ABC'", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) STRING "ur'abc'" (1, 4) (1, 11) @@ -267,7 +235,8 @@ STRING "uR'ABC'" (1, 24) (1, 31) OP '+' (1, 32) (1, 33) STRING "UR'ABC'" (1, 34) (1, 41) - >>> dump_tokens('y = ur"abc" + Ur"ABC" + uR"ABC" + UR"ABC"') + """) + self.check_tokenize('y = ur"abc" + Ur"ABC" + uR"ABC" + UR"ABC"', """\ NAME 'y' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) STRING 'ur"abc"' (1, 4) (1, 11) @@ -278,15 +247,18 @@ OP '+' (1, 32) (1, 33) STRING 'UR"ABC"' (1, 34) (1, 41) - >>> dump_tokens("b'abc' + B'abc'") + """) + self.check_tokenize("b'abc' + B'abc'", """\ STRING "b'abc'" (1, 0) (1, 6) OP '+' (1, 7) (1, 8) STRING "B'abc'" (1, 9) (1, 15) - >>> dump_tokens('b"abc" + B"abc"') + """) + self.check_tokenize('b"abc" + B"abc"', """\ STRING 'b"abc"' (1, 0) (1, 6) OP '+' (1, 7) (1, 8) STRING 'B"abc"' (1, 9) (1, 15) - >>> dump_tokens("br'abc' + bR'abc' + Br'abc' + BR'abc'") + """) + self.check_tokenize("br'abc' + bR'abc' + Br'abc' + BR'abc'", """\ STRING "br'abc'" (1, 0) (1, 7) OP '+' (1, 8) (1, 9) STRING "bR'abc'" (1, 10) (1, 17) @@ -294,7 +266,8 @@ STRING "Br'abc'" (1, 20) (1, 27) OP '+' (1, 28) (1, 29) STRING "BR'abc'" (1, 30) (1, 37) - >>> dump_tokens('br"abc" + bR"abc" + Br"abc" + BR"abc"') + """) 
+ self.check_tokenize('br"abc" + bR"abc" + Br"abc" + BR"abc"', """\ STRING 'br"abc"' (1, 0) (1, 7) OP '+' (1, 8) (1, 9) STRING 'bR"abc"' (1, 10) (1, 17) @@ -302,10 +275,10 @@ STRING 'Br"abc"' (1, 20) (1, 27) OP '+' (1, 28) (1, 29) STRING 'BR"abc"' (1, 30) (1, 37) + """) -Operators - - >>> dump_tokens("def d22(a, b, c=2, d=2, *k): pass") + def test_function(self): + self.check_tokenize("def d22(a, b, c=2, d=2, *k): pass", """\ NAME 'def' (1, 0) (1, 3) NAME 'd22' (1, 4) (1, 7) OP '(' (1, 7) (1, 8) @@ -326,7 +299,8 @@ OP ')' (1, 26) (1, 27) OP ':' (1, 27) (1, 28) NAME 'pass' (1, 29) (1, 33) - >>> dump_tokens("def d01v_(a=1, *k, **w): pass") + """) + self.check_tokenize("def d01v_(a=1, *k, **w): pass", """\ NAME 'def' (1, 0) (1, 3) NAME 'd01v_' (1, 4) (1, 9) OP '(' (1, 9) (1, 10) @@ -342,11 +316,12 @@ OP ')' (1, 22) (1, 23) OP ':' (1, 23) (1, 24) NAME 'pass' (1, 25) (1, 29) + """) -Comparison - - >>> dump_tokens("if 1 < 1 > 1 == 1 >= 5 <= 0x15 <= 0x12 != " + - ... "1 and 5 in 1 not in 1 is 1 or 5 is not 1: pass") + def test_comparison(self): + # Comparison + self.check_tokenize("if 1 < 1 > 1 == 1 >= 5 <= 0x15 <= 0x12 != " + + "1 and 5 in 1 not in 1 is 1 or 5 is not 1: pass", """\ NAME 'if' (1, 0) (1, 2) NUMBER '1' (1, 3) (1, 4) OP '<' (1, 5) (1, 6) @@ -379,10 +354,11 @@ NUMBER '1' (1, 81) (1, 82) OP ':' (1, 82) (1, 83) NAME 'pass' (1, 84) (1, 88) + """) -Shift - - >>> dump_tokens("x = 1 << 1 >> 5") + def test_shift(self): + # Shift + self.check_tokenize("x = 1 << 1 >> 5", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '1' (1, 4) (1, 5) @@ -390,10 +366,11 @@ NUMBER '1' (1, 9) (1, 10) OP '>>' (1, 11) (1, 13) NUMBER '5' (1, 14) (1, 15) + """) -Additive - - >>> dump_tokens("x = 1 - y + 15 - 01 + 0x124 + z + a[5]") + def test_additive(self): + # Additive + self.check_tokenize("x = 1 - y + 15 - 01 + 0x124 + z + a[5]", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '1' (1, 4) (1, 5) @@ -412,10 +389,11 @@ OP '[' (1, 35) (1, 36) NUMBER '5' (1, 36) (1, 37) OP ']' (1, 37) (1, 38) + """) -Multiplicative - - >>> dump_tokens("x = 1//1*1/5*12%0x12") + def test_multiplicative(self): + # Multiplicative + self.check_tokenize("x = 1//1*1/5*12%0x12", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '1' (1, 4) (1, 5) @@ -429,10 +407,11 @@ NUMBER '12' (1, 13) (1, 15) OP '%' (1, 15) (1, 16) NUMBER '0x12' (1, 16) (1, 20) + """) -Unary - - >>> dump_tokens("~1 ^ 1 & 1 |1 ^ -1") + def test_unary(self): + # Unary + self.check_tokenize("~1 ^ 1 & 1 |1 ^ -1", """\ OP '~' (1, 0) (1, 1) NUMBER '1' (1, 1) (1, 2) OP '^' (1, 3) (1, 4) @@ -444,7 +423,8 @@ OP '^' (1, 14) (1, 15) OP '-' (1, 16) (1, 17) NUMBER '1' (1, 17) (1, 18) - >>> dump_tokens("-1*1/1+1*1//1 - ---1**1") + """) + self.check_tokenize("-1*1/1+1*1//1 - ---1**1", """\ OP '-' (1, 0) (1, 1) NUMBER '1' (1, 1) (1, 2) OP '*' (1, 2) (1, 3) @@ -464,10 +444,12 @@ NUMBER '1' (1, 19) (1, 20) OP '**' (1, 20) (1, 22) NUMBER '1' (1, 22) (1, 23) + """) -Selector - - >>> dump_tokens("import sys, time\\nx = sys.modules['time'].time()") + def test_selector(self): + # Selector + self.check_tokenize("import sys, time\n" + "x = sys.modules['time'].time()", """\ NAME 'import' (1, 0) (1, 6) NAME 'sys' (1, 7) (1, 10) OP ',' (1, 10) (1, 11) @@ -485,10 +467,12 @@ NAME 'time' (2, 24) (2, 28) OP '(' (2, 28) (2, 29) OP ')' (2, 29) (2, 30) + """) -Methods - - >>> dump_tokens("@staticmethod\\ndef foo(x,y): pass") + def test_method(self): + # Methods + self.check_tokenize("@staticmethod\n" + "def foo(x,y): pass", """\ OP '@' (1, 0) (1, 1) NAME 'staticmethod (1, 1) (1, 13) 
NEWLINE '\\n' (1, 13) (1, 14) @@ -501,41 +485,13 @@ OP ')' (2, 11) (2, 12) OP ':' (2, 12) (2, 13) NAME 'pass' (2, 14) (2, 18) + """) -Backslash means line continuation, except for comments - - >>> roundtrip("x=1+\\\\n" - ... "1\\n" - ... "# This is a comment\\\\n" - ... "# This also\\n") - True - >>> roundtrip("# Comment \\\\nx = 0") - True - -Two string literals on the same line - - >>> roundtrip("'' ''") - True - -Test roundtrip on random python modules. -pass the '-ucpu' option to process the full directory. - - >>> - >>> tempdir = os.path.dirname(f) or os.curdir - >>> testfiles = glob.glob(os.path.join(tempdir, "test*.py")) - - >>> if not test_support.is_resource_enabled("cpu"): - ... testfiles = random.sample(testfiles, 10) - ... - >>> for testfile in testfiles: - ... if not roundtrip(open(testfile)): - ... print "Roundtrip failed for file %s" % testfile - ... break - ... else: True - True - -Evil tabs - >>> dump_tokens("def f():\\n\\tif x\\n \\tpass") + def test_tabs(self): + # Evil tabs + self.check_tokenize("def f():\n" + "\tif x\n" + " \tpass", """\ NAME 'def' (1, 0) (1, 3) NAME 'f' (1, 4) (1, 5) OP '(' (1, 5) (1, 6) @@ -550,56 +506,16 @@ NAME 'pass' (3, 9) (3, 13) DEDENT '' (4, 0) (4, 0) DEDENT '' (4, 0) (4, 0) + """) -Pathological whitespace (http://bugs.python.org/issue16152) - >>> dump_tokens("@ ") + def test_pathological_trailing_whitespace(self): + # Pathological whitespace (http://bugs.python.org/issue16152) + self.check_tokenize("@ ", """\ OP '@' (1, 0) (1, 1) -""" + """) -from test import test_support -from tokenize import (untokenize, generate_tokens, NUMBER, NAME, OP, - STRING, ENDMARKER, tok_name, Untokenizer, tokenize) -from StringIO import StringIO -import os -from unittest import TestCase - -def dump_tokens(s): - """Print out the tokens in s in a table format. - - The ENDMARKER is omitted. - """ - f = StringIO(s) - for type, token, start, end, line in generate_tokens(f.readline): - if type == ENDMARKER: - break - type = tok_name[type] - print("%(type)-10.10s %(token)-13.13r %(start)s %(end)s" % locals()) - -# This is an example from the docs, set up as a doctest. def decistmt(s): - """Substitute Decimals for floats in a string of statements. - - >>> from decimal import Decimal - >>> s = 'print +21.3e-5*-.1234/81.7' - >>> decistmt(s) - "print +Decimal ('21.3e-5')*-Decimal ('.1234')/Decimal ('81.7')" - - The format of the exponent is inherited from the platform C library. - Known cases are "e-007" (Windows) and "e-07" (not Windows). Since - we're only showing 12 digits, and the 13th isn't close to 5, the - rest of the output should be platform-independent. - - >>> exec(s) #doctest: +ELLIPSIS - -3.21716034272e-0...7 - - Output from calculations with Decimal should be identical across all - platforms. - - >>> exec(decistmt(s)) - -3.217160342717258261933904529E-7 - """ - result = [] g = generate_tokens(StringIO(s).readline) # tokenize the string for toknum, tokval, _, _, _ in g: @@ -614,6 +530,27 @@ result.append((toknum, tokval)) return untokenize(result) +class TestMisc(TestCase): + + def test_decistmt(self): + # Substitute Decimals for floats in a string of statements. + # This is an example from the docs. + + from decimal import Decimal + s = '+21.3e-5*-.1234/81.7' + self.assertEqual(decistmt(s), + "+Decimal ('21.3e-5')*-Decimal ('.1234')/Decimal ('81.7')") + + # The format of the exponent is inherited from the platform C library. + # Known cases are "e-007" (Windows) and "e-07" (not Windows). 
Since + # we're only showing 12 digits, and the 13th isn't close to 5, the + # rest of the output should be platform-independent. + self.assertRegexpMatches(str(eval(s)), '-3.21716034272e-0+7') + + # Output from calculations with Decimal should be identical across all + # platforms. + self.assertEqual(eval(decistmt(s)), Decimal('-3.217160342717258261933904529E-7')) + class UntokenizeTest(TestCase): @@ -651,6 +588,115 @@ class TestRoundtrip(TestCase): + + def check_roundtrip(self, f): + """ + Test roundtrip for `untokenize`. `f` is an open file or a string. + The source code in f is tokenized, converted back to source code + via tokenize.untokenize(), and tokenized again from the latter. + The test fails if the second tokenization doesn't match the first. + """ + if isinstance(f, str): f = StringIO(f) + token_list = list(generate_tokens(f.readline)) + f.close() + tokens1 = [tok[:2] for tok in token_list] + new_text = untokenize(tokens1) + readline = iter(new_text.splitlines(1)).next + tokens2 = [tok[:2] for tok in generate_tokens(readline)] + self.assertEqual(tokens2, tokens1) + + def test_roundtrip(self): + # There are some standard formatting practices that are easy to get right. + + self.check_roundtrip("if x == 1:\n" + " print(x)\n") + + # There are some standard formatting practices that are easy to get right. + + self.check_roundtrip("if x == 1:\n" + " print x\n") + self.check_roundtrip("# This is a comment\n" + "# This also") + + # Some people use different formatting conventions, which makes + # untokenize a little trickier. Note that this test involves trailing + # whitespace after the colon. Note that we use hex escapes to make the + # two trailing blanks apperant in the expected output. + + self.check_roundtrip("if x == 1 : \n" + " print x\n") + fn = test_support.findfile("tokenize_tests" + os.extsep + "txt") + with open(fn) as f: + self.check_roundtrip(f) + self.check_roundtrip("if x == 1:\n" + " # A comment by itself.\n" + " print x # Comment here, too.\n" + " # Another comment.\n" + "after_if = True\n") + self.check_roundtrip("if (x # The comments need to go in the right place\n" + " == 1):\n" + " print 'x==1'\n") + self.check_roundtrip("class Test: # A comment here\n" + " # A comment with weird indent\n" + " after_com = 5\n" + " def x(m): return m*5 # a one liner\n" + " def y(m): # A whitespace after the colon\n" + " return y*4 # 3-space indent\n") + + # Some error-handling code + + self.check_roundtrip("try: import somemodule\n" + "except ImportError: # comment\n" + " print 'Can not import' # comment2\n" + "else: print 'Loaded'\n") + + def test_continuation(self): + # Balancing continuation + self.check_roundtrip("a = (3,4, \n" + "5,6)\n" + "y = [3, 4,\n" + "5]\n" + "z = {'a': 5,\n" + "'b':15, 'c':True}\n" + "x = len(y) + 5 - a[\n" + "3] - a[2]\n" + "+ len(z) - z[\n" + "'b']\n") + + def test_backslash_continuation(self): + # Backslash means line continuation, except for comments + self.check_roundtrip("x=1+\\\n" + "1\n" + "# This is a comment\\\n" + "# This also\n") + self.check_roundtrip("# Comment \\\n" + "x = 0") + + def test_string_concatenation(self): + # Two string literals on the same line + self.check_roundtrip("'' ''") + + def test_random_files(self): + # Test roundtrip on random python modules. + # pass the '-ucpu' option to process the full directory. 
+ + import glob, random + fn = test_support.findfile("tokenize_tests" + os.extsep + "txt") + tempdir = os.path.dirname(fn) or os.curdir + testfiles = glob.glob(os.path.join(tempdir, "test*.py")) + + if not test_support.is_resource_enabled("cpu"): + testfiles = random.sample(testfiles, 10) + + for testfile in testfiles: + try: + with open(testfile, 'rb') as f: + self.check_roundtrip(f) + except: + print "Roundtrip failed for file %s" % testfile + raise + + def roundtrip(self, code): if isinstance(code, str): code = code.encode('utf-8') @@ -667,13 +713,11 @@ self.assertEqual(codelines[1], codelines[2]) -__test__ = {"doctests" : doctests, 'decistmt': decistmt} - def test_main(): - from test import test_tokenize - test_support.run_doctest(test_tokenize, True) + test_support.run_unittest(TokenizeTest) test_support.run_unittest(UntokenizeTest) test_support.run_unittest(TestRoundtrip) + test_support.run_unittest(TestMisc) if __name__ == "__main__": test_main() -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Oct 6 17:54:19 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Tue, 06 Oct 2015 15:54:19 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_Null_merge?= Message-ID: <20151006155418.3285.23768@psf.io> https://hg.python.org/cpython/rev/b0ce3ef2ea21 changeset: 98562:b0ce3ef2ea21 branch: 3.5 parent: 98560:df8ccac22006 parent: 98559:91f36d2b097a user: Serhiy Storchaka date: Tue Oct 06 18:47:26 2015 +0300 summary: Null merge files: -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Oct 6 17:54:19 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Tue, 06 Oct 2015 15:54:19 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_default_-=3E_default?= =?utf-8?q?=29=3A_Merge_heads?= Message-ID: <20151006155418.20763.71889@psf.io> https://hg.python.org/cpython/rev/727d72b05ff5 changeset: 98561:727d72b05ff5 parent: 98558:66d239660997 parent: 98554:3e2218a4e629 user: Serhiy Storchaka date: Tue Oct 06 18:40:09 2015 +0300 summary: Merge heads files: Lib/test/test_asyncio/test_base_events.py | 5 +---- 1 files changed, 1 insertions(+), 4 deletions(-) diff --git a/Lib/test/test_asyncio/test_base_events.py b/Lib/test/test_asyncio/test_base_events.py --- a/Lib/test/test_asyncio/test_base_events.py +++ b/Lib/test/test_asyncio/test_base_events.py @@ -1215,6 +1215,7 @@ def test_create_datagram_endpoint_sock(self): sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + sock.bind(('127.0.0.1', 0)) fut = self.loop.create_datagram_endpoint( lambda: MyDatagramProto(create_future=True, loop=self.loop), sock=sock) @@ -1307,10 +1308,6 @@ self.assertTrue( sock.getsockopt( socket.SOL_SOCKET, socket.SO_REUSEPORT)) - else: - self.assertFalse( - sock.getsockopt( - socket.SOL_SOCKET, socket.SO_REUSEPORT)) self.assertTrue( sock.getsockopt( socket.SOL_SOCKET, socket.SO_BROADCAST)) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Oct 6 17:54:19 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Tue, 06 Oct 2015 15:54:19 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy41IC0+IDMuNSk6?= =?utf-8?q?_Merge_heads?= Message-ID: <20151006155417.2685.80829@psf.io> https://hg.python.org/cpython/rev/df8ccac22006 changeset: 98560:df8ccac22006 branch: 3.5 parent: 98553:4d643c5df2a5 parent: 98557:bff40616d2a5 user: Serhiy Storchaka date: Tue Oct 06 18:39:58 2015 +0300 summary: Merge heads files: Lib/test/test_tokenize.py 
| 811 ++++++++++++------------- 1 files changed, 395 insertions(+), 416 deletions(-) diff --git a/Lib/test/test_tokenize.py b/Lib/test/test_tokenize.py --- a/Lib/test/test_tokenize.py +++ b/Lib/test/test_tokenize.py @@ -1,22 +1,44 @@ -doctests = """ -Tests for the tokenize module. +from test import support +from tokenize import (tokenize, _tokenize, untokenize, NUMBER, NAME, OP, + STRING, ENDMARKER, ENCODING, tok_name, detect_encoding, + open as tokenize_open, Untokenizer) +from io import BytesIO +from unittest import TestCase, mock +import os +import token -The tests can be really simple. Given a small fragment of source -code, print out a table with tokens. The ENDMARKER is omitted for -brevity. - >>> import glob +class TokenizeTest(TestCase): + # Tests for the tokenize module. - >>> dump_tokens("1 + 1") - ENCODING 'utf-8' (0, 0) (0, 0) + # The tests can be really simple. Given a small fragment of source + # code, print out a table with tokens. The ENDMARKER is omitted for + # brevity. + + def check_tokenize(self, s, expected): + # Format the tokens in s in a table format. + # The ENDMARKER is omitted. + result = [] + f = BytesIO(s.encode('utf-8')) + for type, token, start, end, line in tokenize(f.readline): + if type == ENDMARKER: + break + type = tok_name[type] + result.append(" %(type)-10.10s %(token)-13.13r %(start)s %(end)s" % + locals()) + self.assertEqual(result, + [" ENCODING 'utf-8' (0, 0) (0, 0)"] + + expected.rstrip().splitlines()) + + def test_basic(self): + self.check_tokenize("1 + 1", """\ NUMBER '1' (1, 0) (1, 1) OP '+' (1, 2) (1, 3) NUMBER '1' (1, 4) (1, 5) - - >>> dump_tokens("if False:\\n" - ... " # NL\\n" - ... " True = False # NEWLINE\\n") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("if False:\n" + " # NL\n" + " True = False # NEWLINE\n", """\ NAME 'if' (1, 0) (1, 2) NAME 'False' (1, 3) (1, 8) OP ':' (1, 8) (1, 9) @@ -30,112 +52,48 @@ COMMENT '# NEWLINE' (3, 17) (3, 26) NEWLINE '\\n' (3, 26) (3, 27) DEDENT '' (4, 0) (4, 0) + """) + indent_error_file = b"""\ +def k(x): + x += 2 + x += 5 +""" + readline = BytesIO(indent_error_file).readline + with self.assertRaisesRegex(IndentationError, + "unindent does not match any " + "outer indentation level"): + for tok in tokenize(readline): + pass - >>> indent_error_file = \""" - ... def k(x): - ... x += 2 - ... x += 5 - ... \""" - >>> readline = BytesIO(indent_error_file.encode('utf-8')).readline - >>> for tok in tokenize(readline): pass - Traceback (most recent call last): - ... - IndentationError: unindent does not match any outer indentation level - -There are some standard formatting practices that are easy to get right. - - >>> roundtrip("if x == 1:\\n" - ... " print(x)\\n") - True - - >>> roundtrip("# This is a comment\\n# This also") - True - -Some people use different formatting conventions, which makes -untokenize a little trickier. Note that this test involves trailing -whitespace after the colon. Note that we use hex escapes to make the -two trailing blanks apparent in the expected output. - - >>> roundtrip("if x == 1 : \\n" - ... " print(x)\\n") - True - - >>> f = support.findfile("tokenize_tests.txt") - >>> roundtrip(open(f, 'rb')) - True - - >>> roundtrip("if x == 1:\\n" - ... " # A comment by itself.\\n" - ... " print(x) # Comment here, too.\\n" - ... " # Another comment.\\n" - ... "after_if = True\\n") - True - - >>> roundtrip("if (x # The comments need to go in the right place\\n" - ... " == 1):\\n" - ... " print('x==1')\\n") - True - - >>> roundtrip("class Test: # A comment here\\n" - ... 
" # A comment with weird indent\\n" - ... " after_com = 5\\n" - ... " def x(m): return m*5 # a one liner\\n" - ... " def y(m): # A whitespace after the colon\\n" - ... " return y*4 # 3-space indent\\n") - True - -Some error-handling code - - >>> roundtrip("try: import somemodule\\n" - ... "except ImportError: # comment\\n" - ... " print('Can not import' # comment2\\n)" - ... "else: print('Loaded')\\n") - True - -Balancing continuation - - >>> roundtrip("a = (3,4, \\n" - ... "5,6)\\n" - ... "y = [3, 4,\\n" - ... "5]\\n" - ... "z = {'a': 5,\\n" - ... "'b':15, 'c':True}\\n" - ... "x = len(y) + 5 - a[\\n" - ... "3] - a[2]\\n" - ... "+ len(z) - z[\\n" - ... "'b']\\n") - True - -Ordinary integers and binary operators - - >>> dump_tokens("0xff <= 255") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_int(self): + # Ordinary integers and binary operators + self.check_tokenize("0xff <= 255", """\ NUMBER '0xff' (1, 0) (1, 4) OP '<=' (1, 5) (1, 7) NUMBER '255' (1, 8) (1, 11) - >>> dump_tokens("0b10 <= 255") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("0b10 <= 255", """\ NUMBER '0b10' (1, 0) (1, 4) OP '<=' (1, 5) (1, 7) NUMBER '255' (1, 8) (1, 11) - >>> dump_tokens("0o123 <= 0O123") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("0o123 <= 0O123", """\ NUMBER '0o123' (1, 0) (1, 5) OP '<=' (1, 6) (1, 8) NUMBER '0O123' (1, 9) (1, 14) - >>> dump_tokens("1234567 > ~0x15") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("1234567 > ~0x15", """\ NUMBER '1234567' (1, 0) (1, 7) OP '>' (1, 8) (1, 9) OP '~' (1, 10) (1, 11) NUMBER '0x15' (1, 11) (1, 15) - >>> dump_tokens("2134568 != 1231515") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("2134568 != 1231515", """\ NUMBER '2134568' (1, 0) (1, 7) OP '!=' (1, 8) (1, 10) NUMBER '1231515' (1, 11) (1, 18) - >>> dump_tokens("(-124561-1) & 200000000") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("(-124561-1) & 200000000", """\ OP '(' (1, 0) (1, 1) OP '-' (1, 1) (1, 2) NUMBER '124561' (1, 2) (1, 8) @@ -144,93 +102,93 @@ OP ')' (1, 10) (1, 11) OP '&' (1, 12) (1, 13) NUMBER '200000000' (1, 14) (1, 23) - >>> dump_tokens("0xdeadbeef != -1") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("0xdeadbeef != -1", """\ NUMBER '0xdeadbeef' (1, 0) (1, 10) OP '!=' (1, 11) (1, 13) OP '-' (1, 14) (1, 15) NUMBER '1' (1, 15) (1, 16) - >>> dump_tokens("0xdeadc0de & 12345") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("0xdeadc0de & 12345", """\ NUMBER '0xdeadc0de' (1, 0) (1, 10) OP '&' (1, 11) (1, 12) NUMBER '12345' (1, 13) (1, 18) - >>> dump_tokens("0xFF & 0x15 | 1234") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("0xFF & 0x15 | 1234", """\ NUMBER '0xFF' (1, 0) (1, 4) OP '&' (1, 5) (1, 6) NUMBER '0x15' (1, 7) (1, 11) OP '|' (1, 12) (1, 13) NUMBER '1234' (1, 14) (1, 18) + """) -Long integers - - >>> dump_tokens("x = 0") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_long(self): + # Long integers + self.check_tokenize("x = 0", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '0' (1, 4) (1, 5) - >>> dump_tokens("x = 0xfffffffffff") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("x = 0xfffffffffff", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '0xffffffffff (1, 4) (1, 17) - >>> dump_tokens("x = 123141242151251616110") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("x = 123141242151251616110", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '123141242151 (1, 4) (1, 25) - >>> dump_tokens("x = -15921590215012591") - ENCODING 
'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("x = -15921590215012591", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) OP '-' (1, 4) (1, 5) NUMBER '159215902150 (1, 5) (1, 22) + """) -Floating point numbers - - >>> dump_tokens("x = 3.14159") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_float(self): + # Floating point numbers + self.check_tokenize("x = 3.14159", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '3.14159' (1, 4) (1, 11) - >>> dump_tokens("x = 314159.") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("x = 314159.", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '314159.' (1, 4) (1, 11) - >>> dump_tokens("x = .314159") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("x = .314159", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '.314159' (1, 4) (1, 11) - >>> dump_tokens("x = 3e14159") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("x = 3e14159", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '3e14159' (1, 4) (1, 11) - >>> dump_tokens("x = 3E123") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("x = 3E123", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '3E123' (1, 4) (1, 9) - >>> dump_tokens("x+y = 3e-1230") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("x+y = 3e-1230", """\ NAME 'x' (1, 0) (1, 1) OP '+' (1, 1) (1, 2) NAME 'y' (1, 2) (1, 3) OP '=' (1, 4) (1, 5) NUMBER '3e-1230' (1, 6) (1, 13) - >>> dump_tokens("x = 3.14e159") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("x = 3.14e159", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '3.14e159' (1, 4) (1, 12) + """) -String literals - - >>> dump_tokens("x = ''; y = \\\"\\\"") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_string(self): + # String literals + self.check_tokenize("x = ''; y = \"\"", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) STRING "''" (1, 4) (1, 6) @@ -238,8 +196,8 @@ NAME 'y' (1, 8) (1, 9) OP '=' (1, 10) (1, 11) STRING '""' (1, 12) (1, 14) - >>> dump_tokens("x = '\\\"'; y = \\\"'\\\"") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("x = '\"'; y = \"'\"", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) STRING '\\'"\\'' (1, 4) (1, 7) @@ -247,29 +205,29 @@ NAME 'y' (1, 9) (1, 10) OP '=' (1, 11) (1, 12) STRING '"\\'"' (1, 13) (1, 16) - >>> dump_tokens("x = \\\"doesn't \\\"shrink\\\", does it\\\"") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("x = \"doesn't \"shrink\", does it\"", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) STRING '"doesn\\'t "' (1, 4) (1, 14) NAME 'shrink' (1, 14) (1, 20) STRING '", does it"' (1, 20) (1, 31) - >>> dump_tokens("x = 'abc' + 'ABC'") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("x = 'abc' + 'ABC'", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) STRING "'abc'" (1, 4) (1, 9) OP '+' (1, 10) (1, 11) STRING "'ABC'" (1, 12) (1, 17) - >>> dump_tokens('y = "ABC" + "ABC"') - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize('y = "ABC" + "ABC"', """\ NAME 'y' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) STRING '"ABC"' (1, 4) (1, 9) OP '+' (1, 10) (1, 11) STRING '"ABC"' (1, 12) (1, 17) - >>> dump_tokens("x = r'abc' + r'ABC' + R'ABC' + R'ABC'") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("x = r'abc' + r'ABC' + R'ABC' + R'ABC'", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) STRING "r'abc'" (1, 4) (1, 10) @@ -279,8 +237,8 @@ STRING "R'ABC'" (1, 22) (1, 28) OP '+' (1, 29) (1, 30) STRING "R'ABC'" (1, 31) (1, 37) - >>> dump_tokens('y = r"abc" + r"ABC" + R"ABC" + R"ABC"') - 
ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize('y = r"abc" + r"ABC" + R"ABC" + R"ABC"', """\ NAME 'y' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) STRING 'r"abc"' (1, 4) (1, 10) @@ -290,30 +248,30 @@ STRING 'R"ABC"' (1, 22) (1, 28) OP '+' (1, 29) (1, 30) STRING 'R"ABC"' (1, 31) (1, 37) + """) - >>> dump_tokens("u'abc' + U'abc'") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("u'abc' + U'abc'", """\ STRING "u'abc'" (1, 0) (1, 6) OP '+' (1, 7) (1, 8) STRING "U'abc'" (1, 9) (1, 15) - >>> dump_tokens('u"abc" + U"abc"') - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize('u"abc" + U"abc"', """\ STRING 'u"abc"' (1, 0) (1, 6) OP '+' (1, 7) (1, 8) STRING 'U"abc"' (1, 9) (1, 15) + """) - >>> dump_tokens("b'abc' + B'abc'") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("b'abc' + B'abc'", """\ STRING "b'abc'" (1, 0) (1, 6) OP '+' (1, 7) (1, 8) STRING "B'abc'" (1, 9) (1, 15) - >>> dump_tokens('b"abc" + B"abc"') - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize('b"abc" + B"abc"', """\ STRING 'b"abc"' (1, 0) (1, 6) OP '+' (1, 7) (1, 8) STRING 'B"abc"' (1, 9) (1, 15) - >>> dump_tokens("br'abc' + bR'abc' + Br'abc' + BR'abc'") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("br'abc' + bR'abc' + Br'abc' + BR'abc'", """\ STRING "br'abc'" (1, 0) (1, 7) OP '+' (1, 8) (1, 9) STRING "bR'abc'" (1, 10) (1, 17) @@ -321,8 +279,8 @@ STRING "Br'abc'" (1, 20) (1, 27) OP '+' (1, 28) (1, 29) STRING "BR'abc'" (1, 30) (1, 37) - >>> dump_tokens('br"abc" + bR"abc" + Br"abc" + BR"abc"') - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize('br"abc" + bR"abc" + Br"abc" + BR"abc"', """\ STRING 'br"abc"' (1, 0) (1, 7) OP '+' (1, 8) (1, 9) STRING 'bR"abc"' (1, 10) (1, 17) @@ -330,8 +288,8 @@ STRING 'Br"abc"' (1, 20) (1, 27) OP '+' (1, 28) (1, 29) STRING 'BR"abc"' (1, 30) (1, 37) - >>> dump_tokens("rb'abc' + rB'abc' + Rb'abc' + RB'abc'") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("rb'abc' + rB'abc' + Rb'abc' + RB'abc'", """\ STRING "rb'abc'" (1, 0) (1, 7) OP '+' (1, 8) (1, 9) STRING "rB'abc'" (1, 10) (1, 17) @@ -339,8 +297,8 @@ STRING "Rb'abc'" (1, 20) (1, 27) OP '+' (1, 28) (1, 29) STRING "RB'abc'" (1, 30) (1, 37) - >>> dump_tokens('rb"abc" + rB"abc" + Rb"abc" + RB"abc"') - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize('rb"abc" + rB"abc" + Rb"abc" + RB"abc"', """\ STRING 'rb"abc"' (1, 0) (1, 7) OP '+' (1, 8) (1, 9) STRING 'rB"abc"' (1, 10) (1, 17) @@ -348,11 +306,10 @@ STRING 'Rb"abc"' (1, 20) (1, 27) OP '+' (1, 28) (1, 29) STRING 'RB"abc"' (1, 30) (1, 37) + """) -Operators - - >>> dump_tokens("def d22(a, b, c=2, d=2, *k): pass") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_function(self): + self.check_tokenize("def d22(a, b, c=2, d=2, *k): pass", """\ NAME 'def' (1, 0) (1, 3) NAME 'd22' (1, 4) (1, 7) OP '(' (1, 7) (1, 8) @@ -373,8 +330,8 @@ OP ')' (1, 26) (1, 27) OP ':' (1, 27) (1, 28) NAME 'pass' (1, 29) (1, 33) - >>> dump_tokens("def d01v_(a=1, *k, **w): pass") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("def d01v_(a=1, *k, **w): pass", """\ NAME 'def' (1, 0) (1, 3) NAME 'd01v_' (1, 4) (1, 9) OP '(' (1, 9) (1, 10) @@ -390,12 +347,12 @@ OP ')' (1, 22) (1, 23) OP ':' (1, 23) (1, 24) NAME 'pass' (1, 25) (1, 29) + """) -Comparison - - >>> dump_tokens("if 1 < 1 > 1 == 1 >= 5 <= 0x15 <= 0x12 != " + - ... 
"1 and 5 in 1 not in 1 is 1 or 5 is not 1: pass") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_comparison(self): + # Comparison + self.check_tokenize("if 1 < 1 > 1 == 1 >= 5 <= 0x15 <= 0x12 != " + "1 and 5 in 1 not in 1 is 1 or 5 is not 1: pass", """\ NAME 'if' (1, 0) (1, 2) NUMBER '1' (1, 3) (1, 4) OP '<' (1, 5) (1, 6) @@ -428,11 +385,11 @@ NUMBER '1' (1, 81) (1, 82) OP ':' (1, 82) (1, 83) NAME 'pass' (1, 84) (1, 88) + """) -Shift - - >>> dump_tokens("x = 1 << 1 >> 5") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_shift(self): + # Shift + self.check_tokenize("x = 1 << 1 >> 5", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '1' (1, 4) (1, 5) @@ -440,11 +397,11 @@ NUMBER '1' (1, 9) (1, 10) OP '>>' (1, 11) (1, 13) NUMBER '5' (1, 14) (1, 15) + """) -Additive - - >>> dump_tokens("x = 1 - y + 15 - 1 + 0x124 + z + a[5]") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_additive(self): + # Additive + self.check_tokenize("x = 1 - y + 15 - 1 + 0x124 + z + a[5]", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '1' (1, 4) (1, 5) @@ -463,11 +420,11 @@ OP '[' (1, 34) (1, 35) NUMBER '5' (1, 35) (1, 36) OP ']' (1, 36) (1, 37) + """) -Multiplicative - - >>> dump_tokens("x = 1//1*1/5*12%0x12 at 42") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_multiplicative(self): + # Multiplicative + self.check_tokenize("x = 1//1*1/5*12%0x12 at 42", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '1' (1, 4) (1, 5) @@ -483,11 +440,11 @@ NUMBER '0x12' (1, 16) (1, 20) OP '@' (1, 20) (1, 21) NUMBER '42' (1, 21) (1, 23) + """) -Unary - - >>> dump_tokens("~1 ^ 1 & 1 |1 ^ -1") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_unary(self): + # Unary + self.check_tokenize("~1 ^ 1 & 1 |1 ^ -1", """\ OP '~' (1, 0) (1, 1) NUMBER '1' (1, 1) (1, 2) OP '^' (1, 3) (1, 4) @@ -499,8 +456,8 @@ OP '^' (1, 14) (1, 15) OP '-' (1, 16) (1, 17) NUMBER '1' (1, 17) (1, 18) - >>> dump_tokens("-1*1/1+1*1//1 - ---1**1") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("-1*1/1+1*1//1 - ---1**1", """\ OP '-' (1, 0) (1, 1) NUMBER '1' (1, 1) (1, 2) OP '*' (1, 2) (1, 3) @@ -520,11 +477,11 @@ NUMBER '1' (1, 19) (1, 20) OP '**' (1, 20) (1, 22) NUMBER '1' (1, 22) (1, 23) + """) -Selector - - >>> dump_tokens("import sys, time\\nx = sys.modules['time'].time()") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_selector(self): + # Selector + self.check_tokenize("import sys, time\nx = sys.modules['time'].time()", """\ NAME 'import' (1, 0) (1, 6) NAME 'sys' (1, 7) (1, 10) OP ',' (1, 10) (1, 11) @@ -542,11 +499,11 @@ NAME 'time' (2, 24) (2, 28) OP '(' (2, 28) (2, 29) OP ')' (2, 29) (2, 30) + """) -Methods - - >>> dump_tokens("@staticmethod\\ndef foo(x,y): pass") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_method(self): + # Methods + self.check_tokenize("@staticmethod\ndef foo(x,y): pass", """\ OP '@' (1, 0) (1, 1) NAME 'staticmethod (1, 1) (1, 13) NEWLINE '\\n' (1, 13) (1, 14) @@ -559,52 +516,13 @@ OP ')' (2, 11) (2, 12) OP ':' (2, 12) (2, 13) NAME 'pass' (2, 14) (2, 18) + """) -Backslash means line continuation, except for comments - - >>> roundtrip("x=1+\\\\n" - ... "1\\n" - ... "# This is a comment\\\\n" - ... "# This also\\n") - True - >>> roundtrip("# Comment \\\\nx = 0") - True - -Two string literals on the same line - - >>> roundtrip("'' ''") - True - -Test roundtrip on random python modules. -pass the '-ucpu' option to process the full directory. 
- - >>> import random - >>> tempdir = os.path.dirname(f) or os.curdir - >>> testfiles = glob.glob(os.path.join(tempdir, "test*.py")) - -Tokenize is broken on test_pep3131.py because regular expressions are -broken on the obscure unicode identifiers in it. *sigh* -With roundtrip extended to test the 5-tuple mode of untokenize, -7 more testfiles fail. Remove them also until the failure is diagnosed. - - >>> testfiles.remove(os.path.join(tempdir, "test_pep3131.py")) - >>> for f in ('buffer', 'builtin', 'fileio', 'inspect', 'os', 'platform', 'sys'): - ... testfiles.remove(os.path.join(tempdir, "test_%s.py") % f) - ... - >>> if not support.is_resource_enabled("cpu"): - ... testfiles = random.sample(testfiles, 10) - ... - >>> for testfile in testfiles: - ... if not roundtrip(open(testfile, 'rb')): - ... print("Roundtrip failed for file %s" % testfile) - ... break - ... else: True - True - -Evil tabs - - >>> dump_tokens("def f():\\n\\tif x\\n \\tpass") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_tabs(self): + # Evil tabs + self.check_tokenize("def f():\n" + "\tif x\n" + " \tpass", """\ NAME 'def' (1, 0) (1, 3) NAME 'f' (1, 4) (1, 5) OP '(' (1, 5) (1, 6) @@ -619,11 +537,11 @@ NAME 'pass' (3, 9) (3, 13) DEDENT '' (4, 0) (4, 0) DEDENT '' (4, 0) (4, 0) + """) -Non-ascii identifiers - - >>> dump_tokens("?rter = 'places'\\ngr?n = 'green'") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_non_ascii_identifiers(self): + # Non-ascii identifiers + self.check_tokenize("?rter = 'places'\ngr?n = 'green'", """\ NAME '?rter' (1, 0) (1, 5) OP '=' (1, 6) (1, 7) STRING "'places'" (1, 8) (1, 16) @@ -631,11 +549,11 @@ NAME 'gr?n' (2, 0) (2, 4) OP '=' (2, 5) (2, 6) STRING "'green'" (2, 7) (2, 14) + """) -Legacy unicode literals: - - >>> dump_tokens("?rter = u'places'\\ngr?n = U'green'") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_unicode(self): + # Legacy unicode literals: + self.check_tokenize("?rter = u'places'\ngr?n = U'green'", """\ NAME '?rter' (1, 0) (1, 5) OP '=' (1, 6) (1, 7) STRING "u'places'" (1, 8) (1, 17) @@ -643,17 +561,17 @@ NAME 'gr?n' (2, 0) (2, 4) OP '=' (2, 5) (2, 6) STRING "U'green'" (2, 7) (2, 15) + """) -Async/await extension: - - >>> dump_tokens("async = 1") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_async(self): + # Async/await extension: + self.check_tokenize("async = 1", """\ NAME 'async' (1, 0) (1, 5) OP '=' (1, 6) (1, 7) NUMBER '1' (1, 8) (1, 9) + """) - >>> dump_tokens("a = (async = 1)") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("a = (async = 1)", """\ NAME 'a' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) OP '(' (1, 4) (1, 5) @@ -661,15 +579,15 @@ OP '=' (1, 11) (1, 12) NUMBER '1' (1, 13) (1, 14) OP ')' (1, 14) (1, 15) + """) - >>> dump_tokens("async()") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("async()", """\ NAME 'async' (1, 0) (1, 5) OP '(' (1, 5) (1, 6) OP ')' (1, 6) (1, 7) + """) - >>> dump_tokens("class async(Bar):pass") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("class async(Bar):pass", """\ NAME 'class' (1, 0) (1, 5) NAME 'async' (1, 6) (1, 11) OP '(' (1, 11) (1, 12) @@ -677,28 +595,28 @@ OP ')' (1, 15) (1, 16) OP ':' (1, 16) (1, 17) NAME 'pass' (1, 17) (1, 21) + """) - >>> dump_tokens("class async:pass") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("class async:pass", """\ NAME 'class' (1, 0) (1, 5) NAME 'async' (1, 6) (1, 11) OP ':' (1, 11) (1, 12) NAME 'pass' (1, 12) (1, 16) + """) - >>> dump_tokens("await = 1") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("await = 1", """\ NAME 'await' (1, 0) (1, 5) OP '=' (1, 6) (1, 7) NUMBER '1' 
(1, 8) (1, 9) + """) - >>> dump_tokens("foo.async") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("foo.async", """\ NAME 'foo' (1, 0) (1, 3) OP '.' (1, 3) (1, 4) NAME 'async' (1, 4) (1, 9) + """) - >>> dump_tokens("async for a in b: pass") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("async for a in b: pass", """\ NAME 'async' (1, 0) (1, 5) NAME 'for' (1, 6) (1, 9) NAME 'a' (1, 10) (1, 11) @@ -706,9 +624,9 @@ NAME 'b' (1, 15) (1, 16) OP ':' (1, 16) (1, 17) NAME 'pass' (1, 18) (1, 22) + """) - >>> dump_tokens("async with a as b: pass") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("async with a as b: pass", """\ NAME 'async' (1, 0) (1, 5) NAME 'with' (1, 6) (1, 10) NAME 'a' (1, 11) (1, 12) @@ -716,49 +634,49 @@ NAME 'b' (1, 16) (1, 17) OP ':' (1, 17) (1, 18) NAME 'pass' (1, 19) (1, 23) + """) - >>> dump_tokens("async.foo") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("async.foo", """\ NAME 'async' (1, 0) (1, 5) OP '.' (1, 5) (1, 6) NAME 'foo' (1, 6) (1, 9) + """) - >>> dump_tokens("async") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("async", """\ NAME 'async' (1, 0) (1, 5) + """) - >>> dump_tokens("async\\n#comment\\nawait") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("async\n#comment\nawait", """\ NAME 'async' (1, 0) (1, 5) NEWLINE '\\n' (1, 5) (1, 6) COMMENT '#comment' (2, 0) (2, 8) NL '\\n' (2, 8) (2, 9) NAME 'await' (3, 0) (3, 5) + """) - >>> dump_tokens("async\\n...\\nawait") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("async\n...\nawait", """\ NAME 'async' (1, 0) (1, 5) NEWLINE '\\n' (1, 5) (1, 6) OP '...' (2, 0) (2, 3) NEWLINE '\\n' (2, 3) (2, 4) NAME 'await' (3, 0) (3, 5) + """) - >>> dump_tokens("async\\nawait") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("async\nawait", """\ NAME 'async' (1, 0) (1, 5) NEWLINE '\\n' (1, 5) (1, 6) NAME 'await' (2, 0) (2, 5) + """) - >>> dump_tokens("foo.async + 1") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("foo.async + 1", """\ NAME 'foo' (1, 0) (1, 3) OP '.' (1, 3) (1, 4) NAME 'async' (1, 4) (1, 9) OP '+' (1, 10) (1, 11) NUMBER '1' (1, 12) (1, 13) + """) - >>> dump_tokens("async def foo(): pass") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("async def foo(): pass", """\ ASYNC 'async' (1, 0) (1, 5) NAME 'def' (1, 6) (1, 9) NAME 'foo' (1, 10) (1, 13) @@ -766,15 +684,16 @@ OP ')' (1, 14) (1, 15) OP ':' (1, 15) (1, 16) NAME 'pass' (1, 17) (1, 21) + """) - >>> dump_tokens('''async def foo(): - ... def foo(await): - ... await = 1 - ... if 1: - ... await - ... async += 1 - ... ''') - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize('''\ +async def foo(): + def foo(await): + await = 1 + if 1: + await +async += 1 +''', """\ ASYNC 'async' (1, 0) (1, 5) NAME 'def' (1, 6) (1, 9) NAME 'foo' (1, 10) (1, 13) @@ -809,10 +728,11 @@ OP '+=' (6, 6) (6, 8) NUMBER '1' (6, 9) (6, 10) NEWLINE '\\n' (6, 10) (6, 11) + """) - >>> dump_tokens('''async def foo(): - ... 
async for i in 1: pass''') - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize('''\ +async def foo(): + async for i in 1: pass''', """\ ASYNC 'async' (1, 0) (1, 5) NAME 'def' (1, 6) (1, 9) NAME 'foo' (1, 10) (1, 13) @@ -829,9 +749,9 @@ OP ':' (2, 18) (2, 19) NAME 'pass' (2, 20) (2, 24) DEDENT '' (3, 0) (3, 0) + """) - >>> dump_tokens('''async def foo(async): await''') - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize('''async def foo(async): await''', """\ ASYNC 'async' (1, 0) (1, 5) NAME 'def' (1, 6) (1, 9) NAME 'foo' (1, 10) (1, 13) @@ -840,14 +760,15 @@ OP ')' (1, 19) (1, 20) OP ':' (1, 20) (1, 21) AWAIT 'await' (1, 22) (1, 27) + """) - >>> dump_tokens('''def f(): - ... - ... def baz(): pass - ... async def bar(): pass - ... - ... await = 2''') - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize('''\ +def f(): + + def baz(): pass + async def bar(): pass + + await = 2''', """\ NAME 'def' (1, 0) (1, 3) NAME 'f' (1, 4) (1, 5) OP '(' (1, 5) (1, 6) @@ -876,14 +797,15 @@ OP '=' (6, 8) (6, 9) NUMBER '2' (6, 10) (6, 11) DEDENT '' (7, 0) (7, 0) + """) - >>> dump_tokens('''async def f(): - ... - ... def baz(): pass - ... async def bar(): pass - ... - ... await = 2''') - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize('''\ +async def f(): + + def baz(): pass + async def bar(): pass + + await = 2''', """\ ASYNC 'async' (1, 0) (1, 5) NAME 'def' (1, 6) (1, 9) NAME 'f' (1, 10) (1, 11) @@ -913,89 +835,10 @@ OP '=' (6, 8) (6, 9) NUMBER '2' (6, 10) (6, 11) DEDENT '' (7, 0) (7, 0) -""" + """) -from test import support -from tokenize import (tokenize, _tokenize, untokenize, NUMBER, NAME, OP, - STRING, ENDMARKER, ENCODING, tok_name, detect_encoding, - open as tokenize_open, Untokenizer) -from io import BytesIO -from unittest import TestCase, mock -import os -import token -def dump_tokens(s): - """Print out the tokens in s in a table format. - - The ENDMARKER is omitted. - """ - f = BytesIO(s.encode('utf-8')) - for type, token, start, end, line in tokenize(f.readline): - if type == ENDMARKER: - break - type = tok_name[type] - print("%(type)-10.10s %(token)-13.13r %(start)s %(end)s" % locals()) - -def roundtrip(f): - """ - Test roundtrip for `untokenize`. `f` is an open file or a string. - The source code in f is tokenized to both 5- and 2-tuples. - Both sequences are converted back to source code via - tokenize.untokenize(), and the latter tokenized again to 2-tuples. - The test fails if the 3 pair tokenizations do not match. - - When untokenize bugs are fixed, untokenize with 5-tuples should - reproduce code that does not contain a backslash continuation - following spaces. A proper test should test this. - - This function would be more useful for correcting bugs if it reported - the first point of failure, like assertEqual, rather than just - returning False -- or if it were only used in unittests and not - doctest and actually used assertEqual. 
- """ - # Get source code and original tokenizations - if isinstance(f, str): - code = f.encode('utf-8') - else: - code = f.read() - f.close() - readline = iter(code.splitlines(keepends=True)).__next__ - tokens5 = list(tokenize(readline)) - tokens2 = [tok[:2] for tok in tokens5] - # Reproduce tokens2 from pairs - bytes_from2 = untokenize(tokens2) - readline2 = iter(bytes_from2.splitlines(keepends=True)).__next__ - tokens2_from2 = [tok[:2] for tok in tokenize(readline2)] - # Reproduce tokens2 from 5-tuples - bytes_from5 = untokenize(tokens5) - readline5 = iter(bytes_from5.splitlines(keepends=True)).__next__ - tokens2_from5 = [tok[:2] for tok in tokenize(readline5)] - # Compare 3 versions - return tokens2 == tokens2_from2 == tokens2_from5 - -# This is an example from the docs, set up as a doctest. def decistmt(s): - """Substitute Decimals for floats in a string of statements. - - >>> from decimal import Decimal - >>> s = 'print(+21.3e-5*-.1234/81.7)' - >>> decistmt(s) - "print (+Decimal ('21.3e-5')*-Decimal ('.1234')/Decimal ('81.7'))" - - The format of the exponent is inherited from the platform C library. - Known cases are "e-007" (Windows) and "e-07" (not Windows). Since - we're only showing 11 digits, and the 12th isn't close to 5, the - rest of the output should be platform-independent. - - >>> exec(s) #doctest: +ELLIPSIS - -3.2171603427...e-0...7 - - Output from calculations with Decimal should be identical across all - platforms. - - >>> exec(decistmt(s)) - -3.217160342717258261933904529E-7 - """ result = [] g = tokenize(BytesIO(s.encode('utf-8')).readline) # tokenize the string for toknum, tokval, _, _, _ in g: @@ -1010,6 +853,28 @@ result.append((toknum, tokval)) return untokenize(result).decode('utf-8') +class TestMisc(TestCase): + + def test_decistmt(self): + # Substitute Decimals for floats in a string of statements. + # This is an example from the docs. + + from decimal import Decimal + s = '+21.3e-5*-.1234/81.7' + self.assertEqual(decistmt(s), + "+Decimal ('21.3e-5')*-Decimal ('.1234')/Decimal ('81.7')") + + # The format of the exponent is inherited from the platform C library. + # Known cases are "e-007" (Windows) and "e-07" (not Windows). Since + # we're only showing 11 digits, and the 12th isn't close to 5, the + # rest of the output should be platform-independent. + self.assertRegex(repr(eval(s)), '-3.2171603427[0-9]*e-0+7') + + # Output from calculations with Decimal should be identical across all + # platforms. 
+ self.assertEqual(eval(decistmt(s)), + Decimal('-3.217160342717258261933904529E-7')) + class TestTokenizerAdheresToPep0263(TestCase): """ @@ -1018,11 +883,11 @@ def _testFile(self, filename): path = os.path.join(os.path.dirname(__file__), filename) - return roundtrip(open(path, 'rb')) + TestRoundtrip.check_roundtrip(self, open(path, 'rb')) def test_utf8_coding_cookie_and_no_utf8_bom(self): f = 'tokenize_tests-utf8-coding-cookie-and-no-utf8-bom-sig.txt' - self.assertTrue(self._testFile(f)) + self._testFile(f) def test_latin1_coding_cookie_and_utf8_bom(self): """ @@ -1037,11 +902,11 @@ def test_no_coding_cookie_and_utf8_bom(self): f = 'tokenize_tests-no-coding-cookie-and-utf8-bom-sig-only.txt' - self.assertTrue(self._testFile(f)) + self._testFile(f) def test_utf8_coding_cookie_and_utf8_bom(self): f = 'tokenize_tests-utf8-coding-cookie-and-utf8-bom-sig.txt' - self.assertTrue(self._testFile(f)) + self._testFile(f) def test_bad_coding_cookie(self): self.assertRaises(SyntaxError, self._testFile, 'bad_coding.py') @@ -1340,7 +1205,6 @@ self.assertTrue(m.closed) - class TestTokenize(TestCase): def test_tokenize(self): @@ -1472,6 +1336,7 @@ # See http://bugs.python.org/issue16152 self.assertExactTypeEqual('@ ', token.AT) + class UntokenizeTest(TestCase): def test_bad_input_order(self): @@ -1497,7 +1362,7 @@ u.prev_row = 2 u.add_whitespace((4, 4)) self.assertEqual(u.tokens, ['\\\n', '\\\n\\\n', ' ']) - self.assertTrue(roundtrip('a\n b\n c\n \\\n c\n')) + TestRoundtrip.check_roundtrip(self, 'a\n b\n c\n \\\n c\n') def test_iter_compat(self): u = Untokenizer() @@ -1514,6 +1379,131 @@ class TestRoundtrip(TestCase): + + def check_roundtrip(self, f): + """ + Test roundtrip for `untokenize`. `f` is an open file or a string. + The source code in f is tokenized to both 5- and 2-tuples. + Both sequences are converted back to source code via + tokenize.untokenize(), and the latter tokenized again to 2-tuples. + The test fails if the 3 pair tokenizations do not match. + + When untokenize bugs are fixed, untokenize with 5-tuples should + reproduce code that does not contain a backslash continuation + following spaces. A proper test should test this. + """ + # Get source code and original tokenizations + if isinstance(f, str): + code = f.encode('utf-8') + else: + code = f.read() + f.close() + readline = iter(code.splitlines(keepends=True)).__next__ + tokens5 = list(tokenize(readline)) + tokens2 = [tok[:2] for tok in tokens5] + # Reproduce tokens2 from pairs + bytes_from2 = untokenize(tokens2) + readline2 = iter(bytes_from2.splitlines(keepends=True)).__next__ + tokens2_from2 = [tok[:2] for tok in tokenize(readline2)] + self.assertEqual(tokens2_from2, tokens2) + # Reproduce tokens2 from 5-tuples + bytes_from5 = untokenize(tokens5) + readline5 = iter(bytes_from5.splitlines(keepends=True)).__next__ + tokens2_from5 = [tok[:2] for tok in tokenize(readline5)] + self.assertEqual(tokens2_from5, tokens2) + + def test_roundtrip(self): + # There are some standard formatting practices that are easy to get right. + + self.check_roundtrip("if x == 1:\n" + " print(x)\n") + self.check_roundtrip("# This is a comment\n" + "# This also") + + # Some people use different formatting conventions, which makes + # untokenize a little trickier. Note that this test involves trailing + # whitespace after the colon. Note that we use hex escapes to make the + # two trailing blanks apparent in the expected output. 
+ + self.check_roundtrip("if x == 1 : \n" + " print(x)\n") + fn = support.findfile("tokenize_tests.txt") + with open(fn, 'rb') as f: + self.check_roundtrip(f) + self.check_roundtrip("if x == 1:\n" + " # A comment by itself.\n" + " print(x) # Comment here, too.\n" + " # Another comment.\n" + "after_if = True\n") + self.check_roundtrip("if (x # The comments need to go in the right place\n" + " == 1):\n" + " print('x==1')\n") + self.check_roundtrip("class Test: # A comment here\n" + " # A comment with weird indent\n" + " after_com = 5\n" + " def x(m): return m*5 # a one liner\n" + " def y(m): # A whitespace after the colon\n" + " return y*4 # 3-space indent\n") + + # Some error-handling code + self.check_roundtrip("try: import somemodule\n" + "except ImportError: # comment\n" + " print('Can not import' # comment2\n)" + "else: print('Loaded')\n") + + def test_continuation(self): + # Balancing continuation + self.check_roundtrip("a = (3,4, \n" + "5,6)\n" + "y = [3, 4,\n" + "5]\n" + "z = {'a': 5,\n" + "'b':15, 'c':True}\n" + "x = len(y) + 5 - a[\n" + "3] - a[2]\n" + "+ len(z) - z[\n" + "'b']\n") + + def test_backslash_continuation(self): + # Backslash means line continuation, except for comments + self.check_roundtrip("x=1+\\\n" + "1\n" + "# This is a comment\\\n" + "# This also\n") + self.check_roundtrip("# Comment \\\n" + "x = 0") + + def test_string_concatenation(self): + # Two string literals on the same line + self.check_roundtrip("'' ''") + + def test_random_files(self): + # Test roundtrip on random python modules. + # pass the '-ucpu' option to process the full directory. + + import glob, random + fn = support.findfile("tokenize_tests.txt") + tempdir = os.path.dirname(fn) or os.curdir + testfiles = glob.glob(os.path.join(tempdir, "test*.py")) + + # Tokenize is broken on test_pep3131.py because regular expressions are + # broken on the obscure unicode identifiers in it. *sigh* + # With roundtrip extended to test the 5-tuple mode of untokenize, + # 7 more testfiles fail. Remove them also until the failure is diagnosed. 
+ + testfiles.remove(os.path.join(tempdir, "test_pep3131.py")) + for f in ('buffer', 'builtin', 'fileio', 'inspect', 'os', 'platform', 'sys'): + testfiles.remove(os.path.join(tempdir, "test_%s.py") % f) + + if not support.is_resource_enabled("cpu"): + testfiles = random.sample(testfiles, 10) + + for testfile in testfiles: + with open(testfile, 'rb') as f: + with self.subTest(file=testfile): + self.check_roundtrip(f) + + def roundtrip(self, code): if isinstance(code, str): code = code.encode('utf-8') @@ -1527,19 +1517,8 @@ code = "if False:\n\tx=3\n\tx=3\n" codelines = self.roundtrip(code).split('\n') self.assertEqual(codelines[1], codelines[2]) + self.check_roundtrip(code) -__test__ = {"doctests" : doctests, 'decistmt': decistmt} - -def test_main(): - from test import test_tokenize - support.run_doctest(test_tokenize, True) - support.run_unittest(TestTokenizerAdheresToPep0263) - support.run_unittest(Test_Tokenize) - support.run_unittest(TestDetectEncoding) - support.run_unittest(TestTokenize) - support.run_unittest(UntokenizeTest) - support.run_unittest(TestRoundtrip) - if __name__ == "__main__": - test_main() + unittest.main() -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Oct 6 17:54:19 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Tue, 06 Oct 2015 15:54:19 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?b?KTogTWVyZ2Ugd2l0aCAzLjUu?= Message-ID: <20151006155418.2667.13806@psf.io> https://hg.python.org/cpython/rev/39a24ce364dc changeset: 98563:39a24ce364dc parent: 98561:727d72b05ff5 parent: 98562:b0ce3ef2ea21 user: Serhiy Storchaka date: Tue Oct 06 18:52:52 2015 +0300 summary: Merge with 3.5. files: Lib/pydoc.py | 0 Lib/site-packages/README | 2 ++ Lib/test/regrtest.py | 0 Lib/timeit.py | 0 Parser/asdl_c.py | 0 Tools/buildbot/build-amd64.bat | 5 +++++ Tools/buildbot/clean-amd64.bat | 5 +++++ Tools/buildbot/external-amd64.bat | 3 +++ Tools/buildbot/external.bat | 3 +++ Tools/buildbot/test-amd64.bat | 6 ++++++ configure | 0 11 files changed, 24 insertions(+), 0 deletions(-) diff --git a/Lib/pydoc.py b/Lib/pydoc.py old mode 100755 new mode 100644 diff --git a/Lib/site-packages/README b/Lib/site-packages/README new file mode 100644 --- /dev/null +++ b/Lib/site-packages/README @@ -0,0 +1,2 @@ +This directory exists so that 3rd party packages can be installed +here. Read the source for site.py for more details. diff --git a/Lib/test/regrtest.py b/Lib/test/regrtest.py old mode 100755 new mode 100644 diff --git a/Lib/timeit.py b/Lib/timeit.py old mode 100755 new mode 100644 diff --git a/Parser/asdl_c.py b/Parser/asdl_c.py old mode 100755 new mode 100644 diff --git a/Tools/buildbot/build-amd64.bat b/Tools/buildbot/build-amd64.bat new file mode 100644 --- /dev/null +++ b/Tools/buildbot/build-amd64.bat @@ -0,0 +1,5 @@ + at rem Formerly used by the buildbot "compile" step. + at echo This script is no longer used and may be removed in the future. + at echo To get the same effect as this script, use + at echo PCbuild\build.bat -d -e -k -p x64 +call "%~dp0build.bat" -p x64 %* diff --git a/Tools/buildbot/clean-amd64.bat b/Tools/buildbot/clean-amd64.bat new file mode 100644 --- /dev/null +++ b/Tools/buildbot/clean-amd64.bat @@ -0,0 +1,5 @@ + at rem Formerly used by the buildbot "clean" step. + at echo This script is no longer used and may be removed in the future. + at echo To get the same effect as this script, use `clean.bat` from this + at echo directory and pass `-p x64` as two arguments. 
+call "%~dp0clean.bat" -p x64 %* diff --git a/Tools/buildbot/external-amd64.bat b/Tools/buildbot/external-amd64.bat new file mode 100644 --- /dev/null +++ b/Tools/buildbot/external-amd64.bat @@ -0,0 +1,3 @@ + at echo This script is no longer used and may be removed in the future. + at echo Please use PCbuild\get_externals.bat instead. +@"%~dp0..\..\PCbuild\get_externals.bat" %* diff --git a/Tools/buildbot/external.bat b/Tools/buildbot/external.bat new file mode 100644 --- /dev/null +++ b/Tools/buildbot/external.bat @@ -0,0 +1,3 @@ + at echo This script is no longer used and may be removed in the future. + at echo Please use PCbuild\get_externals.bat instead. +@"%~dp0..\..\PCbuild\get_externals.bat" %* diff --git a/Tools/buildbot/test-amd64.bat b/Tools/buildbot/test-amd64.bat new file mode 100644 --- /dev/null +++ b/Tools/buildbot/test-amd64.bat @@ -0,0 +1,6 @@ + at rem Formerly used by the buildbot "test" step. + at echo This script is no longer used and may be removed in the future. + at echo To get the same effect as this script, use + at echo PCbuild\rt.bat -q -d -x64 -uall -rwW + at echo or use `test.bat` in this directory and pass `-x64` as an argument. +call "%~dp0test.bat" -x64 %* diff --git a/configure b/configure old mode 100755 new mode 100644 -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Oct 6 17:54:19 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Tue, 06 Oct 2015 15:54:19 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNCk6?= =?utf-8?q?_Merge_heads?= Message-ID: <20151006155417.97710.94766@psf.io> https://hg.python.org/cpython/rev/91f36d2b097a changeset: 98559:91f36d2b097a branch: 3.4 parent: 98556:d272f3cbae05 parent: 98552:aebbf205ef6f user: Serhiy Storchaka date: Tue Oct 06 18:38:25 2015 +0300 summary: Merge heads files: Lib/test/test_asyncio/test_base_events.py | 5 +---- 1 files changed, 1 insertions(+), 4 deletions(-) diff --git a/Lib/test/test_asyncio/test_base_events.py b/Lib/test/test_asyncio/test_base_events.py --- a/Lib/test/test_asyncio/test_base_events.py +++ b/Lib/test/test_asyncio/test_base_events.py @@ -1215,6 +1215,7 @@ def test_create_datagram_endpoint_sock(self): sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + sock.bind(('127.0.0.1', 0)) fut = self.loop.create_datagram_endpoint( lambda: MyDatagramProto(create_future=True, loop=self.loop), sock=sock) @@ -1307,10 +1308,6 @@ self.assertTrue( sock.getsockopt( socket.SOL_SOCKET, socket.SO_REUSEPORT)) - else: - self.assertFalse( - sock.getsockopt( - socket.SOL_SOCKET, socket.SO_REUSEPORT)) self.assertTrue( sock.getsockopt( socket.SOL_SOCKET, socket.SO_BROADCAST)) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Oct 6 17:54:19 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Tue, 06 Oct 2015 15:54:19 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2325317=3A_Converted_doctests_in_test=5Ftokenize_?= =?utf-8?q?to_unittests=2E?= Message-ID: <20151006155417.97700.68973@psf.io> https://hg.python.org/cpython/rev/66d239660997 changeset: 98558:66d239660997 parent: 98551:41f29bbf520d parent: 98557:bff40616d2a5 user: Serhiy Storchaka date: Tue Oct 06 18:24:46 2015 +0300 summary: Issue #25317: Converted doctests in test_tokenize to unittests. Made test_tokenize discoverable. 
files: Lib/test/test_tokenize.py | 811 ++++++++++++------------- 1 files changed, 395 insertions(+), 416 deletions(-) diff --git a/Lib/test/test_tokenize.py b/Lib/test/test_tokenize.py --- a/Lib/test/test_tokenize.py +++ b/Lib/test/test_tokenize.py @@ -1,22 +1,44 @@ -doctests = """ -Tests for the tokenize module. +from test import support +from tokenize import (tokenize, _tokenize, untokenize, NUMBER, NAME, OP, + STRING, ENDMARKER, ENCODING, tok_name, detect_encoding, + open as tokenize_open, Untokenizer) +from io import BytesIO +from unittest import TestCase, mock +import os +import token -The tests can be really simple. Given a small fragment of source -code, print out a table with tokens. The ENDMARKER is omitted for -brevity. - >>> import glob +class TokenizeTest(TestCase): + # Tests for the tokenize module. - >>> dump_tokens("1 + 1") - ENCODING 'utf-8' (0, 0) (0, 0) + # The tests can be really simple. Given a small fragment of source + # code, print out a table with tokens. The ENDMARKER is omitted for + # brevity. + + def check_tokenize(self, s, expected): + # Format the tokens in s in a table format. + # The ENDMARKER is omitted. + result = [] + f = BytesIO(s.encode('utf-8')) + for type, token, start, end, line in tokenize(f.readline): + if type == ENDMARKER: + break + type = tok_name[type] + result.append(" %(type)-10.10s %(token)-13.13r %(start)s %(end)s" % + locals()) + self.assertEqual(result, + [" ENCODING 'utf-8' (0, 0) (0, 0)"] + + expected.rstrip().splitlines()) + + def test_basic(self): + self.check_tokenize("1 + 1", """\ NUMBER '1' (1, 0) (1, 1) OP '+' (1, 2) (1, 3) NUMBER '1' (1, 4) (1, 5) - - >>> dump_tokens("if False:\\n" - ... " # NL\\n" - ... " True = False # NEWLINE\\n") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("if False:\n" + " # NL\n" + " True = False # NEWLINE\n", """\ NAME 'if' (1, 0) (1, 2) NAME 'False' (1, 3) (1, 8) OP ':' (1, 8) (1, 9) @@ -30,112 +52,48 @@ COMMENT '# NEWLINE' (3, 17) (3, 26) NEWLINE '\\n' (3, 26) (3, 27) DEDENT '' (4, 0) (4, 0) + """) + indent_error_file = b"""\ +def k(x): + x += 2 + x += 5 +""" + readline = BytesIO(indent_error_file).readline + with self.assertRaisesRegex(IndentationError, + "unindent does not match any " + "outer indentation level"): + for tok in tokenize(readline): + pass - >>> indent_error_file = \""" - ... def k(x): - ... x += 2 - ... x += 5 - ... \""" - >>> readline = BytesIO(indent_error_file.encode('utf-8')).readline - >>> for tok in tokenize(readline): pass - Traceback (most recent call last): - ... - IndentationError: unindent does not match any outer indentation level - -There are some standard formatting practices that are easy to get right. - - >>> roundtrip("if x == 1:\\n" - ... " print(x)\\n") - True - - >>> roundtrip("# This is a comment\\n# This also") - True - -Some people use different formatting conventions, which makes -untokenize a little trickier. Note that this test involves trailing -whitespace after the colon. Note that we use hex escapes to make the -two trailing blanks apparent in the expected output. - - >>> roundtrip("if x == 1 : \\n" - ... " print(x)\\n") - True - - >>> f = support.findfile("tokenize_tests.txt") - >>> roundtrip(open(f, 'rb')) - True - - >>> roundtrip("if x == 1:\\n" - ... " # A comment by itself.\\n" - ... " print(x) # Comment here, too.\\n" - ... " # Another comment.\\n" - ... "after_if = True\\n") - True - - >>> roundtrip("if (x # The comments need to go in the right place\\n" - ... " == 1):\\n" - ... 
" print('x==1')\\n") - True - - >>> roundtrip("class Test: # A comment here\\n" - ... " # A comment with weird indent\\n" - ... " after_com = 5\\n" - ... " def x(m): return m*5 # a one liner\\n" - ... " def y(m): # A whitespace after the colon\\n" - ... " return y*4 # 3-space indent\\n") - True - -Some error-handling code - - >>> roundtrip("try: import somemodule\\n" - ... "except ImportError: # comment\\n" - ... " print('Can not import' # comment2\\n)" - ... "else: print('Loaded')\\n") - True - -Balancing continuation - - >>> roundtrip("a = (3,4, \\n" - ... "5,6)\\n" - ... "y = [3, 4,\\n" - ... "5]\\n" - ... "z = {'a': 5,\\n" - ... "'b':15, 'c':True}\\n" - ... "x = len(y) + 5 - a[\\n" - ... "3] - a[2]\\n" - ... "+ len(z) - z[\\n" - ... "'b']\\n") - True - -Ordinary integers and binary operators - - >>> dump_tokens("0xff <= 255") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_int(self): + # Ordinary integers and binary operators + self.check_tokenize("0xff <= 255", """\ NUMBER '0xff' (1, 0) (1, 4) OP '<=' (1, 5) (1, 7) NUMBER '255' (1, 8) (1, 11) - >>> dump_tokens("0b10 <= 255") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("0b10 <= 255", """\ NUMBER '0b10' (1, 0) (1, 4) OP '<=' (1, 5) (1, 7) NUMBER '255' (1, 8) (1, 11) - >>> dump_tokens("0o123 <= 0O123") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("0o123 <= 0O123", """\ NUMBER '0o123' (1, 0) (1, 5) OP '<=' (1, 6) (1, 8) NUMBER '0O123' (1, 9) (1, 14) - >>> dump_tokens("1234567 > ~0x15") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("1234567 > ~0x15", """\ NUMBER '1234567' (1, 0) (1, 7) OP '>' (1, 8) (1, 9) OP '~' (1, 10) (1, 11) NUMBER '0x15' (1, 11) (1, 15) - >>> dump_tokens("2134568 != 1231515") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("2134568 != 1231515", """\ NUMBER '2134568' (1, 0) (1, 7) OP '!=' (1, 8) (1, 10) NUMBER '1231515' (1, 11) (1, 18) - >>> dump_tokens("(-124561-1) & 200000000") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("(-124561-1) & 200000000", """\ OP '(' (1, 0) (1, 1) OP '-' (1, 1) (1, 2) NUMBER '124561' (1, 2) (1, 8) @@ -144,93 +102,93 @@ OP ')' (1, 10) (1, 11) OP '&' (1, 12) (1, 13) NUMBER '200000000' (1, 14) (1, 23) - >>> dump_tokens("0xdeadbeef != -1") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("0xdeadbeef != -1", """\ NUMBER '0xdeadbeef' (1, 0) (1, 10) OP '!=' (1, 11) (1, 13) OP '-' (1, 14) (1, 15) NUMBER '1' (1, 15) (1, 16) - >>> dump_tokens("0xdeadc0de & 12345") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("0xdeadc0de & 12345", """\ NUMBER '0xdeadc0de' (1, 0) (1, 10) OP '&' (1, 11) (1, 12) NUMBER '12345' (1, 13) (1, 18) - >>> dump_tokens("0xFF & 0x15 | 1234") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("0xFF & 0x15 | 1234", """\ NUMBER '0xFF' (1, 0) (1, 4) OP '&' (1, 5) (1, 6) NUMBER '0x15' (1, 7) (1, 11) OP '|' (1, 12) (1, 13) NUMBER '1234' (1, 14) (1, 18) + """) -Long integers - - >>> dump_tokens("x = 0") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_long(self): + # Long integers + self.check_tokenize("x = 0", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '0' (1, 4) (1, 5) - >>> dump_tokens("x = 0xfffffffffff") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("x = 0xfffffffffff", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '0xffffffffff (1, 4) (1, 17) - >>> dump_tokens("x = 123141242151251616110") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("x = 123141242151251616110", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) 
NUMBER '123141242151 (1, 4) (1, 25) - >>> dump_tokens("x = -15921590215012591") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("x = -15921590215012591", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) OP '-' (1, 4) (1, 5) NUMBER '159215902150 (1, 5) (1, 22) + """) -Floating point numbers - - >>> dump_tokens("x = 3.14159") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_float(self): + # Floating point numbers + self.check_tokenize("x = 3.14159", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '3.14159' (1, 4) (1, 11) - >>> dump_tokens("x = 314159.") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("x = 314159.", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '314159.' (1, 4) (1, 11) - >>> dump_tokens("x = .314159") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("x = .314159", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '.314159' (1, 4) (1, 11) - >>> dump_tokens("x = 3e14159") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("x = 3e14159", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '3e14159' (1, 4) (1, 11) - >>> dump_tokens("x = 3E123") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("x = 3E123", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '3E123' (1, 4) (1, 9) - >>> dump_tokens("x+y = 3e-1230") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("x+y = 3e-1230", """\ NAME 'x' (1, 0) (1, 1) OP '+' (1, 1) (1, 2) NAME 'y' (1, 2) (1, 3) OP '=' (1, 4) (1, 5) NUMBER '3e-1230' (1, 6) (1, 13) - >>> dump_tokens("x = 3.14e159") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("x = 3.14e159", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '3.14e159' (1, 4) (1, 12) + """) -String literals - - >>> dump_tokens("x = ''; y = \\\"\\\"") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_string(self): + # String literals + self.check_tokenize("x = ''; y = \"\"", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) STRING "''" (1, 4) (1, 6) @@ -238,8 +196,8 @@ NAME 'y' (1, 8) (1, 9) OP '=' (1, 10) (1, 11) STRING '""' (1, 12) (1, 14) - >>> dump_tokens("x = '\\\"'; y = \\\"'\\\"") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("x = '\"'; y = \"'\"", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) STRING '\\'"\\'' (1, 4) (1, 7) @@ -247,29 +205,29 @@ NAME 'y' (1, 9) (1, 10) OP '=' (1, 11) (1, 12) STRING '"\\'"' (1, 13) (1, 16) - >>> dump_tokens("x = \\\"doesn't \\\"shrink\\\", does it\\\"") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("x = \"doesn't \"shrink\", does it\"", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) STRING '"doesn\\'t "' (1, 4) (1, 14) NAME 'shrink' (1, 14) (1, 20) STRING '", does it"' (1, 20) (1, 31) - >>> dump_tokens("x = 'abc' + 'ABC'") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("x = 'abc' + 'ABC'", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) STRING "'abc'" (1, 4) (1, 9) OP '+' (1, 10) (1, 11) STRING "'ABC'" (1, 12) (1, 17) - >>> dump_tokens('y = "ABC" + "ABC"') - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize('y = "ABC" + "ABC"', """\ NAME 'y' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) STRING '"ABC"' (1, 4) (1, 9) OP '+' (1, 10) (1, 11) STRING '"ABC"' (1, 12) (1, 17) - >>> dump_tokens("x = r'abc' + r'ABC' + R'ABC' + R'ABC'") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("x = r'abc' + r'ABC' + R'ABC' + R'ABC'", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) STRING "r'abc'" (1, 4) (1, 10) @@ -279,8 +237,8 @@ STRING "R'ABC'" (1, 22) (1, 28) OP '+' (1, 29) (1, 30) STRING 
"R'ABC'" (1, 31) (1, 37) - >>> dump_tokens('y = r"abc" + r"ABC" + R"ABC" + R"ABC"') - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize('y = r"abc" + r"ABC" + R"ABC" + R"ABC"', """\ NAME 'y' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) STRING 'r"abc"' (1, 4) (1, 10) @@ -290,30 +248,30 @@ STRING 'R"ABC"' (1, 22) (1, 28) OP '+' (1, 29) (1, 30) STRING 'R"ABC"' (1, 31) (1, 37) + """) - >>> dump_tokens("u'abc' + U'abc'") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("u'abc' + U'abc'", """\ STRING "u'abc'" (1, 0) (1, 6) OP '+' (1, 7) (1, 8) STRING "U'abc'" (1, 9) (1, 15) - >>> dump_tokens('u"abc" + U"abc"') - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize('u"abc" + U"abc"', """\ STRING 'u"abc"' (1, 0) (1, 6) OP '+' (1, 7) (1, 8) STRING 'U"abc"' (1, 9) (1, 15) + """) - >>> dump_tokens("b'abc' + B'abc'") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("b'abc' + B'abc'", """\ STRING "b'abc'" (1, 0) (1, 6) OP '+' (1, 7) (1, 8) STRING "B'abc'" (1, 9) (1, 15) - >>> dump_tokens('b"abc" + B"abc"') - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize('b"abc" + B"abc"', """\ STRING 'b"abc"' (1, 0) (1, 6) OP '+' (1, 7) (1, 8) STRING 'B"abc"' (1, 9) (1, 15) - >>> dump_tokens("br'abc' + bR'abc' + Br'abc' + BR'abc'") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("br'abc' + bR'abc' + Br'abc' + BR'abc'", """\ STRING "br'abc'" (1, 0) (1, 7) OP '+' (1, 8) (1, 9) STRING "bR'abc'" (1, 10) (1, 17) @@ -321,8 +279,8 @@ STRING "Br'abc'" (1, 20) (1, 27) OP '+' (1, 28) (1, 29) STRING "BR'abc'" (1, 30) (1, 37) - >>> dump_tokens('br"abc" + bR"abc" + Br"abc" + BR"abc"') - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize('br"abc" + bR"abc" + Br"abc" + BR"abc"', """\ STRING 'br"abc"' (1, 0) (1, 7) OP '+' (1, 8) (1, 9) STRING 'bR"abc"' (1, 10) (1, 17) @@ -330,8 +288,8 @@ STRING 'Br"abc"' (1, 20) (1, 27) OP '+' (1, 28) (1, 29) STRING 'BR"abc"' (1, 30) (1, 37) - >>> dump_tokens("rb'abc' + rB'abc' + Rb'abc' + RB'abc'") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("rb'abc' + rB'abc' + Rb'abc' + RB'abc'", """\ STRING "rb'abc'" (1, 0) (1, 7) OP '+' (1, 8) (1, 9) STRING "rB'abc'" (1, 10) (1, 17) @@ -339,8 +297,8 @@ STRING "Rb'abc'" (1, 20) (1, 27) OP '+' (1, 28) (1, 29) STRING "RB'abc'" (1, 30) (1, 37) - >>> dump_tokens('rb"abc" + rB"abc" + Rb"abc" + RB"abc"') - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize('rb"abc" + rB"abc" + Rb"abc" + RB"abc"', """\ STRING 'rb"abc"' (1, 0) (1, 7) OP '+' (1, 8) (1, 9) STRING 'rB"abc"' (1, 10) (1, 17) @@ -348,11 +306,10 @@ STRING 'Rb"abc"' (1, 20) (1, 27) OP '+' (1, 28) (1, 29) STRING 'RB"abc"' (1, 30) (1, 37) + """) -Operators - - >>> dump_tokens("def d22(a, b, c=2, d=2, *k): pass") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_function(self): + self.check_tokenize("def d22(a, b, c=2, d=2, *k): pass", """\ NAME 'def' (1, 0) (1, 3) NAME 'd22' (1, 4) (1, 7) OP '(' (1, 7) (1, 8) @@ -373,8 +330,8 @@ OP ')' (1, 26) (1, 27) OP ':' (1, 27) (1, 28) NAME 'pass' (1, 29) (1, 33) - >>> dump_tokens("def d01v_(a=1, *k, **w): pass") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("def d01v_(a=1, *k, **w): pass", """\ NAME 'def' (1, 0) (1, 3) NAME 'd01v_' (1, 4) (1, 9) OP '(' (1, 9) (1, 10) @@ -390,12 +347,12 @@ OP ')' (1, 22) (1, 23) OP ':' (1, 23) (1, 24) NAME 'pass' (1, 25) (1, 29) + """) -Comparison - - >>> dump_tokens("if 1 < 1 > 1 == 1 >= 5 <= 0x15 <= 0x12 != " + - ... 
"1 and 5 in 1 not in 1 is 1 or 5 is not 1: pass") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_comparison(self): + # Comparison + self.check_tokenize("if 1 < 1 > 1 == 1 >= 5 <= 0x15 <= 0x12 != " + "1 and 5 in 1 not in 1 is 1 or 5 is not 1: pass", """\ NAME 'if' (1, 0) (1, 2) NUMBER '1' (1, 3) (1, 4) OP '<' (1, 5) (1, 6) @@ -428,11 +385,11 @@ NUMBER '1' (1, 81) (1, 82) OP ':' (1, 82) (1, 83) NAME 'pass' (1, 84) (1, 88) + """) -Shift - - >>> dump_tokens("x = 1 << 1 >> 5") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_shift(self): + # Shift + self.check_tokenize("x = 1 << 1 >> 5", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '1' (1, 4) (1, 5) @@ -440,11 +397,11 @@ NUMBER '1' (1, 9) (1, 10) OP '>>' (1, 11) (1, 13) NUMBER '5' (1, 14) (1, 15) + """) -Additive - - >>> dump_tokens("x = 1 - y + 15 - 1 + 0x124 + z + a[5]") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_additive(self): + # Additive + self.check_tokenize("x = 1 - y + 15 - 1 + 0x124 + z + a[5]", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '1' (1, 4) (1, 5) @@ -463,11 +420,11 @@ OP '[' (1, 34) (1, 35) NUMBER '5' (1, 35) (1, 36) OP ']' (1, 36) (1, 37) + """) -Multiplicative - - >>> dump_tokens("x = 1//1*1/5*12%0x12 at 42") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_multiplicative(self): + # Multiplicative + self.check_tokenize("x = 1//1*1/5*12%0x12 at 42", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '1' (1, 4) (1, 5) @@ -483,11 +440,11 @@ NUMBER '0x12' (1, 16) (1, 20) OP '@' (1, 20) (1, 21) NUMBER '42' (1, 21) (1, 23) + """) -Unary - - >>> dump_tokens("~1 ^ 1 & 1 |1 ^ -1") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_unary(self): + # Unary + self.check_tokenize("~1 ^ 1 & 1 |1 ^ -1", """\ OP '~' (1, 0) (1, 1) NUMBER '1' (1, 1) (1, 2) OP '^' (1, 3) (1, 4) @@ -499,8 +456,8 @@ OP '^' (1, 14) (1, 15) OP '-' (1, 16) (1, 17) NUMBER '1' (1, 17) (1, 18) - >>> dump_tokens("-1*1/1+1*1//1 - ---1**1") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("-1*1/1+1*1//1 - ---1**1", """\ OP '-' (1, 0) (1, 1) NUMBER '1' (1, 1) (1, 2) OP '*' (1, 2) (1, 3) @@ -520,11 +477,11 @@ NUMBER '1' (1, 19) (1, 20) OP '**' (1, 20) (1, 22) NUMBER '1' (1, 22) (1, 23) + """) -Selector - - >>> dump_tokens("import sys, time\\nx = sys.modules['time'].time()") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_selector(self): + # Selector + self.check_tokenize("import sys, time\nx = sys.modules['time'].time()", """\ NAME 'import' (1, 0) (1, 6) NAME 'sys' (1, 7) (1, 10) OP ',' (1, 10) (1, 11) @@ -542,11 +499,11 @@ NAME 'time' (2, 24) (2, 28) OP '(' (2, 28) (2, 29) OP ')' (2, 29) (2, 30) + """) -Methods - - >>> dump_tokens("@staticmethod\\ndef foo(x,y): pass") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_method(self): + # Methods + self.check_tokenize("@staticmethod\ndef foo(x,y): pass", """\ OP '@' (1, 0) (1, 1) NAME 'staticmethod (1, 1) (1, 13) NEWLINE '\\n' (1, 13) (1, 14) @@ -559,52 +516,13 @@ OP ')' (2, 11) (2, 12) OP ':' (2, 12) (2, 13) NAME 'pass' (2, 14) (2, 18) + """) -Backslash means line continuation, except for comments - - >>> roundtrip("x=1+\\\\n" - ... "1\\n" - ... "# This is a comment\\\\n" - ... "# This also\\n") - True - >>> roundtrip("# Comment \\\\nx = 0") - True - -Two string literals on the same line - - >>> roundtrip("'' ''") - True - -Test roundtrip on random python modules. -pass the '-ucpu' option to process the full directory. 
- - >>> import random - >>> tempdir = os.path.dirname(f) or os.curdir - >>> testfiles = glob.glob(os.path.join(tempdir, "test*.py")) - -Tokenize is broken on test_pep3131.py because regular expressions are -broken on the obscure unicode identifiers in it. *sigh* -With roundtrip extended to test the 5-tuple mode of untokenize, -7 more testfiles fail. Remove them also until the failure is diagnosed. - - >>> testfiles.remove(os.path.join(tempdir, "test_pep3131.py")) - >>> for f in ('buffer', 'builtin', 'fileio', 'inspect', 'os', 'platform', 'sys'): - ... testfiles.remove(os.path.join(tempdir, "test_%s.py") % f) - ... - >>> if not support.is_resource_enabled("cpu"): - ... testfiles = random.sample(testfiles, 10) - ... - >>> for testfile in testfiles: - ... if not roundtrip(open(testfile, 'rb')): - ... print("Roundtrip failed for file %s" % testfile) - ... break - ... else: True - True - -Evil tabs - - >>> dump_tokens("def f():\\n\\tif x\\n \\tpass") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_tabs(self): + # Evil tabs + self.check_tokenize("def f():\n" + "\tif x\n" + " \tpass", """\ NAME 'def' (1, 0) (1, 3) NAME 'f' (1, 4) (1, 5) OP '(' (1, 5) (1, 6) @@ -619,11 +537,11 @@ NAME 'pass' (3, 9) (3, 13) DEDENT '' (4, 0) (4, 0) DEDENT '' (4, 0) (4, 0) + """) -Non-ascii identifiers - - >>> dump_tokens("?rter = 'places'\\ngr?n = 'green'") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_non_ascii_identifiers(self): + # Non-ascii identifiers + self.check_tokenize("?rter = 'places'\ngr?n = 'green'", """\ NAME '?rter' (1, 0) (1, 5) OP '=' (1, 6) (1, 7) STRING "'places'" (1, 8) (1, 16) @@ -631,11 +549,11 @@ NAME 'gr?n' (2, 0) (2, 4) OP '=' (2, 5) (2, 6) STRING "'green'" (2, 7) (2, 14) + """) -Legacy unicode literals: - - >>> dump_tokens("?rter = u'places'\\ngr?n = U'green'") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_unicode(self): + # Legacy unicode literals: + self.check_tokenize("?rter = u'places'\ngr?n = U'green'", """\ NAME '?rter' (1, 0) (1, 5) OP '=' (1, 6) (1, 7) STRING "u'places'" (1, 8) (1, 17) @@ -643,17 +561,17 @@ NAME 'gr?n' (2, 0) (2, 4) OP '=' (2, 5) (2, 6) STRING "U'green'" (2, 7) (2, 15) + """) -Async/await extension: - - >>> dump_tokens("async = 1") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_async(self): + # Async/await extension: + self.check_tokenize("async = 1", """\ NAME 'async' (1, 0) (1, 5) OP '=' (1, 6) (1, 7) NUMBER '1' (1, 8) (1, 9) + """) - >>> dump_tokens("a = (async = 1)") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("a = (async = 1)", """\ NAME 'a' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) OP '(' (1, 4) (1, 5) @@ -661,15 +579,15 @@ OP '=' (1, 11) (1, 12) NUMBER '1' (1, 13) (1, 14) OP ')' (1, 14) (1, 15) + """) - >>> dump_tokens("async()") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("async()", """\ NAME 'async' (1, 0) (1, 5) OP '(' (1, 5) (1, 6) OP ')' (1, 6) (1, 7) + """) - >>> dump_tokens("class async(Bar):pass") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("class async(Bar):pass", """\ NAME 'class' (1, 0) (1, 5) NAME 'async' (1, 6) (1, 11) OP '(' (1, 11) (1, 12) @@ -677,28 +595,28 @@ OP ')' (1, 15) (1, 16) OP ':' (1, 16) (1, 17) NAME 'pass' (1, 17) (1, 21) + """) - >>> dump_tokens("class async:pass") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("class async:pass", """\ NAME 'class' (1, 0) (1, 5) NAME 'async' (1, 6) (1, 11) OP ':' (1, 11) (1, 12) NAME 'pass' (1, 12) (1, 16) + """) - >>> dump_tokens("await = 1") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("await = 1", """\ NAME 'await' (1, 0) (1, 5) OP '=' (1, 6) (1, 7) NUMBER '1' 
(1, 8) (1, 9) + """) - >>> dump_tokens("foo.async") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("foo.async", """\ NAME 'foo' (1, 0) (1, 3) OP '.' (1, 3) (1, 4) NAME 'async' (1, 4) (1, 9) + """) - >>> dump_tokens("async for a in b: pass") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("async for a in b: pass", """\ NAME 'async' (1, 0) (1, 5) NAME 'for' (1, 6) (1, 9) NAME 'a' (1, 10) (1, 11) @@ -706,9 +624,9 @@ NAME 'b' (1, 15) (1, 16) OP ':' (1, 16) (1, 17) NAME 'pass' (1, 18) (1, 22) + """) - >>> dump_tokens("async with a as b: pass") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("async with a as b: pass", """\ NAME 'async' (1, 0) (1, 5) NAME 'with' (1, 6) (1, 10) NAME 'a' (1, 11) (1, 12) @@ -716,49 +634,49 @@ NAME 'b' (1, 16) (1, 17) OP ':' (1, 17) (1, 18) NAME 'pass' (1, 19) (1, 23) + """) - >>> dump_tokens("async.foo") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("async.foo", """\ NAME 'async' (1, 0) (1, 5) OP '.' (1, 5) (1, 6) NAME 'foo' (1, 6) (1, 9) + """) - >>> dump_tokens("async") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("async", """\ NAME 'async' (1, 0) (1, 5) + """) - >>> dump_tokens("async\\n#comment\\nawait") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("async\n#comment\nawait", """\ NAME 'async' (1, 0) (1, 5) NEWLINE '\\n' (1, 5) (1, 6) COMMENT '#comment' (2, 0) (2, 8) NL '\\n' (2, 8) (2, 9) NAME 'await' (3, 0) (3, 5) + """) - >>> dump_tokens("async\\n...\\nawait") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("async\n...\nawait", """\ NAME 'async' (1, 0) (1, 5) NEWLINE '\\n' (1, 5) (1, 6) OP '...' (2, 0) (2, 3) NEWLINE '\\n' (2, 3) (2, 4) NAME 'await' (3, 0) (3, 5) + """) - >>> dump_tokens("async\\nawait") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("async\nawait", """\ NAME 'async' (1, 0) (1, 5) NEWLINE '\\n' (1, 5) (1, 6) NAME 'await' (2, 0) (2, 5) + """) - >>> dump_tokens("foo.async + 1") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("foo.async + 1", """\ NAME 'foo' (1, 0) (1, 3) OP '.' (1, 3) (1, 4) NAME 'async' (1, 4) (1, 9) OP '+' (1, 10) (1, 11) NUMBER '1' (1, 12) (1, 13) + """) - >>> dump_tokens("async def foo(): pass") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("async def foo(): pass", """\ ASYNC 'async' (1, 0) (1, 5) NAME 'def' (1, 6) (1, 9) NAME 'foo' (1, 10) (1, 13) @@ -766,15 +684,16 @@ OP ')' (1, 14) (1, 15) OP ':' (1, 15) (1, 16) NAME 'pass' (1, 17) (1, 21) + """) - >>> dump_tokens('''async def foo(): - ... def foo(await): - ... await = 1 - ... if 1: - ... await - ... async += 1 - ... ''') - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize('''\ +async def foo(): + def foo(await): + await = 1 + if 1: + await +async += 1 +''', """\ ASYNC 'async' (1, 0) (1, 5) NAME 'def' (1, 6) (1, 9) NAME 'foo' (1, 10) (1, 13) @@ -809,10 +728,11 @@ OP '+=' (6, 6) (6, 8) NUMBER '1' (6, 9) (6, 10) NEWLINE '\\n' (6, 10) (6, 11) + """) - >>> dump_tokens('''async def foo(): - ... 
async for i in 1: pass''') - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize('''\ +async def foo(): + async for i in 1: pass''', """\ ASYNC 'async' (1, 0) (1, 5) NAME 'def' (1, 6) (1, 9) NAME 'foo' (1, 10) (1, 13) @@ -829,9 +749,9 @@ OP ':' (2, 18) (2, 19) NAME 'pass' (2, 20) (2, 24) DEDENT '' (3, 0) (3, 0) + """) - >>> dump_tokens('''async def foo(async): await''') - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize('''async def foo(async): await''', """\ ASYNC 'async' (1, 0) (1, 5) NAME 'def' (1, 6) (1, 9) NAME 'foo' (1, 10) (1, 13) @@ -840,14 +760,15 @@ OP ')' (1, 19) (1, 20) OP ':' (1, 20) (1, 21) AWAIT 'await' (1, 22) (1, 27) + """) - >>> dump_tokens('''def f(): - ... - ... def baz(): pass - ... async def bar(): pass - ... - ... await = 2''') - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize('''\ +def f(): + + def baz(): pass + async def bar(): pass + + await = 2''', """\ NAME 'def' (1, 0) (1, 3) NAME 'f' (1, 4) (1, 5) OP '(' (1, 5) (1, 6) @@ -876,14 +797,15 @@ OP '=' (6, 8) (6, 9) NUMBER '2' (6, 10) (6, 11) DEDENT '' (7, 0) (7, 0) + """) - >>> dump_tokens('''async def f(): - ... - ... def baz(): pass - ... async def bar(): pass - ... - ... await = 2''') - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize('''\ +async def f(): + + def baz(): pass + async def bar(): pass + + await = 2''', """\ ASYNC 'async' (1, 0) (1, 5) NAME 'def' (1, 6) (1, 9) NAME 'f' (1, 10) (1, 11) @@ -913,89 +835,10 @@ OP '=' (6, 8) (6, 9) NUMBER '2' (6, 10) (6, 11) DEDENT '' (7, 0) (7, 0) -""" + """) -from test import support -from tokenize import (tokenize, _tokenize, untokenize, NUMBER, NAME, OP, - STRING, ENDMARKER, ENCODING, tok_name, detect_encoding, - open as tokenize_open, Untokenizer) -from io import BytesIO -from unittest import TestCase, mock -import os -import token -def dump_tokens(s): - """Print out the tokens in s in a table format. - - The ENDMARKER is omitted. - """ - f = BytesIO(s.encode('utf-8')) - for type, token, start, end, line in tokenize(f.readline): - if type == ENDMARKER: - break - type = tok_name[type] - print("%(type)-10.10s %(token)-13.13r %(start)s %(end)s" % locals()) - -def roundtrip(f): - """ - Test roundtrip for `untokenize`. `f` is an open file or a string. - The source code in f is tokenized to both 5- and 2-tuples. - Both sequences are converted back to source code via - tokenize.untokenize(), and the latter tokenized again to 2-tuples. - The test fails if the 3 pair tokenizations do not match. - - When untokenize bugs are fixed, untokenize with 5-tuples should - reproduce code that does not contain a backslash continuation - following spaces. A proper test should test this. - - This function would be more useful for correcting bugs if it reported - the first point of failure, like assertEqual, rather than just - returning False -- or if it were only used in unittests and not - doctest and actually used assertEqual. 
- """ - # Get source code and original tokenizations - if isinstance(f, str): - code = f.encode('utf-8') - else: - code = f.read() - f.close() - readline = iter(code.splitlines(keepends=True)).__next__ - tokens5 = list(tokenize(readline)) - tokens2 = [tok[:2] for tok in tokens5] - # Reproduce tokens2 from pairs - bytes_from2 = untokenize(tokens2) - readline2 = iter(bytes_from2.splitlines(keepends=True)).__next__ - tokens2_from2 = [tok[:2] for tok in tokenize(readline2)] - # Reproduce tokens2 from 5-tuples - bytes_from5 = untokenize(tokens5) - readline5 = iter(bytes_from5.splitlines(keepends=True)).__next__ - tokens2_from5 = [tok[:2] for tok in tokenize(readline5)] - # Compare 3 versions - return tokens2 == tokens2_from2 == tokens2_from5 - -# This is an example from the docs, set up as a doctest. def decistmt(s): - """Substitute Decimals for floats in a string of statements. - - >>> from decimal import Decimal - >>> s = 'print(+21.3e-5*-.1234/81.7)' - >>> decistmt(s) - "print (+Decimal ('21.3e-5')*-Decimal ('.1234')/Decimal ('81.7'))" - - The format of the exponent is inherited from the platform C library. - Known cases are "e-007" (Windows) and "e-07" (not Windows). Since - we're only showing 11 digits, and the 12th isn't close to 5, the - rest of the output should be platform-independent. - - >>> exec(s) #doctest: +ELLIPSIS - -3.2171603427...e-0...7 - - Output from calculations with Decimal should be identical across all - platforms. - - >>> exec(decistmt(s)) - -3.217160342717258261933904529E-7 - """ result = [] g = tokenize(BytesIO(s.encode('utf-8')).readline) # tokenize the string for toknum, tokval, _, _, _ in g: @@ -1010,6 +853,28 @@ result.append((toknum, tokval)) return untokenize(result).decode('utf-8') +class TestMisc(TestCase): + + def test_decistmt(self): + # Substitute Decimals for floats in a string of statements. + # This is an example from the docs. + + from decimal import Decimal + s = '+21.3e-5*-.1234/81.7' + self.assertEqual(decistmt(s), + "+Decimal ('21.3e-5')*-Decimal ('.1234')/Decimal ('81.7')") + + # The format of the exponent is inherited from the platform C library. + # Known cases are "e-007" (Windows) and "e-07" (not Windows). Since + # we're only showing 11 digits, and the 12th isn't close to 5, the + # rest of the output should be platform-independent. + self.assertRegex(repr(eval(s)), '-3.2171603427[0-9]*e-0+7') + + # Output from calculations with Decimal should be identical across all + # platforms. 
+ self.assertEqual(eval(decistmt(s)), + Decimal('-3.217160342717258261933904529E-7')) + class TestTokenizerAdheresToPep0263(TestCase): """ @@ -1018,11 +883,11 @@ def _testFile(self, filename): path = os.path.join(os.path.dirname(__file__), filename) - return roundtrip(open(path, 'rb')) + TestRoundtrip.check_roundtrip(self, open(path, 'rb')) def test_utf8_coding_cookie_and_no_utf8_bom(self): f = 'tokenize_tests-utf8-coding-cookie-and-no-utf8-bom-sig.txt' - self.assertTrue(self._testFile(f)) + self._testFile(f) def test_latin1_coding_cookie_and_utf8_bom(self): """ @@ -1037,11 +902,11 @@ def test_no_coding_cookie_and_utf8_bom(self): f = 'tokenize_tests-no-coding-cookie-and-utf8-bom-sig-only.txt' - self.assertTrue(self._testFile(f)) + self._testFile(f) def test_utf8_coding_cookie_and_utf8_bom(self): f = 'tokenize_tests-utf8-coding-cookie-and-utf8-bom-sig.txt' - self.assertTrue(self._testFile(f)) + self._testFile(f) def test_bad_coding_cookie(self): self.assertRaises(SyntaxError, self._testFile, 'bad_coding.py') @@ -1340,7 +1205,6 @@ self.assertTrue(m.closed) - class TestTokenize(TestCase): def test_tokenize(self): @@ -1472,6 +1336,7 @@ # See http://bugs.python.org/issue16152 self.assertExactTypeEqual('@ ', token.AT) + class UntokenizeTest(TestCase): def test_bad_input_order(self): @@ -1497,7 +1362,7 @@ u.prev_row = 2 u.add_whitespace((4, 4)) self.assertEqual(u.tokens, ['\\\n', '\\\n\\\n', ' ']) - self.assertTrue(roundtrip('a\n b\n c\n \\\n c\n')) + TestRoundtrip.check_roundtrip(self, 'a\n b\n c\n \\\n c\n') def test_iter_compat(self): u = Untokenizer() @@ -1514,6 +1379,131 @@ class TestRoundtrip(TestCase): + + def check_roundtrip(self, f): + """ + Test roundtrip for `untokenize`. `f` is an open file or a string. + The source code in f is tokenized to both 5- and 2-tuples. + Both sequences are converted back to source code via + tokenize.untokenize(), and the latter tokenized again to 2-tuples. + The test fails if the 3 pair tokenizations do not match. + + When untokenize bugs are fixed, untokenize with 5-tuples should + reproduce code that does not contain a backslash continuation + following spaces. A proper test should test this. + """ + # Get source code and original tokenizations + if isinstance(f, str): + code = f.encode('utf-8') + else: + code = f.read() + f.close() + readline = iter(code.splitlines(keepends=True)).__next__ + tokens5 = list(tokenize(readline)) + tokens2 = [tok[:2] for tok in tokens5] + # Reproduce tokens2 from pairs + bytes_from2 = untokenize(tokens2) + readline2 = iter(bytes_from2.splitlines(keepends=True)).__next__ + tokens2_from2 = [tok[:2] for tok in tokenize(readline2)] + self.assertEqual(tokens2_from2, tokens2) + # Reproduce tokens2 from 5-tuples + bytes_from5 = untokenize(tokens5) + readline5 = iter(bytes_from5.splitlines(keepends=True)).__next__ + tokens2_from5 = [tok[:2] for tok in tokenize(readline5)] + self.assertEqual(tokens2_from5, tokens2) + + def test_roundtrip(self): + # There are some standard formatting practices that are easy to get right. + + self.check_roundtrip("if x == 1:\n" + " print(x)\n") + self.check_roundtrip("# This is a comment\n" + "# This also") + + # Some people use different formatting conventions, which makes + # untokenize a little trickier. Note that this test involves trailing + # whitespace after the colon. Note that we use hex escapes to make the + # two trailing blanks apparent in the expected output. 
+ + self.check_roundtrip("if x == 1 : \n" + " print(x)\n") + fn = support.findfile("tokenize_tests.txt") + with open(fn, 'rb') as f: + self.check_roundtrip(f) + self.check_roundtrip("if x == 1:\n" + " # A comment by itself.\n" + " print(x) # Comment here, too.\n" + " # Another comment.\n" + "after_if = True\n") + self.check_roundtrip("if (x # The comments need to go in the right place\n" + " == 1):\n" + " print('x==1')\n") + self.check_roundtrip("class Test: # A comment here\n" + " # A comment with weird indent\n" + " after_com = 5\n" + " def x(m): return m*5 # a one liner\n" + " def y(m): # A whitespace after the colon\n" + " return y*4 # 3-space indent\n") + + # Some error-handling code + self.check_roundtrip("try: import somemodule\n" + "except ImportError: # comment\n" + " print('Can not import' # comment2\n)" + "else: print('Loaded')\n") + + def test_continuation(self): + # Balancing continuation + self.check_roundtrip("a = (3,4, \n" + "5,6)\n" + "y = [3, 4,\n" + "5]\n" + "z = {'a': 5,\n" + "'b':15, 'c':True}\n" + "x = len(y) + 5 - a[\n" + "3] - a[2]\n" + "+ len(z) - z[\n" + "'b']\n") + + def test_backslash_continuation(self): + # Backslash means line continuation, except for comments + self.check_roundtrip("x=1+\\\n" + "1\n" + "# This is a comment\\\n" + "# This also\n") + self.check_roundtrip("# Comment \\\n" + "x = 0") + + def test_string_concatenation(self): + # Two string literals on the same line + self.check_roundtrip("'' ''") + + def test_random_files(self): + # Test roundtrip on random python modules. + # pass the '-ucpu' option to process the full directory. + + import glob, random + fn = support.findfile("tokenize_tests.txt") + tempdir = os.path.dirname(fn) or os.curdir + testfiles = glob.glob(os.path.join(tempdir, "test*.py")) + + # Tokenize is broken on test_pep3131.py because regular expressions are + # broken on the obscure unicode identifiers in it. *sigh* + # With roundtrip extended to test the 5-tuple mode of untokenize, + # 7 more testfiles fail. Remove them also until the failure is diagnosed. 
+ + testfiles.remove(os.path.join(tempdir, "test_pep3131.py")) + for f in ('buffer', 'builtin', 'fileio', 'inspect', 'os', 'platform', 'sys'): + testfiles.remove(os.path.join(tempdir, "test_%s.py") % f) + + if not support.is_resource_enabled("cpu"): + testfiles = random.sample(testfiles, 10) + + for testfile in testfiles: + with open(testfile, 'rb') as f: + with self.subTest(file=testfile): + self.check_roundtrip(f) + + def roundtrip(self, code): if isinstance(code, str): code = code.encode('utf-8') @@ -1527,19 +1517,8 @@ code = "if False:\n\tx=3\n\tx=3\n" codelines = self.roundtrip(code).split('\n') self.assertEqual(codelines[1], codelines[2]) + self.check_roundtrip(code) -__test__ = {"doctests" : doctests, 'decistmt': decistmt} - -def test_main(): - from test import test_tokenize - support.run_doctest(test_tokenize, True) - support.run_unittest(TestTokenizerAdheresToPep0263) - support.run_unittest(Test_Tokenize) - support.run_unittest(TestDetectEncoding) - support.run_unittest(TestTokenize) - support.run_unittest(UntokenizeTest) - support.run_unittest(TestRoundtrip) - if __name__ == "__main__": - test_main() + unittest.main() -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Oct 6 17:54:19 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Tue, 06 Oct 2015 15:54:19 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_Issue_=2325317=3A_Converted_doctests_in_test=5Ftokenize_to_uni?= =?utf-8?q?ttests=2E?= Message-ID: <20151006155416.2685.27346@psf.io> https://hg.python.org/cpython/rev/bff40616d2a5 changeset: 98557:bff40616d2a5 branch: 3.5 parent: 98550:3719e842a7b1 parent: 98556:d272f3cbae05 user: Serhiy Storchaka date: Tue Oct 06 18:23:12 2015 +0300 summary: Issue #25317: Converted doctests in test_tokenize to unittests. Made test_tokenize discoverable. files: Lib/test/test_tokenize.py | 811 ++++++++++++------------- 1 files changed, 395 insertions(+), 416 deletions(-) diff --git a/Lib/test/test_tokenize.py b/Lib/test/test_tokenize.py --- a/Lib/test/test_tokenize.py +++ b/Lib/test/test_tokenize.py @@ -1,22 +1,44 @@ -doctests = """ -Tests for the tokenize module. +from test import support +from tokenize import (tokenize, _tokenize, untokenize, NUMBER, NAME, OP, + STRING, ENDMARKER, ENCODING, tok_name, detect_encoding, + open as tokenize_open, Untokenizer) +from io import BytesIO +from unittest import TestCase, mock +import os +import token -The tests can be really simple. Given a small fragment of source -code, print out a table with tokens. The ENDMARKER is omitted for -brevity. - >>> import glob +class TokenizeTest(TestCase): + # Tests for the tokenize module. - >>> dump_tokens("1 + 1") - ENCODING 'utf-8' (0, 0) (0, 0) + # The tests can be really simple. Given a small fragment of source + # code, print out a table with tokens. The ENDMARKER is omitted for + # brevity. + + def check_tokenize(self, s, expected): + # Format the tokens in s in a table format. + # The ENDMARKER is omitted. + result = [] + f = BytesIO(s.encode('utf-8')) + for type, token, start, end, line in tokenize(f.readline): + if type == ENDMARKER: + break + type = tok_name[type] + result.append(" %(type)-10.10s %(token)-13.13r %(start)s %(end)s" % + locals()) + self.assertEqual(result, + [" ENCODING 'utf-8' (0, 0) (0, 0)"] + + expected.rstrip().splitlines()) + + def test_basic(self): + self.check_tokenize("1 + 1", """\ NUMBER '1' (1, 0) (1, 1) OP '+' (1, 2) (1, 3) NUMBER '1' (1, 4) (1, 5) - - >>> dump_tokens("if False:\\n" - ... 
" # NL\\n" - ... " True = False # NEWLINE\\n") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("if False:\n" + " # NL\n" + " True = False # NEWLINE\n", """\ NAME 'if' (1, 0) (1, 2) NAME 'False' (1, 3) (1, 8) OP ':' (1, 8) (1, 9) @@ -30,112 +52,48 @@ COMMENT '# NEWLINE' (3, 17) (3, 26) NEWLINE '\\n' (3, 26) (3, 27) DEDENT '' (4, 0) (4, 0) + """) + indent_error_file = b"""\ +def k(x): + x += 2 + x += 5 +""" + readline = BytesIO(indent_error_file).readline + with self.assertRaisesRegex(IndentationError, + "unindent does not match any " + "outer indentation level"): + for tok in tokenize(readline): + pass - >>> indent_error_file = \""" - ... def k(x): - ... x += 2 - ... x += 5 - ... \""" - >>> readline = BytesIO(indent_error_file.encode('utf-8')).readline - >>> for tok in tokenize(readline): pass - Traceback (most recent call last): - ... - IndentationError: unindent does not match any outer indentation level - -There are some standard formatting practices that are easy to get right. - - >>> roundtrip("if x == 1:\\n" - ... " print(x)\\n") - True - - >>> roundtrip("# This is a comment\\n# This also") - True - -Some people use different formatting conventions, which makes -untokenize a little trickier. Note that this test involves trailing -whitespace after the colon. Note that we use hex escapes to make the -two trailing blanks apparent in the expected output. - - >>> roundtrip("if x == 1 : \\n" - ... " print(x)\\n") - True - - >>> f = support.findfile("tokenize_tests.txt") - >>> roundtrip(open(f, 'rb')) - True - - >>> roundtrip("if x == 1:\\n" - ... " # A comment by itself.\\n" - ... " print(x) # Comment here, too.\\n" - ... " # Another comment.\\n" - ... "after_if = True\\n") - True - - >>> roundtrip("if (x # The comments need to go in the right place\\n" - ... " == 1):\\n" - ... " print('x==1')\\n") - True - - >>> roundtrip("class Test: # A comment here\\n" - ... " # A comment with weird indent\\n" - ... " after_com = 5\\n" - ... " def x(m): return m*5 # a one liner\\n" - ... " def y(m): # A whitespace after the colon\\n" - ... " return y*4 # 3-space indent\\n") - True - -Some error-handling code - - >>> roundtrip("try: import somemodule\\n" - ... "except ImportError: # comment\\n" - ... " print('Can not import' # comment2\\n)" - ... "else: print('Loaded')\\n") - True - -Balancing continuation - - >>> roundtrip("a = (3,4, \\n" - ... "5,6)\\n" - ... "y = [3, 4,\\n" - ... "5]\\n" - ... "z = {'a': 5,\\n" - ... "'b':15, 'c':True}\\n" - ... "x = len(y) + 5 - a[\\n" - ... "3] - a[2]\\n" - ... "+ len(z) - z[\\n" - ... 
"'b']\\n") - True - -Ordinary integers and binary operators - - >>> dump_tokens("0xff <= 255") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_int(self): + # Ordinary integers and binary operators + self.check_tokenize("0xff <= 255", """\ NUMBER '0xff' (1, 0) (1, 4) OP '<=' (1, 5) (1, 7) NUMBER '255' (1, 8) (1, 11) - >>> dump_tokens("0b10 <= 255") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("0b10 <= 255", """\ NUMBER '0b10' (1, 0) (1, 4) OP '<=' (1, 5) (1, 7) NUMBER '255' (1, 8) (1, 11) - >>> dump_tokens("0o123 <= 0O123") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("0o123 <= 0O123", """\ NUMBER '0o123' (1, 0) (1, 5) OP '<=' (1, 6) (1, 8) NUMBER '0O123' (1, 9) (1, 14) - >>> dump_tokens("1234567 > ~0x15") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("1234567 > ~0x15", """\ NUMBER '1234567' (1, 0) (1, 7) OP '>' (1, 8) (1, 9) OP '~' (1, 10) (1, 11) NUMBER '0x15' (1, 11) (1, 15) - >>> dump_tokens("2134568 != 1231515") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("2134568 != 1231515", """\ NUMBER '2134568' (1, 0) (1, 7) OP '!=' (1, 8) (1, 10) NUMBER '1231515' (1, 11) (1, 18) - >>> dump_tokens("(-124561-1) & 200000000") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("(-124561-1) & 200000000", """\ OP '(' (1, 0) (1, 1) OP '-' (1, 1) (1, 2) NUMBER '124561' (1, 2) (1, 8) @@ -144,93 +102,93 @@ OP ')' (1, 10) (1, 11) OP '&' (1, 12) (1, 13) NUMBER '200000000' (1, 14) (1, 23) - >>> dump_tokens("0xdeadbeef != -1") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("0xdeadbeef != -1", """\ NUMBER '0xdeadbeef' (1, 0) (1, 10) OP '!=' (1, 11) (1, 13) OP '-' (1, 14) (1, 15) NUMBER '1' (1, 15) (1, 16) - >>> dump_tokens("0xdeadc0de & 12345") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("0xdeadc0de & 12345", """\ NUMBER '0xdeadc0de' (1, 0) (1, 10) OP '&' (1, 11) (1, 12) NUMBER '12345' (1, 13) (1, 18) - >>> dump_tokens("0xFF & 0x15 | 1234") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("0xFF & 0x15 | 1234", """\ NUMBER '0xFF' (1, 0) (1, 4) OP '&' (1, 5) (1, 6) NUMBER '0x15' (1, 7) (1, 11) OP '|' (1, 12) (1, 13) NUMBER '1234' (1, 14) (1, 18) + """) -Long integers - - >>> dump_tokens("x = 0") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_long(self): + # Long integers + self.check_tokenize("x = 0", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '0' (1, 4) (1, 5) - >>> dump_tokens("x = 0xfffffffffff") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("x = 0xfffffffffff", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '0xffffffffff (1, 4) (1, 17) - >>> dump_tokens("x = 123141242151251616110") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("x = 123141242151251616110", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '123141242151 (1, 4) (1, 25) - >>> dump_tokens("x = -15921590215012591") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("x = -15921590215012591", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) OP '-' (1, 4) (1, 5) NUMBER '159215902150 (1, 5) (1, 22) + """) -Floating point numbers - - >>> dump_tokens("x = 3.14159") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_float(self): + # Floating point numbers + self.check_tokenize("x = 3.14159", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '3.14159' (1, 4) (1, 11) - >>> dump_tokens("x = 314159.") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("x = 314159.", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '314159.' 
(1, 4) (1, 11) - >>> dump_tokens("x = .314159") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("x = .314159", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '.314159' (1, 4) (1, 11) - >>> dump_tokens("x = 3e14159") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("x = 3e14159", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '3e14159' (1, 4) (1, 11) - >>> dump_tokens("x = 3E123") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("x = 3E123", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '3E123' (1, 4) (1, 9) - >>> dump_tokens("x+y = 3e-1230") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("x+y = 3e-1230", """\ NAME 'x' (1, 0) (1, 1) OP '+' (1, 1) (1, 2) NAME 'y' (1, 2) (1, 3) OP '=' (1, 4) (1, 5) NUMBER '3e-1230' (1, 6) (1, 13) - >>> dump_tokens("x = 3.14e159") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("x = 3.14e159", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '3.14e159' (1, 4) (1, 12) + """) -String literals - - >>> dump_tokens("x = ''; y = \\\"\\\"") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_string(self): + # String literals + self.check_tokenize("x = ''; y = \"\"", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) STRING "''" (1, 4) (1, 6) @@ -238,8 +196,8 @@ NAME 'y' (1, 8) (1, 9) OP '=' (1, 10) (1, 11) STRING '""' (1, 12) (1, 14) - >>> dump_tokens("x = '\\\"'; y = \\\"'\\\"") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("x = '\"'; y = \"'\"", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) STRING '\\'"\\'' (1, 4) (1, 7) @@ -247,29 +205,29 @@ NAME 'y' (1, 9) (1, 10) OP '=' (1, 11) (1, 12) STRING '"\\'"' (1, 13) (1, 16) - >>> dump_tokens("x = \\\"doesn't \\\"shrink\\\", does it\\\"") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("x = \"doesn't \"shrink\", does it\"", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) STRING '"doesn\\'t "' (1, 4) (1, 14) NAME 'shrink' (1, 14) (1, 20) STRING '", does it"' (1, 20) (1, 31) - >>> dump_tokens("x = 'abc' + 'ABC'") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("x = 'abc' + 'ABC'", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) STRING "'abc'" (1, 4) (1, 9) OP '+' (1, 10) (1, 11) STRING "'ABC'" (1, 12) (1, 17) - >>> dump_tokens('y = "ABC" + "ABC"') - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize('y = "ABC" + "ABC"', """\ NAME 'y' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) STRING '"ABC"' (1, 4) (1, 9) OP '+' (1, 10) (1, 11) STRING '"ABC"' (1, 12) (1, 17) - >>> dump_tokens("x = r'abc' + r'ABC' + R'ABC' + R'ABC'") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("x = r'abc' + r'ABC' + R'ABC' + R'ABC'", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) STRING "r'abc'" (1, 4) (1, 10) @@ -279,8 +237,8 @@ STRING "R'ABC'" (1, 22) (1, 28) OP '+' (1, 29) (1, 30) STRING "R'ABC'" (1, 31) (1, 37) - >>> dump_tokens('y = r"abc" + r"ABC" + R"ABC" + R"ABC"') - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize('y = r"abc" + r"ABC" + R"ABC" + R"ABC"', """\ NAME 'y' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) STRING 'r"abc"' (1, 4) (1, 10) @@ -290,30 +248,30 @@ STRING 'R"ABC"' (1, 22) (1, 28) OP '+' (1, 29) (1, 30) STRING 'R"ABC"' (1, 31) (1, 37) + """) - >>> dump_tokens("u'abc' + U'abc'") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("u'abc' + U'abc'", """\ STRING "u'abc'" (1, 0) (1, 6) OP '+' (1, 7) (1, 8) STRING "U'abc'" (1, 9) (1, 15) - >>> dump_tokens('u"abc" + U"abc"') - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize('u"abc" + U"abc"', """\ STRING 'u"abc"' 
(1, 0) (1, 6) OP '+' (1, 7) (1, 8) STRING 'U"abc"' (1, 9) (1, 15) + """) - >>> dump_tokens("b'abc' + B'abc'") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("b'abc' + B'abc'", """\ STRING "b'abc'" (1, 0) (1, 6) OP '+' (1, 7) (1, 8) STRING "B'abc'" (1, 9) (1, 15) - >>> dump_tokens('b"abc" + B"abc"') - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize('b"abc" + B"abc"', """\ STRING 'b"abc"' (1, 0) (1, 6) OP '+' (1, 7) (1, 8) STRING 'B"abc"' (1, 9) (1, 15) - >>> dump_tokens("br'abc' + bR'abc' + Br'abc' + BR'abc'") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("br'abc' + bR'abc' + Br'abc' + BR'abc'", """\ STRING "br'abc'" (1, 0) (1, 7) OP '+' (1, 8) (1, 9) STRING "bR'abc'" (1, 10) (1, 17) @@ -321,8 +279,8 @@ STRING "Br'abc'" (1, 20) (1, 27) OP '+' (1, 28) (1, 29) STRING "BR'abc'" (1, 30) (1, 37) - >>> dump_tokens('br"abc" + bR"abc" + Br"abc" + BR"abc"') - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize('br"abc" + bR"abc" + Br"abc" + BR"abc"', """\ STRING 'br"abc"' (1, 0) (1, 7) OP '+' (1, 8) (1, 9) STRING 'bR"abc"' (1, 10) (1, 17) @@ -330,8 +288,8 @@ STRING 'Br"abc"' (1, 20) (1, 27) OP '+' (1, 28) (1, 29) STRING 'BR"abc"' (1, 30) (1, 37) - >>> dump_tokens("rb'abc' + rB'abc' + Rb'abc' + RB'abc'") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("rb'abc' + rB'abc' + Rb'abc' + RB'abc'", """\ STRING "rb'abc'" (1, 0) (1, 7) OP '+' (1, 8) (1, 9) STRING "rB'abc'" (1, 10) (1, 17) @@ -339,8 +297,8 @@ STRING "Rb'abc'" (1, 20) (1, 27) OP '+' (1, 28) (1, 29) STRING "RB'abc'" (1, 30) (1, 37) - >>> dump_tokens('rb"abc" + rB"abc" + Rb"abc" + RB"abc"') - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize('rb"abc" + rB"abc" + Rb"abc" + RB"abc"', """\ STRING 'rb"abc"' (1, 0) (1, 7) OP '+' (1, 8) (1, 9) STRING 'rB"abc"' (1, 10) (1, 17) @@ -348,11 +306,10 @@ STRING 'Rb"abc"' (1, 20) (1, 27) OP '+' (1, 28) (1, 29) STRING 'RB"abc"' (1, 30) (1, 37) + """) -Operators - - >>> dump_tokens("def d22(a, b, c=2, d=2, *k): pass") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_function(self): + self.check_tokenize("def d22(a, b, c=2, d=2, *k): pass", """\ NAME 'def' (1, 0) (1, 3) NAME 'd22' (1, 4) (1, 7) OP '(' (1, 7) (1, 8) @@ -373,8 +330,8 @@ OP ')' (1, 26) (1, 27) OP ':' (1, 27) (1, 28) NAME 'pass' (1, 29) (1, 33) - >>> dump_tokens("def d01v_(a=1, *k, **w): pass") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("def d01v_(a=1, *k, **w): pass", """\ NAME 'def' (1, 0) (1, 3) NAME 'd01v_' (1, 4) (1, 9) OP '(' (1, 9) (1, 10) @@ -390,12 +347,12 @@ OP ')' (1, 22) (1, 23) OP ':' (1, 23) (1, 24) NAME 'pass' (1, 25) (1, 29) + """) -Comparison - - >>> dump_tokens("if 1 < 1 > 1 == 1 >= 5 <= 0x15 <= 0x12 != " + - ... 
"1 and 5 in 1 not in 1 is 1 or 5 is not 1: pass") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_comparison(self): + # Comparison + self.check_tokenize("if 1 < 1 > 1 == 1 >= 5 <= 0x15 <= 0x12 != " + "1 and 5 in 1 not in 1 is 1 or 5 is not 1: pass", """\ NAME 'if' (1, 0) (1, 2) NUMBER '1' (1, 3) (1, 4) OP '<' (1, 5) (1, 6) @@ -428,11 +385,11 @@ NUMBER '1' (1, 81) (1, 82) OP ':' (1, 82) (1, 83) NAME 'pass' (1, 84) (1, 88) + """) -Shift - - >>> dump_tokens("x = 1 << 1 >> 5") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_shift(self): + # Shift + self.check_tokenize("x = 1 << 1 >> 5", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '1' (1, 4) (1, 5) @@ -440,11 +397,11 @@ NUMBER '1' (1, 9) (1, 10) OP '>>' (1, 11) (1, 13) NUMBER '5' (1, 14) (1, 15) + """) -Additive - - >>> dump_tokens("x = 1 - y + 15 - 1 + 0x124 + z + a[5]") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_additive(self): + # Additive + self.check_tokenize("x = 1 - y + 15 - 1 + 0x124 + z + a[5]", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '1' (1, 4) (1, 5) @@ -463,11 +420,11 @@ OP '[' (1, 34) (1, 35) NUMBER '5' (1, 35) (1, 36) OP ']' (1, 36) (1, 37) + """) -Multiplicative - - >>> dump_tokens("x = 1//1*1/5*12%0x12 at 42") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_multiplicative(self): + # Multiplicative + self.check_tokenize("x = 1//1*1/5*12%0x12 at 42", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '1' (1, 4) (1, 5) @@ -483,11 +440,11 @@ NUMBER '0x12' (1, 16) (1, 20) OP '@' (1, 20) (1, 21) NUMBER '42' (1, 21) (1, 23) + """) -Unary - - >>> dump_tokens("~1 ^ 1 & 1 |1 ^ -1") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_unary(self): + # Unary + self.check_tokenize("~1 ^ 1 & 1 |1 ^ -1", """\ OP '~' (1, 0) (1, 1) NUMBER '1' (1, 1) (1, 2) OP '^' (1, 3) (1, 4) @@ -499,8 +456,8 @@ OP '^' (1, 14) (1, 15) OP '-' (1, 16) (1, 17) NUMBER '1' (1, 17) (1, 18) - >>> dump_tokens("-1*1/1+1*1//1 - ---1**1") - ENCODING 'utf-8' (0, 0) (0, 0) + """) + self.check_tokenize("-1*1/1+1*1//1 - ---1**1", """\ OP '-' (1, 0) (1, 1) NUMBER '1' (1, 1) (1, 2) OP '*' (1, 2) (1, 3) @@ -520,11 +477,11 @@ NUMBER '1' (1, 19) (1, 20) OP '**' (1, 20) (1, 22) NUMBER '1' (1, 22) (1, 23) + """) -Selector - - >>> dump_tokens("import sys, time\\nx = sys.modules['time'].time()") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_selector(self): + # Selector + self.check_tokenize("import sys, time\nx = sys.modules['time'].time()", """\ NAME 'import' (1, 0) (1, 6) NAME 'sys' (1, 7) (1, 10) OP ',' (1, 10) (1, 11) @@ -542,11 +499,11 @@ NAME 'time' (2, 24) (2, 28) OP '(' (2, 28) (2, 29) OP ')' (2, 29) (2, 30) + """) -Methods - - >>> dump_tokens("@staticmethod\\ndef foo(x,y): pass") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_method(self): + # Methods + self.check_tokenize("@staticmethod\ndef foo(x,y): pass", """\ OP '@' (1, 0) (1, 1) NAME 'staticmethod (1, 1) (1, 13) NEWLINE '\\n' (1, 13) (1, 14) @@ -559,52 +516,13 @@ OP ')' (2, 11) (2, 12) OP ':' (2, 12) (2, 13) NAME 'pass' (2, 14) (2, 18) + """) -Backslash means line continuation, except for comments - - >>> roundtrip("x=1+\\\\n" - ... "1\\n" - ... "# This is a comment\\\\n" - ... "# This also\\n") - True - >>> roundtrip("# Comment \\\\nx = 0") - True - -Two string literals on the same line - - >>> roundtrip("'' ''") - True - -Test roundtrip on random python modules. -pass the '-ucpu' option to process the full directory. 
- - >>> import random - >>> tempdir = os.path.dirname(f) or os.curdir - >>> testfiles = glob.glob(os.path.join(tempdir, "test*.py")) - -Tokenize is broken on test_pep3131.py because regular expressions are -broken on the obscure unicode identifiers in it. *sigh* -With roundtrip extended to test the 5-tuple mode of untokenize, -7 more testfiles fail. Remove them also until the failure is diagnosed. - - >>> testfiles.remove(os.path.join(tempdir, "test_pep3131.py")) - >>> for f in ('buffer', 'builtin', 'fileio', 'inspect', 'os', 'platform', 'sys'): - ... testfiles.remove(os.path.join(tempdir, "test_%s.py") % f) - ... - >>> if not support.is_resource_enabled("cpu"): - ... testfiles = random.sample(testfiles, 10) - ... - >>> for testfile in testfiles: - ... if not roundtrip(open(testfile, 'rb')): - ... print("Roundtrip failed for file %s" % testfile) - ... break - ... else: True - True - -Evil tabs - - >>> dump_tokens("def f():\\n\\tif x\\n \\tpass") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_tabs(self): + # Evil tabs + self.check_tokenize("def f():\n" + "\tif x\n" + " \tpass", """\ NAME 'def' (1, 0) (1, 3) NAME 'f' (1, 4) (1, 5) OP '(' (1, 5) (1, 6) @@ -619,11 +537,11 @@ NAME 'pass' (3, 9) (3, 13) DEDENT '' (4, 0) (4, 0) DEDENT '' (4, 0) (4, 0) + """) -Non-ascii identifiers - - >>> dump_tokens("?rter = 'places'\\ngr?n = 'green'") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_non_ascii_identifiers(self): + # Non-ascii identifiers + self.check_tokenize("?rter = 'places'\ngr?n = 'green'", """\ NAME '?rter' (1, 0) (1, 5) OP '=' (1, 6) (1, 7) STRING "'places'" (1, 8) (1, 16) @@ -631,11 +549,11 @@ NAME 'gr?n' (2, 0) (2, 4) OP '=' (2, 5) (2, 6) STRING "'green'" (2, 7) (2, 14) + """) -Legacy unicode literals: - - >>> dump_tokens("?rter = u'places'\\ngr?n = U'green'") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_unicode(self): + # Legacy unicode literals: + self.check_tokenize("?rter = u'places'\ngr?n = U'green'", """\ NAME '?rter' (1, 0) (1, 5) OP '=' (1, 6) (1, 7) STRING "u'places'" (1, 8) (1, 17) @@ -643,17 +561,17 @@ NAME 'gr?n' (2, 0) (2, 4) OP '=' (2, 5) (2, 6) STRING "U'green'" (2, 7) (2, 15) + """) -Async/await extension: - - >>> dump_tokens("async = 1") - ENCODING 'utf-8' (0, 0) (0, 0) + def test_async(self): + # Async/await extension: + self.check_tokenize("async = 1", """\ NAME 'async' (1, 0) (1, 5) OP '=' (1, 6) (1, 7) NUMBER '1' (1, 8) (1, 9) + """) - >>> dump_tokens("a = (async = 1)") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("a = (async = 1)", """\ NAME 'a' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) OP '(' (1, 4) (1, 5) @@ -661,15 +579,15 @@ OP '=' (1, 11) (1, 12) NUMBER '1' (1, 13) (1, 14) OP ')' (1, 14) (1, 15) + """) - >>> dump_tokens("async()") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("async()", """\ NAME 'async' (1, 0) (1, 5) OP '(' (1, 5) (1, 6) OP ')' (1, 6) (1, 7) + """) - >>> dump_tokens("class async(Bar):pass") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("class async(Bar):pass", """\ NAME 'class' (1, 0) (1, 5) NAME 'async' (1, 6) (1, 11) OP '(' (1, 11) (1, 12) @@ -677,28 +595,28 @@ OP ')' (1, 15) (1, 16) OP ':' (1, 16) (1, 17) NAME 'pass' (1, 17) (1, 21) + """) - >>> dump_tokens("class async:pass") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("class async:pass", """\ NAME 'class' (1, 0) (1, 5) NAME 'async' (1, 6) (1, 11) OP ':' (1, 11) (1, 12) NAME 'pass' (1, 12) (1, 16) + """) - >>> dump_tokens("await = 1") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("await = 1", """\ NAME 'await' (1, 0) (1, 5) OP '=' (1, 6) (1, 7) NUMBER '1' 
(1, 8) (1, 9) + """) - >>> dump_tokens("foo.async") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("foo.async", """\ NAME 'foo' (1, 0) (1, 3) OP '.' (1, 3) (1, 4) NAME 'async' (1, 4) (1, 9) + """) - >>> dump_tokens("async for a in b: pass") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("async for a in b: pass", """\ NAME 'async' (1, 0) (1, 5) NAME 'for' (1, 6) (1, 9) NAME 'a' (1, 10) (1, 11) @@ -706,9 +624,9 @@ NAME 'b' (1, 15) (1, 16) OP ':' (1, 16) (1, 17) NAME 'pass' (1, 18) (1, 22) + """) - >>> dump_tokens("async with a as b: pass") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("async with a as b: pass", """\ NAME 'async' (1, 0) (1, 5) NAME 'with' (1, 6) (1, 10) NAME 'a' (1, 11) (1, 12) @@ -716,49 +634,49 @@ NAME 'b' (1, 16) (1, 17) OP ':' (1, 17) (1, 18) NAME 'pass' (1, 19) (1, 23) + """) - >>> dump_tokens("async.foo") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("async.foo", """\ NAME 'async' (1, 0) (1, 5) OP '.' (1, 5) (1, 6) NAME 'foo' (1, 6) (1, 9) + """) - >>> dump_tokens("async") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("async", """\ NAME 'async' (1, 0) (1, 5) + """) - >>> dump_tokens("async\\n#comment\\nawait") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("async\n#comment\nawait", """\ NAME 'async' (1, 0) (1, 5) NEWLINE '\\n' (1, 5) (1, 6) COMMENT '#comment' (2, 0) (2, 8) NL '\\n' (2, 8) (2, 9) NAME 'await' (3, 0) (3, 5) + """) - >>> dump_tokens("async\\n...\\nawait") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("async\n...\nawait", """\ NAME 'async' (1, 0) (1, 5) NEWLINE '\\n' (1, 5) (1, 6) OP '...' (2, 0) (2, 3) NEWLINE '\\n' (2, 3) (2, 4) NAME 'await' (3, 0) (3, 5) + """) - >>> dump_tokens("async\\nawait") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("async\nawait", """\ NAME 'async' (1, 0) (1, 5) NEWLINE '\\n' (1, 5) (1, 6) NAME 'await' (2, 0) (2, 5) + """) - >>> dump_tokens("foo.async + 1") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("foo.async + 1", """\ NAME 'foo' (1, 0) (1, 3) OP '.' (1, 3) (1, 4) NAME 'async' (1, 4) (1, 9) OP '+' (1, 10) (1, 11) NUMBER '1' (1, 12) (1, 13) + """) - >>> dump_tokens("async def foo(): pass") - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize("async def foo(): pass", """\ ASYNC 'async' (1, 0) (1, 5) NAME 'def' (1, 6) (1, 9) NAME 'foo' (1, 10) (1, 13) @@ -766,15 +684,16 @@ OP ')' (1, 14) (1, 15) OP ':' (1, 15) (1, 16) NAME 'pass' (1, 17) (1, 21) + """) - >>> dump_tokens('''async def foo(): - ... def foo(await): - ... await = 1 - ... if 1: - ... await - ... async += 1 - ... ''') - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize('''\ +async def foo(): + def foo(await): + await = 1 + if 1: + await +async += 1 +''', """\ ASYNC 'async' (1, 0) (1, 5) NAME 'def' (1, 6) (1, 9) NAME 'foo' (1, 10) (1, 13) @@ -809,10 +728,11 @@ OP '+=' (6, 6) (6, 8) NUMBER '1' (6, 9) (6, 10) NEWLINE '\\n' (6, 10) (6, 11) + """) - >>> dump_tokens('''async def foo(): - ... 
async for i in 1: pass''') - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize('''\ +async def foo(): + async for i in 1: pass''', """\ ASYNC 'async' (1, 0) (1, 5) NAME 'def' (1, 6) (1, 9) NAME 'foo' (1, 10) (1, 13) @@ -829,9 +749,9 @@ OP ':' (2, 18) (2, 19) NAME 'pass' (2, 20) (2, 24) DEDENT '' (3, 0) (3, 0) + """) - >>> dump_tokens('''async def foo(async): await''') - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize('''async def foo(async): await''', """\ ASYNC 'async' (1, 0) (1, 5) NAME 'def' (1, 6) (1, 9) NAME 'foo' (1, 10) (1, 13) @@ -840,14 +760,15 @@ OP ')' (1, 19) (1, 20) OP ':' (1, 20) (1, 21) AWAIT 'await' (1, 22) (1, 27) + """) - >>> dump_tokens('''def f(): - ... - ... def baz(): pass - ... async def bar(): pass - ... - ... await = 2''') - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize('''\ +def f(): + + def baz(): pass + async def bar(): pass + + await = 2''', """\ NAME 'def' (1, 0) (1, 3) NAME 'f' (1, 4) (1, 5) OP '(' (1, 5) (1, 6) @@ -876,14 +797,15 @@ OP '=' (6, 8) (6, 9) NUMBER '2' (6, 10) (6, 11) DEDENT '' (7, 0) (7, 0) + """) - >>> dump_tokens('''async def f(): - ... - ... def baz(): pass - ... async def bar(): pass - ... - ... await = 2''') - ENCODING 'utf-8' (0, 0) (0, 0) + self.check_tokenize('''\ +async def f(): + + def baz(): pass + async def bar(): pass + + await = 2''', """\ ASYNC 'async' (1, 0) (1, 5) NAME 'def' (1, 6) (1, 9) NAME 'f' (1, 10) (1, 11) @@ -913,89 +835,10 @@ OP '=' (6, 8) (6, 9) NUMBER '2' (6, 10) (6, 11) DEDENT '' (7, 0) (7, 0) -""" + """) -from test import support -from tokenize import (tokenize, _tokenize, untokenize, NUMBER, NAME, OP, - STRING, ENDMARKER, ENCODING, tok_name, detect_encoding, - open as tokenize_open, Untokenizer) -from io import BytesIO -from unittest import TestCase, mock -import os -import token -def dump_tokens(s): - """Print out the tokens in s in a table format. - - The ENDMARKER is omitted. - """ - f = BytesIO(s.encode('utf-8')) - for type, token, start, end, line in tokenize(f.readline): - if type == ENDMARKER: - break - type = tok_name[type] - print("%(type)-10.10s %(token)-13.13r %(start)s %(end)s" % locals()) - -def roundtrip(f): - """ - Test roundtrip for `untokenize`. `f` is an open file or a string. - The source code in f is tokenized to both 5- and 2-tuples. - Both sequences are converted back to source code via - tokenize.untokenize(), and the latter tokenized again to 2-tuples. - The test fails if the 3 pair tokenizations do not match. - - When untokenize bugs are fixed, untokenize with 5-tuples should - reproduce code that does not contain a backslash continuation - following spaces. A proper test should test this. - - This function would be more useful for correcting bugs if it reported - the first point of failure, like assertEqual, rather than just - returning False -- or if it were only used in unittests and not - doctest and actually used assertEqual. 
- """ - # Get source code and original tokenizations - if isinstance(f, str): - code = f.encode('utf-8') - else: - code = f.read() - f.close() - readline = iter(code.splitlines(keepends=True)).__next__ - tokens5 = list(tokenize(readline)) - tokens2 = [tok[:2] for tok in tokens5] - # Reproduce tokens2 from pairs - bytes_from2 = untokenize(tokens2) - readline2 = iter(bytes_from2.splitlines(keepends=True)).__next__ - tokens2_from2 = [tok[:2] for tok in tokenize(readline2)] - # Reproduce tokens2 from 5-tuples - bytes_from5 = untokenize(tokens5) - readline5 = iter(bytes_from5.splitlines(keepends=True)).__next__ - tokens2_from5 = [tok[:2] for tok in tokenize(readline5)] - # Compare 3 versions - return tokens2 == tokens2_from2 == tokens2_from5 - -# This is an example from the docs, set up as a doctest. def decistmt(s): - """Substitute Decimals for floats in a string of statements. - - >>> from decimal import Decimal - >>> s = 'print(+21.3e-5*-.1234/81.7)' - >>> decistmt(s) - "print (+Decimal ('21.3e-5')*-Decimal ('.1234')/Decimal ('81.7'))" - - The format of the exponent is inherited from the platform C library. - Known cases are "e-007" (Windows) and "e-07" (not Windows). Since - we're only showing 11 digits, and the 12th isn't close to 5, the - rest of the output should be platform-independent. - - >>> exec(s) #doctest: +ELLIPSIS - -3.2171603427...e-0...7 - - Output from calculations with Decimal should be identical across all - platforms. - - >>> exec(decistmt(s)) - -3.217160342717258261933904529E-7 - """ result = [] g = tokenize(BytesIO(s.encode('utf-8')).readline) # tokenize the string for toknum, tokval, _, _, _ in g: @@ -1010,6 +853,28 @@ result.append((toknum, tokval)) return untokenize(result).decode('utf-8') +class TestMisc(TestCase): + + def test_decistmt(self): + # Substitute Decimals for floats in a string of statements. + # This is an example from the docs. + + from decimal import Decimal + s = '+21.3e-5*-.1234/81.7' + self.assertEqual(decistmt(s), + "+Decimal ('21.3e-5')*-Decimal ('.1234')/Decimal ('81.7')") + + # The format of the exponent is inherited from the platform C library. + # Known cases are "e-007" (Windows) and "e-07" (not Windows). Since + # we're only showing 11 digits, and the 12th isn't close to 5, the + # rest of the output should be platform-independent. + self.assertRegex(repr(eval(s)), '-3.2171603427[0-9]*e-0+7') + + # Output from calculations with Decimal should be identical across all + # platforms. 
+ self.assertEqual(eval(decistmt(s)), + Decimal('-3.217160342717258261933904529E-7')) + class TestTokenizerAdheresToPep0263(TestCase): """ @@ -1018,11 +883,11 @@ def _testFile(self, filename): path = os.path.join(os.path.dirname(__file__), filename) - return roundtrip(open(path, 'rb')) + TestRoundtrip.check_roundtrip(self, open(path, 'rb')) def test_utf8_coding_cookie_and_no_utf8_bom(self): f = 'tokenize_tests-utf8-coding-cookie-and-no-utf8-bom-sig.txt' - self.assertTrue(self._testFile(f)) + self._testFile(f) def test_latin1_coding_cookie_and_utf8_bom(self): """ @@ -1037,11 +902,11 @@ def test_no_coding_cookie_and_utf8_bom(self): f = 'tokenize_tests-no-coding-cookie-and-utf8-bom-sig-only.txt' - self.assertTrue(self._testFile(f)) + self._testFile(f) def test_utf8_coding_cookie_and_utf8_bom(self): f = 'tokenize_tests-utf8-coding-cookie-and-utf8-bom-sig.txt' - self.assertTrue(self._testFile(f)) + self._testFile(f) def test_bad_coding_cookie(self): self.assertRaises(SyntaxError, self._testFile, 'bad_coding.py') @@ -1340,7 +1205,6 @@ self.assertTrue(m.closed) - class TestTokenize(TestCase): def test_tokenize(self): @@ -1472,6 +1336,7 @@ # See http://bugs.python.org/issue16152 self.assertExactTypeEqual('@ ', token.AT) + class UntokenizeTest(TestCase): def test_bad_input_order(self): @@ -1497,7 +1362,7 @@ u.prev_row = 2 u.add_whitespace((4, 4)) self.assertEqual(u.tokens, ['\\\n', '\\\n\\\n', ' ']) - self.assertTrue(roundtrip('a\n b\n c\n \\\n c\n')) + TestRoundtrip.check_roundtrip(self, 'a\n b\n c\n \\\n c\n') def test_iter_compat(self): u = Untokenizer() @@ -1514,6 +1379,131 @@ class TestRoundtrip(TestCase): + + def check_roundtrip(self, f): + """ + Test roundtrip for `untokenize`. `f` is an open file or a string. + The source code in f is tokenized to both 5- and 2-tuples. + Both sequences are converted back to source code via + tokenize.untokenize(), and the latter tokenized again to 2-tuples. + The test fails if the 3 pair tokenizations do not match. + + When untokenize bugs are fixed, untokenize with 5-tuples should + reproduce code that does not contain a backslash continuation + following spaces. A proper test should test this. + """ + # Get source code and original tokenizations + if isinstance(f, str): + code = f.encode('utf-8') + else: + code = f.read() + f.close() + readline = iter(code.splitlines(keepends=True)).__next__ + tokens5 = list(tokenize(readline)) + tokens2 = [tok[:2] for tok in tokens5] + # Reproduce tokens2 from pairs + bytes_from2 = untokenize(tokens2) + readline2 = iter(bytes_from2.splitlines(keepends=True)).__next__ + tokens2_from2 = [tok[:2] for tok in tokenize(readline2)] + self.assertEqual(tokens2_from2, tokens2) + # Reproduce tokens2 from 5-tuples + bytes_from5 = untokenize(tokens5) + readline5 = iter(bytes_from5.splitlines(keepends=True)).__next__ + tokens2_from5 = [tok[:2] for tok in tokenize(readline5)] + self.assertEqual(tokens2_from5, tokens2) + + def test_roundtrip(self): + # There are some standard formatting practices that are easy to get right. + + self.check_roundtrip("if x == 1:\n" + " print(x)\n") + self.check_roundtrip("# This is a comment\n" + "# This also") + + # Some people use different formatting conventions, which makes + # untokenize a little trickier. Note that this test involves trailing + # whitespace after the colon. Note that we use hex escapes to make the + # two trailing blanks apparent in the expected output. 
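The round trip that check_roundtrip() asserts can be sketched in a few lines, assuming only the standard tokenize module (the trailing-whitespace case this comment describes continues right below):

    from io import BytesIO
    from tokenize import tokenize, untokenize

    source = b"if x == 1:\n    print(x)\n"
    tokens5 = list(tokenize(BytesIO(source).readline))
    tokens2 = [tok[:2] for tok in tokens5]          # (type, string) pairs
    # Compatibility (2-tuple) mode may change spacing, but re-tokenizing the
    # result must yield the same (type, string) pairs.
    rebuilt = untokenize(tokens2)
    assert [tok[:2] for tok in tokenize(BytesIO(rebuilt).readline)] == tokens2

check_roundtrip() additionally repeats the same comparison starting from the full 5-tuples.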
+ + self.check_roundtrip("if x == 1 : \n" + " print(x)\n") + fn = support.findfile("tokenize_tests.txt") + with open(fn, 'rb') as f: + self.check_roundtrip(f) + self.check_roundtrip("if x == 1:\n" + " # A comment by itself.\n" + " print(x) # Comment here, too.\n" + " # Another comment.\n" + "after_if = True\n") + self.check_roundtrip("if (x # The comments need to go in the right place\n" + " == 1):\n" + " print('x==1')\n") + self.check_roundtrip("class Test: # A comment here\n" + " # A comment with weird indent\n" + " after_com = 5\n" + " def x(m): return m*5 # a one liner\n" + " def y(m): # A whitespace after the colon\n" + " return y*4 # 3-space indent\n") + + # Some error-handling code + self.check_roundtrip("try: import somemodule\n" + "except ImportError: # comment\n" + " print('Can not import' # comment2\n)" + "else: print('Loaded')\n") + + def test_continuation(self): + # Balancing continuation + self.check_roundtrip("a = (3,4, \n" + "5,6)\n" + "y = [3, 4,\n" + "5]\n" + "z = {'a': 5,\n" + "'b':15, 'c':True}\n" + "x = len(y) + 5 - a[\n" + "3] - a[2]\n" + "+ len(z) - z[\n" + "'b']\n") + + def test_backslash_continuation(self): + # Backslash means line continuation, except for comments + self.check_roundtrip("x=1+\\\n" + "1\n" + "# This is a comment\\\n" + "# This also\n") + self.check_roundtrip("# Comment \\\n" + "x = 0") + + def test_string_concatenation(self): + # Two string literals on the same line + self.check_roundtrip("'' ''") + + def test_random_files(self): + # Test roundtrip on random python modules. + # pass the '-ucpu' option to process the full directory. + + import glob, random + fn = support.findfile("tokenize_tests.txt") + tempdir = os.path.dirname(fn) or os.curdir + testfiles = glob.glob(os.path.join(tempdir, "test*.py")) + + # Tokenize is broken on test_pep3131.py because regular expressions are + # broken on the obscure unicode identifiers in it. *sigh* + # With roundtrip extended to test the 5-tuple mode of untokenize, + # 7 more testfiles fail. Remove them also until the failure is diagnosed. 
+ + testfiles.remove(os.path.join(tempdir, "test_pep3131.py")) + for f in ('buffer', 'builtin', 'fileio', 'inspect', 'os', 'platform', 'sys'): + testfiles.remove(os.path.join(tempdir, "test_%s.py") % f) + + if not support.is_resource_enabled("cpu"): + testfiles = random.sample(testfiles, 10) + + for testfile in testfiles: + with open(testfile, 'rb') as f: + with self.subTest(file=testfile): + self.check_roundtrip(f) + + def roundtrip(self, code): if isinstance(code, str): code = code.encode('utf-8') @@ -1527,19 +1517,8 @@ code = "if False:\n\tx=3\n\tx=3\n" codelines = self.roundtrip(code).split('\n') self.assertEqual(codelines[1], codelines[2]) + self.check_roundtrip(code) -__test__ = {"doctests" : doctests, 'decistmt': decistmt} - -def test_main(): - from test import test_tokenize - support.run_doctest(test_tokenize, True) - support.run_unittest(TestTokenizerAdheresToPep0263) - support.run_unittest(Test_Tokenize) - support.run_unittest(TestDetectEncoding) - support.run_unittest(TestTokenize) - support.run_unittest(UntokenizeTest) - support.run_unittest(TestRoundtrip) - if __name__ == "__main__": - test_main() + unittest.main() -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Oct 6 19:30:04 2015 From: python-checkins at python.org (alexander.belopolsky) Date: Tue, 06 Oct 2015 17:30:04 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Closes_issue_=2312006=3A_A?= =?utf-8?q?dd_ISO_8601_year=2C_week=2C_and_day_directives_to_strptime=2E?= Message-ID: <20151006173002.7236.2403@psf.io> https://hg.python.org/cpython/rev/acdebfbfbdcf changeset: 98564:acdebfbfbdcf user: Alexander Belopolsky date: Tue Oct 06 13:29:56 2015 -0400 summary: Closes issue #12006: Add ISO 8601 year, week, and day directives to strptime. This commit adds %G, %V, and %u directives to strptime. Thanks Ashley Anderson for the implementation. files: Doc/library/datetime.rst | 37 ++++++++++++- Doc/whatsnew/3.6.rst | 8 ++ Lib/_strptime.py | 81 ++++++++++++++++++++++---- Lib/test/test_strptime.py | 57 ++++++++++++++---- Misc/NEWS | 3 + 5 files changed, 159 insertions(+), 27 deletions(-) diff --git a/Doc/library/datetime.rst b/Doc/library/datetime.rst --- a/Doc/library/datetime.rst +++ b/Doc/library/datetime.rst @@ -1909,6 +1909,34 @@ | ``%%`` | A literal ``'%'`` character. | % | | +-----------+--------------------------------+------------------------+-------+ +Several additional directives not required by the C89 standard are included for +convenience. These parameters all correspond to ISO 8601 date values. These +may not be available on all platforms when used with the :meth:`strftime` +method. The ISO 8601 year and ISO 8601 week directives are not interchangeable +with the year and week number directives above. Calling :meth:`strptime` with +incomplete or ambiguous ISO 8601 directives will raise a :exc:`ValueError`. + ++-----------+--------------------------------+------------------------+-------+ +| Directive | Meaning | Example | Notes | ++===========+================================+========================+=======+ +| ``%G`` | ISO 8601 year with century | 0001, 0002, ..., 2013, | \(8) | +| | representing the year that | 2014, ..., 9998, 9999 | | +| | contains the greater part of | | | +| | the ISO week (``%V``). | | | ++-----------+--------------------------------+------------------------+-------+ +| ``%u`` | ISO 8601 weekday as a decimal | 1, 2, ..., 7 | | +| | number where 1 is Monday. 
| | | ++-----------+--------------------------------+------------------------+-------+ +| ``%V`` | ISO 8601 week as a decimal | 01, 02, ..., 53 | \(8) | +| | number with Monday as | | | +| | the first day of the week. | | | +| | Week 01 is the week containing | | | +| | Jan 4. | | | ++-----------+--------------------------------+------------------------+-------+ + +.. versionadded:: 3.6 + ``%G``, ``%u`` and ``%V`` were added. + Notes: (1) @@ -1973,7 +2001,14 @@ (7) When used with the :meth:`strptime` method, ``%U`` and ``%W`` are only used - in calculations when the day of the week and the year are specified. + in calculations when the day of the week and the calendar year (``%Y``) + are specified. + +(8) + Similar to ``%U`` and ``%W``, ``%V`` is only used in calculations when the + day of the week and the ISO year (``%G``) are specified in a + :meth:`strptime` format string. Also note that ``%G`` and ``%Y`` are not + interchangable. .. rubric:: Footnotes diff --git a/Doc/whatsnew/3.6.rst b/Doc/whatsnew/3.6.rst --- a/Doc/whatsnew/3.6.rst +++ b/Doc/whatsnew/3.6.rst @@ -110,6 +110,14 @@ with underscores. A space or a colon can be added after completed keyword. (Contributed by Serhiy Storchaka in :issue:`25011` and :issue:`25209`.) +datetime +-------- + +* :meth:`datetime.stftime ` and + :meth:`date.stftime ` methods now support ISO 8601 + date directives ``%G``, ``%u`` and ``%V``. + (Contributed by Ashley Anderson in :issue:`12006`.) + Optimizations ============= diff --git a/Lib/_strptime.py b/Lib/_strptime.py --- a/Lib/_strptime.py +++ b/Lib/_strptime.py @@ -195,12 +195,15 @@ 'f': r"(?P[0-9]{1,6})", 'H': r"(?P2[0-3]|[0-1]\d|\d)", 'I': r"(?P1[0-2]|0[1-9]|[1-9])", + 'G': r"(?P\d\d\d\d)", 'j': r"(?P36[0-6]|3[0-5]\d|[1-2]\d\d|0[1-9]\d|00[1-9]|[1-9]\d|0[1-9]|[1-9])", 'm': r"(?P1[0-2]|0[1-9]|[1-9])", 'M': r"(?P[0-5]\d|\d)", 'S': r"(?P6[0-1]|[0-5]\d|\d)", 'U': r"(?P5[0-3]|[0-4]\d|\d)", 'w': r"(?P[0-6])", + 'u': r"(?P[1-7])", + 'V': r"(?P5[0-3]|0[1-9]|[1-4]\d|\d)", # W is set below by using 'U' 'y': r"(?P\d\d)", #XXX: Does 'Y' need to worry about having less or more than @@ -295,6 +298,22 @@ return 1 + days_to_week + day_of_week +def _calc_julian_from_V(iso_year, iso_week, iso_weekday): + """Calculate the Julian day based on the ISO 8601 year, week, and weekday. + ISO weeks start on Mondays, with week 01 being the week containing 4 Jan. + ISO week days range from 1 (Monday) to 7 (Sunday). 
+ """ + correction = datetime_date(iso_year, 1, 4).isoweekday() + 3 + ordinal = (iso_week * 7) + iso_weekday - correction + # ordinal may be negative or 0 now, which means the date is in the previous + # calendar year + if ordinal < 1: + ordinal += datetime_date(iso_year, 1, 1).toordinal() + iso_year -= 1 + ordinal -= datetime_date(iso_year, 1, 1).toordinal() + return iso_year, ordinal + + def _strptime(data_string, format="%a %b %d %H:%M:%S %Y"): """Return a 2-tuple consisting of a time struct and an int containing the number of microseconds based on the input string and the @@ -339,15 +358,15 @@ raise ValueError("unconverted data remains: %s" % data_string[found.end():]) - year = None + iso_year = year = None month = day = 1 hour = minute = second = fraction = 0 tz = -1 tzoffset = None # Default to -1 to signify that values not known; not critical to have, # though - week_of_year = -1 - week_of_year_start = -1 + iso_week = week_of_year = None + week_of_year_start = None # weekday and julian defaulted to None so as to signal need to calculate # values weekday = julian = None @@ -369,6 +388,8 @@ year += 1900 elif group_key == 'Y': year = int(found_dict['Y']) + elif group_key == 'G': + iso_year = int(found_dict['G']) elif group_key == 'm': month = int(found_dict['m']) elif group_key == 'B': @@ -414,6 +435,9 @@ weekday = 6 else: weekday -= 1 + elif group_key == 'u': + weekday = int(found_dict['u']) + weekday -= 1 elif group_key == 'j': julian = int(found_dict['j']) elif group_key in ('U', 'W'): @@ -424,6 +448,8 @@ else: # W starts week on Monday. week_of_year_start = 0 + elif group_key == 'V': + iso_week = int(found_dict['V']) elif group_key == 'z': z = found_dict['z'] tzoffset = int(z[1:3]) * 60 + int(z[3:5]) @@ -444,28 +470,57 @@ else: tz = value break + # Deal with the cases where ambiguities arize + # don't assume default values for ISO week/year + if year is None and iso_year is not None: + if iso_week is None or weekday is None: + raise ValueError("ISO year directive '%G' must be used with " + "the ISO week directive '%V' and a weekday " + "directive ('%A', '%a', '%w', or '%u').") + if julian is not None: + raise ValueError("Day of the year directive '%j' is not " + "compatible with ISO year directive '%G'. " + "Use '%Y' instead.") + elif week_of_year is None and iso_week is not None: + if weekday is None: + raise ValueError("ISO week directive '%V' must be used with " + "the ISO year directive '%G' and a weekday " + "directive ('%A', '%a', '%w', or '%u').") + else: + raise ValueError("ISO week directive '%V' is incompatible with " + "the year directive '%Y'. Use the ISO year '%G' " + "instead.") + leap_year_fix = False if year is None and month == 2 and day == 29: year = 1904 # 1904 is first leap year of 20th century leap_year_fix = True elif year is None: year = 1900 + + # If we know the week of the year and what day of that week, we can figure # out the Julian day of the year. - if julian is None and week_of_year != -1 and weekday is not None: - week_starts_Mon = True if week_of_year_start == 0 else False - julian = _calc_julian_from_U_or_W(year, week_of_year, weekday, - week_starts_Mon) - # Cannot pre-calculate datetime_date() since can change in Julian - # calculation and thus could have different value for the day of the week - # calculation. 
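To make the new ISO week arithmetic concrete, here is a worked example, with values taken from the strptime test cases below and assuming the _calc_julian_from_V() helper defined above:

    from datetime import date

    # iso_year=2015, iso_week=1, iso_weekday=4 (Thursday)
    correction = date(2015, 1, 4).isoweekday() + 3   # 7 + 3 = 10
    ordinal = 1*7 + 4 - 10                           # = 1, i.e. 2015-01-01
    # matches check('2015 1 4', '%G %V %u', 2015, 1, 1, ...) in test_strptime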
+ if julian is None and weekday is not None: + if week_of_year is not None: + week_starts_Mon = True if week_of_year_start == 0 else False + julian = _calc_julian_from_U_or_W(year, week_of_year, weekday, + week_starts_Mon) + elif iso_year is not None and iso_week is not None: + year, julian = _calc_julian_from_V(iso_year, iso_week, weekday + 1) + if julian is None: + # Cannot pre-calculate datetime_date() since can change in Julian + # calculation and thus could have different value for the day of + # the week calculation. # Need to add 1 to result since first day of the year is 1, not 0. julian = datetime_date(year, month, day).toordinal() - \ datetime_date(year, 1, 1).toordinal() + 1 - else: # Assume that if they bothered to include Julian day it will - # be accurate. - datetime_result = datetime_date.fromordinal((julian - 1) + datetime_date(year, 1, 1).toordinal()) + else: # Assume that if they bothered to include Julian day (or if it was + # calculated above with year/week/weekday) it will be accurate. + datetime_result = datetime_date.fromordinal( + (julian - 1) + + datetime_date(year, 1, 1).toordinal()) year = datetime_result.year month = datetime_result.month day = datetime_result.day diff --git a/Lib/test/test_strptime.py b/Lib/test/test_strptime.py --- a/Lib/test/test_strptime.py +++ b/Lib/test/test_strptime.py @@ -152,8 +152,8 @@ "'%s' using '%s'; group 'a' = '%s', group 'b' = %s'" % (found.string, found.re.pattern, found.group('a'), found.group('b'))) - for directive in ('a','A','b','B','c','d','H','I','j','m','M','p','S', - 'U','w','W','x','X','y','Y','Z','%'): + for directive in ('a','A','b','B','c','d','G','H','I','j','m','M','p', + 'S','u','U','V','w','W','x','X','y','Y','Z','%'): compiled = self.time_re.compile("%" + directive) found = compiled.match(time.strftime("%" + directive)) self.assertTrue(found, "Matching failed on '%s' using '%s' regex" % @@ -218,6 +218,26 @@ else: self.fail("'%s' did not raise ValueError" % bad_format) + # Ambiguous or incomplete cases using ISO year/week/weekday directives + # 1. ISO week (%V) is specified, but the year is specified with %Y + # instead of %G + with self.assertRaises(ValueError): + _strptime._strptime("1999 50", "%Y %V") + # 2. ISO year (%G) and ISO week (%V) are specified, but weekday is not + with self.assertRaises(ValueError): + _strptime._strptime("1999 51", "%G %V") + # 3. ISO year (%G) and weekday are specified, but ISO week (%V) is not + for w in ('A', 'a', 'w', 'u'): + with self.assertRaises(ValueError): + _strptime._strptime("1999 51","%G %{}".format(w)) + # 4. ISO year is specified alone (e.g. time.strptime('2015', '%G')) + with self.assertRaises(ValueError): + _strptime._strptime("2015", "%G") + # 5. 
Julian/ordinal day (%j) is specified with %G, but not %Y + with self.assertRaises(ValueError): + _strptime._strptime("1999 256", "%G %j") + + def test_strptime_exception_context(self): # check that this doesn't chain exceptions needlessly (see #17572) with self.assertRaises(ValueError) as e: @@ -289,7 +309,7 @@ def test_weekday(self): # Test weekday directives - for directive in ('A', 'a', 'w'): + for directive in ('A', 'a', 'w', 'u'): self.helper(directive,6) def test_julian(self): @@ -458,16 +478,20 @@ # Should be able to infer date if given year, week of year (%U or %W) # and day of the week def test_helper(ymd_tuple, test_reason): - for directive in ('W', 'U'): - format_string = "%%Y %%%s %%w" % directive - dt_date = datetime_date(*ymd_tuple) - strp_input = dt_date.strftime(format_string) - strp_output = _strptime._strptime_time(strp_input, format_string) - self.assertTrue(strp_output[:3] == ymd_tuple, - "%s(%s) test failed w/ '%s': %s != %s (%s != %s)" % - (test_reason, directive, strp_input, - strp_output[:3], ymd_tuple, - strp_output[7], dt_date.timetuple()[7])) + for year_week_format in ('%Y %W', '%Y %U', '%G %V'): + for weekday_format in ('%w', '%u', '%a', '%A'): + format_string = year_week_format + ' ' + weekday_format + with self.subTest(test_reason, + date=ymd_tuple, + format=format_string): + dt_date = datetime_date(*ymd_tuple) + strp_input = dt_date.strftime(format_string) + strp_output = _strptime._strptime_time(strp_input, + format_string) + msg = "%r: %s != %s" % (strp_input, + strp_output[7], + dt_date.timetuple()[7]) + self.assertEqual(strp_output[:3], ymd_tuple, msg) test_helper((1901, 1, 3), "week 0") test_helper((1901, 1, 8), "common case") test_helper((1901, 1, 13), "day on Sunday") @@ -499,18 +523,25 @@ self.assertEqual(_strptime._strptime_time(value, format)[:-1], expected) check('2015 0 0', '%Y %U %w', 2014, 12, 28, 0, 0, 0, 6, -3) check('2015 0 0', '%Y %W %w', 2015, 1, 4, 0, 0, 0, 6, 4) + check('2015 1 1', '%G %V %u', 2014, 12, 29, 0, 0, 0, 0, 363) check('2015 0 1', '%Y %U %w', 2014, 12, 29, 0, 0, 0, 0, -2) check('2015 0 1', '%Y %W %w', 2014, 12, 29, 0, 0, 0, 0, -2) + check('2015 1 2', '%G %V %u', 2014, 12, 30, 0, 0, 0, 1, 364) check('2015 0 2', '%Y %U %w', 2014, 12, 30, 0, 0, 0, 1, -1) check('2015 0 2', '%Y %W %w', 2014, 12, 30, 0, 0, 0, 1, -1) + check('2015 1 3', '%G %V %u', 2014, 12, 31, 0, 0, 0, 2, 365) check('2015 0 3', '%Y %U %w', 2014, 12, 31, 0, 0, 0, 2, 0) check('2015 0 3', '%Y %W %w', 2014, 12, 31, 0, 0, 0, 2, 0) + check('2015 1 4', '%G %V %u', 2015, 1, 1, 0, 0, 0, 3, 1) check('2015 0 4', '%Y %U %w', 2015, 1, 1, 0, 0, 0, 3, 1) check('2015 0 4', '%Y %W %w', 2015, 1, 1, 0, 0, 0, 3, 1) + check('2015 1 5', '%G %V %u', 2015, 1, 2, 0, 0, 0, 4, 2) check('2015 0 5', '%Y %U %w', 2015, 1, 2, 0, 0, 0, 4, 2) check('2015 0 5', '%Y %W %w', 2015, 1, 2, 0, 0, 0, 4, 2) + check('2015 1 6', '%G %V %u', 2015, 1, 3, 0, 0, 0, 5, 3) check('2015 0 6', '%Y %U %w', 2015, 1, 3, 0, 0, 0, 5, 3) check('2015 0 6', '%Y %W %w', 2015, 1, 3, 0, 0, 0, 5, 3) + check('2015 1 7', '%G %V %u', 2015, 1, 4, 0, 0, 0, 6, 4) class CacheTests(unittest.TestCase): diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -383,6 +383,9 @@ - Issue #23572: Fixed functools.singledispatch on classes with falsy metaclasses. Patch by Ethan Furman. +- Issue #12006: Add ISO 8601 year, week, and day directives (%G, %V, %u) to + strptime. 
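For reference, a short usage sketch of the new directives, assuming an interpreter that includes this change (slated for 3.6 per the whatsnew entry above):

    from datetime import datetime

    # ISO year 2015, ISO week 01, ISO weekday 4 (Thursday) is 2015-01-01,
    # the same case exercised by check('2015 1 4', '%G %V %u', 2015, 1, 1, ...).
    d = datetime.strptime('2015 1 4', '%G %V %u')
    assert (d.year, d.month, d.day) == (2015, 1, 1)

    # %V mixed with %Y (or a missing weekday) is ambiguous and raises
    # ValueError, as the new checks in _strptime() enforce.
    try:
        datetime.strptime('1999 50', '%Y %V')
    except ValueError:
        pass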
+ Documentation ------------- -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Oct 6 22:23:27 2015 From: python-checkins at python.org (zach.ware) Date: Tue, 06 Oct 2015 20:23:27 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzI1MDk3?= =?utf-8?q?=3A_fix_Windows_error_number_access?= Message-ID: <20151006202326.464.17724@psf.io> https://hg.python.org/cpython/rev/4e7697ccceeb changeset: 98565:4e7697ccceeb branch: 3.4 parent: 98559:91f36d2b097a user: Zachary Ware date: Tue Oct 06 15:22:13 2015 -0500 summary: Issue #25097: fix Windows error number access files: Lib/test/test_logging.py | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Lib/test/test_logging.py b/Lib/test/test_logging.py --- a/Lib/test/test_logging.py +++ b/Lib/test/test_logging.py @@ -4104,7 +4104,7 @@ try: h = logging.handlers.NTEventLogHandler('test_logging') except pywintypes.error as e: - if e[0] == 5: # access denied + if e.winerror == 5: # access denied raise unittest.SkipTest('Insufficient privileges to run test') r = logging.makeLogRecord({'msg': 'Test Log Message'}) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Oct 6 22:23:27 2015 From: python-checkins at python.org (zach.ware) Date: Tue, 06 Oct 2015 20:23:27 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_Issue_=2325097=3A_Merge_with_3=2E4?= Message-ID: <20151006202326.485.26636@psf.io> https://hg.python.org/cpython/rev/440d4da352fa changeset: 98566:440d4da352fa branch: 3.5 parent: 98562:b0ce3ef2ea21 parent: 98565:4e7697ccceeb user: Zachary Ware date: Tue Oct 06 15:22:41 2015 -0500 summary: Issue #25097: Merge with 3.4 files: Lib/test/test_logging.py | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Lib/test/test_logging.py b/Lib/test/test_logging.py --- a/Lib/test/test_logging.py +++ b/Lib/test/test_logging.py @@ -4134,7 +4134,7 @@ try: h = logging.handlers.NTEventLogHandler('test_logging') except pywintypes.error as e: - if e[0] == 5: # access denied + if e.winerror == 5: # access denied raise unittest.SkipTest('Insufficient privileges to run test') r = logging.makeLogRecord({'msg': 'Test Log Message'}) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Oct 6 22:23:32 2015 From: python-checkins at python.org (zach.ware) Date: Tue, 06 Oct 2015 20:23:32 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2325097=3A_Merge_with_3=2E5?= Message-ID: <20151006202331.18370.70039@psf.io> https://hg.python.org/cpython/rev/d91f9fc7b85d changeset: 98567:d91f9fc7b85d parent: 98564:acdebfbfbdcf parent: 98566:440d4da352fa user: Zachary Ware date: Tue Oct 06 15:23:16 2015 -0500 summary: Issue #25097: Merge with 3.5 files: Lib/test/test_logging.py | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Lib/test/test_logging.py b/Lib/test/test_logging.py --- a/Lib/test/test_logging.py +++ b/Lib/test/test_logging.py @@ -4134,7 +4134,7 @@ try: h = logging.handlers.NTEventLogHandler('test_logging') except pywintypes.error as e: - if e[0] == 5: # access denied + if e.winerror == 5: # access denied raise unittest.SkipTest('Insufficient privileges to run test') r = logging.makeLogRecord({'msg': 'Test Log Message'}) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Oct 6 22:29:16 2015 From: python-checkins at python.org (zach.ware) Date: Tue, 06 Oct 2015 20:29:16 
+0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_Issue_=2325097=3A_Merge_with_3=2E4?= Message-ID: <20151006202916.20763.99249@psf.io> https://hg.python.org/cpython/rev/95a26798819b changeset: 98569:95a26798819b branch: 3.5 parent: 98566:440d4da352fa parent: 98568:03a569eb0e0e user: Zachary Ware date: Tue Oct 06 15:28:56 2015 -0500 summary: Issue #25097: Merge with 3.4 files: Lib/test/test_logging.py | 1 + 1 files changed, 1 insertions(+), 0 deletions(-) diff --git a/Lib/test/test_logging.py b/Lib/test/test_logging.py --- a/Lib/test/test_logging.py +++ b/Lib/test/test_logging.py @@ -4136,6 +4136,7 @@ except pywintypes.error as e: if e.winerror == 5: # access denied raise unittest.SkipTest('Insufficient privileges to run test') + raise r = logging.makeLogRecord({'msg': 'Test Log Message'}) h.handle(r) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Oct 6 22:29:17 2015 From: python-checkins at python.org (zach.ware) Date: Tue, 06 Oct 2015 20:29:17 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2325097=3A_Merge_with_3=2E5?= Message-ID: <20151006202916.18370.4720@psf.io> https://hg.python.org/cpython/rev/db782c81bba9 changeset: 98570:db782c81bba9 parent: 98567:d91f9fc7b85d parent: 98569:95a26798819b user: Zachary Ware date: Tue Oct 06 15:29:09 2015 -0500 summary: Issue #25097: Merge with 3.5 files: Lib/test/test_logging.py | 1 + 1 files changed, 1 insertions(+), 0 deletions(-) diff --git a/Lib/test/test_logging.py b/Lib/test/test_logging.py --- a/Lib/test/test_logging.py +++ b/Lib/test/test_logging.py @@ -4136,6 +4136,7 @@ except pywintypes.error as e: if e.winerror == 5: # access denied raise unittest.SkipTest('Insufficient privileges to run test') + raise r = logging.makeLogRecord({'msg': 'Test Log Message'}) h.handle(r) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Oct 6 22:29:20 2015 From: python-checkins at python.org (zach.ware) Date: Tue, 06 Oct 2015 20:29:20 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzI1MDk3?= =?utf-8?q?=3A_Re-raise_any_other_pywin32_error?= Message-ID: <20151006202916.70994.33433@psf.io> https://hg.python.org/cpython/rev/03a569eb0e0e changeset: 98568:03a569eb0e0e branch: 3.4 parent: 98565:4e7697ccceeb user: Zachary Ware date: Tue Oct 06 15:28:43 2015 -0500 summary: Issue #25097: Re-raise any other pywin32 error files: Lib/test/test_logging.py | 1 + 1 files changed, 1 insertions(+), 0 deletions(-) diff --git a/Lib/test/test_logging.py b/Lib/test/test_logging.py --- a/Lib/test/test_logging.py +++ b/Lib/test/test_logging.py @@ -4106,6 +4106,7 @@ except pywintypes.error as e: if e.winerror == 5: # access denied raise unittest.SkipTest('Insufficient privileges to run test') + raise r = logging.makeLogRecord({'msg': 'Test Log Message'}) h.handle(r) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Oct 7 04:42:56 2015 From: python-checkins at python.org (benjamin.peterson) Date: Wed, 07 Oct 2015 02:42:56 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E4=29=3A_prevent_unacce?= =?utf-8?q?ptable_bases_from_becoming_bases_through_multiple_inheritance?= Message-ID: <20151007024256.18362.14028@psf.io> https://hg.python.org/cpython/rev/e670b37e7b14 changeset: 98573:e670b37e7b14 branch: 3.4 parent: 98568:03a569eb0e0e user: Benjamin Peterson date: Tue Oct 06 19:36:54 2015 -0700 summary: prevent unacceptable bases from becoming 
bases through multiple inheritance (#24806) files: Lib/test/test_descr.py | 31 ++++++++++++++++++++++++++++++ Misc/NEWS | 3 ++ Objects/typeobject.c | 12 +++++----- 3 files changed, 40 insertions(+), 6 deletions(-) diff --git a/Lib/test/test_descr.py b/Lib/test/test_descr.py --- a/Lib/test/test_descr.py +++ b/Lib/test/test_descr.py @@ -3735,6 +3735,37 @@ else: assert 0, "best_base calculation found wanting" + def test_unsubclassable_types(self): + with self.assertRaises(TypeError): + class X(type(None)): + pass + with self.assertRaises(TypeError): + class X(object, type(None)): + pass + with self.assertRaises(TypeError): + class X(type(None), object): + pass + class O(object): + pass + with self.assertRaises(TypeError): + class X(O, type(None)): + pass + with self.assertRaises(TypeError): + class X(type(None), O): + pass + + class X(object): + pass + with self.assertRaises(TypeError): + X.__bases__ = type(None), + with self.assertRaises(TypeError): + X.__bases__ = object, type(None) + with self.assertRaises(TypeError): + X.__bases__ = type(None), object + with self.assertRaises(TypeError): + X.__bases__ = O, type(None) + with self.assertRaises(TypeError): + X.__bases__ = type(None), O def test_mutable_bases_with_failing_mro(self): # Testing mutable bases with failing mro... diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,6 +10,9 @@ Core and Builtins ----------------- +- Issue #24806: Prevent builtin types that are not allowed to be subclassed from + being subclassed through multiple inheritance. + - Issue #24848: Fixed a number of bugs in UTF-7 decoding of misformed data. - Issue #25280: Import trace messages emitted in verbose (-v) mode are no diff --git a/Objects/typeobject.c b/Objects/typeobject.c --- a/Objects/typeobject.c +++ b/Objects/typeobject.c @@ -1937,6 +1937,12 @@ if (PyType_Ready(base_i) < 0) return NULL; } + if (!PyType_HasFeature(base_i, Py_TPFLAGS_BASETYPE)) { + PyErr_Format(PyExc_TypeError, + "type '%.100s' is not an acceptable base type", + base_i->tp_name); + return NULL; + } candidate = solid_base(base_i); if (winner == NULL) { winner = candidate; @@ -2317,12 +2323,6 @@ if (base == NULL) { goto error; } - if (!PyType_HasFeature(base, Py_TPFLAGS_BASETYPE)) { - PyErr_Format(PyExc_TypeError, - "type '%.100s' is not an acceptable base type", - base->tp_name); - goto error; - } dict = PyDict_Copy(orig_dict); if (dict == NULL) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Oct 7 04:42:56 2015 From: python-checkins at python.org (benjamin.peterson) Date: Wed, 07 Oct 2015 02:42:56 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_merge_3=2E4_=28=2324806=29?= Message-ID: <20151007024256.3297.13349@psf.io> https://hg.python.org/cpython/rev/e02e4afcce6a changeset: 98574:e02e4afcce6a branch: 3.5 parent: 98569:95a26798819b parent: 98573:e670b37e7b14 user: Benjamin Peterson date: Tue Oct 06 19:42:02 2015 -0700 summary: merge 3.4 (#24806) files: Lib/test/test_descr.py | 31 ++++++++++++++++++++++++++++++ Misc/NEWS | 3 ++ Objects/typeobject.c | 12 +++++----- 3 files changed, 40 insertions(+), 6 deletions(-) diff --git a/Lib/test/test_descr.py b/Lib/test/test_descr.py --- a/Lib/test/test_descr.py +++ b/Lib/test/test_descr.py @@ -3798,6 +3798,37 @@ else: assert 0, "best_base calculation found wanting" + def test_unsubclassable_types(self): + with self.assertRaises(TypeError): + class X(type(None)): + pass + with self.assertRaises(TypeError): + class X(object, type(None)): + pass + 
with self.assertRaises(TypeError): + class X(type(None), object): + pass + class O(object): + pass + with self.assertRaises(TypeError): + class X(O, type(None)): + pass + with self.assertRaises(TypeError): + class X(type(None), O): + pass + + class X(object): + pass + with self.assertRaises(TypeError): + X.__bases__ = type(None), + with self.assertRaises(TypeError): + X.__bases__ = object, type(None) + with self.assertRaises(TypeError): + X.__bases__ = type(None), object + with self.assertRaises(TypeError): + X.__bases__ = O, type(None) + with self.assertRaises(TypeError): + X.__bases__ = type(None), O def test_mutable_bases_with_failing_mro(self): # Testing mutable bases with failing mro... diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -11,6 +11,9 @@ Core and Builtins ----------------- +- Issue #24806: Prevent builtin types that are not allowed to be subclassed from + being subclassed through multiple inheritance. + - Issue #24848: Fixed a number of bugs in UTF-7 decoding of misformed data. - Issue #25280: Import trace messages emitted in verbose (-v) mode are no diff --git a/Objects/typeobject.c b/Objects/typeobject.c --- a/Objects/typeobject.c +++ b/Objects/typeobject.c @@ -1965,6 +1965,12 @@ if (PyType_Ready(base_i) < 0) return NULL; } + if (!PyType_HasFeature(base_i, Py_TPFLAGS_BASETYPE)) { + PyErr_Format(PyExc_TypeError, + "type '%.100s' is not an acceptable base type", + base_i->tp_name); + return NULL; + } candidate = solid_base(base_i); if (winner == NULL) { winner = candidate; @@ -2345,12 +2351,6 @@ if (base == NULL) { goto error; } - if (!PyType_HasFeature(base, Py_TPFLAGS_BASETYPE)) { - PyErr_Format(PyExc_TypeError, - "type '%.100s' is not an acceptable base type", - base->tp_name); - goto error; - } dict = PyDict_Copy(orig_dict); if (dict == NULL) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Oct 7 04:42:57 2015 From: python-checkins at python.org (benjamin.peterson) Date: Wed, 07 Oct 2015 02:42:57 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=282=2E7=29=3A_prevent_unacce?= =?utf-8?q?ptable_bases_from_becoming_bases_through_multiple_inheritance?= Message-ID: <20151007024256.20763.54858@psf.io> https://hg.python.org/cpython/rev/c46ccfac8763 changeset: 98571:c46ccfac8763 branch: 2.7 parent: 98548:69a26f0800b3 user: Benjamin Peterson date: Tue Oct 06 19:36:54 2015 -0700 summary: prevent unacceptable bases from becoming bases through multiple inheritance (#24806) files: Lib/test/test_descr.py | 31 ++++++++++++++++++++++++++++++ Misc/NEWS | 3 ++ Objects/typeobject.c | 13 +++++------ 3 files changed, 40 insertions(+), 7 deletions(-) diff --git a/Lib/test/test_descr.py b/Lib/test/test_descr.py --- a/Lib/test/test_descr.py +++ b/Lib/test/test_descr.py @@ -4065,6 +4065,37 @@ else: assert 0, "best_base calculation found wanting" + def test_unsubclassable_types(self): + with self.assertRaises(TypeError): + class X(types.NoneType): + pass + with self.assertRaises(TypeError): + class X(object, types.NoneType): + pass + with self.assertRaises(TypeError): + class X(types.NoneType, object): + pass + class O(object): + pass + with self.assertRaises(TypeError): + class X(O, types.NoneType): + pass + with self.assertRaises(TypeError): + class X(types.NoneType, O): + pass + + class X(object): + pass + with self.assertRaises(TypeError): + X.__bases__ = types.NoneType, + with self.assertRaises(TypeError): + X.__bases__ = object, types.NoneType + with self.assertRaises(TypeError): + X.__bases__ = types.NoneType, object + with 
self.assertRaises(TypeError): + X.__bases__ = O, types.NoneType + with self.assertRaises(TypeError): + X.__bases__ = types.NoneType, O def test_mutable_bases_with_failing_mro(self): # Testing mutable bases with failing mro... diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,6 +10,9 @@ Core and Builtins ----------------- +- Issue #24806: Prevent builtin types that are not allowed to be subclassed from + being subclassed through multiple inheritance. + - Issue #24848: Fixed a number of bugs in UTF-7 decoding of misformed data. - Issue #25003: os.urandom() doesn't use getentropy() on Solaris because diff --git a/Objects/typeobject.c b/Objects/typeobject.c --- a/Objects/typeobject.c +++ b/Objects/typeobject.c @@ -1724,6 +1724,12 @@ if (PyType_Ready(base_i) < 0) return NULL; } + if (!PyType_HasFeature(base_i, Py_TPFLAGS_BASETYPE)) { + PyErr_Format(PyExc_TypeError, + "type '%.100s' is not an acceptable base type", + base_i->tp_name); + return NULL; + } candidate = solid_base(base_i); if (winner == NULL) { winner = candidate; @@ -2148,13 +2154,6 @@ Py_DECREF(bases); return NULL; } - if (!PyType_HasFeature(base, Py_TPFLAGS_BASETYPE)) { - PyErr_Format(PyExc_TypeError, - "type '%.100s' is not an acceptable base type", - base->tp_name); - Py_DECREF(bases); - return NULL; - } /* Check for a __slots__ sequence variable in dict, and count it */ slots = PyDict_GetItemString(dict, "__slots__"); -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Oct 7 04:42:56 2015 From: python-checkins at python.org (benjamin.peterson) Date: Wed, 07 Oct 2015 02:42:56 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMi43IC0+IDIuNyk6?= =?utf-8?q?_merge_heads?= Message-ID: <20151007024256.18388.80772@psf.io> https://hg.python.org/cpython/rev/60c44a09c5fc changeset: 98572:60c44a09c5fc branch: 2.7 parent: 98571:c46ccfac8763 parent: 98555:7b2af8ee6dfa user: Benjamin Peterson date: Tue Oct 06 19:37:15 2015 -0700 summary: merge heads files: Lib/test/test_tokenize.py | 544 ++++++++++++++----------- 1 files changed, 294 insertions(+), 250 deletions(-) diff --git a/Lib/test/test_tokenize.py b/Lib/test/test_tokenize.py --- a/Lib/test/test_tokenize.py +++ b/Lib/test/test_tokenize.py @@ -1,20 +1,42 @@ -doctests = """ -Tests for the tokenize module. +from test import test_support +from tokenize import (untokenize, generate_tokens, NUMBER, NAME, OP, + STRING, ENDMARKER, tok_name, Untokenizer, tokenize) +from StringIO import StringIO +import os +from unittest import TestCase - >>> import glob, random, sys -The tests can be really simple. Given a small fragment of source -code, print out a table with tokens. The ENDMARKER is omitted for -brevity. +class TokenizeTest(TestCase): + # Tests for the tokenize module. - >>> dump_tokens("1 + 1") + # The tests can be really simple. Given a small fragment of source + # code, print out a table with tokens. The ENDMARKER is omitted for + # brevity. + + def check_tokenize(self, s, expected): + # Format the tokens in s in a table format. + # The ENDMARKER is omitted. 
+ result = [] + f = StringIO(s) + for type, token, start, end, line in generate_tokens(f.readline): + if type == ENDMARKER: + break + type = tok_name[type] + result.append(" %(type)-10.10s %(token)-13.13r %(start)s %(end)s" % + locals()) + self.assertEqual(result, + expected.rstrip().splitlines()) + + + def test_basic(self): + self.check_tokenize("1 + 1", """\ NUMBER '1' (1, 0) (1, 1) OP '+' (1, 2) (1, 3) NUMBER '1' (1, 4) (1, 5) - - >>> dump_tokens("if False:\\n" - ... " # NL\\n" - ... " True = False # NEWLINE\\n") + """) + self.check_tokenize("if False:\n" + " # NL\n" + " True = False # NEWLINE\n", """\ NAME 'if' (1, 0) (1, 2) NAME 'False' (1, 3) (1, 8) OP ':' (1, 8) (1, 9) @@ -28,122 +50,48 @@ COMMENT '# NEWLINE' (3, 17) (3, 26) NEWLINE '\\n' (3, 26) (3, 27) DEDENT '' (4, 0) (4, 0) + """) - >>> indent_error_file = \""" - ... def k(x): - ... x += 2 - ... x += 5 - ... \""" + indent_error_file = """\ +def k(x): + x += 2 + x += 5 +""" + with self.assertRaisesRegexp(IndentationError, + "unindent does not match any " + "outer indentation level"): + for tok in generate_tokens(StringIO(indent_error_file).readline): + pass - >>> for tok in generate_tokens(StringIO(indent_error_file).readline): pass - Traceback (most recent call last): - ... - IndentationError: unindent does not match any outer indentation level - -Test roundtrip for `untokenize`. `f` is an open file or a string. The source -code in f is tokenized, converted back to source code via tokenize.untokenize(), -and tokenized again from the latter. The test fails if the second tokenization -doesn't match the first. - - >>> def roundtrip(f): - ... if isinstance(f, str): f = StringIO(f) - ... token_list = list(generate_tokens(f.readline)) - ... f.close() - ... tokens1 = [tok[:2] for tok in token_list] - ... new_text = untokenize(tokens1) - ... readline = iter(new_text.splitlines(1)).next - ... tokens2 = [tok[:2] for tok in generate_tokens(readline)] - ... return tokens1 == tokens2 - ... - -There are some standard formatting practices that are easy to get right. - - >>> roundtrip("if x == 1:\\n" - ... " print x\\n") - True - - >>> roundtrip("# This is a comment\\n# This also") - True - -Some people use different formatting conventions, which makes -untokenize a little trickier. Note that this test involves trailing -whitespace after the colon. Note that we use hex escapes to make the -two trailing blanks apperant in the expected output. - - >>> roundtrip("if x == 1 : \\n" - ... " print x\\n") - True - - >>> f = test_support.findfile("tokenize_tests" + os.extsep + "txt") - >>> roundtrip(open(f)) - True - - >>> roundtrip("if x == 1:\\n" - ... " # A comment by itself.\\n" - ... " print x # Comment here, too.\\n" - ... " # Another comment.\\n" - ... "after_if = True\\n") - True - - >>> roundtrip("if (x # The comments need to go in the right place\\n" - ... " == 1):\\n" - ... " print 'x==1'\\n") - True - - >>> roundtrip("class Test: # A comment here\\n" - ... " # A comment with weird indent\\n" - ... " after_com = 5\\n" - ... " def x(m): return m*5 # a one liner\\n" - ... " def y(m): # A whitespace after the colon\\n" - ... " return y*4 # 3-space indent\\n") - True - -Some error-handling code - - >>> roundtrip("try: import somemodule\\n" - ... "except ImportError: # comment\\n" - ... " print 'Can not import' # comment2\\n" - ... "else: print 'Loaded'\\n") - True - -Balancing continuation - - >>> roundtrip("a = (3,4, \\n" - ... "5,6)\\n" - ... "y = [3, 4,\\n" - ... "5]\\n" - ... "z = {'a': 5,\\n" - ... "'b':15, 'c':True}\\n" - ... 
"x = len(y) + 5 - a[\\n" - ... "3] - a[2]\\n" - ... "+ len(z) - z[\\n" - ... "'b']\\n") - True - -Ordinary integers and binary operators - - >>> dump_tokens("0xff <= 255") + def test_int(self): + # Ordinary integers and binary operators + self.check_tokenize("0xff <= 255", """\ NUMBER '0xff' (1, 0) (1, 4) OP '<=' (1, 5) (1, 7) NUMBER '255' (1, 8) (1, 11) - >>> dump_tokens("0b10 <= 255") + """) + self.check_tokenize("0b10 <= 255", """\ NUMBER '0b10' (1, 0) (1, 4) OP '<=' (1, 5) (1, 7) NUMBER '255' (1, 8) (1, 11) - >>> dump_tokens("0o123 <= 0123") + """) + self.check_tokenize("0o123 <= 0123", """\ NUMBER '0o123' (1, 0) (1, 5) OP '<=' (1, 6) (1, 8) NUMBER '0123' (1, 9) (1, 13) - >>> dump_tokens("01234567 > ~0x15") + """) + self.check_tokenize("01234567 > ~0x15", """\ NUMBER '01234567' (1, 0) (1, 8) OP '>' (1, 9) (1, 10) OP '~' (1, 11) (1, 12) NUMBER '0x15' (1, 12) (1, 16) - >>> dump_tokens("2134568 != 01231515") + """) + self.check_tokenize("2134568 != 01231515", """\ NUMBER '2134568' (1, 0) (1, 7) OP '!=' (1, 8) (1, 10) NUMBER '01231515' (1, 11) (1, 19) - >>> dump_tokens("(-124561-1) & 0200000000") + """) + self.check_tokenize("(-124561-1) & 0200000000", """\ OP '(' (1, 0) (1, 1) OP '-' (1, 1) (1, 2) NUMBER '124561' (1, 2) (1, 8) @@ -152,78 +100,93 @@ OP ')' (1, 10) (1, 11) OP '&' (1, 12) (1, 13) NUMBER '0200000000' (1, 14) (1, 24) - >>> dump_tokens("0xdeadbeef != -1") + """) + self.check_tokenize("0xdeadbeef != -1", """\ NUMBER '0xdeadbeef' (1, 0) (1, 10) OP '!=' (1, 11) (1, 13) OP '-' (1, 14) (1, 15) NUMBER '1' (1, 15) (1, 16) - >>> dump_tokens("0xdeadc0de & 012345") + """) + self.check_tokenize("0xdeadc0de & 012345", """\ NUMBER '0xdeadc0de' (1, 0) (1, 10) OP '&' (1, 11) (1, 12) NUMBER '012345' (1, 13) (1, 19) - >>> dump_tokens("0xFF & 0x15 | 1234") + """) + self.check_tokenize("0xFF & 0x15 | 1234", """\ NUMBER '0xFF' (1, 0) (1, 4) OP '&' (1, 5) (1, 6) NUMBER '0x15' (1, 7) (1, 11) OP '|' (1, 12) (1, 13) NUMBER '1234' (1, 14) (1, 18) + """) -Long integers - - >>> dump_tokens("x = 0L") + def test_long(self): + # Long integers + self.check_tokenize("x = 0L", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '0L' (1, 4) (1, 6) - >>> dump_tokens("x = 0xfffffffffff") + """) + self.check_tokenize("x = 0xfffffffffff", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '0xffffffffff (1, 4) (1, 17) - >>> dump_tokens("x = 123141242151251616110l") + """) + self.check_tokenize("x = 123141242151251616110l", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '123141242151 (1, 4) (1, 26) - >>> dump_tokens("x = -15921590215012591L") + """) + self.check_tokenize("x = -15921590215012591L", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) OP '-' (1, 4) (1, 5) NUMBER '159215902150 (1, 5) (1, 23) + """) -Floating point numbers - - >>> dump_tokens("x = 3.14159") + def test_float(self): + # Floating point numbers + self.check_tokenize("x = 3.14159", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '3.14159' (1, 4) (1, 11) - >>> dump_tokens("x = 314159.") + """) + self.check_tokenize("x = 314159.", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '314159.' 
(1, 4) (1, 11) - >>> dump_tokens("x = .314159") + """) + self.check_tokenize("x = .314159", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '.314159' (1, 4) (1, 11) - >>> dump_tokens("x = 3e14159") + """) + self.check_tokenize("x = 3e14159", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '3e14159' (1, 4) (1, 11) - >>> dump_tokens("x = 3E123") + """) + self.check_tokenize("x = 3E123", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '3E123' (1, 4) (1, 9) - >>> dump_tokens("x+y = 3e-1230") + """) + self.check_tokenize("x+y = 3e-1230", """\ NAME 'x' (1, 0) (1, 1) OP '+' (1, 1) (1, 2) NAME 'y' (1, 2) (1, 3) OP '=' (1, 4) (1, 5) NUMBER '3e-1230' (1, 6) (1, 13) - >>> dump_tokens("x = 3.14e159") + """) + self.check_tokenize("x = 3.14e159", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '3.14e159' (1, 4) (1, 12) + """) -String literals - - >>> dump_tokens("x = ''; y = \\\"\\\"") + def test_string(self): + # String literals + self.check_tokenize("x = ''; y = \"\"", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) STRING "''" (1, 4) (1, 6) @@ -231,7 +194,8 @@ NAME 'y' (1, 8) (1, 9) OP '=' (1, 10) (1, 11) STRING '""' (1, 12) (1, 14) - >>> dump_tokens("x = '\\\"'; y = \\\"'\\\"") + """) + self.check_tokenize("x = '\"'; y = \"'\"", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) STRING '\\'"\\'' (1, 4) (1, 7) @@ -239,25 +203,29 @@ NAME 'y' (1, 9) (1, 10) OP '=' (1, 11) (1, 12) STRING '"\\'"' (1, 13) (1, 16) - >>> dump_tokens("x = \\\"doesn't \\\"shrink\\\", does it\\\"") + """) + self.check_tokenize("x = \"doesn't \"shrink\", does it\"", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) STRING '"doesn\\'t "' (1, 4) (1, 14) NAME 'shrink' (1, 14) (1, 20) STRING '", does it"' (1, 20) (1, 31) - >>> dump_tokens("x = u'abc' + U'ABC'") + """) + self.check_tokenize("x = u'abc' + U'ABC'", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) STRING "u'abc'" (1, 4) (1, 10) OP '+' (1, 11) (1, 12) STRING "U'ABC'" (1, 13) (1, 19) - >>> dump_tokens('y = u"ABC" + U"ABC"') + """) + self.check_tokenize('y = u"ABC" + U"ABC"', """\ NAME 'y' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) STRING 'u"ABC"' (1, 4) (1, 10) OP '+' (1, 11) (1, 12) STRING 'U"ABC"' (1, 13) (1, 19) - >>> dump_tokens("x = ur'abc' + Ur'ABC' + uR'ABC' + UR'ABC'") + """) + self.check_tokenize("x = ur'abc' + Ur'ABC' + uR'ABC' + UR'ABC'", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) STRING "ur'abc'" (1, 4) (1, 11) @@ -267,7 +235,8 @@ STRING "uR'ABC'" (1, 24) (1, 31) OP '+' (1, 32) (1, 33) STRING "UR'ABC'" (1, 34) (1, 41) - >>> dump_tokens('y = ur"abc" + Ur"ABC" + uR"ABC" + UR"ABC"') + """) + self.check_tokenize('y = ur"abc" + Ur"ABC" + uR"ABC" + UR"ABC"', """\ NAME 'y' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) STRING 'ur"abc"' (1, 4) (1, 11) @@ -278,15 +247,18 @@ OP '+' (1, 32) (1, 33) STRING 'UR"ABC"' (1, 34) (1, 41) - >>> dump_tokens("b'abc' + B'abc'") + """) + self.check_tokenize("b'abc' + B'abc'", """\ STRING "b'abc'" (1, 0) (1, 6) OP '+' (1, 7) (1, 8) STRING "B'abc'" (1, 9) (1, 15) - >>> dump_tokens('b"abc" + B"abc"') + """) + self.check_tokenize('b"abc" + B"abc"', """\ STRING 'b"abc"' (1, 0) (1, 6) OP '+' (1, 7) (1, 8) STRING 'B"abc"' (1, 9) (1, 15) - >>> dump_tokens("br'abc' + bR'abc' + Br'abc' + BR'abc'") + """) + self.check_tokenize("br'abc' + bR'abc' + Br'abc' + BR'abc'", """\ STRING "br'abc'" (1, 0) (1, 7) OP '+' (1, 8) (1, 9) STRING "bR'abc'" (1, 10) (1, 17) @@ -294,7 +266,8 @@ STRING "Br'abc'" (1, 20) (1, 27) OP '+' (1, 28) (1, 29) STRING "BR'abc'" (1, 30) (1, 37) - >>> dump_tokens('br"abc" + bR"abc" + Br"abc" + BR"abc"') + """) 
+ self.check_tokenize('br"abc" + bR"abc" + Br"abc" + BR"abc"', """\ STRING 'br"abc"' (1, 0) (1, 7) OP '+' (1, 8) (1, 9) STRING 'bR"abc"' (1, 10) (1, 17) @@ -302,10 +275,10 @@ STRING 'Br"abc"' (1, 20) (1, 27) OP '+' (1, 28) (1, 29) STRING 'BR"abc"' (1, 30) (1, 37) + """) -Operators - - >>> dump_tokens("def d22(a, b, c=2, d=2, *k): pass") + def test_function(self): + self.check_tokenize("def d22(a, b, c=2, d=2, *k): pass", """\ NAME 'def' (1, 0) (1, 3) NAME 'd22' (1, 4) (1, 7) OP '(' (1, 7) (1, 8) @@ -326,7 +299,8 @@ OP ')' (1, 26) (1, 27) OP ':' (1, 27) (1, 28) NAME 'pass' (1, 29) (1, 33) - >>> dump_tokens("def d01v_(a=1, *k, **w): pass") + """) + self.check_tokenize("def d01v_(a=1, *k, **w): pass", """\ NAME 'def' (1, 0) (1, 3) NAME 'd01v_' (1, 4) (1, 9) OP '(' (1, 9) (1, 10) @@ -342,11 +316,12 @@ OP ')' (1, 22) (1, 23) OP ':' (1, 23) (1, 24) NAME 'pass' (1, 25) (1, 29) + """) -Comparison - - >>> dump_tokens("if 1 < 1 > 1 == 1 >= 5 <= 0x15 <= 0x12 != " + - ... "1 and 5 in 1 not in 1 is 1 or 5 is not 1: pass") + def test_comparison(self): + # Comparison + self.check_tokenize("if 1 < 1 > 1 == 1 >= 5 <= 0x15 <= 0x12 != " + + "1 and 5 in 1 not in 1 is 1 or 5 is not 1: pass", """\ NAME 'if' (1, 0) (1, 2) NUMBER '1' (1, 3) (1, 4) OP '<' (1, 5) (1, 6) @@ -379,10 +354,11 @@ NUMBER '1' (1, 81) (1, 82) OP ':' (1, 82) (1, 83) NAME 'pass' (1, 84) (1, 88) + """) -Shift - - >>> dump_tokens("x = 1 << 1 >> 5") + def test_shift(self): + # Shift + self.check_tokenize("x = 1 << 1 >> 5", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '1' (1, 4) (1, 5) @@ -390,10 +366,11 @@ NUMBER '1' (1, 9) (1, 10) OP '>>' (1, 11) (1, 13) NUMBER '5' (1, 14) (1, 15) + """) -Additive - - >>> dump_tokens("x = 1 - y + 15 - 01 + 0x124 + z + a[5]") + def test_additive(self): + # Additive + self.check_tokenize("x = 1 - y + 15 - 01 + 0x124 + z + a[5]", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '1' (1, 4) (1, 5) @@ -412,10 +389,11 @@ OP '[' (1, 35) (1, 36) NUMBER '5' (1, 36) (1, 37) OP ']' (1, 37) (1, 38) + """) -Multiplicative - - >>> dump_tokens("x = 1//1*1/5*12%0x12") + def test_multiplicative(self): + # Multiplicative + self.check_tokenize("x = 1//1*1/5*12%0x12", """\ NAME 'x' (1, 0) (1, 1) OP '=' (1, 2) (1, 3) NUMBER '1' (1, 4) (1, 5) @@ -429,10 +407,11 @@ NUMBER '12' (1, 13) (1, 15) OP '%' (1, 15) (1, 16) NUMBER '0x12' (1, 16) (1, 20) + """) -Unary - - >>> dump_tokens("~1 ^ 1 & 1 |1 ^ -1") + def test_unary(self): + # Unary + self.check_tokenize("~1 ^ 1 & 1 |1 ^ -1", """\ OP '~' (1, 0) (1, 1) NUMBER '1' (1, 1) (1, 2) OP '^' (1, 3) (1, 4) @@ -444,7 +423,8 @@ OP '^' (1, 14) (1, 15) OP '-' (1, 16) (1, 17) NUMBER '1' (1, 17) (1, 18) - >>> dump_tokens("-1*1/1+1*1//1 - ---1**1") + """) + self.check_tokenize("-1*1/1+1*1//1 - ---1**1", """\ OP '-' (1, 0) (1, 1) NUMBER '1' (1, 1) (1, 2) OP '*' (1, 2) (1, 3) @@ -464,10 +444,12 @@ NUMBER '1' (1, 19) (1, 20) OP '**' (1, 20) (1, 22) NUMBER '1' (1, 22) (1, 23) + """) -Selector - - >>> dump_tokens("import sys, time\\nx = sys.modules['time'].time()") + def test_selector(self): + # Selector + self.check_tokenize("import sys, time\n" + "x = sys.modules['time'].time()", """\ NAME 'import' (1, 0) (1, 6) NAME 'sys' (1, 7) (1, 10) OP ',' (1, 10) (1, 11) @@ -485,10 +467,12 @@ NAME 'time' (2, 24) (2, 28) OP '(' (2, 28) (2, 29) OP ')' (2, 29) (2, 30) + """) -Methods - - >>> dump_tokens("@staticmethod\\ndef foo(x,y): pass") + def test_method(self): + # Methods + self.check_tokenize("@staticmethod\n" + "def foo(x,y): pass", """\ OP '@' (1, 0) (1, 1) NAME 'staticmethod (1, 1) (1, 13) 
NEWLINE '\\n' (1, 13) (1, 14) @@ -501,41 +485,13 @@ OP ')' (2, 11) (2, 12) OP ':' (2, 12) (2, 13) NAME 'pass' (2, 14) (2, 18) + """) -Backslash means line continuation, except for comments - - >>> roundtrip("x=1+\\\\n" - ... "1\\n" - ... "# This is a comment\\\\n" - ... "# This also\\n") - True - >>> roundtrip("# Comment \\\\nx = 0") - True - -Two string literals on the same line - - >>> roundtrip("'' ''") - True - -Test roundtrip on random python modules. -pass the '-ucpu' option to process the full directory. - - >>> - >>> tempdir = os.path.dirname(f) or os.curdir - >>> testfiles = glob.glob(os.path.join(tempdir, "test*.py")) - - >>> if not test_support.is_resource_enabled("cpu"): - ... testfiles = random.sample(testfiles, 10) - ... - >>> for testfile in testfiles: - ... if not roundtrip(open(testfile)): - ... print "Roundtrip failed for file %s" % testfile - ... break - ... else: True - True - -Evil tabs - >>> dump_tokens("def f():\\n\\tif x\\n \\tpass") + def test_tabs(self): + # Evil tabs + self.check_tokenize("def f():\n" + "\tif x\n" + " \tpass", """\ NAME 'def' (1, 0) (1, 3) NAME 'f' (1, 4) (1, 5) OP '(' (1, 5) (1, 6) @@ -550,56 +506,16 @@ NAME 'pass' (3, 9) (3, 13) DEDENT '' (4, 0) (4, 0) DEDENT '' (4, 0) (4, 0) + """) -Pathological whitespace (http://bugs.python.org/issue16152) - >>> dump_tokens("@ ") + def test_pathological_trailing_whitespace(self): + # Pathological whitespace (http://bugs.python.org/issue16152) + self.check_tokenize("@ ", """\ OP '@' (1, 0) (1, 1) -""" + """) -from test import test_support -from tokenize import (untokenize, generate_tokens, NUMBER, NAME, OP, - STRING, ENDMARKER, tok_name, Untokenizer, tokenize) -from StringIO import StringIO -import os -from unittest import TestCase - -def dump_tokens(s): - """Print out the tokens in s in a table format. - - The ENDMARKER is omitted. - """ - f = StringIO(s) - for type, token, start, end, line in generate_tokens(f.readline): - if type == ENDMARKER: - break - type = tok_name[type] - print("%(type)-10.10s %(token)-13.13r %(start)s %(end)s" % locals()) - -# This is an example from the docs, set up as a doctest. def decistmt(s): - """Substitute Decimals for floats in a string of statements. - - >>> from decimal import Decimal - >>> s = 'print +21.3e-5*-.1234/81.7' - >>> decistmt(s) - "print +Decimal ('21.3e-5')*-Decimal ('.1234')/Decimal ('81.7')" - - The format of the exponent is inherited from the platform C library. - Known cases are "e-007" (Windows) and "e-07" (not Windows). Since - we're only showing 12 digits, and the 13th isn't close to 5, the - rest of the output should be platform-independent. - - >>> exec(s) #doctest: +ELLIPSIS - -3.21716034272e-0...7 - - Output from calculations with Decimal should be identical across all - platforms. - - >>> exec(decistmt(s)) - -3.217160342717258261933904529E-7 - """ - result = [] g = generate_tokens(StringIO(s).readline) # tokenize the string for toknum, tokval, _, _, _ in g: @@ -614,6 +530,27 @@ result.append((toknum, tokval)) return untokenize(result) +class TestMisc(TestCase): + + def test_decistmt(self): + # Substitute Decimals for floats in a string of statements. + # This is an example from the docs. + + from decimal import Decimal + s = '+21.3e-5*-.1234/81.7' + self.assertEqual(decistmt(s), + "+Decimal ('21.3e-5')*-Decimal ('.1234')/Decimal ('81.7')") + + # The format of the exponent is inherited from the platform C library. + # Known cases are "e-007" (Windows) and "e-07" (not Windows). 
Since + # we're only showing 12 digits, and the 13th isn't close to 5, the + # rest of the output should be platform-independent. + self.assertRegexpMatches(str(eval(s)), '-3.21716034272e-0+7') + + # Output from calculations with Decimal should be identical across all + # platforms. + self.assertEqual(eval(decistmt(s)), Decimal('-3.217160342717258261933904529E-7')) + class UntokenizeTest(TestCase): @@ -651,6 +588,115 @@ class TestRoundtrip(TestCase): + + def check_roundtrip(self, f): + """ + Test roundtrip for `untokenize`. `f` is an open file or a string. + The source code in f is tokenized, converted back to source code + via tokenize.untokenize(), and tokenized again from the latter. + The test fails if the second tokenization doesn't match the first. + """ + if isinstance(f, str): f = StringIO(f) + token_list = list(generate_tokens(f.readline)) + f.close() + tokens1 = [tok[:2] for tok in token_list] + new_text = untokenize(tokens1) + readline = iter(new_text.splitlines(1)).next + tokens2 = [tok[:2] for tok in generate_tokens(readline)] + self.assertEqual(tokens2, tokens1) + + def test_roundtrip(self): + # There are some standard formatting practices that are easy to get right. + + self.check_roundtrip("if x == 1:\n" + " print(x)\n") + + # There are some standard formatting practices that are easy to get right. + + self.check_roundtrip("if x == 1:\n" + " print x\n") + self.check_roundtrip("# This is a comment\n" + "# This also") + + # Some people use different formatting conventions, which makes + # untokenize a little trickier. Note that this test involves trailing + # whitespace after the colon. Note that we use hex escapes to make the + # two trailing blanks apperant in the expected output. + + self.check_roundtrip("if x == 1 : \n" + " print x\n") + fn = test_support.findfile("tokenize_tests" + os.extsep + "txt") + with open(fn) as f: + self.check_roundtrip(f) + self.check_roundtrip("if x == 1:\n" + " # A comment by itself.\n" + " print x # Comment here, too.\n" + " # Another comment.\n" + "after_if = True\n") + self.check_roundtrip("if (x # The comments need to go in the right place\n" + " == 1):\n" + " print 'x==1'\n") + self.check_roundtrip("class Test: # A comment here\n" + " # A comment with weird indent\n" + " after_com = 5\n" + " def x(m): return m*5 # a one liner\n" + " def y(m): # A whitespace after the colon\n" + " return y*4 # 3-space indent\n") + + # Some error-handling code + + self.check_roundtrip("try: import somemodule\n" + "except ImportError: # comment\n" + " print 'Can not import' # comment2\n" + "else: print 'Loaded'\n") + + def test_continuation(self): + # Balancing continuation + self.check_roundtrip("a = (3,4, \n" + "5,6)\n" + "y = [3, 4,\n" + "5]\n" + "z = {'a': 5,\n" + "'b':15, 'c':True}\n" + "x = len(y) + 5 - a[\n" + "3] - a[2]\n" + "+ len(z) - z[\n" + "'b']\n") + + def test_backslash_continuation(self): + # Backslash means line continuation, except for comments + self.check_roundtrip("x=1+\\\n" + "1\n" + "# This is a comment\\\n" + "# This also\n") + self.check_roundtrip("# Comment \\\n" + "x = 0") + + def test_string_concatenation(self): + # Two string literals on the same line + self.check_roundtrip("'' ''") + + def test_random_files(self): + # Test roundtrip on random python modules. + # pass the '-ucpu' option to process the full directory. 
+ + import glob, random + fn = test_support.findfile("tokenize_tests" + os.extsep + "txt") + tempdir = os.path.dirname(fn) or os.curdir + testfiles = glob.glob(os.path.join(tempdir, "test*.py")) + + if not test_support.is_resource_enabled("cpu"): + testfiles = random.sample(testfiles, 10) + + for testfile in testfiles: + try: + with open(testfile, 'rb') as f: + self.check_roundtrip(f) + except: + print "Roundtrip failed for file %s" % testfile + raise + + def roundtrip(self, code): if isinstance(code, str): code = code.encode('utf-8') @@ -667,13 +713,11 @@ self.assertEqual(codelines[1], codelines[2]) -__test__ = {"doctests" : doctests, 'decistmt': decistmt} - def test_main(): - from test import test_tokenize - test_support.run_doctest(test_tokenize, True) + test_support.run_unittest(TokenizeTest) test_support.run_unittest(UntokenizeTest) test_support.run_unittest(TestRoundtrip) + test_support.run_unittest(TestMisc) if __name__ == "__main__": test_main() -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Oct 7 04:42:57 2015 From: python-checkins at python.org (benjamin.peterson) Date: Wed, 07 Oct 2015 02:42:57 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_merge_3=2E5_=28closes_=2324806=29?= Message-ID: <20151007024256.2685.95193@psf.io> https://hg.python.org/cpython/rev/4b2a2688d2ad changeset: 98575:4b2a2688d2ad parent: 98570:db782c81bba9 parent: 98574:e02e4afcce6a user: Benjamin Peterson date: Tue Oct 06 19:42:46 2015 -0700 summary: merge 3.5 (closes #24806) files: Lib/test/test_descr.py | 31 ++++++++++++++++++++++++++++++ Misc/NEWS | 3 ++ Objects/typeobject.c | 12 +++++----- 3 files changed, 40 insertions(+), 6 deletions(-) diff --git a/Lib/test/test_descr.py b/Lib/test/test_descr.py --- a/Lib/test/test_descr.py +++ b/Lib/test/test_descr.py @@ -3798,6 +3798,37 @@ else: assert 0, "best_base calculation found wanting" + def test_unsubclassable_types(self): + with self.assertRaises(TypeError): + class X(type(None)): + pass + with self.assertRaises(TypeError): + class X(object, type(None)): + pass + with self.assertRaises(TypeError): + class X(type(None), object): + pass + class O(object): + pass + with self.assertRaises(TypeError): + class X(O, type(None)): + pass + with self.assertRaises(TypeError): + class X(type(None), O): + pass + + class X(object): + pass + with self.assertRaises(TypeError): + X.__bases__ = type(None), + with self.assertRaises(TypeError): + X.__bases__ = object, type(None) + with self.assertRaises(TypeError): + X.__bases__ = type(None), object + with self.assertRaises(TypeError): + X.__bases__ = O, type(None) + with self.assertRaises(TypeError): + X.__bases__ = type(None), O def test_mutable_bases_with_failing_mro(self): # Testing mutable bases with failing mro... diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,6 +10,9 @@ Core and Builtins ----------------- +- Issue #24806: Prevent builtin types that are not allowed to be subclassed from + being subclassed through multiple inheritance. + * Issue #25301: The UTF-8 decoder is now up to 15 times as fast for error handlers: ``ignore``, ``replace`` and ``surrogateescape``. 
diff --git a/Objects/typeobject.c b/Objects/typeobject.c --- a/Objects/typeobject.c +++ b/Objects/typeobject.c @@ -1973,6 +1973,12 @@ if (PyType_Ready(base_i) < 0) return NULL; } + if (!PyType_HasFeature(base_i, Py_TPFLAGS_BASETYPE)) { + PyErr_Format(PyExc_TypeError, + "type '%.100s' is not an acceptable base type", + base_i->tp_name); + return NULL; + } candidate = solid_base(base_i); if (winner == NULL) { winner = candidate; @@ -2353,12 +2359,6 @@ if (base == NULL) { goto error; } - if (!PyType_HasFeature(base, Py_TPFLAGS_BASETYPE)) { - PyErr_Format(PyExc_TypeError, - "type '%.100s' is not an acceptable base type", - base->tp_name); - goto error; - } dict = PyDict_Copy(orig_dict); if (dict == NULL) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Oct 7 05:08:17 2015 From: python-checkins at python.org (raymond.hettinger) Date: Wed, 07 Oct 2015 03:08:17 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E5=29=3A_Backport_early?= =?utf-8?q?-out_91259f061cfb_to_reduce_the_cost_of_bb1a2944bcb6?= Message-ID: <20151007030817.97720.65964@psf.io> https://hg.python.org/cpython/rev/6fb0f26ed858 changeset: 98576:6fb0f26ed858 branch: 3.5 parent: 98574:e02e4afcce6a user: Raymond Hettinger date: Tue Oct 06 23:06:17 2015 -0400 summary: Backport early-out 91259f061cfb to reduce the cost of bb1a2944bcb6 files: Modules/_collectionsmodule.c | 6 +++++- 1 files changed, 5 insertions(+), 1 deletions(-) diff --git a/Modules/_collectionsmodule.c b/Modules/_collectionsmodule.c --- a/Modules/_collectionsmodule.c +++ b/Modules/_collectionsmodule.c @@ -1045,6 +1045,9 @@ Py_ssize_t n; PyObject *item; + if (Py_SIZE(deque) == 0) + return; + /* During the process of clearing a deque, decrefs can cause the deque to mutate. To avoid fatal confusion, we have to make the deque empty before clearing the blocks and never refer to @@ -1423,7 +1426,8 @@ } } deque->maxlen = maxlen; - deque_clear(deque); + if (Py_SIZE(deque) > 0) + deque_clear(deque); if (iterable != NULL) { PyObject *rv = deque_extend(deque, iterable); if (rv == NULL) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Oct 7 05:08:18 2015 From: python-checkins at python.org (raymond.hettinger) Date: Wed, 07 Oct 2015 03:08:18 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_merge?= Message-ID: <20151007030817.55482.17044@psf.io> https://hg.python.org/cpython/rev/bf803e582cb4 changeset: 98577:bf803e582cb4 parent: 98575:4b2a2688d2ad parent: 98576:6fb0f26ed858 user: Raymond Hettinger date: Tue Oct 06 23:08:11 2015 -0400 summary: merge files: -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Oct 7 05:12:07 2015 From: python-checkins at python.org (raymond.hettinger) Date: Wed, 07 Oct 2015 03:12:07 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=282=2E7=29=3A_Backport_early?= =?utf-8?q?-out_91259f061cfb_to_reduce_the_cost_of_bb1a2944bcb6?= Message-ID: <20151007031207.18390.20414@psf.io> https://hg.python.org/cpython/rev/37aee118e1a3 changeset: 98578:37aee118e1a3 branch: 2.7 parent: 98572:60c44a09c5fc user: Raymond Hettinger date: Tue Oct 06 23:12:02 2015 -0400 summary: Backport early-out 91259f061cfb to reduce the cost of bb1a2944bcb6 files: Modules/_collectionsmodule.c | 6 +++++- 1 files changed, 5 insertions(+), 1 deletions(-) diff --git a/Modules/_collectionsmodule.c b/Modules/_collectionsmodule.c --- a/Modules/_collectionsmodule.c +++ b/Modules/_collectionsmodule.c @@ -651,6 +651,9 @@ 
Py_ssize_t n; PyObject *item; + if (Py_SIZE(deque) == 0) + return; + /* During the process of clearing a deque, decrefs can cause the deque to mutate. To avoid fatal confusion, we have to make the deque empty before clearing the blocks and never refer to @@ -1083,7 +1086,8 @@ } } deque->maxlen = maxlen; - deque_clear(deque); + if (Py_SIZE(deque) > 0) + deque_clear(deque); if (iterable != NULL) { PyObject *rv = deque_extend(deque, iterable); if (rv == NULL) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Oct 7 05:37:07 2015 From: python-checkins at python.org (benjamin.peterson) Date: Wed, 07 Oct 2015 03:37:07 +0000 Subject: [Python-checkins] =?utf-8?q?devguide=3A_no_DSA_keys_allowed?= Message-ID: <20151007033707.18390.50804@psf.io> https://hg.python.org/devguide/rev/a55c0ddbd6cb changeset: 766:a55c0ddbd6cb user: Benjamin Peterson date: Tue Oct 06 20:37:03 2015 -0700 summary: no DSA keys allowed files: faq.rst | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diff --git a/faq.rst b/faq.rst --- a/faq.rst +++ b/faq.rst @@ -875,8 +875,8 @@ How do I generate an SSH-2 public key? -------------------------------------- -All generated SSH keys should be sent to hgaccounts at python.org for -adding to the list of keys. +All generated SSH keys should be sent to hgaccounts at python.org for adding to the +list of keys. DSA keys are unacceptable. UNIX '''' -- Repository URL: https://hg.python.org/devguide From python-checkins at python.org Wed Oct 7 05:37:07 2015 From: python-checkins at python.org (benjamin.peterson) Date: Wed, 07 Oct 2015 03:37:07 +0000 Subject: [Python-checkins] =?utf-8?q?devguide=3A_tell_people_to_generate_r?= =?utf-8?q?sa_keys_with_puttygen?= Message-ID: <20151007033707.3291.67205@psf.io> https://hg.python.org/devguide/rev/847ccac0e0eb changeset: 765:847ccac0e0eb user: Benjamin Peterson date: Tue Oct 06 20:36:36 2015 -0700 summary: tell people to generate rsa keys with puttygen files: faq.rst | 8 ++++---- 1 files changed, 4 insertions(+), 4 deletions(-) diff --git a/faq.rst b/faq.rst --- a/faq.rst +++ b/faq.rst @@ -891,10 +891,10 @@ Windows ''''''' -Use PuTTYgen_ to generate your public key. Choose the "SSH2 DSA" radio button, -have it create an OpenSSH formatted key, choose a password, and save the private -key to a file. Copy the section with the public key (using Alt-P) to a file; -that file now has your public key. +Use PuTTYgen_ to generate your public key. Choose the "SSH-2 RSA" radio button, +set 4096 as the key size, choose a password, and save the private key to a file. +Copy the section with the public key (using Alt-P) to a file; that file now has +your public key. .. 
_PuTTYgen: http://www.chiark.greenend.org.uk/~sgtatham/putty/download.html -- Repository URL: https://hg.python.org/devguide From python-checkins at python.org Wed Oct 7 06:17:07 2015 From: python-checkins at python.org (benjamin.peterson) Date: Wed, 07 Oct 2015 04:17:07 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_make_configure_executable?= Message-ID: <20151007041706.3275.19090@psf.io> https://hg.python.org/cpython/rev/9322f381ddb7 changeset: 98579:9322f381ddb7 parent: 98577:bf803e582cb4 user: Benjamin Peterson date: Tue Oct 06 21:17:02 2015 -0700 summary: make configure executable files: configure | 0 1 files changed, 0 insertions(+), 0 deletions(-) diff --git a/configure b/configure old mode 100644 new mode 100755 -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Oct 7 06:53:40 2015 From: python-checkins at python.org (berker.peksag) Date: Wed, 07 Oct 2015 04:53:40 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Add_a_versionadded_directi?= =?utf-8?q?ve_for_reopenIfNeeded=28=29?= Message-ID: <20151007045339.20779.43794@psf.io> https://hg.python.org/cpython/rev/b6bfc2dad87f changeset: 98580:b6bfc2dad87f user: Berker Peksag date: Wed Oct 07 07:54:23 2015 +0300 summary: Add a versionadded directive for reopenIfNeeded() files: Doc/library/logging.handlers.rst | 2 ++ 1 files changed, 2 insertions(+), 0 deletions(-) diff --git a/Doc/library/logging.handlers.rst b/Doc/library/logging.handlers.rst --- a/Doc/library/logging.handlers.rst +++ b/Doc/library/logging.handlers.rst @@ -168,6 +168,8 @@ flushed and closed and the file opened again, typically as a precursor to outputting the record to the file. + .. versionadded:: 3.6 + .. method:: emit(record) -- Repository URL: https://hg.python.org/cpython From solipsis at pitrou.net Wed Oct 7 10:44:06 2015 From: solipsis at pitrou.net (solipsis at pitrou.net) Date: Wed, 07 Oct 2015 08:44:06 +0000 Subject: [Python-checkins] Daily reference leaks (b6bfc2dad87f): sum=17880 Message-ID: <20151007084404.97718.90590@psf.io> results for b6bfc2dad87f on branch "default" -------------------------------------------- test_asyncio leaked [3, 0, 0] memory blocks, sum=3 test_capi leaked [1598, 1598, 1598] references, sum=4794 test_capi leaked [387, 389, 389] memory blocks, sum=1165 test_functools leaked [0, 2, 2] memory blocks, sum=4 test_threading leaked [3196, 3196, 3196] references, sum=9588 test_threading leaked [774, 776, 776] memory blocks, sum=2326 Command line was: ['./python', '-m', 'test.regrtest', '-uall', '-R', '3:3:/home/psf-users/antoine/refleaks/reflogyFh9ty', '--timeout', '7200'] From python-checkins at python.org Wed Oct 7 12:09:29 2015 From: python-checkins at python.org (martin.panter) Date: Wed, 07 Oct 2015 10:09:29 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzI1Mjg2?= =?utf-8?q?=3A_Dictionary_views_are_not_sequences?= Message-ID: <20151007100929.7260.15300@psf.io> https://hg.python.org/cpython/rev/92429e01f444 changeset: 98581:92429e01f444 branch: 3.4 parent: 98573:e670b37e7b14 user: Martin Panter date: Wed Oct 07 09:56:46 2015 +0000 summary: Issue #25286: Dictionary views are not sequences Also change glossary heading from view files: Doc/glossary.rst | 15 ++++++++------- Doc/library/collections.abc.rst | 2 +- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/Doc/glossary.rst b/Doc/glossary.rst --- a/Doc/glossary.rst +++ b/Doc/glossary.rst @@ -183,6 +183,14 @@ keys can be any object with :meth:`__hash__` and :meth:`__eq__` methods. 
Called a hash in Perl. + dictionary view + The objects returned from :meth:`dict.keys`, :meth:`dict.values`, and + :meth:`dict.items` are called dictionary views. They provide a dynamic + view on the dictionary?s entries, which means that when the dictionary + changes, the view reflects these changes. To force the + dictionary view to become a full list use ``list(dictview)``. See + :ref:`dict-views`. + docstring A string literal which appears as the first expression in a class, function or module. While ignored when the suite is executed, it is @@ -853,13 +861,6 @@ ``'\r'``. See :pep:`278` and :pep:`3116`, as well as :func:`bytes.splitlines` for an additional use. - view - The objects returned from :meth:`dict.keys`, :meth:`dict.values`, and - :meth:`dict.items` are called dictionary views. They are lazy sequences - that will see changes in the underlying dictionary. To force the - dictionary view to become a full list use ``list(dictview)``. See - :ref:`dict-views`. - virtual environment A cooperatively isolated runtime environment that allows Python users and applications to install and upgrade Python distribution packages diff --git a/Doc/library/collections.abc.rst b/Doc/library/collections.abc.rst --- a/Doc/library/collections.abc.rst +++ b/Doc/library/collections.abc.rst @@ -122,7 +122,7 @@ KeysView ValuesView - ABCs for mapping, items, keys, and values :term:`views `. + ABCs for mapping, items, keys, and values :term:`views `. These ABCs allow us to ask classes or instances if they provide -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Oct 7 12:09:29 2015 From: python-checkins at python.org (martin.panter) Date: Wed, 07 Oct 2015 10:09:29 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_Issue_=2325286=3A_Merge_dictionary_view_glossary_from_3=2E4_in?= =?utf-8?q?to_3=2E5?= Message-ID: <20151007100929.3293.38926@psf.io> https://hg.python.org/cpython/rev/c29f1114ef65 changeset: 98582:c29f1114ef65 branch: 3.5 parent: 98576:6fb0f26ed858 parent: 98581:92429e01f444 user: Martin Panter date: Wed Oct 07 10:01:04 2015 +0000 summary: Issue #25286: Merge dictionary view glossary from 3.4 into 3.5 files: Doc/glossary.rst | 15 ++++++++------- Doc/library/collections.abc.rst | 2 +- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/Doc/glossary.rst b/Doc/glossary.rst --- a/Doc/glossary.rst +++ b/Doc/glossary.rst @@ -241,6 +241,14 @@ keys can be any object with :meth:`__hash__` and :meth:`__eq__` methods. Called a hash in Perl. + dictionary view + The objects returned from :meth:`dict.keys`, :meth:`dict.values`, and + :meth:`dict.items` are called dictionary views. They provide a dynamic + view on the dictionary?s entries, which means that when the dictionary + changes, the view reflects these changes. To force the + dictionary view to become a full list use ``list(dictview)``. See + :ref:`dict-views`. + docstring A string literal which appears as the first expression in a class, function or module. While ignored when the suite is executed, it is @@ -934,13 +942,6 @@ ``'\r'``. See :pep:`278` and :pep:`3116`, as well as :func:`bytes.splitlines` for an additional use. - view - The objects returned from :meth:`dict.keys`, :meth:`dict.values`, and - :meth:`dict.items` are called dictionary views. They are lazy sequences - that will see changes in the underlying dictionary. To force the - dictionary view to become a full list use ``list(dictview)``. See - :ref:`dict-views`. 
- virtual environment A cooperatively isolated runtime environment that allows Python users and applications to install and upgrade Python distribution packages diff --git a/Doc/library/collections.abc.rst b/Doc/library/collections.abc.rst --- a/Doc/library/collections.abc.rst +++ b/Doc/library/collections.abc.rst @@ -150,7 +150,7 @@ KeysView ValuesView - ABCs for mapping, items, keys, and values :term:`views `. + ABCs for mapping, items, keys, and values :term:`views `. .. class:: Awaitable -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Oct 7 12:09:29 2015 From: python-checkins at python.org (martin.panter) Date: Wed, 07 Oct 2015 10:09:29 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy41KTogSXNzdWUgIzI1Mjg2?= =?utf-8?q?=3A_Update_dictionary_view_link=3B_patch_by_Akira_Li?= Message-ID: <20151007100929.2687.97090@psf.io> https://hg.python.org/cpython/rev/d43c33f032a2 changeset: 98583:d43c33f032a2 branch: 3.5 user: Martin Panter date: Wed Oct 07 10:03:20 2015 +0000 summary: Issue #25286: Update dictionary view link; patch by Akira Li files: Doc/library/collections.rst | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Doc/library/collections.rst b/Doc/library/collections.rst --- a/Doc/library/collections.rst +++ b/Doc/library/collections.rst @@ -1018,8 +1018,8 @@ semantics pass-in keyword arguments using a regular unordered dictionary. .. versionchanged:: 3.5 - The items, keys, and values :term:`views ` of :class:`OrderedDict` now - support reverse iteration using :func:`reversed`. + The items, keys, and values :term:`views ` + of :class:`OrderedDict` now support reverse iteration using :func:`reversed`. :class:`OrderedDict` Examples and Recipes ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Oct 7 12:09:29 2015 From: python-checkins at python.org (martin.panter) Date: Wed, 07 Oct 2015 10:09:29 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2325286=3A_Merge_dictionary_view_glossary_from_3?= =?utf-8?q?=2E5?= Message-ID: <20151007100929.70972.18589@psf.io> https://hg.python.org/cpython/rev/7067420c3e72 changeset: 98584:7067420c3e72 parent: 98580:b6bfc2dad87f parent: 98583:d43c33f032a2 user: Martin Panter date: Wed Oct 07 10:04:49 2015 +0000 summary: Issue #25286: Merge dictionary view glossary from 3.5 files: Doc/glossary.rst | 15 ++++++++------- Doc/library/collections.abc.rst | 2 +- Doc/library/collections.rst | 4 ++-- 3 files changed, 11 insertions(+), 10 deletions(-) diff --git a/Doc/glossary.rst b/Doc/glossary.rst --- a/Doc/glossary.rst +++ b/Doc/glossary.rst @@ -241,6 +241,14 @@ keys can be any object with :meth:`__hash__` and :meth:`__eq__` methods. Called a hash in Perl. + dictionary view + The objects returned from :meth:`dict.keys`, :meth:`dict.values`, and + :meth:`dict.items` are called dictionary views. They provide a dynamic + view on the dictionary?s entries, which means that when the dictionary + changes, the view reflects these changes. To force the + dictionary view to become a full list use ``list(dictview)``. See + :ref:`dict-views`. + docstring A string literal which appears as the first expression in a class, function or module. While ignored when the suite is executed, it is @@ -934,13 +942,6 @@ ``'\r'``. See :pep:`278` and :pep:`3116`, as well as :func:`bytes.splitlines` for an additional use. 
- view - The objects returned from :meth:`dict.keys`, :meth:`dict.values`, and - :meth:`dict.items` are called dictionary views. They are lazy sequences - that will see changes in the underlying dictionary. To force the - dictionary view to become a full list use ``list(dictview)``. See - :ref:`dict-views`. - virtual environment A cooperatively isolated runtime environment that allows Python users and applications to install and upgrade Python distribution packages diff --git a/Doc/library/collections.abc.rst b/Doc/library/collections.abc.rst --- a/Doc/library/collections.abc.rst +++ b/Doc/library/collections.abc.rst @@ -150,7 +150,7 @@ KeysView ValuesView - ABCs for mapping, items, keys, and values :term:`views `. + ABCs for mapping, items, keys, and values :term:`views `. .. class:: Awaitable diff --git a/Doc/library/collections.rst b/Doc/library/collections.rst --- a/Doc/library/collections.rst +++ b/Doc/library/collections.rst @@ -1021,8 +1021,8 @@ semantics pass-in keyword arguments using a regular unordered dictionary. .. versionchanged:: 3.5 - The items, keys, and values :term:`views ` of :class:`OrderedDict` now - support reverse iteration using :func:`reversed`. + The items, keys, and values :term:`views ` + of :class:`OrderedDict` now support reverse iteration using :func:`reversed`. :class:`OrderedDict` Examples and Recipes ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Oct 7 12:23:58 2015 From: python-checkins at python.org (martin.panter) Date: Wed, 07 Oct 2015 10:23:58 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogSXNzdWUgIzI1Mjg2?= =?utf-8?q?=3A_Dictionary_views_are_not_sequences?= Message-ID: <20151007102357.128852.76937@psf.io> https://hg.python.org/cpython/rev/41e1f2500047 changeset: 98585:41e1f2500047 branch: 2.7 parent: 98578:37aee118e1a3 user: Martin Panter date: Wed Oct 07 10:19:39 2015 +0000 summary: Issue #25286: Dictionary views are not sequences Also change glossary heading from "view" to "dictionary view". Patch by Akira Li. files: Doc/glossary.rst | 15 ++++++++------- Doc/library/collections.rst | 2 +- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/Doc/glossary.rst b/Doc/glossary.rst --- a/Doc/glossary.rst +++ b/Doc/glossary.rst @@ -182,6 +182,14 @@ keys can be any object with :meth:`__hash__` and :meth:`__eq__` methods. Called a hash in Perl. + dictionary view + The objects returned from :meth:`dict.viewkeys`, :meth:`dict.viewvalues`, + and :meth:`dict.viewitems` are called dictionary views. They provide a dynamic + view on the dictionary?s entries, which means that when the dictionary + changes, the view reflects these changes. To force + dictionary view to become a full list use ``list(dictview)``. See + :ref:`dict-views`. + docstring A string literal which appears as the first expression in a class, function or module. While ignored when the suite is executed, it is @@ -713,13 +721,6 @@ ``'\r'``. See :pep:`278` and :pep:`3116`, as well as :func:`str.splitlines` for an additional use. - view - The objects returned from :meth:`dict.viewkeys`, :meth:`dict.viewvalues`, - and :meth:`dict.viewitems` are called dictionary views. They are lazy - sequences that will see changes in the underlying dictionary. To force - the dictionary view to become a full list use ``list(dictview)``. See - :ref:`dict-views`. 
- virtual environment A cooperatively isolated runtime environment that allows Python users and applications to install and upgrade Python distribution packages diff --git a/Doc/library/collections.rst b/Doc/library/collections.rst --- a/Doc/library/collections.rst +++ b/Doc/library/collections.rst @@ -978,7 +978,7 @@ KeysView ValuesView - ABCs for mapping, items, keys, and values :term:`views `. + ABCs for mapping, items, keys, and values :term:`views `. These ABCs allow us to ask classes or instances if they provide -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Oct 7 12:40:52 2015 From: python-checkins at python.org (martin.panter) Date: Wed, 07 Oct 2015 10:40:52 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogSXNzdWUgIzI1Mjg2?= =?utf-8?q?=3A_Accidentally_dropped_=22the=22?= Message-ID: <20151007104052.7240.71260@psf.io> https://hg.python.org/cpython/rev/04815b55227f changeset: 98586:04815b55227f branch: 2.7 user: Martin Panter date: Wed Oct 07 10:39:13 2015 +0000 summary: Issue #25286: Accidentally dropped "the" files: Doc/glossary.rst | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Doc/glossary.rst b/Doc/glossary.rst --- a/Doc/glossary.rst +++ b/Doc/glossary.rst @@ -186,7 +186,7 @@ The objects returned from :meth:`dict.viewkeys`, :meth:`dict.viewvalues`, and :meth:`dict.viewitems` are called dictionary views. They provide a dynamic view on the dictionary?s entries, which means that when the dictionary - changes, the view reflects these changes. To force + changes, the view reflects these changes. To force the dictionary view to become a full list use ``list(dictview)``. See :ref:`dict-views`. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Oct 7 13:28:45 2015 From: python-checkins at python.org (martin.panter) Date: Wed, 07 Oct 2015 11:28:45 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_Merge_typo_fixes_from_3=2E4_into_3=2E5?= Message-ID: <20151007112845.7262.85052@psf.io> https://hg.python.org/cpython/rev/ab7601859c99 changeset: 98588:ab7601859c99 branch: 3.5 parent: 98583:d43c33f032a2 parent: 98587:a9cbf3becfb7 user: Martin Panter date: Wed Oct 07 11:01:47 2015 +0000 summary: Merge typo fixes from 3.4 into 3.5 files: Doc/library/collections.rst | 2 +- Doc/library/test.rst | 2 +- Lib/http/server.py | 2 +- Misc/HISTORY | 2 +- Misc/NEWS | 2 +- Modules/_io/stringio.c | 2 +- Objects/object.c | 2 +- Objects/typeobject.c | 2 +- Python/ceval.c | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Doc/library/collections.rst b/Doc/library/collections.rst --- a/Doc/library/collections.rst +++ b/Doc/library/collections.rst @@ -1015,7 +1015,7 @@ The :class:`OrderedDict` constructor and :meth:`update` method both accept keyword arguments, but their order is lost because Python's function call -semantics pass-in keyword arguments using a regular unordered dictionary. +semantics pass in keyword arguments using a regular unordered dictionary. .. versionchanged:: 3.5 The items, keys, and values :term:`views ` diff --git a/Doc/library/test.rst b/Doc/library/test.rst --- a/Doc/library/test.rst +++ b/Doc/library/test.rst @@ -160,7 +160,7 @@ The :mod:`test` package can be run as a script to drive Python's regression test suite, thanks to the :option:`-m` option: :program:`python -m test`. Under the hood, it uses :mod:`test.regrtest`; the call :program:`python -m -test.regrtest` used in previous Python versions still works). 
Running the +test.regrtest` used in previous Python versions still works. Running the script by itself automatically starts running all regression tests in the :mod:`test` package. It does this by finding all modules in the package whose name starts with ``test_``, importing them, and executing the function diff --git a/Lib/http/server.py b/Lib/http/server.py --- a/Lib/http/server.py +++ b/Lib/http/server.py @@ -831,7 +831,7 @@ def _url_collapse_path(path): """ Given a URL path, remove extra '/'s and '.' path elements and collapse - any '..' references and returns a colllapsed path. + any '..' references and returns a collapsed path. Implements something akin to RFC-2396 5.2 step 6 to parse relative paths. The utility of this function is limited to is_cgi method and helps diff --git a/Misc/HISTORY b/Misc/HISTORY --- a/Misc/HISTORY +++ b/Misc/HISTORY @@ -6751,7 +6751,7 @@ - Issue #7895: platform.mac_ver() no longer crashes after calling os.fork(). -- Issue #9323: Fixed a bug in trace.py that resulted in loosing the name of the +- Issue #9323: Fixed a bug in trace.py that resulted in losing the name of the script being traced. Patch by Eli Bendersky. - Issue #9282: Fixed --listfuncs option of trace.py. Thanks Eli Bendersky for diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -1717,7 +1717,7 @@ engine friendly) error messages when "exec" and "print" are used as statements. -- Issue #21642: If the conditional if-else expression, allow an integer written +- Issue #21642: In the conditional if-else expression, allow an integer written with no space between itself and the ``else`` keyword (e.g. ``True if 42else False``) to be valid syntax. diff --git a/Modules/_io/stringio.c b/Modules/_io/stringio.c --- a/Modules/_io/stringio.c +++ b/Modules/_io/stringio.c @@ -750,7 +750,7 @@ /* If newline == "", we don't translate anything. If newline == "\n" or newline == None, we translate to "\n", which is a no-op. 
- (for newline == None, TextIOWrapper translates to os.sepline, but it + (for newline == None, TextIOWrapper translates to os.linesep, but it is pointless for StringIO) */ if (newline != NULL && newline[0] == '\r') { diff --git a/Objects/object.c b/Objects/object.c --- a/Objects/object.c +++ b/Objects/object.c @@ -475,7 +475,7 @@ #ifdef Py_DEBUG /* PyObject_Repr() must not be called with an exception set, because it may clear it (directly or indirectly) and so the - caller looses its exception */ + caller loses its exception */ assert(!PyErr_Occurred()); #endif diff --git a/Objects/typeobject.c b/Objects/typeobject.c --- a/Objects/typeobject.c +++ b/Objects/typeobject.c @@ -901,7 +901,7 @@ #ifdef Py_DEBUG /* type_call() must not be called with an exception set, because it may clear it (directly or indirectly) and so the - caller looses its exception */ + caller loses its exception */ assert(!PyErr_Occurred()); #endif diff --git a/Python/ceval.c b/Python/ceval.c --- a/Python/ceval.c +++ b/Python/ceval.c @@ -1214,7 +1214,7 @@ #ifdef Py_DEBUG /* PyEval_EvalFrameEx() must not be called with an exception set, because it may clear it (directly or indirectly) and so the - caller looses its exception */ + caller loses its exception */ assert(!PyErr_Occurred()); #endif -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Oct 7 13:28:45 2015 From: python-checkins at python.org (martin.panter) Date: Wed, 07 Oct 2015 11:28:45 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Merge_typo_fixes_from_3=2E5?= Message-ID: <20151007112845.20783.83659@psf.io> https://hg.python.org/cpython/rev/2333efe17938 changeset: 98590:2333efe17938 parent: 98584:7067420c3e72 parent: 98589:e75e8aeb8ac7 user: Martin Panter date: Wed Oct 07 11:13:55 2015 +0000 summary: Merge typo fixes from 3.5 files: Doc/library/collections.rst | 2 +- Doc/library/test.rst | 2 +- Lib/http/server.py | 2 +- Misc/HISTORY | 2 +- Misc/NEWS | 6 +++--- Modules/_io/stringio.c | 2 +- Objects/methodobject.c | 2 +- Objects/object.c | 2 +- Objects/typeobject.c | 2 +- Python/ceval.c | 2 +- 10 files changed, 12 insertions(+), 12 deletions(-) diff --git a/Doc/library/collections.rst b/Doc/library/collections.rst --- a/Doc/library/collections.rst +++ b/Doc/library/collections.rst @@ -1018,7 +1018,7 @@ The :class:`OrderedDict` constructor and :meth:`update` method both accept keyword arguments, but their order is lost because Python's function call -semantics pass-in keyword arguments using a regular unordered dictionary. +semantics pass in keyword arguments using a regular unordered dictionary. .. versionchanged:: 3.5 The items, keys, and values :term:`views ` diff --git a/Doc/library/test.rst b/Doc/library/test.rst --- a/Doc/library/test.rst +++ b/Doc/library/test.rst @@ -160,7 +160,7 @@ The :mod:`test` package can be run as a script to drive Python's regression test suite, thanks to the :option:`-m` option: :program:`python -m test`. Under the hood, it uses :mod:`test.regrtest`; the call :program:`python -m -test.regrtest` used in previous Python versions still works). Running the +test.regrtest` used in previous Python versions still works. Running the script by itself automatically starts running all regression tests in the :mod:`test` package. 
It does this by finding all modules in the package whose name starts with ``test_``, importing them, and executing the function diff --git a/Lib/http/server.py b/Lib/http/server.py --- a/Lib/http/server.py +++ b/Lib/http/server.py @@ -831,7 +831,7 @@ def _url_collapse_path(path): """ Given a URL path, remove extra '/'s and '.' path elements and collapse - any '..' references and returns a colllapsed path. + any '..' references and returns a collapsed path. Implements something akin to RFC-2396 5.2 step 6 to parse relative paths. The utility of this function is limited to is_cgi method and helps diff --git a/Misc/HISTORY b/Misc/HISTORY --- a/Misc/HISTORY +++ b/Misc/HISTORY @@ -6751,7 +6751,7 @@ - Issue #7895: platform.mac_ver() no longer crashes after calling os.fork(). -- Issue #9323: Fixed a bug in trace.py that resulted in loosing the name of the +- Issue #9323: Fixed a bug in trace.py that resulted in losing the name of the script being traced. Patch by Eli Bendersky. - Issue #9282: Fixed --listfuncs option of trace.py. Thanks Eli Bendersky for diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -421,7 +421,7 @@ ----- - Issue #24915: Add LLVM support for PGO builds and use the test suite to - generate the profile data. Initiial patch by Alecsandru Patrascu of Intel. + generate the profile data. Initial patch by Alecsandru Patrascu of Intel. - Issue #24910: Windows MSIs now have unique display names. @@ -1830,7 +1830,7 @@ type) can now be weakref'ed. Patch by Wei Wu. - Issue #22077: Improve index error messages for bytearrays, bytes, lists, - and tuples by adding 'or slices'. Added ', not ' for bytearrays. Original patch by Claudiu Popa. - Issue #20179: Apply Argument Clinic to bytes and bytearray. @@ -1854,7 +1854,7 @@ engine friendly) error messages when "exec" and "print" are used as statements. -- Issue #21642: If the conditional if-else expression, allow an integer written +- Issue #21642: In the conditional if-else expression, allow an integer written with no space between itself and the ``else`` keyword (e.g. ``True if 42else False``) to be valid syntax. diff --git a/Modules/_io/stringio.c b/Modules/_io/stringio.c --- a/Modules/_io/stringio.c +++ b/Modules/_io/stringio.c @@ -750,7 +750,7 @@ /* If newline == "", we don't translate anything. If newline == "\n" or newline == None, we translate to "\n", which is a no-op. 
- (for newline == None, TextIOWrapper translates to os.sepline, but it + (for newline == None, TextIOWrapper translates to os.linesep, but it is pointless for StringIO) */ if (newline != NULL && newline[0] == '\r') { diff --git a/Objects/methodobject.c b/Objects/methodobject.c --- a/Objects/methodobject.c +++ b/Objects/methodobject.c @@ -89,7 +89,7 @@ /* PyCFunction_Call() must not be called with an exception set, because it may clear it (directly or indirectly) and so the - caller looses its exception */ + caller loses its exception */ assert(!PyErr_Occurred()); flags = PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST); diff --git a/Objects/object.c b/Objects/object.c --- a/Objects/object.c +++ b/Objects/object.c @@ -475,7 +475,7 @@ #ifdef Py_DEBUG /* PyObject_Repr() must not be called with an exception set, because it may clear it (directly or indirectly) and so the - caller looses its exception */ + caller loses its exception */ assert(!PyErr_Occurred()); #endif diff --git a/Objects/typeobject.c b/Objects/typeobject.c --- a/Objects/typeobject.c +++ b/Objects/typeobject.c @@ -901,7 +901,7 @@ #ifdef Py_DEBUG /* type_call() must not be called with an exception set, because it may clear it (directly or indirectly) and so the - caller looses its exception */ + caller loses its exception */ assert(!PyErr_Occurred()); #endif diff --git a/Python/ceval.c b/Python/ceval.c --- a/Python/ceval.c +++ b/Python/ceval.c @@ -1214,7 +1214,7 @@ #ifdef Py_DEBUG /* PyEval_EvalFrameEx() must not be called with an exception set, because it may clear it (directly or indirectly) and so the - caller looses its exception */ + caller loses its exception */ assert(!PyErr_Occurred()); #endif -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Oct 7 13:28:46 2015 From: python-checkins at python.org (martin.panter) Date: Wed, 07 Oct 2015 11:28:46 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_One_more_typo_in_a_comment?= Message-ID: <20151007112845.18360.33091@psf.io> https://hg.python.org/cpython/rev/3291e6132a67 changeset: 98591:3291e6132a67 user: Martin Panter date: Wed Oct 07 11:15:15 2015 +0000 summary: One more typo in a comment files: Python/ast.c | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Python/ast.c b/Python/ast.c --- a/Python/ast.c +++ b/Python/ast.c @@ -4035,7 +4035,7 @@ assert(expr_end >= 0 && expr_end < PyUnicode_GET_LENGTH(str)); assert(expr_end >= expr_start); - /* There has to be at least on character on each side of the + /* There has to be at least one character on each side of the expression inside this str. This will have been caught before we're called. 
*/ assert(expr_start >= 1); -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Oct 7 13:28:47 2015 From: python-checkins at python.org (martin.panter) Date: Wed, 07 Oct 2015 11:28:47 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E5=29=3A_More_typos_in_?= =?utf-8?q?3=2E5_documentation_and_comments?= Message-ID: <20151007112845.97700.15264@psf.io> https://hg.python.org/cpython/rev/e75e8aeb8ac7 changeset: 98589:e75e8aeb8ac7 branch: 3.5 user: Martin Panter date: Wed Oct 07 11:03:53 2015 +0000 summary: More typos in 3.5 documentation and comments files: Lib/test/test_time.py | 4 ++-- Misc/NEWS | 4 ++-- Objects/abstract.c | 2 +- Objects/methodobject.c | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Lib/test/test_time.py b/Lib/test/test_time.py --- a/Lib/test/test_time.py +++ b/Lib/test/test_time.py @@ -773,7 +773,7 @@ (2**23 - 1e-9, 8388607999999999), (2**23, 8388608000000000), - # start loosing precision for value > 2^23 seconds + # start losing precision for value > 2^23 seconds (2**23 + 1e-9, 8388608000000002), # nanoseconds are lost for value > 2^23 seconds @@ -848,7 +848,7 @@ (4194304000000000, 2**22), (4194304000000001, 2**22 + 1e-9), - # start loosing precision for value > 2^23 seconds + # start losing precision for value > 2^23 seconds (8388608000000002, 2**23 + 1e-9), # nanoseconds are lost for value > 2^23 seconds diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -285,7 +285,7 @@ ----- - Issue #24915: Add LLVM support for PGO builds and use the test suite to - generate the profile data. Initiial patch by Alecsandru Patrascu of Intel. + generate the profile data. Initial patch by Alecsandru Patrascu of Intel. - Issue #24910: Windows MSIs now have unique display names. @@ -1693,7 +1693,7 @@ type) can now be weakref'ed. Patch by Wei Wu. - Issue #22077: Improve index error messages for bytearrays, bytes, lists, - and tuples by adding 'or slices'. Added ', not ' for bytearrays. Original patch by Claudiu Popa. - Issue #20179: Apply Argument Clinic to bytes and bytearray. 
diff --git a/Objects/abstract.c b/Objects/abstract.c --- a/Objects/abstract.c +++ b/Objects/abstract.c @@ -2131,7 +2131,7 @@ /* PyObject_Call() must not be called with an exception set, because it may clear it (directly or indirectly) and so the - caller looses its exception */ + caller loses its exception */ assert(!PyErr_Occurred()); call = func->ob_type->tp_call; diff --git a/Objects/methodobject.c b/Objects/methodobject.c --- a/Objects/methodobject.c +++ b/Objects/methodobject.c @@ -89,7 +89,7 @@ /* PyCFunction_Call() must not be called with an exception set, because it may clear it (directly or indirectly) and so the - caller looses its exception */ + caller loses its exception */ assert(!PyErr_Occurred()); flags = PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST); -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Oct 7 13:28:47 2015 From: python-checkins at python.org (martin.panter) Date: Wed, 07 Oct 2015 11:28:47 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E4=29=3A_Various_minor_?= =?utf-8?q?typos_in_documentation_and_comments?= Message-ID: <20151007112845.97720.29719@psf.io> https://hg.python.org/cpython/rev/a9cbf3becfb7 changeset: 98587:a9cbf3becfb7 branch: 3.4 parent: 98581:92429e01f444 user: Martin Panter date: Wed Oct 07 10:26:23 2015 +0000 summary: Various minor typos in documentation and comments files: Doc/library/collections.rst | 2 +- Doc/library/test.rst | 2 +- Lib/http/server.py | 2 +- Misc/HISTORY | 2 +- Misc/NEWS | 2 +- Modules/_io/stringio.c | 2 +- Objects/object.c | 2 +- Objects/typeobject.c | 2 +- PC/VS9.0/kill_python.c | 2 +- PCbuild/kill_python.c | 2 +- Python/ceval.c | 4 ++-- 11 files changed, 12 insertions(+), 12 deletions(-) diff --git a/Doc/library/collections.rst b/Doc/library/collections.rst --- a/Doc/library/collections.rst +++ b/Doc/library/collections.rst @@ -980,7 +980,7 @@ The :class:`OrderedDict` constructor and :meth:`update` method both accept keyword arguments, but their order is lost because Python's function call -semantics pass-in keyword arguments using a regular unordered dictionary. +semantics pass in keyword arguments using a regular unordered dictionary. :class:`OrderedDict` Examples and Recipes diff --git a/Doc/library/test.rst b/Doc/library/test.rst --- a/Doc/library/test.rst +++ b/Doc/library/test.rst @@ -160,7 +160,7 @@ The :mod:`test` package can be run as a script to drive Python's regression test suite, thanks to the :option:`-m` option: :program:`python -m test`. Under the hood, it uses :mod:`test.regrtest`; the call :program:`python -m -test.regrtest` used in previous Python versions still works). Running the +test.regrtest` used in previous Python versions still works. Running the script by itself automatically starts running all regression tests in the :mod:`test` package. It does this by finding all modules in the package whose name starts with ``test_``, importing them, and executing the function diff --git a/Lib/http/server.py b/Lib/http/server.py --- a/Lib/http/server.py +++ b/Lib/http/server.py @@ -881,7 +881,7 @@ def _url_collapse_path(path): """ Given a URL path, remove extra '/'s and '.' path elements and collapse - any '..' references and returns a colllapsed path. + any '..' references and returns a collapsed path. Implements something akin to RFC-2396 5.2 step 6 to parse relative paths. 
The utility of this function is limited to is_cgi method and helps diff --git a/Misc/HISTORY b/Misc/HISTORY --- a/Misc/HISTORY +++ b/Misc/HISTORY @@ -6718,7 +6718,7 @@ - Issue #7895: platform.mac_ver() no longer crashes after calling os.fork(). -- Issue #9323: Fixed a bug in trace.py that resulted in loosing the name of the +- Issue #9323: Fixed a bug in trace.py that resulted in losing the name of the script being traced. Patch by Eli Bendersky. - Issue #9282: Fixed --listfuncs option of trace.py. Thanks Eli Bendersky for diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -1131,7 +1131,7 @@ engine friendly) error messages when "exec" and "print" are used as statements. -- Issue #21642: If the conditional if-else expression, allow an integer written +- Issue #21642: In the conditional if-else expression, allow an integer written with no space between itself and the ``else`` keyword (e.g. ``True if 42else False``) to be valid syntax. diff --git a/Modules/_io/stringio.c b/Modules/_io/stringio.c --- a/Modules/_io/stringio.c +++ b/Modules/_io/stringio.c @@ -711,7 +711,7 @@ /* If newline == "", we don't translate anything. If newline == "\n" or newline == None, we translate to "\n", which is a no-op. - (for newline == None, TextIOWrapper translates to os.sepline, but it + (for newline == None, TextIOWrapper translates to os.linesep, but it is pointless for StringIO) */ if (newline != NULL && newline[0] == '\r') { diff --git a/Objects/object.c b/Objects/object.c --- a/Objects/object.c +++ b/Objects/object.c @@ -459,7 +459,7 @@ #ifdef Py_DEBUG /* PyObject_Repr() must not be called with an exception set, because it may clear it (directly or indirectly) and so the - caller looses its exception */ + caller loses its exception */ assert(!PyErr_Occurred()); #endif diff --git a/Objects/typeobject.c b/Objects/typeobject.c --- a/Objects/typeobject.c +++ b/Objects/typeobject.c @@ -876,7 +876,7 @@ #ifdef Py_DEBUG /* type_call() must not be called with an exception set, because it may clear it (directly or indirectly) and so the - caller looses its exception */ + caller loses its exception */ assert(!PyErr_Occurred()); #endif diff --git a/PC/VS9.0/kill_python.c b/PC/VS9.0/kill_python.c --- a/PC/VS9.0/kill_python.c +++ b/PC/VS9.0/kill_python.c @@ -108,7 +108,7 @@ * modules for all processes (not just the python[_d].exe ones) * and see if any of our DLLs are loaded (i.e. python34[_d].dll), * as that would also inhibit our ability to rebuild the solution. - * Not worth loosing sleep over though; for now, a simple check + * Not worth losing sleep over though; for now, a simple check * for just the python executable should be sufficient. */ diff --git a/PCbuild/kill_python.c b/PCbuild/kill_python.c --- a/PCbuild/kill_python.c +++ b/PCbuild/kill_python.c @@ -108,7 +108,7 @@ * modules for all processes (not just the python[_d].exe ones) * and see if any of our DLLs are loaded (i.e. python34[_d].dll), * as that would also inhibit our ability to rebuild the solution. - * Not worth loosing sleep over though; for now, a simple check + * Not worth losing sleep over though; for now, a simple check * for just the python executable should be sufficient. 
*/ diff --git a/Python/ceval.c b/Python/ceval.c --- a/Python/ceval.c +++ b/Python/ceval.c @@ -1212,7 +1212,7 @@ #ifdef Py_DEBUG /* PyEval_EvalFrameEx() must not be called with an exception set, because it may clear it (directly or indirectly) and so the - caller looses its exception */ + caller loses its exception */ assert(!PyErr_Occurred()); #endif @@ -4087,7 +4087,7 @@ #ifdef Py_DEBUG /* PyEval_CallObjectWithKeywords() must not be called with an exception set, because it may clear it (directly or indirectly) - and so the caller looses its exception */ + and so the caller loses its exception */ assert(!PyErr_Occurred()); #endif -- Repository URL: https://hg.python.org/cpython From lp_benchmark_robot at intel.com Wed Oct 7 16:00:14 2015 From: lp_benchmark_robot at intel.com (lp_benchmark_robot at intel.com) Date: Wed, 7 Oct 2015 15:00:14 +0100 Subject: [Python-checkins] Benchmark Results for Python Default 2015-10-07 Message-ID: <43070b8d-1f6e-41e7-95d9-afa1126479ba@irsmsx153.ger.corp.intel.com> Results for project python_default-nightly, build date 2015-10-07 06:11:19 commit: b6bfc2dad87ff7e2dffc60e0d9c1dab07fc3c35a revision date: 2015-10-07 04:54:23 +0000 environment: Haswell-EP cpu: Intel(R) Xeon(R) CPU E5-2699 v3 @ 2.30GHz 2x18 cores, stepping 2, LLC 45 MB mem: 128 GB os: CentOS 7.1 kernel: Linux 3.10.0-229.4.2.el7.x86_64 Baseline results were generated using release v3.4.3, with hash b4cbecbc0781e89a309d03b60a1f75f8499250e6 from 2015-02-25 12:15:33+00:00 ------------------------------------------------------------------------------------------ benchmark relative change since change since current rev with std_dev* last run v3.4.3 regrtest PGO ------------------------------------------------------------------------------------------ :-) django_v2 0.39336% -1.60495% 7.49150% 16.66576% :-( pybench 0.15681% -0.04105% -2.05193% 8.57239% :-( regex_v8 2.66830% 0.72266% -4.47608% 0.20973% :-| nbody 0.12791% 1.30144% 0.14520% 9.16120% :-| json_dump_v2 0.33505% -0.49813% -0.70106% 9.53451% :-| normal_startup 1.06599% -0.47143% 0.12012% 5.23171% ------------------------------------------------------------------------------------------ Note: Benchmark results are measured in seconds. * Relative Standard Deviation (Standard Deviation/Average) Our lab does a nightly source pull and build of the Python project and measures performance changes against the previous stable version and the previous nightly measurement. This is provided as a service to the community so that quality issues with current hardware can be identified quickly. Intel technologies' features and benefits depend on system configuration and may require enabled hardware, software or service activation. Performance varies depending on system configuration. 
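The starred column in the table above is defined in the footnote as the standard deviation divided by the average of the individual runs. As a point of reference, that quantity can be computed with the standard library in a couple of lines; the timings below are made-up values, not numbers taken from the report:

    import statistics

    runs = [2.31, 2.29, 2.33]   # hypothetical per-run timings in seconds

    # "Relative Standard Deviation (Standard Deviation/Average)" from the footnote,
    # expressed as a percentage like the values in the table
    rsd = statistics.stdev(runs) / statistics.mean(runs) * 100.0
    print("%.5f%%" % rsd)
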
From lp_benchmark_robot at intel.com Wed Oct 7 16:00:32 2015 From: lp_benchmark_robot at intel.com (lp_benchmark_robot at intel.com) Date: Wed, 7 Oct 2015 15:00:32 +0100 Subject: [Python-checkins] Benchmark Results for Python 2.7 2015-10-07 Message-ID: Results for project python_2.7-nightly, build date 2015-10-07 06:54:17 commit: 37aee118e1a395b58aea0a746e1e3008b04350bd revision date: 2015-10-07 03:12:02 +0000 environment: Haswell-EP cpu: Intel(R) Xeon(R) CPU E5-2699 v3 @ 2.30GHz 2x18 cores, stepping 2, LLC 45 MB mem: 128 GB os: CentOS 7.1 kernel: Linux 3.10.0-229.4.2.el7.x86_64 Baseline results were generated using release v2.7.10, with hash 15c95b7d81dcf821daade360741e00714667653f from 2015-05-23 16:02:14+00:00 ------------------------------------------------------------------------------------------ benchmark relative change since change since current rev with std_dev* last run v2.7.10 regrtest PGO ------------------------------------------------------------------------------------------ :-) django_v2 0.44333% -1.37467% 3.92133% 10.00153% :-) pybench 0.17090% 0.04833% 6.88154% 6.28198% :-( regex_v8 1.18170% -0.12320% -2.56983% 9.26005% :-) nbody 0.16317% -0.00817% 6.70789% 4.14536% :-) json_dump_v2 0.25562% -0.42525% 2.92309% 13.77286% :-( normal_startup 1.77929% 0.07002% -2.62910% 2.82728% :-| ssbench 0.63170% -0.04144% 0.57537% 2.35317% ------------------------------------------------------------------------------------------ Note: Benchmark results for ssbench are measured in requests/second while all other are measured in seconds. * Relative Standard Deviation (Standard Deviation/Average) Our lab does a nightly source pull and build of the Python project and measures performance changes against the previous stable version and the previous nightly measurement. This is provided as a service to the community so that quality issues with current hardware can be identified quickly. Intel technologies' features and benefits depend on system configuration and may require enabled hardware, software or service activation. Performance varies depending on system configuration. From python-checkins at python.org Thu Oct 8 05:35:05 2015 From: python-checkins at python.org (berker.peksag) Date: Thu, 08 Oct 2015 03:35:05 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzE2ODAy?= =?utf-8?q?=3A_Document_fileno_parameter_of_socket=2Esocket=28=29?= Message-ID: <20151008033504.55456.49787@psf.io> https://hg.python.org/cpython/rev/f4606117d571 changeset: 98592:f4606117d571 branch: 3.4 parent: 98587:a9cbf3becfb7 user: Berker Peksag date: Thu Oct 08 06:34:01 2015 +0300 summary: Issue #16802: Document fileno parameter of socket.socket() Patch by Henrik Heimbuerger and Bar Harel. files: Doc/library/socket.rst | 6 +++++- 1 files changed, 5 insertions(+), 1 deletions(-) diff --git a/Doc/library/socket.rst b/Doc/library/socket.rst --- a/Doc/library/socket.rst +++ b/Doc/library/socket.rst @@ -369,7 +369,11 @@ :const:`SOCK_DGRAM`, :const:`SOCK_RAW` or perhaps one of the other ``SOCK_`` constants. The protocol number is usually zero and may be omitted or in the case where the address family is :const:`AF_CAN` the protocol should be one - of :const:`CAN_RAW` or :const:`CAN_BCM`. + of :const:`CAN_RAW` or :const:`CAN_BCM`. If *fileno* is specified, the other + arguments are ignored, causing the socket with the specified file descriptor + to return. Unlike :func:`socket.fromfd`, *fileno* will return the same + socket and not a duplicate. 
This may help close a detached socket using + :meth:`socket.close()`. The newly created socket is :ref:`non-inheritable `. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Thu Oct 8 05:35:04 2015 From: python-checkins at python.org (berker.peksag) Date: Thu, 08 Oct 2015 03:35:04 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_Issue_=2316802=3A_Document_fileno_parameter_of_socket=2Esocket?= =?utf-8?b?KCk=?= Message-ID: <20151008033504.3293.89070@psf.io> https://hg.python.org/cpython/rev/1d14675c6050 changeset: 98593:1d14675c6050 branch: 3.5 parent: 98589:e75e8aeb8ac7 parent: 98592:f4606117d571 user: Berker Peksag date: Thu Oct 08 06:34:31 2015 +0300 summary: Issue #16802: Document fileno parameter of socket.socket() Patch by Henrik Heimbuerger and Bar Harel. files: Doc/library/socket.rst | 6 +++++- 1 files changed, 5 insertions(+), 1 deletions(-) diff --git a/Doc/library/socket.rst b/Doc/library/socket.rst --- a/Doc/library/socket.rst +++ b/Doc/library/socket.rst @@ -384,7 +384,11 @@ :const:`SOCK_DGRAM`, :const:`SOCK_RAW` or perhaps one of the other ``SOCK_`` constants. The protocol number is usually zero and may be omitted or in the case where the address family is :const:`AF_CAN` the protocol should be one - of :const:`CAN_RAW` or :const:`CAN_BCM`. + of :const:`CAN_RAW` or :const:`CAN_BCM`. If *fileno* is specified, the other + arguments are ignored, causing the socket with the specified file descriptor + to return. Unlike :func:`socket.fromfd`, *fileno* will return the same + socket and not a duplicate. This may help close a detached socket using + :meth:`socket.close()`. The newly created socket is :ref:`non-inheritable `. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Thu Oct 8 05:35:05 2015 From: python-checkins at python.org (berker.peksag) Date: Thu, 08 Oct 2015 03:35:05 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2316802=3A_Document_fileno_parameter_of_socket=2E?= =?utf-8?q?socket=28=29?= Message-ID: <20151008033504.7242.23885@psf.io> https://hg.python.org/cpython/rev/9115c63cf3d2 changeset: 98594:9115c63cf3d2 parent: 98591:3291e6132a67 parent: 98593:1d14675c6050 user: Berker Peksag date: Thu Oct 08 06:34:57 2015 +0300 summary: Issue #16802: Document fileno parameter of socket.socket() Patch by Henrik Heimbuerger and Bar Harel. files: Doc/library/socket.rst | 6 +++++- 1 files changed, 5 insertions(+), 1 deletions(-) diff --git a/Doc/library/socket.rst b/Doc/library/socket.rst --- a/Doc/library/socket.rst +++ b/Doc/library/socket.rst @@ -384,7 +384,11 @@ :const:`SOCK_DGRAM`, :const:`SOCK_RAW` or perhaps one of the other ``SOCK_`` constants. The protocol number is usually zero and may be omitted or in the case where the address family is :const:`AF_CAN` the protocol should be one - of :const:`CAN_RAW` or :const:`CAN_BCM`. + of :const:`CAN_RAW` or :const:`CAN_BCM`. If *fileno* is specified, the other + arguments are ignored, causing the socket with the specified file descriptor + to return. Unlike :func:`socket.fromfd`, *fileno* will return the same + socket and not a duplicate. This may help close a detached socket using + :meth:`socket.close()`. The newly created socket is :ref:`non-inheritable `. 
-- Repository URL: https://hg.python.org/cpython From solipsis at pitrou.net Thu Oct 8 10:43:41 2015 From: solipsis at pitrou.net (solipsis at pitrou.net) Date: Thu, 08 Oct 2015 08:43:41 +0000 Subject: [Python-checkins] Daily reference leaks (9115c63cf3d2): sum=61491 Message-ID: <20151008084341.3275.62438@psf.io> results for 9115c63cf3d2 on branch "default" -------------------------------------------- test_capi leaked [5410, 5410, 5410] references, sum=16230 test_capi leaked [1421, 1423, 1423] memory blocks, sum=4267 test_functools leaked [0, 2, 2] memory blocks, sum=4 test_threading leaked [10820, 10820, 10820] references, sum=32460 test_threading leaked [2842, 2844, 2844] memory blocks, sum=8530 Command line was: ['./python', '-m', 'test.regrtest', '-uall', '-R', '3:3:/home/psf-users/antoine/refleaks/reflogYAlTZW', '--timeout', '7200'] From python-checkins at python.org Thu Oct 8 11:27:11 2015 From: python-checkins at python.org (berker.peksag) Date: Thu, 08 Oct 2015 09:27:11 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2316099=3A_RobotFil?= =?utf-8?q?eParser_now_supports_Crawl-delay_and_Request-rate?= Message-ID: <20151008092710.3285.69884@psf.io> https://hg.python.org/cpython/rev/dbed7cacfb7e changeset: 98595:dbed7cacfb7e user: Berker Peksag date: Thu Oct 08 12:27:06 2015 +0300 summary: Issue #16099: RobotFileParser now supports Crawl-delay and Request-rate extensions. Patch by Nikolay Bogoychev. files: Doc/library/urllib.robotparser.rst | 30 +++++- Doc/whatsnew/3.6.rst | 8 + Lib/test/test_robotparser.py | 90 +++++++++++++---- Lib/urllib/robotparser.py | 39 +++++++- Misc/ACKS | 1 + Misc/NEWS | 5 +- 6 files changed, 147 insertions(+), 26 deletions(-) diff --git a/Doc/library/urllib.robotparser.rst b/Doc/library/urllib.robotparser.rst --- a/Doc/library/urllib.robotparser.rst +++ b/Doc/library/urllib.robotparser.rst @@ -53,15 +53,41 @@ Sets the time the ``robots.txt`` file was last fetched to the current time. + .. method:: crawl_delay(useragent) -The following example demonstrates basic use of the RobotFileParser class. + Returns the value of the ``Crawl-delay`` parameter from ``robots.txt`` + for the *useragent* in question. If there is no such parameter or it + doesn't apply to the *useragent* specified or the ``robots.txt`` entry + for this parameter has invalid syntax, return ``None``. + + .. versionadded:: 3.6 + + .. method:: request_rate(useragent) + + Returns the contents of the ``Request-rate`` parameter from + ``robots.txt`` in the form of a :func:`~collections.namedtuple` + ``(requests, seconds)``. If there is no such parameter or it doesn't + apply to the *useragent* specified or the ``robots.txt`` entry for this + parameter has invalid syntax, return ``None``. + + .. versionadded:: 3.6 + + +The following example demonstrates basic use of the :class:`RobotFileParser` +class:: >>> import urllib.robotparser >>> rp = urllib.robotparser.RobotFileParser() >>> rp.set_url("http://www.musi-cal.com/robots.txt") >>> rp.read() + >>> rrate = rp.request_rate("*") + >>> rrate.requests + 3 + >>> rrate.seconds + 20 + >>> rp.crawl_delay("*") + 6 >>> rp.can_fetch("*", "http://www.musi-cal.com/cgi-bin/search?city=San+Francisco") False >>> rp.can_fetch("*", "http://www.musi-cal.com/") True - diff --git a/Doc/whatsnew/3.6.rst b/Doc/whatsnew/3.6.rst --- a/Doc/whatsnew/3.6.rst +++ b/Doc/whatsnew/3.6.rst @@ -119,6 +119,14 @@ (Contributed by Ashley Anderson in :issue:`12006`.) 
+urllib.robotparser +------------------ + +:class:`~urllib.robotparser.RobotFileParser` now supports ``Crawl-delay`` and +``Request-rate`` extensions. +(Contributed by Nikolay Bogoychev in :issue:`16099`.) + + Optimizations ============= diff --git a/Lib/test/test_robotparser.py b/Lib/test/test_robotparser.py --- a/Lib/test/test_robotparser.py +++ b/Lib/test/test_robotparser.py @@ -1,6 +1,7 @@ import io import unittest import urllib.robotparser +from collections import namedtuple from urllib.error import URLError, HTTPError from urllib.request import urlopen from test import support @@ -12,7 +13,8 @@ class RobotTestCase(unittest.TestCase): - def __init__(self, index=None, parser=None, url=None, good=None, agent=None): + def __init__(self, index=None, parser=None, url=None, good=None, + agent=None, request_rate=None, crawl_delay=None): # workaround to make unittest discovery work (see #17066) if not isinstance(index, int): return @@ -25,6 +27,8 @@ self.url = url self.good = good self.agent = agent + self.request_rate = request_rate + self.crawl_delay = crawl_delay def runTest(self): if isinstance(self.url, tuple): @@ -34,6 +38,18 @@ agent = self.agent if self.good: self.assertTrue(self.parser.can_fetch(agent, url)) + self.assertEqual(self.parser.crawl_delay(agent), self.crawl_delay) + # if we have actual values for request rate + if self.request_rate and self.parser.request_rate(agent): + self.assertEqual( + self.parser.request_rate(agent).requests, + self.request_rate.requests + ) + self.assertEqual( + self.parser.request_rate(agent).seconds, + self.request_rate.seconds + ) + self.assertEqual(self.parser.request_rate(agent), self.request_rate) else: self.assertFalse(self.parser.can_fetch(agent, url)) @@ -43,15 +59,17 @@ tests = unittest.TestSuite() def RobotTest(index, robots_txt, good_urls, bad_urls, - agent="test_robotparser"): + request_rate, crawl_delay, agent="test_robotparser"): lines = io.StringIO(robots_txt).readlines() parser = urllib.robotparser.RobotFileParser() parser.parse(lines) for url in good_urls: - tests.addTest(RobotTestCase(index, parser, url, 1, agent)) + tests.addTest(RobotTestCase(index, parser, url, 1, agent, + request_rate, crawl_delay)) for url in bad_urls: - tests.addTest(RobotTestCase(index, parser, url, 0, agent)) + tests.addTest(RobotTestCase(index, parser, url, 0, agent, + request_rate, crawl_delay)) # Examples from http://www.robotstxt.org/wc/norobots.html (fetched 2002) @@ -65,14 +83,18 @@ good = ['/','/test.html'] bad = ['/cyberworld/map/index.html','/tmp/xxx','/foo.html'] +request_rate = None +crawl_delay = None -RobotTest(1, doc, good, bad) +RobotTest(1, doc, good, bad, request_rate, crawl_delay) # 2. doc = """ # robots.txt for http://www.example.com/ User-agent: * +Crawl-delay: 1 +Request-rate: 3/15 Disallow: /cyberworld/map/ # This is an infinite virtual URL space # Cybermapper knows where to go. @@ -83,8 +105,10 @@ good = ['/','/test.html',('cybermapper','/cyberworld/map/index.html')] bad = ['/cyberworld/map/index.html'] +request_rate = None # The parameters should be equal to None since they +crawl_delay = None # don't apply to the cybermapper user agent -RobotTest(2, doc, good, bad) +RobotTest(2, doc, good, bad, request_rate, crawl_delay) # 3. doc = """ @@ -95,14 +119,18 @@ good = [] bad = ['/cyberworld/map/index.html','/','/tmp/'] +request_rate = None +crawl_delay = None -RobotTest(3, doc, good, bad) +RobotTest(3, doc, good, bad, request_rate, crawl_delay) # Examples from http://www.robotstxt.org/wc/norobots-rfc.html (fetched 2002) # 4. 
doc = """ User-agent: figtree +Crawl-delay: 3 +Request-rate: 9/30 Disallow: /tmp Disallow: /a%3cd.html Disallow: /a%2fb.html @@ -115,8 +143,17 @@ '/~joe/index.html' ] -RobotTest(4, doc, good, bad, 'figtree') -RobotTest(5, doc, good, bad, 'FigTree Robot libwww-perl/5.04') +request_rate = namedtuple('req_rate', 'requests seconds') +request_rate.requests = 9 +request_rate.seconds = 30 +crawl_delay = 3 +request_rate_bad = None # not actually tested, but we still need to parse it +crawl_delay_bad = None # in order to accommodate the input parameters + + +RobotTest(4, doc, good, bad, request_rate, crawl_delay, 'figtree' ) +RobotTest(5, doc, good, bad, request_rate_bad, crawl_delay_bad, + 'FigTree Robot libwww-perl/5.04') # 6. doc = """ @@ -125,14 +162,18 @@ Disallow: /a%3Cd.html Disallow: /a/b.html Disallow: /%7ejoe/index.html +Crawl-delay: 3 +Request-rate: 9/banana """ good = ['/tmp',] # XFAIL: '/a%2fb.html' bad = ['/tmp/','/tmp/a.html', '/a%3cd.html','/a%3Cd.html',"/a/b.html", '/%7Ejoe/index.html'] +crawl_delay = 3 +request_rate = None # since request rate has invalid syntax, return None -RobotTest(6, doc, good, bad) +RobotTest(6, doc, good, bad, None, None) # From bug report #523041 @@ -140,12 +181,16 @@ doc = """ User-Agent: * Disallow: /. +Crawl-delay: pears """ good = ['/foo.html'] -bad = [] # Bug report says "/" should be denied, but that is not in the RFC +bad = [] # bug report says "/" should be denied, but that is not in the RFC -RobotTest(7, doc, good, bad) +crawl_delay = None # since crawl delay has invalid syntax, return None +request_rate = None + +RobotTest(7, doc, good, bad, crawl_delay, request_rate) # From Google: http://www.google.com/support/webmasters/bin/answer.py?hl=en&answer=40364 @@ -154,12 +199,15 @@ User-agent: Googlebot Allow: /folder1/myfile.html Disallow: /folder1/ +Request-rate: whale/banana """ good = ['/folder1/myfile.html'] bad = ['/folder1/anotherfile.html'] +crawl_delay = None +request_rate = None # invalid syntax, return none -RobotTest(8, doc, good, bad, agent="Googlebot") +RobotTest(8, doc, good, bad, crawl_delay, request_rate, agent="Googlebot") # 9. This file is incorrect because "Googlebot" is a substring of # "Googlebot-Mobile", so test 10 works just like test 9. @@ -174,12 +222,12 @@ good = [] bad = ['/something.jpg'] -RobotTest(9, doc, good, bad, agent="Googlebot") +RobotTest(9, doc, good, bad, None, None, agent="Googlebot") good = [] bad = ['/something.jpg'] -RobotTest(10, doc, good, bad, agent="Googlebot-Mobile") +RobotTest(10, doc, good, bad, None, None, agent="Googlebot-Mobile") # 11. Get the order correct. doc = """ @@ -193,12 +241,12 @@ good = [] bad = ['/something.jpg'] -RobotTest(11, doc, good, bad, agent="Googlebot") +RobotTest(11, doc, good, bad, None, None, agent="Googlebot") good = ['/something.jpg'] bad = [] -RobotTest(12, doc, good, bad, agent="Googlebot-Mobile") +RobotTest(12, doc, good, bad, None, None, agent="Googlebot-Mobile") # 13. Google also got the order wrong in #8. You need to specify the @@ -212,7 +260,7 @@ good = ['/folder1/myfile.html'] bad = ['/folder1/anotherfile.html'] -RobotTest(13, doc, good, bad, agent="googlebot") +RobotTest(13, doc, good, bad, None, None, agent="googlebot") # 14. For issue #6325 (query string support) @@ -224,7 +272,7 @@ good = ['/some/path'] bad = ['/some/path?name=value'] -RobotTest(14, doc, good, bad) +RobotTest(14, doc, good, bad, None, None) # 15. 
For issue #4108 (obey first * entry) doc = """ @@ -238,7 +286,7 @@ good = ['/another/path'] bad = ['/some/path'] -RobotTest(15, doc, good, bad) +RobotTest(15, doc, good, bad, None, None) # 16. Empty query (issue #17403). Normalizing the url first. doc = """ @@ -250,7 +298,7 @@ good = ['/some/path?'] bad = ['/another/path?'] -RobotTest(16, doc, good, bad) +RobotTest(16, doc, good, bad, None, None) class RobotHandler(BaseHTTPRequestHandler): diff --git a/Lib/urllib/robotparser.py b/Lib/urllib/robotparser.py --- a/Lib/urllib/robotparser.py +++ b/Lib/urllib/robotparser.py @@ -10,7 +10,9 @@ http://www.robotstxt.org/norobots-rfc.txt """ -import urllib.parse, urllib.request +import collections +import urllib.parse +import urllib.request __all__ = ["RobotFileParser"] @@ -120,10 +122,29 @@ if state != 0: entry.rulelines.append(RuleLine(line[1], True)) state = 2 + elif line[0] == "crawl-delay": + if state != 0: + # before trying to convert to int we need to make + # sure that robots.txt has valid syntax otherwise + # it will crash + if line[1].strip().isdigit(): + entry.delay = int(line[1]) + state = 2 + elif line[0] == "request-rate": + if state != 0: + numbers = line[1].split('/') + # check if all values are sane + if (len(numbers) == 2 and numbers[0].strip().isdigit() + and numbers[1].strip().isdigit()): + req_rate = collections.namedtuple('req_rate', + 'requests seconds') + entry.req_rate = req_rate + entry.req_rate.requests = int(numbers[0]) + entry.req_rate.seconds = int(numbers[1]) + state = 2 if state == 2: self._add_entry(entry) - def can_fetch(self, useragent, url): """using the parsed robots.txt decide if useragent can fetch url""" if self.disallow_all: @@ -153,6 +174,18 @@ # agent not found ==> access granted return True + def crawl_delay(self, useragent): + for entry in self.entries: + if entry.applies_to(useragent): + return entry.delay + return None + + def request_rate(self, useragent): + for entry in self.entries: + if entry.applies_to(useragent): + return entry.req_rate + return None + def __str__(self): return ''.join([str(entry) + "\n" for entry in self.entries]) @@ -180,6 +213,8 @@ def __init__(self): self.useragents = [] self.rulelines = [] + self.delay = None + self.req_rate = None def __str__(self): ret = [] diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -151,6 +151,7 @@ Paul Boddie Matthew Boedicker Robin Boerdijk +Nikolay Bogoychev David Bolen Wouter Bolsterlee Gawain Bolton diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -1,4 +1,4 @@ -+++++++++++ +?+++++++++++ Python News +++++++++++ @@ -46,6 +46,9 @@ Library ------- +- Issue #16099: RobotFileParser now supports Crawl-delay and Request-rate + extensions. Patch by Nikolay Bogoychev. + - Issue #25316: distutils raises OSError instead of DistutilsPlatformError when MSVC is not installed. 
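A compact way to see the two new accessors in action is to feed the parser an in-memory rule set; the user agent name and the rule values below are made up for illustration:

    import urllib.robotparser

    lines = [
        "User-agent: ExampleBot",
        "Crawl-delay: 3",
        "Request-rate: 9/30",
        "Disallow: /private/",
    ]

    rp = urllib.robotparser.RobotFileParser()
    rp.parse(lines)

    print(rp.crawl_delay("ExampleBot"))    # 3
    rate = rp.request_rate("ExampleBot")   # object with .requests and .seconds
    print(rate.requests, rate.seconds)     # 9 30
    print(rp.crawl_delay("OtherBot"))      # None: no entry applies to this agent

Entries with malformed values (for example ``Crawl-delay: pears``) simply yield None from these methods, as exercised by the new test cases above.
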
-- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Thu Oct 8 12:58:53 2015 From: python-checkins at python.org (berker.peksag) Date: Thu, 08 Oct 2015 10:58:53 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Sort_module_names_in_whats?= =?utf-8?q?new/3=2E6=2Erst?= Message-ID: <20151008105853.20775.7876@psf.io> https://hg.python.org/cpython/rev/ed47b841f02e changeset: 98596:ed47b841f02e user: Berker Peksag date: Thu Oct 08 13:58:49 2015 +0300 summary: Sort module names in whatsnew/3.6.rst files: Doc/whatsnew/3.6.rst | 23 ++++++++++++----------- 1 files changed, 12 insertions(+), 11 deletions(-) diff --git a/Doc/whatsnew/3.6.rst b/Doc/whatsnew/3.6.rst --- a/Doc/whatsnew/3.6.rst +++ b/Doc/whatsnew/3.6.rst @@ -95,12 +95,21 @@ Improved Modules ================ +datetime +-------- + +:meth:`datetime.stftime ` and +:meth:`date.stftime ` methods now support ISO 8601 date +directives ``%G``, ``%u`` and ``%V``. +(Contributed by Ashley Anderson in :issue:`12006`.) + + operator -------- -* New object :data:`operator.subscript` makes it easier to create complex - indexers. For example: ``subscript[0:10:2] == slice(0, 10, 2)`` - (Contributed by Joe Jevnik in :issue:`24379`.) +New object :data:`operator.subscript` makes it easier to create complex +indexers. For example: ``subscript[0:10:2] == slice(0, 10, 2)`` +(Contributed by Joe Jevnik in :issue:`24379`.) rlcomplete @@ -110,14 +119,6 @@ with underscores. A space or a colon can be added after completed keyword. (Contributed by Serhiy Storchaka in :issue:`25011` and :issue:`25209`.) -datetime --------- - -* :meth:`datetime.stftime ` and - :meth:`date.stftime ` methods now support ISO 8601 - date directives ``%G``, ``%u`` and ``%V``. - (Contributed by Ashley Anderson in :issue:`12006`.) - urllib.robotparser ------------------ -- Repository URL: https://hg.python.org/cpython From lp_benchmark_robot at intel.com Thu Oct 8 16:12:03 2015 From: lp_benchmark_robot at intel.com (lp_benchmark_robot at intel.com) Date: Thu, 8 Oct 2015 15:12:03 +0100 Subject: [Python-checkins] Benchmark Results for Python Default 2015-10-08 Message-ID: <4af6cb8c-0688-4ba6-adb7-3bf6f67bab31@irsmsx103.ger.corp.intel.com> Results for project python_default-nightly, build date 2015-10-08 03:02:03 commit: 3291e6132a674606af028be2d500701e5ff8285a revision date: 2015-10-07 11:15:15 +0000 environment: Haswell-EP cpu: Intel(R) Xeon(R) CPU E5-2699 v3 @ 2.30GHz 2x18 cores, stepping 2, LLC 45 MB mem: 128 GB os: CentOS 7.1 kernel: Linux 3.10.0-229.4.2.el7.x86_64 Baseline results were generated using release v3.4.3, with hash b4cbecbc0781e89a309d03b60a1f75f8499250e6 from 2015-02-25 12:15:33+00:00 ------------------------------------------------------------------------------------------ benchmark relative change since change since current rev with std_dev* last run v3.4.3 regrtest PGO ------------------------------------------------------------------------------------------ :-) django_v2 0.46296% 1.14672% 8.55231% 14.86189% :-( pybench 0.17124% -0.04103% -2.09380% 8.78514% :-( regex_v8 2.65849% 0.00322% -4.47271% 4.96087% :-| nbody 0.06992% -0.35455% -0.20884% 9.39964% :-| json_dump_v2 0.34646% 0.42105% -0.27706% 10.07059% :-| normal_startup 0.66576% 0.53474% 0.25984% 5.40658% ------------------------------------------------------------------------------------------ Note: Benchmark results are measured in seconds. 
* Relative Standard Deviation (Standard Deviation/Average) Our lab does a nightly source pull and build of the Python project and measures performance changes against the previous stable version and the previous nightly measurement. This is provided as a service to the community so that quality issues with current hardware can be identified quickly. Intel technologies' features and benefits depend on system configuration and may require enabled hardware, software or service activation. Performance varies depending on system configuration. From lp_benchmark_robot at intel.com Thu Oct 8 16:12:50 2015 From: lp_benchmark_robot at intel.com (lp_benchmark_robot at intel.com) Date: Thu, 8 Oct 2015 15:12:50 +0100 Subject: [Python-checkins] Benchmark Results for Python 2.7 2015-10-08 Message-ID: <25570a22-09b1-4a5f-8d47-9889dcedb426@irsmsx103.ger.corp.intel.com> Results for project python_2.7-nightly, build date 2015-10-08 03:45:04 commit: 04815b55227f8855eb67c41361310aaa56d90626 revision date: 2015-10-07 10:39:13 +0000 environment: Haswell-EP cpu: Intel(R) Xeon(R) CPU E5-2699 v3 @ 2.30GHz 2x18 cores, stepping 2, LLC 45 MB mem: 128 GB os: CentOS 7.1 kernel: Linux 3.10.0-229.4.2.el7.x86_64 Baseline results were generated using release v2.7.10, with hash 15c95b7d81dcf821daade360741e00714667653f from 2015-05-23 16:02:14+00:00 ------------------------------------------------------------------------------------------ benchmark relative change since change since current rev with std_dev* last run v2.7.10 regrtest PGO ------------------------------------------------------------------------------------------ :-) django_v2 0.18215% 2.21212% 6.04671% 9.57055% :-) pybench 0.17620% -0.07253% 6.81400% 6.64788% :-( regex_v8 1.08666% 0.34510% -2.21586% 7.62728% :-) nbody 0.14237% 0.02137% 6.72783% 5.82344% :-) json_dump_v2 0.32811% 0.42392% 3.33462% 14.85349% :-| normal_startup 1.83767% 0.94491% -1.65935% 2.70445% :-| ssbench 0.42751% 0.67199% 1.25123% 1.26939% ------------------------------------------------------------------------------------------ Note: Benchmark results for ssbench are measured in requests/second while all other are measured in seconds. * Relative Standard Deviation (Standard Deviation/Average) Our lab does a nightly source pull and build of the Python project and measures performance changes against the previous stable version and the previous nightly measurement. This is provided as a service to the community so that quality issues with current hardware can be identified quickly. Intel technologies' features and benefits depend on system configuration and may require enabled hardware, software or service activation. Performance varies depending on system configuration. From python-checkins at python.org Thu Oct 8 18:08:04 2015 From: python-checkins at python.org (steve.dower) Date: Thu, 08 Oct 2015 16:08:04 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E5=29=3A_Removes_deprec?= =?utf-8?q?ated_-n_option_from_buildbot_script=2E?= Message-ID: <20151008160804.7236.9559@psf.io> https://hg.python.org/cpython/rev/0b7df139a5f7 changeset: 98599:0b7df139a5f7 branch: 3.5 parent: 98597:69c4fa62b608 user: Steve Dower date: Thu Oct 08 09:06:17 2015 -0700 summary: Removes deprecated -n option from buildbot script. 
files: Tools/buildbot/test.bat | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Tools/buildbot/test.bat b/Tools/buildbot/test.bat --- a/Tools/buildbot/test.bat +++ b/Tools/buildbot/test.bat @@ -16,4 +16,4 @@ if NOT "%1"=="" (set regrtest_args=%regrtest_args% %1) & shift & goto CheckOpts echo on -call "%here%..\..\PCbuild\rt.bat" %rt_opts% -uall -rwW -n --timeout=3600 %regrtest_args% +call "%here%..\..\PCbuild\rt.bat" %rt_opts% -uall -rwW --timeout=3600 %regrtest_args% -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Thu Oct 8 18:08:04 2015 From: python-checkins at python.org (steve.dower) Date: Thu, 08 Oct 2015 16:08:04 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2323919=3A_Prevents?= =?utf-8?q?_assert_dialogs_appearing_in_the_test_suite=2E?= Message-ID: <20151008160804.471.25310@psf.io> https://hg.python.org/cpython/rev/62897db9ae51 changeset: 98598:62897db9ae51 parent: 98596:ed47b841f02e user: Steve Dower date: Thu Oct 08 09:05:36 2015 -0700 summary: Issue #23919: Prevents assert dialogs appearing in the test suite. files: Lib/test/libregrtest/cmdline.py | 4 ++++ Lib/test/libregrtest/setup.py | 12 +++++++++--- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/Lib/test/libregrtest/cmdline.py b/Lib/test/libregrtest/cmdline.py --- a/Lib/test/libregrtest/cmdline.py +++ b/Lib/test/libregrtest/cmdline.py @@ -304,6 +304,10 @@ if ns.pgo and (ns.verbose or ns.verbose2 or ns.verbose3): parser.error("--pgo/-v don't go together!") + if ns.nowindows: + print("Warning: the --nowindows (-n) option is deprecated. " + "Use -vv to display assertions in stderr.", file=sys.stderr) + if ns.quiet: ns.verbose = 0 if ns.timeout is not None: diff --git a/Lib/test/libregrtest/setup.py b/Lib/test/libregrtest/setup.py --- a/Lib/test/libregrtest/setup.py +++ b/Lib/test/libregrtest/setup.py @@ -75,8 +75,11 @@ if ns.threshold is not None: gc.set_threshold(ns.threshold) - if ns.nowindows: + try: import msvcrt + except ImportError: + pass + else: msvcrt.SetErrorMode(msvcrt.SEM_FAILCRITICALERRORS| msvcrt.SEM_NOALIGNMENTFAULTEXCEPT| msvcrt.SEM_NOGPFAULTERRORBOX| @@ -88,8 +91,11 @@ pass else: for m in [msvcrt.CRT_WARN, msvcrt.CRT_ERROR, msvcrt.CRT_ASSERT]: - msvcrt.CrtSetReportMode(m, msvcrt.CRTDBG_MODE_FILE) - msvcrt.CrtSetReportFile(m, msvcrt.CRTDBG_FILE_STDERR) + if ns.verbose and ns.verbose >= 2: + msvcrt.CrtSetReportMode(m, msvcrt.CRTDBG_MODE_FILE) + msvcrt.CrtSetReportFile(m, msvcrt.CRTDBG_FILE_STDERR) + else: + msvcrt.CrtSetReportMode(m, 0) support.use_resources = ns.use_resources -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Thu Oct 8 18:08:35 2015 From: python-checkins at python.org (steve.dower) Date: Thu, 08 Oct 2015 16:08:35 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Merge_from_3=2E5?= Message-ID: <20151008160805.70970.292@psf.io> https://hg.python.org/cpython/rev/0b7cccd9c7d5 changeset: 98600:0b7cccd9c7d5 parent: 98598:62897db9ae51 parent: 98599:0b7df139a5f7 user: Steve Dower date: Thu Oct 08 09:06:39 2015 -0700 summary: Merge from 3.5 files: Misc/NEWS | 2 ++ Tools/buildbot/test.bat | 2 +- 2 files changed, 3 insertions(+), 1 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -417,6 +417,8 @@ Tests ----- +- Issue #23919: Prevents assert dialogs appearing in the test suite. + - PCbuild\rt.bat now accepts an unlimited number of arguments to pass along to regrtest.py. Previously there was a limit of 9. 
diff --git a/Tools/buildbot/test.bat b/Tools/buildbot/test.bat --- a/Tools/buildbot/test.bat +++ b/Tools/buildbot/test.bat @@ -16,4 +16,4 @@ if NOT "%1"=="" (set regrtest_args=%regrtest_args% %1) & shift & goto CheckOpts echo on -call "%here%..\..\PCbuild\rt.bat" %rt_opts% -uall -rwW -n --timeout=3600 %regrtest_args% +call "%here%..\..\PCbuild\rt.bat" %rt_opts% -uall -rwW --timeout=3600 %regrtest_args% -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Thu Oct 8 18:08:35 2015 From: python-checkins at python.org (steve.dower) Date: Thu, 08 Oct 2015 16:08:35 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy41KTogSXNzdWUgIzIzOTE5?= =?utf-8?q?=3A_Prevents_assert_dialogs_appearing_in_the_test_suite=2E?= Message-ID: <20151008160804.70990.52712@psf.io> https://hg.python.org/cpython/rev/69c4fa62b608 changeset: 98597:69c4fa62b608 branch: 3.5 parent: 98593:1d14675c6050 user: Steve Dower date: Thu Oct 08 08:56:06 2015 -0700 summary: Issue #23919: Prevents assert dialogs appearing in the test suite. files: Lib/test/regrtest.py | 13 +++++++++++-- Misc/NEWS | 2 ++ 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/Lib/test/regrtest.py b/Lib/test/regrtest.py --- a/Lib/test/regrtest.py +++ b/Lib/test/regrtest.py @@ -511,7 +511,13 @@ import gc gc.set_threshold(ns.threshold) if ns.nowindows: + print('The --nowindows (-n) option is deprecated. ' + 'Use -vv to display assertions in stderr.') + try: import msvcrt + except ImportError: + pass + else: msvcrt.SetErrorMode(msvcrt.SEM_FAILCRITICALERRORS| msvcrt.SEM_NOALIGNMENTFAULTEXCEPT| msvcrt.SEM_NOGPFAULTERRORBOX| @@ -523,8 +529,11 @@ pass else: for m in [msvcrt.CRT_WARN, msvcrt.CRT_ERROR, msvcrt.CRT_ASSERT]: - msvcrt.CrtSetReportMode(m, msvcrt.CRTDBG_MODE_FILE) - msvcrt.CrtSetReportFile(m, msvcrt.CRTDBG_FILE_STDERR) + if ns.verbose and ns.verbose >= 2: + msvcrt.CrtSetReportMode(m, msvcrt.CRTDBG_MODE_FILE) + msvcrt.CrtSetReportFile(m, msvcrt.CRTDBG_FILE_STDERR) + else: + msvcrt.CrtSetReportMode(m, 0) if ns.wait: input("Press any key to continue...") diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -278,6 +278,8 @@ Tests ----- +- Issue #23919: Prevents assert dialogs appearing in the test suite. + - PCbuild\rt.bat now accepts an unlimited number of arguments to pass along to regrtest.py. Previously there was a limit of 9. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Thu Oct 8 19:01:27 2015 From: python-checkins at python.org (steve.dower) Date: Thu, 08 Oct 2015 17:01:27 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy41KTogSXNzdWUgIzI1MDg5?= =?utf-8?q?=3A_Adds_logging_to_installer_for_case_where_launcher_is_not_se?= =?utf-8?q?lected?= Message-ID: <20151008170113.70963.73432@psf.io> https://hg.python.org/cpython/rev/1e99ba6b7c98 changeset: 98601:1e99ba6b7c98 branch: 3.5 parent: 98599:0b7df139a5f7 user: Steve Dower date: Thu Oct 08 09:55:49 2015 -0700 summary: Issue #25089: Adds logging to installer for case where launcher is not selected on upgrade. files: Misc/NEWS | 3 +++ Tools/msi/bundle/bootstrap/PythonBootstrapperApplication.cpp | 7 +++++++ 2 files changed, 10 insertions(+), 0 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -297,6 +297,9 @@ Windows ------- +- Issue #25089: Adds logging to installer for case where launcher is not + selected on upgrade. 
+ - Issue #25165: Windows uninstallation should not remove launcher if other versions remain diff --git a/Tools/msi/bundle/bootstrap/PythonBootstrapperApplication.cpp b/Tools/msi/bundle/bootstrap/PythonBootstrapperApplication.cpp --- a/Tools/msi/bundle/bootstrap/PythonBootstrapperApplication.cpp +++ b/Tools/msi/bundle/bootstrap/PythonBootstrapperApplication.cpp @@ -687,6 +687,13 @@ if (hr == S_FALSE) { hr = LoadLauncherStateFromKey(_engine, HKEY_LOCAL_MACHINE); } + if (FAILED(hr)) { + BalLog( + BOOTSTRAPPER_LOG_LEVEL_ERROR, + "Failed to load launcher state: error code 0x%08X", + hr + ); + } } else if (BOOTSTRAPPER_RELATED_OPERATION_NONE == operation) { if (_command.action == BOOTSTRAPPER_ACTION_INSTALL) { LOC_STRING *pLocString = nullptr; -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Thu Oct 8 19:01:31 2015 From: python-checkins at python.org (steve.dower) Date: Thu, 08 Oct 2015 17:01:31 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Merge_from_3=2E5?= Message-ID: <20151008170114.128826.76489@psf.io> https://hg.python.org/cpython/rev/898500fa5de3 changeset: 98602:898500fa5de3 parent: 98600:0b7cccd9c7d5 parent: 98601:1e99ba6b7c98 user: Steve Dower date: Thu Oct 08 10:00:55 2015 -0700 summary: Merge from 3.5 files: Misc/NEWS | 3 +++ Tools/msi/bundle/bootstrap/PythonBootstrapperApplication.cpp | 7 +++++++ 2 files changed, 10 insertions(+), 0 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -437,6 +437,9 @@ Windows ------- +- Issue #25089: Adds logging to installer for case where launcher is not + selected on upgrade. + - Issue #25165: Windows uninstallation should not remove launcher if other versions remain diff --git a/Tools/msi/bundle/bootstrap/PythonBootstrapperApplication.cpp b/Tools/msi/bundle/bootstrap/PythonBootstrapperApplication.cpp --- a/Tools/msi/bundle/bootstrap/PythonBootstrapperApplication.cpp +++ b/Tools/msi/bundle/bootstrap/PythonBootstrapperApplication.cpp @@ -687,6 +687,13 @@ if (hr == S_FALSE) { hr = LoadLauncherStateFromKey(_engine, HKEY_LOCAL_MACHINE); } + if (FAILED(hr)) { + BalLog( + BOOTSTRAPPER_LOG_LEVEL_ERROR, + "Failed to load launcher state: error code 0x%08X", + hr + ); + } } else if (BOOTSTRAPPER_RELATED_OPERATION_NONE == operation) { if (_command.action == BOOTSTRAPPER_ACTION_INSTALL) { LOC_STRING *pLocString = nullptr; -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Thu Oct 8 20:34:35 2015 From: python-checkins at python.org (steve.dower) Date: Thu, 08 Oct 2015 18:34:35 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Fix_missing_import_in_libr?= =?utf-8?q?egrtest=2E?= Message-ID: <20151008183434.451.16012@psf.io> https://hg.python.org/cpython/rev/ee1ef5a97e8f changeset: 98603:ee1ef5a97e8f user: Steve Dower date: Thu Oct 08 11:34:07 2015 -0700 summary: Fix missing import in libregrtest. 
files: Lib/test/libregrtest/cmdline.py | 1 + 1 files changed, 1 insertions(+), 0 deletions(-) diff --git a/Lib/test/libregrtest/cmdline.py b/Lib/test/libregrtest/cmdline.py --- a/Lib/test/libregrtest/cmdline.py +++ b/Lib/test/libregrtest/cmdline.py @@ -1,5 +1,6 @@ import argparse import os +import sys from test import support -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Fri Oct 9 00:59:52 2015 From: python-checkins at python.org (victor.stinner) Date: Thu, 08 Oct 2015 22:59:52 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2325318=3A_Add_=5FP?= =?utf-8?q?yBytesWriter_API?= Message-ID: <20151008225951.18380.95196@psf.io> https://hg.python.org/cpython/rev/1a2175149c5e changeset: 98604:1a2175149c5e user: Victor Stinner date: Fri Oct 09 00:33:49 2015 +0200 summary: Issue #25318: Add _PyBytesWriter API Add a new private API to optimize Unicode encoders. It uses a small buffer allocated on the stack and supports overallocation. Use _PyBytesWriter API for UCS1 (ASCII and Latin1) and UTF-8 encoders. Enable overallocation for the UTF-8 encoder with error handlers. unicode_encode_ucs1(): initialize collend to collstart+1 to not check the current character twice, we already know that it is not ASCII. files: Include/unicodeobject.h | 2 +- Objects/stringlib/codecs.h | 84 +---- Objects/unicodeobject.c | 320 +++++++++++++++++++----- 3 files changed, 271 insertions(+), 135 deletions(-) diff --git a/Include/unicodeobject.h b/Include/unicodeobject.h --- a/Include/unicodeobject.h +++ b/Include/unicodeobject.h @@ -908,7 +908,7 @@ /* minimum character (default: 127, ASCII) */ Py_UCS4 min_char; - /* If non-zero, overallocate the buffer by 25% (default: 0). */ + /* If non-zero, overallocate the buffer (default: 0). */ unsigned char overallocate; /* If readonly is 1, buffer is a shared string (cannot be modified) diff --git a/Objects/stringlib/codecs.h b/Objects/stringlib/codecs.h --- a/Objects/stringlib/codecs.h +++ b/Objects/stringlib/codecs.h @@ -263,10 +263,7 @@ #define MAX_SHORT_UNICHARS 300 /* largest size we'll do on the stack */ Py_ssize_t i; /* index into s of next input byte */ - PyObject *result; /* result string object */ char *p; /* next free byte in output buffer */ - Py_ssize_t nallocated; /* number of result bytes allocated */ - Py_ssize_t nneeded; /* number of result bytes needed */ #if STRINGLIB_SIZEOF_CHAR > 1 PyObject *error_handler_obj = NULL; PyObject *exc = NULL; @@ -275,38 +272,24 @@ #endif #if STRINGLIB_SIZEOF_CHAR == 1 const Py_ssize_t max_char_size = 2; - char stackbuf[MAX_SHORT_UNICHARS * 2]; #elif STRINGLIB_SIZEOF_CHAR == 2 const Py_ssize_t max_char_size = 3; - char stackbuf[MAX_SHORT_UNICHARS * 3]; #else /* STRINGLIB_SIZEOF_CHAR == 4 */ const Py_ssize_t max_char_size = 4; - char stackbuf[MAX_SHORT_UNICHARS * 4]; #endif + _PyBytesWriter writer; assert(size >= 0); + _PyBytesWriter_Init(&writer); - if (size <= MAX_SHORT_UNICHARS) { - /* Write into the stack buffer; nallocated can't overflow. - * At the end, we'll allocate exactly as much heap space as it - * turns out we need. - */ - nallocated = Py_SAFE_DOWNCAST(sizeof(stackbuf), size_t, int); - result = NULL; /* will allocate after we're done */ - p = stackbuf; + if (size > PY_SSIZE_T_MAX / max_char_size) { + /* integer overflow */ + return PyErr_NoMemory(); } - else { - if (size > PY_SSIZE_T_MAX / max_char_size) { - /* integer overflow */ - return PyErr_NoMemory(); - } - /* Overallocate on the heap, and give the excess back at the end. 
*/ - nallocated = size * max_char_size; - result = PyBytes_FromStringAndSize(NULL, nallocated); - if (result == NULL) - return NULL; - p = PyBytes_AS_STRING(result); - } + + p = _PyBytesWriter_Alloc(&writer, size * max_char_size); + if (p == NULL) + return NULL; for (i = 0; i < size;) { Py_UCS4 ch = data[i++]; @@ -338,6 +321,9 @@ while ((endpos < size) && Py_UNICODE_IS_SURROGATE(data[endpos])) endpos++; + /* Only overallocate the buffer if it's not the last write */ + writer.overallocate = (endpos < size); + switch (error_handler) { case _Py_ERROR_REPLACE: @@ -387,29 +373,10 @@ repsize = PyUnicode_GET_LENGTH(rep); if (repsize > max_char_size) { - Py_ssize_t offset; - - if (result == NULL) - offset = p - stackbuf; - else - offset = p - PyBytes_AS_STRING(result); - - if (nallocated > PY_SSIZE_T_MAX - repsize + max_char_size) { - /* integer overflow */ - PyErr_NoMemory(); + p = _PyBytesWriter_Prepare(&writer, p, + repsize - max_char_size); + if (p == NULL) goto error; - } - nallocated += repsize - max_char_size; - if (result != NULL) { - if (_PyBytes_Resize(&result, nallocated) < 0) - goto error; - } else { - result = PyBytes_FromStringAndSize(NULL, nallocated); - if (result == NULL) - goto error; - Py_MEMCPY(PyBytes_AS_STRING(result), stackbuf, offset); - } - p = PyBytes_AS_STRING(result) + offset; } if (PyBytes_Check(rep)) { @@ -437,6 +404,10 @@ i = newpos; } + + /* If overallocation was disabled, ensure that it was the last + write. Otherwise, we missed an optimization */ + assert(writer.overallocate || i == size); } else #if STRINGLIB_SIZEOF_CHAR > 2 @@ -461,31 +432,18 @@ #endif /* STRINGLIB_SIZEOF_CHAR > 1 */ } - if (result == NULL) { - /* This was stack allocated. */ - nneeded = p - stackbuf; - assert(nneeded <= nallocated); - result = PyBytes_FromStringAndSize(stackbuf, nneeded); - } - else { - /* Cut back to size actually needed. */ - nneeded = p - PyBytes_AS_STRING(result); - assert(nneeded <= nallocated); - _PyBytes_Resize(&result, nneeded); - } - #if STRINGLIB_SIZEOF_CHAR > 1 Py_XDECREF(error_handler_obj); Py_XDECREF(exc); #endif - return result; + return _PyBytesWriter_Finish(&writer, p); #if STRINGLIB_SIZEOF_CHAR > 1 error: Py_XDECREF(rep); Py_XDECREF(error_handler_obj); Py_XDECREF(exc); - Py_XDECREF(result); + _PyBytesWriter_Dealloc(&writer); return NULL; #endif diff --git a/Objects/unicodeobject.c b/Objects/unicodeobject.c --- a/Objects/unicodeobject.c +++ b/Objects/unicodeobject.c @@ -163,6 +163,14 @@ *_to++ = (to_type) *_iter++; \ } while (0) +#ifdef MS_WINDOWS + /* On Windows, overallocate by 50% is the best factor */ +# define OVERALLOCATE_FACTOR 2 +#else + /* On Linux, overallocate by 25% is the best factor */ +# define OVERALLOCATE_FACTOR 4 +#endif + /* This dictionary holds all interned unicode strings. Note that references to strings in this dictionary are *not* counted in the string's ob_refcnt. When the interned string reaches a refcnt of 0 the string deallocation @@ -338,6 +346,216 @@ #endif } +/* The _PyBytesWriter structure is big: it contains an embeded "stack buffer". + A _PyBytesWriter variable must be declared at the end of variables in a + function to optimize the memory allocation on the stack. */ +typedef struct { + /* bytes object */ + PyObject *buffer; + + /* Number of allocated size */ + Py_ssize_t allocated; + + /* Current size of the buffer (can be smaller than the allocated size) */ + Py_ssize_t size; + + /* If non-zero, overallocate the buffer (default: 0). 
*/ + int overallocate; + + /* Stack buffer */ + int use_stack_buffer; + char stack_buffer[512]; +} _PyBytesWriter; + +static void +_PyBytesWriter_Init(_PyBytesWriter *writer) +{ + writer->buffer = NULL; + writer->allocated = 0; + writer->size = 0; + writer->overallocate = 0; + writer->use_stack_buffer = 0; +#ifdef Py_DEBUG + memset(writer->stack_buffer, 0xCB, sizeof(writer->stack_buffer)); +#endif +} + +static void +_PyBytesWriter_Dealloc(_PyBytesWriter *writer) +{ + Py_CLEAR(writer->buffer); +} + +static char* +_PyBytesWriter_AsString(_PyBytesWriter *writer) +{ + if (!writer->use_stack_buffer) { + assert(writer->buffer != NULL); + return PyBytes_AS_STRING(writer->buffer); + } + else { + assert(writer->buffer == NULL); + return writer->stack_buffer; + } +} + +Py_LOCAL_INLINE(Py_ssize_t) +_PyBytesWriter_GetPos(_PyBytesWriter *writer, char *str) +{ + char *start = _PyBytesWriter_AsString(writer); + assert(str != NULL); + assert(str >= start); + return str - start; +} + +Py_LOCAL_INLINE(void) +_PyBytesWriter_CheckConsistency(_PyBytesWriter *writer, char *str) +{ +#ifdef Py_DEBUG + char *start, *end; + + if (!writer->use_stack_buffer) { + assert(writer->buffer != NULL); + assert(PyBytes_CheckExact(writer->buffer)); + assert(Py_REFCNT(writer->buffer) == 1); + } + else { + assert(writer->buffer == NULL); + } + + start = _PyBytesWriter_AsString(writer); + assert(0 <= writer->size && writer->size <= writer->allocated); + /* the last byte must always be null */ + assert(start[writer->allocated] == 0); + + end = start + writer->allocated; + assert(str != NULL); + assert(start <= str && str <= end); +#endif +} + +/* Add *size* bytes to the buffer. + str is the current pointer inside the buffer. + Return the updated current pointer inside the buffer. + Raise an exception and return NULL on error. */ +static char* +_PyBytesWriter_Prepare(_PyBytesWriter *writer, char *str, Py_ssize_t size) +{ + Py_ssize_t allocated, pos; + + _PyBytesWriter_CheckConsistency(writer, str); + assert(size >= 0); + + if (size == 0) { + /* nothing to do */ + return str; + } + + if (writer->size > PY_SSIZE_T_MAX - size) { + PyErr_NoMemory(); + _PyBytesWriter_Dealloc(writer); + return NULL; + } + writer->size += size; + + allocated = writer->allocated; + if (writer->size <= allocated) + return str; + + allocated = writer->size; + if (writer->overallocate + && allocated <= (PY_SSIZE_T_MAX - allocated / OVERALLOCATE_FACTOR)) { + /* overallocate to limit the number of realloc() */ + allocated += allocated / OVERALLOCATE_FACTOR; + } + + pos = _PyBytesWriter_GetPos(writer, str); + if (!writer->use_stack_buffer) { + /* Note: Don't use a bytearray object because the conversion from + byterray to bytes requires to copy all bytes. */ + if (_PyBytes_Resize(&writer->buffer, allocated)) { + assert(writer->buffer == NULL); + return NULL; + } + } + else { + /* convert from stack buffer to bytes object buffer */ + assert(writer->buffer == NULL); + + writer->buffer = PyBytes_FromStringAndSize(NULL, allocated); + if (writer->buffer == NULL) + return NULL; + + if (pos != 0) { + Py_MEMCPY(PyBytes_AS_STRING(writer->buffer), + writer->stack_buffer, + pos); + } + +#ifdef Py_DEBUG + memset(writer->stack_buffer, 0xDB, sizeof(writer->stack_buffer)); +#endif + + writer->use_stack_buffer = 0; + } + writer->allocated = allocated; + + str = _PyBytesWriter_AsString(writer) + pos; + _PyBytesWriter_CheckConsistency(writer, str); + return str; +} + +/* Allocate the buffer to write size bytes. + Return the pointer to the beginning of buffer data. 
+ Raise an exception and return NULL on error. */ +static char* +_PyBytesWriter_Alloc(_PyBytesWriter *writer, Py_ssize_t size) +{ + /* ensure that _PyBytesWriter_Alloc() is only called once */ + assert(writer->size == 0 && writer->buffer == NULL); + assert(size >= 0); + + writer->use_stack_buffer = 1; +#if Py_DEBUG + /* the last byte is reserved, it must be '\0' */ + writer->stack_buffer[sizeof(writer->stack_buffer) - 1] = 0; + writer->allocated = sizeof(writer->stack_buffer) - 1; +#else + writer->allocated = sizeof(writer->stack_buffer); +#endif + return _PyBytesWriter_Prepare(writer, writer->stack_buffer, size); +} + +/* Get the buffer content and reset the writer. + Return a bytes object. + Raise an exception and return NULL on error. */ +static PyObject * +_PyBytesWriter_Finish(_PyBytesWriter *writer, char *str) +{ + Py_ssize_t pos; + PyObject *result; + + _PyBytesWriter_CheckConsistency(writer, str); + + pos = _PyBytesWriter_GetPos(writer, str); + if (!writer->use_stack_buffer) { + if (pos != writer->allocated) { + if (_PyBytes_Resize(&writer->buffer, pos)) { + assert(writer->buffer == NULL); + return NULL; + } + } + + result = writer->buffer; + writer->buffer = NULL; + } + else { + result = PyBytes_FromStringAndSize(writer->stack_buffer, pos); + } + + return result; +} + #ifdef Py_DEBUG int _PyUnicode_CheckConsistency(PyObject *op, int check_content) @@ -6460,17 +6678,15 @@ Py_ssize_t pos=0, size; int kind; void *data; - /* output object */ - PyObject *res; /* pointer into the output */ char *str; - /* current output position */ - Py_ssize_t ressize; const char *encoding = (limit == 256) ? "latin-1" : "ascii"; const char *reason = (limit == 256) ? "ordinal not in range(256)" : "ordinal not in range(128)"; PyObject *error_handler_obj = NULL; PyObject *exc = NULL; _Py_error_handler error_handler = _Py_ERROR_UNKNOWN; + /* output object */ + _PyBytesWriter writer; if (PyUnicode_READY(unicode) == -1) return NULL; @@ -6481,11 +6697,11 @@ replacements, if we need more, we'll resize */ if (size == 0) return PyBytes_FromStringAndSize(NULL, 0); - res = PyBytes_FromStringAndSize(NULL, size); - if (res == NULL) - return NULL; - str = PyBytes_AS_STRING(res); - ressize = size; + + _PyBytesWriter_Init(&writer); + str = _PyBytesWriter_Alloc(&writer, size); + if (str == NULL) + return NULL; while (pos < size) { Py_UCS4 ch = PyUnicode_READ(kind, data, pos); @@ -6499,15 +6715,18 @@ else { Py_ssize_t requiredsize; PyObject *repunicode; - Py_ssize_t repsize, newpos, respos, i; + Py_ssize_t repsize, newpos, i; /* startpos for collecting unencodable chars */ Py_ssize_t collstart = pos; - Py_ssize_t collend = pos; + Py_ssize_t collend = collstart + 1; /* find all unecodable characters */ while ((collend < size) && (PyUnicode_READ(kind, data, collend) >= limit)) ++collend; + /* Only overallocate the buffer if it's not the last write */ + writer.overallocate = (collend < size); + /* cache callback name lookup (if not done yet, i.e. 
it's the first error) */ if (error_handler == _Py_ERROR_UNKNOWN) error_handler = get_error_handler(errors); @@ -6526,8 +6745,7 @@ break; case _Py_ERROR_XMLCHARREFREPLACE: - respos = str - PyBytes_AS_STRING(res); - requiredsize = respos; + requiredsize = 0; /* determine replacement size */ for (i = collstart; i < collend; ++i) { Py_ssize_t incr; @@ -6553,17 +6771,11 @@ goto overflow; requiredsize += incr; } - if (requiredsize > PY_SSIZE_T_MAX - (size - collend)) - goto overflow; - requiredsize += size - collend; - if (requiredsize > ressize) { - if (ressize <= PY_SSIZE_T_MAX/2 && requiredsize < 2*ressize) - requiredsize = 2*ressize; - if (_PyBytes_Resize(&res, requiredsize)) - goto onError; - str = PyBytes_AS_STRING(res) + respos; - ressize = requiredsize; - } + + str = _PyBytesWriter_Prepare(&writer, str, requiredsize-1); + if (str == NULL) + goto onError; + /* generate replacement */ for (i = collstart; i < collend; ++i) { str += sprintf(str, "&#%d;", PyUnicode_READ(kind, data, i)); @@ -6598,20 +6810,9 @@ if (PyBytes_Check(repunicode)) { /* Directly copy bytes result to output. */ repsize = PyBytes_Size(repunicode); - if (repsize > 1) { - /* Make room for all additional bytes. */ - respos = str - PyBytes_AS_STRING(res); - if (ressize > PY_SSIZE_T_MAX - repsize - 1) { - Py_DECREF(repunicode); - goto overflow; - } - if (_PyBytes_Resize(&res, ressize+repsize-1)) { - Py_DECREF(repunicode); - goto onError; - } - str = PyBytes_AS_STRING(res) + respos; - ressize += repsize-1; - } + str = _PyBytesWriter_Prepare(&writer, str, repsize-1); + if (str == NULL) + goto onError; memcpy(str, PyBytes_AsString(repunicode), repsize); str += repsize; pos = newpos; @@ -6622,24 +6823,11 @@ /* need more space? (at least enough for what we have+the replacement+the rest of the string, so we won't have to check space for encodable characters) */ - respos = str - PyBytes_AS_STRING(res); repsize = PyUnicode_GET_LENGTH(repunicode); - requiredsize = respos; - if (requiredsize > PY_SSIZE_T_MAX - repsize) - goto overflow; - requiredsize += repsize; - if (requiredsize > PY_SSIZE_T_MAX - (size - collend)) - goto overflow; - requiredsize += size - collend; - if (requiredsize > ressize) { - if (ressize <= PY_SSIZE_T_MAX/2 && requiredsize < 2*ressize) - requiredsize = 2*ressize; - if (_PyBytes_Resize(&res, requiredsize)) { - Py_DECREF(repunicode); + if (repsize > 1) { + str = _PyBytesWriter_Prepare(&writer, str, repsize-1); + if (str == NULL) goto onError; - } - str = PyBytes_AS_STRING(res) + respos; - ressize = requiredsize; } /* check if there is anything unencodable in the replacement @@ -6657,26 +6845,23 @@ pos = newpos; Py_DECREF(repunicode); } - } - } - /* Resize if we allocated to much */ - size = str - PyBytes_AS_STRING(res); - if (size < ressize) { /* If this falls res will be NULL */ - assert(size >= 0); - if (_PyBytes_Resize(&res, size) < 0) - goto onError; + + /* If overallocation was disabled, ensure that it was the last + write. 
Otherwise, we missed an optimization */ + assert(writer.overallocate || pos == size); + } } Py_XDECREF(error_handler_obj); Py_XDECREF(exc); - return res; + return _PyBytesWriter_Finish(&writer, str); overflow: PyErr_SetString(PyExc_OverflowError, "encoded result is too long for a Python string"); onError: - Py_XDECREF(res); + _PyBytesWriter_Dealloc(&writer); Py_XDECREF(error_handler_obj); Py_XDECREF(exc); return NULL; @@ -13366,13 +13551,6 @@ _PyUnicodeWriter_PrepareInternal(_PyUnicodeWriter *writer, Py_ssize_t length, Py_UCS4 maxchar) { -#ifdef MS_WINDOWS - /* On Windows, overallocate by 50% is the best factor */ -# define OVERALLOCATE_FACTOR 2 -#else - /* On Linux, overallocate by 25% is the best factor */ -# define OVERALLOCATE_FACTOR 4 -#endif Py_ssize_t newlen; PyObject *newbuffer; -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Fri Oct 9 01:46:58 2015 From: python-checkins at python.org (victor.stinner) Date: Thu, 08 Oct 2015 23:46:58 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Optimize_backslashreplace_?= =?utf-8?q?error_handler?= Message-ID: <20151008234645.97706.9292@psf.io> https://hg.python.org/cpython/rev/59f4806a5add changeset: 98605:59f4806a5add user: Victor Stinner date: Fri Oct 09 01:39:28 2015 +0200 summary: Optimize backslashreplace error handler Issue #25318: Optimize backslashreplace and xmlcharrefreplace error handlers in UTF-8 encoder. Optimize also backslashreplace error handler for ASCII and Latin1 encoders. Use the new _PyBytesWriter API to optimize these error handlers for the encoders. It avoids to create an exception and call the slow implementation of the error handler. files: Objects/stringlib/codecs.h | 18 ++- Objects/unicodeobject.c | 195 ++++++++++++++++++------ 2 files changed, 161 insertions(+), 52 deletions(-) diff --git a/Objects/stringlib/codecs.h b/Objects/stringlib/codecs.h --- a/Objects/stringlib/codecs.h +++ b/Objects/stringlib/codecs.h @@ -334,7 +334,6 @@ i += (endpos - startpos - 1); break; - case _Py_ERROR_SURROGATEPASS: for (k=startpos; k PY_SSIZE_T_MAX - incr) { + PyErr_SetString(PyExc_OverflowError, + "encoded result is too long for a Python string"); + return NULL; + } + size += incr; + } + + prealloc = prealloc_per_char * (collend - collstart); + if (size > prealloc) { + str = _PyBytesWriter_Prepare(writer, str, size - prealloc); + if (str == NULL) + return NULL; + } + + /* generate replacement */ + for (i = collstart; i < collend; ++i) { + ch = PyUnicode_READ(kind, data, i); + if (ch < 0x100) + str += sprintf(str, "\\x%02x", ch); + else if (ch < 0x10000) + str += sprintf(str, "\\u%04x", ch); + else { + assert(ch <= MAX_UNICODE); + str += sprintf(str, "\\U%08x", ch); + } + } + return str; +} + +/* Implementation of the "xmlcharrefreplace" error handler for 8-bit encodings: + ASCII, Latin1, UTF-8, etc. 
*/ +static char* +xmlcharrefreplace(_PyBytesWriter *writer, Py_ssize_t prealloc_per_char, + char *str, + PyObject *unicode, Py_ssize_t collstart, Py_ssize_t collend) +{ + Py_ssize_t size, i, prealloc; + Py_UCS4 ch; + enum PyUnicode_Kind kind; + void *data; + + assert(PyUnicode_IS_READY(unicode)); + kind = PyUnicode_KIND(unicode); + data = PyUnicode_DATA(unicode); + + size = 0; + /* determine replacement size */ + for (i = collstart; i < collend; ++i) { + Py_ssize_t incr; + + ch = PyUnicode_READ(kind, data, i); + if (ch < 10) + incr = 2+1+1; + else if (ch < 100) + incr = 2+2+1; + else if (ch < 1000) + incr = 2+3+1; + else if (ch < 10000) + incr = 2+4+1; + else if (ch < 100000) + incr = 2+5+1; + else if (ch < 1000000) + incr = 2+6+1; + else { + assert(ch <= MAX_UNICODE); + incr = 2+7+1; + } + if (size > PY_SSIZE_T_MAX - incr) { + PyErr_SetString(PyExc_OverflowError, + "encoded result is too long for a Python string"); + return NULL; + } + size += incr; + } + + prealloc = prealloc_per_char * (collend - collstart); + if (size > prealloc) { + str = _PyBytesWriter_Prepare(writer, str, size - prealloc); + if (str == NULL) + return NULL; + } + + /* generate replacement */ + for (i = collstart; i < collend; ++i) { + str += sprintf(str, "&#%d;", PyUnicode_READ(kind, data, i)); + } + return str; +} + /* --- Bloom Filters ----------------------------------------------------- */ /* stuff to implement simple "bloom filters" for Unicode characters. @@ -6713,7 +6834,6 @@ ++pos; } else { - Py_ssize_t requiredsize; PyObject *repunicode; Py_ssize_t repsize, newpos, i; /* startpos for collecting unencodable chars */ @@ -6744,42 +6864,19 @@ pos = collend; break; - case _Py_ERROR_XMLCHARREFREPLACE: - requiredsize = 0; - /* determine replacement size */ - for (i = collstart; i < collend; ++i) { - Py_ssize_t incr; - - ch = PyUnicode_READ(kind, data, i); - if (ch < 10) - incr = 2+1+1; - else if (ch < 100) - incr = 2+2+1; - else if (ch < 1000) - incr = 2+3+1; - else if (ch < 10000) - incr = 2+4+1; - else if (ch < 100000) - incr = 2+5+1; - else if (ch < 1000000) - incr = 2+6+1; - else { - assert(ch <= MAX_UNICODE); - incr = 2+7+1; - } - if (requiredsize > PY_SSIZE_T_MAX - incr) - goto overflow; - requiredsize += incr; - } - - str = _PyBytesWriter_Prepare(&writer, str, requiredsize-1); + case _Py_ERROR_BACKSLASHREPLACE: + str = backslashreplace(&writer, 1, str, + unicode, collstart, collend); if (str == NULL) goto onError; - - /* generate replacement */ - for (i = collstart; i < collend; ++i) { - str += sprintf(str, "&#%d;", PyUnicode_READ(kind, data, i)); - } + pos = collend; + break; + + case _Py_ERROR_XMLCHARREFREPLACE: + str = xmlcharrefreplace(&writer, 1, str, + unicode, collstart, collend); + if (str == NULL) + goto onError; pos = collend; break; @@ -6810,9 +6907,11 @@ if (PyBytes_Check(repunicode)) { /* Directly copy bytes result to output. 
*/ repsize = PyBytes_Size(repunicode); - str = _PyBytesWriter_Prepare(&writer, str, repsize-1); - if (str == NULL) - goto onError; + if (repsize > 1) { + str = _PyBytesWriter_Prepare(&writer, str, repsize-1); + if (str == NULL) + goto onError; + } memcpy(str, PyBytes_AsString(repunicode), repsize); str += repsize; pos = newpos; @@ -6856,10 +6955,6 @@ Py_XDECREF(exc); return _PyBytesWriter_Finish(&writer, str); - overflow: - PyErr_SetString(PyExc_OverflowError, - "encoded result is too long for a Python string"); - onError: _PyBytesWriter_Dealloc(&writer); Py_XDECREF(error_handler_obj); -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Fri Oct 9 02:32:50 2015 From: python-checkins at python.org (victor.stinner) Date: Fri, 09 Oct 2015 00:32:50 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2325318=3A_Move_=5F?= =?utf-8?q?PyBytesWriter_to_bytesobject=2Ec?= Message-ID: <20151009003249.20759.13167@psf.io> https://hg.python.org/cpython/rev/c134eddcb347 changeset: 98606:c134eddcb347 user: Victor Stinner date: Fri Oct 09 01:53:21 2015 +0200 summary: Issue #25318: Move _PyBytesWriter to bytesobject.c Declare also the private API in bytesobject.h. files: Include/bytesobject.h | 52 ++++++ Objects/bytesobject.c | 193 +++++++++++++++++++++++++ Objects/unicodeobject.c | 210 ---------------------------- 3 files changed, 245 insertions(+), 210 deletions(-) diff --git a/Include/bytesobject.h b/Include/bytesobject.h --- a/Include/bytesobject.h +++ b/Include/bytesobject.h @@ -123,6 +123,58 @@ #define F_ALT (1<<3) #define F_ZERO (1<<4) +#ifndef Py_LIMITED_API +/* The _PyBytesWriter structure is big: it contains an embeded "stack buffer". + A _PyBytesWriter variable must be declared at the end of variables in a + function to optimize the memory allocation on the stack. */ +typedef struct { + /* bytes object */ + PyObject *buffer; + + /* Number of allocated size */ + Py_ssize_t allocated; + + /* Current size of the buffer (can be smaller than the allocated size) */ + Py_ssize_t size; + + /* If non-zero, overallocate the buffer (default: 0). */ + int overallocate; + + /* Stack buffer */ + int use_stack_buffer; + char stack_buffer[512]; +} _PyBytesWriter; + +/* Initialize a bytes writer + + By default, the overallocation is disabled. Set the overallocate attribute + to control the allocation of the buffer. */ +PyAPI_FUNC(void) _PyBytesWriter_Init(_PyBytesWriter *writer); + +/* Get the buffer content and reset the writer. + Return a bytes object. + Raise an exception and return NULL on error. */ +PyAPI_FUNC(PyObject *) _PyBytesWriter_Finish(_PyBytesWriter *writer, + char *str); + +/* Deallocate memory of a writer (clear its internal buffer). */ +PyAPI_FUNC(void) _PyBytesWriter_Dealloc(_PyBytesWriter *writer); + +/* Allocate the buffer to write size bytes. + Return the pointer to the beginning of buffer data. + Raise an exception and return NULL on error. */ +PyAPI_FUNC(char*) _PyBytesWriter_Alloc(_PyBytesWriter *writer, + Py_ssize_t size); + +/* Add *size* bytes to the buffer. + str is the current pointer inside the buffer. + Return the updated current pointer inside the buffer. + Raise an exception and return NULL on error. 
*/ +PyAPI_FUNC(char*) _PyBytesWriter_Prepare(_PyBytesWriter *writer, + char *str, + Py_ssize_t size); +#endif /* Py_LIMITED_API */ + #ifdef __cplusplus } #endif diff --git a/Objects/bytesobject.c b/Objects/bytesobject.c --- a/Objects/bytesobject.c +++ b/Objects/bytesobject.c @@ -3735,3 +3735,196 @@ _PyObject_GC_TRACK(it); return (PyObject *)it; } + + +/* _PyBytesWriter API */ + +#ifdef MS_WINDOWS + /* On Windows, overallocate by 50% is the best factor */ +# define OVERALLOCATE_FACTOR 2 +#else + /* On Linux, overallocate by 25% is the best factor */ +# define OVERALLOCATE_FACTOR 4 +#endif + +void +_PyBytesWriter_Init(_PyBytesWriter *writer) +{ + writer->buffer = NULL; + writer->allocated = 0; + writer->size = 0; + writer->overallocate = 0; + writer->use_stack_buffer = 0; +#ifdef Py_DEBUG + memset(writer->stack_buffer, 0xCB, sizeof(writer->stack_buffer)); +#endif +} + +void +_PyBytesWriter_Dealloc(_PyBytesWriter *writer) +{ + Py_CLEAR(writer->buffer); +} + +Py_LOCAL_INLINE(char*) +_PyBytesWriter_AsString(_PyBytesWriter *writer) +{ + if (!writer->use_stack_buffer) { + assert(writer->buffer != NULL); + return PyBytes_AS_STRING(writer->buffer); + } + else { + assert(writer->buffer == NULL); + return writer->stack_buffer; + } +} + +Py_LOCAL_INLINE(Py_ssize_t) +_PyBytesWriter_GetPos(_PyBytesWriter *writer, char *str) +{ + char *start = _PyBytesWriter_AsString(writer); + assert(str != NULL); + assert(str >= start); + return str - start; +} + +Py_LOCAL_INLINE(void) +_PyBytesWriter_CheckConsistency(_PyBytesWriter *writer, char *str) +{ +#ifdef Py_DEBUG + char *start, *end; + + if (!writer->use_stack_buffer) { + assert(writer->buffer != NULL); + assert(PyBytes_CheckExact(writer->buffer)); + assert(Py_REFCNT(writer->buffer) == 1); + } + else { + assert(writer->buffer == NULL); + } + + start = _PyBytesWriter_AsString(writer); + assert(0 <= writer->size && writer->size <= writer->allocated); + /* the last byte must always be null */ + assert(start[writer->allocated] == 0); + + end = start + writer->allocated; + assert(str != NULL); + assert(start <= str && str <= end); +#endif +} + +char* +_PyBytesWriter_Prepare(_PyBytesWriter *writer, char *str, Py_ssize_t size) +{ + Py_ssize_t allocated, pos; + + _PyBytesWriter_CheckConsistency(writer, str); + assert(size >= 0); + + if (size == 0) { + /* nothing to do */ + return str; + } + + if (writer->size > PY_SSIZE_T_MAX - size) { + PyErr_NoMemory(); + _PyBytesWriter_Dealloc(writer); + return NULL; + } + writer->size += size; + + allocated = writer->allocated; + if (writer->size <= allocated) + return str; + + allocated = writer->size; + if (writer->overallocate + && allocated <= (PY_SSIZE_T_MAX - allocated / OVERALLOCATE_FACTOR)) { + /* overallocate to limit the number of realloc() */ + allocated += allocated / OVERALLOCATE_FACTOR; + } + + pos = _PyBytesWriter_GetPos(writer, str); + if (!writer->use_stack_buffer) { + /* Note: Don't use a bytearray object because the conversion from + byterray to bytes requires to copy all bytes. 
*/ + if (_PyBytes_Resize(&writer->buffer, allocated)) { + assert(writer->buffer == NULL); + return NULL; + } + } + else { + /* convert from stack buffer to bytes object buffer */ + assert(writer->buffer == NULL); + + writer->buffer = PyBytes_FromStringAndSize(NULL, allocated); + if (writer->buffer == NULL) + return NULL; + + if (pos != 0) { + Py_MEMCPY(PyBytes_AS_STRING(writer->buffer), + writer->stack_buffer, + pos); + } + +#ifdef Py_DEBUG + memset(writer->stack_buffer, 0xDB, sizeof(writer->stack_buffer)); +#endif + + writer->use_stack_buffer = 0; + } + writer->allocated = allocated; + + str = _PyBytesWriter_AsString(writer) + pos; + _PyBytesWriter_CheckConsistency(writer, str); + return str; +} + +/* Allocate the buffer to write size bytes. + Return the pointer to the beginning of buffer data. + Raise an exception and return NULL on error. */ +char* +_PyBytesWriter_Alloc(_PyBytesWriter *writer, Py_ssize_t size) +{ + /* ensure that _PyBytesWriter_Alloc() is only called once */ + assert(writer->size == 0 && writer->buffer == NULL); + assert(size >= 0); + + writer->use_stack_buffer = 1; +#if Py_DEBUG + /* the last byte is reserved, it must be '\0' */ + writer->stack_buffer[sizeof(writer->stack_buffer) - 1] = 0; + writer->allocated = sizeof(writer->stack_buffer) - 1; +#else + writer->allocated = sizeof(writer->stack_buffer); +#endif + return _PyBytesWriter_Prepare(writer, writer->stack_buffer, size); +} + +PyObject * +_PyBytesWriter_Finish(_PyBytesWriter *writer, char *str) +{ + Py_ssize_t pos; + PyObject *result; + + _PyBytesWriter_CheckConsistency(writer, str); + + pos = _PyBytesWriter_GetPos(writer, str); + if (!writer->use_stack_buffer) { + if (pos != writer->allocated) { + if (_PyBytes_Resize(&writer->buffer, pos)) { + assert(writer->buffer == NULL); + return NULL; + } + } + + result = writer->buffer; + writer->buffer = NULL; + } + else { + result = PyBytes_FromStringAndSize(writer->stack_buffer, pos); + } + + return result; +} diff --git a/Objects/unicodeobject.c b/Objects/unicodeobject.c --- a/Objects/unicodeobject.c +++ b/Objects/unicodeobject.c @@ -347,216 +347,6 @@ #endif } -/* The _PyBytesWriter structure is big: it contains an embeded "stack buffer". - A _PyBytesWriter variable must be declared at the end of variables in a - function to optimize the memory allocation on the stack. */ -typedef struct { - /* bytes object */ - PyObject *buffer; - - /* Number of allocated size */ - Py_ssize_t allocated; - - /* Current size of the buffer (can be smaller than the allocated size) */ - Py_ssize_t size; - - /* If non-zero, overallocate the buffer (default: 0). 
*/ - int overallocate; - - /* Stack buffer */ - int use_stack_buffer; - char stack_buffer[512]; -} _PyBytesWriter; - -static void -_PyBytesWriter_Init(_PyBytesWriter *writer) -{ - writer->buffer = NULL; - writer->allocated = 0; - writer->size = 0; - writer->overallocate = 0; - writer->use_stack_buffer = 0; -#ifdef Py_DEBUG - memset(writer->stack_buffer, 0xCB, sizeof(writer->stack_buffer)); -#endif -} - -static void -_PyBytesWriter_Dealloc(_PyBytesWriter *writer) -{ - Py_CLEAR(writer->buffer); -} - -static char* -_PyBytesWriter_AsString(_PyBytesWriter *writer) -{ - if (!writer->use_stack_buffer) { - assert(writer->buffer != NULL); - return PyBytes_AS_STRING(writer->buffer); - } - else { - assert(writer->buffer == NULL); - return writer->stack_buffer; - } -} - -Py_LOCAL_INLINE(Py_ssize_t) -_PyBytesWriter_GetPos(_PyBytesWriter *writer, char *str) -{ - char *start = _PyBytesWriter_AsString(writer); - assert(str != NULL); - assert(str >= start); - return str - start; -} - -Py_LOCAL_INLINE(void) -_PyBytesWriter_CheckConsistency(_PyBytesWriter *writer, char *str) -{ -#ifdef Py_DEBUG - char *start, *end; - - if (!writer->use_stack_buffer) { - assert(writer->buffer != NULL); - assert(PyBytes_CheckExact(writer->buffer)); - assert(Py_REFCNT(writer->buffer) == 1); - } - else { - assert(writer->buffer == NULL); - } - - start = _PyBytesWriter_AsString(writer); - assert(0 <= writer->size && writer->size <= writer->allocated); - /* the last byte must always be null */ - assert(start[writer->allocated] == 0); - - end = start + writer->allocated; - assert(str != NULL); - assert(start <= str && str <= end); -#endif -} - -/* Add *size* bytes to the buffer. - str is the current pointer inside the buffer. - Return the updated current pointer inside the buffer. - Raise an exception and return NULL on error. */ -static char* -_PyBytesWriter_Prepare(_PyBytesWriter *writer, char *str, Py_ssize_t size) -{ - Py_ssize_t allocated, pos; - - _PyBytesWriter_CheckConsistency(writer, str); - assert(size >= 0); - - if (size == 0) { - /* nothing to do */ - return str; - } - - if (writer->size > PY_SSIZE_T_MAX - size) { - PyErr_NoMemory(); - _PyBytesWriter_Dealloc(writer); - return NULL; - } - writer->size += size; - - allocated = writer->allocated; - if (writer->size <= allocated) - return str; - - allocated = writer->size; - if (writer->overallocate - && allocated <= (PY_SSIZE_T_MAX - allocated / OVERALLOCATE_FACTOR)) { - /* overallocate to limit the number of realloc() */ - allocated += allocated / OVERALLOCATE_FACTOR; - } - - pos = _PyBytesWriter_GetPos(writer, str); - if (!writer->use_stack_buffer) { - /* Note: Don't use a bytearray object because the conversion from - byterray to bytes requires to copy all bytes. */ - if (_PyBytes_Resize(&writer->buffer, allocated)) { - assert(writer->buffer == NULL); - return NULL; - } - } - else { - /* convert from stack buffer to bytes object buffer */ - assert(writer->buffer == NULL); - - writer->buffer = PyBytes_FromStringAndSize(NULL, allocated); - if (writer->buffer == NULL) - return NULL; - - if (pos != 0) { - Py_MEMCPY(PyBytes_AS_STRING(writer->buffer), - writer->stack_buffer, - pos); - } - -#ifdef Py_DEBUG - memset(writer->stack_buffer, 0xDB, sizeof(writer->stack_buffer)); -#endif - - writer->use_stack_buffer = 0; - } - writer->allocated = allocated; - - str = _PyBytesWriter_AsString(writer) + pos; - _PyBytesWriter_CheckConsistency(writer, str); - return str; -} - -/* Allocate the buffer to write size bytes. - Return the pointer to the beginning of buffer data. 
- Raise an exception and return NULL on error. */ -static char* -_PyBytesWriter_Alloc(_PyBytesWriter *writer, Py_ssize_t size) -{ - /* ensure that _PyBytesWriter_Alloc() is only called once */ - assert(writer->size == 0 && writer->buffer == NULL); - assert(size >= 0); - - writer->use_stack_buffer = 1; -#if Py_DEBUG - /* the last byte is reserved, it must be '\0' */ - writer->stack_buffer[sizeof(writer->stack_buffer) - 1] = 0; - writer->allocated = sizeof(writer->stack_buffer) - 1; -#else - writer->allocated = sizeof(writer->stack_buffer); -#endif - return _PyBytesWriter_Prepare(writer, writer->stack_buffer, size); -} - -/* Get the buffer content and reset the writer. - Return a bytes object. - Raise an exception and return NULL on error. */ -static PyObject * -_PyBytesWriter_Finish(_PyBytesWriter *writer, char *str) -{ - Py_ssize_t pos; - PyObject *result; - - _PyBytesWriter_CheckConsistency(writer, str); - - pos = _PyBytesWriter_GetPos(writer, str); - if (!writer->use_stack_buffer) { - if (pos != writer->allocated) { - if (_PyBytes_Resize(&writer->buffer, pos)) { - assert(writer->buffer == NULL); - return NULL; - } - } - - result = writer->buffer; - writer->buffer = NULL; - } - else { - result = PyBytes_FromStringAndSize(writer->stack_buffer, pos); - } - - return result; -} - #ifdef Py_DEBUG int _PyUnicode_CheckConsistency(PyObject *op, int check_content) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Fri Oct 9 02:52:38 2015 From: python-checkins at python.org (victor.stinner) Date: Fri, 09 Oct 2015 00:52:38 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2325318=3A_Fix_comp?= =?utf-8?q?ilation_error?= Message-ID: <20151009005238.20779.49084@psf.io> https://hg.python.org/cpython/rev/e9c1404d6bd9 changeset: 98607:e9c1404d6bd9 user: Victor Stinner date: Fri Oct 09 02:52:16 2015 +0200 summary: Issue #25318: Fix compilation error Replace "#if Py_DEBUG" with "#ifdef Py_DEBUG". 
files: Objects/bytesobject.c | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Objects/bytesobject.c b/Objects/bytesobject.c --- a/Objects/bytesobject.c +++ b/Objects/bytesobject.c @@ -3892,7 +3892,7 @@ assert(size >= 0); writer->use_stack_buffer = 1; -#if Py_DEBUG +#ifdef Py_DEBUG /* the last byte is reserved, it must be '\0' */ writer->stack_buffer[sizeof(writer->stack_buffer) - 1] = 0; writer->allocated = sizeof(writer->stack_buffer) - 1; -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Fri Oct 9 02:55:28 2015 From: python-checkins at python.org (raymond.hettinger) Date: Fri, 09 Oct 2015 00:55:28 +0000 Subject: [Python-checkins] =?utf-8?q?test=3A_test?= Message-ID: <20151009005528.7262.70312@psf.io> https://hg.python.org/test/rev/199cb49e866b changeset: 226:199cb49e866b user: Raymond Hettinger date: Thu Oct 08 20:55:24 2015 -0400 summary: test files: ab | 0 1 files changed, 0 insertions(+), 0 deletions(-) diff --git a/ab b/ab new file mode 100644 -- Repository URL: https://hg.python.org/test From python-checkins at python.org Fri Oct 9 03:14:26 2015 From: python-checkins at python.org (raymond.hettinger) Date: Fri, 09 Oct 2015 01:14:26 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogSXNzdWUgIzI1MzI2?= =?utf-8?q?=3A__Improve_an_obscure_error_message=2E?= Message-ID: <20151009011420.97720.71338@psf.io> https://hg.python.org/cpython/rev/4d1bd86d3bbd changeset: 98608:4d1bd86d3bbd branch: 2.7 parent: 98586:04815b55227f user: Raymond Hettinger date: Thu Oct 08 21:14:15 2015 -0400 summary: Issue #25326: Improve an obscure error message. files: Objects/abstract.c | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Objects/abstract.c b/Objects/abstract.c --- a/Objects/abstract.c +++ b/Objects/abstract.c @@ -259,7 +259,7 @@ pb->bf_getcharbuffer == NULL || pb->bf_getsegcount == NULL) { PyErr_SetString(PyExc_TypeError, - "expected a character buffer object"); + "expected a string or other character buffer object"); return -1; } if ((*pb->bf_getsegcount)(obj,NULL) != 1) { -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Fri Oct 9 03:38:50 2015 From: python-checkins at python.org (victor.stinner) Date: Fri, 09 Oct 2015 01:38:50 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2325318=3A_Avoid_sp?= =?utf-8?q?rintf=28=29_in_backslashreplace=28=29?= Message-ID: <20151009013850.18382.52485@psf.io> https://hg.python.org/cpython/rev/9cf89366bbcb changeset: 98609:9cf89366bbcb parent: 98607:e9c1404d6bd9 user: Victor Stinner date: Fri Oct 09 03:17:30 2015 +0200 summary: Issue #25318: Avoid sprintf() in backslashreplace() Rewrite backslashreplace() to be closer to PyCodec_BackslashReplaceErrors(). Add also unit tests for non-BMP characters. 
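For reference, the behaviour exercised by the new tests can be reproduced from the interpreter; this is a minimal sketch (not part of the changeset), assuming the ascii codec as the 8-bit target (any codec that cannot represent these characters produces the same replacement text):

    >>> '[\x80\xff\u20ac\U000abcde]'.encode('ascii', 'backslashreplace')
    b'[\\x80\\xff\\u20ac\\U000abcde]'

The non-BMP character U+ABCDE takes the \Uxxxxxxxx branch that previously went through sprintf().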
files: Lib/test/test_codecs.py | 6 ++++-- Objects/unicodeobject.c | 27 +++++++++++++++++++-------- 2 files changed, 23 insertions(+), 10 deletions(-) diff --git a/Lib/test/test_codecs.py b/Lib/test/test_codecs.py --- a/Lib/test/test_codecs.py +++ b/Lib/test/test_codecs.py @@ -3155,7 +3155,8 @@ ('[\x80\xff\u20ac]', 'ignore', b'[]'), ('[\x80\xff\u20ac]', 'replace', b'[???]'), ('[\x80\xff\u20ac]', 'xmlcharrefreplace', b'[€ÿ€]'), - ('[\x80\xff\u20ac]', 'backslashreplace', b'[\\x80\\xff\\u20ac]'), + ('[\x80\xff\u20ac\U000abcde]', 'backslashreplace', + b'[\\x80\\xff\\u20ac\\U000abcde]'), ('[\udc80\udcff]', 'surrogateescape', b'[\x80\xff]'), ): with self.subTest(data=data, error_handler=error_handler, @@ -3197,7 +3198,8 @@ for data, error_handler, expected in ( ('[\u20ac\udc80]', 'ignore', b'[]'), ('[\u20ac\udc80]', 'replace', b'[??]'), - ('[\u20ac\udc80]', 'backslashreplace', b'[\\u20ac\\udc80]'), + ('[\u20ac\U000abcde]', 'backslashreplace', + b'[\\u20ac\\U000abcde]'), ('[\u20ac\udc80]', 'xmlcharrefreplace', b'[€�]'), ('[\udc80\udcff]', 'surrogateescape', b'[\x80\xff]'), ): diff --git a/Objects/unicodeobject.c b/Objects/unicodeobject.c --- a/Objects/unicodeobject.c +++ b/Objects/unicodeobject.c @@ -610,14 +610,25 @@ /* generate replacement */ for (i = collstart; i < collend; ++i) { ch = PyUnicode_READ(kind, data, i); - if (ch < 0x100) - str += sprintf(str, "\\x%02x", ch); - else if (ch < 0x10000) - str += sprintf(str, "\\u%04x", ch); - else { - assert(ch <= MAX_UNICODE); - str += sprintf(str, "\\U%08x", ch); - } + *str++ = '\\'; + if (ch >= 0x00010000) { + *str++ = 'U'; + *str++ = Py_hexdigits[(ch>>28)&0xf]; + *str++ = Py_hexdigits[(ch>>24)&0xf]; + *str++ = Py_hexdigits[(ch>>20)&0xf]; + *str++ = Py_hexdigits[(ch>>16)&0xf]; + *str++ = Py_hexdigits[(ch>>12)&0xf]; + *str++ = Py_hexdigits[(ch>>8)&0xf]; + } + else if (ch >= 0x100) { + *str++ = 'u'; + *str++ = Py_hexdigits[(ch>>12)&0xf]; + *str++ = Py_hexdigits[(ch>>8)&0xf]; + } + else + *str++ = 'x'; + *str++ = Py_hexdigits[(ch>>4)&0xf]; + *str++ = Py_hexdigits[ch&0xf]; } return str; } -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Fri Oct 9 03:38:52 2015 From: python-checkins at python.org (victor.stinner) Date: Fri, 09 Oct 2015 01:38:52 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2325318=3A_cleanup_?= =?utf-8?q?code_=5FPyBytesWriter?= Message-ID: <20151009013850.475.612@psf.io> https://hg.python.org/cpython/rev/c53dcf1d6967 changeset: 98611:c53dcf1d6967 user: Victor Stinner date: Fri Oct 09 03:38:24 2015 +0200 summary: Issue #25318: cleanup code _PyBytesWriter Rename "stack buffer" to "small buffer". Add also an assertion in _PyBytesWriter_GetPos(). 
files: Include/bytesobject.h | 4 +- Objects/bytesobject.c | 34 +++++++++++++++--------------- 2 files changed, 19 insertions(+), 19 deletions(-) diff --git a/Include/bytesobject.h b/Include/bytesobject.h --- a/Include/bytesobject.h +++ b/Include/bytesobject.h @@ -141,8 +141,8 @@ int overallocate; /* Stack buffer */ - int use_stack_buffer; - char stack_buffer[512]; + int use_small_buffer; + char small_buffer[512]; } _PyBytesWriter; /* Initialize a bytes writer diff --git a/Objects/bytesobject.c b/Objects/bytesobject.c --- a/Objects/bytesobject.c +++ b/Objects/bytesobject.c @@ -3754,9 +3754,9 @@ writer->allocated = 0; writer->size = 0; writer->overallocate = 0; - writer->use_stack_buffer = 0; + writer->use_small_buffer = 0; #ifdef Py_DEBUG - memset(writer->stack_buffer, 0xCB, sizeof(writer->stack_buffer)); + memset(writer->small_buffer, 0xCB, sizeof(writer->small_buffer)); #endif } @@ -3769,13 +3769,13 @@ Py_LOCAL_INLINE(char*) _PyBytesWriter_AsString(_PyBytesWriter *writer) { - if (!writer->use_stack_buffer) { + if (!writer->use_small_buffer) { assert(writer->buffer != NULL); return PyBytes_AS_STRING(writer->buffer); } else { assert(writer->buffer == NULL); - return writer->stack_buffer; + return writer->small_buffer; } } @@ -3785,6 +3785,7 @@ char *start = _PyBytesWriter_AsString(writer); assert(str != NULL); assert(str >= start); + assert(str - start <= writer->allocated); return str - start; } @@ -3794,7 +3795,7 @@ #ifdef Py_DEBUG char *start, *end; - if (!writer->use_stack_buffer) { + if (!writer->use_small_buffer) { assert(writer->buffer != NULL); assert(PyBytes_CheckExact(writer->buffer)); assert(Py_REFCNT(writer->buffer) == 1); @@ -3846,7 +3847,7 @@ } pos = _PyBytesWriter_GetPos(writer, str); - if (!writer->use_stack_buffer) { + if (!writer->use_small_buffer) { /* Note: Don't use a bytearray object because the conversion from byterray to bytes requires to copy all bytes. 
*/ if (_PyBytes_Resize(&writer->buffer, allocated)) { @@ -3864,15 +3865,14 @@ if (pos != 0) { Py_MEMCPY(PyBytes_AS_STRING(writer->buffer), - writer->stack_buffer, + writer->small_buffer, pos); } + writer->use_small_buffer = 0; #ifdef Py_DEBUG - memset(writer->stack_buffer, 0xDB, sizeof(writer->stack_buffer)); + memset(writer->small_buffer, 0xDB, sizeof(writer->small_buffer)); #endif - - writer->use_stack_buffer = 0; } writer->allocated = allocated; @@ -3891,15 +3891,15 @@ assert(writer->size == 0 && writer->buffer == NULL); assert(size >= 0); - writer->use_stack_buffer = 1; + writer->use_small_buffer = 1; #ifdef Py_DEBUG /* the last byte is reserved, it must be '\0' */ - writer->stack_buffer[sizeof(writer->stack_buffer) - 1] = 0; - writer->allocated = sizeof(writer->stack_buffer) - 1; + writer->allocated = sizeof(writer->small_buffer) - 1; + writer->small_buffer[writer->allocated] = 0; #else - writer->allocated = sizeof(writer->stack_buffer); + writer->allocated = sizeof(writer->small_buffer); #endif - return _PyBytesWriter_Prepare(writer, writer->stack_buffer, size); + return _PyBytesWriter_Prepare(writer, writer->small_buffer, size); } PyObject * @@ -3911,7 +3911,7 @@ _PyBytesWriter_CheckConsistency(writer, str); pos = _PyBytesWriter_GetPos(writer, str); - if (!writer->use_stack_buffer) { + if (!writer->use_small_buffer) { if (pos != writer->allocated) { if (_PyBytes_Resize(&writer->buffer, pos)) { assert(writer->buffer == NULL); @@ -3923,7 +3923,7 @@ writer->buffer = NULL; } else { - result = PyBytes_FromStringAndSize(writer->stack_buffer, pos); + result = PyBytes_FromStringAndSize(writer->small_buffer, pos); } return result; -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Fri Oct 9 03:38:56 2015 From: python-checkins at python.org (victor.stinner) Date: Fri, 09 Oct 2015 01:38:56 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2325318=3A_Fix_back?= =?utf-8?q?slashreplace=28=29?= Message-ID: <20151009013850.2683.39292@psf.io> https://hg.python.org/cpython/rev/0a522f68d275 changeset: 98610:0a522f68d275 user: Victor Stinner date: Fri Oct 09 03:37:11 2015 +0200 summary: Issue #25318: Fix backslashreplace() Fix code to estimate the needed space. files: Objects/unicodeobject.c | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Objects/unicodeobject.c b/Objects/unicodeobject.c --- a/Objects/unicodeobject.c +++ b/Objects/unicodeobject.c @@ -590,7 +590,7 @@ incr = 2+4; else { assert(ch <= MAX_UNICODE); - incr = 2+6; + incr = 2+8; } if (size > PY_SSIZE_T_MAX - incr) { PyErr_SetString(PyExc_OverflowError, -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Fri Oct 9 06:03:57 2015 From: python-checkins at python.org (raymond.hettinger) Date: Fri, 09 Oct 2015 04:03:57 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2325298=3A__Add_loc?= =?utf-8?q?k_and_rlock_weakref_tests_=28Contributed_by_Nir_Soffer=29=2E?= Message-ID: <20151009040357.18388.16368@psf.io> https://hg.python.org/cpython/rev/45903695e86c changeset: 98612:45903695e86c user: Raymond Hettinger date: Fri Oct 09 00:03:51 2015 -0400 summary: Issue #25298: Add lock and rlock weakref tests (Contributed by Nir Soffer). 
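The added tests come down to one property: lock objects support weak references, and the reference is cleared once the lock is gone. A standalone sketch of the same check, using threading.Lock directly rather than the test class's self.locktype (the second assertion relies on CPython's immediate refcount-based reclamation):

    import threading
    import weakref

    lock = threading.Lock()
    ref = weakref.ref(lock)
    assert ref() is lock    # resolves while the lock is alive
    del lock
    assert ref() is None    # cleared once the lock has been reclaimed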
files: Lib/test/lock_tests.py | 12 ++++++++++++ 1 files changed, 12 insertions(+), 0 deletions(-) diff --git a/Lib/test/lock_tests.py b/Lib/test/lock_tests.py --- a/Lib/test/lock_tests.py +++ b/Lib/test/lock_tests.py @@ -7,6 +7,7 @@ from _thread import start_new_thread, TIMEOUT_MAX import threading import unittest +import weakref from test import support @@ -198,6 +199,17 @@ self.assertFalse(results[0]) self.assertTimeout(results[1], 0.5) + def test_weakref_exists(self): + lock = self.locktype() + ref = weakref.ref(lock) + self.assertIsNotNone(ref()) + + def test_weakref_deleted(self): + lock = self.locktype() + ref = weakref.ref(lock) + del lock + self.assertIsNone(ref()) + class LockTests(BaseLockTests): """ -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Fri Oct 9 06:43:00 2015 From: python-checkins at python.org (raymond.hettinger) Date: Fri, 09 Oct 2015 04:43:00 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Make_comparison_more_consi?= =?utf-8?q?stent?= Message-ID: <20151009044300.97706.75364@psf.io> https://hg.python.org/cpython/rev/7978f187b10a changeset: 98613:7978f187b10a user: Raymond Hettinger date: Fri Oct 09 00:42:47 2015 -0400 summary: Make comparison more consistent files: Python/bltinmodule.c | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Python/bltinmodule.c b/Python/bltinmodule.c --- a/Python/bltinmodule.c +++ b/Python/bltinmodule.c @@ -331,7 +331,7 @@ Py_DECREF(it); return NULL; } - if (cmp == 1) { + if (cmp > 0) { Py_DECREF(it); Py_RETURN_TRUE; } -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Fri Oct 9 07:35:30 2015 From: python-checkins at python.org (raymond.hettinger) Date: Fri, 09 Oct 2015 05:35:30 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Hoist_constant_expression_?= =?utf-8?q?out_of_the_inner_loop=2E?= Message-ID: <20151009053529.3295.49294@psf.io> https://hg.python.org/cpython/rev/1aae9b6a6929 changeset: 98614:1aae9b6a6929 user: Raymond Hettinger date: Fri Oct 09 01:34:08 2015 -0400 summary: Hoist constant expression out of the inner loop. 
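The hoisted expression is the check for the fast predicates (None and bool) in what appears to be filter's iteration loop: it does not depend on the current item, so it can be evaluated once before the loop rather than once per item. A rough Python-level sketch of the resulting control flow (illustration only, not the actual C; func and it stand in for lz->func and lz->it):

    def filter_sketch(func, it):
        checktrue = func is None or func is bool    # loop-invariant, hoisted
        for item in it:
            ok = item if checktrue else func(item)
            if ok:                                  # truth test in both cases
                yield item

    assert list(filter_sketch(None, [0, 1, 2, ''])) == [1, 2]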
files: Python/bltinmodule.c | 6 +++--- 1 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Python/bltinmodule.c b/Python/bltinmodule.c --- a/Python/bltinmodule.c +++ b/Python/bltinmodule.c @@ -469,6 +469,7 @@ PyObject *it = lz->it; long ok; PyObject *(*iternext)(PyObject *); + int checktrue = lz->func == Py_None || lz->func == (PyObject *)&PyBool_Type; iternext = *Py_TYPE(it)->tp_iternext; for (;;) { @@ -476,12 +477,11 @@ if (item == NULL) return NULL; - if (lz->func == Py_None || lz->func == (PyObject *)&PyBool_Type) { + if (checktrue) { ok = PyObject_IsTrue(item); } else { PyObject *good; - good = PyObject_CallFunctionObjArgs(lz->func, - item, NULL); + good = PyObject_CallFunctionObjArgs(lz->func, item, NULL); if (good == NULL) { Py_DECREF(item); return NULL; -- Repository URL: https://hg.python.org/cpython From solipsis at pitrou.net Fri Oct 9 10:44:42 2015 From: solipsis at pitrou.net (solipsis at pitrou.net) Date: Fri, 09 Oct 2015 08:44:42 +0000 Subject: [Python-checkins] Daily reference leaks (1aae9b6a6929): sum=17880 Message-ID: <20151009084442.18368.25524@psf.io> results for 1aae9b6a6929 on branch "default" -------------------------------------------- test_asyncio leaked [3, 0, 0] memory blocks, sum=3 test_capi leaked [1598, 1598, 1598] references, sum=4794 test_capi leaked [387, 389, 389] memory blocks, sum=1165 test_functools leaked [0, 2, 2] memory blocks, sum=4 test_threading leaked [3196, 3196, 3196] references, sum=9588 test_threading leaked [774, 776, 776] memory blocks, sum=2326 Command line was: ['./python', '-m', 'test.regrtest', '-uall', '-R', '3:3:/home/psf-users/antoine/refleaks/reflogikNhry', '--timeout', '7200'] From python-checkins at python.org Fri Oct 9 12:21:56 2015 From: python-checkins at python.org (victor.stinner) Date: Fri, 09 Oct 2015 10:21:56 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2325349=3A_Optimize?= =?utf-8?q?_bytes_=25_args_using_the_new_private_=5FPyBytesWriter_API?= Message-ID: <20151009102155.3273.45766@psf.io> https://hg.python.org/cpython/rev/b2f3cbdc0f2d changeset: 98615:b2f3cbdc0f2d user: Victor Stinner date: Fri Oct 09 11:48:06 2015 +0200 summary: Issue #25349: Optimize bytes % args using the new private _PyBytesWriter API * Thanks to the _PyBytesWriter API, output smaller than 512 bytes are allocated on the stack and so avoid calling _PyBytes_Resize(). Because of that, change the default buffer size to fmtcnt instead of fmtcnt+100. * Rely on _PyBytesWriter algorithm to overallocate the buffer instead of using a custom code. For example, _PyBytesWriter uses a different overallocation factor (25% or 50%) depending on the platform to get best performances. * Disable overallocation for the last write. * Replace C loops to fill characters with memset() * Add also many comments to _PyBytes_Format() * Remove unused FORMATBUFLEN constant * Avoid the creation of a temporary bytes object when formatting a floating point number (when no custom formatting option is used) * Fix also reference leaks on error handling * Use Py_MEMCPY() to copy bytes between two formatters (%) files: Misc/NEWS | 2 + Objects/bytesobject.c | 187 ++++++++++++++++++++--------- 2 files changed, 130 insertions(+), 59 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,6 +10,8 @@ Core and Builtins ----------------- +- Issue #25349: Optimize bytes % args using the new private _PyBytesWriter API. 
+ - Issue #24806: Prevent builtin types that are not allowed to be subclassed from being subclassed through multiple inheritance. diff --git a/Objects/bytesobject.c b/Objects/bytesobject.c --- a/Objects/bytesobject.c +++ b/Objects/bytesobject.c @@ -409,12 +409,15 @@ /* Returns a new reference to a PyBytes object, or NULL on failure. */ -static PyObject * -formatfloat(PyObject *v, int flags, int prec, int type) +static char* +formatfloat(PyObject *v, int flags, int prec, int type, + PyObject **p_result, _PyBytesWriter *writer, char *str, + Py_ssize_t prealloc) { char *p; PyObject *result; double x; + size_t len; x = PyFloat_AsDouble(v); if (x == -1.0 && PyErr_Occurred()) { @@ -431,9 +434,23 @@ if (p == NULL) return NULL; - result = PyBytes_FromStringAndSize(p, strlen(p)); + + len = strlen(p); + if (writer != NULL) { + if ((Py_ssize_t)len > prealloc) { + str = _PyBytesWriter_Prepare(writer, str, len - prealloc); + if (str == NULL) + return NULL; + } + Py_MEMCPY(str, p, len); + str += len; + return str; + } + + result = PyBytes_FromStringAndSize(p, len); PyMem_Free(p); - return result; + *p_result = result; + return str; } static PyObject * @@ -557,36 +574,32 @@ return NULL; } -/* fmt%(v1,v2,...) is roughly equivalent to sprintf(fmt, v1, v2, ...) - - FORMATBUFLEN is the length of the buffer in which the ints & - chars are formatted. XXX This is a magic number. Each formatting - routine does bounds checking to ensure no overflow, but a better - solution may be to malloc a buffer of appropriate size for each - format. For now, the current solution is sufficient. -*/ -#define FORMATBUFLEN (size_t)120 +/* fmt%(v1,v2,...) is roughly equivalent to sprintf(fmt, v1, v2, ...) */ PyObject * _PyBytes_Format(PyObject *format, PyObject *args) { char *fmt, *res; Py_ssize_t arglen, argidx; - Py_ssize_t reslen, rescnt, fmtcnt; + Py_ssize_t fmtcnt; int args_owned = 0; - PyObject *result; PyObject *dict = NULL; + _PyBytesWriter writer; + if (format == NULL || !PyBytes_Check(format) || args == NULL) { PyErr_BadInternalCall(); return NULL; } fmt = PyBytes_AS_STRING(format); fmtcnt = PyBytes_GET_SIZE(format); - reslen = rescnt = fmtcnt + 100; - result = PyBytes_FromStringAndSize((char *)NULL, reslen); - if (result == NULL) + + _PyBytesWriter_Init(&writer); + + res = _PyBytesWriter_Alloc(&writer, fmtcnt); + if (res == NULL) return NULL; - res = PyBytes_AsString(result); + writer.overallocate = 1; + if (PyTuple_Check(args)) { arglen = PyTuple_GET_SIZE(args); argidx = 0; @@ -600,18 +613,25 @@ !PyByteArray_Check(args)) { dict = args; } + while (--fmtcnt >= 0) { if (*fmt != '%') { - if (--rescnt < 0) { - rescnt = fmtcnt + 100; - reslen += rescnt; - if (_PyBytes_Resize(&result, reslen)) - return NULL; - res = PyBytes_AS_STRING(result) - + reslen - rescnt; - --rescnt; + Py_ssize_t len; + char *pos; + + pos = strchr(fmt + 1, '%'); + if (pos != NULL) + len = pos - fmt; + else { + len = PyBytes_GET_SIZE(format); + len -= (fmt - PyBytes_AS_STRING(format)); } - *res++ = *fmt++; + assert(len != 0); + + Py_MEMCPY(res, fmt, len); + res += len; + fmt += len; + fmtcnt -= (len - 1); } else { /* Got a format specifier */ @@ -626,6 +646,10 @@ int sign; Py_ssize_t len = 0; char onechar; /* For byte_converter() */ + Py_ssize_t alloc; +#ifdef Py_DEBUG + char *before; +#endif fmt++; if (*fmt == '(') { @@ -673,6 +697,8 @@ arglen = -1; argidx = -2; } + + /* Parse flags. Example: "%+i" => flags=F_SIGN. */ while (--fmtcnt >= 0) { switch (c = *fmt++) { case '-': flags |= F_LJUST; continue; @@ -683,6 +709,8 @@ } break; } + + /* Parse width. 
Example: "%10s" => width=10 */ if (c == '*') { v = getnextarg(args, arglen, &argidx); if (v == NULL) @@ -717,6 +745,8 @@ width = width*10 + (c - '0'); } } + + /* Parse precision. Example: "%.3f" => prec=3 */ if (c == '.') { prec = 0; if (--fmtcnt >= 0) @@ -771,6 +801,12 @@ if (v == NULL) goto error; } + + if (fmtcnt < 0) { + /* last writer: disable writer overallocation */ + writer.overallocate = 0; + } + sign = 0; fill = ' '; switch (c) { @@ -778,6 +814,7 @@ pbuf = "%"; len = 1; break; + case 'r': // %r is only for 2/3 code; 3 only code should use %a case 'a': @@ -790,6 +827,7 @@ if (prec >= 0 && len > prec) len = prec; break; + case 's': // %s is only for 2/3 code; 3 only code should use %b case 'b': @@ -799,6 +837,7 @@ if (prec >= 0 && len > prec) len = prec; break; + case 'i': case 'd': case 'u': @@ -815,14 +854,24 @@ if (flags & F_ZERO) fill = '0'; break; + case 'e': case 'E': case 'f': case 'F': case 'g': case 'G': - temp = formatfloat(v, flags, prec, c); - if (temp == NULL) + if (width == -1 && prec == -1 + && !(flags & (F_SIGN | F_BLANK))) + { + /* Fast path */ + res = formatfloat(v, flags, prec, c, NULL, &writer, res, 1); + if (res == NULL) + goto error; + continue; + } + + if (!formatfloat(v, flags, prec, c, &temp, NULL, res, 1)) goto error; pbuf = PyBytes_AS_STRING(temp); len = PyBytes_GET_SIZE(temp); @@ -830,12 +879,14 @@ if (flags & F_ZERO) fill = '0'; break; + case 'c': pbuf = &onechar; len = byte_converter(v, &onechar); if (!len) goto error; break; + default: PyErr_Format(PyExc_ValueError, "unsupported format character '%c' (0x%x) " @@ -845,6 +896,7 @@ PyBytes_AsString(format))); goto error; } + if (sign) { if (*pbuf == '-' || *pbuf == '+') { sign = *pbuf++; @@ -859,29 +911,30 @@ } if (width < len) width = len; - if (rescnt - (sign != 0) < width) { - reslen -= rescnt; - rescnt = width + fmtcnt + 100; - reslen += rescnt; - if (reslen < 0) { - Py_DECREF(result); - Py_XDECREF(temp); - return PyErr_NoMemory(); - } - if (_PyBytes_Resize(&result, reslen)) { - Py_XDECREF(temp); - return NULL; - } - res = PyBytes_AS_STRING(result) - + reslen - rescnt; + + alloc = width; + if (sign != 0 && len == width) + alloc++; + if (alloc > 1) { + res = _PyBytesWriter_Prepare(&writer, res, alloc - 1); + if (res == NULL) + goto error; } +#ifdef Py_DEBUG + before = res; +#endif + + /* Write the sign if needed */ if (sign) { if (fill != ' ') *res++ = sign; - rescnt--; if (width > len) width--; } + + /* Write the numeric prefix for "x", "X" and "o" formats + if the alternate form is used. + For example, write "0x" for the "%#x" format. 
*/ if ((flags & F_ALT) && (c == 'x' || c == 'X')) { assert(pbuf[0] == '0'); assert(pbuf[1] == c); @@ -889,18 +942,21 @@ *res++ = *pbuf++; *res++ = *pbuf++; } - rescnt -= 2; width -= 2; if (width < 0) width = 0; len -= 2; } + + /* Pad left with the fill character if needed */ if (width > len && !(flags & F_LJUST)) { - do { - --rescnt; - *res++ = fill; - } while (--width > len); + memset(res, fill, width - len); + res += (width - len); + width = len; } + + /* If padding with spaces: write sign if needed and/or numeric + prefix if the alternate form is used */ if (fill == ' ') { if (sign) *res++ = sign; @@ -912,13 +968,17 @@ *res++ = *pbuf++; } } + + /* Copy bytes */ Py_MEMCPY(res, pbuf, len); res += len; - rescnt -= len; - while (--width >= len) { - --rescnt; - *res++ = ' '; + + /* Pad right with the fill character if needed */ + if (width > len) { + memset(res, ' ', width - len); + res += (width - len); } + if (dict && (argidx < arglen) && c != '%') { PyErr_SetString(PyExc_TypeError, "not all arguments converted during bytes formatting"); @@ -926,22 +986,31 @@ goto error; } Py_XDECREF(temp); + +#ifdef Py_DEBUG + /* check that we computed the exact size for this write */ + assert((res - before) == alloc); +#endif } /* '%' */ + + /* If overallocation was disabled, ensure that it was the last + write. Otherwise, we missed an optimization */ + assert(writer.overallocate || fmtcnt < 0); } /* until end */ + if (argidx < arglen && !dict) { PyErr_SetString(PyExc_TypeError, "not all arguments converted during bytes formatting"); goto error; } + if (args_owned) { Py_DECREF(args); } - if (_PyBytes_Resize(&result, reslen - rescnt)) - return NULL; - return result; + return _PyBytesWriter_Finish(&writer, res); error: - Py_DECREF(result); + _PyBytesWriter_Dealloc(&writer); if (args_owned) { Py_DECREF(args); } -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Fri Oct 9 13:16:34 2015 From: python-checkins at python.org (victor.stinner) Date: Fri, 09 Oct 2015 11:16:34 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_=5FPyBytesWriter=3A_rename?= =?utf-8?q?_size_attribute_to_min=5Fsize?= Message-ID: <20151009111616.97700.47106@psf.io> https://hg.python.org/cpython/rev/9c8724ec0e00 changeset: 98616:9c8724ec0e00 user: Victor Stinner date: Fri Oct 09 12:37:03 2015 +0200 summary: _PyBytesWriter: rename size attribute to min_size files: Include/bytesobject.h | 5 +++-- Objects/bytesobject.c | 14 +++++++------- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/Include/bytesobject.h b/Include/bytesobject.h --- a/Include/bytesobject.h +++ b/Include/bytesobject.h @@ -134,8 +134,9 @@ /* Number of allocated size */ Py_ssize_t allocated; - /* Current size of the buffer (can be smaller than the allocated size) */ - Py_ssize_t size; + /* Minimum number of allocated bytes, + incremented by _PyBytesWriter_Prepare() */ + Py_ssize_t min_size; /* If non-zero, overallocate the buffer (default: 0). 
*/ int overallocate; diff --git a/Objects/bytesobject.c b/Objects/bytesobject.c --- a/Objects/bytesobject.c +++ b/Objects/bytesobject.c @@ -3821,7 +3821,7 @@ { writer->buffer = NULL; writer->allocated = 0; - writer->size = 0; + writer->min_size = 0; writer->overallocate = 0; writer->use_small_buffer = 0; #ifdef Py_DEBUG @@ -3874,7 +3874,7 @@ } start = _PyBytesWriter_AsString(writer); - assert(0 <= writer->size && writer->size <= writer->allocated); + assert(0 <= writer->min_size && writer->min_size <= writer->allocated); /* the last byte must always be null */ assert(start[writer->allocated] == 0); @@ -3897,18 +3897,18 @@ return str; } - if (writer->size > PY_SSIZE_T_MAX - size) { + if (writer->min_size > PY_SSIZE_T_MAX - size) { PyErr_NoMemory(); _PyBytesWriter_Dealloc(writer); return NULL; } - writer->size += size; + writer->min_size += size; allocated = writer->allocated; - if (writer->size <= allocated) + if (writer->min_size <= allocated) return str; - allocated = writer->size; + allocated = writer->min_size; if (writer->overallocate && allocated <= (PY_SSIZE_T_MAX - allocated / OVERALLOCATE_FACTOR)) { /* overallocate to limit the number of realloc() */ @@ -3957,7 +3957,7 @@ _PyBytesWriter_Alloc(_PyBytesWriter *writer, Py_ssize_t size) { /* ensure that _PyBytesWriter_Alloc() is only called once */ - assert(writer->size == 0 && writer->buffer == NULL); + assert(writer->min_size == 0 && writer->buffer == NULL); assert(size >= 0); writer->use_small_buffer = 1; -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Fri Oct 9 13:16:34 2015 From: python-checkins at python.org (victor.stinner) Date: Fri, 09 Oct 2015 11:16:34 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Add_=5FPyBytesWriter=5FWri?= =?utf-8?q?teBytes=28=29_to_factorize_the_code?= Message-ID: <20151009111617.97708.57649@psf.io> https://hg.python.org/cpython/rev/48f242f5e689 changeset: 98618:48f242f5e689 user: Victor Stinner date: Fri Oct 09 12:57:22 2015 +0200 summary: Add _PyBytesWriter_WriteBytes() to factorize the code files: Include/bytesobject.h | 7 +++++++ Objects/bytesobject.c | 14 ++++++++++++++ Objects/stringlib/codecs.h | 22 +++++++++++----------- Objects/unicodeobject.c | 8 +++----- 4 files changed, 35 insertions(+), 16 deletions(-) diff --git a/Include/bytesobject.h b/Include/bytesobject.h --- a/Include/bytesobject.h +++ b/Include/bytesobject.h @@ -174,6 +174,13 @@ PyAPI_FUNC(char*) _PyBytesWriter_Prepare(_PyBytesWriter *writer, char *str, Py_ssize_t size); + +/* Write bytes. + Raise an exception and return NULL on error. 
*/ +PyAPI_FUNC(char*) _PyBytesWriter_WriteBytes(_PyBytesWriter *writer, + char *str, + char *bytes, + Py_ssize_t size); #endif /* Py_LIMITED_API */ #ifdef __cplusplus diff --git a/Objects/bytesobject.c b/Objects/bytesobject.c --- a/Objects/bytesobject.c +++ b/Objects/bytesobject.c @@ -3995,3 +3995,17 @@ return result; } + +char* +_PyBytesWriter_WriteBytes(_PyBytesWriter *writer, char *str, + char *bytes, Py_ssize_t size) +{ + str = _PyBytesWriter_Prepare(writer, str, size); + if (str == NULL) + return NULL; + + Py_MEMCPY(str, bytes, size); + str += size; + + return str; +} diff --git a/Objects/stringlib/codecs.h b/Objects/stringlib/codecs.h --- a/Objects/stringlib/codecs.h +++ b/Objects/stringlib/codecs.h @@ -388,24 +388,24 @@ /* substract preallocated bytes */ writer.min_size -= max_char_size; - if (PyBytes_Check(rep)) - repsize = PyBytes_GET_SIZE(rep); - else - repsize = PyUnicode_GET_LENGTH(rep); - - p = _PyBytesWriter_Prepare(&writer, p, repsize); - if (p == NULL) - goto error; - if (PyBytes_Check(rep)) { - memcpy(p, PyBytes_AS_STRING(rep), repsize); - p += repsize; + p = _PyBytesWriter_WriteBytes(&writer, p, + PyBytes_AS_STRING(rep), + PyBytes_GET_SIZE(rep)); + if (p == NULL) + goto error; } else { /* rep is unicode */ if (PyUnicode_READY(rep) < 0) goto error; + repsize = PyUnicode_GET_LENGTH(rep); + + p = _PyBytesWriter_Prepare(&writer, p, repsize); + if (p == NULL) + goto error; + if (!PyUnicode_IS_ASCII(rep)) { raise_encode_exception(&exc, "utf-8", unicode, diff --git a/Objects/unicodeobject.c b/Objects/unicodeobject.c --- a/Objects/unicodeobject.c +++ b/Objects/unicodeobject.c @@ -6706,14 +6706,12 @@ if (PyBytes_Check(repunicode)) { /* Directly copy bytes result to output. */ - repsize = PyBytes_Size(repunicode); - - str = _PyBytesWriter_Prepare(&writer, str, repsize); + str = _PyBytesWriter_WriteBytes(&writer, str, + PyBytes_AS_STRING(repunicode), + PyBytes_GET_SIZE(repunicode)); if (str == NULL) goto onError; - memcpy(str, PyBytes_AsString(repunicode), repsize); - str += repsize; pos = newpos; Py_DECREF(repunicode); break; -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Fri Oct 9 13:16:34 2015 From: python-checkins at python.org (victor.stinner) Date: Fri, 09 Oct 2015 11:16:34 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_=5FPyBytesWriter=3A_simpli?= =?utf-8?q?fy_code_to_avoid_=22prealloc=22_parameters?= Message-ID: <20151009111617.128854.92072@psf.io> https://hg.python.org/cpython/rev/cf075c6b1c1e changeset: 98617:cf075c6b1c1e user: Victor Stinner date: Fri Oct 09 12:38:53 2015 +0200 summary: _PyBytesWriter: simplify code to avoid "prealloc" parameters Substract preallocate bytes from min_size before calling _PyBytesWriter_Prepare(). 
files: Objects/bytesobject.c | 16 +++---- Objects/stringlib/codecs.h | 20 +++++--- Objects/unicodeobject.c | 58 ++++++++++++------------- 3 files changed, 47 insertions(+), 47 deletions(-) diff --git a/Objects/bytesobject.c b/Objects/bytesobject.c --- a/Objects/bytesobject.c +++ b/Objects/bytesobject.c @@ -411,8 +411,7 @@ static char* formatfloat(PyObject *v, int flags, int prec, int type, - PyObject **p_result, _PyBytesWriter *writer, char *str, - Py_ssize_t prealloc) + PyObject **p_result, _PyBytesWriter *writer, char *str) { char *p; PyObject *result; @@ -437,11 +436,9 @@ len = strlen(p); if (writer != NULL) { - if ((Py_ssize_t)len > prealloc) { - str = _PyBytesWriter_Prepare(writer, str, len - prealloc); - if (str == NULL) - return NULL; - } + str = _PyBytesWriter_Prepare(writer, str, len); + if (str == NULL) + return NULL; Py_MEMCPY(str, p, len); str += len; return str; @@ -865,13 +862,14 @@ && !(flags & (F_SIGN | F_BLANK))) { /* Fast path */ - res = formatfloat(v, flags, prec, c, NULL, &writer, res, 1); + writer.min_size -= 2; /* size preallocated by "%f" */ + res = formatfloat(v, flags, prec, c, NULL, &writer, res); if (res == NULL) goto error; continue; } - if (!formatfloat(v, flags, prec, c, &temp, NULL, res, 1)) + if (!formatfloat(v, flags, prec, c, &temp, NULL, res)) goto error; pbuf = PyBytes_AS_STRING(temp); len = PyBytes_GET_SIZE(temp); diff --git a/Objects/stringlib/codecs.h b/Objects/stringlib/codecs.h --- a/Objects/stringlib/codecs.h +++ b/Objects/stringlib/codecs.h @@ -345,7 +345,9 @@ break; case _Py_ERROR_BACKSLASHREPLACE: - p = backslashreplace(&writer, max_char_size, p, + /* substract preallocated bytes */ + writer.min_size -= max_char_size * (endpos - startpos); + p = backslashreplace(&writer, p, unicode, startpos, endpos); if (p == NULL) goto error; @@ -353,7 +355,9 @@ break; case _Py_ERROR_XMLCHARREFREPLACE: - p = xmlcharrefreplace(&writer, max_char_size, p, + /* substract preallocated bytes */ + writer.min_size -= max_char_size * (endpos - startpos); + p = xmlcharrefreplace(&writer, p, unicode, startpos, endpos); if (p == NULL) goto error; @@ -381,17 +385,17 @@ if (!rep) goto error; + /* substract preallocated bytes */ + writer.min_size -= max_char_size; + if (PyBytes_Check(rep)) repsize = PyBytes_GET_SIZE(rep); else repsize = PyUnicode_GET_LENGTH(rep); - if (repsize > max_char_size) { - p = _PyBytesWriter_Prepare(&writer, p, - repsize - max_char_size); - if (p == NULL) - goto error; - } + p = _PyBytesWriter_Prepare(&writer, p, repsize); + if (p == NULL) + goto error; if (PyBytes_Check(rep)) { memcpy(p, PyBytes_AS_STRING(rep), repsize); diff --git a/Objects/unicodeobject.c b/Objects/unicodeobject.c --- a/Objects/unicodeobject.c +++ b/Objects/unicodeobject.c @@ -565,11 +565,10 @@ /* Implementation of the "backslashreplace" error handler for 8-bit encodings: ASCII, Latin1, UTF-8, etc. 
*/ static char* -backslashreplace(_PyBytesWriter *writer, Py_ssize_t prealloc_per_char, - char *str, +backslashreplace(_PyBytesWriter *writer, char *str, PyObject *unicode, Py_ssize_t collstart, Py_ssize_t collend) { - Py_ssize_t size, i, prealloc; + Py_ssize_t size, i; Py_UCS4 ch; enum PyUnicode_Kind kind; void *data; @@ -600,12 +599,9 @@ size += incr; } - prealloc = prealloc_per_char * (collend - collstart); - if (size > prealloc) { - str = _PyBytesWriter_Prepare(writer, str, size - prealloc); - if (str == NULL) - return NULL; - } + str = _PyBytesWriter_Prepare(writer, str, size); + if (str == NULL) + return NULL; /* generate replacement */ for (i = collstart; i < collend; ++i) { @@ -636,11 +632,10 @@ /* Implementation of the "xmlcharrefreplace" error handler for 8-bit encodings: ASCII, Latin1, UTF-8, etc. */ static char* -xmlcharrefreplace(_PyBytesWriter *writer, Py_ssize_t prealloc_per_char, - char *str, +xmlcharrefreplace(_PyBytesWriter *writer, char *str, PyObject *unicode, Py_ssize_t collstart, Py_ssize_t collend) { - Py_ssize_t size, i, prealloc; + Py_ssize_t size, i; Py_UCS4 ch; enum PyUnicode_Kind kind; void *data; @@ -679,12 +674,9 @@ size += incr; } - prealloc = prealloc_per_char * (collend - collstart); - if (size > prealloc) { - str = _PyBytesWriter_Prepare(writer, str, size - prealloc); - if (str == NULL) - return NULL; - } + str = _PyBytesWriter_Prepare(writer, str, size); + if (str == NULL) + return NULL; /* generate replacement */ for (i = collstart; i < collend; ++i) { @@ -6666,7 +6658,9 @@ break; case _Py_ERROR_BACKSLASHREPLACE: - str = backslashreplace(&writer, 1, str, + /* substract preallocated bytes */ + writer.min_size -= (collend - collstart); + str = backslashreplace(&writer, str, unicode, collstart, collend); if (str == NULL) goto onError; @@ -6674,7 +6668,9 @@ break; case _Py_ERROR_XMLCHARREFREPLACE: - str = xmlcharrefreplace(&writer, 1, str, + /* substract preallocated bytes */ + writer.min_size -= (collend - collstart); + str = xmlcharrefreplace(&writer, str, unicode, collstart, collend); if (str == NULL) goto onError; @@ -6705,14 +6701,17 @@ PyUnicode_READY(repunicode) == -1)) goto onError; + /* substract preallocated bytes */ + writer.min_size -= 1; + if (PyBytes_Check(repunicode)) { /* Directly copy bytes result to output. 
*/ repsize = PyBytes_Size(repunicode); - if (repsize > 1) { - str = _PyBytesWriter_Prepare(&writer, str, repsize-1); - if (str == NULL) - goto onError; - } + + str = _PyBytesWriter_Prepare(&writer, str, repsize); + if (str == NULL) + goto onError; + memcpy(str, PyBytes_AsString(repunicode), repsize); str += repsize; pos = newpos; @@ -6724,11 +6723,10 @@ have+the replacement+the rest of the string, so we won't have to check space for encodable characters) */ repsize = PyUnicode_GET_LENGTH(repunicode); - if (repsize > 1) { - str = _PyBytesWriter_Prepare(&writer, str, repsize-1); - if (str == NULL) - goto onError; - } + + str = _PyBytesWriter_Prepare(&writer, str, repsize); + if (str == NULL) + goto onError; /* check if there is anything unencodable in the replacement and copy it to the output */ -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Fri Oct 9 13:16:34 2015 From: python-checkins at python.org (victor.stinner) Date: Fri, 09 Oct 2015 11:16:34 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Optimize_error_handlers_of?= =?utf-8?q?_ASCII_and_Latin1_encoders_when_the_replacement?= Message-ID: <20151009111617.128840.85537@psf.io> https://hg.python.org/cpython/rev/92025e9c6b54 changeset: 98619:92025e9c6b54 user: Victor Stinner date: Fri Oct 09 13:10:05 2015 +0200 summary: Optimize error handlers of ASCII and Latin1 encoders when the replacement string is pure ASCII: use _PyBytesWriter_WriteBytes(), don't check individual character. Cleanup unicode_encode_ucs1(): * Rename repunicode to rep * Clear rep object on error * Factorize code between bytes and unicode path files: Objects/stringlib/codecs.h | 18 ++--- Objects/unicodeobject.c | 76 ++++++++++++++----------- 2 files changed, 49 insertions(+), 45 deletions(-) diff --git a/Objects/stringlib/codecs.h b/Objects/stringlib/codecs.h --- a/Objects/stringlib/codecs.h +++ b/Objects/stringlib/codecs.h @@ -311,7 +311,7 @@ #if STRINGLIB_SIZEOF_CHAR > 1 else if (Py_UNICODE_IS_SURROGATE(ch)) { Py_ssize_t startpos, endpos, newpos; - Py_ssize_t repsize, k; + Py_ssize_t k; if (error_handler == _Py_ERROR_UNKNOWN) error_handler = get_error_handler(errors); @@ -392,20 +392,12 @@ p = _PyBytesWriter_WriteBytes(&writer, p, PyBytes_AS_STRING(rep), PyBytes_GET_SIZE(rep)); - if (p == NULL) - goto error; } else { /* rep is unicode */ if (PyUnicode_READY(rep) < 0) goto error; - repsize = PyUnicode_GET_LENGTH(rep); - - p = _PyBytesWriter_Prepare(&writer, p, repsize); - if (p == NULL) - goto error; - if (!PyUnicode_IS_ASCII(rep)) { raise_encode_exception(&exc, "utf-8", unicode, @@ -415,9 +407,13 @@ } assert(PyUnicode_KIND(rep) == PyUnicode_1BYTE_KIND); - memcpy(p, PyUnicode_DATA(rep), repsize); - p += repsize; + p = _PyBytesWriter_WriteBytes(&writer, p, + PyUnicode_DATA(rep), + PyUnicode_GET_LENGTH(rep)); } + + if (p == NULL) + goto error; Py_CLEAR(rep); i = newpos; diff --git a/Objects/unicodeobject.c b/Objects/unicodeobject.c --- a/Objects/unicodeobject.c +++ b/Objects/unicodeobject.c @@ -6599,6 +6599,7 @@ PyObject *error_handler_obj = NULL; PyObject *exc = NULL; _Py_error_handler error_handler = _Py_ERROR_UNKNOWN; + PyObject *rep = NULL; /* output object */ _PyBytesWriter writer; @@ -6627,8 +6628,7 @@ ++pos; } else { - PyObject *repunicode; - Py_ssize_t repsize, newpos, i; + Py_ssize_t newpos, i; /* startpos for collecting unencodable chars */ Py_ssize_t collstart = pos; Py_ssize_t collend = collstart + 1; @@ -6694,52 +6694,59 @@ /* fallback to general error handling */ default: - repunicode = 
unicode_encode_call_errorhandler(errors, &error_handler_obj, - encoding, reason, unicode, &exc, - collstart, collend, &newpos); - if (repunicode == NULL || (PyUnicode_Check(repunicode) && - PyUnicode_READY(repunicode) == -1)) + rep = unicode_encode_call_errorhandler(errors, &error_handler_obj, + encoding, reason, unicode, &exc, + collstart, collend, &newpos); + if (rep == NULL) goto onError; /* substract preallocated bytes */ writer.min_size -= 1; - if (PyBytes_Check(repunicode)) { + if (PyBytes_Check(rep)) { /* Directly copy bytes result to output. */ str = _PyBytesWriter_WriteBytes(&writer, str, - PyBytes_AS_STRING(repunicode), - PyBytes_GET_SIZE(repunicode)); + PyBytes_AS_STRING(rep), + PyBytes_GET_SIZE(rep)); if (str == NULL) goto onError; - - pos = newpos; - Py_DECREF(repunicode); - break; } - - /* need more space? (at least enough for what we - have+the replacement+the rest of the string, so - we won't have to check space for encodable characters) */ - repsize = PyUnicode_GET_LENGTH(repunicode); - - str = _PyBytesWriter_Prepare(&writer, str, repsize); - if (str == NULL) - goto onError; - - /* check if there is anything unencodable in the replacement - and copy it to the output */ - for (i = 0; repsize-->0; ++i, ++str) { - ch = PyUnicode_READ_CHAR(repunicode, i); - if (ch >= limit) { - raise_encode_exception(&exc, encoding, unicode, - pos, pos+1, reason); - Py_DECREF(repunicode); + else { + assert(PyUnicode_Check(rep)); + + if (PyUnicode_READY(rep) < 0) goto onError; + + if (PyUnicode_IS_ASCII(rep)) { + /* Fast path: all characters are smaller than limit */ + assert(limit >= 128); + assert(PyUnicode_KIND(rep) == PyUnicode_1BYTE_KIND); + str = _PyBytesWriter_WriteBytes(&writer, str, + PyUnicode_DATA(rep), + PyUnicode_GET_LENGTH(rep)); } - *str = (char)ch; + else { + Py_ssize_t repsize = PyUnicode_GET_LENGTH(rep); + + str = _PyBytesWriter_Prepare(&writer, str, repsize); + if (str == NULL) + goto onError; + + /* check if there is anything unencodable in the + replacement and copy it to the output */ + for (i = 0; repsize-->0; ++i, ++str) { + ch = PyUnicode_READ_CHAR(rep, i); + if (ch >= limit) { + raise_encode_exception(&exc, encoding, unicode, + pos, pos+1, reason); + goto onError; + } + *str = (char)ch; + } + } } pos = newpos; - Py_DECREF(repunicode); + Py_CLEAR(rep); } /* If overallocation was disabled, ensure that it was the last @@ -6753,6 +6760,7 @@ return _PyBytesWriter_Finish(&writer, str); onError: + Py_XDECREF(rep); _PyBytesWriter_Dealloc(&writer); Py_XDECREF(error_handler_obj); Py_XDECREF(exc); -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Fri Oct 9 16:24:04 2015 From: python-checkins at python.org (r.david.murray) Date: Fri, 09 Oct 2015 14:24:04 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Merge_=2325328=3A_add_missing_raise_keyword_in_decode=5F?= =?utf-8?q?data+SMTPUTF8_check=2E?= Message-ID: <20151009142255.2679.10719@psf.io> https://hg.python.org/cpython/rev/576128c0d068 changeset: 98621:576128c0d068 parent: 98619:92025e9c6b54 parent: 98620:d471cf4a73b2 user: R David Murray date: Fri Oct 09 10:20:58 2015 -0400 summary: Merge #25328: add missing raise keyword in decode_data+SMTPUTF8 check. 
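The fix is a single keyword, but the failure mode is worth spelling out: a bare ValueError(...) expression only constructs the exception object and then discards it, so the invalid flag combination was silently accepted. A minimal illustration, independent of smtpd (the message text is the one used in the patch):

    # Buggy pattern: the exception is created, then thrown away unused.
    ValueError("decode_data and enable_SMTPUTF8 cannot be set to True at the same time")
    print("no exception was raised")   # this line is reached

    # Fixed pattern: 'raise' actually propagates the error.
    try:
        raise ValueError("decode_data and enable_SMTPUTF8 cannot be set to True at the same time")
    except ValueError as exc:
        print("caught:", exc)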
files: Lib/smtpd.py | 4 ++-- Lib/test/test_smtpd.py | 6 ++++++ Misc/NEWS | 3 +++ 3 files changed, 11 insertions(+), 2 deletions(-) diff --git a/Lib/smtpd.py b/Lib/smtpd.py --- a/Lib/smtpd.py +++ b/Lib/smtpd.py @@ -137,8 +137,8 @@ self.enable_SMTPUTF8 = enable_SMTPUTF8 if enable_SMTPUTF8: if decode_data: - ValueError("decode_data and enable_SMTPUTF8 cannot be set to" - " True at the same time") + raise ValueError("decode_data and enable_SMTPUTF8 cannot" + " be set to True at the same time") decode_data = False if decode_data is None: warn("The decode_data default of True will change to False in 3.6;" diff --git a/Lib/test/test_smtpd.py b/Lib/test/test_smtpd.py --- a/Lib/test/test_smtpd.py +++ b/Lib/test/test_smtpd.py @@ -313,6 +313,12 @@ DummyDispatcherBroken, BrokenDummyServer, (support.HOST, 0), ('b', 0), decode_data=True) + def test_decode_data_and_enable_SMTPUTF8_raises(self): + self.assertRaises( + ValueError, smtpd.SMTPChannel, + self.server, self.channel.conn, self.channel.addr, + enable_SMTPUTF8=True, decode_data=True) + def test_server_accept(self): self.server.handle_accept() diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -48,6 +48,9 @@ Library ------- +- Issue #25328: smtpd's SMTPChannel now correctly raises a ValueError if both + decode_data and enable_SMTPUTF8 are set to true. + - Issue #16099: RobotFileParser now supports Crawl-delay and Request-rate extensions. Patch by Nikolay Bogoychev. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Fri Oct 9 16:24:04 2015 From: python-checkins at python.org (r.david.murray) Date: Fri, 09 Oct 2015 14:24:04 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy41KTogIzI1MzI4OiBhZGQg?= =?utf-8?q?missing_raise_keyword_in_decode=5Fdata+SMTPUTF8_check=2E?= Message-ID: <20151009142254.97702.22718@psf.io> https://hg.python.org/cpython/rev/d471cf4a73b2 changeset: 98620:d471cf4a73b2 branch: 3.5 parent: 98601:1e99ba6b7c98 user: R David Murray date: Fri Oct 09 10:19:33 2015 -0400 summary: #25328: add missing raise keyword in decode_data+SMTPUTF8 check. This is a relatively benign bug, since having both be true was correctly rejected at in SMTPServer even before this patch. Patch by Xiang Zhang. 
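The root cause here is a common Python slip: calling an exception class without ``raise`` merely creates and discards the exception object, so this particular check in ``SMTPChannel`` was a no-op. A minimal standalone sketch of the before/after behaviour (the helper name ``check_flags`` is illustrative and not taken from smtpd):

    def check_flags(decode_data, enable_SMTPUTF8):
        if enable_SMTPUTF8 and decode_data:
            # Buggy form: the ValueError is built and immediately thrown away,
            # so execution continues as if the combination were acceptable.
            ValueError("decode_data and enable_SMTPUTF8 cannot"
                       " be set to True at the same time")
            # Fixed form: the exception actually propagates to the caller.
            raise ValueError("decode_data and enable_SMTPUTF8 cannot"
                             " be set to True at the same time")

    check_flags(decode_data=False, enable_SMTPUTF8=True)     # accepted
    try:
        check_flags(decode_data=True, enable_SMTPUTF8=True)  # now rejected
    except ValueError as exc:
        print("rejected as expected:", exc)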
files: Lib/smtpd.py | 4 ++-- Lib/test/test_smtpd.py | 6 ++++++ Misc/NEWS | 3 +++ 3 files changed, 11 insertions(+), 2 deletions(-) diff --git a/Lib/smtpd.py b/Lib/smtpd.py --- a/Lib/smtpd.py +++ b/Lib/smtpd.py @@ -137,8 +137,8 @@ self.enable_SMTPUTF8 = enable_SMTPUTF8 if enable_SMTPUTF8: if decode_data: - ValueError("decode_data and enable_SMTPUTF8 cannot be set to" - " True at the same time") + raise ValueError("decode_data and enable_SMTPUTF8 cannot" + " be set to True at the same time") decode_data = False if decode_data is None: warn("The decode_data default of True will change to False in 3.6;" diff --git a/Lib/test/test_smtpd.py b/Lib/test/test_smtpd.py --- a/Lib/test/test_smtpd.py +++ b/Lib/test/test_smtpd.py @@ -313,6 +313,12 @@ DummyDispatcherBroken, BrokenDummyServer, (support.HOST, 0), ('b', 0), decode_data=True) + def test_decode_data_and_enable_SMTPUTF8_raises(self): + self.assertRaises( + ValueError, smtpd.SMTPChannel, + self.server, self.channel.conn, self.channel.addr, + enable_SMTPUTF8=True, decode_data=True) + def test_server_accept(self): self.server.handle_accept() diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -37,6 +37,9 @@ Library ------- +- Issue #25328: smtpd's SMTPChannel now correctly raises a ValueError if both + decode_data and enable_SMTPUTF8 are set to true. + - Issue #25316: distutils raises OSError instead of DistutilsPlatformError when MSVC is not installed. -- Repository URL: https://hg.python.org/cpython From lp_benchmark_robot at intel.com Fri Oct 9 17:06:13 2015 From: lp_benchmark_robot at intel.com (lp_benchmark_robot at intel.com) Date: Fri, 9 Oct 2015 16:06:13 +0100 Subject: [Python-checkins] Benchmark Results for Python 2.7 2015-10-09 Message-ID: Results for project python_2.7-nightly, build date 2015-10-09 06:09:53 commit: 4d1bd86d3bbde1c0038dca3d29f99a27768b9bcf revision date: 2015-10-09 01:14:15 +0000 environment: Haswell-EP cpu: Intel(R) Xeon(R) CPU E5-2699 v3 @ 2.30GHz 2x18 cores, stepping 2, LLC 45 MB mem: 128 GB os: CentOS 7.1 kernel: Linux 3.10.0-229.4.2.el7.x86_64 Baseline results were generated using release v2.7.10, with hash 15c95b7d81dcf821daade360741e00714667653f from 2015-05-23 16:02:14+00:00 ------------------------------------------------------------------------------------------ benchmark relative change since change since current rev with std_dev* last run v2.7.10 regrtest PGO ------------------------------------------------------------------------------------------ :-) django_v2 0.20667% -2.21248% 3.96800% 11.32773% :-) pybench 0.20117% -0.04027% 6.77648% 6.74180% :-( regex_v8 1.18478% -0.16871% -2.38830% 8.72188% :-) nbody 0.20759% -0.03092% 6.69899% 5.82531% :-) json_dump_v2 0.19872% -0.40111% 2.94689% 13.71620% :-| normal_startup 1.72314% -0.17055% -1.83273% 2.88638% :-| ssbench 0.61870% 0.08683% 1.33914% 1.44073% ------------------------------------------------------------------------------------------ Note: Benchmark results for ssbench are measured in requests/second while all other are measured in seconds. * Relative Standard Deviation (Standard Deviation/Average) Our lab does a nightly source pull and build of the Python project and measures performance changes against the previous stable version and the previous nightly measurement. This is provided as a service to the community so that quality issues with current hardware can be identified quickly. Intel technologies' features and benefits depend on system configuration and may require enabled hardware, software or service activation. 
Performance varies depending on system configuration. From lp_benchmark_robot at intel.com Fri Oct 9 17:09:31 2015 From: lp_benchmark_robot at intel.com (lp_benchmark_robot at intel.com) Date: Fri, 9 Oct 2015 16:09:31 +0100 Subject: [Python-checkins] Benchmark Results for Python Default 2015-10-09 Message-ID: <8d646d0b-814e-4bfb-90a5-c13eed07a9ea@irsmsx101.ger.corp.intel.com> Results for project python_default-nightly, build date 2015-10-08 03:02:03 commit: 3291e6132a674606af028be2d500701e5ff8285a revision date: 2015-10-07 11:15:15 +0000 environment: Haswell-EP cpu: Intel(R) Xeon(R) CPU E5-2699 v3 @ 2.30GHz 2x18 cores, stepping 2, LLC 45 MB mem: 128 GB os: CentOS 7.1 kernel: Linux 3.10.0-229.4.2.el7.x86_64 Baseline results were generated using release v3.4.3, with hash b4cbecbc0781e89a309d03b60a1f75f8499250e6 from 2015-02-25 12:15:33+00:00 ------------------------------------------------------------------------------------------ benchmark relative change since change since current rev with std_dev* last run v3.4.3 regrtest PGO ------------------------------------------------------------------------------------------ :-) django_v2 0.46296% 1.14672% 8.55231% 14.86189% :-( pybench 0.17124% -0.04103% -2.09380% 8.78514% :-( regex_v8 2.65849% 0.00322% -4.47271% 4.96087% :-| nbody 0.06992% -0.35455% -0.20884% 9.39964% :-| json_dump_v2 0.34646% 0.42105% -0.27706% 10.07059% :-| normal_startup 0.66576% 0.53474% 0.25984% 5.40658% ------------------------------------------------------------------------------------------ Note: Benchmark results are measured in seconds. * Relative Standard Deviation (Standard Deviation/Average) Our lab does a nightly source pull and build of the Python project and measures performance changes against the previous stable version and the previous nightly measurement. This is provided as a service to the community so that quality issues with current hardware can be identified quickly. Intel technologies' features and benefits depend on system configuration and may require enabled hardware, software or service activation. Performance varies depending on system configuration. From python-checkins at python.org Fri Oct 9 19:12:09 2015 From: python-checkins at python.org (andrew.svetlov) Date: Fri, 09 Oct 2015 17:12:09 +0000 Subject: [Python-checkins] =?utf-8?q?peps=3A_Fix_typo?= Message-ID: <20151009171056.55462.20851@psf.io> https://hg.python.org/peps/rev/7a3cf8cd02ab changeset: 6109:7a3cf8cd02ab user: Andrew Svetlov date: Fri Oct 09 20:10:51 2015 +0300 summary: Fix typo files: pep-0492.txt | 12 ++++++------ 1 files changed, 6 insertions(+), 6 deletions(-) diff --git a/pep-0492.txt b/pep-0492.txt --- a/pep-0492.txt +++ b/pep-0492.txt @@ -125,7 +125,7 @@ * Internally, two new code object flags were introduced: - ``CO_COROUTINE`` is used to mark *native coroutines* - (defined with new syntax.) + (defined with new syntax). - ``CO_ITERABLE_COROUTINE`` is used to make *generator-based coroutines* compatible with *native coroutines* (set by @@ -139,7 +139,7 @@ such behavior requires a future import (see PEP 479). * When a *coroutine* is garbage collected, a ``RuntimeWarning`` is - raised if it was never awaited on (see also `Debugging Features`_.) + raised if it was never awaited on (see also `Debugging Features`_). * See also `Coroutine objects`_ section. @@ -199,7 +199,7 @@ internally, coroutines are a special kind of generators, every ``await`` is suspended by a ``yield`` somewhere down the chain of ``await`` calls (please refer to PEP 3156 for a detailed - explanation.) 
+ explanation). To enable this behavior for coroutines, a new magic method called ``__await__`` is added. In asyncio, for instance, to enable *Future* @@ -222,7 +222,7 @@ It is a ``SyntaxError`` to use ``await`` outside of an ``async def`` function (like it is a ``SyntaxError`` to use ``yield`` outside of -``def`` function.) +``def`` function). It is a ``TypeError`` to pass anything other than an *awaitable* object to an ``await`` expression. @@ -918,7 +918,7 @@ ``async`` is mostly used by asyncio. We are addressing this by renaming ``async()`` function to ``ensure_future()`` (see `asyncio`_ -section for details.) +section for details). Another use of ``async`` keyword is in ``Lib/xml/dom/xmlbuilder.py``, to define an ``async = False`` attribute for ``DocumentLS`` class. @@ -970,7 +970,7 @@ 2. A new keyword ``cocall`` to call a *cofunction*. Can only be used inside a *cofunction*. Maps to ``await`` in this proposal (with - some differences, see below.) + some differences, see below). 3. It is not possible to call a *cofunction* without a ``cocall`` keyword. -- Repository URL: https://hg.python.org/peps From python-checkins at python.org Fri Oct 9 23:04:44 2015 From: python-checkins at python.org (victor.stinner) Date: Fri, 09 Oct 2015 21:04:44 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2325349=3A_Add_fast?= =?utf-8?b?IHBhdGggZm9yIGInJWMnICUgaW50?= Message-ID: <20151009210442.97700.76975@psf.io> https://hg.python.org/cpython/rev/4d46d1588629 changeset: 98623:4d46d1588629 user: Victor Stinner date: Fri Oct 09 22:50:36 2015 +0200 summary: Issue #25349: Add fast path for b'%c' % int Optimize also %% formater. files: Lib/test/test_format.py | 2 ++ Objects/bytesobject.c | 25 +++++++++++++++---------- 2 files changed, 17 insertions(+), 10 deletions(-) diff --git a/Lib/test/test_format.py b/Lib/test/test_format.py --- a/Lib/test/test_format.py +++ b/Lib/test/test_format.py @@ -300,6 +300,8 @@ testcommon(b"%c", 7, b"\x07") testcommon(b"%c", b"Z", b"Z") testcommon(b"%c", bytearray(b"Z"), b"Z") + testcommon(b"%5c", 65, b" A") + testcommon(b"%-5c", 65, b"A ") # %b will insert a series of bytes, either from a type that supports # the Py_buffer protocol, or something that has a __bytes__ method class FakeBytes(object): diff --git a/Objects/bytesobject.c b/Objects/bytesobject.c --- a/Objects/bytesobject.c +++ b/Objects/bytesobject.c @@ -808,9 +808,8 @@ fill = ' '; switch (c) { case '%': - pbuf = "%"; - len = 1; - break; + *res++ = '%'; + continue; case 'r': // %r is only for 2/3 code; 3 only code should use %a @@ -842,9 +841,9 @@ case 'x': case 'X': if (PyLong_CheckExact(v) - && width == -1 && prec == -1 - && !(flags & (F_SIGN | F_BLANK)) - && c != 'X') + && width == -1 && prec == -1 + && !(flags & (F_SIGN | F_BLANK)) + && c != 'X') { /* Fast path */ int alternate = flags & F_ALT; @@ -869,7 +868,7 @@ } /* Fast path */ - writer.min_size -= 2; /* size preallocated by "%d" */ + writer.min_size -= 2; /* size preallocated for "%d" */ res = _PyLong_FormatBytesWriter(&writer, res, v, base, alternate); if (res == NULL) @@ -898,7 +897,7 @@ && !(flags & (F_SIGN | F_BLANK))) { /* Fast path */ - writer.min_size -= 2; /* size preallocated by "%f" */ + writer.min_size -= 2; /* size preallocated for "%f" */ res = formatfloat(v, flags, prec, c, NULL, &writer, res); if (res == NULL) goto error; @@ -919,6 +918,11 @@ len = byte_converter(v, &onechar); if (!len) goto error; + if (width == -1) { + /* Fast path */ + *res++ = onechar; + continue; + } break; default: @@ -949,8 +953,9 @@ alloc = width; if (sign 
!= 0 && len == width) alloc++; - if (alloc > 1) { - res = _PyBytesWriter_Prepare(&writer, res, alloc - 1); + /* 2: size preallocated for %s */ + if (alloc > 2) { + res = _PyBytesWriter_Prepare(&writer, res, alloc - 2); if (res == NULL) goto error; } -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Fri Oct 9 23:04:44 2015 From: python-checkins at python.org (victor.stinner) Date: Fri, 09 Oct 2015 21:04:44 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2325349=3A_Optimize?= =?utf-8?q?_bytes_=25_int?= Message-ID: <20151009210442.97714.4362@psf.io> https://hg.python.org/cpython/rev/d9a89c9137d2 changeset: 98622:d9a89c9137d2 user: Victor Stinner date: Fri Oct 09 22:43:24 2015 +0200 summary: Issue #25349: Optimize bytes % int Optimize bytes.__mod__(args) for integere formats: %d (%i, %u), %o, %x and %X. _PyBytesWriter is now used to format directly the integer into the writer buffer, instead of using a temporary bytes object. Formatting is between 30% and 50% faster on a microbenchmark. files: Include/longobject.h | 7 + Objects/bytesobject.c | 36 +++++++++ Objects/longobject.c | 120 +++++++++++++++++++++++------ 3 files changed, 136 insertions(+), 27 deletions(-) diff --git a/Include/longobject.h b/Include/longobject.h --- a/Include/longobject.h +++ b/Include/longobject.h @@ -182,6 +182,13 @@ int base, int alternate); +PyAPI_FUNC(char*) _PyLong_FormatBytesWriter( + _PyBytesWriter *writer, + char *str, + PyObject *obj, + int base, + int alternate); + /* Format the object based on the format_spec, as defined in PEP 3101 (Advanced String Formatting). */ PyAPI_FUNC(int) _PyLong_FormatAdvancedWriter( diff --git a/Objects/bytesobject.c b/Objects/bytesobject.c --- a/Objects/bytesobject.c +++ b/Objects/bytesobject.c @@ -841,6 +841,42 @@ case 'o': case 'x': case 'X': + if (PyLong_CheckExact(v) + && width == -1 && prec == -1 + && !(flags & (F_SIGN | F_BLANK)) + && c != 'X') + { + /* Fast path */ + int alternate = flags & F_ALT; + int base; + + switch(c) + { + default: + assert(0 && "'type' not in [diuoxX]"); + case 'd': + case 'i': + case 'u': + base = 10; + break; + case 'o': + base = 8; + break; + case 'x': + case 'X': + base = 16; + break; + } + + /* Fast path */ + writer.min_size -= 2; /* size preallocated by "%d" */ + res = _PyLong_FormatBytesWriter(&writer, res, + v, base, alternate); + if (res == NULL) + goto error; + continue; + } + temp = formatlong(v, flags, prec, c); if (!temp) goto error; diff --git a/Objects/longobject.c b/Objects/longobject.c --- a/Objects/longobject.c +++ b/Objects/longobject.c @@ -1582,7 +1582,9 @@ static int long_to_decimal_string_internal(PyObject *aa, PyObject **p_output, - _PyUnicodeWriter *writer) + _PyUnicodeWriter *writer, + _PyBytesWriter *bytes_writer, + char **bytes_str) { PyLongObject *scratch, *a; PyObject *str; @@ -1664,6 +1666,13 @@ kind = writer->kind; str = NULL; } + else if (bytes_writer) { + *bytes_str = _PyBytesWriter_Prepare(bytes_writer, *bytes_str, strlen); + if (*bytes_str == NULL) { + Py_DECREF(scratch); + return -1; + } + } else { str = PyUnicode_New(strlen, '9'); if (str == NULL) { @@ -1673,13 +1682,8 @@ kind = PyUnicode_KIND(str); } -#define WRITE_DIGITS(TYPE) \ +#define WRITE_DIGITS(p) \ do { \ - if (writer) \ - p = (TYPE*)PyUnicode_DATA(writer->buffer) + writer->pos + strlen; \ - else \ - p = (TYPE*)PyUnicode_DATA(str) + strlen; \ - \ /* pout[0] through pout[size-2] contribute exactly \ _PyLong_DECIMAL_SHIFT digits each */ \ for (i=0; i < size - 1; i++) { \ @@ -1699,6 +1703,16 @@ /* and sign */ \ 
if (negative) \ *--p = '-'; \ + } while (0) + +#define WRITE_UNICODE_DIGITS(TYPE) \ + do { \ + if (writer) \ + p = (TYPE*)PyUnicode_DATA(writer->buffer) + writer->pos + strlen; \ + else \ + p = (TYPE*)PyUnicode_DATA(str) + strlen; \ + \ + WRITE_DIGITS(p); \ \ /* check we've counted correctly */ \ if (writer) \ @@ -1708,25 +1722,34 @@ } while (0) /* fill the string right-to-left */ - if (kind == PyUnicode_1BYTE_KIND) { + if (bytes_writer) { + char *p = *bytes_str + strlen; + WRITE_DIGITS(p); + assert(p == *bytes_str); + } + else if (kind == PyUnicode_1BYTE_KIND) { Py_UCS1 *p; - WRITE_DIGITS(Py_UCS1); + WRITE_UNICODE_DIGITS(Py_UCS1); } else if (kind == PyUnicode_2BYTE_KIND) { Py_UCS2 *p; - WRITE_DIGITS(Py_UCS2); + WRITE_UNICODE_DIGITS(Py_UCS2); } else { Py_UCS4 *p; assert (kind == PyUnicode_4BYTE_KIND); - WRITE_DIGITS(Py_UCS4); + WRITE_UNICODE_DIGITS(Py_UCS4); } #undef WRITE_DIGITS +#undef WRITE_UNICODE_DIGITS Py_DECREF(scratch); if (writer) { writer->pos += strlen; } + else if (bytes_writer) { + (*bytes_str) += strlen; + } else { assert(_PyUnicode_CheckConsistency(str, 1)); *p_output = (PyObject *)str; @@ -1738,7 +1761,7 @@ long_to_decimal_string(PyObject *aa) { PyObject *v; - if (long_to_decimal_string_internal(aa, &v, NULL) == -1) + if (long_to_decimal_string_internal(aa, &v, NULL, NULL, NULL) == -1) return NULL; return v; } @@ -1750,7 +1773,8 @@ static int long_format_binary(PyObject *aa, int base, int alternate, - PyObject **p_output, _PyUnicodeWriter *writer) + PyObject **p_output, _PyUnicodeWriter *writer, + _PyBytesWriter *bytes_writer, char **bytes_str) { PyLongObject *a = (PyLongObject *)aa; PyObject *v; @@ -1812,6 +1836,11 @@ kind = writer->kind; v = NULL; } + else if (writer) { + *bytes_str = _PyBytesWriter_Prepare(bytes_writer, *bytes_str, sz); + if (*bytes_str == NULL) + return -1; + } else { v = PyUnicode_New(sz, 'x'); if (v == NULL) @@ -1819,13 +1848,8 @@ kind = PyUnicode_KIND(v); } -#define WRITE_DIGITS(TYPE) \ +#define WRITE_DIGITS(p) \ do { \ - if (writer) \ - p = (TYPE*)PyUnicode_DATA(writer->buffer) + writer->pos + sz; \ - else \ - p = (TYPE*)PyUnicode_DATA(v) + sz; \ - \ if (size_a == 0) { \ *--p = '0'; \ } \ @@ -1860,30 +1884,50 @@ } \ if (negative) \ *--p = '-'; \ + } while (0) + +#define WRITE_UNICODE_DIGITS(TYPE) \ + do { \ + if (writer) \ + p = (TYPE*)PyUnicode_DATA(writer->buffer) + writer->pos + sz; \ + else \ + p = (TYPE*)PyUnicode_DATA(v) + sz; \ + \ + WRITE_DIGITS(p); \ + \ if (writer) \ assert(p == ((TYPE*)PyUnicode_DATA(writer->buffer) + writer->pos)); \ else \ assert(p == (TYPE*)PyUnicode_DATA(v)); \ } while (0) - if (kind == PyUnicode_1BYTE_KIND) { + if (bytes_writer) { + char *p = *bytes_str + sz; + WRITE_DIGITS(p); + assert(p == *bytes_str); + } + else if (kind == PyUnicode_1BYTE_KIND) { Py_UCS1 *p; - WRITE_DIGITS(Py_UCS1); + WRITE_UNICODE_DIGITS(Py_UCS1); } else if (kind == PyUnicode_2BYTE_KIND) { Py_UCS2 *p; - WRITE_DIGITS(Py_UCS2); + WRITE_UNICODE_DIGITS(Py_UCS2); } else { Py_UCS4 *p; assert (kind == PyUnicode_4BYTE_KIND); - WRITE_DIGITS(Py_UCS4); + WRITE_UNICODE_DIGITS(Py_UCS4); } #undef WRITE_DIGITS +#undef WRITE_UNICODE_DIGITS if (writer) { writer->pos += sz; } + else if (bytes_writer) { + (*bytes_str) += sz; + } else { assert(_PyUnicode_CheckConsistency(v, 1)); *p_output = v; @@ -1897,9 +1941,9 @@ PyObject *str; int err; if (base == 10) - err = long_to_decimal_string_internal(obj, &str, NULL); + err = long_to_decimal_string_internal(obj, &str, NULL, NULL, NULL); else - err = long_format_binary(obj, base, 1, &str, NULL); + err = 
long_format_binary(obj, base, 1, &str, NULL, NULL, NULL); if (err == -1) return NULL; return str; @@ -1911,9 +1955,31 @@ int base, int alternate) { if (base == 10) - return long_to_decimal_string_internal(obj, NULL, writer); + return long_to_decimal_string_internal(obj, NULL, writer, + NULL, NULL); else - return long_format_binary(obj, base, alternate, NULL, writer); + return long_format_binary(obj, base, alternate, NULL, writer, + NULL, NULL); +} + +char* +_PyLong_FormatBytesWriter(_PyBytesWriter *writer, char *str, + PyObject *obj, + int base, int alternate) +{ + char *str2; + int res; + str2 = str; + if (base == 10) + res = long_to_decimal_string_internal(obj, NULL, NULL, + writer, &str2); + else + res = long_format_binary(obj, base, alternate, NULL, NULL, + writer, &str2); + if (res < 0) + return NULL; + assert(str2 != NULL); + return str2; } /* Table of digit values for 8-bit string -> integer conversion. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 10 00:10:16 2015 From: python-checkins at python.org (brett.cannon) Date: Fri, 09 Oct 2015 22:10:16 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy41KTogSXNzdWUgIzI1MDk5?= =?utf-8?q?=3A_Skip_relevant_tests_in_test=5Fcompileall_when_an_entry_on?= Message-ID: <20151009221015.128838.33341@psf.io> https://hg.python.org/cpython/rev/34bbd537b3e6 changeset: 98624:34bbd537b3e6 branch: 3.5 parent: 98620:d471cf4a73b2 user: Brett Cannon date: Fri Oct 09 15:09:43 2015 -0700 summary: Issue #25099: Skip relevant tests in test_compileall when an entry on sys.path has an unwritable __pycache__ directory. This typically comes up when someone runs the test suite from an administrative install of Python on Windows where the user does not have write permissions to the stdlib's directory. Thanks to Zachary Ware and Matthias Klose for reporting bugs related to this issue. 
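The new skip logic boils down to probing every directory on ``sys.path`` for a writable ``__pycache__`` before the command-line tests run. A rough standalone sketch of that probe, simplified from the ``setUpClass`` code in the diff that follows (the helper name ``sys_path_writable`` is illustrative):

    import os
    import pathlib
    import sys

    def sys_path_writable():
        """Return True only if a __pycache__ entry can be created under
        every existing directory on sys.path."""
        for entry in filter(os.path.isdir, sys.path):
            cache_dir = pathlib.Path(entry) / '__pycache__'
            probe = cache_dir / 'test.try'
            created = False
            try:
                if not cache_dir.is_dir():
                    cache_dir.mkdir()
                    created = True
                with probe.open('w') as f:
                    f.write('# writability probe')
            except OSError:
                return False
            finally:
                try:
                    probe.unlink()
                except OSError:
                    pass
                if created:
                    cache_dir.rmdir()
        return True

    print(sys_path_writable())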
files: Lib/test/test_compileall.py | 36 ++++++++++++++++++++++-- Misc/NEWS | 3 ++ 2 files changed, 35 insertions(+), 4 deletions(-) diff --git a/Lib/test/test_compileall.py b/Lib/test/test_compileall.py --- a/Lib/test/test_compileall.py +++ b/Lib/test/test_compileall.py @@ -2,6 +2,7 @@ import compileall import importlib.util import os +import pathlib import py_compile import shutil import struct @@ -168,6 +169,33 @@ class CommandLineTests(unittest.TestCase): """Test compileall's CLI.""" + @classmethod + def setUpClass(cls): + for path in filter(os.path.isdir, sys.path): + directory_created = False + directory = pathlib.Path(path) / '__pycache__' + path = directory / 'test.try' + try: + if not directory.is_dir(): + directory.mkdir() + directory_created = True + with path.open('w') as file: + file.write('# for test_compileall') + except OSError: + sys_path_writable = False + break + finally: + support.unlink(str(path)) + if directory_created: + directory.rmdir() + else: + sys_path_writable = True + cls._sys_path_writable = sys_path_writable + + def _skip_if_sys_path_not_writable(self): + if not self._sys_path_writable: + raise unittest.SkipTest('not all entries on sys.path are writable') + def _get_run_args(self, args): interp_args = ['-S'] if sys.flags.optimize: @@ -194,8 +222,8 @@ self.assertFalse(os.path.exists(path)) def setUp(self): - self.addCleanup(self._cleanup) self.directory = tempfile.mkdtemp() + self.addCleanup(support.rmtree, self.directory) self.pkgdir = os.path.join(self.directory, 'foo') os.mkdir(self.pkgdir) self.pkgdir_cachedir = os.path.join(self.pkgdir, '__pycache__') @@ -203,11 +231,9 @@ self.initfn = script_helper.make_script(self.pkgdir, '__init__', '') self.barfn = script_helper.make_script(self.pkgdir, 'bar', '') - def _cleanup(self): - support.rmtree(self.directory) - def test_no_args_compiles_path(self): # Note that -l is implied for the no args case. + self._skip_if_sys_path_not_writable() bazfn = script_helper.make_script(self.directory, 'baz', '') self.assertRunOK(PYTHONPATH=self.directory) self.assertCompiled(bazfn) @@ -215,6 +241,7 @@ self.assertNotCompiled(self.barfn) def test_no_args_respects_force_flag(self): + self._skip_if_sys_path_not_writable() bazfn = script_helper.make_script(self.directory, 'baz', '') self.assertRunOK(PYTHONPATH=self.directory) pycpath = importlib.util.cache_from_source(bazfn) @@ -231,6 +258,7 @@ self.assertNotEqual(mtime, mtime2) def test_no_args_respects_quiet_flag(self): + self._skip_if_sys_path_not_writable() script_helper.make_script(self.directory, 'baz', '') noisy = self.assertRunOK(PYTHONPATH=self.directory) self.assertIn(b'Listing ', noisy) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -281,6 +281,9 @@ Tests ----- +- Issue #25099: Make test_compileall not fail when a entry on sys.path cannot + be written to (commonly seen in administrative installs on Windows). + - Issue #23919: Prevents assert dialogs appearing in the test suite. 
- PCbuild\rt.bat now accepts an unlimited number of arguments to pass along -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 10 00:10:16 2015 From: python-checkins at python.org (brett.cannon) Date: Fri, 09 Oct 2015 22:10:16 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Merge_for_issue_=2325099?= Message-ID: <20151009221015.20753.43330@psf.io> https://hg.python.org/cpython/rev/21f3a92e0c6d changeset: 98625:21f3a92e0c6d parent: 98623:4d46d1588629 parent: 98624:34bbd537b3e6 user: Brett Cannon date: Fri Oct 09 15:10:10 2015 -0700 summary: Merge for issue #25099 files: Lib/test/test_compileall.py | 36 ++++++++++++++++++++++-- Misc/NEWS | 3 ++ 2 files changed, 35 insertions(+), 4 deletions(-) diff --git a/Lib/test/test_compileall.py b/Lib/test/test_compileall.py --- a/Lib/test/test_compileall.py +++ b/Lib/test/test_compileall.py @@ -2,6 +2,7 @@ import compileall import importlib.util import os +import pathlib import py_compile import shutil import struct @@ -168,6 +169,33 @@ class CommandLineTests(unittest.TestCase): """Test compileall's CLI.""" + @classmethod + def setUpClass(cls): + for path in filter(os.path.isdir, sys.path): + directory_created = False + directory = pathlib.Path(path) / '__pycache__' + path = directory / 'test.try' + try: + if not directory.is_dir(): + directory.mkdir() + directory_created = True + with path.open('w') as file: + file.write('# for test_compileall') + except OSError: + sys_path_writable = False + break + finally: + support.unlink(str(path)) + if directory_created: + directory.rmdir() + else: + sys_path_writable = True + cls._sys_path_writable = sys_path_writable + + def _skip_if_sys_path_not_writable(self): + if not self._sys_path_writable: + raise unittest.SkipTest('not all entries on sys.path are writable') + def _get_run_args(self, args): interp_args = ['-S'] if sys.flags.optimize: @@ -194,8 +222,8 @@ self.assertFalse(os.path.exists(path)) def setUp(self): - self.addCleanup(self._cleanup) self.directory = tempfile.mkdtemp() + self.addCleanup(support.rmtree, self.directory) self.pkgdir = os.path.join(self.directory, 'foo') os.mkdir(self.pkgdir) self.pkgdir_cachedir = os.path.join(self.pkgdir, '__pycache__') @@ -203,11 +231,9 @@ self.initfn = script_helper.make_script(self.pkgdir, '__init__', '') self.barfn = script_helper.make_script(self.pkgdir, 'bar', '') - def _cleanup(self): - support.rmtree(self.directory) - def test_no_args_compiles_path(self): # Note that -l is implied for the no args case. 
+ self._skip_if_sys_path_not_writable() bazfn = script_helper.make_script(self.directory, 'baz', '') self.assertRunOK(PYTHONPATH=self.directory) self.assertCompiled(bazfn) @@ -215,6 +241,7 @@ self.assertNotCompiled(self.barfn) def test_no_args_respects_force_flag(self): + self._skip_if_sys_path_not_writable() bazfn = script_helper.make_script(self.directory, 'baz', '') self.assertRunOK(PYTHONPATH=self.directory) pycpath = importlib.util.cache_from_source(bazfn) @@ -231,6 +258,7 @@ self.assertNotEqual(mtime, mtime2) def test_no_args_respects_quiet_flag(self): + self._skip_if_sys_path_not_writable() script_helper.make_script(self.directory, 'baz', '') noisy = self.assertRunOK(PYTHONPATH=self.directory) self.assertIn(b'Listing ', noisy) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -422,6 +422,9 @@ Tests ----- +- Issue #25099: Make test_compileall not fail when a entry on sys.path cannot + be written to (commonly seen in administrative installs on Windows). + - Issue #23919: Prevents assert dialogs appearing in the test suite. - PCbuild\rt.bat now accepts an unlimited number of arguments to pass along -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 10 01:54:33 2015 From: python-checkins at python.org (vinay.sajip) Date: Fri, 09 Oct 2015 23:54:33 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_Closes_=2325344=3A_Merged_fix_from_3=2E4=2E?= Message-ID: <20151009235433.2683.30025@psf.io> https://hg.python.org/cpython/rev/be13ea160b1a changeset: 98628:be13ea160b1a branch: 3.5 parent: 98624:34bbd537b3e6 parent: 98627:7cc3a8141022 user: Vinay Sajip date: Sat Oct 10 00:53:37 2015 +0100 summary: Closes #25344: Merged fix from 3.4. files: Doc/howto/logging-cookbook.rst | 137 +++++++++++++++++++++ 1 files changed, 137 insertions(+), 0 deletions(-) diff --git a/Doc/howto/logging-cookbook.rst b/Doc/howto/logging-cookbook.rst --- a/Doc/howto/logging-cookbook.rst +++ b/Doc/howto/logging-cookbook.rst @@ -2151,3 +2151,140 @@ other systems altogether which can process messages via external programs run from a command line. + +.. _buffered-logging: + +Buffering logging messages and outputting them conditionally +------------------------------------------------------------ + +There might be situations where you want to log messages in a temporary area +and only output them if a certain condition occurs. For example, you may want to +start logging debug events in a function, and if the function completes without +errors, you don't want to clutter the log with the collected debug information, +but if there is an error, you want all the debug information to be output as well +as the error. + +Here is an example which shows how you could do this using a decorator for your +functions where you want logging to behave this way. It makes use of the +:class:`logging.handlers.MemoryHandler`, which allows buffering of logged events +until some condition occurs, at which point the buffered events are ``flushed`` +- passed to another handler (the ``target`` handler) for processing. By default, +the ``MemoryHandler`` flushed when its buffer gets filled up or an event whose +level is greater than or equal to a specified threshold is seen. You can use this +recipe with a more specialised subclass of ``MemoryHandler`` if you want custom +flushing behavior. 
+ +The example script has a simple function, ``foo``, which just cycles through +all the logging levels, writing to ``sys.stderr`` to say what level it's about +to log at, and then actually logging a message that that level. You can pass a +parameter to ``foo`` which, if true, will log at ERROR and CRITICAL levels - +otherwise, it only logs at DEBUG, INFO and WARNING levels. + +The script just arranges to decorate ``foo`` with a decorator which will do the +conditional logging that's required. The decorator takes a logger as a parameter +and attaches a memory handler for the duration of the call to the decorated +function. The decorator can be additionally parameterised using a target handler, +a level at which flushing should occur, and a capacity for the buffer. These +default to a :class:`~logging.StreamHandler` which writes to ``sys.stderr``, +``logging.ERROR`` and ``100`` respectively. + +Here's the script:: + + import logging + from logging.handlers import MemoryHandler + import sys + + logger = logging.getLogger(__name__) + logger.addHandler(logging.NullHandler()) + + def log_if_errors(logger, target_handler=None, flush_level=None, capacity=None): + if target_handler is None: + target_handler = logging.StreamHandler() + if flush_level is None: + flush_level = logging.ERROR + if capacity is None: + capacity = 100 + handler = MemoryHandler(capacity, flushLevel=flush_level, target=target_handler) + + def decorator(fn): + def wrapper(*args, **kwargs): + logger.addHandler(handler) + try: + return fn(*args, **kwargs) + except Exception: + logger.exception('call failed') + raise + finally: + super(MemoryHandler, handler).flush() + logger.removeHandler(handler) + return wrapper + + return decorator + + def write_line(s): + sys.stderr.write('%s\n' % s) + + def foo(fail=False): + write_line('about to log at DEBUG ...') + logger.debug('Actually logged at DEBUG') + write_line('about to log at INFO ...') + logger.info('Actually logged at INFO') + write_line('about to log at WARNING ...') + logger.warning('Actually logged at WARNING') + if fail: + write_line('about to log at ERROR ...') + logger.error('Actually logged at ERROR') + write_line('about to log at CRITICAL ...') + logger.critical('Actually logged at CRITICAL') + return fail + + decorated_foo = log_if_errors(logger)(foo) + + if __name__ == '__main__': + logger.setLevel(logging.DEBUG) + write_line('Calling undecorated foo with False') + assert not foo(False) + write_line('Calling undecorated foo with True') + assert foo(True) + write_line('Calling decorated foo with False') + assert not decorated_foo(False) + write_line('Calling decorated foo with True') + assert decorated_foo(True) + +When this script is run, the following output should be observed:: + + Calling undecorated foo with False + about to log at DEBUG ... + about to log at INFO ... + about to log at WARNING ... + Calling undecorated foo with True + about to log at DEBUG ... + about to log at INFO ... + about to log at WARNING ... + about to log at ERROR ... + about to log at CRITICAL ... + Calling decorated foo with False + about to log at DEBUG ... + about to log at INFO ... + about to log at WARNING ... + Calling decorated foo with True + about to log at DEBUG ... + about to log at INFO ... + about to log at WARNING ... + about to log at ERROR ... + Actually logged at DEBUG + Actually logged at INFO + Actually logged at WARNING + Actually logged at ERROR + about to log at CRITICAL ... 
+ Actually logged at CRITICAL + +As you can see, actual logging output only occurs when an event is logged whose +severity is ERROR or greater, but in that case, any previous events at lower +severities are also logged. + +You can of course use the conventional means of decoration:: + + @log_if_errors(logger) + def foo(fail=False): + ... -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 10 01:54:32 2015 From: python-checkins at python.org (vinay.sajip) Date: Fri, 09 Oct 2015 23:54:32 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogQ2xvc2VzICMyNTM0?= =?utf-8?q?4=3A_Added_cookbook_recipe_to_show_buffering_of_logging_events?= =?utf-8?q?=2E?= Message-ID: <20151009235432.3279.38938@psf.io> https://hg.python.org/cpython/rev/3ad8a2d34d01 changeset: 98626:3ad8a2d34d01 branch: 2.7 parent: 98608:4d1bd86d3bbd user: Vinay Sajip date: Sat Oct 10 00:49:10 2015 +0100 summary: Closes #25344: Added cookbook recipe to show buffering of logging events. files: Doc/howto/logging-cookbook.rst | 136 +++++++++++++++++++++ 1 files changed, 136 insertions(+), 0 deletions(-) diff --git a/Doc/howto/logging-cookbook.rst b/Doc/howto/logging-cookbook.rst --- a/Doc/howto/logging-cookbook.rst +++ b/Doc/howto/logging-cookbook.rst @@ -1168,3 +1168,139 @@ other systems altogether which can process messages via external programs run from a command line. +.. _buffered-logging: + +Buffering logging messages and outputting them conditionally +------------------------------------------------------------ + +There might be situations where you want to log messages in a temporary area +and only output them if a certain condition occurs. For example, you may want to +start logging debug events in a function, and if the function completes without +errors, you don't want to clutter the log with the collected debug information, +but if there is an error, you want all the debug information to be output as well +as the error. + +Here is an example which shows how you could do this using a decorator for your +functions where you want logging to behave this way. It makes use of the +:class:`logging.handlers.MemoryHandler`, which allows buffering of logged events +until some condition occurs, at which point the buffered events are ``flushed`` +- passed to another handler (the ``target`` handler) for processing. By default, +the ``MemoryHandler`` flushed when its buffer gets filled up or an event whose +level is greater than or equal to a specified threshold is seen. You can use this +recipe with a more specialised subclass of ``MemoryHandler`` if you want custom +flushing behavior. + +The example script has a simple function, ``foo``, which just cycles through +all the logging levels, writing to ``sys.stderr`` to say what level it's about +to log at, and then actually logging a message that that level. You can pass a +parameter to ``foo`` which, if true, will log at ERROR and CRITICAL levels - +otherwise, it only logs at DEBUG, INFO and WARNING levels. + +The script just arranges to decorate ``foo`` with a decorator which will do the +conditional logging that's required. The decorator takes a logger as a parameter +and attaches a memory handler for the duration of the call to the decorated +function. The decorator can be additionally parameterised using a target handler, +a level at which flushing should occur, and a capacity for the buffer. These +default to a :class:`~logging.StreamHandler` which writes to ``sys.stderr``, +``logging.ERROR`` and ``100`` respectively. 
+ +Here's the script:: + + import logging + from logging.handlers import MemoryHandler + import sys + + logger = logging.getLogger(__name__) + logger.addHandler(logging.NullHandler()) + + def log_if_errors(logger, target_handler=None, flush_level=None, capacity=None): + if target_handler is None: + target_handler = logging.StreamHandler() + if flush_level is None: + flush_level = logging.ERROR + if capacity is None: + capacity = 100 + handler = MemoryHandler(capacity, flushLevel=flush_level, target=target_handler) + + def decorator(fn): + def wrapper(*args, **kwargs): + logger.addHandler(handler) + try: + return fn(*args, **kwargs) + except Exception: + logger.exception('call failed') + raise + finally: + super(MemoryHandler, handler).flush() + logger.removeHandler(handler) + return wrapper + + return decorator + + def write_line(s): + sys.stderr.write('%s\n' % s) + + def foo(fail=False): + write_line('about to log at DEBUG ...') + logger.debug('Actually logged at DEBUG') + write_line('about to log at INFO ...') + logger.info('Actually logged at INFO') + write_line('about to log at WARNING ...') + logger.warning('Actually logged at WARNING') + if fail: + write_line('about to log at ERROR ...') + logger.error('Actually logged at ERROR') + write_line('about to log at CRITICAL ...') + logger.critical('Actually logged at CRITICAL') + return fail + + decorated_foo = log_if_errors(logger)(foo) + + if __name__ == '__main__': + logger.setLevel(logging.DEBUG) + write_line('Calling undecorated foo with False') + assert not foo(False) + write_line('Calling undecorated foo with True') + assert foo(True) + write_line('Calling decorated foo with False') + assert not decorated_foo(False) + write_line('Calling decorated foo with True') + assert decorated_foo(True) + +When this script is run, the following output should be observed:: + + Calling undecorated foo with False + about to log at DEBUG ... + about to log at INFO ... + about to log at WARNING ... + Calling undecorated foo with True + about to log at DEBUG ... + about to log at INFO ... + about to log at WARNING ... + about to log at ERROR ... + about to log at CRITICAL ... + Calling decorated foo with False + about to log at DEBUG ... + about to log at INFO ... + about to log at WARNING ... + Calling decorated foo with True + about to log at DEBUG ... + about to log at INFO ... + about to log at WARNING ... + about to log at ERROR ... + Actually logged at DEBUG + Actually logged at INFO + Actually logged at WARNING + Actually logged at ERROR + about to log at CRITICAL ... + Actually logged at CRITICAL + +As you can see, actual logging output only occurs when an event is logged whose +severity is ERROR or greater, but in that case, any previous events at lower +severities are also logged. + +You can of course use the conventional means of decoration:: + + @log_if_errors(logger) + def foo(fail=False): + ... -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 10 01:54:33 2015 From: python-checkins at python.org (vinay.sajip) Date: Fri, 09 Oct 2015 23:54:33 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Closes_=2325344=3A_Merged_fix_from_3=2E5=2E?= Message-ID: <20151009235433.128828.24073@psf.io> https://hg.python.org/cpython/rev/6c183537b2fb changeset: 98629:6c183537b2fb parent: 98625:21f3a92e0c6d parent: 98628:be13ea160b1a user: Vinay Sajip date: Sat Oct 10 00:54:18 2015 +0100 summary: Closes #25344: Merged fix from 3.5. 
files: Doc/howto/logging-cookbook.rst | 137 +++++++++++++++++++++ 1 files changed, 137 insertions(+), 0 deletions(-) diff --git a/Doc/howto/logging-cookbook.rst b/Doc/howto/logging-cookbook.rst --- a/Doc/howto/logging-cookbook.rst +++ b/Doc/howto/logging-cookbook.rst @@ -2151,3 +2151,140 @@ other systems altogether which can process messages via external programs run from a command line. + +.. _buffered-logging: + +Buffering logging messages and outputting them conditionally +------------------------------------------------------------ + +There might be situations where you want to log messages in a temporary area +and only output them if a certain condition occurs. For example, you may want to +start logging debug events in a function, and if the function completes without +errors, you don't want to clutter the log with the collected debug information, +but if there is an error, you want all the debug information to be output as well +as the error. + +Here is an example which shows how you could do this using a decorator for your +functions where you want logging to behave this way. It makes use of the +:class:`logging.handlers.MemoryHandler`, which allows buffering of logged events +until some condition occurs, at which point the buffered events are ``flushed`` +- passed to another handler (the ``target`` handler) for processing. By default, +the ``MemoryHandler`` flushed when its buffer gets filled up or an event whose +level is greater than or equal to a specified threshold is seen. You can use this +recipe with a more specialised subclass of ``MemoryHandler`` if you want custom +flushing behavior. + +The example script has a simple function, ``foo``, which just cycles through +all the logging levels, writing to ``sys.stderr`` to say what level it's about +to log at, and then actually logging a message that that level. You can pass a +parameter to ``foo`` which, if true, will log at ERROR and CRITICAL levels - +otherwise, it only logs at DEBUG, INFO and WARNING levels. + +The script just arranges to decorate ``foo`` with a decorator which will do the +conditional logging that's required. The decorator takes a logger as a parameter +and attaches a memory handler for the duration of the call to the decorated +function. The decorator can be additionally parameterised using a target handler, +a level at which flushing should occur, and a capacity for the buffer. These +default to a :class:`~logging.StreamHandler` which writes to ``sys.stderr``, +``logging.ERROR`` and ``100`` respectively. 
+ +Here's the script:: + + import logging + from logging.handlers import MemoryHandler + import sys + + logger = logging.getLogger(__name__) + logger.addHandler(logging.NullHandler()) + + def log_if_errors(logger, target_handler=None, flush_level=None, capacity=None): + if target_handler is None: + target_handler = logging.StreamHandler() + if flush_level is None: + flush_level = logging.ERROR + if capacity is None: + capacity = 100 + handler = MemoryHandler(capacity, flushLevel=flush_level, target=target_handler) + + def decorator(fn): + def wrapper(*args, **kwargs): + logger.addHandler(handler) + try: + return fn(*args, **kwargs) + except Exception: + logger.exception('call failed') + raise + finally: + super(MemoryHandler, handler).flush() + logger.removeHandler(handler) + return wrapper + + return decorator + + def write_line(s): + sys.stderr.write('%s\n' % s) + + def foo(fail=False): + write_line('about to log at DEBUG ...') + logger.debug('Actually logged at DEBUG') + write_line('about to log at INFO ...') + logger.info('Actually logged at INFO') + write_line('about to log at WARNING ...') + logger.warning('Actually logged at WARNING') + if fail: + write_line('about to log at ERROR ...') + logger.error('Actually logged at ERROR') + write_line('about to log at CRITICAL ...') + logger.critical('Actually logged at CRITICAL') + return fail + + decorated_foo = log_if_errors(logger)(foo) + + if __name__ == '__main__': + logger.setLevel(logging.DEBUG) + write_line('Calling undecorated foo with False') + assert not foo(False) + write_line('Calling undecorated foo with True') + assert foo(True) + write_line('Calling decorated foo with False') + assert not decorated_foo(False) + write_line('Calling decorated foo with True') + assert decorated_foo(True) + +When this script is run, the following output should be observed:: + + Calling undecorated foo with False + about to log at DEBUG ... + about to log at INFO ... + about to log at WARNING ... + Calling undecorated foo with True + about to log at DEBUG ... + about to log at INFO ... + about to log at WARNING ... + about to log at ERROR ... + about to log at CRITICAL ... + Calling decorated foo with False + about to log at DEBUG ... + about to log at INFO ... + about to log at WARNING ... + Calling decorated foo with True + about to log at DEBUG ... + about to log at INFO ... + about to log at WARNING ... + about to log at ERROR ... + Actually logged at DEBUG + Actually logged at INFO + Actually logged at WARNING + Actually logged at ERROR + about to log at CRITICAL ... + Actually logged at CRITICAL + +As you can see, actual logging output only occurs when an event is logged whose +severity is ERROR or greater, but in that case, any previous events at lower +severities are also logged. + +You can of course use the conventional means of decoration:: + + @log_if_errors(logger) + def foo(fail=False): + ... 
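At its core the recipe above rests on ``logging.handlers.MemoryHandler`` buffering records until a trigger level arrives. A minimal standalone sketch of just that behaviour, without the decorator machinery (the logger name is illustrative; this is separate from the documentation patch itself):

    import logging
    from logging.handlers import MemoryHandler

    logger = logging.getLogger('buffered-demo')
    logger.setLevel(logging.DEBUG)

    target = logging.StreamHandler()        # where flushed records finally go
    buffered = MemoryHandler(capacity=100,  # flush after 100 buffered records...
                             flushLevel=logging.ERROR,  # ...or when an ERROR arrives
                             target=target)
    logger.addHandler(buffered)

    logger.debug('buffered, nothing written yet')
    logger.info('still buffered')
    logger.error('triggers a flush: all three records reach the stream handler')

    buffered.close()                        # flushes anything still buffered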
-- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 10 01:54:34 2015 From: python-checkins at python.org (vinay.sajip) Date: Fri, 09 Oct 2015 23:54:34 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogQ2xvc2VzICMyNTM0?= =?utf-8?q?4=3A_Added_cookbook_recipe_to_show_buffering_of_logging_events?= =?utf-8?q?=2E?= Message-ID: <20151009235432.3281.15431@psf.io> https://hg.python.org/cpython/rev/7cc3a8141022 changeset: 98627:7cc3a8141022 branch: 3.4 parent: 98592:f4606117d571 user: Vinay Sajip date: Sat Oct 10 00:52:35 2015 +0100 summary: Closes #25344: Added cookbook recipe to show buffering of logging events. files: Doc/howto/logging-cookbook.rst | 137 +++++++++++++++++++++ 1 files changed, 137 insertions(+), 0 deletions(-) diff --git a/Doc/howto/logging-cookbook.rst b/Doc/howto/logging-cookbook.rst --- a/Doc/howto/logging-cookbook.rst +++ b/Doc/howto/logging-cookbook.rst @@ -2142,3 +2142,140 @@ other systems altogether which can process messages via external programs run from a command line. + +.. _buffered-logging: + +Buffering logging messages and outputting them conditionally +------------------------------------------------------------ + +There might be situations where you want to log messages in a temporary area +and only output them if a certain condition occurs. For example, you may want to +start logging debug events in a function, and if the function completes without +errors, you don't want to clutter the log with the collected debug information, +but if there is an error, you want all the debug information to be output as well +as the error. + +Here is an example which shows how you could do this using a decorator for your +functions where you want logging to behave this way. It makes use of the +:class:`logging.handlers.MemoryHandler`, which allows buffering of logged events +until some condition occurs, at which point the buffered events are ``flushed`` +- passed to another handler (the ``target`` handler) for processing. By default, +the ``MemoryHandler`` flushed when its buffer gets filled up or an event whose +level is greater than or equal to a specified threshold is seen. You can use this +recipe with a more specialised subclass of ``MemoryHandler`` if you want custom +flushing behavior. + +The example script has a simple function, ``foo``, which just cycles through +all the logging levels, writing to ``sys.stderr`` to say what level it's about +to log at, and then actually logging a message that that level. You can pass a +parameter to ``foo`` which, if true, will log at ERROR and CRITICAL levels - +otherwise, it only logs at DEBUG, INFO and WARNING levels. + +The script just arranges to decorate ``foo`` with a decorator which will do the +conditional logging that's required. The decorator takes a logger as a parameter +and attaches a memory handler for the duration of the call to the decorated +function. The decorator can be additionally parameterised using a target handler, +a level at which flushing should occur, and a capacity for the buffer. These +default to a :class:`~logging.StreamHandler` which writes to ``sys.stderr``, +``logging.ERROR`` and ``100`` respectively. 
+ +Here's the script:: + + import logging + from logging.handlers import MemoryHandler + import sys + + logger = logging.getLogger(__name__) + logger.addHandler(logging.NullHandler()) + + def log_if_errors(logger, target_handler=None, flush_level=None, capacity=None): + if target_handler is None: + target_handler = logging.StreamHandler() + if flush_level is None: + flush_level = logging.ERROR + if capacity is None: + capacity = 100 + handler = MemoryHandler(capacity, flushLevel=flush_level, target=target_handler) + + def decorator(fn): + def wrapper(*args, **kwargs): + logger.addHandler(handler) + try: + return fn(*args, **kwargs) + except Exception: + logger.exception('call failed') + raise + finally: + super(MemoryHandler, handler).flush() + logger.removeHandler(handler) + return wrapper + + return decorator + + def write_line(s): + sys.stderr.write('%s\n' % s) + + def foo(fail=False): + write_line('about to log at DEBUG ...') + logger.debug('Actually logged at DEBUG') + write_line('about to log at INFO ...') + logger.info('Actually logged at INFO') + write_line('about to log at WARNING ...') + logger.warning('Actually logged at WARNING') + if fail: + write_line('about to log at ERROR ...') + logger.error('Actually logged at ERROR') + write_line('about to log at CRITICAL ...') + logger.critical('Actually logged at CRITICAL') + return fail + + decorated_foo = log_if_errors(logger)(foo) + + if __name__ == '__main__': + logger.setLevel(logging.DEBUG) + write_line('Calling undecorated foo with False') + assert not foo(False) + write_line('Calling undecorated foo with True') + assert foo(True) + write_line('Calling decorated foo with False') + assert not decorated_foo(False) + write_line('Calling decorated foo with True') + assert decorated_foo(True) + +When this script is run, the following output should be observed:: + + Calling undecorated foo with False + about to log at DEBUG ... + about to log at INFO ... + about to log at WARNING ... + Calling undecorated foo with True + about to log at DEBUG ... + about to log at INFO ... + about to log at WARNING ... + about to log at ERROR ... + about to log at CRITICAL ... + Calling decorated foo with False + about to log at DEBUG ... + about to log at INFO ... + about to log at WARNING ... + Calling decorated foo with True + about to log at DEBUG ... + about to log at INFO ... + about to log at WARNING ... + about to log at ERROR ... + Actually logged at DEBUG + Actually logged at INFO + Actually logged at WARNING + Actually logged at ERROR + about to log at CRITICAL ... + Actually logged at CRITICAL + +As you can see, actual logging output only occurs when an event is logged whose +severity is ERROR or greater, but in that case, any previous events at lower +severities are also logged. + +You can of course use the conventional means of decoration:: + + @log_if_errors(logger) + def foo(fail=False): + ... 
-- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 10 02:40:20 2015 From: python-checkins at python.org (nick.coghlan) Date: Sat, 10 Oct 2015 00:40:20 +0000 Subject: [Python-checkins] =?utf-8?q?peps=3A_PEP_476=3A_Link_to_Guido=27s_?= =?utf-8?q?approval_email?= Message-ID: <20151010004020.97722.38476@psf.io> https://hg.python.org/peps/rev/456850854d7a changeset: 6110:456850854d7a user: Nick Coghlan date: Sat Oct 10 10:40:07 2015 +1000 summary: PEP 476: Link to Guido's approval email files: pep-0476.txt | 1 + 1 files changed, 1 insertions(+), 0 deletions(-) diff --git a/pep-0476.txt b/pep-0476.txt --- a/pep-0476.txt +++ b/pep-0476.txt @@ -7,6 +7,7 @@ Type: Standards Track Content-Type: text/x-rst Created: 28-August-2014 +Resolution: https://mail.python.org/pipermail/python-dev/2014-October/136676.html Abstract ======== -- Repository URL: https://hg.python.org/peps From python-checkins at python.org Sat Oct 10 02:49:40 2015 From: python-checkins at python.org (nick.coghlan) Date: Sat, 10 Oct 2015 00:49:40 +0000 Subject: [Python-checkins] =?utf-8?q?peps=3A_Withdraw_the_Kallithea_PEPs?= Message-ID: <20151010004940.7244.61057@psf.io> https://hg.python.org/peps/rev/d97c8cf0eafe changeset: 6111:d97c8cf0eafe user: Nick Coghlan date: Sat Oct 10 10:49:30 2015 +1000 summary: Withdraw the Kallithea PEPs files: pep-0462.txt | 14 +++++++++----- pep-0474.txt | 13 ++++++++++++- 2 files changed, 21 insertions(+), 6 deletions(-) diff --git a/pep-0462.txt b/pep-0462.txt --- a/pep-0462.txt +++ b/pep-0462.txt @@ -3,7 +3,7 @@ Version: $Revision$ Last-Modified: $Date$ Author: Nick Coghlan -Status: Deferred +Status: Withdrawn Type: Process Content-Type: text/x-rst Requires: 474 @@ -23,11 +23,15 @@ their changes incorporated. -PEP Deferral -============ +PEP Withdrawal +============== -This PEP is currently deferred pending acceptance or rejection of the -Kallithea-based forge.python.org proposal in PEP 474. +This PEP has been `withdrawn by the author +`_ +in favour of the GitLab based proposal in PEP 507. + +If anyone else would like to take over championing this PEP, contact the +`core-workflow mailing list `_ Rationale for changes to the core development workflow diff --git a/pep-0474.txt b/pep-0474.txt --- a/pep-0474.txt +++ b/pep-0474.txt @@ -3,7 +3,7 @@ Version: $Revision$ Last-Modified: $Date$ Author: Nick Coghlan -Status: Draft +Status: Withdrawn Type: Process Content-Type: text/x-rst Created: 19-Jul-2014 @@ -23,6 +23,17 @@ for CPython itself (see PEP 462 in relation to that). +PEP Withdrawal +============== + +This PEP has been `withdrawn by the author +`_ +in favour of the GitLab based proposal in PEP 507. + +If anyone else would like to take over championing this PEP, contact the +`core-workflow mailing list `_ + + Proposal ======== -- Repository URL: https://hg.python.org/peps From python-checkins at python.org Sat Oct 10 04:10:34 2015 From: python-checkins at python.org (martin.panter) Date: Sat, 10 Oct 2015 02:10:34 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzI0NDAy?= =?utf-8?q?=3A_Fix_input=28=29_when_stdout=2Efileno=28=29_fails=3B_diagnos?= =?utf-8?q?ed_by_Eryksun?= Message-ID: <20151010021033.55484.63490@psf.io> https://hg.python.org/cpython/rev/425d81d10b13 changeset: 98630:425d81d10b13 branch: 3.4 parent: 98627:7cc3a8141022 user: Martin Panter date: Sat Oct 10 01:25:38 2015 +0000 summary: Issue #24402: Fix input() when stdout.fileno() fails; diagnosed by Eryksun Also factored out some test cases into a new PtyTests class. 
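A quick interactive illustration of the case this change addresses, mirroring the ``test_input_no_stdout_fileno`` test added in the diff below. This is only a sketch: it assumes the script is run with stdin attached to a terminal and that a line gets typed at the prompt.

    import io
    import sys

    real_stdout = sys.stdout
    sys.stdout = io.StringIO()       # fileno() raises io.UnsupportedOperation here
    reply = None
    captured = ''
    try:
        # With the fix, input() notices that stdout has no usable file descriptor
        # and writes the prompt to the redirected stdout instead of the terminal.
        reply = input("prompt")
        captured = sys.stdout.getvalue()
    finally:
        sys.stdout = real_stdout

    print("prompt went to redirected stdout:", captured == "prompt")
    print("reply:", reply)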
files: Lib/test/test_builtin.py | 186 +++++++++++++++----------- Misc/NEWS | 3 + Python/bltinmodule.c | 4 +- 3 files changed, 116 insertions(+), 77 deletions(-) diff --git a/Lib/test/test_builtin.py b/Lib/test/test_builtin.py --- a/Lib/test/test_builtin.py +++ b/Lib/test/test_builtin.py @@ -1134,82 +1134,6 @@ sys.stdout = savestdout fp.close() - @unittest.skipUnless(pty, "the pty and signal modules must be available") - def check_input_tty(self, prompt, terminal_input, stdio_encoding=None): - if not sys.stdin.isatty() or not sys.stdout.isatty(): - self.skipTest("stdin and stdout must be ttys") - r, w = os.pipe() - try: - pid, fd = pty.fork() - except (OSError, AttributeError) as e: - os.close(r) - os.close(w) - self.skipTest("pty.fork() raised {}".format(e)) - if pid == 0: - # Child - try: - # Make sure we don't get stuck if there's a problem - signal.alarm(2) - os.close(r) - # Check the error handlers are accounted for - if stdio_encoding: - sys.stdin = io.TextIOWrapper(sys.stdin.detach(), - encoding=stdio_encoding, - errors='surrogateescape') - sys.stdout = io.TextIOWrapper(sys.stdout.detach(), - encoding=stdio_encoding, - errors='replace') - with open(w, "w") as wpipe: - print("tty =", sys.stdin.isatty() and sys.stdout.isatty(), file=wpipe) - print(ascii(input(prompt)), file=wpipe) - except: - traceback.print_exc() - finally: - # We don't want to return to unittest... - os._exit(0) - # Parent - os.close(w) - os.write(fd, terminal_input + b"\r\n") - # Get results from the pipe - with open(r, "r") as rpipe: - lines = [] - while True: - line = rpipe.readline().strip() - if line == "": - # The other end was closed => the child exited - break - lines.append(line) - # Check the result was got and corresponds to the user's terminal input - if len(lines) != 2: - # Something went wrong, try to get at stderr - with open(fd, "r", encoding="ascii", errors="ignore") as child_output: - self.fail("got %d lines in pipe but expected 2, child output was:\n%s" - % (len(lines), child_output.read())) - os.close(fd) - # Check we did exercise the GNU readline path - self.assertIn(lines[0], {'tty = True', 'tty = False'}) - if lines[0] != 'tty = True': - self.skipTest("standard IO in should have been a tty") - input_result = eval(lines[1]) # ascii() -> eval() roundtrip - if stdio_encoding: - expected = terminal_input.decode(stdio_encoding, 'surrogateescape') - else: - expected = terminal_input.decode(sys.stdin.encoding) # what else? - self.assertEqual(input_result, expected) - - def test_input_tty(self): - # Test input() functionality when wired to a tty (the code path - # is different and invokes GNU readline if available). - self.check_input_tty("prompt", b"quux") - - def test_input_tty_non_ascii(self): - # Check stdin/stdout encoding is used when invoking GNU readline - self.check_input_tty("prompt?", b"quux\xe9", "utf-8") - - def test_input_tty_non_ascii_unicode_errors(self): - # Check stdin/stdout error handler is used when invoking GNU readline - self.check_input_tty("prompt?", b"quux\xe9", "ascii") - # test_int(): see test_int.py for tests of built-in function int(). 
def test_repr(self): @@ -1564,6 +1488,116 @@ self.assertRaises(TypeError, tp, 1, 2) self.assertRaises(TypeError, tp, a=1, b=2) + at unittest.skipUnless(pty, "the pty and signal modules must be available") +class PtyTests(unittest.TestCase): + """Tests that use a pseudo terminal to guarantee stdin and stdout are + terminals in the test environment""" + + def fork(self): + try: + return pty.fork() + except (OSError, AttributeError) as e: + self.skipTest("pty.fork() raised {}".format(e)) + + def check_input_tty(self, prompt, terminal_input, stdio_encoding=None): + if not sys.stdin.isatty() or not sys.stdout.isatty(): + self.skipTest("stdin and stdout must be ttys") + r, w = os.pipe() + try: + pid, fd = self.fork() + except: + os.close(r) + os.close(w) + raise + if pid == 0: + # Child + try: + # Make sure we don't get stuck if there's a problem + signal.alarm(2) + os.close(r) + # Check the error handlers are accounted for + if stdio_encoding: + sys.stdin = io.TextIOWrapper(sys.stdin.detach(), + encoding=stdio_encoding, + errors='surrogateescape') + sys.stdout = io.TextIOWrapper(sys.stdout.detach(), + encoding=stdio_encoding, + errors='replace') + with open(w, "w") as wpipe: + print("tty =", sys.stdin.isatty() and sys.stdout.isatty(), file=wpipe) + print(ascii(input(prompt)), file=wpipe) + except: + traceback.print_exc() + finally: + # We don't want to return to unittest... + os._exit(0) + # Parent + os.close(w) + os.write(fd, terminal_input + b"\r\n") + # Get results from the pipe + with open(r, "r") as rpipe: + lines = [] + while True: + line = rpipe.readline().strip() + if line == "": + # The other end was closed => the child exited + break + lines.append(line) + # Check the result was got and corresponds to the user's terminal input + if len(lines) != 2: + # Something went wrong, try to get at stderr + with open(fd, "r", encoding="ascii", errors="ignore") as child_output: + self.fail("got %d lines in pipe but expected 2, child output was:\n%s" + % (len(lines), child_output.read())) + os.close(fd) + # Check we did exercise the GNU readline path + self.assertIn(lines[0], {'tty = True', 'tty = False'}) + if lines[0] != 'tty = True': + self.skipTest("standard IO in should have been a tty") + input_result = eval(lines[1]) # ascii() -> eval() roundtrip + if stdio_encoding: + expected = terminal_input.decode(stdio_encoding, 'surrogateescape') + else: + expected = terminal_input.decode(sys.stdin.encoding) # what else? + self.assertEqual(input_result, expected) + + def test_input_tty(self): + # Test input() functionality when wired to a tty (the code path + # is different and invokes GNU readline if available). + self.check_input_tty("prompt", b"quux") + + def test_input_tty_non_ascii(self): + # Check stdin/stdout encoding is used when invoking GNU readline + self.check_input_tty("prompt?", b"quux\xe9", "utf-8") + + def test_input_tty_non_ascii_unicode_errors(self): + # Check stdin/stdout error handler is used when invoking GNU readline + self.check_input_tty("prompt?", b"quux\xe9", "ascii") + + def test_input_no_stdout_fileno(self): + # Issue #24402: If stdin is the original terminal but stdout.fileno() + # fails, do not use the original stdout file descriptor + pid, pty = self.fork() + if pid: # Parent process + # Ideally this should read and write concurrently using select() + # or similar, to avoid the possibility of a deadlock. 
+ os.write(pty, b"quux\r") + _, status = os.waitpid(pid, 0) + output = os.read(pty, 3000).decode("ascii", "backslashreplace") + os.close(pty) + self.assertEqual(status, 0, output) + else: # Child process + try: + self.assertTrue(sys.stdin.isatty(), "stdin not a terminal") + sys.stdout = io.StringIO() # Does not support fileno() + input("prompt") + self.assertEqual(sys.stdout.getvalue(), "prompt") + os._exit(0) # Success! + except: + sys.excepthook(*sys.exc_info()) + finally: + os._exit(1) # Failure + class TestSorted(unittest.TestCase): def test_basic(self): diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,6 +10,9 @@ Core and Builtins ----------------- +- Issue #24402: Fix input() to prompt to the redirected stdout when + sys.stdout.fileno() fails. + - Issue #24806: Prevent builtin types that are not allowed to be subclassed from being subclassed through multiple inheritance. diff --git a/Python/bltinmodule.c b/Python/bltinmodule.c --- a/Python/bltinmodule.c +++ b/Python/bltinmodule.c @@ -1723,8 +1723,10 @@ } if (tty) { tmp = _PyObject_CallMethodId(fout, &PyId_fileno, ""); - if (tmp == NULL) + if (tmp == NULL) { PyErr_Clear(); + tty = 0; + } else { fd = PyLong_AsLong(tmp); Py_DECREF(tmp); -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 10 04:10:34 2015 From: python-checkins at python.org (martin.panter) Date: Sat, 10 Oct 2015 02:10:34 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_Issue_=2324402=3A_Merge_input=28=29_fix_from_3=2E4_into_3=2E5?= Message-ID: <20151010021033.55486.84365@psf.io> https://hg.python.org/cpython/rev/e97d940a6543 changeset: 98631:e97d940a6543 branch: 3.5 parent: 98628:be13ea160b1a parent: 98630:425d81d10b13 user: Martin Panter date: Sat Oct 10 01:55:23 2015 +0000 summary: Issue #24402: Merge input() fix from 3.4 into 3.5 files: Lib/test/test_builtin.py | 186 +++++++++++++++----------- Misc/NEWS | 3 + Python/bltinmodule.c | 4 +- 3 files changed, 116 insertions(+), 77 deletions(-) diff --git a/Lib/test/test_builtin.py b/Lib/test/test_builtin.py --- a/Lib/test/test_builtin.py +++ b/Lib/test/test_builtin.py @@ -1134,82 +1134,6 @@ sys.stdout = savestdout fp.close() - @unittest.skipUnless(pty, "the pty and signal modules must be available") - def check_input_tty(self, prompt, terminal_input, stdio_encoding=None): - if not sys.stdin.isatty() or not sys.stdout.isatty(): - self.skipTest("stdin and stdout must be ttys") - r, w = os.pipe() - try: - pid, fd = pty.fork() - except (OSError, AttributeError) as e: - os.close(r) - os.close(w) - self.skipTest("pty.fork() raised {}".format(e)) - if pid == 0: - # Child - try: - # Make sure we don't get stuck if there's a problem - signal.alarm(2) - os.close(r) - # Check the error handlers are accounted for - if stdio_encoding: - sys.stdin = io.TextIOWrapper(sys.stdin.detach(), - encoding=stdio_encoding, - errors='surrogateescape') - sys.stdout = io.TextIOWrapper(sys.stdout.detach(), - encoding=stdio_encoding, - errors='replace') - with open(w, "w") as wpipe: - print("tty =", sys.stdin.isatty() and sys.stdout.isatty(), file=wpipe) - print(ascii(input(prompt)), file=wpipe) - except: - traceback.print_exc() - finally: - # We don't want to return to unittest... 
- os._exit(0) - # Parent - os.close(w) - os.write(fd, terminal_input + b"\r\n") - # Get results from the pipe - with open(r, "r") as rpipe: - lines = [] - while True: - line = rpipe.readline().strip() - if line == "": - # The other end was closed => the child exited - break - lines.append(line) - # Check the result was got and corresponds to the user's terminal input - if len(lines) != 2: - # Something went wrong, try to get at stderr - with open(fd, "r", encoding="ascii", errors="ignore") as child_output: - self.fail("got %d lines in pipe but expected 2, child output was:\n%s" - % (len(lines), child_output.read())) - os.close(fd) - # Check we did exercise the GNU readline path - self.assertIn(lines[0], {'tty = True', 'tty = False'}) - if lines[0] != 'tty = True': - self.skipTest("standard IO in should have been a tty") - input_result = eval(lines[1]) # ascii() -> eval() roundtrip - if stdio_encoding: - expected = terminal_input.decode(stdio_encoding, 'surrogateescape') - else: - expected = terminal_input.decode(sys.stdin.encoding) # what else? - self.assertEqual(input_result, expected) - - def test_input_tty(self): - # Test input() functionality when wired to a tty (the code path - # is different and invokes GNU readline if available). - self.check_input_tty("prompt", b"quux") - - def test_input_tty_non_ascii(self): - # Check stdin/stdout encoding is used when invoking GNU readline - self.check_input_tty("prompt?", b"quux\xe9", "utf-8") - - def test_input_tty_non_ascii_unicode_errors(self): - # Check stdin/stdout error handler is used when invoking GNU readline - self.check_input_tty("prompt?", b"quux\xe9", "ascii") - # test_int(): see test_int.py for tests of built-in function int(). def test_repr(self): @@ -1564,6 +1488,116 @@ self.assertRaises(TypeError, tp, 1, 2) self.assertRaises(TypeError, tp, a=1, b=2) + at unittest.skipUnless(pty, "the pty and signal modules must be available") +class PtyTests(unittest.TestCase): + """Tests that use a pseudo terminal to guarantee stdin and stdout are + terminals in the test environment""" + + def fork(self): + try: + return pty.fork() + except (OSError, AttributeError) as e: + self.skipTest("pty.fork() raised {}".format(e)) + + def check_input_tty(self, prompt, terminal_input, stdio_encoding=None): + if not sys.stdin.isatty() or not sys.stdout.isatty(): + self.skipTest("stdin and stdout must be ttys") + r, w = os.pipe() + try: + pid, fd = self.fork() + except: + os.close(r) + os.close(w) + raise + if pid == 0: + # Child + try: + # Make sure we don't get stuck if there's a problem + signal.alarm(2) + os.close(r) + # Check the error handlers are accounted for + if stdio_encoding: + sys.stdin = io.TextIOWrapper(sys.stdin.detach(), + encoding=stdio_encoding, + errors='surrogateescape') + sys.stdout = io.TextIOWrapper(sys.stdout.detach(), + encoding=stdio_encoding, + errors='replace') + with open(w, "w") as wpipe: + print("tty =", sys.stdin.isatty() and sys.stdout.isatty(), file=wpipe) + print(ascii(input(prompt)), file=wpipe) + except: + traceback.print_exc() + finally: + # We don't want to return to unittest... 
+ os._exit(0) + # Parent + os.close(w) + os.write(fd, terminal_input + b"\r\n") + # Get results from the pipe + with open(r, "r") as rpipe: + lines = [] + while True: + line = rpipe.readline().strip() + if line == "": + # The other end was closed => the child exited + break + lines.append(line) + # Check the result was got and corresponds to the user's terminal input + if len(lines) != 2: + # Something went wrong, try to get at stderr + with open(fd, "r", encoding="ascii", errors="ignore") as child_output: + self.fail("got %d lines in pipe but expected 2, child output was:\n%s" + % (len(lines), child_output.read())) + os.close(fd) + # Check we did exercise the GNU readline path + self.assertIn(lines[0], {'tty = True', 'tty = False'}) + if lines[0] != 'tty = True': + self.skipTest("standard IO in should have been a tty") + input_result = eval(lines[1]) # ascii() -> eval() roundtrip + if stdio_encoding: + expected = terminal_input.decode(stdio_encoding, 'surrogateescape') + else: + expected = terminal_input.decode(sys.stdin.encoding) # what else? + self.assertEqual(input_result, expected) + + def test_input_tty(self): + # Test input() functionality when wired to a tty (the code path + # is different and invokes GNU readline if available). + self.check_input_tty("prompt", b"quux") + + def test_input_tty_non_ascii(self): + # Check stdin/stdout encoding is used when invoking GNU readline + self.check_input_tty("prompt?", b"quux\xe9", "utf-8") + + def test_input_tty_non_ascii_unicode_errors(self): + # Check stdin/stdout error handler is used when invoking GNU readline + self.check_input_tty("prompt?", b"quux\xe9", "ascii") + + def test_input_no_stdout_fileno(self): + # Issue #24402: If stdin is the original terminal but stdout.fileno() + # fails, do not use the original stdout file descriptor + pid, pty = self.fork() + if pid: # Parent process + # Ideally this should read and write concurrently using select() + # or similar, to avoid the possibility of a deadlock. + os.write(pty, b"quux\r") + _, status = os.waitpid(pid, 0) + output = os.read(pty, 3000).decode("ascii", "backslashreplace") + os.close(pty) + self.assertEqual(status, 0, output) + else: # Child process + try: + self.assertTrue(sys.stdin.isatty(), "stdin not a terminal") + sys.stdout = io.StringIO() # Does not support fileno() + input("prompt") + self.assertEqual(sys.stdout.getvalue(), "prompt") + os._exit(0) # Success! + except: + sys.excepthook(*sys.exc_info()) + finally: + os._exit(1) # Failure + class TestSorted(unittest.TestCase): def test_basic(self): diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -11,6 +11,9 @@ Core and Builtins ----------------- +- Issue #24402: Fix input() to prompt to the redirected stdout when + sys.stdout.fileno() fails. + - Issue #24806: Prevent builtin types that are not allowed to be subclassed from being subclassed through multiple inheritance. 
diff --git a/Python/bltinmodule.c b/Python/bltinmodule.c --- a/Python/bltinmodule.c +++ b/Python/bltinmodule.c @@ -1853,8 +1853,10 @@ } if (tty) { tmp = _PyObject_CallMethodId(fout, &PyId_fileno, ""); - if (tmp == NULL) + if (tmp == NULL) { PyErr_Clear(); + tty = 0; + } else { fd = PyLong_AsLong(tmp); Py_DECREF(tmp); -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 10 04:10:35 2015 From: python-checkins at python.org (martin.panter) Date: Sat, 10 Oct 2015 02:10:35 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2324402=3A_Merge_input=28=29_fix_from_3=2E5?= Message-ID: <20151010021034.7254.72661@psf.io> https://hg.python.org/cpython/rev/bcc0f8eb6797 changeset: 98632:bcc0f8eb6797 parent: 98629:6c183537b2fb parent: 98631:e97d940a6543 user: Martin Panter date: Sat Oct 10 02:09:41 2015 +0000 summary: Issue #24402: Merge input() fix from 3.5 files: Lib/test/test_builtin.py | 186 +++++++++++++++----------- Misc/NEWS | 6 + Python/bltinmodule.c | 4 +- 3 files changed, 119 insertions(+), 77 deletions(-) diff --git a/Lib/test/test_builtin.py b/Lib/test/test_builtin.py --- a/Lib/test/test_builtin.py +++ b/Lib/test/test_builtin.py @@ -1134,82 +1134,6 @@ sys.stdout = savestdout fp.close() - @unittest.skipUnless(pty, "the pty and signal modules must be available") - def check_input_tty(self, prompt, terminal_input, stdio_encoding=None): - if not sys.stdin.isatty() or not sys.stdout.isatty(): - self.skipTest("stdin and stdout must be ttys") - r, w = os.pipe() - try: - pid, fd = pty.fork() - except (OSError, AttributeError) as e: - os.close(r) - os.close(w) - self.skipTest("pty.fork() raised {}".format(e)) - if pid == 0: - # Child - try: - # Make sure we don't get stuck if there's a problem - signal.alarm(2) - os.close(r) - # Check the error handlers are accounted for - if stdio_encoding: - sys.stdin = io.TextIOWrapper(sys.stdin.detach(), - encoding=stdio_encoding, - errors='surrogateescape') - sys.stdout = io.TextIOWrapper(sys.stdout.detach(), - encoding=stdio_encoding, - errors='replace') - with open(w, "w") as wpipe: - print("tty =", sys.stdin.isatty() and sys.stdout.isatty(), file=wpipe) - print(ascii(input(prompt)), file=wpipe) - except: - traceback.print_exc() - finally: - # We don't want to return to unittest... - os._exit(0) - # Parent - os.close(w) - os.write(fd, terminal_input + b"\r\n") - # Get results from the pipe - with open(r, "r") as rpipe: - lines = [] - while True: - line = rpipe.readline().strip() - if line == "": - # The other end was closed => the child exited - break - lines.append(line) - # Check the result was got and corresponds to the user's terminal input - if len(lines) != 2: - # Something went wrong, try to get at stderr - with open(fd, "r", encoding="ascii", errors="ignore") as child_output: - self.fail("got %d lines in pipe but expected 2, child output was:\n%s" - % (len(lines), child_output.read())) - os.close(fd) - # Check we did exercise the GNU readline path - self.assertIn(lines[0], {'tty = True', 'tty = False'}) - if lines[0] != 'tty = True': - self.skipTest("standard IO in should have been a tty") - input_result = eval(lines[1]) # ascii() -> eval() roundtrip - if stdio_encoding: - expected = terminal_input.decode(stdio_encoding, 'surrogateescape') - else: - expected = terminal_input.decode(sys.stdin.encoding) # what else? 
- self.assertEqual(input_result, expected) - - def test_input_tty(self): - # Test input() functionality when wired to a tty (the code path - # is different and invokes GNU readline if available). - self.check_input_tty("prompt", b"quux") - - def test_input_tty_non_ascii(self): - # Check stdin/stdout encoding is used when invoking GNU readline - self.check_input_tty("prompt?", b"quux\xe9", "utf-8") - - def test_input_tty_non_ascii_unicode_errors(self): - # Check stdin/stdout error handler is used when invoking GNU readline - self.check_input_tty("prompt?", b"quux\xe9", "ascii") - # test_int(): see test_int.py for tests of built-in function int(). def test_repr(self): @@ -1564,6 +1488,116 @@ self.assertRaises(TypeError, tp, 1, 2) self.assertRaises(TypeError, tp, a=1, b=2) + at unittest.skipUnless(pty, "the pty and signal modules must be available") +class PtyTests(unittest.TestCase): + """Tests that use a pseudo terminal to guarantee stdin and stdout are + terminals in the test environment""" + + def fork(self): + try: + return pty.fork() + except (OSError, AttributeError) as e: + self.skipTest("pty.fork() raised {}".format(e)) + + def check_input_tty(self, prompt, terminal_input, stdio_encoding=None): + if not sys.stdin.isatty() or not sys.stdout.isatty(): + self.skipTest("stdin and stdout must be ttys") + r, w = os.pipe() + try: + pid, fd = self.fork() + except: + os.close(r) + os.close(w) + raise + if pid == 0: + # Child + try: + # Make sure we don't get stuck if there's a problem + signal.alarm(2) + os.close(r) + # Check the error handlers are accounted for + if stdio_encoding: + sys.stdin = io.TextIOWrapper(sys.stdin.detach(), + encoding=stdio_encoding, + errors='surrogateescape') + sys.stdout = io.TextIOWrapper(sys.stdout.detach(), + encoding=stdio_encoding, + errors='replace') + with open(w, "w") as wpipe: + print("tty =", sys.stdin.isatty() and sys.stdout.isatty(), file=wpipe) + print(ascii(input(prompt)), file=wpipe) + except: + traceback.print_exc() + finally: + # We don't want to return to unittest... + os._exit(0) + # Parent + os.close(w) + os.write(fd, terminal_input + b"\r\n") + # Get results from the pipe + with open(r, "r") as rpipe: + lines = [] + while True: + line = rpipe.readline().strip() + if line == "": + # The other end was closed => the child exited + break + lines.append(line) + # Check the result was got and corresponds to the user's terminal input + if len(lines) != 2: + # Something went wrong, try to get at stderr + with open(fd, "r", encoding="ascii", errors="ignore") as child_output: + self.fail("got %d lines in pipe but expected 2, child output was:\n%s" + % (len(lines), child_output.read())) + os.close(fd) + # Check we did exercise the GNU readline path + self.assertIn(lines[0], {'tty = True', 'tty = False'}) + if lines[0] != 'tty = True': + self.skipTest("standard IO in should have been a tty") + input_result = eval(lines[1]) # ascii() -> eval() roundtrip + if stdio_encoding: + expected = terminal_input.decode(stdio_encoding, 'surrogateescape') + else: + expected = terminal_input.decode(sys.stdin.encoding) # what else? + self.assertEqual(input_result, expected) + + def test_input_tty(self): + # Test input() functionality when wired to a tty (the code path + # is different and invokes GNU readline if available). 
+ self.check_input_tty("prompt", b"quux") + + def test_input_tty_non_ascii(self): + # Check stdin/stdout encoding is used when invoking GNU readline + self.check_input_tty("prompt?", b"quux\xe9", "utf-8") + + def test_input_tty_non_ascii_unicode_errors(self): + # Check stdin/stdout error handler is used when invoking GNU readline + self.check_input_tty("prompt?", b"quux\xe9", "ascii") + + def test_input_no_stdout_fileno(self): + # Issue #24402: If stdin is the original terminal but stdout.fileno() + # fails, do not use the original stdout file descriptor + pid, pty = self.fork() + if pid: # Parent process + # Ideally this should read and write concurrently using select() + # or similar, to avoid the possibility of a deadlock. + os.write(pty, b"quux\r") + _, status = os.waitpid(pid, 0) + output = os.read(pty, 3000).decode("ascii", "backslashreplace") + os.close(pty) + self.assertEqual(status, 0, output) + else: # Child process + try: + self.assertTrue(sys.stdin.isatty(), "stdin not a terminal") + sys.stdout = io.StringIO() # Does not support fileno() + input("prompt") + self.assertEqual(sys.stdout.getvalue(), "prompt") + os._exit(0) # Success! + except: + sys.excepthook(*sys.exc_info()) + finally: + os._exit(1) # Failure + class TestSorted(unittest.TestCase): def test_basic(self): diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,6 +10,9 @@ Core and Builtins ----------------- +- Issue #24402: Fix input() to prompt to the redirected stdout when + sys.stdout.fileno() fails. + - Issue #25349: Optimize bytes % args using the new private _PyBytesWriter API. - Issue #24806: Prevent builtin types that are not allowed to be subclassed from @@ -260,6 +263,9 @@ Core and Builtins ----------------- +- Issue #24402: Fix input() to prompt to the redirected stdout when + sys.stdout.fileno() fails. + - Issue #25182: The stdprinter (used as sys.stderr before the io module is imported at startup) now uses the backslashreplace error handler. 
diff --git a/Python/bltinmodule.c b/Python/bltinmodule.c --- a/Python/bltinmodule.c +++ b/Python/bltinmodule.c @@ -1854,8 +1854,10 @@ } if (tty) { tmp = _PyObject_CallMethodId(fout, &PyId_fileno, ""); - if (tmp == NULL) + if (tmp == NULL) { PyErr_Clear(); + tty = 0; + } else { fd = PyLong_AsLong(tmp); Py_DECREF(tmp); -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 10 07:30:23 2015 From: python-checkins at python.org (martin.panter) Date: Sat, 10 Oct 2015 05:30:23 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2324402=3A_Merge_potential_test_fix_from_3=2E5?= Message-ID: <20151010053022.473.34785@psf.io> https://hg.python.org/cpython/rev/cb574ee7231e changeset: 98635:cb574ee7231e parent: 98632:bcc0f8eb6797 parent: 98634:6a8f96b46dce user: Martin Panter date: Sat Oct 10 05:29:19 2015 +0000 summary: Issue #24402: Merge potential test fix from 3.5 files: Lib/test/test_builtin.py | 93 ++++++++++++++------------- 1 files changed, 48 insertions(+), 45 deletions(-) diff --git a/Lib/test/test_builtin.py b/Lib/test/test_builtin.py --- a/Lib/test/test_builtin.py +++ b/Lib/test/test_builtin.py @@ -1493,21 +1493,14 @@ """Tests that use a pseudo terminal to guarantee stdin and stdout are terminals in the test environment""" - def fork(self): + def run_child(self, child, terminal_input): + r, w = os.pipe() # Pipe test results from child back to parent try: - return pty.fork() + pid, fd = pty.fork() except (OSError, AttributeError) as e: - self.skipTest("pty.fork() raised {}".format(e)) - - def check_input_tty(self, prompt, terminal_input, stdio_encoding=None): - if not sys.stdin.isatty() or not sys.stdout.isatty(): - self.skipTest("stdin and stdout must be ttys") - r, w = os.pipe() - try: - pid, fd = self.fork() - except: os.close(r) os.close(w) + self.skipTest("pty.fork() raised {}".format(e)) raise if pid == 0: # Child @@ -1515,17 +1508,8 @@ # Make sure we don't get stuck if there's a problem signal.alarm(2) os.close(r) - # Check the error handlers are accounted for - if stdio_encoding: - sys.stdin = io.TextIOWrapper(sys.stdin.detach(), - encoding=stdio_encoding, - errors='surrogateescape') - sys.stdout = io.TextIOWrapper(sys.stdout.detach(), - encoding=stdio_encoding, - errors='replace') with open(w, "w") as wpipe: - print("tty =", sys.stdin.isatty() and sys.stdout.isatty(), file=wpipe) - print(ascii(input(prompt)), file=wpipe) + child(wpipe) except: traceback.print_exc() finally: @@ -1533,7 +1517,7 @@ os._exit(0) # Parent os.close(w) - os.write(fd, terminal_input + b"\r\n") + os.write(fd, terminal_input) # Get results from the pipe with open(r, "r") as rpipe: lines = [] @@ -1546,10 +1530,38 @@ # Check the result was got and corresponds to the user's terminal input if len(lines) != 2: # Something went wrong, try to get at stderr - with open(fd, "r", encoding="ascii", errors="ignore") as child_output: - self.fail("got %d lines in pipe but expected 2, child output was:\n%s" - % (len(lines), child_output.read())) + # Beware of Linux raising EIO when the slave is closed + child_output = bytearray() + while True: + try: + chunk = os.read(fd, 3000) + except OSError: # Assume EIO + break + if not chunk: + break + child_output.extend(chunk) + os.close(fd) + child_output = child_output.decode("ascii", "ignore") + self.fail("got %d lines in pipe but expected 2, child output was:\n%s" + % (len(lines), child_output)) os.close(fd) + return lines + + def check_input_tty(self, prompt, terminal_input, 
stdio_encoding=None): + if not sys.stdin.isatty() or not sys.stdout.isatty(): + self.skipTest("stdin and stdout must be ttys") + def child(wpipe): + # Check the error handlers are accounted for + if stdio_encoding: + sys.stdin = io.TextIOWrapper(sys.stdin.detach(), + encoding=stdio_encoding, + errors='surrogateescape') + sys.stdout = io.TextIOWrapper(sys.stdout.detach(), + encoding=stdio_encoding, + errors='replace') + print("tty =", sys.stdin.isatty() and sys.stdout.isatty(), file=wpipe) + print(ascii(input(prompt)), file=wpipe) + lines = self.run_child(child, terminal_input + b"\r\n") # Check we did exercise the GNU readline path self.assertIn(lines[0], {'tty = True', 'tty = False'}) if lines[0] != 'tty = True': @@ -1577,26 +1589,17 @@ def test_input_no_stdout_fileno(self): # Issue #24402: If stdin is the original terminal but stdout.fileno() # fails, do not use the original stdout file descriptor - pid, pty = self.fork() - if pid: # Parent process - # Ideally this should read and write concurrently using select() - # or similar, to avoid the possibility of a deadlock. - os.write(pty, b"quux\r") - _, status = os.waitpid(pid, 0) - output = os.read(pty, 3000).decode("ascii", "backslashreplace") - os.close(pty) - self.assertEqual(status, 0, output) - else: # Child process - try: - self.assertTrue(sys.stdin.isatty(), "stdin not a terminal") - sys.stdout = io.StringIO() # Does not support fileno() - input("prompt") - self.assertEqual(sys.stdout.getvalue(), "prompt") - os._exit(0) # Success! - except: - sys.excepthook(*sys.exc_info()) - finally: - os._exit(1) # Failure + def child(wpipe): + print("stdin.isatty():", sys.stdin.isatty(), file=wpipe) + sys.stdout = io.StringIO() # Does not support fileno() + input("prompt") + print("captured:", ascii(sys.stdout.getvalue()), file=wpipe) + lines = self.run_child(child, b"quux\r") + expected = ( + "stdin.isatty(): True", + "captured: 'prompt'", + ) + self.assertSequenceEqual(lines, expected) class TestSorted(unittest.TestCase): -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 10 07:30:22 2015 From: python-checkins at python.org (martin.panter) Date: Sat, 10 Oct 2015 05:30:22 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzI0NDAy?= =?utf-8?q?=3A_Factor_out_PtyTests=2Erun=5Fchild=28=29_in_input=28=29_test?= =?utf-8?q?s?= Message-ID: <20151010053022.451.31141@psf.io> https://hg.python.org/cpython/rev/d8dd9015b086 changeset: 98633:d8dd9015b086 branch: 3.4 parent: 98630:425d81d10b13 user: Martin Panter date: Sat Oct 10 05:27:15 2015 +0000 summary: Issue #24402: Factor out PtyTests.run_child() in input() tests This reuses existing code to hopefully make the new test_input_no_stdout_ fileno() test work. It is hanging Free BSD 9 and OS X Tiger buildbots, and I don't know why. 
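The factored-out helper follows a common pattern for exercising terminal-dependent code; the sketch below is a simplified, hypothetical reconstruction for Unix (Python 3), omitting the alarm timeout, traceback reporting and EIO handling that the real test keeps. The child runs under a fresh pseudo-terminal, the parent writes scripted "keyboard" input to the pty master, and the child's results come back over an ordinary pipe:

    import os
    import pty
    import sys

    def run_child(child, terminal_input):
        r, w = os.pipe()              # results travel back over a plain pipe
        pid, fd = pty.fork()          # child's stdin/stdout become a pty slave
        if pid == 0:                  # child process
            os.close(r)
            with os.fdopen(w, "w") as wpipe:
                child(wpipe)
            os._exit(0)
        os.close(w)                   # parent process
        os.write(fd, terminal_input)  # simulate typing at the terminal
        with os.fdopen(r) as rpipe:
            lines = [line.strip() for line in rpipe]
        os.close(fd)
        os.waitpid(pid, 0)
        return lines

    def child(wpipe):
        print("tty =", sys.stdin.isatty(), file=wpipe)
        print("read =", ascii(input()), file=wpipe)

    print(run_child(child, b"spam\r\n"))   # ['tty = True', "read = 'spam'"]

Keeping the result channel separate from the pty avoids having to parse the child's answers out of the terminal echo.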
files: Lib/test/test_builtin.py | 93 ++++++++++++++------------- 1 files changed, 48 insertions(+), 45 deletions(-) diff --git a/Lib/test/test_builtin.py b/Lib/test/test_builtin.py --- a/Lib/test/test_builtin.py +++ b/Lib/test/test_builtin.py @@ -1493,21 +1493,14 @@ """Tests that use a pseudo terminal to guarantee stdin and stdout are terminals in the test environment""" - def fork(self): + def run_child(self, child, terminal_input): + r, w = os.pipe() # Pipe test results from child back to parent try: - return pty.fork() + pid, fd = pty.fork() except (OSError, AttributeError) as e: - self.skipTest("pty.fork() raised {}".format(e)) - - def check_input_tty(self, prompt, terminal_input, stdio_encoding=None): - if not sys.stdin.isatty() or not sys.stdout.isatty(): - self.skipTest("stdin and stdout must be ttys") - r, w = os.pipe() - try: - pid, fd = self.fork() - except: os.close(r) os.close(w) + self.skipTest("pty.fork() raised {}".format(e)) raise if pid == 0: # Child @@ -1515,17 +1508,8 @@ # Make sure we don't get stuck if there's a problem signal.alarm(2) os.close(r) - # Check the error handlers are accounted for - if stdio_encoding: - sys.stdin = io.TextIOWrapper(sys.stdin.detach(), - encoding=stdio_encoding, - errors='surrogateescape') - sys.stdout = io.TextIOWrapper(sys.stdout.detach(), - encoding=stdio_encoding, - errors='replace') with open(w, "w") as wpipe: - print("tty =", sys.stdin.isatty() and sys.stdout.isatty(), file=wpipe) - print(ascii(input(prompt)), file=wpipe) + child(wpipe) except: traceback.print_exc() finally: @@ -1533,7 +1517,7 @@ os._exit(0) # Parent os.close(w) - os.write(fd, terminal_input + b"\r\n") + os.write(fd, terminal_input) # Get results from the pipe with open(r, "r") as rpipe: lines = [] @@ -1546,10 +1530,38 @@ # Check the result was got and corresponds to the user's terminal input if len(lines) != 2: # Something went wrong, try to get at stderr - with open(fd, "r", encoding="ascii", errors="ignore") as child_output: - self.fail("got %d lines in pipe but expected 2, child output was:\n%s" - % (len(lines), child_output.read())) + # Beware of Linux raising EIO when the slave is closed + child_output = bytearray() + while True: + try: + chunk = os.read(fd, 3000) + except OSError: # Assume EIO + break + if not chunk: + break + child_output.extend(chunk) + os.close(fd) + child_output = child_output.decode("ascii", "ignore") + self.fail("got %d lines in pipe but expected 2, child output was:\n%s" + % (len(lines), child_output)) os.close(fd) + return lines + + def check_input_tty(self, prompt, terminal_input, stdio_encoding=None): + if not sys.stdin.isatty() or not sys.stdout.isatty(): + self.skipTest("stdin and stdout must be ttys") + def child(wpipe): + # Check the error handlers are accounted for + if stdio_encoding: + sys.stdin = io.TextIOWrapper(sys.stdin.detach(), + encoding=stdio_encoding, + errors='surrogateescape') + sys.stdout = io.TextIOWrapper(sys.stdout.detach(), + encoding=stdio_encoding, + errors='replace') + print("tty =", sys.stdin.isatty() and sys.stdout.isatty(), file=wpipe) + print(ascii(input(prompt)), file=wpipe) + lines = self.run_child(child, terminal_input + b"\r\n") # Check we did exercise the GNU readline path self.assertIn(lines[0], {'tty = True', 'tty = False'}) if lines[0] != 'tty = True': @@ -1577,26 +1589,17 @@ def test_input_no_stdout_fileno(self): # Issue #24402: If stdin is the original terminal but stdout.fileno() # fails, do not use the original stdout file descriptor - pid, pty = self.fork() - if pid: # Parent process - # 
Ideally this should read and write concurrently using select() - # or similar, to avoid the possibility of a deadlock. - os.write(pty, b"quux\r") - _, status = os.waitpid(pid, 0) - output = os.read(pty, 3000).decode("ascii", "backslashreplace") - os.close(pty) - self.assertEqual(status, 0, output) - else: # Child process - try: - self.assertTrue(sys.stdin.isatty(), "stdin not a terminal") - sys.stdout = io.StringIO() # Does not support fileno() - input("prompt") - self.assertEqual(sys.stdout.getvalue(), "prompt") - os._exit(0) # Success! - except: - sys.excepthook(*sys.exc_info()) - finally: - os._exit(1) # Failure + def child(wpipe): + print("stdin.isatty():", sys.stdin.isatty(), file=wpipe) + sys.stdout = io.StringIO() # Does not support fileno() + input("prompt") + print("captured:", ascii(sys.stdout.getvalue()), file=wpipe) + lines = self.run_child(child, b"quux\r") + expected = ( + "stdin.isatty(): True", + "captured: 'prompt'", + ) + self.assertSequenceEqual(lines, expected) class TestSorted(unittest.TestCase): -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 10 07:30:22 2015 From: python-checkins at python.org (martin.panter) Date: Sat, 10 Oct 2015 05:30:22 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_Issue_=2324402=3A_Merge_potential_test_fix_from_3=2E4_into_3?= =?utf-8?q?=2E5?= Message-ID: <20151010053022.20763.6400@psf.io> https://hg.python.org/cpython/rev/6a8f96b46dce changeset: 98634:6a8f96b46dce branch: 3.5 parent: 98631:e97d940a6543 parent: 98633:d8dd9015b086 user: Martin Panter date: Sat Oct 10 05:28:17 2015 +0000 summary: Issue #24402: Merge potential test fix from 3.4 into 3.5 files: Lib/test/test_builtin.py | 93 ++++++++++++++------------- 1 files changed, 48 insertions(+), 45 deletions(-) diff --git a/Lib/test/test_builtin.py b/Lib/test/test_builtin.py --- a/Lib/test/test_builtin.py +++ b/Lib/test/test_builtin.py @@ -1493,21 +1493,14 @@ """Tests that use a pseudo terminal to guarantee stdin and stdout are terminals in the test environment""" - def fork(self): + def run_child(self, child, terminal_input): + r, w = os.pipe() # Pipe test results from child back to parent try: - return pty.fork() + pid, fd = pty.fork() except (OSError, AttributeError) as e: - self.skipTest("pty.fork() raised {}".format(e)) - - def check_input_tty(self, prompt, terminal_input, stdio_encoding=None): - if not sys.stdin.isatty() or not sys.stdout.isatty(): - self.skipTest("stdin and stdout must be ttys") - r, w = os.pipe() - try: - pid, fd = self.fork() - except: os.close(r) os.close(w) + self.skipTest("pty.fork() raised {}".format(e)) raise if pid == 0: # Child @@ -1515,17 +1508,8 @@ # Make sure we don't get stuck if there's a problem signal.alarm(2) os.close(r) - # Check the error handlers are accounted for - if stdio_encoding: - sys.stdin = io.TextIOWrapper(sys.stdin.detach(), - encoding=stdio_encoding, - errors='surrogateescape') - sys.stdout = io.TextIOWrapper(sys.stdout.detach(), - encoding=stdio_encoding, - errors='replace') with open(w, "w") as wpipe: - print("tty =", sys.stdin.isatty() and sys.stdout.isatty(), file=wpipe) - print(ascii(input(prompt)), file=wpipe) + child(wpipe) except: traceback.print_exc() finally: @@ -1533,7 +1517,7 @@ os._exit(0) # Parent os.close(w) - os.write(fd, terminal_input + b"\r\n") + os.write(fd, terminal_input) # Get results from the pipe with open(r, "r") as rpipe: lines = [] @@ -1546,10 +1530,38 @@ # Check the result was got and corresponds to the user's terminal 
input if len(lines) != 2: # Something went wrong, try to get at stderr - with open(fd, "r", encoding="ascii", errors="ignore") as child_output: - self.fail("got %d lines in pipe but expected 2, child output was:\n%s" - % (len(lines), child_output.read())) + # Beware of Linux raising EIO when the slave is closed + child_output = bytearray() + while True: + try: + chunk = os.read(fd, 3000) + except OSError: # Assume EIO + break + if not chunk: + break + child_output.extend(chunk) + os.close(fd) + child_output = child_output.decode("ascii", "ignore") + self.fail("got %d lines in pipe but expected 2, child output was:\n%s" + % (len(lines), child_output)) os.close(fd) + return lines + + def check_input_tty(self, prompt, terminal_input, stdio_encoding=None): + if not sys.stdin.isatty() or not sys.stdout.isatty(): + self.skipTest("stdin and stdout must be ttys") + def child(wpipe): + # Check the error handlers are accounted for + if stdio_encoding: + sys.stdin = io.TextIOWrapper(sys.stdin.detach(), + encoding=stdio_encoding, + errors='surrogateescape') + sys.stdout = io.TextIOWrapper(sys.stdout.detach(), + encoding=stdio_encoding, + errors='replace') + print("tty =", sys.stdin.isatty() and sys.stdout.isatty(), file=wpipe) + print(ascii(input(prompt)), file=wpipe) + lines = self.run_child(child, terminal_input + b"\r\n") # Check we did exercise the GNU readline path self.assertIn(lines[0], {'tty = True', 'tty = False'}) if lines[0] != 'tty = True': @@ -1577,26 +1589,17 @@ def test_input_no_stdout_fileno(self): # Issue #24402: If stdin is the original terminal but stdout.fileno() # fails, do not use the original stdout file descriptor - pid, pty = self.fork() - if pid: # Parent process - # Ideally this should read and write concurrently using select() - # or similar, to avoid the possibility of a deadlock. - os.write(pty, b"quux\r") - _, status = os.waitpid(pid, 0) - output = os.read(pty, 3000).decode("ascii", "backslashreplace") - os.close(pty) - self.assertEqual(status, 0, output) - else: # Child process - try: - self.assertTrue(sys.stdin.isatty(), "stdin not a terminal") - sys.stdout = io.StringIO() # Does not support fileno() - input("prompt") - self.assertEqual(sys.stdout.getvalue(), "prompt") - os._exit(0) # Success! - except: - sys.excepthook(*sys.exc_info()) - finally: - os._exit(1) # Failure + def child(wpipe): + print("stdin.isatty():", sys.stdin.isatty(), file=wpipe) + sys.stdout = io.StringIO() # Does not support fileno() + input("prompt") + print("captured:", ascii(sys.stdout.getvalue()), file=wpipe) + lines = self.run_child(child, b"quux\r") + expected = ( + "stdin.isatty(): True", + "captured: 'prompt'", + ) + self.assertSequenceEqual(lines, expected) class TestSorted(unittest.TestCase): -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 10 08:33:52 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Sat, 10 Oct 2015 06:33:52 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogSXNzdWUgIzI0ODQ4?= =?utf-8?q?=3A_Fixed_yet_one_bug_in_UTF-7_decoder=2E__Testing_for_BASE64_c?= =?utf-8?q?haracter?= Message-ID: <20151010063352.97696.11655@psf.io> https://hg.python.org/cpython/rev/ff1366ff2761 changeset: 98636:ff1366ff2761 branch: 2.7 parent: 98626:3ad8a2d34d01 user: Serhiy Storchaka date: Sat Oct 10 09:33:11 2015 +0300 summary: Issue #24848: Fixed yet one bug in UTF-7 decoder. Testing for BASE64 character was locale depending. 
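The practical effect can be checked from Python itself; the snippet below is an illustrative sketch mirroring the updated test data in Lib/test/test_codecs.py, not part of the patch. C's isalnum() classifies a byte such as 0xE1 as alphanumeric under some single-byte locales, so the old macro could accept it as a BASE64 character; with the explicit range checks, bytes outside the BASE64 alphabet are rejected no matter what locale the process runs under:

    from __future__ import print_function  # so the snippet also runs on 2.7

    # Input/expected pairs taken from the updated test table:
    #   b'a\xe1b'     -> u'a\ufffdb'
    #   b'a+IKw\xe1b' -> u'a\u20ac\ufffdb'
    for raw in (b'a\xe1b', b'a+IKw\xe1b'):
        print(repr(raw), '->', repr(raw.decode('utf-7', 'replace')))

After the fix the output should not depend on the current LC_CTYPE setting.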
files: Lib/test/test_codecs.py | 16 ++++++++-------- Objects/unicodeobject.c | 5 ++++- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/Lib/test/test_codecs.py b/Lib/test/test_codecs.py --- a/Lib/test/test_codecs.py +++ b/Lib/test/test_codecs.py @@ -690,9 +690,9 @@ def test_errors(self): tests = [ - ('\xffb', u'\ufffdb'), - ('a\xffb', u'a\ufffdb'), - ('a\xff\xffb', u'a\ufffd\ufffdb'), + ('\xe1b', u'\ufffdb'), + ('a\xe1b', u'a\ufffdb'), + ('a\xe1\xe1b', u'a\ufffd\ufffdb'), ('a+IK', u'a\ufffd'), ('a+IK-b', u'a\ufffdb'), ('a+IK,b', u'a\ufffdb'), @@ -708,8 +708,8 @@ ('a+//,+IKw-b', u'a\ufffd\u20acb'), ('a+///,+IKw-b', u'a\uffff\ufffd\u20acb'), ('a+////,+IKw-b', u'a\uffff\ufffd\u20acb'), - ('a+IKw-b\xff', u'a\u20acb\ufffd'), - ('a+IKw\xffb', u'a\u20ac\ufffdb'), + ('a+IKw-b\xe1', u'a\u20acb\ufffd'), + ('a+IKw\xe1b', u'a\u20ac\ufffdb'), ] for raw, expected in tests: try: @@ -738,16 +738,16 @@ def test_lone_surrogates(self): tests = [ ('a+2AE-b', u'a\ud801b'), - ('a+2AE\xffb', u'a\ufffdb'), + ('a+2AE\xe1b', u'a\ufffdb'), ('a+2AE', u'a\ufffd'), ('a+2AEA-b', u'a\ufffdb'), ('a+2AH-b', u'a\ufffdb'), ('a+IKzYAQ-b', u'a\u20ac\ud801b'), - ('a+IKzYAQ\xffb', u'a\u20ac\ufffdb'), + ('a+IKzYAQ\xe1b', u'a\u20ac\ufffdb'), ('a+IKzYAQA-b', u'a\u20ac\ufffdb'), ('a+IKzYAd-b', u'a\u20ac\ufffdb'), ('a+IKwgrNgB-b', u'a\u20ac\u20ac\ud801b'), - ('a+IKwgrNgB\xffb', u'a\u20ac\u20ac\ufffdb'), + ('a+IKwgrNgB\xe1b', u'a\u20ac\u20ac\ufffdb'), ('a+IKwgrNgB', u'a\u20ac\u20ac\ufffd'), ('a+IKwgrNgBA-b', u'a\u20ac\u20ac\ufffdb'), ] diff --git a/Objects/unicodeobject.c b/Objects/unicodeobject.c --- a/Objects/unicodeobject.c +++ b/Objects/unicodeobject.c @@ -1555,7 +1555,10 @@ /* Is c a base-64 character? */ #define IS_BASE64(c) \ - (isalnum(c) || (c) == '+' || (c) == '/') + (((c) >= 'A' && (c) <= 'Z') || \ + ((c) >= 'a' && (c) <= 'z') || \ + ((c) >= '0' && (c) <= '9') || \ + (c) == '+' || (c) == '/') /* given that c is a base-64 character, what is its base-64 value? 
*/ -- Repository URL: https://hg.python.org/cpython From solipsis at pitrou.net Sat Oct 10 10:44:20 2015 From: solipsis at pitrou.net (solipsis at pitrou.net) Date: Sat, 10 Oct 2015 08:44:20 +0000 Subject: [Python-checkins] Daily reference leaks (cb574ee7231e): sum=32787 Message-ID: <20151010084420.55468.75676@psf.io> results for cb574ee7231e on branch "default" -------------------------------------------- test_capi leaked [5410, 5410, 5410] references, sum=16230 test_capi leaked [1421, 1423, 1423] memory blocks, sum=4267 test_format leaked [62, 62, 62] references, sum=186 test_format leaked [62, 62, 62] memory blocks, sum=186 test_functools leaked [0, 2, 2] memory blocks, sum=4 test_threading leaked [3196, 3196, 3196] references, sum=9588 test_threading leaked [774, 776, 776] memory blocks, sum=2326 Command line was: ['./python', '-m', 'test.regrtest', '-uall', '-R', '3:3:/home/psf-users/antoine/refleaks/reflogxTwSVU', '--timeout', '7200'] From python-checkins at python.org Sat Oct 10 12:31:20 2015 From: python-checkins at python.org (martin.panter) Date: Sat, 10 Oct 2015 10:31:20 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogSXNzdWUgIzIyNDEz?= =?utf-8?q?=3A_Document_newline_effect_on_StringIO_initializer_and_getvalu?= =?utf-8?q?e?= Message-ID: <20151010103120.128826.58533@psf.io> https://hg.python.org/cpython/rev/57fc950298bb changeset: 98637:57fc950298bb branch: 2.7 user: Martin Panter date: Sat Oct 10 02:52:30 2015 +0000 summary: Issue #22413: Document newline effect on StringIO initializer and getvalue Also add to comment in the C code. files: Doc/library/io.rst | 17 ++++++++++++----- Modules/_io/_iomodule.h | 7 ++++++- 2 files changed, 18 insertions(+), 6 deletions(-) diff --git a/Doc/library/io.rst b/Doc/library/io.rst --- a/Doc/library/io.rst +++ b/Doc/library/io.rst @@ -803,10 +803,16 @@ An in-memory stream for unicode text. It inherits :class:`TextIOWrapper`. - The initial value of the buffer (an empty unicode string by default) can - be set by providing *initial_value*. The *newline* argument works like - that of :class:`TextIOWrapper`. The default is to consider only ``\n`` - characters as end of lines and to do no newline translation. + The initial value of the buffer can be set by providing *initial_value*. + If newline translation is enabled, newlines will be encoded as if by + :meth:`~TextIOBase.write`. The stream is positioned at the start of + the buffer. + + The *newline* argument works like that of :class:`TextIOWrapper`. + The default is to consider only ``\n`` characters as ends of lines and + to do no newline translation. If *newline* is set to ``None``, + newlines are written as ``\n`` on all platforms, but universal + newline decoding is still performed when reading. :class:`StringIO` provides this method in addition to those from :class:`TextIOWrapper` and its parents: @@ -815,7 +821,8 @@ Return a ``unicode`` containing the entire contents of the buffer at any time before the :class:`StringIO` object's :meth:`close` method is - called. + called. Newlines are decoded as if by :meth:`~TextIOBase.read`, + although the stream position is not changed. Example usage:: diff --git a/Modules/_io/_iomodule.h b/Modules/_io/_iomodule.h --- a/Modules/_io/_iomodule.h +++ b/Modules/_io/_iomodule.h @@ -52,7 +52,12 @@ which can be safely put aside until another search. NOTE: for performance reasons, `end` must point to a NUL character ('\0'). - Otherwise, the function will scan further and return garbage. 
*/ + Otherwise, the function will scan further and return garbage. + + There are three modes, in order of priority: + * translated: Only find \n (assume newlines already translated) + * universal: Use universal newlines algorithm + * Otherwise, the line ending is specified by readnl, a str object */ extern Py_ssize_t _PyIO_find_line_ending( int translated, int universal, PyObject *readnl, Py_UNICODE *start, Py_UNICODE *end, Py_ssize_t *consumed); -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 10 12:31:20 2015 From: python-checkins at python.org (martin.panter) Date: Sat, 10 Oct 2015 10:31:20 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzIyNDEz?= =?utf-8?q?=3A_Document_newline_effect_on_StringIO_initializer_and_getvalu?= =?utf-8?q?e?= Message-ID: <20151010103120.70970.42906@psf.io> https://hg.python.org/cpython/rev/cba4bf2a1721 changeset: 98638:cba4bf2a1721 branch: 3.4 parent: 98633:d8dd9015b086 user: Martin Panter date: Sat Oct 10 03:01:20 2015 +0000 summary: Issue #22413: Document newline effect on StringIO initializer and getvalue Also add to comment in the C code. files: Doc/library/io.rst | 16 ++++++++++++---- Modules/_io/_iomodule.h | 7 ++++++- 2 files changed, 18 insertions(+), 5 deletions(-) diff --git a/Doc/library/io.rst b/Doc/library/io.rst --- a/Doc/library/io.rst +++ b/Doc/library/io.rst @@ -864,10 +864,16 @@ An in-memory stream for text I/O. The text buffer is discarded when the :meth:`~IOBase.close` method is called. - The initial value of the buffer (an empty string by default) can be set by - providing *initial_value*. The *newline* argument works like that of - :class:`TextIOWrapper`. The default is to consider only ``\n`` characters - as end of lines and to do no newline translation. + The initial value of the buffer can be set by providing *initial_value*. + If newline translation is enabled, newlines will be encoded as if by + :meth:`~TextIOBase.write`. The stream is positioned at the start of + the buffer. + + The *newline* argument works like that of :class:`TextIOWrapper`. + The default is to consider only ``\n`` characters as ends of lines and + to do no newline translation. If *newline* is set to ``None``, + newlines are written as ``\n`` on all platforms, but universal + newline decoding is still performed when reading. :class:`StringIO` provides this method in addition to those from :class:`TextIOBase` and its parents: @@ -875,6 +881,8 @@ .. method:: getvalue() Return a ``str`` containing the entire contents of the buffer. + Newlines are decoded as if by :meth:`~TextIOBase.read`, although + the stream position is not changed. Example usage:: diff --git a/Modules/_io/_iomodule.h b/Modules/_io/_iomodule.h --- a/Modules/_io/_iomodule.h +++ b/Modules/_io/_iomodule.h @@ -52,7 +52,12 @@ which can be safely put aside until another search. NOTE: for performance reasons, `end` must point to a NUL character ('\0'). - Otherwise, the function will scan further and return garbage. */ + Otherwise, the function will scan further and return garbage. 
+ + There are three modes, in order of priority: + * translated: Only find \n (assume newlines already translated) + * universal: Use universal newlines algorithm + * Otherwise, the line ending is specified by readnl, a str object */ extern Py_ssize_t _PyIO_find_line_ending( int translated, int universal, PyObject *readnl, int kind, char *start, char *end, Py_ssize_t *consumed); -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 10 12:31:21 2015 From: python-checkins at python.org (martin.panter) Date: Sat, 10 Oct 2015 10:31:21 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_Issue_=2322413=3A_Merge_StringIO_doc_from_3=2E4_into_3=2E5?= Message-ID: <20151010103121.55476.33323@psf.io> https://hg.python.org/cpython/rev/451da3327f68 changeset: 98639:451da3327f68 branch: 3.5 parent: 98634:6a8f96b46dce parent: 98638:cba4bf2a1721 user: Martin Panter date: Sat Oct 10 10:15:21 2015 +0000 summary: Issue #22413: Merge StringIO doc from 3.4 into 3.5 files: Doc/library/io.rst | 16 ++++++++++++---- Modules/_io/_iomodule.h | 7 ++++++- 2 files changed, 18 insertions(+), 5 deletions(-) diff --git a/Doc/library/io.rst b/Doc/library/io.rst --- a/Doc/library/io.rst +++ b/Doc/library/io.rst @@ -889,10 +889,16 @@ An in-memory stream for text I/O. The text buffer is discarded when the :meth:`~IOBase.close` method is called. - The initial value of the buffer (an empty string by default) can be set by - providing *initial_value*. The *newline* argument works like that of - :class:`TextIOWrapper`. The default is to consider only ``\n`` characters - as end of lines and to do no newline translation. + The initial value of the buffer can be set by providing *initial_value*. + If newline translation is enabled, newlines will be encoded as if by + :meth:`~TextIOBase.write`. The stream is positioned at the start of + the buffer. + + The *newline* argument works like that of :class:`TextIOWrapper`. + The default is to consider only ``\n`` characters as ends of lines and + to do no newline translation. If *newline* is set to ``None``, + newlines are written as ``\n`` on all platforms, but universal + newline decoding is still performed when reading. :class:`StringIO` provides this method in addition to those from :class:`TextIOBase` and its parents: @@ -900,6 +906,8 @@ .. method:: getvalue() Return a ``str`` containing the entire contents of the buffer. + Newlines are decoded as if by :meth:`~TextIOBase.read`, although + the stream position is not changed. Example usage:: diff --git a/Modules/_io/_iomodule.h b/Modules/_io/_iomodule.h --- a/Modules/_io/_iomodule.h +++ b/Modules/_io/_iomodule.h @@ -52,7 +52,12 @@ which can be safely put aside until another search. NOTE: for performance reasons, `end` must point to a NUL character ('\0'). - Otherwise, the function will scan further and return garbage. */ + Otherwise, the function will scan further and return garbage. 
+ + There are three modes, in order of priority: + * translated: Only find \n (assume newlines already translated) + * universal: Use universal newlines algorithm + * Otherwise, the line ending is specified by readnl, a str object */ extern Py_ssize_t _PyIO_find_line_ending( int translated, int universal, PyObject *readnl, int kind, char *start, char *end, Py_ssize_t *consumed); -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 10 12:31:26 2015 From: python-checkins at python.org (martin.panter) Date: Sat, 10 Oct 2015 10:31:26 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy41KTogSXNzdWUgIzIyNDEz?= =?utf-8?q?=3A_Remove_comment_made_out_of_date_by_Argument_Clinic?= Message-ID: <20151010103126.97712.24632@psf.io> https://hg.python.org/cpython/rev/46df76819b79 changeset: 98640:46df76819b79 branch: 3.5 user: Martin Panter date: Sat Oct 10 10:17:57 2015 +0000 summary: Issue #22413: Remove comment made out of date by Argument Clinic files: Modules/_io/stringio.c | 6 ++---- 1 files changed, 2 insertions(+), 4 deletions(-) diff --git a/Modules/_io/stringio.c b/Modules/_io/stringio.c --- a/Modules/_io/stringio.c +++ b/Modules/_io/stringio.c @@ -696,10 +696,8 @@ char *newline = "\n"; Py_ssize_t value_len; - /* Parse the newline argument. This used to be done with the 'z' - specifier, however this allowed any object with the buffer interface to - be converted. Thus we have to parse it manually since we only want to - allow unicode objects or None. */ + /* Parse the newline argument. We only want to allow unicode objects or + None. */ if (newline_obj == Py_None) { newline = NULL; } -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 10 12:31:27 2015 From: python-checkins at python.org (martin.panter) Date: Sat, 10 Oct 2015 10:31:27 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2322413=3A_Merge_StringIO_doc_from_3=2E5?= Message-ID: <20151010103126.3289.43200@psf.io> https://hg.python.org/cpython/rev/c12d3f941731 changeset: 98641:c12d3f941731 parent: 98635:cb574ee7231e parent: 98640:46df76819b79 user: Martin Panter date: Sat Oct 10 10:20:25 2015 +0000 summary: Issue #22413: Merge StringIO doc from 3.5 files: Doc/library/io.rst | 16 ++++++++++++---- Modules/_io/_iomodule.h | 7 ++++++- Modules/_io/stringio.c | 6 ++---- 3 files changed, 20 insertions(+), 9 deletions(-) diff --git a/Doc/library/io.rst b/Doc/library/io.rst --- a/Doc/library/io.rst +++ b/Doc/library/io.rst @@ -889,10 +889,16 @@ An in-memory stream for text I/O. The text buffer is discarded when the :meth:`~IOBase.close` method is called. - The initial value of the buffer (an empty string by default) can be set by - providing *initial_value*. The *newline* argument works like that of - :class:`TextIOWrapper`. The default is to consider only ``\n`` characters - as end of lines and to do no newline translation. + The initial value of the buffer can be set by providing *initial_value*. + If newline translation is enabled, newlines will be encoded as if by + :meth:`~TextIOBase.write`. The stream is positioned at the start of + the buffer. + + The *newline* argument works like that of :class:`TextIOWrapper`. + The default is to consider only ``\n`` characters as ends of lines and + to do no newline translation. If *newline* is set to ``None``, + newlines are written as ``\n`` on all platforms, but universal + newline decoding is still performed when reading. 
:class:`StringIO` provides this method in addition to those from :class:`TextIOBase` and its parents: @@ -900,6 +906,8 @@ .. method:: getvalue() Return a ``str`` containing the entire contents of the buffer. + Newlines are decoded as if by :meth:`~TextIOBase.read`, although + the stream position is not changed. Example usage:: diff --git a/Modules/_io/_iomodule.h b/Modules/_io/_iomodule.h --- a/Modules/_io/_iomodule.h +++ b/Modules/_io/_iomodule.h @@ -52,7 +52,12 @@ which can be safely put aside until another search. NOTE: for performance reasons, `end` must point to a NUL character ('\0'). - Otherwise, the function will scan further and return garbage. */ + Otherwise, the function will scan further and return garbage. + + There are three modes, in order of priority: + * translated: Only find \n (assume newlines already translated) + * universal: Use universal newlines algorithm + * Otherwise, the line ending is specified by readnl, a str object */ extern Py_ssize_t _PyIO_find_line_ending( int translated, int universal, PyObject *readnl, int kind, char *start, char *end, Py_ssize_t *consumed); diff --git a/Modules/_io/stringio.c b/Modules/_io/stringio.c --- a/Modules/_io/stringio.c +++ b/Modules/_io/stringio.c @@ -696,10 +696,8 @@ char *newline = "\n"; Py_ssize_t value_len; - /* Parse the newline argument. This used to be done with the 'z' - specifier, however this allowed any object with the buffer interface to - be converted. Thus we have to parse it manually since we only want to - allow unicode objects or None. */ + /* Parse the newline argument. We only want to allow unicode objects or + None. */ if (newline_obj == Py_None) { newline = NULL; } -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 10 12:53:39 2015 From: python-checkins at python.org (martin.panter) Date: Sat, 10 Oct 2015 10:53:39 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzI1MTYx?= =?utf-8?q?=3A_Add_full_stops_in_documentation=3B_patch_by_Takase_Arihiro?= Message-ID: <20151010105339.55478.397@psf.io> https://hg.python.org/cpython/rev/4dfead9635e5 changeset: 98642:4dfead9635e5 branch: 3.4 parent: 98638:cba4bf2a1721 user: Martin Panter date: Sat Oct 10 10:36:22 2015 +0000 summary: Issue #25161: Add full stops in documentation; patch by Takase Arihiro files: Doc/c-api/code.rst | 2 +- Doc/c-api/gen.rst | 2 +- Doc/c-api/veryhigh.rst | 2 +- Doc/distutils/apiref.rst | 2 +- Doc/glossary.rst | 2 +- Doc/library/2to3.rst | 2 +- Doc/library/argparse.rst | 2 +- Doc/library/asyncio-protocol.rst | 2 +- Doc/library/bdb.rst | 2 +- Doc/library/contextlib.rst | 2 +- Doc/library/decimal.rst | 2 +- Doc/library/email.charset.rst | 2 +- Doc/library/enum.rst | 2 +- Doc/library/exceptions.rst | 2 +- Doc/library/fractions.rst | 2 +- Doc/library/http.cookiejar.rst | 2 +- Doc/library/http.cookies.rst | 2 +- Doc/library/importlib.rst | 2 +- Doc/library/json.rst | 2 +- Doc/library/locale.rst | 2 +- Doc/library/logging.handlers.rst | 2 +- Doc/library/modulefinder.rst | 2 +- Doc/library/msvcrt.rst | 2 +- Doc/library/multiprocessing.rst | 6 +++--- Doc/library/pickle.rst | 2 +- Doc/library/platform.rst | 2 +- Doc/library/plistlib.rst | 2 +- Doc/library/shutil.rst | 2 +- Doc/library/sys.rst | 2 +- Doc/library/tkinter.ttk.rst | 5 +++-- Doc/library/urllib.request.rst | 2 +- Doc/library/wsgiref.rst | 2 +- Doc/library/xml.dom.minidom.rst | 2 +- Doc/library/xml.dom.rst | 2 +- Doc/library/xml.etree.elementtree.rst | 2 +- Doc/reference/datamodel.rst | 2 +- Doc/reference/expressions.rst | 2 +- 
Misc/ACKS | 1 + 38 files changed, 42 insertions(+), 40 deletions(-) diff --git a/Doc/c-api/code.rst b/Doc/c-api/code.rst --- a/Doc/c-api/code.rst +++ b/Doc/c-api/code.rst @@ -29,7 +29,7 @@ .. c:function:: int PyCode_Check(PyObject *co) - Return true if *co* is a :class:`code` object + Return true if *co* is a :class:`code` object. .. c:function:: int PyCode_GetNumFree(PyCodeObject *co) diff --git a/Doc/c-api/gen.rst b/Doc/c-api/gen.rst --- a/Doc/c-api/gen.rst +++ b/Doc/c-api/gen.rst @@ -17,7 +17,7 @@ .. c:var:: PyTypeObject PyGen_Type - The type object corresponding to generator objects + The type object corresponding to generator objects. .. c:function:: int PyGen_Check(ob) diff --git a/Doc/c-api/veryhigh.rst b/Doc/c-api/veryhigh.rst --- a/Doc/c-api/veryhigh.rst +++ b/Doc/c-api/veryhigh.rst @@ -201,7 +201,7 @@ .. c:function:: struct _node* PyParser_SimpleParseFile(FILE *fp, const char *filename, int start) This is a simplified interface to :c:func:`PyParser_SimpleParseFileFlags` below, - leaving *flags* set to ``0`` + leaving *flags* set to ``0``. .. c:function:: struct _node* PyParser_SimpleParseFileFlags(FILE *fp, const char *filename, int start, int flags) diff --git a/Doc/distutils/apiref.rst b/Doc/distutils/apiref.rst --- a/Doc/distutils/apiref.rst +++ b/Doc/distutils/apiref.rst @@ -920,7 +920,7 @@ Walk two filename lists in parallel, testing if each source is newer than its corresponding target. Return a pair of lists (*sources*, *targets*) where - source is newer than target, according to the semantics of :func:`newer` + source is newer than target, according to the semantics of :func:`newer`. .. % % equivalent to a listcomp... diff --git a/Doc/glossary.rst b/Doc/glossary.rst --- a/Doc/glossary.rst +++ b/Doc/glossary.rst @@ -867,7 +867,7 @@ without interfering with the behaviour of other Python applications running on the same system. - See also :ref:`scripts-pyvenv` + See also :ref:`scripts-pyvenv`. virtual machine A computer defined entirely in software. Python's virtual machine diff --git a/Doc/library/2to3.rst b/Doc/library/2to3.rst --- a/Doc/library/2to3.rst +++ b/Doc/library/2to3.rst @@ -271,7 +271,7 @@ .. 2to3fixer:: input - Converts ``input(prompt)`` to ``eval(input(prompt))`` + Converts ``input(prompt)`` to ``eval(input(prompt))``. .. 2to3fixer:: intern diff --git a/Doc/library/argparse.rst b/Doc/library/argparse.rst --- a/Doc/library/argparse.rst +++ b/Doc/library/argparse.rst @@ -1984,4 +1984,4 @@ ``%(default)s`` and ``%(prog)s``. * Replace the OptionParser constructor ``version`` argument with a call to - ``parser.add_argument('--version', action='version', version='')`` + ``parser.add_argument('--version', action='version', version='')``. diff --git a/Doc/library/asyncio-protocol.rst b/Doc/library/asyncio-protocol.rst --- a/Doc/library/asyncio-protocol.rst +++ b/Doc/library/asyncio-protocol.rst @@ -232,7 +232,7 @@ .. method:: kill(self) - Kill the subprocess, as in :meth:`subprocess.Popen.kill` + Kill the subprocess, as in :meth:`subprocess.Popen.kill`. On POSIX systems, the function sends SIGKILL to the subprocess. On Windows, this method is an alias for :meth:`terminate`. diff --git a/Doc/library/bdb.rst b/Doc/library/bdb.rst --- a/Doc/library/bdb.rst +++ b/Doc/library/bdb.rst @@ -231,7 +231,7 @@ .. method:: set_until(frame) Stop when the line with the line no greater than the current one is - reached or when returning from current frame + reached or when returning from current frame. .. 
method:: set_trace([frame]) diff --git a/Doc/library/contextlib.rst b/Doc/library/contextlib.rst --- a/Doc/library/contextlib.rst +++ b/Doc/library/contextlib.rst @@ -543,7 +543,7 @@ Due to the way the decorator protocol works, a callback function declared this way cannot take any parameters. Instead, any resources to -be released must be accessed as closure variables +be released must be accessed as closure variables. Using a context manager as a function decorator diff --git a/Doc/library/decimal.rst b/Doc/library/decimal.rst --- a/Doc/library/decimal.rst +++ b/Doc/library/decimal.rst @@ -845,7 +845,7 @@ Engineering notation has an exponent which is a multiple of 3, so there are up to 3 digits left of the decimal place. For example, converts - ``Decimal('123E+1')`` to ``Decimal('1.23E+3')`` + ``Decimal('123E+1')`` to ``Decimal('1.23E+3')``. .. method:: to_integral(rounding=None, context=None) diff --git a/Doc/library/email.charset.rst b/Doc/library/email.charset.rst --- a/Doc/library/email.charset.rst +++ b/Doc/library/email.charset.rst @@ -234,5 +234,5 @@ *charset* is the canonical name of a character set. *codecname* is the name of a Python codec, as appropriate for the second argument to the :class:`str`'s - :meth:`~str.encode` method + :meth:`~str.encode` method. diff --git a/Doc/library/enum.rst b/Doc/library/enum.rst --- a/Doc/library/enum.rst +++ b/Doc/library/enum.rst @@ -708,7 +708,7 @@ class, such as `list(Color)` or `some_var in Color`. :class:`EnumMeta` is responsible for ensuring that various other methods on the final :class:`Enum` class are correct (such as :meth:`__new__`, :meth:`__getnewargs__`, -:meth:`__str__` and :meth:`__repr__`) +:meth:`__str__` and :meth:`__repr__`). Enum Members (aka instances) diff --git a/Doc/library/exceptions.rst b/Doc/library/exceptions.rst --- a/Doc/library/exceptions.rst +++ b/Doc/library/exceptions.rst @@ -603,7 +603,7 @@ .. exception:: SyntaxWarning - Base class for warnings about dubious syntax + Base class for warnings about dubious syntax. .. exception:: RuntimeWarning diff --git a/Doc/library/fractions.rst b/Doc/library/fractions.rst --- a/Doc/library/fractions.rst +++ b/Doc/library/fractions.rst @@ -97,7 +97,7 @@ This class method constructs a :class:`Fraction` representing the exact value of *flt*, which must be a :class:`float`. Beware that - ``Fraction.from_float(0.3)`` is not the same value as ``Fraction(3, 10)`` + ``Fraction.from_float(0.3)`` is not the same value as ``Fraction(3, 10)``. .. note:: diff --git a/Doc/library/http.cookiejar.rst b/Doc/library/http.cookiejar.rst --- a/Doc/library/http.cookiejar.rst +++ b/Doc/library/http.cookiejar.rst @@ -540,7 +540,7 @@ .. attribute:: DefaultCookiePolicy.strict_ns_unverifiable - apply RFC 2965 rules on unverifiable transactions even to Netscape cookies + Apply RFC 2965 rules on unverifiable transactions even to Netscape cookies. .. attribute:: DefaultCookiePolicy.strict_ns_domain diff --git a/Doc/library/http.cookies.rst b/Doc/library/http.cookies.rst --- a/Doc/library/http.cookies.rst +++ b/Doc/library/http.cookies.rst @@ -84,7 +84,7 @@ Return an encoded value. *val* can be any type, but return value must be a string. This method does nothing in :class:`BaseCookie` --- it exists so it can - be overridden + be overridden. In general, it should be the case that :meth:`value_encode` and :meth:`value_decode` are inverses on the range of *value_decode*. 
diff --git a/Doc/library/importlib.rst b/Doc/library/importlib.rst --- a/Doc/library/importlib.rst +++ b/Doc/library/importlib.rst @@ -717,7 +717,7 @@ modules recognized by the standard import machinery. This is a helper for code which simply needs to know if a filesystem path potentially refers to a module without needing any details on the kind - of module (for example, :func:`inspect.getmodulename`) + of module (for example, :func:`inspect.getmodulename`). .. versionadded:: 3.3 diff --git a/Doc/library/json.rst b/Doc/library/json.rst --- a/Doc/library/json.rst +++ b/Doc/library/json.rst @@ -337,7 +337,7 @@ .. method:: decode(s) Return the Python representation of *s* (a :class:`str` instance - containing a JSON document) + containing a JSON document). .. method:: raw_decode(s) diff --git a/Doc/library/locale.rst b/Doc/library/locale.rst --- a/Doc/library/locale.rst +++ b/Doc/library/locale.rst @@ -204,7 +204,7 @@ .. data:: RADIXCHAR - Get the radix character (decimal dot, decimal comma, etc.) + Get the radix character (decimal dot, decimal comma, etc.). .. data:: THOUSEP diff --git a/Doc/library/logging.handlers.rst b/Doc/library/logging.handlers.rst --- a/Doc/library/logging.handlers.rst +++ b/Doc/library/logging.handlers.rst @@ -230,7 +230,7 @@ renamed to the destination. :param source: The source filename. This is normally the base - filename, e.g. 'test.log' + filename, e.g. 'test.log'. :param dest: The destination filename. This is normally what the source is rotated to, e.g. 'test.log.1'. diff --git a/Doc/library/modulefinder.rst b/Doc/library/modulefinder.rst --- a/Doc/library/modulefinder.rst +++ b/Doc/library/modulefinder.rst @@ -53,7 +53,7 @@ .. attribute:: modules A dictionary mapping module names to modules. See - :ref:`modulefinder-example` + :ref:`modulefinder-example`. .. _modulefinder-example: diff --git a/Doc/library/msvcrt.rst b/Doc/library/msvcrt.rst --- a/Doc/library/msvcrt.rst +++ b/Doc/library/msvcrt.rst @@ -18,7 +18,7 @@ The module implements both the normal and wide char variants of the console I/O api. The normal API deals only with ASCII characters and is of limited use for internationalized applications. The wide char API should be used where -ever possible +ever possible. .. versionchanged:: 3.3 Operations in this module now raise :exc:`OSError` where :exc:`IOError` diff --git a/Doc/library/multiprocessing.rst b/Doc/library/multiprocessing.rst --- a/Doc/library/multiprocessing.rst +++ b/Doc/library/multiprocessing.rst @@ -1936,7 +1936,7 @@ raised by :meth:`_callmethod`. Note in particular that an exception will be raised if *methodname* has - not been *exposed* + not been *exposed*. An example of the usage of :meth:`_callmethod`: @@ -2042,7 +2042,7 @@ If *callback* is specified then it should be a callable which accepts a single argument. When the result becomes ready *callback* is applied to it, that is unless the call failed, in which case the *error_callback* - is applied instead + is applied instead. If *error_callback* is specified then it should be a callable which accepts a single argument. If the target function fails, then @@ -2067,7 +2067,7 @@ If *callback* is specified then it should be a callable which accepts a single argument. When the result becomes ready *callback* is applied to it, that is unless the call failed, in which case the *error_callback* - is applied instead + is applied instead. If *error_callback* is specified then it should be a callable which accepts a single argument. 
If the target function fails, then diff --git a/Doc/library/pickle.rst b/Doc/library/pickle.rst --- a/Doc/library/pickle.rst +++ b/Doc/library/pickle.rst @@ -235,7 +235,7 @@ .. function:: loads(bytes_object, \*, fix_imports=True, encoding="ASCII", errors="strict") Read a pickled object hierarchy from a :class:`bytes` object and return the - reconstituted object hierarchy specified therein + reconstituted object hierarchy specified therein. The protocol version of the pickle is detected automatically, so no protocol argument is needed. Bytes past the pickled object's diff --git a/Doc/library/platform.rst b/Doc/library/platform.rst --- a/Doc/library/platform.rst +++ b/Doc/library/platform.rst @@ -117,7 +117,7 @@ .. function:: python_version() - Returns the Python version as string ``'major.minor.patchlevel'`` + Returns the Python version as string ``'major.minor.patchlevel'``. Note that unlike the Python ``sys.version``, the returned value will always include the patchlevel (it defaults to 0). diff --git a/Doc/library/plistlib.rst b/Doc/library/plistlib.rst --- a/Doc/library/plistlib.rst +++ b/Doc/library/plistlib.rst @@ -194,7 +194,7 @@ It has one attribute, :attr:`data`, that can be used to retrieve the Python bytes object stored in it. - .. deprecated:: 3.4 Use a :class:`bytes` object instead + .. deprecated:: 3.4 Use a :class:`bytes` object instead. The following constants are available: diff --git a/Doc/library/shutil.rst b/Doc/library/shutil.rst --- a/Doc/library/shutil.rst +++ b/Doc/library/shutil.rst @@ -461,7 +461,7 @@ .. function:: get_archive_formats() Return a list of supported formats for archiving. - Each element of the returned sequence is a tuple ``(name, description)`` + Each element of the returned sequence is a tuple ``(name, description)``. By default :mod:`shutil` provides these formats: diff --git a/Doc/library/sys.rst b/Doc/library/sys.rst --- a/Doc/library/sys.rst +++ b/Doc/library/sys.rst @@ -629,7 +629,7 @@ :term:`struct sequence` :data:`sys.version_info` may be used for a more human-friendly encoding of the same information. - More details of ``hexversion`` can be found at :ref:`apiabiversion` + More details of ``hexversion`` can be found at :ref:`apiabiversion`. .. data:: implementation diff --git a/Doc/library/tkinter.ttk.rst b/Doc/library/tkinter.ttk.rst --- a/Doc/library/tkinter.ttk.rst +++ b/Doc/library/tkinter.ttk.rst @@ -110,8 +110,9 @@ | class | Specifies the window class. The class is used when querying | | | the option database for the window's other options, to | | | determine the default bindtags for the window, and to select | - | | the widget's default layout and style. This is a read-only | - | | which may only be specified when the window is created | + | | the widget's default layout and style. This option is | + | | read-only, and may only be specified when the window is | + | | created. | +-----------+--------------------------------------------------------------+ | cursor | Specifies the mouse cursor to be used for the widget. If set | | | to the empty string (the default), the cursor is inherited | diff --git a/Doc/library/urllib.request.rst b/Doc/library/urllib.request.rst --- a/Doc/library/urllib.request.rst +++ b/Doc/library/urllib.request.rst @@ -201,7 +201,7 @@ ``"Python-urllib/2.6"`` (on Python 2.6). 
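
(Illustrative sketch, not part of the archived patch: the rotation hooks whose *source*
and *dest* parameters are documented in the logging.handlers change above, assuming the
standard ``logging.handlers.RotatingFileHandler``; compressing rotated files with gzip is
only an example choice.)

    import gzip
    import os
    import logging
    import logging.handlers

    def namer(default_name):
        # default_name is what the source would normally be rotated to,
        # e.g. 'test.log.1'; append '.gz' so the rotated copy is compressed.
        return default_name + '.gz'

    def rotator(source, dest):
        # source is the base filename, e.g. 'test.log';
        # dest is the rotated name produced by namer(), e.g. 'test.log.1.gz'.
        with open(source, 'rb') as f_in, gzip.open(dest, 'wb') as f_out:
            f_out.writelines(f_in)
        os.remove(source)

    handler = logging.handlers.RotatingFileHandler('test.log',
                                                    maxBytes=10000, backupCount=5)
    handler.rotator = rotator
    handler.namer = namer
    logging.getLogger().addHandler(handler)
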
An example of using ``Content-Type`` header with *data* argument would be - sending a dictionary like ``{"Content-Type":" application/x-www-form-urlencoded;charset=utf-8"}`` + sending a dictionary like ``{"Content-Type":" application/x-www-form-urlencoded;charset=utf-8"}``. The final two arguments are only of interest for correct handling of third-party HTTP cookies: diff --git a/Doc/library/wsgiref.rst b/Doc/library/wsgiref.rst --- a/Doc/library/wsgiref.rst +++ b/Doc/library/wsgiref.rst @@ -501,7 +501,7 @@ Similar to :class:`BaseCGIHandler`, but designed for use with HTTP origin servers. If you are writing an HTTP server implementation, you will probably - want to subclass this instead of :class:`BaseCGIHandler` + want to subclass this instead of :class:`BaseCGIHandler`. This class is a subclass of :class:`BaseHandler`. It overrides the :meth:`__init__`, :meth:`get_stdin`, :meth:`get_stderr`, :meth:`add_cgi_vars`, diff --git a/Doc/library/xml.dom.minidom.rst b/Doc/library/xml.dom.minidom.rst --- a/Doc/library/xml.dom.minidom.rst +++ b/Doc/library/xml.dom.minidom.rst @@ -15,7 +15,7 @@ Model interface, with an API similar to that in other languages. It is intended to be simpler than the full DOM and also significantly smaller. Users who are not already proficient with the DOM should consider using the -:mod:`xml.etree.ElementTree` module for their XML processing instead +:mod:`xml.etree.ElementTree` module for their XML processing instead. .. warning:: diff --git a/Doc/library/xml.dom.rst b/Doc/library/xml.dom.rst --- a/Doc/library/xml.dom.rst +++ b/Doc/library/xml.dom.rst @@ -304,7 +304,7 @@ .. attribute:: Node.prefix The part of the :attr:`tagName` preceding the colon if there is one, else the - empty string. The value is a string, or ``None`` + empty string. The value is a string, or ``None``. .. attribute:: Node.namespaceURI diff --git a/Doc/library/xml.etree.elementtree.rst b/Doc/library/xml.etree.elementtree.rst --- a/Doc/library/xml.etree.elementtree.rst +++ b/Doc/library/xml.etree.elementtree.rst @@ -891,7 +891,7 @@ Creates and returns a tree iterator for the root element. The iterator loops over all elements in this tree, in section order. *tag* is the tag - to look for (default is to return all elements) + to look for (default is to return all elements). .. method:: iterfind(match, namespaces=None) diff --git a/Doc/reference/datamodel.rst b/Doc/reference/datamodel.rst --- a/Doc/reference/datamodel.rst +++ b/Doc/reference/datamodel.rst @@ -1310,7 +1310,7 @@ object's :meth:`__hash__` must interoperate on builds of different bit sizes, be sure to check the width on all supported builds. An easy way to do this is with - ``python -c "import sys; print(sys.hash_info.width)"`` + ``python -c "import sys; print(sys.hash_info.width)"``. If a class does not define an :meth:`__eq__` method it should not define a :meth:`__hash__` operation either; if it defines :meth:`__eq__` but not diff --git a/Doc/reference/expressions.rst b/Doc/reference/expressions.rst --- a/Doc/reference/expressions.rst +++ b/Doc/reference/expressions.rst @@ -371,7 +371,7 @@ (by returning a value from the sub-generator). .. versionchanged:: 3.3 - Added ``yield from `` to delegate control flow to a subiterator + Added ``yield from `` to delegate control flow to a subiterator. The parentheses may be omitted when the yield expression is the sole expression on the right hand side of an assignment statement. 
diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -1367,6 +1367,7 @@ Amir Szekely Arfrever Frehtes Taifersar Arahesis Hideaki Takahashi +Takase Arihiro Indra Talip Neil Tallim Geoff Talvola -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 10 12:53:40 2015 From: python-checkins at python.org (martin.panter) Date: Sat, 10 Oct 2015 10:53:40 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2325161=3A_Merge_full_stops_from_3=2E5?= Message-ID: <20151010105340.475.80207@psf.io> https://hg.python.org/cpython/rev/fe87a6f9caa7 changeset: 98644:fe87a6f9caa7 parent: 98641:c12d3f941731 parent: 98643:be34d96e2184 user: Martin Panter date: Sat Oct 10 10:45:00 2015 +0000 summary: Issue #25161: Merge full stops from 3.5 files: Doc/c-api/code.rst | 2 +- Doc/c-api/gen.rst | 2 +- Doc/c-api/veryhigh.rst | 2 +- Doc/distutils/apiref.rst | 2 +- Doc/glossary.rst | 2 +- Doc/library/2to3.rst | 2 +- Doc/library/argparse.rst | 2 +- Doc/library/asyncio-protocol.rst | 2 +- Doc/library/bdb.rst | 2 +- Doc/library/contextlib.rst | 2 +- Doc/library/decimal.rst | 2 +- Doc/library/email.charset.rst | 2 +- Doc/library/enum.rst | 2 +- Doc/library/exceptions.rst | 2 +- Doc/library/fractions.rst | 2 +- Doc/library/http.cookiejar.rst | 2 +- Doc/library/http.cookies.rst | 2 +- Doc/library/importlib.rst | 2 +- Doc/library/json.rst | 2 +- Doc/library/locale.rst | 2 +- Doc/library/logging.handlers.rst | 2 +- Doc/library/modulefinder.rst | 2 +- Doc/library/msvcrt.rst | 2 +- Doc/library/multiprocessing.rst | 6 +++--- Doc/library/pickle.rst | 2 +- Doc/library/platform.rst | 2 +- Doc/library/plistlib.rst | 2 +- Doc/library/shutil.rst | 2 +- Doc/library/sys.rst | 2 +- Doc/library/tkinter.ttk.rst | 5 +++-- Doc/library/urllib.request.rst | 2 +- Doc/library/wsgiref.rst | 2 +- Doc/library/xml.dom.minidom.rst | 2 +- Doc/library/xml.dom.rst | 2 +- Doc/library/xml.etree.elementtree.rst | 2 +- Doc/reference/datamodel.rst | 2 +- Doc/reference/expressions.rst | 2 +- Misc/ACKS | 1 + 38 files changed, 42 insertions(+), 40 deletions(-) diff --git a/Doc/c-api/code.rst b/Doc/c-api/code.rst --- a/Doc/c-api/code.rst +++ b/Doc/c-api/code.rst @@ -29,7 +29,7 @@ .. c:function:: int PyCode_Check(PyObject *co) - Return true if *co* is a :class:`code` object + Return true if *co* is a :class:`code` object. .. c:function:: int PyCode_GetNumFree(PyCodeObject *co) diff --git a/Doc/c-api/gen.rst b/Doc/c-api/gen.rst --- a/Doc/c-api/gen.rst +++ b/Doc/c-api/gen.rst @@ -17,7 +17,7 @@ .. c:var:: PyTypeObject PyGen_Type - The type object corresponding to generator objects + The type object corresponding to generator objects. .. c:function:: int PyGen_Check(PyObject *ob) diff --git a/Doc/c-api/veryhigh.rst b/Doc/c-api/veryhigh.rst --- a/Doc/c-api/veryhigh.rst +++ b/Doc/c-api/veryhigh.rst @@ -201,7 +201,7 @@ .. c:function:: struct _node* PyParser_SimpleParseFile(FILE *fp, const char *filename, int start) This is a simplified interface to :c:func:`PyParser_SimpleParseFileFlags` below, - leaving *flags* set to ``0`` + leaving *flags* set to ``0``. .. c:function:: struct _node* PyParser_SimpleParseFileFlags(FILE *fp, const char *filename, int start, int flags) diff --git a/Doc/distutils/apiref.rst b/Doc/distutils/apiref.rst --- a/Doc/distutils/apiref.rst +++ b/Doc/distutils/apiref.rst @@ -928,7 +928,7 @@ Walk two filename lists in parallel, testing if each source is newer than its corresponding target. 
Return a pair of lists (*sources*, *targets*) where - source is newer than target, according to the semantics of :func:`newer` + source is newer than target, according to the semantics of :func:`newer`. .. % % equivalent to a listcomp... diff --git a/Doc/glossary.rst b/Doc/glossary.rst --- a/Doc/glossary.rst +++ b/Doc/glossary.rst @@ -948,7 +948,7 @@ without interfering with the behaviour of other Python applications running on the same system. - See also :ref:`scripts-pyvenv` + See also :ref:`scripts-pyvenv`. virtual machine A computer defined entirely in software. Python's virtual machine diff --git a/Doc/library/2to3.rst b/Doc/library/2to3.rst --- a/Doc/library/2to3.rst +++ b/Doc/library/2to3.rst @@ -271,7 +271,7 @@ .. 2to3fixer:: input - Converts ``input(prompt)`` to ``eval(input(prompt))`` + Converts ``input(prompt)`` to ``eval(input(prompt))``. .. 2to3fixer:: intern diff --git a/Doc/library/argparse.rst b/Doc/library/argparse.rst --- a/Doc/library/argparse.rst +++ b/Doc/library/argparse.rst @@ -2011,4 +2011,4 @@ ``%(default)s`` and ``%(prog)s``. * Replace the OptionParser constructor ``version`` argument with a call to - ``parser.add_argument('--version', action='version', version='')`` + ``parser.add_argument('--version', action='version', version='')``. diff --git a/Doc/library/asyncio-protocol.rst b/Doc/library/asyncio-protocol.rst --- a/Doc/library/asyncio-protocol.rst +++ b/Doc/library/asyncio-protocol.rst @@ -232,7 +232,7 @@ .. method:: kill(self) - Kill the subprocess, as in :meth:`subprocess.Popen.kill` + Kill the subprocess, as in :meth:`subprocess.Popen.kill`. On POSIX systems, the function sends SIGKILL to the subprocess. On Windows, this method is an alias for :meth:`terminate`. diff --git a/Doc/library/bdb.rst b/Doc/library/bdb.rst --- a/Doc/library/bdb.rst +++ b/Doc/library/bdb.rst @@ -231,7 +231,7 @@ .. method:: set_until(frame) Stop when the line with the line no greater than the current one is - reached or when returning from current frame + reached or when returning from current frame. .. method:: set_trace([frame]) diff --git a/Doc/library/contextlib.rst b/Doc/library/contextlib.rst --- a/Doc/library/contextlib.rst +++ b/Doc/library/contextlib.rst @@ -553,7 +553,7 @@ Due to the way the decorator protocol works, a callback function declared this way cannot take any parameters. Instead, any resources to -be released must be accessed as closure variables +be released must be accessed as closure variables. Using a context manager as a function decorator diff --git a/Doc/library/decimal.rst b/Doc/library/decimal.rst --- a/Doc/library/decimal.rst +++ b/Doc/library/decimal.rst @@ -841,7 +841,7 @@ Engineering notation has an exponent which is a multiple of 3, so there are up to 3 digits left of the decimal place. For example, converts - ``Decimal('123E+1')`` to ``Decimal('1.23E+3')`` + ``Decimal('123E+1')`` to ``Decimal('1.23E+3')``. .. method:: to_integral(rounding=None, context=None) diff --git a/Doc/library/email.charset.rst b/Doc/library/email.charset.rst --- a/Doc/library/email.charset.rst +++ b/Doc/library/email.charset.rst @@ -234,5 +234,5 @@ *charset* is the canonical name of a character set. *codecname* is the name of a Python codec, as appropriate for the second argument to the :class:`str`'s - :meth:`~str.encode` method + :meth:`~str.encode` method. diff --git a/Doc/library/enum.rst b/Doc/library/enum.rst --- a/Doc/library/enum.rst +++ b/Doc/library/enum.rst @@ -714,7 +714,7 @@ class, such as `list(Color)` or `some_var in Color`. 
:class:`EnumMeta` is responsible for ensuring that various other methods on the final :class:`Enum` class are correct (such as :meth:`__new__`, :meth:`__getnewargs__`, -:meth:`__str__` and :meth:`__repr__`) +:meth:`__str__` and :meth:`__repr__`). Enum Members (aka instances) diff --git a/Doc/library/exceptions.rst b/Doc/library/exceptions.rst --- a/Doc/library/exceptions.rst +++ b/Doc/library/exceptions.rst @@ -635,7 +635,7 @@ .. exception:: SyntaxWarning - Base class for warnings about dubious syntax + Base class for warnings about dubious syntax. .. exception:: RuntimeWarning diff --git a/Doc/library/fractions.rst b/Doc/library/fractions.rst --- a/Doc/library/fractions.rst +++ b/Doc/library/fractions.rst @@ -97,7 +97,7 @@ This class method constructs a :class:`Fraction` representing the exact value of *flt*, which must be a :class:`float`. Beware that - ``Fraction.from_float(0.3)`` is not the same value as ``Fraction(3, 10)`` + ``Fraction.from_float(0.3)`` is not the same value as ``Fraction(3, 10)``. .. note:: diff --git a/Doc/library/http.cookiejar.rst b/Doc/library/http.cookiejar.rst --- a/Doc/library/http.cookiejar.rst +++ b/Doc/library/http.cookiejar.rst @@ -540,7 +540,7 @@ .. attribute:: DefaultCookiePolicy.strict_ns_unverifiable - apply RFC 2965 rules on unverifiable transactions even to Netscape cookies + Apply RFC 2965 rules on unverifiable transactions even to Netscape cookies. .. attribute:: DefaultCookiePolicy.strict_ns_domain diff --git a/Doc/library/http.cookies.rst b/Doc/library/http.cookies.rst --- a/Doc/library/http.cookies.rst +++ b/Doc/library/http.cookies.rst @@ -84,7 +84,7 @@ Return an encoded value. *val* can be any type, but return value must be a string. This method does nothing in :class:`BaseCookie` --- it exists so it can - be overridden + be overridden. In general, it should be the case that :meth:`value_encode` and :meth:`value_decode` are inverses on the range of *value_decode*. diff --git a/Doc/library/importlib.rst b/Doc/library/importlib.rst --- a/Doc/library/importlib.rst +++ b/Doc/library/importlib.rst @@ -747,7 +747,7 @@ modules recognized by the standard import machinery. This is a helper for code which simply needs to know if a filesystem path potentially refers to a module without needing any details on the kind - of module (for example, :func:`inspect.getmodulename`) + of module (for example, :func:`inspect.getmodulename`). .. versionadded:: 3.3 diff --git a/Doc/library/json.rst b/Doc/library/json.rst --- a/Doc/library/json.rst +++ b/Doc/library/json.rst @@ -339,7 +339,7 @@ .. method:: decode(s) Return the Python representation of *s* (a :class:`str` instance - containing a JSON document) + containing a JSON document). :exc:`JSONDecodeError` will be raised if the given JSON document is not valid. diff --git a/Doc/library/locale.rst b/Doc/library/locale.rst --- a/Doc/library/locale.rst +++ b/Doc/library/locale.rst @@ -204,7 +204,7 @@ .. data:: RADIXCHAR - Get the radix character (decimal dot, decimal comma, etc.) + Get the radix character (decimal dot, decimal comma, etc.). .. data:: THOUSEP diff --git a/Doc/library/logging.handlers.rst b/Doc/library/logging.handlers.rst --- a/Doc/library/logging.handlers.rst +++ b/Doc/library/logging.handlers.rst @@ -238,7 +238,7 @@ renamed to the destination. :param source: The source filename. This is normally the base - filename, e.g. 'test.log' + filename, e.g. 'test.log'. :param dest: The destination filename. This is normally what the source is rotated to, e.g. 'test.log.1'. 
diff --git a/Doc/library/modulefinder.rst b/Doc/library/modulefinder.rst --- a/Doc/library/modulefinder.rst +++ b/Doc/library/modulefinder.rst @@ -53,7 +53,7 @@ .. attribute:: modules A dictionary mapping module names to modules. See - :ref:`modulefinder-example` + :ref:`modulefinder-example`. .. _modulefinder-example: diff --git a/Doc/library/msvcrt.rst b/Doc/library/msvcrt.rst --- a/Doc/library/msvcrt.rst +++ b/Doc/library/msvcrt.rst @@ -18,7 +18,7 @@ The module implements both the normal and wide char variants of the console I/O api. The normal API deals only with ASCII characters and is of limited use for internationalized applications. The wide char API should be used where -ever possible +ever possible. .. versionchanged:: 3.3 Operations in this module now raise :exc:`OSError` where :exc:`IOError` diff --git a/Doc/library/multiprocessing.rst b/Doc/library/multiprocessing.rst --- a/Doc/library/multiprocessing.rst +++ b/Doc/library/multiprocessing.rst @@ -1944,7 +1944,7 @@ raised by :meth:`_callmethod`. Note in particular that an exception will be raised if *methodname* has - not been *exposed* + not been *exposed*. An example of the usage of :meth:`_callmethod`: @@ -2050,7 +2050,7 @@ If *callback* is specified then it should be a callable which accepts a single argument. When the result becomes ready *callback* is applied to it, that is unless the call failed, in which case the *error_callback* - is applied instead + is applied instead. If *error_callback* is specified then it should be a callable which accepts a single argument. If the target function fails, then @@ -2075,7 +2075,7 @@ If *callback* is specified then it should be a callable which accepts a single argument. When the result becomes ready *callback* is applied to it, that is unless the call failed, in which case the *error_callback* - is applied instead + is applied instead. If *error_callback* is specified then it should be a callable which accepts a single argument. If the target function fails, then diff --git a/Doc/library/pickle.rst b/Doc/library/pickle.rst --- a/Doc/library/pickle.rst +++ b/Doc/library/pickle.rst @@ -235,7 +235,7 @@ .. function:: loads(bytes_object, \*, fix_imports=True, encoding="ASCII", errors="strict") Read a pickled object hierarchy from a :class:`bytes` object and return the - reconstituted object hierarchy specified therein + reconstituted object hierarchy specified therein. The protocol version of the pickle is detected automatically, so no protocol argument is needed. Bytes past the pickled object's diff --git a/Doc/library/platform.rst b/Doc/library/platform.rst --- a/Doc/library/platform.rst +++ b/Doc/library/platform.rst @@ -117,7 +117,7 @@ .. function:: python_version() - Returns the Python version as string ``'major.minor.patchlevel'`` + Returns the Python version as string ``'major.minor.patchlevel'``. Note that unlike the Python ``sys.version``, the returned value will always include the patchlevel (it defaults to 0). diff --git a/Doc/library/plistlib.rst b/Doc/library/plistlib.rst --- a/Doc/library/plistlib.rst +++ b/Doc/library/plistlib.rst @@ -194,7 +194,7 @@ It has one attribute, :attr:`data`, that can be used to retrieve the Python bytes object stored in it. - .. deprecated:: 3.4 Use a :class:`bytes` object instead + .. deprecated:: 3.4 Use a :class:`bytes` object instead. The following constants are available: diff --git a/Doc/library/shutil.rst b/Doc/library/shutil.rst --- a/Doc/library/shutil.rst +++ b/Doc/library/shutil.rst @@ -497,7 +497,7 @@ .. 
function:: get_archive_formats() Return a list of supported formats for archiving. - Each element of the returned sequence is a tuple ``(name, description)`` + Each element of the returned sequence is a tuple ``(name, description)``. By default :mod:`shutil` provides these formats: diff --git a/Doc/library/sys.rst b/Doc/library/sys.rst --- a/Doc/library/sys.rst +++ b/Doc/library/sys.rst @@ -641,7 +641,7 @@ :term:`struct sequence` :data:`sys.version_info` may be used for a more human-friendly encoding of the same information. - More details of ``hexversion`` can be found at :ref:`apiabiversion` + More details of ``hexversion`` can be found at :ref:`apiabiversion`. .. data:: implementation diff --git a/Doc/library/tkinter.ttk.rst b/Doc/library/tkinter.ttk.rst --- a/Doc/library/tkinter.ttk.rst +++ b/Doc/library/tkinter.ttk.rst @@ -110,8 +110,9 @@ | class | Specifies the window class. The class is used when querying | | | the option database for the window's other options, to | | | determine the default bindtags for the window, and to select | - | | the widget's default layout and style. This is a read-only | - | | which may only be specified when the window is created | + | | the widget's default layout and style. This option is | + | | read-only, and may only be specified when the window is | + | | created. | +-----------+--------------------------------------------------------------+ | cursor | Specifies the mouse cursor to be used for the widget. If set | | | to the empty string (the default), the cursor is inherited | diff --git a/Doc/library/urllib.request.rst b/Doc/library/urllib.request.rst --- a/Doc/library/urllib.request.rst +++ b/Doc/library/urllib.request.rst @@ -202,7 +202,7 @@ ``"Python-urllib/2.6"`` (on Python 2.6). An example of using ``Content-Type`` header with *data* argument would be - sending a dictionary like ``{"Content-Type":" application/x-www-form-urlencoded;charset=utf-8"}`` + sending a dictionary like ``{"Content-Type":" application/x-www-form-urlencoded;charset=utf-8"}``. The final two arguments are only of interest for correct handling of third-party HTTP cookies: diff --git a/Doc/library/wsgiref.rst b/Doc/library/wsgiref.rst --- a/Doc/library/wsgiref.rst +++ b/Doc/library/wsgiref.rst @@ -506,7 +506,7 @@ Similar to :class:`BaseCGIHandler`, but designed for use with HTTP origin servers. If you are writing an HTTP server implementation, you will probably - want to subclass this instead of :class:`BaseCGIHandler` + want to subclass this instead of :class:`BaseCGIHandler`. This class is a subclass of :class:`BaseHandler`. It overrides the :meth:`__init__`, :meth:`get_stdin`, :meth:`get_stderr`, :meth:`add_cgi_vars`, diff --git a/Doc/library/xml.dom.minidom.rst b/Doc/library/xml.dom.minidom.rst --- a/Doc/library/xml.dom.minidom.rst +++ b/Doc/library/xml.dom.minidom.rst @@ -15,7 +15,7 @@ Model interface, with an API similar to that in other languages. It is intended to be simpler than the full DOM and also significantly smaller. Users who are not already proficient with the DOM should consider using the -:mod:`xml.etree.ElementTree` module for their XML processing instead +:mod:`xml.etree.ElementTree` module for their XML processing instead. .. warning:: diff --git a/Doc/library/xml.dom.rst b/Doc/library/xml.dom.rst --- a/Doc/library/xml.dom.rst +++ b/Doc/library/xml.dom.rst @@ -304,7 +304,7 @@ .. attribute:: Node.prefix The part of the :attr:`tagName` preceding the colon if there is one, else the - empty string. The value is a string, or ``None`` + empty string. 
The value is a string, or ``None``. .. attribute:: Node.namespaceURI diff --git a/Doc/library/xml.etree.elementtree.rst b/Doc/library/xml.etree.elementtree.rst --- a/Doc/library/xml.etree.elementtree.rst +++ b/Doc/library/xml.etree.elementtree.rst @@ -891,7 +891,7 @@ Creates and returns a tree iterator for the root element. The iterator loops over all elements in this tree, in section order. *tag* is the tag - to look for (default is to return all elements) + to look for (default is to return all elements). .. method:: iterfind(match, namespaces=None) diff --git a/Doc/reference/datamodel.rst b/Doc/reference/datamodel.rst --- a/Doc/reference/datamodel.rst +++ b/Doc/reference/datamodel.rst @@ -1320,7 +1320,7 @@ object's :meth:`__hash__` must interoperate on builds of different bit sizes, be sure to check the width on all supported builds. An easy way to do this is with - ``python -c "import sys; print(sys.hash_info.width)"`` + ``python -c "import sys; print(sys.hash_info.width)"``. If a class does not define an :meth:`__eq__` method it should not define a :meth:`__hash__` operation either; if it defines :meth:`__eq__` but not diff --git a/Doc/reference/expressions.rst b/Doc/reference/expressions.rst --- a/Doc/reference/expressions.rst +++ b/Doc/reference/expressions.rst @@ -371,7 +371,7 @@ (by returning a value from the sub-generator). .. versionchanged:: 3.3 - Added ``yield from `` to delegate control flow to a subiterator + Added ``yield from `` to delegate control flow to a subiterator. The parentheses may be omitted when the yield expression is the sole expression on the right hand side of an assignment statement. diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -1408,6 +1408,7 @@ Maciej Szulik Arfrever Frehtes Taifersar Arahesis Hideaki Takahashi +Takase Arihiro Indra Talip Neil Tallim Geoff Talvola -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 10 12:53:40 2015 From: python-checkins at python.org (martin.panter) Date: Sat, 10 Oct 2015 10:53:40 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_Issue_=2325161=3A_Merge_full_stops_from_3=2E4_into_3=2E5?= Message-ID: <20151010105340.55478.20305@psf.io> https://hg.python.org/cpython/rev/be34d96e2184 changeset: 98643:be34d96e2184 branch: 3.5 parent: 98640:46df76819b79 parent: 98642:4dfead9635e5 user: Martin Panter date: Sat Oct 10 10:44:25 2015 +0000 summary: Issue #25161: Merge full stops from 3.4 into 3.5 files: Doc/c-api/code.rst | 2 +- Doc/c-api/gen.rst | 2 +- Doc/c-api/veryhigh.rst | 2 +- Doc/distutils/apiref.rst | 2 +- Doc/glossary.rst | 2 +- Doc/library/2to3.rst | 2 +- Doc/library/argparse.rst | 2 +- Doc/library/asyncio-protocol.rst | 2 +- Doc/library/bdb.rst | 2 +- Doc/library/contextlib.rst | 2 +- Doc/library/decimal.rst | 2 +- Doc/library/email.charset.rst | 2 +- Doc/library/enum.rst | 2 +- Doc/library/exceptions.rst | 2 +- Doc/library/fractions.rst | 2 +- Doc/library/http.cookiejar.rst | 2 +- Doc/library/http.cookies.rst | 2 +- Doc/library/importlib.rst | 2 +- Doc/library/json.rst | 2 +- Doc/library/locale.rst | 2 +- Doc/library/logging.handlers.rst | 2 +- Doc/library/modulefinder.rst | 2 +- Doc/library/msvcrt.rst | 2 +- Doc/library/multiprocessing.rst | 6 +++--- Doc/library/pickle.rst | 2 +- Doc/library/platform.rst | 2 +- Doc/library/plistlib.rst | 2 +- Doc/library/shutil.rst | 2 +- Doc/library/sys.rst | 2 +- Doc/library/tkinter.ttk.rst | 5 +++-- Doc/library/urllib.request.rst | 2 +- Doc/library/wsgiref.rst | 2 +- 
Doc/library/xml.dom.minidom.rst | 2 +- Doc/library/xml.dom.rst | 2 +- Doc/library/xml.etree.elementtree.rst | 2 +- Doc/reference/datamodel.rst | 2 +- Doc/reference/expressions.rst | 2 +- Misc/ACKS | 1 + 38 files changed, 42 insertions(+), 40 deletions(-) diff --git a/Doc/c-api/code.rst b/Doc/c-api/code.rst --- a/Doc/c-api/code.rst +++ b/Doc/c-api/code.rst @@ -29,7 +29,7 @@ .. c:function:: int PyCode_Check(PyObject *co) - Return true if *co* is a :class:`code` object + Return true if *co* is a :class:`code` object. .. c:function:: int PyCode_GetNumFree(PyCodeObject *co) diff --git a/Doc/c-api/gen.rst b/Doc/c-api/gen.rst --- a/Doc/c-api/gen.rst +++ b/Doc/c-api/gen.rst @@ -17,7 +17,7 @@ .. c:var:: PyTypeObject PyGen_Type - The type object corresponding to generator objects + The type object corresponding to generator objects. .. c:function:: int PyGen_Check(PyObject *ob) diff --git a/Doc/c-api/veryhigh.rst b/Doc/c-api/veryhigh.rst --- a/Doc/c-api/veryhigh.rst +++ b/Doc/c-api/veryhigh.rst @@ -201,7 +201,7 @@ .. c:function:: struct _node* PyParser_SimpleParseFile(FILE *fp, const char *filename, int start) This is a simplified interface to :c:func:`PyParser_SimpleParseFileFlags` below, - leaving *flags* set to ``0`` + leaving *flags* set to ``0``. .. c:function:: struct _node* PyParser_SimpleParseFileFlags(FILE *fp, const char *filename, int start, int flags) diff --git a/Doc/distutils/apiref.rst b/Doc/distutils/apiref.rst --- a/Doc/distutils/apiref.rst +++ b/Doc/distutils/apiref.rst @@ -928,7 +928,7 @@ Walk two filename lists in parallel, testing if each source is newer than its corresponding target. Return a pair of lists (*sources*, *targets*) where - source is newer than target, according to the semantics of :func:`newer` + source is newer than target, according to the semantics of :func:`newer`. .. % % equivalent to a listcomp... diff --git a/Doc/glossary.rst b/Doc/glossary.rst --- a/Doc/glossary.rst +++ b/Doc/glossary.rst @@ -948,7 +948,7 @@ without interfering with the behaviour of other Python applications running on the same system. - See also :ref:`scripts-pyvenv` + See also :ref:`scripts-pyvenv`. virtual machine A computer defined entirely in software. Python's virtual machine diff --git a/Doc/library/2to3.rst b/Doc/library/2to3.rst --- a/Doc/library/2to3.rst +++ b/Doc/library/2to3.rst @@ -271,7 +271,7 @@ .. 2to3fixer:: input - Converts ``input(prompt)`` to ``eval(input(prompt))`` + Converts ``input(prompt)`` to ``eval(input(prompt))``. .. 2to3fixer:: intern diff --git a/Doc/library/argparse.rst b/Doc/library/argparse.rst --- a/Doc/library/argparse.rst +++ b/Doc/library/argparse.rst @@ -2011,4 +2011,4 @@ ``%(default)s`` and ``%(prog)s``. * Replace the OptionParser constructor ``version`` argument with a call to - ``parser.add_argument('--version', action='version', version='')`` + ``parser.add_argument('--version', action='version', version='')``. diff --git a/Doc/library/asyncio-protocol.rst b/Doc/library/asyncio-protocol.rst --- a/Doc/library/asyncio-protocol.rst +++ b/Doc/library/asyncio-protocol.rst @@ -232,7 +232,7 @@ .. method:: kill(self) - Kill the subprocess, as in :meth:`subprocess.Popen.kill` + Kill the subprocess, as in :meth:`subprocess.Popen.kill`. On POSIX systems, the function sends SIGKILL to the subprocess. On Windows, this method is an alias for :meth:`terminate`. diff --git a/Doc/library/bdb.rst b/Doc/library/bdb.rst --- a/Doc/library/bdb.rst +++ b/Doc/library/bdb.rst @@ -231,7 +231,7 @@ .. 
method:: set_until(frame) Stop when the line with the line no greater than the current one is - reached or when returning from current frame + reached or when returning from current frame. .. method:: set_trace([frame]) diff --git a/Doc/library/contextlib.rst b/Doc/library/contextlib.rst --- a/Doc/library/contextlib.rst +++ b/Doc/library/contextlib.rst @@ -553,7 +553,7 @@ Due to the way the decorator protocol works, a callback function declared this way cannot take any parameters. Instead, any resources to -be released must be accessed as closure variables +be released must be accessed as closure variables. Using a context manager as a function decorator diff --git a/Doc/library/decimal.rst b/Doc/library/decimal.rst --- a/Doc/library/decimal.rst +++ b/Doc/library/decimal.rst @@ -841,7 +841,7 @@ Engineering notation has an exponent which is a multiple of 3, so there are up to 3 digits left of the decimal place. For example, converts - ``Decimal('123E+1')`` to ``Decimal('1.23E+3')`` + ``Decimal('123E+1')`` to ``Decimal('1.23E+3')``. .. method:: to_integral(rounding=None, context=None) diff --git a/Doc/library/email.charset.rst b/Doc/library/email.charset.rst --- a/Doc/library/email.charset.rst +++ b/Doc/library/email.charset.rst @@ -234,5 +234,5 @@ *charset* is the canonical name of a character set. *codecname* is the name of a Python codec, as appropriate for the second argument to the :class:`str`'s - :meth:`~str.encode` method + :meth:`~str.encode` method. diff --git a/Doc/library/enum.rst b/Doc/library/enum.rst --- a/Doc/library/enum.rst +++ b/Doc/library/enum.rst @@ -714,7 +714,7 @@ class, such as `list(Color)` or `some_var in Color`. :class:`EnumMeta` is responsible for ensuring that various other methods on the final :class:`Enum` class are correct (such as :meth:`__new__`, :meth:`__getnewargs__`, -:meth:`__str__` and :meth:`__repr__`) +:meth:`__str__` and :meth:`__repr__`). Enum Members (aka instances) diff --git a/Doc/library/exceptions.rst b/Doc/library/exceptions.rst --- a/Doc/library/exceptions.rst +++ b/Doc/library/exceptions.rst @@ -635,7 +635,7 @@ .. exception:: SyntaxWarning - Base class for warnings about dubious syntax + Base class for warnings about dubious syntax. .. exception:: RuntimeWarning diff --git a/Doc/library/fractions.rst b/Doc/library/fractions.rst --- a/Doc/library/fractions.rst +++ b/Doc/library/fractions.rst @@ -97,7 +97,7 @@ This class method constructs a :class:`Fraction` representing the exact value of *flt*, which must be a :class:`float`. Beware that - ``Fraction.from_float(0.3)`` is not the same value as ``Fraction(3, 10)`` + ``Fraction.from_float(0.3)`` is not the same value as ``Fraction(3, 10)``. .. note:: diff --git a/Doc/library/http.cookiejar.rst b/Doc/library/http.cookiejar.rst --- a/Doc/library/http.cookiejar.rst +++ b/Doc/library/http.cookiejar.rst @@ -540,7 +540,7 @@ .. attribute:: DefaultCookiePolicy.strict_ns_unverifiable - apply RFC 2965 rules on unverifiable transactions even to Netscape cookies + Apply RFC 2965 rules on unverifiable transactions even to Netscape cookies. .. attribute:: DefaultCookiePolicy.strict_ns_domain diff --git a/Doc/library/http.cookies.rst b/Doc/library/http.cookies.rst --- a/Doc/library/http.cookies.rst +++ b/Doc/library/http.cookies.rst @@ -84,7 +84,7 @@ Return an encoded value. *val* can be any type, but return value must be a string. This method does nothing in :class:`BaseCookie` --- it exists so it can - be overridden + be overridden. 
In general, it should be the case that :meth:`value_encode` and :meth:`value_decode` are inverses on the range of *value_decode*. diff --git a/Doc/library/importlib.rst b/Doc/library/importlib.rst --- a/Doc/library/importlib.rst +++ b/Doc/library/importlib.rst @@ -747,7 +747,7 @@ modules recognized by the standard import machinery. This is a helper for code which simply needs to know if a filesystem path potentially refers to a module without needing any details on the kind - of module (for example, :func:`inspect.getmodulename`) + of module (for example, :func:`inspect.getmodulename`). .. versionadded:: 3.3 diff --git a/Doc/library/json.rst b/Doc/library/json.rst --- a/Doc/library/json.rst +++ b/Doc/library/json.rst @@ -339,7 +339,7 @@ .. method:: decode(s) Return the Python representation of *s* (a :class:`str` instance - containing a JSON document) + containing a JSON document). :exc:`JSONDecodeError` will be raised if the given JSON document is not valid. diff --git a/Doc/library/locale.rst b/Doc/library/locale.rst --- a/Doc/library/locale.rst +++ b/Doc/library/locale.rst @@ -204,7 +204,7 @@ .. data:: RADIXCHAR - Get the radix character (decimal dot, decimal comma, etc.) + Get the radix character (decimal dot, decimal comma, etc.). .. data:: THOUSEP diff --git a/Doc/library/logging.handlers.rst b/Doc/library/logging.handlers.rst --- a/Doc/library/logging.handlers.rst +++ b/Doc/library/logging.handlers.rst @@ -230,7 +230,7 @@ renamed to the destination. :param source: The source filename. This is normally the base - filename, e.g. 'test.log' + filename, e.g. 'test.log'. :param dest: The destination filename. This is normally what the source is rotated to, e.g. 'test.log.1'. diff --git a/Doc/library/modulefinder.rst b/Doc/library/modulefinder.rst --- a/Doc/library/modulefinder.rst +++ b/Doc/library/modulefinder.rst @@ -53,7 +53,7 @@ .. attribute:: modules A dictionary mapping module names to modules. See - :ref:`modulefinder-example` + :ref:`modulefinder-example`. .. _modulefinder-example: diff --git a/Doc/library/msvcrt.rst b/Doc/library/msvcrt.rst --- a/Doc/library/msvcrt.rst +++ b/Doc/library/msvcrt.rst @@ -18,7 +18,7 @@ The module implements both the normal and wide char variants of the console I/O api. The normal API deals only with ASCII characters and is of limited use for internationalized applications. The wide char API should be used where -ever possible +ever possible. .. versionchanged:: 3.3 Operations in this module now raise :exc:`OSError` where :exc:`IOError` diff --git a/Doc/library/multiprocessing.rst b/Doc/library/multiprocessing.rst --- a/Doc/library/multiprocessing.rst +++ b/Doc/library/multiprocessing.rst @@ -1939,7 +1939,7 @@ raised by :meth:`_callmethod`. Note in particular that an exception will be raised if *methodname* has - not been *exposed* + not been *exposed*. An example of the usage of :meth:`_callmethod`: @@ -2045,7 +2045,7 @@ If *callback* is specified then it should be a callable which accepts a single argument. When the result becomes ready *callback* is applied to it, that is unless the call failed, in which case the *error_callback* - is applied instead + is applied instead. If *error_callback* is specified then it should be a callable which accepts a single argument. If the target function fails, then @@ -2070,7 +2070,7 @@ If *callback* is specified then it should be a callable which accepts a single argument. 
When the result becomes ready *callback* is applied to it, that is unless the call failed, in which case the *error_callback* - is applied instead + is applied instead. If *error_callback* is specified then it should be a callable which accepts a single argument. If the target function fails, then diff --git a/Doc/library/pickle.rst b/Doc/library/pickle.rst --- a/Doc/library/pickle.rst +++ b/Doc/library/pickle.rst @@ -235,7 +235,7 @@ .. function:: loads(bytes_object, \*, fix_imports=True, encoding="ASCII", errors="strict") Read a pickled object hierarchy from a :class:`bytes` object and return the - reconstituted object hierarchy specified therein + reconstituted object hierarchy specified therein. The protocol version of the pickle is detected automatically, so no protocol argument is needed. Bytes past the pickled object's diff --git a/Doc/library/platform.rst b/Doc/library/platform.rst --- a/Doc/library/platform.rst +++ b/Doc/library/platform.rst @@ -117,7 +117,7 @@ .. function:: python_version() - Returns the Python version as string ``'major.minor.patchlevel'`` + Returns the Python version as string ``'major.minor.patchlevel'``. Note that unlike the Python ``sys.version``, the returned value will always include the patchlevel (it defaults to 0). diff --git a/Doc/library/plistlib.rst b/Doc/library/plistlib.rst --- a/Doc/library/plistlib.rst +++ b/Doc/library/plistlib.rst @@ -194,7 +194,7 @@ It has one attribute, :attr:`data`, that can be used to retrieve the Python bytes object stored in it. - .. deprecated:: 3.4 Use a :class:`bytes` object instead + .. deprecated:: 3.4 Use a :class:`bytes` object instead. The following constants are available: diff --git a/Doc/library/shutil.rst b/Doc/library/shutil.rst --- a/Doc/library/shutil.rst +++ b/Doc/library/shutil.rst @@ -497,7 +497,7 @@ .. function:: get_archive_formats() Return a list of supported formats for archiving. - Each element of the returned sequence is a tuple ``(name, description)`` + Each element of the returned sequence is a tuple ``(name, description)``. By default :mod:`shutil` provides these formats: diff --git a/Doc/library/sys.rst b/Doc/library/sys.rst --- a/Doc/library/sys.rst +++ b/Doc/library/sys.rst @@ -641,7 +641,7 @@ :term:`struct sequence` :data:`sys.version_info` may be used for a more human-friendly encoding of the same information. - More details of ``hexversion`` can be found at :ref:`apiabiversion` + More details of ``hexversion`` can be found at :ref:`apiabiversion`. .. data:: implementation diff --git a/Doc/library/tkinter.ttk.rst b/Doc/library/tkinter.ttk.rst --- a/Doc/library/tkinter.ttk.rst +++ b/Doc/library/tkinter.ttk.rst @@ -110,8 +110,9 @@ | class | Specifies the window class. The class is used when querying | | | the option database for the window's other options, to | | | determine the default bindtags for the window, and to select | - | | the widget's default layout and style. This is a read-only | - | | which may only be specified when the window is created | + | | the widget's default layout and style. This option is | + | | read-only, and may only be specified when the window is | + | | created. | +-----------+--------------------------------------------------------------+ | cursor | Specifies the mouse cursor to be used for the widget. 
If set | | | to the empty string (the default), the cursor is inherited | diff --git a/Doc/library/urllib.request.rst b/Doc/library/urllib.request.rst --- a/Doc/library/urllib.request.rst +++ b/Doc/library/urllib.request.rst @@ -202,7 +202,7 @@ ``"Python-urllib/2.6"`` (on Python 2.6). An example of using ``Content-Type`` header with *data* argument would be - sending a dictionary like ``{"Content-Type":" application/x-www-form-urlencoded;charset=utf-8"}`` + sending a dictionary like ``{"Content-Type":" application/x-www-form-urlencoded;charset=utf-8"}``. The final two arguments are only of interest for correct handling of third-party HTTP cookies: diff --git a/Doc/library/wsgiref.rst b/Doc/library/wsgiref.rst --- a/Doc/library/wsgiref.rst +++ b/Doc/library/wsgiref.rst @@ -506,7 +506,7 @@ Similar to :class:`BaseCGIHandler`, but designed for use with HTTP origin servers. If you are writing an HTTP server implementation, you will probably - want to subclass this instead of :class:`BaseCGIHandler` + want to subclass this instead of :class:`BaseCGIHandler`. This class is a subclass of :class:`BaseHandler`. It overrides the :meth:`__init__`, :meth:`get_stdin`, :meth:`get_stderr`, :meth:`add_cgi_vars`, diff --git a/Doc/library/xml.dom.minidom.rst b/Doc/library/xml.dom.minidom.rst --- a/Doc/library/xml.dom.minidom.rst +++ b/Doc/library/xml.dom.minidom.rst @@ -15,7 +15,7 @@ Model interface, with an API similar to that in other languages. It is intended to be simpler than the full DOM and also significantly smaller. Users who are not already proficient with the DOM should consider using the -:mod:`xml.etree.ElementTree` module for their XML processing instead +:mod:`xml.etree.ElementTree` module for their XML processing instead. .. warning:: diff --git a/Doc/library/xml.dom.rst b/Doc/library/xml.dom.rst --- a/Doc/library/xml.dom.rst +++ b/Doc/library/xml.dom.rst @@ -304,7 +304,7 @@ .. attribute:: Node.prefix The part of the :attr:`tagName` preceding the colon if there is one, else the - empty string. The value is a string, or ``None`` + empty string. The value is a string, or ``None``. .. attribute:: Node.namespaceURI diff --git a/Doc/library/xml.etree.elementtree.rst b/Doc/library/xml.etree.elementtree.rst --- a/Doc/library/xml.etree.elementtree.rst +++ b/Doc/library/xml.etree.elementtree.rst @@ -891,7 +891,7 @@ Creates and returns a tree iterator for the root element. The iterator loops over all elements in this tree, in section order. *tag* is the tag - to look for (default is to return all elements) + to look for (default is to return all elements). .. method:: iterfind(match, namespaces=None) diff --git a/Doc/reference/datamodel.rst b/Doc/reference/datamodel.rst --- a/Doc/reference/datamodel.rst +++ b/Doc/reference/datamodel.rst @@ -1320,7 +1320,7 @@ object's :meth:`__hash__` must interoperate on builds of different bit sizes, be sure to check the width on all supported builds. An easy way to do this is with - ``python -c "import sys; print(sys.hash_info.width)"`` + ``python -c "import sys; print(sys.hash_info.width)"``. If a class does not define an :meth:`__eq__` method it should not define a :meth:`__hash__` operation either; if it defines :meth:`__eq__` but not diff --git a/Doc/reference/expressions.rst b/Doc/reference/expressions.rst --- a/Doc/reference/expressions.rst +++ b/Doc/reference/expressions.rst @@ -371,7 +371,7 @@ (by returning a value from the sub-generator). .. 
versionchanged:: 3.3 - Added ``yield from `` to delegate control flow to a subiterator + Added ``yield from `` to delegate control flow to a subiterator. The parentheses may be omitted when the yield expression is the sole expression on the right hand side of an assignment statement. diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -1406,6 +1406,7 @@ Maciej Szulik Arfrever Frehtes Taifersar Arahesis Hideaki Takahashi +Takase Arihiro Indra Talip Neil Tallim Geoff Talvola -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 10 12:57:52 2015 From: python-checkins at python.org (martin.panter) Date: Sat, 10 Oct 2015 10:57:52 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogSXNzdWUgIzI1MTYx?= =?utf-8?q?=3A_Add_full_stops_in_documentation=3B_patch_by_Takase_Arihiro?= Message-ID: <20151010105751.20753.13670@psf.io> https://hg.python.org/cpython/rev/b9a0ecae02cb changeset: 98645:b9a0ecae02cb branch: 2.7 parent: 98637:57fc950298bb user: Martin Panter date: Sat Oct 10 10:52:35 2015 +0000 summary: Issue #25161: Add full stops in documentation; patch by Takase Arihiro files: Doc/c-api/code.rst | 2 +- Doc/c-api/gen.rst | 2 +- Doc/c-api/veryhigh.rst | 2 +- Doc/distutils/apiref.rst | 2 +- Doc/library/2to3.rst | 2 +- Doc/library/argparse.rst | 2 +- Doc/library/bdb.rst | 2 +- Doc/library/cookie.rst | 2 +- Doc/library/cookielib.rst | 2 +- Doc/library/decimal.rst | 2 +- Doc/library/exceptions.rst | 2 +- Doc/library/fractions.rst | 2 +- Doc/library/json.rst | 2 +- Doc/library/locale.rst | 2 +- Doc/library/modulefinder.rst | 2 +- Doc/library/msvcrt.rst | 2 +- Doc/library/multiprocessing.rst | 2 +- Doc/library/platform.rst | 2 +- Doc/library/shutil.rst | 2 +- Doc/library/wsgiref.rst | 2 +- Doc/library/xml.dom.minidom.rst | 2 +- Doc/library/xml.dom.rst | 2 +- Doc/library/xml.etree.elementtree.rst | 2 +- Misc/ACKS | 1 + 24 files changed, 24 insertions(+), 23 deletions(-) diff --git a/Doc/c-api/code.rst b/Doc/c-api/code.rst --- a/Doc/c-api/code.rst +++ b/Doc/c-api/code.rst @@ -29,7 +29,7 @@ .. c:function:: int PyCode_Check(PyObject *co) - Return true if *co* is a :class:`code` object + Return true if *co* is a :class:`code` object. .. c:function:: int PyCode_GetNumFree(PyObject *co) diff --git a/Doc/c-api/gen.rst b/Doc/c-api/gen.rst --- a/Doc/c-api/gen.rst +++ b/Doc/c-api/gen.rst @@ -17,7 +17,7 @@ .. c:var:: PyTypeObject PyGen_Type - The type object corresponding to generator objects + The type object corresponding to generator objects. .. c:function:: int PyGen_Check(ob) diff --git a/Doc/c-api/veryhigh.rst b/Doc/c-api/veryhigh.rst --- a/Doc/c-api/veryhigh.rst +++ b/Doc/c-api/veryhigh.rst @@ -168,7 +168,7 @@ .. c:function:: struct _node* PyParser_SimpleParseFile(FILE *fp, const char *filename, int start) This is a simplified interface to :c:func:`PyParser_SimpleParseFileFlags` below, - leaving *flags* set to ``0`` + leaving *flags* set to ``0``. .. c:function:: struct _node* PyParser_SimpleParseFileFlags(FILE *fp, const char *filename, int start, int flags) diff --git a/Doc/distutils/apiref.rst b/Doc/distutils/apiref.rst --- a/Doc/distutils/apiref.rst +++ b/Doc/distutils/apiref.rst @@ -926,7 +926,7 @@ Walk two filename lists in parallel, testing if each source is newer than its corresponding target. Return a pair of lists (*sources*, *targets*) where - source is newer than target, according to the semantics of :func:`newer` + source is newer than target, according to the semantics of :func:`newer`. .. % % equivalent to a listcomp... 
diff --git a/Doc/library/2to3.rst b/Doc/library/2to3.rst --- a/Doc/library/2to3.rst +++ b/Doc/library/2to3.rst @@ -271,7 +271,7 @@ .. 2to3fixer:: input - Converts ``input(prompt)`` to ``eval(input(prompt))`` + Converts ``input(prompt)`` to ``eval(input(prompt))``. .. 2to3fixer:: intern diff --git a/Doc/library/argparse.rst b/Doc/library/argparse.rst --- a/Doc/library/argparse.rst +++ b/Doc/library/argparse.rst @@ -1948,4 +1948,4 @@ ``%(default)s`` and ``%(prog)s``. * Replace the OptionParser constructor ``version`` argument with a call to - ``parser.add_argument('--version', action='version', version='')`` + ``parser.add_argument('--version', action='version', version='')``. diff --git a/Doc/library/bdb.rst b/Doc/library/bdb.rst --- a/Doc/library/bdb.rst +++ b/Doc/library/bdb.rst @@ -223,7 +223,7 @@ .. method:: set_until(frame) Stop when the line with the line no greater than the current one is - reached or when returning from current frame + reached or when returning from current frame. .. method:: set_trace([frame]) diff --git a/Doc/library/cookie.rst b/Doc/library/cookie.rst --- a/Doc/library/cookie.rst +++ b/Doc/library/cookie.rst @@ -116,7 +116,7 @@ Return an encoded value. *val* can be any type, but return value must be a string. This method does nothing in :class:`BaseCookie` --- it exists so it can - be overridden + be overridden. In general, it should be the case that :meth:`value_encode` and :meth:`value_decode` are inverses on the range of *value_decode*. diff --git a/Doc/library/cookielib.rst b/Doc/library/cookielib.rst --- a/Doc/library/cookielib.rst +++ b/Doc/library/cookielib.rst @@ -546,7 +546,7 @@ .. attribute:: DefaultCookiePolicy.strict_ns_unverifiable - apply RFC 2965 rules on unverifiable transactions even to Netscape cookies + Apply RFC 2965 rules on unverifiable transactions even to Netscape cookies. .. attribute:: DefaultCookiePolicy.strict_ns_domain diff --git a/Doc/library/decimal.rst b/Doc/library/decimal.rst --- a/Doc/library/decimal.rst +++ b/Doc/library/decimal.rst @@ -890,7 +890,7 @@ Engineering notation has an exponent which is a multiple of 3, so there are up to 3 digits left of the decimal place. For example, converts - ``Decimal('123E+1')`` to ``Decimal('1.23E+3')`` + ``Decimal('123E+1')`` to ``Decimal('1.23E+3')``. .. method:: to_integral([rounding[, context]]) diff --git a/Doc/library/exceptions.rst b/Doc/library/exceptions.rst --- a/Doc/library/exceptions.rst +++ b/Doc/library/exceptions.rst @@ -496,7 +496,7 @@ .. exception:: SyntaxWarning - Base class for warnings about dubious syntax + Base class for warnings about dubious syntax. .. exception:: RuntimeWarning diff --git a/Doc/library/fractions.rst b/Doc/library/fractions.rst --- a/Doc/library/fractions.rst +++ b/Doc/library/fractions.rst @@ -89,7 +89,7 @@ This class method constructs a :class:`Fraction` representing the exact value of *flt*, which must be a :class:`float`. Beware that - ``Fraction.from_float(0.3)`` is not the same value as ``Fraction(3, 10)`` + ``Fraction.from_float(0.3)`` is not the same value as ``Fraction(3, 10)``. .. note:: From Python 2.7 onwards, you can also construct a :class:`Fraction` instance directly from a :class:`float`. diff --git a/Doc/library/json.rst b/Doc/library/json.rst --- a/Doc/library/json.rst +++ b/Doc/library/json.rst @@ -356,7 +356,7 @@ .. method:: decode(s) Return the Python representation of *s* (a :class:`str` or - :class:`unicode` instance containing a JSON document) + :class:`unicode` instance containing a JSON document). .. 
method:: raw_decode(s) diff --git a/Doc/library/locale.rst b/Doc/library/locale.rst --- a/Doc/library/locale.rst +++ b/Doc/library/locale.rst @@ -208,7 +208,7 @@ .. data:: RADIXCHAR - Get the radix character (decimal dot, decimal comma, etc.) + Get the radix character (decimal dot, decimal comma, etc.). .. data:: THOUSEP diff --git a/Doc/library/modulefinder.rst b/Doc/library/modulefinder.rst --- a/Doc/library/modulefinder.rst +++ b/Doc/library/modulefinder.rst @@ -55,7 +55,7 @@ .. attribute:: modules A dictionary mapping module names to modules. See - :ref:`modulefinder-example` + :ref:`modulefinder-example`. .. _modulefinder-example: diff --git a/Doc/library/msvcrt.rst b/Doc/library/msvcrt.rst --- a/Doc/library/msvcrt.rst +++ b/Doc/library/msvcrt.rst @@ -19,7 +19,7 @@ The module implements both the normal and wide char variants of the console I/O api. The normal API deals only with ASCII characters and is of limited use for internationalized applications. The wide char API should be used where -ever possible +ever possible. .. _msvcrt-files: diff --git a/Doc/library/multiprocessing.rst b/Doc/library/multiprocessing.rst --- a/Doc/library/multiprocessing.rst +++ b/Doc/library/multiprocessing.rst @@ -1705,7 +1705,7 @@ raised by :meth:`_callmethod`. Note in particular that an exception will be raised if *methodname* has - not been *exposed* + not been *exposed*. An example of the usage of :meth:`_callmethod`: diff --git a/Doc/library/platform.rst b/Doc/library/platform.rst --- a/Doc/library/platform.rst +++ b/Doc/library/platform.rst @@ -126,7 +126,7 @@ .. function:: python_version() - Returns the Python version as string ``'major.minor.patchlevel'`` + Returns the Python version as string ``'major.minor.patchlevel'``. Note that unlike the Python ``sys.version``, the returned value will always include the patchlevel (it defaults to 0). diff --git a/Doc/library/shutil.rst b/Doc/library/shutil.rst --- a/Doc/library/shutil.rst +++ b/Doc/library/shutil.rst @@ -291,7 +291,7 @@ .. function:: get_archive_formats() Return a list of supported formats for archiving. - Each element of the returned sequence is a tuple ``(name, description)`` + Each element of the returned sequence is a tuple ``(name, description)``. By default :mod:`shutil` provides these formats: diff --git a/Doc/library/wsgiref.rst b/Doc/library/wsgiref.rst --- a/Doc/library/wsgiref.rst +++ b/Doc/library/wsgiref.rst @@ -479,7 +479,7 @@ Similar to :class:`BaseCGIHandler`, but designed for use with HTTP origin servers. If you are writing an HTTP server implementation, you will probably - want to subclass this instead of :class:`BaseCGIHandler` + want to subclass this instead of :class:`BaseCGIHandler`. This class is a subclass of :class:`BaseHandler`. It overrides the :meth:`__init__`, :meth:`get_stdin`, :meth:`get_stderr`, :meth:`add_cgi_vars`, diff --git a/Doc/library/xml.dom.minidom.rst b/Doc/library/xml.dom.minidom.rst --- a/Doc/library/xml.dom.minidom.rst +++ b/Doc/library/xml.dom.minidom.rst @@ -18,7 +18,7 @@ Model interface, with an API similar to that in other languages. It is intended to be simpler than the full DOM and also significantly smaller. Users who are not already proficient with the DOM should consider using the -:mod:`xml.etree.ElementTree` module for their XML processing instead +:mod:`xml.etree.ElementTree` module for their XML processing instead. .. warning:: diff --git a/Doc/library/xml.dom.rst b/Doc/library/xml.dom.rst --- a/Doc/library/xml.dom.rst +++ b/Doc/library/xml.dom.rst @@ -321,7 +321,7 @@ .. 
attribute:: Node.prefix The part of the :attr:`tagName` preceding the colon if there is one, else the - empty string. The value is a string, or ``None`` + empty string. The value is a string, or ``None``. .. attribute:: Node.namespaceURI diff --git a/Doc/library/xml.etree.elementtree.rst b/Doc/library/xml.etree.elementtree.rst --- a/Doc/library/xml.etree.elementtree.rst +++ b/Doc/library/xml.etree.elementtree.rst @@ -829,7 +829,7 @@ Creates and returns a tree iterator for the root element. The iterator loops over all elements in this tree, in section order. *tag* is the tag - to look for (default is to return all elements) + to look for (default is to return all elements). .. method:: iterfind(match) diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -1341,6 +1341,7 @@ Amir Szekely Arfrever Frehtes Taifersar Arahesis Hideaki Takahashi +Takase Arihiro Indra Talip Neil Tallim Geoff Talvola -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 10 13:10:37 2015 From: python-checkins at python.org (martin.panter) Date: Sat, 10 Oct 2015 11:10:37 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2325322=3A_Merge_contextlib=2Esuppress_test_fix_f?= =?utf-8?q?rom_3=2E5?= Message-ID: <20151010111036.128828.50044@psf.io> https://hg.python.org/cpython/rev/c601496c2829 changeset: 98648:c601496c2829 parent: 98644:fe87a6f9caa7 parent: 98647:836ac579e179 user: Martin Panter date: Sat Oct 10 11:06:05 2015 +0000 summary: Issue #25322: Merge contextlib.suppress test fix from 3.5 files: Lib/test/test_contextlib.py | 4 +++- 1 files changed, 3 insertions(+), 1 deletions(-) diff --git a/Lib/test/test_contextlib.py b/Lib/test/test_contextlib.py --- a/Lib/test/test_contextlib.py +++ b/Lib/test/test_contextlib.py @@ -880,9 +880,11 @@ with ignore_exceptions: len(5) with ignore_exceptions: - 1/0 with ignore_exceptions: # Check nested usage len(5) + outer_continued = True + 1/0 + self.assertTrue(outer_continued) if __name__ == "__main__": unittest.main() -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 10 13:10:37 2015 From: python-checkins at python.org (martin.panter) Date: Sat, 10 Oct 2015 11:10:37 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzI1MzIy?= =?utf-8?q?=3A_Fix_test_for_nested_contextlib=2Esuppress?= Message-ID: <20151010111036.128842.24140@psf.io> https://hg.python.org/cpython/rev/452f76cbebdd changeset: 98646:452f76cbebdd branch: 3.4 parent: 98642:4dfead9635e5 user: Martin Panter date: Sat Oct 10 11:04:44 2015 +0000 summary: Issue #25322: Fix test for nested contextlib.suppress files: Lib/test/test_contextlib.py | 4 +++- 1 files changed, 3 insertions(+), 1 deletions(-) diff --git a/Lib/test/test_contextlib.py b/Lib/test/test_contextlib.py --- a/Lib/test/test_contextlib.py +++ b/Lib/test/test_contextlib.py @@ -828,9 +828,11 @@ with ignore_exceptions: len(5) with ignore_exceptions: - 1/0 with ignore_exceptions: # Check nested usage len(5) + outer_continued = True + 1/0 + self.assertTrue(outer_continued) if __name__ == "__main__": unittest.main() -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 10 13:10:37 2015 From: python-checkins at python.org (martin.panter) Date: Sat, 10 Oct 2015 11:10:37 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_Issue_=2325322=3A_Merge_contextlib=2Esuppress_test_fix_from_3?= =?utf-8?q?=2E4_into_3=2E5?= Message-ID: 
<20151010111036.18390.15740@psf.io> https://hg.python.org/cpython/rev/836ac579e179 changeset: 98647:836ac579e179 branch: 3.5 parent: 98643:be34d96e2184 parent: 98646:452f76cbebdd user: Martin Panter date: Sat Oct 10 11:05:47 2015 +0000 summary: Issue #25322: Merge contextlib.suppress test fix from 3.4 into 3.5 files: Lib/test/test_contextlib.py | 4 +++- 1 files changed, 3 insertions(+), 1 deletions(-) diff --git a/Lib/test/test_contextlib.py b/Lib/test/test_contextlib.py --- a/Lib/test/test_contextlib.py +++ b/Lib/test/test_contextlib.py @@ -880,9 +880,11 @@ with ignore_exceptions: len(5) with ignore_exceptions: - 1/0 with ignore_exceptions: # Check nested usage len(5) + outer_continued = True + 1/0 + self.assertTrue(outer_continued) if __name__ == "__main__": unittest.main() -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 10 18:45:01 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Sat, 10 Oct 2015 16:45:01 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2325364=3A_zipfile_now_works_in_threads_disabled_?= =?utf-8?q?builds=2E?= Message-ID: <20151010164501.55484.42332@psf.io> https://hg.python.org/cpython/rev/44b37c3ee76c changeset: 98650:44b37c3ee76c parent: 98648:c601496c2829 parent: 98649:15740b3ad148 user: Serhiy Storchaka date: Sat Oct 10 19:44:23 2015 +0300 summary: Issue #25364: zipfile now works in threads disabled builds. files: Lib/zipfile.py | 5 ++++- Misc/NEWS | 2 ++ 2 files changed, 6 insertions(+), 1 deletions(-) diff --git a/Lib/zipfile.py b/Lib/zipfile.py --- a/Lib/zipfile.py +++ b/Lib/zipfile.py @@ -13,8 +13,11 @@ import shutil import struct import binascii -import threading +try: + import threading +except ImportError: + import dummy_threading as threading try: import zlib # We may need its compression method diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -51,6 +51,8 @@ Library ------- +- Issue #25364: zipfile now works in threads disabled builds. + - Issue #25328: smtpd's SMTPChannel now correctly raises a ValueError if both decode_data and enable_SMTPUTF8 are set to true. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 10 18:45:01 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Sat, 10 Oct 2015 16:45:01 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy41KTogSXNzdWUgIzI1MzY0?= =?utf-8?q?=3A_zipfile_now_works_in_threads_disabled_builds=2E?= Message-ID: <20151010164501.2665.98945@psf.io> https://hg.python.org/cpython/rev/15740b3ad148 changeset: 98649:15740b3ad148 branch: 3.5 parent: 98647:836ac579e179 user: Serhiy Storchaka date: Sat Oct 10 19:43:32 2015 +0300 summary: Issue #25364: zipfile now works in threads disabled builds. files: Lib/zipfile.py | 5 ++++- Misc/NEWS | 2 ++ 2 files changed, 6 insertions(+), 1 deletions(-) diff --git a/Lib/zipfile.py b/Lib/zipfile.py --- a/Lib/zipfile.py +++ b/Lib/zipfile.py @@ -13,8 +13,11 @@ import shutil import struct import binascii -import threading +try: + import threading +except ImportError: + import dummy_threading as threading try: import zlib # We may need its compression method diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -40,6 +40,8 @@ Library ------- +- Issue #25364: zipfile now works in threads disabled builds. + - Issue #25328: smtpd's SMTPChannel now correctly raises a ValueError if both decode_data and enable_SMTPUTF8 are set to true. 
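The zipfile change above is a reusable idiom: when the interpreter is built without thread support (--without-threads), fall back to dummy_threading, which exposes the same API backed by no-op locks. A minimal sketch of the pattern outside zipfile; the module-level lock and the append_record() helper are illustrative only and not part of the patch:

    try:
        import threading
    except ImportError:
        # Interpreter compiled without threads: dummy_threading provides the
        # same interface, so single-threaded code keeps working unchanged.
        import dummy_threading as threading

    _lock = threading.RLock()

    def append_record(records, item):
        # Same code path whether real locks or dummy no-op locks are in use.
        with _lock:
            records.append(item)

    records = []
    append_record(records, 'entry')

zipfile itself appears to need threading only for an RLock that serializes access to the shared file object, which is why the dummy replacement is sufficient in a threads-disabled build.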
-- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 10 19:12:40 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Sat, 10 Oct 2015 17:12:40 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzI1MzY1?= =?utf-8?q?=3A_test=5Fpickle_now_works_in_threads_disabled_builds=2E?= Message-ID: <20151010171238.70979.47989@psf.io> https://hg.python.org/cpython/rev/1cb9ce2d83d8 changeset: 98651:1cb9ce2d83d8 branch: 3.4 parent: 98646:452f76cbebdd user: Serhiy Storchaka date: Sat Oct 10 20:10:07 2015 +0300 summary: Issue #25365: test_pickle now works in threads disabled builds. files: Lib/test/test_pickle.py | 5 +++-- 1 files changed, 3 insertions(+), 2 deletions(-) diff --git a/Lib/test/test_pickle.py b/Lib/test/test_pickle.py --- a/Lib/test/test_pickle.py +++ b/Lib/test/test_pickle.py @@ -377,8 +377,9 @@ self.assertEqual(mapping('exceptions', name), ('builtins', name)) - import multiprocessing.context - for name, exc in get_exceptions(multiprocessing.context): + def test_multiprocessing_exceptions(self): + module = support.import_module('multiprocessing.context') + for name, exc in get_exceptions(module): with self.subTest(name): self.assertEqual(reverse_mapping('multiprocessing.context', name), ('multiprocessing', name)) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 10 19:12:40 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Sat, 10 Oct 2015 17:12:40 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_Issue_=2325365=3A_test=5Fpickle_now_works_in_threads_disabled_?= =?utf-8?q?builds=2E?= Message-ID: <20151010171239.18366.34175@psf.io> https://hg.python.org/cpython/rev/48cb00431ce6 changeset: 98652:48cb00431ce6 branch: 3.5 parent: 98649:15740b3ad148 parent: 98651:1cb9ce2d83d8 user: Serhiy Storchaka date: Sat Oct 10 20:10:54 2015 +0300 summary: Issue #25365: test_pickle now works in threads disabled builds. files: Lib/test/test_pickle.py | 5 +++-- 1 files changed, 3 insertions(+), 2 deletions(-) diff --git a/Lib/test/test_pickle.py b/Lib/test/test_pickle.py --- a/Lib/test/test_pickle.py +++ b/Lib/test/test_pickle.py @@ -380,8 +380,9 @@ self.assertEqual(mapping('exceptions', name), ('builtins', name)) - import multiprocessing.context - for name, exc in get_exceptions(multiprocessing.context): + def test_multiprocessing_exceptions(self): + module = support.import_module('multiprocessing.context') + for name, exc in get_exceptions(module): with self.subTest(name): self.assertEqual(reverse_mapping('multiprocessing.context', name), ('multiprocessing', name)) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 10 19:12:40 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Sat, 10 Oct 2015 17:12:40 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2325365=3A_test=5Fpickle_now_works_in_threads_dis?= =?utf-8?q?abled_builds=2E?= Message-ID: <20151010171239.97706.89683@psf.io> https://hg.python.org/cpython/rev/655fd1e9b444 changeset: 98653:655fd1e9b444 parent: 98650:44b37c3ee76c parent: 98652:48cb00431ce6 user: Serhiy Storchaka date: Sat Oct 10 20:11:13 2015 +0300 summary: Issue #25365: test_pickle now works in threads disabled builds. 
files: Lib/test/test_pickle.py | 5 +++-- 1 files changed, 3 insertions(+), 2 deletions(-) diff --git a/Lib/test/test_pickle.py b/Lib/test/test_pickle.py --- a/Lib/test/test_pickle.py +++ b/Lib/test/test_pickle.py @@ -380,8 +380,9 @@ self.assertEqual(mapping('exceptions', name), ('builtins', name)) - import multiprocessing.context - for name, exc in get_exceptions(multiprocessing.context): + def test_multiprocessing_exceptions(self): + module = support.import_module('multiprocessing.context') + for name, exc in get_exceptions(module): with self.subTest(name): self.assertEqual(reverse_mapping('multiprocessing.context', name), ('multiprocessing', name)) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 10 19:28:42 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Sat, 10 Oct 2015 17:28:42 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E4=29=3A_Correct_Misc/N?= =?utf-8?q?EWS=2E?= Message-ID: <20151010172842.70981.94820@psf.io> https://hg.python.org/cpython/rev/1666eda8cfa9 changeset: 98655:1666eda8cfa9 branch: 3.4 user: Serhiy Storchaka date: Sat Oct 10 20:26:16 2015 +0300 summary: Correct Misc/NEWS. files: Misc/NEWS | 6 +++--- 1 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -194,9 +194,6 @@ Patch from ?ukasz Langa. - Issue #23888: Handle fractional time in cookie expiry. Patch by ssh. -- Issue #25099: Make test_compileall not fail when a entry on sys.path cannot - be written to (commonly seen in administrative installs on Windows). - - Issue #23004: mock_open() now reads binary data correctly when the type of read_data is bytes. Initial patch by Aaron Hill. @@ -600,6 +597,9 @@ Tests ----- +- Issue #25099: Make test_compileall not fail when a entry on sys.path cannot + be written to (commonly seen in administrative installs on Windows). + - Issue #24751: When running regrtest with the ``-w`` command line option, a test run is no longer marked as a failure if all tests succeed when re-run. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 10 19:28:43 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Sat, 10 Oct 2015 17:28:43 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_Null_merge?= Message-ID: <20151010172843.7238.18117@psf.io> https://hg.python.org/cpython/rev/0f91679f7f13 changeset: 98656:0f91679f7f13 branch: 3.5 parent: 98652:48cb00431ce6 parent: 98655:1666eda8cfa9 user: Serhiy Storchaka date: Sat Oct 10 20:27:28 2015 +0300 summary: Null merge files: -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 10 19:28:42 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Sat, 10 Oct 2015 17:28:42 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzI1MDk5?= =?utf-8?q?=3A_Skip_relevant_tests_in_test=5Fcompileall_when_an_entry_on?= Message-ID: <20151010172842.464.5376@psf.io> https://hg.python.org/cpython/rev/fc0a7aa7ae61 changeset: 98654:fc0a7aa7ae61 branch: 3.4 parent: 98651:1cb9ce2d83d8 user: Brett Cannon date: Fri Oct 09 15:09:43 2015 -0700 summary: Issue #25099: Skip relevant tests in test_compileall when an entry on sys.path has an unwritable __pycache__ directory. This typically comes up when someone runs the test suite from an administrative install of Python on Windows where the user does not have write permissions to the stdlib's directory. 
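The fix is the same on every branch: the direct ``import multiprocessing.context`` is replaced by ``support.import_module('multiprocessing.context')`` inside a dedicated test method, so the test is reported as skipped rather than as an error when the import is impossible (for example in a threads-disabled build, where multiprocessing cannot be imported). A short sketch of the idiom; the class and test names here are invented for illustration:

    import unittest
    from test import support

    class OptionalModuleTests(unittest.TestCase):

        def test_uses_multiprocessing(self):
            # support.import_module() raises unittest.SkipTest when the
            # import fails, so a missing module becomes a skip, not an error.
            module = support.import_module('multiprocessing.context')
            self.assertIsNotNone(module)

    if __name__ == '__main__':
        unittest.main()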
Thanks to Zachary Ware and Matthias Klose for reporting bugs related to this issue. (grafted from 34bbd537b3e688dfbb6498e9083445a6a72fc4b1) files: Lib/test/test_compileall.py | 36 ++++++++++++++++++++++-- Misc/NEWS | 3 ++ 2 files changed, 35 insertions(+), 4 deletions(-) diff --git a/Lib/test/test_compileall.py b/Lib/test/test_compileall.py --- a/Lib/test/test_compileall.py +++ b/Lib/test/test_compileall.py @@ -2,6 +2,7 @@ import compileall import importlib.util import os +import pathlib import py_compile import shutil import struct @@ -133,6 +134,33 @@ class CommandLineTests(unittest.TestCase): """Test compileall's CLI.""" + @classmethod + def setUpClass(cls): + for path in filter(os.path.isdir, sys.path): + directory_created = False + directory = pathlib.Path(path) / '__pycache__' + path = directory / 'test.try' + try: + if not directory.is_dir(): + directory.mkdir() + directory_created = True + with path.open('w') as file: + file.write('# for test_compileall') + except OSError: + sys_path_writable = False + break + finally: + support.unlink(str(path)) + if directory_created: + directory.rmdir() + else: + sys_path_writable = True + cls._sys_path_writable = sys_path_writable + + def _skip_if_sys_path_not_writable(self): + if not self._sys_path_writable: + raise unittest.SkipTest('not all entries on sys.path are writable') + def _get_run_args(self, args): interp_args = ['-S'] if sys.flags.optimize: @@ -159,8 +187,8 @@ self.assertFalse(os.path.exists(path)) def setUp(self): - self.addCleanup(self._cleanup) self.directory = tempfile.mkdtemp() + self.addCleanup(support.rmtree, self.directory) self.pkgdir = os.path.join(self.directory, 'foo') os.mkdir(self.pkgdir) self.pkgdir_cachedir = os.path.join(self.pkgdir, '__pycache__') @@ -168,11 +196,9 @@ self.initfn = script_helper.make_script(self.pkgdir, '__init__', '') self.barfn = script_helper.make_script(self.pkgdir, 'bar', '') - def _cleanup(self): - support.rmtree(self.directory) - def test_no_args_compiles_path(self): # Note that -l is implied for the no args case. + self._skip_if_sys_path_not_writable() bazfn = script_helper.make_script(self.directory, 'baz', '') self.assertRunOK(PYTHONPATH=self.directory) self.assertCompiled(bazfn) @@ -180,6 +206,7 @@ self.assertNotCompiled(self.barfn) def test_no_args_respects_force_flag(self): + self._skip_if_sys_path_not_writable() bazfn = script_helper.make_script(self.directory, 'baz', '') self.assertRunOK(PYTHONPATH=self.directory) pycpath = importlib.util.cache_from_source(bazfn) @@ -196,6 +223,7 @@ self.assertNotEqual(mtime, mtime2) def test_no_args_respects_quiet_flag(self): + self._skip_if_sys_path_not_writable() script_helper.make_script(self.directory, 'baz', '') noisy = self.assertRunOK(PYTHONPATH=self.directory) self.assertIn(b'Listing ', noisy) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -194,6 +194,9 @@ Patch from ?ukasz Langa. - Issue #23888: Handle fractional time in cookie expiry. Patch by ssh. +- Issue #25099: Make test_compileall not fail when a entry on sys.path cannot + be written to (commonly seen in administrative installs on Windows). + - Issue #23004: mock_open() now reads binary data correctly when the type of read_data is bytes. Initial patch by Aaron Hill. 
-- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 10 19:28:43 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Sat, 10 Oct 2015 17:28:43 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Null_merge?= Message-ID: <20151010172843.449.41851@psf.io> https://hg.python.org/cpython/rev/f716526fce6f changeset: 98657:f716526fce6f parent: 98653:655fd1e9b444 parent: 98656:0f91679f7f13 user: Serhiy Storchaka date: Sat Oct 10 20:27:52 2015 +0300 summary: Null merge files: -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Oct 10 21:42:59 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Sat, 10 Oct 2015 19:42:59 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2324164=3A_Objects_?= =?utf-8?q?that_need_calling_=60=60=5F=5Fnew=5F=5F=60=60_with_keyword_argu?= =?utf-8?q?ments=2C?= Message-ID: <20151010194259.3297.20111@psf.io> https://hg.python.org/cpython/rev/bc5894a3a0e6 changeset: 98658:bc5894a3a0e6 user: Serhiy Storchaka date: Sat Oct 10 22:42:18 2015 +0300 summary: Issue #24164: Objects that need calling ``__new__`` with keyword arguments, can now be pickled using pickle protocols older than protocol version 4. files: Doc/whatsnew/3.6.rst | 9 +++ Lib/pickle.py | 17 ++++- Lib/test/pickletester.py | 6 +- Misc/NEWS | 3 + Modules/_pickle.c | 78 ++++++++++++++++++++++++--- Objects/typeobject.c | 16 +---- 6 files changed, 97 insertions(+), 32 deletions(-) diff --git a/Doc/whatsnew/3.6.rst b/Doc/whatsnew/3.6.rst --- a/Doc/whatsnew/3.6.rst +++ b/Doc/whatsnew/3.6.rst @@ -112,6 +112,15 @@ (Contributed by Joe Jevnik in :issue:`24379`.) +pickle +------ + +Objects that need calling ``__new__`` with keyword arguments, can now be pickled +using :ref:`pickle protocols ` older than protocol version 4. +Protocol version 4 already supports this case. (Contributed by Serhiy +Storchaka in :issue:`24164`.) 
+ + rlcomplete ---------- diff --git a/Lib/pickle.py b/Lib/pickle.py --- a/Lib/pickle.py +++ b/Lib/pickle.py @@ -27,6 +27,7 @@ from copyreg import dispatch_table from copyreg import _extension_registry, _inverted_registry, _extension_cache from itertools import islice +from functools import partial import sys from sys import maxsize from struct import pack, unpack @@ -544,7 +545,7 @@ write = self.write func_name = getattr(func, "__name__", "") - if self.proto >= 4 and func_name == "__newobj_ex__": + if self.proto >= 2 and func_name == "__newobj_ex__": cls, args, kwargs = args if not hasattr(cls, "__new__"): raise PicklingError("args[0] from {} args has no __new__" @@ -552,10 +553,16 @@ if obj is not None and cls is not obj.__class__: raise PicklingError("args[0] from {} args has the wrong class" .format(func_name)) - save(cls) - save(args) - save(kwargs) - write(NEWOBJ_EX) + if self.proto >= 4: + save(cls) + save(args) + save(kwargs) + write(NEWOBJ_EX) + else: + func = partial(cls.__new__, cls, *args, **kwargs) + save(func) + save(()) + write(REDUCE) elif self.proto >= 2 and func_name == "__newobj__": # A __reduce__ implementation can direct protocol 2 or newer to # use the more efficient NEWOBJ opcode, while still diff --git a/Lib/test/pickletester.py b/Lib/test/pickletester.py --- a/Lib/test/pickletester.py +++ b/Lib/test/pickletester.py @@ -1580,16 +1580,14 @@ x.abc = 666 for proto in protocols: with self.subTest(proto=proto): - if 2 <= proto < 4: - self.assertRaises(ValueError, self.dumps, x, proto) - continue s = self.dumps(x, proto) if proto < 1: self.assertIn(b'\nL64206', s) # LONG elif proto < 2: self.assertIn(b'M\xce\xfa', s) # BININT2 + elif proto < 4: + self.assertIn(b'X\x04\x00\x00\x00FACE', s) # BINUNICODE else: - assert proto >= 4 self.assertIn(b'\x8c\x04FACE', s) # SHORT_BINUNICODE self.assertFalse(opcode_in_pickle(pickle.NEWOBJ, s)) self.assertEqual(opcode_in_pickle(pickle.NEWOBJ_EX, s), diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -51,6 +51,9 @@ Library ------- +- Issue #24164: Objects that need calling ``__new__`` with keyword arguments, + can now be pickled using pickle protocols older than protocol version 4. + - Issue #25364: zipfile now works in threads disabled builds. - Issue #25328: smtpd's SMTPChannel now correctly raises a ValueError if both diff --git a/Modules/_pickle.c b/Modules/_pickle.c --- a/Modules/_pickle.c +++ b/Modules/_pickle.c @@ -153,6 +153,9 @@ PyObject *codecs_encode; /* builtins.getattr, used for saving nested names with protocol < 4 */ PyObject *getattr; + /* functools.partial, used for implementing __newobj_ex__ with protocols + 2 and 3 */ + PyObject *partial; } PickleState; /* Forward declaration of the _pickle module definition. 
*/ @@ -200,6 +203,7 @@ PyObject *copyreg = NULL; PyObject *compat_pickle = NULL; PyObject *codecs = NULL; + PyObject *functools = NULL; builtins = PyEval_GetBuiltins(); if (builtins == NULL) @@ -314,12 +318,21 @@ } Py_CLEAR(codecs); + functools = PyImport_ImportModule("functools"); + if (!functools) + goto error; + st->partial = PyObject_GetAttrString(functools, "partial"); + if (!st->partial) + goto error; + Py_CLEAR(functools); + return 0; error: Py_CLEAR(copyreg); Py_CLEAR(compat_pickle); Py_CLEAR(codecs); + Py_CLEAR(functools); _Pickle_ClearState(st); return -1; } @@ -3533,11 +3546,9 @@ PyErr_Clear(); } else if (PyUnicode_Check(name)) { - if (self->proto >= 4) { - _Py_IDENTIFIER(__newobj_ex__); - use_newobj_ex = PyUnicode_Compare( - name, _PyUnicode_FromId(&PyId___newobj_ex__)) == 0; - } + _Py_IDENTIFIER(__newobj_ex__); + use_newobj_ex = PyUnicode_Compare( + name, _PyUnicode_FromId(&PyId___newobj_ex__)) == 0; if (!use_newobj_ex) { _Py_IDENTIFIER(__newobj__); use_newobj = PyUnicode_Compare( @@ -3581,11 +3592,58 @@ return -1; } - if (save(self, cls, 0) < 0 || - save(self, args, 0) < 0 || - save(self, kwargs, 0) < 0 || - _Pickler_Write(self, &newobj_ex_op, 1) < 0) { - return -1; + if (self->proto >= 4) { + if (save(self, cls, 0) < 0 || + save(self, args, 0) < 0 || + save(self, kwargs, 0) < 0 || + _Pickler_Write(self, &newobj_ex_op, 1) < 0) { + return -1; + } + } + else { + PyObject *newargs; + PyObject *cls_new; + Py_ssize_t i; + _Py_IDENTIFIER(__new__); + + newargs = PyTuple_New(Py_SIZE(args) + 2); + if (newargs == NULL) + return -1; + + cls_new = _PyObject_GetAttrId(cls, &PyId___new__); + if (cls_new == NULL) { + Py_DECREF(newargs); + return -1; + } + PyTuple_SET_ITEM(newargs, 0, cls_new); + Py_INCREF(cls); + PyTuple_SET_ITEM(newargs, 1, cls); + for (i = 0; i < Py_SIZE(args); i++) { + PyObject *item = PyTuple_GET_ITEM(args, i); + Py_INCREF(item); + PyTuple_SET_ITEM(newargs, i + 2, item); + } + + callable = PyObject_Call(st->partial, newargs, kwargs); + Py_DECREF(newargs); + if (callable == NULL) + return -1; + + newargs = PyTuple_New(0); + if (newargs == NULL) { + Py_DECREF(callable); + return -1; + } + + if (save(self, callable, 0) < 0 || + save(self, newargs, 0) < 0 || + _Pickler_Write(self, &reduce_op, 1) < 0) { + Py_DECREF(newargs); + Py_DECREF(callable); + return -1; + } + Py_DECREF(newargs); + Py_DECREF(callable); } } else if (use_newobj) { diff --git a/Objects/typeobject.c b/Objects/typeobject.c --- a/Objects/typeobject.c +++ b/Objects/typeobject.c @@ -4101,7 +4101,7 @@ } static PyObject * -reduce_newobj(PyObject *obj, int proto) +reduce_newobj(PyObject *obj) { PyObject *args = NULL, *kwargs = NULL; PyObject *copyreg; @@ -4153,7 +4153,7 @@ } Py_DECREF(args); } - else if (proto >= 4) { + else { _Py_IDENTIFIER(__newobj_ex__); newobj = _PyObject_GetAttrId(copyreg, &PyId___newobj_ex__); @@ -4171,16 +4171,6 @@ return NULL; } } - else { - PyErr_SetString(PyExc_ValueError, - "must use protocol 4 or greater to copy this " - "object; since __getnewargs_ex__ returned " - "keyword arguments."); - Py_DECREF(args); - Py_DECREF(kwargs); - Py_DECREF(copyreg); - return NULL; - } state = _PyObject_GetState(obj); if (state == NULL) { @@ -4225,7 +4215,7 @@ PyObject *copyreg, *res; if (proto >= 2) - return reduce_newobj(self, proto); + return reduce_newobj(self); copyreg = import_copyreg(); if (!copyreg) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Oct 11 04:32:47 2015 From: python-checkins at python.org (benjamin.peterson) Date: Sun, 11 Oct 2015 02:32:47 
+0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?b?KTogbWVyZ2UgMy41ICgjMjUzNzEp?= Message-ID: <20151011023247.128856.75201@psf.io> https://hg.python.org/cpython/rev/d2d8c1c8c258 changeset: 98661:d2d8c1c8c258 parent: 98658:bc5894a3a0e6 parent: 98660:ae98209ff69a user: Benjamin Peterson date: Sat Oct 10 19:32:41 2015 -0700 summary: merge 3.5 (#25371) files: Modules/selectmodule.c | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Modules/selectmodule.c b/Modules/selectmodule.c --- a/Modules/selectmodule.c +++ b/Modules/selectmodule.c @@ -2363,7 +2363,7 @@ that are ready.\n\ \n\ *** IMPORTANT NOTICE ***\n\ -On Windows only sockets are supported; on Unix, all file\n\ +On Windows, only sockets are supported; on Unix, all file\n\ descriptors can be used."); static PyMethodDef select_methods[] = { @@ -2381,7 +2381,7 @@ "This module supports asynchronous I/O on multiple file descriptors.\n\ \n\ *** IMPORTANT NOTICE ***\n\ -On Windows only sockets are supported; on Unix, all file descriptors."); +On Windows, only sockets are supported; on Unix, all file descriptors."); static struct PyModuleDef selectmodule = { -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Oct 11 04:32:47 2015 From: python-checkins at python.org (benjamin.peterson) Date: Sun, 11 Oct 2015 02:32:47 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E4=29=3A_add_a_missing_?= =?utf-8?q?comma_=28closes_=2325371=29?= Message-ID: <20151011023247.55480.57966@psf.io> https://hg.python.org/cpython/rev/99c82576bb70 changeset: 98659:99c82576bb70 branch: 3.4 parent: 98655:1666eda8cfa9 user: Benjamin Peterson date: Sat Oct 10 19:32:20 2015 -0700 summary: add a missing comma (closes #25371) files: Modules/selectmodule.c | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Modules/selectmodule.c b/Modules/selectmodule.c --- a/Modules/selectmodule.c +++ b/Modules/selectmodule.c @@ -2256,7 +2256,7 @@ that are ready.\n\ \n\ *** IMPORTANT NOTICE ***\n\ -On Windows only sockets are supported; on Unix, all file\n\ +On Windows, only sockets are supported; on Unix, all file\n\ descriptors can be used."); static PyMethodDef select_methods[] = { @@ -2274,7 +2274,7 @@ "This module supports asynchronous I/O on multiple file descriptors.\n\ \n\ *** IMPORTANT NOTICE ***\n\ -On Windows only sockets are supported; on Unix, all file descriptors."); +On Windows, only sockets are supported; on Unix, all file descriptors."); static struct PyModuleDef selectmodule = { -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Oct 11 04:32:47 2015 From: python-checkins at python.org (benjamin.peterson) Date: Sun, 11 Oct 2015 02:32:47 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_merge_3=2E4_=28=2325371=29?= Message-ID: <20151011023247.128830.6410@psf.io> https://hg.python.org/cpython/rev/ae98209ff69a changeset: 98660:ae98209ff69a branch: 3.5 parent: 98656:0f91679f7f13 parent: 98659:99c82576bb70 user: Benjamin Peterson date: Sat Oct 10 19:32:33 2015 -0700 summary: merge 3.4 (#25371) files: Modules/selectmodule.c | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Modules/selectmodule.c b/Modules/selectmodule.c --- a/Modules/selectmodule.c +++ b/Modules/selectmodule.c @@ -2363,7 +2363,7 @@ that are ready.\n\ \n\ *** IMPORTANT NOTICE ***\n\ -On Windows only sockets are supported; on Unix, all file\n\ +On Windows, only sockets are supported; on Unix, all 
file\n\ descriptors can be used."); static PyMethodDef select_methods[] = { @@ -2381,7 +2381,7 @@ "This module supports asynchronous I/O on multiple file descriptors.\n\ \n\ *** IMPORTANT NOTICE ***\n\ -On Windows only sockets are supported; on Unix, all file descriptors."); +On Windows, only sockets are supported; on Unix, all file descriptors."); static struct PyModuleDef selectmodule = { -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Oct 11 04:37:04 2015 From: python-checkins at python.org (benjamin.peterson) Date: Sun, 11 Oct 2015 02:37:04 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=282=2E7=29=3A_use_the_with_s?= =?utf-8?q?tatement_for_locking_the_internal_condition_=28closes_=2325362?= =?utf-8?q?=29?= Message-ID: <20151011023704.2675.93615@psf.io> https://hg.python.org/cpython/rev/9049134eecae changeset: 98662:9049134eecae branch: 2.7 parent: 98645:b9a0ecae02cb user: Benjamin Peterson date: Sat Oct 10 19:34:46 2015 -0700 summary: use the with statement for locking the internal condition (closes #25362) Patch by Nir Soffer. files: Lib/threading.py | 15 +++------------ 1 files changed, 3 insertions(+), 12 deletions(-) diff --git a/Lib/threading.py b/Lib/threading.py --- a/Lib/threading.py +++ b/Lib/threading.py @@ -580,12 +580,9 @@ that call wait() once the flag is true will not block at all. """ - self.__cond.acquire() - try: + with self.__cond: self.__flag = True self.__cond.notify_all() - finally: - self.__cond.release() def clear(self): """Reset the internal flag to false. @@ -594,11 +591,8 @@ set the internal flag to true again. """ - self.__cond.acquire() - try: + with self.__cond: self.__flag = False - finally: - self.__cond.release() def wait(self, timeout=None): """Block until the internal flag is true. @@ -615,13 +609,10 @@ True except if a timeout is given and the operation times out. """ - self.__cond.acquire() - try: + with self.__cond: if not self.__flag: self.__cond.wait(timeout) return self.__flag - finally: - self.__cond.release() # Helper to generate new thread names _counter = _count().next -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Oct 11 04:37:04 2015 From: python-checkins at python.org (benjamin.peterson) Date: Sun, 11 Oct 2015 02:37:04 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E4=29=3A_use_the_with_s?= =?utf-8?q?tatement_for_locking_the_internal_condition_=28closes_=2325362?= =?utf-8?q?=29?= Message-ID: <20151011023704.2677.26220@psf.io> https://hg.python.org/cpython/rev/62e87422a1a9 changeset: 98663:62e87422a1a9 branch: 3.4 parent: 98659:99c82576bb70 user: Benjamin Peterson date: Sat Oct 10 19:34:46 2015 -0700 summary: use the with statement for locking the internal condition (closes #25362) Patch by Nir Soffer. files: Lib/threading.py | 15 +++------------ 1 files changed, 3 insertions(+), 12 deletions(-) diff --git a/Lib/threading.py b/Lib/threading.py --- a/Lib/threading.py +++ b/Lib/threading.py @@ -511,12 +511,9 @@ that call wait() once the flag is true will not block at all. """ - self._cond.acquire() - try: + with self._cond: self._flag = True self._cond.notify_all() - finally: - self._cond.release() def clear(self): """Reset the internal flag to false. @@ -525,11 +522,8 @@ set the internal flag to true again. """ - self._cond.acquire() - try: + with self._cond: self._flag = False - finally: - self._cond.release() def wait(self, timeout=None): """Block until the internal flag is true. 
@@ -546,14 +540,11 @@ True except if a timeout is given and the operation times out. """ - self._cond.acquire() - try: + with self._cond: signaled = self._flag if not signaled: signaled = self._cond.wait(timeout) return signaled - finally: - self._cond.release() # A barrier class. Inspired in part by the pthread_barrier_* api and -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Oct 11 04:37:05 2015 From: python-checkins at python.org (benjamin.peterson) Date: Sun, 11 Oct 2015 02:37:05 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?b?KTogbWVyZ2UgMy41ICgjMjUzNjIp?= Message-ID: <20151011023705.18382.60753@psf.io> https://hg.python.org/cpython/rev/555ab69f071a changeset: 98665:555ab69f071a parent: 98661:d2d8c1c8c258 parent: 98664:794101c6e560 user: Benjamin Peterson date: Sat Oct 10 19:36:51 2015 -0700 summary: merge 3.5 (#25362) files: Lib/threading.py | 15 +++------------ 1 files changed, 3 insertions(+), 12 deletions(-) diff --git a/Lib/threading.py b/Lib/threading.py --- a/Lib/threading.py +++ b/Lib/threading.py @@ -514,12 +514,9 @@ that call wait() once the flag is true will not block at all. """ - self._cond.acquire() - try: + with self._cond: self._flag = True self._cond.notify_all() - finally: - self._cond.release() def clear(self): """Reset the internal flag to false. @@ -528,11 +525,8 @@ set the internal flag to true again. """ - self._cond.acquire() - try: + with self._cond: self._flag = False - finally: - self._cond.release() def wait(self, timeout=None): """Block until the internal flag is true. @@ -549,14 +543,11 @@ True except if a timeout is given and the operation times out. """ - self._cond.acquire() - try: + with self._cond: signaled = self._flag if not signaled: signaled = self._cond.wait(timeout) return signaled - finally: - self._cond.release() # A barrier class. Inspired in part by the pthread_barrier_* api and -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Oct 11 04:37:05 2015 From: python-checkins at python.org (benjamin.peterson) Date: Sun, 11 Oct 2015 02:37:05 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_merge_3=2E4_=28=2325362=29?= Message-ID: <20151011023705.128836.89651@psf.io> https://hg.python.org/cpython/rev/794101c6e560 changeset: 98664:794101c6e560 branch: 3.5 parent: 98660:ae98209ff69a parent: 98663:62e87422a1a9 user: Benjamin Peterson date: Sat Oct 10 19:36:40 2015 -0700 summary: merge 3.4 (#25362) files: Lib/threading.py | 15 +++------------ 1 files changed, 3 insertions(+), 12 deletions(-) diff --git a/Lib/threading.py b/Lib/threading.py --- a/Lib/threading.py +++ b/Lib/threading.py @@ -514,12 +514,9 @@ that call wait() once the flag is true will not block at all. """ - self._cond.acquire() - try: + with self._cond: self._flag = True self._cond.notify_all() - finally: - self._cond.release() def clear(self): """Reset the internal flag to false. @@ -528,11 +525,8 @@ set the internal flag to true again. """ - self._cond.acquire() - try: + with self._cond: self._flag = False - finally: - self._cond.release() def wait(self, timeout=None): """Block until the internal flag is true. @@ -549,14 +543,11 @@ True except if a timeout is given and the operation times out. """ - self._cond.acquire() - try: + with self._cond: signaled = self._flag if not signaled: signaled = self._cond.wait(timeout) return signaled - finally: - self._cond.release() # A barrier class. 
Inspired in part by the pthread_barrier_* api and -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Oct 11 05:56:06 2015 From: python-checkins at python.org (raymond.hettinger) Date: Sun, 11 Oct 2015 03:56:06 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Minor_tweak=2E__Make_the_m?= =?utf-8?q?axlen_comparisons_a_little_more_clear_and_consistent=2E?= Message-ID: <20151011035606.483.78698@psf.io> https://hg.python.org/cpython/rev/9165e297d65b changeset: 98666:9165e297d65b user: Raymond Hettinger date: Sat Oct 10 23:56:02 2015 -0400 summary: Minor tweak. Make the maxlen comparisons a little more clear and consistent. files: Modules/_collectionsmodule.c | 12 ++++++------ 1 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Modules/_collectionsmodule.c b/Modules/_collectionsmodule.c --- a/Modules/_collectionsmodule.c +++ b/Modules/_collectionsmodule.c @@ -281,7 +281,7 @@ static void deque_trim_right(dequeobject *deque) { - if (deque->maxlen != -1 && Py_SIZE(deque) > deque->maxlen) { + if (deque->maxlen >= 0 && Py_SIZE(deque) > deque->maxlen) { PyObject *rv = deque_pop(deque, NULL); assert(rv != NULL); assert(Py_SIZE(deque) <= deque->maxlen); @@ -292,7 +292,7 @@ static void deque_trim_left(dequeobject *deque) { - if (deque->maxlen != -1 && Py_SIZE(deque) > deque->maxlen) { + if (deque->maxlen >= 0 && Py_SIZE(deque) > deque->maxlen) { PyObject *rv = deque_popleft(deque, NULL); assert(rv != NULL); assert(Py_SIZE(deque) <= deque->maxlen); @@ -385,7 +385,7 @@ { PyObject *it, *item; PyObject *(*iternext)(PyObject *); - int trim = (deque->maxlen != -1); + int trim = (deque->maxlen >= 0); /* Handle case where id(deque) == id(iterable) */ if ((PyObject *)deque == iterable) { @@ -447,7 +447,7 @@ { PyObject *it, *item; PyObject *(*iternext)(PyObject *); - int trim = (deque->maxlen != -1); + int trim = (deque->maxlen >= 0); /* Handle case where id(deque) == id(iterable) */ if ((PyObject *)deque == iterable) { @@ -686,7 +686,7 @@ /* common case, repeating a single element */ PyObject *item = deque->leftblock->data[deque->leftindex]; - if (deque->maxlen != -1 && n > deque->maxlen) + if (deque->maxlen >= 0 && n > deque->maxlen) n = deque->maxlen; if (n > MAX_DEQUE_LEN) @@ -1355,7 +1355,7 @@ Py_ReprLeave(deque); return NULL; } - if (((dequeobject *)deque)->maxlen != -1) + if (((dequeobject *)deque)->maxlen >= 0) result = PyUnicode_FromFormat("deque(%R, maxlen=%zd)", aslist, ((dequeobject *)deque)->maxlen); else -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Oct 11 08:25:02 2015 From: python-checkins at python.org (benjamin.peterson) Date: Sun, 11 Oct 2015 06:25:02 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogZG9uJ3QgbWVudGlv?= =?utf-8?q?n_Python_2=2E2_=28closes_=2325375=29?= Message-ID: <20151011062501.18384.43901@psf.io> https://hg.python.org/cpython/rev/275d388ca1fc changeset: 98668:275d388ca1fc branch: 3.4 parent: 98663:62e87422a1a9 user: Benjamin Peterson date: Sat Oct 10 23:23:55 2015 -0700 summary: don't mention Python 2.2 (closes #25375) files: Doc/faq/extending.rst | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Doc/faq/extending.rst b/Doc/faq/extending.rst --- a/Doc/faq/extending.rst +++ b/Doc/faq/extending.rst @@ -443,8 +443,8 @@ Can I create an object class with some methods implemented in C and others in Python (e.g. through inheritance)? 
---------------------------------------------------------------------------------------------------------------- -In Python 2.2, you can inherit from built-in classes such as :class:`int`, -:class:`list`, :class:`dict`, etc. +Yes, you can inherit from built-in classes such as :class:`int`, :class:`list`, +:class:`dict`, etc. The Boost Python Library (BPL, http://www.boost.org/libs/python/doc/index.html) provides a way of doing this from C++ (i.e. you can inherit from an extension -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Oct 11 08:25:02 2015 From: python-checkins at python.org (benjamin.peterson) Date: Sun, 11 Oct 2015 06:25:02 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?b?KTogbWVyZ2UgMy41ICgjMjUzNzUp?= Message-ID: <20151011062502.481.88588@psf.io> https://hg.python.org/cpython/rev/0017245ff5ce changeset: 98670:0017245ff5ce parent: 98666:9165e297d65b parent: 98669:41dc8034458c user: Benjamin Peterson date: Sat Oct 10 23:24:53 2015 -0700 summary: merge 3.5 (#25375) files: Doc/faq/extending.rst | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Doc/faq/extending.rst b/Doc/faq/extending.rst --- a/Doc/faq/extending.rst +++ b/Doc/faq/extending.rst @@ -443,8 +443,8 @@ Can I create an object class with some methods implemented in C and others in Python (e.g. through inheritance)? ---------------------------------------------------------------------------------------------------------------- -In Python 2.2, you can inherit from built-in classes such as :class:`int`, -:class:`list`, :class:`dict`, etc. +Yes, you can inherit from built-in classes such as :class:`int`, :class:`list`, +:class:`dict`, etc. The Boost Python Library (BPL, http://www.boost.org/libs/python/doc/index.html) provides a way of doing this from C++ (i.e. you can inherit from an extension -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Oct 11 08:25:02 2015 From: python-checkins at python.org (benjamin.peterson) Date: Sun, 11 Oct 2015 06:25:02 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogZG9uJ3QgbWVudGlv?= =?utf-8?q?n_Python_2=2E2_=28closes_=2325375=29?= Message-ID: <20151011062501.18384.22961@psf.io> https://hg.python.org/cpython/rev/a81b47fb5848 changeset: 98667:a81b47fb5848 branch: 2.7 parent: 98662:9049134eecae user: Benjamin Peterson date: Sat Oct 10 23:23:55 2015 -0700 summary: don't mention Python 2.2 (closes #25375) files: Doc/faq/extending.rst | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Doc/faq/extending.rst b/Doc/faq/extending.rst --- a/Doc/faq/extending.rst +++ b/Doc/faq/extending.rst @@ -440,8 +440,8 @@ Can I create an object class with some methods implemented in C and others in Python (e.g. through inheritance)? ---------------------------------------------------------------------------------------------------------------- -In Python 2.2, you can inherit from built-in classes such as :class:`int`, -:class:`list`, :class:`dict`, etc. +Yes, you can inherit from built-in classes such as :class:`int`, :class:`list`, +:class:`dict`, etc. The Boost Python Library (BPL, http://www.boost.org/libs/python/doc/index.html) provides a way of doing this from C++ (i.e. 
you can inherit from an extension -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Oct 11 08:25:03 2015 From: python-checkins at python.org (benjamin.peterson) Date: Sun, 11 Oct 2015 06:25:03 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_merge_3=2E4?= Message-ID: <20151011062502.70963.60333@psf.io> https://hg.python.org/cpython/rev/41dc8034458c changeset: 98669:41dc8034458c branch: 3.5 parent: 98664:794101c6e560 parent: 98668:275d388ca1fc user: Benjamin Peterson date: Sat Oct 10 23:24:39 2015 -0700 summary: merge 3.4 files: Doc/faq/extending.rst | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Doc/faq/extending.rst b/Doc/faq/extending.rst --- a/Doc/faq/extending.rst +++ b/Doc/faq/extending.rst @@ -443,8 +443,8 @@ Can I create an object class with some methods implemented in C and others in Python (e.g. through inheritance)? ---------------------------------------------------------------------------------------------------------------- -In Python 2.2, you can inherit from built-in classes such as :class:`int`, -:class:`list`, :class:`dict`, etc. +Yes, you can inherit from built-in classes such as :class:`int`, :class:`list`, +:class:`dict`, etc. The Boost Python Library (BPL, http://www.boost.org/libs/python/doc/index.html) provides a way of doing this from C++ (i.e. you can inherit from an extension -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Oct 11 09:48:10 2015 From: python-checkins at python.org (victor.stinner) Date: Sun, 11 Oct 2015 07:48:10 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Close_=2325368=3A_Fix_test?= =?utf-8?q?=5Feintr_when_Python_is_compiled_without_thread_support?= Message-ID: <20151011074810.18378.98400@psf.io> https://hg.python.org/cpython/rev/42101b8f777c changeset: 98671:42101b8f777c user: Victor Stinner date: Sun Oct 11 09:47:17 2015 +0200 summary: Close #25368: Fix test_eintr when Python is compiled without thread support files: Lib/test/eintrdata/eintr_tester.py | 6 ++++-- 1 files changed, 4 insertions(+), 2 deletions(-) diff --git a/Lib/test/eintrdata/eintr_tester.py b/Lib/test/eintrdata/eintr_tester.py --- a/Lib/test/eintrdata/eintr_tester.py +++ b/Lib/test/eintrdata/eintr_tester.py @@ -52,7 +52,8 @@ cls.signal_period) # Issue #25277: Use faulthandler to try to debug a hang on FreeBSD - faulthandler.dump_traceback_later(10 * 60, exit=True) + if hasattr(faulthandler, 'dump_traceback_later'): + faulthandler.dump_traceback_later(10 * 60, exit=True) @classmethod def stop_alarm(cls): @@ -62,7 +63,8 @@ def tearDownClass(cls): cls.stop_alarm() signal.signal(signal.SIGALRM, cls.orig_handler) - faulthandler.cancel_dump_traceback_later() + if hasattr(faulthandler, 'cancel_dump_traceback_later'): + faulthandler.cancel_dump_traceback_later() @classmethod def _sleep(cls): -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Oct 11 09:54:57 2015 From: python-checkins at python.org (victor.stinner) Date: Sun, 11 Oct 2015 07:54:57 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Close_=2324784=3A_Fix_comp?= =?utf-8?q?ilation_without_thread_support?= Message-ID: <20151011075457.3289.45670@psf.io> https://hg.python.org/cpython/rev/3ab4f23ab983 changeset: 98672:3ab4f23ab983 user: Victor Stinner date: Sun Oct 11 09:54:42 2015 +0200 summary: Close #24784: Fix compilation without thread support Add "#ifdef WITH_THREAD" around cals to: * PyGILState_Check() * _PyImport_AcquireLock() * 
_PyImport_ReleaseLock() files: Modules/_posixsubprocess.c | 12 ++++++++++-- Modules/socketmodule.c | 2 ++ Python/fileutils.c | 6 ++++++ 3 files changed, 18 insertions(+), 2 deletions(-) diff --git a/Modules/_posixsubprocess.c b/Modules/_posixsubprocess.c --- a/Modules/_posixsubprocess.c +++ b/Modules/_posixsubprocess.c @@ -549,7 +549,9 @@ int need_to_reenable_gc = 0; char *const *exec_array, *const *argv = NULL, *const *envp = NULL; Py_ssize_t arg_num; +#ifdef WITH_THREAD int import_lock_held = 0; +#endif if (!PyArg_ParseTuple( args, "OOpOOOiiiiiiiiiiO:fork_exec", @@ -644,8 +646,10 @@ preexec_fn_args_tuple = PyTuple_New(0); if (!preexec_fn_args_tuple) goto cleanup; +#ifdef WITH_THREAD _PyImport_AcquireLock(); import_lock_held = 1; +#endif } if (cwd_obj != Py_None) { @@ -688,12 +692,14 @@ /* Capture the errno exception before errno can be clobbered. */ PyErr_SetFromErrno(PyExc_OSError); } - if (preexec_fn != Py_None && - _PyImport_ReleaseLock() < 0 && !PyErr_Occurred()) { +#ifdef WITH_THREAD + if (preexec_fn != Py_None + && _PyImport_ReleaseLock() < 0 && !PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "not holding the import lock"); } import_lock_held = 0; +#endif /* Parent process */ if (envp) @@ -716,8 +722,10 @@ return PyLong_FromPid(pid); cleanup: +#ifdef WITH_THREAD if (import_lock_held) _PyImport_ReleaseLock(); +#endif if (envp) _Py_FreeCharPArray(envp); if (argv) diff --git a/Modules/socketmodule.c b/Modules/socketmodule.c --- a/Modules/socketmodule.c +++ b/Modules/socketmodule.c @@ -719,8 +719,10 @@ int deadline_initialized = 0; int res; +#ifdef WITH_THREAD /* sock_call() must be called with the GIL held. */ assert(PyGILState_Check()); +#endif /* outer loop to retry select() when select() is interrupted by a signal or to retry select()+sock_func() on false positive (see above) */ diff --git a/Python/fileutils.c b/Python/fileutils.c --- a/Python/fileutils.c +++ b/Python/fileutils.c @@ -986,8 +986,10 @@ int _Py_open(const char *pathname, int flags) { +#ifdef WITH_THREAD /* _Py_open() must be called with the GIL held. */ assert(PyGILState_Check()); +#endif return _Py_open_impl(pathname, flags, 1); } @@ -1080,7 +1082,9 @@ wchar_t wmode[10]; int usize; +#ifdef WITH_THREAD assert(PyGILState_Check()); +#endif if (!PyUnicode_Check(path)) { PyErr_Format(PyExc_TypeError, @@ -1108,7 +1112,9 @@ PyObject *bytes; char *path_bytes; +#ifdef WITH_THREAD assert(PyGILState_Check()); +#endif if (!PyUnicode_FSConverter(path, &bytes)) return NULL; -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Oct 11 10:04:39 2015 From: python-checkins at python.org (victor.stinner) Date: Sun, 11 Oct 2015 08:04:39 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_test=5Fregrtest=3A_catch_s?= =?utf-8?q?tderr_in_test=5Fnowindows=28=29?= Message-ID: <20151011080437.128854.80722@psf.io> https://hg.python.org/cpython/rev/df4ddecbc3a1 changeset: 98674:df4ddecbc3a1 user: Victor Stinner date: Sun Oct 11 10:04:26 2015 +0200 summary: test_regrtest: catch stderr in test_nowindows() Check also that the deprecation warning is emited. 
files: Lib/test/test_regrtest.py | 6 +++++- 1 files changed, 5 insertions(+), 1 deletions(-) diff --git a/Lib/test/test_regrtest.py b/Lib/test/test_regrtest.py --- a/Lib/test/test_regrtest.py +++ b/Lib/test/test_regrtest.py @@ -5,6 +5,7 @@ """ import argparse +import contextlib import faulthandler import getopt import io @@ -247,8 +248,11 @@ def test_nowindows(self): for opt in '-n', '--nowindows': with self.subTest(opt=opt): - ns = libregrtest._parse_args([opt]) + with contextlib.redirect_stderr(io.StringIO()) as stderr: + ns = libregrtest._parse_args([opt]) self.assertTrue(ns.nowindows) + err = stderr.getvalue() + self.assertIn('the --nowindows (-n) option is deprecated', err) def test_forever(self): for opt in '-F', '--forever': -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Oct 11 10:04:39 2015 From: python-checkins at python.org (victor.stinner) Date: Sun, 11 Oct 2015 08:04:39 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Close_=2325369=3A_Fix_test?= =?utf-8?q?=5Fregrtest_without_thread_support?= Message-ID: <20151011080437.70994.38060@psf.io> https://hg.python.org/cpython/rev/0d0d83b1c078 changeset: 98673:0d0d83b1c078 user: Victor Stinner date: Sun Oct 11 10:03:28 2015 +0200 summary: Close #25369: Fix test_regrtest without thread support files: Lib/test/test_regrtest.py | 5 ++++- 1 files changed, 4 insertions(+), 1 deletions(-) diff --git a/Lib/test/test_regrtest.py b/Lib/test/test_regrtest.py --- a/Lib/test/test_regrtest.py +++ b/Lib/test/test_regrtest.py @@ -7,6 +7,7 @@ import argparse import faulthandler import getopt +import io import os.path import platform import re @@ -433,7 +434,9 @@ self.tests = [self.create_test() for index in range(self.NTEST)] self.python_args = ['-Wd', '-E', '-bb'] - self.regrtest_args = ['-uall', '-rwW', '--timeout', '3600', '-j4'] + self.regrtest_args = ['-uall', '-rwW'] + if hasattr(faulthandler, 'dump_traceback_later'): + self.regrtest_args.extend(('--timeout', '3600', '-j4')) if sys.platform == 'win32': self.regrtest_args.append('-n') -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Oct 11 10:12:46 2015 From: python-checkins at python.org (victor.stinner) Date: Sun, 11 Oct 2015 08:12:46 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy41KTogQ2xvc2UgIzI0Nzg0?= =?utf-8?q?=3A_Fix_compilation_without_thread_support?= Message-ID: <20151011081246.3271.5592@psf.io> https://hg.python.org/cpython/rev/20ef719b1cb9 changeset: 98675:20ef719b1cb9 branch: 3.5 parent: 98669:41dc8034458c user: Victor Stinner date: Sun Oct 11 09:54:42 2015 +0200 summary: Close #24784: Fix compilation without thread support Add "#ifdef WITH_THREAD" around cals to: * PyGILState_Check() * _PyImport_AcquireLock() * _PyImport_ReleaseLock() files: Modules/_posixsubprocess.c | 12 ++++++++++-- Modules/socketmodule.c | 2 ++ Python/fileutils.c | 6 ++++++ 3 files changed, 18 insertions(+), 2 deletions(-) diff --git a/Modules/_posixsubprocess.c b/Modules/_posixsubprocess.c --- a/Modules/_posixsubprocess.c +++ b/Modules/_posixsubprocess.c @@ -549,7 +549,9 @@ int need_to_reenable_gc = 0; char *const *exec_array, *const *argv = NULL, *const *envp = NULL; Py_ssize_t arg_num; +#ifdef WITH_THREAD int import_lock_held = 0; +#endif if (!PyArg_ParseTuple( args, "OOpOOOiiiiiiiiiiO:fork_exec", @@ -644,8 +646,10 @@ preexec_fn_args_tuple = PyTuple_New(0); if (!preexec_fn_args_tuple) goto cleanup; +#ifdef WITH_THREAD _PyImport_AcquireLock(); import_lock_held = 1; +#endif } if (cwd_obj != Py_None) { @@ -688,12 
+692,14 @@ /* Capture the errno exception before errno can be clobbered. */ PyErr_SetFromErrno(PyExc_OSError); } - if (preexec_fn != Py_None && - _PyImport_ReleaseLock() < 0 && !PyErr_Occurred()) { +#ifdef WITH_THREAD + if (preexec_fn != Py_None + && _PyImport_ReleaseLock() < 0 && !PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "not holding the import lock"); } import_lock_held = 0; +#endif /* Parent process */ if (envp) @@ -716,8 +722,10 @@ return PyLong_FromPid(pid); cleanup: +#ifdef WITH_THREAD if (import_lock_held) _PyImport_ReleaseLock(); +#endif if (envp) _Py_FreeCharPArray(envp); if (argv) diff --git a/Modules/socketmodule.c b/Modules/socketmodule.c --- a/Modules/socketmodule.c +++ b/Modules/socketmodule.c @@ -719,8 +719,10 @@ int deadline_initialized = 0; int res; +#ifdef WITH_THREAD /* sock_call() must be called with the GIL held. */ assert(PyGILState_Check()); +#endif /* outer loop to retry select() when select() is interrupted by a signal or to retry select()+sock_func() on false positive (see above) */ diff --git a/Python/fileutils.c b/Python/fileutils.c --- a/Python/fileutils.c +++ b/Python/fileutils.c @@ -986,8 +986,10 @@ int _Py_open(const char *pathname, int flags) { +#ifdef WITH_THREAD /* _Py_open() must be called with the GIL held. */ assert(PyGILState_Check()); +#endif return _Py_open_impl(pathname, flags, 1); } @@ -1080,7 +1082,9 @@ wchar_t wmode[10]; int usize; +#ifdef WITH_THREAD assert(PyGILState_Check()); +#endif if (!PyUnicode_Check(path)) { PyErr_Format(PyExc_TypeError, @@ -1108,7 +1112,9 @@ PyObject *bytes; char *path_bytes; +#ifdef WITH_THREAD assert(PyGILState_Check()); +#endif if (!PyUnicode_FSConverter(path, &bytes)) return NULL; -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Oct 11 10:12:48 2015 From: python-checkins at python.org (victor.stinner) Date: Sun, 11 Oct 2015 08:12:48 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Merge_3=2E5_=28test=5Fcoroutines=2C_issue_=2325367=29?= Message-ID: <20151011081247.18374.14872@psf.io> https://hg.python.org/cpython/rev/d8002372e5df changeset: 98677:d8002372e5df parent: 98674:df4ddecbc3a1 parent: 98676:d8737b841fcf user: Victor Stinner date: Sun Oct 11 10:10:49 2015 +0200 summary: Merge 3.5 (test_coroutines, issue #25367) files: Lib/test/test_coroutines.py | 4 +++- 1 files changed, 3 insertions(+), 1 deletions(-) diff --git a/Lib/test/test_coroutines.py b/Lib/test/test_coroutines.py --- a/Lib/test/test_coroutines.py +++ b/Lib/test/test_coroutines.py @@ -1322,7 +1322,9 @@ class CoroAsyncIOCompatTest(unittest.TestCase): def test_asyncio_1(self): - import asyncio + # asyncio cannot be imported when Python is compiled without thread + # support + support.import_module('asyncio') class MyException(Exception): pass -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Oct 11 10:12:48 2015 From: python-checkins at python.org (victor.stinner) Date: Sun, 11 Oct 2015 08:12:48 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy41KTogQ2xvc2UgIzI1MzY3?= =?utf-8?q?=3A_Fix_test=5Fcoroutines_with_no_thread_support?= Message-ID: <20151011081247.128854.37996@psf.io> https://hg.python.org/cpython/rev/d8737b841fcf changeset: 98676:d8737b841fcf branch: 3.5 user: Victor Stinner date: Sun Oct 11 10:10:31 2015 +0200 summary: Close #25367: Fix test_coroutines with no thread support Skip test_asyncio_1() when the asyncio module cannot be imported because CPython is compiled with no thread support. 
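As a rough, simplified sketch of the pattern adopted by this fix (not the actual test body): test.support.import_module() turns a failed import into a skipped test, so builds without thread support skip the asyncio-based test instead of erroring out at import time.

    import unittest
    from test import support

    class CoroAsyncIOCompatTest(unittest.TestCase):
        def test_asyncio_1(self):
            # Raises unittest.SkipTest instead of ImportError when asyncio
            # cannot be imported (e.g. CPython built without threads).
            asyncio = support.import_module('asyncio')
            loop = asyncio.new_event_loop()
            try:
                self.assertFalse(loop.is_closed())
            finally:
                loop.close()

    if __name__ == '__main__':
        unittest.main()
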
files: Lib/test/test_coroutines.py | 4 +++- 1 files changed, 3 insertions(+), 1 deletions(-) diff --git a/Lib/test/test_coroutines.py b/Lib/test/test_coroutines.py --- a/Lib/test/test_coroutines.py +++ b/Lib/test/test_coroutines.py @@ -1322,7 +1322,9 @@ class CoroAsyncIOCompatTest(unittest.TestCase): def test_asyncio_1(self): - import asyncio + # asyncio cannot be imported when Python is compiled without thread + # support + support.import_module('asyncio') class MyException(Exception): pass -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Oct 11 10:41:11 2015 From: python-checkins at python.org (victor.stinner) Date: Sun, 11 Oct 2015 08:41:11 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Null_merge_3=2E5?= Message-ID: <20151011084111.7238.17610@psf.io> https://hg.python.org/cpython/rev/3e2d679aaa82 changeset: 98680:3e2d679aaa82 parent: 98678:746b98342943 parent: 98679:ef70e5373131 user: Victor Stinner date: Sun Oct 11 10:40:27 2015 +0200 summary: Null merge 3.5 files: -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Oct 11 10:41:11 2015 From: python-checkins at python.org (victor.stinner) Date: Sun, 11 Oct 2015 08:41:11 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Close_=2325373=3A_Fix_regr?= =?utf-8?q?test_--slow_with_interrupted_test?= Message-ID: <20151011084111.457.50864@psf.io> https://hg.python.org/cpython/rev/746b98342943 changeset: 98678:746b98342943 user: Victor Stinner date: Sun Oct 11 10:37:25 2015 +0200 summary: Close #25373: Fix regrtest --slow with interrupted test * Fix accumulate_result(): don't use time on interrupted and failed test * Add unit test for interrupted test * Add unit test on --slow with interrupted test, with and without multiprocessing files: Lib/test/libregrtest/main.py | 12 ++++-- Lib/test/test_regrtest.py | 43 ++++++++++++++++++++++- 2 files changed, 49 insertions(+), 6 deletions(-) diff --git a/Lib/test/libregrtest/main.py b/Lib/test/libregrtest/main.py --- a/Lib/test/libregrtest/main.py +++ b/Lib/test/libregrtest/main.py @@ -7,10 +7,11 @@ import sysconfig import tempfile import textwrap +from test.libregrtest.cmdline import _parse_args from test.libregrtest.runtest import ( findtests, runtest, - STDTESTS, NOTTESTS, PASSED, FAILED, ENV_CHANGED, SKIPPED, RESOURCE_DENIED) -from test.libregrtest.cmdline import _parse_args + STDTESTS, NOTTESTS, PASSED, FAILED, ENV_CHANGED, SKIPPED, RESOURCE_DENIED, + INTERRUPTED, CHILD_ERROR) from test.libregrtest.setup import setup_tests from test import support try: @@ -87,7 +88,8 @@ def accumulate_result(self, test, result): ok, test_time = result - self.test_times.append((test_time, test)) + if ok not in (CHILD_ERROR, INTERRUPTED): + self.test_times.append((test_time, test)) if ok == PASSED: self.good.append(test) elif ok == FAILED: @@ -291,10 +293,12 @@ else: try: result = runtest(self.ns, test) - self.accumulate_result(test, result) except KeyboardInterrupt: + self.accumulate_result(test, (INTERRUPTED, None)) self.interrupted = True break + else: + self.accumulate_result(test, result) if self.ns.findleaks: gc.collect() diff --git a/Lib/test/test_regrtest.py b/Lib/test/test_regrtest.py --- a/Lib/test/test_regrtest.py +++ b/Lib/test/test_regrtest.py @@ -24,6 +24,16 @@ ROOT_DIR = os.path.join(os.path.dirname(__file__), '..', '..') ROOT_DIR = os.path.abspath(os.path.normpath(ROOT_DIR)) +TEST_INTERRUPTED = textwrap.dedent(""" + from signal import SIGINT + try: + from _testcapi import 
raise_signal + raise_signal(SIGINT) + except ImportError: + import os + os.kill(os.getpid(), SIGINT) + """) + class ParseArgsTestCase(unittest.TestCase): """ @@ -340,16 +350,19 @@ return list(match.group(1) for match in parser) def check_executed_tests(self, output, tests, skipped=(), failed=(), - randomize=False): + omitted=(), randomize=False): if isinstance(tests, str): tests = [tests] if isinstance(skipped, str): skipped = [skipped] if isinstance(failed, str): failed = [failed] + if isinstance(omitted, str): + omitted = [omitted] ntest = len(tests) nskipped = len(skipped) nfailed = len(failed) + nomitted = len(omitted) executed = self.parse_executed_tests(output) if randomize: @@ -375,7 +388,11 @@ regex = list_regex('%s test%s failed', failed) self.check_line(output, regex) - good = ntest - nskipped - nfailed + if omitted: + regex = list_regex('%s test%s omitted', omitted) + self.check_line(output, regex) + + good = ntest - nskipped - nfailed - nomitted if good: regex = r'%s test%s OK\.$' % (good, plural(good)) if not skipped and not failed and good > 1: @@ -607,6 +624,12 @@ output = self.run_tests('--fromfile', filename) self.check_executed_tests(output, tests) + def test_interrupted(self): + code = TEST_INTERRUPTED + test = self.create_test("sigint", code=code) + output = self.run_tests(test, exitcode=1) + self.check_executed_tests(output, test, omitted=test) + def test_slow(self): # test --slow tests = [self.create_test() for index in range(3)] @@ -617,6 +640,22 @@ % (self.TESTNAME_REGEX, len(tests))) self.check_line(output, regex) + def test_slow_interrupted(self): + # Issue #25373: test --slow with an interrupted test + code = TEST_INTERRUPTED + test = self.create_test("sigint", code=code) + + for multiprocessing in (False, True): + if multiprocessing: + args = ("--slow", "-j2", test) + else: + args = ("--slow", test) + output = self.run_tests(*args, exitcode=1) + self.check_executed_tests(output, test, omitted=test) + regex = ('10 slowest tests:\n') + self.check_line(output, regex) + self.check_line(output, 'Test suite interrupted by signal SIGINT.') + def test_coverage(self): # test --coverage test = self.create_test() -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Oct 11 10:41:12 2015 From: python-checkins at python.org (victor.stinner) Date: Sun, 11 Oct 2015 08:41:12 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy41KTogbG9zZSAjMjUzNzM6?= =?utf-8?q?_Fix_regrtest_--slow_with_interrupted_test?= Message-ID: <20151011084111.70972.33224@psf.io> https://hg.python.org/cpython/rev/ef70e5373131 changeset: 98679:ef70e5373131 branch: 3.5 parent: 98676:d8737b841fcf user: Victor Stinner date: Sun Oct 11 10:39:56 2015 +0200 summary: lose #25373: Fix regrtest --slow with interrupted test files: Lib/test/regrtest.py | 3 ++- 1 files changed, 2 insertions(+), 1 deletions(-) diff --git a/Lib/test/regrtest.py b/Lib/test/regrtest.py --- a/Lib/test/regrtest.py +++ b/Lib/test/regrtest.py @@ -659,7 +659,8 @@ def accumulate_result(test, result): ok, test_time = result - test_times.append((test_time, test)) + if ok not in (CHILD_ERROR, INTERRUPTED): + test_times.append((test_time, test)) if ok == PASSED: good.append(test) elif ok == FAILED: -- Repository URL: https://hg.python.org/cpython From solipsis at pitrou.net Sun Oct 11 10:46:00 2015 From: solipsis at pitrou.net (solipsis at pitrou.net) Date: Sun, 11 Oct 2015 08:46:00 +0000 Subject: [Python-checkins] Daily reference leaks (9165e297d65b): sum=61866 Message-ID: <20151011084600.20765.55357@psf.io> 
results for 9165e297d65b on branch "default" -------------------------------------------- test_capi leaked [5411, 5411, 5411] references, sum=16233 test_capi leaked [1421, 1423, 1423] memory blocks, sum=4267 test_format leaked [62, 62, 62] references, sum=186 test_format leaked [62, 62, 62] memory blocks, sum=186 test_functools leaked [0, 1, 3] memory blocks, sum=4 test_threading leaked [10820, 10820, 10820] references, sum=32460 test_threading leaked [2842, 2844, 2844] memory blocks, sum=8530 Command line was: ['./python', '-m', 'test.regrtest', '-uall', '-R', '3:3:/home/psf-users/antoine/refleaks/reflogf4KGyT', '--timeout', '7200'] From python-checkins at python.org Sun Oct 11 10:54:03 2015 From: python-checkins at python.org (victor.stinner) Date: Sun, 11 Oct 2015 08:54:03 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy41KTogQ2xvc2UgIzI1MzY3?= =?utf-8?q?=3A_Fix_test=5Fcoroutines=28=29?= Message-ID: <20151011085403.3295.74469@psf.io> https://hg.python.org/cpython/rev/493845d905c5 changeset: 98681:493845d905c5 branch: 3.5 parent: 98679:ef70e5373131 user: Victor Stinner date: Sun Oct 11 10:53:15 2015 +0200 summary: Close #25367: Fix test_coroutines() Fix usage of support.import_module('asyncio'): store the result in an 'asyncio' variable. files: Lib/test/test_coroutines.py | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Lib/test/test_coroutines.py b/Lib/test/test_coroutines.py --- a/Lib/test/test_coroutines.py +++ b/Lib/test/test_coroutines.py @@ -1324,7 +1324,7 @@ def test_asyncio_1(self): # asyncio cannot be imported when Python is compiled without thread # support - support.import_module('asyncio') + asyncio = support.import_module('asyncio') class MyException(Exception): pass -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Oct 11 10:54:04 2015 From: python-checkins at python.org (victor.stinner) Date: Sun, 11 Oct 2015 08:54:04 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?b?KTogTWVyZ2UgMy41ICh0ZXN0X2Nvcm91dGluZXMp?= Message-ID: <20151011085404.2679.5075@psf.io> https://hg.python.org/cpython/rev/dee8afb28831 changeset: 98682:dee8afb28831 parent: 98680:3e2d679aaa82 parent: 98681:493845d905c5 user: Victor Stinner date: Sun Oct 11 10:53:50 2015 +0200 summary: Merge 3.5 (test_coroutines) files: Lib/test/test_coroutines.py | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Lib/test/test_coroutines.py b/Lib/test/test_coroutines.py --- a/Lib/test/test_coroutines.py +++ b/Lib/test/test_coroutines.py @@ -1324,7 +1324,7 @@ def test_asyncio_1(self): # asyncio cannot be imported when Python is compiled without thread # support - support.import_module('asyncio') + asyncio = support.import_module('asyncio') class MyException(Exception): pass -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Oct 11 11:01:32 2015 From: python-checkins at python.org (victor.stinner) Date: Sun, 11 Oct 2015 09:01:32 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2325357=3A_Add_an_o?= =?utf-8?q?ptional_newline_paramer_to_binascii=2Eb2a=5Fbase64=28=29=2E?= Message-ID: <20151011090132.2681.6934@psf.io> https://hg.python.org/cpython/rev/463a09a3bfff changeset: 98683:463a09a3bfff user: Victor Stinner date: Sun Oct 11 11:01:02 2015 +0200 summary: Issue #25357: Add an optional newline paramer to binascii.b2a_base64(). base64.b64encode() uses it to avoid a memory copy. 
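A short usage sketch of the new keyword-only parameter, assuming an interpreter that already includes this change; the expected values match the new tests in the diff below.

    import binascii

    data = b'hello'

    # Default behaviour is unchanged: a trailing newline is appended.
    assert binascii.b2a_base64(data) == b'aGVsbG8=\n'

    # newline=False omits the newline, which lets base64.b64encode() use the
    # result directly instead of slicing off the last byte.
    assert binascii.b2a_base64(data, newline=False) == b'aGVsbG8='
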
files: Doc/library/binascii.rst | 11 ++++++++--- Lib/base64.py | 3 +-- Lib/test/test_binascii.py | 10 ++++++++++ Misc/NEWS | 3 +++ Modules/binascii.c | 21 +++++++++++++-------- Modules/clinic/binascii.c.h | 17 ++++++++++------- 6 files changed, 45 insertions(+), 20 deletions(-) diff --git a/Doc/library/binascii.rst b/Doc/library/binascii.rst --- a/Doc/library/binascii.rst +++ b/Doc/library/binascii.rst @@ -52,11 +52,16 @@ than one line may be passed at a time. -.. function:: b2a_base64(data) +.. function:: b2a_base64(data, \*, newline=True) Convert binary data to a line of ASCII characters in base64 coding. The return - value is the converted line, including a newline char. The length of *data* - should be at most 57 to adhere to the base64 standard. + value is the converted line, including a newline char if *newline* is + true. The length of *data* should be at most 57 to adhere to the + base64 standard. + + + .. versionchanged:: 3.6 + Added the *newline* parameter. .. function:: a2b_qp(data, header=False) diff --git a/Lib/base64.py b/Lib/base64.py --- a/Lib/base64.py +++ b/Lib/base64.py @@ -58,8 +58,7 @@ The encoded byte string is returned. """ - # Strip off the trailing newline - encoded = binascii.b2a_base64(s)[:-1] + encoded = binascii.b2a_base64(s, newline=False) if altchars is not None: assert len(altchars) == 2, repr(altchars) return encoded.translate(bytes.maketrans(b'+/', altchars)) diff --git a/Lib/test/test_binascii.py b/Lib/test/test_binascii.py --- a/Lib/test/test_binascii.py +++ b/Lib/test/test_binascii.py @@ -262,6 +262,16 @@ # non-ASCII string self.assertRaises(ValueError, a2b, "\x80") + def test_b2a_base64_newline(self): + # Issue #25357: test newline parameter + b = self.type2test(b'hello') + self.assertEqual(binascii.b2a_base64(b), + b'aGVsbG8=\n') + self.assertEqual(binascii.b2a_base64(b, newline=True), + b'aGVsbG8=\n') + self.assertEqual(binascii.b2a_base64(b, newline=False), + b'aGVsbG8=') + class ArrayBinASCIITest(BinASCIITest): def type2test(self, s): diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -51,6 +51,9 @@ Library ------- +- Issue #25357: Add an optional newline paramer to binascii.b2a_base64(). + base64.b64encode() uses it to avoid a memory copy. + - Issue #24164: Objects that need calling ``__new__`` with keyword arguments, can now be pickled using pickle protocols older than protocol version 4. diff --git a/Modules/binascii.c b/Modules/binascii.c --- a/Modules/binascii.c +++ b/Modules/binascii.c @@ -528,21 +528,22 @@ binascii.b2a_base64 data: Py_buffer - / + * + newline: int(c_default="1") = True Base64-code line of data. [clinic start generated code]*/ static PyObject * -binascii_b2a_base64_impl(PyModuleDef *module, Py_buffer *data) -/*[clinic end generated code: output=3cd61fbee2913285 input=14ec4e47371174a9]*/ +binascii_b2a_base64_impl(PyModuleDef *module, Py_buffer *data, int newline) +/*[clinic end generated code: output=19e1dd719a890b50 input=7b2ea6fa38d8924c]*/ { unsigned char *ascii_data, *bin_data; int leftbits = 0; unsigned char this_ch; unsigned int leftchar = 0; PyObject *rv; - Py_ssize_t bin_len; + Py_ssize_t bin_len, out_len; bin_data = data->buf; bin_len = data->len; @@ -555,9 +556,12 @@ } /* We're lazy and allocate too much (fixed up later). - "+3" leaves room for up to two pad characters and a trailing - newline. Note that 'b' gets encoded as 'Yg==\n' (1 in, 5 out). */ - if ( (rv=PyBytes_FromStringAndSize(NULL, bin_len*2 + 3)) == NULL ) + "+2" leaves room for up to two pad characters. 
+ Note that 'b' gets encoded as 'Yg==\n' (1 in, 5 out). */ + out_len = bin_len*2 + 2; + if (newline) + out_len++; + if ( (rv=PyBytes_FromStringAndSize(NULL, out_len)) == NULL ) return NULL; ascii_data = (unsigned char *)PyBytes_AS_STRING(rv); @@ -581,7 +585,8 @@ *ascii_data++ = table_b2a_base64[(leftchar&0xf) << 2]; *ascii_data++ = BASE64_PAD; } - *ascii_data++ = '\n'; /* Append a courtesy newline */ + if (newline) + *ascii_data++ = '\n'; /* Append a courtesy newline */ if (_PyBytes_Resize(&rv, (ascii_data - diff --git a/Modules/clinic/binascii.c.h b/Modules/clinic/binascii.c.h --- a/Modules/clinic/binascii.c.h +++ b/Modules/clinic/binascii.c.h @@ -93,26 +93,29 @@ } PyDoc_STRVAR(binascii_b2a_base64__doc__, -"b2a_base64($module, data, /)\n" +"b2a_base64($module, /, data, *, newline=True)\n" "--\n" "\n" "Base64-code line of data."); #define BINASCII_B2A_BASE64_METHODDEF \ - {"b2a_base64", (PyCFunction)binascii_b2a_base64, METH_O, binascii_b2a_base64__doc__}, + {"b2a_base64", (PyCFunction)binascii_b2a_base64, METH_VARARGS|METH_KEYWORDS, binascii_b2a_base64__doc__}, static PyObject * -binascii_b2a_base64_impl(PyModuleDef *module, Py_buffer *data); +binascii_b2a_base64_impl(PyModuleDef *module, Py_buffer *data, int newline); static PyObject * -binascii_b2a_base64(PyModuleDef *module, PyObject *arg) +binascii_b2a_base64(PyModuleDef *module, PyObject *args, PyObject *kwargs) { PyObject *return_value = NULL; + static char *_keywords[] = {"data", "newline", NULL}; Py_buffer data = {NULL, NULL}; + int newline = 1; - if (!PyArg_Parse(arg, "y*:b2a_base64", &data)) + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "y*|$i:b2a_base64", _keywords, + &data, &newline)) goto exit; - return_value = binascii_b2a_base64_impl(module, &data); + return_value = binascii_b2a_base64_impl(module, &data, newline); exit: /* Cleanup for data */ @@ -516,4 +519,4 @@ return return_value; } -/*[clinic end generated code: output=b1a3cbf7660ebaa5 input=a9049054013a1b77]*/ +/*[clinic end generated code: output=b15a24350d105251 input=a9049054013a1b77]*/ -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Oct 11 14:15:31 2015 From: python-checkins at python.org (nick.coghlan) Date: Sun, 11 Oct 2015 12:15:31 +0000 Subject: [Python-checkins] =?utf-8?q?devguide=3A_Issue_=2325194=3A_Add_mot?= =?utf-8?q?ivations_=26_affiliations_page?= Message-ID: <20151011121531.3295.79042@psf.io> https://hg.python.org/devguide/rev/0f0ff7d19cfc changeset: 767:0f0ff7d19cfc user: Nick Coghlan date: Sun Oct 11 22:15:23 2015 +1000 summary: Issue #25194: Add motivations & affiliations page This is an initial version of a motivations & affiliations page based on the python-committers thread and the discussion in the issue tracker. I expect the layout details may change as more folks add their own entries and we see how it looks in practice. files: coredev.rst | 22 ++++- index.rst | 1 + motivations.rst | 135 ++++++++++++++++++++++++++++++++++++ 3 files changed, 151 insertions(+), 7 deletions(-) diff --git a/coredev.rst b/coredev.rst --- a/coredev.rst +++ b/coredev.rst @@ -132,13 +132,13 @@ reputation for being a very nice group of people and we would like to keep it that way. -Second, please be prompt in responding to questions. We are all volunteers so -what little free time one can dedicate to Python should be spent being -productive. If you have been asked to respond to an issue or answer a question -and you put it off it ends up stalling other people's work. 
It is completely -acceptable to say you are too busy, but you need to say that instead of -stringing people along. This obviously applies to anything you do on the issue -tracker as well. +Second, please be prompt in responding to questions. Many contributors to Python +are volunteers so what little free time they can dedicate to Python should be +spent being productive. If you have been asked to respond to an issue or answer +a question and you put it off it ends up stalling other people's work. It is +completely acceptable to say you are too busy, but you need to say that instead +of leaving people waiting for an answer. This also applies to anything you +do on the issue tracker. Third, please list what areas you want to be considered an expert in the :ref:`experts`. This allows triagers to direct issues to you which involve @@ -149,6 +149,14 @@ gets in the way, so no one will be insulted if you remove yourself from the list. +Fourth, please consider whether or not you wish to add your name to the +:ref:`motivations` list. Core contributor participation in the list helps the +wider Python community to better appreciate the perspectives currently +represented amongst the core development team, the Python Software Foundation +to better assess the sustainability of current contributions to CPython core +development, and also serves as a referral list for organisations seeking +commercial Python support from the core development community. + And finally, enjoy yourself! Contributing to open source software should be fun (overall). If you find yourself no longer enjoying the work then either take a break or figure out what you need to do to make it enjoyable again. diff --git a/index.rst b/index.rst --- a/index.rst +++ b/index.rst @@ -222,6 +222,7 @@ buildslave gitdevs faq + motivations .. _Buildbot status: http://python.org/dev/buildbot/ diff --git a/motivations.rst b/motivations.rst new file mode 100644 --- /dev/null +++ b/motivations.rst @@ -0,0 +1,135 @@ +.. _motivations: + +Motivations and Affiliations +============================ + +CPython core contributors participate in the core development process for a +variety of reasons. Being accepted as a core contributor indicates only that +an individual is interested in acquiring those responsibilities, has the +ability to collaborate effectively with existing core contributors, and has had +the time available to demonstrate both that interest and that ability. + +This page allows core contributors that choose to do so to provide more +information to the rest of the Python community regarding their personal +motivations for participating in the core development process (which can be +admittedly trying at times), as well as any personal and professional +affiliations which they consider particularly relevant. + +Core contributors that wish to provide this additional information add a new +entry to the :ref:`published-motivations` section below. Guidelines relating +to content and layout are included as comments in the source code for this page. + +Core contributors that are available for training, consulting or contract work, +or are seeking crowdfunding support for their community contributions, may also +choose to disclose that information here (including linking out to commercial +sites with the relevant details). 
+ +Limitations on scope +-------------------- + +Changes to the software and documentation maintained by core contributors, +together with related design discussions, all take place in public venues, and +hence can be fully audited by anyone that cares to do so. Accordingly, core +contributors are NOT required to publish their motivations and affiliations if +they do not choose to do so. This helps to ensure that core contribution +processes remain open to anyone that is in a position to sign the `Contributor +Licensing Agreement`_, the details of which are filed privately with the Python +Software Foundation, rather than publicly. + +.. _Contributor Licensing Agreement: https://www.python.org/psf/contrib/contrib-form/ + +While providing additional information on this page is entirely optional, +contributors that are not facing personal safety concerns are specifically +encouraged to disclose commercial affiliations in the following two cases +(even if not currently paid for time spent participating in the core +development process): + +* contributors working for vendors that distribute a commercially supported + Python runtime +* contributors working for Sponsor Members of the Python Software Foundation + +These are cases where disclosure of commercial interests helps to improve the +overall transparency of the core development process, as well as making it +easier for staff at these organisations to locate colleagues that can help +them to participate in and contribute effectively to supporting the core +development process. + +Contributors that are available for consulting or contract work on behalf of +the Python Software Foundation or other organisations are also encouraged +to provide that information here, as this will help the PSF to better +facilitate funding of core development work by organisations that don't +directly employ any core contributors themselves. + + +.. _published-motivations: + +Published entries +----------------- + +The following core contributors have chosen to provide additional details +regarding their reasons for participating in the CPython core development +process: + +.. Entry guidelines: + + We use the "topic" directive rather than normal section headings in order to + avoid creating entries in the main table of contents. + + Topic headings should be in the form of "Name (Country)" to help give some + indication as to the geographic dispersal of core contributors. + + Entries should be written as short third person biographies, rather than + being written in first person. + + Entries should be maintained in alphabetical order by last name, or by first + name (relative to other last names) if "last name" isn't a meaningful term + for your name. + + Include a "Personal site" bullet point with a link if you'd like to highlight + a personal blog or other site. + + Include an "Extended bio" bullet point with a link if you'd like to provide + more than a couple of paragraphs of biographical information. (Use a + double-trailing underscore on these links to avoid "Duplicate explicit + target name" warnings from Sphinx/docutils) + + Include an "Available for " (or activities) bullet point with a + link if you'd like to be contacted for professional training, consulting or + contract work. A link to a page with additional details is preferred to a + direct email address or contact phone number, as this is a global site, and + folks may not be familiar with the relevant practical details that apply to + this kind of work in a contributor's country of residence. 
+ + Include a "Crowdfunding" bullet point with a link if you'd like to highlight + crowdfunding services (e.g. Patreon) that folks can use to support your core + development work. + + Include additional bullet points (without links) for any other affiliations + you would like to mention. + + If there's a kind of link you'd like to include in your entry that isn't + already covered by the categories mentioned above, please start a discussion + about that on the python-committers mailing list. + + python-committers is also the appropriate point of contact for any other + questions or suggestions relating to this page. + + +.. topic:: Nick Coghlan (Australia) + + * Personal site: `Curious Efficiency `_ + * `Extended bio `__ + * Red Hat (Software Engineer, Developer Experience) + * Python Software Foundation (Board of Directors) + + Nick originally began participating in CPython core development as an + interesting and enlightening hobby activity while working for Boeing Defence + Australia. After commencing work for Red Hat, he also became involved in a + range of topics related directly to improving the experience of Python + developers on the Fedora Linux distribution and derived platforms, and now + works for Red Hat's Developer Experience team. + + In addition to his personal and professional interest in ensuring Python + remains an excellent choice for Linux-based network service development, he + is also interested in helping to ensure its continued suitability for + educational and data analysis use cases. -- Repository URL: https://hg.python.org/devguide From python-checkins at python.org Sun Oct 11 16:55:13 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Sun, 11 Oct 2015 14:55:13 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E4=29=3A_Cleanup_test?= =?utf-8?q?=5Fdescr=3A_remove_C8_that_is_the_same_as_C3=2E?= Message-ID: <20151011145512.97726.70059@psf.io> https://hg.python.org/cpython/rev/f9820c4724ca changeset: 98684:f9820c4724ca branch: 3.4 parent: 98668:275d388ca1fc user: Serhiy Storchaka date: Sun Oct 11 17:42:39 2015 +0300 summary: Cleanup test_descr: remove C8 that is the same as C3. files: Lib/test/test_descr.py | 8 -------- 1 files changed, 0 insertions(+), 8 deletions(-) diff --git a/Lib/test/test_descr.py b/Lib/test/test_descr.py --- a/Lib/test/test_descr.py +++ b/Lib/test/test_descr.py @@ -4655,14 +4655,6 @@ with self.assertRaises((TypeError, ValueError)): obj.__reduce_ex__(proto) - class C8: - def __getnewargs_ex__(self): - return (args, kwargs) - obj = C8() - for proto in protocols: - if 2 <= proto < 4: - with self.assertRaises(ValueError): - obj.__reduce_ex__(proto) class C9: def __getnewargs_ex__(self): return (args, {}) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Oct 11 16:55:13 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Sun, 11 Oct 2015 14:55:13 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_Cleanup_test=5Fdescr=3A_remove_C8_that_is_the_same_as_C3=2E?= Message-ID: <20151011145513.55460.8313@psf.io> https://hg.python.org/cpython/rev/911f2586e6b4 changeset: 98685:911f2586e6b4 branch: 3.5 parent: 98681:493845d905c5 parent: 98684:f9820c4724ca user: Serhiy Storchaka date: Sun Oct 11 17:43:12 2015 +0300 summary: Cleanup test_descr: remove C8 that is the same as C3. 
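For context on what these test classes exercise: __getnewargs_ex__ lets a class tell pickle which positional and keyword arguments to pass to __new__ when the object is recreated. A hedged, simplified sketch follows; the Frozen class is invented for illustration, and the round-trip over protocols 2 and 3 assumes an interpreter that also includes the issue #24164 change shown later in this digest (protocol 4 has supported it since Python 3.4).

    import pickle

    class Frozen:
        def __new__(cls, value, *, scale=1):
            self = super().__new__(cls)
            self.value = value * scale
            return self

        def __getnewargs_ex__(self):
            # (args, kwargs) that pickle will pass to __new__ on load.
            return ((self.value,), {'scale': 1})

    obj = Frozen(3, scale=2)
    for proto in (2, 3, 4):
        clone = pickle.loads(pickle.dumps(obj, proto))
        assert clone.value == obj.value
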
files: Lib/test/test_descr.py | 8 -------- 1 files changed, 0 insertions(+), 8 deletions(-) diff --git a/Lib/test/test_descr.py b/Lib/test/test_descr.py --- a/Lib/test/test_descr.py +++ b/Lib/test/test_descr.py @@ -4763,14 +4763,6 @@ with self.assertRaises((TypeError, ValueError)): obj.__reduce_ex__(proto) - class C8: - def __getnewargs_ex__(self): - return (args, kwargs) - obj = C8() - for proto in protocols: - if 2 <= proto < 4: - with self.assertRaises(ValueError): - obj.__reduce_ex__(proto) class C9: def __getnewargs_ex__(self): return (args, {}) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Oct 11 16:55:14 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Sun, 11 Oct 2015 14:55:14 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Cleanup_test=5Fdescr=3A_remove_C8_that_is_the_same_as_C3?= =?utf-8?q?=2E?= Message-ID: <20151011145513.2677.34105@psf.io> https://hg.python.org/cpython/rev/41d1c909adf1 changeset: 98686:41d1c909adf1 parent: 98683:463a09a3bfff parent: 98685:911f2586e6b4 user: Serhiy Storchaka date: Sun Oct 11 17:43:38 2015 +0300 summary: Cleanup test_descr: remove C8 that is the same as C3. files: Lib/test/test_descr.py | 8 -------- 1 files changed, 0 insertions(+), 8 deletions(-) diff --git a/Lib/test/test_descr.py b/Lib/test/test_descr.py --- a/Lib/test/test_descr.py +++ b/Lib/test/test_descr.py @@ -4763,14 +4763,6 @@ with self.assertRaises((TypeError, ValueError)): obj.__reduce_ex__(proto) - class C8: - def __getnewargs_ex__(self): - return (args, kwargs) - obj = C8() - for proto in protocols: - if 2 <= proto < 4: - with self.assertRaises(ValueError): - obj.__reduce_ex__(proto) class C9: def __getnewargs_ex__(self): return (args, {}) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Oct 11 16:55:14 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Sun, 11 Oct 2015 14:55:14 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_No_longer_skip_tests_for_classes_with_=5F=5Fgetnewargs?= =?utf-8?q?=5Fex=5F=5F=2E__The_copy_module?= Message-ID: <20151011145514.128856.98949@psf.io> https://hg.python.org/cpython/rev/6f4dd9bc1f20 changeset: 98688:6f4dd9bc1f20 parent: 98686:41d1c909adf1 parent: 98687:80501740ab84 user: Serhiy Storchaka date: Sun Oct 11 17:48:51 2015 +0300 summary: No longer skip tests for classes with __getnewargs_ex__. The copy module already supports reduce protocol 4 (issue #20289). files: Lib/test/test_descr.py | 4 ---- 1 files changed, 0 insertions(+), 4 deletions(-) diff --git a/Lib/test/test_descr.py b/Lib/test/test_descr.py --- a/Lib/test/test_descr.py +++ b/Lib/test/test_descr.py @@ -5081,10 +5081,6 @@ with self.subTest(cls=cls): kwargs = getattr(cls, 'KWARGS', {}) obj = cls(*cls.ARGS, **kwargs) - # XXX: We need to modify the copy module to support PEP 3154's - # reduce protocol 4. 
- if hasattr(cls, '__getnewargs_ex__'): - continue objcopy = deepcopy(obj) self._assert_is_copy(obj, objcopy) # For test classes that supports this, make sure we didn't go -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Oct 11 16:55:17 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Sun, 11 Oct 2015 14:55:17 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2324164=3A_Fixed_te?= =?utf-8?q?st=5Fdescr=3A_=5F=5Fgetnewargs=5Fex=5F=5F_now_is_supported_in_p?= =?utf-8?q?rotocols?= Message-ID: <20151011145514.97716.75989@psf.io> https://hg.python.org/cpython/rev/df33dbbef7bb changeset: 98689:df33dbbef7bb user: Serhiy Storchaka date: Sun Oct 11 17:52:09 2015 +0300 summary: Issue #24164: Fixed test_descr: __getnewargs_ex__ now is supported in protocols 2 and 3. files: Lib/test/test_descr.py | 9 +-------- 1 files changed, 1 insertions(+), 8 deletions(-) diff --git a/Lib/test/test_descr.py b/Lib/test/test_descr.py --- a/Lib/test/test_descr.py +++ b/Lib/test/test_descr.py @@ -4738,11 +4738,8 @@ return (args, kwargs) obj = C3() for proto in protocols: - if proto >= 4: + if proto >= 2: self._check_reduce(proto, obj, args, kwargs) - elif proto >= 2: - with self.assertRaises(ValueError): - obj.__reduce_ex__(proto) class C4: def __getnewargs_ex__(self): @@ -5061,10 +5058,6 @@ kwargs = getattr(cls, 'KWARGS', {}) obj = cls(*cls.ARGS, **kwargs) proto = pickle_copier.proto - if 2 <= proto < 4 and hasattr(cls, '__getnewargs_ex__'): - with self.assertRaises(ValueError): - pickle_copier.dumps(obj, proto) - continue objcopy = pickle_copier.copy(obj) self._assert_is_copy(obj, objcopy) # For test classes that supports this, make sure we didn't go -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Oct 11 16:55:17 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Sun, 11 Oct 2015 14:55:17 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E5=29=3A_No_longer_skip?= =?utf-8?q?_tests_for_classes_with_=5F=5Fgetnewargs=5Fex=5F=5F=2E__The_cop?= =?utf-8?q?y_module?= Message-ID: <20151011145513.55480.40523@psf.io> https://hg.python.org/cpython/rev/80501740ab84 changeset: 98687:80501740ab84 branch: 3.5 parent: 98685:911f2586e6b4 user: Serhiy Storchaka date: Sun Oct 11 17:48:28 2015 +0300 summary: No longer skip tests for classes with __getnewargs_ex__. The copy module already supports reduce protocol 4 (issue #20289). files: Lib/test/test_descr.py | 4 ---- 1 files changed, 0 insertions(+), 4 deletions(-) diff --git a/Lib/test/test_descr.py b/Lib/test/test_descr.py --- a/Lib/test/test_descr.py +++ b/Lib/test/test_descr.py @@ -5081,10 +5081,6 @@ with self.subTest(cls=cls): kwargs = getattr(cls, 'KWARGS', {}) obj = cls(*cls.ARGS, **kwargs) - # XXX: We need to modify the copy module to support PEP 3154's - # reduce protocol 4. 
- if hasattr(cls, '__getnewargs_ex__'): - continue objcopy = deepcopy(obj) self._assert_is_copy(obj, objcopy) # For test classes that supports this, make sure we didn't go -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Oct 11 18:43:55 2015 From: python-checkins at python.org (raymond.hettinger) Date: Sun, 11 Oct 2015 16:43:55 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Hoist_the_deque-=3Emaxlen_?= =?utf-8?q?lookup_out_of_the_inner-loop=2E?= Message-ID: <20151011164355.55474.92442@psf.io> https://hg.python.org/cpython/rev/0c8a9cafb24c changeset: 98690:0c8a9cafb24c user: Raymond Hettinger date: Sun Oct 11 09:43:50 2015 -0700 summary: Hoist the deque->maxlen lookup out of the inner-loop. files: Modules/_collectionsmodule.c | 16 ++++++++++------ 1 files changed, 10 insertions(+), 6 deletions(-) diff --git a/Modules/_collectionsmodule.c b/Modules/_collectionsmodule.c --- a/Modules/_collectionsmodule.c +++ b/Modules/_collectionsmodule.c @@ -385,7 +385,7 @@ { PyObject *it, *item; PyObject *(*iternext)(PyObject *); - int trim = (deque->maxlen >= 0); + Py_ssize_t maxlen = deque->maxlen; /* Handle case where id(deque) == id(iterable) */ if ((PyObject *)deque == iterable) { @@ -433,8 +433,10 @@ Py_SIZE(deque)++; deque->rightindex++; deque->rightblock->data[deque->rightindex] = item; - if (trim) - deque_trim_left(deque); + if (maxlen >= 0 && Py_SIZE(deque) > maxlen) { + PyObject *rv = deque_popleft(deque, NULL); + Py_DECREF(rv); + } } return finalize_iterator(it); } @@ -447,7 +449,7 @@ { PyObject *it, *item; PyObject *(*iternext)(PyObject *); - int trim = (deque->maxlen >= 0); + Py_ssize_t maxlen = deque->maxlen; /* Handle case where id(deque) == id(iterable) */ if ((PyObject *)deque == iterable) { @@ -495,8 +497,10 @@ Py_SIZE(deque)++; deque->leftindex--; deque->leftblock->data[deque->leftindex] = item; - if (trim) - deque_trim_right(deque); + if (maxlen >= 0 && Py_SIZE(deque) > maxlen) { + PyObject *rv = deque_pop(deque, NULL); + Py_DECREF(rv); + } } return finalize_iterator(it); } -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Oct 12 00:16:36 2015 From: python-checkins at python.org (steve.dower) Date: Sun, 11 Oct 2015 22:16:36 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy41KTogSXNzdWUgIzI1MzYx?= =?utf-8?q?=3A_Disables_use_of_SSE2_instructions_in_Windows_32-bit_build?= Message-ID: <20151011221636.128850.85024@psf.io> https://hg.python.org/cpython/rev/15f6bbe944fa changeset: 98691:15f6bbe944fa branch: 3.5 parent: 98687:80501740ab84 user: Steve Dower date: Sun Oct 11 15:15:52 2015 -0700 summary: Issue #25361: Disables use of SSE2 instructions in Windows 32-bit build files: Misc/NEWS | 2 ++ PCbuild/pyproject.props | 1 + PCbuild/pythoncore.vcxproj.filters | 6 ++++++ 3 files changed, 9 insertions(+), 0 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -308,6 +308,8 @@ Windows ------- +- Issue #25361: Disables use of SSE2 instructions in Windows 32-bit build + - Issue #25089: Adds logging to installer for case where launcher is not selected on upgrade. 
diff --git a/PCbuild/pyproject.props b/PCbuild/pyproject.props --- a/PCbuild/pyproject.props +++ b/PCbuild/pyproject.props @@ -43,6 +43,7 @@ Default true true + NoExtensions Disabled diff --git a/PCbuild/pythoncore.vcxproj.filters b/PCbuild/pythoncore.vcxproj.filters --- a/PCbuild/pythoncore.vcxproj.filters +++ b/PCbuild/pythoncore.vcxproj.filters @@ -435,6 +435,9 @@ Modules + + Include + @@ -968,6 +971,9 @@ PC + + Objects + -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Oct 12 00:16:36 2015 From: python-checkins at python.org (steve.dower) Date: Sun, 11 Oct 2015 22:16:36 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2325361=3A_Disables_use_of_SSE2_instructions_in_W?= =?utf-8?q?indows_32-bit_build?= Message-ID: <20151011221636.451.72059@psf.io> https://hg.python.org/cpython/rev/3cf8c2930373 changeset: 98692:3cf8c2930373 parent: 98690:0c8a9cafb24c parent: 98691:15f6bbe944fa user: Steve Dower date: Sun Oct 11 15:16:21 2015 -0700 summary: Issue #25361: Disables use of SSE2 instructions in Windows 32-bit build files: Misc/NEWS | 2 ++ PCbuild/pyproject.props | 1 + PCbuild/pythoncore.vcxproj.filters | 6 ++++++ 3 files changed, 9 insertions(+), 0 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -459,6 +459,8 @@ Windows ------- +- Issue #25361: Disables use of SSE2 instructions in Windows 32-bit build + - Issue #25089: Adds logging to installer for case where launcher is not selected on upgrade. diff --git a/PCbuild/pyproject.props b/PCbuild/pyproject.props --- a/PCbuild/pyproject.props +++ b/PCbuild/pyproject.props @@ -43,6 +43,7 @@ Default true true + NoExtensions Disabled diff --git a/PCbuild/pythoncore.vcxproj.filters b/PCbuild/pythoncore.vcxproj.filters --- a/PCbuild/pythoncore.vcxproj.filters +++ b/PCbuild/pythoncore.vcxproj.filters @@ -435,6 +435,9 @@ Modules + + Include + @@ -968,6 +971,9 @@ PC + + Objects + -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Oct 12 00:38:01 2015 From: python-checkins at python.org (steve.dower) Date: Sun, 11 Oct 2015 22:38:01 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2325163=3A_Display_correct_directory_in_installer?= =?utf-8?q?_when_using_non-default?= Message-ID: <20151011223801.449.95721@psf.io> https://hg.python.org/cpython/rev/6f97c51b6dc5 changeset: 98694:6f97c51b6dc5 parent: 98692:3cf8c2930373 parent: 98693:919b1dffa741 user: Steve Dower date: Sun Oct 11 15:37:36 2015 -0700 summary: Issue #25163: Display correct directory in installer when using non-default settings. files: Misc/NEWS | 3 + Tools/msi/bundle/Default.thm | 2 +- Tools/msi/bundle/Default.wxl | 2 +- Tools/msi/bundle/bootstrap/PythonBootstrapperApplication.cpp | 58 ++++++--- 4 files changed, 41 insertions(+), 24 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -459,6 +459,9 @@ Windows ------- +- Issue #25163: Display correct directory in installer when using non-default + settings. 
+ - Issue #25361: Disables use of SSE2 instructions in Windows 32-bit build - Issue #25089: Adds logging to installer for case where launcher is not diff --git a/Tools/msi/bundle/Default.thm b/Tools/msi/bundle/Default.thm --- a/Tools/msi/bundle/Default.thm +++ b/Tools/msi/bundle/Default.thm @@ -24,8 +24,8 @@ + #(loc.ShortInstallLauncherAllUsersLabel) #(loc.ShortPrependPathLabel) - #(loc.ShortInstallLauncherAllUsersLabel) diff --git a/Tools/msi/bundle/Default.wxl b/Tools/msi/bundle/Default.wxl --- a/Tools/msi/bundle/Default.wxl +++ b/Tools/msi/bundle/Default.wxl @@ -42,7 +42,7 @@ [WixBundleName] <a href="#">license terms</a>. I &agree to the license terms and conditions &Install Now - [DefaultJustForMeTargetDir] + [TargetDir] Includes IDLE, pip and documentation Creates shortcuts and file associations diff --git a/Tools/msi/bundle/bootstrap/PythonBootstrapperApplication.cpp b/Tools/msi/bundle/bootstrap/PythonBootstrapperApplication.cpp --- a/Tools/msi/bundle/bootstrap/PythonBootstrapperApplication.cpp +++ b/Tools/msi/bundle/bootstrap/PythonBootstrapperApplication.cpp @@ -293,28 +293,8 @@ hr = _engine->SetVariableNumeric(L"CompileAll", installAllUsers); ExitOnFailure(hr, L"Failed to update CompileAll"); - hr = BalGetStringVariable(L"TargetDir", &targetDir); - if (FAILED(hr) || !targetDir || !targetDir[0]) { - ReleaseStr(targetDir); - targetDir = nullptr; - - hr = BalGetStringVariable( - installAllUsers ? L"DefaultAllUsersTargetDir" : L"DefaultJustForMeTargetDir", - &defaultDir - ); - BalExitOnFailure(hr, "Failed to get the default install directory"); - - if (!defaultDir || !defaultDir[0]) { - BalLogError(E_INVALIDARG, "Default install directory is blank"); - } - - hr = BalFormatString(defaultDir, &targetDir); - BalExitOnFailure1(hr, "Failed to format '%ls'", defaultDir); - - hr = _engine->SetVariableString(L"TargetDir", targetDir); - BalExitOnFailure(hr, "Failed to set install target directory"); - } - ReleaseStr(targetDir); + hr = EnsureTargetDir(); + ExitOnFailure(hr, L"Failed to set TargetDir"); OnPlan(BOOTSTRAPPER_ACTION_INSTALL); break; @@ -2972,6 +2952,39 @@ return; } + HRESULT EnsureTargetDir() { + LONGLONG installAllUsers; + LPWSTR targetDir = nullptr, defaultDir = nullptr; + HRESULT hr = BalGetStringVariable(L"TargetDir", &targetDir); + if (FAILED(hr) || !targetDir || !targetDir[0]) { + ReleaseStr(targetDir); + targetDir = nullptr; + + hr = BalGetNumericVariable(L"InstallAllUsers", &installAllUsers); + ExitOnFailure(hr, L"Failed to get install scope"); + + hr = BalGetStringVariable( + installAllUsers ? L"DefaultAllUsersTargetDir" : L"DefaultJustForMeTargetDir", + &defaultDir + ); + BalExitOnFailure(hr, "Failed to get the default install directory"); + + if (!defaultDir || !defaultDir[0]) { + BalLogError(E_INVALIDARG, "Default install directory is blank"); + } + + hr = BalFormatString(defaultDir, &targetDir); + BalExitOnFailure1(hr, "Failed to format '%ls'", defaultDir); + + hr = _engine->SetVariableString(L"TargetDir", targetDir); + BalExitOnFailure(hr, "Failed to set install target directory"); + } + LExit: + ReleaseStr(defaultDir); + ReleaseStr(targetDir); + return hr; + } + public: // // Constructor - initialize member variables. 
@@ -3057,6 +3070,7 @@ _baFunction = nullptr; LoadOptionalFeatureStates(pEngine); + EnsureTargetDir(); } -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Oct 12 00:38:01 2015 From: python-checkins at python.org (steve.dower) Date: Sun, 11 Oct 2015 22:38:01 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy41KTogSXNzdWUgIzI1MTYz?= =?utf-8?q?=3A_Display_correct_directory_in_installer_when_using_non-defau?= =?utf-8?q?lt?= Message-ID: <20151011223801.55456.38685@psf.io> https://hg.python.org/cpython/rev/919b1dffa741 changeset: 98693:919b1dffa741 branch: 3.5 parent: 98691:15f6bbe944fa user: Steve Dower date: Sun Oct 11 15:37:22 2015 -0700 summary: Issue #25163: Display correct directory in installer when using non-default settings. files: Misc/NEWS | 3 + Tools/msi/bundle/Default.thm | 2 +- Tools/msi/bundle/Default.wxl | 2 +- Tools/msi/bundle/bootstrap/PythonBootstrapperApplication.cpp | 58 ++++++--- 4 files changed, 41 insertions(+), 24 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -308,6 +308,9 @@ Windows ------- +- Issue #25163: Display correct directory in installer when using non-default + settings. + - Issue #25361: Disables use of SSE2 instructions in Windows 32-bit build - Issue #25089: Adds logging to installer for case where launcher is not diff --git a/Tools/msi/bundle/Default.thm b/Tools/msi/bundle/Default.thm --- a/Tools/msi/bundle/Default.thm +++ b/Tools/msi/bundle/Default.thm @@ -24,8 +24,8 @@ + #(loc.ShortInstallLauncherAllUsersLabel) #(loc.ShortPrependPathLabel) - #(loc.ShortInstallLauncherAllUsersLabel) diff --git a/Tools/msi/bundle/Default.wxl b/Tools/msi/bundle/Default.wxl --- a/Tools/msi/bundle/Default.wxl +++ b/Tools/msi/bundle/Default.wxl @@ -42,7 +42,7 @@ [WixBundleName] <a href="#">license terms</a>. I &agree to the license terms and conditions &Install Now - [DefaultJustForMeTargetDir] + [TargetDir] Includes IDLE, pip and documentation Creates shortcuts and file associations diff --git a/Tools/msi/bundle/bootstrap/PythonBootstrapperApplication.cpp b/Tools/msi/bundle/bootstrap/PythonBootstrapperApplication.cpp --- a/Tools/msi/bundle/bootstrap/PythonBootstrapperApplication.cpp +++ b/Tools/msi/bundle/bootstrap/PythonBootstrapperApplication.cpp @@ -293,28 +293,8 @@ hr = _engine->SetVariableNumeric(L"CompileAll", installAllUsers); ExitOnFailure(hr, L"Failed to update CompileAll"); - hr = BalGetStringVariable(L"TargetDir", &targetDir); - if (FAILED(hr) || !targetDir || !targetDir[0]) { - ReleaseStr(targetDir); - targetDir = nullptr; - - hr = BalGetStringVariable( - installAllUsers ? 
L"DefaultAllUsersTargetDir" : L"DefaultJustForMeTargetDir", - &defaultDir - ); - BalExitOnFailure(hr, "Failed to get the default install directory"); - - if (!defaultDir || !defaultDir[0]) { - BalLogError(E_INVALIDARG, "Default install directory is blank"); - } - - hr = BalFormatString(defaultDir, &targetDir); - BalExitOnFailure1(hr, "Failed to format '%ls'", defaultDir); - - hr = _engine->SetVariableString(L"TargetDir", targetDir); - BalExitOnFailure(hr, "Failed to set install target directory"); - } - ReleaseStr(targetDir); + hr = EnsureTargetDir(); + ExitOnFailure(hr, L"Failed to set TargetDir"); OnPlan(BOOTSTRAPPER_ACTION_INSTALL); break; @@ -2972,6 +2952,39 @@ return; } + HRESULT EnsureTargetDir() { + LONGLONG installAllUsers; + LPWSTR targetDir = nullptr, defaultDir = nullptr; + HRESULT hr = BalGetStringVariable(L"TargetDir", &targetDir); + if (FAILED(hr) || !targetDir || !targetDir[0]) { + ReleaseStr(targetDir); + targetDir = nullptr; + + hr = BalGetNumericVariable(L"InstallAllUsers", &installAllUsers); + ExitOnFailure(hr, L"Failed to get install scope"); + + hr = BalGetStringVariable( + installAllUsers ? L"DefaultAllUsersTargetDir" : L"DefaultJustForMeTargetDir", + &defaultDir + ); + BalExitOnFailure(hr, "Failed to get the default install directory"); + + if (!defaultDir || !defaultDir[0]) { + BalLogError(E_INVALIDARG, "Default install directory is blank"); + } + + hr = BalFormatString(defaultDir, &targetDir); + BalExitOnFailure1(hr, "Failed to format '%ls'", defaultDir); + + hr = _engine->SetVariableString(L"TargetDir", targetDir); + BalExitOnFailure(hr, "Failed to set install target directory"); + } + LExit: + ReleaseStr(defaultDir); + ReleaseStr(targetDir); + return hr; + } + public: // // Constructor - initialize member variables. @@ -3057,6 +3070,7 @@ _baFunction = nullptr; LoadOptionalFeatureStates(pEngine); + EnsureTargetDir(); } -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Oct 12 01:41:07 2015 From: python-checkins at python.org (steve.dower) Date: Sun, 11 Oct 2015 23:41:07 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy41KTogSXNzdWUgIzI1MTQz?= =?utf-8?q?=3A_Improves_installer_error_messages_for_unsupported_platforms?= =?utf-8?q?=2E?= Message-ID: <20151011234107.97716.3503@psf.io> https://hg.python.org/cpython/rev/2316bc881eea changeset: 98695:2316bc881eea branch: 3.5 parent: 98693:919b1dffa741 user: Steve Dower date: Sun Oct 11 16:40:41 2015 -0700 summary: Issue #25143: Improves installer error messages for unsupported platforms. files: Misc/NEWS | 2 + Tools/msi/bundle/Default.thm | 2 +- Tools/msi/bundle/Default.wxl | 10 +++ Tools/msi/bundle/bootstrap/PythonBootstrapperApplication.cpp | 32 ++++++++++ Tools/msi/bundle/bootstrap/pch.h | 1 + 5 files changed, 46 insertions(+), 1 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -308,6 +308,8 @@ Windows ------- +- Issue #25143: Improves installer error messages for unsupported platforms. + - Issue #25163: Display correct directory in installer when using non-default settings. diff --git a/Tools/msi/bundle/Default.thm b/Tools/msi/bundle/Default.thm --- a/Tools/msi/bundle/Default.thm +++ b/Tools/msi/bundle/Default.thm @@ -128,7 +128,7 @@ #(loc.FailureHyperlinkLogText) - + #(loc.FailureRestartText) diff --git a/Tools/msi/bundle/Default.wxl b/Tools/msi/bundle/Default.wxl --- a/Tools/msi/bundle/Default.wxl +++ b/Tools/msi/bundle/Default.wxl @@ -120,4 +120,14 @@ You must restart your computer to complete the rollback of the software. 
&Restart Unable to install [WixBundleName] due to an existing install. Use Programs and Features to modify, repair or remove [WixBundleName]. + + Windows 7 Service Pack 1 and all applicable updates are required to install [WixBundleName]. + +Please <a href="https://www.bing.com/search?q=how%20to%20install%20windows%207%20service%20pack%201">update your machine</a> and then restart the installation. + Windows Vista Service Pack 2 and all applicable updates are required to install [WixBundleName]. + +Please <a href="https://www.bing.com/search?q=how%20to%20install%20windows%20vista%20service%20pack%202">update your machine</a> and then restart the installation. + Windows Vista or later is required to install and use [WixBundleName]. + +Visit <a href="https://www.python.org/">python.org</a> to download Python 3.4. diff --git a/Tools/msi/bundle/bootstrap/PythonBootstrapperApplication.cpp b/Tools/msi/bundle/bootstrap/PythonBootstrapperApplication.cpp --- a/Tools/msi/bundle/bootstrap/PythonBootstrapperApplication.cpp +++ b/Tools/msi/bundle/bootstrap/PythonBootstrapperApplication.cpp @@ -1216,6 +1216,8 @@ hr = pThis->CreateMainWindow(); BalExitOnFailure(hr, "Failed to create main window."); + pThis->ValidateOperatingSystem(); + if (FAILED(pThis->_hrFinal)) { pThis->SetState(PYBA_STATE_FAILED, hr); ::PostMessageW(pThis->_hWnd, WM_PYBA_SHOW_FAILURE, 0, 0); @@ -2985,6 +2987,36 @@ return hr; } + void ValidateOperatingSystem() { + LOC_STRING *pLocString = nullptr; + + if (IsWindows7SP1OrGreater()) { + BalLog(BOOTSTRAPPER_LOG_LEVEL_ERROR, "Target OS is Windows 7 SP1 or later"); + return; + } else if (IsWindows7OrGreater()) { + BalLog(BOOTSTRAPPER_LOG_LEVEL_ERROR, "Detected Windows 7 RTM"); + BalLog(BOOTSTRAPPER_LOG_LEVEL_ERROR, "Service Pack 1 is required to continue installation"); + LocGetString(_wixLoc, L"#(loc.FailureWin7MissingSP1)", &pLocString); + } else if (IsWindowsVistaSP2OrGreater()) { + BalLog(BOOTSTRAPPER_LOG_LEVEL_ERROR, "Target OS is Windows Vista SP2"); + return; + } else if (IsWindowsVistaOrGreater()) { + BalLog(BOOTSTRAPPER_LOG_LEVEL_ERROR, "Detected Windows Vista RTM or SP1"); + BalLog(BOOTSTRAPPER_LOG_LEVEL_ERROR, "Service Pack 2 is required to continue installation"); + LocGetString(_wixLoc, L"#(loc.FailureVistaMissingSP2)", &pLocString); + } else { + BalLog(BOOTSTRAPPER_LOG_LEVEL_ERROR, "Detected Windows XP or earlier"); + BalLog(BOOTSTRAPPER_LOG_LEVEL_ERROR, "Windows Vista SP2 or later is required to continue installation"); + LocGetString(_wixLoc, L"#(loc.FailureXPOrEarlier)", &pLocString); + } + + if (pLocString && pLocString->wzText) { + BalFormatString(pLocString->wzText, &_failedMessage); + } + + _hrFinal = E_WIXSTDBA_CONDITION_FAILED; + } + public: // // Constructor - initialize member variables. 
diff --git a/Tools/msi/bundle/bootstrap/pch.h b/Tools/msi/bundle/bootstrap/pch.h --- a/Tools/msi/bundle/bootstrap/pch.h +++ b/Tools/msi/bundle/bootstrap/pch.h @@ -23,6 +23,7 @@ #include #include #include +#include #include "dutil.h" #include "memutil.h" -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Oct 12 01:41:08 2015 From: python-checkins at python.org (steve.dower) Date: Sun, 11 Oct 2015 23:41:08 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2325143=3A_Improves_installer_error_messages_for_?= =?utf-8?q?unsupported_platforms=2E?= Message-ID: <20151011234107.128836.83328@psf.io> https://hg.python.org/cpython/rev/2c384ba13fdd changeset: 98696:2c384ba13fdd parent: 98694:6f97c51b6dc5 parent: 98695:2316bc881eea user: Steve Dower date: Sun Oct 11 16:40:52 2015 -0700 summary: Issue #25143: Improves installer error messages for unsupported platforms. files: Misc/NEWS | 2 + Tools/msi/bundle/Default.thm | 2 +- Tools/msi/bundle/Default.wxl | 10 +++ Tools/msi/bundle/bootstrap/PythonBootstrapperApplication.cpp | 32 ++++++++++ Tools/msi/bundle/bootstrap/pch.h | 1 + 5 files changed, 46 insertions(+), 1 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -459,6 +459,8 @@ Windows ------- +- Issue #25143: Improves installer error messages for unsupported platforms. + - Issue #25163: Display correct directory in installer when using non-default settings. diff --git a/Tools/msi/bundle/Default.thm b/Tools/msi/bundle/Default.thm --- a/Tools/msi/bundle/Default.thm +++ b/Tools/msi/bundle/Default.thm @@ -128,7 +128,7 @@ #(loc.FailureHyperlinkLogText) - + #(loc.FailureRestartText) diff --git a/Tools/msi/bundle/Default.wxl b/Tools/msi/bundle/Default.wxl --- a/Tools/msi/bundle/Default.wxl +++ b/Tools/msi/bundle/Default.wxl @@ -120,4 +120,14 @@ You must restart your computer to complete the rollback of the software. &Restart Unable to install [WixBundleName] due to an existing install. Use Programs and Features to modify, repair or remove [WixBundleName]. + + Windows 7 Service Pack 1 and all applicable updates are required to install [WixBundleName]. + +Please <a href="https://www.bing.com/search?q=how%20to%20install%20windows%207%20service%20pack%201">update your machine</a> and then restart the installation. + Windows Vista Service Pack 2 and all applicable updates are required to install [WixBundleName]. + +Please <a href="https://www.bing.com/search?q=how%20to%20install%20windows%20vista%20service%20pack%202">update your machine</a> and then restart the installation. + Windows Vista or later is required to install and use [WixBundleName]. + +Visit <a href="https://www.python.org/">python.org</a> to download Python 3.4. 
diff --git a/Tools/msi/bundle/bootstrap/PythonBootstrapperApplication.cpp b/Tools/msi/bundle/bootstrap/PythonBootstrapperApplication.cpp --- a/Tools/msi/bundle/bootstrap/PythonBootstrapperApplication.cpp +++ b/Tools/msi/bundle/bootstrap/PythonBootstrapperApplication.cpp @@ -1216,6 +1216,8 @@ hr = pThis->CreateMainWindow(); BalExitOnFailure(hr, "Failed to create main window."); + pThis->ValidateOperatingSystem(); + if (FAILED(pThis->_hrFinal)) { pThis->SetState(PYBA_STATE_FAILED, hr); ::PostMessageW(pThis->_hWnd, WM_PYBA_SHOW_FAILURE, 0, 0); @@ -2985,6 +2987,36 @@ return hr; } + void ValidateOperatingSystem() { + LOC_STRING *pLocString = nullptr; + + if (IsWindows7SP1OrGreater()) { + BalLog(BOOTSTRAPPER_LOG_LEVEL_ERROR, "Target OS is Windows 7 SP1 or later"); + return; + } else if (IsWindows7OrGreater()) { + BalLog(BOOTSTRAPPER_LOG_LEVEL_ERROR, "Detected Windows 7 RTM"); + BalLog(BOOTSTRAPPER_LOG_LEVEL_ERROR, "Service Pack 1 is required to continue installation"); + LocGetString(_wixLoc, L"#(loc.FailureWin7MissingSP1)", &pLocString); + } else if (IsWindowsVistaSP2OrGreater()) { + BalLog(BOOTSTRAPPER_LOG_LEVEL_ERROR, "Target OS is Windows Vista SP2"); + return; + } else if (IsWindowsVistaOrGreater()) { + BalLog(BOOTSTRAPPER_LOG_LEVEL_ERROR, "Detected Windows Vista RTM or SP1"); + BalLog(BOOTSTRAPPER_LOG_LEVEL_ERROR, "Service Pack 2 is required to continue installation"); + LocGetString(_wixLoc, L"#(loc.FailureVistaMissingSP2)", &pLocString); + } else { + BalLog(BOOTSTRAPPER_LOG_LEVEL_ERROR, "Detected Windows XP or earlier"); + BalLog(BOOTSTRAPPER_LOG_LEVEL_ERROR, "Windows Vista SP2 or later is required to continue installation"); + LocGetString(_wixLoc, L"#(loc.FailureXPOrEarlier)", &pLocString); + } + + if (pLocString && pLocString->wzText) { + BalFormatString(pLocString->wzText, &_failedMessage); + } + + _hrFinal = E_WIXSTDBA_CONDITION_FAILED; + } + public: // // Constructor - initialize member variables. diff --git a/Tools/msi/bundle/bootstrap/pch.h b/Tools/msi/bundle/bootstrap/pch.h --- a/Tools/msi/bundle/bootstrap/pch.h +++ b/Tools/msi/bundle/bootstrap/pch.h @@ -23,6 +23,7 @@ #include #include #include +#include #include "dutil.h" #include "memutil.h" -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Oct 12 03:05:45 2015 From: python-checkins at python.org (steve.dower) Date: Mon, 12 Oct 2015 01:05:45 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Merge_from_3=2E5?= Message-ID: <20151012010544.2695.99070@psf.io> https://hg.python.org/cpython/rev/0452df4d63ba changeset: 98698:0452df4d63ba parent: 98696:2c384ba13fdd parent: 98697:d503b27de2d9 user: Steve Dower date: Sun Oct 11 18:05:27 2015 -0700 summary: Merge from 3.5 files: Tools/msi/bundle/Default.wxl | 2 +- Tools/msi/bundle/bootstrap/PythonBootstrapperApplication.cpp | 16 ++++++++- Tools/msi/bundle/bundle.wxs | 1 + 3 files changed, 16 insertions(+), 3 deletions(-) diff --git a/Tools/msi/bundle/Default.wxl b/Tools/msi/bundle/Default.wxl --- a/Tools/msi/bundle/Default.wxl +++ b/Tools/msi/bundle/Default.wxl @@ -52,7 +52,7 @@ Use settings preselected by your administrator [SimpleInstallDescription] - &Upgrade Now + Up&grade Now [TargetDir] Replaces your existing installation without changing settings. 
diff --git a/Tools/msi/bundle/bootstrap/PythonBootstrapperApplication.cpp b/Tools/msi/bundle/bootstrap/PythonBootstrapperApplication.cpp --- a/Tools/msi/bundle/bootstrap/PythonBootstrapperApplication.cpp +++ b/Tools/msi/bundle/bootstrap/PythonBootstrapperApplication.cpp @@ -674,6 +674,8 @@ hr ); } + + LoadOptionalFeatureStates(_engine); } else if (BOOTSTRAPPER_RELATED_OPERATION_NONE == operation) { if (_command.action == BOOTSTRAPPER_ACTION_INSTALL) { LOC_STRING *pLocString = nullptr; @@ -2556,7 +2558,14 @@ BOOL WillElevate() { static BAL_CONDITION WILL_ELEVATE_CONDITION = { - L"not WixBundleElevated and (InstallAllUsers or (InstallLauncherAllUsers and Include_launcher))", + L"not WixBundleElevated and (" + /*Elevate when installing for all users*/ + L"InstallAllUsers or" + /*Elevate when installing the launcher for all users and it was not detected*/ + L"(InstallLauncherAllUsers and Include_launcher and not DetectedLauncher) or" + /*Elevate when the launcher was installed for all users and it is being removed*/ + L"(DetectedLauncher and DetectedLauncherAllUsers and not Include_launcher)" + L")", L"" }; BOOL result; @@ -2884,6 +2893,10 @@ pEngine->SetVariableNumeric(L"Include_launcher", 0); } else if (res == ERROR_SUCCESS) { pEngine->SetVariableNumeric(L"Include_launcher", 1); + pEngine->SetVariableNumeric(L"DetectedLauncher", 1); + pEngine->SetVariableNumeric(L"InstallLauncherAllUsers", (hkHive == HKEY_LOCAL_MACHINE) ? 1 : 0); + pEngine->SetVariableNumeric(L"DetectedLauncherAllUsers", (hkHive == HKEY_LOCAL_MACHINE) ? 1 : 0); + pEngine->SetVariableString(L"InstallLauncherAllUsersState", L"disable"); } res = RegQueryValueExW(hKey, L"AssociateFiles", nullptr, nullptr, nullptr, nullptr); @@ -3101,7 +3114,6 @@ _hBAFModule = nullptr; _baFunction = nullptr; - LoadOptionalFeatureStates(pEngine); EnsureTargetDir(); } diff --git a/Tools/msi/bundle/bundle.wxs b/Tools/msi/bundle/bundle.wxs --- a/Tools/msi/bundle/bundle.wxs +++ b/Tools/msi/bundle/bundle.wxs @@ -65,6 +65,7 @@ + -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Oct 12 03:05:44 2015 From: python-checkins at python.org (steve.dower) Date: Mon, 12 Oct 2015 01:05:44 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E5=29=3A_Only_detects_f?= =?utf-8?q?eatures_from_previous_version_when_a_bundle_is_found=2E?= Message-ID: <20151012010544.7240.95957@psf.io> https://hg.python.org/cpython/rev/d503b27de2d9 changeset: 98697:d503b27de2d9 branch: 3.5 parent: 98695:2316bc881eea user: Steve Dower date: Sun Oct 11 18:05:11 2015 -0700 summary: Only detects features from previous version when a bundle is found. Otherwise, stray registry entries would cause issues. Also fixes an accelerator collision and improves UAC icons when upgrading. files: Tools/msi/bundle/Default.wxl | 2 +- Tools/msi/bundle/bootstrap/PythonBootstrapperApplication.cpp | 16 ++++++++- Tools/msi/bundle/bundle.wxs | 1 + 3 files changed, 16 insertions(+), 3 deletions(-) diff --git a/Tools/msi/bundle/Default.wxl b/Tools/msi/bundle/Default.wxl --- a/Tools/msi/bundle/Default.wxl +++ b/Tools/msi/bundle/Default.wxl @@ -52,7 +52,7 @@ Use settings preselected by your administrator [SimpleInstallDescription] - &Upgrade Now + Up&grade Now [TargetDir] Replaces your existing installation without changing settings. 
diff --git a/Tools/msi/bundle/bootstrap/PythonBootstrapperApplication.cpp b/Tools/msi/bundle/bootstrap/PythonBootstrapperApplication.cpp --- a/Tools/msi/bundle/bootstrap/PythonBootstrapperApplication.cpp +++ b/Tools/msi/bundle/bootstrap/PythonBootstrapperApplication.cpp @@ -674,6 +674,8 @@ hr ); } + + LoadOptionalFeatureStates(_engine); } else if (BOOTSTRAPPER_RELATED_OPERATION_NONE == operation) { if (_command.action == BOOTSTRAPPER_ACTION_INSTALL) { LOC_STRING *pLocString = nullptr; @@ -2556,7 +2558,14 @@ BOOL WillElevate() { static BAL_CONDITION WILL_ELEVATE_CONDITION = { - L"not WixBundleElevated and (InstallAllUsers or (InstallLauncherAllUsers and Include_launcher))", + L"not WixBundleElevated and (" + /*Elevate when installing for all users*/ + L"InstallAllUsers or" + /*Elevate when installing the launcher for all users and it was not detected*/ + L"(InstallLauncherAllUsers and Include_launcher and not DetectedLauncher) or" + /*Elevate when the launcher was installed for all users and it is being removed*/ + L"(DetectedLauncher and DetectedLauncherAllUsers and not Include_launcher)" + L")", L"" }; BOOL result; @@ -2884,6 +2893,10 @@ pEngine->SetVariableNumeric(L"Include_launcher", 0); } else if (res == ERROR_SUCCESS) { pEngine->SetVariableNumeric(L"Include_launcher", 1); + pEngine->SetVariableNumeric(L"DetectedLauncher", 1); + pEngine->SetVariableNumeric(L"InstallLauncherAllUsers", (hkHive == HKEY_LOCAL_MACHINE) ? 1 : 0); + pEngine->SetVariableNumeric(L"DetectedLauncherAllUsers", (hkHive == HKEY_LOCAL_MACHINE) ? 1 : 0); + pEngine->SetVariableString(L"InstallLauncherAllUsersState", L"disable"); } res = RegQueryValueExW(hKey, L"AssociateFiles", nullptr, nullptr, nullptr, nullptr); @@ -3101,7 +3114,6 @@ _hBAFModule = nullptr; _baFunction = nullptr; - LoadOptionalFeatureStates(pEngine); EnsureTargetDir(); } diff --git a/Tools/msi/bundle/bundle.wxs b/Tools/msi/bundle/bundle.wxs --- a/Tools/msi/bundle/bundle.wxs +++ b/Tools/msi/bundle/bundle.wxs @@ -65,6 +65,7 @@ + -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Oct 12 03:07:18 2015 From: python-checkins at python.org (steve.dower) Date: Mon, 12 Oct 2015 01:07:18 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E5=29=3A_Keeps_all-user?= =?utf-8?q?s_launcher_checkbox_visible_when_the_option_cannot_be_changed?= =?utf-8?q?=2E?= Message-ID: <20151012010718.18366.9694@psf.io> https://hg.python.org/cpython/rev/3abf8f49c439 changeset: 98699:3abf8f49c439 branch: 3.5 parent: 98697:d503b27de2d9 user: Steve Dower date: Sun Oct 11 18:06:55 2015 -0700 summary: Keeps all-users launcher checkbox visible when the option cannot be changed. 
files: Tools/msi/bundle/Default.thm | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Tools/msi/bundle/Default.thm b/Tools/msi/bundle/Default.thm --- a/Tools/msi/bundle/Default.thm +++ b/Tools/msi/bundle/Default.thm @@ -24,7 +24,7 @@ - #(loc.ShortInstallLauncherAllUsersLabel) + #(loc.ShortInstallLauncherAllUsersLabel) #(loc.ShortPrependPathLabel) @@ -65,7 +65,7 @@ #(loc.Include_testHelpLabel) #(loc.Include_launcherLabel) - #(loc.InstallLauncherAllUsersLabel) + #(loc.InstallLauncherAllUsersLabel) #(loc.Include_launcherHelpLabel) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Oct 12 03:07:19 2015 From: python-checkins at python.org (steve.dower) Date: Mon, 12 Oct 2015 01:07:19 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Merge_from_3=2E5?= Message-ID: <20151012010718.2695.60991@psf.io> https://hg.python.org/cpython/rev/60f863101378 changeset: 98700:60f863101378 parent: 98698:0452df4d63ba parent: 98699:3abf8f49c439 user: Steve Dower date: Sun Oct 11 18:07:07 2015 -0700 summary: Merge from 3.5 files: Tools/msi/bundle/Default.thm | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Tools/msi/bundle/Default.thm b/Tools/msi/bundle/Default.thm --- a/Tools/msi/bundle/Default.thm +++ b/Tools/msi/bundle/Default.thm @@ -24,7 +24,7 @@ - #(loc.ShortInstallLauncherAllUsersLabel) + #(loc.ShortInstallLauncherAllUsersLabel) #(loc.ShortPrependPathLabel) @@ -65,7 +65,7 @@ #(loc.Include_testHelpLabel) #(loc.Include_launcherLabel) - #(loc.InstallLauncherAllUsersLabel) + #(loc.InstallLauncherAllUsersLabel) #(loc.Include_launcherHelpLabel) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Oct 12 04:08:18 2015 From: python-checkins at python.org (terry.reedy) Date: Mon, 12 Oct 2015 02:08:18 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_Merge_with_3=2E4?= Message-ID: <20151012020818.18364.69107@psf.io> https://hg.python.org/cpython/rev/5798c14b53de changeset: 98703:5798c14b53de branch: 3.5 parent: 98699:3abf8f49c439 parent: 98702:01f83efcc44b user: Terry Jan Reedy date: Sun Oct 11 22:07:48 2015 -0400 summary: Merge with 3.4 files: Lib/idlelib/configDialog.py | 51 +++++++++++++++--------- Lib/idlelib/textView.py | 2 +- 2 files changed, 33 insertions(+), 20 deletions(-) diff --git a/Lib/idlelib/configDialog.py b/Lib/idlelib/configDialog.py --- a/Lib/idlelib/configDialog.py +++ b/Lib/idlelib/configDialog.py @@ -20,7 +20,9 @@ from idlelib.configSectionNameDialog import GetCfgSectionNameDialog from idlelib.configHelpSourceEdit import GetHelpSourceDialog from idlelib.tabbedpages import TabbedPageSet +from idlelib.textView import view_text from idlelib import macosxSupport + class ConfigDialog(Toplevel): def __init__(self, parent, title='', _htest=False, _utest=False): @@ -85,6 +87,7 @@ self.CreatePageKeys() self.CreatePageGeneral() self.create_action_buttons().pack(side=BOTTOM) + def create_action_buttons(self): if macosxSupport.isAquaTk(): # Changing the default padding on OSX results in unreadable @@ -94,28 +97,18 @@ paddingArgs = {'padx':6, 'pady':3} outer = Frame(self, pady=2) buttons = Frame(outer, pady=2) - self.buttonOk = Button( - buttons, text='Ok', command=self.Ok, - takefocus=FALSE, **paddingArgs) - self.buttonApply = Button( - buttons, text='Apply', command=self.Apply, - takefocus=FALSE, **paddingArgs) - self.buttonCancel = Button( - buttons, text='Cancel', command=self.Cancel, - 
takefocus=FALSE, **paddingArgs) - self.buttonOk.pack(side=LEFT, padx=5) - self.buttonApply.pack(side=LEFT, padx=5) - self.buttonCancel.pack(side=LEFT, padx=5) -# Comment out Help button creation and packing until implement self.Help -## self.buttonHelp = Button( -## buttons, text='Help', command=self.Help, -## takefocus=FALSE, **paddingArgs) -## self.buttonHelp.pack(side=RIGHT, padx=5) - + for txt, cmd in ( + ('Ok', self.Ok), + ('Apply', self.Apply), + ('Cancel', self.Cancel), + ('Help', self.Help)): + Button(buttons, text=txt, command=cmd, takefocus=FALSE, + **paddingArgs).pack(side=LEFT, padx=5) # add space above buttons Frame(outer, height=2, borderwidth=0).pack(side=TOP) buttons.pack(side=BOTTOM) return outer + def CreatePageFontTab(self): parent = self.parent self.fontSize = StringVar(parent) @@ -1183,7 +1176,27 @@ self.ActivateConfigChanges() def Help(self): - pass + page = self.tabPages._current_page + view_text(self, title='Help for IDLE preferences', + text=help_common+help_pages.get(page, '')) + +help_common = '''\ +When you click either the Apply or Ok buttons, settings in this +dialog that are different from IDLE's default are saved in +a .idlerc directory in your home directory. Except as noted, +hese changes apply to all versions of IDLE installed on this +machine. Some do not take affect until IDLE is restarted. +[Cancel] only cancels changes made since the last save. +''' +help_pages = { + 'Highlighting':''' +Highlighting: +The IDLE Dark color theme is new in Octover 2015. It can only +be used with older IDLE releases if it is saved as a custom +theme, with a different name. +''' +} + class VerticalScrolledFrame(Frame): """A pure Tkinter vertically scrollable frame. diff --git a/Lib/idlelib/textView.py b/Lib/idlelib/textView.py --- a/Lib/idlelib/textView.py +++ b/Lib/idlelib/textView.py @@ -21,7 +21,7 @@ Toplevel.__init__(self, parent) self.configure(borderwidth=5) # place dialog below parent if running htest - self.geometry("=%dx%d+%d+%d" % (625, 500, + self.geometry("=%dx%d+%d+%d" % (750, 500, parent.winfo_rootx() + 10, parent.winfo_rooty() + (10 if not _htest else 100))) #elguavas - config placeholders til config stuff completed -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Oct 12 04:08:18 2015 From: python-checkins at python.org (terry.reedy) Date: Mon, 12 Oct 2015 02:08:18 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzIyNzI2?= =?utf-8?q?=3A_Re-activate_config_dialog_help_button_with_some_content_abo?= =?utf-8?q?ut?= Message-ID: <20151012020818.128848.75101@psf.io> https://hg.python.org/cpython/rev/01f83efcc44b changeset: 98702:01f83efcc44b branch: 3.4 parent: 98684:f9820c4724ca user: Terry Jan Reedy date: Sun Oct 11 22:07:31 2015 -0400 summary: Issue #22726: Re-activate config dialog help button with some content about the other buttons and the new IDLE Dark theme. 
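The Issue #22726 patch replaces four near-identical Button(...) blocks with a single loop over (label, command) pairs and wires the new Help button to a text viewer. The following is a minimal, self-contained tkinter sketch of that button-factory pattern; the function name and the callbacks are placeholders for illustration, not the actual idlelib ConfigDialog code.

    import tkinter as tk

    def create_action_buttons(parent, ok, apply_, cancel, help_):
        # Build the Ok/Apply/Cancel/Help row from (label, command) pairs
        # instead of four hand-written Button(...) blocks.
        outer = tk.Frame(parent, pady=2)
        buttons = tk.Frame(outer, pady=2)
        for txt, cmd in (('Ok', ok), ('Apply', apply_),
                         ('Cancel', cancel), ('Help', help_)):
            tk.Button(buttons, text=txt, command=cmd, takefocus=tk.FALSE,
                      padx=6, pady=3).pack(side=tk.LEFT, padx=5)
        tk.Frame(outer, height=2, borderwidth=0).pack(side=tk.TOP)  # space above the row
        buttons.pack(side=tk.BOTTOM)
        return outer

    if __name__ == '__main__':
        root = tk.Tk()
        create_action_buttons(root, root.quit, root.quit, root.quit,
                              lambda: print('help requested')).pack()
        root.mainloop()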
files: Lib/idlelib/configDialog.py | 51 +++++++++++++++--------- Lib/idlelib/textView.py | 2 +- 2 files changed, 33 insertions(+), 20 deletions(-) diff --git a/Lib/idlelib/configDialog.py b/Lib/idlelib/configDialog.py --- a/Lib/idlelib/configDialog.py +++ b/Lib/idlelib/configDialog.py @@ -20,7 +20,9 @@ from idlelib.configSectionNameDialog import GetCfgSectionNameDialog from idlelib.configHelpSourceEdit import GetHelpSourceDialog from idlelib.tabbedpages import TabbedPageSet +from idlelib.textView import view_text from idlelib import macosxSupport + class ConfigDialog(Toplevel): def __init__(self, parent, title='', _htest=False, _utest=False): @@ -85,6 +87,7 @@ self.CreatePageKeys() self.CreatePageGeneral() self.create_action_buttons().pack(side=BOTTOM) + def create_action_buttons(self): if macosxSupport.isAquaTk(): # Changing the default padding on OSX results in unreadable @@ -94,28 +97,18 @@ paddingArgs = {'padx':6, 'pady':3} outer = Frame(self, pady=2) buttons = Frame(outer, pady=2) - self.buttonOk = Button( - buttons, text='Ok', command=self.Ok, - takefocus=FALSE, **paddingArgs) - self.buttonApply = Button( - buttons, text='Apply', command=self.Apply, - takefocus=FALSE, **paddingArgs) - self.buttonCancel = Button( - buttons, text='Cancel', command=self.Cancel, - takefocus=FALSE, **paddingArgs) - self.buttonOk.pack(side=LEFT, padx=5) - self.buttonApply.pack(side=LEFT, padx=5) - self.buttonCancel.pack(side=LEFT, padx=5) -# Comment out Help button creation and packing until implement self.Help -## self.buttonHelp = Button( -## buttons, text='Help', command=self.Help, -## takefocus=FALSE, **paddingArgs) -## self.buttonHelp.pack(side=RIGHT, padx=5) - + for txt, cmd in ( + ('Ok', self.Ok), + ('Apply', self.Apply), + ('Cancel', self.Cancel), + ('Help', self.Help)): + Button(buttons, text=txt, command=cmd, takefocus=FALSE, + **paddingArgs).pack(side=LEFT, padx=5) # add space above buttons Frame(outer, height=2, borderwidth=0).pack(side=TOP) buttons.pack(side=BOTTOM) return outer + def CreatePageFontTab(self): parent = self.parent self.fontSize = StringVar(parent) @@ -1183,7 +1176,27 @@ self.ActivateConfigChanges() def Help(self): - pass + page = self.tabPages._current_page + view_text(self, title='Help for IDLE preferences', + text=help_common+help_pages.get(page, '')) + +help_common = '''\ +When you click either the Apply or Ok buttons, settings in this +dialog that are different from IDLE's default are saved in +a .idlerc directory in your home directory. Except as noted, +hese changes apply to all versions of IDLE installed on this +machine. Some do not take affect until IDLE is restarted. +[Cancel] only cancels changes made since the last save. +''' +help_pages = { + 'Highlighting':''' +Highlighting: +The IDLE Dark color theme is new in Octover 2015. It can only +be used with older IDLE releases if it is saved as a custom +theme, with a different name. +''' +} + class VerticalScrolledFrame(Frame): """A pure Tkinter vertically scrollable frame. 
diff --git a/Lib/idlelib/textView.py b/Lib/idlelib/textView.py --- a/Lib/idlelib/textView.py +++ b/Lib/idlelib/textView.py @@ -21,7 +21,7 @@ Toplevel.__init__(self, parent) self.configure(borderwidth=5) # place dialog below parent if running htest - self.geometry("=%dx%d+%d+%d" % (625, 500, + self.geometry("=%dx%d+%d+%d" % (750, 500, parent.winfo_rootx() + 10, parent.winfo_rooty() + (10 if not _htest else 100))) #elguavas - config placeholders til config stuff completed -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Oct 12 04:08:18 2015 From: python-checkins at python.org (terry.reedy) Date: Mon, 12 Oct 2015 02:08:18 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogSXNzdWUgIzIyNzI2?= =?utf-8?q?=3A_Re-activate_config_dialog_help_button_with_some_content_abo?= =?utf-8?q?ut?= Message-ID: <20151012020818.7238.93270@psf.io> https://hg.python.org/cpython/rev/fd41b05b8227 changeset: 98701:fd41b05b8227 branch: 2.7 parent: 98667:a81b47fb5848 user: Terry Jan Reedy date: Sun Oct 11 22:07:25 2015 -0400 summary: Issue #22726: Re-activate config dialog help button with some content about the other buttons and the new IDLE Dark theme. files: Lib/idlelib/configDialog.py | 51 +++++++++++++++--------- Lib/idlelib/textView.py | 2 +- 2 files changed, 33 insertions(+), 20 deletions(-) diff --git a/Lib/idlelib/configDialog.py b/Lib/idlelib/configDialog.py --- a/Lib/idlelib/configDialog.py +++ b/Lib/idlelib/configDialog.py @@ -18,7 +18,9 @@ from idlelib.configSectionNameDialog import GetCfgSectionNameDialog from idlelib.configHelpSourceEdit import GetHelpSourceDialog from idlelib.tabbedpages import TabbedPageSet +from idlelib.textView import view_text from idlelib import macosxSupport + class ConfigDialog(Toplevel): def __init__(self, parent, title='', _htest=False, _utest=False): @@ -83,6 +85,7 @@ self.CreatePageKeys() self.CreatePageGeneral() self.create_action_buttons().pack(side=BOTTOM) + def create_action_buttons(self): if macosxSupport.isAquaTk(): # Changing the default padding on OSX results in unreadable @@ -92,28 +95,18 @@ paddingArgs = {'padx':6, 'pady':3} outer = Frame(self, pady=2) buttons = Frame(outer, pady=2) - self.buttonOk = Button( - buttons, text='Ok', command=self.Ok, - takefocus=FALSE, **paddingArgs) - self.buttonApply = Button( - buttons, text='Apply', command=self.Apply, - takefocus=FALSE, **paddingArgs) - self.buttonCancel = Button( - buttons, text='Cancel', command=self.Cancel, - takefocus=FALSE, **paddingArgs) - self.buttonOk.pack(side=LEFT, padx=5) - self.buttonApply.pack(side=LEFT, padx=5) - self.buttonCancel.pack(side=LEFT, padx=5) -# Comment out Help button creation and packing until implement self.Help -## self.buttonHelp = Button( -## buttons, text='Help', command=self.Help, -## takefocus=FALSE, **paddingArgs) -## self.buttonHelp.pack(side=RIGHT, padx=5) - + for txt, cmd in ( + ('Ok', self.Ok), + ('Apply', self.Apply), + ('Cancel', self.Cancel), + ('Help', self.Help)): + Button(buttons, text=txt, command=cmd, takefocus=FALSE, + **paddingArgs).pack(side=LEFT, padx=5) # add space above buttons Frame(outer, height=2, borderwidth=0).pack(side=TOP) buttons.pack(side=BOTTOM) return outer + def CreatePageFontTab(self): parent = self.parent self.fontSize = StringVar(parent) @@ -1200,7 +1193,27 @@ self.ActivateConfigChanges() def Help(self): - pass + page = self.tabPages._current_page + view_text(self, title='Help for IDLE preferences', + text=help_common+help_pages.get(page, '')) + +help_common = '''\ +When you click either the Apply 
or Ok buttons, settings in this +dialog that are different from IDLE's default are saved in +a .idlerc directory in your home directory. Except as noted, +hese changes apply to all versions of IDLE installed on this +machine. Some do not take affect until IDLE is restarted. +[Cancel] only cancels changes made since the last save. +''' +help_pages = { + 'Highlighting':''' +Highlighting: +The IDLE Dark color theme is new in Octover 2015. It can only +be used with older IDLE releases if it is saved as a custom +theme, with a different name. +''' +} + class VerticalScrolledFrame(Frame): """A pure Tkinter vertically scrollable frame. diff --git a/Lib/idlelib/textView.py b/Lib/idlelib/textView.py --- a/Lib/idlelib/textView.py +++ b/Lib/idlelib/textView.py @@ -21,7 +21,7 @@ Toplevel.__init__(self, parent) self.configure(borderwidth=5) # place dialog below parent if running htest - self.geometry("=%dx%d+%d+%d" % (625, 500, + self.geometry("=%dx%d+%d+%d" % (750, 500, parent.winfo_rootx() + 10, parent.winfo_rooty() + (10 if not _htest else 100))) #elguavas - config placeholders til config stuff completed -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Oct 12 04:08:18 2015 From: python-checkins at python.org (terry.reedy) Date: Mon, 12 Oct 2015 02:08:18 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Merge_with_3=2E5?= Message-ID: <20151012020818.97700.23150@psf.io> https://hg.python.org/cpython/rev/a19ee47a00dd changeset: 98704:a19ee47a00dd parent: 98700:60f863101378 parent: 98703:5798c14b53de user: Terry Jan Reedy date: Sun Oct 11 22:08:02 2015 -0400 summary: Merge with 3.5 files: Lib/idlelib/configDialog.py | 51 +++++++++++++++--------- Lib/idlelib/textView.py | 2 +- 2 files changed, 33 insertions(+), 20 deletions(-) diff --git a/Lib/idlelib/configDialog.py b/Lib/idlelib/configDialog.py --- a/Lib/idlelib/configDialog.py +++ b/Lib/idlelib/configDialog.py @@ -20,7 +20,9 @@ from idlelib.configSectionNameDialog import GetCfgSectionNameDialog from idlelib.configHelpSourceEdit import GetHelpSourceDialog from idlelib.tabbedpages import TabbedPageSet +from idlelib.textView import view_text from idlelib import macosxSupport + class ConfigDialog(Toplevel): def __init__(self, parent, title='', _htest=False, _utest=False): @@ -85,6 +87,7 @@ self.CreatePageKeys() self.CreatePageGeneral() self.create_action_buttons().pack(side=BOTTOM) + def create_action_buttons(self): if macosxSupport.isAquaTk(): # Changing the default padding on OSX results in unreadable @@ -94,28 +97,18 @@ paddingArgs = {'padx':6, 'pady':3} outer = Frame(self, pady=2) buttons = Frame(outer, pady=2) - self.buttonOk = Button( - buttons, text='Ok', command=self.Ok, - takefocus=FALSE, **paddingArgs) - self.buttonApply = Button( - buttons, text='Apply', command=self.Apply, - takefocus=FALSE, **paddingArgs) - self.buttonCancel = Button( - buttons, text='Cancel', command=self.Cancel, - takefocus=FALSE, **paddingArgs) - self.buttonOk.pack(side=LEFT, padx=5) - self.buttonApply.pack(side=LEFT, padx=5) - self.buttonCancel.pack(side=LEFT, padx=5) -# Comment out Help button creation and packing until implement self.Help -## self.buttonHelp = Button( -## buttons, text='Help', command=self.Help, -## takefocus=FALSE, **paddingArgs) -## self.buttonHelp.pack(side=RIGHT, padx=5) - + for txt, cmd in ( + ('Ok', self.Ok), + ('Apply', self.Apply), + ('Cancel', self.Cancel), + ('Help', self.Help)): + Button(buttons, text=txt, command=cmd, takefocus=FALSE, + 
**paddingArgs).pack(side=LEFT, padx=5) # add space above buttons Frame(outer, height=2, borderwidth=0).pack(side=TOP) buttons.pack(side=BOTTOM) return outer + def CreatePageFontTab(self): parent = self.parent self.fontSize = StringVar(parent) @@ -1183,7 +1176,27 @@ self.ActivateConfigChanges() def Help(self): - pass + page = self.tabPages._current_page + view_text(self, title='Help for IDLE preferences', + text=help_common+help_pages.get(page, '')) + +help_common = '''\ +When you click either the Apply or Ok buttons, settings in this +dialog that are different from IDLE's default are saved in +a .idlerc directory in your home directory. Except as noted, +hese changes apply to all versions of IDLE installed on this +machine. Some do not take affect until IDLE is restarted. +[Cancel] only cancels changes made since the last save. +''' +help_pages = { + 'Highlighting':''' +Highlighting: +The IDLE Dark color theme is new in Octover 2015. It can only +be used with older IDLE releases if it is saved as a custom +theme, with a different name. +''' +} + class VerticalScrolledFrame(Frame): """A pure Tkinter vertically scrollable frame. diff --git a/Lib/idlelib/textView.py b/Lib/idlelib/textView.py --- a/Lib/idlelib/textView.py +++ b/Lib/idlelib/textView.py @@ -21,7 +21,7 @@ Toplevel.__init__(self, parent) self.configure(borderwidth=5) # place dialog below parent if running htest - self.geometry("=%dx%d+%d+%d" % (625, 500, + self.geometry("=%dx%d+%d+%d" % (750, 500, parent.winfo_rootx() + 10, parent.winfo_rooty() + (10 if not _htest else 100))) #elguavas - config placeholders til config stuff completed -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Oct 12 07:34:54 2015 From: python-checkins at python.org (raymond.hettinger) Date: Mon, 12 Oct 2015 05:34:54 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Refactor_the_deque_trim_lo?= =?utf-8?q?gic_to_eliminate_the_two_separate_trim_functions=2E?= Message-ID: <20151012053452.55486.30204@psf.io> https://hg.python.org/cpython/rev/4c9c0eb6e75c changeset: 98705:4c9c0eb6e75c user: Raymond Hettinger date: Sun Oct 11 22:34:48 2015 -0700 summary: Refactor the deque trim logic to eliminate the two separate trim functions. files: Modules/_collectionsmodule.c | 39 ++++++++--------------- 1 files changed, 14 insertions(+), 25 deletions(-) diff --git a/Modules/_collectionsmodule.c b/Modules/_collectionsmodule.c --- a/Modules/_collectionsmodule.c +++ b/Modules/_collectionsmodule.c @@ -276,29 +276,12 @@ * the limit. If it has, we get the size back down to the limit by popping an * item off of the opposite end. The methods that can trigger this are append(), * appendleft(), extend(), and extendleft(). + * + * The macro to check whether a deque needs to be trimmed uses a single + * unsigned test that returns true whenever 0 <= maxlen < Py_SIZE(deque). 
*/ -static void -deque_trim_right(dequeobject *deque) -{ - if (deque->maxlen >= 0 && Py_SIZE(deque) > deque->maxlen) { - PyObject *rv = deque_pop(deque, NULL); - assert(rv != NULL); - assert(Py_SIZE(deque) <= deque->maxlen); - Py_DECREF(rv); - } -} - -static void -deque_trim_left(dequeobject *deque) -{ - if (deque->maxlen >= 0 && Py_SIZE(deque) > deque->maxlen) { - PyObject *rv = deque_popleft(deque, NULL); - assert(rv != NULL); - assert(Py_SIZE(deque) <= deque->maxlen); - Py_DECREF(rv); - } -} +#define NEEDS_TRIM(deque, maxlen) ((size_t)(maxlen) < (size_t)(Py_SIZE(deque))) static PyObject * deque_append(dequeobject *deque, PyObject *item) @@ -319,7 +302,10 @@ Py_INCREF(item); deque->rightindex++; deque->rightblock->data[deque->rightindex] = item; - deque_trim_left(deque); + if (NEEDS_TRIM(deque, deque->maxlen)) { + PyObject *rv = deque_popleft(deque, NULL); + Py_DECREF(rv); + } Py_RETURN_NONE; } @@ -344,7 +330,10 @@ Py_INCREF(item); deque->leftindex--; deque->leftblock->data[deque->leftindex] = item; - deque_trim_right(deque); + if (NEEDS_TRIM(deque, deque->maxlen)) { + PyObject *rv = deque_pop(deque, NULL); + Py_DECREF(rv); + } Py_RETURN_NONE; } @@ -433,7 +422,7 @@ Py_SIZE(deque)++; deque->rightindex++; deque->rightblock->data[deque->rightindex] = item; - if (maxlen >= 0 && Py_SIZE(deque) > maxlen) { + if (NEEDS_TRIM(deque, maxlen)) { PyObject *rv = deque_popleft(deque, NULL); Py_DECREF(rv); } @@ -497,7 +486,7 @@ Py_SIZE(deque)++; deque->leftindex--; deque->leftblock->data[deque->leftindex] = item; - if (maxlen >= 0 && Py_SIZE(deque) > maxlen) { + if (NEEDS_TRIM(deque, maxlen)) { PyObject *rv = deque_pop(deque, NULL); Py_DECREF(rv); } -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Oct 12 07:52:59 2015 From: python-checkins at python.org (raymond.hettinger) Date: Mon, 12 Oct 2015 05:52:59 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Minor_fixup=2E__maxlen_is_?= =?utf-8?q?already_known=2E?= Message-ID: <20151012055258.451.80289@psf.io> https://hg.python.org/cpython/rev/58e2736a57e6 changeset: 98706:58e2736a57e6 user: Raymond Hettinger date: Sun Oct 11 22:52:54 2015 -0700 summary: Minor fixup. maxlen is already known. 
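Both deque changesets preserve one invariant: a bounded deque never holds more than maxlen items, and when an append pushes it past that bound, one item is dropped from the opposite end. A short Python-level illustration of the behaviour the trimming logic maintains:

    from collections import deque

    d = deque('abcde', maxlen=3)   # construction trims the excess from the left
    print(d)                       # deque(['c', 'd', 'e'], maxlen=3)

    d.append('f')                  # over the bound: 'c' is dropped on the left
    d.appendleft('b')              # over the bound: 'f' is dropped on the right
    print(d)                       # deque(['b', 'd', 'e'], maxlen=3)

    d2 = deque()                   # the common no-argument case the fast path targets
    print(d2.maxlen)               # None: unbounded, never trimmed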
files: Modules/_collectionsmodule.c | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Modules/_collectionsmodule.c b/Modules/_collectionsmodule.c --- a/Modules/_collectionsmodule.c +++ b/Modules/_collectionsmodule.c @@ -399,7 +399,7 @@ if (it == NULL) return NULL; - if (deque->maxlen == 0) + if (maxlen == 0) return consume_iterator(it); iternext = *Py_TYPE(it)->tp_iternext; @@ -463,7 +463,7 @@ if (it == NULL) return NULL; - if (deque->maxlen == 0) + if (maxlen == 0) return consume_iterator(it); iternext = *Py_TYPE(it)->tp_iternext; -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Oct 12 08:03:54 2015 From: python-checkins at python.org (benjamin.peterson) Date: Mon, 12 Oct 2015 06:03:54 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E4=29=3A_actually_link_?= =?utf-8?q?to_the_version_attributes_documentation?= Message-ID: <20151012060353.97710.45318@psf.io> https://hg.python.org/cpython/rev/b07ac3c6bb98 changeset: 98708:b07ac3c6bb98 branch: 3.4 parent: 98702:01f83efcc44b user: Benjamin Peterson date: Sun Oct 11 23:03:22 2015 -0700 summary: actually link to the version attributes documentation files: Doc/faq/general.rst | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Doc/faq/general.rst b/Doc/faq/general.rst --- a/Doc/faq/general.rst +++ b/Doc/faq/general.rst @@ -151,8 +151,8 @@ next minor version, which becomes the "a0" version, e.g. "2.4a0". -See also the documentation for ``sys.version``, ``sys.hexversion``, and -``sys.version_info``. +See also the documentation for :data:`sys.version`, :data:`sys.hexversion`, and +:data:`sys.version_info`. How do I obtain a copy of the Python source? -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Oct 12 08:03:54 2015 From: python-checkins at python.org (benjamin.peterson) Date: Mon, 12 Oct 2015 06:03:54 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_merge_3=2E4?= Message-ID: <20151012060353.3279.6649@psf.io> https://hg.python.org/cpython/rev/272bb3f06da8 changeset: 98709:272bb3f06da8 branch: 3.5 parent: 98703:5798c14b53de parent: 98708:b07ac3c6bb98 user: Benjamin Peterson date: Sun Oct 11 23:03:41 2015 -0700 summary: merge 3.4 files: Doc/faq/general.rst | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Doc/faq/general.rst b/Doc/faq/general.rst --- a/Doc/faq/general.rst +++ b/Doc/faq/general.rst @@ -151,8 +151,8 @@ next minor version, which becomes the "a0" version, e.g. "2.4a0". -See also the documentation for ``sys.version``, ``sys.hexversion``, and -``sys.version_info``. +See also the documentation for :data:`sys.version`, :data:`sys.hexversion`, and +:data:`sys.version_info`. How do I obtain a copy of the Python source? 
-- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Oct 12 08:03:54 2015 From: python-checkins at python.org (benjamin.peterson) Date: Mon, 12 Oct 2015 06:03:54 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=282=2E7=29=3A_actually_link_?= =?utf-8?q?to_the_version_attributes_documentation?= Message-ID: <20151012060353.128832.96919@psf.io> https://hg.python.org/cpython/rev/4188cd5dc0c5 changeset: 98707:4188cd5dc0c5 branch: 2.7 parent: 98701:fd41b05b8227 user: Benjamin Peterson date: Sun Oct 11 23:03:22 2015 -0700 summary: actually link to the version attributes documentation files: Doc/faq/general.rst | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Doc/faq/general.rst b/Doc/faq/general.rst --- a/Doc/faq/general.rst +++ b/Doc/faq/general.rst @@ -151,8 +151,8 @@ next minor version, which becomes the "a0" version, e.g. "2.4a0". -See also the documentation for ``sys.version``, ``sys.hexversion``, and -``sys.version_info``. +See also the documentation for :data:`sys.version`, :data:`sys.hexversion`, and +:data:`sys.version_info`. How do I obtain a copy of the Python source? -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Oct 12 08:03:55 2015 From: python-checkins at python.org (benjamin.peterson) Date: Mon, 12 Oct 2015 06:03:55 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?b?KTogbWVyZ2UgMy41?= Message-ID: <20151012060354.473.25049@psf.io> https://hg.python.org/cpython/rev/e910ea39253f changeset: 98710:e910ea39253f parent: 98706:58e2736a57e6 parent: 98709:272bb3f06da8 user: Benjamin Peterson date: Sun Oct 11 23:03:47 2015 -0700 summary: merge 3.5 files: Doc/faq/general.rst | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Doc/faq/general.rst b/Doc/faq/general.rst --- a/Doc/faq/general.rst +++ b/Doc/faq/general.rst @@ -151,8 +151,8 @@ next minor version, which becomes the "a0" version, e.g. "2.4a0". -See also the documentation for ``sys.version``, ``sys.hexversion``, and -``sys.version_info``. +See also the documentation for :data:`sys.version`, :data:`sys.hexversion`, and +:data:`sys.version_info`. How do I obtain a copy of the Python source? 
-- Repository URL: https://hg.python.org/cpython From solipsis at pitrou.net Mon Oct 12 10:43:44 2015 From: solipsis at pitrou.net (solipsis at pitrou.net) Date: Mon, 12 Oct 2015 08:43:44 +0000 Subject: [Python-checkins] Daily reference leaks (4c9c0eb6e75c): sum=18252 Message-ID: <20151012084343.7258.99792@psf.io> results for 4c9c0eb6e75c on branch "default" -------------------------------------------- test_capi leaked [1599, 1599, 1599] references, sum=4797 test_capi leaked [387, 389, 389] memory blocks, sum=1165 test_format leaked [62, 62, 62] references, sum=186 test_format leaked [62, 62, 62] memory blocks, sum=186 test_functools leaked [0, 2, 2] memory blocks, sum=4 test_threading leaked [3196, 3196, 3196] references, sum=9588 test_threading leaked [774, 776, 776] memory blocks, sum=2326 Command line was: ['./python', '-m', 'test.regrtest', '-uall', '-R', '3:3:/home/psf-users/antoine/refleaks/reflogP5sdng', '--timeout', '7200'] From python-checkins at python.org Mon Oct 12 13:30:36 2015 From: python-checkins at python.org (victor.stinner) Date: Mon, 12 Oct 2015 11:30:36 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Relax_=5FPyBytesWriter_API?= Message-ID: <20151012113036.97710.54073@psf.io> https://hg.python.org/cpython/rev/34566b4fe187 changeset: 98711:34566b4fe187 user: Victor Stinner date: Mon Oct 12 13:12:54 2015 +0200 summary: Relax _PyBytesWriter API Don't require _PyBytesWriter pointer to be a "char *". Same change for _PyBytesWriter_WriteBytes() parameter. For example, binascii uses "unsigned char*". files: Include/bytesobject.h | 14 +++++++------- Objects/bytesobject.c | 15 +++++++-------- 2 files changed, 14 insertions(+), 15 deletions(-) diff --git a/Include/bytesobject.h b/Include/bytesobject.h --- a/Include/bytesobject.h +++ b/Include/bytesobject.h @@ -156,7 +156,7 @@ Return a bytes object. Raise an exception and return NULL on error. */ PyAPI_FUNC(PyObject *) _PyBytesWriter_Finish(_PyBytesWriter *writer, - char *str); + void *str); /* Deallocate memory of a writer (clear its internal buffer). */ PyAPI_FUNC(void) _PyBytesWriter_Dealloc(_PyBytesWriter *writer); @@ -164,22 +164,22 @@ /* Allocate the buffer to write size bytes. Return the pointer to the beginning of buffer data. Raise an exception and return NULL on error. */ -PyAPI_FUNC(char*) _PyBytesWriter_Alloc(_PyBytesWriter *writer, +PyAPI_FUNC(void*) _PyBytesWriter_Alloc(_PyBytesWriter *writer, Py_ssize_t size); /* Add *size* bytes to the buffer. str is the current pointer inside the buffer. Return the updated current pointer inside the buffer. Raise an exception and return NULL on error. */ -PyAPI_FUNC(char*) _PyBytesWriter_Prepare(_PyBytesWriter *writer, - char *str, +PyAPI_FUNC(void*) _PyBytesWriter_Prepare(_PyBytesWriter *writer, + void *str, Py_ssize_t size); /* Write bytes. Raise an exception and return NULL on error. */ -PyAPI_FUNC(char*) _PyBytesWriter_WriteBytes(_PyBytesWriter *writer, - char *str, - char *bytes, +PyAPI_FUNC(void*) _PyBytesWriter_WriteBytes(_PyBytesWriter *writer, + void *str, + const void *bytes, Py_ssize_t size); #endif /* Py_LIMITED_API */ diff --git a/Objects/bytesobject.c b/Objects/bytesobject.c --- a/Objects/bytesobject.c +++ b/Objects/bytesobject.c @@ -3923,8 +3923,8 @@ #endif } -char* -_PyBytesWriter_Prepare(_PyBytesWriter *writer, char *str, Py_ssize_t size) +void* +_PyBytesWriter_Prepare(_PyBytesWriter *writer, void *str, Py_ssize_t size) { Py_ssize_t allocated, pos; @@ -3992,7 +3992,7 @@ /* Allocate the buffer to write size bytes. 
Return the pointer to the beginning of buffer data. Raise an exception and return NULL on error. */ -char* +void* _PyBytesWriter_Alloc(_PyBytesWriter *writer, Py_ssize_t size) { /* ensure that _PyBytesWriter_Alloc() is only called once */ @@ -4011,7 +4011,7 @@ } PyObject * -_PyBytesWriter_Finish(_PyBytesWriter *writer, char *str) +_PyBytesWriter_Finish(_PyBytesWriter *writer, void *str) { Py_ssize_t pos; PyObject *result; @@ -4033,13 +4033,12 @@ else { result = PyBytes_FromStringAndSize(writer->small_buffer, pos); } - return result; } -char* -_PyBytesWriter_WriteBytes(_PyBytesWriter *writer, char *str, - char *bytes, Py_ssize_t size) +void* +_PyBytesWriter_WriteBytes(_PyBytesWriter *writer, void *str, + const void *bytes, Py_ssize_t size) { str = _PyBytesWriter_Prepare(writer, str, size); if (str == NULL) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Oct 12 13:40:48 2015 From: python-checkins at python.org (victor.stinner) Date: Mon, 12 Oct 2015 11:40:48 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Writer_APIs=3A_use_empty_s?= =?utf-8?q?tring_singletons?= Message-ID: <20151012113036.70986.95638@psf.io> https://hg.python.org/cpython/rev/f33433d9c163 changeset: 98712:f33433d9c163 user: Victor Stinner date: Mon Oct 12 13:29:43 2015 +0200 summary: Writer APIs: use empty string singletons Modify _PyBytesWriter_Finish() and _PyUnicodeWriter_Finish() to return the empty bytes/Unicode string if the string is empty. files: Objects/bytesobject.c | 23 +++++++++++++-------- Objects/unicodeobject.c | 31 ++++++++++++++++++---------- 2 files changed, 34 insertions(+), 20 deletions(-) diff --git a/Objects/bytesobject.c b/Objects/bytesobject.c --- a/Objects/bytesobject.c +++ b/Objects/bytesobject.c @@ -4019,19 +4019,24 @@ _PyBytesWriter_CheckConsistency(writer, str); pos = _PyBytesWriter_GetPos(writer, str); - if (!writer->use_small_buffer) { + if (pos == 0) { + Py_CLEAR(writer->buffer); + /* Get the empty byte string singleton */ + result = PyBytes_FromStringAndSize(NULL, 0); + } + else if (writer->use_small_buffer) { + result = PyBytes_FromStringAndSize(writer->small_buffer, pos); + } + else { + result = writer->buffer; + writer->buffer = NULL; + if (pos != writer->allocated) { - if (_PyBytes_Resize(&writer->buffer, pos)) { - assert(writer->buffer == NULL); + if (_PyBytes_Resize(&result, pos)) { + assert(result == NULL); return NULL; } } - - result = writer->buffer; - writer->buffer = NULL; - } - else { - result = PyBytes_FromStringAndSize(writer->small_buffer, pos); } return result; } diff --git a/Objects/unicodeobject.c b/Objects/unicodeobject.c --- a/Objects/unicodeobject.c +++ b/Objects/unicodeobject.c @@ -13715,17 +13715,26 @@ assert(PyUnicode_GET_LENGTH(str) == writer->pos); return str; } - if (PyUnicode_GET_LENGTH(writer->buffer) != writer->pos) { - PyObject *newbuffer; - newbuffer = resize_compact(writer->buffer, writer->pos); - if (newbuffer == NULL) { - Py_CLEAR(writer->buffer); - return NULL; - } - writer->buffer = newbuffer; - } - str = writer->buffer; - writer->buffer = NULL; + if (writer->pos == 0) { + Py_CLEAR(writer->buffer); + + /* Get the empty Unicode string singleton ('') */ + _Py_INCREF_UNICODE_EMPTY(); + str = unicode_empty; + } + else { + str = writer->buffer; + writer->buffer = NULL; + + if (PyUnicode_GET_LENGTH(str) != writer->pos) { + PyObject *str2; + str2 = resize_compact(str, writer->pos); + if (str2 == NULL) + return NULL; + str = str2; + } + } + assert(_PyUnicode_CheckConsistency(str, 1)); return unicode_result_ready(str); 
} -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Oct 12 13:58:00 2015 From: python-checkins at python.org (victor.stinner) Date: Mon, 12 Oct 2015 11:58:00 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Fix_compilation_error_in_?= =?utf-8?q?=5FPyBytesWriter=5FWriteBytes=28=29_on_Windows?= Message-ID: <20151012115754.2665.70145@psf.io> https://hg.python.org/cpython/rev/b45083aac800 changeset: 98713:b45083aac800 user: Victor Stinner date: Mon Oct 12 13:57:47 2015 +0200 summary: Fix compilation error in _PyBytesWriter_WriteBytes() on Windows files: Objects/bytesobject.c | 4 +++- 1 files changed, 3 insertions(+), 1 deletions(-) diff --git a/Objects/bytesobject.c b/Objects/bytesobject.c --- a/Objects/bytesobject.c +++ b/Objects/bytesobject.c @@ -4042,9 +4042,11 @@ } void* -_PyBytesWriter_WriteBytes(_PyBytesWriter *writer, void *str, +_PyBytesWriter_WriteBytes(_PyBytesWriter *writer, void *ptr, const void *bytes, Py_ssize_t size) { + char *str = (char *)ptr; + str = _PyBytesWriter_Prepare(writer, str, size); if (str == NULL) return NULL; -- Repository URL: https://hg.python.org/cpython From lp_benchmark_robot at intel.com Mon Oct 12 14:18:04 2015 From: lp_benchmark_robot at intel.com (lp_benchmark_robot at intel.com) Date: Mon, 12 Oct 2015 13:18:04 +0100 Subject: [Python-checkins] Benchmark Results for Python Default 2015-10-12 Message-ID: Results for project python_default-nightly, build date 2015-10-12 03:02:06 commit: a19ee47a00ddd209c5748a39819c189b0c7e02e7 revision date: 2015-10-12 02:08:02 +0000 environment: Haswell-EP cpu: Intel(R) Xeon(R) CPU E5-2699 v3 @ 2.30GHz 2x18 cores, stepping 2, LLC 45 MB mem: 128 GB os: CentOS 7.1 kernel: Linux 3.10.0-229.4.2.el7.x86_64 Baseline results were generated using release v3.4.3, with hash b4cbecbc0781e89a309d03b60a1f75f8499250e6 from 2015-02-25 12:15:33+00:00 ------------------------------------------------------------------------------------------ benchmark relative change since change since current rev with std_dev* last run v3.4.3 regrtest PGO ------------------------------------------------------------------------------------------ :-) django_v2 0.44597% 1.08343% 9.47930% 13.99395% :-| pybench 0.21832% 0.08591% -1.83113% 8.19470% :-( regex_v8 2.74145% 3.24744% -4.40579% 4.19423% :-| nbody 0.08193% 0.51457% 0.06086% 8.79745% :-| json_dump_v2 0.25511% -0.48646% -1.51492% 7.42596% :-| normal_startup 0.76151% 0.12445% -0.02200% 5.35233% ------------------------------------------------------------------------------------------ Note: Benchmark results are measured in seconds. * Relative Standard Deviation (Standard Deviation/Average) Our lab does a nightly source pull and build of the Python project and measures performance changes against the previous stable version and the previous nightly measurement. This is provided as a service to the community so that quality issues with current hardware can be identified quickly. Intel technologies' features and benefits depend on system configuration and may require enabled hardware, software or service activation. Performance varies depending on system configuration. 
From lp_benchmark_robot at intel.com Mon Oct 12 14:21:59 2015 From: lp_benchmark_robot at intel.com (lp_benchmark_robot at intel.com) Date: Mon, 12 Oct 2015 13:21:59 +0100 Subject: [Python-checkins] Benchmark Results for Python 2.7 2015-10-12 Message-ID: <9809972c-556c-421f-a1ea-2a11e78b3273@irsmsx102.ger.corp.intel.com> Results for project python_2.7-nightly, build date 2015-10-12 03:48:58 commit: fd41b05b8227dd03f3200946ef6ba6b1fa674720 revision date: 2015-10-12 02:07:25 +0000 environment: Haswell-EP cpu: Intel(R) Xeon(R) CPU E5-2699 v3 @ 2.30GHz 2x18 cores, stepping 2, LLC 45 MB mem: 128 GB os: CentOS 7.1 kernel: Linux 3.10.0-229.4.2.el7.x86_64 Baseline results were generated using release v2.7.10, with hash 15c95b7d81dcf821daade360741e00714667653f from 2015-05-23 16:02:14+00:00 ------------------------------------------------------------------------------------------ benchmark relative change since change since current rev with std_dev* last run v2.7.10 regrtest PGO ------------------------------------------------------------------------------------------ :-) django_v2 0.42583% 1.50312% 4.13064% 10.88535% :-) pybench 0.15775% 0.10465% 6.87779% 6.51140% :-| regex_v8 1.07930% -0.04742% -1.96257% 8.18956% :-) nbody 0.20862% 0.53349% 8.80361% 3.77290% :-) json_dump_v2 0.30450% 0.84957% 3.76523% 14.16716% :-( normal_startup 2.02193% -0.71677% -2.17892% 3.28828% :-| ssbench 1.09077% 0.05315% 1.23948% 2.85630% ------------------------------------------------------------------------------------------ Note: Benchmark results for ssbench are measured in requests/second while all other are measured in seconds. * Relative Standard Deviation (Standard Deviation/Average) Our lab does a nightly source pull and build of the Python project and measures performance changes against the previous stable version and the previous nightly measurement. This is provided as a service to the community so that quality issues with current hardware can be identified quickly. Intel technologies' features and benefits depend on system configuration and may require enabled hardware, software or service activation. Performance varies depending on system configuration. From python-checkins at python.org Mon Oct 12 14:38:32 2015 From: python-checkins at python.org (victor.stinner) Date: Mon, 12 Oct 2015 12:38:32 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2324164=3A_Fix_test?= =?utf-8?q?=5Fpyclbr?= Message-ID: <20151012123832.3299.5406@psf.io> https://hg.python.org/cpython/rev/288953a787ce changeset: 98714:288953a787ce user: Victor Stinner date: Mon Oct 12 14:38:24 2015 +0200 summary: Issue #24164: Fix test_pyclbr Ignore pickle.partial symbol which comes from functools.partial. 
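For context on the test_pyclbr fix: pyclbr builds its report by parsing a module's source rather than executing it, and the regression test compares that report against the imported module, so names a module only acquires by importing them (here pickle's partial, pulled in from functools) show up as mismatches and go on the ignore list. A small sketch of the API the test drives, assuming a standard Lib/pickle.py on the search path:

    import pyclbr

    # Map of top-level names to pyclbr Class/Function descriptors for pickle.
    symbols = pyclbr.readmodule_ex('pickle')
    for name in sorted(symbols)[:10]:
        print(name, type(symbols[name]).__name__)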
files: Lib/test/test_pyclbr.py | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Lib/test/test_pyclbr.py b/Lib/test/test_pyclbr.py --- a/Lib/test/test_pyclbr.py +++ b/Lib/test/test_pyclbr.py @@ -156,7 +156,7 @@ # These were once about the 10 longest modules cm('random', ignore=('Random',)) # from _random import Random as CoreGenerator cm('cgi', ignore=('log',)) # set with = in module - cm('pickle') + cm('pickle', ignore=('partial',)) cm('aifc', ignore=('openfp', '_aifc_params')) # set with = in module cm('sre_parse', ignore=('dump', 'groups')) # from sre_constants import *; property cm('pdb') -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Oct 12 22:39:24 2015 From: python-checkins at python.org (victor.stinner) Date: Mon, 12 Oct 2015 20:39:24 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2325353=3A_Optimize?= =?utf-8?q?_unicode_escape_and_raw_unicode_escape_encoders_to_use?= Message-ID: <20151012203922.3285.66659@psf.io> https://hg.python.org/cpython/rev/8e27f8398a4f changeset: 98715:8e27f8398a4f user: Victor Stinner date: Mon Oct 12 22:36:57 2015 +0200 summary: Issue #25353: Optimize unicode escape and raw unicode escape encoders to use the new _PyBytesWriter API. files: Modules/_pickle.c | 48 ++++++----- Objects/unicodeobject.c | 119 ++++++++++++++++----------- 2 files changed, 98 insertions(+), 69 deletions(-) diff --git a/Modules/_pickle.c b/Modules/_pickle.c --- a/Modules/_pickle.c +++ b/Modules/_pickle.c @@ -2110,38 +2110,35 @@ static PyObject * raw_unicode_escape(PyObject *obj) { - PyObject *repr; char *p; Py_ssize_t i, size; - size_t expandsize; void *data; unsigned int kind; + _PyBytesWriter writer; if (PyUnicode_READY(obj)) return NULL; + _PyBytesWriter_Init(&writer); + size = PyUnicode_GET_LENGTH(obj); data = PyUnicode_DATA(obj); kind = PyUnicode_KIND(obj); - if (kind == PyUnicode_4BYTE_KIND) - expandsize = 10; - else - expandsize = 6; - - if ((size_t)size > (size_t)PY_SSIZE_T_MAX / expandsize) - return PyErr_NoMemory(); - repr = PyBytes_FromStringAndSize(NULL, expandsize * size); - if (repr == NULL) - return NULL; - if (size == 0) - return repr; - assert(Py_REFCNT(repr) == 1); - - p = PyBytes_AS_STRING(repr); + + p = _PyBytesWriter_Alloc(&writer, size); + if (p == NULL) + goto error; + writer.overallocate = 1; + for (i=0; i < size; i++) { Py_UCS4 ch = PyUnicode_READ(kind, data, i); /* Map 32-bit characters to '\Uxxxxxxxx' */ if (ch >= 0x10000) { + /* -1: substract 1 preallocated byte */ + p = _PyBytesWriter_Prepare(&writer, p, 10-1); + if (p == NULL) + goto error; + *p++ = '\\'; *p++ = 'U'; *p++ = Py_hexdigits[(ch >> 28) & 0xf]; @@ -2153,8 +2150,13 @@ *p++ = Py_hexdigits[(ch >> 4) & 0xf]; *p++ = Py_hexdigits[ch & 15]; } - /* Map 16-bit characters to '\uxxxx' */ + /* Map 16-bit characters, '\\' and '\n' to '\uxxxx' */ else if (ch >= 256 || ch == '\\' || ch == '\n') { + /* -1: substract 1 preallocated byte */ + p = _PyBytesWriter_Prepare(&writer, p, 6-1); + if (p == NULL) + goto error; + *p++ = '\\'; *p++ = 'u'; *p++ = Py_hexdigits[(ch >> 12) & 0xf]; @@ -2166,10 +2168,12 @@ else *p++ = (char) ch; } - size = p - PyBytes_AS_STRING(repr); - if (_PyBytes_Resize(&repr, size) < 0) - return NULL; - return repr; + + return _PyBytesWriter_Finish(&writer, p); + +error: + _PyBytesWriter_Dealloc(&writer); + return NULL; } static int diff --git a/Objects/unicodeobject.c b/Objects/unicodeobject.c --- a/Objects/unicodeobject.c +++ b/Objects/unicodeobject.c @@ -6052,11 +6052,10 @@ PyUnicode_AsUnicodeEscapeString(PyObject 
*unicode) { Py_ssize_t i, len; - PyObject *repr; char *p; int kind; void *data; - Py_ssize_t expandsize = 0; + _PyBytesWriter writer; /* Initial allocation is based on the longest-possible character escape. @@ -6072,35 +6071,28 @@ } if (PyUnicode_READY(unicode) == -1) return NULL; + + _PyBytesWriter_Init(&writer); + len = PyUnicode_GET_LENGTH(unicode); kind = PyUnicode_KIND(unicode); data = PyUnicode_DATA(unicode); - switch (kind) { - case PyUnicode_1BYTE_KIND: expandsize = 4; break; - case PyUnicode_2BYTE_KIND: expandsize = 6; break; - case PyUnicode_4BYTE_KIND: expandsize = 10; break; - } - - if (len == 0) - return PyBytes_FromStringAndSize(NULL, 0); - - if (len > (PY_SSIZE_T_MAX - 2 - 1) / expandsize) - return PyErr_NoMemory(); - - repr = PyBytes_FromStringAndSize(NULL, - 2 - + expandsize*len - + 1); - if (repr == NULL) - return NULL; - - p = PyBytes_AS_STRING(repr); + + p = _PyBytesWriter_Alloc(&writer, len); + if (p == NULL) + goto error; + writer.overallocate = 1; for (i = 0; i < len; i++) { Py_UCS4 ch = PyUnicode_READ(kind, data, i); /* Escape backslashes */ if (ch == '\\') { + /* -1: substract 1 preallocated byte */ + p = _PyBytesWriter_Prepare(&writer, p, 2-1); + if (p == NULL) + goto error; + *p++ = '\\'; *p++ = (char) ch; continue; @@ -6109,6 +6101,11 @@ /* Map 21-bit characters to '\U00xxxxxx' */ else if (ch >= 0x10000) { assert(ch <= MAX_UNICODE); + + p = _PyBytesWriter_Prepare(&writer, p, 10-1); + if (p == NULL) + goto error; + *p++ = '\\'; *p++ = 'U'; *p++ = Py_hexdigits[(ch >> 28) & 0x0000000F]; @@ -6124,6 +6121,10 @@ /* Map 16-bit characters to '\uxxxx' */ if (ch >= 256) { + p = _PyBytesWriter_Prepare(&writer, p, 6-1); + if (p == NULL) + goto error; + *p++ = '\\'; *p++ = 'u'; *p++ = Py_hexdigits[(ch >> 12) & 0x000F]; @@ -6134,20 +6135,37 @@ /* Map special whitespace to '\t', \n', '\r' */ else if (ch == '\t') { + p = _PyBytesWriter_Prepare(&writer, p, 2-1); + if (p == NULL) + goto error; + *p++ = '\\'; *p++ = 't'; } else if (ch == '\n') { + p = _PyBytesWriter_Prepare(&writer, p, 2-1); + if (p == NULL) + goto error; + *p++ = '\\'; *p++ = 'n'; } else if (ch == '\r') { + p = _PyBytesWriter_Prepare(&writer, p, 2-1); + if (p == NULL) + goto error; + *p++ = '\\'; *p++ = 'r'; } /* Map non-printable US ASCII to '\xhh' */ else if (ch < ' ' || ch >= 0x7F) { + /* -1: substract 1 preallocated byte */ + p = _PyBytesWriter_Prepare(&writer, p, 4-1); + if (p == NULL) + goto error; + *p++ = '\\'; *p++ = 'x'; *p++ = Py_hexdigits[(ch >> 4) & 0x000F]; @@ -6159,10 +6177,11 @@ *p++ = (char) ch; } - assert(p - PyBytes_AS_STRING(repr) > 0); - if (_PyBytes_Resize(&repr, p - PyBytes_AS_STRING(repr)) < 0) - return NULL; - return repr; + return _PyBytesWriter_Finish(&writer, p); + +error: + _PyBytesWriter_Dealloc(&writer); + return NULL; } PyObject * @@ -6291,13 +6310,12 @@ PyObject * PyUnicode_AsRawUnicodeEscapeString(PyObject *unicode) { - PyObject *repr; char *p; - char *q; - Py_ssize_t expandsize, pos; + Py_ssize_t pos; int kind; void *data; Py_ssize_t len; + _PyBytesWriter writer; if (!PyUnicode_Check(unicode)) { PyErr_BadArgument(); @@ -6305,28 +6323,29 @@ } if (PyUnicode_READY(unicode) == -1) return NULL; + + _PyBytesWriter_Init(&writer); + kind = PyUnicode_KIND(unicode); data = PyUnicode_DATA(unicode); len = PyUnicode_GET_LENGTH(unicode); - /* 4 byte characters can take up 10 bytes, 2 byte characters can take up 6 - bytes, and 1 byte characters 4. 
*/ - expandsize = kind * 2 + 2; - - if (len > PY_SSIZE_T_MAX / expandsize) - return PyErr_NoMemory(); - - repr = PyBytes_FromStringAndSize(NULL, expandsize * len); - if (repr == NULL) - return NULL; - if (len == 0) - return repr; - - p = q = PyBytes_AS_STRING(repr); + + p = _PyBytesWriter_Alloc(&writer, len); + if (p == NULL) + goto error; + writer.overallocate = 1; + for (pos = 0; pos < len; pos++) { Py_UCS4 ch = PyUnicode_READ(kind, data, pos); /* Map 32-bit characters to '\Uxxxxxxxx' */ if (ch >= 0x10000) { assert(ch <= MAX_UNICODE); + + /* -1: substract 1 preallocated byte */ + p = _PyBytesWriter_Prepare(&writer, p, 10-1); + if (p == NULL) + goto error; + *p++ = '\\'; *p++ = 'U'; *p++ = Py_hexdigits[(ch >> 28) & 0xf]; @@ -6340,6 +6359,11 @@ } /* Map 16-bit characters to '\uxxxx' */ else if (ch >= 256) { + /* -1: substract 1 preallocated byte */ + p = _PyBytesWriter_Prepare(&writer, p, 6-1); + if (p == NULL) + goto error; + *p++ = '\\'; *p++ = 'u'; *p++ = Py_hexdigits[(ch >> 12) & 0xf]; @@ -6352,10 +6376,11 @@ *p++ = (char) ch; } - assert(p > q); - if (_PyBytes_Resize(&repr, p - q) < 0) - return NULL; - return repr; + return _PyBytesWriter_Finish(&writer, p); + +error: + _PyBytesWriter_Dealloc(&writer); + return NULL; } PyObject * -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Oct 12 23:37:44 2015 From: python-checkins at python.org (victor.stinner) Date: Mon, 12 Oct 2015 21:37:44 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?b?KTogTWVyZ2UgMy41ICh0ZXN0X2VpbnRyKQ==?= Message-ID: <20151012213740.18368.13958@psf.io> https://hg.python.org/cpython/rev/370af83da32c changeset: 98717:370af83da32c parent: 98715:8e27f8398a4f parent: 98716:605eda657884 user: Victor Stinner date: Mon Oct 12 23:37:31 2015 +0200 summary: Merge 3.5 (test_eintr) files: Lib/test/eintrdata/eintr_tester.py | 10 +++++----- 1 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Lib/test/eintrdata/eintr_tester.py b/Lib/test/eintrdata/eintr_tester.py --- a/Lib/test/eintrdata/eintr_tester.py +++ b/Lib/test/eintrdata/eintr_tester.py @@ -66,11 +66,6 @@ if hasattr(faulthandler, 'cancel_dump_traceback_later'): faulthandler.cancel_dump_traceback_later() - @classmethod - def _sleep(cls): - # default sleep time - time.sleep(cls.sleep_time) - def subprocess(self, *args, **kw): cmd_args = (sys.executable, '-c') + args return subprocess.Popen(cmd_args, **kw) @@ -382,6 +377,11 @@ @unittest.skipUnless(hasattr(signal, 'sigwaitinfo'), 'need signal.sigwaitinfo()') def test_sigwaitinfo(self): + # Issue #25277: The sleep is a weak synchronization between the parent + # and the child process. If the sleep is too low, the test hangs on + # slow or highly loaded systems. + self.sleep_time = 2.0 + signum = signal.SIGUSR1 pid = os.getpid() -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Oct 12 23:37:45 2015 From: python-checkins at python.org (victor.stinner) Date: Mon, 12 Oct 2015 21:37:45 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy41KTogSXNzdWUgIzI1Mjc3?= =?utf-8?q?=3A_Use_a_longer_sleep_in_test=5Feintr_to_reduce_the_risk_of_ra?= =?utf-8?q?ce?= Message-ID: <20151012213740.55476.41732@psf.io> https://hg.python.org/cpython/rev/605eda657884 changeset: 98716:605eda657884 branch: 3.5 parent: 98709:272bb3f06da8 user: Victor Stinner date: Mon Oct 12 23:37:02 2015 +0200 summary: Issue #25277: Use a longer sleep in test_eintr to reduce the risk of race condition in test_eintr. 
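The commit message calls the sleep a weak synchronization between the parent and the child. Below is a stripped-down, POSIX-only sketch of the pattern the sigwaitinfo test builds on (the SIGALRM/EINTR machinery of the real test is omitted): the child sleeps to give the parent time to reach the blocking sigwaitinfo() call before delivering the signal.

    import os
    import signal
    import time

    signum = signal.SIGUSR1
    # Block the signal so it stays pending until sigwaitinfo() consumes it.
    signal.pthread_sigmask(signal.SIG_BLOCK, [signum])

    pid = os.fork()
    if pid == 0:
        # Child: sleep long enough for the parent to reach sigwaitinfo(),
        # then deliver the signal (the patch raises this sleep to 2.0 seconds).
        time.sleep(2.0)
        os.kill(os.getppid(), signum)
        os._exit(0)

    info = signal.sigwaitinfo([signum])   # parent blocks here until SIGUSR1 arrives
    print('received signal', info.si_signo)
    os.waitpid(pid, 0)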
files: Lib/test/eintrdata/eintr_tester.py | 10 +++++----- 1 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Lib/test/eintrdata/eintr_tester.py b/Lib/test/eintrdata/eintr_tester.py --- a/Lib/test/eintrdata/eintr_tester.py +++ b/Lib/test/eintrdata/eintr_tester.py @@ -59,11 +59,6 @@ cls.stop_alarm() signal.signal(signal.SIGALRM, cls.orig_handler) - @classmethod - def _sleep(cls): - # default sleep time - time.sleep(cls.sleep_time) - def subprocess(self, *args, **kw): cmd_args = (sys.executable, '-c') + args return subprocess.Popen(cmd_args, **kw) @@ -375,6 +370,11 @@ @unittest.skipUnless(hasattr(signal, 'sigwaitinfo'), 'need signal.sigwaitinfo()') def test_sigwaitinfo(self): + # Issue #25277: The sleep is a weak synchronization between the parent + # and the child process. If the sleep is too low, the test hangs on + # slow or highly loaded systems. + self.sleep_time = 2.0 + signum = signal.SIGUSR1 pid = os.getpid() -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Oct 13 00:16:34 2015 From: python-checkins at python.org (victor.stinner) Date: Mon, 12 Oct 2015 22:16:34 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy41KTogc3lzLnNldHJlY3Vy?= =?utf-8?q?sionlimit=28=29_now_raises_RecursionError?= Message-ID: <20151012221631.128838.98950@psf.io> https://hg.python.org/cpython/rev/eb0c76442cee changeset: 98718:eb0c76442cee branch: 3.5 parent: 98716:605eda657884 user: Victor Stinner date: Tue Oct 13 00:11:21 2015 +0200 summary: sys.setrecursionlimit() now raises RecursionError Issue #25274: sys.setrecursionlimit() now raises a RecursionError if the new recursion limit is too low depending at the current recursion depth. Modify also the "lower-water mark" formula to make it monotonic. This mark is used to decide when the overflowed flag of the thread state is reset. files: Doc/library/sys.rst | 7 +++ Include/ceval.h | 12 ++++- Lib/test/test_sys.py | 52 +++++++++++++++++++++++--- Misc/NEWS | 5 ++ Modules/_testcapimodule.c | 10 +++++ Python/sysmodule.c | 29 +++++++++++++- 6 files changed, 102 insertions(+), 13 deletions(-) diff --git a/Doc/library/sys.rst b/Doc/library/sys.rst --- a/Doc/library/sys.rst +++ b/Doc/library/sys.rst @@ -975,6 +975,13 @@ that supports a higher limit. This should be done with care, because a too-high limit can lead to a crash. + If the new limit is too low at the current recursion depth, a + :exc:`RecursionError` exception is raised. + + .. versionchanged:: 3.5.1 + A :exc:`RecursionError` exception is now raised if the new limit is too + low at the current recursion depth. + .. function:: setswitchinterval(interval) diff --git a/Include/ceval.h b/Include/ceval.h --- a/Include/ceval.h +++ b/Include/ceval.h @@ -94,10 +94,16 @@ # define _Py_MakeRecCheck(x) (++(x) > _Py_CheckRecursionLimit) #endif +/* Compute the "lower-water mark" for a recursion limit. When + * Py_LeaveRecursiveCall() is called with a recursion depth below this mark, + * the overflowed flag is reset to 0. */ +#define _Py_RecursionLimitLowerWaterMark(limit) \ + (((limit) > 200) \ + ? ((limit) - 50) \ + : (3 * ((limit) >> 2))) + #define _Py_MakeEndRecCheck(x) \ - (--(x) < ((_Py_CheckRecursionLimit > 100) \ - ? 
(_Py_CheckRecursionLimit - 50) \ - : (3 * (_Py_CheckRecursionLimit >> 2)))) + (--(x) < _Py_RecursionLimitLowerWaterMark(_Py_CheckRecursionLimit)) #define Py_ALLOW_RECURSION \ do { unsigned char _old = PyThreadState_GET()->recursion_critical;\ diff --git a/Lib/test/test_sys.py b/Lib/test/test_sys.py --- a/Lib/test/test_sys.py +++ b/Lib/test/test_sys.py @@ -201,22 +201,60 @@ if hasattr(sys, 'gettrace') and sys.gettrace(): self.skipTest('fatal error if run with a trace function') - # NOTE: this test is slightly fragile in that it depends on the current - # recursion count when executing the test being low enough so as to - # trigger the recursion recovery detection in the _Py_MakeEndRecCheck - # macro (see ceval.h). oldlimit = sys.getrecursionlimit() def f(): f() try: - for i in (50, 1000): - # Issue #5392: stack overflow after hitting recursion limit twice - sys.setrecursionlimit(i) + for depth in (10, 25, 50, 75, 100, 250, 1000): + try: + sys.setrecursionlimit(depth) + except RecursionError: + # Issue #25274: The recursion limit is too low at the + # current recursion depth + continue + + # Issue #5392: test stack overflow after hitting recursion + # limit twice self.assertRaises(RecursionError, f) self.assertRaises(RecursionError, f) finally: sys.setrecursionlimit(oldlimit) + @test.support.cpython_only + def test_setrecursionlimit_recursion_depth(self): + # Issue #25274: Setting a low recursion limit must be blocked if the + # current recursion depth is already higher than the "lower-water + # mark". Otherwise, it may not be possible anymore to + # reset the overflowed flag to 0. + + from _testcapi import get_recursion_depth + + def set_recursion_limit_at_depth(depth, limit): + recursion_depth = get_recursion_depth() + if recursion_depth >= depth: + with self.assertRaises(RecursionError) as cm: + sys.setrecursionlimit(limit) + self.assertRegex(str(cm.exception), + "cannot set the recursion limit to [0-9]+ " + "at the recursion depth [0-9]+: " + "the limit is too low") + else: + set_recursion_limit_at_depth(depth, limit) + + oldlimit = sys.getrecursionlimit() + try: + sys.setrecursionlimit(1000) + + for limit in (10, 25, 50, 75, 100, 150, 200): + # formula extracted from _Py_RecursionLimitLowerWaterMark() + if limit > 200: + depth = limit - 50 + else: + depth = limit * 3 // 4 + set_recursion_limit_at_depth(depth, limit) + finally: + sys.setrecursionlimit(oldlimit) + def test_recursionlimit_fatalerror(self): # A fatal error occurs if a second recursion limit is hit when recovering # from a first one. diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -11,6 +11,11 @@ Core and Builtins ----------------- +- Issue #25274: sys.setrecursionlimit() now raises a RecursionError if the new + recursion limit is too low depending at the current recursion depth. Modify + also the "lower-water mark" formula to make it monotonic. This mark is used + to decide when the overflowed flag of the thread state is reset. + - Issue #24402: Fix input() to prompt to the redirected stdout when sys.stdout.fileno() fails. 
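As a quick illustration of the behaviour described in the NEWS entry above (this snippet is not part of the patch; it assumes an interpreter with the change applied, i.e. 3.5.1 or newer, and the depth of 60 frames is an arbitrary choice):

    import sys

    def burn(n):
        # recurse roughly 60 frames deep before touching the limit
        if n:
            return burn(n - 1)
        # the lower-water mark for a limit of 50 is 3 * (50 >> 2) == 36,
        # which is below the current recursion depth, so the call is
        # now rejected instead of leaving the overflowed flag stuck
        sys.setrecursionlimit(50)

    try:
        burn(60)
    except RecursionError as exc:
        # "cannot set the recursion limit to 50 at the recursion
        #  depth ...: the limit is too low"
        print(exc)
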
diff --git a/Modules/_testcapimodule.c b/Modules/_testcapimodule.c --- a/Modules/_testcapimodule.c +++ b/Modules/_testcapimodule.c @@ -3518,6 +3518,15 @@ return _PyTime_AsNanosecondsObject(ms); } +static PyObject* +get_recursion_depth(PyObject *self, PyObject *args) +{ + PyThreadState *tstate = PyThreadState_GET(); + + /* substract one to ignore the frame of the get_recursion_depth() call */ + return PyLong_FromLong(tstate->recursion_depth - 1); +} + static PyMethodDef TestMethods[] = { {"raise_exception", raise_exception, METH_VARARGS}, @@ -3694,6 +3703,7 @@ #endif {"PyTime_AsMilliseconds", test_PyTime_AsMilliseconds, METH_VARARGS}, {"PyTime_AsMicroseconds", test_PyTime_AsMicroseconds, METH_VARARGS}, + {"get_recursion_depth", get_recursion_depth, METH_NOARGS}, {NULL, NULL} /* sentinel */ }; diff --git a/Python/sysmodule.c b/Python/sysmodule.c --- a/Python/sysmodule.c +++ b/Python/sysmodule.c @@ -632,14 +632,37 @@ static PyObject * sys_setrecursionlimit(PyObject *self, PyObject *args) { - int new_limit; + int new_limit, mark; + PyThreadState *tstate; + if (!PyArg_ParseTuple(args, "i:setrecursionlimit", &new_limit)) return NULL; - if (new_limit <= 0) { + + if (new_limit < 1) { PyErr_SetString(PyExc_ValueError, - "recursion limit must be positive"); + "recursion limit must be greater or equal than 1"); return NULL; } + + /* Issue #25274: When the recursion depth hits the recursion limit in + _Py_CheckRecursiveCall(), the overflowed flag of the thread state is + set to 1 and a RecursionError is raised. The overflowed flag is reset + to 0 when the recursion depth goes below the low-water mark: see + Py_LeaveRecursiveCall(). + + Reject too low new limit if the current recursion depth is higher than + the new low-water mark. Otherwise it may not be possible anymore to + reset the overflowed flag to 0. */ + mark = _Py_RecursionLimitLowerWaterMark(new_limit); + tstate = PyThreadState_GET(); + if (tstate->recursion_depth >= mark) { + PyErr_Format(PyExc_RecursionError, + "cannot set the recursion limit to %i at " + "the recursion depth %i: the limit is too low", + new_limit, tstate->recursion_depth); + return NULL; + } + Py_SetRecursionLimit(new_limit); Py_INCREF(Py_None); return Py_None; -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Oct 13 00:16:34 2015 From: python-checkins at python.org (victor.stinner) Date: Mon, 12 Oct 2015 22:16:34 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Merge_3=2E5_=28sys=2Esetrecursionlimit=29?= Message-ID: <20151012221631.55458.94229@psf.io> https://hg.python.org/cpython/rev/978b7dfcb6f1 changeset: 98719:978b7dfcb6f1 parent: 98717:370af83da32c parent: 98718:eb0c76442cee user: Victor Stinner date: Tue Oct 13 00:16:07 2015 +0200 summary: Merge 3.5 (sys.setrecursionlimit) files: Doc/library/sys.rst | 7 +++ Include/ceval.h | 12 ++++- Lib/test/test_sys.py | 55 ++++++++++++++++++++++---- Misc/NEWS | 5 ++ Modules/_testcapimodule.c | 10 ++++ Python/sysmodule.c | 29 ++++++++++++- 6 files changed, 102 insertions(+), 16 deletions(-) diff --git a/Doc/library/sys.rst b/Doc/library/sys.rst --- a/Doc/library/sys.rst +++ b/Doc/library/sys.rst @@ -975,6 +975,13 @@ that supports a higher limit. This should be done with care, because a too-high limit can lead to a crash. + If the new limit is too low at the current recursion depth, a + :exc:`RecursionError` exception is raised. + + .. 
versionchanged:: 3.5.1 + A :exc:`RecursionError` exception is now raised if the new limit is too + low at the current recursion depth. + .. function:: setswitchinterval(interval) diff --git a/Include/ceval.h b/Include/ceval.h --- a/Include/ceval.h +++ b/Include/ceval.h @@ -94,10 +94,16 @@ # define _Py_MakeRecCheck(x) (++(x) > _Py_CheckRecursionLimit) #endif +/* Compute the "lower-water mark" for a recursion limit. When + * Py_LeaveRecursiveCall() is called with a recursion depth below this mark, + * the overflowed flag is reset to 0. */ +#define _Py_RecursionLimitLowerWaterMark(limit) \ + (((limit) > 200) \ + ? ((limit) - 50) \ + : (3 * ((limit) >> 2))) + #define _Py_MakeEndRecCheck(x) \ - (--(x) < ((_Py_CheckRecursionLimit > 100) \ - ? (_Py_CheckRecursionLimit - 50) \ - : (3 * (_Py_CheckRecursionLimit >> 2)))) + (--(x) < _Py_RecursionLimitLowerWaterMark(_Py_CheckRecursionLimit)) #define Py_ALLOW_RECURSION \ do { unsigned char _old = PyThreadState_GET()->recursion_critical;\ diff --git a/Lib/test/test_sys.py b/Lib/test/test_sys.py --- a/Lib/test/test_sys.py +++ b/Lib/test/test_sys.py @@ -201,25 +201,60 @@ if hasattr(sys, 'gettrace') and sys.gettrace(): self.skipTest('fatal error if run with a trace function') - # NOTE: this test is slightly fragile in that it depends on the current - # recursion count when executing the test being low enough so as to - # trigger the recursion recovery detection in the _Py_MakeEndRecCheck - # macro (see ceval.h). oldlimit = sys.getrecursionlimit() def f(): f() try: - # FIXME: workaround crash for the issue #25274 - # FIXME: until the crash is fixed - #for i in (50, 1000): - for i in (150, 1000): - # Issue #5392: stack overflow after hitting recursion limit twice - sys.setrecursionlimit(i) + for depth in (10, 25, 50, 75, 100, 250, 1000): + try: + sys.setrecursionlimit(depth) + except RecursionError: + # Issue #25274: The recursion limit is too low at the + # current recursion depth + continue + + # Issue #5392: test stack overflow after hitting recursion + # limit twice self.assertRaises(RecursionError, f) self.assertRaises(RecursionError, f) finally: sys.setrecursionlimit(oldlimit) + @test.support.cpython_only + def test_setrecursionlimit_recursion_depth(self): + # Issue #25274: Setting a low recursion limit must be blocked if the + # current recursion depth is already higher than the "lower-water + # mark". Otherwise, it may not be possible anymore to + # reset the overflowed flag to 0. + + from _testcapi import get_recursion_depth + + def set_recursion_limit_at_depth(depth, limit): + recursion_depth = get_recursion_depth() + if recursion_depth >= depth: + with self.assertRaises(RecursionError) as cm: + sys.setrecursionlimit(limit) + self.assertRegex(str(cm.exception), + "cannot set the recursion limit to [0-9]+ " + "at the recursion depth [0-9]+: " + "the limit is too low") + else: + set_recursion_limit_at_depth(depth, limit) + + oldlimit = sys.getrecursionlimit() + try: + sys.setrecursionlimit(1000) + + for limit in (10, 25, 50, 75, 100, 150, 200): + # formula extracted from _Py_RecursionLimitLowerWaterMark() + if limit > 200: + depth = limit - 50 + else: + depth = limit * 3 // 4 + set_recursion_limit_at_depth(depth, limit) + finally: + sys.setrecursionlimit(oldlimit) + def test_recursionlimit_fatalerror(self): # A fatal error occurs if a second recursion limit is hit when recovering # from a first one. 
diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,6 +10,11 @@ Core and Builtins ----------------- +- Issue #25274: sys.setrecursionlimit() now raises a RecursionError if the new + recursion limit is too low depending at the current recursion depth. Modify + also the "lower-water mark" formula to make it monotonic. This mark is used + to decide when the overflowed flag of the thread state is reset. + - Issue #24402: Fix input() to prompt to the redirected stdout when sys.stdout.fileno() fails. diff --git a/Modules/_testcapimodule.c b/Modules/_testcapimodule.c --- a/Modules/_testcapimodule.c +++ b/Modules/_testcapimodule.c @@ -3520,6 +3520,15 @@ return _PyTime_AsNanosecondsObject(ms); } +static PyObject* +get_recursion_depth(PyObject *self, PyObject *args) +{ + PyThreadState *tstate = PyThreadState_GET(); + + /* substract one to ignore the frame of the get_recursion_depth() call */ + return PyLong_FromLong(tstate->recursion_depth - 1); +} + static PyMethodDef TestMethods[] = { {"raise_exception", raise_exception, METH_VARARGS}, @@ -3696,6 +3705,7 @@ #endif {"PyTime_AsMilliseconds", test_PyTime_AsMilliseconds, METH_VARARGS}, {"PyTime_AsMicroseconds", test_PyTime_AsMicroseconds, METH_VARARGS}, + {"get_recursion_depth", get_recursion_depth, METH_NOARGS}, {NULL, NULL} /* sentinel */ }; diff --git a/Python/sysmodule.c b/Python/sysmodule.c --- a/Python/sysmodule.c +++ b/Python/sysmodule.c @@ -632,14 +632,37 @@ static PyObject * sys_setrecursionlimit(PyObject *self, PyObject *args) { - int new_limit; + int new_limit, mark; + PyThreadState *tstate; + if (!PyArg_ParseTuple(args, "i:setrecursionlimit", &new_limit)) return NULL; - if (new_limit <= 0) { + + if (new_limit < 1) { PyErr_SetString(PyExc_ValueError, - "recursion limit must be positive"); + "recursion limit must be greater or equal than 1"); return NULL; } + + /* Issue #25274: When the recursion depth hits the recursion limit in + _Py_CheckRecursiveCall(), the overflowed flag of the thread state is + set to 1 and a RecursionError is raised. The overflowed flag is reset + to 0 when the recursion depth goes below the low-water mark: see + Py_LeaveRecursiveCall(). + + Reject too low new limit if the current recursion depth is higher than + the new low-water mark. Otherwise it may not be possible anymore to + reset the overflowed flag to 0. 
*/ + mark = _Py_RecursionLimitLowerWaterMark(new_limit); + tstate = PyThreadState_GET(); + if (tstate->recursion_depth >= mark) { + PyErr_Format(PyExc_RecursionError, + "cannot set the recursion limit to %i at " + "the recursion depth %i: the limit is too low", + new_limit, tstate->recursion_depth); + return NULL; + } + Py_SetRecursionLimit(new_limit); Py_INCREF(Py_None); return Py_None; -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Oct 13 06:32:07 2015 From: python-checkins at python.org (zach.ware) Date: Tue, 13 Oct 2015 04:32:07 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Closes_=2325093=3A_Merge_with_3=2E5?= Message-ID: <20151013043206.18384.97623@psf.io> https://hg.python.org/cpython/rev/a345d1c70d95 changeset: 98722:a345d1c70d95 parent: 98719:978b7dfcb6f1 parent: 98721:a557ec9c8b12 user: Zachary Ware date: Mon Oct 12 23:31:44 2015 -0500 summary: Closes #25093: Merge with 3.5 files: Lib/test/test_tcl.py | 8 ++++---- 1 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Lib/test/test_tcl.py b/Lib/test/test_tcl.py --- a/Lib/test/test_tcl.py +++ b/Lib/test/test_tcl.py @@ -1,5 +1,6 @@ import unittest import re +import subprocess import sys import os from test import support @@ -242,11 +243,10 @@ with support.EnvironmentVarGuard() as env: env.unset("TCL_LIBRARY") - f = os.popen('%s -c "import tkinter; print(tkinter)"' % (unc_name,)) + stdout = subprocess.check_output( + [unc_name, '-c', 'import tkinter; print(tkinter)']) - self.assertIn('tkinter', f.read()) - # exit code must be zero - self.assertEqual(f.close(), None) + self.assertIn(b'tkinter', stdout) def test_exprstring(self): tcl = self.interp -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Oct 13 06:32:10 2015 From: python-checkins at python.org (zach.ware) Date: Tue, 13 Oct 2015 04:32:10 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzI1MDkz?= =?utf-8?q?=3A_Fix_test=5Ftcl=27s_testloadWithUNC_for_paths_with_spaces?= Message-ID: <20151013043200.55470.77508@psf.io> https://hg.python.org/cpython/rev/6eb49f521336 changeset: 98720:6eb49f521336 branch: 3.4 parent: 98708:b07ac3c6bb98 user: Zachary Ware date: Mon Oct 12 23:27:58 2015 -0500 summary: Issue #25093: Fix test_tcl's testloadWithUNC for paths with spaces Patch by Serhiy Storchaka. 
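The short explanation of the fix above: os.popen() hands the shell a single command string, so an interpreter path containing spaces (for example a UNC path under "Program Files") gets split into several arguments unless it is quoted by hand, while subprocess.check_output() with an argument list bypasses the shell and also raises on a non-zero exit code, replacing the manual "exit code must be zero" check. A minimal sketch, with a made-up path rather than the one the test builds:

    import subprocess

    # hypothetical interpreter path containing spaces
    unc_name = r'\\HOST\share\Program Files\Python 3.4\python.exe'

    # old form, broken by the space ('Files\...' becomes a second argument):
    #   os.popen('%s -c "import tkinter; print(tkinter)"' % unc_name)

    # new form, no shell involved:
    stdout = subprocess.check_output(
        [unc_name, '-c', 'import tkinter; print(tkinter)'])
    assert b'tkinter' in stdout
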
files: Lib/test/test_tcl.py | 8 ++++---- 1 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Lib/test/test_tcl.py b/Lib/test/test_tcl.py --- a/Lib/test/test_tcl.py +++ b/Lib/test/test_tcl.py @@ -1,5 +1,6 @@ import unittest import re +import subprocess import sys import os from test import support @@ -246,11 +247,10 @@ with support.EnvironmentVarGuard() as env: env.unset("TCL_LIBRARY") - f = os.popen('%s -c "import tkinter; print(tkinter)"' % (unc_name,)) + stdout = subprocess.check_output( + [unc_name, '-c', 'import tkinter; print(tkinter)']) - self.assertIn('tkinter', f.read()) - # exit code must be zero - self.assertEqual(f.close(), None) + self.assertIn(b'tkinter', stdout) def test_exprstring(self): tcl = self.interp -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Oct 13 06:32:10 2015 From: python-checkins at python.org (zach.ware) Date: Tue, 13 Oct 2015 04:32:10 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_Issue_=2325093=3A_Merge_with_3=2E4?= Message-ID: <20151013043201.70954.67267@psf.io> https://hg.python.org/cpython/rev/a557ec9c8b12 changeset: 98721:a557ec9c8b12 branch: 3.5 parent: 98718:eb0c76442cee parent: 98720:6eb49f521336 user: Zachary Ware date: Mon Oct 12 23:30:15 2015 -0500 summary: Issue #25093: Merge with 3.4 files: Lib/test/test_tcl.py | 8 ++++---- 1 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Lib/test/test_tcl.py b/Lib/test/test_tcl.py --- a/Lib/test/test_tcl.py +++ b/Lib/test/test_tcl.py @@ -1,5 +1,6 @@ import unittest import re +import subprocess import sys import os from test import support @@ -242,11 +243,10 @@ with support.EnvironmentVarGuard() as env: env.unset("TCL_LIBRARY") - f = os.popen('%s -c "import tkinter; print(tkinter)"' % (unc_name,)) + stdout = subprocess.check_output( + [unc_name, '-c', 'import tkinter; print(tkinter)']) - self.assertIn('tkinter', f.read()) - # exit code must be zero - self.assertEqual(f.close(), None) + self.assertIn(b'tkinter', stdout) def test_exprstring(self): tcl = self.interp -- Repository URL: https://hg.python.org/cpython From solipsis at pitrou.net Tue Oct 13 10:45:18 2015 From: solipsis at pitrou.net (solipsis at pitrou.net) Date: Tue, 13 Oct 2015 08:45:18 +0000 Subject: [Python-checkins] Daily reference leaks (a345d1c70d95): sum=18243 Message-ID: <20151013084517.2677.37195@psf.io> results for a345d1c70d95 on branch "default" -------------------------------------------- test_capi leaked [1599, 1599, 1599] references, sum=4797 test_capi leaked [387, 389, 389] memory blocks, sum=1165 test_collections leaked [0, 0, -6] references, sum=-6 test_collections leaked [0, 0, -3] memory blocks, sum=-3 test_format leaked [62, 62, 62] references, sum=186 test_format leaked [62, 62, 62] memory blocks, sum=186 test_functools leaked [0, 2, 2] memory blocks, sum=4 test_threading leaked [3196, 3196, 3196] references, sum=9588 test_threading leaked [774, 776, 776] memory blocks, sum=2326 Command line was: ['./python', '-m', 'test.regrtest', '-uall', '-R', '3:3:/home/psf-users/antoine/refleaks/reflogdumiSC', '--timeout', '7200'] From python-checkins at python.org Tue Oct 13 10:52:50 2015 From: python-checkins at python.org (victor.stinner) Date: Tue, 13 Oct 2015 08:52:50 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2325384=3A_Use_=5FP?= =?utf-8?q?yBytesWriter_API_in_binascii?= Message-ID: <20151013085249.70970.81368@psf.io> https://hg.python.org/cpython/rev/d6fcda2b9b5e changeset: 98723:d6fcda2b9b5e user: Victor 
Stinner date: Tue Oct 13 10:51:47 2015 +0200 summary: Issue #25384: Use _PyBytesWriter API in binascii This API avoids a final call to _PyBytes_Resize() for output smaller than 512 bytes. Small optimization: disable overallocation in binascii.rledecode_hqx() for the last write. files: Modules/binascii.c | 194 ++++++++++++++------------------ 1 files changed, 83 insertions(+), 111 deletions(-) diff --git a/Modules/binascii.c b/Modules/binascii.c --- a/Modules/binascii.c +++ b/Modules/binascii.c @@ -346,9 +346,10 @@ int leftbits = 0; unsigned char this_ch; unsigned int leftchar = 0; - PyObject *rv; - Py_ssize_t bin_len; + Py_ssize_t bin_len, out_len; + _PyBytesWriter writer; + _PyBytesWriter_Init(&writer); bin_data = data->buf; bin_len = data->len; if ( bin_len > 45 ) { @@ -358,9 +359,10 @@ } /* We're lazy and allocate to much (fixed up later) */ - if ( (rv=PyBytes_FromStringAndSize(NULL, 2 + (bin_len+2)/3*4)) == NULL ) + out_len = 2 + (bin_len + 2) / 3 * 4; + ascii_data = _PyBytesWriter_Alloc(&writer, out_len); + if (ascii_data == NULL) return NULL; - ascii_data = (unsigned char *)PyBytes_AS_STRING(rv); /* Store the length */ *ascii_data++ = ' ' + (bin_len & 077); @@ -382,12 +384,7 @@ } *ascii_data++ = '\n'; /* Append a courtesy newline */ - if (_PyBytes_Resize(&rv, - (ascii_data - - (unsigned char *)PyBytes_AS_STRING(rv))) < 0) { - Py_CLEAR(rv); - } - return rv; + return _PyBytesWriter_Finish(&writer, ascii_data); } @@ -433,9 +430,9 @@ int leftbits = 0; unsigned char this_ch; unsigned int leftchar = 0; - PyObject *rv; Py_ssize_t ascii_len, bin_len; int quad_pos = 0; + _PyBytesWriter writer; ascii_data = data->buf; ascii_len = data->len; @@ -447,11 +444,12 @@ bin_len = ((ascii_len+3)/4)*3; /* Upper bound, corrected later */ + _PyBytesWriter_Init(&writer); + /* Allocate the buffer */ - if ( (rv=PyBytes_FromStringAndSize(NULL, bin_len)) == NULL ) + bin_data = _PyBytesWriter_Alloc(&writer, bin_len); + if (bin_data == NULL) return NULL; - bin_data = (unsigned char *)PyBytes_AS_STRING(rv); - bin_len = 0; for( ; ascii_len > 0; ascii_len--, ascii_data++) { this_ch = *ascii_data; @@ -496,31 +494,17 @@ if ( leftbits >= 8 ) { leftbits -= 8; *bin_data++ = (leftchar >> leftbits) & 0xff; - bin_len++; leftchar &= ((1 << leftbits) - 1); } } if (leftbits != 0) { PyErr_SetString(Error, "Incorrect padding"); - Py_DECREF(rv); + _PyBytesWriter_Dealloc(&writer); return NULL; } - /* And set string size correctly. If the result string is empty - ** (because the input was all invalid) return the shared empty - ** string instead; _PyBytes_Resize() won't do this for us. 
- */ - if (bin_len > 0) { - if (_PyBytes_Resize(&rv, bin_len) < 0) { - Py_CLEAR(rv); - } - } - else { - Py_DECREF(rv); - rv = PyBytes_FromStringAndSize("", 0); - } - return rv; + return _PyBytesWriter_Finish(&writer, bin_data); } @@ -542,11 +526,12 @@ int leftbits = 0; unsigned char this_ch; unsigned int leftchar = 0; - PyObject *rv; Py_ssize_t bin_len, out_len; + _PyBytesWriter writer; bin_data = data->buf; bin_len = data->len; + _PyBytesWriter_Init(&writer); assert(bin_len >= 0); @@ -561,9 +546,9 @@ out_len = bin_len*2 + 2; if (newline) out_len++; - if ( (rv=PyBytes_FromStringAndSize(NULL, out_len)) == NULL ) + ascii_data = _PyBytesWriter_Alloc(&writer, out_len); + if (ascii_data == NULL) return NULL; - ascii_data = (unsigned char *)PyBytes_AS_STRING(rv); for( ; bin_len > 0 ; bin_len--, bin_data++ ) { /* Shift the data into our buffer */ @@ -588,12 +573,7 @@ if (newline) *ascii_data++ = '\n'; /* Append a courtesy newline */ - if (_PyBytes_Resize(&rv, - (ascii_data - - (unsigned char *)PyBytes_AS_STRING(rv))) < 0) { - Py_CLEAR(rv); - } - return rv; + return _PyBytesWriter_Finish(&writer, ascii_data); } /*[clinic input] @@ -613,12 +593,14 @@ int leftbits = 0; unsigned char this_ch; unsigned int leftchar = 0; - PyObject *rv; + PyObject *res; Py_ssize_t len; int done = 0; + _PyBytesWriter writer; ascii_data = data->buf; len = data->len; + _PyBytesWriter_Init(&writer); assert(len >= 0); @@ -628,9 +610,9 @@ /* Allocate a string that is too big (fixed later) Add two to the initial length to prevent interning which would preclude subsequent resizing. */ - if ( (rv=PyBytes_FromStringAndSize(NULL, len+2)) == NULL ) + bin_data = _PyBytesWriter_Alloc(&writer, len + 2); + if (bin_data == NULL) return NULL; - bin_data = (unsigned char *)PyBytes_AS_STRING(rv); for( ; len > 0 ; len--, ascii_data++ ) { /* Get the byte and look it up */ @@ -639,7 +621,7 @@ continue; if ( this_ch == FAIL ) { PyErr_SetString(Error, "Illegal char"); - Py_DECREF(rv); + _PyBytesWriter_Dealloc(&writer); return NULL; } if ( this_ch == DONE ) { @@ -661,21 +643,14 @@ if ( leftbits && !done ) { PyErr_SetString(Incomplete, "String has incomplete number of bytes"); - Py_DECREF(rv); + _PyBytesWriter_Dealloc(&writer); return NULL; } - if (_PyBytes_Resize(&rv, - (bin_data - - (unsigned char *)PyBytes_AS_STRING(rv))) < 0) { - Py_CLEAR(rv); - } - if (rv) { - PyObject *rrv = Py_BuildValue("Oi", rv, done); - Py_DECREF(rv); - return rrv; - } - return NULL; + res = _PyBytesWriter_Finish(&writer, bin_data); + if (res == NULL) + return NULL; + return Py_BuildValue("Ni", res, done); } @@ -693,10 +668,11 @@ /*[clinic end generated code: output=0905da344dbf0648 input=e1f1712447a82b09]*/ { unsigned char *in_data, *out_data; - PyObject *rv; unsigned char ch; Py_ssize_t in, inend, len; + _PyBytesWriter writer; + _PyBytesWriter_Init(&writer); in_data = data->buf; len = data->len; @@ -706,9 +682,9 @@ return PyErr_NoMemory(); /* Worst case: output is twice as big as input (fixed later) */ - if ( (rv=PyBytes_FromStringAndSize(NULL, len*2+2)) == NULL ) + out_data = _PyBytesWriter_Alloc(&writer, len * 2 + 2); + if (out_data == NULL) return NULL; - out_data = (unsigned char *)PyBytes_AS_STRING(rv); for( in=0; inbuf; len = data->len; + _PyBytesWriter_Init(&writer); assert(len >= 0); @@ -772,9 +745,9 @@ return PyErr_NoMemory(); /* Allocate a buffer that is at least large enough */ - if ( (rv=PyBytes_FromStringAndSize(NULL, len*2+2)) == NULL ) + ascii_data = _PyBytesWriter_Alloc(&writer, len * 2 + 2); + if (ascii_data == NULL) return NULL; - ascii_data = 
(unsigned char *)PyBytes_AS_STRING(rv); for( ; len > 0 ; len--, bin_data++ ) { /* Shift into our buffer, and output any 6bits ready */ @@ -791,12 +764,8 @@ leftchar <<= (6-leftbits); *ascii_data++ = table_b2a_hqx[leftchar & 0x3f]; } - if (_PyBytes_Resize(&rv, - (ascii_data - - (unsigned char *)PyBytes_AS_STRING(rv))) < 0) { - Py_CLEAR(rv); - } - return rv; + + return _PyBytesWriter_Finish(&writer, ascii_data); } @@ -815,11 +784,12 @@ { unsigned char *in_data, *out_data; unsigned char in_byte, in_repeat; - PyObject *rv; Py_ssize_t in_len, out_len, out_len_left; + _PyBytesWriter writer; in_data = data->buf; in_len = data->len; + _PyBytesWriter_Init(&writer); assert(in_len >= 0); @@ -830,45 +800,49 @@ return PyErr_NoMemory(); /* Allocate a buffer of reasonable size. Resized when needed */ - out_len = in_len*2; - if ( (rv=PyBytes_FromStringAndSize(NULL, out_len)) == NULL ) + out_len = in_len * 2; + out_data = _PyBytesWriter_Alloc(&writer, out_len); + if (out_data == NULL) return NULL; - out_len_left = out_len; - out_data = (unsigned char *)PyBytes_AS_STRING(rv); + + /* Use overallocation */ + writer.overallocate = 1; + out_len_left = writer.allocated; /* ** We need two macros here to get/put bytes and handle ** end-of-buffer for input and output strings. */ -#define INBYTE(b) \ - do { \ - if ( --in_len < 0 ) { \ - PyErr_SetString(Incomplete, ""); \ - Py_DECREF(rv); \ - return NULL; \ - } \ - b = *in_data++; \ +#define INBYTE(b) \ + do { \ + if ( --in_len < 0 ) { \ + PyErr_SetString(Incomplete, ""); \ + goto error; \ + } \ + b = *in_data++; \ } while(0) -#define OUTBYTE(b) \ - do { \ - if ( --out_len_left < 0 ) { \ - if ( out_len > PY_SSIZE_T_MAX / 2) return PyErr_NoMemory(); \ - if (_PyBytes_Resize(&rv, 2*out_len) < 0) \ - { Py_XDECREF(rv); return NULL; } \ - out_data = (unsigned char *)PyBytes_AS_STRING(rv) \ - + out_len; \ - out_len_left = out_len-1; \ - out_len = out_len * 2; \ - } \ - *out_data++ = b; \ +#define OUTBYTE(b) \ + do { \ + if ( --out_len_left < 0 ) { \ + if (in_len <= 0) { \ + /* We are done after this write, no need to \ + overallocate the buffer anymore */ \ + writer.overallocate = 0; \ + } \ + out_data = _PyBytesWriter_Prepare(&writer, out_data, 1); \ + if (out_data == NULL) \ + goto error; \ + out_len_left = writer.allocated; \ + } \ + *out_data++ = b; \ } while(0) - /* - ** Handle first byte separately (since we have to get angry - ** in case of an orphaned RLE code). - */ - INBYTE(in_byte); + /* + ** Handle first byte separately (since we have to get angry + ** in case of an orphaned RLE code). + */ + INBYTE(in_byte); if (in_byte == RUNCHAR) { INBYTE(in_repeat); @@ -877,8 +851,7 @@ ** of the string only). This is a programmer error. 
*/ PyErr_SetString(Error, "Orphaned RLE code at start"); - Py_DECREF(rv); - return NULL; + goto error; } OUTBYTE(RUNCHAR); } else { @@ -904,12 +877,11 @@ OUTBYTE(in_byte); } } - if (_PyBytes_Resize(&rv, - (out_data - - (unsigned char *)PyBytes_AS_STRING(rv))) < 0) { - Py_CLEAR(rv); - } - return rv; + return _PyBytesWriter_Finish(&writer, out_data); + +error: + _PyBytesWriter_Dealloc(&writer); + return NULL; } -- Repository URL: https://hg.python.org/cpython From lp_benchmark_robot at intel.com Tue Oct 13 16:10:45 2015 From: lp_benchmark_robot at intel.com (lp_benchmark_robot at intel.com) Date: Tue, 13 Oct 2015 15:10:45 +0100 Subject: [Python-checkins] Benchmark Results for Python Default 2015-10-13 Message-ID: <423493f9-14a0-428d-adb5-fa84bd520852@irsmsx104.ger.corp.intel.com> Results for project python_default-nightly, build date 2015-10-13 03:02:29 commit: 978b7dfcb6f11aafcccd9fd3e26376f4fe546938 revision date: 2015-10-12 22:16:07 +0000 environment: Haswell-EP cpu: Intel(R) Xeon(R) CPU E5-2699 v3 @ 2.30GHz 2x18 cores, stepping 2, LLC 45 MB mem: 128 GB os: CentOS 7.1 kernel: Linux 3.10.0-229.4.2.el7.x86_64 Baseline results were generated using release v3.4.3, with hash b4cbecbc0781e89a309d03b60a1f75f8499250e6 from 2015-02-25 12:15:33+00:00 ------------------------------------------------------------------------------------------ benchmark relative change since change since current rev with std_dev* last run v3.4.3 regrtest PGO ------------------------------------------------------------------------------------------ :-) django_v2 0.36145% -2.41751% 7.29095% 17.92550% :-| pybench 0.15442% 0.10094% -1.72834% 8.28531% :-( regex_v8 3.16390% -1.33440% -5.79897% 6.75407% :-( nbody 0.10627% -2.14403% -2.08186% 11.35472% :-) json_dump_v2 0.30483% 2.05806% 0.57432% 9.82115% :-| normal_startup 0.73702% 0.00173% 0.05020% 5.68030% ------------------------------------------------------------------------------------------ Note: Benchmark results are measured in seconds. * Relative Standard Deviation (Standard Deviation/Average) Our lab does a nightly source pull and build of the Python project and measures performance changes against the previous stable version and the previous nightly measurement. This is provided as a service to the community so that quality issues with current hardware can be identified quickly. Intel technologies' features and benefits depend on system configuration and may require enabled hardware, software or service activation. Performance varies depending on system configuration. 
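Stepping back from the individual changesets: the unicode-escape encoders and the binascii rewrite above all move to the same _PyBytesWriter idiom. Allocate a lower-bound buffer, opt in to overallocation, call _PyBytesWriter_Prepare() before any write that can exceed what was preallocated, then Finish() on success or Dealloc() on error. A condensed sketch of that idiom, using only the calls visible in the diffs above; the function itself is invented for illustration and is not CPython code:

    /* escape NUL bytes as "\0"; sketch of the _PyBytesWriter idiom */
    static PyObject *
    escape_nul(const char *in, Py_ssize_t len)
    {
        char *p;
        Py_ssize_t i;
        _PyBytesWriter writer;

        _PyBytesWriter_Init(&writer);

        /* lower bound: at least one output byte per input byte */
        p = _PyBytesWriter_Alloc(&writer, len);
        if (p == NULL)
            return NULL;
        /* escapes can make the output longer than the estimate */
        writer.overallocate = 1;

        for (i = 0; i < len; i++) {
            if (in[i] == '\0') {
                /* this character needs 2 bytes; 1 was preallocated */
                p = _PyBytesWriter_Prepare(&writer, p, 2-1);
                if (p == NULL)
                    goto error;
                *p++ = '\\';
                *p++ = '0';
            }
            else
                *p++ = in[i];
        }

        /* trims any overallocation; no separate _PyBytes_Resize() call */
        return _PyBytesWriter_Finish(&writer, p);

    error:
        _PyBytesWriter_Dealloc(&writer);
        return NULL;
    }
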
From lp_benchmark_robot at intel.com Tue Oct 13 16:21:38 2015 From: lp_benchmark_robot at intel.com (lp_benchmark_robot at intel.com) Date: Tue, 13 Oct 2015 15:21:38 +0100 Subject: [Python-checkins] Benchmark Results for Python 2.7 2015-10-13 Message-ID: <53d7b4c5-24f3-4716-9041-37a0933334c7@irsmsx104.ger.corp.intel.com> Results for project python_2.7-nightly, build date 2015-10-13 03:49:04 commit: 4188cd5dc0c52d61ac7b92a21108b6cc07cd70f7 revision date: 2015-10-12 06:03:22 +0000 environment: Haswell-EP cpu: Intel(R) Xeon(R) CPU E5-2699 v3 @ 2.30GHz 2x18 cores, stepping 2, LLC 45 MB mem: 128 GB os: CentOS 7.1 kernel: Linux 3.10.0-229.4.2.el7.x86_64 Baseline results were generated using release v2.7.10, with hash 15c95b7d81dcf821daade360741e00714667653f from 2015-05-23 16:02:14+00:00 ------------------------------------------------------------------------------------------ benchmark relative change since change since current rev with std_dev* last run v2.7.10 regrtest PGO ------------------------------------------------------------------------------------------ :-) django_v2 0.21859% 0.81636% 4.91327% 9.37144% :-) pybench 0.31168% -0.07253% 6.81025% 6.05573% :-| regex_v8 0.54197% 0.29594% -1.66082% 7.45115% :-) nbody 0.31369% -0.68734% 8.17678% 3.93618% :-) json_dump_v2 0.24004% -0.68558% 3.10546% 14.19512% :-| normal_startup 1.56439% 0.92498% -1.23379% 2.96960% :-| ssbench 0.16997% 0.22941% 1.47174% 1.96783% ------------------------------------------------------------------------------------------ Note: Benchmark results for ssbench are measured in requests/second while all other are measured in seconds. * Relative Standard Deviation (Standard Deviation/Average) Our lab does a nightly source pull and build of the Python project and measures performance changes against the previous stable version and the previous nightly measurement. This is provided as a service to the community so that quality issues with current hardware can be identified quickly. Intel technologies' features and benefits depend on system configuration and may require enabled hardware, software or service activation. Performance varies depending on system configuration. From python-checkins at python.org Tue Oct 13 20:14:56 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Tue, 13 Oct 2015 18:14:56 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzI1Mzgw?= =?utf-8?q?=3A_Fixed_protocol_for_the_STACK=5FGLOBAL_opcode_in?= Message-ID: <20151013181455.7236.68347@psf.io> https://hg.python.org/cpython/rev/e52f1fa2d10e changeset: 98724:e52f1fa2d10e branch: 3.4 parent: 98720:6eb49f521336 user: Serhiy Storchaka date: Tue Oct 13 21:12:32 2015 +0300 summary: Issue #25380: Fixed protocol for the STACK_GLOBAL opcode in pickletools.opcodes. files: Lib/pickletools.py | 2 +- Misc/NEWS | 3 +++ 2 files changed, 4 insertions(+), 1 deletions(-) diff --git a/Lib/pickletools.py b/Lib/pickletools.py --- a/Lib/pickletools.py +++ b/Lib/pickletools.py @@ -1898,7 +1898,7 @@ arg=None, stack_before=[pyunicode, pyunicode], stack_after=[anyobject], - proto=0, + proto=4, doc="""Push a global object (module.attr) on the stack. """), diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -96,6 +96,9 @@ Library ------- +- Issue #25380: Fixed protocol for the STACK_GLOBAL opcode in + pickletools.opcodes. + - Issue #23972: Updates asyncio datagram create method allowing reuseport and reuseaddr socket options to be set prior to binding the socket. 
Mirroring the existing asyncio create_server method the reuseaddr option -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Oct 13 20:14:56 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Tue, 13 Oct 2015 18:14:56 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2325380=3A_Fixed_protocol_for_the_STACK=5FGLOBAL_?= =?utf-8?q?opcode_in?= Message-ID: <20151013181456.20779.91764@psf.io> https://hg.python.org/cpython/rev/f584dadc640f changeset: 98726:f584dadc640f parent: 98723:d6fcda2b9b5e parent: 98725:4115eabc3a6d user: Serhiy Storchaka date: Tue Oct 13 21:14:01 2015 +0300 summary: Issue #25380: Fixed protocol for the STACK_GLOBAL opcode in pickletools.opcodes. files: Lib/pickletools.py | 2 +- Misc/NEWS | 3 +++ 2 files changed, 4 insertions(+), 1 deletions(-) diff --git a/Lib/pickletools.py b/Lib/pickletools.py --- a/Lib/pickletools.py +++ b/Lib/pickletools.py @@ -1898,7 +1898,7 @@ arg=None, stack_before=[pyunicode, pyunicode], stack_after=[anyobject], - proto=0, + proto=4, doc="""Push a global object (module.attr) on the stack. """), diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -73,6 +73,9 @@ - Issue #25316: distutils raises OSError instead of DistutilsPlatformError when MSVC is not installed. +- Issue #25380: Fixed protocol for the STACK_GLOBAL opcode in + pickletools.opcodes. + - Issue #23972: Updates asyncio datagram create method allowing reuseport and reuseaddr socket options to be set prior to binding the socket. Mirroring the existing asyncio create_server method the reuseaddr option -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Oct 13 20:14:56 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Tue, 13 Oct 2015 18:14:56 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_Issue_=2325380=3A_Fixed_protocol_for_the_STACK=5FGLOBAL_opcode?= =?utf-8?q?_in?= Message-ID: <20151013181456.70965.32052@psf.io> https://hg.python.org/cpython/rev/4115eabc3a6d changeset: 98725:4115eabc3a6d branch: 3.5 parent: 98721:a557ec9c8b12 parent: 98724:e52f1fa2d10e user: Serhiy Storchaka date: Tue Oct 13 21:13:34 2015 +0300 summary: Issue #25380: Fixed protocol for the STACK_GLOBAL opcode in pickletools.opcodes. files: Lib/pickletools.py | 2 +- Misc/NEWS | 3 +++ 2 files changed, 4 insertions(+), 1 deletions(-) diff --git a/Lib/pickletools.py b/Lib/pickletools.py --- a/Lib/pickletools.py +++ b/Lib/pickletools.py @@ -1898,7 +1898,7 @@ arg=None, stack_before=[pyunicode, pyunicode], stack_after=[anyobject], - proto=0, + proto=4, doc="""Push a global object (module.attr) on the stack. """), diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -53,6 +53,9 @@ - Issue #25316: distutils raises OSError instead of DistutilsPlatformError when MSVC is not installed. +- Issue #25380: Fixed protocol for the STACK_GLOBAL opcode in + pickletools.opcodes. + - Issue #23972: Updates asyncio datagram create method allowing reuseport and reuseaddr socket options to be set prior to binding the socket. 
Mirroring the existing asyncio create_server method the reuseaddr option -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Oct 13 20:20:51 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Tue, 13 Oct 2015 18:20:51 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2325382=3A_pickleto?= =?utf-8?q?ols=2Edis=28=29_now_outputs_implicit_memo_index_for_the?= Message-ID: <20151013182050.18364.59577@psf.io> https://hg.python.org/cpython/rev/d88526aa1b29 changeset: 98727:d88526aa1b29 user: Serhiy Storchaka date: Tue Oct 13 21:20:14 2015 +0300 summary: Issue #25382: pickletools.dis() now outputs implicit memo index for the MEMOIZE opcode. files: Lib/pickletools.py | 1 + Misc/NEWS | 3 +++ 2 files changed, 4 insertions(+), 0 deletions(-) diff --git a/Lib/pickletools.py b/Lib/pickletools.py --- a/Lib/pickletools.py +++ b/Lib/pickletools.py @@ -2440,6 +2440,7 @@ if opcode.name in ("PUT", "BINPUT", "LONG_BINPUT", "MEMOIZE"): if opcode.name == "MEMOIZE": memo_idx = len(memo) + markmsg = "(as %d)" % memo_idx else: assert arg is not None memo_idx = arg diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -56,6 +56,9 @@ Library ------- +- Issue #25382: pickletools.dis() now outputs implicit memo index for the + MEMOIZE opcode. + - Issue #25357: Add an optional newline paramer to binascii.b2a_base64(). base64.b64encode() uses it to avoid a memory copy. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Oct 13 20:27:20 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Tue, 13 Oct 2015 18:27:20 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2324164=3A_Document?= =?utf-8?q?_changes_to_=5F=5Fgetnewargs=5F=5F_and_=5F=5Fgetnewargs=5Fex=5F?= =?utf-8?b?Xy4=?= Message-ID: <20151013182717.2689.32324@psf.io> https://hg.python.org/cpython/rev/de982d8b7b15 changeset: 98728:de982d8b7b15 user: Serhiy Storchaka date: Tue Oct 13 21:26:35 2015 +0300 summary: Issue #24164: Document changes to __getnewargs__ and __getnewargs_ex__. files: Doc/library/pickle.rst | 19 +++++++++++++------ 1 files changed, 13 insertions(+), 6 deletions(-) diff --git a/Doc/library/pickle.rst b/Doc/library/pickle.rst --- a/Doc/library/pickle.rst +++ b/Doc/library/pickle.rst @@ -488,7 +488,7 @@ .. method:: object.__getnewargs_ex__() - In protocols 4 and newer, classes that implements the + In protocols 2 and newer, classes that implements the :meth:`__getnewargs_ex__` method can dictate the values passed to the :meth:`__new__` method upon unpickling. The method must return a pair ``(args, kwargs)`` where *args* is a tuple of positional arguments @@ -500,15 +500,22 @@ class requires keyword-only arguments. Otherwise, it is recommended for compatibility to implement :meth:`__getnewargs__`. + .. versionchanged:: 3.6 + :meth:`__getnewargs_ex__` is now used in protocols 2 and 3. + .. method:: object.__getnewargs__() - This method serve a similar purpose as :meth:`__getnewargs_ex__` but - for protocols 2 and newer. It must return a tuple of arguments ``args`` - which will be passed to the :meth:`__new__` method upon unpickling. + This method serve a similar purpose as :meth:`__getnewargs_ex__`, but + supports only positional arguments. It must return a tuple of arguments + ``args`` which will be passed to the :meth:`__new__` method upon unpickling. - In protocols 4 and newer, :meth:`__getnewargs__` will not be called if - :meth:`__getnewargs_ex__` is defined. 
+ :meth:`__getnewargs__` will not be called if :meth:`__getnewargs_ex__` is + defined. + + .. versionchanged:: 3.6 + Before Python 3.6, :meth:`__getnewargs__` was called instead of + :meth:`__getnewargs_ex__` in protocols 2 and 3. .. method:: object.__getstate__() -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Oct 14 01:55:13 2015 From: python-checkins at python.org (victor.stinner) Date: Tue, 13 Oct 2015 23:55:13 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Rewrite_PyBytes=5FFromForm?= =?utf-8?q?atV=28=29_using_=5FPyBytesWriter_API?= Message-ID: <20151013235513.3271.37965@psf.io> https://hg.python.org/cpython/rev/388483b53cde changeset: 98729:388483b53cde user: Victor Stinner date: Wed Oct 14 00:21:35 2015 +0200 summary: Rewrite PyBytes_FromFormatV() using _PyBytesWriter API * Add much more unit tests on PyBytes_FromFormatV() * Remove the first loop to compute the length of the output string * Use _PyBytesWriter to handle the bytes buffer, use overallocation * Cleanup the code to make simpler and easier to review files: Lib/test/test_bytes.py | 90 ++++++- Objects/bytesobject.c | 352 ++++++++++++++-------------- 2 files changed, 252 insertions(+), 190 deletions(-) diff --git a/Lib/test/test_bytes.py b/Lib/test/test_bytes.py --- a/Lib/test/test_bytes.py +++ b/Lib/test/test_bytes.py @@ -783,25 +783,93 @@ # Test PyBytes_FromFormat() def test_from_format(self): test.support.import_module('ctypes') - from ctypes import pythonapi, py_object, c_int, c_char_p + _testcapi = test.support.import_module('_testcapi') + from ctypes import pythonapi, py_object + from ctypes import ( + c_int, c_uint, + c_long, c_ulong, + c_size_t, c_ssize_t, + c_char_p) + PyBytes_FromFormat = pythonapi.PyBytes_FromFormat PyBytes_FromFormat.restype = py_object + # basic tests self.assertEqual(PyBytes_FromFormat(b'format'), b'format') + self.assertEqual(PyBytes_FromFormat(b'Hello %s !', b'world'), + b'Hello world !') + # test formatters + self.assertEqual(PyBytes_FromFormat(b'c=%c', c_int(0)), + b'c=\0') + self.assertEqual(PyBytes_FromFormat(b'c=%c', c_int(ord('@'))), + b'c=@') + self.assertEqual(PyBytes_FromFormat(b'c=%c', c_int(255)), + b'c=\xff') + self.assertEqual(PyBytes_FromFormat(b'd=%d ld=%ld zd=%zd', + c_int(1), c_long(2), + c_size_t(3)), + b'd=1 ld=2 zd=3') + self.assertEqual(PyBytes_FromFormat(b'd=%d ld=%ld zd=%zd', + c_int(-1), c_long(-2), + c_size_t(-3)), + b'd=-1 ld=-2 zd=-3') + self.assertEqual(PyBytes_FromFormat(b'u=%u lu=%lu zu=%zu', + c_uint(123), c_ulong(456), + c_size_t(789)), + b'u=123 lu=456 zu=789') + self.assertEqual(PyBytes_FromFormat(b'i=%i', c_int(123)), + b'i=123') + self.assertEqual(PyBytes_FromFormat(b'i=%i', c_int(-123)), + b'i=-123') + self.assertEqual(PyBytes_FromFormat(b'x=%x', c_int(0xabc)), + b'x=abc') + self.assertEqual(PyBytes_FromFormat(b'ptr=%p', + c_char_p(0xabcdef)), + b'ptr=0xabcdef') + self.assertEqual(PyBytes_FromFormat(b's=%s', c_char_p(b'cstr')), + b's=cstr') + + # test minimum and maximum integer values + size_max = c_size_t(-1).value + for formatstr, ctypes_type, value, py_formatter in ( + (b'%d', c_int, _testcapi.INT_MIN, str), + (b'%d', c_int, _testcapi.INT_MAX, str), + (b'%ld', c_long, _testcapi.LONG_MIN, str), + (b'%ld', c_long, _testcapi.LONG_MAX, str), + (b'%lu', c_ulong, _testcapi.ULONG_MAX, str), + (b'%zd', c_ssize_t, _testcapi.PY_SSIZE_T_MIN, str), + (b'%zd', c_ssize_t, _testcapi.PY_SSIZE_T_MAX, str), + (b'%zu', c_size_t, size_max, str), + (b'%p', c_char_p, size_max, lambda value: '%#x' % value), + ): + 
self.assertEqual(PyBytes_FromFormat(formatstr, ctypes_type(value)), + py_formatter(value).encode('ascii')), + + # width and precision (width is currently ignored) + self.assertEqual(PyBytes_FromFormat(b'%5s', b'a'), + b'a') + self.assertEqual(PyBytes_FromFormat(b'%.3s', b'abcdef'), + b'abc') + + # '%%' formatter + self.assertEqual(PyBytes_FromFormat(b'%%'), + b'%') + self.assertEqual(PyBytes_FromFormat(b'[%%]'), + b'[%]') + self.assertEqual(PyBytes_FromFormat(b'%%%c', c_int(ord('_'))), + b'%_') + self.assertEqual(PyBytes_FromFormat(b'%%s'), + b'%s') + + # Invalid formats and partial formatting self.assertEqual(PyBytes_FromFormat(b'%'), b'%') - self.assertEqual(PyBytes_FromFormat(b'%%'), b'%') - self.assertEqual(PyBytes_FromFormat(b'%%s'), b'%s') - self.assertEqual(PyBytes_FromFormat(b'[%%]'), b'[%]') - self.assertEqual(PyBytes_FromFormat(b'%%%c', c_int(ord('_'))), b'%_') + self.assertEqual(PyBytes_FromFormat(b'x=%i y=%', c_int(2), c_int(3)), + b'x=2 y=%') - self.assertEqual(PyBytes_FromFormat(b'c:%c', c_int(255)), - b'c:\xff') - self.assertEqual(PyBytes_FromFormat(b's:%s', c_char_p(b'cstr')), - b's:cstr') - - # Issue #19969 + # Issue #19969: %c must raise OverflowError for values + # not in the range [0; 255] self.assertRaises(OverflowError, PyBytes_FromFormat, b'%c', c_int(-1)) self.assertRaises(OverflowError, diff --git a/Objects/bytesobject.c b/Objects/bytesobject.c --- a/Objects/bytesobject.c +++ b/Objects/bytesobject.c @@ -174,190 +174,184 @@ PyObject * PyBytes_FromFormatV(const char *format, va_list vargs) { - va_list count; - Py_ssize_t n = 0; - const char* f; char *s; - PyObject* string; - - Py_VA_COPY(count, vargs); - /* step 1: figure out how large a buffer we need */ + const char *f; + const char *p; + Py_ssize_t prec; + int longflag; + int size_tflag; + /* Longest 64-bit formatted numbers: + - "18446744073709551615\0" (21 bytes) + - "-9223372036854775808\0" (21 bytes) + Decimal takes the most space (it isn't enough for octal.) + + Longest 64-bit pointer representation: + "0xffffffffffffffff\0" (19 bytes). */ + char buffer[21]; + _PyBytesWriter writer; + + _PyBytesWriter_Init(&writer); + + s = _PyBytesWriter_Alloc(&writer, strlen(format)); + if (s == NULL) + return NULL; + writer.overallocate = 1; + +#define WRITE_BYTES(str) \ + do { \ + s = _PyBytesWriter_WriteBytes(&writer, s, (str), strlen(str)); \ + if (s == NULL) \ + goto error; \ + } while (0) + for (f = format; *f; f++) { - if (*f == '%') { - const char* p = f; - while (*++f && *f != '%' && !Py_ISALPHA(*f)) - ; - - /* skip the 'l' or 'z' in {%ld, %zd, %lu, %zu} since - * they don't affect the amount of space we reserve. - */ - if ((*f == 'l' || *f == 'z') && - (f[1] == 'd' || f[1] == 'u')) - ++f; - - switch (*f) { - case 'c': - { - int c = va_arg(count, int); - if (c < 0 || c > 255) { - PyErr_SetString(PyExc_OverflowError, - "PyBytes_FromFormatV(): %c format " - "expects an integer in range [0; 255]"); - return NULL; - } - n++; - break; + if (*f != '%') { + *s++ = *f; + continue; + } + + p = f++; + + /* ignore the width (ex: 10 in "%10s") */ + while (Py_ISDIGIT(*f)) + f++; + + /* parse the precision (ex: 10 in "%.10s") */ + prec = 0; + if (*f == '.') { + f++; + for (; Py_ISDIGIT(*f); f++) { + prec = (prec * 10) + (*f - '0'); } - case '%': - n++; - break; - case 'd': case 'u': case 'i': case 'x': - (void) va_arg(count, int); - /* 20 bytes is enough to hold a 64-bit - integer. Decimal takes the most space. - This isn't enough for octal. 
*/ - n += 20; - break; - case 's': - s = va_arg(count, char*); - n += strlen(s); - break; - case 'p': - (void) va_arg(count, int); - /* maximum 64-bit pointer representation: - * 0xffffffffffffffff - * so 19 characters is enough. - * XXX I count 18 -- what's the extra for? - */ - n += 19; - break; - default: - /* if we stumble upon an unknown - formatting code, copy the rest of - the format string to the output - string. (we cannot just skip the - code, since there's no way to know - what's in the argument list) */ - n += strlen(p); - goto expand; + } + + while (*f && *f != '%' && !Py_ISALPHA(*f)) + f++; + + /* handle the long flag ('l'), but only for %ld and %lu. + others can be added when necessary. */ + longflag = 0; + if (*f == 'l' && (f[1] == 'd' || f[1] == 'u')) { + longflag = 1; + ++f; + } + + /* handle the size_t flag ('z'). */ + size_tflag = 0; + if (*f == 'z' && (f[1] == 'd' || f[1] == 'u')) { + size_tflag = 1; + ++f; + } + + /* substract bytes preallocated for the format string + (ex: 2 for "%s") */ + writer.min_size -= (f - p + 1); + + switch (*f) { + case 'c': + { + int c = va_arg(vargs, int); + if (c < 0 || c > 255) { + PyErr_SetString(PyExc_OverflowError, + "PyBytes_FromFormatV(): %c format " + "expects an integer in range [0; 255]"); + goto error; } - } else - n++; + writer.min_size++; + *s++ = (unsigned char)c; + break; + } + + case 'd': + if (longflag) + sprintf(buffer, "%ld", va_arg(vargs, long)); + else if (size_tflag) + sprintf(buffer, "%" PY_FORMAT_SIZE_T "d", + va_arg(vargs, Py_ssize_t)); + else + sprintf(buffer, "%d", va_arg(vargs, int)); + assert(strlen(buffer) < sizeof(buffer)); + WRITE_BYTES(buffer); + break; + + case 'u': + if (longflag) + sprintf(buffer, "%lu", + va_arg(vargs, unsigned long)); + else if (size_tflag) + sprintf(buffer, "%" PY_FORMAT_SIZE_T "u", + va_arg(vargs, size_t)); + else + sprintf(buffer, "%u", + va_arg(vargs, unsigned int)); + assert(strlen(buffer) < sizeof(buffer)); + WRITE_BYTES(buffer); + break; + + case 'i': + sprintf(buffer, "%i", va_arg(vargs, int)); + assert(strlen(buffer) < sizeof(buffer)); + WRITE_BYTES(buffer); + break; + + case 'x': + sprintf(buffer, "%x", va_arg(vargs, int)); + assert(strlen(buffer) < sizeof(buffer)); + WRITE_BYTES(buffer); + break; + + case 's': + { + Py_ssize_t i; + + p = va_arg(vargs, char*); + i = strlen(p); + if (prec > 0 && i > prec) + i = prec; + s = _PyBytesWriter_WriteBytes(&writer, s, p, i); + if (s == NULL) + goto error; + break; + } + + case 'p': + sprintf(buffer, "%p", va_arg(vargs, void*)); + assert(strlen(buffer) < sizeof(buffer)); + /* %p is ill-defined: ensure leading 0x. */ + if (buffer[1] == 'X') + buffer[1] = 'x'; + else if (buffer[1] != 'x') { + memmove(buffer+2, buffer, strlen(buffer)+1); + buffer[0] = '0'; + buffer[1] = 'x'; + } + WRITE_BYTES(buffer); + break; + + case '%': + writer.min_size++; + *s++ = '%'; + break; + + default: + if (*f == 0) { + /* fix min_size if we reached the end of the format string */ + writer.min_size++; + } + + /* invalid format string: copy unformatted string and exit */ + WRITE_BYTES(p); + return _PyBytesWriter_Finish(&writer, s); + } } - expand: - /* step 2: fill the buffer */ - /* Since we've analyzed how much space we need for the worst case, - use sprintf directly instead of the slower PyOS_snprintf. 
*/ - string = PyBytes_FromStringAndSize(NULL, n); - if (!string) - return NULL; - - s = PyBytes_AsString(string); - - for (f = format; *f; f++) { - if (*f == '%') { - const char* p = f++; - Py_ssize_t i; - int longflag = 0; - int size_tflag = 0; - /* parse the width.precision part (we're only - interested in the precision value, if any) */ - n = 0; - while (Py_ISDIGIT(*f)) - n = (n*10) + *f++ - '0'; - if (*f == '.') { - f++; - n = 0; - while (Py_ISDIGIT(*f)) - n = (n*10) + *f++ - '0'; - } - while (*f && *f != '%' && !Py_ISALPHA(*f)) - f++; - /* handle the long flag, but only for %ld and %lu. - others can be added when necessary. */ - if (*f == 'l' && (f[1] == 'd' || f[1] == 'u')) { - longflag = 1; - ++f; - } - /* handle the size_t flag. */ - if (*f == 'z' && (f[1] == 'd' || f[1] == 'u')) { - size_tflag = 1; - ++f; - } - - switch (*f) { - case 'c': - { - int c = va_arg(vargs, int); - /* c has been checked for overflow in the first step */ - *s++ = (unsigned char)c; - break; - } - case 'd': - if (longflag) - sprintf(s, "%ld", va_arg(vargs, long)); - else if (size_tflag) - sprintf(s, "%" PY_FORMAT_SIZE_T "d", - va_arg(vargs, Py_ssize_t)); - else - sprintf(s, "%d", va_arg(vargs, int)); - s += strlen(s); - break; - case 'u': - if (longflag) - sprintf(s, "%lu", - va_arg(vargs, unsigned long)); - else if (size_tflag) - sprintf(s, "%" PY_FORMAT_SIZE_T "u", - va_arg(vargs, size_t)); - else - sprintf(s, "%u", - va_arg(vargs, unsigned int)); - s += strlen(s); - break; - case 'i': - sprintf(s, "%i", va_arg(vargs, int)); - s += strlen(s); - break; - case 'x': - sprintf(s, "%x", va_arg(vargs, int)); - s += strlen(s); - break; - case 's': - p = va_arg(vargs, char*); - i = strlen(p); - if (n > 0 && i > n) - i = n; - Py_MEMCPY(s, p, i); - s += i; - break; - case 'p': - sprintf(s, "%p", va_arg(vargs, void*)); - /* %p is ill-defined: ensure leading 0x. */ - if (s[1] == 'X') - s[1] = 'x'; - else if (s[1] != 'x') { - memmove(s+2, s, strlen(s)+1); - s[0] = '0'; - s[1] = 'x'; - } - s += strlen(s); - break; - case '%': - *s++ = '%'; - break; - default: - strcpy(s, p); - s += strlen(s); - goto end; - } - } else - *s++ = *f; - } - - end: - _PyBytes_Resize(&string, s - PyBytes_AS_STRING(string)); - return string; + +#undef WRITE_BYTES + + return _PyBytesWriter_Finish(&writer, s); + + error: + _PyBytesWriter_Dealloc(&writer); + return NULL; } PyObject * -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Oct 14 02:55:52 2015 From: python-checkins at python.org (victor.stinner) Date: Wed, 14 Oct 2015 00:55:52 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Fix_test=5Fbytes_on_Window?= =?utf-8?q?s?= Message-ID: <20151014005550.2675.12330@psf.io> https://hg.python.org/cpython/rev/60dc5d351304 changeset: 98730:60dc5d351304 user: Victor Stinner date: Wed Oct 14 02:55:12 2015 +0200 summary: Fix test_bytes on Windows On Windows, sprintf("%p", 0xabcdef) formats hexadecimal in uppercase and pad to 16 characters (on 64-bit system) with zeros. 
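For the record, these are the two spellings the test now has to accept; the call below stands for the ctypes-wrapped PyBytes_FromFormat set up in the test, and the pointer value is arbitrary:

    ptr = 0xabcdef
    PyBytes_FromFormat(b'ptr=%p', c_char_p(ptr))
    # b'ptr=0xabcdef'             typical glibc-style %p: lowercase, no padding
    # b'ptr=0x0000000000ABCDEF'   MSVC %p on a 64-bit build: uppercase, zero-
    #                             padded to pointer width, "0x" added by CPython
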
files: Lib/test/test_bytes.py | 11 +++++++---- 1 files changed, 7 insertions(+), 4 deletions(-) diff --git a/Lib/test/test_bytes.py b/Lib/test/test_bytes.py --- a/Lib/test/test_bytes.py +++ b/Lib/test/test_bytes.py @@ -782,7 +782,7 @@ # Test PyBytes_FromFormat() def test_from_format(self): - test.support.import_module('ctypes') + ctypes = test.support.import_module('ctypes') _testcapi = test.support.import_module('_testcapi') from ctypes import pythonapi, py_object from ctypes import ( @@ -825,9 +825,12 @@ b'i=-123') self.assertEqual(PyBytes_FromFormat(b'x=%x', c_int(0xabc)), b'x=abc') - self.assertEqual(PyBytes_FromFormat(b'ptr=%p', - c_char_p(0xabcdef)), - b'ptr=0xabcdef') + ptr = 0xabcdef + expected = [b'ptr=%#x' % ptr] + win_format = 'ptr=0x%0{}X'.format(2 * ctypes.sizeof(c_char_p)) + expected.append((win_format % ptr).encode('ascii')) + self.assertIn(PyBytes_FromFormat(b'ptr=%p', c_char_p(ptr)), + expected) self.assertEqual(PyBytes_FromFormat(b's=%s', c_char_p(b'cstr')), b's=cstr') -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Oct 14 04:09:55 2015 From: python-checkins at python.org (terry.reedy) Date: Wed, 14 Oct 2015 02:09:55 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzI0Nzgy?= =?utf-8?q?=3A_whitespace?= Message-ID: <20151014020955.449.95976@psf.io> https://hg.python.org/cpython/rev/4ed0cc2b7c7c changeset: 98736:4ed0cc2b7c7c branch: 3.4 parent: 98732:5647c61fb593 user: Terry Jan Reedy date: Tue Oct 13 22:09:06 2015 -0400 summary: Issue #24782: whitespace files: Lib/idlelib/configDialog.py | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Lib/idlelib/configDialog.py b/Lib/idlelib/configDialog.py --- a/Lib/idlelib/configDialog.py +++ b/Lib/idlelib/configDialog.py @@ -1190,7 +1190,7 @@ This code is generic - it works for any and all IDLE extensions. IDLE extensions save their configuration options using idleConf. - This code reads the current configuration using idleConf, supplies a + This code reads the current configuration using idleConf, supplies a GUI interface to change the configuration values, and saves the changes using idleConf. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Oct 14 04:09:55 2015 From: python-checkins at python.org (terry.reedy) Date: Wed, 14 Oct 2015 02:09:55 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Merge_with_3=2E5?= Message-ID: <20151014020955.481.22176@psf.io> https://hg.python.org/cpython/rev/9ab61ec2934c changeset: 98738:9ab61ec2934c parent: 98734:fa9c50ac29f4 parent: 98737:4423e5022378 user: Terry Jan Reedy date: Tue Oct 13 22:09:34 2015 -0400 summary: Merge with 3.5 files: Lib/idlelib/configDialog.py | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Lib/idlelib/configDialog.py b/Lib/idlelib/configDialog.py --- a/Lib/idlelib/configDialog.py +++ b/Lib/idlelib/configDialog.py @@ -1190,7 +1190,7 @@ This code is generic - it works for any and all IDLE extensions. IDLE extensions save their configuration options using idleConf. - This code reads the current configuration using idleConf, supplies a + This code reads the current configuration using idleConf, supplies a GUI interface to change the configuration values, and saves the changes using idleConf. 
-- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Oct 14 04:09:55 2015 From: python-checkins at python.org (terry.reedy) Date: Wed, 14 Oct 2015 02:09:55 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Merge_with_3=2E5?= Message-ID: <20151014020955.18382.68567@psf.io> https://hg.python.org/cpython/rev/fa9c50ac29f4 changeset: 98734:fa9c50ac29f4 parent: 98730:60dc5d351304 parent: 98733:96645f7cd88c user: Terry Jan Reedy date: Tue Oct 13 22:04:22 2015 -0400 summary: Merge with 3.5 files: Doc/library/idle.rst | 19 +- Lib/idlelib/Bindings.py | 1 - Lib/idlelib/EditorWindow.py | 6 - Lib/idlelib/configDialog.py | 394 +++++++++----------- Lib/idlelib/help.html | 22 +- Lib/idlelib/idle_test/htest.py | 12 +- 6 files changed, 200 insertions(+), 254 deletions(-) diff --git a/Doc/library/idle.rst b/Doc/library/idle.rst --- a/Doc/library/idle.rst +++ b/Doc/library/idle.rst @@ -252,17 +252,16 @@ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Configure IDLE - Open a configuration dialog. Fonts, indentation, keybindings, and color - themes may be altered. Startup Preferences may be set, and additional - help sources can be specified. Non-default user setting are saved in a - .idlerc directory in the user's home directory. Problems caused by bad user - configuration files are solved by editing or deleting one or more of the - files in .idlerc. On OS X, open the configuration dialog by selecting - Preferences in the application menu. + Open a configuration dialog and change preferences for the following: + fonts, indentation, keybindings, text color themes, startup windows and + size, additional help sources, and extensions (see below). On OS X, + open the configuration dialog by selecting Preferences in the application + menu. To use a new built-in color theme (IDLE Dark) with older IDLEs, + save it as a new custom theme. -Configure Extensions - Open a configuration dialog for setting preferences for extensions - (discussed below). See note above about the location of user settings. + Non-default user settings are saved in a .idlerc directory in the user's + home directory. Problems caused by bad user configuration files are solved + by editing or deleting one or more of the files in .idlerc. Code Context (toggle)(Editor Window only) Open a pane at the top of the edit window which shows the block context diff --git a/Lib/idlelib/Bindings.py b/Lib/idlelib/Bindings.py --- a/Lib/idlelib/Bindings.py +++ b/Lib/idlelib/Bindings.py @@ -78,7 +78,6 @@ ]), ('options', [ ('Configure _IDLE', '<>'), - ('Configure _Extensions', '<>'), None, ]), ('help', [ diff --git a/Lib/idlelib/EditorWindow.py b/Lib/idlelib/EditorWindow.py --- a/Lib/idlelib/EditorWindow.py +++ b/Lib/idlelib/EditorWindow.py @@ -191,8 +191,6 @@ text.bind("<>", self.python_docs) text.bind("<>", self.about_dialog) text.bind("<>", self.config_dialog) - text.bind("<>", - self.config_extensions_dialog) text.bind("<>", self.open_module) text.bind("<>", lambda event: "break") text.bind("<>", self.select_all) @@ -514,10 +512,6 @@ # Synchronize with macosxSupport.overrideRootMenu.config_dialog. configDialog.ConfigDialog(self.top,'Settings') - def config_extensions_dialog(self, event=None): - "Handle Options 'Configure Extensions' event." - configDialog.ConfigExtensionsDialog(self.top) - def help_dialog(self, event=None): "Handle Help 'IDLE Help' event." # Synchronize with macosxSupport.overrideRootMenu.help_dialog. 
diff --git a/Lib/idlelib/configDialog.py b/Lib/idlelib/configDialog.py --- a/Lib/idlelib/configDialog.py +++ b/Lib/idlelib/configDialog.py @@ -80,12 +80,14 @@ def CreateWidgets(self): self.tabPages = TabbedPageSet(self, - page_names=['Fonts/Tabs', 'Highlighting', 'Keys', 'General']) + page_names=['Fonts/Tabs', 'Highlighting', 'Keys', 'General', + 'Extensions']) self.tabPages.pack(side=TOP, expand=TRUE, fill=BOTH) self.CreatePageFontTab() self.CreatePageHighlight() self.CreatePageKeys() self.CreatePageGeneral() + self.CreatePageExtensions() self.create_action_buttons().pack(side=BOTTOM) def create_action_buttons(self): @@ -1092,6 +1094,7 @@ self.LoadKeyCfg() ### general page self.LoadGeneralCfg() + # note: extension page handled separately def SaveNewKeySet(self, keySetName, keySet): """ @@ -1145,6 +1148,7 @@ # save these even if unchanged! idleConf.userCfg[configType].Save() self.ResetChangedItems() #clear the changed items dict + self.save_all_changed_extensions() # uses a different mechanism def DeactivateCurrentConfig(self): #Before a config is saved, some cleanup of current @@ -1180,6 +1184,168 @@ view_text(self, title='Help for IDLE preferences', text=help_common+help_pages.get(page, '')) + def CreatePageExtensions(self): + """Part of the config dialog used for configuring IDLE extensions. + + This code is generic - it works for any and all IDLE extensions. + + IDLE extensions save their configuration options using idleConf. + This code reads the current configuration using idleConf, supplies a + GUI interface to change the configuration values, and saves the + changes using idleConf. + + Not all changes take effect immediately - some may require restarting IDLE. + This depends on each extension's implementation. + + All values are treated as text, and it is up to the user to supply + reasonable values. The only exception to this are the 'enable*' options, + which are boolean, and can be toggled with an True/False button. 
+ """ + parent = self.parent + frame = self.tabPages.pages['Extensions'].frame + self.ext_defaultCfg = idleConf.defaultCfg['extensions'] + self.ext_userCfg = idleConf.userCfg['extensions'] + self.is_int = self.register(is_int) + self.load_extensions() + # create widgets - a listbox shows all available extensions, with the + # controls for the extension selected in the listbox to the right + self.extension_names = StringVar(self) + frame.rowconfigure(0, weight=1) + frame.columnconfigure(2, weight=1) + self.extension_list = Listbox(frame, listvariable=self.extension_names, + selectmode='browse') + self.extension_list.bind('<>', self.extension_selected) + scroll = Scrollbar(frame, command=self.extension_list.yview) + self.extension_list.yscrollcommand=scroll.set + self.details_frame = LabelFrame(frame, width=250, height=250) + self.extension_list.grid(column=0, row=0, sticky='nws') + scroll.grid(column=1, row=0, sticky='ns') + self.details_frame.grid(column=2, row=0, sticky='nsew', padx=[10, 0]) + frame.configure(padx=10, pady=10) + self.config_frame = {} + self.current_extension = None + + self.outerframe = self # TEMPORARY + self.tabbed_page_set = self.extension_list # TEMPORARY + + # create the frame holding controls for each extension + ext_names = '' + for ext_name in sorted(self.extensions): + self.create_extension_frame(ext_name) + ext_names = ext_names + '{' + ext_name + '} ' + self.extension_names.set(ext_names) + self.extension_list.selection_set(0) + self.extension_selected(None) + + def load_extensions(self): + "Fill self.extensions with data from the default and user configs." + self.extensions = {} + for ext_name in idleConf.GetExtensions(active_only=False): + self.extensions[ext_name] = [] + + for ext_name in self.extensions: + opt_list = sorted(self.ext_defaultCfg.GetOptionList(ext_name)) + + # bring 'enable' options to the beginning of the list + enables = [opt_name for opt_name in opt_list + if opt_name.startswith('enable')] + for opt_name in enables: + opt_list.remove(opt_name) + opt_list = enables + opt_list + + for opt_name in opt_list: + def_str = self.ext_defaultCfg.Get( + ext_name, opt_name, raw=True) + try: + def_obj = {'True':True, 'False':False}[def_str] + opt_type = 'bool' + except KeyError: + try: + def_obj = int(def_str) + opt_type = 'int' + except ValueError: + def_obj = def_str + opt_type = None + try: + value = self.ext_userCfg.Get( + ext_name, opt_name, type=opt_type, raw=True, + default=def_obj) + except ValueError: # Need this until .Get fixed + value = def_obj # bad values overwritten by entry + var = StringVar(self) + var.set(str(value)) + + self.extensions[ext_name].append({'name': opt_name, + 'type': opt_type, + 'default': def_str, + 'value': value, + 'var': var, + }) + + def extension_selected(self, event): + newsel = self.extension_list.curselection() + if newsel: + newsel = self.extension_list.get(newsel) + if newsel is None or newsel != self.current_extension: + if self.current_extension: + self.details_frame.config(text='') + self.config_frame[self.current_extension].grid_forget() + self.current_extension = None + if newsel: + self.details_frame.config(text=newsel) + self.config_frame[newsel].grid(column=0, row=0, sticky='nsew') + self.current_extension = newsel + + def create_extension_frame(self, ext_name): + """Create a frame holding the widgets to configure one extension""" + f = VerticalScrolledFrame(self.details_frame, height=250, width=250) + self.config_frame[ext_name] = f + entry_area = f.interior + # create an entry for each configuration 
option + for row, opt in enumerate(self.extensions[ext_name]): + # create a row with a label and entry/checkbutton + label = Label(entry_area, text=opt['name']) + label.grid(row=row, column=0, sticky=NW) + var = opt['var'] + if opt['type'] == 'bool': + Checkbutton(entry_area, textvariable=var, variable=var, + onvalue='True', offvalue='False', + indicatoron=FALSE, selectcolor='', width=8 + ).grid(row=row, column=1, sticky=W, padx=7) + elif opt['type'] == 'int': + Entry(entry_area, textvariable=var, validate='key', + validatecommand=(self.is_int, '%P') + ).grid(row=row, column=1, sticky=NSEW, padx=7) + + else: + Entry(entry_area, textvariable=var + ).grid(row=row, column=1, sticky=NSEW, padx=7) + return + + def set_extension_value(self, section, opt): + name = opt['name'] + default = opt['default'] + value = opt['var'].get().strip() or default + opt['var'].set(value) + # if self.defaultCfg.has_section(section): + # Currently, always true; if not, indent to return + if (value == default): + return self.ext_userCfg.RemoveOption(section, name) + # set the option + return self.ext_userCfg.SetOption(section, name, value) + + def save_all_changed_extensions(self): + """Save configuration changes to the user config file.""" + has_changes = False + for ext_name in self.extensions: + options = self.extensions[ext_name] + for opt in options: + if self.set_extension_value(ext_name, opt): + has_changes = True + if has_changes: + self.ext_userCfg.Save() + + help_common = '''\ When you click either the Apply or Ok buttons, settings in this dialog that are different from IDLE's default are saved in @@ -1198,6 +1364,17 @@ } +def is_int(s): + "Return 's is blank or represents an int'" + if not s: + return True + try: + int(s) + return True + except ValueError: + return False + + class VerticalScrolledFrame(Frame): """A pure Tkinter vertically scrollable frame. @@ -1240,221 +1417,6 @@ return -def is_int(s): - "Return 's is blank or represents an int'" - if not s: - return True - try: - int(s) - return True - except ValueError: - return False - -# TODO: -# * Revert to default(s)? Per option or per extension? -# * List options in their original order (possible??) -class ConfigExtensionsDialog(Toplevel): - """A dialog for configuring IDLE extensions. - - This dialog is generic - it works for any and all IDLE extensions. - - IDLE extensions save their configuration options using idleConf. - ConfigExtensionsDialog reads the current configuration using idleConf, - supplies a GUI interface to change the configuration values, and saves the - changes using idleConf. - - Not all changes take effect immediately - some may require restarting IDLE. - This depends on each extension's implementation. - - All values are treated as text, and it is up to the user to supply - reasonable values. The only exception to this are the 'enable*' options, - which are boolean, and can be toggled with an True/False button. 
- """ - def __init__(self, parent, title=None, _htest=False): - Toplevel.__init__(self, parent) - self.wm_withdraw() - - self.configure(borderwidth=5) - self.geometry( - "+%d+%d" % (parent.winfo_rootx() + 20, - parent.winfo_rooty() + (30 if not _htest else 150))) - self.wm_title(title or 'IDLE Extensions Configuration') - - self.defaultCfg = idleConf.defaultCfg['extensions'] - self.userCfg = idleConf.userCfg['extensions'] - self.is_int = self.register(is_int) - self.load_extensions() - self.create_widgets() - - self.resizable(height=FALSE, width=FALSE) # don't allow resizing yet - self.transient(parent) - self.protocol("WM_DELETE_WINDOW", self.Cancel) - self.tabbed_page_set.focus_set() - # wait for window to be generated - self.update() - # set current width as the minimum width - self.wm_minsize(self.winfo_width(), 1) - # now allow resizing - self.resizable(height=TRUE, width=TRUE) - - self.wm_deiconify() - if not _htest: - self.grab_set() - self.wait_window() - - def load_extensions(self): - "Fill self.extensions with data from the default and user configs." - self.extensions = {} - for ext_name in idleConf.GetExtensions(active_only=False): - self.extensions[ext_name] = [] - - for ext_name in self.extensions: - opt_list = sorted(self.defaultCfg.GetOptionList(ext_name)) - - # bring 'enable' options to the beginning of the list - enables = [opt_name for opt_name in opt_list - if opt_name.startswith('enable')] - for opt_name in enables: - opt_list.remove(opt_name) - opt_list = enables + opt_list - - for opt_name in opt_list: - def_str = self.defaultCfg.Get( - ext_name, opt_name, raw=True) - try: - def_obj = {'True':True, 'False':False}[def_str] - opt_type = 'bool' - except KeyError: - try: - def_obj = int(def_str) - opt_type = 'int' - except ValueError: - def_obj = def_str - opt_type = None - try: - value = self.userCfg.Get( - ext_name, opt_name, type=opt_type, raw=True, - default=def_obj) - except ValueError: # Need this until .Get fixed - value = def_obj # bad values overwritten by entry - var = StringVar(self) - var.set(str(value)) - - self.extensions[ext_name].append({'name': opt_name, - 'type': opt_type, - 'default': def_str, - 'value': value, - 'var': var, - }) - - def create_widgets(self): - """Create the dialog's widgets.""" - self.extension_names = StringVar(self) - self.rowconfigure(0, weight=1) - self.columnconfigure(2, weight=1) - self.extension_list = Listbox(self, listvariable=self.extension_names, - selectmode='browse') - self.extension_list.bind('<>', self.extension_selected) - scroll = Scrollbar(self, command=self.extension_list.yview) - self.extension_list.yscrollcommand=scroll.set - self.details_frame = LabelFrame(self, width=250, height=250) - self.extension_list.grid(column=0, row=0, sticky='nws') - scroll.grid(column=1, row=0, sticky='ns') - self.details_frame.grid(column=2, row=0, sticky='nsew', padx=[10, 0]) - self.configure(padx=10, pady=10) - self.config_frame = {} - self.current_extension = None - - self.outerframe = self # TEMPORARY - self.tabbed_page_set = self.extension_list # TEMPORARY - - # create the individual pages - ext_names = '' - for ext_name in sorted(self.extensions): - self.create_extension_frame(ext_name) - ext_names = ext_names + '{' + ext_name + '} ' - self.extension_names.set(ext_names) - self.extension_list.selection_set(0) - self.extension_selected(None) - self.create_action_buttons().grid(row=1, columnspan=3) - - def extension_selected(self, event): - newsel = self.extension_list.curselection() - if newsel: - newsel = 
self.extension_list.get(newsel) - if newsel is None or newsel != self.current_extension: - if self.current_extension: - self.details_frame.config(text='') - self.config_frame[self.current_extension].grid_forget() - self.current_extension = None - if newsel: - self.details_frame.config(text=newsel) - self.config_frame[newsel].grid(column=0, row=0, sticky='nsew') - self.current_extension = newsel - - create_action_buttons = ConfigDialog.create_action_buttons - - def create_extension_frame(self, ext_name): - """Create a frame holding the widgets to configure one extension""" - f = VerticalScrolledFrame(self.details_frame, height=250, width=250) - self.config_frame[ext_name] = f - entry_area = f.interior - # create an entry for each configuration option - for row, opt in enumerate(self.extensions[ext_name]): - # create a row with a label and entry/checkbutton - label = Label(entry_area, text=opt['name']) - label.grid(row=row, column=0, sticky=NW) - var = opt['var'] - if opt['type'] == 'bool': - Checkbutton(entry_area, textvariable=var, variable=var, - onvalue='True', offvalue='False', - indicatoron=FALSE, selectcolor='', width=8 - ).grid(row=row, column=1, sticky=W, padx=7) - elif opt['type'] == 'int': - Entry(entry_area, textvariable=var, validate='key', - validatecommand=(self.is_int, '%P') - ).grid(row=row, column=1, sticky=NSEW, padx=7) - - else: - Entry(entry_area, textvariable=var - ).grid(row=row, column=1, sticky=NSEW, padx=7) - return - - - Ok = ConfigDialog.Ok - - def Apply(self): - self.save_all_changed_configs() - pass - - Cancel = ConfigDialog.Cancel - - def Help(self): - pass - - def set_user_value(self, section, opt): - name = opt['name'] - default = opt['default'] - value = opt['var'].get().strip() or default - opt['var'].set(value) - # if self.defaultCfg.has_section(section): - # Currently, always true; if not, indent to return - if (value == default): - return self.userCfg.RemoveOption(section, name) - # set the option - return self.userCfg.SetOption(section, name, value) - - def save_all_changed_configs(self): - """Save configuration changes to the user config file.""" - has_changes = False - for ext_name in self.extensions: - options = self.extensions[ext_name] - for opt in options: - if self.set_user_value(ext_name, opt): - has_changes = True - if has_changes: - self.userCfg.Save() - if __name__ == '__main__': import unittest diff --git a/Lib/idlelib/help.html b/Lib/idlelib/help.html --- a/Lib/idlelib/help.html +++ b/Lib/idlelib/help.html @@ -266,16 +266,16 @@

25.5.1.7. Options menu (Shell and Editor)¶

          Configure IDLE
          -
Open a configuration dialog. Fonts, indentation, keybindings, and color -themes may be altered. Startup Preferences may be set, and additional -help sources can be specified. Non-default user setting are saved in a -.idlerc directory in the user’s home directory. Problems caused by bad user -configuration files are solved by editing or deleting one or more of the -files in .idlerc. On OS X, open the configuration dialog by selecting -Preferences in the application menu.
          -
          Configure Extensions
          -
          Open a configuration dialog for setting preferences for extensions -(discussed below). See note above about the location of user settings.
          +

          Open a configuration dialog and change preferences for the following: +fonts, indentation, keybindings, text color themes, startup windows and +size, additional help sources, and extensions (see below). On OS X, +open the configuration dialog by selecting Preferences in the application +menu. To use a new built-in color theme (IDLE Dark) with older IDLEs, +save it as a new custom theme.

          +

Non-default user settings are saved in a .idlerc directory in the user’s +home directory. Problems caused by bad user configuration files are solved +by editing or deleting one or more of the files in .idlerc.

          +
          Code Context (toggle)(Editor Window only)
          Open a pane at the top of the edit window which shows the block context of the code which has scrolled above the top of the window.
          @@ -699,7 +699,7 @@ The Python Software Foundation is a non-profit corporation. Please donate.
          - Last updated on Oct 02, 2015. + Last updated on Oct 13, 2015. Found a bug?
          Created using Sphinx 1.2.3. diff --git a/Lib/idlelib/idle_test/htest.py b/Lib/idlelib/idle_test/htest.py --- a/Lib/idlelib/idle_test/htest.py +++ b/Lib/idlelib/idle_test/htest.py @@ -93,15 +93,6 @@ "Double clicking on items prints a traceback for an exception " "that is ignored." } -ConfigExtensionsDialog_spec = { - 'file': 'configDialog', - 'kwds': {'title': 'Test Extension Configuration', - '_htest': True,}, - 'msg': "IDLE extensions dialog.\n" - "\n[Ok] to close the dialog.[Apply] to apply the settings and " - "and [Cancel] to revert all changes.\nRe-run the test to ensure " - "changes made have persisted." - } _color_delegator_spec = { 'file': 'ColorDelegator', @@ -121,7 +112,8 @@ "font face of the text in the area below it.\nIn the " "'Highlighting' tab, try different color schemes. Clicking " "items in the sample program should update the choices above it." - "\nIn the 'Keys' and 'General' tab, test settings of interest." + "\nIn the 'Keys', 'General' and 'Extensions' tabs, test settings" + "of interest." "\n[Ok] to close the dialog.[Apply] to apply the settings and " "and [Cancel] to revert all changes.\nRe-run the test to ensure " "changes made have persisted." -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Oct 14 04:09:54 2015 From: python-checkins at python.org (terry.reedy) Date: Wed, 14 Oct 2015 02:09:54 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzI0Nzgy?= =?utf-8?q?=3A_Finish_converting_the_Configure_Extension_dialog_into_a_new?= Message-ID: <20151014020954.20757.32695@psf.io> https://hg.python.org/cpython/rev/5647c61fb593 changeset: 98732:5647c61fb593 branch: 3.4 parent: 98724:e52f1fa2d10e user: Terry Jan Reedy date: Tue Oct 13 22:03:51 2015 -0400 summary: Issue #24782: Finish converting the Configure Extension dialog into a new tab in the IDLE Preferences dialog. Code patch by Mark Roseman. files: Doc/library/idle.rst | 19 +- Lib/idlelib/Bindings.py | 1 - Lib/idlelib/EditorWindow.py | 6 - Lib/idlelib/configDialog.py | 394 +++++++++----------- Lib/idlelib/help.html | 22 +- Lib/idlelib/idle_test/htest.py | 12 +- 6 files changed, 200 insertions(+), 254 deletions(-) diff --git a/Doc/library/idle.rst b/Doc/library/idle.rst --- a/Doc/library/idle.rst +++ b/Doc/library/idle.rst @@ -252,17 +252,16 @@ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Configure IDLE - Open a configuration dialog. Fonts, indentation, keybindings, and color - themes may be altered. Startup Preferences may be set, and additional - help sources can be specified. Non-default user setting are saved in a - .idlerc directory in the user's home directory. Problems caused by bad user - configuration files are solved by editing or deleting one or more of the - files in .idlerc. On OS X, open the configuration dialog by selecting - Preferences in the application menu. + Open a configuration dialog and change preferences for the following: + fonts, indentation, keybindings, text color themes, startup windows and + size, additional help sources, and extensions (see below). On OS X, + open the configuration dialog by selecting Preferences in the application + menu. To use a new built-in color theme (IDLE Dark) with older IDLEs, + save it as a new custom theme. -Configure Extensions - Open a configuration dialog for setting preferences for extensions - (discussed below). See note above about the location of user settings. + Non-default user settings are saved in a .idlerc directory in the user's + home directory. 
Problems caused by bad user configuration files are solved + by editing or deleting one or more of the files in .idlerc. Code Context (toggle)(Editor Window only) Open a pane at the top of the edit window which shows the block context diff --git a/Lib/idlelib/Bindings.py b/Lib/idlelib/Bindings.py --- a/Lib/idlelib/Bindings.py +++ b/Lib/idlelib/Bindings.py @@ -78,7 +78,6 @@ ]), ('options', [ ('Configure _IDLE', '<>'), - ('Configure _Extensions', '<>'), None, ]), ('help', [ diff --git a/Lib/idlelib/EditorWindow.py b/Lib/idlelib/EditorWindow.py --- a/Lib/idlelib/EditorWindow.py +++ b/Lib/idlelib/EditorWindow.py @@ -191,8 +191,6 @@ text.bind("<>", self.python_docs) text.bind("<>", self.about_dialog) text.bind("<>", self.config_dialog) - text.bind("<>", - self.config_extensions_dialog) text.bind("<>", self.open_module) text.bind("<>", lambda event: "break") text.bind("<>", self.select_all) @@ -514,10 +512,6 @@ # Synchronize with macosxSupport.overrideRootMenu.config_dialog. configDialog.ConfigDialog(self.top,'Settings') - def config_extensions_dialog(self, event=None): - "Handle Options 'Configure Extensions' event." - configDialog.ConfigExtensionsDialog(self.top) - def help_dialog(self, event=None): "Handle Help 'IDLE Help' event." # Synchronize with macosxSupport.overrideRootMenu.help_dialog. diff --git a/Lib/idlelib/configDialog.py b/Lib/idlelib/configDialog.py --- a/Lib/idlelib/configDialog.py +++ b/Lib/idlelib/configDialog.py @@ -80,12 +80,14 @@ def CreateWidgets(self): self.tabPages = TabbedPageSet(self, - page_names=['Fonts/Tabs', 'Highlighting', 'Keys', 'General']) + page_names=['Fonts/Tabs', 'Highlighting', 'Keys', 'General', + 'Extensions']) self.tabPages.pack(side=TOP, expand=TRUE, fill=BOTH) self.CreatePageFontTab() self.CreatePageHighlight() self.CreatePageKeys() self.CreatePageGeneral() + self.CreatePageExtensions() self.create_action_buttons().pack(side=BOTTOM) def create_action_buttons(self): @@ -1092,6 +1094,7 @@ self.LoadKeyCfg() ### general page self.LoadGeneralCfg() + # note: extension page handled separately def SaveNewKeySet(self, keySetName, keySet): """ @@ -1145,6 +1148,7 @@ # save these even if unchanged! idleConf.userCfg[configType].Save() self.ResetChangedItems() #clear the changed items dict + self.save_all_changed_extensions() # uses a different mechanism def DeactivateCurrentConfig(self): #Before a config is saved, some cleanup of current @@ -1180,6 +1184,168 @@ view_text(self, title='Help for IDLE preferences', text=help_common+help_pages.get(page, '')) + def CreatePageExtensions(self): + """Part of the config dialog used for configuring IDLE extensions. + + This code is generic - it works for any and all IDLE extensions. + + IDLE extensions save their configuration options using idleConf. + This code reads the current configuration using idleConf, supplies a + GUI interface to change the configuration values, and saves the + changes using idleConf. + + Not all changes take effect immediately - some may require restarting IDLE. + This depends on each extension's implementation. + + All values are treated as text, and it is up to the user to supply + reasonable values. The only exception to this are the 'enable*' options, + which are boolean, and can be toggled with an True/False button. 
+ """ + parent = self.parent + frame = self.tabPages.pages['Extensions'].frame + self.ext_defaultCfg = idleConf.defaultCfg['extensions'] + self.ext_userCfg = idleConf.userCfg['extensions'] + self.is_int = self.register(is_int) + self.load_extensions() + # create widgets - a listbox shows all available extensions, with the + # controls for the extension selected in the listbox to the right + self.extension_names = StringVar(self) + frame.rowconfigure(0, weight=1) + frame.columnconfigure(2, weight=1) + self.extension_list = Listbox(frame, listvariable=self.extension_names, + selectmode='browse') + self.extension_list.bind('<>', self.extension_selected) + scroll = Scrollbar(frame, command=self.extension_list.yview) + self.extension_list.yscrollcommand=scroll.set + self.details_frame = LabelFrame(frame, width=250, height=250) + self.extension_list.grid(column=0, row=0, sticky='nws') + scroll.grid(column=1, row=0, sticky='ns') + self.details_frame.grid(column=2, row=0, sticky='nsew', padx=[10, 0]) + frame.configure(padx=10, pady=10) + self.config_frame = {} + self.current_extension = None + + self.outerframe = self # TEMPORARY + self.tabbed_page_set = self.extension_list # TEMPORARY + + # create the frame holding controls for each extension + ext_names = '' + for ext_name in sorted(self.extensions): + self.create_extension_frame(ext_name) + ext_names = ext_names + '{' + ext_name + '} ' + self.extension_names.set(ext_names) + self.extension_list.selection_set(0) + self.extension_selected(None) + + def load_extensions(self): + "Fill self.extensions with data from the default and user configs." + self.extensions = {} + for ext_name in idleConf.GetExtensions(active_only=False): + self.extensions[ext_name] = [] + + for ext_name in self.extensions: + opt_list = sorted(self.ext_defaultCfg.GetOptionList(ext_name)) + + # bring 'enable' options to the beginning of the list + enables = [opt_name for opt_name in opt_list + if opt_name.startswith('enable')] + for opt_name in enables: + opt_list.remove(opt_name) + opt_list = enables + opt_list + + for opt_name in opt_list: + def_str = self.ext_defaultCfg.Get( + ext_name, opt_name, raw=True) + try: + def_obj = {'True':True, 'False':False}[def_str] + opt_type = 'bool' + except KeyError: + try: + def_obj = int(def_str) + opt_type = 'int' + except ValueError: + def_obj = def_str + opt_type = None + try: + value = self.ext_userCfg.Get( + ext_name, opt_name, type=opt_type, raw=True, + default=def_obj) + except ValueError: # Need this until .Get fixed + value = def_obj # bad values overwritten by entry + var = StringVar(self) + var.set(str(value)) + + self.extensions[ext_name].append({'name': opt_name, + 'type': opt_type, + 'default': def_str, + 'value': value, + 'var': var, + }) + + def extension_selected(self, event): + newsel = self.extension_list.curselection() + if newsel: + newsel = self.extension_list.get(newsel) + if newsel is None or newsel != self.current_extension: + if self.current_extension: + self.details_frame.config(text='') + self.config_frame[self.current_extension].grid_forget() + self.current_extension = None + if newsel: + self.details_frame.config(text=newsel) + self.config_frame[newsel].grid(column=0, row=0, sticky='nsew') + self.current_extension = newsel + + def create_extension_frame(self, ext_name): + """Create a frame holding the widgets to configure one extension""" + f = VerticalScrolledFrame(self.details_frame, height=250, width=250) + self.config_frame[ext_name] = f + entry_area = f.interior + # create an entry for each configuration 
option + for row, opt in enumerate(self.extensions[ext_name]): + # create a row with a label and entry/checkbutton + label = Label(entry_area, text=opt['name']) + label.grid(row=row, column=0, sticky=NW) + var = opt['var'] + if opt['type'] == 'bool': + Checkbutton(entry_area, textvariable=var, variable=var, + onvalue='True', offvalue='False', + indicatoron=FALSE, selectcolor='', width=8 + ).grid(row=row, column=1, sticky=W, padx=7) + elif opt['type'] == 'int': + Entry(entry_area, textvariable=var, validate='key', + validatecommand=(self.is_int, '%P') + ).grid(row=row, column=1, sticky=NSEW, padx=7) + + else: + Entry(entry_area, textvariable=var + ).grid(row=row, column=1, sticky=NSEW, padx=7) + return + + def set_extension_value(self, section, opt): + name = opt['name'] + default = opt['default'] + value = opt['var'].get().strip() or default + opt['var'].set(value) + # if self.defaultCfg.has_section(section): + # Currently, always true; if not, indent to return + if (value == default): + return self.ext_userCfg.RemoveOption(section, name) + # set the option + return self.ext_userCfg.SetOption(section, name, value) + + def save_all_changed_extensions(self): + """Save configuration changes to the user config file.""" + has_changes = False + for ext_name in self.extensions: + options = self.extensions[ext_name] + for opt in options: + if self.set_extension_value(ext_name, opt): + has_changes = True + if has_changes: + self.ext_userCfg.Save() + + help_common = '''\ When you click either the Apply or Ok buttons, settings in this dialog that are different from IDLE's default are saved in @@ -1198,6 +1364,17 @@ } +def is_int(s): + "Return 's is blank or represents an int'" + if not s: + return True + try: + int(s) + return True + except ValueError: + return False + + class VerticalScrolledFrame(Frame): """A pure Tkinter vertically scrollable frame. @@ -1240,221 +1417,6 @@ return -def is_int(s): - "Return 's is blank or represents an int'" - if not s: - return True - try: - int(s) - return True - except ValueError: - return False - -# TODO: -# * Revert to default(s)? Per option or per extension? -# * List options in their original order (possible??) -class ConfigExtensionsDialog(Toplevel): - """A dialog for configuring IDLE extensions. - - This dialog is generic - it works for any and all IDLE extensions. - - IDLE extensions save their configuration options using idleConf. - ConfigExtensionsDialog reads the current configuration using idleConf, - supplies a GUI interface to change the configuration values, and saves the - changes using idleConf. - - Not all changes take effect immediately - some may require restarting IDLE. - This depends on each extension's implementation. - - All values are treated as text, and it is up to the user to supply - reasonable values. The only exception to this are the 'enable*' options, - which are boolean, and can be toggled with an True/False button. 
- """ - def __init__(self, parent, title=None, _htest=False): - Toplevel.__init__(self, parent) - self.wm_withdraw() - - self.configure(borderwidth=5) - self.geometry( - "+%d+%d" % (parent.winfo_rootx() + 20, - parent.winfo_rooty() + (30 if not _htest else 150))) - self.wm_title(title or 'IDLE Extensions Configuration') - - self.defaultCfg = idleConf.defaultCfg['extensions'] - self.userCfg = idleConf.userCfg['extensions'] - self.is_int = self.register(is_int) - self.load_extensions() - self.create_widgets() - - self.resizable(height=FALSE, width=FALSE) # don't allow resizing yet - self.transient(parent) - self.protocol("WM_DELETE_WINDOW", self.Cancel) - self.tabbed_page_set.focus_set() - # wait for window to be generated - self.update() - # set current width as the minimum width - self.wm_minsize(self.winfo_width(), 1) - # now allow resizing - self.resizable(height=TRUE, width=TRUE) - - self.wm_deiconify() - if not _htest: - self.grab_set() - self.wait_window() - - def load_extensions(self): - "Fill self.extensions with data from the default and user configs." - self.extensions = {} - for ext_name in idleConf.GetExtensions(active_only=False): - self.extensions[ext_name] = [] - - for ext_name in self.extensions: - opt_list = sorted(self.defaultCfg.GetOptionList(ext_name)) - - # bring 'enable' options to the beginning of the list - enables = [opt_name for opt_name in opt_list - if opt_name.startswith('enable')] - for opt_name in enables: - opt_list.remove(opt_name) - opt_list = enables + opt_list - - for opt_name in opt_list: - def_str = self.defaultCfg.Get( - ext_name, opt_name, raw=True) - try: - def_obj = {'True':True, 'False':False}[def_str] - opt_type = 'bool' - except KeyError: - try: - def_obj = int(def_str) - opt_type = 'int' - except ValueError: - def_obj = def_str - opt_type = None - try: - value = self.userCfg.Get( - ext_name, opt_name, type=opt_type, raw=True, - default=def_obj) - except ValueError: # Need this until .Get fixed - value = def_obj # bad values overwritten by entry - var = StringVar(self) - var.set(str(value)) - - self.extensions[ext_name].append({'name': opt_name, - 'type': opt_type, - 'default': def_str, - 'value': value, - 'var': var, - }) - - def create_widgets(self): - """Create the dialog's widgets.""" - self.extension_names = StringVar(self) - self.rowconfigure(0, weight=1) - self.columnconfigure(2, weight=1) - self.extension_list = Listbox(self, listvariable=self.extension_names, - selectmode='browse') - self.extension_list.bind('<>', self.extension_selected) - scroll = Scrollbar(self, command=self.extension_list.yview) - self.extension_list.yscrollcommand=scroll.set - self.details_frame = LabelFrame(self, width=250, height=250) - self.extension_list.grid(column=0, row=0, sticky='nws') - scroll.grid(column=1, row=0, sticky='ns') - self.details_frame.grid(column=2, row=0, sticky='nsew', padx=[10, 0]) - self.configure(padx=10, pady=10) - self.config_frame = {} - self.current_extension = None - - self.outerframe = self # TEMPORARY - self.tabbed_page_set = self.extension_list # TEMPORARY - - # create the individual pages - ext_names = '' - for ext_name in sorted(self.extensions): - self.create_extension_frame(ext_name) - ext_names = ext_names + '{' + ext_name + '} ' - self.extension_names.set(ext_names) - self.extension_list.selection_set(0) - self.extension_selected(None) - self.create_action_buttons().grid(row=1, columnspan=3) - - def extension_selected(self, event): - newsel = self.extension_list.curselection() - if newsel: - newsel = 
self.extension_list.get(newsel) - if newsel is None or newsel != self.current_extension: - if self.current_extension: - self.details_frame.config(text='') - self.config_frame[self.current_extension].grid_forget() - self.current_extension = None - if newsel: - self.details_frame.config(text=newsel) - self.config_frame[newsel].grid(column=0, row=0, sticky='nsew') - self.current_extension = newsel - - create_action_buttons = ConfigDialog.create_action_buttons - - def create_extension_frame(self, ext_name): - """Create a frame holding the widgets to configure one extension""" - f = VerticalScrolledFrame(self.details_frame, height=250, width=250) - self.config_frame[ext_name] = f - entry_area = f.interior - # create an entry for each configuration option - for row, opt in enumerate(self.extensions[ext_name]): - # create a row with a label and entry/checkbutton - label = Label(entry_area, text=opt['name']) - label.grid(row=row, column=0, sticky=NW) - var = opt['var'] - if opt['type'] == 'bool': - Checkbutton(entry_area, textvariable=var, variable=var, - onvalue='True', offvalue='False', - indicatoron=FALSE, selectcolor='', width=8 - ).grid(row=row, column=1, sticky=W, padx=7) - elif opt['type'] == 'int': - Entry(entry_area, textvariable=var, validate='key', - validatecommand=(self.is_int, '%P') - ).grid(row=row, column=1, sticky=NSEW, padx=7) - - else: - Entry(entry_area, textvariable=var - ).grid(row=row, column=1, sticky=NSEW, padx=7) - return - - - Ok = ConfigDialog.Ok - - def Apply(self): - self.save_all_changed_configs() - pass - - Cancel = ConfigDialog.Cancel - - def Help(self): - pass - - def set_user_value(self, section, opt): - name = opt['name'] - default = opt['default'] - value = opt['var'].get().strip() or default - opt['var'].set(value) - # if self.defaultCfg.has_section(section): - # Currently, always true; if not, indent to return - if (value == default): - return self.userCfg.RemoveOption(section, name) - # set the option - return self.userCfg.SetOption(section, name, value) - - def save_all_changed_configs(self): - """Save configuration changes to the user config file.""" - has_changes = False - for ext_name in self.extensions: - options = self.extensions[ext_name] - for opt in options: - if self.set_user_value(ext_name, opt): - has_changes = True - if has_changes: - self.userCfg.Save() - if __name__ == '__main__': import unittest diff --git a/Lib/idlelib/help.html b/Lib/idlelib/help.html --- a/Lib/idlelib/help.html +++ b/Lib/idlelib/help.html @@ -266,16 +266,16 @@

25.5.1.7. Options menu (Shell and Editor)¶

          Configure IDLE
          -
Open a configuration dialog. Fonts, indentation, keybindings, and color -themes may be altered. Startup Preferences may be set, and additional -help sources can be specified. Non-default user setting are saved in a -.idlerc directory in the user’s home directory. Problems caused by bad user -configuration files are solved by editing or deleting one or more of the -files in .idlerc. On OS X, open the configuration dialog by selecting -Preferences in the application menu.
          -
          Configure Extensions
          -
          Open a configuration dialog for setting preferences for extensions -(discussed below). See note above about the location of user settings.
          +

          Open a configuration dialog and change preferences for the following: +fonts, indentation, keybindings, text color themes, startup windows and +size, additional help sources, and extensions (see below). On OS X, +open the configuration dialog by selecting Preferences in the application +menu. To use a new built-in color theme (IDLE Dark) with older IDLEs, +save it as a new custom theme.

          +

Non-default user settings are saved in a .idlerc directory in the user’s +home directory. Problems caused by bad user configuration files are solved +by editing or deleting one or more of the files in .idlerc.

          +
          Code Context (toggle)(Editor Window only)
          Open a pane at the top of the edit window which shows the block context of the code which has scrolled above the top of the window.
          @@ -699,7 +699,7 @@ The Python Software Foundation is a non-profit corporation. Please donate.
          - Last updated on Oct 02, 2015. + Last updated on Oct 13, 2015. Found a bug?
          Created using Sphinx 1.2.3. diff --git a/Lib/idlelib/idle_test/htest.py b/Lib/idlelib/idle_test/htest.py --- a/Lib/idlelib/idle_test/htest.py +++ b/Lib/idlelib/idle_test/htest.py @@ -93,15 +93,6 @@ "Double clicking on items prints a traceback for an exception " "that is ignored." } -ConfigExtensionsDialog_spec = { - 'file': 'configDialog', - 'kwds': {'title': 'Test Extension Configuration', - '_htest': True,}, - 'msg': "IDLE extensions dialog.\n" - "\n[Ok] to close the dialog.[Apply] to apply the settings and " - "and [Cancel] to revert all changes.\nRe-run the test to ensure " - "changes made have persisted." - } _color_delegator_spec = { 'file': 'ColorDelegator', @@ -121,7 +112,8 @@ "font face of the text in the area below it.\nIn the " "'Highlighting' tab, try different color schemes. Clicking " "items in the sample program should update the choices above it." - "\nIn the 'Keys' and 'General' tab, test settings of interest." + "\nIn the 'Keys', 'General' and 'Extensions' tabs, test settings" + "of interest." "\n[Ok] to close the dialog.[Apply] to apply the settings and " "and [Cancel] to revert all changes.\nRe-run the test to ensure " "changes made have persisted." -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Oct 14 04:09:55 2015 From: python-checkins at python.org (terry.reedy) Date: Wed, 14 Oct 2015 02:09:55 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogSXNzdWUgIzI0Nzgy?= =?utf-8?q?=3A_Finish_converting_the_Configure_Extension_dialog_into_a_new?= Message-ID: <20151014020954.3283.27325@psf.io> https://hg.python.org/cpython/rev/334dc1abc8af changeset: 98731:334dc1abc8af branch: 2.7 parent: 98707:4188cd5dc0c5 user: Terry Jan Reedy date: Tue Oct 13 22:03:44 2015 -0400 summary: Issue #24782: Finish converting the Configure Extension dialog into a new tab in the IDLE Preferences dialog. Code patch by Mark Roseman. files: Doc/library/idle.rst | 19 +- Lib/idlelib/Bindings.py | 1 - Lib/idlelib/EditorWindow.py | 6 - Lib/idlelib/configDialog.py | 394 +++++++++----------- Lib/idlelib/help.html | 22 +- Lib/idlelib/idle_test/htest.py | 12 +- 6 files changed, 200 insertions(+), 254 deletions(-) diff --git a/Doc/library/idle.rst b/Doc/library/idle.rst --- a/Doc/library/idle.rst +++ b/Doc/library/idle.rst @@ -252,17 +252,16 @@ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Configure IDLE - Open a configuration dialog. Fonts, indentation, keybindings, and color - themes may be altered. Startup Preferences may be set, and additional - help sources can be specified. Non-default user setting are saved in a - .idlerc directory in the user's home directory. Problems caused by bad user - configuration files are solved by editing or deleting one or more of the - files in .idlerc. On OS X, open the configuration dialog by selecting - Preferences in the application menu. + Open a configuration dialog and change preferences for the following: + fonts, indentation, keybindings, text color themes, startup windows and + size, additional help sources, and extensions (see below). On OS X, + open the configuration dialog by selecting Preferences in the application + menu. To use a new built-in color theme (IDLE Dark) with older IDLEs, + save it as a new custom theme. -Configure Extensions - Open a configuration dialog for setting preferences for extensions - (discussed below). See note above about the location of user settings. + Non-default user settings are saved in a .idlerc directory in the user's + home directory. 
Problems caused by bad user configuration files are solved + by editing or deleting one or more of the files in .idlerc. Code Context (toggle)(Editor Window only) Open a pane at the top of the edit window which shows the block context diff --git a/Lib/idlelib/Bindings.py b/Lib/idlelib/Bindings.py --- a/Lib/idlelib/Bindings.py +++ b/Lib/idlelib/Bindings.py @@ -76,7 +76,6 @@ ]), ('options', [ ('Configure _IDLE', '<>'), - ('Configure _Extensions', '<>'), None, ]), ('help', [ diff --git a/Lib/idlelib/EditorWindow.py b/Lib/idlelib/EditorWindow.py --- a/Lib/idlelib/EditorWindow.py +++ b/Lib/idlelib/EditorWindow.py @@ -218,8 +218,6 @@ text.bind("<>", self.python_docs) text.bind("<>", self.about_dialog) text.bind("<>", self.config_dialog) - text.bind("<>", - self.config_extensions_dialog) text.bind("<>", self.open_module) text.bind("<>", lambda event: "break") text.bind("<>", self.select_all) @@ -541,10 +539,6 @@ # Synchronize with macosxSupport.overrideRootMenu.config_dialog. configDialog.ConfigDialog(self.top,'Settings') - def config_extensions_dialog(self, event=None): - "Handle Options 'Configure Extensions' event." - configDialog.ConfigExtensionsDialog(self.top) - def help_dialog(self, event=None): "Handle Help 'IDLE Help' event." # Synchronize with macosxSupport.overrideRootMenu.help_dialog. diff --git a/Lib/idlelib/configDialog.py b/Lib/idlelib/configDialog.py --- a/Lib/idlelib/configDialog.py +++ b/Lib/idlelib/configDialog.py @@ -78,12 +78,14 @@ def CreateWidgets(self): self.tabPages = TabbedPageSet(self, - page_names=['Fonts/Tabs', 'Highlighting', 'Keys', 'General']) + page_names=['Fonts/Tabs', 'Highlighting', 'Keys', 'General', + 'Extensions']) self.tabPages.pack(side=TOP, expand=TRUE, fill=BOTH) self.CreatePageFontTab() self.CreatePageHighlight() self.CreatePageKeys() self.CreatePageGeneral() + self.CreatePageExtensions() self.create_action_buttons().pack(side=BOTTOM) def create_action_buttons(self): @@ -1109,6 +1111,7 @@ self.LoadKeyCfg() ### general page self.LoadGeneralCfg() + # note: extension page handled separately def SaveNewKeySet(self, keySetName, keySet): """ @@ -1162,6 +1165,7 @@ # save these even if unchanged! idleConf.userCfg[configType].Save() self.ResetChangedItems() #clear the changed items dict + self.save_all_changed_extensions() # uses a different mechanism def DeactivateCurrentConfig(self): #Before a config is saved, some cleanup of current @@ -1197,6 +1201,168 @@ view_text(self, title='Help for IDLE preferences', text=help_common+help_pages.get(page, '')) + def CreatePageExtensions(self): + """Part of the config dialog used for configuring IDLE extensions. + + This code is generic - it works for any and all IDLE extensions. + + IDLE extensions save their configuration options using idleConf. + This code reads the current configuration using idleConf, supplies a + GUI interface to change the configuration values, and saves the + changes using idleConf. + + Not all changes take effect immediately - some may require restarting IDLE. + This depends on each extension's implementation. + + All values are treated as text, and it is up to the user to supply + reasonable values. The only exception to this are the 'enable*' options, + which are boolean, and can be toggled with an True/False button. 
+ """ + parent = self.parent + frame = self.tabPages.pages['Extensions'].frame + self.ext_defaultCfg = idleConf.defaultCfg['extensions'] + self.ext_userCfg = idleConf.userCfg['extensions'] + self.is_int = self.register(is_int) + self.load_extensions() + # create widgets - a listbox shows all available extensions, with the + # controls for the extension selected in the listbox to the right + self.extension_names = StringVar(self) + frame.rowconfigure(0, weight=1) + frame.columnconfigure(2, weight=1) + self.extension_list = Listbox(frame, listvariable=self.extension_names, + selectmode='browse') + self.extension_list.bind('<>', self.extension_selected) + scroll = Scrollbar(frame, command=self.extension_list.yview) + self.extension_list.yscrollcommand=scroll.set + self.details_frame = LabelFrame(frame, width=250, height=250) + self.extension_list.grid(column=0, row=0, sticky='nws') + scroll.grid(column=1, row=0, sticky='ns') + self.details_frame.grid(column=2, row=0, sticky='nsew', padx=[10, 0]) + frame.configure(padx=10, pady=10) + self.config_frame = {} + self.current_extension = None + + self.outerframe = self # TEMPORARY + self.tabbed_page_set = self.extension_list # TEMPORARY + + # create the frame holding controls for each extension + ext_names = '' + for ext_name in sorted(self.extensions): + self.create_extension_frame(ext_name) + ext_names = ext_names + '{' + ext_name + '} ' + self.extension_names.set(ext_names) + self.extension_list.selection_set(0) + self.extension_selected(None) + + def load_extensions(self): + "Fill self.extensions with data from the default and user configs." + self.extensions = {} + for ext_name in idleConf.GetExtensions(active_only=False): + self.extensions[ext_name] = [] + + for ext_name in self.extensions: + opt_list = sorted(self.ext_defaultCfg.GetOptionList(ext_name)) + + # bring 'enable' options to the beginning of the list + enables = [opt_name for opt_name in opt_list + if opt_name.startswith('enable')] + for opt_name in enables: + opt_list.remove(opt_name) + opt_list = enables + opt_list + + for opt_name in opt_list: + def_str = self.ext_defaultCfg.Get( + ext_name, opt_name, raw=True) + try: + def_obj = {'True':True, 'False':False}[def_str] + opt_type = 'bool' + except KeyError: + try: + def_obj = int(def_str) + opt_type = 'int' + except ValueError: + def_obj = def_str + opt_type = None + try: + value = self.ext_userCfg.Get( + ext_name, opt_name, type=opt_type, raw=True, + default=def_obj) + except ValueError: # Need this until .Get fixed + value = def_obj # bad values overwritten by entry + var = StringVar(self) + var.set(str(value)) + + self.extensions[ext_name].append({'name': opt_name, + 'type': opt_type, + 'default': def_str, + 'value': value, + 'var': var, + }) + + def extension_selected(self, event): + newsel = self.extension_list.curselection() + if newsel: + newsel = self.extension_list.get(newsel) + if newsel is None or newsel != self.current_extension: + if self.current_extension: + self.details_frame.config(text='') + self.config_frame[self.current_extension].grid_forget() + self.current_extension = None + if newsel: + self.details_frame.config(text=newsel) + self.config_frame[newsel].grid(column=0, row=0, sticky='nsew') + self.current_extension = newsel + + def create_extension_frame(self, ext_name): + """Create a frame holding the widgets to configure one extension""" + f = VerticalScrolledFrame(self.details_frame, height=250, width=250) + self.config_frame[ext_name] = f + entry_area = f.interior + # create an entry for each configuration 
option + for row, opt in enumerate(self.extensions[ext_name]): + # create a row with a label and entry/checkbutton + label = Label(entry_area, text=opt['name']) + label.grid(row=row, column=0, sticky=NW) + var = opt['var'] + if opt['type'] == 'bool': + Checkbutton(entry_area, textvariable=var, variable=var, + onvalue='True', offvalue='False', + indicatoron=FALSE, selectcolor='', width=8 + ).grid(row=row, column=1, sticky=W, padx=7) + elif opt['type'] == 'int': + Entry(entry_area, textvariable=var, validate='key', + validatecommand=(self.is_int, '%P') + ).grid(row=row, column=1, sticky=NSEW, padx=7) + + else: + Entry(entry_area, textvariable=var + ).grid(row=row, column=1, sticky=NSEW, padx=7) + return + + def set_extension_value(self, section, opt): + name = opt['name'] + default = opt['default'] + value = opt['var'].get().strip() or default + opt['var'].set(value) + # if self.defaultCfg.has_section(section): + # Currently, always true; if not, indent to return + if (value == default): + return self.ext_userCfg.RemoveOption(section, name) + # set the option + return self.ext_userCfg.SetOption(section, name, value) + + def save_all_changed_extensions(self): + """Save configuration changes to the user config file.""" + has_changes = False + for ext_name in self.extensions: + options = self.extensions[ext_name] + for opt in options: + if self.set_extension_value(ext_name, opt): + has_changes = True + if has_changes: + self.ext_userCfg.Save() + + help_common = '''\ When you click either the Apply or Ok buttons, settings in this dialog that are different from IDLE's default are saved in @@ -1215,6 +1381,17 @@ } +def is_int(s): + "Return 's is blank or represents an int'" + if not s: + return True + try: + int(s) + return True + except ValueError: + return False + + class VerticalScrolledFrame(Frame): """A pure Tkinter vertically scrollable frame. @@ -1257,221 +1434,6 @@ return -def is_int(s): - "Return 's is blank or represents an int'" - if not s: - return True - try: - int(s) - return True - except ValueError: - return False - -# TODO: -# * Revert to default(s)? Per option or per extension? -# * List options in their original order (possible??) -class ConfigExtensionsDialog(Toplevel): - """A dialog for configuring IDLE extensions. - - This dialog is generic - it works for any and all IDLE extensions. - - IDLE extensions save their configuration options using idleConf. - ConfigExtensionsDialog reads the current configuration using idleConf, - supplies a GUI interface to change the configuration values, and saves the - changes using idleConf. - - Not all changes take effect immediately - some may require restarting IDLE. - This depends on each extension's implementation. - - All values are treated as text, and it is up to the user to supply - reasonable values. The only exception to this are the 'enable*' options, - which are boolean, and can be toggled with an True/False button. 
- """ - def __init__(self, parent, title=None, _htest=False): - Toplevel.__init__(self, parent) - self.wm_withdraw() - - self.configure(borderwidth=5) - self.geometry( - "+%d+%d" % (parent.winfo_rootx() + 20, - parent.winfo_rooty() + (30 if not _htest else 150))) - self.wm_title(title or 'IDLE Extensions Configuration') - - self.defaultCfg = idleConf.defaultCfg['extensions'] - self.userCfg = idleConf.userCfg['extensions'] - self.is_int = self.register(is_int) - self.load_extensions() - self.create_widgets() - - self.resizable(height=FALSE, width=FALSE) # don't allow resizing yet - self.transient(parent) - self.protocol("WM_DELETE_WINDOW", self.Cancel) - self.tabbed_page_set.focus_set() - # wait for window to be generated - self.update() - # set current width as the minimum width - self.wm_minsize(self.winfo_width(), 1) - # now allow resizing - self.resizable(height=TRUE, width=TRUE) - - self.wm_deiconify() - if not _htest: - self.grab_set() - self.wait_window() - - def load_extensions(self): - "Fill self.extensions with data from the default and user configs." - self.extensions = {} - for ext_name in idleConf.GetExtensions(active_only=False): - self.extensions[ext_name] = [] - - for ext_name in self.extensions: - opt_list = sorted(self.defaultCfg.GetOptionList(ext_name)) - - # bring 'enable' options to the beginning of the list - enables = [opt_name for opt_name in opt_list - if opt_name.startswith('enable')] - for opt_name in enables: - opt_list.remove(opt_name) - opt_list = enables + opt_list - - for opt_name in opt_list: - def_str = self.defaultCfg.Get( - ext_name, opt_name, raw=True) - try: - def_obj = {'True':True, 'False':False}[def_str] - opt_type = 'bool' - except KeyError: - try: - def_obj = int(def_str) - opt_type = 'int' - except ValueError: - def_obj = def_str - opt_type = None - try: - value = self.userCfg.Get( - ext_name, opt_name, type=opt_type, raw=True, - default=def_obj) - except ValueError: # Need this until .Get fixed - value = def_obj # bad values overwritten by entry - var = StringVar(self) - var.set(str(value)) - - self.extensions[ext_name].append({'name': opt_name, - 'type': opt_type, - 'default': def_str, - 'value': value, - 'var': var, - }) - - def create_widgets(self): - """Create the dialog's widgets.""" - self.extension_names = StringVar(self) - self.rowconfigure(0, weight=1) - self.columnconfigure(2, weight=1) - self.extension_list = Listbox(self, listvariable=self.extension_names, - selectmode='browse') - self.extension_list.bind('<>', self.extension_selected) - scroll = Scrollbar(self, command=self.extension_list.yview) - self.extension_list.yscrollcommand=scroll.set - self.details_frame = LabelFrame(self, width=250, height=250) - self.extension_list.grid(column=0, row=0, sticky='nws') - scroll.grid(column=1, row=0, sticky='ns') - self.details_frame.grid(column=2, row=0, sticky='nsew', padx=[10, 0]) - self.configure(padx=10, pady=10) - self.config_frame = {} - self.current_extension = None - - self.outerframe = self # TEMPORARY - self.tabbed_page_set = self.extension_list # TEMPORARY - - # create the individual pages - ext_names = '' - for ext_name in sorted(self.extensions): - self.create_extension_frame(ext_name) - ext_names = ext_names + '{' + ext_name + '} ' - self.extension_names.set(ext_names) - self.extension_list.selection_set(0) - self.extension_selected(None) - self.create_action_buttons().grid(row=1, columnspan=3) - - def extension_selected(self, event): - newsel = self.extension_list.curselection() - if newsel: - newsel = 
self.extension_list.get(newsel) - if newsel is None or newsel != self.current_extension: - if self.current_extension: - self.details_frame.config(text='') - self.config_frame[self.current_extension].grid_forget() - self.current_extension = None - if newsel: - self.details_frame.config(text=newsel) - self.config_frame[newsel].grid(column=0, row=0, sticky='nsew') - self.current_extension = newsel - - create_action_buttons = ConfigDialog.create_action_buttons.im_func - - def create_extension_frame(self, ext_name): - """Create a frame holding the widgets to configure one extension""" - f = VerticalScrolledFrame(self.details_frame, height=250, width=250) - self.config_frame[ext_name] = f - entry_area = f.interior - # create an entry for each configuration option - for row, opt in enumerate(self.extensions[ext_name]): - # create a row with a label and entry/checkbutton - label = Label(entry_area, text=opt['name']) - label.grid(row=row, column=0, sticky=NW) - var = opt['var'] - if opt['type'] == 'bool': - Checkbutton(entry_area, textvariable=var, variable=var, - onvalue='True', offvalue='False', - indicatoron=FALSE, selectcolor='', width=8 - ).grid(row=row, column=1, sticky=W, padx=7) - elif opt['type'] == 'int': - Entry(entry_area, textvariable=var, validate='key', - validatecommand=(self.is_int, '%P') - ).grid(row=row, column=1, sticky=NSEW, padx=7) - - else: - Entry(entry_area, textvariable=var - ).grid(row=row, column=1, sticky=NSEW, padx=7) - return - - - Ok = ConfigDialog.Ok.im_func - - def Apply(self): - self.save_all_changed_configs() - pass - - Cancel = ConfigDialog.Cancel.im_func - - def Help(self): - pass - - def set_user_value(self, section, opt): - name = opt['name'] - default = opt['default'] - value = opt['var'].get().strip() or default - opt['var'].set(value) - # if self.defaultCfg.has_section(section): - # Currently, always true; if not, indent to return - if (value == default): - return self.userCfg.RemoveOption(section, name) - # set the option - return self.userCfg.SetOption(section, name, value) - - def save_all_changed_configs(self): - """Save configuration changes to the user config file.""" - has_changes = False - for ext_name in self.extensions: - options = self.extensions[ext_name] - for opt in options: - if self.set_user_value(ext_name, opt): - has_changes = True - if has_changes: - self.userCfg.Save() - if __name__ == '__main__': import unittest diff --git a/Lib/idlelib/help.html b/Lib/idlelib/help.html --- a/Lib/idlelib/help.html +++ b/Lib/idlelib/help.html @@ -266,16 +266,16 @@

          24.6.1.7. Options menu (Shell and Editor)¶

          Configure IDLE
          -
          Open a configuration dialog. Fonts, indentation, keybindings, and color -themes may be altered. Startup Preferences may be set, and additional -help sources can be specified. Non-default user setting are saved in a -.idlerc directory in the user’s home directory. Problems caused by bad user -configuration files are solved by editing or deleting one or more of the -files in .idlerc. On OS X, open the configuration dialog by selecting -Preferences in the application menu.
          -
          Configure Extensions
          -
          Open a configuration dialog for setting preferences for extensions -(discussed below). See note above about the location of user settings.
          +

          Open a configuration dialog and change preferences for the following: +fonts, indentation, keybindings, text color themes, startup windows and +size, additional help sources, and extensions (see below). On OS X, +open the configuration dialog by selecting Preferences in the application +menu. To use a new built-in color theme (IDLE Dark) with older IDLEs, +save it as a new custom theme.

          +

          Non-default user settings are saved in a .idlerc directory in the user’s +home directory. Problems caused by bad user configuration files are solved +by editing or deleting one or more of the files in .idlerc.

          +
          Code Context (toggle)(Editor Window only)
          Open a pane at the top of the edit window which shows the block context of the code which has scrolled above the top of the window.
          @@ -699,7 +699,7 @@ The Python Software Foundation is a non-profit corporation. Please donate.
          - Last updated on Oct 02, 2015. + Last updated on Oct 13, 2015. Found a bug?
          Created using Sphinx 1.2.3. diff --git a/Lib/idlelib/idle_test/htest.py b/Lib/idlelib/idle_test/htest.py --- a/Lib/idlelib/idle_test/htest.py +++ b/Lib/idlelib/idle_test/htest.py @@ -93,15 +93,6 @@ "Double clicking on items prints a traceback for an exception " "that is ignored." } -ConfigExtensionsDialog_spec = { - 'file': 'configDialog', - 'kwds': {'title': 'Test Extension Configuration', - '_htest': True,}, - 'msg': "IDLE extensions dialog.\n" - "\n[Ok] to close the dialog.[Apply] to apply the settings and " - "and [Cancel] to revert all changes.\nRe-run the test to ensure " - "changes made have persisted." - } _color_delegator_spec = { 'file': 'ColorDelegator', @@ -121,7 +112,8 @@ "font face of the text in the area below it.\nIn the " "'Highlighting' tab, try different color schemes. Clicking " "items in the sample program should update the choices above it." - "\nIn the 'Keys' and 'General' tab, test settings of interest." + "\nIn the 'Keys', 'General' and 'Extensions' tabs, test settings" + "of interest." "\n[Ok] to close the dialog.[Apply] to apply the settings and " "and [Cancel] to revert all changes.\nRe-run the test to ensure " "changes made have persisted." -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Oct 14 04:09:57 2015 From: python-checkins at python.org (terry.reedy) Date: Wed, 14 Oct 2015 02:09:57 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_Merge_with_3=2E4?= Message-ID: <20151014020955.55462.17881@psf.io> https://hg.python.org/cpython/rev/4423e5022378 changeset: 98737:4423e5022378 branch: 3.5 parent: 98733:96645f7cd88c parent: 98736:4ed0cc2b7c7c user: Terry Jan Reedy date: Tue Oct 13 22:09:19 2015 -0400 summary: Merge with 3.4 files: Lib/idlelib/configDialog.py | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Lib/idlelib/configDialog.py b/Lib/idlelib/configDialog.py --- a/Lib/idlelib/configDialog.py +++ b/Lib/idlelib/configDialog.py @@ -1190,7 +1190,7 @@ This code is generic - it works for any and all IDLE extensions. IDLE extensions save their configuration options using idleConf. - This code reads the current configuration using idleConf, supplies a + This code reads the current configuration using idleConf, supplies a GUI interface to change the configuration values, and saves the changes using idleConf. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Oct 14 04:09:57 2015 From: python-checkins at python.org (terry.reedy) Date: Wed, 14 Oct 2015 02:09:57 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogSXNzdWUgIzI0Nzgy?= =?utf-8?q?=3A_whitespace?= Message-ID: <20151014020955.128830.99798@psf.io> https://hg.python.org/cpython/rev/6bce28fec911 changeset: 98735:6bce28fec911 branch: 2.7 parent: 98731:334dc1abc8af user: Terry Jan Reedy date: Tue Oct 13 22:08:45 2015 -0400 summary: Issue #24782: whitespace files: Lib/idlelib/configDialog.py | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Lib/idlelib/configDialog.py b/Lib/idlelib/configDialog.py --- a/Lib/idlelib/configDialog.py +++ b/Lib/idlelib/configDialog.py @@ -1207,7 +1207,7 @@ This code is generic - it works for any and all IDLE extensions. IDLE extensions save their configuration options using idleConf. 
- This code reads the current configuration using idleConf, supplies a + This code reads the current configuration using idleConf, supplies a GUI interface to change the configuration values, and saves the changes using idleConf. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Oct 14 04:09:58 2015 From: python-checkins at python.org (terry.reedy) Date: Wed, 14 Oct 2015 02:09:58 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_Merge_with_3=2E4?= Message-ID: <20151014020955.55462.38451@psf.io> https://hg.python.org/cpython/rev/96645f7cd88c changeset: 98733:96645f7cd88c branch: 3.5 parent: 98725:4115eabc3a6d parent: 98732:5647c61fb593 user: Terry Jan Reedy date: Tue Oct 13 22:04:07 2015 -0400 summary: Merge with 3.4 files: Doc/library/idle.rst | 19 +- Lib/idlelib/Bindings.py | 1 - Lib/idlelib/EditorWindow.py | 6 - Lib/idlelib/configDialog.py | 394 +++++++++----------- Lib/idlelib/help.html | 22 +- Lib/idlelib/idle_test/htest.py | 12 +- 6 files changed, 200 insertions(+), 254 deletions(-) diff --git a/Doc/library/idle.rst b/Doc/library/idle.rst --- a/Doc/library/idle.rst +++ b/Doc/library/idle.rst @@ -252,17 +252,16 @@ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Configure IDLE - Open a configuration dialog. Fonts, indentation, keybindings, and color - themes may be altered. Startup Preferences may be set, and additional - help sources can be specified. Non-default user setting are saved in a - .idlerc directory in the user's home directory. Problems caused by bad user - configuration files are solved by editing or deleting one or more of the - files in .idlerc. On OS X, open the configuration dialog by selecting - Preferences in the application menu. + Open a configuration dialog and change preferences for the following: + fonts, indentation, keybindings, text color themes, startup windows and + size, additional help sources, and extensions (see below). On OS X, + open the configuration dialog by selecting Preferences in the application + menu. To use a new built-in color theme (IDLE Dark) with older IDLEs, + save it as a new custom theme. -Configure Extensions - Open a configuration dialog for setting preferences for extensions - (discussed below). See note above about the location of user settings. + Non-default user settings are saved in a .idlerc directory in the user's + home directory. Problems caused by bad user configuration files are solved + by editing or deleting one or more of the files in .idlerc. Code Context (toggle)(Editor Window only) Open a pane at the top of the edit window which shows the block context diff --git a/Lib/idlelib/Bindings.py b/Lib/idlelib/Bindings.py --- a/Lib/idlelib/Bindings.py +++ b/Lib/idlelib/Bindings.py @@ -78,7 +78,6 @@ ]), ('options', [ ('Configure _IDLE', '<>'), - ('Configure _Extensions', '<>'), None, ]), ('help', [ diff --git a/Lib/idlelib/EditorWindow.py b/Lib/idlelib/EditorWindow.py --- a/Lib/idlelib/EditorWindow.py +++ b/Lib/idlelib/EditorWindow.py @@ -191,8 +191,6 @@ text.bind("<>", self.python_docs) text.bind("<>", self.about_dialog) text.bind("<>", self.config_dialog) - text.bind("<>", - self.config_extensions_dialog) text.bind("<>", self.open_module) text.bind("<>", lambda event: "break") text.bind("<>", self.select_all) @@ -514,10 +512,6 @@ # Synchronize with macosxSupport.overrideRootMenu.config_dialog. configDialog.ConfigDialog(self.top,'Settings') - def config_extensions_dialog(self, event=None): - "Handle Options 'Configure Extensions' event." 
- configDialog.ConfigExtensionsDialog(self.top) - def help_dialog(self, event=None): "Handle Help 'IDLE Help' event." # Synchronize with macosxSupport.overrideRootMenu.help_dialog. diff --git a/Lib/idlelib/configDialog.py b/Lib/idlelib/configDialog.py --- a/Lib/idlelib/configDialog.py +++ b/Lib/idlelib/configDialog.py @@ -80,12 +80,14 @@ def CreateWidgets(self): self.tabPages = TabbedPageSet(self, - page_names=['Fonts/Tabs', 'Highlighting', 'Keys', 'General']) + page_names=['Fonts/Tabs', 'Highlighting', 'Keys', 'General', + 'Extensions']) self.tabPages.pack(side=TOP, expand=TRUE, fill=BOTH) self.CreatePageFontTab() self.CreatePageHighlight() self.CreatePageKeys() self.CreatePageGeneral() + self.CreatePageExtensions() self.create_action_buttons().pack(side=BOTTOM) def create_action_buttons(self): @@ -1092,6 +1094,7 @@ self.LoadKeyCfg() ### general page self.LoadGeneralCfg() + # note: extension page handled separately def SaveNewKeySet(self, keySetName, keySet): """ @@ -1145,6 +1148,7 @@ # save these even if unchanged! idleConf.userCfg[configType].Save() self.ResetChangedItems() #clear the changed items dict + self.save_all_changed_extensions() # uses a different mechanism def DeactivateCurrentConfig(self): #Before a config is saved, some cleanup of current @@ -1180,6 +1184,168 @@ view_text(self, title='Help for IDLE preferences', text=help_common+help_pages.get(page, '')) + def CreatePageExtensions(self): + """Part of the config dialog used for configuring IDLE extensions. + + This code is generic - it works for any and all IDLE extensions. + + IDLE extensions save their configuration options using idleConf. + This code reads the current configuration using idleConf, supplies a + GUI interface to change the configuration values, and saves the + changes using idleConf. + + Not all changes take effect immediately - some may require restarting IDLE. + This depends on each extension's implementation. + + All values are treated as text, and it is up to the user to supply + reasonable values. The only exception to this are the 'enable*' options, + which are boolean, and can be toggled with an True/False button. 
+ """ + parent = self.parent + frame = self.tabPages.pages['Extensions'].frame + self.ext_defaultCfg = idleConf.defaultCfg['extensions'] + self.ext_userCfg = idleConf.userCfg['extensions'] + self.is_int = self.register(is_int) + self.load_extensions() + # create widgets - a listbox shows all available extensions, with the + # controls for the extension selected in the listbox to the right + self.extension_names = StringVar(self) + frame.rowconfigure(0, weight=1) + frame.columnconfigure(2, weight=1) + self.extension_list = Listbox(frame, listvariable=self.extension_names, + selectmode='browse') + self.extension_list.bind('<>', self.extension_selected) + scroll = Scrollbar(frame, command=self.extension_list.yview) + self.extension_list.yscrollcommand=scroll.set + self.details_frame = LabelFrame(frame, width=250, height=250) + self.extension_list.grid(column=0, row=0, sticky='nws') + scroll.grid(column=1, row=0, sticky='ns') + self.details_frame.grid(column=2, row=0, sticky='nsew', padx=[10, 0]) + frame.configure(padx=10, pady=10) + self.config_frame = {} + self.current_extension = None + + self.outerframe = self # TEMPORARY + self.tabbed_page_set = self.extension_list # TEMPORARY + + # create the frame holding controls for each extension + ext_names = '' + for ext_name in sorted(self.extensions): + self.create_extension_frame(ext_name) + ext_names = ext_names + '{' + ext_name + '} ' + self.extension_names.set(ext_names) + self.extension_list.selection_set(0) + self.extension_selected(None) + + def load_extensions(self): + "Fill self.extensions with data from the default and user configs." + self.extensions = {} + for ext_name in idleConf.GetExtensions(active_only=False): + self.extensions[ext_name] = [] + + for ext_name in self.extensions: + opt_list = sorted(self.ext_defaultCfg.GetOptionList(ext_name)) + + # bring 'enable' options to the beginning of the list + enables = [opt_name for opt_name in opt_list + if opt_name.startswith('enable')] + for opt_name in enables: + opt_list.remove(opt_name) + opt_list = enables + opt_list + + for opt_name in opt_list: + def_str = self.ext_defaultCfg.Get( + ext_name, opt_name, raw=True) + try: + def_obj = {'True':True, 'False':False}[def_str] + opt_type = 'bool' + except KeyError: + try: + def_obj = int(def_str) + opt_type = 'int' + except ValueError: + def_obj = def_str + opt_type = None + try: + value = self.ext_userCfg.Get( + ext_name, opt_name, type=opt_type, raw=True, + default=def_obj) + except ValueError: # Need this until .Get fixed + value = def_obj # bad values overwritten by entry + var = StringVar(self) + var.set(str(value)) + + self.extensions[ext_name].append({'name': opt_name, + 'type': opt_type, + 'default': def_str, + 'value': value, + 'var': var, + }) + + def extension_selected(self, event): + newsel = self.extension_list.curselection() + if newsel: + newsel = self.extension_list.get(newsel) + if newsel is None or newsel != self.current_extension: + if self.current_extension: + self.details_frame.config(text='') + self.config_frame[self.current_extension].grid_forget() + self.current_extension = None + if newsel: + self.details_frame.config(text=newsel) + self.config_frame[newsel].grid(column=0, row=0, sticky='nsew') + self.current_extension = newsel + + def create_extension_frame(self, ext_name): + """Create a frame holding the widgets to configure one extension""" + f = VerticalScrolledFrame(self.details_frame, height=250, width=250) + self.config_frame[ext_name] = f + entry_area = f.interior + # create an entry for each configuration 
option + for row, opt in enumerate(self.extensions[ext_name]): + # create a row with a label and entry/checkbutton + label = Label(entry_area, text=opt['name']) + label.grid(row=row, column=0, sticky=NW) + var = opt['var'] + if opt['type'] == 'bool': + Checkbutton(entry_area, textvariable=var, variable=var, + onvalue='True', offvalue='False', + indicatoron=FALSE, selectcolor='', width=8 + ).grid(row=row, column=1, sticky=W, padx=7) + elif opt['type'] == 'int': + Entry(entry_area, textvariable=var, validate='key', + validatecommand=(self.is_int, '%P') + ).grid(row=row, column=1, sticky=NSEW, padx=7) + + else: + Entry(entry_area, textvariable=var + ).grid(row=row, column=1, sticky=NSEW, padx=7) + return + + def set_extension_value(self, section, opt): + name = opt['name'] + default = opt['default'] + value = opt['var'].get().strip() or default + opt['var'].set(value) + # if self.defaultCfg.has_section(section): + # Currently, always true; if not, indent to return + if (value == default): + return self.ext_userCfg.RemoveOption(section, name) + # set the option + return self.ext_userCfg.SetOption(section, name, value) + + def save_all_changed_extensions(self): + """Save configuration changes to the user config file.""" + has_changes = False + for ext_name in self.extensions: + options = self.extensions[ext_name] + for opt in options: + if self.set_extension_value(ext_name, opt): + has_changes = True + if has_changes: + self.ext_userCfg.Save() + + help_common = '''\ When you click either the Apply or Ok buttons, settings in this dialog that are different from IDLE's default are saved in @@ -1198,6 +1364,17 @@ } +def is_int(s): + "Return 's is blank or represents an int'" + if not s: + return True + try: + int(s) + return True + except ValueError: + return False + + class VerticalScrolledFrame(Frame): """A pure Tkinter vertically scrollable frame. @@ -1240,221 +1417,6 @@ return -def is_int(s): - "Return 's is blank or represents an int'" - if not s: - return True - try: - int(s) - return True - except ValueError: - return False - -# TODO: -# * Revert to default(s)? Per option or per extension? -# * List options in their original order (possible??) -class ConfigExtensionsDialog(Toplevel): - """A dialog for configuring IDLE extensions. - - This dialog is generic - it works for any and all IDLE extensions. - - IDLE extensions save their configuration options using idleConf. - ConfigExtensionsDialog reads the current configuration using idleConf, - supplies a GUI interface to change the configuration values, and saves the - changes using idleConf. - - Not all changes take effect immediately - some may require restarting IDLE. - This depends on each extension's implementation. - - All values are treated as text, and it is up to the user to supply - reasonable values. The only exception to this are the 'enable*' options, - which are boolean, and can be toggled with an True/False button. 
- """ - def __init__(self, parent, title=None, _htest=False): - Toplevel.__init__(self, parent) - self.wm_withdraw() - - self.configure(borderwidth=5) - self.geometry( - "+%d+%d" % (parent.winfo_rootx() + 20, - parent.winfo_rooty() + (30 if not _htest else 150))) - self.wm_title(title or 'IDLE Extensions Configuration') - - self.defaultCfg = idleConf.defaultCfg['extensions'] - self.userCfg = idleConf.userCfg['extensions'] - self.is_int = self.register(is_int) - self.load_extensions() - self.create_widgets() - - self.resizable(height=FALSE, width=FALSE) # don't allow resizing yet - self.transient(parent) - self.protocol("WM_DELETE_WINDOW", self.Cancel) - self.tabbed_page_set.focus_set() - # wait for window to be generated - self.update() - # set current width as the minimum width - self.wm_minsize(self.winfo_width(), 1) - # now allow resizing - self.resizable(height=TRUE, width=TRUE) - - self.wm_deiconify() - if not _htest: - self.grab_set() - self.wait_window() - - def load_extensions(self): - "Fill self.extensions with data from the default and user configs." - self.extensions = {} - for ext_name in idleConf.GetExtensions(active_only=False): - self.extensions[ext_name] = [] - - for ext_name in self.extensions: - opt_list = sorted(self.defaultCfg.GetOptionList(ext_name)) - - # bring 'enable' options to the beginning of the list - enables = [opt_name for opt_name in opt_list - if opt_name.startswith('enable')] - for opt_name in enables: - opt_list.remove(opt_name) - opt_list = enables + opt_list - - for opt_name in opt_list: - def_str = self.defaultCfg.Get( - ext_name, opt_name, raw=True) - try: - def_obj = {'True':True, 'False':False}[def_str] - opt_type = 'bool' - except KeyError: - try: - def_obj = int(def_str) - opt_type = 'int' - except ValueError: - def_obj = def_str - opt_type = None - try: - value = self.userCfg.Get( - ext_name, opt_name, type=opt_type, raw=True, - default=def_obj) - except ValueError: # Need this until .Get fixed - value = def_obj # bad values overwritten by entry - var = StringVar(self) - var.set(str(value)) - - self.extensions[ext_name].append({'name': opt_name, - 'type': opt_type, - 'default': def_str, - 'value': value, - 'var': var, - }) - - def create_widgets(self): - """Create the dialog's widgets.""" - self.extension_names = StringVar(self) - self.rowconfigure(0, weight=1) - self.columnconfigure(2, weight=1) - self.extension_list = Listbox(self, listvariable=self.extension_names, - selectmode='browse') - self.extension_list.bind('<>', self.extension_selected) - scroll = Scrollbar(self, command=self.extension_list.yview) - self.extension_list.yscrollcommand=scroll.set - self.details_frame = LabelFrame(self, width=250, height=250) - self.extension_list.grid(column=0, row=0, sticky='nws') - scroll.grid(column=1, row=0, sticky='ns') - self.details_frame.grid(column=2, row=0, sticky='nsew', padx=[10, 0]) - self.configure(padx=10, pady=10) - self.config_frame = {} - self.current_extension = None - - self.outerframe = self # TEMPORARY - self.tabbed_page_set = self.extension_list # TEMPORARY - - # create the individual pages - ext_names = '' - for ext_name in sorted(self.extensions): - self.create_extension_frame(ext_name) - ext_names = ext_names + '{' + ext_name + '} ' - self.extension_names.set(ext_names) - self.extension_list.selection_set(0) - self.extension_selected(None) - self.create_action_buttons().grid(row=1, columnspan=3) - - def extension_selected(self, event): - newsel = self.extension_list.curselection() - if newsel: - newsel = 
self.extension_list.get(newsel) - if newsel is None or newsel != self.current_extension: - if self.current_extension: - self.details_frame.config(text='') - self.config_frame[self.current_extension].grid_forget() - self.current_extension = None - if newsel: - self.details_frame.config(text=newsel) - self.config_frame[newsel].grid(column=0, row=0, sticky='nsew') - self.current_extension = newsel - - create_action_buttons = ConfigDialog.create_action_buttons - - def create_extension_frame(self, ext_name): - """Create a frame holding the widgets to configure one extension""" - f = VerticalScrolledFrame(self.details_frame, height=250, width=250) - self.config_frame[ext_name] = f - entry_area = f.interior - # create an entry for each configuration option - for row, opt in enumerate(self.extensions[ext_name]): - # create a row with a label and entry/checkbutton - label = Label(entry_area, text=opt['name']) - label.grid(row=row, column=0, sticky=NW) - var = opt['var'] - if opt['type'] == 'bool': - Checkbutton(entry_area, textvariable=var, variable=var, - onvalue='True', offvalue='False', - indicatoron=FALSE, selectcolor='', width=8 - ).grid(row=row, column=1, sticky=W, padx=7) - elif opt['type'] == 'int': - Entry(entry_area, textvariable=var, validate='key', - validatecommand=(self.is_int, '%P') - ).grid(row=row, column=1, sticky=NSEW, padx=7) - - else: - Entry(entry_area, textvariable=var - ).grid(row=row, column=1, sticky=NSEW, padx=7) - return - - - Ok = ConfigDialog.Ok - - def Apply(self): - self.save_all_changed_configs() - pass - - Cancel = ConfigDialog.Cancel - - def Help(self): - pass - - def set_user_value(self, section, opt): - name = opt['name'] - default = opt['default'] - value = opt['var'].get().strip() or default - opt['var'].set(value) - # if self.defaultCfg.has_section(section): - # Currently, always true; if not, indent to return - if (value == default): - return self.userCfg.RemoveOption(section, name) - # set the option - return self.userCfg.SetOption(section, name, value) - - def save_all_changed_configs(self): - """Save configuration changes to the user config file.""" - has_changes = False - for ext_name in self.extensions: - options = self.extensions[ext_name] - for opt in options: - if self.set_user_value(ext_name, opt): - has_changes = True - if has_changes: - self.userCfg.Save() - if __name__ == '__main__': import unittest diff --git a/Lib/idlelib/help.html b/Lib/idlelib/help.html --- a/Lib/idlelib/help.html +++ b/Lib/idlelib/help.html @@ -266,16 +266,16 @@

          25.5.1.7. Options menu (Shell and Editor)¶

          Configure IDLE
          -
          Open a configuration dialog. Fonts, indentation, keybindings, and color -themes may be altered. Startup Preferences may be set, and additional -help sources can be specified. Non-default user setting are saved in a -.idlerc directory in the user’s home directory. Problems caused by bad user -configuration files are solved by editing or deleting one or more of the -files in .idlerc. On OS X, open the configuration dialog by selecting -Preferences in the application menu.
          -
          Configure Extensions
          -
          Open a configuration dialog for setting preferences for extensions -(discussed below). See note above about the location of user settings.
          +

          Open a configuration dialog and change preferences for the following: +fonts, indentation, keybindings, text color themes, startup windows and +size, additional help sources, and extensions (see below). On OS X, +open the configuration dialog by selecting Preferences in the application +menu. To use a new built-in color theme (IDLE Dark) with older IDLEs, +save it as a new custom theme.

          +

          Non-default user settings are saved in a .idlerc directory in the user’s +home directory. Problems caused by bad user configuration files are solved +by editing or deleting one or more of the files in .idlerc.

          +
          Code Context (toggle)(Editor Window only)
          Open a pane at the top of the edit window which shows the block context of the code which has scrolled above the top of the window.
          @@ -699,7 +699,7 @@ The Python Software Foundation is a non-profit corporation. Please donate.
          - Last updated on Oct 02, 2015. + Last updated on Oct 13, 2015. Found a bug?
          Created using Sphinx 1.2.3. diff --git a/Lib/idlelib/idle_test/htest.py b/Lib/idlelib/idle_test/htest.py --- a/Lib/idlelib/idle_test/htest.py +++ b/Lib/idlelib/idle_test/htest.py @@ -93,15 +93,6 @@ "Double clicking on items prints a traceback for an exception " "that is ignored." } -ConfigExtensionsDialog_spec = { - 'file': 'configDialog', - 'kwds': {'title': 'Test Extension Configuration', - '_htest': True,}, - 'msg': "IDLE extensions dialog.\n" - "\n[Ok] to close the dialog.[Apply] to apply the settings and " - "and [Cancel] to revert all changes.\nRe-run the test to ensure " - "changes made have persisted." - } _color_delegator_spec = { 'file': 'ColorDelegator', @@ -121,7 +112,8 @@ "font face of the text in the area below it.\nIn the " "'Highlighting' tab, try different color schemes. Clicking " "items in the sample program should update the choices above it." - "\nIn the 'Keys' and 'General' tab, test settings of interest." + "\nIn the 'Keys', 'General' and 'Extensions' tabs, test settings" + "of interest." "\n[Ok] to close the dialog.[Apply] to apply the settings and " "and [Cancel] to revert all changes.\nRe-run the test to ensure " "changes made have persisted." -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Oct 14 05:47:11 2015 From: python-checkins at python.org (senthil.kumaran) Date: Wed, 14 Oct 2015 03:47:11 +0000 Subject: [Python-checkins] =?utf-8?q?test=3A_Test_hg_write_with_new_comput?= =?utf-8?q?er_and_keys=2E?= Message-ID: <20151014034710.18376.83104@psf.io> https://hg.python.org/test/rev/0aa4a5dfda0f changeset: 227:0aa4a5dfda0f user: Senthil Kumaran date: Tue Oct 13 20:46:44 2015 -0700 summary: Test hg write with new computer and keys. files: quote | 3 +++ 1 files changed, 3 insertions(+), 0 deletions(-) diff --git a/quote b/quote --- a/quote +++ b/quote @@ -6,3 +6,6 @@ The details of that silly code are irrelevant. -- Tim Peters, 4 Mar 1992 + +"Little else matters than to write good code." +-- Karl Lehenbauer -- Repository URL: https://hg.python.org/test From python-checkins at python.org Wed Oct 14 10:02:07 2015 From: python-checkins at python.org (victor.stinner) Date: Wed, 14 Oct 2015 08:02:07 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Fix_long=5Fformat=5Fbinary?= =?utf-8?b?KCk=?= Message-ID: <20151014080207.453.76669@psf.io> https://hg.python.org/cpython/rev/03646293f1b3 changeset: 98739:03646293f1b3 user: Victor Stinner date: Wed Oct 14 09:47:23 2015 +0200 summary: Fix long_format_binary() Issue #25399: Fix long_format_binary(), allocate bytes for the bytes writer. 
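
[For illustration only; the following is not part of the commit. long_format_binary() is the C helper that renders integers in base 2/8/16 into a _PyBytesWriter buffer, and — assuming the usual code path touched by issue #25399 — it is what the %x and %o conversions in bytes and bytearray formatting go through. A minimal Python-level sketch of the operation it serves:

    # Hypothetical sketch; the literals are illustrative, not taken from the commit.
    print(b"count=%x, mode=%o" % (255, 8))      # b'count=ff, mode=10'
    print(bytearray(b"mask=%x") % (0b1010,))    # bytearray(b'mask=a')
]
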
files: Objects/longobject.c | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Objects/longobject.c b/Objects/longobject.c --- a/Objects/longobject.c +++ b/Objects/longobject.c @@ -1836,7 +1836,7 @@ kind = writer->kind; v = NULL; } - else if (writer) { + else if (bytes_writer) { *bytes_str = _PyBytesWriter_Prepare(bytes_writer, *bytes_str, sz); if (*bytes_str == NULL) return -1; -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Oct 14 10:02:08 2015 From: python-checkins at python.org (victor.stinner) Date: Wed, 14 Oct 2015 08:02:08 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Optimize_bytearray_=25_arg?= =?utf-8?q?s?= Message-ID: <20151014080207.20761.68088@psf.io> https://hg.python.org/cpython/rev/f369b79c0153 changeset: 98741:f369b79c0153 user: Victor Stinner date: Wed Oct 14 09:56:53 2015 +0200 summary: Optimize bytearray % args Issue #25399: Don't create temporary bytes objects: modify _PyBytes_Format() to create work directly on bytearray objects. * Rename _PyBytes_Format() to _PyBytes_FormatEx() just in case if something outside CPython uses it * _PyBytes_FormatEx() now uses (char*, Py_ssize_t) for the input string, so bytearray_format() doesn't need tot create a temporary input bytes object * Add use_bytearray parameter to _PyBytes_FormatEx() which is passed to _PyBytesWriter, to create a bytearray buffer instead of a bytes buffer Most formatting operations are now between 2.5 and 5 times faster. files: Include/bytesobject.h | 6 +++- Objects/bytearrayobject.c | 22 +++----------- Objects/bytesobject.c | 41 +++++++++++++++----------- 3 files changed, 33 insertions(+), 36 deletions(-) diff --git a/Include/bytesobject.h b/Include/bytesobject.h --- a/Include/bytesobject.h +++ b/Include/bytesobject.h @@ -62,7 +62,11 @@ PyAPI_FUNC(void) PyBytes_ConcatAndDel(PyObject **, PyObject *); #ifndef Py_LIMITED_API PyAPI_FUNC(int) _PyBytes_Resize(PyObject **, Py_ssize_t); -PyAPI_FUNC(PyObject *) _PyBytes_Format(PyObject *, PyObject *); +PyAPI_FUNC(PyObject*) _PyBytes_FormatEx( + const char *format, + Py_ssize_t format_len, + PyObject *args, + int use_bytearray); #endif PyAPI_FUNC(PyObject *) PyBytes_DecodeEscape(const char *, Py_ssize_t, const char *, Py_ssize_t, diff --git a/Objects/bytearrayobject.c b/Objects/bytearrayobject.c --- a/Objects/bytearrayobject.c +++ b/Objects/bytearrayobject.c @@ -282,26 +282,14 @@ static PyObject * bytearray_format(PyByteArrayObject *self, PyObject *args) { - PyObject *bytes_in, *bytes_out, *res; - char *bytestring; - - if (self == NULL || !PyByteArray_Check(self) || args == NULL) { + if (self == NULL || !PyByteArray_Check(self)) { PyErr_BadInternalCall(); return NULL; } - bytestring = PyByteArray_AS_STRING(self); - bytes_in = PyBytes_FromString(bytestring); - if (bytes_in == NULL) - return NULL; - bytes_out = _PyBytes_Format(bytes_in, args); - Py_DECREF(bytes_in); - if (bytes_out == NULL) - return NULL; - res = PyByteArray_FromObject(bytes_out); - Py_DECREF(bytes_out); - if (res == NULL) - return NULL; - return res; + + return _PyBytes_FormatEx(PyByteArray_AS_STRING(self), + PyByteArray_GET_SIZE(self), + args, 1); } /* Functions stuffed into the type object */ diff --git a/Objects/bytesobject.c b/Objects/bytesobject.c --- a/Objects/bytesobject.c +++ b/Objects/bytesobject.c @@ -568,28 +568,32 @@ /* fmt%(v1,v2,...) is roughly equivalent to sprintf(fmt, v1, v2, ...) 
*/ PyObject * -_PyBytes_Format(PyObject *format, PyObject *args) +_PyBytes_FormatEx(const char *format, Py_ssize_t format_len, + PyObject *args, int use_bytearray) { - char *fmt, *res; + const char *fmt; + char *res; Py_ssize_t arglen, argidx; Py_ssize_t fmtcnt; int args_owned = 0; PyObject *dict = NULL; _PyBytesWriter writer; - if (format == NULL || !PyBytes_Check(format) || args == NULL) { + if (args == NULL) { PyErr_BadInternalCall(); return NULL; } - fmt = PyBytes_AS_STRING(format); - fmtcnt = PyBytes_GET_SIZE(format); + fmt = format; + fmtcnt = format_len; _PyBytesWriter_Init(&writer); + writer.use_bytearray = use_bytearray; res = _PyBytesWriter_Alloc(&writer, fmtcnt); if (res == NULL) return NULL; - writer.overallocate = 1; + if (!use_bytearray) + writer.overallocate = 1; if (PyTuple_Check(args)) { arglen = PyTuple_GET_SIZE(args); @@ -613,10 +617,8 @@ pos = strchr(fmt + 1, '%'); if (pos != NULL) len = pos - fmt; - else { - len = PyBytes_GET_SIZE(format); - len -= (fmt - PyBytes_AS_STRING(format)); - } + else + len = format_len - (fmt - format); assert(len != 0); Py_MEMCPY(res, fmt, len); @@ -644,7 +646,7 @@ fmt++; if (*fmt == '(') { - char *keystart; + const char *keystart; Py_ssize_t keylen; PyObject *key; int pcount = 1; @@ -924,8 +926,7 @@ "unsupported format character '%c' (0x%x) " "at index %zd", c, c, - (Py_ssize_t)(fmt - 1 - - PyBytes_AsString(format))); + (Py_ssize_t)(fmt - 1 - format)); goto error; } @@ -1028,7 +1029,7 @@ /* If overallocation was disabled, ensure that it was the last write. Otherwise, we missed an optimization */ - assert(writer.overallocate || fmtcnt < 0); + assert(writer.overallocate || fmtcnt < 0 || use_bytearray); } /* until end */ if (argidx < arglen && !dict) { @@ -3233,11 +3234,15 @@ }; static PyObject * -bytes_mod(PyObject *v, PyObject *w) +bytes_mod(PyObject *self, PyObject *args) { - if (!PyBytes_Check(v)) - Py_RETURN_NOTIMPLEMENTED; - return _PyBytes_Format(v, w); + if (self == NULL || !PyBytes_Check(self)) { + PyErr_BadInternalCall(); + return NULL; + } + + return _PyBytes_FormatEx(PyBytes_AS_STRING(self), PyBytes_GET_SIZE(self), + args, 0); } static PyNumberMethods bytes_as_number = { -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Oct 14 10:02:07 2015 From: python-checkins at python.org (victor.stinner) Date: Wed, 14 Oct 2015 08:02:07 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Add_use=5Fbytearray_attrib?= =?utf-8?q?ute_to_=5FPyBytesWriter?= Message-ID: <20151014080207.18390.88669@psf.io> https://hg.python.org/cpython/rev/6fe0050a2f52 changeset: 98740:6fe0050a2f52 user: Victor Stinner date: Wed Oct 14 09:41:48 2015 +0200 summary: Add use_bytearray attribute to _PyBytesWriter Issue #25399: Add a new use_bytearray attribute to _PyBytesWriter to use a bytearray buffer, instead of using a bytes object. files: Include/bytesobject.h | 12 ++- Objects/bytesobject.c | 95 +++++++++++++++++++++--------- 2 files changed, 74 insertions(+), 33 deletions(-) diff --git a/Include/bytesobject.h b/Include/bytesobject.h --- a/Include/bytesobject.h +++ b/Include/bytesobject.h @@ -128,17 +128,21 @@ A _PyBytesWriter variable must be declared at the end of variables in a function to optimize the memory allocation on the stack. */ typedef struct { - /* bytes object */ + /* bytes, bytearray or NULL (when the small buffer is used) */ PyObject *buffer; - /* Number of allocated size */ + /* Number of allocated size. 
*/ Py_ssize_t allocated; /* Minimum number of allocated bytes, incremented by _PyBytesWriter_Prepare() */ Py_ssize_t min_size; - /* If non-zero, overallocate the buffer (default: 0). */ + /* If non-zero, use a bytearray instead of a bytes object for buffer. */ + int use_bytearray; + + /* If non-zero, overallocate the buffer (default: 0). + This flag must be zero if use_bytearray is non-zero. */ int overallocate; /* Stack buffer */ @@ -153,7 +157,7 @@ PyAPI_FUNC(void) _PyBytesWriter_Init(_PyBytesWriter *writer); /* Get the buffer content and reset the writer. - Return a bytes object. + Return a bytes object, or a bytearray object if use_bytearray is non-zero. Raise an exception and return NULL on error. */ PyAPI_FUNC(PyObject *) _PyBytesWriter_Finish(_PyBytesWriter *writer, void *str); diff --git a/Objects/bytesobject.c b/Objects/bytesobject.c --- a/Objects/bytesobject.c +++ b/Objects/bytesobject.c @@ -3852,11 +3852,8 @@ void _PyBytesWriter_Init(_PyBytesWriter *writer) { - writer->buffer = NULL; - writer->allocated = 0; - writer->min_size = 0; - writer->overallocate = 0; - writer->use_small_buffer = 0; + /* Set all attributes before small_buffer to 0 */ + memset(writer, 0, offsetof(_PyBytesWriter, small_buffer)); #ifdef Py_DEBUG memset(writer->small_buffer, 0xCB, sizeof(writer->small_buffer)); #endif @@ -3871,14 +3868,18 @@ Py_LOCAL_INLINE(char*) _PyBytesWriter_AsString(_PyBytesWriter *writer) { - if (!writer->use_small_buffer) { + if (writer->use_small_buffer) { + assert(writer->buffer == NULL); + return writer->small_buffer; + } + else if (writer->use_bytearray) { + assert(writer->buffer != NULL); + return PyByteArray_AS_STRING(writer->buffer); + } + else { assert(writer->buffer != NULL); return PyBytes_AS_STRING(writer->buffer); } - else { - assert(writer->buffer == NULL); - return writer->small_buffer; - } } Py_LOCAL_INLINE(Py_ssize_t) @@ -3897,18 +3898,28 @@ #ifdef Py_DEBUG char *start, *end; - if (!writer->use_small_buffer) { + if (writer->use_small_buffer) { + assert(writer->buffer == NULL); + } + else { assert(writer->buffer != NULL); - assert(PyBytes_CheckExact(writer->buffer)); + if (writer->use_bytearray) + assert(PyByteArray_CheckExact(writer->buffer)); + else + assert(PyBytes_CheckExact(writer->buffer)); assert(Py_REFCNT(writer->buffer) == 1); } - else { - assert(writer->buffer == NULL); + + if (writer->use_bytearray) { + /* bytearray has its own overallocation algorithm, + writer overallocation must be disabled */ + assert(!writer->overallocate); } - start = _PyBytesWriter_AsString(writer); + assert(0 <= writer->allocated); assert(0 <= writer->min_size && writer->min_size <= writer->allocated); /* the last byte must always be null */ + start = _PyBytesWriter_AsString(writer); assert(start[writer->allocated] == 0); end = start + writer->allocated; @@ -3932,8 +3943,7 @@ if (writer->min_size > PY_SSIZE_T_MAX - size) { PyErr_NoMemory(); - _PyBytesWriter_Dealloc(writer); - return NULL; + goto error; } writer->min_size += size; @@ -3950,23 +3960,38 @@ pos = _PyBytesWriter_GetPos(writer, str); if (!writer->use_small_buffer) { - /* Note: Don't use a bytearray object because the conversion from - byterray to bytes requires to copy all bytes. 
*/ - if (_PyBytes_Resize(&writer->buffer, allocated)) { - assert(writer->buffer == NULL); - return NULL; + if (writer->use_bytearray) { + if (PyByteArray_Resize(writer->buffer, allocated)) + goto error; + /* writer->allocated can be smaller than writer->buffer->ob_alloc, + but we cannot use ob_alloc because bytes may need to be moved + to use the whole buffer. bytearray uses an internal optimization + to avoid moving or copying bytes when bytes are removed at the + beginning (ex: del bytearray[:1]). */ + } + else { + if (_PyBytes_Resize(&writer->buffer, allocated)) + goto error; } } else { /* convert from stack buffer to bytes object buffer */ assert(writer->buffer == NULL); - writer->buffer = PyBytes_FromStringAndSize(NULL, allocated); + if (writer->use_bytearray) + writer->buffer = PyByteArray_FromStringAndSize(NULL, allocated); + else + writer->buffer = PyBytes_FromStringAndSize(NULL, allocated); if (writer->buffer == NULL) - return NULL; + goto error; if (pos != 0) { - Py_MEMCPY(PyBytes_AS_STRING(writer->buffer), + char *dest; + if (writer->use_bytearray) + dest = PyByteArray_AS_STRING(writer->buffer); + else + dest = PyBytes_AS_STRING(writer->buffer); + Py_MEMCPY(dest, writer->small_buffer, pos); } @@ -3981,6 +4006,10 @@ str = _PyBytesWriter_AsString(writer) + pos; _PyBytesWriter_CheckConsistency(writer, str); return str; + +error: + _PyBytesWriter_Dealloc(writer); + return NULL; } /* Allocate the buffer to write size bytes. @@ -4013,7 +4042,7 @@ _PyBytesWriter_CheckConsistency(writer, str); pos = _PyBytesWriter_GetPos(writer, str); - if (pos == 0) { + if (pos == 0 && !writer->use_bytearray) { Py_CLEAR(writer->buffer); /* Get the empty byte string singleton */ result = PyBytes_FromStringAndSize(NULL, 0); @@ -4026,9 +4055,17 @@ writer->buffer = NULL; if (pos != writer->allocated) { - if (_PyBytes_Resize(&result, pos)) { - assert(result == NULL); - return NULL; + if (writer->use_bytearray) { + if (PyByteArray_Resize(result, pos)) { + Py_DECREF(result); + return NULL; + } + } + else { + if (_PyBytes_Resize(&result, pos)) { + assert(result == NULL); + return NULL; + } } } } -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Oct 14 10:10:08 2015 From: python-checkins at python.org (victor.stinner) Date: Wed, 14 Oct 2015 08:10:08 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Document_latest_optimizati?= =?utf-8?q?ons_using_=5FPyBytesWriter?= Message-ID: <20151014081008.18362.59744@psf.io> https://hg.python.org/cpython/rev/90e41d965228 changeset: 98742:90e41d965228 user: Victor Stinner date: Wed Oct 14 10:10:00 2015 +0200 summary: Document latest optimizations using _PyBytesWriter files: Doc/whatsnew/3.6.rst | 17 +++++++++++++---- Misc/NEWS | 4 ++++ 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/Doc/whatsnew/3.6.rst b/Doc/whatsnew/3.6.rst --- a/Doc/whatsnew/3.6.rst +++ b/Doc/whatsnew/3.6.rst @@ -141,16 +141,25 @@ ============= * The ASCII decoder is now up to 60 times as fast for error handlers: - ``surrogateescape``, ``ignore`` and ``replace``. + ``surrogateescape``, ``ignore`` and ``replace`` (Contributed + by Victor Stinner in :issue:`24870`). * The ASCII and the Latin1 encoders are now up to 3 times as fast for the error - error ``surrogateescape``. + error ``surrogateescape`` (Contributed by Victor Stinner in :issue:`25227`). * The UTF-8 encoder is now up to 75 times as fast for error handlers: - ``ignore``, ``replace``, ``surrogateescape``, ``surrogatepass``. 
+ ``ignore``, ``replace``, ``surrogateescape``, ``surrogatepass`` (Contributed + by Victor Stinner in :issue:`25267`). * The UTF-8 decoder is now up to 15 times as fast for error handlers: - ``ignore``, ``replace`` and ``surrogateescape``. + ``ignore``, ``replace`` and ``surrogateescape`` (Contributed + by Victor Stinner in :issue:`25301`). + +* ``bytes % args`` is now up to 2 times faster. (Contributed by Victor Stinner + in :issue:`25349`). + +* ``bytearray % args`` is now between 2.5 and 5 times faster. (Contributed by + Victor Stinner in :issue:`25399`). Build and C API Changes diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,6 +10,9 @@ Core and Builtins ----------------- +- Issue #25399: Optimize bytearray % args using the new private _PyBytesWriter + API. Formatting is now between 2.5 and 5 times faster. + - Issue #25274: sys.setrecursionlimit() now raises a RecursionError if the new recursion limit is too low depending at the current recursion depth. Modify also the "lower-water mark" formula to make it monotonic. This mark is used @@ -19,6 +22,7 @@ sys.stdout.fileno() fails. - Issue #25349: Optimize bytes % args using the new private _PyBytesWriter API. + Formatting is now up to 2 times faster. - Issue #24806: Prevent builtin types that are not allowed to be subclassed from being subclassed through multiple inheritance. -- Repository URL: https://hg.python.org/cpython From solipsis at pitrou.net Wed Oct 14 10:46:24 2015 From: solipsis at pitrou.net (solipsis at pitrou.net) Date: Wed, 14 Oct 2015 08:46:24 +0000 Subject: [Python-checkins] Daily reference leaks (9ab61ec2934c): sum=61869 Message-ID: <20151014084623.3275.31731@psf.io> results for 9ab61ec2934c on branch "default" -------------------------------------------- test_bytes leaked [1, 1, 1] references, sum=3 test_capi leaked [5411, 5411, 5411] references, sum=16233 test_capi leaked [1421, 1423, 1423] memory blocks, sum=4267 test_format leaked [62, 62, 62] references, sum=186 test_format leaked [62, 62, 62] memory blocks, sum=186 test_functools leaked [0, 2, 2] memory blocks, sum=4 test_threading leaked [10820, 10820, 10820] references, sum=32460 test_threading leaked [2842, 2844, 2844] memory blocks, sum=8530 Command line was: ['./python', '-m', 'test.regrtest', '-uall', '-R', '3:3:/home/psf-users/antoine/refleaks/reflog08Ho2G', '--timeout', '7200'] From python-checkins at python.org Wed Oct 14 11:32:41 2015 From: python-checkins at python.org (victor.stinner) Date: Wed, 14 Oct 2015 09:32:41 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Optimize_bytes=2Efromhex?= =?utf-8?q?=28=29_and_bytearray=2Efromhex=28=29?= Message-ID: <20151014093240.7258.87675@psf.io> https://hg.python.org/cpython/rev/55d207a637ff changeset: 98743:55d207a637ff user: Victor Stinner date: Wed Oct 14 11:25:33 2015 +0200 summary: Optimize bytes.fromhex() and bytearray.fromhex() Issue #25401: Optimize bytes.fromhex() and bytearray.fromhex(): they are now between 2x and 3.5x faster. 
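
[As an informal illustration, not part of the commit: a micro-benchmark sketch that could be used to check the reported speedup. The input string and iteration count below are made up, and actual figures depend on the build and platform.

    import timeit

    # Hypothetical micro-benchmark; input and repeat count are illustrative only.
    hexstr = "00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f " * 64

    for name, ctor in (("bytes.fromhex", bytes.fromhex),
                       ("bytearray.fromhex", bytearray.fromhex)):
        t = timeit.timeit(lambda: ctor(hexstr), number=10000)
        print(name, round(t, 4), "sec")
]
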
Changes: * Use a fast-path working on a char* string for ASCII string * Use a slow-path for non-ASCII string * Replace slow hex_digit_to_int() function with a O(1) lookup in _PyLong_DigitValue precomputed table * Use _PyBytesWriter API to handle the buffer * Add unit tests to check the error position in error messages files: Doc/whatsnew/3.6.rst | 3 + Include/bytesobject.h | 3 + Include/longobject.h | 2 +- Lib/test/test_bytes.py | 14 ++ Misc/NEWS | 3 + Objects/bytearrayobject.c | 43 +-------- Objects/bytesobject.c | 128 +++++++++++++++---------- 7 files changed, 101 insertions(+), 95 deletions(-) diff --git a/Doc/whatsnew/3.6.rst b/Doc/whatsnew/3.6.rst --- a/Doc/whatsnew/3.6.rst +++ b/Doc/whatsnew/3.6.rst @@ -161,6 +161,9 @@ * ``bytearray % args`` is now between 2.5 and 5 times faster. (Contributed by Victor Stinner in :issue:`25399`). +* Optimize :meth:`bytes.fromhex` and :meth:`bytearray.fromhex`: they are now + between 2x and 3.5x faster. (Contributed by Victor Stinner in :issue:`25401`). + Build and C API Changes ======================= diff --git a/Include/bytesobject.h b/Include/bytesobject.h --- a/Include/bytesobject.h +++ b/Include/bytesobject.h @@ -67,6 +67,9 @@ Py_ssize_t format_len, PyObject *args, int use_bytearray); +PyAPI_FUNC(PyObject*) _PyBytes_FromHex( + PyObject *string, + int use_bytearray); #endif PyAPI_FUNC(PyObject *) PyBytes_DecodeEscape(const char *, Py_ssize_t, const char *, Py_ssize_t, diff --git a/Include/longobject.h b/Include/longobject.h --- a/Include/longobject.h +++ b/Include/longobject.h @@ -65,7 +65,7 @@ # error "void* different in size from int, long and long long" #endif /* SIZEOF_VOID_P */ -/* Used by Python/mystrtoul.c. */ +/* Used by Python/mystrtoul.c and _PyBytes_FromHex(). */ #ifndef Py_LIMITED_API PyAPI_DATA(unsigned char) _PyLong_DigitValue[256]; #endif diff --git a/Lib/test/test_bytes.py b/Lib/test/test_bytes.py --- a/Lib/test/test_bytes.py +++ b/Lib/test/test_bytes.py @@ -301,6 +301,20 @@ self.assertRaises(ValueError, self.type2test.fromhex, '\x00') self.assertRaises(ValueError, self.type2test.fromhex, '12 \x00 34') + for data, pos in ( + # invalid first hexadecimal character + ('12 x4 56', 3), + # invalid second hexadecimal character + ('12 3x 56', 4), + # two invalid hexadecimal characters + ('12 xy 56', 3), + # test non-ASCII string + ('12 3\xff 56', 4), + ): + with self.assertRaises(ValueError) as cm: + self.type2test.fromhex(data) + self.assertIn('at position %s' % pos, str(cm.exception)) + def test_hex(self): self.assertRaises(TypeError, self.type2test.hex) self.assertRaises(TypeError, self.type2test.hex, 1) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,6 +10,9 @@ Core and Builtins ----------------- +- Issue #25401: Optimize bytes.fromhex() and bytearray.fromhex(): they are now + between 2x and 3.5x faster. + - Issue #25399: Optimize bytearray % args using the new private _PyBytesWriter API. Formatting is now between 2.5 and 5 times faster. 
diff --git a/Objects/bytearrayobject.c b/Objects/bytearrayobject.c --- a/Objects/bytearrayobject.c +++ b/Objects/bytearrayobject.c @@ -2823,48 +2823,7 @@ bytearray_fromhex_impl(PyObject*cls, PyObject *string) /*[clinic end generated code: output=df3da60129b3700c input=907bbd2d34d9367a]*/ { - PyObject *newbytes; - char *buf; - Py_ssize_t hexlen, byteslen, i, j; - int top, bot; - void *data; - unsigned int kind; - - assert(PyUnicode_Check(string)); - if (PyUnicode_READY(string)) - return NULL; - kind = PyUnicode_KIND(string); - data = PyUnicode_DATA(string); - hexlen = PyUnicode_GET_LENGTH(string); - - byteslen = hexlen/2; /* This overestimates if there are spaces */ - newbytes = PyByteArray_FromStringAndSize(NULL, byteslen); - if (!newbytes) - return NULL; - buf = PyByteArray_AS_STRING(newbytes); - for (i = j = 0; i < hexlen; i += 2) { - /* skip over spaces in the input */ - while (PyUnicode_READ(kind, data, i) == ' ') - i++; - if (i >= hexlen) - break; - top = hex_digit_to_int(PyUnicode_READ(kind, data, i)); - bot = hex_digit_to_int(PyUnicode_READ(kind, data, i+1)); - if (top == -1 || bot == -1) { - PyErr_Format(PyExc_ValueError, - "non-hexadecimal number found in " - "fromhex() arg at position %zd", i); - goto error; - } - buf[j++] = (top << 4) + bot; - } - if (PyByteArray_Resize(newbytes, j) < 0) - goto error; - return newbytes; - - error: - Py_DECREF(newbytes); - return NULL; + return _PyBytes_FromHex(string, 1); } PyDoc_STRVAR(hex__doc__, diff --git a/Objects/bytesobject.c b/Objects/bytesobject.c --- a/Objects/bytesobject.c +++ b/Objects/bytesobject.c @@ -30,6 +30,10 @@ */ #define PyBytesObject_SIZE (offsetof(PyBytesObject, ob_sval) + 1) +/* Forward declaration */ +Py_LOCAL_INLINE(Py_ssize_t) _PyBytesWriter_GetSize(_PyBytesWriter *writer, + char *str); + /* For PyBytes_FromString(), the parameter `str' points to a null-terminated string containing exactly `size' bytes. 
@@ -3078,22 +3082,6 @@ ); } -static int -hex_digit_to_int(Py_UCS4 c) -{ - if (c >= 128) - return -1; - if (Py_ISDIGIT(c)) - return c - '0'; - else { - if (Py_ISUPPER(c)) - c = Py_TOLOWER(c); - if (c >= 'a' && c <= 'f') - return c - 'a' + 10; - } - return -1; -} - /*[clinic input] @classmethod bytes.fromhex @@ -3111,47 +3099,83 @@ bytes_fromhex_impl(PyTypeObject *type, PyObject *string) /*[clinic end generated code: output=0973acc63661bb2e input=bf4d1c361670acd3]*/ { - PyObject *newstring; + return _PyBytes_FromHex(string, 0); +} + +PyObject* +_PyBytes_FromHex(PyObject *string, int use_bytearray) +{ char *buf; - Py_ssize_t hexlen, byteslen, i, j; - int top, bot; - void *data; - unsigned int kind; + Py_ssize_t hexlen, invalid_char; + unsigned int top, bot; + Py_UCS1 *str, *end; + _PyBytesWriter writer; + + _PyBytesWriter_Init(&writer); + writer.use_bytearray = use_bytearray; assert(PyUnicode_Check(string)); if (PyUnicode_READY(string)) return NULL; - kind = PyUnicode_KIND(string); - data = PyUnicode_DATA(string); hexlen = PyUnicode_GET_LENGTH(string); - byteslen = hexlen/2; /* This overestimates if there are spaces */ - newstring = PyBytes_FromStringAndSize(NULL, byteslen); - if (!newstring) + if (!PyUnicode_IS_ASCII(string)) { + void *data = PyUnicode_DATA(string); + unsigned int kind = PyUnicode_KIND(string); + Py_ssize_t i; + + /* search for the first non-ASCII character */ + for (i = 0; i < hexlen; i++) { + if (PyUnicode_READ(kind, data, i) >= 128) + break; + } + invalid_char = i; + goto error; + } + + assert(PyUnicode_KIND(string) == PyUnicode_1BYTE_KIND); + str = PyUnicode_1BYTE_DATA(string); + + /* This overestimates if there are spaces */ + buf = _PyBytesWriter_Alloc(&writer, hexlen / 2); + if (buf == NULL) return NULL; - buf = PyBytes_AS_STRING(newstring); - for (i = j = 0; i < hexlen; i += 2) { + + end = str + hexlen; + while (str < end) { /* skip over spaces in the input */ - while (PyUnicode_READ(kind, data, i) == ' ') - i++; - if (i >= hexlen) - break; - top = hex_digit_to_int(PyUnicode_READ(kind, data, i)); - bot = hex_digit_to_int(PyUnicode_READ(kind, data, i+1)); - if (top == -1 || bot == -1) { - PyErr_Format(PyExc_ValueError, - "non-hexadecimal number found in " - "fromhex() arg at position %zd", i); + if (*str == ' ') { + do { + str++; + } while (*str == ' '); + if (str >= end) + break; + } + + top = _PyLong_DigitValue[*str]; + if (top >= 16) { + invalid_char = str - PyUnicode_1BYTE_DATA(string); goto error; } - buf[j++] = (top << 4) + bot; + str++; + + bot = _PyLong_DigitValue[*str]; + if (bot >= 16) { + invalid_char = str - PyUnicode_1BYTE_DATA(string); + goto error; + } + str++; + + *buf++ = (unsigned char)((top << 4) + bot); } - if (j != byteslen && _PyBytes_Resize(&newstring, j) < 0) - goto error; - return newstring; + + return _PyBytesWriter_Finish(&writer, buf); error: - Py_XDECREF(newstring); + PyErr_Format(PyExc_ValueError, + "non-hexadecimal number found in " + "fromhex() arg at position %zd", invalid_char); + _PyBytesWriter_Dealloc(&writer); return NULL; } @@ -3888,7 +3912,7 @@ } Py_LOCAL_INLINE(Py_ssize_t) -_PyBytesWriter_GetPos(_PyBytesWriter *writer, char *str) +_PyBytesWriter_GetSize(_PyBytesWriter *writer, char *str) { char *start = _PyBytesWriter_AsString(writer); assert(str != NULL); @@ -3963,7 +3987,7 @@ allocated += allocated / OVERALLOCATE_FACTOR; } - pos = _PyBytesWriter_GetPos(writer, str); + pos = _PyBytesWriter_GetSize(writer, str); if (!writer->use_small_buffer) { if (writer->use_bytearray) { if (PyByteArray_Resize(writer->buffer, allocated)) @@ 
-4041,33 +4065,33 @@ PyObject * _PyBytesWriter_Finish(_PyBytesWriter *writer, void *str) { - Py_ssize_t pos; + Py_ssize_t size; PyObject *result; _PyBytesWriter_CheckConsistency(writer, str); - pos = _PyBytesWriter_GetPos(writer, str); - if (pos == 0 && !writer->use_bytearray) { + size = _PyBytesWriter_GetSize(writer, str); + if (size == 0 && !writer->use_bytearray) { Py_CLEAR(writer->buffer); /* Get the empty byte string singleton */ result = PyBytes_FromStringAndSize(NULL, 0); } else if (writer->use_small_buffer) { - result = PyBytes_FromStringAndSize(writer->small_buffer, pos); + result = PyBytes_FromStringAndSize(writer->small_buffer, size); } else { result = writer->buffer; writer->buffer = NULL; - if (pos != writer->allocated) { + if (size != writer->allocated) { if (writer->use_bytearray) { - if (PyByteArray_Resize(result, pos)) { + if (PyByteArray_Resize(result, size)) { Py_DECREF(result); return NULL; } } else { - if (_PyBytes_Resize(&result, pos)) { + if (_PyBytes_Resize(&result, size)) { assert(result == NULL); return NULL; } -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Oct 14 12:05:50 2015 From: python-checkins at python.org (victor.stinner) Date: Wed, 14 Oct 2015 10:05:50 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2325401=3A_Remove_n?= =?utf-8?q?ow_unused_hex=5Fdigit=5Fto=5Fint=28=29_function?= Message-ID: <20151014100550.128852.7713@psf.io> https://hg.python.org/cpython/rev/09e0533f3694 changeset: 98744:09e0533f3694 user: Victor Stinner date: Wed Oct 14 11:59:46 2015 +0200 summary: Issue #25401: Remove now unused hex_digit_to_int() function files: Objects/bytearrayobject.c | 16 ---------------- 1 files changed, 0 insertions(+), 16 deletions(-) diff --git a/Objects/bytearrayobject.c b/Objects/bytearrayobject.c --- a/Objects/bytearrayobject.c +++ b/Objects/bytearrayobject.c @@ -2789,22 +2789,6 @@ ); } -static int -hex_digit_to_int(Py_UCS4 c) -{ - if (c >= 128) - return -1; - if (Py_ISDIGIT(c)) - return c - '0'; - else { - if (Py_ISUPPER(c)) - c = Py_TOLOWER(c); - if (c >= 'a' && c <= 'f') - return c - 'a' + 10; - } - return -1; -} - /*[clinic input] @classmethod bytearray.fromhex -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Oct 14 12:05:50 2015 From: python-checkins at python.org (victor.stinner) Date: Wed, 14 Oct 2015 10:05:50 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_=5FPyBytesWriter=5FAlloc?= =?utf-8?q?=28=29=3A_only_use_10_bytes_of_the_small_buffer_in_debug_mode_t?= =?utf-8?q?o?= Message-ID: <20151014100550.20781.75245@psf.io> https://hg.python.org/cpython/rev/72db185a8ab1 changeset: 98745:72db185a8ab1 user: Victor Stinner date: Wed Oct 14 12:02:39 2015 +0200 summary: _PyBytesWriter_Alloc(): only use 10 bytes of the small buffer in debug mode to enhance code to detect buffer under- and overflow. files: Objects/bytesobject.c | 14 +++++++++++++- 1 files changed, 13 insertions(+), 1 deletions(-) diff --git a/Objects/bytesobject.c b/Objects/bytesobject.c --- a/Objects/bytesobject.c +++ b/Objects/bytesobject.c @@ -4053,8 +4053,20 @@ writer->use_small_buffer = 1; #ifdef Py_DEBUG - /* the last byte is reserved, it must be '\0' */ writer->allocated = sizeof(writer->small_buffer) - 1; + /* In debug mode, don't use the full small buffer because it is less + efficient than bytes and bytearray objects to detect buffer underflow + and buffer overflow. Use 10 bytes of the small buffer to test also + code using the smaller buffer in debug mode. 
+ + Don't modify the _PyBytesWriter structure (use a shorter small buffer) + in debug mode to also be able to detect stack overflow when running + tests in debug mode. The _PyBytesWriter is large (more than 512 bytes), + if Py_EnterRecursiveCall() is not used in deep C callback, we may hit a + stack overflow. */ + writer->allocated = Py_MIN(writer->allocated, 10); + /* _PyBytesWriter_CheckConsistency() requires the last byte to be 0, + to detect buffer overflow */ writer->small_buffer[writer->allocated] = 0; #else writer->allocated = sizeof(writer->small_buffer); -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Oct 14 13:42:50 2015 From: python-checkins at python.org (victor.stinner) Date: Wed, 14 Oct 2015 11:42:50 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Fix_compiler_warnings_=28u?= =?utf-8?q?ninitialized_variables=29=2C_false_alarms_in_fact?= Message-ID: <20151014114249.449.35212@psf.io> https://hg.python.org/cpython/rev/4fcc6eb732c4 changeset: 98746:4fcc6eb732c4 user: Victor Stinner date: Wed Oct 14 12:10:20 2015 +0200 summary: Fix compiler warnings (uninitialized variables), false alarms in fact files: Objects/longobject.c | 6 ++---- 1 files changed, 2 insertions(+), 4 deletions(-) diff --git a/Objects/longobject.c b/Objects/longobject.c --- a/Objects/longobject.c +++ b/Objects/longobject.c @@ -1587,7 +1587,7 @@ char **bytes_str) { PyLongObject *scratch, *a; - PyObject *str; + PyObject *str = NULL; Py_ssize_t size, strlen, size_a, i, j; digit *pout, *pin, rem, tenpow; int negative; @@ -1664,7 +1664,6 @@ return -1; } kind = writer->kind; - str = NULL; } else if (bytes_writer) { *bytes_str = _PyBytesWriter_Prepare(bytes_writer, *bytes_str, strlen); @@ -1777,7 +1776,7 @@ _PyBytesWriter *bytes_writer, char **bytes_str) { PyLongObject *a = (PyLongObject *)aa; - PyObject *v; + PyObject *v = NULL; Py_ssize_t sz; Py_ssize_t size_a; enum PyUnicode_Kind kind; @@ -1834,7 +1833,6 @@ if (_PyUnicodeWriter_Prepare(writer, sz, 'x') == -1) return -1; kind = writer->kind; - v = NULL; } else if (bytes_writer) { *bytes_str = _PyBytesWriter_Prepare(bytes_writer, *bytes_str, sz); -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Oct 14 13:42:50 2015 From: python-checkins at python.org (victor.stinner) Date: Wed, 14 Oct 2015 11:42:50 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Modify_=5FPyBytes=5FDecode?= =?utf-8?q?EscapeRecode=28=29_to_use_=5FPyBytesAPI?= Message-ID: <20151014114249.481.16680@psf.io> https://hg.python.org/cpython/rev/9b191f539db8 changeset: 98747:9b191f539db8 user: Victor Stinner date: Wed Oct 14 13:32:13 2015 +0200 summary: Modify _PyBytes_DecodeEscapeRecode() to use _PyBytesAPI * Don't overallocate by 400% when recode is needed: only overallocate on demand using _PyBytesWriter. * Use _PyLong_DigitValue to convert hexadecimal digit to int * Create _PyBytes_DecodeEscapeRecode() subfunction files: Include/longobject.h | 3 +- Objects/bytesobject.c | 131 ++++++++++++++++------------- 2 files changed, 75 insertions(+), 59 deletions(-) diff --git a/Include/longobject.h b/Include/longobject.h --- a/Include/longobject.h +++ b/Include/longobject.h @@ -65,7 +65,8 @@ # error "void* different in size from int, long and long long" #endif /* SIZEOF_VOID_P */ -/* Used by Python/mystrtoul.c and _PyBytes_FromHex(). */ +/* Used by Python/mystrtoul.c, _PyBytes_FromHex(), + _PyBytes_DecodeEscapeRecode(), etc. 
*/ #ifndef Py_LIMITED_API PyAPI_DATA(unsigned char) _PyLong_DigitValue[256]; #endif diff --git a/Objects/bytesobject.c b/Objects/bytesobject.c --- a/Objects/bytesobject.c +++ b/Objects/bytesobject.c @@ -1068,6 +1068,42 @@ the string is UTF-8 encoded and should be re-encoded in the specified encoding. */ +static char * +_PyBytes_DecodeEscapeRecode(const char **s, const char *end, + const char *errors, const char *recode_encoding, + _PyBytesWriter *writer, char *p) +{ + PyObject *u, *w; + const char* t; + + t = *s; + /* Decode non-ASCII bytes as UTF-8. */ + while (t < end && (*t & 0x80)) + t++; + u = PyUnicode_DecodeUTF8(*s, t - *s, errors); + if (u == NULL) + return NULL; + + /* Recode them in target encoding. */ + w = PyUnicode_AsEncodedString(u, recode_encoding, errors); + Py_DECREF(u); + if (w == NULL) + return NULL; + assert(PyBytes_Check(w)); + + /* Append bytes to output buffer. */ + writer->min_size--; /* substract 1 preallocated byte */ + p = _PyBytesWriter_WriteBytes(writer, p, + PyBytes_AS_STRING(w), + PyBytes_GET_SIZE(w)); + Py_DECREF(w); + if (p == NULL) + return NULL; + + *s = t; + return p; +} + PyObject *PyBytes_DecodeEscape(const char *s, Py_ssize_t len, const char *errors, @@ -1075,54 +1111,42 @@ const char *recode_encoding) { int c; - char *p, *buf; + char *p; const char *end; - PyObject *v; - Py_ssize_t newlen = recode_encoding ? 4*len:len; - v = PyBytes_FromStringAndSize((char *)NULL, newlen); - if (v == NULL) + _PyBytesWriter writer; + + _PyBytesWriter_Init(&writer); + + p = _PyBytesWriter_Alloc(&writer, len); + if (p == NULL) return NULL; - p = buf = PyBytes_AsString(v); + writer.overallocate = 1; + end = s + len; while (s < end) { if (*s != '\\') { non_esc: - if (recode_encoding && (*s & 0x80)) { - PyObject *u, *w; - char *r; - const char* t; - Py_ssize_t rn; - t = s; - /* Decode non-ASCII bytes as UTF-8. */ - while (t < end && (*t & 0x80)) t++; - u = PyUnicode_DecodeUTF8(s, t - s, errors); - if(!u) goto failed; - - /* Recode them in target encoding. */ - w = PyUnicode_AsEncodedString( - u, recode_encoding, errors); - Py_DECREF(u); - if (!w) goto failed; - - /* Append bytes to output buffer. */ - assert(PyBytes_Check(w)); - r = PyBytes_AS_STRING(w); - rn = PyBytes_GET_SIZE(w); - Py_MEMCPY(p, r, rn); - p += rn; - Py_DECREF(w); - s = t; - } else { + if (!(recode_encoding && (*s & 0x80))) { *p++ = *s++; } + else { + /* non-ASCII character and need to recode */ + p = _PyBytes_DecodeEscapeRecode(&s, end, + errors, recode_encoding, + &writer, p); + if (p == NULL) + goto failed; + } continue; } + s++; - if (s==end) { + if (s == end) { PyErr_SetString(PyExc_ValueError, "Trailing \\ in string"); goto failed; } + switch (*s++) { /* XXX This assumes ASCII! 
*/ case '\n': break; @@ -1147,28 +1171,18 @@ *p++ = c; break; case 'x': - if (s+1 < end && Py_ISXDIGIT(s[0]) && Py_ISXDIGIT(s[1])) { - unsigned int x = 0; - c = Py_CHARMASK(*s); - s++; - if (Py_ISDIGIT(c)) - x = c - '0'; - else if (Py_ISLOWER(c)) - x = 10 + c - 'a'; - else - x = 10 + c - 'A'; - x = x << 4; - c = Py_CHARMASK(*s); - s++; - if (Py_ISDIGIT(c)) - x += c - '0'; - else if (Py_ISLOWER(c)) - x += 10 + c - 'a'; - else - x += 10 + c - 'A'; - *p++ = x; - break; + if (s+1 < end) { + int digit1, digit2; + digit1 = _PyLong_DigitValue[Py_CHARMASK(s[0])]; + digit2 = _PyLong_DigitValue[Py_CHARMASK(s[1])]; + if (digit1 < 16 && digit2 < 16) { + *p++ = (unsigned char)((digit1 << 4) + digit2); + s += 2; + break; + } } + /* invalid hexadecimal digits */ + if (!errors || strcmp(errors, "strict") == 0) { PyErr_Format(PyExc_ValueError, "invalid \\x escape at position %d", @@ -1190,6 +1204,7 @@ if (s < end && Py_ISXDIGIT(s[0])) s++; /* and a hexdigit */ break; + default: *p++ = '\\'; s--; @@ -1197,11 +1212,11 @@ UTF-8 bytes may follow. */ } } - if (p-buf < newlen) - _PyBytes_Resize(&v, p - buf); - return v; + + return _PyBytesWriter_Finish(&writer, p); + failed: - Py_DECREF(v); + _PyBytesWriter_Dealloc(&writer); return NULL; } -- Repository URL: https://hg.python.org/cpython From lp_benchmark_robot at intel.com Wed Oct 14 15:25:22 2015 From: lp_benchmark_robot at intel.com (lp_benchmark_robot at intel.com) Date: Wed, 14 Oct 2015 14:25:22 +0100 Subject: [Python-checkins] Benchmark Results for Python Default 2015-10-14 Message-ID: <0cd129d1-24b8-4e20-bba3-3a3e6e1f406c@irsmsx151.ger.corp.intel.com> Results for project Python default, build date 2015-10-14 03:02:12 commit: 9ab61ec2934cf289b71818bc2428a58110fc3780 revision date: 2015-10-14 02:09:34 +0000 environment: Haswell-EP cpu: Intel(R) Xeon(R) CPU E5-2699 v3 @ 2.30GHz 2x18 cores, stepping 2, LLC 45 MB mem: 128 GB os: CentOS 7.1 kernel: Linux 3.10.0-229.4.2.el7.x86_64 Baseline results were generated using release v3.4.3, with hash b4cbecbc0781e89a309d03b60a1f75f8499250e6 from 2015-02-25 12:15:33+00:00 ---------------------------------------------------------------------------------- benchmark relative change since change since current rev run std_dev* last run baseline with PGO ---------------------------------------------------------------------------------- :-) django_v2 0.32% -2.25% 7.43% 17.59% :-| pybench 0.15% 0.06% -1.64% 8.22% :-( regex_v8 3.06% -0.20% -6.00% 7.10% :-| nbody 0.14% 0.95% -1.32% 10.82% :-| json_dump_v2 0.32% -0.50% -0.27% 7.18% :-| normal_startup 1.10% -0.04% -0.47% 5.33% ---------------------------------------------------------------------------------- Note: Benchmark results are measured in seconds. * Relative Standard Deviation (Standard Deviation/Average) Our lab does a nightly source pull and build of the Python project and measures performance changes against the previous stable version and the previous nightly measurement. This is provided as a service to the community so that quality issues with current hardware can be identified quickly. Intel technologies' features and benefits depend on system configuration and may require enabled hardware, software or service activation. Performance varies depending on system configuration. 
From lp_benchmark_robot at intel.com Wed Oct 14 15:25:44 2015 From: lp_benchmark_robot at intel.com (lp_benchmark_robot at intel.com) Date: Wed, 14 Oct 2015 14:25:44 +0100 Subject: [Python-checkins] Benchmark Results for Python 2.7 2015-10-14 Message-ID: <46af8def-f380-465e-b7e6-aeef356b1c5c@irsmsx151.ger.corp.intel.com> Results for project Python 2.7, build date 2015-10-14 11:06:08 commit: 6bce28fec91139d45b1c3e8dc14414f49bf6016b revision date: 2015-10-14 02:08:45 +0000 environment: Haswell-EP cpu: Intel(R) Xeon(R) CPU E5-2699 v3 @ 2.30GHz 2x18 cores, stepping 2, LLC 45 MB mem: 128 GB os: CentOS 7.1 kernel: Linux 3.10.0-229.4.2.el7.x86_64 Baseline results were generated using release v2.7.10, with hash 15c95b7d81dcf821daade360741e00714667653f from 2015-05-23 16:02:14+00:00 ---------------------------------------------------------------------------------- benchmark relative change since change since current rev run std_dev* last run baseline with PGO ---------------------------------------------------------------------------------- :-) django_v2 0.24% 0.27% 5.17% 10.84% :-) pybench 0.25% 0.04% 6.85% 6.47% :-| regex_v8 0.50% -0.02% -1.68% 6.70% :-) nbody 0.20% 0.13% 8.29% 3.19% :-) json_dump_v2 0.29% -0.15% 2.96% 14.51% :-( normal_startup 1.81% -1.16% -2.41% 3.37% :-| ssbench 0.76% -0.54% 0.93% 1.72% ---------------------------------------------------------------------------------- Note: Benchmark results for ssbench are measured in requests/second while all other are measured in seconds. * Relative Standard Deviation (Standard Deviation/Average) Our lab does a nightly source pull and build of the Python project and measures performance changes against the previous stable version and the previous nightly measurement. This is provided as a service to the community so that quality issues with current hardware can be identified quickly. Intel technologies' features and benefits depend on system configuration and may require enabled hardware, software or service activation. Performance varies depending on system configuration. 
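Both benchmark reports above express run-to-run noise as the "Relative Standard Deviation", defined in their footnotes as standard deviation divided by average. As a minimal sketch of that calculation (the timing samples below are made up for illustration and are not taken from either report):

    from statistics import mean, stdev

    # Hypothetical per-run timings in seconds (illustrative only).
    samples = [2.31, 2.29, 2.35, 2.30, 2.33]

    # Relative Standard Deviation = standard deviation / average,
    # shown as a percentage in the tables above.
    relative_std_dev = stdev(samples) / mean(samples) * 100
    print('%.2f%%' % relative_std_dev)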
From python-checkins at python.org Wed Oct 14 15:31:45 2015 From: python-checkins at python.org (victor.stinner) Date: Wed, 14 Oct 2015 13:31:45 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_test=5Fbytes=3A_new_try_to?= =?utf-8?q?_fix_test_on_=27=25p=27_formatter_on_Windows?= Message-ID: <20151014133145.3281.5617@psf.io> https://hg.python.org/cpython/rev/8e97d54e6d7d changeset: 98754:8e97d54e6d7d user: Victor Stinner date: Wed Oct 14 15:28:59 2015 +0200 summary: test_bytes: new try to fix test on '%p' formatter on Windows files: Lib/test/test_bytes.py | 22 ++++++++++++++++------ 1 files changed, 16 insertions(+), 6 deletions(-) diff --git a/Lib/test/test_bytes.py b/Lib/test/test_bytes.py --- a/Lib/test/test_bytes.py +++ b/Lib/test/test_bytes.py @@ -839,12 +839,22 @@ b'i=-123') self.assertEqual(PyBytes_FromFormat(b'x=%x', c_int(0xabc)), b'x=abc') + + sizeof_ptr = ctypes.sizeof(c_char_p) + + if os.name == 'nt': + # Windows (MSCRT) + ptr_format = '0x%0{}X'.format(2 * sizeof_ptr) + def ptr_formatter(ptr): + return (ptr_format % ptr) + else: + # UNIX (glibc) + def ptr_formatter(ptr): + return '%#x' % ptr + ptr = 0xabcdef - expected = [b'ptr=%#x' % ptr] - win_format = 'ptr=0x%0{}X'.format(2 * ctypes.sizeof(c_char_p)) - expected.append((win_format % ptr).encode('ascii')) - self.assertIn(PyBytes_FromFormat(b'ptr=%p', c_char_p(ptr)), - expected) + self.assertEqual(PyBytes_FromFormat(b'ptr=%p', c_char_p(ptr)), + ('ptr=' + ptr_formatter(ptr)).encode('ascii')) self.assertEqual(PyBytes_FromFormat(b's=%s', c_char_p(b'cstr')), b's=cstr') @@ -859,7 +869,7 @@ (b'%zd', c_ssize_t, _testcapi.PY_SSIZE_T_MIN, str), (b'%zd', c_ssize_t, _testcapi.PY_SSIZE_T_MAX, str), (b'%zu', c_size_t, size_max, str), - (b'%p', c_char_p, size_max, lambda value: '%#x' % value), + (b'%p', c_char_p, size_max, ptr_formatter), ): self.assertEqual(PyBytes_FromFormat(formatstr, ctypes_type(value)), py_formatter(value).encode('ascii')), -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Oct 14 15:31:45 2015 From: python-checkins at python.org (victor.stinner) Date: Wed, 14 Oct 2015 13:31:45 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Use_=5FPyBytesWriter_in_?= =?utf-8?q?=5FPyBytes=5FFromIterator=28=29?= Message-ID: <20151014133145.18360.21318@psf.io> https://hg.python.org/cpython/rev/6d59c4bb2dab changeset: 98751:6d59c4bb2dab user: Victor Stinner date: Wed Oct 14 14:15:49 2015 +0200 summary: Use _PyBytesWriter in _PyBytes_FromIterator() files: Objects/bytesobject.c | 37 +++++++++++++++--------------- 1 files changed, 18 insertions(+), 19 deletions(-) diff --git a/Objects/bytesobject.c b/Objects/bytesobject.c --- a/Objects/bytesobject.c +++ b/Objects/bytesobject.c @@ -3456,23 +3456,23 @@ static PyObject * _PyBytes_FromIterator(PyObject *x) { - PyObject *new, *it; + char *str; + PyObject *it; Py_ssize_t i, size; + _PyBytesWriter writer; + + _PyBytesWriter_Init(&writer); /* For iterator version, create a string object and resize as needed */ size = PyObject_LengthHint(x, 64); if (size == -1 && PyErr_Occurred()) return NULL; - /* Allocate an extra byte to prevent PyBytes_FromStringAndSize() from - returning a shared empty bytes string. This required because we - want to call _PyBytes_Resize() the returned object, which we can - only do on bytes objects with refcount == 1. 
*/ - if (size == 0) - size = 1; - new = PyBytes_FromStringAndSize(NULL, size); - if (new == NULL) + + str = _PyBytesWriter_Alloc(&writer, size); + if (str == NULL) return NULL; - assert(Py_REFCNT(new) == 1); + writer.overallocate = 1; + size = writer.allocated; /* Get the iterator */ it = PyObject_GetIter(x); @@ -3507,21 +3507,20 @@ /* Append the byte */ if (i >= size) { - size = 2 * size + 1; - if (_PyBytes_Resize(&new, size) < 0) - goto error; + str = _PyBytesWriter_Resize(&writer, str, size+1); + if (str == NULL) + return NULL; + size = writer.allocated; } - ((PyBytesObject *)new)->ob_sval[i] = (char) value; + *str++ = (char) value; } - _PyBytes_Resize(&new, i); - - /* Clean up and return success */ Py_DECREF(it); - return new; + + return _PyBytesWriter_Finish(&writer, str); error: + _PyBytesWriter_Dealloc(&writer); Py_XDECREF(it); - Py_XDECREF(new); return NULL; } -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Oct 14 15:31:45 2015 From: python-checkins at python.org (victor.stinner) Date: Wed, 14 Oct 2015 13:31:45 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2325384=3A_Fix_bina?= =?utf-8?q?scii=2Erledecode=5Fhqx=28=29?= Message-ID: <20151014133145.483.15807@psf.io> https://hg.python.org/cpython/rev/32b17c3b3cf3 changeset: 98752:32b17c3b3cf3 user: Victor Stinner date: Wed Oct 14 15:02:35 2015 +0200 summary: Issue #25384: Fix binascii.rledecode_hqx() Fix usage of _PyBytesWriter API. Use the new _PyBytesWriter_Resize() function instead of _PyBytesWriter_Prepare(). files: Lib/test/test_binascii.py | 16 +++++++++++++++- Modules/binascii.c | 11 +++++++---- 2 files changed, 22 insertions(+), 5 deletions(-) diff --git a/Lib/test/test_binascii.py b/Lib/test/test_binascii.py --- a/Lib/test/test_binascii.py +++ b/Lib/test/test_binascii.py @@ -159,10 +159,24 @@ # Then calculate the hexbin4 binary-to-ASCII translation rle = binascii.rlecode_hqx(self.data) a = binascii.b2a_hqx(self.type2test(rle)) + b, _ = binascii.a2b_hqx(self.type2test(a)) res = binascii.rledecode_hqx(b) + self.assertEqual(res, self.rawdata) - self.assertEqual(res, self.rawdata) + def test_rle(self): + # test repetition with a repetition longer than the limit of 255 + data = (b'a' * 100 + b'b' + b'c' * 300) + + encoded = binascii.rlecode_hqx(data) + self.assertEqual(encoded, + (b'a\x90d' # 'a' * 100 + b'b' # 'b' + b'c\x90\xff' # 'c' * 255 + b'c\x90-')) # 'c' * 45 + + decoded = binascii.rledecode_hqx(encoded) + self.assertEqual(decoded, data) def test_hex(self): # test hexlification diff --git a/Modules/binascii.c b/Modules/binascii.c --- a/Modules/binascii.c +++ b/Modules/binascii.c @@ -800,14 +800,15 @@ return PyErr_NoMemory(); /* Allocate a buffer of reasonable size. 
Resized when needed */ - out_len = in_len * 2; + out_len = in_len; out_data = _PyBytesWriter_Alloc(&writer, out_len); if (out_data == NULL) return NULL; /* Use overallocation */ writer.overallocate = 1; - out_len_left = writer.allocated; + out_len = writer.allocated; + out_len_left = out_len; /* ** We need two macros here to get/put bytes and handle @@ -830,10 +831,12 @@ overallocate the buffer anymore */ \ writer.overallocate = 0; \ } \ - out_data = _PyBytesWriter_Prepare(&writer, out_data, 1); \ + out_data = _PyBytesWriter_Resize(&writer, out_data, \ + writer.allocated + 1); \ if (out_data == NULL) \ goto error; \ - out_len_left = writer.allocated; \ + out_len_left = writer.allocated - out_len - 1; \ + out_len = writer.allocated; \ } \ *out_data++ = b; \ } while(0) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Oct 14 15:31:45 2015 From: python-checkins at python.org (victor.stinner) Date: Wed, 14 Oct 2015 13:31:45 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Split_PyBytes=5FFromObject?= =?utf-8?q?=28=29_into_subfunctions?= Message-ID: <20151014133144.18364.33245@psf.io> https://hg.python.org/cpython/rev/1cf65cd32a55 changeset: 98748:1cf65cd32a55 user: Victor Stinner date: Wed Oct 14 13:44:29 2015 +0200 summary: Split PyBytes_FromObject() into subfunctions files: Objects/bytesobject.c | 201 ++++++++++++++++++----------- 1 files changed, 122 insertions(+), 79 deletions(-) diff --git a/Objects/bytesobject.c b/Objects/bytesobject.c --- a/Objects/bytesobject.c +++ b/Objects/bytesobject.c @@ -3384,89 +3384,100 @@ return PyBytes_FromObject(x); } -PyObject * -PyBytes_FromObject(PyObject *x) +static PyObject* +_PyBytes_FromBuffer(PyObject *x) +{ + PyObject *new; + Py_buffer view; + + if (PyObject_GetBuffer(x, &view, PyBUF_FULL_RO) < 0) + return NULL; + + new = PyBytes_FromStringAndSize(NULL, view.len); + if (!new) + goto fail; + if (PyBuffer_ToContiguous(((PyBytesObject *)new)->ob_sval, + &view, view.len, 'C') < 0) + goto fail; + PyBuffer_Release(&view); + return new; + +fail: + Py_XDECREF(new); + PyBuffer_Release(&view); + return NULL; +} + +static PyObject* +_PyBytes_FromList(PyObject *x) +{ + PyObject *new; + Py_ssize_t i; + Py_ssize_t value; + char *str; + + new = PyBytes_FromStringAndSize(NULL, Py_SIZE(x)); + if (new == NULL) + return NULL; + str = ((PyBytesObject *)new)->ob_sval; + + for (i = 0; i < Py_SIZE(x); i++) { + value = PyNumber_AsSsize_t(PyList_GET_ITEM(x, i), PyExc_ValueError); + if (value == -1 && PyErr_Occurred()) + goto error; + + if (value < 0 || value >= 256) { + PyErr_SetString(PyExc_ValueError, + "bytes must be in range(0, 256)"); + goto error; + } + *str++ = (char) value; + } + return new; + +error: + Py_DECREF(new); + return NULL; +} + +static PyObject* +_PyBytes_FromTuple(PyObject *x) +{ + PyObject *new; + Py_ssize_t i; + Py_ssize_t value; + char *str; + + new = PyBytes_FromStringAndSize(NULL, Py_SIZE(x)); + if (new == NULL) + return NULL; + str = ((PyBytesObject *)new)->ob_sval; + + for (i = 0; i < Py_SIZE(x); i++) { + value = PyNumber_AsSsize_t(PyTuple_GET_ITEM(x, i), PyExc_ValueError); + if (value == -1 && PyErr_Occurred()) + goto error; + + if (value < 0 || value >= 256) { + PyErr_SetString(PyExc_ValueError, + "bytes must be in range(0, 256)"); + goto error; + } + *str++ = (char) value; + } + return new; + +error: + Py_DECREF(new); + return NULL; +} + +static PyObject * +_PyBytes_FromIterator(PyObject *x) { PyObject *new, *it; Py_ssize_t i, size; - if (x == NULL) { - PyErr_BadInternalCall(); - return NULL; - } - - if 
(PyBytes_CheckExact(x)) { - Py_INCREF(x); - return x; - } - - /* Use the modern buffer interface */ - if (PyObject_CheckBuffer(x)) { - Py_buffer view; - if (PyObject_GetBuffer(x, &view, PyBUF_FULL_RO) < 0) - return NULL; - new = PyBytes_FromStringAndSize(NULL, view.len); - if (!new) - goto fail; - if (PyBuffer_ToContiguous(((PyBytesObject *)new)->ob_sval, - &view, view.len, 'C') < 0) - goto fail; - PyBuffer_Release(&view); - return new; - fail: - Py_XDECREF(new); - PyBuffer_Release(&view); - return NULL; - } - if (PyUnicode_Check(x)) { - PyErr_SetString(PyExc_TypeError, - "cannot convert unicode object to bytes"); - return NULL; - } - - if (PyList_CheckExact(x)) { - new = PyBytes_FromStringAndSize(NULL, Py_SIZE(x)); - if (new == NULL) - return NULL; - for (i = 0; i < Py_SIZE(x); i++) { - Py_ssize_t value = PyNumber_AsSsize_t( - PyList_GET_ITEM(x, i), PyExc_ValueError); - if (value == -1 && PyErr_Occurred()) { - Py_DECREF(new); - return NULL; - } - if (value < 0 || value >= 256) { - PyErr_SetString(PyExc_ValueError, - "bytes must be in range(0, 256)"); - Py_DECREF(new); - return NULL; - } - ((PyBytesObject *)new)->ob_sval[i] = (char) value; - } - return new; - } - if (PyTuple_CheckExact(x)) { - new = PyBytes_FromStringAndSize(NULL, Py_SIZE(x)); - if (new == NULL) - return NULL; - for (i = 0; i < Py_SIZE(x); i++) { - Py_ssize_t value = PyNumber_AsSsize_t( - PyTuple_GET_ITEM(x, i), PyExc_ValueError); - if (value == -1 && PyErr_Occurred()) { - Py_DECREF(new); - return NULL; - } - if (value < 0 || value >= 256) { - PyErr_SetString(PyExc_ValueError, - "bytes must be in range(0, 256)"); - Py_DECREF(new); - return NULL; - } - ((PyBytesObject *)new)->ob_sval[i] = (char) value; - } - return new; - } - /* For iterator version, create a string object and resize as needed */ size = PyObject_LengthHint(x, 64); if (size == -1 && PyErr_Occurred()) @@ -3533,6 +3544,38 @@ return NULL; } +PyObject * +PyBytes_FromObject(PyObject *x) +{ + if (x == NULL) { + PyErr_BadInternalCall(); + return NULL; + } + + if (PyBytes_CheckExact(x)) { + Py_INCREF(x); + return x; + } + + /* Use the modern buffer interface */ + if (PyObject_CheckBuffer(x)) + return _PyBytes_FromBuffer(x); + + if (PyList_CheckExact(x)) + return _PyBytes_FromList(x); + + if (PyTuple_CheckExact(x)) + return _PyBytes_FromTuple(x); + + if (PyUnicode_Check(x)) { + PyErr_SetString(PyExc_TypeError, + "cannot convert unicode object to bytes"); + return NULL; + } + + return _PyBytes_FromIterator(x); +} + static PyObject * str_subtype_new(PyTypeObject *type, PyObject *args, PyObject *kwds) { -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Oct 14 15:31:45 2015 From: python-checkins at python.org (victor.stinner) Date: Wed, 14 Oct 2015 13:31:45 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Factorize_=5FPyBytes=5FFro?= =?utf-8?q?mList=28=29_and_=5FPyBytes=5FFromTuple=28=29_code_using_a_C_mac?= =?utf-8?q?ro?= Message-ID: <20151014133145.55474.63931@psf.io> https://hg.python.org/cpython/rev/270378da0396 changeset: 98749:270378da0396 user: Victor Stinner date: Wed Oct 14 13:50:40 2015 +0200 summary: Factorize _PyBytes_FromList() and _PyBytes_FromTuple() code using a C macro files: Objects/bytesobject.c | 89 ++++++++++++------------------ 1 files changed, 35 insertions(+), 54 deletions(-) diff --git a/Objects/bytesobject.c b/Objects/bytesobject.c --- a/Objects/bytesobject.c +++ b/Objects/bytesobject.c @@ -3408,68 +3408,49 @@ return NULL; } +#define _PyBytes_FROM_LIST_BODY(x, GET_ITEM) \ + do { \ + PyObject *bytes; \ + 
Py_ssize_t i; \ + Py_ssize_t value; \ + char *str; \ + PyObject *item; \ + \ + bytes = PyBytes_FromStringAndSize(NULL, Py_SIZE(x)); \ + if (bytes == NULL) \ + return NULL; \ + str = ((PyBytesObject *)bytes)->ob_sval; \ + \ + for (i = 0; i < Py_SIZE(x); i++) { \ + item = GET_ITEM((x), i); \ + value = PyNumber_AsSsize_t(item, PyExc_ValueError); \ + if (value == -1 && PyErr_Occurred()) \ + goto error; \ + \ + if (value < 0 || value >= 256) { \ + PyErr_SetString(PyExc_ValueError, \ + "bytes must be in range(0, 256)"); \ + goto error; \ + } \ + *str++ = (char) value; \ + } \ + return bytes; \ + \ + error: \ + Py_DECREF(bytes); \ + return NULL; \ + } while (0) + static PyObject* _PyBytes_FromList(PyObject *x) { - PyObject *new; - Py_ssize_t i; - Py_ssize_t value; - char *str; - - new = PyBytes_FromStringAndSize(NULL, Py_SIZE(x)); - if (new == NULL) - return NULL; - str = ((PyBytesObject *)new)->ob_sval; - - for (i = 0; i < Py_SIZE(x); i++) { - value = PyNumber_AsSsize_t(PyList_GET_ITEM(x, i), PyExc_ValueError); - if (value == -1 && PyErr_Occurred()) - goto error; - - if (value < 0 || value >= 256) { - PyErr_SetString(PyExc_ValueError, - "bytes must be in range(0, 256)"); - goto error; - } - *str++ = (char) value; - } - return new; - -error: - Py_DECREF(new); - return NULL; + _PyBytes_FROM_LIST_BODY(x, PyList_GET_ITEM); } static PyObject* _PyBytes_FromTuple(PyObject *x) { - PyObject *new; - Py_ssize_t i; - Py_ssize_t value; - char *str; - - new = PyBytes_FromStringAndSize(NULL, Py_SIZE(x)); - if (new == NULL) - return NULL; - str = ((PyBytesObject *)new)->ob_sval; - - for (i = 0; i < Py_SIZE(x); i++) { - value = PyNumber_AsSsize_t(PyTuple_GET_ITEM(x, i), PyExc_ValueError); - if (value == -1 && PyErr_Occurred()) - goto error; - - if (value < 0 || value >= 256) { - PyErr_SetString(PyExc_ValueError, - "bytes must be in range(0, 256)"); - goto error; - } - *str++ = (char) value; - } - return new; - -error: - Py_DECREF(new); - return NULL; + _PyBytes_FROM_LIST_BODY(x, PyTuple_GET_ITEM); } static PyObject * -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Oct 14 15:31:46 2015 From: python-checkins at python.org (victor.stinner) Date: Wed, 14 Oct 2015 13:31:46 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Add_=5FPyBytesWriter=5FRes?= =?utf-8?q?ize=28=29_function?= Message-ID: <20151014133145.7264.89980@psf.io> https://hg.python.org/cpython/rev/705ae6d08f88 changeset: 98750:705ae6d08f88 user: Victor Stinner date: Wed Oct 14 13:56:47 2015 +0200 summary: Add _PyBytesWriter_Resize() function This function gives a control to the buffer size without using min_size. files: Include/bytesobject.h | 19 +++++++++++- Objects/bytesobject.c | 50 +++++++++++++++++++----------- 2 files changed, 49 insertions(+), 20 deletions(-) diff --git a/Include/bytesobject.h b/Include/bytesobject.h --- a/Include/bytesobject.h +++ b/Include/bytesobject.h @@ -178,7 +178,9 @@ PyAPI_FUNC(void*) _PyBytesWriter_Alloc(_PyBytesWriter *writer, Py_ssize_t size); -/* Add *size* bytes to the buffer. +/* Ensure that the buffer is large enough to write *size* bytes. + Add size to the writer minimum size (min_size attribute). + str is the current pointer inside the buffer. Return the updated current pointer inside the buffer. Raise an exception and return NULL on error. */ @@ -186,6 +188,21 @@ void *str, Py_ssize_t size); +/* Resize the buffer to make it larger. + The new buffer may be larger than size bytes because of overallocation. + Return the updated current pointer inside the buffer. 
+ Raise an exception and return NULL on error. + + Note: size must be greater than the number of allocated bytes in the writer. + + This function doesn't use the writer minimum size (min_size attribute). + + See also _PyBytesWriter_Prepare(). + */ +PyAPI_FUNC(void*) _PyBytesWriter_Resize(_PyBytesWriter *writer, + void *str, + Py_ssize_t size); + /* Write bytes. Raise an exception and return NULL on error. */ PyAPI_FUNC(void*) _PyBytesWriter_WriteBytes(_PyBytesWriter *writer, diff --git a/Objects/bytesobject.c b/Objects/bytesobject.c --- a/Objects/bytesobject.c +++ b/Objects/bytesobject.c @@ -3997,29 +3997,14 @@ } void* -_PyBytesWriter_Prepare(_PyBytesWriter *writer, void *str, Py_ssize_t size) +_PyBytesWriter_Resize(_PyBytesWriter *writer, void *str, Py_ssize_t size) { Py_ssize_t allocated, pos; _PyBytesWriter_CheckConsistency(writer, str); - assert(size >= 0); - - if (size == 0) { - /* nothing to do */ - return str; - } - - if (writer->min_size > PY_SSIZE_T_MAX - size) { - PyErr_NoMemory(); - goto error; - } - writer->min_size += size; - - allocated = writer->allocated; - if (writer->min_size <= allocated) - return str; - - allocated = writer->min_size; + assert(writer->allocated < size); + + allocated = size; if (writer->overallocate && allocated <= (PY_SSIZE_T_MAX - allocated / OVERALLOCATE_FACTOR)) { /* overallocate to limit the number of realloc() */ @@ -4080,6 +4065,33 @@ return NULL; } +void* +_PyBytesWriter_Prepare(_PyBytesWriter *writer, void *str, Py_ssize_t size) +{ + Py_ssize_t new_min_size; + + _PyBytesWriter_CheckConsistency(writer, str); + assert(size >= 0); + + if (size == 0) { + /* nothing to do */ + return str; + } + + if (writer->min_size > PY_SSIZE_T_MAX - size) { + PyErr_NoMemory(); + _PyBytesWriter_Dealloc(writer); + return NULL; + } + new_min_size = writer->min_size + size; + + if (new_min_size > writer->allocated) + str = _PyBytesWriter_Resize(writer, str, new_min_size); + + writer->min_size = new_min_size; + return str; +} + /* Allocate the buffer to write size bytes. Return the pointer to the beginning of buffer data. Raise an exception and return NULL on error. */ -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Oct 14 15:31:47 2015 From: python-checkins at python.org (victor.stinner) Date: Wed, 14 Oct 2015 13:31:47 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Refactor_binascii=2Erledec?= =?utf-8?b?b2RlX2hxeCgp?= Message-ID: <20151014133145.97722.47050@psf.io> https://hg.python.org/cpython/rev/665971f3e95d changeset: 98753:665971f3e95d user: Victor Stinner date: Wed Oct 14 15:20:07 2015 +0200 summary: Refactor binascii.rledecode_hqx() Rewrite the code to handle the output buffer. files: Modules/binascii.c | 53 ++++++++++++++++----------------- 1 files changed, 25 insertions(+), 28 deletions(-) diff --git a/Modules/binascii.c b/Modules/binascii.c --- a/Modules/binascii.c +++ b/Modules/binascii.c @@ -784,7 +784,7 @@ { unsigned char *in_data, *out_data; unsigned char in_byte, in_repeat; - Py_ssize_t in_len, out_len, out_len_left; + Py_ssize_t in_len; _PyBytesWriter writer; in_data = data->buf; @@ -800,15 +800,12 @@ return PyErr_NoMemory(); /* Allocate a buffer of reasonable size. 
Resized when needed */ - out_len = in_len; - out_data = _PyBytesWriter_Alloc(&writer, out_len); + out_data = _PyBytesWriter_Alloc(&writer, in_len); if (out_data == NULL) return NULL; /* Use overallocation */ writer.overallocate = 1; - out_len = writer.allocated; - out_len_left = out_len; /* ** We need two macros here to get/put bytes and handle @@ -823,24 +820,6 @@ b = *in_data++; \ } while(0) -#define OUTBYTE(b) \ - do { \ - if ( --out_len_left < 0 ) { \ - if (in_len <= 0) { \ - /* We are done after this write, no need to \ - overallocate the buffer anymore */ \ - writer.overallocate = 0; \ - } \ - out_data = _PyBytesWriter_Resize(&writer, out_data, \ - writer.allocated + 1); \ - if (out_data == NULL) \ - goto error; \ - out_len_left = writer.allocated - out_len - 1; \ - out_len = writer.allocated; \ - } \ - *out_data++ = b; \ - } while(0) - /* ** Handle first byte separately (since we have to get angry ** in case of an orphaned RLE code). @@ -849,6 +828,10 @@ if (in_byte == RUNCHAR) { INBYTE(in_repeat); + /* only 1 byte will be written, but 2 bytes were preallocated: + substract 1 byte to prevent overallocation */ + writer.min_size--; + if (in_repeat != 0) { /* Note Error, not Incomplete (which is at the end ** of the string only). This is a programmer error. @@ -856,9 +839,9 @@ PyErr_SetString(Error, "Orphaned RLE code at start"); goto error; } - OUTBYTE(RUNCHAR); + *out_data++ = RUNCHAR; } else { - OUTBYTE(in_byte); + *out_data++ = in_byte; } while( in_len > 0 ) { @@ -866,18 +849,32 @@ if (in_byte == RUNCHAR) { INBYTE(in_repeat); + /* only 1 byte will be written, but 2 bytes were preallocated: + substract 1 byte to prevent overallocation */ + writer.min_size--; + if ( in_repeat == 0 ) { /* Just an escaped RUNCHAR value */ - OUTBYTE(RUNCHAR); + *out_data++ = RUNCHAR; } else { /* Pick up value and output a sequence of it */ in_byte = out_data[-1]; + + /* enlarge the buffer if needed */ + if (in_repeat > 1) { + /* -1 because we already preallocated 1 byte */ + out_data = _PyBytesWriter_Prepare(&writer, out_data, + in_repeat - 1); + if (out_data == NULL) + goto error; + } + while ( --in_repeat > 0 ) - OUTBYTE(in_byte); + *out_data++ = in_byte; } } else { /* Normal byte */ - OUTBYTE(in_byte); + *out_data++ = in_byte; } } return _PyBytesWriter_Finish(&writer, out_data); -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Oct 14 18:23:54 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Wed, 14 Oct 2015 16:23:54 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2325406=3A_Fixed_a_bug_in_C_implementation_of_Ord?= =?utf-8?q?eredDict=2Emove=5Fto=5Fend=28=29?= Message-ID: <20151014162353.20767.8813@psf.io> https://hg.python.org/cpython/rev/33d53a41daeb changeset: 98756:33d53a41daeb parent: 98754:8e97d54e6d7d parent: 98755:88e6641c3dd3 user: Serhiy Storchaka date: Wed Oct 14 19:22:44 2015 +0300 summary: Issue #25406: Fixed a bug in C implementation of OrderedDict.move_to_end() that caused segmentation fault or hang in iterating after moving several items to the start of ordered dict. 
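To make the failure mode concrete, here is a minimal sketch (not part of the changeset) mirroring the regression test added below; on a build with the bug, iterating the C OrderedDict after these moves could hang or crash, while a fixed build prints the expected order:

    from collections import OrderedDict

    od = OrderedDict.fromkeys('abc')
    od.move_to_end('c', last=False)   # expected order: c, a, b
    od.move_to_end('a', last=False)   # expected order: a, c, b
    print(list(od))                   # expected: ['a', 'c', 'b']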
files: Lib/test/test_collections.py | 14 ++++++++++ Misc/NEWS | 4 +++ Objects/odictobject.c | 31 +++++++---------------- 3 files changed, 28 insertions(+), 21 deletions(-) diff --git a/Lib/test/test_collections.py b/Lib/test/test_collections.py --- a/Lib/test/test_collections.py +++ b/Lib/test/test_collections.py @@ -1995,6 +1995,20 @@ with self.assertRaises(KeyError): od.move_to_end('x', 0) + def test_move_to_end_issue25406(self): + OrderedDict = self.module.OrderedDict + od = OrderedDict.fromkeys('abc') + od.move_to_end('c', last=False) + self.assertEqual(list(od), list('cab')) + od.move_to_end('a', last=False) + self.assertEqual(list(od), list('acb')) + + od = OrderedDict.fromkeys('abc') + od.move_to_end('a') + self.assertEqual(list(od), list('bca')) + od.move_to_end('c') + self.assertEqual(list(od), list('bac')) + def test_sizeof(self): OrderedDict = self.module.OrderedDict # Wimpy test: Just verify the reported size is larger than a regular dict diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -63,6 +63,10 @@ Library ------- +- Issue #25406: Fixed a bug in C implementation of OrderedDict.move_to_end() + that caused segmentation fault or hang in iterating after moving several + items to the start of ordered dict. + - Issue #25382: pickletools.dis() now outputs implicit memo index for the MEMOIZE opcode. diff --git a/Objects/odictobject.c b/Objects/odictobject.c --- a/Objects/odictobject.c +++ b/Objects/odictobject.c @@ -618,37 +618,26 @@ static void _odict_add_head(PyODictObject *od, _ODictNode *node) { - if (_odict_FIRST(od) == NULL) { - _odictnode_PREV(node) = NULL; - _odictnode_NEXT(node) = NULL; - _odict_FIRST(od) = node; + _odictnode_PREV(node) = NULL; + _odictnode_NEXT(node) = _odict_FIRST(od); + if (_odict_FIRST(od) == NULL) _odict_LAST(od) = node; - } - else { - _odictnode_PREV(node) = NULL; - _odictnode_NEXT(node) = _odict_FIRST(od); - _odict_FIRST(od) = node; + else _odictnode_PREV(_odict_FIRST(od)) = node; - } + _odict_FIRST(od) = node; od->od_state++; } static void _odict_add_tail(PyODictObject *od, _ODictNode *node) { - if (_odict_LAST(od) == NULL) { - _odictnode_PREV(node) = NULL; - _odictnode_NEXT(node) = NULL; + _odictnode_PREV(node) = _odict_LAST(od); + _odictnode_NEXT(node) = NULL; + if (_odict_LAST(od) == NULL) _odict_FIRST(od) = node; - _odict_LAST(od) = node; - } - else { - _odictnode_PREV(node) = _odict_LAST(od); - _odictnode_NEXT(node) = NULL; + else _odictnode_NEXT(_odict_LAST(od)) = node; - _odict_LAST(od) = node; - } - + _odict_LAST(od) = node; od->od_state++; } -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Oct 14 18:23:54 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Wed, 14 Oct 2015 16:23:54 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy41KTogSXNzdWUgIzI1NDA2?= =?utf-8?q?=3A_Fixed_a_bug_in_C_implementation_of_OrderedDict=2Emove=5Fto?= =?utf-8?b?X2VuZCgp?= Message-ID: <20151014162353.20765.42385@psf.io> https://hg.python.org/cpython/rev/88e6641c3dd3 changeset: 98755:88e6641c3dd3 branch: 3.5 parent: 98737:4423e5022378 user: Serhiy Storchaka date: Wed Oct 14 19:21:24 2015 +0300 summary: Issue #25406: Fixed a bug in C implementation of OrderedDict.move_to_end() that caused segmentation fault or hang in iterating after moving several items to the start of ordered dict. 
files: Lib/test/test_collections.py | 14 ++++++++++ Misc/NEWS | 4 +++ Objects/odictobject.c | 31 +++++++---------------- 3 files changed, 28 insertions(+), 21 deletions(-) diff --git a/Lib/test/test_collections.py b/Lib/test/test_collections.py --- a/Lib/test/test_collections.py +++ b/Lib/test/test_collections.py @@ -1995,6 +1995,20 @@ with self.assertRaises(KeyError): od.move_to_end('x', 0) + def test_move_to_end_issue25406(self): + OrderedDict = self.module.OrderedDict + od = OrderedDict.fromkeys('abc') + od.move_to_end('c', last=False) + self.assertEqual(list(od), list('cab')) + od.move_to_end('a', last=False) + self.assertEqual(list(od), list('acb')) + + od = OrderedDict.fromkeys('abc') + od.move_to_end('a') + self.assertEqual(list(od), list('bca')) + od.move_to_end('c') + self.assertEqual(list(od), list('bac')) + def test_sizeof(self): OrderedDict = self.module.OrderedDict # Wimpy test: Just verify the reported size is larger than a regular dict diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -45,6 +45,10 @@ Library ------- +- Issue #25406: Fixed a bug in C implementation of OrderedDict.move_to_end() + that caused segmentation fault or hang in iterating after moving several + items to the start of ordered dict. + - Issue #25364: zipfile now works in threads disabled builds. - Issue #25328: smtpd's SMTPChannel now correctly raises a ValueError if both diff --git a/Objects/odictobject.c b/Objects/odictobject.c --- a/Objects/odictobject.c +++ b/Objects/odictobject.c @@ -618,37 +618,26 @@ static void _odict_add_head(PyODictObject *od, _ODictNode *node) { - if (_odict_FIRST(od) == NULL) { - _odictnode_PREV(node) = NULL; - _odictnode_NEXT(node) = NULL; - _odict_FIRST(od) = node; + _odictnode_PREV(node) = NULL; + _odictnode_NEXT(node) = _odict_FIRST(od); + if (_odict_FIRST(od) == NULL) _odict_LAST(od) = node; - } - else { - _odictnode_PREV(node) = NULL; - _odictnode_NEXT(node) = _odict_FIRST(od); - _odict_FIRST(od) = node; + else _odictnode_PREV(_odict_FIRST(od)) = node; - } + _odict_FIRST(od) = node; od->od_state++; } static void _odict_add_tail(PyODictObject *od, _ODictNode *node) { - if (_odict_LAST(od) == NULL) { - _odictnode_PREV(node) = NULL; - _odictnode_NEXT(node) = NULL; + _odictnode_PREV(node) = _odict_LAST(od); + _odictnode_NEXT(node) = NULL; + if (_odict_LAST(od) == NULL) _odict_FIRST(od) = node; - _odict_LAST(od) = node; - } - else { - _odictnode_PREV(node) = _odict_LAST(od); - _odictnode_NEXT(node) = NULL; + else _odictnode_NEXT(_odict_LAST(od)) = node; - _odict_LAST(od) = node; - } - + _odict_LAST(od) = node; od->od_state++; } -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Oct 14 18:44:38 2015 From: python-checkins at python.org (victor.stinner) Date: Wed, 14 Oct 2015 16:44:38 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2325210=3A_Change_e?= =?utf-8?q?rror_message_of_do=5Frichcompare=28=29?= Message-ID: <20151014162905.3277.16117@psf.io> https://hg.python.org/cpython/rev/0238eafb68da changeset: 98757:0238eafb68da user: Victor Stinner date: Wed Oct 14 18:25:31 2015 +0200 summary: Issue #25210: Change error message of do_richcompare() Don't add parenthesis to type names. Add also quotes around the type names. 
Before: TypeError: unorderable types: int() < NoneType() After: TypeError: '<' not supported between instances of 'int' and 'NoneType' files: Doc/howto/argparse.rst | 3 ++- Doc/library/enum.rst | 2 +- Doc/library/pathlib.rst | 2 +- Objects/object.c | 5 ++--- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Doc/howto/argparse.rst b/Doc/howto/argparse.rst --- a/Doc/howto/argparse.rst +++ b/Doc/howto/argparse.rst @@ -547,7 +547,8 @@ Traceback (most recent call last): File "prog.py", line 11, in if args.verbosity >= 2: - TypeError: unorderable types: NoneType() >= int() + TypeError: '>=' not supported between instances of 'NoneType' and 'int' + * First output went well, and fixes the bug we had before. That is, we want any value >= 2 to be as verbose as possible. diff --git a/Doc/library/enum.rst b/Doc/library/enum.rst --- a/Doc/library/enum.rst +++ b/Doc/library/enum.rst @@ -257,7 +257,7 @@ >>> Color.red < Color.blue Traceback (most recent call last): File "", line 1, in - TypeError: unorderable types: Color() < Color() + TypeError: '<' not supported between instances of 'Color' and 'Color' Equality comparisons are defined though:: diff --git a/Doc/library/pathlib.rst b/Doc/library/pathlib.rst --- a/Doc/library/pathlib.rst +++ b/Doc/library/pathlib.rst @@ -195,7 +195,7 @@ >>> PureWindowsPath('foo') < PurePosixPath('foo') Traceback (most recent call last): File "", line 1, in - TypeError: unorderable types: PureWindowsPath() < PurePosixPath() + TypeError: '<' not supported between instances of 'PureWindowsPath' and 'PurePosixPath' Operators diff --git a/Objects/object.c b/Objects/object.c --- a/Objects/object.c +++ b/Objects/object.c @@ -686,11 +686,10 @@ res = (v != w) ? Py_True : Py_False; break; default: - /* XXX Special-case None so it doesn't show as NoneType() */ PyErr_Format(PyExc_TypeError, - "unorderable types: %.100s() %s %.100s()", + "'%s' not supported between instances of '%.100s' and '%.100s'", + opstrings[op], v->ob_type->tp_name, - opstrings[op], w->ob_type->tp_name); return NULL; } -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Oct 14 19:42:09 2015 From: python-checkins at python.org (steve.dower) Date: Wed, 14 Oct 2015 17:42:09 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy41KTogSXNzdWUgIzI1MTY0?= =?utf-8?q?=3A_Changes_default_all-users_install_directory_to_match_per-us?= =?utf-8?q?er?= Message-ID: <20151014174209.7240.35628@psf.io> https://hg.python.org/cpython/rev/be92f6ce1e80 changeset: 98758:be92f6ce1e80 branch: 3.5 parent: 98755:88e6641c3dd3 user: Steve Dower date: Wed Oct 14 10:36:36 2015 -0700 summary: Issue #25164: Changes default all-users install directory to match per-user directory. files: Misc/NEWS | 3 + Tools/msi/bundle/bundle.wxs | 12 ++-- Tools/msi/bundle/packagegroups/packageinstall.wxs | 4 +- Tools/msi/bundle/packagegroups/postinstall.wxs | 24 +++++----- 4 files changed, 24 insertions(+), 19 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -320,6 +320,9 @@ Windows ------- +- Issue #25164: Changes default all-users install directory to match per-user + directory. + - Issue #25143: Improves installer error messages for unsupported platforms. 
- Issue #25163: Display correct directory in installer when using non-default diff --git a/Tools/msi/bundle/bundle.wxs b/Tools/msi/bundle/bundle.wxs --- a/Tools/msi/bundle/bundle.wxs +++ b/Tools/msi/bundle/bundle.wxs @@ -22,20 +22,22 @@ + + - + - + - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + + + + + + + UTC local + + + + t + u0 + u1 + + + + Fold + + + + + + + + + + + diff --git a/pep-0495.txt b/pep-0495.txt --- a/pep-0495.txt +++ b/pep-0495.txt @@ -4,34 +4,29 @@ Last-Modified: $Date$ Author: Alexander Belopolsky , Tim Peters Discussions-To: Datetime-SIG -Status: Draft +Status: Accepted Type: Standards Track Content-Type: text/x-rst Created: 02-Aug-2015 - +Python-Version: 3.6 +Resolution: https://mail.python.org/pipermail/datetime-sig/2015-September/000900.html Abstract ======== -This PEP adds a new attribute ``fold`` to the instances of +This PEP adds a new attribute ``fold`` to instances of the ``datetime.time`` and ``datetime.datetime`` classes that can be used to differentiate between two moments in time for which local times are -the same. The allowed values for the `fold` attribute will be 0 and 1 +the same. The allowed values for the ``fold`` attribute will be 0 and 1 with 0 corresponding to the earlier and 1 to the later of the two possible readings of an ambiguous local time. -.. sidebar:: US public service advertisement - - .. image:: pep-0495-daylightsavings.png - :align: center - :width: 95% - Rationale ========= -In the most world locations there have been and will be times when +In most world locations, there have been and will be times when local clocks are moved back. [#]_ In those times, intervals are introduced in which local clocks show the same time twice in the same day. In these situations, the information displayed on a local clock @@ -40,9 +35,14 @@ attribute to the ``datetime`` instances taking values of 0 and 1 that will enumerate the two ambiguous times. +.. image:: pep-0495-daylightsavings.png + :align: center + :width: 30% + + .. [#] People who live in locations observing the Daylight Saving Time (DST) move their clocks back (usually one hour) every Fall. - + It is less common, but occasionally clocks can be moved back for other reasons. For example, Ukraine skipped the spring-forward transition in March 1990 and instead, moved their clocks back on @@ -76,11 +76,11 @@ The "fold" attribute -------------------- -We propose adding an attribute called ``fold`` to the instances -of ``datetime.time`` and ``datetime.datetime`` classes. This attribute -should have the value 0 for all instances except those that -represent the second (chronologically) moment in time in an ambiguous -case. For those instances, the value will be 1. [#]_ +We propose adding an attribute called ``fold`` to instances of the +``datetime.time`` and ``datetime.datetime`` classes. This attribute +should have the value 0 for all instances except those that represent +the second (chronologically) moment in time in an ambiguous case. For +those instances, the value will be 1. [#]_ .. [#] An instance that has ``fold=1`` in a non-ambiguous case is said to represent an invalid time (or is invalid for short), but @@ -93,10 +93,6 @@ this PEP specifies how various functions should behave when given an invalid instance. -.. 
image:: pep-0495-fold.png - :align: center - :width: 60% - Affected APIs ------------- @@ -121,15 +117,23 @@ The ``replace()`` methods of the ``datetime.time`` and ``datetime.datetime`` classes will get a new keyword-only argument -called ``fold``. It will -behave similarly to the other ``replace()`` arguments: if the ``fold`` -argument is specified and given a value 0 or 1, the new instance -returned by ``replace()`` will have its ``fold`` attribute set -to that value. In CPython, any non-integer value of ``fold`` will -raise a ``TypeError``, but other implementations may allow the value -``None`` to behave the same as when ``fold`` is not given. If the -``fold`` argument is not specified, the original value of the ``fold`` -attribute is copied to the result. +called ``fold``. It will behave similarly to the other ``replace()`` +arguments: if the ``fold`` argument is specified and given a value 0 +or 1, the new instance returned by ``replace()`` will have its +``fold`` attribute set to that value. In CPython, any non-integer +value of ``fold`` will raise a ``TypeError``, but other +implementations may allow the value ``None`` to behave the same as +when ``fold`` is not given. [#]_ (This is +a nod to the existing difference in treatment of ``None`` arguments +in other positions of this method across Python implementations; +it is not intended to leave the door open for future alternative +interpretation of ``fold=None``.) If the ``fold`` argument is not +specified, the original value of the ``fold`` attribute is copied to +the result. + +.. [#] PyPy and pure Python implementation distributed with CPython + already allow ``None`` to mean "no change to existing + attribute" for all other attributes in ``replace()``. C-API ..... @@ -137,14 +141,14 @@ Access macros will be defined to extract the value of ``fold`` from ``PyDateTime_DateTime`` and ``PyDateTime_Time`` objects. -.. code:: +.. code:: int PyDateTime_GET_FOLD(PyDateTime_DateTime *o) Return the value of ``fold`` as a C ``int``. -.. code:: - +.. code:: + int PyDateTime_TIME_GET_FOLD(PyDateTime_Time *o) Return the value of ``fold`` as a C ``int``. @@ -155,14 +159,17 @@ .. code:: - PyObject* PyDateTime_FromDateAndTimeAndFold(int year, int month, int day, int hour, int minute, int second, int usecond, int fold) + PyObject* PyDateTime_FromDateAndTimeAndFold( + int year, int month, int day, int hour, int minute, + int second, int usecond, int fold) Return a ``datetime.datetime`` object with the specified year, month, day, hour, minute, second, microsecond and fold. .. code:: - PyObject* PyTime_FromTimeAndFold(int hour, int minute, int second, int usecond, int fold) + PyObject* PyTime_FromTimeAndFold( + int hour, int minute, int second, int usecond, int fold) Return a ``datetime.time`` object with the specified hour, minute, second, microsecond and fold. @@ -174,18 +181,23 @@ What time is it? ................ -The ``datetime.now()`` method called with no arguments, will set +The ``datetime.now()`` method called without arguments will set ``fold=1`` when returning the second of the two ambiguous times in a system local time fold. When called with a ``tzinfo`` argument, the value of the ``fold`` will be determined by the ``tzinfo.fromutc()`` -implementation. If an instance of the ``datetime.timezone`` class -(*e.g.* ``datetime.timezone.utc``) is passed as ``tzinfo``, the +implementation. 
When an instance of the ``datetime.timezone`` class +(the stdlib's fixed-offset ``tzinfo`` subclass, +*e.g.* ``datetime.timezone.utc``) is passed as ``tzinfo``, the returned datetime instance will always have ``fold=0``. +The ``datetime.utcnow()`` method is unaffected. Conversion from naive to aware .............................. +A new feature is proposed to facilitate conversion from naive datetime +instances to aware. + The ``astimezone()`` method will now work for naive ``self``. The system local timezone will be assumed in this case and the ``fold`` flag will be used to determine which local timezone is in effect @@ -199,6 +211,11 @@ >>> dt.replace(fold=1).astimezone().strftime('%D %T %Z%z') '11/02/14 01:30:00 EST-0500' +An implication is that ``datetime.now(tz)`` is fully equivalent to +``datetime.now().astimezone(tz)`` (assuming ``tz`` is an instance of a +post-PEP ``tzinfo`` implementation, i.e. one that correctly handles +and sets ``fold``). + Conversion from POSIX seconds from EPOCH ........................................ @@ -227,8 +244,10 @@ datetime.fromtimestamp(s0) == datetime.fromtimestamp(s1) == dt +(This is because ``==`` disregards the value of fold -- see below.) + In this case, ``dt.timestamp()`` will return the smaller of ``s0`` -and ``s1`` values if ``dt.fold == True`` and the larger otherwise. +and ``s1`` values if ``dt.fold == 0`` and the larger otherwise. For example, on a system set to US/Eastern timezone:: @@ -238,7 +257,6 @@ >>> datetime(2014, 11, 2, 1, 30, fold=1).timestamp() 1414909800.0 - When a ``datetime.datetime`` instance ``dt`` represents a missing time, there is no value ``s`` for which:: @@ -254,6 +272,8 @@ The value returned by ``dt.timestamp()`` given a missing ``dt`` will be the greater of the two "nice to know" values if ``dt.fold == 0`` and the smaller otherwise. +(This is not a typo -- it's intentionally backwards from the rule for +ambiguous times.) For example, on a system set to US/Eastern timezone:: @@ -270,13 +290,14 @@ changes in the behavior of their aware datetime instances. Two such instances that differ only by the value of the ``fold`` attribute will not be distinguishable by any means other than an explicit access to -the ``fold`` value. +the ``fold`` value. (This is because these pre-PEP implementations +are not using the ``fold`` attribute.) -On the other hand, if object's ``tzinfo`` is set to a fold-aware -implementation, then the value of ``fold`` will affect the result of -several methods but only if the corresponding time is in a fold or in -a gap: ``utcoffset()``, ``dst()``, ``tzname()``, ``astimezone()``, -``strftime()`` (if "%Z" or "%z" directive is used in the format +On the other hand, if an object's ``tzinfo`` is set to a fold-aware +implementation, then in a fold or gap the value of ``fold`` will +affect the result of several methods: +``utcoffset()``, ``dst()``, ``tzname()``, ``astimezone()``, +``strftime()`` (if the "%Z" or "%z" directive is used in the format specification), ``isoformat()``, and ``timetuple()``. @@ -293,16 +314,21 @@ Pickles ....... +The value of the fold attribute will only be saved in pickles created +with protocol version 4 (introduced in Python 3.4) or greater. + Pickle sizes for the ``datetime.datetime`` and ``datetime.time`` objects will not change. The ``fold`` value will be encoded in the -first bit of the 5th byte of the ``datetime.datetime`` pickle payload -or the 2nd byte of the datetime.time. 
In the `current implementation`_ -these bytes are used to store minute value (0-59) and the first bit is -always 0. (This change only affects pickle format. In the C -implementation, the ``fold`` attribute will get a full byte to store its -value.) +first bit of the 3rd byte of the ``datetime.datetime`` +pickle payload; and in the first bit of the 1st byte of the +``datetime.time`` payload. In the `current implementation`_ +these bytes are used to store the month (1-12) and hour (0-23) values +and the first bit is always 0. We picked these bytes because they are +the only bytes that are checked by the current unpickle code. Thus +loading post-PEP ``fold=1`` pickles in a pre-PEP Python will result in +an exception rather than an instance with out of range components. -.. _current implementation: https://hg.python.org/cpython/file/d3b20bff9c5d/Include/datetime.h#l17 +.. _current implementation: https://hg.python.org/cpython/file/v3.5.0/Include/datetime.h#l10 Implementations of tzinfo in the Standard Library @@ -312,13 +338,16 @@ proposed in this PEP. The existing (fixed offset) timezones do not introduce ambiguous local times and their ``utcoffset()`` implementation will return the same constant value as they do now -regardless of the value of ``fold``. +regardless of the value of ``fold``. The basic implementation of ``fromutc()`` in the abstract -``datetime.tzinfo`` class will not change. It is currently not -used anywhere in the stdlib because the only included ``tzinfo`` -implementation (the ``datetime.timzeone`` class implementing fixed -offset timezones) override ``fromutc()``. +``datetime.tzinfo`` class will not change. It is currently not used +anywhere in the stdlib because the only included ``tzinfo`` +implementation (the ``datetime.timezone`` class implementing fixed +offset timezones) overrides ``fromutc()``. Keeping the default +implementation unchanged has the benefit that pre-PEP 3rd party +implementations that inherit the default ``fromutc()`` are not +accidentally affected. Guidelines for New tzinfo Implementations @@ -337,16 +366,102 @@ the ambiguous or missing times. -In the DST Fold ---------------- +In the Fold +----------- New subclasses should override the base-class ``fromutc()`` method and -implement it so that in all cases where two UTC times ``u1`` and -``u2`` (``u1`` <``u2``) correspond to the same local time -``fromutc(u1)`` will return an instance with ``fold=0`` and -``fromutc(u2)`` will return an instance with ``fold=1``. In all +implement it so that in all cases where two different UTC times ``u0`` and +``u1`` (``u0`` <``u1``) correspond to the same local time ``t``, +``fromutc(u0)`` will return an instance with ``fold=0`` and +``fromutc(u1)`` will return an instance with ``fold=1``. In all other cases the returned instance should have ``fold=0``. +The ``utcoffset()``, ``tzname()`` and ``dst()`` methods should use the +value of the fold attribute to determine whether an otherwise +ambiguous time ``t`` corresponds to the time before or after the +transition. By definition, ``utcoffset()`` is greater before and +smaller after any transition that creates a fold. The values returned +by ``tzname()`` and ``dst()`` may or may not depend on the value of +the ``fold`` attribute depending on the kind of the transition. + +.. image:: pep-0495-fold-2.png + :align: center + :width: 60% + +The sketch above illustrates the relationship between the UTC and +local time around a fall-back transition. The zig-zag line is a graph +of the function implemented by ``fromutc()``. 
Two intervals on the +UTC axis adjacent to the transition point and having the size of the +time shift at the transition are mapped to the same interval on the +local axis. New implementations of ``fromutc()`` method should set +the fold attribute to 1 when ``self`` is in the region marked in +yellow on the UTC axis. (All intervals should be treated as closed on +the left and open on the right.) + + +Mind the Gap +------------ + +The ``fromutc()`` method should never produce a time in the gap. + +If the ``utcoffset()``, ``tzname()`` or ``dst()`` method is called on a +local time that falls in a gap, the rules in effect before the +transition should be used if ``fold=0``. Otherwise, the rules in +effect after the transition should be used. + +.. image:: pep-0495-gap.png + :align: center + :width: 60% + +The sketch above illustrates the relationship between the UTC and +local time around a spring-forward transition. At the transition, the +local clock is advanced skipping the times in the gap. For the +purposes of determining the values of ``utcoffset()``, ``tzname()`` +and ``dst()``, the line before the transition is extended forward to +find the UTC time corresponding to the time in the gap with ``fold=0`` +and for instances with ``fold=1``, the line after the transition is +extended back. + +Summary of Rules at a Transition +-------------------------------- + +On ambiguous/missing times ``utcoffset()`` should return values +according to the following table: + ++-----------------+----------------+-----------------------------+ +| | fold=0 | fold=1 | ++=================+================+=============================+ +| Fold | oldoff | newoff = oldoff - delta | ++-----------------+----------------+-----------------------------+ +| Gap | oldoff | newoff = oldoff + delta | ++-----------------+----------------+-----------------------------+ + +where ``oldoff`` (``newoff``) is the UTC offset before (after) the +transition and ``delta`` is the absolute size of the fold or the gap. + +Note that the interpretation of the fold attribute is consistent in +the fold and gap cases. In both cases, ``fold=0`` (``fold=1``) means +use ``fromutc()`` line before (after) the transition to find the UTC +time. Only in the "Fold" case, the UTC times ``u0`` and ``u1`` are +"real" solutions for the equation ``fromutc(u) == t``, while in the +"Gap" case they are "imaginary" solutions. + + +The DST Transitions +------------------- + +On a missing time introduced at the start of DST, the values returned +by ``utcoffset()`` and ``dst()`` methods should be as follows + ++-----------------+----------------+------------------+ +| | fold=0 | fold=1 | ++=================+================+==================+ +| utcoffset() | stdoff | stdoff + dstoff | ++-----------------+----------------+------------------+ +| dst() | zero | dstoff | ++-----------------+----------------+------------------+ + + On an ambiguous time introduced at the end of DST, the values returned by ``utcoffset()`` and ``dst()`` methods should be as follows @@ -363,61 +478,101 @@ = timedelta(0)``. -Mind the DST Gap ----------------- +Temporal Arithmetic and Comparison Operators +============================================ -On a missing time introduced at the start of DST, the values returned -by ``utcoffset()`` and ``dst()`` methods should be as follows +.. 
epigraph:: -+-----------------+----------------+------------------+ -| | fold=0 | fold=1 | -+=================+================+==================+ -| utcoffset() | stdoff | stdoff + dstoff | -+-----------------+----------------+------------------+ -| dst() | zero | dstoff | -+-----------------+----------------+------------------+ + | In *mathematicks* he was greater + | Than Tycho Brahe, or Erra Pater: + | For he, by geometric scale, + | Could take the size of pots of ale; + | Resolve, by sines and tangents straight, + | If bread or butter wanted weight, + | And wisely tell what hour o' th' day + | The clock does strike by algebra. + -- "Hudibras" by Samuel Butler -Non-DST Folds and Gaps ----------------------- - -On ambiguous/missing times introduced by the change in the standard time -offset, the ``dst()`` method should return the same value regardless of -the value of ``fold`` and the ``utcoffset()`` should return values -according to the following table: - -+-----------------+----------------+-----------------------------+ -| | fold=0 | fold=1 | -+=================+================+=============================+ -| ambiguous | oldoff | newoff = oldoff - delta | -+-----------------+----------------+-----------------------------+ -| missing | oldoff | newoff = oldoff + delta | -+-----------------+----------------+-----------------------------+ - -where ``delta`` is the size of the fold or the gap. - - -Temporal Arithmetic -=================== - -The value of "fold" will be ignored in all operations except those -that involve conversion between timezones. [#]_ As a consequence, +The value of the ``fold`` attribute will be ignored in all operations +with naive datetime instances. As a consequence, naive ``datetime.datetime`` or ``datetime.time`` instances that differ only by the value of ``fold`` will compare as equal. Applications that need to differentiate between such instances should check the value of -``fold`` or convert them to a timezone that does not have ambiguous -times. +``fold`` explicitly or convert those instances to a timezone that does +not have ambiguous times (such as UTC). -The result of addition (subtraction) of a timedelta to (from) a -datetime will always have ``fold`` set to 0 even if the +The value of ``fold`` will also be ignored whenever a timedelta is +added to or subtracted from a datetime instance which may be either +aware or naive. The result of addition (subtraction) of a timedelta +to (from) a datetime will always have ``fold`` set to 0 even if the original datetime instance had ``fold=1``. -.. [#] Computing a difference between two aware datetime instances - with different values of ``tzinfo`` involves an implicit timezone - conversion. In this case, the result may depend on the value of - the ``fold`` attribute in either of the instances, but only if the - instance has ``tzinfo`` that accounts for the value of ``fold`` - in its ``utcoffset()`` method. +No changes are proposed to the way the difference ``t - s`` is +computed for datetime instances ``t`` and ``s``. If both instances +are naive or ``t.tzinfo`` is the same instance as ``s.tzinfo`` +(``t.tzinfo is s.tzinfo`` evaluates to ``True``) then ``t - s`` is a +timedelta ``d`` such that ``s + d == t``. As explained in the +previous paragraph, timedelta addition ignores both ``fold`` and +``tzinfo`` attributes and so does intra-zone or naive datetime +subtraction. + +Naive and intra-zone comparisons will ignore the value of ``fold`` and +return the same results as they do now. 
(This is the only way to +preserve backward compatibility. If you need an aware intra-zone +comparison that uses the fold, convert both sides to UTC first.) + +The inter-zone subtraction will be defined as it is now: ``t - s`` is +computed as ``(t - t.utcoffset()) - (s - +s.utcoffset()).replace(tzinfo=t.tzinfo)``, but the result will +depend on the values of ``t.fold`` and ``s.fold`` when either +``t.tzinfo`` or ``s.tzinfo`` is post-PEP. [#]_ + +.. [#] Note that the new rules may result in a paradoxical situation + when ``s == t`` but ``s - u != t - u``. Such paradoxes are + not really new and are inherent in the overloading of the minus + operator differently for intra- and inter-zone operations. For + example, one can easily construct datetime instances ``t`` and ``s`` + with some variable offset ``tzinfo`` and a datetime ``u`` with + ``tzinfo=timezone.utc`` such that ``(t - u) - (s - u) != t - s``. + The explanation for this paradox is that the minuses inside the + parentheses and the two other minuses are really three different + operations: inter-zone datetime subtraction, timedelta subtraction, + and intra-zone datetime subtraction, which each have the mathematical + properties of subtraction separately, but not when combined in a + single expression. + + +Aware datetime Equality Comparison +---------------------------------- + +The aware datetime comparison operators will work the same as they do +now, with results indirectly affected by the value of ``fold`` whenever +the ``utcoffset()`` value of one of the operands depends on it, with one +exception. Whenever one or both of the operands in inter-zone comparison is +such that its ``utcoffset()`` depends on the value of its ``fold`` +fold attribute, the result is ``False``. [#]_ + +.. [#] This exception is designed to preserve the hash and equivalence + invariants in the face of paradoxes of inter-zone arithmetic. + +Formally, ``t == s`` when ``t.tzinfo is s.tzinfo`` evaluates to +``False`` can be defined as follows. Let ``toutc(t, fold)`` be a +function that takes an aware datetime instance ``t`` and returns a +naive instance representing the same time in UTC assuming a given +value of ``fold``: + +.. code:: + + def toutc(t, fold): + u = t - t.replace(fold=fold).utcoffset() + return u.replace(tzinfo=None) + +Then ``t == s`` is equivalent to + +.. code:: + + toutc(t, fold=0) == toutc(t, fold=1) == toutc(s, fold=0) == toutc(s, fold=1) Backward and Forward Compatibility @@ -467,7 +622,7 @@ between fold=0 and fold=1 when I set it for tomorrow 01:30 AM. What should I do? * Alice: I've never hear of a Py-O-Clock, but I guess fold=0 is - the first 01:30 AM and fold=1 is the second. + the first 01:30 AM and fold=1 is the second. A technical reason @@ -538,13 +693,12 @@ **repeated** Did not receive any support on the mailing list. - + **ltdf** (Local Time Disambiguation Flag) - short and no-one will attempt - to guess what it means without reading the docs. (Feel free to - use it in discussions with the meaning ltdf=False is the - earlier if you don't want to endorse any of the alternatives - above.) + to guess what it means without reading the docs. (This abbreviation + was used in PEP discussions with the meaning ``ltdf=False`` is the + earlier by those who didn't want to endorse any of the alternatives.) .. _original: https://mail.python.org/pipermail/python-dev/2015-April/139099.html .. _independently proposed: https://mail.python.org/pipermail/datetime-sig/2015-August/000479.html @@ -585,7 +739,7 @@ naive datetimes. 
This leaves us with only one situation where an existing program can -start producing diferent results after the implementation of this PEP: +start producing different results after the implementation of this PEP: when a ``datetime.timestamp()`` method is called on a naive datetime instance that happen to be in the fold or the gap. In the current implementation, the result is undefined. Depending on the system @@ -638,13 +792,13 @@ Note that 12:00 was interpreted as 13:00 by ``mktime``. With the ``datetime.timestamp``, ``datetime.fromtimestamp``, it is currently -guaranteed that +guaranteed that .. code:: >>> t = datetime.datetime(2015, 6, 1, 12).timestamp() >>> datetime.datetime.fromtimestamp(t) - datetime.datetime(2015, 6, 1, 12, 0) + datetime.datetime(2015, 6, 1, 12, 0) This PEP extends the same guarantee to both values of ``fold``: @@ -652,13 +806,13 @@ >>> t = datetime.datetime(2015, 6, 1, 12, fold=0).timestamp() >>> datetime.datetime.fromtimestamp(t) - datetime.datetime(2015, 6, 1, 12, 0) + datetime.datetime(2015, 6, 1, 12, 0) .. code:: >>> t = datetime.datetime(2015, 6, 1, 12, fold=1).timestamp() >>> datetime.datetime.fromtimestamp(t) - datetime.datetime(2015, 6, 1, 12, 0) + datetime.datetime(2015, 6, 1, 12, 0) Thus one of the suggested uses for ``fold=-1`` -- to match the legacy behavior -- is not needed. Either choice of ``fold`` will match the @@ -708,7 +862,7 @@ Implementation ============== -* Github fork: https://github.com/abalkin/cpython +* Github fork: https://github.com/abalkin/cpython/tree/issue24773-s3 * Tracker issue: http://bugs.python.org/issue24773 diff --git a/pep-0498.txt b/pep-0498.txt --- a/pep-0498.txt +++ b/pep-0498.txt @@ -8,7 +8,7 @@ Content-Type: text/x-rst Created: 01-Aug-2015 Python-Version: 3.6 -Post-History: 07-Aug-2015, 30-Aug-2015, 04-Sep-2015 +Post-History: 07-Aug-2015, 30-Aug-2015, 04-Sep-2015, 19-Sep-2015 Resolution: https://mail.python.org/pipermail/python-dev/2015-September/141526.html Abstract @@ -173,8 +173,7 @@ letter 'f' or 'F'. Everywhere this PEP uses 'f', 'F' may also be used. 'f' may be combined with 'r', in either order, to produce raw f-string literals. 'f' may not be combined with 'b': this PEP does not -propose to add binary f-strings. 'f' may also be combined with 'u', in -either order, although adding 'u' has no effect. +propose to add binary f-strings. 'f' may not be combined with 'u'. When tokenizing source files, f-strings use the same rules as normal strings, raw strings, binary strings, and triple quoted strings. That @@ -198,9 +197,14 @@ expressions are evaluated, formatted with the existing __format__ protocol, then the results are concatenated together with the string literals. While scanning the string for expressions, any doubled -braces ``'{{'`` or ``'}}'`` are replaced by the corresponding single -brace. Doubled opening braces do not signify the start of an -expression. +braces ``'{{'`` or ``'}}'`` inside literal portions of an f-string are +replaced by the corresponding single brace. Doubled opening braces do +not signify the start of an expression. + +Note that ``__format__()`` is not called directly on each value. The +actual code uses the equivalent of ``type(value).__format__(value, +format_spec)``, or ``format(value, format_spec)``. See the +documentation of the builtin ``format()`` function for more details. Comments, using the ``'#'`` character, are not allowed inside an expression. @@ -210,7 +214,7 @@ ``'!a'``. 
These are treated the same as in ``str.format()``: ``'!s'`` calls ``str()`` on the expression, ``'!r'`` calls ``repr()`` on the expression, and ``'!a'`` calls ``ascii()`` on the expression. These -conversions are applied before the call to ``__format__``. The only +conversions are applied before the call to ``format()``. The only reason to use ``'!s'`` is if you want to specify a format specifier that applies to ``str``, not to the type of the expression. @@ -221,11 +225,11 @@ So, an f-string looks like:: - f ' { } text ... ' + f ' { } ... ' -The resulting expression's ``__format__`` method is called with the -format specifier. The resulting value is used when building the value -of the f-string. +The expression is then formatted using the ``__format__`` protocol, +using the format specifier as an argument. The resulting value is +used when building the value of the f-string. Expressions cannot contain ``':'`` or ``'!'`` outside of strings or parentheses, brackets, or braces. The exception is that the ``'!='`` @@ -290,11 +294,11 @@ For example, this code:: - f'abc{expr1:spec1}{expr2!r:spec2}def{expr3:!s}ghi' + f'abc{expr1:spec1}{expr2!r:spec2}def{expr3}ghi' Might be be evaluated as:: - 'abc' + expr1.__format__(spec1) + repr(expr2).__format__(spec2) + 'def' + str(spec3).__format__('') + 'ghi' + 'abc' + format(expr1, spec1) + format(repr(expr2), spec2) + 'def' + format(expr3) + 'ghi' Expression evaluation --------------------- @@ -372,7 +376,15 @@ While the exact method of this run time concatenation is unspecified, the above code might evaluate to:: - 'ab' + x.__format__('') + '{c}' + 'str<' + y.__format__('^4') + 'de' + 'ab' + format(x) + '{c}' + 'str<' + format(y, '^4') + '>de' + +Each f-string is entirely evaluated before being concatenated to +adjacent f-strings. That means that this:: + + >>> f'{x' f'}' + +Is a syntax error, because the first f-string does not contain a +closing brace. Error handling -------------- @@ -386,15 +398,13 @@ >>> f'x={x' File "", line 1 - SyntaxError: missing '}' in format string expression + SyntaxError: f-string: expecting '}' Invalid expressions:: >>> f'x={!x}' - File "", line 1 - !x - ^ - SyntaxError: invalid syntax + File "", line 1 + SyntaxError: f-string: empty expression not allowed Run time errors occur when evaluating the expressions inside an f-string. Note that an f-string can be evaluated multiple times, and @@ -425,7 +435,8 @@ --------------------------------------------------------- For ease of readability, leading and trailing whitespace in -expressions is ignored. +expressions is ignored. This is a by-product of enclosing the +expression in parentheses before evaluation. Evaluation order of expressions ------------------------------- @@ -577,8 +588,8 @@ Triple quoted f-strings are allowed. These strings are parsed just as normal triple-quoted strings are. After parsing and decoding, the -normal f-string logic is applied, and ``__format__()`` on each value -is called. +normal f-string logic is applied, and ``__format__()`` is called on +each value. Raw f-strings ------------- @@ -653,6 +664,14 @@ >>> f'{(lambda x: x*2)(3)}' '6' +Can't combine with 'u' +-------------------------- + +The 'u' prefix was added to Python 3.3 in PEP 414 as a means to ease +source compatibility with Python 2.7. Because Python 2.7 will never +support f-strings, there is nothing to be gained by being able to +combine the 'f' prefix with 'u'. 
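As a rough sketch of the evaluation rules described above (the name
``name`` and the literal value are illustrative only, not taken from the
PEP), an f-string with a conversion and a format specifier is built from
ordinary ``format()`` calls::

    name = 'Fred'

    # f'Name: {name!r:>10}' would be built roughly as:
    result = 'Name: ' + format(repr(name), '>10')
    # repr(name) is "'Fred'", right-aligned to 10 characters by format()
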
+ Examples from Python's source code ================================== diff --git a/pep-0500.txt b/pep-0500.txt --- a/pep-0500.txt +++ b/pep-0500.txt @@ -5,12 +5,12 @@ Last-Modified: $Date$ Author: Alexander Belopolsky , Tim Peters Discussions-To: Datetime-SIG -Status: Draft +Status: Rejected Type: Standards Track Content-Type: text/x-rst Requires: 495 Created: 08-Aug-2015 - +Resolution: https://mail.python.org/pipermail/datetime-sig/2015-August/000354.html Abstract ======== diff --git a/pep-0502.txt b/pep-0502.txt --- a/pep-0502.txt +++ b/pep-0502.txt @@ -1,44 +1,46 @@ PEP: 502 -Title: String Interpolation Redux +Title: String Interpolation - Extended Discussion Version: $Revision$ Last-Modified: $Date$ Author: Mike G. Miller Status: Draft -Type: Standards Track +Type: Informational Content-Type: text/x-rst Created: 10-Aug-2015 Python-Version: 3.6 -Note: Open issues below are stated with a question mark (?), -and are therefore searchable. - Abstract ======== -This proposal describes a new string interpolation feature for Python, -called an *expression-string*, -that is both concise and powerful, -improves readability in most cases, -yet does not conflict with existing code. +PEP 498: *Literal String Interpolation*, which proposed "formatted strings" was +accepted September 9th, 2015. +Additional background and rationale given during its design phase is detailed +below. -To achieve this end, -a new string prefix is introduced, -which expands at compile-time into an equivalent expression-string object, -with requested variables from its context passed as keyword arguments. +To recap that PEP, +a string prefix was introduced that marks the string as a template to be +rendered. +These formatted strings may contain one or more expressions +built on `the existing syntax`_ of ``str.format()``. +The formatted string expands at compile-time into a conventional string format +operation, +with the given expressions from its text extracted and passed instead as +positional arguments. + At runtime, -the new object uses these passed values to render a string to given -specifications, building on `the existing syntax`_ of ``str.format()``:: +the resulting expressions are evaluated to render a string to given +specifications:: >>> location = 'World' - >>> e'Hello, {location} !' # new prefix: e'' - 'Hello, World !' # interpolated result + >>> f'Hello, {location} !' # new prefix: f'' + 'Hello, World !' # interpolated result + +Format-strings may be thought of as merely syntactic sugar to simplify traditional +calls to ``str.format()``. .. _the existing syntax: https://docs.python.org/3/library/string.html#format-string-syntax -This PEP does not recommend to remove or deprecate any of the existing string -formatting mechanisms. - Motivation ========== @@ -50,12 +52,16 @@ with similar use cases, the amount of code necessary to build similar strings is substantially higher, while at times offering lower readability due to verbosity, dense syntax, -or identifier duplication. [1]_ +or identifier duplication. + +These difficulties are described at moderate length in the original +`post to python-ideas`_ +that started the snowball (that became PEP 498) rolling. [1]_ Furthermore, replacement of the print statement with the more consistent print function of Python 3 (PEP 3105) has added one additional minor burden, an additional set of parentheses to type and read. 
-Combined with the verbosity of current formatting solutions, +Combined with the verbosity of current string formatting solutions, this puts an otherwise simple language at an unfortunate disadvantage to its peers:: @@ -66,7 +72,7 @@ # Python 3, str.format with named parameters print('Hello, user: {user}, id: {id}, on host: {hostname}'.format(**locals())) - # Python 3, variation B, worst case + # Python 3, worst case print('Hello, user: {user}, id: {id}, on host: {hostname}'.format(user=user, id=id, hostname= @@ -74,7 +80,7 @@ In Python, the formatting and printing of a string with multiple variables in a single line of code of standard width is noticeably harder and more verbose, -indentation often exacerbating the issue. +with indentation exacerbating the issue. For use cases such as smaller projects, systems programming, shell script replacements, and even one-liners, @@ -82,36 +88,17 @@ this verbosity has likely lead a significant number of developers and administrators to choose other languages over the years. +.. _post to python-ideas: https://mail.python.org/pipermail/python-ideas/2015-July/034659.html + Rationale ========= -Naming ------- - -The term expression-string was chosen because other applicable terms, -such as format-string and template are already well used in the Python standard -library. - -The string prefix itself, ``e''`` was chosen to demonstrate that the -specification enables expressions, -is not limited to ``str.format()`` syntax, -and also does not lend itself to `the shorthand term`_ "f-string". -It is also slightly easier to type than other choices such as ``_''`` and -``i''``, -while perhaps `less odd-looking`_ to C-developers. -``printf('')`` vs. ``print(f'')``. - -.. _the shorthand term: reference_needed -.. _less odd-looking: https://mail.python.org/pipermail/python-dev/2015-August/141147.html - - - Goals ------------- -The design goals of expression-strings are as follows: +The design goals of format strings are as follows: #. Eliminate need to pass variables manually. #. Eliminate repetition of identifiers and redundant parentheses. @@ -133,40 +120,44 @@ characters to enclose strings. It is not reasonable to choose one of them now to enable interpolation, while leaving the other for uninterpolated strings. -"Backtick" characters (`````) are also `constrained by history`_ as a shortcut -for ``repr()``. +Other characters, +such as the "Backtick" (or grave accent `````) are also +`constrained by history`_ +as a shortcut for ``repr()``. This leaves a few remaining options for the design of such a feature: * An operator, as in printf-style string formatting via ``%``. * A class, such as ``string.Template()``. -* A function, such as ``str.format()``. -* New syntax +* A method or function, such as ``str.format()``. +* New syntax, or * A new string prefix marker, such as the well-known ``r''`` or ``u''``. -The first three options above currently work well. +The first three options above are mature. Each has specific use cases and drawbacks, yet also suffer from the verbosity and visual noise mentioned previously. -All are discussed in the next section. +All options are discussed in the next sections. .. _constrained by history: https://mail.python.org/pipermail/python-ideas/2007-January/000054.html + Background ------------- -This proposal builds on several existing techniques and proposals and what +Formatted strings build on several existing techniques and proposals and what we've collectively learned from them. 
+In keeping with the design goals of readability and error-prevention, +the following examples therefore use named, +not positional arguments. -The following examples focus on the design goals of readability and -error-prevention using named parameters. Let's assume we have the following dictionary, and would like to print out its items as an informative string for end users:: >>> params = {'user': 'nobody', 'id': 9, 'hostname': 'darkstar'} -Printf-style formatting -''''''''''''''''''''''' +Printf-style formatting, via operator +''''''''''''''''''''''''''''''''''''' This `venerable technique`_ continues to have its uses, such as with byte-based protocols, @@ -178,7 +169,7 @@ In this form, considering the prerequisite dictionary creation, the technique is verbose, a tad noisy, -and relatively readable. +yet relatively readable. Additional issues are that an operator can only take one argument besides the original string, meaning multiple parameters must be passed in a tuple or dictionary. @@ -190,8 +181,8 @@ .. _venerable technique: https://docs.python.org/3/library/stdtypes.html#printf-style-string-formatting -string.Template -''''''''''''''' +string.Template Class +''''''''''''''''''''' The ``string.Template`` `class from`_ PEP 292 (Simpler String Substitutions) @@ -202,7 +193,7 @@ Template('Hello, user: $user, id: ${id}, on host: $hostname').substitute(params) -Also verbose, however the string itself is readable. +While also verbose, the string itself is readable. Though functionality is limited, it meets its requirements well. It isn't powerful enough for many cases, @@ -232,8 +223,8 @@ It was superseded by the following proposal. -str.format() -'''''''''''' +str.format() Method +''''''''''''''''''' The ``str.format()`` `syntax of`_ PEP 3101 is the most recent and modern of the existing options. @@ -253,36 +244,32 @@ host=hostname) 'Hello, user: nobody, id: 9, on host: darkstar' +The verbosity of the method-based approach is illustrated here. + .. _syntax of: https://docs.python.org/3/library/string.html#format-string-syntax PEP 498 -- Literal String Formatting '''''''''''''''''''''''''''''''''''' -PEP 498 discusses and delves partially into implementation details of -expression-strings, -which it calls f-strings, -the idea and syntax -(with exception of the prefix letter) -of which is identical to that discussed here. -The resulting compile-time transformation however -returns a string joined from parts at runtime, -rather than an object. +PEP 498 defines and discusses format strings, +as also described in the `Abstract`_ above. -It also, somewhat controversially to those first exposed to it, -introduces the idea that these strings shall be augmented with support for -arbitrary expressions, -which is discussed further in the following sections. - +It also, somewhat controversially to those first exposed, +introduces the idea that format-strings shall be augmented with support for +arbitrary expressions. +This is discussed further in the +Restricting Syntax section under +`Rejected Ideas`_. PEP 501 -- Translation ready string interpolation ''''''''''''''''''''''''''''''''''''''''''''''''' The complimentary PEP 501 brings internationalization into the discussion as a -first-class concern, with its proposal of i-strings, +first-class concern, with its proposal of the i-prefix, ``string.Template`` syntax integration compatible with ES6 (Javascript), deferred rendering, -and a similar object return value. +and an object return value. 
Implementations in Other Languages @@ -374,7 +361,8 @@ Designers of `Template strings`_ faced the same issue as Python where single and double quotes were taken. Unlike Python however, "backticks" were not. -They were chosen as part of the ECMAScript 2015 (ES6) standard:: +Despite `their issues`_, +they were chosen as part of the ECMAScript 2015 (ES6) standard:: console.log(`Fifteen is ${a + b} and\nnot ${2 * a + b}.`); @@ -391,8 +379,10 @@ * User implemented prefixes supported. * Arbitrary expressions are supported. +.. _their issues: https://mail.python.org/pipermail/python-ideas/2007-January/000054.html .. _Template strings: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/template_strings + C#, Version 6 ''''''''''''' @@ -428,13 +418,14 @@ Additional examples ''''''''''''''''''' -A number of additional examples may be `found at Wikipedia`_. +A number of additional examples of string interpolation may be +`found at Wikipedia`_. + +Now that background and history have been covered, +let's continue on for a solution. .. _found at Wikipedia: https://en.wikipedia.org/wiki/String_interpolation#Examples -Now that background and imlementation history have been covered, -let's continue on for a solution. - New Syntax ---------- @@ -442,178 +433,47 @@ This should be an option of last resort, as every new syntax feature has a cost in terms of real-estate in a brain it inhabits. -There is one alternative left on our list of possibilities, +There is however one alternative left on our list of possibilities, which follows. New String Prefix ----------------- -Given the history of string formatting in Python, -backwards-compatibility, +Given the history of string formatting in Python and backwards-compatibility, implementations in other languages, -and the avoidance of new syntax unless necessary, +avoidance of new syntax unless necessary, an acceptable design is reached through elimination rather than unique insight. -Therefore, we choose to explicitly mark interpolated string literals with a -string prefix. +Therefore, marking interpolated string literals with a string prefix is chosen. -We also choose an expression syntax that reuses and builds on the strongest of +We also choose an expression syntax that reuses and builds on the strongest of the existing choices, -``str.format()`` to avoid further duplication. - - -Specification -============= - -String literals with the prefix of ``e`` shall be converted at compile-time to -the construction of an ``estr`` (perhaps ``types.ExpressionString``?) object. -Strings and values are parsed from the literal and passed as tuples to the -constructor:: +``str.format()`` to avoid further duplication of functionality:: >>> location = 'World' - >>> e'Hello, {location} !' + >>> f'Hello, {location} !' # new prefix: f'' + 'Hello, World !' # interpolated result - # becomes - # estr('Hello, {location} !', # template - ('Hello, ', ' !'), # string fragments - ('location',), # expressions - ('World',), # values - ) +PEP 498 -- Literal String Formatting, delves into the mechanics and +implementation of this design. -The object interpolates its result immediately at run-time:: - 'Hello, World !' - - -ExpressionString Objects ------------------------- - -The ExpressionString object supports both immediate and deferred rendering of -its given template and parameters. -It does this by immediately rendering its inputs to its internal string and -``.rendered`` string member (still necessary?), -useful in the majority of use cases. 
-To allow for deferred rendering and caller-specified escaping, -all inputs are saved for later inspection, -with convenience methods available. - -Notes: - -* Inputs are saved to the object as ``.template`` and ``.context`` members - for later use. -* No explicit ``str(estr)`` call is necessary to render the result, - though doing so might be desired to free resources if significant. -* Additional or deferred rendering is available through the ``.render()`` - method, which allows template and context to be overriden for flexibility. -* Manual escaping of potentially dangerous input is available through the - ``.escape(escape_function)`` method, - the rules of which may therefore be specified by the caller. - The given function should both accept and return a single modified string. - -* A sample Python implementation can `found at Bitbucket`_: - -.. _found at Bitbucket: https://bitbucket.org/mixmastamyk/docs/src/default/pep/estring_demo.py - - -Inherits From ``str`` Type -''''''''''''''''''''''''''' - -Inheriting from the ``str`` class is one of the techniques available to improve -compatibility with code expecting a string object, -as it will pass an ``isinstance(obj, str)`` test. -ExpressionString implements this and also renders its result into the "raw" -string of its string superclass, -providing compatibility with a majority of code. - - -Interpolation Syntax --------------------- - -The strongest of the existing string formatting syntaxes is chosen, -``str.format()`` as a base to build on. [10]_ [11]_ - -.. - -* Additionally, single arbitrary expressions shall also be supported inside - braces as an extension:: - - >>> e'My age is {age + 1} years.' - - See below for section on safety. - -* Triple quoted strings with multiple lines shall be supported:: - - >>> e'''Hello, - {location} !''' - 'Hello,\n World !' - -* Adjacent implicit concatenation shall be supported; - interpolation does not `not bleed into`_ other strings:: - - >>> 'Hello {1, 2, 3} ' e'{location} !' - 'Hello {1, 2, 3} World !' - -* Additional implementation details, - for example expression and error-handling, - are specified in the compatible PEP 498. - -.. _not bleed into: https://mail.python.org/pipermail/python-ideas/2015-July/034763.html - - -Composition with Other Prefixes -------------------------------- - -* Expression-strings apply to unicode objects only, - therefore ``u''`` is never needed. - Should it be prevented? - -* Bytes objects are not included here and do not compose with e'' as they - do not support ``__format__()``. - -* Complimentary to raw strings, - backslash codes shall not be converted in the expression-string, - when combined with ``r''`` as ``re''``. - - -Examples --------- - -A more complicated example follows:: - - n = 5; # t0, t1 = ? TODO - a = e"Sliced {n} onions in {t1-t0:.3f} seconds." - # returns the equvalent of - estr("Sliced {n} onions in {t1-t0:.3f} seconds", # template - ('Sliced ', ' onions in ', ' seconds'), # strings - ('n', 't1-t0:.3f'), # expressions - (5, 0.555555) # values - ) - -With expressions only:: - - b = e"Three random numbers: {rand()}, {rand()}, {rand()}." 
- # returns the equvalent of - estr("Three random numbers: {rand():f}, {rand():f}, {rand():}.", # template - ('Three random numbers: ', ', ', ', ', '.'), # strings - ('rand():f', 'rand():f', 'rand():f'), # expressions - (rand(), rand(), rand()) # values - ) +Additional Topics +================= Safety ----------- In this section we will describe the safety situation and precautions taken -in support of expression-strings. +in support of format-strings. -#. Only string literals shall be considered here, +#. Only string literals have been considered for format-strings, not variables to be taken as input or passed around, making external attacks difficult to accomplish. - * ``str.format()`` `already handles`_ this use-case. - * Direct instantiation of the ExpressionString object with non-literal input - shall not be allowed. (Practicality?) + ``str.format()`` and alternatives `already handle`_ this use-case. #. Neither ``locals()`` nor ``globals()`` are necessary nor used during the transformation, @@ -622,37 +482,72 @@ #. To eliminate complexity as well as ``RuntimeError`` (s) due to recursion depth, recursive interpolation is not supported. -#. Restricted characters or expression classes?, such as ``=`` for assignment. - However, mistakes or malicious code could be missed inside string literals. Though that can be said of code in general, that these expressions are inside strings means they are a bit more likely to be obscured. -.. _already handles: https://mail.python.org/pipermail/python-ideas/2015-July/034729.html +.. _already handle: https://mail.python.org/pipermail/python-ideas/2015-July/034729.html -Mitigation via tools +Mitigation via Tools '''''''''''''''''''' The idea is that tools or linters such as pyflakes, pylint, or Pycharm, -could check inside strings for constructs that exceed project policy. -As this is a common task with languages these days, -tools won't have to implement this feature solely for Python, +may check inside strings with expressions and mark them up appropriately. +As this is a common task with programming languages today, +multi-language tools won't have to implement this feature solely for Python, significantly shortening time to implementation. -Additionally the Python interpreter could check(?) and warn with appropriate -command-line parameters passed. +Farther in the future, +strings might also be checked for constructs that exceed the safety policy of +a project. + + +Style Guide/Precautions +----------------------- + +As arbitrary expressions may accomplish anything a Python expression is +able to, +it is highly recommended to avoid constructs inside format-strings that could +cause side effects. + +Further guidelines may be written once usage patterns and true problems are +known. + + +Reference Implementation(s) +--------------------------- + +The `say module on PyPI`_ implements string interpolation as described here +with the small burden of a callable interface:: + + ? pip install say + + from say import say + nums = list(range(4)) + say("Nums has {len(nums)} items: {nums}") + +A Python implementation of Ruby interpolation `is also available`_. +It uses the codecs module to do its work:: + + ? pip install interpy + + # coding: interpy + location = 'World' + print("Hello #{location}.") + +.. _say module on PyPI: https://pypi.python.org/pypi/say/ +.. 
_is also available: https://github.com/syrusakbary/interpy Backwards Compatibility ----------------------- -By using existing syntax and avoiding use of current or historical features, -expression-strings (and any associated sub-features), -were designed so as to not interfere with existing code and is not expected -to cause any issues. +By using existing syntax and avoiding current or historical features, +format strings were designed so as to not interfere with existing code and are +not expected to cause any issues. Postponed Ideas @@ -666,20 +561,12 @@ the finer details diverge at almost every point, making a common solution unlikely: [15]_ -* Use-cases -* Compile and run-time tasks -* Interpolation Syntax +* Use-cases differ +* Compile vs. run-time tasks +* Interpolation syntax needs * Intended audience * Security policy -Rather than try to fit a "square peg in a round hole," -this PEP attempts to allow internationalization to be supported in the future -by not preventing it. -In this proposal, -expression-string inputs are saved for inspection and re-rendering at a later -time, -allowing for their use by an external library of any sort. - Rejected Ideas -------------- @@ -687,17 +574,24 @@ Restricting Syntax to ``str.format()`` Only ''''''''''''''''''''''''''''''''''''''''''' -This was deemed not enough of a solution to the problem. +The common `arguments against`_ support of arbitrary expresssions were: + +#. `YAGNI`_, "You aren't gonna need it." +#. The feature is not congruent with historical Python conservatism. +#. Postpone - can implement in a future version if need is demonstrated. + +.. _YAGNI: https://en.wikipedia.org/wiki/You_aren't_gonna_need_it +.. _arguments against: https://mail.python.org/pipermail/python-ideas/2015-August/034913.html + +Support of only ``str.format()`` syntax however, +was deemed not enough of a solution to the problem. +Often a simple length or increment of an object, for example, +is desired before printing. + It can be seen in the `Implementations in Other Languages`_ section that the developer community at large tends to agree. - -The common `arguments against`_ arbitrary expresssions were: - -#. YAGNI, "You ain't gonna need it." -#. The change is not congruent with historical Python conservatism. -#. Postpone - can implement in a future version if need is demonstrated. - -.. _arguments against: https://mail.python.org/pipermail/python-ideas/2015-August/034913.html +String interpolation with arbitrary expresssions is becoming an industry +standard in modern languages due to its utility. Additional/Custom String-Prefixes @@ -720,7 +614,7 @@ expressions could be used safely or not. The concept was also difficult to describe to others. [12]_ -Always consider expression-string variables to be unescaped, +Always consider format string variables to be unescaped, unless the developer has explicitly escaped them. @@ -735,33 +629,13 @@ which could encourage bad habits. [13]_ -Reference Implementation(s) -=========================== - -An expression-string implementation is currently attached to PEP 498, -under the ``f''`` prefix, -and may be available in nightly builds. - -A Python implementation of Ruby interpolation `is also available`_, -which is similar to this proposal. -It uses the codecs module to do its work:: - - ? pip install interpy - - # coding: interpy - location = 'World' - print("Hello #{location}.") - -.. _is also available: https://github.com/syrusakbary/interpy - - Acknowledgements ================ -* Eric V. 
Smith for providing invaluable implementation work and design - opinions, helping to focus this PEP. -* Others on the python-ideas mailing list for rejecting the craziest of ideas, - also helping to achieve focus. +* Eric V. Smith for the authoring and implementation of PEP 498. +* Everyone on the python-ideas mailing list for rejecting the various crazy + ideas that came up, + helping to keep the final design in focus. References @@ -771,7 +645,6 @@ (https://mail.python.org/pipermail/python-ideas/2015-July/034659.html) - .. [2] Briefer String Format (https://mail.python.org/pipermail/python-ideas/2015-July/034669.html) diff --git a/pep-0503.txt b/pep-0503.txt --- a/pep-0503.txt +++ b/pep-0503.txt @@ -5,11 +5,12 @@ Author: Donald Stufft BDFL-Delegate: Donald Stufft Discussions-To: distutils-sig at python.org -Status: Draft +Status: Accepted Type: Informational Content-Type: text/x-rst Created: 04-Sep-2015 Post-History: 04-Sep-2015 +Resolution: https://mail.python.org/pipermail/distutils-sig/2015-September/026899.html Abstract @@ -91,6 +92,10 @@ associated signature, the signature would be located at ``/packages/HolyGrail-1.0.tar.gz.asc``. +* A repository **MAY** include a ``data-gpg-sig`` attribute on a file link with + a value of either ``true`` or ``false`` to indicate whether or not there is a + GPG signature. Repositories that do this **SHOULD** include it on every link. + Normalized Names ---------------- diff --git a/pep-0504.txt b/pep-0504.txt new file mode 100644 --- /dev/null +++ b/pep-0504.txt @@ -0,0 +1,396 @@ +PEP: 504 +Title: Using the System RNG by default +Version: $Revision$ +Last-Modified: $Date$ +Author: Nick Coghlan +Status: Withdrawn +Type: Standards Track +Content-Type: text/x-rst +Created: 15-Sep-2015 +Python-Version: 3.6 +Post-History: 15-Sep-2015 + +Abstract +======== + +Python currently defaults to using the deterministic Mersenne Twister random +number generator for the module level APIs in the ``random`` module, requiring +users to know that when they're performing "security sensitive" work, they +should instead switch to using the cryptographically secure ``os.urandom`` or +``random.SystemRandom`` interfaces or a third party library like +``cryptography``. + +Unfortunately, this approach has resulted in a situation where developers that +aren't aware that they're doing security sensitive work use the default module +level APIs, and thus expose their users to unnecessary risks. + +This isn't an acute problem, but it is a chronic one, and the often long +delays between the introduction of security flaws and their exploitation means +that it is difficult for developers to naturally learn from experience. + +In order to provide an eventually pervasive solution to the problem, this PEP +proposes that Python switch to using the system random number generator by +default in Python 3.6, and require developers to opt-in to using the +deterministic random number generator process wide either by using a new +``random.ensure_repeatable()`` API, or by explicitly creating their own +``random.Random()`` instance. + +To minimise the impact on existing code, module level APIs that require +determinism will implicitly switch to the deterministic PRNG. + +PEP Withdrawal +============== + +During discussion of this PEP, Steven D'Aprano proposed the simpler alternative +of offering a standardised ``secrets`` module that provides "one obvious way" +to handle security sensitive tasks like generating default passwords and other +tokens. 
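As a brief illustrative sketch of the distinction drawn in the abstract
(the token-generation code below is an assumed example, not taken from the
PEP), the module-level API is currently backed by the deterministic
Mersenne Twister, while ``random.SystemRandom`` explicitly uses the system
RNG::

    import random
    import string

    alphabet = string.ascii_letters + string.digits

    # Not suitable for security tokens today: module-level API,
    # backed by the deterministic Mersenne Twister by default.
    weak_token = ''.join(random.choice(alphabet) for _ in range(32))

    # The currently recommended spelling: explicitly use the system RNG.
    sysrand = random.SystemRandom()
    strong_token = ''.join(sysrand.choice(alphabet) for _ in range(32))

Under this PEP, the first spelling would also use the system RNG unless
``random.ensure_repeatable()`` had been called in the process.
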
+ +Steven's proposal has the desired effect of aligning the easy way to generate +such tokens and the right way to generate them, without introducing any +compatibility risks for the existing ``random`` module API, so this PEP has +been withdrawn in favour of further work on refining Steven's proposal as +PEP 506. + + +Proposal +======== + +Currently, it is never correct to use the module level functions in the +``random`` module for security sensitive applications. This PEP proposes to +change that admonition in Python 3.6+ to instead be that it is not correct to +use the module level functions in the ``random`` module for security sensitive +applications if ``random.ensure_repeatable()`` is ever called (directly or +indirectly) in that process. + +To achieve this, rather than being bound methods of a ``random.Random`` +instance as they are today, the module level callables in ``random`` would +change to be functions that delegate to the corresponding method of the +existing ``random._inst`` module attribute. + +By default, this attribute will be bound to a ``random.SystemRandom`` instance. + +A new ``random.ensure_repeatable()`` API will then rebind the ``random._inst`` +attribute to a ``system.Random`` instance, restoring the same module level +API behaviour as existed in previous Python versions (aside from the +additional level of indirection):: + + def ensure_repeatable(): + """Switch to using random.Random() for the module level APIs + + This switches the default RNG instance from the crytographically + secure random.SystemRandom() to the deterministic random.Random(), + enabling the seed(), getstate() and setstate() operations. This means + a particular random scenario can be replayed later by providing the + same seed value or restoring a previously saved state. + + NOTE: Libraries implementing security sensitive operations should + always explicitly use random.SystemRandom() or os.urandom in order to + correctly handle applications that call this function. + """ + if not isinstance(_inst, Random): + _inst = random.Random() + +To minimise the impact on existing code, calling any of the following module +level functions will implicitly call ``random.ensure_repeatable()``: + +* ``random.seed`` +* ``random.getstate`` +* ``random.setstate`` + +There are no changes proposed to the ``random.Random`` or +``random.SystemRandom`` class APIs - applications that explicitly instantiate +their own random number generators will be entirely unaffected by this +proposal. + +Warning on implicit opt-in +-------------------------- + +In Python 3.6, implicitly opting in to the use of the deterministic PRNG will +emit a deprecation warning using the following check:: + + if not isinstance(_inst, Random): + warnings.warn(DeprecationWarning, + "Implicitly ensuring repeatability. " + "See help(random.ensure_repeatable) for details") + ensure_repeatable() + +The specific wording of the warning should have a suitable answer added to +Stack Overflow as was done for the custom error message that was added for +missing parentheses in a call to print [#print]_. + +In the first Python 3 release after Python 2.7 switches to security fix only +mode, the deprecation warning will be upgraded to a RuntimeWarning so it is +visible by default. + +This PEP does *not* propose ever removing the ability to ensure the default RNG +used process wide is a deterministic PRNG that will produce the same series of +outputs given a specific seed. 
That capability is widely used in modelling +and simulation scenarios, and requiring that ``ensure_repeatable()`` be called +either directly or indirectly is a sufficient enhancement to address the cases +where the module level random API is used for security sensitive tasks in web +applications without due consideration for the potential security implications +of using a deterministic PRNG. + +Performance impact +------------------ + +Due to the large performance difference between ``random.Random`` and +``random.SystemRandom``, applications ported to Python 3.6 will encounter a +significant performance regression in cases where: + +* the application is using the module level random API +* cryptographic quality randomness isn't needed +* the application doesn't already implicitly opt back in to the deterministic + PRNG by calling ``random.seed``, ``random.getstate``, or ``random.setstate`` +* the application isn't updated to explicitly call ``random.ensure_repeatable`` + +This would be noted in the Porting section of the Python 3.6 What's New guide, +with the recommendation to include the following code in the ``__main__`` +module of affected applications:: + + if hasattr(random, "ensure_repeatable"): + random.ensure_repeatable() + +Applications that do need cryptographic quality randomness should be using the +system random number generator regardless of speed considerations, so in those +cases the change proposed in this PEP will fix a previously latent security +defect. + +Documentation changes +--------------------- + +The ``random`` module documentation would be updated to move the documentation +of the ``seed``, ``getstate`` and ``setstate`` interfaces later in the module, +along with the documentation of the new ``ensure_repeatable`` function and the +associated security warning. + +That section of the module documentation would also gain a discussion of the +respective use cases for the deterministic PRNG enabled by +``ensure_repeatable`` (games, modelling & simulation, software testing) and the +system RNG that is used by default (cryptography, security token generation). +This discussion will also recommend the use of third party security libraries +for the latter task. + +Rationale +========= + +Writing secure software under deadline and budget pressures is a hard problem. +This is reflected in regular notifications of data breaches involving personally +identifiable information [#breaches]_, as well as with failures to take +security considerations into account when new systems, like motor vehicles +[#uconnect]_, are connected to the internet. It's also the case that a lot of +the programming advice readily available on the internet [#search] simply +doesn't take the mathemetical arcana of computer security into account. +Compounding these issues is the fact that defenders have to cover *all* of +their potential vulnerabilites, as a single mistake can make it possible to +subvert other defences [#bcrypt]_. 
+ +One of the factors that contributes to making this last aspect particularly +difficult is APIs where using them inappropriately creates a *silent* security +failure - one where the only way to find out that what you're doing is +incorrect is for someone reviewing your code to say "that's a potential +security problem", or for a system you're responsible for to be compromised +through such an oversight (and you're not only still responsible for that +system when it is compromised, but your intrusion detection and auditing +mechanisms are good enough for you to be able to figure out after the event +how the compromise took place). + +This kind of situation is a significant contributor to "security fatigue", +where developers (often rightly [#owasptopten]_) feel that security engineers +spend all their time saying "don't do that the easy way, it creates a +security vulnerability". + +As the designers of one of the world's most popular languages [#ieeetopten]_, +we can help reduce that problem by making the easy way the right way (or at +least the "not wrong" way) in more circumstances, so developers and security +engineers can spend more time worrying about mitigating actually interesting +threats, and less time fighting with default language behaviours. + +Discussion +========== + +Why "ensure_repeatable" over "ensure_deterministic"? +---------------------------------------------------- + +This is a case where the meaning of a word as specialist jargon conflicts with +the typical meaning of the word, even though it's *technically* the same. + +From a technical perspective, a "deterministic RNG" means that given knowledge +of the algorithm and the current state, you can reliably compute arbitrary +future states. + +The problem is that "deterministic" on its own doesn't convey those qualifiers, +so it's likely to instead be interpreted as "predictable" or "not random" by +folks that are familiar with the conventional meaning, but aren't familiar with +the additional qualifiers on the technical meaning. + +A second problem with "deterministic" as a description for the traditional RNG +is that it doesn't really tell you what you can *do* with the traditional RNG +that you can't do with the system one. + +"ensure_repeatable" aims to address both of those problems, as its common +meaning accurately describes the main reason for preferring the deterministic +PRNG over the system RNG: ensuring you can repeat the same series of outputs +by providing the same seed value, or by restoring a previously saved PRNG state. + +Only changing the default for Python 3.6+ +----------------------------------------- + +Some other recent security changes, such as upgrading the capabilities of the +``ssl`` module and switching to properly verifying HTTPS certificates by +default, have been considered critical enough to justify backporting the +change to all currently supported versions of Python. + +The difference in this case is one of degree - the additional benefits from +rolling out this particular change a couple of years earlier than will +otherwise be the case aren't sufficient to justify either the additional effort +or the stability risks involved in making such an intrusive change in a +maintenance release. 
+ +Keeping the module level functions +---------------------------------- + +In additional to general backwards compatibility considerations, Python is +widely used for educational purposes, and we specifically don't want to +invalidate the wide array of educational material that assumes the availabilty +of the current ``random`` module API. Accordingly, this proposal ensures that +most of the public API can continue to be used not only without modification, +but without generating any new warnings. + +Warning when implicitly opting in to the deterministic RNG +---------------------------------------------------------- + +It's necessary to implicitly opt in to the deterministic PRNG as Python is +widely used for modelling and simulation purposes where this is the right +thing to do, and in many cases, these software models won't have a dedicated +maintenance team tasked with ensuring they keep working on the latest versions +of Python. + +Unfortunately, explicitly calling ``random.seed`` with data from ``os.urandom`` +is also a mistake that appears in a number of the flawed "how to generate a +security token in Python" guides readily available online. + +Using first DeprecationWarning, and then eventually a RuntimeWarning, to +advise against implicitly switching to the deterministic PRNG aims to +nudge future users that need a cryptographically secure RNG away from +calling ``random.seed()`` and those that genuinely need a deterministic +generator towards explicitily calling ``random.ensure_repeatable()``. + +Avoiding the introduction of a userspace CSPRNG +----------------------------------------------- + +The original discussion of this proposal on python-ideas[#csprng]_ suggested +introducing a cryptographically secure pseudo-random number generator and using +that by default, rather than defaulting to the relatively slow system random +number generator. + +The problem [#nocsprng]_ with this approach is that it introduces an additional +point of failure in security sensitive situations, for the sake of applications +where the random number generation may not even be on a critical performance +path. + +Applications that do need cryptographic quality randomness should be using the +system random number generator regardless of speed considerations, so in those +cases. + +Isn't the deterministic PRNG "secure enough"? +--------------------------------------------- + +In a word, "No" - that's why there's a warning in the module documentation +that says not to use it for security sensitive purposes. While we're not +currently aware of any studies of Python's random number generator specifically, +studies of PHP's random number generator [#php]_ have demonstrated the ability +to use weaknesses in that subsystem to facilitate a practical attack on +password recovery tokens in popular PHP web applications. + +However, one of the rules of secure software development is that "attacks only +get better, never worse", so it may be that by the time Python 3.6 is released +we will actually see a practical attack on Python's deterministic PRNG publicly +documented. + +Security fatigue in the Python ecosystem +---------------------------------------- + +Over the past few years, the computing industry as a whole has been +making a concerted effort to upgrade the shared network infrastructure we all +depend on to a "secure by default" stance. 
As one of the most widely used +programming languages for network service development (including the OpenStack +Infrastructure-as-a-Service platform) and for systems administration +on Linux systems in general, a fair share of that burden has fallen on the +Python ecosystem, which is understandably frustrating for Pythonistas using +Python in other contexts where these issues aren't of as great a concern. + +This consideration is one of the primary factors driving the substantial +backwards compatibility improvements in this proposal relative to the initial +draft concept posted to python-ideas [#draft]_. + +Acknowledgements +================ + +* Theo de Raadt, for making the suggestion to Guido van Rossum that we + seriously consider defaulting to a cryptographically secure random number + generator +* Serhiy Storchaka, Terry Reedy, Petr Viktorin, and anyone else in the + python-ideas threads that suggested the approach of transparently switching + to the ``random.Random`` implementation when any of the functions that only + make sense for a deterministic RNG are called +* Nathaniel Smith for providing the reference on practical attacks against + PHP's random number generator when used to generate password reset tokens +* Donald Stufft for pursuing additional discussions with network security + experts that suggested the introduction of a userspace CSPRNG would mean + additional complexity for insufficient gain relative to just using the + system RNG directly +* Paul Moore for eloquently making the case for the current level of security + fatigue in the Python ecosystem + +References +========== + +.. [#breaches] Visualization of data breaches involving more than 30k records (each) + (http://www.informationisbeautiful.net/visualizations/worlds-biggest-data-breaches-hacks/) + +.. [#uconnect] Remote UConnect hack for Jeep Cherokee + (http://www.wired.com/2015/07/hackers-remotely-kill-jeep-highway/) + +.. [#php] PRNG based attack against password reset tokens in PHP applications + (https://media.blackhat.com/bh-us-12/Briefings/Argyros/BH_US_12_Argyros_PRNG_WP.pdf) + +.. [#search] Search link for "python password generator" + (https://www.google.com.au/search?q=python+password+generator) + +.. [#csprng] python-ideas thread discussing using a userspace CSPRNG + (https://mail.python.org/pipermail/python-ideas/2015-September/035886.html) + +.. [#draft] Initial draft concept that eventually became this PEP + (https://mail.python.org/pipermail/python-ideas/2015-September/036095.html) + +.. [#nocsprng] Safely generating random numbers + (http://sockpuppet.org/blog/2014/02/25/safely-generate-random-numbers/) + +.. [#ieeetopten] IEEE Spectrum 2015 Top Ten Programming Languages + (http://spectrum.ieee.org/computing/software/the-2015-top-ten-programming-languages) + +.. [#owasptopten] OWASP Top Ten Web Security Issues for 2013 + (https://www.owasp.org/index.php/OWASP_Top_Ten_Project#tab=OWASP_Top_10_for_2013) + +.. [#print] Stack Overflow answer for missing parentheses in call to print + (http://stackoverflow.com/questions/25445439/what-does-syntaxerror-missing-parentheses-in-call-to-print-mean-in-python/25445440#25445440) + +.. [#bcrypt] Bypassing bcrypt through an insecure data cache + (http://arstechnica.com/security/2015/09/once-seen-as-bulletproof-11-million-ashley-madison-passwords-already-cracked/) + +Copyright +========= + +This document has been placed in the public domain. + + +.. 
+
+   Local Variables:
+   mode: indented-text
+   indent-tabs-mode: nil
+   sentence-end-double-space: t
+   fill-column: 70
+   coding: utf-8
+   End:
diff --git a/pep-0505.txt b/pep-0505.txt
new file mode 100644
--- /dev/null
+++ b/pep-0505.txt
@@ -0,0 +1,205 @@
+PEP: 505
+Title: None coalescing operators
+Version: $Revision$
+Last-Modified: $Date$
+Author: Mark E. Haase
+Status: Draft
+Type: Standards Track
+Content-Type: text/x-rst
+Created: 18-Sep-2015
+Python-Version: 3.6
+
+Abstract
+========
+
+Several modern programming languages have so-called "null coalescing" or
+"null aware" operators, including C#, Dart, Perl, Swift, and PHP (starting in
+version 7). These operators provide syntactic sugar for common patterns
+involving null references. [1]_ [2]_
+
+* The "null coalescing" operator is a binary operator that returns its
+  first non-null operand.
+* The "null aware member access" operator is a binary operator that accesses
+  an instance member only if that instance is non-null. It returns null
+  otherwise.
+* The "null aware index access" operator is a binary operator that accesses a
+  member of a collection only if that collection is non-null. It returns null
+  otherwise.
+
+Python does not have any directly equivalent syntax. The ``or`` operator can
+be used to similar effect but checks for a truthy value, not ``None``
+specifically. The ternary operator ``... if ... else ...`` can be used for
+explicit null checks but is more verbose and typically duplicates part of the
+expression in between ``if`` and ``else``. The proposed ``None`` coalescing
+and ``None`` aware operators offer an alternative syntax that is more
+intuitive and concise.
+
+
+Rationale
+=========
+
+Null Coalescing Operator
+------------------------
+
+The following code illustrates how the ``None`` coalescing operators would
+work in Python::
+
+    >>> title = 'My Title'
+    >>> title ?? 'Default Title'
+    'My Title'
+    >>> title = None
+    >>> title ?? 'Default Title'
+    'Default Title'
+
+Similar behavior can be achieved with the ``or`` operator, but ``or`` checks
+whether its left operand is false-y, not specifically ``None``. This can lead
+to surprising behavior. Consider the scenario of computing the price of some
+products a customer has in his/her shopping cart::
+
+    >>> price = 100
+    >>> requested_quantity = 5
+    >>> default_quantity = 1
+    >>> (requested_quantity or default_quantity) * price
+    500
+    >>> requested_quantity = None
+    >>> (requested_quantity or default_quantity) * price
+    100
+    >>> requested_quantity = 0
+    >>> (requested_quantity or default_quantity) * price # oops!
+    100
+
+This type of bug is not possible with the ``None`` coalescing operator,
+because there is no implicit type coercion to ``bool``::
+
+    >>> price = 100
+    >>> requested_quantity = 0
+    >>> default_quantity = 1
+    >>> (requested_quantity ?? default_quantity) * price
+    0
+
+The same correct behavior can be achieved with the ternary operator. Here is
+an excerpt from the popular Requests package::
+
+    data = [] if data is None else data
+    files = [] if files is None else files
+    headers = {} if headers is None else headers
+    params = {} if params is None else params
+    hooks = {} if hooks is None else hooks
+
+This particular formulation has the undesirable effect of putting the operands
+in an unintuitive order: the brain thinks, "use ``data`` if possible and use
+``[]`` as a fallback," but the code puts the fallback *before* the preferred
+value.
+ +The author of this package could have written it like this instead:: + + data = data if data is not None else [] + files = files if files is not None else [] + headers = headers if headers is not None else {} + params = params if params is not None else {} + hooks = hooks if hooks is not None else {} + +This ordering of the operands is more intuitive, but it requires 4 extra +characters (for "not "). It also highlights the repetition of identifiers: +``data if data``, ``files if files``, etc. The ``None`` coalescing operator +improves readability:: + + data = data ?? [] + files = files ?? [] + headers = headers ?? {} + params = params ?? {} + hooks = hooks ?? {} + +The ``None`` coalescing operator also has a corresponding assignment shortcut. + +:: + + data ?= [] + files ?= [] + headers ?= {} + params ?= {} + hooks ?= {} + +The ``None`` coalescing operator is left-associative, which allows for easy +chaining:: + + >>> user_title = None + >>> local_default_title = None + >>> global_default_title = 'Global Default Title' + >>> title = user_title ?? local_default_title ?? global_default_title + 'Global Default Title' + +The direction of associativity is important because the ``None`` coalescing +operator short circuits: if its left operand is non-null, then the right +operand is not evaluated. + +:: + + >>> def get_default(): raise Exception() + >>> 'My Title' ?? get_default() + 'My Title' + + +Null-Aware Member Access Operator +--------------------------------- + +:: + + >>> title = 'My Title' + >>> title.upper() + 'MY TITLE' + >>> title = None + >>> title.upper() + Traceback (most recent call last): + File "", line 1, in + AttributeError: 'NoneType' object has no attribute 'upper' + >>> title?.upper() + None + + +Null-Aware Index Access Operator +--------------------------------- + +:: + + >>> person = {'name': 'Mark', 'age': 32} + >>> person['name'] + 'Mark' + >>> person = None + >>> person['name'] + Traceback (most recent call last): + File "", line 1, in + TypeError: 'NoneType' object is not subscriptable + >>> person?['name'] + None + + +Specification +============= + + +References +========== + +.. [1] Wikipedia: Null coalescing operator + (https://en.wikipedia.org/wiki/Null_coalescing_operator) + +.. [2] Seth Ladd's Blog: Null-aware operators in Dart + (http://blog.sethladd.com/2015/07/null-aware-operators-in-dart.html) + + +Copyright +========= + +This document has been placed in the public domain. + + + +.. + Local Variables: + mode: indented-text + indent-tabs-mode: nil + sentence-end-double-space: t + fill-column: 70 + coding: utf-8 + End: diff --git a/pep-0506.txt b/pep-0506.txt new file mode 100644 --- /dev/null +++ b/pep-0506.txt @@ -0,0 +1,449 @@ +PEP: 506 +Title: Adding A Secrets Module To The Standard Library +Version: $Revision$ +Last-Modified: $Date$ +Author: Steven D'Aprano +Status: Draft +Type: Standards Track +Content-Type: text/x-rst +Created: 19-Sep-2015 +Python-Version: 3.6 +Post-History: + + +Abstract +======== + +This PEP proposes the addition of a module for common security-related +functions such as generating tokens to the Python standard library. + + +Definitions +=========== + +Some common abbreviations used in this proposal: + +* PRNG: + + Pseudo Random Number Generator. A deterministic algorithm used + to produce random-looking numbers with certain desirable + statistical properties. + +* CSPRNG: + + Cryptographically Strong Pseudo Random Number Generator. An + algorithm used to produce random-looking numbers which are + resistant to prediction. 
+ +* MT: + + Mersenne Twister. An extensively studied PRNG which is currently + used by the ``random`` module as the default. + + +Rationale +========= + +This proposal is motivated by concerns that Python's standard library +makes it too easy for developers to inadvertently make serious security +errors. Theo de Raadt, the founder of OpenBSD, contacted Guido van Rossum +and expressed some concern [#]_ about the use of MT for generating sensitive +information such as passwords, secure tokens, session keys and similar. + +Although the documentation for the ``random`` module explicitly states that +the default is not suitable for security purposes [#]_, it is strongly +believed that this warning may be missed, ignored or misunderstood by +many Python developers. In particular: + +* developers may not have read the documentation and consequently + not seen the warning; + +* they may not realise that their specific use of the module has security + implications; or + +* not realising that there could be a problem, they have copied code + (or learned techniques) from websites which don't offer best + practises. + +The first [#]_ hit when searching for "python how to generate passwords" on +Google is a tutorial that uses the default functions from the ``random`` +module [#]_. Although it is not intended for use in web applications, it is +likely that similar techniques find themselves used in that situation. +The second hit is to a StackOverflow question about generating +passwords [#]_. Most of the answers given, including the accepted one, use +the default functions. When one user warned that the default could be +easily compromised, they were told "I think you worry too much." [#]_ + +This strongly suggests that the existing ``random`` module is an attractive +nuisance when it comes to generating (for example) passwords or secure +tokens. + +Additional motivation (of a more philosophical bent) can be found in the +post which first proposed this idea [#]_. + + +Proposal +======== + +Alternative proposals have focused on the default PRNG in the ``random`` +module, with the aim of providing "secure by default" cryptographically +strong primitives that developers can build upon without thinking about +security. (See Alternatives below.) This proposes a different approach: + +* The standard library already provides cryptographically strong + primitives, but many users don't know they exist or when to use them. + +* Instead of requiring crypto-naive users to write secure code, the + standard library should include a set of ready-to-use "batteries" for + the most common needs, such as generating secure tokens. This code + will both directly satisfy a need ("How do I generate a password reset + token?"), and act as an example of acceptable practises which + developers can learn from [#]_. + +To do this, this PEP proposes that we add a new module to the standard +library, with the suggested name ``secrets``. This module will contain a +set of ready-to-use functions for common activities with security +implications, together with some lower-level primitives. + +The suggestion is that ``secrets`` becomes the go-to module for dealing +with anything which should remain secret (passwords, tokens, etc.) +while the ``random`` module remains backward-compatible. + + +API and Implementation +====================== + +The contents of the ``secrets`` module is expected to evolve over time, and +likely will evolve between the time of writing this PEP and actual release +in the standard library [#]_. 
At the time of writing, the following functions +have been suggested: + +* A high-level function for generating secure tokens suitable for use + in (e.g.) password recovery, as session keys, etc. + +* A limited interface to the system CSPRNG, using either ``os.urandom`` + directly or ``random.SystemRandom``. Unlike the ``random`` module, this + does not need to provide methods for seeding, getting or setting the + state, or any non-uniform distributions. It should provide the + following: + + - A function for choosing items from a sequence, ``secrets.choice``. + - A function for generating an integer within some range, such as + ``secrets.randrange`` or ``secrets.randint``. + - A function for generating a given number of random bits and/or bytes + as an integer. + - A similar function which returns the value as a hex digit string. + +* ``hmac.compare_digest`` under the name ``equal``. + +The consensus appears to be that there is no need to add a new CSPRNG to +the ``random`` module to support these uses, ``SystemRandom`` will be +sufficient. + +Some illustrative implementations have been given by Nick Coghlan [#]_ +and a minimalist API by Tim Peters [#]_. This idea has also been discussed +on the issue tracker for the "cryptography" module [#]_. The following +pseudo-code can be taken as a possible starting point for the real +implementation:: + + from random import SystemRandom + from hmac import compare_digest as equal + + _sysrand = SystemRandom() + + randrange = _sysrand.randrange + randint = _sysrand.randint + randbits = _sysrand.getrandbits + choice = _sysrand.choice + + def randbelow(exclusive_upper_bound): + return _sysrand._randbelow(exclusive_upper_bound) + + DEFAULT_ENTROPY = 32 # bytes + + def token_bytes(nbytes=None): + if nbytes is None: + nbytes = DEFAULT_ENTROPY + return os.urandom(nbytes) + + def token_hex(nbytes=None): + return binascii.hexlify(token_bytes(nbytes)).decode('ascii') + + def token_url(nbytes=None): + tok = token_bytes(nbytes) + return base64.urlsafe_b64encode(tok).rstrip(b'=').decode('ascii') + + +The ``secrets`` module itself will be pure Python, and other Python +implementations can easily make use of it unchanged, or adapt it as +necessary. + +Default arguments +~~~~~~~~~~~~~~~~~ + +One difficult question is "How many bytes should my token be?". We can +help with this question by providing a default amount of entropy for the +"token_*" functions. If the ``nbytes`` argument is None or not given, the +default entropy will be used. This default value should be large enough +to be expected to be secure for medium-security uses, but is expected to +change in the future, possibly even in a maintenance release [#]_. + +Naming conventions +~~~~~~~~~~~~~~~~~~ + +One question is the naming conventions used in the module [#]_, whether to +use C-like naming conventions such as "randrange" or more Pythonic names +such as "random_range". + +Functions which are simply bound methods of the private ``SystemRandom`` +instance (e.g. ``randrange``), or a thin wrapper around such, should keep +the familiar names. Those which are something new (such as the various +``token_*`` functions) will use more Pythonic names. + +Alternatives +============ + +One alternative is to change the default PRNG provided by the ``random`` +module [#]_. This received considerable scepticism and outright opposition: + +* There is fear that a CSPRNG may be slower than the current PRNG (which + in the case of MT is already quite slow). 
+ +* Some applications (such as scientific simulations, and replaying + gameplay) require the ability to seed the PRNG into a known state, + which a CSPRNG lacks by design. + +* Another major use of the ``random`` module is for simple "guess a number" + games written by beginners, and many people are loath to make any + change to the ``random`` module which may make that harder. + +* Although there is no proposal to remove MT from the ``random`` module, + there was considerable hostility to the idea of having to opt-in to + a non-CSPRNG or any backwards-incompatible changes. + +* Demonstrated attacks against MT are typically against PHP applications. + It is believed that PHP's version of MT is a significantly softer target + than Python's version, due to a poor seeding technique [#]_. Consequently, + without a proven attack against Python applications, many people object + to a backwards-incompatible change. + +Nick Coghlan made an earlier suggestion for a globally configurable PRNG +which uses the system CSPRNG by default [#]_, but has since withdrawn it +in favour of this proposal. + + +Comparison To Other Languages +============================= + +* PHP + + PHP includes a function ``uniqid`` [#]_ which by default returns a + thirteen character string based on the current time in microseconds. + Translated into Python syntax, it has the following signature:: + + def uniqid(prefix='', more_entropy=False)->str + + The PHP documentation warns that this function is not suitable for + security purposes. Nevertheless, various mature, well-known PHP + applications use it for that purpose (citation needed). + + PHP 5.3 and better also includes a function ``openssl_random_pseudo_bytes`` + [#]_. Translated into Python syntax, it has roughly the following + signature:: + + def openssl_random_pseudo_bytes(length:int)->Tuple[str, bool] + + This function returns a pseudo-random string of bytes of the given + length, and an boolean flag giving whether the string is considered + cryptographically strong. The PHP manual suggests that returning + anything but True should be rare except for old or broken platforms. + +* JavaScript + + Based on a rather cursory search [#]_, there do not appear to be any + well-known standard functions for producing strong random values in + JavaScript. ``Math.random`` is often used, despite serious weaknesses + making it unsuitable for cryptographic purposes [#]_. In recent years + the majority of browsers have gained support for ``window.crypto.getRandomValues`` [#]_. + + Node.js offers a rich cryptographic module, ``crypto`` [#]_, most of + which is beyond the scope of this PEP. It does include a single function + for generating random bytes, ``crypto.randomBytes``. + +* Ruby + + The Ruby standard library includes a module ``SecureRandom`` [#]_ + which includes the following methods: + + * base64 - returns a Base64 encoded random string. + + * hex - returns a random hexadecimal string. + + * random_bytes - returns a random byte string. + + * random_number - depending on the argument, returns either a random + integer in the range(0, n), or a random float between 0.0 and 1.0. + + * urlsafe_base64 - returns a random URL-safe Base64 encoded string. + + * uuid - return a version 4 random Universally Unique IDentifier. + + +What Should Be The Name Of The Module? +====================================== + +There was a proposal to add a "random.safe" submodule, quoting the Zen +of Python "Namespaces are one honking great idea" koan. 
However, the +author of the Zen, Tim Peters, has come out against this idea [#]_, and +recommends a top-level module. + +In discussion on the python-ideas mailing list so far, the name "secrets" +has received some approval, and no strong opposition. + +There is already an existing third-party module with the same name [#]_, +but it appears to be unused and abandoned. + + +Frequently Asked Questions +========================== + +* Q: Is this a real problem? Surely MT is random enough that nobody can + predict its output. + + A: The consensus among security professionals is that MT is not safe + in security contexts. It is not difficult to reconstruct the internal + state of MT [#]_ [#]_ and so predict all past and future values. There + are a number of known, practical attacks on systems using MT for + randomness [#]_. + + While there are currently no known direct attacks on applications + written in Python due to the use of MT, there is widespread agreement + that such usage is unsafe. + +* Q: Is this an alternative to specialise cryptographic software such as SSL? + + A: No. This is a "batteries included" solution, not a full-featured + "nuclear reactor". It is intended to mitigate against some basic + security errors, not be a solution to all security-related issues. To + quote Nick Coghlan referring to his earlier proposal [#]_:: + + "...folks really are better off learning to use things like + cryptography.io for security sensitive software, so this change + is just about harm mitigation given that it's inevitable that a + non-trivial proportion of the millions of current and future + Python developers won't do that." + +* Q: What about a password generator? + + A: The consensus is that the requirements for password generators are too + variable for it to be a good match for the standard library [#]_. No + password generator will be included in the initial release of the + module, instead it will be given in the documentation as a recipe (? la + the recipes in the ``itertools`` module) [#]_. + +* Q: Will ``secrets`` use /dev/random (which blocks) or /dev/urandom (which + doesn't block) on Linux? What about other platforms? + + A: ``secrets`` will be based on ``os.urandom`` and ``random.SystemRandom``, + which are interfaces to your operating system's best source of + cryptographic randomness. On Linux, that may be ``/dev/urandom`` [#]_, + on Windows it may be ``CryptGenRandom()``, but see the documentation + and/or source code for the detailed implementation details. + + +References +========== + +.. [#] https://mail.python.org/pipermail/python-ideas/2015-September/035820.html + +.. [#] https://docs.python.org/3/library/random.html + +.. [#] As of the date of writing. Also, as Google search terms may be + automatically customised for the user without their knowledge, some + readers may see different results. + +.. [#] http://interactivepython.org/runestone/static/everyday/2013/01/3_password.html + +.. [#] http://stackoverflow.com/questions/3854692/generate-password-in-python + +.. [#] http://stackoverflow.com/questions/3854692/generate-password-in-python/3854766#3854766 + +.. [#] https://mail.python.org/pipermail/python-ideas/2015-September/036238.html + +.. [#] At least those who are motivated to read the source code and documentation. + +.. [#] Tim Peters suggests that bike-shedding the contents of the module will + be 10000 times more time consuming than actually implementing the + module. Words do not begin to express how much I am looking forward to + this. + +.. 
[#] https://mail.python.org/pipermail/python-ideas/2015-September/036271.html + +.. [#] https://mail.python.org/pipermail/python-ideas/2015-September/036350.html + +.. [#] https://github.com/pyca/cryptography/issues/2347 + +.. [#] https://mail.python.org/pipermail/python-ideas/2015-September/036517.html + https://mail.python.org/pipermail/python-ideas/2015-September/036515.html + +.. [#] https://mail.python.org/pipermail/python-ideas/2015-September/036474.html + +.. [#] Link needed. + +.. [#] By default PHP seeds the MT PRNG with the time (citation needed), + which is exploitable by attackers, while Python seeds the PRNG with + output from the system CSPRNG, which is believed to be much harder to + exploit. + +.. [#] http://legacy.python.org/dev/peps/pep-0504/ + +.. [#] http://php.net/manual/en/function.uniqid.php + +.. [#] http://php.net/manual/en/function.openssl-random-pseudo-bytes.php + +.. [#] Volunteers and patches are welcome. + +.. [#] http://ifsec.blogspot.fr/2012/05/cross-domain-mathrandom-prediction.html + +.. [#] https://developer.mozilla.org/en-US/docs/Web/API/RandomSource/getRandomValues + +.. [#] https://nodejs.org/api/crypto.html + +.. [#] http://ruby-doc.org/stdlib-2.1.2/libdoc/securerandom/rdoc/SecureRandom.html + +.. [#] https://mail.python.org/pipermail/python-ideas/2015-September/036254.html + +.. [#] https://pypi.python.org/pypi/secrets + +.. [#] https://jazzy.id.au/2010/09/22/cracking_random_number_generators_part_3.html + +.. [#] https://mail.python.org/pipermail/python-ideas/2015-September/036077.html + +.. [#] https://media.blackhat.com/bh-us-12/Briefings/Argyros/BH_US_12_Argyros_PRNG_WP.pdf + +.. [#] https://mail.python.org/pipermail/python-ideas/2015-September/036157.html + +.. [#] https://mail.python.org/pipermail/python-ideas/2015-September/036476.html + https://mail.python.org/pipermail/python-ideas/2015-September/036478.html + +.. [#] https://mail.python.org/pipermail/python-ideas/2015-September/036488.html + +.. [#] http://sockpuppet.org/blog/2014/02/25/safely-generate-random-numbers/ + http://www.2uo.de/myths-about-urandom/ + + +Copyright +========= + +This document has been placed in the public domain. + + + +.. + Local Variables: + mode: indented-text + indent-tabs-mode: nil + sentence-end-double-space: t + fill-column: 70 + coding: utf-8 + End: diff --git a/pep-0507.txt b/pep-0507.txt new file mode 100644 --- /dev/null +++ b/pep-0507.txt @@ -0,0 +1,331 @@ +PEP: 507 +Title: Migrate CPython to Git and GitLab +Version: $Revision$ +Last-Modified: $Date$ +Author: Barry Warsaw +Status: Draft +Type: Process +Content-Type: text/x-rst +Created: 2015-09-30 +Post-History: + + +Abstract +======== + +This PEP proposes migrating the repository hosting of CPython and the +supporting repositories to Git. Further, it proposes adopting a +hosted GitLab instance as the primary way of handling merge requests, +code reviews, and code hosting. It is similar in intent to PEP 481 +but proposes an open source alternative to GitHub and omits the +proposal to run Phabricator. As with PEP 481, this particular PEP is +offered as an alternative to PEP 474 and PEP 462. + + +Rationale +========= + +CPython is an open source project which relies on a number of +volunteers donating their time. As with any healthy, vibrant open +source project, it relies on attracting new volunteers as well as +retaining existing developers. 
Given that volunteer time is the most +scarce resource, providing a process that maximizes the efficiency of +contributors and reduces the friction for contributions, is of vital +importance for the long-term health of the project. + +The current tool chain of the CPython project is a custom and unique +combination of tools. This has two critical implications: + +* The unique nature of the tool chain means that contributors must + remember or relearn, the process, workflow, and tools whenever they + contribute to CPython, without the advantage of leveraging long-term + memory and familiarity they retain by working with other projects in + the FLOSS ecosystem. The knowledge they gain in working with + CPython is unlikely to be applicable to other projects. + +* The burden on the Python/PSF infrastructure team is much greater in + order to continue to maintain custom tools, improve them over time, + fix bugs, address security issues, and more generally adapt to new + standards in online software development with global collaboration. + +These limitations act as a barrier to contribution both for highly +engaged contributors (e.g. core Python developers) and especially for +more casual "drive-by" contributors, who care more about getting their +bug fix than learning a new suite of tools and workflows. + +By proposing the adoption of both a different version control system +and a modern, well-maintained hosting solution, this PEP addresses +these limitations. It aims to enable a modern, well-understood +process that will carry CPython development for many years. + + +Version Control System +---------------------- + +Currently the CPython and supporting repositories use Mercurial. As a +modern distributed version control system, it has served us well since +the migration from Subversion. However, when evaluating the VCS we +must consider the capabilities of the VCS itself as well as the +network effect and mindshare of the community around that VCS. + +There are really only two real options for this, Mercurial and Git. +The technical capabilities of the two systems are largely equivalent, +therefore this PEP instead focuses on their social aspects. + +It is not possible to get exact numbers for the number of projects or +people which are using a particular VCS, however we can infer this by +looking at several sources of information for what VCS projects are +using. + +The Open Hub (previously Ohloh) statistics [#openhub-stats]_ show that +37% of the repositories indexed by The Open Hub are using Git (second +only to Subversion which has 48%) while Mercurial has just 2%, beating +only Bazaar which has 1%. This has Git being just over 18 times as +popular as Mercurial on The Open Hub. + +Another source of information on VCS popularity is PyPI itself. This +source is more targeted at the Python community itself since it +represents projects developed for Python. Unfortunately PyPI does not +have a standard location for representing this information, so this +requires manual processing. If we limit our search to the top 100 +projects on PyPI (ordered by download counts) we can see that 62% of +them use Git, while 22% of them use Mercurial, and 13% use something +else. This has Git being just under 3 times as popular as Mercurial +for the top 100 projects on PyPI. + +These numbers back up the anecdotal evidence for Git as the far more +popular DVCS for open source projects. Choosing the more popular VCS +has a number of positive benefits. 
+ +For new contributors it increases the likelihood that they will have already +learned the basics of Git as part of working with another project or if they +are just now learning Git, that they'll be able to take that knowledge and +apply it to other projects. Additionally a larger community means more people +writing how to guides, answering questions, and writing articles about Git +which makes it easier for a new user to find answers and information about the +tool they are trying to learn and use. Given its popularity, there may also +be more auxiliary tooling written *around* Git. This increases options for +everything from GUI clients, helper scripts, repository hosting, etc. + +Further, the adoption of Git as the proposed back-end repository +format doesn't prohibit the use of Mercurial by fans of that VCS! +Mercurial users have the [#hg-git]_ plugin which allows them to push +and pull from a Git server using the Mercurial front-end. It's a +well-maintained and highly functional plugin that seems to be +well-liked by Mercurial users. + + +Repository Hosting +------------------ + +Where and how the official repositories for CPython are hosted is in +someways determined by the choice of VCS. With Git there are several +options. In fact, once the repository is hosted in Git, branches can +be mirrored in many locations, within many free, open, and proprietary +code hosting sites. + +It's still important for CPython to adopt a single, official +repository, with a web front-end that allows for many convenient and +common interactions entirely through the web, without always requiring +local VCS manipulations. These interactions include as a minimum, +code review with inline comments, branch diffing, CI integration, and +auto-merging. + +This PEP proposes to adopt a [#GitLab]_ instance, run within the +python.org domain, accessible to and with ultimate control from the +PSF and the Python infrastructure team, but donated, hosted, and +primarily maintained by GitLab, Inc. + +Why GitLab? Because it is a fully functional Git hosting system, that +sports modern web interactions, software workflows, and CI +integration. GitLab's Community Edition (CE) is open source software, +and thus is closely aligned with the principles of the CPython +community. + + +Code Review +----------- + +Currently CPython uses a custom fork of Rietveld modified to not run +on Google App Engine and which is currently only really maintained by +one person. It is missing common features present in many modern code +review tools. + +This PEP proposes to utilize GitLab's built-in merge requests and +online code review features to facilitate reviews of all proposed +changes. + + +GitLab merge requests +--------------------- + +The normal workflow for a GitLab hosted project is to submit a *merge request* +asking that a feature or bug fix branch be merged into a target branch, +usually one or more of the stable maintenance branches or the next-version +master branch for new features. GitLab's merge requests are similar in form +and function to GitHub's pull requests, so anybody who is already familiar +with the latter should be able to immediately utilize the former. + +Once submitted, a conversation about the change can be had between the +submitter and reviewer. This includes both general comments, and inline +comments attached to a particular line of the diff between the source and +target branches. 
Projects can also be configured to automatically run +continuous integration on the submitted branch, the results of which are +readily visible from the merge request page. Thus both the reviewer and +submitter can immediately see the results of the tests, making it much easier +to only land branches with passing tests. Each new push to the source branch +(e.g. to respond to a commenter's feedback or to fix a failing test) results +in a new run of the CI, so that the state of the request always reflects the +latest commit. + +Merge requests have a fairly major advantage over the older "submit a patch to +a bug tracker" model. They allow developers to work completely within the VCS +using standard VCS tooling, without requiring the creation of a patch file or +figuring out the right location to upload the patch to. This lowers the +barrier for sending a change to be reviewed. + +Merge requests are far easier to review. For example, they provide nice +syntax highlighted diffs which can operate in either unified or side by side +views. They allow commenting inline and on the merge request as a whole and +they present that in a nice unified way which will also hide comments which no +longer apply. Comments can be hidden and revealed. + +Actually merging a merge request is quite simple, if the source branch applies +cleanly to the target branch. A core reviewer simply needs to press the +"Merge" button for GitLab to automatically perform the merge. The source +branch can be optionally rebased, and once the merge is completed, the source +branch can be automatically deleted. + +GitLab also has a good workflow for submitting pull requests to a project +completely through their web interface. This would enable the Python +documentation to have "Edit on GitLab" buttons on every page and people who +discover things like typos, inaccuracies, or just want to make improvements to +the docs they are currently reading. They can simply hit that button and get +an in browser editor that will let them make changes and submit a merge +request all from the comfort of their browser. + + +Criticism +========= + +X is not written in Python +-------------------------- + +One feature that the current tooling (Mercurial, Rietveld) has is that the +primary language for all of the pieces are written in Python. This PEP +focuses more on the *best* tools for the job and not necessarily on the *best* +tools that happen to be written in Python. Volunteer time is the most +precious resource for any open source project and we can best respect and +utilize that time by focusing on the benefits and downsides of the tools +themselves rather than what language their authors happened to write them in. + +One concern is the ability to modify tools to work for us, however one of the +Goals here is to *not* modify software to work for us and instead adapt +ourselves to a more standardized workflow. This standardization pays off in +the ability to re-use tools out of the box freeing up developer time to +actually work on Python itself as well as enabling knowledge sharing between +projects. + +However if we do need to modify the tooling, Git itself is largely written in +C the same as CPython itself. It can also have commands written for it using +any language, including Python. GitLab itself is largely written in Ruby and +since it is Open Source software, we would have the ability to submit merge +requests to the upstream Community Edition, albeit in language potentially +unfamiliar to most Python programmers. 
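As a small illustration of the claim above that Git subcommands can be written in
Python, the sketch below shows a hypothetical ``git whoami`` helper (the command
name and output format are invented for this example); Git treats any executable
named ``git-<name>`` found on ``PATH`` as the subcommand ``git <name>``::

    #!/usr/bin/env python3
    """Hypothetical `git whoami` subcommand: report the configured identity.

    Saved as an executable file named `git-whoami` on PATH, Git will run it
    in response to `git whoami`.
    """
    import subprocess

    def git_config(key):
        # `git config --get` exits with a non-zero status when the key is
        # unset, so fall back to a placeholder instead of raising.
        try:
            return subprocess.check_output(
                ["git", "config", "--get", key],
                universal_newlines=True).strip()
        except subprocess.CalledProcessError:
            return "(unset)"

    if __name__ == "__main__":
        print("{} <{}>".format(git_config("user.name"),
                               git_config("user.email")))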
+
+
+Mercurial is better than Git
+----------------------------
+
+Whether Mercurial or Git is better on a technical level is a highly subjective
+opinion. This PEP does not state whether the mechanics of Git or Mercurial
+are better, and instead focuses on the network effect that is available for
+either option. While this PEP proposes switching to Git, Mercurial users are
+not left completely out of the loop. By using the hg-git extension for
+Mercurial, working with server-side Git repositories is fairly easy and
+straightforward.
+
+
+CPython Workflow is too Complicated
+-----------------------------------
+
+One sentiment that came out of previous discussions was that the multi-branch
+model of CPython was too complicated for GitLab style merge requests. This
+PEP disagrees with that sentiment.
+
+Currently any particular change requires manually creating a patch for 2.7 and
+3.x, which won't change at all in this regard.
+
+If someone submits a fix for the current stable branch (e.g. 3.5) the merge
+request workflow can be used to create a request to merge the current stable
+branch into the master branch, assuming there are no merge conflicts. As
+always, merge conflicts must be manually and locally resolved. Because
+developers also have the *option* of performing the merge locally, this
+provides an improvement over the current situation where the merge *must*
+always happen locally.
+
+For fixes in the current development branch that must also be applied to
+stable release branches, it is possible in many situations to locally cherry
+pick and apply the change to other branches, with merge requests submitted for
+each stable branch. It is also possible to just cherry pick and complete the
+merge locally. These are all accomplished with standard Git commands and
+techniques, with the advantage that all such changes can go through the review
+and CI test workflows, even for merges to stable branches. Minor changes may
+be easily accomplished in the GitLab web editor.
+
+No system can hide all the complexities involved in maintaining several long
+lived branches. The only thing that the tooling can do is make it as easy as
+possible to submit and commit changes.
+
+
+Open issues
+===========
+
+* What level of hosted support will GitLab offer? The PEP author has been in
+  contact with the GitLab CEO, with positive interest on their part. The
+  details of the hosting offer would have to be discussed.
+
+* What happens to Roundup and do we switch to the GitLab issue tracker?
+  Currently, this PEP is *not* suggesting we move from Roundup to GitLab
+  issues. We have way too much invested in Roundup right now and migrating
+  the data would be a huge effort. GitLab does support webhooks, so we will
+  probably want to use webhooks to integrate merges and other events with
+  updates to Roundup (e.g. to include pointers to commits, close issues,
+  etc. similar to what is currently done).
+
+* What happens to wiki.python.org? Nothing! While GitLab does support wikis
+  in repositories, there's no reason for us to migrate our Moin wikis.
+
+* What happens to the existing GitHub mirrors? We'd probably want to
+  regenerate them once the official upstream branches are natively hosted in
+  Git. This may change commit ids, but after that, it should be easy to
+  mirror the official Git branches and repositories far and wide.
+
+* Where would the GitLab instance live? Physically, in whatever hosting
+  provider GitLab chooses. We would point gitlab.python.org (or
+  git.python.org?) to this host.
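To give a rough idea of what the webhook-based Roundup integration mentioned in the
open issues above could look like, here is a deliberately simplified sketch of a
receiver; the payload field names (``commits``, ``message``, ``id``) and the
``#NNNNN`` issue-number convention are assumptions made for the example, not a
description of GitLab's actual webhook schema or of Roundup's interfaces::

    import json
    import re
    from http.server import BaseHTTPRequestHandler, HTTPServer

    # Assumed convention: commit messages mention tracker items as "#25260".
    ISSUE_RE = re.compile(r"#(\d{4,5})")

    class WebhookHandler(BaseHTTPRequestHandler):
        def do_POST(self):
            length = int(self.headers.get("Content-Length", 0))
            payload = json.loads(self.rfile.read(length).decode("utf-8"))
            # Assumed payload shape: a list of commits with "message" and "id".
            for commit in payload.get("commits", []):
                for issue in ISSUE_RE.findall(commit.get("message", "")):
                    # A real integration would talk to Roundup here (for
                    # example via its email or xmlrpc interfaces) to add a
                    # comment pointing at the commit.
                    print("would update issue", issue, "with commit",
                          commit.get("id"))
            self.send_response(200)
            self.end_headers()

    if __name__ == "__main__":
        HTTPServer(("127.0.0.1", 8000), WebhookHandler).serve_forever()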
+ + +References +========== + +.. [#openhub-stats] `Open Hub Statistics ` +.. [#hg-git] `Hg-Git mercurial plugin ` +.. [#GitLab] `https://about.gitlab.com/` + + +Copyright +========= + +This document has been placed in the public domain. + + + +.. + Local Variables: + mode: indented-text + indent-tabs-mode: nil + sentence-end-double-space: t + fill-column: 70 + coding: utf-8 + End: diff --git a/pep-3140.txt b/pep-3140.txt --- a/pep-3140.txt +++ b/pep-3140.txt @@ -2,7 +2,7 @@ Title: str(container) should call str(item), not repr(item) Version: $Revision$ Last-Modified: $Date$ -Author: Oleg Broytmann , +Author: Oleg Broytman , Jim J. Jewett Discussions-To: python-3000 at python.org Status: Rejected -- Repository URL: https://hg.python.org/peps From python-checkins at python.org Fri Oct 16 20:39:16 2015 From: python-checkins at python.org (brett.cannon) Date: Fri, 16 Oct 2015 18:39:16 +0000 Subject: [Python-checkins] =?utf-8?q?benchmarks=3A_Introduce_the_chameleon?= =?utf-8?q?=5Fv2_benchmark=2E?= Message-ID: <20151016183916.3287.73969@psf.io> https://hg.python.org/benchmarks/rev/598070f939b9 changeset: 228:598070f939b9 user: Brett Cannon date: Fri Oct 16 11:39:10 2015 -0700 summary: Introduce the chameleon_v2 benchmark. This benchmark uses Chameleon 2.22, edited to work with Python 3.6. It is based on the chameleon benchmark, but with the amount of work required increased and the iteration_scaling removed. This new benchmark has also been added to the default set to help make up for the loss of the recent loss of html5lib and spambayes. files: lib/Chameleon-2.22/.gitignore | 12 + lib/Chameleon-2.22/.travis.yml | 15 + lib/Chameleon-2.22/BENCHMARK_CHANGES.txt | 2 + lib/Chameleon-2.22/CHANGES.rst | 1305 ++++ lib/Chameleon-2.22/COPYRIGHT.txt | 7 + lib/Chameleon-2.22/LICENSE.txt | 185 + lib/Chameleon-2.22/MANIFEST.in | 2 + lib/Chameleon-2.22/Makefile | 89 + lib/Chameleon-2.22/PKG-INFO | 1353 ++++ lib/Chameleon-2.22/README.rst | 25 + lib/Chameleon-2.22/benchmarks/bm_chameleon.py | 128 + lib/Chameleon-2.22/benchmarks/bm_mako.py | 153 + lib/Chameleon-2.22/benchmarks/util.py | 51 + lib/Chameleon-2.22/docs/conf.py | 194 + lib/Chameleon-2.22/docs/configuration.rst | 43 + lib/Chameleon-2.22/docs/index.rst | 217 + lib/Chameleon-2.22/docs/integration.rst | 46 + lib/Chameleon-2.22/docs/library.rst | 205 + lib/Chameleon-2.22/docs/reference.rst | 1693 +++++ lib/Chameleon-2.22/docs/requirements.txt | 1 + lib/Chameleon-2.22/setup.cfg | 14 + lib/Chameleon-2.22/setup.py | 85 + lib/Chameleon-2.22/src/Chameleon.egg-info/PKG-INFO | 1353 ++++ lib/Chameleon-2.22/src/Chameleon.egg-info/SOURCES.txt | 437 + lib/Chameleon-2.22/src/Chameleon.egg-info/dependency_links.txt | 1 + lib/Chameleon-2.22/src/Chameleon.egg-info/not-zip-safe | 1 + lib/Chameleon-2.22/src/Chameleon.egg-info/top_level.txt | 1 + lib/Chameleon-2.22/src/chameleon/__init__.py | 6 + lib/Chameleon-2.22/src/chameleon/ast25.py | 135 + lib/Chameleon-2.22/src/chameleon/astutil.py | 977 +++ lib/Chameleon-2.22/src/chameleon/benchmark.py | 478 + lib/Chameleon-2.22/src/chameleon/codegen.py | 237 + lib/Chameleon-2.22/src/chameleon/compiler.py | 1680 +++++ lib/Chameleon-2.22/src/chameleon/config.py | 55 + lib/Chameleon-2.22/src/chameleon/exc.py | 299 + lib/Chameleon-2.22/src/chameleon/i18n.py | 129 + lib/Chameleon-2.22/src/chameleon/interfaces.py | 102 + lib/Chameleon-2.22/src/chameleon/loader.py | 174 + lib/Chameleon-2.22/src/chameleon/metal.py | 23 + lib/Chameleon-2.22/src/chameleon/namespaces.py | 9 + lib/Chameleon-2.22/src/chameleon/nodes.py | 233 + 
lib/Chameleon-2.22/src/chameleon/parser.py | 241 + lib/Chameleon-2.22/src/chameleon/program.py | 38 + lib/Chameleon-2.22/src/chameleon/py25.py | 36 + lib/Chameleon-2.22/src/chameleon/py26.py | 15 + lib/Chameleon-2.22/src/chameleon/tal.py | 497 + lib/Chameleon-2.22/src/chameleon/tales.py | 556 + lib/Chameleon-2.22/src/chameleon/template.py | 333 + lib/Chameleon-2.22/src/chameleon/tests/__init__.py | 1 + lib/Chameleon-2.22/src/chameleon/tests/inputs/001-interpolation.txt | 1 + lib/Chameleon-2.22/src/chameleon/tests/inputs/001-interpolation.txt.py | 48 + lib/Chameleon-2.22/src/chameleon/tests/inputs/001-variable-scope.html | 7 + lib/Chameleon-2.22/src/chameleon/tests/inputs/001-variable-scope.pt | 11 + lib/Chameleon-2.22/src/chameleon/tests/inputs/001-variable-scope.pt.py | 207 + lib/Chameleon-2.22/src/chameleon/tests/inputs/001.xml | 4 + lib/Chameleon-2.22/src/chameleon/tests/inputs/002-repeat-scope.pt | 8 + lib/Chameleon-2.22/src/chameleon/tests/inputs/002-repeat-scope.pt.py | 255 + lib/Chameleon-2.22/src/chameleon/tests/inputs/002.xml | 4 + lib/Chameleon-2.22/src/chameleon/tests/inputs/003-content.pt | 17 + lib/Chameleon-2.22/src/chameleon/tests/inputs/003-content.pt.py | 1158 +++ lib/Chameleon-2.22/src/chameleon/tests/inputs/003.xml | 4 + lib/Chameleon-2.22/src/chameleon/tests/inputs/004-attributes.pt | 24 + lib/Chameleon-2.22/src/chameleon/tests/inputs/004-attributes.pt.py | 789 ++ lib/Chameleon-2.22/src/chameleon/tests/inputs/004.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/005-default.pt | 12 + lib/Chameleon-2.22/src/chameleon/tests/inputs/005-default.pt.py | 445 + lib/Chameleon-2.22/src/chameleon/tests/inputs/005.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/006-attribute-interpolation.pt | 9 + lib/Chameleon-2.22/src/chameleon/tests/inputs/006-attribute-interpolation.pt.py | 272 + lib/Chameleon-2.22/src/chameleon/tests/inputs/006.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/007-content-interpolation.pt | 16 + lib/Chameleon-2.22/src/chameleon/tests/inputs/007-content-interpolation.pt.py | 261 + lib/Chameleon-2.22/src/chameleon/tests/inputs/007.xml | 4 + lib/Chameleon-2.22/src/chameleon/tests/inputs/008-builtins.pt | 12 + lib/Chameleon-2.22/src/chameleon/tests/inputs/008-builtins.pt.py | 227 + lib/Chameleon-2.22/src/chameleon/tests/inputs/008.xml | 4 + lib/Chameleon-2.22/src/chameleon/tests/inputs/009-literals.pt | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/009-literals.pt.py | 108 + lib/Chameleon-2.22/src/chameleon/tests/inputs/009.xml | 4 + lib/Chameleon-2.22/src/chameleon/tests/inputs/010-structure.pt | 9 + lib/Chameleon-2.22/src/chameleon/tests/inputs/010-structure.pt.py | 401 + lib/Chameleon-2.22/src/chameleon/tests/inputs/010.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/011-messages.pt | 9 + lib/Chameleon-2.22/src/chameleon/tests/inputs/011-messages.pt-en.py | 406 + lib/Chameleon-2.22/src/chameleon/tests/inputs/011-messages.pt.py | 406 + lib/Chameleon-2.22/src/chameleon/tests/inputs/011.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/012-translation.pt | 22 + lib/Chameleon-2.22/src/chameleon/tests/inputs/012-translation.pt-en.py | 491 + lib/Chameleon-2.22/src/chameleon/tests/inputs/012-translation.pt.py | 491 + lib/Chameleon-2.22/src/chameleon/tests/inputs/012.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/013-repeat-nested.pt | 11 + lib/Chameleon-2.22/src/chameleon/tests/inputs/013-repeat-nested.pt.py | 255 + lib/Chameleon-2.22/src/chameleon/tests/inputs/013.xml | 5 + 
lib/Chameleon-2.22/src/chameleon/tests/inputs/014-repeat-nested-similar.pt | 7 + lib/Chameleon-2.22/src/chameleon/tests/inputs/014-repeat-nested-similar.pt.py | 234 + lib/Chameleon-2.22/src/chameleon/tests/inputs/014.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/015-translation-nested.pt | 10 + lib/Chameleon-2.22/src/chameleon/tests/inputs/015-translation-nested.pt-en.py | 190 + lib/Chameleon-2.22/src/chameleon/tests/inputs/015-translation-nested.pt.py | 190 + lib/Chameleon-2.22/src/chameleon/tests/inputs/015.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/016-explicit-translation.pt | 11 + lib/Chameleon-2.22/src/chameleon/tests/inputs/016-explicit-translation.pt-en.py | 419 + lib/Chameleon-2.22/src/chameleon/tests/inputs/016-explicit-translation.pt.py | 419 + lib/Chameleon-2.22/src/chameleon/tests/inputs/016.xml | 4 + lib/Chameleon-2.22/src/chameleon/tests/inputs/017-omit-tag.pt | 12 + lib/Chameleon-2.22/src/chameleon/tests/inputs/017-omit-tag.pt.py | 278 + lib/Chameleon-2.22/src/chameleon/tests/inputs/017.xml | 4 + lib/Chameleon-2.22/src/chameleon/tests/inputs/018-translation-nested-dynamic.pt | 13 + lib/Chameleon-2.22/src/chameleon/tests/inputs/018-translation-nested-dynamic.pt-en.py | 142 + lib/Chameleon-2.22/src/chameleon/tests/inputs/018-translation-nested-dynamic.pt.py | 142 + lib/Chameleon-2.22/src/chameleon/tests/inputs/018.xml | 4 + lib/Chameleon-2.22/src/chameleon/tests/inputs/019-replace.pt | 13 + lib/Chameleon-2.22/src/chameleon/tests/inputs/019-replace.pt.py | 809 ++ lib/Chameleon-2.22/src/chameleon/tests/inputs/019.xml | 4 + lib/Chameleon-2.22/src/chameleon/tests/inputs/020-on-error.pt | 10 + lib/Chameleon-2.22/src/chameleon/tests/inputs/020-on-error.pt.py | 180 + lib/Chameleon-2.22/src/chameleon/tests/inputs/020.xml | 4 + lib/Chameleon-2.22/src/chameleon/tests/inputs/021-translation-domain.pt | 16 + lib/Chameleon-2.22/src/chameleon/tests/inputs/021-translation-domain.pt-en.py | 268 + lib/Chameleon-2.22/src/chameleon/tests/inputs/021-translation-domain.pt.py | 268 + lib/Chameleon-2.22/src/chameleon/tests/inputs/021.xml | 4 + lib/Chameleon-2.22/src/chameleon/tests/inputs/022-switch.pt | 21 + lib/Chameleon-2.22/src/chameleon/tests/inputs/022-switch.pt.py | 327 + lib/Chameleon-2.22/src/chameleon/tests/inputs/022.xml | 4 + lib/Chameleon-2.22/src/chameleon/tests/inputs/023-condition.pt | 6 + lib/Chameleon-2.22/src/chameleon/tests/inputs/023-condition.pt.py | 147 + lib/Chameleon-2.22/src/chameleon/tests/inputs/023.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/024-namespace-elements.pt | 16 + lib/Chameleon-2.22/src/chameleon/tests/inputs/024-namespace-elements.pt.py | 150 + lib/Chameleon-2.22/src/chameleon/tests/inputs/024.xml | 6 + lib/Chameleon-2.22/src/chameleon/tests/inputs/025-repeat-whitespace.pt | 15 + lib/Chameleon-2.22/src/chameleon/tests/inputs/025-repeat-whitespace.pt.py | 421 + lib/Chameleon-2.22/src/chameleon/tests/inputs/025.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/026-repeat-variable.pt | 13 + lib/Chameleon-2.22/src/chameleon/tests/inputs/026-repeat-variable.pt.py | 733 ++ lib/Chameleon-2.22/src/chameleon/tests/inputs/026.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/027-attribute-replacement.pt | 11 + lib/Chameleon-2.22/src/chameleon/tests/inputs/027-attribute-replacement.pt.py | 518 + lib/Chameleon-2.22/src/chameleon/tests/inputs/027.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/028-attribute-toggle.pt | 6 + lib/Chameleon-2.22/src/chameleon/tests/inputs/028-attribute-toggle.pt.py | 333 + 
lib/Chameleon-2.22/src/chameleon/tests/inputs/028.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/029-attribute-ordering.pt | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/029-attribute-ordering.pt.py | 124 + lib/Chameleon-2.22/src/chameleon/tests/inputs/029.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/030-repeat-tuples.pt | 7 + lib/Chameleon-2.22/src/chameleon/tests/inputs/030-repeat-tuples.pt.py | 242 + lib/Chameleon-2.22/src/chameleon/tests/inputs/030.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/031-namespace-with-tal.pt | 7 + lib/Chameleon-2.22/src/chameleon/tests/inputs/031-namespace-with-tal.pt.py | 356 + lib/Chameleon-2.22/src/chameleon/tests/inputs/031.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/032-master-template.pt | 20 + lib/Chameleon-2.22/src/chameleon/tests/inputs/032-master-template.pt.py | 378 + lib/Chameleon-2.22/src/chameleon/tests/inputs/032.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/033-use-macro-trivial.pt | 1 + lib/Chameleon-2.22/src/chameleon/tests/inputs/033-use-macro-trivial.pt.py | 35 + lib/Chameleon-2.22/src/chameleon/tests/inputs/033.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/034-use-template-as-macro.pt | 1 + lib/Chameleon-2.22/src/chameleon/tests/inputs/034-use-template-as-macro.pt.py | 32 + lib/Chameleon-2.22/src/chameleon/tests/inputs/034.xml | 4 + lib/Chameleon-2.22/src/chameleon/tests/inputs/035-use-macro-with-fill-slot.pt | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/035-use-macro-with-fill-slot.pt.py | 118 + lib/Chameleon-2.22/src/chameleon/tests/inputs/035.xml | 4 + lib/Chameleon-2.22/src/chameleon/tests/inputs/036-use-macro-inherits-dynamic-scope.pt | 2 + lib/Chameleon-2.22/src/chameleon/tests/inputs/036.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/037-use-macro-local-variable-scope.pt | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/037.xml | 6 + lib/Chameleon-2.22/src/chameleon/tests/inputs/038-use-macro-globals.pt | 6 + lib/Chameleon-2.22/src/chameleon/tests/inputs/038.xml | 6 + lib/Chameleon-2.22/src/chameleon/tests/inputs/039-globals.pt | 1 + lib/Chameleon-2.22/src/chameleon/tests/inputs/039.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/040-macro-using-template-symbol.pt | 20 + lib/Chameleon-2.22/src/chameleon/tests/inputs/040.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/041-translate-nested-names.pt | 22 + lib/Chameleon-2.22/src/chameleon/tests/inputs/041.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/042-use-macro-fill-footer.pt | 3 + lib/Chameleon-2.22/src/chameleon/tests/inputs/042.xml | 4 + lib/Chameleon-2.22/src/chameleon/tests/inputs/043-macro-nested-dynamic-vars.pt | 19 + lib/Chameleon-2.22/src/chameleon/tests/inputs/043.xml | 6 + lib/Chameleon-2.22/src/chameleon/tests/inputs/044-tuple-define.pt | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/044.xml | 10 + lib/Chameleon-2.22/src/chameleon/tests/inputs/045-namespaces.pt | 13 + lib/Chameleon-2.22/src/chameleon/tests/inputs/045.xml | 6 + lib/Chameleon-2.22/src/chameleon/tests/inputs/046-extend-macro.pt | 6 + lib/Chameleon-2.22/src/chameleon/tests/inputs/046.xml | 6 + lib/Chameleon-2.22/src/chameleon/tests/inputs/047-use-extended-macro.pt | 3 + lib/Chameleon-2.22/src/chameleon/tests/inputs/047.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/048-use-extended-macro-fill-original.pt | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/048.xml | 4 + lib/Chameleon-2.22/src/chameleon/tests/inputs/049-entities-in-attributes.pt | 11 + 
lib/Chameleon-2.22/src/chameleon/tests/inputs/049.xml | Bin lib/Chameleon-2.22/src/chameleon/tests/inputs/050-define-macro-and-use-not-extend.pt | 6 + lib/Chameleon-2.22/src/chameleon/tests/inputs/050.xml | Bin lib/Chameleon-2.22/src/chameleon/tests/inputs/051-use-non-extended-macro.pt | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/051.xml | Bin lib/Chameleon-2.22/src/chameleon/tests/inputs/052-i18n-domain-inside-filled-slot.pt | 8 + lib/Chameleon-2.22/src/chameleon/tests/inputs/052.xml | 4 + lib/Chameleon-2.22/src/chameleon/tests/inputs/053-special-characters-in-attributes.pt | 6 + lib/Chameleon-2.22/src/chameleon/tests/inputs/053.xml | 6 + lib/Chameleon-2.22/src/chameleon/tests/inputs/054-import-expression.pt | 3 + lib/Chameleon-2.22/src/chameleon/tests/inputs/054.xml | 10 + lib/Chameleon-2.22/src/chameleon/tests/inputs/055-attribute-fallback-to-dict-lookup.pt | 4 + lib/Chameleon-2.22/src/chameleon/tests/inputs/055.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/056-comment-attribute.pt | 7 + lib/Chameleon-2.22/src/chameleon/tests/inputs/056.xml | 4 + lib/Chameleon-2.22/src/chameleon/tests/inputs/057-order.pt | 8 + lib/Chameleon-2.22/src/chameleon/tests/inputs/057.xml | 4 + lib/Chameleon-2.22/src/chameleon/tests/inputs/058-script.pt | 16 + lib/Chameleon-2.22/src/chameleon/tests/inputs/058.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/059-embedded-javascript.pt | 6 + lib/Chameleon-2.22/src/chameleon/tests/inputs/059.xml | 10 + lib/Chameleon-2.22/src/chameleon/tests/inputs/060-macro-with-multiple-same-slots.pt | 8 + lib/Chameleon-2.22/src/chameleon/tests/inputs/060.xml | 4 + lib/Chameleon-2.22/src/chameleon/tests/inputs/061-fill-one-slot-but-two-defined.pt | 3 + lib/Chameleon-2.22/src/chameleon/tests/inputs/061.xml | 4 + lib/Chameleon-2.22/src/chameleon/tests/inputs/062-comments-and-expressions.pt | 27 + lib/Chameleon-2.22/src/chameleon/tests/inputs/062.xml | 4 + lib/Chameleon-2.22/src/chameleon/tests/inputs/063-continuation.pt | 4 + lib/Chameleon-2.22/src/chameleon/tests/inputs/063.xml | 4 + lib/Chameleon-2.22/src/chameleon/tests/inputs/064-tags-and-special-characters.pt | 4 + lib/Chameleon-2.22/src/chameleon/tests/inputs/064.xml | 4 + lib/Chameleon-2.22/src/chameleon/tests/inputs/065-use-macro-in-fill.pt | 6 + lib/Chameleon-2.22/src/chameleon/tests/inputs/065.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/066-load-expression.pt | 1 + lib/Chameleon-2.22/src/chameleon/tests/inputs/066.xml | 7 + lib/Chameleon-2.22/src/chameleon/tests/inputs/067-attribute-decode.pt | 6 + lib/Chameleon-2.22/src/chameleon/tests/inputs/067.xml | 4 + lib/Chameleon-2.22/src/chameleon/tests/inputs/068-less-than-greater-than-in-attributes.pt | 8 + lib/Chameleon-2.22/src/chameleon/tests/inputs/068.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/069-translation-domain-and-macro.pt | 3 + lib/Chameleon-2.22/src/chameleon/tests/inputs/069.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/070-translation-domain-and-use-macro.pt | 3 + lib/Chameleon-2.22/src/chameleon/tests/inputs/070.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/071-html-attribute-defaults.pt | 12 + lib/Chameleon-2.22/src/chameleon/tests/inputs/071.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/072-repeat-interpolation.pt | 13 + lib/Chameleon-2.22/src/chameleon/tests/inputs/072.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/073-utf8-encoded.pt | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/073.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/074-encoded-template.pt | 5 + 
lib/Chameleon-2.22/src/chameleon/tests/inputs/074.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/075-nested-macros.pt | 11 + lib/Chameleon-2.22/src/chameleon/tests/inputs/075.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/076-nested-macro-override.pt | 3 + lib/Chameleon-2.22/src/chameleon/tests/inputs/076.xml | 7 + lib/Chameleon-2.22/src/chameleon/tests/inputs/077-i18n-attributes.pt | 1 + lib/Chameleon-2.22/src/chameleon/tests/inputs/077.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/078-tags-and-newlines.pt | 23 + lib/Chameleon-2.22/src/chameleon/tests/inputs/078.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/079-implicit-i18n.pt | 16 + lib/Chameleon-2.22/src/chameleon/tests/inputs/079.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/080-xmlns-namespace-on-tal.pt | 6 + lib/Chameleon-2.22/src/chameleon/tests/inputs/080.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/081-load-spec.pt | 1 + lib/Chameleon-2.22/src/chameleon/tests/inputs/081.xml | 7 + lib/Chameleon-2.22/src/chameleon/tests/inputs/082-load-spec-computed.pt | 1 + lib/Chameleon-2.22/src/chameleon/tests/inputs/082.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/083-template-dict-to-macro.pt | 2 + lib/Chameleon-2.22/src/chameleon/tests/inputs/083.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/084-interpolation-in-cdata.pt | 9 + lib/Chameleon-2.22/src/chameleon/tests/inputs/084.xml | 1 + lib/Chameleon-2.22/src/chameleon/tests/inputs/085-nested-translation.pt | 11 + lib/Chameleon-2.22/src/chameleon/tests/inputs/085.xml | 6 + lib/Chameleon-2.22/src/chameleon/tests/inputs/086-self-closing.pt | 10 + lib/Chameleon-2.22/src/chameleon/tests/inputs/086.xml | 6 + lib/Chameleon-2.22/src/chameleon/tests/inputs/087-code-blocks.pt | 28 + lib/Chameleon-2.22/src/chameleon/tests/inputs/087.xml | 6 + lib/Chameleon-2.22/src/chameleon/tests/inputs/088-python-newlines.pt | 2 + lib/Chameleon-2.22/src/chameleon/tests/inputs/088.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/089-load-fallback.pt | 3 + lib/Chameleon-2.22/src/chameleon/tests/inputs/089.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/090-tuple-expression.pt | 8 + lib/Chameleon-2.22/src/chameleon/tests/inputs/090.xml | 7 + lib/Chameleon-2.22/src/chameleon/tests/inputs/091-repeat-none.pt | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/091.xml | 7 + lib/Chameleon-2.22/src/chameleon/tests/inputs/092.xml | 10 + lib/Chameleon-2.22/src/chameleon/tests/inputs/093.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/094.xml | 6 + lib/Chameleon-2.22/src/chameleon/tests/inputs/095.xml | 6 + lib/Chameleon-2.22/src/chameleon/tests/inputs/096.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/097.xml | 8 + lib/Chameleon-2.22/src/chameleon/tests/inputs/098.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/099.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/100.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/101-unclosed-tags.html | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/101.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/102-unquoted-attributes.html | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/102.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/103-simple-attribute.html | 8 + lib/Chameleon-2.22/src/chameleon/tests/inputs/103.xml | 4 + lib/Chameleon-2.22/src/chameleon/tests/inputs/104.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/105.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/106.xml | 5 + 
lib/Chameleon-2.22/src/chameleon/tests/inputs/107.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/108.xml | 7 + lib/Chameleon-2.22/src/chameleon/tests/inputs/109.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/110.xml | 6 + lib/Chameleon-2.22/src/chameleon/tests/inputs/111.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/112.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/113.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/114.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/115.xml | 6 + lib/Chameleon-2.22/src/chameleon/tests/inputs/116.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/117.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/118.xml | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/119.xml | 4 + lib/Chameleon-2.22/src/chameleon/tests/inputs/120-translation-context.pt | 13 + lib/Chameleon-2.22/src/chameleon/tests/inputs/121-translation-comment.pt | 7 + lib/Chameleon-2.22/src/chameleon/tests/inputs/greeting.pt | 1 + lib/Chameleon-2.22/src/chameleon/tests/inputs/hello_world.pt | 5 + lib/Chameleon-2.22/src/chameleon/tests/inputs/hello_world.txt | 1 + lib/Chameleon-2.22/src/chameleon/tests/inputs/hello_world.txt.py | 48 + lib/Chameleon-2.22/src/chameleon/tests/outputs/001.html | 7 + lib/Chameleon-2.22/src/chameleon/tests/outputs/001.pt | 9 + lib/Chameleon-2.22/src/chameleon/tests/outputs/001.txt | 1 + lib/Chameleon-2.22/src/chameleon/tests/outputs/002.pt | 13 + lib/Chameleon-2.22/src/chameleon/tests/outputs/003.pt | 17 + lib/Chameleon-2.22/src/chameleon/tests/outputs/004.pt | 24 + lib/Chameleon-2.22/src/chameleon/tests/outputs/005.pt | 12 + lib/Chameleon-2.22/src/chameleon/tests/outputs/006.pt | 9 + lib/Chameleon-2.22/src/chameleon/tests/outputs/007.pt | 15 + lib/Chameleon-2.22/src/chameleon/tests/outputs/008.pt | 12 + lib/Chameleon-2.22/src/chameleon/tests/outputs/009.pt | 5 + lib/Chameleon-2.22/src/chameleon/tests/outputs/010.pt | 9 + lib/Chameleon-2.22/src/chameleon/tests/outputs/011-en.pt | 9 + lib/Chameleon-2.22/src/chameleon/tests/outputs/011.pt | 9 + lib/Chameleon-2.22/src/chameleon/tests/outputs/012-en.pt | 10 + lib/Chameleon-2.22/src/chameleon/tests/outputs/012.pt | 10 + lib/Chameleon-2.22/src/chameleon/tests/outputs/013.pt | 22 + lib/Chameleon-2.22/src/chameleon/tests/outputs/014.pt | 12 + lib/Chameleon-2.22/src/chameleon/tests/outputs/015-en.pt | 5 + lib/Chameleon-2.22/src/chameleon/tests/outputs/015.pt | 5 + lib/Chameleon-2.22/src/chameleon/tests/outputs/016-en.pt | 9 + lib/Chameleon-2.22/src/chameleon/tests/outputs/016.pt | 9 + lib/Chameleon-2.22/src/chameleon/tests/outputs/017.pt | 12 + lib/Chameleon-2.22/src/chameleon/tests/outputs/018-en.pt | 3 + lib/Chameleon-2.22/src/chameleon/tests/outputs/018.pt | 3 + lib/Chameleon-2.22/src/chameleon/tests/outputs/019.pt | 13 + lib/Chameleon-2.22/src/chameleon/tests/outputs/020.pt | 8 + lib/Chameleon-2.22/src/chameleon/tests/outputs/021-en.pt | 12 + lib/Chameleon-2.22/src/chameleon/tests/outputs/021.pt | 12 + lib/Chameleon-2.22/src/chameleon/tests/outputs/022.pt | 21 + lib/Chameleon-2.22/src/chameleon/tests/outputs/023.pt | 6 + lib/Chameleon-2.22/src/chameleon/tests/outputs/024.pt | 14 + lib/Chameleon-2.22/src/chameleon/tests/outputs/025.pt | 22 + lib/Chameleon-2.22/src/chameleon/tests/outputs/026.pt | 17 + lib/Chameleon-2.22/src/chameleon/tests/outputs/027.pt | 7 + lib/Chameleon-2.22/src/chameleon/tests/outputs/028.pt | 5 + lib/Chameleon-2.22/src/chameleon/tests/outputs/029.pt | 3 + lib/Chameleon-2.22/src/chameleon/tests/outputs/030.pt | 10 + 
lib/Chameleon-2.22/src/chameleon/tests/outputs/031.pt | 7 + lib/Chameleon-2.22/src/chameleon/tests/outputs/032.pt | 15 + lib/Chameleon-2.22/src/chameleon/tests/outputs/033.pt | 15 + lib/Chameleon-2.22/src/chameleon/tests/outputs/034.pt | 15 + lib/Chameleon-2.22/src/chameleon/tests/outputs/035.pt | 17 + lib/Chameleon-2.22/src/chameleon/tests/outputs/036.pt | 15 + lib/Chameleon-2.22/src/chameleon/tests/outputs/037.pt | 15 + lib/Chameleon-2.22/src/chameleon/tests/outputs/038.pt | 6 + lib/Chameleon-2.22/src/chameleon/tests/outputs/039.pt | 0 lib/Chameleon-2.22/src/chameleon/tests/outputs/040.pt | 15 + lib/Chameleon-2.22/src/chameleon/tests/outputs/041.pt | 7 + lib/Chameleon-2.22/src/chameleon/tests/outputs/042.pt | 15 + lib/Chameleon-2.22/src/chameleon/tests/outputs/043.pt | 11 + lib/Chameleon-2.22/src/chameleon/tests/outputs/044.pt | 5 + lib/Chameleon-2.22/src/chameleon/tests/outputs/045.pt | 12 + lib/Chameleon-2.22/src/chameleon/tests/outputs/046.pt | 17 + lib/Chameleon-2.22/src/chameleon/tests/outputs/047.pt | 17 + lib/Chameleon-2.22/src/chameleon/tests/outputs/048.pt | 17 + lib/Chameleon-2.22/src/chameleon/tests/outputs/049.pt | 11 + lib/Chameleon-2.22/src/chameleon/tests/outputs/050.pt | 15 + lib/Chameleon-2.22/src/chameleon/tests/outputs/051.pt | 15 + lib/Chameleon-2.22/src/chameleon/tests/outputs/052.pt | 15 + lib/Chameleon-2.22/src/chameleon/tests/outputs/053.pt | 6 + lib/Chameleon-2.22/src/chameleon/tests/outputs/054.pt | 3 + lib/Chameleon-2.22/src/chameleon/tests/outputs/055.pt | 4 + lib/Chameleon-2.22/src/chameleon/tests/outputs/056.pt | 7 + lib/Chameleon-2.22/src/chameleon/tests/outputs/057.pt | 8 + lib/Chameleon-2.22/src/chameleon/tests/outputs/058.pt | 16 + lib/Chameleon-2.22/src/chameleon/tests/outputs/059.pt | 6 + lib/Chameleon-2.22/src/chameleon/tests/outputs/060.pt | 8 + lib/Chameleon-2.22/src/chameleon/tests/outputs/061.pt | 8 + lib/Chameleon-2.22/src/chameleon/tests/outputs/062.pt | 27 + lib/Chameleon-2.22/src/chameleon/tests/outputs/063.pt | 3 + lib/Chameleon-2.22/src/chameleon/tests/outputs/064.pt | 3 + lib/Chameleon-2.22/src/chameleon/tests/outputs/065.pt | 13 + lib/Chameleon-2.22/src/chameleon/tests/outputs/066.pt | 5 + lib/Chameleon-2.22/src/chameleon/tests/outputs/067.pt | 6 + lib/Chameleon-2.22/src/chameleon/tests/outputs/068.pt | 8 + lib/Chameleon-2.22/src/chameleon/tests/outputs/069-en.pt | 15 + lib/Chameleon-2.22/src/chameleon/tests/outputs/069.pt | 15 + lib/Chameleon-2.22/src/chameleon/tests/outputs/070-en.pt | 15 + lib/Chameleon-2.22/src/chameleon/tests/outputs/070.pt | 15 + lib/Chameleon-2.22/src/chameleon/tests/outputs/071.pt | 12 + lib/Chameleon-2.22/src/chameleon/tests/outputs/072.pt | 19 + lib/Chameleon-2.22/src/chameleon/tests/outputs/073.pt | 5 + lib/Chameleon-2.22/src/chameleon/tests/outputs/074.pt | 5 + lib/Chameleon-2.22/src/chameleon/tests/outputs/075.pt | 19 + lib/Chameleon-2.22/src/chameleon/tests/outputs/076.pt | 17 + lib/Chameleon-2.22/src/chameleon/tests/outputs/077-en.pt | 1 + lib/Chameleon-2.22/src/chameleon/tests/outputs/077.pt | 1 + lib/Chameleon-2.22/src/chameleon/tests/outputs/078.pt | 9 + lib/Chameleon-2.22/src/chameleon/tests/outputs/079-en.pt | 16 + lib/Chameleon-2.22/src/chameleon/tests/outputs/079.pt | 16 + lib/Chameleon-2.22/src/chameleon/tests/outputs/080.pt | 3 + lib/Chameleon-2.22/src/chameleon/tests/outputs/081.pt | 5 + lib/Chameleon-2.22/src/chameleon/tests/outputs/082.pt | 5 + lib/Chameleon-2.22/src/chameleon/tests/outputs/083.pt | 15 + lib/Chameleon-2.22/src/chameleon/tests/outputs/084.pt | 9 + 
lib/Chameleon-2.22/src/chameleon/tests/outputs/085-en.pt | 9 + lib/Chameleon-2.22/src/chameleon/tests/outputs/085.pt | 9 + lib/Chameleon-2.22/src/chameleon/tests/outputs/086.pt | 18 + lib/Chameleon-2.22/src/chameleon/tests/outputs/087.pt | 25 + lib/Chameleon-2.22/src/chameleon/tests/outputs/088.pt | 1 + lib/Chameleon-2.22/src/chameleon/tests/outputs/089.pt | 5 + lib/Chameleon-2.22/src/chameleon/tests/outputs/090.pt | 14 + lib/Chameleon-2.22/src/chameleon/tests/outputs/091.pt | 5 + lib/Chameleon-2.22/src/chameleon/tests/outputs/101.html | 5 + lib/Chameleon-2.22/src/chameleon/tests/outputs/102.html | 5 + lib/Chameleon-2.22/src/chameleon/tests/outputs/103.html | 8 + lib/Chameleon-2.22/src/chameleon/tests/outputs/120-en.pt | 9 + lib/Chameleon-2.22/src/chameleon/tests/outputs/120.pt | 9 + lib/Chameleon-2.22/src/chameleon/tests/outputs/121.pt | 5 + lib/Chameleon-2.22/src/chameleon/tests/outputs/greeting.pt | 1 + lib/Chameleon-2.22/src/chameleon/tests/outputs/hello_world.pt | 5 + lib/Chameleon-2.22/src/chameleon/tests/outputs/hello_world.txt | 1 + lib/Chameleon-2.22/src/chameleon/tests/test_doctests.py | 40 + lib/Chameleon-2.22/src/chameleon/tests/test_exc.py | 13 + lib/Chameleon-2.22/src/chameleon/tests/test_loader.py | 110 + lib/Chameleon-2.22/src/chameleon/tests/test_parser.py | 92 + lib/Chameleon-2.22/src/chameleon/tests/test_sniffing.py | 124 + lib/Chameleon-2.22/src/chameleon/tests/test_templates.py | 698 ++ lib/Chameleon-2.22/src/chameleon/tests/test_tokenizer.py | 47 + lib/Chameleon-2.22/src/chameleon/tokenize.py | 144 + lib/Chameleon-2.22/src/chameleon/utils.py | 435 + lib/Chameleon-2.22/src/chameleon/zpt/__init__.py | 1 + lib/Chameleon-2.22/src/chameleon/zpt/loader.py | 30 + lib/Chameleon-2.22/src/chameleon/zpt/program.py | 798 ++ lib/Chameleon-2.22/src/chameleon/zpt/template.py | 429 + lib/Chameleon-2.22/src/pkg_resources/__init__.py | 3109 ++++++++++ lib/Chameleon-2.22/src/pkg_resources/_vendor/__init__.py | 0 lib/Chameleon-2.22/src/pkg_resources/_vendor/packaging/__about__.py | 31 + lib/Chameleon-2.22/src/pkg_resources/_vendor/packaging/__init__.py | 24 + lib/Chameleon-2.22/src/pkg_resources/_vendor/packaging/_compat.py | 40 + lib/Chameleon-2.22/src/pkg_resources/_vendor/packaging/_structures.py | 78 + lib/Chameleon-2.22/src/pkg_resources/_vendor/packaging/specifiers.py | 784 ++ lib/Chameleon-2.22/src/pkg_resources/_vendor/packaging/version.py | 403 + lib/Chameleon-2.22/src/pkg_resources/_vendor/vendored.txt | 1 + lib/Chameleon-2.22/src/pkg_resources/api_tests.txt | 422 + lib/Chameleon-2.22/src/pkg_resources/tests/__init__.py | 0 lib/Chameleon-2.22/src/pkg_resources/tests/test_markers.py | 16 + lib/Chameleon-2.22/src/pkg_resources/tests/test_pkg_resources.py | 111 + lib/Chameleon-2.22/src/pkg_resources/tests/test_resources.py | 661 ++ lib/Chameleon-2.22/tox.ini | 34 + perf.py | 19 +- performance/bm_chameleon_v2.py | 39 + 455 files changed, 38961 insertions(+), 4 deletions(-) diff --git a/lib/Chameleon-2.22/.gitignore b/lib/Chameleon-2.22/.gitignore new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/.gitignore @@ -0,0 +1,12 @@ +*.pyc +*.egg +*.egg-info +.coverage +.tox/ +coverage.xml +nosetests.xml +*.tar.gz +env25/ +env26/ +env27/ +env32/ diff --git a/lib/Chameleon-2.22/.travis.yml b/lib/Chameleon-2.22/.travis.yml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/.travis.yml @@ -0,0 +1,15 @@ +language: python + +python: + - 2.6 + - 2.7 + - pypy + - 3.1 + - 3.2 + - 3.3 + +install: + - python setup.py install + +script: + - python setup.py test -q diff --git 
a/lib/Chameleon-2.22/BENCHMARK_CHANGES.txt b/lib/Chameleon-2.22/BENCHMARK_CHANGES.txt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/BENCHMARK_CHANGES.txt @@ -0,0 +1,2 @@ +Use of inspect.getargspec() changed to inspect.getfullargspec() in order to run under Python 3.6. +pkg_resources was also added manually. \ No newline at end of file diff --git a/lib/Chameleon-2.22/CHANGES.rst b/lib/Chameleon-2.22/CHANGES.rst new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/CHANGES.rst @@ -0,0 +1,1305 @@ +Changes +======= + +2.22 (2015-02-06) +----------------- + +- Fix brown bag release. + + +2.21 (2015-02-06) +----------------- + +- Added ``RenderError`` exception which indicates that an error + occurred during the evaluation of an expression. + +- Clean up ``TemplateError`` exception implementation. + + +2.20 (2015-01-12) +----------------- + +- Pass ``search_path`` to template class when loaded using + ``TemplateLoader`` (or one of the derived classes). + [faassen] + + +2.19 (2015-01-06) +----------------- + +- Fix logging deprecation. + +- Fix environment-based configuration logging error. + + +2.18 (2014-11-03) +----------------- + +- Fix minor compilation error. + + +2.17 (2014-11-03) +----------------- + +- Add support for ``i18n:context``. + [wiggy] + +- Add missing 'parity' repeat property. + [voxspox] + +- Don't modify environment when getting variables from it. + [fschulze] + + +2.16 (2014-05-06) +----------------- + +- If a repeat expression evaluates to ``None`` then it is now + equivalent to an empty set. + + This changes a behavior introduced in 2.14. + + This fixes issue #172. + +- Remove fossil test dependency on deprecated ``distribute``. + +- Add explicit support / testing for Python 3.3 / 3.4. + +- Drop explicit support for Python 2.5 (out of maintenance, and no longer + supported by ``tox`` or ``Travis-CI``). + + +2.15 (2014-03-11) +----------------- + +- Add Support for Python 3.4's ``NameConstant``. + [brakhane] + + +2.14 (2013-11-28) +----------------- + +- Element repetition using the ``TAL`` namespace no longer includes + whitespace. This fixes issue #110. + +- Use absolute import for ``chameleon.interfaces`` module. This fixes + issue #161. + + +2.13-1 (2013-10-24) +------------------- + +- Fixing brown bag release. + +2.13 (2013-10-21) +----------------- + +Bugfixes: + +- The template cache mechanism now includes additional configuration + settings as part of the cache key such as ``strict`` and + ``trim_attribute_space``. + [ossmkitty] + +- Fix cache issue where sometimes cached templates would not load + correctly. + [ossmkitty] + +- In debug-mode, correctly remove temporary files when the module + loader is garbage-collected (on ``__del__``). + [graffic] + +- Fix error message when duplicate i18n:name directives are used in a + translation. + +- Using the three-argument form of ``getattr`` on a + ``chameleon.tal.RepeatDict`` no longer raises ``KeyError``, + letting the default provided to ``getattr`` be used. This fixes + attempting to adapt a ``RepeatDict`` to a Zope interface under + PyPy. + +2.12 (2013-03-26) +----------------- + +Changes: + +- When a ``tal:case`` condition succeeds, no other case now will. + +Bugfixes: + +- Implicit translation now correctly extracts and normalizes complete + sentences, instead of words. + [witsch] + +- The ``default`` symbol in a ``tal:case`` condition now allows the + element only if no other case succeeds. 
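As a rough illustration of the ``tal:case`` behaviour described in the 2.12 entry above (the template text and variable names are made up for this sketch and are not taken from the distribution)::

    from chameleon import PageTemplate

    # Illustrative only: with the 2.12 semantics, at most one case renders,
    # and ``default`` renders only when no other case has succeeded.
    template = PageTemplate("""
    <div tal:switch="len(items)">
      <p tal:case="0">no items</p>
      <p tal:case="1">one item</p>
      <p tal:case="default">${len(items)} items</p>
    </div>
    """)

    print(template(items=["a", "b"]))  # expected to take the ``default`` branch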
+ + +2.11 (2012-11-15) +----------------- + +Bugfixes: + +- An issue was resolved where a METAL statement was combined with a + ``tal:on-error`` handler. + +- Fix minor parser issue with incorrectly formatted processing + instructions. + +- Provide proper error handling for Python inline code blocks. + +Features: + +- The simple translation function now supports the + ``translationstring`` interface. + +Optimizations: + +- Minor optimization which correctly detects when an element has no + attributes. + + +2.10 (2012-10-12) +----------------- + +Deprecations: + +- The ``fast_translate`` function has been deprecated. Instead, the + default translation function is now always a function that simply + interpolates the mapping onto the message default or id. + + The motivation is that since version 2.9, the ``context`` argument + is non-trivial: the ``econtext`` mapping is passed. This breaks an + expectation on the Zope platform that the ``context`` parameter is + the HTTP request. Previously, with Chameleon this parameter was + simply not provided and so that did not cause issues as such. + +- The ``ast24`` module has been renamed to ``ast25``. This should help + clear up any confusion that Chameleon 2.x might be support a Python + interpreter less than version 2.5 (it does not). + +Features: + +- The ``ProxyExpr`` expression class (and hence the ``load:`` + expression type) is now a TALES-expression. In practical terms, this + means that the expression type (which computes a string result using + the standard ``"${...}"`` interpolation syntax and proxies the + result through a function) now supports fallback using the pipe + operator (``"|"``). This fixes issue #128. + +- An attempt to interpolate using the empty string as the expression + (i.e. ``${}``) now does nothing: the string ``${}`` is simply output + as is. + +- Added support for adding, modifying, and removing attributes using a + dictionary expression in ``tal:attributes`` (analogous to Genshi's + ``py:attrs`` directive):: + +
          + + In the example above, ``name`` is an identifier, while ``value`` and + ``attrs`` are Python expressions. However, ``attrs`` must evaluate + to a Python dictionary object (more concisely, the value must + implement the dictionary API-methods ``update()`` and ``items()``). + +Optimizations: + +- In order to cut down on the size of the compiled function objects, + some conversion and quoting statements have been put into + functions. In one measurement, the reduction was 35%. The benchmark + suite does *not* report of an increased render time (actually + slightly decreased). + +Bugfixes: + +- An exception is now raised if a trivial string is passed for + ``metal:fill-slot``. This fixes issue #89. + +- An empty string is now never translated. Not really a bug, but it's + been reported in as an issue (#92) because some translation + frameworks handle this case incorrectly. + +- The template module loader (file cache) now correctly encodes + generated template source code as UTF-8. This fixes issue #125. + +- Fixed issue where a closure might be reused unsafely in nested + template rendering. + +- Fixed markup class ``__repr__`` method. This fixes issue #124. + +- Added missing return statement to fix printing the non-abbreviated + filename in case of an exception. + [tomo] + +2.9.2 (2012-06-06) +------------------ + +Bugfixes: + +- Fixed a PyPy incompatibility. + +- Fixed issue #109 which caused testing failures on some platforms. + +2.9.1 (2012-06-01) +------------------ + +Bugfixes: + +- Fixed issue #103. The ``tal:on-error`` statement now always adds an + explicit end-tag to the element, even with a substitution content of + nothing. + +- Fixed issue #113. The ``tal:on-error`` statement now works correctly + also for dynamic attributes. That is, the fallback tag now includes + only static attributes. + +- Fixed name error which prevented the benchmark from running + correctly. + +Compatibility: + +- Fixed deprecation warning on Python 3 for zope interface implements + declaration. This fixes issue #116. + +2.9.0 (2012-05-31) +------------------ + +Features: + +- The translation function now gets the ``econtext`` argument as the + value for ``context``. Note that historically, this was usually an + HTTP request which might provide language negotiation data through a + dictionary interface. + [alvinyue] + +Bugfixes: + +- Fixed import alias issue which would lead to a syntax error in + generated Python code. Fixes issue #114. + +2.8.5 (2012-05-02) +------------------ + +Bugfixes: + +- Fixed minor installation issues on Python 2.5 and 3. + [ppaez] + +- Ensure output is unicode even when trivial (an empty string). + +2.8.4 (2012-04-18) +------------------ + +Features: + +- In exception output, long filenames are now truncated to 60 + characters of output, preventing line wrap which makes it difficult + to scan the exception output. + +Bugfixes: + +- Include filename and location in exception output for exceptions + raised during compilation. + +- If a trivial translation substitution variable is given (i.e. an + empty string), simply ignore it. This fixes issue #106. + +2.8.3 (2012-04-16) +------------------ + +Features: + +- Log template source on debug-level before cooking. + +- The `target_language` argument, if given, is now available as a + variable in templates. + +2.8.2 (2012-03-30) +------------------ + +Features: + +- Temporary caches used in debug mode are cleaned up eagerly, rather + than waiting for process termination. 
+ [mitchellrj] + +Bugfixes: + +- The `index`, `start` and `end` methods on the TAL repeat object are + now callable. This fixes an incompatibility with ZPT. + +- The loader now correctly handles absolute paths on Windows. + [rdale] + +2.8.1 (2012-03-29) +------------------ + +Features: + +- The exception formatter now lists errors in 'wrapping order'. This + means that the innermost, and presumably most relevant exception is + shown last. + +Bugfixes: + +- The exception formatter now correctly recognizes nested errors and + does not rewrap the dynamically generated exception class. + +- The exception formatter now correctly sets the ``__module__`` + attribute to that of the original exception class. + +2.8.0 (2012-02-29) +------------------ + +Features: + +- Added support for code blocks using the `` processing + instruction syntax. + + The scope is name assignments is up until the nearest macro + definition, or the template itself if macros are not used. + +Bugfixes: + +- Fall back to the exception class' ``__new__`` method to safely + create an exception object that is not implemented in Python. + +- The exception formatter now keeps track of already formatted + exceptions, and ignores them from further output. + +2.7.4 (2012-02-27) +------------------ + +- The error handler now invokes the ``__init__`` method of + ``BaseException`` instead of the possibly overriden method (which + may take required arguments). This fixes issue #97. + [j23d, malthe] + +2.7.3 (2012-01-16) +------------------ + +Bugfixes: + +- The trim whitespace option now correctly trims actual whitespace to + a single character, appearing either to the left or to the right of + an element prefix or suffix string. + +2.7.2 (2012-01-08) +------------------ + +Features: + +- Added option ``trim_attribute_space`` that decides whether attribute + whitespace is stripped (at most down to a single space). This option + exists to provide compatibility with the reference + implementation. Fixes issue #85. + +Bugfixes: + +- Ignore unhashable builtins when generating a reverse builtin + map to quickly look up a builtin value. + [malthe] + +- Apply translation mapping even when a translation function is not + available. This fixes issue #83. + [malthe] + +- Fixed issue #80. The translation domain for a slot is defined by the + source document, i.e. the template providing the content for a slot + whether it be the default or provided through ``metal:fill-slot``. + [jcbrand] + +- In certain circumstances, a Unicode non-breaking space character would cause + a define clause to fail to parse. + +2.7.1 (2011-12-29) +------------------ + +Features: + +- Enable expression interpolation in CDATA. + +- The page template class now implements dictionary access to macros:: + + template[name] + + This is a short-hand for:: + + template.macros[name] + +Bugfixes: + +- An invalid define clause would be silently ignored; we now raise a + language error exception. This fixes issue #79. + +- Fixed regression where ``${...}`` interpolation expressions could + not span multiple lines. This fixes issue #77. + +2.7.0 (2011-12-13) +------------------ + +Features: + +- The ``load:`` expression now derives from the string expression such + that the ``${...}`` operator can be used for expression + interpolation. 
+ +- The ``load:`` expression now accepts asset specs; these are resolved + by the ``pkg_resources.resource_filename`` function:: + + : + + An example from the test suite:: + + chameleon:tests/inputs/hello_world.pt + +Bugfixes: + +- If an attribute name for translation was not a valid Python + identifier, the compiler would generate invalid code. This has been + fixed, and the compiler now also throws an exception if an attribute + specification contains a comma. (Note that the only valid separator + character is the semicolon, when specifying attributes for + translation via the ``i18n:translate`` statement). This addresses + issue #76. + +2.6.2 (2011-12-08) +------------------ + +Bugfixes: + +- Fixed issue where ``tal:on-error`` would not respect + ``tal:omit-tag`` or namespace elements which are omitted by default + (such as ````). + +- Fixed issue where ``macros`` attribute would not be available on + file-based templates due to incorrect initialization. + +- The ``TryExcept`` and ``TryFinally`` AST nodes are not available on + Python 3.3. These have been aliased to ``Try``. This fixes issue + #75. + +Features: + +- The TAL repeat item now makes a security declaration that grants + access to unprotected subobjects on the Zope 2 platform:: + + __allow_access_to_unprotected_subobjects__ = True + + This is required for legacy compatibility and does not affect other + environments. + +- The template object now has a method ``write(body)`` which + explicitly decodes and cooks a string input. + +- Added configuration option ``loader_class`` which sets the class + used to create the template loader object. + + The class (essentially a callable) is created at template + construction time. + +2.6.1 (2011-11-30) +------------------ + +Bugfixes: + +- Decode HTML entities in expression interpolation strings. This fixes + issue #74. + +- Allow ``xml`` and ``xmlns`` attributes on TAL, I18N and METAL + namespace elements. This fixes issue #73. + +2.6.0 (2011-11-24) +------------------ + +Features: + +- Added support for implicit translation: + + The ``implicit_i18n_translate`` option enables implicit translation + of text. The ``implicit_i18n_attributes`` enables implicit + translation of attributes. The latter must be a set and for an + attribute to be implicitly translated, its lowercase string value + must be included in the set. + +- Added option ``strict`` (enabled by default) which decides whether + expressions are required to be valid at compile time. That is, if + not set, an exception is only raised for an invalid expression at + evaluation time. + +- An expression error now results in an exception only if the + expression is attempted evaluated during a rendering. + +- Added a configuration option ``prepend_relative_search_path`` which + decides whether the path relative to a file-based template is + prepended to the load search path. The default is ``True``. + +- Added a configuration option ``search_path`` to the file-based + template class, which adds additional paths to the template load + instance bound to the ``load:`` expression. The option takes a + string path or an iterable yielding string paths. The default value + is the empty set. + +Bugfixes: + +- Exception instances now support pickle/unpickle. + +- An attributes in i18n:attributes no longer needs to match an + existing or dynamic attribute in order to appear in the + element. This fixes issue #66. 
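A rough sketch of how the 2.6.0 options above might be used (the file name and search path are hypothetical; the keyword spellings follow the changelog entry)::

    from chameleon import PageTemplateFile

    # Sketch only: ``strict`` and ``search_path`` are the options described
    # in the 2.6.0 entry; "main.pt" and the directory are made-up examples.
    template = PageTemplateFile(
        "main.pt",
        strict=False,                         # defer expression errors to render time
        search_path=["/srv/site/templates"],  # extra directories for the load: expression
    )

    print(template(title="Hello"))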
+ +2.5.3 (2011-10-23) +------------------ + +Bugfixes: + +- Fixed an issue where a nested macro slot definition would fail even + though there existed a parent macro definition. This fixes issue + #69. + +2.5.2 (2011-10-12) +------------------ + +Bugfixes: + +- Fixed an issue where technically invalid input would result in a + compiler error. + +Features: + +- The markup class now inherits from the unicode string type such that + it's compatible with the string interface. + +2.5.1 (2011-09-29) +------------------ + +Bugfixes: + +- The symbol names "convert", "decode" and "translate" are now no + longer set as read-only *compiler internals*. This fixes issue #65. + +- Fixed an issue where a macro extension chain nested two levels (a + template uses a macro that extends a macro) would lose the middle + slot definitions if slots were defined nested. + + The compiler now throws an error if a nested slot definition is used + outside a macro extension context. + +2.5.0 (2011-09-23) +------------------ + +Features: + +- An expression type ``structure:`` is now available which wraps the + expression result as *structure* such that it is not escaped on + insertion, e.g.:: + +
          + ${structure: context.body} +
          + + This also means that the ``structure`` keyword for ``tal:content`` + and ``tal:replace`` now has an alternative spelling via the + expression type ``structure:``. + +- The string-based template constructor now accepts encoded input. + +2.4.6 (2011-09-23) +------------------ + +Bugfixes: + +- The ``tal:on-error`` statement should catch all exceptions. + +- Fixed issue that would prevent escaping of interpolation expression + values appearing in text. + +2.4.5 (2011-09-21) +------------------ + +Bugfixes: + +- The ``tal:on-error`` handler should have a ``error`` variable + defined that has the value of the exception thrown. + +- The ``tal:on-error`` statement is a substitution statement and + should support the "text" and "structure" insertion methods. + +2.4.4 (2011-09-15) +------------------ + +Bugfixes: + +- An encoding specified in the XML document preamble is now read and + used to decode the template input to unicode. This fixes issue #55. + +- Encoded expression input on Python 3 is now correctly + decoded. Previously, the string representation output would be + included instead of an actually decoded string. + +- Expression result conversion steps are now correctly included in + error handling such that the exception output points to the + expression location. + +2.4.3 (2011-09-13) +------------------ + +Features: + +- When an encoding is provided, pass the 'ignore' flag to avoid + decoding issues with bad input. + +Bugfixes: + +- Fixed pypy compatibility issue (introduced in previous release). + +2.4.2 (2011-09-13) +------------------ + +Bugfixes: + +- Fixed an issue in the compiler where an internal variable (such as a + translation default value) would be cached, resulting in variable + scope corruption (see issue #49). + +2.4.1 (2011-09-08) +------------------ + +Bugfixes: + +- Fixed an issue where a default value for an attribute would + sometimes spill over into another attribute. + +- Fixed issue where the use of the ``default`` name in an attribute + interpolation expression would print the attribute value. This is + unexpected, because it's an expression, not a static text suitable + for output. An attribute value of ``default`` now correctly drops + the attribute. + +2.4.0 (2011-08-22) +------------------ + +Features: + +- Added an option ``boolean_attributes`` to evaluate and render a + provided set of attributes using a boolean logic: if the attribute + is a true value, the value will be the attribute name, otherwise the + attribute is dropped. + + In the reference implementation, the following attributes are + configured as boolean values when the template is rendered in + HTML-mode:: + + "compact", "nowrap", "ismap", "declare", "noshade", + "checked", "disabled", "readonly", "multiple", "selected", + "noresize", "defer" + + Note that in Chameleon, these attributes must be manually provided. + +Bugfixes: + +- The carriage return character (used on Windows platforms) would + incorrectly be included in Python comments. + + It is now replaced with a line break. + + This fixes issue #44. + +2.3.8 (2011-08-19) +------------------ + +- Fixed import error that affected Python 2.5 only. + +2.3.7 (2011-08-19) +------------------ + +Features: + +- Added an option ``literal_false`` that disables the default behavior + of dropping an attribute for a value of ``False`` (in addition to + ``None``). This modified behavior is the behavior exhibited in + reference implementation. + +Bugfixes: + +- Undo attribute special HTML attribute behavior (see previous + release). 
+ + This turned out not to be a compatible behavior; rather, boolean + values should simply be coerced to a string. + + Meanwhile, the reference implementation does support an HTML mode in + which the special attribute behavior is exhibited. + + We do not currently support this mode. + +2.3.6 (2011-08-18) +------------------ + +Features: + +- Certain HTML attribute names now have a special behavior for a + attribute value of ``True`` (or ``default`` if no default is + defined). For these attributes, this return value will result in the + name being printed as the value:: + + + + will be rendered as:: + + + + This behavior is compatible with the reference implementation. + +2.3.5 (2011-08-18) +------------------ + +Features: + +- Added support for the set operator (``{item, item, ...}``). + +Bugfixes: + +- If macro is defined on the same element as a translation name, this + no longer results in a "translation name not allowed outside + translation" error. This fixes issue #43. + +- Attribute fallback to dictionary lookup now works on multiple items + (e.g. ``d1.d2.d2``). This fixes issue #42. + +2.3.4 (2011-08-16) +------------------ + +Features: + +- When inserting content in either attributes or text, a value of + ``True`` (like ``False`` and ``None``) will result in no + action. + +- Use statically assigned variables for ``"attrs"`` and + ``"default"``. This change yields a performance improvement of + 15-20%. + +- The template loader class now accepts an optional argument + ``default_extension`` which accepts a filename extension which will + be appended to the filename if there's not already an extension. + +Bugfixes: + +- The default symbol is now ``True`` for an attribute if the attribute + default is not provided. Note that the result is that the attribute + is dropped. This fixes issue #41. + +- Fixed an issue where assignment to a variable ``"type"`` would + fail. This fixes issue #40. + +- Fixed an issue where an (unsuccesful) assignment for a repeat loop + to a compiler internal name would not result in an error. + +- If the translation function returns the identical object, manually + coerce it to string. This fixes a compatibility issue with + translation functions which do not convert non-string objects to a + string value, but simply return them unchanged. + +2.3.3 (2011-08-15) +------------------ + +Features: + +- The ``load:`` expression now passes the initial keyword arguments to + its template loader (e.g. ``auto_reload`` and ``encoding``). + +- In the exception output, string variable values are now limited to a + limited output of characters, single line only. + +Bugfixes: + +- Fixed horizontal alignment of exception location info + (i.e. 'String:', 'Filename:' and 'Location:') such that they match + the template exception formatter. + +2.3.2 (2011-08-11) +------------------ + +Bugfixes: + +- Fixed issue where i18n:domain would not be inherited through macros + and slots. This fixes issue #37. + +2.3.1 (2011-08-11) +------------------ + +Features: + +- The ``Builtin`` node type may now be used to represent any Python + local or global name. This allows expression compilers to refer to + e.g. ``get`` or ``getitem``, or to explicit require a builtin object + such as one from the ``extra_builtins`` dictionary. + +Bugfixes: + +- Builtins which are not explicitly disallowed may now be redefined + and used as variables (e.g. ``nothing``). + +- Fixed compiler issue with circular node annotation loop. 
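The ``boolean_attributes`` option from the 2.4.0 entry above can likewise be sketched as follows (the template text is illustrative, and the option is assumed here to be passed as a constructor keyword)::

    from chameleon import PageTemplate

    # Sketch only: with "checked" treated as a boolean attribute, a true
    # value renders checked="checked" and a false value drops the attribute.
    template = PageTemplate(
        '<input type="checkbox" tal:attributes="checked selected" />',
        boolean_attributes={"checked"},
    )

    print(template(selected=True))   # expected: checked="checked"
    print(template(selected=False))  # expected: attribute omitted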
+ +2.3 (2011-08-10) +---------------- + +Features: + +- Added support for the following syntax to disable inline evaluation + in a comment: + + + + Note that the initial question mark character (?) will be omitted + from output. + +- The parser now accepts '<' and '>' in attributes. Note that this is + invalid markup. Previously, the '<' would not be accepted as a valid + attribute value, but this would result in an 'unexpected end tag' + error elsewhere. This fixes issue #38. + +- The expression compiler now provides methods ``assign_text`` and + ``assign_value`` such that a template engine might configure this + value conversion to support e.g. encoded strings. + + Note that currently, the only client for the ``assign_text`` method + is the string expression type. + +- Enable template loader for string-based template classes. Note that + the ``filename`` keyword argument may be provided on initialization + to identify the template source by filename. This fixes issue #36. + +- Added ``extra_builtins`` option to the page template class. These + builtins are added to the default builtins dictionary at cook time + and may be provided at initialization using the ``extra_builtins`` + keyword argument. + +Bugfixes: + +- If a translation domain is set for a fill slot, use this setting + instead of the macro template domain. + +- The Python expression compiler now correctly decodes HTML entities + ``'gt'`` and ``'lt'``. This fixes issue #32. + +- The string expression compiler now correctly handles encoded text + (when support for encoded strings is enabled). This fixes issue #35. + +- Fixed an issue where setting the ``filename`` attribute on a + file-based template would not automatically cause an invalidation. + +- Exceptions raised by Chameleon can now be copied via + ``copy.copy``. This fixes issue #36. + [leorochael] + +- If copying the exception fails in the exception handler, simply + re-raise the original exception and log a warning. + +2.2 (2011-07-28) +---------------- + +Features: + +- Added new expression type ``load:`` that allows loading a + template. Both relative and absolute paths are supported. If the + path given is relative, then it will be resolved with respect to the + directory of the template. + +- Added support for dynamic evaluation of expressions. + + Note that this is to support legacy applications. It is not + currently wired into the provided template classes. + +- Template classes now have a ``builtins`` attribute which may be used + to define built-in variables always available in the template + variable scope. + +Incompatibilities: + +- The file-based template class no longer accepts a parameter + ``loader``. This parameter would be used to load a template from a + relative path, using a ``find(filename)`` method. This was however, + undocumented, and probably not very useful since we have the + ``TemplateLoader`` mechanism already. + +- The compiled template module now contains an ``initialize`` function + which takes values that map to the template builtins. The return + value of this function is a dictionary that contains the render + functions. + +Bugfixes: + +- The file-based template class no longer verifies the existance of a + template file (using ``os.lstat``). This now happens implicitly if + eager parsing is enabled, or otherwise when first needed (e.g. at + render time). 
+ + This is classified as a bug fix because the previous behavior was + probably not what you'd expect, especially if an application + initializes a lot of templates without needing to render them + immediately. + +2.1.1 (2011-07-28) +------------------ + +Features: + +- Improved exception display. The expression string is now shown in + the context of the original source (if available) with a marker + string indicating the location of the expression in the template + source. + +Bugfixes: + +- The ``structure`` insertion mode now correctly decodes entities for + any expression type (including ``string:``). This fixes issue #30. + +- Don't show internal variables in the exception formatter variable + listing. + +2.1 (2011-07-25) +---------------- + +Features: + +- Expression interpolation (using the ``${...}`` operator and + previously also ``$identifier``) now requires braces everywhere + except inside the ``string:`` expression type. + + This change is motivated by a number of legacy templates in which + the interpolation format without braces ``$identifier`` appears as + text. + +2.0.2 (2011-07-25) +------------------ + +Bugfixes: + +- Don't use dynamic variable scope for lambda-scoped variables (#27). + +- Avoid duplication of exception class and message in traceback. + +- Fixed issue where a ``metal:fill-slot`` would be ignored if a macro + was set to be used on the same element (#16). + +2.0.1 (2011-07-23) +------------------ + +Bugfixes: + +- Fixed issue where global variable definition from macro slots would + fail (they would instead be local). This also affects error + reporting from inside slots because this would be recorded + internally as a global. + +- Fixed issue with template cache digest (used for filenames); modules + are now invalidated whenever any changes are made to the + distribution set available (packages on ``sys.path``). + +- Fixed exception handler to better let exceptions propagate through + the renderer. + +- The disk-based module compiler now mangles template source filenames + such that the output Python module is valid and at root level (dots + and hyphens are replaced by an underscore). This fixes issue #17. + +- Fixed translations (i18n) on Python 2.5. + +2.0 (2011-07-14) +---------------- + +- Point release. + +2.0-rc14 (2011-07-13) +--------------------- + +Bugfixes: + +- The tab character (``\t``) is now parsed correctly when used inside + tags. + +Features: + +- The ``RepeatDict`` class now works as a proxy behind a seperate + dictionary instance. + +- Added template constructor option ``keep_body`` which is a flag + (also available as a class attribute) that controls whether to save + the template body input in the ``body`` attribute. + + This is disabled by default, unless debug-mode is enabled. + +- The page template loader class now accepts an optional ``formats`` + argument which can be used to select an alternative template class. + +2.0-rc13 (2011-07-07) +--------------------- + +Bugfixes: + +- The backslash character (followed by optional whitespace and a line + break) was not correctly interpreted as a continuation for Python + expressions. + +Features: + +- The Python expression implementation is now more flexible for + external subclassing via a new ``parse`` method. + +2.0-rc12 (2011-07-04) +--------------------- + +Bugfixes: + +- Initial keyword arguments passed to a template now no longer "leak" + into the template variable space after a macro call. + +- An unexpected end tag is now an unrecoverable error. 
+ +Features: + +- Improve exception output. + +2.0-rc11 (2011-05-26) +--------------------- + +Bugfixes: + +- Fixed issue where variable names that begin with an underscore were + seemingly allowed, but their use resulted in a compiler error. + +Features: + +- Template variable names are now allowed to be prefixed with a single + underscore, but not two or more (reserved for internal use). + + Examples of valid names:: + + item + ITEM + _item + camelCase + underscore_delimited + help + +- Added support for Genshi's comment "drop" syntax:: + + + + Note the additional exclamation (!) character. + + This fixes addresses issue #10. + +2.0-rc10 (2011-05-24) +--------------------- + +Bugfixes: + +- The ``tal:attributes`` statement now correctly operates + case-insensitive. The attribute name given in the statement will + replace an existing attribute with the same name, without respect to + case. + +Features: + +- Added ``meta:interpolation`` statement to control expression + interpolation setting. + + Strings that disable the setting: ``"off"`` and ``"false"``. + Strings that enable the setting: ``"on"`` and ``"true"``. + +- Expression interpolation now works inside XML comments. + +2.0-rc9 (2011-05-05) +-------------------- + +Features: + +- Better debugging support for string decode and conversion. If a + naive join fails, each element in the output will now be attempted + coerced to unicode to try and trigger the failure near to the bad + string. + +2.0-rc8 (2011-04-11) +-------------------- + +Bugfixes: + +- If a macro defines two slots with the same name, a caller will now + fill both with a single usage. + +- If a valid of ``None`` is provided as the translation function + argument, we now fall back to the class default. + +2.0-rc7 (2011-03-29) +-------------------- + +Bugfixes: + +- Fixed issue with Python 2.5 compatibility AST. This affected at + least PyPy 1.4. + +Features: + +- The ``auto_reload`` setting now defaults to the class value; the + base template class gives a default value of + ``chameleon.config.AUTO_RELOAD``. This change allows a subclass to + provide a custom default value (such as an application-specific + debug mode setting). + + +2.0-rc6 (2011-03-19) +-------------------- + +Features: + +- Added support for ``target_language`` keyword argument to render + method. If provided, the argument will be curried onto the + translation function. + +Bugfixes: + +- The HTML entities 'lt', 'gt' and 'quot' appearing inside content + subtition expressions are now translated into their native character + values. This fixes an issue where you could not dynamically create + elements using the ``structure`` (which is possible in ZPT). The + need to create such structure stems from the lack of an expression + interpolation operator in ZPT. + +- Fixed duplicate file pointer issue with test suite (affected Windows + platforms only). This fixes issue #9. + [oliora] + +- Use already open file using ``os.fdopen`` when trying to write out + the module source. This fixes LP #731803. + + +2.0-rc5 (2011-03-07) +-------------------- + +Bugfixes: + +- Fixed a number of issues concerning the escaping of attribute + values: + + 1) Static attribute values are now included as they appear in the + source. + + This means that invalid attribute values such as ``"true && + false"`` are now left alone. It's not the job of the template + engine to correct such markup, at least not in the default mode + of operation. + + 2) The string expression compiler no longer unescapes + values. 
Instead, this is left to each expression + compiler. Currently only the Python expression compiler unescapes + its input. + + 3) The dynamic escape code sequence now correctly only replaces + ampersands that are part of an HTML escape format. + +Imports: + +- The page template classes and the loader class can now be imported + directly from the ``chameleon`` module. + +Features: + +- If a custom template loader is not provided, relative paths are now + resolved using ``os.abspath`` (i.e. to the current working + directory). + +- Absolute paths are normalized using ``os.path.normpath`` and + ``os.path.expanduser``. This ensures that all paths are kept in + their "canonical" form. + + +2.0-rc4 (2011-03-03) +-------------------- + +Bugfixes: + +- Fixed an issue where the output of an end-to-end string expression + would raise an exception if the expression evaluated to ``None`` (it + should simply output nothing). + +- The ``convert`` function (which is configurable on the template + class level) now defaults to the ``translate`` function (at + run-time). + + This fixes an issue where message objects were not translated (and + thus converted to a string) using the a provided ``translate`` + function. + +- Fixed string interpolation issue where an expression immediately + succeeded by a right curly bracket would not parse. + + This fixes issue #5. + +- Fixed error where ``tal:condition`` would be evaluated after + ``tal:repeat``. + +Features: + +- Python expression is now a TALES expression. That means that the + pipe operator can be used to chain two or more expressions in a + try-except sequence. + + This behavior was ported from the 1.x series. Note that while it's + still possible to use the pipe character ("|") in an expression, it + must now be escaped. + +- The template cache can now be shared by multiple processes. + + +2.0-rc3 (2011-03-02) +-------------------- + +Bugfixes: + +- Fixed ``atexit`` handler. + + This fixes issue #3. + +- If a cache directory is specified, it will now be used even when not + in debug mode. + +- Allow "comment" attribute in the TAL namespace. + + This fixes an issue in the sense that the reference engine allows + any attribute within the TAL namespace. However, only "comment" is + in common use. + +- The template constructor now accepts a flag ``debug`` which puts the + template *instance* into debug-mode regardless of the global + setting. + + This fixes issue #1. + +Features: + +- Added exception handler for exceptions raised while evaluating an + expression. + + This handler raises (or attempts to) a new exception of the type + ``RenderError``, with an additional base class of the original + exception class. The string value of the exception is a formatted + error message which includes the expression that caused the + exception. + + If we are unable to create the exception class, the original + exception is re-raised. + +2.0-rc2 (2011-02-28) +-------------------- + +- Fixed upload issue. + +2.0-rc1 (2011-02-28) +-------------------- + +- Initial public release. See documentation for what's new in this + series. diff --git a/lib/Chameleon-2.22/COPYRIGHT.txt b/lib/Chameleon-2.22/COPYRIGHT.txt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/COPYRIGHT.txt @@ -0,0 +1,7 @@ +Copyright (c) 2011 Malthe Borch and Contributors. All Rights Reserved. + +Portions (c) Zope Foundation and contributors (http://www.zope.org/). + +Portions (c) Edgewall Software. + +Portions (c) 2008 Armin Ronacher. 
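Returning to the 2.0-rc4 entry in the changelog above, the pipe fallback in Python expressions can be sketched as follows (``nickname`` is a made-up variable, deliberately left unbound so the fallback is exercised)::

    from chameleon import PageTemplate

    # Sketch only: the first expression fails, so the pipe falls back to the
    # ``string:`` expression that follows it.
    template = PageTemplate(
        '<p tal:content="nickname | string:anonymous">placeholder</p>'
    )

    print(template())  # expected to render <p>anonymous</p>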
diff --git a/lib/Chameleon-2.22/LICENSE.txt b/lib/Chameleon-2.22/LICENSE.txt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/LICENSE.txt @@ -0,0 +1,185 @@ +The majority of the code in Chameleon is supplied under this license: + + A copyright notice accompanies this license document that identifies + the copyright holders. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + 1. Redistributions in source code must retain the accompanying + copyright notice, this list of conditions, and the following + disclaimer. + + 2. Redistributions in binary form must reproduce the accompanying + copyright notice, this list of conditions, and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + 3. Names of the copyright holders must not be used to endorse or + promote products derived from this software without prior + written permission from the copyright holders. + + 4. If any files are modified, you must cause the modified files to + carry prominent notices stating that you changed the files and + the date of any change. + + Disclaimer + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND + ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + HOLDERS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF + THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + SUCH DAMAGE. + +Portions of the code in Chameleon are supplied under the ZPL (headers +within individiual files indicate that these portions are licensed +under the ZPL): + + Zope Public License (ZPL) Version 2.1 + ------------------------------------- + + A copyright notice accompanies this license document that + identifies the copyright holders. + + This license has been certified as open source. It has also + been designated as GPL compatible by the Free Software + Foundation (FSF). + + Redistribution and use in source and binary forms, with or + without modification, are permitted provided that the + following conditions are met: + + 1. Redistributions in source code must retain the + accompanying copyright notice, this list of conditions, + and the following disclaimer. + + 2. Redistributions in binary form must reproduce the accompanying + copyright notice, this list of conditions, and the + following disclaimer in the documentation and/or other + materials provided with the distribution. + + 3. Names of the copyright holders must not be used to + endorse or promote products derived from this software + without prior written permission from the copyright + holders. + + 4. The right to distribute this software or to use it for + any purpose does not give you the right to use + Servicemarks (sm) or Trademarks (tm) of the copyright + holders. Use of them is covered by separate agreement + with the copyright holders. + + 5. 
If any files are modified, you must cause the modified + files to carry prominent notices stating that you changed + the files and the date of any change. + + Disclaimer + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' + AND ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT + NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY + AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN + NO EVENT SHALL THE COPYRIGHT HOLDERS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH + DAMAGE. + +Portions of the code in Chameleon are supplied under the BSD license +(headers within individiual files indicate that these portions are +licensed under this license): + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + 3. The name of the author may not be used to endorse or promote + products derived from this software without specific prior + written permission. + + THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS + OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER + IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +Portions of the code in Chameleon are supplied under the Python +License (headers within individiual files indicate that these portions +are licensed under this license): + + PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 + -------------------------------------------- + + 1. This LICENSE AGREEMENT is between the Python Software Foundation + ("PSF"), and the Individual or Organization ("Licensee") accessing and + otherwise using this software ("Python") in source or binary form and + its associated documentation. + + 2. 
Subject to the terms and conditions of this License Agreement, PSF + hereby grants Licensee a nonexclusive, royalty-free, world-wide + license to reproduce, analyze, test, perform and/or display publicly, + prepare derivative works, distribute, and otherwise use Python + alone or in any derivative version, provided, however, that PSF's + License Agreement and PSF's notice of copyright, i.e., "Copyright (c) + 2001, 2002, 2003, 2004 Python Software Foundation; All Rights Reserved" + are retained in Python alone or in any derivative version prepared + by Licensee. + + 3. In the event Licensee prepares a derivative work that is based on + or incorporates Python or any part thereof, and wants to make + the derivative work available to others as provided herein, then + Licensee hereby agrees to include in any such work a brief summary of + the changes made to Python. + + 4. PSF is making Python available to Licensee on an "AS IS" + basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR + IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND + DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS + FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT + INFRINGE ANY THIRD PARTY RIGHTS. + + 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON + FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS + A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, + OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + + 6. This License Agreement will automatically terminate upon a material + breach of its terms and conditions. + + 7. Nothing in this License Agreement shall be deemed to create any + relationship of agency, partnership, or joint venture between PSF and + Licensee. This License Agreement does not grant permission to use PSF + trademarks or trade name in a trademark sense to endorse or promote + products or services of Licensee, or any third party. + + 8. By copying, installing or otherwise using Python, Licensee + agrees to be bound by the terms and conditions of this License + Agreement. diff --git a/lib/Chameleon-2.22/MANIFEST.in b/lib/Chameleon-2.22/MANIFEST.in new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/MANIFEST.in @@ -0,0 +1,2 @@ +recursive-include src/chameleon/tests/inputs * +recursive-include src/chameleon/tests/outputs * diff --git a/lib/Chameleon-2.22/Makefile b/lib/Chameleon-2.22/Makefile new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/Makefile @@ -0,0 +1,89 @@ +# Makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = docs +SPHINXBUILD = sphinx-build +PAPER = +BUILDDIR = _build + +# Internal variables. 
+PAPEROPT_a4 = -D latex_paper_size=a4 +PAPEROPT_letter = -D latex_paper_size=letter +ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) + +.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest + +help: + @echo "Please use \`make ' where is one of" + @echo " html to make standalone HTML files" + @echo " dirhtml to make HTML files named index.html in directories" + @echo " pickle to make pickle files" + @echo " json to make JSON files" + @echo " htmlhelp to make HTML files and a HTML help project" + @echo " qthelp to make HTML files and a qthelp project" + @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" + @echo " changes to make an overview of all changed/added/deprecated items" + @echo " linkcheck to check all external links for integrity" + @echo " doctest to run all doctests embedded in the documentation (if enabled)" + +clean: + -rm -rf $(BUILDDIR)/* + +html: + $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." + +dirhtml: + $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." + +pickle: + $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle + @echo + @echo "Build finished; now you can process the pickle files." + +json: + $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json + @echo + @echo "Build finished; now you can process the JSON files." + +htmlhelp: + $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp + @echo + @echo "Build finished; now you can run HTML Help Workshop with the" \ + ".hhp project file in $(BUILDDIR)/htmlhelp." + +qthelp: + $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp + @echo + @echo "Build finished; now you can run "qcollectiongenerator" with the" \ + ".qhcp project file in $(BUILDDIR)/qthelp, like this:" + @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Chameleon.qhcp" + @echo "To view the help file:" + @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Chameleon.qhc" + +latex: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo + @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." + @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ + "run these through (pdf)latex." + +changes: + $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes + @echo + @echo "The overview file is in $(BUILDDIR)/changes." + +linkcheck: + $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck + @echo + @echo "Link check complete; look for any errors in the above output " \ + "or in $(BUILDDIR)/linkcheck/output.txt." + +doctest: + $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest + @echo "Testing of doctests in the sources finished, look at the " \ + "results in $(BUILDDIR)/doctest/output.txt." diff --git a/lib/Chameleon-2.22/PKG-INFO b/lib/Chameleon-2.22/PKG-INFO new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/PKG-INFO @@ -0,0 +1,1353 @@ +Metadata-Version: 1.1 +Name: Chameleon +Version: 2.22 +Summary: Fast HTML/XML Template Compiler. +Home-page: http://www.pagetemplates.org/ +Author: Malthe Borch +Author-email: mborch at gmail.com +License: BSD-like (http://repoze.org/license.html) +Description: Overview + ======== + + Chameleon is an HTML/XML template engine for `Python + `_. It uses the *page templates* language. 
+ + You can use it in any Python web application with just about any + version of Python (2.5 and up, including 3.x and `pypy + `_). + + Visit the `website `_ for more information + or the `documentation `_. + + License and Copyright + --------------------- + + This software is made available as-is under a BSD-like license [1]_ + (see included copyright notice). + + + Notes + ----- + + .. [1] This software is licensed under the `Repoze + `_ license. + + + Changes + ======= + + 2.22 (2015-02-06) + ----------------- + + - Fix brown bag release. + + + 2.21 (2015-02-06) + ----------------- + + - Added ``RenderError`` exception which indicates that an error + occurred during the evaluation of an expression. + + - Clean up ``TemplateError`` exception implementation. + + + 2.20 (2015-01-12) + ----------------- + + - Pass ``search_path`` to template class when loaded using + ``TemplateLoader`` (or one of the derived classes). + [faassen] + + + 2.19 (2015-01-06) + ----------------- + + - Fix logging deprecation. + + - Fix environment-based configuration logging error. + + + 2.18 (2014-11-03) + ----------------- + + - Fix minor compilation error. + + + 2.17 (2014-11-03) + ----------------- + + - Add support for ``i18n:context``. + [wiggy] + + - Add missing 'parity' repeat property. + [voxspox] + + - Don't modify environment when getting variables from it. + [fschulze] + + + 2.16 (2014-05-06) + ----------------- + + - If a repeat expression evaluates to ``None`` then it is now + equivalent to an empty set. + + This changes a behavior introduced in 2.14. + + This fixes issue #172. + + - Remove fossil test dependency on deprecated ``distribute``. + + - Add explicit support / testing for Python 3.3 / 3.4. + + - Drop explicit support for Python 2.5 (out of maintenance, and no longer + supported by ``tox`` or ``Travis-CI``). + + + 2.15 (2014-03-11) + ----------------- + + - Add Support for Python 3.4's ``NameConstant``. + [brakhane] + + + 2.14 (2013-11-28) + ----------------- + + - Element repetition using the ``TAL`` namespace no longer includes + whitespace. This fixes issue #110. + + - Use absolute import for ``chameleon.interfaces`` module. This fixes + issue #161. + + + 2.13-1 (2013-10-24) + ------------------- + + - Fixing brown bag release. + + 2.13 (2013-10-21) + ----------------- + + Bugfixes: + + - The template cache mechanism now includes additional configuration + settings as part of the cache key such as ``strict`` and + ``trim_attribute_space``. + [ossmkitty] + + - Fix cache issue where sometimes cached templates would not load + correctly. + [ossmkitty] + + - In debug-mode, correctly remove temporary files when the module + loader is garbage-collected (on ``__del__``). + [graffic] + + - Fix error message when duplicate i18n:name directives are used in a + translation. + + - Using the three-argument form of ``getattr`` on a + ``chameleon.tal.RepeatDict`` no longer raises ``KeyError``, + letting the default provided to ``getattr`` be used. This fixes + attempting to adapt a ``RepeatDict`` to a Zope interface under + PyPy. + + 2.12 (2013-03-26) + ----------------- + + Changes: + + - When a ``tal:case`` condition succeeds, no other case now will. + + Bugfixes: + + - Implicit translation now correctly extracts and normalizes complete + sentences, instead of words. + [witsch] + + - The ``default`` symbol in a ``tal:case`` condition now allows the + element only if no other case succeeds. 
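The ``tal:case`` behaviour noted in the 2.12 entry above (the ``default`` symbol renders only when no other case succeeds) can be illustrated with a short sketch. This is an editorial illustration rather than part of the packaged change log, and it assumes the companion ``tal:switch`` directive that Chameleon pairs with ``tal:case``::

    from chameleon import PageTemplate

    # Hypothetical template: with tal:switch/tal:case, the ``default``
    # case is rendered only if no other case has succeeded.
    template = PageTemplate("""
    <div tal:switch="fruit">
      <span tal:case="'apple'">An apple</span>
      <span tal:case="'banana'">A banana</span>
      <span tal:case="default">Something else entirely</span>
    </div>
    """)

    print(template.render(fruit="banana"))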
+ + + 2.11 (2012-11-15) + ----------------- + + Bugfixes: + + - An issue was resolved where a METAL statement was combined with a + ``tal:on-error`` handler. + + - Fix minor parser issue with incorrectly formatted processing + instructions. + + - Provide proper error handling for Python inline code blocks. + + Features: + + - The simple translation function now supports the + ``translationstring`` interface. + + Optimizations: + + - Minor optimization which correctly detects when an element has no + attributes. + + + 2.10 (2012-10-12) + ----------------- + + Deprecations: + + - The ``fast_translate`` function has been deprecated. Instead, the + default translation function is now always a function that simply + interpolates the mapping onto the message default or id. + + The motivation is that since version 2.9, the ``context`` argument + is non-trivial: the ``econtext`` mapping is passed. This breaks an + expectation on the Zope platform that the ``context`` parameter is + the HTTP request. Previously, with Chameleon this parameter was + simply not provided and so that did not cause issues as such. + + - The ``ast24`` module has been renamed to ``ast25``. This should help + clear up any confusion that Chameleon 2.x might be support a Python + interpreter less than version 2.5 (it does not). + + Features: + + - The ``ProxyExpr`` expression class (and hence the ``load:`` + expression type) is now a TALES-expression. In practical terms, this + means that the expression type (which computes a string result using + the standard ``"${...}"`` interpolation syntax and proxies the + result through a function) now supports fallback using the pipe + operator (``"|"``). This fixes issue #128. + + - An attempt to interpolate using the empty string as the expression + (i.e. ``${}``) now does nothing: the string ``${}`` is simply output + as is. + + - Added support for adding, modifying, and removing attributes using a + dictionary expression in ``tal:attributes`` (analogous to Genshi's + ``py:attrs`` directive):: + +
          + + In the example above, ``name`` is an identifier, while ``value`` and + ``attrs`` are Python expressions. However, ``attrs`` must evaluate + to a Python dictionary object (more concisely, the value must + implement the dictionary API-methods ``update()`` and ``items()``). + + Optimizations: + + - In order to cut down on the size of the compiled function objects, + some conversion and quoting statements have been put into + functions. In one measurement, the reduction was 35%. The benchmark + suite does *not* report of an increased render time (actually + slightly decreased). + + Bugfixes: + + - An exception is now raised if a trivial string is passed for + ``metal:fill-slot``. This fixes issue #89. + + - An empty string is now never translated. Not really a bug, but it's + been reported in as an issue (#92) because some translation + frameworks handle this case incorrectly. + + - The template module loader (file cache) now correctly encodes + generated template source code as UTF-8. This fixes issue #125. + + - Fixed issue where a closure might be reused unsafely in nested + template rendering. + + - Fixed markup class ``__repr__`` method. This fixes issue #124. + + - Added missing return statement to fix printing the non-abbreviated + filename in case of an exception. + [tomo] + + 2.9.2 (2012-06-06) + ------------------ + + Bugfixes: + + - Fixed a PyPy incompatibility. + + - Fixed issue #109 which caused testing failures on some platforms. + + 2.9.1 (2012-06-01) + ------------------ + + Bugfixes: + + - Fixed issue #103. The ``tal:on-error`` statement now always adds an + explicit end-tag to the element, even with a substitution content of + nothing. + + - Fixed issue #113. The ``tal:on-error`` statement now works correctly + also for dynamic attributes. That is, the fallback tag now includes + only static attributes. + + - Fixed name error which prevented the benchmark from running + correctly. + + Compatibility: + + - Fixed deprecation warning on Python 3 for zope interface implements + declaration. This fixes issue #116. + + 2.9.0 (2012-05-31) + ------------------ + + Features: + + - The translation function now gets the ``econtext`` argument as the + value for ``context``. Note that historically, this was usually an + HTTP request which might provide language negotiation data through a + dictionary interface. + [alvinyue] + + Bugfixes: + + - Fixed import alias issue which would lead to a syntax error in + generated Python code. Fixes issue #114. + + 2.8.5 (2012-05-02) + ------------------ + + Bugfixes: + + - Fixed minor installation issues on Python 2.5 and 3. + [ppaez] + + - Ensure output is unicode even when trivial (an empty string). + + 2.8.4 (2012-04-18) + ------------------ + + Features: + + - In exception output, long filenames are now truncated to 60 + characters of output, preventing line wrap which makes it difficult + to scan the exception output. + + Bugfixes: + + - Include filename and location in exception output for exceptions + raised during compilation. + + - If a trivial translation substitution variable is given (i.e. an + empty string), simply ignore it. This fixes issue #106. + + 2.8.3 (2012-04-16) + ------------------ + + Features: + + - Log template source on debug-level before cooking. + + - The `target_language` argument, if given, is now available as a + variable in templates. + + 2.8.2 (2012-03-30) + ------------------ + + Features: + + - Temporary caches used in debug mode are cleaned up eagerly, rather + than waiting for process termination. 
+ [mitchellrj] + + Bugfixes: + + - The `index`, `start` and `end` methods on the TAL repeat object are + now callable. This fixes an incompatibility with ZPT. + + - The loader now correctly handles absolute paths on Windows. + [rdale] + + 2.8.1 (2012-03-29) + ------------------ + + Features: + + - The exception formatter now lists errors in 'wrapping order'. This + means that the innermost, and presumably most relevant exception is + shown last. + + Bugfixes: + + - The exception formatter now correctly recognizes nested errors and + does not rewrap the dynamically generated exception class. + + - The exception formatter now correctly sets the ``__module__`` + attribute to that of the original exception class. + + 2.8.0 (2012-02-29) + ------------------ + + Features: + + - Added support for code blocks using the `` processing + instruction syntax. + + The scope is name assignments is up until the nearest macro + definition, or the template itself if macros are not used. + + Bugfixes: + + - Fall back to the exception class' ``__new__`` method to safely + create an exception object that is not implemented in Python. + + - The exception formatter now keeps track of already formatted + exceptions, and ignores them from further output. + + 2.7.4 (2012-02-27) + ------------------ + + - The error handler now invokes the ``__init__`` method of + ``BaseException`` instead of the possibly overriden method (which + may take required arguments). This fixes issue #97. + [j23d, malthe] + + 2.7.3 (2012-01-16) + ------------------ + + Bugfixes: + + - The trim whitespace option now correctly trims actual whitespace to + a single character, appearing either to the left or to the right of + an element prefix or suffix string. + + 2.7.2 (2012-01-08) + ------------------ + + Features: + + - Added option ``trim_attribute_space`` that decides whether attribute + whitespace is stripped (at most down to a single space). This option + exists to provide compatibility with the reference + implementation. Fixes issue #85. + + Bugfixes: + + - Ignore unhashable builtins when generating a reverse builtin + map to quickly look up a builtin value. + [malthe] + + - Apply translation mapping even when a translation function is not + available. This fixes issue #83. + [malthe] + + - Fixed issue #80. The translation domain for a slot is defined by the + source document, i.e. the template providing the content for a slot + whether it be the default or provided through ``metal:fill-slot``. + [jcbrand] + + - In certain circumstances, a Unicode non-breaking space character would cause + a define clause to fail to parse. + + 2.7.1 (2011-12-29) + ------------------ + + Features: + + - Enable expression interpolation in CDATA. + + - The page template class now implements dictionary access to macros:: + + template[name] + + This is a short-hand for:: + + template.macros[name] + + Bugfixes: + + - An invalid define clause would be silently ignored; we now raise a + language error exception. This fixes issue #79. + + - Fixed regression where ``${...}`` interpolation expressions could + not span multiple lines. This fixes issue #77. + + 2.7.0 (2011-12-13) + ------------------ + + Features: + + - The ``load:`` expression now derives from the string expression such + that the ``${...}`` operator can be used for expression + interpolation. 
+ + - The ``load:`` expression now accepts asset specs; these are resolved + by the ``pkg_resources.resource_filename`` function:: + + : + + An example from the test suite:: + + chameleon:tests/inputs/hello_world.pt + + Bugfixes: + + - If an attribute name for translation was not a valid Python + identifier, the compiler would generate invalid code. This has been + fixed, and the compiler now also throws an exception if an attribute + specification contains a comma. (Note that the only valid separator + character is the semicolon, when specifying attributes for + translation via the ``i18n:translate`` statement). This addresses + issue #76. + + 2.6.2 (2011-12-08) + ------------------ + + Bugfixes: + + - Fixed issue where ``tal:on-error`` would not respect + ``tal:omit-tag`` or namespace elements which are omitted by default + (such as ````). + + - Fixed issue where ``macros`` attribute would not be available on + file-based templates due to incorrect initialization. + + - The ``TryExcept`` and ``TryFinally`` AST nodes are not available on + Python 3.3. These have been aliased to ``Try``. This fixes issue + #75. + + Features: + + - The TAL repeat item now makes a security declaration that grants + access to unprotected subobjects on the Zope 2 platform:: + + __allow_access_to_unprotected_subobjects__ = True + + This is required for legacy compatibility and does not affect other + environments. + + - The template object now has a method ``write(body)`` which + explicitly decodes and cooks a string input. + + - Added configuration option ``loader_class`` which sets the class + used to create the template loader object. + + The class (essentially a callable) is created at template + construction time. + + 2.6.1 (2011-11-30) + ------------------ + + Bugfixes: + + - Decode HTML entities in expression interpolation strings. This fixes + issue #74. + + - Allow ``xml`` and ``xmlns`` attributes on TAL, I18N and METAL + namespace elements. This fixes issue #73. + + 2.6.0 (2011-11-24) + ------------------ + + Features: + + - Added support for implicit translation: + + The ``implicit_i18n_translate`` option enables implicit translation + of text. The ``implicit_i18n_attributes`` enables implicit + translation of attributes. The latter must be a set and for an + attribute to be implicitly translated, its lowercase string value + must be included in the set. + + - Added option ``strict`` (enabled by default) which decides whether + expressions are required to be valid at compile time. That is, if + not set, an exception is only raised for an invalid expression at + evaluation time. + + - An expression error now results in an exception only if the + expression is attempted evaluated during a rendering. + + - Added a configuration option ``prepend_relative_search_path`` which + decides whether the path relative to a file-based template is + prepended to the load search path. The default is ``True``. + + - Added a configuration option ``search_path`` to the file-based + template class, which adds additional paths to the template load + instance bound to the ``load:`` expression. The option takes a + string path or an iterable yielding string paths. The default value + is the empty set. + + Bugfixes: + + - Exception instances now support pickle/unpickle. + + - An attributes in i18n:attributes no longer needs to match an + existing or dynamic attribute in order to appear in the + element. This fixes issue #66. 
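The ``search_path`` and ``strict`` options described in the 2.6.0 entry above are keyword arguments to the file-based template class. A minimal sketch, not taken from the diff; the file name and directory are made-up examples::

    from chameleon import PageTemplateFile

    # "main.pt" and the search path are hypothetical.  ``search_path``
    # adds directories consulted by the ``load:`` expression, while
    # ``strict=False`` defers expression errors to evaluation time.
    template = PageTemplateFile(
        "main.pt",
        search_path=["/srv/app/templates"],
        strict=False,
    )

    print(template.render(title="Hello"))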
+ + 2.5.3 (2011-10-23) + ------------------ + + Bugfixes: + + - Fixed an issue where a nested macro slot definition would fail even + though there existed a parent macro definition. This fixes issue + #69. + + 2.5.2 (2011-10-12) + ------------------ + + Bugfixes: + + - Fixed an issue where technically invalid input would result in a + compiler error. + + Features: + + - The markup class now inherits from the unicode string type such that + it's compatible with the string interface. + + 2.5.1 (2011-09-29) + ------------------ + + Bugfixes: + + - The symbol names "convert", "decode" and "translate" are now no + longer set as read-only *compiler internals*. This fixes issue #65. + + - Fixed an issue where a macro extension chain nested two levels (a + template uses a macro that extends a macro) would lose the middle + slot definitions if slots were defined nested. + + The compiler now throws an error if a nested slot definition is used + outside a macro extension context. + + 2.5.0 (2011-09-23) + ------------------ + + Features: + + - An expression type ``structure:`` is now available which wraps the + expression result as *structure* such that it is not escaped on + insertion, e.g.:: + +
          + ${structure: context.body} +
          + + This also means that the ``structure`` keyword for ``tal:content`` + and ``tal:replace`` now has an alternative spelling via the + expression type ``structure:``. + + - The string-based template constructor now accepts encoded input. + + 2.4.6 (2011-09-23) + ------------------ + + Bugfixes: + + - The ``tal:on-error`` statement should catch all exceptions. + + - Fixed issue that would prevent escaping of interpolation expression + values appearing in text. + + 2.4.5 (2011-09-21) + ------------------ + + Bugfixes: + + - The ``tal:on-error`` handler should have a ``error`` variable + defined that has the value of the exception thrown. + + - The ``tal:on-error`` statement is a substitution statement and + should support the "text" and "structure" insertion methods. + + 2.4.4 (2011-09-15) + ------------------ + + Bugfixes: + + - An encoding specified in the XML document preamble is now read and + used to decode the template input to unicode. This fixes issue #55. + + - Encoded expression input on Python 3 is now correctly + decoded. Previously, the string representation output would be + included instead of an actually decoded string. + + - Expression result conversion steps are now correctly included in + error handling such that the exception output points to the + expression location. + + 2.4.3 (2011-09-13) + ------------------ + + Features: + + - When an encoding is provided, pass the 'ignore' flag to avoid + decoding issues with bad input. + + Bugfixes: + + - Fixed pypy compatibility issue (introduced in previous release). + + 2.4.2 (2011-09-13) + ------------------ + + Bugfixes: + + - Fixed an issue in the compiler where an internal variable (such as a + translation default value) would be cached, resulting in variable + scope corruption (see issue #49). + + 2.4.1 (2011-09-08) + ------------------ + + Bugfixes: + + - Fixed an issue where a default value for an attribute would + sometimes spill over into another attribute. + + - Fixed issue where the use of the ``default`` name in an attribute + interpolation expression would print the attribute value. This is + unexpected, because it's an expression, not a static text suitable + for output. An attribute value of ``default`` now correctly drops + the attribute. + + 2.4.0 (2011-08-22) + ------------------ + + Features: + + - Added an option ``boolean_attributes`` to evaluate and render a + provided set of attributes using a boolean logic: if the attribute + is a true value, the value will be the attribute name, otherwise the + attribute is dropped. + + In the reference implementation, the following attributes are + configured as boolean values when the template is rendered in + HTML-mode:: + + "compact", "nowrap", "ismap", "declare", "noshade", + "checked", "disabled", "readonly", "multiple", "selected", + "noresize", "defer" + + Note that in Chameleon, these attributes must be manually provided. + + Bugfixes: + + - The carriage return character (used on Windows platforms) would + incorrectly be included in Python comments. + + It is now replaced with a line break. + + This fixes issue #44. + + 2.3.8 (2011-08-19) + ------------------ + + - Fixed import error that affected Python 2.5 only. + + 2.3.7 (2011-08-19) + ------------------ + + Features: + + - Added an option ``literal_false`` that disables the default behavior + of dropping an attribute for a value of ``False`` (in addition to + ``None``). This modified behavior is the behavior exhibited in + reference implementation. 
+ + Bugfixes: + + - Undo attribute special HTML attribute behavior (see previous + release). + + This turned out not to be a compatible behavior; rather, boolean + values should simply be coerced to a string. + + Meanwhile, the reference implementation does support an HTML mode in + which the special attribute behavior is exhibited. + + We do not currently support this mode. + + 2.3.6 (2011-08-18) + ------------------ + + Features: + + - Certain HTML attribute names now have a special behavior for a + attribute value of ``True`` (or ``default`` if no default is + defined). For these attributes, this return value will result in the + name being printed as the value:: + + + + will be rendered as:: + + + + This behavior is compatible with the reference implementation. + + 2.3.5 (2011-08-18) + ------------------ + + Features: + + - Added support for the set operator (``{item, item, ...}``). + + Bugfixes: + + - If macro is defined on the same element as a translation name, this + no longer results in a "translation name not allowed outside + translation" error. This fixes issue #43. + + - Attribute fallback to dictionary lookup now works on multiple items + (e.g. ``d1.d2.d2``). This fixes issue #42. + + 2.3.4 (2011-08-16) + ------------------ + + Features: + + - When inserting content in either attributes or text, a value of + ``True`` (like ``False`` and ``None``) will result in no + action. + + - Use statically assigned variables for ``"attrs"`` and + ``"default"``. This change yields a performance improvement of + 15-20%. + + - The template loader class now accepts an optional argument + ``default_extension`` which accepts a filename extension which will + be appended to the filename if there's not already an extension. + + Bugfixes: + + - The default symbol is now ``True`` for an attribute if the attribute + default is not provided. Note that the result is that the attribute + is dropped. This fixes issue #41. + + - Fixed an issue where assignment to a variable ``"type"`` would + fail. This fixes issue #40. + + - Fixed an issue where an (unsuccesful) assignment for a repeat loop + to a compiler internal name would not result in an error. + + - If the translation function returns the identical object, manually + coerce it to string. This fixes a compatibility issue with + translation functions which do not convert non-string objects to a + string value, but simply return them unchanged. + + 2.3.3 (2011-08-15) + ------------------ + + Features: + + - The ``load:`` expression now passes the initial keyword arguments to + its template loader (e.g. ``auto_reload`` and ``encoding``). + + - In the exception output, string variable values are now limited to a + limited output of characters, single line only. + + Bugfixes: + + - Fixed horizontal alignment of exception location info + (i.e. 'String:', 'Filename:' and 'Location:') such that they match + the template exception formatter. + + 2.3.2 (2011-08-11) + ------------------ + + Bugfixes: + + - Fixed issue where i18n:domain would not be inherited through macros + and slots. This fixes issue #37. + + 2.3.1 (2011-08-11) + ------------------ + + Features: + + - The ``Builtin`` node type may now be used to represent any Python + local or global name. This allows expression compilers to refer to + e.g. ``get`` or ``getitem``, or to explicit require a builtin object + such as one from the ``extra_builtins`` dictionary. + + Bugfixes: + + - Builtins which are not explicitly disallowed may now be redefined + and used as variables (e.g. ``nothing``). 
+ + - Fixed compiler issue with circular node annotation loop. + + 2.3 (2011-08-10) + ---------------- + + Features: + + - Added support for the following syntax to disable inline evaluation + in a comment: + + + + Note that the initial question mark character (?) will be omitted + from output. + + - The parser now accepts '<' and '>' in attributes. Note that this is + invalid markup. Previously, the '<' would not be accepted as a valid + attribute value, but this would result in an 'unexpected end tag' + error elsewhere. This fixes issue #38. + + - The expression compiler now provides methods ``assign_text`` and + ``assign_value`` such that a template engine might configure this + value conversion to support e.g. encoded strings. + + Note that currently, the only client for the ``assign_text`` method + is the string expression type. + + - Enable template loader for string-based template classes. Note that + the ``filename`` keyword argument may be provided on initialization + to identify the template source by filename. This fixes issue #36. + + - Added ``extra_builtins`` option to the page template class. These + builtins are added to the default builtins dictionary at cook time + and may be provided at initialization using the ``extra_builtins`` + keyword argument. + + Bugfixes: + + - If a translation domain is set for a fill slot, use this setting + instead of the macro template domain. + + - The Python expression compiler now correctly decodes HTML entities + ``'gt'`` and ``'lt'``. This fixes issue #32. + + - The string expression compiler now correctly handles encoded text + (when support for encoded strings is enabled). This fixes issue #35. + + - Fixed an issue where setting the ``filename`` attribute on a + file-based template would not automatically cause an invalidation. + + - Exceptions raised by Chameleon can now be copied via + ``copy.copy``. This fixes issue #36. + [leorochael] + + - If copying the exception fails in the exception handler, simply + re-raise the original exception and log a warning. + + 2.2 (2011-07-28) + ---------------- + + Features: + + - Added new expression type ``load:`` that allows loading a + template. Both relative and absolute paths are supported. If the + path given is relative, then it will be resolved with respect to the + directory of the template. + + - Added support for dynamic evaluation of expressions. + + Note that this is to support legacy applications. It is not + currently wired into the provided template classes. + + - Template classes now have a ``builtins`` attribute which may be used + to define built-in variables always available in the template + variable scope. + + Incompatibilities: + + - The file-based template class no longer accepts a parameter + ``loader``. This parameter would be used to load a template from a + relative path, using a ``find(filename)`` method. This was however, + undocumented, and probably not very useful since we have the + ``TemplateLoader`` mechanism already. + + - The compiled template module now contains an ``initialize`` function + which takes values that map to the template builtins. The return + value of this function is a dictionary that contains the render + functions. + + Bugfixes: + + - The file-based template class no longer verifies the existance of a + template file (using ``os.lstat``). This now happens implicitly if + eager parsing is enabled, or otherwise when first needed (e.g. at + render time). 
+ + This is classified as a bug fix because the previous behavior was + probably not what you'd expect, especially if an application + initializes a lot of templates without needing to render them + immediately. + + 2.1.1 (2011-07-28) + ------------------ + + Features: + + - Improved exception display. The expression string is now shown in + the context of the original source (if available) with a marker + string indicating the location of the expression in the template + source. + + Bugfixes: + + - The ``structure`` insertion mode now correctly decodes entities for + any expression type (including ``string:``). This fixes issue #30. + + - Don't show internal variables in the exception formatter variable + listing. + + 2.1 (2011-07-25) + ---------------- + + Features: + + - Expression interpolation (using the ``${...}`` operator and + previously also ``$identifier``) now requires braces everywhere + except inside the ``string:`` expression type. + + This change is motivated by a number of legacy templates in which + the interpolation format without braces ``$identifier`` appears as + text. + + 2.0.2 (2011-07-25) + ------------------ + + Bugfixes: + + - Don't use dynamic variable scope for lambda-scoped variables (#27). + + - Avoid duplication of exception class and message in traceback. + + - Fixed issue where a ``metal:fill-slot`` would be ignored if a macro + was set to be used on the same element (#16). + + 2.0.1 (2011-07-23) + ------------------ + + Bugfixes: + + - Fixed issue where global variable definition from macro slots would + fail (they would instead be local). This also affects error + reporting from inside slots because this would be recorded + internally as a global. + + - Fixed issue with template cache digest (used for filenames); modules + are now invalidated whenever any changes are made to the + distribution set available (packages on ``sys.path``). + + - Fixed exception handler to better let exceptions propagate through + the renderer. + + - The disk-based module compiler now mangles template source filenames + such that the output Python module is valid and at root level (dots + and hyphens are replaced by an underscore). This fixes issue #17. + + - Fixed translations (i18n) on Python 2.5. + + 2.0 (2011-07-14) + ---------------- + + - Point release. + + 2.0-rc14 (2011-07-13) + --------------------- + + Bugfixes: + + - The tab character (``\t``) is now parsed correctly when used inside + tags. + + Features: + + - The ``RepeatDict`` class now works as a proxy behind a seperate + dictionary instance. + + - Added template constructor option ``keep_body`` which is a flag + (also available as a class attribute) that controls whether to save + the template body input in the ``body`` attribute. + + This is disabled by default, unless debug-mode is enabled. + + - The page template loader class now accepts an optional ``formats`` + argument which can be used to select an alternative template class. + + 2.0-rc13 (2011-07-07) + --------------------- + + Bugfixes: + + - The backslash character (followed by optional whitespace and a line + break) was not correctly interpreted as a continuation for Python + expressions. + + Features: + + - The Python expression implementation is now more flexible for + external subclassing via a new ``parse`` method. + + 2.0-rc12 (2011-07-04) + --------------------- + + Bugfixes: + + - Initial keyword arguments passed to a template now no longer "leak" + into the template variable space after a macro call. 
+ + - An unexpected end tag is now an unrecoverable error. + + Features: + + - Improve exception output. + + 2.0-rc11 (2011-05-26) + --------------------- + + Bugfixes: + + - Fixed issue where variable names that begin with an underscore were + seemingly allowed, but their use resulted in a compiler error. + + Features: + + - Template variable names are now allowed to be prefixed with a single + underscore, but not two or more (reserved for internal use). + + Examples of valid names:: + + item + ITEM + _item + camelCase + underscore_delimited + help + + - Added support for Genshi's comment "drop" syntax:: + + + + Note the additional exclamation (!) character. + + This fixes addresses issue #10. + + 2.0-rc10 (2011-05-24) + --------------------- + + Bugfixes: + + - The ``tal:attributes`` statement now correctly operates + case-insensitive. The attribute name given in the statement will + replace an existing attribute with the same name, without respect to + case. + + Features: + + - Added ``meta:interpolation`` statement to control expression + interpolation setting. + + Strings that disable the setting: ``"off"`` and ``"false"``. + Strings that enable the setting: ``"on"`` and ``"true"``. + + - Expression interpolation now works inside XML comments. + + 2.0-rc9 (2011-05-05) + -------------------- + + Features: + + - Better debugging support for string decode and conversion. If a + naive join fails, each element in the output will now be attempted + coerced to unicode to try and trigger the failure near to the bad + string. + + 2.0-rc8 (2011-04-11) + -------------------- + + Bugfixes: + + - If a macro defines two slots with the same name, a caller will now + fill both with a single usage. + + - If a valid of ``None`` is provided as the translation function + argument, we now fall back to the class default. + + 2.0-rc7 (2011-03-29) + -------------------- + + Bugfixes: + + - Fixed issue with Python 2.5 compatibility AST. This affected at + least PyPy 1.4. + + Features: + + - The ``auto_reload`` setting now defaults to the class value; the + base template class gives a default value of + ``chameleon.config.AUTO_RELOAD``. This change allows a subclass to + provide a custom default value (such as an application-specific + debug mode setting). + + + 2.0-rc6 (2011-03-19) + -------------------- + + Features: + + - Added support for ``target_language`` keyword argument to render + method. If provided, the argument will be curried onto the + translation function. + + Bugfixes: + + - The HTML entities 'lt', 'gt' and 'quot' appearing inside content + subtition expressions are now translated into their native character + values. This fixes an issue where you could not dynamically create + elements using the ``structure`` (which is possible in ZPT). The + need to create such structure stems from the lack of an expression + interpolation operator in ZPT. + + - Fixed duplicate file pointer issue with test suite (affected Windows + platforms only). This fixes issue #9. + [oliora] + + - Use already open file using ``os.fdopen`` when trying to write out + the module source. This fixes LP #731803. + + + 2.0-rc5 (2011-03-07) + -------------------- + + Bugfixes: + + - Fixed a number of issues concerning the escaping of attribute + values: + + 1) Static attribute values are now included as they appear in the + source. + + This means that invalid attribute values such as ``"true && + false"`` are now left alone. 
It's not the job of the template + engine to correct such markup, at least not in the default mode + of operation. + + 2) The string expression compiler no longer unescapes + values. Instead, this is left to each expression + compiler. Currently only the Python expression compiler unescapes + its input. + + 3) The dynamic escape code sequence now correctly only replaces + ampersands that are part of an HTML escape format. + + Imports: + + - The page template classes and the loader class can now be imported + directly from the ``chameleon`` module. + + Features: + + - If a custom template loader is not provided, relative paths are now + resolved using ``os.abspath`` (i.e. to the current working + directory). + + - Absolute paths are normalized using ``os.path.normpath`` and + ``os.path.expanduser``. This ensures that all paths are kept in + their "canonical" form. + + + 2.0-rc4 (2011-03-03) + -------------------- + + Bugfixes: + + - Fixed an issue where the output of an end-to-end string expression + would raise an exception if the expression evaluated to ``None`` (it + should simply output nothing). + + - The ``convert`` function (which is configurable on the template + class level) now defaults to the ``translate`` function (at + run-time). + + This fixes an issue where message objects were not translated (and + thus converted to a string) using the a provided ``translate`` + function. + + - Fixed string interpolation issue where an expression immediately + succeeded by a right curly bracket would not parse. + + This fixes issue #5. + + - Fixed error where ``tal:condition`` would be evaluated after + ``tal:repeat``. + + Features: + + - Python expression is now a TALES expression. That means that the + pipe operator can be used to chain two or more expressions in a + try-except sequence. + + This behavior was ported from the 1.x series. Note that while it's + still possible to use the pipe character ("|") in an expression, it + must now be escaped. + + - The template cache can now be shared by multiple processes. + + + 2.0-rc3 (2011-03-02) + -------------------- + + Bugfixes: + + - Fixed ``atexit`` handler. + + This fixes issue #3. + + - If a cache directory is specified, it will now be used even when not + in debug mode. + + - Allow "comment" attribute in the TAL namespace. + + This fixes an issue in the sense that the reference engine allows + any attribute within the TAL namespace. However, only "comment" is + in common use. + + - The template constructor now accepts a flag ``debug`` which puts the + template *instance* into debug-mode regardless of the global + setting. + + This fixes issue #1. + + Features: + + - Added exception handler for exceptions raised while evaluating an + expression. + + This handler raises (or attempts to) a new exception of the type + ``RenderError``, with an additional base class of the original + exception class. The string value of the exception is a formatted + error message which includes the expression that caused the + exception. + + If we are unable to create the exception class, the original + exception is re-raised. + + 2.0-rc2 (2011-02-28) + -------------------- + + - Fixed upload issue. + + 2.0-rc1 (2011-02-28) + -------------------- + + - Initial public release. See documentation for what's new in this + series. 
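For orientation, the rendering API that the overview and change log above describe is small; the following is a minimal usage sketch, not part of the packaged metadata::

    from chameleon import PageTemplate

    # ``${...}`` performs expression interpolation and ``tal:repeat``
    # drives element repetition, as described in the overview.
    template = PageTemplate(
        '<ul><li tal:repeat="name names">Hello, ${name}!</li></ul>'
    )

    print(template.render(names=["world", "chameleon"]))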
+ +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 2.6 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3.1 +Classifier: Programming Language :: Python :: 3.2 +Classifier: Programming Language :: Python :: 3.3 +Classifier: Programming Language :: Python :: 3.4 diff --git a/lib/Chameleon-2.22/README.rst b/lib/Chameleon-2.22/README.rst new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/README.rst @@ -0,0 +1,25 @@ +Overview +======== + +Chameleon is an HTML/XML template engine for `Python +`_. It uses the *page templates* language. + +You can use it in any Python web application with just about any +version of Python (2.5 and up, including 3.x and `pypy +`_). + +Visit the `website `_ for more information +or the `documentation `_. + +License and Copyright +--------------------- + +This software is made available as-is under a BSD-like license [1]_ +(see included copyright notice). + + +Notes +----- + +.. [1] This software is licensed under the `Repoze + `_ license. diff --git a/lib/Chameleon-2.22/benchmarks/bm_chameleon.py b/lib/Chameleon-2.22/benchmarks/bm_chameleon.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/benchmarks/bm_chameleon.py @@ -0,0 +1,128 @@ +#!/usr/bin/python2 + +""" +Benchmark for test the performance of Chameleon page template engine. +""" + +__author__ = "mborch at gmail.com (Malthe Borch)" + +# Python imports +import os +import sys +import optparse +import time + +# Local imports +import util + + +def relative(*args): + return os.path.join(os.path.dirname(os.path.abspath(__file__)), *args) + +sys.path.insert(0, relative('..', 'src')) + +# Chameleon imports +from chameleon import PageTemplate + + +LOREM_IPSUM = """Quisque lobortis hendrerit posuere. Curabitur +aliquet consequat sapien molestie pretium. Nunc adipiscing luc +tus mi, viverra porttitor lorem vulputate et. Ut at purus sem, +sed tincidunt ante. Vestibulum ante ipsum primis in faucibus +orci luctus et ultrices posuere cubilia Curae; Praesent pulvinar +sodales justo at congue. Praesent aliquet facilisis nisl a +molestie. Sed tempus nisl ut augue eleifend tincidunt. Sed a +lacinia nulla. Cras tortor est, mollis et consequat at, +vulputate et orci. Nulla sollicitudin""" + +BASE_TEMPLATE = ''' + + + + + +
          ${col}
          + ${alt} +
          + + ${title.strip()} + + +''' + +PAGE_TEMPLATE = ''' + + + +images: + + + + +

          ${lorem}

          +
          + + +''' + +CONTENT_TEMPLATE = ''' + +fun1 +fun2 +fun3 +fun4 +fun5 +fun6 + +

          Lorem ipsum dolor sit amet, consectetur adipiscing elit. +Nam laoreet justo in velit faucibus lobortis. Sed dictum sagittis +volutpat. Sed adipiscing vestibulum consequat. Nullam laoreet, ante +nec pretium varius, libero arcu porttitor orci, id cursus odio nibh +nec leo. Vestibulum dapibus pellentesque purus, sed bibendum tortor +laoreet id. Praesent quis sodales ipsum. Fusce ut ligula sed diam +pretium sagittis vel at ipsum. Nulla sagittis sem quam, et volutpat +velit. Fusce dapibus ligula quis lectus ultricies tempor. Pellente

          + + + + + + + + +''' + + +def test_mako(count): + template = PageTemplate(CONTENT_TEMPLATE) + base = PageTemplate(BASE_TEMPLATE) + page = PageTemplate(PAGE_TEMPLATE) + + table = [xrange(150) for i in xrange(150)] + paragraphs = xrange(50) + title = 'Hello world!' + + times = [] + for i in range(count): + t0 = time.time() + data = template.render( + table=table, paragraphs=paragraphs, + lorem=LOREM_IPSUM, title=title, + img_count=50, + base=base, + page=page, + ) + t1 = time.time() + times.append(t1-t0) + return times + +if __name__ == "__main__": + parser = optparse.OptionParser( + usage="%prog [options]", + description=("Test the performance of Chameleon templates.")) + util.add_standard_options_to(parser) + (options, args) = parser.parse_args() + + util.run_benchmark(options, options.num_runs, test_mako) diff --git a/lib/Chameleon-2.22/benchmarks/bm_mako.py b/lib/Chameleon-2.22/benchmarks/bm_mako.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/benchmarks/bm_mako.py @@ -0,0 +1,153 @@ +#!/usr/bin/python + +""" +Benchmark for test the performance of Mako templates engine. +Includes: + -two template inherences + -HTML escaping, XML escaping, URL escaping, whitespace trimming + -function defitions and calls + -forloops +""" + +__author__ = "virhilo at gmail.com (Lukasz Fidosz)" + +# Python imports +import os +import sys +import optparse +import time + +# Local imports +import util + +def relative(*args): + return os.path.join(os.path.dirname(os.path.abspath(__file__)), *args) + +sys.path.insert(0, relative('..', 'lib')) + +# Mako imports +from mako.template import Template +from mako.lookup import TemplateLookup + + +LOREM_IPSUM = """Quisque lobortis hendrerit posuere. Curabitur +aliquet consequat sapien molestie pretium. Nunc adipiscing luc +tus mi, viverra porttitor lorem vulputate et. Ut at purus sem, +sed tincidunt ante. Vestibulum ante ipsum primis in faucibus +orci luctus et ultrices posuere cubilia Curae; Praesent pulvinar +sodales justo at congue. Praesent aliquet facilisis nisl a +molestie. Sed tempus nisl ut augue eleifend tincidunt. Sed a +lacinia nulla. Cras tortor est, mollis et consequat at, +vulputate et orci. Nulla sollicitudin""" + +BASE_TEMPLATE = """ +<%def name="render_table(table)"> +
          + % for row in table: + + % for col in row: + + % endfor + + % endfor +
          ${col|h}
          + +<%def name="img(src, alt)"> + ${alt} + + + ${title|h,trim} + + ${next.body()} + + +""" + +PAGE_TEMPLATE = """ +<%inherit file="base.mako"/> + + % for row in table: + + % for col in row: + + % endfor + + % endfor +
          ${col}
          +% for nr in xrange(img_count): + ${parent.img('/foo/bar/baz.png', 'no image :o')} +% endfor +${next.body()} +% for nr in paragraphs: +

          ${lorem|x}

          +% endfor +${parent.render_table(table)} +""" + +CONTENT_TEMPLATE = """ +<%inherit file="page.mako"/> +<%def name="fun1()"> + fun1 + +<%def name="fun2()"> + fun2 + +<%def name="fun3()"> + foo3 + +<%def name="fun4()"> + foo4 + +<%def name="fun5()"> + foo5 + +<%def name="fun6()"> + foo6 + +

          Lorem ipsum dolor sit amet, consectetur adipiscing elit. +Nam laoreet justo in velit faucibus lobortis. Sed dictum sagittis +volutpat. Sed adipiscing vestibulum consequat. Nullam laoreet, ante +nec pretium varius, libero arcu porttitor orci, id cursus odio nibh +nec leo. Vestibulum dapibus pellentesque purus, sed bibendum tortor +laoreet id. Praesent quis sodales ipsum. Fusce ut ligula sed diam +pretium sagittis vel at ipsum. Nulla sagittis sem quam, et volutpat +velit. Fusce dapibus ligula quis lectus ultricies tempor. Pellente

          +${fun1()} +${fun2()} +${fun3()} +${fun4()} +${fun5()} +${fun6()} +""" + + +def test_mako(count): + + lookup = TemplateLookup() + lookup.put_string('base.mako', BASE_TEMPLATE) + lookup.put_string('page.mako', PAGE_TEMPLATE) + + template = Template(CONTENT_TEMPLATE, lookup=lookup) + + table = [xrange(150) for i in xrange(150)] + paragraphs = xrange(50) + title = 'Hello world!' + + times = [] + for i in range(count): + t0 = time.time() + data = template.render(table=table, paragraphs=paragraphs, + lorem=LOREM_IPSUM, title=title, + img_count=50) + t1 = time.time() + times.append(t1-t0) + return times + +if __name__ == "__main__": + parser = optparse.OptionParser( + usage="%prog [options]", + description=("Test the performance of Mako templates.")) + util.add_standard_options_to(parser) + (options, args) = parser.parse_args() + + util.run_benchmark(options, options.num_runs, test_mako) diff --git a/lib/Chameleon-2.22/benchmarks/util.py b/lib/Chameleon-2.22/benchmarks/util.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/benchmarks/util.py @@ -0,0 +1,51 @@ +#!/usr/bin/env python + +"""Utility code for benchmark scripts.""" + +__author__ = "collinwinter at google.com (Collin Winter)" + +import math +import operator + + +def run_benchmark(options, num_runs, bench_func, *args): + """Run the given benchmark, print results to stdout. + + Args: + options: optparse.Values instance. + num_runs: number of times to run the benchmark + bench_func: benchmark function. `num_runs, *args` will be passed to this + function. This should return a list of floats (benchmark execution + times). + """ + if options.profile: + import cProfile + prof = cProfile.Profile() + prof.runcall(bench_func, num_runs, *args) + prof.print_stats(sort=options.profile_sort) + else: + data = bench_func(num_runs, *args) + if options.take_geo_mean: + product = reduce(operator.mul, data, 1) + print math.pow(product, 1.0 / len(data)) + else: + for x in data: + print x + + +def add_standard_options_to(parser): + """Add a bunch of common command-line flags to an existing OptionParser. + + This function operates on `parser` in-place. + + Args: + parser: optparse.OptionParser instance. + """ + parser.add_option("-n", action="store", type="int", default=100, + dest="num_runs", help="Number of times to run the test.") + parser.add_option("--profile", action="store_true", + help="Run the benchmark through cProfile.") + parser.add_option("--profile_sort", action="store", type="str", + default="time", help="Column to sort cProfile output by.") + parser.add_option("--take_geo_mean", action="store_true", + help="Return the geo mean, rather than individual data.") diff --git a/lib/Chameleon-2.22/docs/conf.py b/lib/Chameleon-2.22/docs/conf.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/docs/conf.py @@ -0,0 +1,194 @@ +# -*- coding: utf-8 -*- +# +# Chameleon documentation build configuration file, created by +# sphinx-quickstart on Sun Nov 1 16:08:00 2009. +# +# This file is execfile()d with the current directory set to its containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import sys, os + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. 
+#sys.path.append(os.path.abspath('.')) + +# -- General configuration ----------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be extensions +# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. +extensions = ['sphinx.ext.autodoc'] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix of source filenames. +source_suffix = '.rst' + +# The encoding of source files. +#source_encoding = 'utf-8' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = u'Chameleon' +copyright = u'2008-2011 by Malthe Borch and the Repoze Community' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +version = '2.10' +# The full version, including alpha/beta/rc tags. +release = '2.10' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +#language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +#today = '' +# Else, today_fmt is used as the format for a strftime call. +#today_fmt = '%B %d, %Y' + +# List of documents that shouldn't be included in the build. +#unused_docs = [] + +# List of directories, relative to source directory, that shouldn't be searched +# for source files. +exclude_trees = ['_build'] + +# The reST default role (used for this markup: `text`) to use for all documents. +#default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +#add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +#add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +#show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# A list of ignored prefixes for module index sorting. +#modindex_common_prefix = [] + + +# -- Options for HTML output --------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. Major themes that come with +# Sphinx are currently 'default' and 'sphinxdoc'. +html_theme = 'default' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +#html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +#html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +html_title = "Chameleon %s documentation" % version + +# A shorter title for the navigation bar. Default is the same as html_title. +#html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +#html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +#html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. 
They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = [] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bchameleonm, +# using the given strftime format. +#html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +#html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +#html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +#html_additional_pages = {} + +# If false, no module index is generated. +#html_use_modindex = True + +# If false, no index is generated. +#html_use_index = True + +# If true, the index is split into individual pages for each letter. +#html_split_index = False + +# If true, links to the reST sources are added to the pages. +#html_show_sourcelink = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +#html_use_opensearch = '' + +# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). +#html_file_suffix = '' + +# Output file base name for HTML help builder. +htmlhelp_basename = 'chameleondoc' + + +# -- Options for LaTeX output -------------------------------------------------- + +# The paper size ('letter' or 'a4'). +#latex_paper_size = 'letter' + +# The font size ('10pt', '11pt' or '12pt'). +#latex_font_size = '10pt' + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, author, documentclass [howto/manual]). +latex_documents = [ + ('index', 'chameleon.tex', u'Chameleon Documentation', + u'Malthe Borch et. al', 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +#latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +#latex_use_parts = False + +# Additional stuff for the LaTeX preamble. +#latex_preamble = '' + +# Documents to append as an appendix to all manuals. +#latex_appendices = [] + +# If false, no module index is generated. +#latex_use_modindex = True diff --git a/lib/Chameleon-2.22/docs/configuration.rst b/lib/Chameleon-2.22/docs/configuration.rst new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/docs/configuration.rst @@ -0,0 +1,43 @@ +Configuration +============= + +Most settings can be provided as keyword-arguments to the template +constructor classes. + +There are certain settings which are required at environment +level. Acceptable values are ``"0"``, ``"1"``, or the literals +``"true"`` or ``"false"`` (case-insensitive). + +General usage +------------- + +The following settings are useful in general. + +``CHAMELEON_EAGER`` + Parse and compile templates on instantiation. + +``CHAMELEON_CACHE`` + + When set to a file system path, the template compiler will write + its output to files in this directory and use it as a cache. + + This not only enables you to see the compiler output, but also + speeds up startup. + +``CHAMELEON_RELOAD`` + This setting controls the default value of the ``auto_reload`` + parameter. + +Development +----------- + +The following settings are mostly useful during development or +debugging of the library itself. 
+ +``CHAMELEON_DEBUG`` + + Enables a set of debugging settings which make it easier to + discover and research issues with the engine itself. + + This implicitly enables auto-reload for any template. + diff --git a/lib/Chameleon-2.22/docs/index.rst b/lib/Chameleon-2.22/docs/index.rst new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/docs/index.rst @@ -0,0 +1,217 @@ +Chameleon +========= + +Chameleon is an HTML/XML template engine for `Python +`_. + +It's designed to generate the document output of a web application, +typically HTML markup or XML. + +The language used is *page templates*, originally a `Zope +`_ invention [1]_, but available here as a +:ref:`standalone library ` that you can use in any +script or application running Python 2.5 and up (including 3.x and +`pypy `_). It comes with a set of :ref:`new features +`, too. + +The template engine compiles templates into Python byte-code and is optimized +for speed. For a complex template language, the performance is +:ref:`very good `. + + *Found a bug?* Please report issues to the `issue tracker `_. + + *Need help?* Post to the Pylons `discussion list `_ or join the ``#pyramid`` channel on `Freenode IRC `_. + +Getting the code +---------------- + +You can `download `_ the +package from the Python package index or install the latest release +using setuptools or the newer `distribute +`_ (required for Python 3.x):: + + $ easy_install Chameleon + +.. _no-dependencies: + +There are no required library dependencies on Python 2.7 and up +[2]_. On 2.5 and 2.6, the `ordereddict +`_ and `unittest2 +`_ packages are set as +dependencies. + +The project is hosted in a `GitHub repository +`_. Code contributions are +welcome. The easiest way is to use the `pull request +`_ interface. + + +Introduction +------------ + +The *page templates* language is used within your document structure +as special element attributes and text markup. Using a set of simple +language constructs, you control the document flow, element +repetition, text replacement and translation. + +.. note:: If you've used page templates in a Zope environment previously, note that Chameleon uses Python as the default expression language (instead of *path* expressions). + +The basic language (known as the *template attribute language* or TAL) +is simple enough to grasp from an example: + +.. code-block:: genshi + + + +

+  <html>
+    <body>
+      <h1>Hello, ${'world'}!</h1>
+      <table>
+        <tr tal:repeat="row 'apple', 'banana', 'pineapple'">
+          <td tal:repeat="col 'juice', 'muffin', 'pie'">
+             ${row.capitalize()} ${col}
+          </td>
+        </tr>
+      </table>
+    </body>
+  </html>
          + + + +The ``${...}`` notation is short-hand for text insertion [3]_. The +Python-expression inside the braces is evaluated and the result +included in the output. By default, the string is escaped before +insertion. To avoid this, use the ``structure:`` prefix: + +.. code-block:: genshi + +
<div>${structure: ...}</div>
          + +Note that if the expression result is an object that implements an +``__html__()`` method [4]_, this method will be called and the result +treated as "structure". An example of such an object is the +``Markup`` class that's included as a utility:: + + from chameleon.utils import Markup + username = Markup("%s" % username) + +The macro language (known as the *macro expansion language* or METAL) +provides a means of filling in portions of a generic template. + +On the left, the macro template; on the right, a template that loads +and uses the macro, filling in the "content" slot: + +.. code-block:: genshi + + +

          ${structure: document.body}

          + Example — ${document.title} + + +

          ${document.title}

          + +
          + +
          + + + +In the example, the expression type :ref:`load ` is +used to retrieve a template from the file system using a path relative +to the calling template. + +The METAL system works with TAL such that you can for instance fill in +a slot that appears in a ``tal:repeat`` loop, or refer to variables +defined using ``tal:define``. + +The third language subset is the translation system (known as the +*internationalization language* or I18N): + +.. code-block:: genshi + + + + ... + +
          + You have ${round(amount, 2)} dollars in your account. +
          + + ... + + + +Each translation message is marked up using ``i18n:translate`` and +values can be mapped using ``i18n:name``. Attributes are marked for +translation using ``i18n:attributes``. The template engine generates +`gettext `_ translation strings from +the markup:: + + "You have ${amount} dollars in your account." + +If you use a web framework such as `Pyramid +`_, the translation +system is set up automatically and will negotiate on a *target +language* based on the HTTP request or other parameter. If not, then +you need to configure this manually. + +Next steps +---------- + +This was just an introduction. There are a number of other basic +statements that you need to know in order to use the language. This is +all covered in the :ref:`language reference `. + +If you're already familiar with the page template language, you can +skip ahead to the :ref:`getting started ` +section to learn how to use the template engine in your code. + +To learn about integration with your favorite web framework see the +section on :ref:`framework integration `. + +License +------- + +This software is made available under a BSD-like license. + + +Contents +======== + +.. toctree:: + :maxdepth: 2 + + library.rst + reference.rst + integration.rst + configuration.rst + +Indices and Tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` + +Notes +===== + +.. [1] The template language specifications and API for the Page + Templates engine are based on Zope Page Templates (see in + particular `zope.pagetemplate + `_). However, + the Chameleon compiler and Page Templates engine is an entirely + new codebase, packaged as a standalone distribution. It does + not require a Zope software environment. + +.. [2] The translation system in Chameleon is pluggable and based on + `gettext `_. + There is built-in support for the `zope.i18n + `_ package. If this + package is installed, it will be used by default. The + `translationstring + `_ package + offers some of the same helper and utility classes, without the + Zope application interface. + +.. [3] This syntax was taken from `Genshi `_. + +.. [4] See the `WebHelpers + `_ + library which provide a simple wrapper around this method. diff --git a/lib/Chameleon-2.22/docs/integration.rst b/lib/Chameleon-2.22/docs/integration.rst new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/docs/integration.rst @@ -0,0 +1,46 @@ +.. _framework-integration: + +Integration +=========== + +Integration with Chameleon is available for a number of popular web +frameworks. The framework will usually provide loading mechanisms and +translation (internationalization) configuration. + +Pyramid +------- + +Chameleon is the default template engine for the `Pyramid +`_ framework. See the +section on `Page Templates +`_ for a complete reference. + +Zope 2 / Plone +-------------- + +Install the `five.pt `_ package +to replace the reference template engine (globally). + +Zope Toolkit (ZTK) +------------------ + +Install the `z3c.pt `_ package for +applications based on the `Zope Toolkit +`_ (ZTK). Note that you need to +explicit use the template classes from this package. + +Grok +---- + +Support for the `Grok `_ framework is available +in the `grokcore.chameleon +`_ package. + +This package will setup Grok's policy for templating integration and +associate the Chameleon template components for the ``.cpt`` template +filename extension. + +Django +------ + +Install the `django-chameleon-templates `_ app to enable Chameleon as a template engine. 
diff --git a/lib/Chameleon-2.22/docs/library.rst b/lib/Chameleon-2.22/docs/library.rst new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/docs/library.rst @@ -0,0 +1,205 @@ +Library Documentation +===================== + +This section documents the package as a Python library. To learn about +the page template language, consult the :ref:`language reference +`. + +.. _getting-started-with-cpt: + +Getting started +--------------- + +There are several template constructor classes available, one for each +of the combinations *text* or *xml*, and *string* or *file*. + +The file-based constructor requires an absolute path. To set up a +templates directory *once*, use the template loader class:: + + import os + + path = os.path.dirname(__file__) + + from chameleon import PageTemplateLoader + templates = PageTemplateLoader(os.path.join(path, "templates")) + +Then, to load a template relative to the provided path, use dictionary +syntax:: + + template = templates['hello.pt'] + +Alternatively, use the appropriate template class directly. Let's try +with a string input:: + + from chameleon import PageTemplate + template = PageTemplate("
<div>Hello, ${name}.</div>
          ") + +All template instances are callable. Provide variables by keyword +argument:: + + >>> template(name='John') + '
<div>Hello, John.</div>
'
+
+.. _fast:
+
+Performance
+-----------
+
+The template engine compiles (or *translates*) template source code
+into Python byte-code. In simple templates this yields an increase in
+performance of about 7 times in comparison to the reference
+implementation.
+
+In benchmarks for the content management system `Plone
+`_, switching to Chameleon yields a request to
+response improvement of 20-50%.
+
+Extension
+---------
+
+You can extend the language through the expression engine by writing
+your own expression compiler.
+
+Let's try and write an expression compiler for an expression type that
+will simply uppercase the supplied value. We'll call it ``upper``.
+
+You can write such a compiler as a closure:
+
+.. code-block:: python
+
+   import ast
+
+   def uppercase_expression(string):
+       def compiler(target, engine):
+           uppercased = string.upper()
+           value = ast.Str(uppercased)
+           return [ast.Assign(targets=[target], value=value)]
+       return compiler
+
+To make it available under a certain prefix, we'll add it to the
+expression types dictionary.
+
+.. code-block:: python
+
+   from chameleon import PageTemplate
+   PageTemplate.expression_types['upper'] = uppercase_expression
+
+Alternatively, you could subclass the template class and set the
+attribute ``expression_types`` to a dictionary that includes your
+expression:
+
+.. code-block:: python
+
+   from chameleon import PageTemplateFile
+   from chameleon.tales import PythonExpr
+
+   class MyPageTemplateFile(PageTemplateFile):
+       expression_types = {
+           'python': PythonExpr,
+           'upper': uppercase_expression
+       }
+
+You can now uppercase strings *natively* in your templates::
+
+   <div tal:content="upper: hello, world" />
+
+It's probably best to stick with a Python expression::
+
+   <div tal:content="python: 'hello, world'.upper()" />
+
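+As an end-to-end illustration, here is a minimal sketch; it assumes the
+``uppercase_expression`` compiler defined above, and the rendered markup
+shown is indicative only:
+
+.. code-block:: python
+
+   from chameleon import PageTemplate
+
+   # Register the compiler under the "upper" prefix, then render a
+   # template that uses the new expression type.
+   PageTemplate.expression_types['upper'] = uppercase_expression
+
+   template = PageTemplate('<div tal:content="upper: hello, world" />')
+   print(template())   # something like: <div>HELLO, WORLD</div>
+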
          + + +API reference +------------- + +This section describes the documented API of the library. + + +Templates +~~~~~~~~~ + +Use the ``PageTemplate*`` template classes to define a template from a +string or file input: + +.. automodule:: chameleon + + .. autoclass:: chameleon.PageTemplate + + Note: The remaining classes take the same general configuration + arguments. + + .. automethod:: render + + .. autoclass:: chameleon.PageTemplateFile(filename, **config) + + .. autoclass:: chameleon.PageTextTemplate + + .. autoclass:: chameleon.PageTextTemplateFile + +Loader +~~~~~~ + +Some systems have framework support for loading templates from +files. The following loader class is directly compatible with the +Pylons framework and may be adapted to other frameworks: + +.. class:: chameleon.PageTemplateLoader(search_path=None, default_extension=None, **config) + + Load templates from ``search_path`` (must be a string or a list of + strings):: + + templates = PageTemplateLoader(path) + example = templates['example.pt'] + + If ``default_extension`` is provided, this will be added to inputs + that do not already have an extension:: + + templates = PageTemplateLoader(path, ".pt") + example = templates['example'] + + Any additional keyword arguments will be passed to the template + constructor:: + + templates = PageTemplateLoader(path, debug=True, encoding="utf-8") + + .. automethod:: load + + +Exceptions +~~~~~~~~~~ + +Chameleon may raise exceptions during both the cooking and the +rendering phase, but those raised during the cooking phase (parse and +compile) all inherit from a single base class: + +.. class:: chameleon.TemplateError(msg, token) + + This exception is the base class of all exceptions raised by the + template engine in the case where a template has an error. + + It may be raised during rendering since templates are processed + lazily (unless eager loading is enabled). + + +An error that occurs during the rendering of a template is wrapped in +an exception class to disambiguate the two cases: + +.. class:: chameleon.RenderError(*args) + + Indicates an exception that resulted from the evaluation of an + expression in a template. + + A complete traceback is attached to the exception beginning with + the expression that resulted in the error. The traceback includes + a string representation of the template variable scope for further + reference. + + +Expressions +~~~~~~~~~~~ + +For advanced integration, the compiler module provides support for +dynamic expression evaluation: + +.. automodule:: chameleon.compiler + + .. autoclass:: chameleon.compiler.ExpressionEvaluator diff --git a/lib/Chameleon-2.22/docs/reference.rst b/lib/Chameleon-2.22/docs/reference.rst new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/docs/reference.rst @@ -0,0 +1,1693 @@ +:tocdepth: 4 + +.. _language-reference: + +.. highlight:: xml + +Language Reference +================== + +The language reference is structured such that it can be read as a +general introduction to the *page templates* language. + +It's split into parts that correspond to each of the main language +features. + +Syntax +###### + +You can safely :ref:`skip this section ` if you're familiar with +how template languages work or just want to learn by example. + +An *attribute language* is a programming language designed to render +documents written in XML or HTML markup. The input must be a +well-formed document. The output from the template is usually +XML-like but isn't required to be well-formed. 
+ +The statements of the language are document tags with special +attributes, and look like this:: + +

+   <p namespace-prefix:command="argument">
+     ...
+   </p>

          + +In the above example, the attribute +``namespace-prefix:command="argument"`` is the statement, and the +entire paragraph tag is the statement's element. The statement's +element is the portion of the document on which this statement +operates. + +The namespace prefixes are typically declared once, at the top of a +template (note that prefix declarations for the template language +namespaces are omitted from the template output):: + + + ... + + +Thankfully, sane namespace prefix defaults are in place to let us skip +most of the boilerplate:: + + + +

          ...

          + + + +Note how ``tal`` is used without an explicit namespace +declaration. Chameleon sets up defaults for ``metal`` and ``i18n`` as +well. + +.. note:: Default prefixes are a special feature of Chameleon. + +.. _tal: + +Basics (TAL) +############ + +The *template attribute language* is used to create dynamic XML-like +content. It allows elements of a document to be replaced, repeated, +or omitted. + +Statements +---------- + +These are the available statements: + +================== ============== + Statement Description +================== ============== +``tal:define`` Define variables. +``tal:switch`` Defines a switch condition +``tal:condition`` Include element only if expression is true. +``tal:repeat`` Repeat an element. +``tal:case`` Includes element only if expression is equal to parent switch. +``tal:content`` Substitute the content of an element. +``tal:replace`` Replace the element with dynamic content. +``tal:omit-tag`` Omit the element tags, leaving only the inner content. +``tal:attributes`` Dynamically change or insert element attributes. +``tal:on-error`` Substitute the content of an element if processing fails. +================== ============== + +When there is only one TAL statement per element, the order in which +they are executed is simple. Starting with the root element, each +element's statements are executed, then each of its child elements is +visited, in order, to do the same:: + + + + + </meta> + <body> + <div tal:condition="items"> + <p>These are your items:</p> + <ul> + <li tal:repeat="item items" tal:content="item" /> + </ul> + </div> + </body> + </html> + +Any combination of statements may appear on the same element, except +that the ``tal:content`` and ``tal:replace`` statements may not be +used on the same element. + +.. note:: The ``tal:case`` and ``tal:switch`` statements are available + in Chameleon only. + +TAL does not use the order in which statements are written in the +tag to determine the order in which they are executed. When an +element has multiple statements, they are executed in the order +printed in the table above. + +There is a reasoning behind this ordering. Because users often want +to set up variables for use in other statements contained within this +element or subelements, ``tal:define`` is executed first. Then any +switch statement. ``tal:condition`` follows, then ``tal:repeat``, then +``tal:case``. We are now rendering an element; first ``tal:content`` +or ``tal:replace``. Finally, before ``tal:attributes``, we have +``tal:omit-tag`` (which is implied with ``tal:replace``). + +.. note:: *TALES* is used as the expression language for the "stuff in + the quotes". The default syntax is simply Python, but + other inputs are possible --- see the section on :ref:`expressions + <tales>`. + +``tal:attributes`` +^^^^^^^^^^^^^^^^^^ + +Removes, updates or inserts element attributes. + +:: + + tal:attributes="href request.url" + +Syntax +~~~~~~ + +``tal:attributes`` syntax:: + + argument ::= attribute_statement [';' attribute_statement]* + attribute_statement ::= (attribute_name expression | expression) + attribute_name ::= [namespace-prefix ':'] Name + namespace-prefix ::= Name + + +Description +~~~~~~~~~~~ + +The ``tal:attributes`` statement replaces the value of an attribute +(or drops, or creates an attribute) with a dynamic value. The value +of each expression is converted to a string, if necessary. + +.. 
note:: You can qualify an attribute name with a namespace prefix, + for example ``html:table``, if you are generating an XML document + with multiple namespaces. + +If an attribute expression evaluates to ``None``, the attribute is +deleted from the statement element (or simply not inserted). + +If an attribute statement is just an expression, it must evaluate to a +Python dict (or implement the methods ``update()`` and ``items()`` +from the dictionary specification). + +If the expression evaluates to the symbol ``default`` (a symbol which +is always available when evaluating attributes), its value is defined +as the default static attribute value. If there is no such default +value, a return value of ``default`` will drop the attribute. + +If you use ``tal:attributes`` on an element with an active +``tal:replace`` command, the ``tal:attributes`` statement is ignored. + +If you use ``tal:attributes`` on an element with a ``tal:repeat`` +statement, the replacement is made on each repetition of the element, +and the replacement expression is evaluated fresh for each repetition. + +.. note:: If you want to include a semicolon (";") in an expression, it + must be escaped by doubling it (";;"). + +Examples +~~~~~~~~ + +Replacing a link:: + + <a href="/sample/link.html" + tal:attributes="href context.url()" + > + ... + </a> + +Replacing two attributes:: + + <textarea rows="80" cols="20" + tal:attributes="rows request.rows();cols request.cols()" + /> + +A checkbox input:: + + <input type="input" tal:attributes="checked True" /> + +``tal:condition`` +^^^^^^^^^^^^^^^^^ + +Conditionally includes or omits an element:: + + <div tal:condition="comments"> + ... + </div> + +Syntax +~~~~~~ + +``tal:condition`` syntax:: + + argument ::= expression + +Description +~~~~~~~~~~~ + + The ``tal:condition`` statement includes the statement element in the + template only if the condition is met, and omits it otherwise. If + its expression evaluates to a *true* value, then normal processing of + the element continues, otherwise the statement element is immediately + removed from the template. For these purposes, the value ``nothing`` + is false, and ``default`` has the same effect as returning a true + value. + +.. note:: Like Python itself, ZPT considers None, zero, empty strings, + empty sequences, empty dictionaries, and instances which return a + nonzero value from ``__len__`` or ``__nonzero__`` false; all other + values are true, including ``default``. + +Examples +~~~~~~~~ + +Test a variable before inserting it:: + + <p tal:condition="request.message" tal:content="request.message" /> + +Testing for odd/even in a repeat-loop:: + + <div tal:repeat="item range(10)"> + <p tal:condition="repeat.item.even">Even</p> + <p tal:condition="repeat.item.odd">Odd</p> + </div> + +``tal:content`` +^^^^^^^^^^^^^^^ + +Replaces the content of an element. + +Syntax +~~~~~~ + +``tal:content`` syntax:: + + argument ::= (['text'] | 'structure') expression + +Description +~~~~~~~~~~~ + +Rather than replacing an entire element, you can insert text or +structure in place of its children with the ``tal:content`` statement. +The statement argument is exactly like that of ``tal:replace``, and is +interpreted in the same fashion. If the expression evaluates to +``nothing``, the statement element is left childless. If the +expression evaluates to ``default``, then the element's contents are +evaluated. + +The default replacement behavior is ``text``, which replaces +angle-brackets and ampersands with their HTML entity equivalents. 
The +``structure`` keyword passes the replacement text through unchanged, +allowing HTML/XML markup to be inserted. This can break your page if +the text contains unanticipated markup (eg. text submitted via a web +form), which is the reason that it is not the default. + +.. note:: The ``structure`` keyword exists to provide backwards + compatibility. In Chameleon, the ``structure:`` expression + type provides the same functionality (also for inline + expressions). + + +Examples +~~~~~~~~ + +Inserting the user name:: + + <p tal:content="user.getUserName()">Fred Farkas</p> + +Inserting HTML/XML:: + + <p tal:content="structure context.getStory()"> + Marked <b>up</b> content goes here. + </p> + +``tal:define`` +^^^^^^^^^^^^^^ + +Defines local variables. + +Syntax +~~~~~~ + +``tal:define`` syntax:: + + argument ::= define_scope [';' define_scope]* + define_scope ::= (['local'] | 'global') + define_var define_var ::= variable_name + expression variable_name ::= Name + +Description +~~~~~~~~~~~ + +The ``tal:define`` statement defines variables. When you define a +local variable in a statement element, you can use that variable in +that element and the elements it contains. If you redefine a variable +in a contained element, the new definition hides the outer element's +definition within the inner element. + +Note that valid variable names are any Python identifier string +including underscore, although two or more leading underscores are +disallowed (used internally by the compiler). Further, names are +case-sensitive. + +Python builtins are always "in scope", but most of them may be +redefined (such as ``help``). Exceptions are:: ``float``, ``int``, +``len``, ``long``, ``str``, ``None``, ``True`` and ``False``. + +In addition, the following names are reserved: ``econtext``, +``rcontext``, ``translate``, ``decode`` and ``convert``. + +If the expression associated with a variable evaluates to ``nothing``, +then that variable has the value ``nothing``, and may be used as such +in further expressions. Likewise, if the expression evaluates to +``default``, then the variable has the value ``default``, and may be +used as such in further expressions. + +You can define two different kinds of variables: *local* and +*global*. When you define a local variable in a statement element, you +can only use that variable in that element and the elements it +contains. If you redefine a local variable in a contained element, the +new definition hides the outer element's definition within the inner +element. When you define a global variables, you can use it in any +element processed after the defining element. If you redefine a global +variable, you replace its definition for the rest of the template. + +To set the definition scope of a variable, use the keywords ``local`` +or ``global`` in front of the assignment. The default setting is +``local``; thus, in practice, only the ``global`` keyword is used. + +.. note:: If you want to include a semicolon (";") in an expression, it + must be escaped by doubling it (";;"). + +Examples +~~~~~~~~ + +Defining a variable:: + + tal:define="company_name 'Zope Corp, Inc.'" + +Defining two variables, where the second depends on the first:: + + tal:define="mytitle context.title; tlen len(mytitle)" + + +``tal:switch`` and ``tal:case`` +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Defines a switch clause. 
+ +:: + + <ul tal:switch="len(items) % 2"> + <li tal:case="True">odd</li> + <li tal:case="False">even</li> + </ul> + +Syntax +~~~~~~ + +``tal:case`` and ``tal:switch`` syntax:: + + argument ::= expression + +Description +~~~~~~~~~~~ + +The *switch* and *case* construct is a short-hand syntax for matching +a set of expressions against a single parent. + +The ``tal:switch`` statement is used to set a new parent expression +and the contained ``tal:case`` statements are then matched in sequence +such that only the first match succeeds. + +Note that the symbol ``default`` affirms the case precisely when no +previous case has been successful. It should therefore be placed last. + +.. note:: These statements are only available in Chameleon 2.x and not + part of the ZPT specification. + +Examples +~~~~~~~~ + +:: + + <ul tal:switch="item.type"> + <li tal:case="'document'"> + Document + </li> + <li tal:case="'folder'"> + Folder + </li> + <li tal:case="default"> + Other + </li> + </ul> + + +``tal:omit-tag`` +^^^^^^^^^^^^^^^^ + +Removes an element, leaving its contents. + +Syntax +~~~~~~ + +``tal:omit-tag`` syntax:: + + argument ::= [ expression ] + +Description +~~~~~~~~~~~ + +The ``tal:omit-tag`` statement leaves the contents of an element in +place while omitting the surrounding start and end tags. + +If the expression evaluates to a *false* value, then normal processing +of the element continues and the tags are not omitted. If the +expression evaluates to a *true* value, or no expression is provided, +the statement element is replaced with its contents. + +.. note:: Like Python itself, ZPT considers None, zero, empty strings, + empty sequences, empty dictionaries, and instances which return a + nonzero value from ``__len__`` or ``__nonzero__`` false; all other + values are true, including ``default``. + +Examples +~~~~~~~~ + +Unconditionally omitting a tag:: + + <div tal:omit-tag="" comment="This tag will be removed"> + <i>...but this text will remain.</i> + </div> + +Conditionally omitting a tag:: + + <b tal:omit-tag="not:bold">I may be bold.</b> + +The above example will omit the ``b`` tag if the variable ``bold`` is false. + +Creating ten paragraph tags, with no enclosing tag:: + + <span tal:repeat="n range(10)" + tal:omit-tag=""> + <p tal:content="n">1</p> + </span> + +.. _tal_repeat: + +``tal:repeat`` +^^^^^^^^^^^^^^ + +Repeats an element. + +Syntax +~~~~~~ + +``tal:repeat`` syntax:: + + argument ::= variable_name expression + variable_name ::= Name + +Description +~~~~~~~~~~~ + +The ``tal:repeat`` statement replicates a sub-tree of your document +once for each item in a sequence. The expression should evaluate to a +sequence. If the sequence is empty, then the statement element is +deleted, otherwise it is repeated for each value in the sequence. If +the expression is ``default``, then the element is left unchanged, and +no new variables are defined. + +The ``variable_name`` is used to define a local variable and a repeat +variable. For each repetition, the local variable is set to the +current sequence element, and the repeat variable is set to an +iteration object. + +Repeat variables +~~~~~~~~~~~~~~~~~ + +You use repeat variables to access information about the current +repetition (such as the repeat index). The repeat variable has the +same name as the local variable, but is only accessible through the +built-in variable named ``repeat``. 
+ +The following information is available from the repeat variable: + +================== ============== + Attribute Description +================== ============== +``index`` Repetition number, starting from zero. +``number`` Repetition number, starting from one. +``even`` True for even-indexed repetitions (0, 2, 4, ...). +``odd`` True for odd-indexed repetitions (1, 3, 5, ...). +``parity`` For odd-indexed repetitions, this is 'odd', else 'even'. +``start`` True for the starting repetition (index 0). +``end`` True for the ending, or final, repetition. +``first`` True for the first item in a group - see note below +``last`` True for the last item in a group - see note below +``length`` Length of the sequence, which will be the total number of repetitions. +``letter`` Repetition number as a lower-case letter: "a" - "z", "aa" - "az", "ba" - "bz", ..., "za" - "zz", "aaa" - "aaz", and so forth. +``Letter`` Upper-case version of *letter*. +``roman`` Repetition number as a lower-case roman numeral: "i", "ii", "iii", "iv", "v", etc. +``Roman`` Upper-case version of *roman*. +================== ============== + +You can access the contents of the repeat variable using either +dictionary- or attribute-style access, e.g. ``repeat['item'].start`` +or ``repeat.item.start``. + +.. note:: For legacy compatibility, the attributes ``odd``, ``even``, ``number``, ``letter``, ``Letter``, ``roman``, and ``Roman`` are callable (returning ``self``). + +Note that ``first`` and ``last`` are intended for use with sorted +sequences. They try to divide the sequence into group of items with +the same value. + +Examples +~~~~~~~~ + +Iterating over a sequence of strings:: + + <p tal:repeat="txt ('one', 'two', 'three')"> + <span tal:replace="txt" /> + </p> + +Inserting a sequence of table rows, and using the repeat variable +to number the rows:: + + <table> + <tr tal:repeat="item here.cart"> + <td tal:content="repeat.item.number">1</td> + <td tal:content="item.description">Widget</td> + <td tal:content="item.price">$1.50</td> + </tr> + </table> + +Nested repeats:: + + <table border="1"> + <tr tal:repeat="row range(10)"> + <td tal:repeat="column range(10)"> + <span tal:define="x repeat.row.number; + y repeat.column.number; + z x * y" + tal:replace="string:$x * $y = $z">1 * 1 = 1</span> + </td> + </tr> + </table> + +Insert objects. Separates groups of objects by type by drawing a rule +between them:: + + <div tal:repeat="object objects"> + <h2 tal:condition="repeat.object.first.meta_type" + tal:content="object.type">Meta Type</h2> + <p tal:content="object.id">Object ID</p> + <hr tal:condition="object.last.meta_type" /> + </div> + +.. note:: the objects in the above example should already be sorted by + type. + +``tal:replace`` +^^^^^^^^^^^^^^^ + +Replaces an element. + +Syntax +~~~~~~ + +``tal:replace`` syntax:: + + argument ::= ['structure'] expression + +Description +~~~~~~~~~~~ + + +The ``tal:replace`` statement replaces an element with dynamic +content. It replaces the statement element with either text or a +structure (unescaped markup). The body of the statement is an +expression with an optional type prefix. The value of the expression +is converted into an escaped string unless you provide the 'structure' prefix. Escaping consists of converting ``&`` to +``&amp;``, ``<`` to ``&lt;``, and ``>`` to ``&gt;``. + +.. note:: If the inserted object provides an ``__html__`` method, that method is called with the result inserted as structure. This feature is not implemented by ZPT. 
+ +If the expression evaluates to ``None``, the element is simply removed. If the value is ``default``, then the element is left unchanged. + +Examples +~~~~~~~~ + +Inserting a title:: + + <span tal:replace="context.title">Title</span> + +Inserting HTML/XML:: + + <div tal:replace="structure table" /> + +.. _tales: + +Expressions (TALES) +################### + +The *Template Attribute Language Expression Syntax* (TALES) standard +describes expressions that supply :ref:`tal` and +:ref:`metal` with data. TALES is *one* possible expression +syntax for these languages, but they are not bound to this definition. +Similarly, TALES could be used in a context having nothing to do with +TAL or METAL. + +TALES expressions are described below with any delimiter or quote +markup from higher language layers removed. Here is the basic +definition of TALES syntax:: + + Expression ::= [type_prefix ':'] String + type_prefix ::= Name + +Here are some simple examples:: + + 1 + 2 + None + string:Hello, ${view.user_name} + +The optional *type prefix* determines the semantics and syntax of the +*expression string* that follows it. A given implementation of TALES +can define any number of expression types, with whatever syntax you +like. It also determines which expression type is indicated by +omitting the prefix. + +Types +----- + +These are the available TALES expression types: + +============= ============== + Prefix Description +============= ============== +``exists`` Evaluate the result inside an exception handler; if one of the exceptions ``AttributeError``, ``LookupError``, ``TypeError``, ``NameError``, or ``KeyError`` is raised during evaluation, the result is ``False``, otherwise ``True``. Note that the original result is discarded in any case. +``import`` Import a global symbol using dotted notation. +``load`` Load a template relative to the current template or absolute. +``not`` Negate the expression result +``python`` Evaluate a Python expression +``string`` Format a string +``structure`` Wraps the expression result as *structure*. +============= ============== + +.. note:: The default expression type is ``python``. + +.. warning:: The Zope reference engine defaults to a ``path`` + expression type, which is closely tied to the Zope + framework. This expression is not implemented in + Chameleon (but it's available in a Zope framework + compatibility package). + +There's a mechanism to allow fallback to alternative expressions, if +one should fail (raise an exception). The pipe character ('|') is used +to separate two expressions:: + + <div tal:define="page request.GET['page'] | 0"> + +This mechanism applies only to the ``python`` expression type, and by +derivation ``string``. + +.. _tales_built_in_names: + +``python`` +^^^^^^^^^^ + +Evaluates a Python expression. + +Syntax +~~~~~~ + +Python expression syntax:: + + Any valid Python language expression + +Description +~~~~~~~~~~~ + +Python expressions are executed natively within the translated +template source code. There is no built-in security apparatus. + +``string`` +^^^^^^^^^^ + +Syntax +~~~~~~ + +String expression syntax:: + + string_expression ::= ( plain_string | [ varsub ] )* + varsub ::= ( '$' Variable ) | ( '${ Expression }' ) + plain_string ::= ( '$$' | non_dollar )* + non_dollar ::= any character except '$' + +Description +~~~~~~~~~~~ + +String expressions interpret the expression string as text. If no +expression string is supplied the resulting string is *empty*. 
The +string can contain variable substitutions of the form ``$name`` or +``${expression}``, where ``name`` is a variable name, and ``expression`` is a TALES-expression. The escaped string value of the expression is inserted into the string. + +.. note:: To prevent a ``$`` from being interpreted this + way, it must be escaped as ``$$``. + +Examples +~~~~~~~~ + +Basic string formatting:: + + <span tal:replace="string:$this and $that"> + Spam and Eggs + </span> + + <p tal:content="string:${request.form['total']}"> + total: 12 + </p> + +Including a dollar sign:: + + <p tal:content="string:$$$cost"> + cost: $42.00 + </p> + +.. _import-expression: + +``import`` +^^^^^^^^^^ + +Imports a module global. + +.. _structure-expression: + +``structure`` +^^^^^^^^^^^^^ + +Wraps the expression result as *structure*: The replacement text is +inserted into the document without escaping, allowing HTML/XML markup +to be inserted. This can break your page if the text contains +unanticipated markup (eg. text submitted via a web form), which is +the reason that it is not the default. + +.. _load-expression: + +``load`` +^^^^^^^^ + +Loads a template instance. + +Syntax +~~~~~~ + +Load expression syntax:: + + Relative or absolute file path + +Description +~~~~~~~~~~~ + +The template will be loaded using the same template class as the +calling template. + +Examples +~~~~~~~~ + +Loading a template and using it as a macro:: + + <div tal:define="master load: ../master.pt" metal:use-macro="master" /> + + +Built-in names +-------------- + +These are the names always available in the TALES expression namespace: + +- ``default`` - special value used to specify that existing text or attributes should not be replaced. See the documentation for individual TAL statements for details on how they interpret *default*. + +- ``repeat`` - the *repeat* variables; see :ref:`tal_repeat` for more + information. + +- ``template`` - reference to the template which was first called; this symbol is carried over when using macros. + +- ``macros`` - reference to the macros dictionary that corresponds to the current template. + + +.. _metal: + +Macros (METAL) +############## + +The *Macro Expansion Template Attribute Language* (METAL) standard is +a facility for HTML/XML macro preprocessing. It can be used in +conjunction with or independently of TAL and TALES. + +Macros provide a way to define a chunk of presentation in one +template, and share it in others, so that changes to the macro are +immediately reflected in all of the places that share it. +Additionally, macros are always fully expanded, even in a template's +source text, so that the template appears very similar to its final +rendering. + +A single Page Template can accomodate multiple macros. + +Namespace +--------- + +The METAL namespace URI and recommended alias are currently defined +as:: + + xmlns:metal="http://xml.zope.org/namespaces/metal" + +Just like the TAL namespace URI, this URI is not attached to a web +page; it's just a unique identifier. This identifier must be used in +all templates which use METAL. + +Statements +---------- + +METAL defines a number of statements: + +* ``metal:define-macro`` Define a macro. +* ``metal:use-macro`` Use a macro. +* ``metal:extend-macro`` Extend a macro. +* ``metal:define-slot`` Define a macro customization point. +* ``metal:fill-slot`` Customize a macro. 
+ +Although METAL does not define the syntax of expression non-terminals, +leaving that up to the implementation, a canonical expression syntax +for use in METAL arguments is described in TALES Specification. + +``define-macro`` +^^^^^^^^^^^^^^^^ + +Defines a macro. + +Syntax +~~~~~~ + +``metal:define-macro`` syntax:: + + argument ::= Name + +Description +~~~~~~~~~~~ + +The ``metal:define-macro`` statement defines a macro. The macro is named +by the statement expression, and is defined as the element and its +sub-tree. + +Examples +~~~~~~~~ + +Simple macro definition:: + + <p metal:define-macro="copyright"> + Copyright 2011, <em>Foobar</em> Inc. + </p> + +``define-slot`` +^^^^^^^^^^^^^^^ + +Defines a macro customization point. + +Syntax +~~~~~~ + +``metal:define-slot`` syntax:: + + argument ::= Name + +Description +~~~~~~~~~~~ + +The ``metal:define-slot`` statement defines a macro customization +point or *slot*. When a macro is used, its slots can be replaced, in +order to customize the macro. Slot definitions provide default content +for the slot. You will get the default slot contents if you decide not +to customize the macro when using it. + +The ``metal:define-slot`` statement must be used inside a +``metal:define-macro`` statement. + +Slot names must be unique within a macro. + +Examples +~~~~~~~~ + +Simple macro with slot:: + + <p metal:define-macro="hello"> + Hello <b metal:define-slot="name">World</b> + </p> + +This example defines a macro with one slot named ``name``. When you use +this macro you can customize the ``b`` element by filling the ``name`` +slot. + +``fill-slot`` +^^^^^^^^^^^^^ + +Customize a macro. + +Syntax +~~~~~~ + +``metal:fill-slot`` syntax:: + + argument ::= Name + +Description +~~~~~~~~~~~ + +The ``metal:fill-slot`` statement customizes a macro by replacing a +*slot* in the macro with the statement element (and its content). + +The ``metal:fill-slot`` statement must be used inside a +``metal:use-macro`` statement. + +Slot names must be unique within a macro. + +If the named slot does not exist within the macro, the slot +contents will be silently dropped. + +Examples +~~~~~~~~ + +Given this macro:: + + <p metal:define-macro="hello"> + Hello <b metal:define-slot="name">World</b> + </p> + +You can fill the ``name`` slot like so:: + + <p metal:use-macro="container['master.html'].macros.hello"> + Hello <b metal:fill-slot="name">Kevin Bacon</b> + </p> + +``use-macro`` +^^^^^^^^^^^^^ + +Use a macro. + +Syntax +~~~~~~ + +``metal:use-macro`` syntax:: + + argument ::= expression + +Description +~~~~~~~~~~~ + +The ``metal:use-macro`` statement replaces the statement element with +a macro. The statement expression describes a macro definition. + +.. note:: In Chameleon the expression may point to a template instance; in this case it will be rendered in its entirety. + +``extend-macro`` +^^^^^^^^^^^^^^^^ + +Extends a macro. + +Syntax +~~~~~~ + +``metal:extend-macro`` syntax:: + + argument ::= expression + +Description +~~~~~~~~~~~ + +To extend an existing macro, choose a name for the macro and add a +define-macro attribute to a document element with the name as the +argument. Add an extend-macro attribute to the document element with +an expression referencing the base macro as the argument. The +extend-macro must be used in conjunction with define-macro, and must +not be used with use-macro. The element's subtree is the macro +body. 
+ +Examples +~~~~~~~~ + +:: + + <div metal:define-macro="page-header" + metal:extend-macro="standard_macros['page-header']"> + <div metal:fill-slot="breadcrumbs"> + You are here: + <div metal:define-slot="breadcrumbs"/> + </div> + </div> + + +.. _i18n: + +Translation (I18N) +################## + +Translation of template contents and attributes is supported via the +``i18n`` namespace and message objects. + +Messages +-------- + +The translation machinery defines a message as *any object* which is +not a string or a number and which does not provide an ``__html__`` +method. + +When any such object is inserted into the template, the translate +function is invoked first to see if it needs translation. The result +is always coerced to a native string before it's inserted into the +template. + +Translation function +-------------------- + +The simplest way to hook into the translation machinery is to provide +a translation function to the template constructor or at +render-time. In either case it should be passed as the keyword +argument ``translate``. + +The function has the following signature: + +.. code-block:: python + + def translate(msgid, domain=None, mapping=None, context=None, target_language=None, default=None): + ... + +The result should be a string or ``None``. If another type of object +is returned, it's automatically coerced into a string. + +If `zope.i18n <http://pypi.python.org/pypi/zope.i18n>`_ is available, +the translation machinery defaults to using its translation +function. Note that this function requires messages to conform to the +message class from `zope.i18nmessageid +<http://pypi.python.org/pypi/zope.i18nmessageid>`_; specifically, +messages must have attributes ``domain``, ``mapping`` and +``default``. Example use: + +.. code-block:: python + + from zope.i18nmessageid import MessageFactory + _ = MessageFactory("food") + + apple = _(u"Apple") + +There's currently no further support for other translation frameworks. + +Using Zope's translation framework +----------------------------------- + +The translation function from ``zope.i18n`` relies on *translation +domains* to provide translations. + +These are components that are registered for some translation domain +identifier and which implement a ``translate`` method that translates +messages for that domain. + +.. note:: To register translation domain components, the Zope Component Architecture must be used (see `zope.component <http://pypi.python.org/pypi/zope.component>`_). + +The easiest way to configure translation domains is to use the the +``registerTranslations`` ZCML-directive; this requires the use of the +`zope.configuration <http://pypi.python.org/pypi/zope.configuration>`_ +package. This will set up translation domains and gettext catalogs +automatically: + +.. code-block:: xml + + <configure xmlns="http://namespaces.zope.org/zope" + xmlns:i18n="http://xml.zope.org/namespaces/i18n"> + + <i18n:registerTranslations directory="locales" /> + + </configure> + +The ``./locales`` directory must follow a particular directory +structure: + +.. code-block:: bash + + ./locales/en/LC_MESSAGES + ./locales/de/LC_MESSAGES + ... + +In each of the ``LC_MESSAGES`` directories, one `GNU gettext +<http://en.wikipedia.org/wiki/GNU_gettext>`_ file in the ``.po`` +format must be present per translation domain: + +.. 
code-block:: po + + # ./locales/de/LC_MESSAGES/food.po + + msgid "" + msgstr "" + "MIME-Version: 1.0\n" + "Content-Type: text/plain; charset=UTF-8\n" + "Content-Transfer-Encoding: 8bit\n" + + msgid "Apple" + msgstr "Apfel" + +It may be necessary to compile the message catalog using the +``msgfmt`` utility. This will produce a ``.mo`` file. + +Translation domains without gettext +----------------------------------- + +The following example demonstrates how to manually set up and +configure a translation domain for which messages are provided +directly:: + + from zope import component + from zope.i18n.simpletranslationdomain import SimpleTranslationDomain + + food = SimpleTranslationDomain("food", { + ('de', u'Apple'): u'Apfel', + }) + + component.provideUtility(food, food.domain) + +An example of a custom translation domain class:: + + from zope import interface + + class TranslationDomain(object): + interface.implements(ITranslationDomain) + + def translate(self, msgid, mapping=None, context=None, + target_language=None, default=None): + + ... + + component.provideUtility(TranslationDomain(), name="custom") + +This approach can be used to integrate other translation catalog +implementations. + +.. highlight:: xml + +Namespace +--------- + +The ``i18n`` namespace URI and recommended prefix are currently +defined as:: + + xmlns:i18n="http://xml.zope.org/namespaces/i18n" + +This is not a URL, but merely a unique identifier. Do not expect a +browser to resolve it successfully. + +Statements +---------- + +The allowable ``i18n`` statements are: + +- ``i18n:translate`` +- ``i18n:domain`` +- ``i18n:context`` +- ``i18n:source`` +- ``i18n:target`` +- ``i18n:name`` +- ``i18n:attributes`` +- ``i18n:data`` +- ``i18n:comment`` + +``i18n:translate`` +^^^^^^^^^^^^^^^^^^ + +This attribute is used to mark units of text for translation. If this +attribute is specified with an empty string as the value, the message +ID is computed from the content of the element bearing this attribute. +Otherwise, the value of the element gives the message ID. + +``i18n:domain`` +^^^^^^^^^^^^^^^ + +The ``i18n:domain`` attribute is used to specify the domain to be used +to get the translation. If not specified, the translation services +will use a default domain. The value of the attribute is used +directly; it is not a TALES expression. + + +``i18n:context`` +^^^^^^^^^^^^^^^ + +The ``i18n:context`` attribute is used to specify the context to be +used to get the translation. If not specified, the translation +services will use a default context. The context is generally use to +distinguish identical texts in different context (because in a +translation this may not be the case.) The value of the attribute is +used literally; it is not an expression. + + +``i18n:source`` +^^^^^^^^^^^^^^^ + +The ``i18n:source`` attribute specifies the language of the text to be +translated. The default is ``nothing``, which means we don't provide +this information to the translation services. + + +``i18n:target`` +^^^^^^^^^^^^^^^ + +The ``i18n:target`` attribute specifies the language of the +translation we want to get. If the value is ``default``, the language +negotiation services will be used to choose the destination language. +If the value is ``nothing``, no translation will be performed; this +can be used to suppress translation within a larger translated unit. +Any other value must be a language code. + +The attribute value is a TALES expression; the result of evaluating +the expression is the language code or one of the reserved values. 
+ +.. note:: ``i18n:target`` is primarily used for hints to text + extraction tools and translation teams. If you had some text that + should only be translated to e.g. German, then it probably + shouldn't be wrapped in an ``i18n:translate`` span. + +``i18n:name`` +^^^^^^^^^^^^^ + +Name the content of the current element for use in interpolation +within translated content. This allows a replaceable component in +content to be re-ordered by translation. For example:: + + <span i18n:translate=''> + <span tal:replace='context.name' i18n:name='name' /> was born in + <span tal:replace='context.country_of_birth' i18n:name='country' />. + </span> + +would cause this text to be passed to the translation service:: + + "${name} was born in ${country}." + +``i18n:attributes`` +^^^^^^^^^^^^^^^^^^^ + +This attribute will allow us to translate attributes of HTML tags, +such as the ``alt`` attribute in the ``img`` tag. The +``i18n:attributes`` attribute specifies a list of attributes to be +translated with optional message IDs for each; if multiple attribute +names are given, they must be separated by semicolons. Message IDs +used in this context must not include whitespace. + +Note that the value of the particular attributes come either from the +HTML attribute value itself or from the data inserted by +``tal:attributes``. + +If an attibute is to be both computed using ``tal:attributes`` and +translated, the translation service is passed the result of the TALES +expression for that attribute. + +An example:: + + <img src="http://foo.com/logo" alt="Visit us" + tal:attributes="alt context.greeting" + i18n:attributes="alt" + > + +In this example, we let ``tal:attributes`` set the value of the ``alt`` +attribute to the text "Stop by for a visit!". This text will be +passed to the translation service, which uses the result of language +negotiation to translate "Stop by for a visit!" into the requested +language. The example text in the template, "Visit us", will simply +be discarded. + +Another example, with explicit message IDs:: + + <img src="../icons/uparrow.png" alt="Up" + i18n:attributes="src up-arrow-icon; alt up-arrow-alttext" + > + +Here, the message ID ``up-arrow-icon`` will be used to generate the +link to an icon image file, and the message ID 'up-arrow-alttext' will +be used for the "alt" text. + +``i18n:data`` +^^^^^^^^^^^^^ + +Since TAL always returns strings, we need a way in ZPT to translate +objects, one of the most obvious cases being ``datetime`` objects. The +``data`` attribute will allow us to specify such an object, and +``i18n:translate`` will provide us with a legal format string for that +object. If ``data`` is used, ``i18n:translate`` must be used to give +an explicit message ID, rather than relying on a message ID computed +from the content. + +``i18n:comment`` +^^^^^^^^^^^^^^^^ + +The ``i18n:comment`` attribute can be used to add extra comments for +translators. It is not used by Chameleon for processing, but will be +picked up by tools like `lingua <pypi.python.org/pypi/lingua>`_. + +An example: + + <h3 i18n:comment="Header for the news section" + i18n:translate="">News</h3> + + +Relation with TAL processing +---------------------------- + +The attributes defined in the ``i18n`` namespace modify the behavior +of the TAL interpreter for the ``tal:attributes``, ``tal:content``, +``tal:repeat``, and ``tal:replace`` attributes, but otherwise do not +affect TAL processing. 
+ +Since these attributes only affect TAL processing by causing +translations to occur at specific times, using these with a TAL +processor which does not support the ``i18n`` namespace degrades well; +the structural expectations for a template which uses the ``i18n`` +support is no different from those for a page which does not. The +only difference is that translations will not be performed in a legacy +processor. + +Relation with METAL processing +------------------------------- + +When using translation with METAL macros, the internationalization +context is considered part of the specific documents that page +components are retrieved from rather than part of the combined page. +This makes the internationalization context lexical rather than +dynamic, making it easier for a site builder to understand the +behavior of each element with respect to internationalization. + +Let's look at an example to see what this means:: + + <html i18n:translate='' i18n:domain='EventsCalendar' + metal:use-macro="container['master.html'].macros.thismonth"> + + <div metal:fill-slot='additional-notes'> + <ol tal:condition="context.notes"> + <li tal:repeat="note context.notes"> + <tal:block tal:omit-tag="" + tal:condition="note.heading"> + <strong tal:content="note.heading"> + Note heading goes here + </strong> + <br /> + </tal:block> + <span tal:replace="note/description"> + Some longer explanation for the note goes here. + </span> + </li> + </ol> + </div> + + </html> + +And the macro source:: + + <html i18n:domain='CalendarService'> + <div tal:replace='python:DateTime().Month()' + i18n:translate=''>January</div> + + <!-- really hairy TAL code here ;-) --> + + <div define-slot="additional-notes"> + Place for the application to add additional notes if desired. + </div> + + </html> + +Note that the macro is using a different domain than the application +(which it should be). With lexical scoping, no special markup needs +to be applied to cause the slot-filler in the application to be part +of the same domain as the rest of the application's page components. +If dynamic scoping were used, the internationalization context would +need to be re-established in the slot-filler. + + +Extracting translatable message +------------------------------- + +Translators use `PO files +<http://www.gnu.org/software/hello/manual/gettext/PO-Files.html>`_ +when translating messages. To create and update PO files you need to +do two things: *extract* all messages from python and templates files +and store them in a ``.pot`` file, and for each language *update* its +``.po`` file. Chameleon facilitates this by providing extractors for +`Babel <http://babel.edgewall.org/>`_. To use this you need modify +``setup.py``. For example: + +.. code-block:: python + + from setuptools import setup + + setup(name="mypackage", + install_requires = [ + "Babel", + ], + message_extractors = { "src": [ + ("**.py", "chameleon_python", None ), + ("**.pt", "chameleon_xml", None ), + ]}, + ) + +This tells Babel to scan the ``src`` directory while using the +``chameleon_python`` extractor for all ``.py`` files and the +``chameleon_xml`` extractor for all ``.pt`` files. + +You can now use Babel to manage your PO files: + +.. code-block:: bash + + python setup.py extract_messages --output-file=i18n/mydomain.pot + python setup.py update_catalog \ + -l nl \ + -i i18n/mydomain.pot \ + -o i18n/nl/LC_MESSAGES/mydomain.po + python setup.py compile_catalog \ + --directory i18n --locale nl + +You can also configure default options in a ``setup.cfg`` file. 
+For example::
+
+  [compile_catalog]
+  domain = mydomain
+  directory = i18n
+
+  [extract_messages]
+  copyright_holder = Acme Inc.
+  output_file = i18n/mydomain.pot
+  charset = UTF-8
+
+  [init_catalog]
+  domain = mydomain
+  input_file = i18n/mydomain.pot
+  output_dir = i18n
+
+  [update_catalog]
+  domain = mydomain
+  input_file = i18n/mydomain.pot
+  output_dir = i18n
+  previous = true
+
+You can now use the Babel commands directly::
+
+  python setup.py extract_messages
+  python setup.py update_catalog
+  python setup.py compile_catalog
+
+
+${...} operator
+###############
+
+The ``${...}`` notation is short-hand for text insertion. The Python
+expression inside the braces is evaluated and the result included in
+the output (all inserted text is escaped by default):
+
+.. code-block:: html
+
+  <div id="section-${index + 1}">
+    ${content}
+  </div>
+
+To prevent this interpolation, prefix the notation with a backslash
+character: ``\${...}``.
+
+Note that if an object implements the ``__html__`` method, the result
+of this method will be inserted as-is (without XML escaping).
+
+Code blocks
+###########
+
+The ``<?python ... ?>`` notation allows you to embed Python code in
+templates:
+
+.. code-block:: html
+
+  <div>
+    <?python numbers = map(str, range(1, 10)) ?>
+    Please input a number from the range ${", ".join(numbers)}.
+  </div>
+
+The scope of name assignments extends up to the nearest macro
+definition, or to the template itself if macros are not used.
+
+Note that code blocks can span multiple lines and may start on the
+line following the processing instruction:
+
+.. code-block:: html
+
+  <?python
+    foo = [1, 2, 3]
+  ?>
+
+You can use this to debug templates:
+
+.. code-block:: html
+
+  <div>
+    <?python import pdb; pdb.set_trace() ?>
+  </div>
+
+
+Markup comments
+###############
+
+You can apply the "!" and "?" modifiers to change how comments are
+processed:
+
+Drop
+
+  ``<!--! This comment will be dropped from output -->``
+
+Verbatim
+
+  ``<!--? This comment will be included verbatim -->``
+
+  That is, evaluation of ``${...}`` expressions is disabled if the
+  comment opens with the "?" character.
+
+
+.. _new-features:
+
+Language extensions
+###################
+
+Chameleon extends the *page template* language with new expression
+types and language features. Some take inspiration from `Genshi
+<http://genshi.edgewall.org/>`_.
+
+*New expression types*
+
+  The :ref:`structure <structure-expression>` expression wraps an
+  expression result as *structure*::
+
+    <div>${structure: body.text}</div>
+
+  The :ref:`import <import-expression>` expression imports module
+  globals::
+
+    <div tal:define="compile import: re.compile">
+      ...
+    </div>
+
+  The :ref:`load <load-expression>` expression loads templates
+  relative to the current template::
+
+    <div tal:define="compile load: main.pt">
+      ...
+    </div>
+
+*Tuple unpacking*
+
+  The ``tal:define`` and ``tal:repeat`` statements support tuple
+  unpacking::
+
+    tal:define="(a, b, c) [1, 2, 3]"
+
+  Extended `iterable unpacking
+  <http://www.python.org/dev/peps/pep-3132/>`_ using the asterisk
+  character is not currently supported (even for versions of
+  Python that support it natively).
+
+*Dictionary lookup as fallback after attribute error*
+
+  If attribute lookup (using the ``obj.<name>`` syntax) raises an
+  ``AttributeError`` exception, a secondary lookup is attempted
+  using dictionary lookup --- ``obj['<name>']``.
+
+  Behind the scenes, this is done by rewriting all
+  attribute-lookups to a custom lookup call:
+
+  .. code-block:: python
+
+    def lookup_attr(obj, key):
+        try:
+            return getattr(obj, key)
+        except AttributeError as exc:
+            try:
+                get = obj.__getitem__
+            except AttributeError:
+                raise exc
+            try:
+                return get(key)
+            except KeyError:
+                raise exc
+
+*Inline string substitution*
+
+  In element attributes and in the text or tail of an element,
+  string expression interpolation is available using the
+  ``${...}`` syntax::
+
+    <span class="content-${item_type}">
+      ${title or item_id}
+    </span>
+
+*Code blocks*
+
+  Using ``<?python ... ?>`` notation, you can embed Python
+  statements in your templates:
+
+  .. code-block:: html
+
+    <div>
+      <?python numbers = map(str, range(1, 10)) ?>
+      Please input a number from the range ${", ".join(numbers)}.
+    </div>
+
+*Literal content*
+
+  While the ``tal:content`` and ``tal:repeat`` attributes both
+  support the ``structure`` keyword which inserts the content as
+  a literal (without XML-escape), an object may also provide an
+  ``__html__`` method to the same effect.
+
+  The result of the method will be inserted as *structure*.
+
+  This is particularly useful for content which is substituted
+  using the expression operator ``${...}``, since the ``structure``
+  keyword is not allowed there.
+
+*Switch statement*
+
+  Two new attributes have been added: ``tal:switch`` and
+  ``tal:case``. A case attribute works like a condition and only
+  allows content if its value matches that of the nearest parent
+  switch value.
+
+
+Incompatibilities and differences
+#################################
+
+There are a number of incompatibilities and differences between the
+Chameleon language implementation and the Zope reference
+implementation (ZPT):
+
+*Default expression*
+
+  The default expression type is Python.
+
+*Template arguments*
+
+  Arguments passed by keyword to the render or call method are
+  inserted directly into the template execution namespace. This is
+  different from ZPT, where these are only available through the
+  ``options`` dictionary; see the sketch at the end of this section.
+
+  Zope::
+
+    <div tal:content="options/title" />
+
+  Chameleon::
+
+    <div tal:content="title" />
+
+*Special symbols*
+
+  The ``CONTEXTS`` symbol is not available.
+
+The `z3c.pt <http://pypi.python.org/pypi/z3c.pt>`_ package works as a
+compatibility layer. The template classes in this package provide an
+implementation which is fully compatible with ZPT.
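+
+As a minimal sketch of the keyword-argument calling convention
+described above, assuming that ``PageTemplate`` can be imported
+directly from the ``chameleon`` package (as noted in the changelog)
+and using a purely illustrative variable name ``title``:
+
+.. code-block:: python
+
+  from chameleon import PageTemplate
+
+  # The keyword argument becomes a template variable directly;
+  # there is no intermediate 'options' dictionary as in ZPT.
+  template = PageTemplate('<div tal:content="title" />')
+  print(template.render(title="Hello world"))
+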
diff --git a/lib/Chameleon-2.22/docs/requirements.txt b/lib/Chameleon-2.22/docs/requirements.txt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/docs/requirements.txt @@ -0,0 +1,1 @@ +Chameleon==2.11 diff --git a/lib/Chameleon-2.22/setup.cfg b/lib/Chameleon-2.22/setup.cfg new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/setup.cfg @@ -0,0 +1,14 @@ +[easy_install] +zip_ok = false + +[nosetests] +match = ^test +nocapture = 1 +cover-package = tree.codegen, tree.lexer, tree.parser, tree.nodes, tree.translation, tree.language, tree.tales, tree.expressions +cover-erase = 1 + +[egg_info] +tag_build = +tag_date = 0 +tag_svn_revision = 0 + diff --git a/lib/Chameleon-2.22/setup.py b/lib/Chameleon-2.22/setup.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/setup.py @@ -0,0 +1,85 @@ +__version__ = '2.22' + +import os +import sys + +from setuptools import setup, find_packages +from setuptools.command.test import test + +here = os.path.abspath(os.path.dirname(__file__)) +try: + README = open(os.path.join(here, 'README.rst')).read() + CHANGES = open(os.path.join(here, 'CHANGES.rst')).read() +except: # doesn't work under tox/pip + README = '' + CHANGES = '' + +install_requires = [] + +version = sys.version_info[:3] +if version < (2, 7, 0): + install_requires.append("ordereddict") + install_requires.append("unittest2") + + +class Benchmark(test): + description = "Run benchmarks" + user_options = [] + test_suite = None + + def initialize_options(self): + """init options""" + pass + + def finalize_options(self): + """finalize options""" + + self.distribution.tests_require = [ + 'zope.pagetemplate', + 'zope.component', + 'zope.i18n', + 'zope.testing'] + + def run(self): + test.run(self) + self.with_project_on_sys_path(self.run_benchmark) + + def run_benchmark(self): + from chameleon import benchmark + print("running benchmark...") + + benchmark.start() + +setup( + name="Chameleon", + version=__version__, + description="Fast HTML/XML Template Compiler.", + long_description="\n\n".join((README, CHANGES)), + classifiers=[ + "Development Status :: 5 - Production/Stable", + "Intended Audience :: Developers", + "Programming Language :: Python", + "Programming Language :: Python :: 2", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 2.6", + "Programming Language :: Python :: 2.7", + "Programming Language :: Python :: 3.1", + "Programming Language :: Python :: 3.2", + "Programming Language :: Python :: 3.3", + "Programming Language :: Python :: 3.4", + ], + author="Malthe Borch", + author_email="mborch at gmail.com", + url="http://www.pagetemplates.org/", + license='BSD-like (http://repoze.org/license.html)', + packages=find_packages('src'), + package_dir = {'': 'src'}, + include_package_data=True, + install_requires=install_requires, + zip_safe=False, + test_suite="chameleon.tests", + cmdclass={ + 'benchmark': Benchmark, + } + ) + diff --git a/lib/Chameleon-2.22/src/Chameleon.egg-info/PKG-INFO b/lib/Chameleon-2.22/src/Chameleon.egg-info/PKG-INFO new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/Chameleon.egg-info/PKG-INFO @@ -0,0 +1,1353 @@ +Metadata-Version: 1.1 +Name: Chameleon +Version: 2.22 +Summary: Fast HTML/XML Template Compiler. +Home-page: http://www.pagetemplates.org/ +Author: Malthe Borch +Author-email: mborch at gmail.com +License: BSD-like (http://repoze.org/license.html) +Description: Overview + ======== + + Chameleon is an HTML/XML template engine for `Python + <http://www.python.org>`_. 
It uses the *page templates* language. + + You can use it in any Python web application with just about any + version of Python (2.5 and up, including 3.x and `pypy + <http://pypy.org>`_). + + Visit the `website <http://pagetemplates.org/>`_ for more information + or the `documentation <http://chameleon.readthedocs.org/en/latest/>`_. + + License and Copyright + --------------------- + + This software is made available as-is under a BSD-like license [1]_ + (see included copyright notice). + + + Notes + ----- + + .. [1] This software is licensed under the `Repoze + <http://repoze.org/license.html>`_ license. + + + Changes + ======= + + 2.22 (2015-02-06) + ----------------- + + - Fix brown bag release. + + + 2.21 (2015-02-06) + ----------------- + + - Added ``RenderError`` exception which indicates that an error + occurred during the evaluation of an expression. + + - Clean up ``TemplateError`` exception implementation. + + + 2.20 (2015-01-12) + ----------------- + + - Pass ``search_path`` to template class when loaded using + ``TemplateLoader`` (or one of the derived classes). + [faassen] + + + 2.19 (2015-01-06) + ----------------- + + - Fix logging deprecation. + + - Fix environment-based configuration logging error. + + + 2.18 (2014-11-03) + ----------------- + + - Fix minor compilation error. + + + 2.17 (2014-11-03) + ----------------- + + - Add support for ``i18n:context``. + [wiggy] + + - Add missing 'parity' repeat property. + [voxspox] + + - Don't modify environment when getting variables from it. + [fschulze] + + + 2.16 (2014-05-06) + ----------------- + + - If a repeat expression evaluates to ``None`` then it is now + equivalent to an empty set. + + This changes a behavior introduced in 2.14. + + This fixes issue #172. + + - Remove fossil test dependency on deprecated ``distribute``. + + - Add explicit support / testing for Python 3.3 / 3.4. + + - Drop explicit support for Python 2.5 (out of maintenance, and no longer + supported by ``tox`` or ``Travis-CI``). + + + 2.15 (2014-03-11) + ----------------- + + - Add Support for Python 3.4's ``NameConstant``. + [brakhane] + + + 2.14 (2013-11-28) + ----------------- + + - Element repetition using the ``TAL`` namespace no longer includes + whitespace. This fixes issue #110. + + - Use absolute import for ``chameleon.interfaces`` module. This fixes + issue #161. + + + 2.13-1 (2013-10-24) + ------------------- + + - Fixing brown bag release. + + 2.13 (2013-10-21) + ----------------- + + Bugfixes: + + - The template cache mechanism now includes additional configuration + settings as part of the cache key such as ``strict`` and + ``trim_attribute_space``. + [ossmkitty] + + - Fix cache issue where sometimes cached templates would not load + correctly. + [ossmkitty] + + - In debug-mode, correctly remove temporary files when the module + loader is garbage-collected (on ``__del__``). + [graffic] + + - Fix error message when duplicate i18n:name directives are used in a + translation. + + - Using the three-argument form of ``getattr`` on a + ``chameleon.tal.RepeatDict`` no longer raises ``KeyError``, + letting the default provided to ``getattr`` be used. This fixes + attempting to adapt a ``RepeatDict`` to a Zope interface under + PyPy. + + 2.12 (2013-03-26) + ----------------- + + Changes: + + - When a ``tal:case`` condition succeeds, no other case now will. + + Bugfixes: + + - Implicit translation now correctly extracts and normalizes complete + sentences, instead of words. 
+ [witsch] + + - The ``default`` symbol in a ``tal:case`` condition now allows the + element only if no other case succeeds. + + + 2.11 (2012-11-15) + ----------------- + + Bugfixes: + + - An issue was resolved where a METAL statement was combined with a + ``tal:on-error`` handler. + + - Fix minor parser issue with incorrectly formatted processing + instructions. + + - Provide proper error handling for Python inline code blocks. + + Features: + + - The simple translation function now supports the + ``translationstring`` interface. + + Optimizations: + + - Minor optimization which correctly detects when an element has no + attributes. + + + 2.10 (2012-10-12) + ----------------- + + Deprecations: + + - The ``fast_translate`` function has been deprecated. Instead, the + default translation function is now always a function that simply + interpolates the mapping onto the message default or id. + + The motivation is that since version 2.9, the ``context`` argument + is non-trivial: the ``econtext`` mapping is passed. This breaks an + expectation on the Zope platform that the ``context`` parameter is + the HTTP request. Previously, with Chameleon this parameter was + simply not provided and so that did not cause issues as such. + + - The ``ast24`` module has been renamed to ``ast25``. This should help + clear up any confusion that Chameleon 2.x might be support a Python + interpreter less than version 2.5 (it does not). + + Features: + + - The ``ProxyExpr`` expression class (and hence the ``load:`` + expression type) is now a TALES-expression. In practical terms, this + means that the expression type (which computes a string result using + the standard ``"${...}"`` interpolation syntax and proxies the + result through a function) now supports fallback using the pipe + operator (``"|"``). This fixes issue #128. + + - An attempt to interpolate using the empty string as the expression + (i.e. ``${}``) now does nothing: the string ``${}`` is simply output + as is. + + - Added support for adding, modifying, and removing attributes using a + dictionary expression in ``tal:attributes`` (analogous to Genshi's + ``py:attrs`` directive):: + + <div tal:attributes="name value; attrs" /> + + In the example above, ``name`` is an identifier, while ``value`` and + ``attrs`` are Python expressions. However, ``attrs`` must evaluate + to a Python dictionary object (more concisely, the value must + implement the dictionary API-methods ``update()`` and ``items()``). + + Optimizations: + + - In order to cut down on the size of the compiled function objects, + some conversion and quoting statements have been put into + functions. In one measurement, the reduction was 35%. The benchmark + suite does *not* report of an increased render time (actually + slightly decreased). + + Bugfixes: + + - An exception is now raised if a trivial string is passed for + ``metal:fill-slot``. This fixes issue #89. + + - An empty string is now never translated. Not really a bug, but it's + been reported in as an issue (#92) because some translation + frameworks handle this case incorrectly. + + - The template module loader (file cache) now correctly encodes + generated template source code as UTF-8. This fixes issue #125. + + - Fixed issue where a closure might be reused unsafely in nested + template rendering. + + - Fixed markup class ``__repr__`` method. This fixes issue #124. + + - Added missing return statement to fix printing the non-abbreviated + filename in case of an exception. 
+ [tomo] + + 2.9.2 (2012-06-06) + ------------------ + + Bugfixes: + + - Fixed a PyPy incompatibility. + + - Fixed issue #109 which caused testing failures on some platforms. + + 2.9.1 (2012-06-01) + ------------------ + + Bugfixes: + + - Fixed issue #103. The ``tal:on-error`` statement now always adds an + explicit end-tag to the element, even with a substitution content of + nothing. + + - Fixed issue #113. The ``tal:on-error`` statement now works correctly + also for dynamic attributes. That is, the fallback tag now includes + only static attributes. + + - Fixed name error which prevented the benchmark from running + correctly. + + Compatibility: + + - Fixed deprecation warning on Python 3 for zope interface implements + declaration. This fixes issue #116. + + 2.9.0 (2012-05-31) + ------------------ + + Features: + + - The translation function now gets the ``econtext`` argument as the + value for ``context``. Note that historically, this was usually an + HTTP request which might provide language negotiation data through a + dictionary interface. + [alvinyue] + + Bugfixes: + + - Fixed import alias issue which would lead to a syntax error in + generated Python code. Fixes issue #114. + + 2.8.5 (2012-05-02) + ------------------ + + Bugfixes: + + - Fixed minor installation issues on Python 2.5 and 3. + [ppaez] + + - Ensure output is unicode even when trivial (an empty string). + + 2.8.4 (2012-04-18) + ------------------ + + Features: + + - In exception output, long filenames are now truncated to 60 + characters of output, preventing line wrap which makes it difficult + to scan the exception output. + + Bugfixes: + + - Include filename and location in exception output for exceptions + raised during compilation. + + - If a trivial translation substitution variable is given (i.e. an + empty string), simply ignore it. This fixes issue #106. + + 2.8.3 (2012-04-16) + ------------------ + + Features: + + - Log template source on debug-level before cooking. + + - The `target_language` argument, if given, is now available as a + variable in templates. + + 2.8.2 (2012-03-30) + ------------------ + + Features: + + - Temporary caches used in debug mode are cleaned up eagerly, rather + than waiting for process termination. + [mitchellrj] + + Bugfixes: + + - The `index`, `start` and `end` methods on the TAL repeat object are + now callable. This fixes an incompatibility with ZPT. + + - The loader now correctly handles absolute paths on Windows. + [rdale] + + 2.8.1 (2012-03-29) + ------------------ + + Features: + + - The exception formatter now lists errors in 'wrapping order'. This + means that the innermost, and presumably most relevant exception is + shown last. + + Bugfixes: + + - The exception formatter now correctly recognizes nested errors and + does not rewrap the dynamically generated exception class. + + - The exception formatter now correctly sets the ``__module__`` + attribute to that of the original exception class. + + 2.8.0 (2012-02-29) + ------------------ + + Features: + + - Added support for code blocks using the `<?python ... ?>` processing + instruction syntax. + + The scope is name assignments is up until the nearest macro + definition, or the template itself if macros are not used. + + Bugfixes: + + - Fall back to the exception class' ``__new__`` method to safely + create an exception object that is not implemented in Python. + + - The exception formatter now keeps track of already formatted + exceptions, and ignores them from further output. 
+ + 2.7.4 (2012-02-27) + ------------------ + + - The error handler now invokes the ``__init__`` method of + ``BaseException`` instead of the possibly overriden method (which + may take required arguments). This fixes issue #97. + [j23d, malthe] + + 2.7.3 (2012-01-16) + ------------------ + + Bugfixes: + + - The trim whitespace option now correctly trims actual whitespace to + a single character, appearing either to the left or to the right of + an element prefix or suffix string. + + 2.7.2 (2012-01-08) + ------------------ + + Features: + + - Added option ``trim_attribute_space`` that decides whether attribute + whitespace is stripped (at most down to a single space). This option + exists to provide compatibility with the reference + implementation. Fixes issue #85. + + Bugfixes: + + - Ignore unhashable builtins when generating a reverse builtin + map to quickly look up a builtin value. + [malthe] + + - Apply translation mapping even when a translation function is not + available. This fixes issue #83. + [malthe] + + - Fixed issue #80. The translation domain for a slot is defined by the + source document, i.e. the template providing the content for a slot + whether it be the default or provided through ``metal:fill-slot``. + [jcbrand] + + - In certain circumstances, a Unicode non-breaking space character would cause + a define clause to fail to parse. + + 2.7.1 (2011-12-29) + ------------------ + + Features: + + - Enable expression interpolation in CDATA. + + - The page template class now implements dictionary access to macros:: + + template[name] + + This is a short-hand for:: + + template.macros[name] + + Bugfixes: + + - An invalid define clause would be silently ignored; we now raise a + language error exception. This fixes issue #79. + + - Fixed regression where ``${...}`` interpolation expressions could + not span multiple lines. This fixes issue #77. + + 2.7.0 (2011-12-13) + ------------------ + + Features: + + - The ``load:`` expression now derives from the string expression such + that the ``${...}`` operator can be used for expression + interpolation. + + - The ``load:`` expression now accepts asset specs; these are resolved + by the ``pkg_resources.resource_filename`` function:: + + <package_name>:<path> + + An example from the test suite:: + + chameleon:tests/inputs/hello_world.pt + + Bugfixes: + + - If an attribute name for translation was not a valid Python + identifier, the compiler would generate invalid code. This has been + fixed, and the compiler now also throws an exception if an attribute + specification contains a comma. (Note that the only valid separator + character is the semicolon, when specifying attributes for + translation via the ``i18n:translate`` statement). This addresses + issue #76. + + 2.6.2 (2011-12-08) + ------------------ + + Bugfixes: + + - Fixed issue where ``tal:on-error`` would not respect + ``tal:omit-tag`` or namespace elements which are omitted by default + (such as ``<tal:block />``). + + - Fixed issue where ``macros`` attribute would not be available on + file-based templates due to incorrect initialization. + + - The ``TryExcept`` and ``TryFinally`` AST nodes are not available on + Python 3.3. These have been aliased to ``Try``. This fixes issue + #75. + + Features: + + - The TAL repeat item now makes a security declaration that grants + access to unprotected subobjects on the Zope 2 platform:: + + __allow_access_to_unprotected_subobjects__ = True + + This is required for legacy compatibility and does not affect other + environments. 
+ + - The template object now has a method ``write(body)`` which + explicitly decodes and cooks a string input. + + - Added configuration option ``loader_class`` which sets the class + used to create the template loader object. + + The class (essentially a callable) is created at template + construction time. + + 2.6.1 (2011-11-30) + ------------------ + + Bugfixes: + + - Decode HTML entities in expression interpolation strings. This fixes + issue #74. + + - Allow ``xml`` and ``xmlns`` attributes on TAL, I18N and METAL + namespace elements. This fixes issue #73. + + 2.6.0 (2011-11-24) + ------------------ + + Features: + + - Added support for implicit translation: + + The ``implicit_i18n_translate`` option enables implicit translation + of text. The ``implicit_i18n_attributes`` enables implicit + translation of attributes. The latter must be a set and for an + attribute to be implicitly translated, its lowercase string value + must be included in the set. + + - Added option ``strict`` (enabled by default) which decides whether + expressions are required to be valid at compile time. That is, if + not set, an exception is only raised for an invalid expression at + evaluation time. + + - An expression error now results in an exception only if the + expression is attempted evaluated during a rendering. + + - Added a configuration option ``prepend_relative_search_path`` which + decides whether the path relative to a file-based template is + prepended to the load search path. The default is ``True``. + + - Added a configuration option ``search_path`` to the file-based + template class, which adds additional paths to the template load + instance bound to the ``load:`` expression. The option takes a + string path or an iterable yielding string paths. The default value + is the empty set. + + Bugfixes: + + - Exception instances now support pickle/unpickle. + + - An attributes in i18n:attributes no longer needs to match an + existing or dynamic attribute in order to appear in the + element. This fixes issue #66. + + 2.5.3 (2011-10-23) + ------------------ + + Bugfixes: + + - Fixed an issue where a nested macro slot definition would fail even + though there existed a parent macro definition. This fixes issue + #69. + + 2.5.2 (2011-10-12) + ------------------ + + Bugfixes: + + - Fixed an issue where technically invalid input would result in a + compiler error. + + Features: + + - The markup class now inherits from the unicode string type such that + it's compatible with the string interface. + + 2.5.1 (2011-09-29) + ------------------ + + Bugfixes: + + - The symbol names "convert", "decode" and "translate" are now no + longer set as read-only *compiler internals*. This fixes issue #65. + + - Fixed an issue where a macro extension chain nested two levels (a + template uses a macro that extends a macro) would lose the middle + slot definitions if slots were defined nested. + + The compiler now throws an error if a nested slot definition is used + outside a macro extension context. + + 2.5.0 (2011-09-23) + ------------------ + + Features: + + - An expression type ``structure:`` is now available which wraps the + expression result as *structure* such that it is not escaped on + insertion, e.g.:: + + <div id="content"> + ${structure: context.body} + </div> + + This also means that the ``structure`` keyword for ``tal:content`` + and ``tal:replace`` now has an alternative spelling via the + expression type ``structure:``. + + - The string-based template constructor now accepts encoded input. 
+ + 2.4.6 (2011-09-23) + ------------------ + + Bugfixes: + + - The ``tal:on-error`` statement should catch all exceptions. + + - Fixed issue that would prevent escaping of interpolation expression + values appearing in text. + + 2.4.5 (2011-09-21) + ------------------ + + Bugfixes: + + - The ``tal:on-error`` handler should have a ``error`` variable + defined that has the value of the exception thrown. + + - The ``tal:on-error`` statement is a substitution statement and + should support the "text" and "structure" insertion methods. + + 2.4.4 (2011-09-15) + ------------------ + + Bugfixes: + + - An encoding specified in the XML document preamble is now read and + used to decode the template input to unicode. This fixes issue #55. + + - Encoded expression input on Python 3 is now correctly + decoded. Previously, the string representation output would be + included instead of an actually decoded string. + + - Expression result conversion steps are now correctly included in + error handling such that the exception output points to the + expression location. + + 2.4.3 (2011-09-13) + ------------------ + + Features: + + - When an encoding is provided, pass the 'ignore' flag to avoid + decoding issues with bad input. + + Bugfixes: + + - Fixed pypy compatibility issue (introduced in previous release). + + 2.4.2 (2011-09-13) + ------------------ + + Bugfixes: + + - Fixed an issue in the compiler where an internal variable (such as a + translation default value) would be cached, resulting in variable + scope corruption (see issue #49). + + 2.4.1 (2011-09-08) + ------------------ + + Bugfixes: + + - Fixed an issue where a default value for an attribute would + sometimes spill over into another attribute. + + - Fixed issue where the use of the ``default`` name in an attribute + interpolation expression would print the attribute value. This is + unexpected, because it's an expression, not a static text suitable + for output. An attribute value of ``default`` now correctly drops + the attribute. + + 2.4.0 (2011-08-22) + ------------------ + + Features: + + - Added an option ``boolean_attributes`` to evaluate and render a + provided set of attributes using a boolean logic: if the attribute + is a true value, the value will be the attribute name, otherwise the + attribute is dropped. + + In the reference implementation, the following attributes are + configured as boolean values when the template is rendered in + HTML-mode:: + + "compact", "nowrap", "ismap", "declare", "noshade", + "checked", "disabled", "readonly", "multiple", "selected", + "noresize", "defer" + + Note that in Chameleon, these attributes must be manually provided. + + Bugfixes: + + - The carriage return character (used on Windows platforms) would + incorrectly be included in Python comments. + + It is now replaced with a line break. + + This fixes issue #44. + + 2.3.8 (2011-08-19) + ------------------ + + - Fixed import error that affected Python 2.5 only. + + 2.3.7 (2011-08-19) + ------------------ + + Features: + + - Added an option ``literal_false`` that disables the default behavior + of dropping an attribute for a value of ``False`` (in addition to + ``None``). This modified behavior is the behavior exhibited in + reference implementation. + + Bugfixes: + + - Undo attribute special HTML attribute behavior (see previous + release). + + This turned out not to be a compatible behavior; rather, boolean + values should simply be coerced to a string. 
+ + Meanwhile, the reference implementation does support an HTML mode in + which the special attribute behavior is exhibited. + + We do not currently support this mode. + + 2.3.6 (2011-08-18) + ------------------ + + Features: + + - Certain HTML attribute names now have a special behavior for a + attribute value of ``True`` (or ``default`` if no default is + defined). For these attributes, this return value will result in the + name being printed as the value:: + + <input type="input" tal:attributes="checked True" /> + + will be rendered as:: + + <input type="input" checked="checked" /> + + This behavior is compatible with the reference implementation. + + 2.3.5 (2011-08-18) + ------------------ + + Features: + + - Added support for the set operator (``{item, item, ...}``). + + Bugfixes: + + - If macro is defined on the same element as a translation name, this + no longer results in a "translation name not allowed outside + translation" error. This fixes issue #43. + + - Attribute fallback to dictionary lookup now works on multiple items + (e.g. ``d1.d2.d2``). This fixes issue #42. + + 2.3.4 (2011-08-16) + ------------------ + + Features: + + - When inserting content in either attributes or text, a value of + ``True`` (like ``False`` and ``None``) will result in no + action. + + - Use statically assigned variables for ``"attrs"`` and + ``"default"``. This change yields a performance improvement of + 15-20%. + + - The template loader class now accepts an optional argument + ``default_extension`` which accepts a filename extension which will + be appended to the filename if there's not already an extension. + + Bugfixes: + + - The default symbol is now ``True`` for an attribute if the attribute + default is not provided. Note that the result is that the attribute + is dropped. This fixes issue #41. + + - Fixed an issue where assignment to a variable ``"type"`` would + fail. This fixes issue #40. + + - Fixed an issue where an (unsuccesful) assignment for a repeat loop + to a compiler internal name would not result in an error. + + - If the translation function returns the identical object, manually + coerce it to string. This fixes a compatibility issue with + translation functions which do not convert non-string objects to a + string value, but simply return them unchanged. + + 2.3.3 (2011-08-15) + ------------------ + + Features: + + - The ``load:`` expression now passes the initial keyword arguments to + its template loader (e.g. ``auto_reload`` and ``encoding``). + + - In the exception output, string variable values are now limited to a + limited output of characters, single line only. + + Bugfixes: + + - Fixed horizontal alignment of exception location info + (i.e. 'String:', 'Filename:' and 'Location:') such that they match + the template exception formatter. + + 2.3.2 (2011-08-11) + ------------------ + + Bugfixes: + + - Fixed issue where i18n:domain would not be inherited through macros + and slots. This fixes issue #37. + + 2.3.1 (2011-08-11) + ------------------ + + Features: + + - The ``Builtin`` node type may now be used to represent any Python + local or global name. This allows expression compilers to refer to + e.g. ``get`` or ``getitem``, or to explicit require a builtin object + such as one from the ``extra_builtins`` dictionary. + + Bugfixes: + + - Builtins which are not explicitly disallowed may now be redefined + and used as variables (e.g. ``nothing``). + + - Fixed compiler issue with circular node annotation loop. 
+ + 2.3 (2011-08-10) + ---------------- + + Features: + + - Added support for the following syntax to disable inline evaluation + in a comment: + + <!--? comment appears verbatim (no ${...} evaluation) --> + + Note that the initial question mark character (?) will be omitted + from output. + + - The parser now accepts '<' and '>' in attributes. Note that this is + invalid markup. Previously, the '<' would not be accepted as a valid + attribute value, but this would result in an 'unexpected end tag' + error elsewhere. This fixes issue #38. + + - The expression compiler now provides methods ``assign_text`` and + ``assign_value`` such that a template engine might configure this + value conversion to support e.g. encoded strings. + + Note that currently, the only client for the ``assign_text`` method + is the string expression type. + + - Enable template loader for string-based template classes. Note that + the ``filename`` keyword argument may be provided on initialization + to identify the template source by filename. This fixes issue #36. + + - Added ``extra_builtins`` option to the page template class. These + builtins are added to the default builtins dictionary at cook time + and may be provided at initialization using the ``extra_builtins`` + keyword argument. + + Bugfixes: + + - If a translation domain is set for a fill slot, use this setting + instead of the macro template domain. + + - The Python expression compiler now correctly decodes HTML entities + ``'gt'`` and ``'lt'``. This fixes issue #32. + + - The string expression compiler now correctly handles encoded text + (when support for encoded strings is enabled). This fixes issue #35. + + - Fixed an issue where setting the ``filename`` attribute on a + file-based template would not automatically cause an invalidation. + + - Exceptions raised by Chameleon can now be copied via + ``copy.copy``. This fixes issue #36. + [leorochael] + + - If copying the exception fails in the exception handler, simply + re-raise the original exception and log a warning. + + 2.2 (2011-07-28) + ---------------- + + Features: + + - Added new expression type ``load:`` that allows loading a + template. Both relative and absolute paths are supported. If the + path given is relative, then it will be resolved with respect to the + directory of the template. + + - Added support for dynamic evaluation of expressions. + + Note that this is to support legacy applications. It is not + currently wired into the provided template classes. + + - Template classes now have a ``builtins`` attribute which may be used + to define built-in variables always available in the template + variable scope. + + Incompatibilities: + + - The file-based template class no longer accepts a parameter + ``loader``. This parameter would be used to load a template from a + relative path, using a ``find(filename)`` method. This was however, + undocumented, and probably not very useful since we have the + ``TemplateLoader`` mechanism already. + + - The compiled template module now contains an ``initialize`` function + which takes values that map to the template builtins. The return + value of this function is a dictionary that contains the render + functions. + + Bugfixes: + + - The file-based template class no longer verifies the existance of a + template file (using ``os.lstat``). This now happens implicitly if + eager parsing is enabled, or otherwise when first needed (e.g. at + render time). 
+ + This is classified as a bug fix because the previous behavior was + probably not what you'd expect, especially if an application + initializes a lot of templates without needing to render them + immediately. + + 2.1.1 (2011-07-28) + ------------------ + + Features: + + - Improved exception display. The expression string is now shown in + the context of the original source (if available) with a marker + string indicating the location of the expression in the template + source. + + Bugfixes: + + - The ``structure`` insertion mode now correctly decodes entities for + any expression type (including ``string:``). This fixes issue #30. + + - Don't show internal variables in the exception formatter variable + listing. + + 2.1 (2011-07-25) + ---------------- + + Features: + + - Expression interpolation (using the ``${...}`` operator and + previously also ``$identifier``) now requires braces everywhere + except inside the ``string:`` expression type. + + This change is motivated by a number of legacy templates in which + the interpolation format without braces ``$identifier`` appears as + text. + + 2.0.2 (2011-07-25) + ------------------ + + Bugfixes: + + - Don't use dynamic variable scope for lambda-scoped variables (#27). + + - Avoid duplication of exception class and message in traceback. + + - Fixed issue where a ``metal:fill-slot`` would be ignored if a macro + was set to be used on the same element (#16). + + 2.0.1 (2011-07-23) + ------------------ + + Bugfixes: + + - Fixed issue where global variable definition from macro slots would + fail (they would instead be local). This also affects error + reporting from inside slots because this would be recorded + internally as a global. + + - Fixed issue with template cache digest (used for filenames); modules + are now invalidated whenever any changes are made to the + distribution set available (packages on ``sys.path``). + + - Fixed exception handler to better let exceptions propagate through + the renderer. + + - The disk-based module compiler now mangles template source filenames + such that the output Python module is valid and at root level (dots + and hyphens are replaced by an underscore). This fixes issue #17. + + - Fixed translations (i18n) on Python 2.5. + + 2.0 (2011-07-14) + ---------------- + + - Point release. + + 2.0-rc14 (2011-07-13) + --------------------- + + Bugfixes: + + - The tab character (``\t``) is now parsed correctly when used inside + tags. + + Features: + + - The ``RepeatDict`` class now works as a proxy behind a seperate + dictionary instance. + + - Added template constructor option ``keep_body`` which is a flag + (also available as a class attribute) that controls whether to save + the template body input in the ``body`` attribute. + + This is disabled by default, unless debug-mode is enabled. + + - The page template loader class now accepts an optional ``formats`` + argument which can be used to select an alternative template class. + + 2.0-rc13 (2011-07-07) + --------------------- + + Bugfixes: + + - The backslash character (followed by optional whitespace and a line + break) was not correctly interpreted as a continuation for Python + expressions. + + Features: + + - The Python expression implementation is now more flexible for + external subclassing via a new ``parse`` method. + + 2.0-rc12 (2011-07-04) + --------------------- + + Bugfixes: + + - Initial keyword arguments passed to a template now no longer "leak" + into the template variable space after a macro call. 
+ + - An unexpected end tag is now an unrecoverable error. + + Features: + + - Improve exception output. + + 2.0-rc11 (2011-05-26) + --------------------- + + Bugfixes: + + - Fixed issue where variable names that begin with an underscore were + seemingly allowed, but their use resulted in a compiler error. + + Features: + + - Template variable names are now allowed to be prefixed with a single + underscore, but not two or more (reserved for internal use). + + Examples of valid names:: + + item + ITEM + _item + camelCase + underscore_delimited + help + + - Added support for Genshi's comment "drop" syntax:: + + <!--! This comment will be dropped --> + + Note the additional exclamation (!) character. + + This fixes addresses issue #10. + + 2.0-rc10 (2011-05-24) + --------------------- + + Bugfixes: + + - The ``tal:attributes`` statement now correctly operates + case-insensitive. The attribute name given in the statement will + replace an existing attribute with the same name, without respect to + case. + + Features: + + - Added ``meta:interpolation`` statement to control expression + interpolation setting. + + Strings that disable the setting: ``"off"`` and ``"false"``. + Strings that enable the setting: ``"on"`` and ``"true"``. + + - Expression interpolation now works inside XML comments. + + 2.0-rc9 (2011-05-05) + -------------------- + + Features: + + - Better debugging support for string decode and conversion. If a + naive join fails, each element in the output will now be attempted + coerced to unicode to try and trigger the failure near to the bad + string. + + 2.0-rc8 (2011-04-11) + -------------------- + + Bugfixes: + + - If a macro defines two slots with the same name, a caller will now + fill both with a single usage. + + - If a valid of ``None`` is provided as the translation function + argument, we now fall back to the class default. + + 2.0-rc7 (2011-03-29) + -------------------- + + Bugfixes: + + - Fixed issue with Python 2.5 compatibility AST. This affected at + least PyPy 1.4. + + Features: + + - The ``auto_reload`` setting now defaults to the class value; the + base template class gives a default value of + ``chameleon.config.AUTO_RELOAD``. This change allows a subclass to + provide a custom default value (such as an application-specific + debug mode setting). + + + 2.0-rc6 (2011-03-19) + -------------------- + + Features: + + - Added support for ``target_language`` keyword argument to render + method. If provided, the argument will be curried onto the + translation function. + + Bugfixes: + + - The HTML entities 'lt', 'gt' and 'quot' appearing inside content + subtition expressions are now translated into their native character + values. This fixes an issue where you could not dynamically create + elements using the ``structure`` (which is possible in ZPT). The + need to create such structure stems from the lack of an expression + interpolation operator in ZPT. + + - Fixed duplicate file pointer issue with test suite (affected Windows + platforms only). This fixes issue #9. + [oliora] + + - Use already open file using ``os.fdopen`` when trying to write out + the module source. This fixes LP #731803. + + + 2.0-rc5 (2011-03-07) + -------------------- + + Bugfixes: + + - Fixed a number of issues concerning the escaping of attribute + values: + + 1) Static attribute values are now included as they appear in the + source. + + This means that invalid attribute values such as ``"true && + false"`` are now left alone. 
It's not the job of the template + engine to correct such markup, at least not in the default mode + of operation. + + 2) The string expression compiler no longer unescapes + values. Instead, this is left to each expression + compiler. Currently only the Python expression compiler unescapes + its input. + + 3) The dynamic escape code sequence now correctly only replaces + ampersands that are part of an HTML escape format. + + Imports: + + - The page template classes and the loader class can now be imported + directly from the ``chameleon`` module. + + Features: + + - If a custom template loader is not provided, relative paths are now + resolved using ``os.abspath`` (i.e. to the current working + directory). + + - Absolute paths are normalized using ``os.path.normpath`` and + ``os.path.expanduser``. This ensures that all paths are kept in + their "canonical" form. + + + 2.0-rc4 (2011-03-03) + -------------------- + + Bugfixes: + + - Fixed an issue where the output of an end-to-end string expression + would raise an exception if the expression evaluated to ``None`` (it + should simply output nothing). + + - The ``convert`` function (which is configurable on the template + class level) now defaults to the ``translate`` function (at + run-time). + + This fixes an issue where message objects were not translated (and + thus converted to a string) using the a provided ``translate`` + function. + + - Fixed string interpolation issue where an expression immediately + succeeded by a right curly bracket would not parse. + + This fixes issue #5. + + - Fixed error where ``tal:condition`` would be evaluated after + ``tal:repeat``. + + Features: + + - Python expression is now a TALES expression. That means that the + pipe operator can be used to chain two or more expressions in a + try-except sequence. + + This behavior was ported from the 1.x series. Note that while it's + still possible to use the pipe character ("|") in an expression, it + must now be escaped. + + - The template cache can now be shared by multiple processes. + + + 2.0-rc3 (2011-03-02) + -------------------- + + Bugfixes: + + - Fixed ``atexit`` handler. + + This fixes issue #3. + + - If a cache directory is specified, it will now be used even when not + in debug mode. + + - Allow "comment" attribute in the TAL namespace. + + This fixes an issue in the sense that the reference engine allows + any attribute within the TAL namespace. However, only "comment" is + in common use. + + - The template constructor now accepts a flag ``debug`` which puts the + template *instance* into debug-mode regardless of the global + setting. + + This fixes issue #1. + + Features: + + - Added exception handler for exceptions raised while evaluating an + expression. + + This handler raises (or attempts to) a new exception of the type + ``RenderError``, with an additional base class of the original + exception class. The string value of the exception is a formatted + error message which includes the expression that caused the + exception. + + If we are unable to create the exception class, the original + exception is re-raised. + + 2.0-rc2 (2011-02-28) + -------------------- + + - Fixed upload issue. + + 2.0-rc1 (2011-02-28) + -------------------- + + - Initial public release. See documentation for what's new in this + series. 
+ +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 2.6 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3.1 +Classifier: Programming Language :: Python :: 3.2 +Classifier: Programming Language :: Python :: 3.3 +Classifier: Programming Language :: Python :: 3.4 diff --git a/lib/Chameleon-2.22/src/Chameleon.egg-info/SOURCES.txt b/lib/Chameleon-2.22/src/Chameleon.egg-info/SOURCES.txt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/Chameleon.egg-info/SOURCES.txt @@ -0,0 +1,437 @@ +.gitignore +.travis.yml +CHANGES.rst +COPYRIGHT.txt +LICENSE.txt +MANIFEST.in +Makefile +README.rst +setup.cfg +setup.py +tox.ini +benchmarks/bm_chameleon.py +benchmarks/bm_mako.py +benchmarks/util.py +docs/conf.py +docs/configuration.rst +docs/index.rst +docs/integration.rst +docs/library.rst +docs/reference.rst +docs/requirements.txt +src/Chameleon.egg-info/PKG-INFO +src/Chameleon.egg-info/SOURCES.txt +src/Chameleon.egg-info/dependency_links.txt +src/Chameleon.egg-info/not-zip-safe +src/Chameleon.egg-info/top_level.txt +src/chameleon/__init__.py +src/chameleon/ast25.py +src/chameleon/astutil.py +src/chameleon/benchmark.py +src/chameleon/codegen.py +src/chameleon/compiler.py +src/chameleon/config.py +src/chameleon/exc.py +src/chameleon/i18n.py +src/chameleon/interfaces.py +src/chameleon/loader.py +src/chameleon/metal.py +src/chameleon/namespaces.py +src/chameleon/nodes.py +src/chameleon/parser.py +src/chameleon/program.py +src/chameleon/py25.py +src/chameleon/py26.py +src/chameleon/tal.py +src/chameleon/tales.py +src/chameleon/template.py +src/chameleon/tokenize.py +src/chameleon/utils.py +src/chameleon/tests/__init__.py +src/chameleon/tests/test_doctests.py +src/chameleon/tests/test_exc.py +src/chameleon/tests/test_loader.py +src/chameleon/tests/test_parser.py +src/chameleon/tests/test_sniffing.py +src/chameleon/tests/test_templates.py +src/chameleon/tests/test_tokenizer.py +src/chameleon/tests/inputs/001-interpolation.txt +src/chameleon/tests/inputs/001-interpolation.txt.py +src/chameleon/tests/inputs/001-variable-scope.html +src/chameleon/tests/inputs/001-variable-scope.pt +src/chameleon/tests/inputs/001-variable-scope.pt.py +src/chameleon/tests/inputs/001.xml +src/chameleon/tests/inputs/002-repeat-scope.pt +src/chameleon/tests/inputs/002-repeat-scope.pt.py +src/chameleon/tests/inputs/002.xml +src/chameleon/tests/inputs/003-content.pt +src/chameleon/tests/inputs/003-content.pt.py +src/chameleon/tests/inputs/003.xml +src/chameleon/tests/inputs/004-attributes.pt +src/chameleon/tests/inputs/004-attributes.pt.py +src/chameleon/tests/inputs/004.xml +src/chameleon/tests/inputs/005-default.pt +src/chameleon/tests/inputs/005-default.pt.py +src/chameleon/tests/inputs/005.xml +src/chameleon/tests/inputs/006-attribute-interpolation.pt +src/chameleon/tests/inputs/006-attribute-interpolation.pt.py +src/chameleon/tests/inputs/006.xml +src/chameleon/tests/inputs/007-content-interpolation.pt +src/chameleon/tests/inputs/007-content-interpolation.pt.py +src/chameleon/tests/inputs/007.xml +src/chameleon/tests/inputs/008-builtins.pt +src/chameleon/tests/inputs/008-builtins.pt.py +src/chameleon/tests/inputs/008.xml +src/chameleon/tests/inputs/009-literals.pt +src/chameleon/tests/inputs/009-literals.pt.py 
+src/chameleon/tests/inputs/009.xml +src/chameleon/tests/inputs/010-structure.pt +src/chameleon/tests/inputs/010-structure.pt.py +src/chameleon/tests/inputs/010.xml +src/chameleon/tests/inputs/011-messages.pt +src/chameleon/tests/inputs/011-messages.pt-en.py +src/chameleon/tests/inputs/011-messages.pt.py +src/chameleon/tests/inputs/011.xml +src/chameleon/tests/inputs/012-translation.pt +src/chameleon/tests/inputs/012-translation.pt-en.py +src/chameleon/tests/inputs/012-translation.pt.py +src/chameleon/tests/inputs/012.xml +src/chameleon/tests/inputs/013-repeat-nested.pt +src/chameleon/tests/inputs/013-repeat-nested.pt.py +src/chameleon/tests/inputs/013.xml +src/chameleon/tests/inputs/014-repeat-nested-similar.pt +src/chameleon/tests/inputs/014-repeat-nested-similar.pt.py +src/chameleon/tests/inputs/014.xml +src/chameleon/tests/inputs/015-translation-nested.pt +src/chameleon/tests/inputs/015-translation-nested.pt-en.py +src/chameleon/tests/inputs/015-translation-nested.pt.py +src/chameleon/tests/inputs/015.xml +src/chameleon/tests/inputs/016-explicit-translation.pt +src/chameleon/tests/inputs/016-explicit-translation.pt-en.py +src/chameleon/tests/inputs/016-explicit-translation.pt.py +src/chameleon/tests/inputs/016.xml +src/chameleon/tests/inputs/017-omit-tag.pt +src/chameleon/tests/inputs/017-omit-tag.pt.py +src/chameleon/tests/inputs/017.xml +src/chameleon/tests/inputs/018-translation-nested-dynamic.pt +src/chameleon/tests/inputs/018-translation-nested-dynamic.pt-en.py +src/chameleon/tests/inputs/018-translation-nested-dynamic.pt.py +src/chameleon/tests/inputs/018.xml +src/chameleon/tests/inputs/019-replace.pt +src/chameleon/tests/inputs/019-replace.pt.py +src/chameleon/tests/inputs/019.xml +src/chameleon/tests/inputs/020-on-error.pt +src/chameleon/tests/inputs/020-on-error.pt.py +src/chameleon/tests/inputs/020.xml +src/chameleon/tests/inputs/021-translation-domain.pt +src/chameleon/tests/inputs/021-translation-domain.pt-en.py +src/chameleon/tests/inputs/021-translation-domain.pt.py +src/chameleon/tests/inputs/021.xml +src/chameleon/tests/inputs/022-switch.pt +src/chameleon/tests/inputs/022-switch.pt.py +src/chameleon/tests/inputs/022.xml +src/chameleon/tests/inputs/023-condition.pt +src/chameleon/tests/inputs/023-condition.pt.py +src/chameleon/tests/inputs/023.xml +src/chameleon/tests/inputs/024-namespace-elements.pt +src/chameleon/tests/inputs/024-namespace-elements.pt.py +src/chameleon/tests/inputs/024.xml +src/chameleon/tests/inputs/025-repeat-whitespace.pt +src/chameleon/tests/inputs/025-repeat-whitespace.pt.py +src/chameleon/tests/inputs/025.xml +src/chameleon/tests/inputs/026-repeat-variable.pt +src/chameleon/tests/inputs/026-repeat-variable.pt.py +src/chameleon/tests/inputs/026.xml +src/chameleon/tests/inputs/027-attribute-replacement.pt +src/chameleon/tests/inputs/027-attribute-replacement.pt.py +src/chameleon/tests/inputs/027.xml +src/chameleon/tests/inputs/028-attribute-toggle.pt +src/chameleon/tests/inputs/028-attribute-toggle.pt.py +src/chameleon/tests/inputs/028.xml +src/chameleon/tests/inputs/029-attribute-ordering.pt +src/chameleon/tests/inputs/029-attribute-ordering.pt.py +src/chameleon/tests/inputs/029.xml +src/chameleon/tests/inputs/030-repeat-tuples.pt +src/chameleon/tests/inputs/030-repeat-tuples.pt.py +src/chameleon/tests/inputs/030.xml +src/chameleon/tests/inputs/031-namespace-with-tal.pt +src/chameleon/tests/inputs/031-namespace-with-tal.pt.py +src/chameleon/tests/inputs/031.xml +src/chameleon/tests/inputs/032-master-template.pt 
+src/chameleon/tests/inputs/032-master-template.pt.py +src/chameleon/tests/inputs/032.xml +src/chameleon/tests/inputs/033-use-macro-trivial.pt +src/chameleon/tests/inputs/033-use-macro-trivial.pt.py +src/chameleon/tests/inputs/033.xml +src/chameleon/tests/inputs/034-use-template-as-macro.pt +src/chameleon/tests/inputs/034-use-template-as-macro.pt.py +src/chameleon/tests/inputs/034.xml +src/chameleon/tests/inputs/035-use-macro-with-fill-slot.pt +src/chameleon/tests/inputs/035-use-macro-with-fill-slot.pt.py +src/chameleon/tests/inputs/035.xml +src/chameleon/tests/inputs/036-use-macro-inherits-dynamic-scope.pt +src/chameleon/tests/inputs/036.xml +src/chameleon/tests/inputs/037-use-macro-local-variable-scope.pt +src/chameleon/tests/inputs/037.xml +src/chameleon/tests/inputs/038-use-macro-globals.pt +src/chameleon/tests/inputs/038.xml +src/chameleon/tests/inputs/039-globals.pt +src/chameleon/tests/inputs/039.xml +src/chameleon/tests/inputs/040-macro-using-template-symbol.pt +src/chameleon/tests/inputs/040.xml +src/chameleon/tests/inputs/041-translate-nested-names.pt +src/chameleon/tests/inputs/041.xml +src/chameleon/tests/inputs/042-use-macro-fill-footer.pt +src/chameleon/tests/inputs/042.xml +src/chameleon/tests/inputs/043-macro-nested-dynamic-vars.pt +src/chameleon/tests/inputs/043.xml +src/chameleon/tests/inputs/044-tuple-define.pt +src/chameleon/tests/inputs/044.xml +src/chameleon/tests/inputs/045-namespaces.pt +src/chameleon/tests/inputs/045.xml +src/chameleon/tests/inputs/046-extend-macro.pt +src/chameleon/tests/inputs/046.xml +src/chameleon/tests/inputs/047-use-extended-macro.pt +src/chameleon/tests/inputs/047.xml +src/chameleon/tests/inputs/048-use-extended-macro-fill-original.pt +src/chameleon/tests/inputs/048.xml +src/chameleon/tests/inputs/049-entities-in-attributes.pt +src/chameleon/tests/inputs/049.xml +src/chameleon/tests/inputs/050-define-macro-and-use-not-extend.pt +src/chameleon/tests/inputs/050.xml +src/chameleon/tests/inputs/051-use-non-extended-macro.pt +src/chameleon/tests/inputs/051.xml +src/chameleon/tests/inputs/052-i18n-domain-inside-filled-slot.pt +src/chameleon/tests/inputs/052.xml +src/chameleon/tests/inputs/053-special-characters-in-attributes.pt +src/chameleon/tests/inputs/053.xml +src/chameleon/tests/inputs/054-import-expression.pt +src/chameleon/tests/inputs/054.xml +src/chameleon/tests/inputs/055-attribute-fallback-to-dict-lookup.pt +src/chameleon/tests/inputs/055.xml +src/chameleon/tests/inputs/056-comment-attribute.pt +src/chameleon/tests/inputs/056.xml +src/chameleon/tests/inputs/057-order.pt +src/chameleon/tests/inputs/057.xml +src/chameleon/tests/inputs/058-script.pt +src/chameleon/tests/inputs/058.xml +src/chameleon/tests/inputs/059-embedded-javascript.pt +src/chameleon/tests/inputs/059.xml +src/chameleon/tests/inputs/060-macro-with-multiple-same-slots.pt +src/chameleon/tests/inputs/060.xml +src/chameleon/tests/inputs/061-fill-one-slot-but-two-defined.pt +src/chameleon/tests/inputs/061.xml +src/chameleon/tests/inputs/062-comments-and-expressions.pt +src/chameleon/tests/inputs/062.xml +src/chameleon/tests/inputs/063-continuation.pt +src/chameleon/tests/inputs/063.xml +src/chameleon/tests/inputs/064-tags-and-special-characters.pt +src/chameleon/tests/inputs/064.xml +src/chameleon/tests/inputs/065-use-macro-in-fill.pt +src/chameleon/tests/inputs/065.xml +src/chameleon/tests/inputs/066-load-expression.pt +src/chameleon/tests/inputs/066.xml +src/chameleon/tests/inputs/067-attribute-decode.pt +src/chameleon/tests/inputs/067.xml 
+src/chameleon/tests/inputs/068-less-than-greater-than-in-attributes.pt +src/chameleon/tests/inputs/068.xml +src/chameleon/tests/inputs/069-translation-domain-and-macro.pt +src/chameleon/tests/inputs/069.xml +src/chameleon/tests/inputs/070-translation-domain-and-use-macro.pt +src/chameleon/tests/inputs/070.xml +src/chameleon/tests/inputs/071-html-attribute-defaults.pt +src/chameleon/tests/inputs/071.xml +src/chameleon/tests/inputs/072-repeat-interpolation.pt +src/chameleon/tests/inputs/072.xml +src/chameleon/tests/inputs/073-utf8-encoded.pt +src/chameleon/tests/inputs/073.xml +src/chameleon/tests/inputs/074-encoded-template.pt +src/chameleon/tests/inputs/074.xml +src/chameleon/tests/inputs/075-nested-macros.pt +src/chameleon/tests/inputs/075.xml +src/chameleon/tests/inputs/076-nested-macro-override.pt +src/chameleon/tests/inputs/076.xml +src/chameleon/tests/inputs/077-i18n-attributes.pt +src/chameleon/tests/inputs/077.xml +src/chameleon/tests/inputs/078-tags-and-newlines.pt +src/chameleon/tests/inputs/078.xml +src/chameleon/tests/inputs/079-implicit-i18n.pt +src/chameleon/tests/inputs/079.xml +src/chameleon/tests/inputs/080-xmlns-namespace-on-tal.pt +src/chameleon/tests/inputs/080.xml +src/chameleon/tests/inputs/081-load-spec.pt +src/chameleon/tests/inputs/081.xml +src/chameleon/tests/inputs/082-load-spec-computed.pt +src/chameleon/tests/inputs/082.xml +src/chameleon/tests/inputs/083-template-dict-to-macro.pt +src/chameleon/tests/inputs/083.xml +src/chameleon/tests/inputs/084-interpolation-in-cdata.pt +src/chameleon/tests/inputs/084.xml +src/chameleon/tests/inputs/085-nested-translation.pt +src/chameleon/tests/inputs/085.xml +src/chameleon/tests/inputs/086-self-closing.pt +src/chameleon/tests/inputs/086.xml +src/chameleon/tests/inputs/087-code-blocks.pt +src/chameleon/tests/inputs/087.xml +src/chameleon/tests/inputs/088-python-newlines.pt +src/chameleon/tests/inputs/088.xml +src/chameleon/tests/inputs/089-load-fallback.pt +src/chameleon/tests/inputs/089.xml +src/chameleon/tests/inputs/090-tuple-expression.pt +src/chameleon/tests/inputs/090.xml +src/chameleon/tests/inputs/091-repeat-none.pt +src/chameleon/tests/inputs/091.xml +src/chameleon/tests/inputs/092.xml +src/chameleon/tests/inputs/093.xml +src/chameleon/tests/inputs/094.xml +src/chameleon/tests/inputs/095.xml +src/chameleon/tests/inputs/096.xml +src/chameleon/tests/inputs/097.xml +src/chameleon/tests/inputs/098.xml +src/chameleon/tests/inputs/099.xml +src/chameleon/tests/inputs/100.xml +src/chameleon/tests/inputs/101-unclosed-tags.html +src/chameleon/tests/inputs/101.xml +src/chameleon/tests/inputs/102-unquoted-attributes.html +src/chameleon/tests/inputs/102.xml +src/chameleon/tests/inputs/103-simple-attribute.html +src/chameleon/tests/inputs/103.xml +src/chameleon/tests/inputs/104.xml +src/chameleon/tests/inputs/105.xml +src/chameleon/tests/inputs/106.xml +src/chameleon/tests/inputs/107.xml +src/chameleon/tests/inputs/108.xml +src/chameleon/tests/inputs/109.xml +src/chameleon/tests/inputs/110.xml +src/chameleon/tests/inputs/111.xml +src/chameleon/tests/inputs/112.xml +src/chameleon/tests/inputs/113.xml +src/chameleon/tests/inputs/114.xml +src/chameleon/tests/inputs/115.xml +src/chameleon/tests/inputs/116.xml +src/chameleon/tests/inputs/117.xml +src/chameleon/tests/inputs/118.xml +src/chameleon/tests/inputs/119.xml +src/chameleon/tests/inputs/120-translation-context.pt +src/chameleon/tests/inputs/121-translation-comment.pt +src/chameleon/tests/inputs/greeting.pt +src/chameleon/tests/inputs/hello_world.pt 
+src/chameleon/tests/inputs/hello_world.txt +src/chameleon/tests/inputs/hello_world.txt.py +src/chameleon/tests/outputs/001.html +src/chameleon/tests/outputs/001.pt +src/chameleon/tests/outputs/001.txt +src/chameleon/tests/outputs/002.pt +src/chameleon/tests/outputs/003.pt +src/chameleon/tests/outputs/004.pt +src/chameleon/tests/outputs/005.pt +src/chameleon/tests/outputs/006.pt +src/chameleon/tests/outputs/007.pt +src/chameleon/tests/outputs/008.pt +src/chameleon/tests/outputs/009.pt +src/chameleon/tests/outputs/010.pt +src/chameleon/tests/outputs/011-en.pt +src/chameleon/tests/outputs/011.pt +src/chameleon/tests/outputs/012-en.pt +src/chameleon/tests/outputs/012.pt +src/chameleon/tests/outputs/013.pt +src/chameleon/tests/outputs/014.pt +src/chameleon/tests/outputs/015-en.pt +src/chameleon/tests/outputs/015.pt +src/chameleon/tests/outputs/016-en.pt +src/chameleon/tests/outputs/016.pt +src/chameleon/tests/outputs/017.pt +src/chameleon/tests/outputs/018-en.pt +src/chameleon/tests/outputs/018.pt +src/chameleon/tests/outputs/019.pt +src/chameleon/tests/outputs/020.pt +src/chameleon/tests/outputs/021-en.pt +src/chameleon/tests/outputs/021.pt +src/chameleon/tests/outputs/022.pt +src/chameleon/tests/outputs/023.pt +src/chameleon/tests/outputs/024.pt +src/chameleon/tests/outputs/025.pt +src/chameleon/tests/outputs/026.pt +src/chameleon/tests/outputs/027.pt +src/chameleon/tests/outputs/028.pt +src/chameleon/tests/outputs/029.pt +src/chameleon/tests/outputs/030.pt +src/chameleon/tests/outputs/031.pt +src/chameleon/tests/outputs/032.pt +src/chameleon/tests/outputs/033.pt +src/chameleon/tests/outputs/034.pt +src/chameleon/tests/outputs/035.pt +src/chameleon/tests/outputs/036.pt +src/chameleon/tests/outputs/037.pt +src/chameleon/tests/outputs/038.pt +src/chameleon/tests/outputs/039.pt +src/chameleon/tests/outputs/040.pt +src/chameleon/tests/outputs/041.pt +src/chameleon/tests/outputs/042.pt +src/chameleon/tests/outputs/043.pt +src/chameleon/tests/outputs/044.pt +src/chameleon/tests/outputs/045.pt +src/chameleon/tests/outputs/046.pt +src/chameleon/tests/outputs/047.pt +src/chameleon/tests/outputs/048.pt +src/chameleon/tests/outputs/049.pt +src/chameleon/tests/outputs/050.pt +src/chameleon/tests/outputs/051.pt +src/chameleon/tests/outputs/052.pt +src/chameleon/tests/outputs/053.pt +src/chameleon/tests/outputs/054.pt +src/chameleon/tests/outputs/055.pt +src/chameleon/tests/outputs/056.pt +src/chameleon/tests/outputs/057.pt +src/chameleon/tests/outputs/058.pt +src/chameleon/tests/outputs/059.pt +src/chameleon/tests/outputs/060.pt +src/chameleon/tests/outputs/061.pt +src/chameleon/tests/outputs/062.pt +src/chameleon/tests/outputs/063.pt +src/chameleon/tests/outputs/064.pt +src/chameleon/tests/outputs/065.pt +src/chameleon/tests/outputs/066.pt +src/chameleon/tests/outputs/067.pt +src/chameleon/tests/outputs/068.pt +src/chameleon/tests/outputs/069-en.pt +src/chameleon/tests/outputs/069.pt +src/chameleon/tests/outputs/070-en.pt +src/chameleon/tests/outputs/070.pt +src/chameleon/tests/outputs/071.pt +src/chameleon/tests/outputs/072.pt +src/chameleon/tests/outputs/073.pt +src/chameleon/tests/outputs/074.pt +src/chameleon/tests/outputs/075.pt +src/chameleon/tests/outputs/076.pt +src/chameleon/tests/outputs/077-en.pt +src/chameleon/tests/outputs/077.pt +src/chameleon/tests/outputs/078.pt +src/chameleon/tests/outputs/079-en.pt +src/chameleon/tests/outputs/079.pt +src/chameleon/tests/outputs/080.pt +src/chameleon/tests/outputs/081.pt +src/chameleon/tests/outputs/082.pt +src/chameleon/tests/outputs/083.pt 
+src/chameleon/tests/outputs/084.pt +src/chameleon/tests/outputs/085-en.pt +src/chameleon/tests/outputs/085.pt +src/chameleon/tests/outputs/086.pt +src/chameleon/tests/outputs/087.pt +src/chameleon/tests/outputs/088.pt +src/chameleon/tests/outputs/089.pt +src/chameleon/tests/outputs/090.pt +src/chameleon/tests/outputs/091.pt +src/chameleon/tests/outputs/101.html +src/chameleon/tests/outputs/102.html +src/chameleon/tests/outputs/103.html +src/chameleon/tests/outputs/120-en.pt +src/chameleon/tests/outputs/120.pt +src/chameleon/tests/outputs/121.pt +src/chameleon/tests/outputs/greeting.pt +src/chameleon/tests/outputs/hello_world.pt +src/chameleon/tests/outputs/hello_world.txt +src/chameleon/zpt/__init__.py +src/chameleon/zpt/loader.py +src/chameleon/zpt/program.py +src/chameleon/zpt/template.py \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/Chameleon.egg-info/dependency_links.txt b/lib/Chameleon-2.22/src/Chameleon.egg-info/dependency_links.txt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/Chameleon.egg-info/dependency_links.txt @@ -0,0 +1,1 @@ + diff --git a/lib/Chameleon-2.22/src/Chameleon.egg-info/not-zip-safe b/lib/Chameleon-2.22/src/Chameleon.egg-info/not-zip-safe new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/Chameleon.egg-info/not-zip-safe @@ -0,0 +1,1 @@ + diff --git a/lib/Chameleon-2.22/src/Chameleon.egg-info/top_level.txt b/lib/Chameleon-2.22/src/Chameleon.egg-info/top_level.txt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/Chameleon.egg-info/top_level.txt @@ -0,0 +1,1 @@ +chameleon diff --git a/lib/Chameleon-2.22/src/chameleon/__init__.py b/lib/Chameleon-2.22/src/chameleon/__init__.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/__init__.py @@ -0,0 +1,6 @@ +from .zpt.template import PageTemplate +from .zpt.template import PageTemplateFile +from .zpt.template import PageTextTemplate +from .zpt.template import PageTextTemplateFile +from .zpt.loader import TemplateLoader as PageTemplateLoader +from .exc import TemplateError diff --git a/lib/Chameleon-2.22/src/chameleon/ast25.py b/lib/Chameleon-2.22/src/chameleon/ast25.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/ast25.py @@ -0,0 +1,135 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2008 by Armin Ronacher. +# License: Python License. +# + +import _ast + +from _ast import * + + +def fix_missing_locations(node): + """ + When you compile a node tree with compile(), the compiler expects lineno and + col_offset attributes for every node that supports them. This is rather + tedious to fill in for generated nodes, so this helper adds these attributes + recursively where not already set, by setting them to the values of the + parent node. It works recursively starting at *node*. + """ + def _fix(node, lineno, col_offset): + if 'lineno' in node._attributes: + if not hasattr(node, 'lineno'): + node.lineno = lineno + else: + lineno = node.lineno + if 'col_offset' in node._attributes: + if not hasattr(node, 'col_offset'): + node.col_offset = col_offset + else: + col_offset = node.col_offset + for child in iter_child_nodes(node): + _fix(child, lineno, col_offset) + _fix(node, 1, 0) + return node + + +def iter_child_nodes(node): + """ + Yield all direct child nodes of *node*, that is, all fields that are nodes + and all items of fields that are lists of nodes. 
+ """ + for name, field in iter_fields(node): + if isinstance(field, (AST, _ast.AST)): + yield field + elif isinstance(field, list): + for item in field: + if isinstance(item, (AST, _ast.AST)): + yield item + + +def iter_fields(node): + """ + Yield a tuple of ``(fieldname, value)`` for each field in ``node._fields`` + that is present on *node*. + """ + + for field in node._fields or (): + try: + yield field, getattr(node, field) + except AttributeError: + pass + + +def walk(node): + """ + Recursively yield all child nodes of *node*, in no specified order. This is + useful if you only want to modify nodes in place and don't care about the + context. + """ + from collections import deque + todo = deque([node]) + while todo: + node = todo.popleft() + todo.extend(iter_child_nodes(node)) + yield node + + +class NodeVisitor(object): + """ + A node visitor base class that walks the abstract syntax tree and calls a + visitor function for every node found. This function may return a value + which is forwarded by the `visit` method. + + This class is meant to be subclassed, with the subclass adding visitor + methods. + + Per default the visitor functions for the nodes are ``'visit_'`` + + class name of the node. So a `TryFinally` node visit function would + be `visit_TryFinally`. This behavior can be changed by overriding + the `visit` method. If no visitor function exists for a node + (return value `None`) the `generic_visit` visitor is used instead. + + Don't use the `NodeVisitor` if you want to apply changes to nodes during + traversing. For this a special visitor exists (`NodeTransformer`) that + allows modifications. + """ + + def visit(self, node): + """Visit a node.""" + method = 'visit_' + node.__class__.__name__ + visitor = getattr(self, method, self.generic_visit) + return visitor(node) + + def generic_visit(self, node): + """Called if no explicit visitor function exists for a node.""" + for field, value in iter_fields(node): + if isinstance(value, list): + for item in value: + if isinstance(item, (AST, _ast.AST)): + self.visit(item) + elif isinstance(value, (AST, _ast.AST)): + self.visit(value) + + +class AST(object): + _fields = () + _attributes = 'lineno', 'col_offset' + + def __init__(self, *args, **kwargs): + self.__dict__.update(kwargs) + self._fields = self._fields or () + for name, value in zip(self._fields, args): + setattr(self, name, value) + + +for name, cls in _ast.__dict__.items(): + if isinstance(cls, type) and issubclass(cls, _ast.AST): + try: + cls.__bases__ = (AST, ) + cls.__bases__ + except TypeError: + pass + + +class ExceptHandler(AST): + _fields = "type", "name", "body" diff --git a/lib/Chameleon-2.22/src/chameleon/astutil.py b/lib/Chameleon-2.22/src/chameleon/astutil.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/astutil.py @@ -0,0 +1,977 @@ +# -*- coding: utf-8 -*- +# +# Copyright (C) 2008-2009 Edgewall Software +# All rights reserved. +# +# This software is licensed as described in the file COPYING, which +# you should have received as part of this distribution. The terms +# are also available at http://genshi.edgewall.org/wiki/License. +# +# This software consists of voluntary contributions made by many +# individuals. For the exact contribution history, see the revision +# history and logs, available at http://genshi.edgewall.org/log/. 
+ +"""Support classes for generating code from abstract syntax trees.""" + +try: + import ast +except ImportError: + from chameleon import ast25 as ast + +import sys +import logging +import weakref +import collections + +node_annotations = weakref.WeakKeyDictionary() + +try: + node_annotations[ast.Name()] = None +except TypeError: + logging.debug( + "Unable to create weak references to AST nodes. " \ + "A lock will be used around compilation loop." + ) + + node_annotations = {} + +__docformat__ = 'restructuredtext en' + + +def annotated(value): + node = load("annotation") + node_annotations[node] = value + return node + + +def parse(source, mode='eval'): + return compile(source, '', mode, ast.PyCF_ONLY_AST) + + +def load(name): + return ast.Name(id=name, ctx=ast.Load()) + + +def store(name): + return ast.Name(id=name, ctx=ast.Store()) + + +def param(name): + return ast.Name(id=name, ctx=ast.Param()) + + +def delete(name): + return ast.Name(id=name, ctx=ast.Del()) + + +def subscript(name, value, ctx): + return ast.Subscript( + value=value, + slice=ast.Index(value=ast.Str(s=name)), + ctx=ctx, + ) + + +def walk_names(target, mode): + for node in ast.walk(target): + if isinstance(node, ast.Name) and \ + isinstance(node.ctx, mode): + yield node.id + + +def iter_fields(node): + """ + Yield a tuple of ``(fieldname, value)`` for each field in ``node._fields`` + that is present on *node*. + """ + for field in node._fields: + try: + yield field, getattr(node, field) + except AttributeError: + pass + + +def iter_child_nodes(node): + """ + Yield all direct child nodes of *node*, that is, all fields that are nodes + and all items of fields that are lists of nodes. + """ + for name, field in iter_fields(node): + if isinstance(field, Node): + yield field + elif isinstance(field, list): + for item in field: + if isinstance(item, Node): + yield item + + +def walk(node): + """ + Recursively yield all descendant nodes in the tree starting at *node* + (including *node* itself), in no specified order. This is useful if you + only want to modify nodes in place and don't care about the context. + """ + todo = collections.deque([node]) + while todo: + node = todo.popleft() + todo.extend(iter_child_nodes(node)) + yield node + + +def copy(source, target): + target.__class__ = source.__class__ + target.__dict__ = source.__dict__ + + +def swap(root, replacement, name): + for node in ast.walk(root): + if (isinstance(node, ast.Name) and + isinstance(node.ctx, ast.Load) and + node.id == name): + assert hasattr(replacement, '_fields') + node_annotations.setdefault(node, replacement) + + +def marker(name): + return ast.Str(s="__%s" % name) + + +class Node(object): + """AST baseclass that gives us a convenient initialization + method. We explicitly declare and use the ``_fields`` attribute.""" + + _fields = () + + def __init__(self, *args, **kwargs): + assert isinstance(self._fields, tuple) + self.__dict__.update(kwargs) + for name, value in zip(self._fields, args): + setattr(self, name, value) + + def __repr__(self): + """Poor man's single-line pretty printer.""" + + name = type(self).__name__ + return '<%s%s at %x>' % ( + name, + "".join(" %s=%r" % (name, getattr(self, name, "\"?\"")) + for name in self._fields), + id(self) + ) + + def extract(self, condition): + result = [] + for node in walk(self): + if condition(node): + result.append(node) + + return result + + +class Builtin(Node): + """Represents a Python builtin. 
+ + Used when a builtin is used internally by the compiler, to avoid + clashing with a user assignment (e.g. ``help`` is a builtin, but + also commonly assigned in templates). + """ + + _fields = "id", "ctx" + + ctx = ast.Load() + + +class Symbol(Node): + """Represents an importable symbol.""" + + _fields = "value", + + +class Static(Node): + """Represents a static value.""" + + _fields = "value", "name" + + name = None + + +class Comment(Node): + _fields = "text", "space", "stmt" + + stmt = None + space = "" + + +class ASTCodeGenerator(object): + """General purpose base class for AST transformations. + + Every visitor method can be overridden to return an AST node that has been + altered or replaced in some way. + """ + + def __init__(self, tree): + self.lines_info = [] + self.line_info = [] + self.lines = [] + self.line = "" + self.last = None + self.indent = 0 + self.blame_stack = [] + self.visit(tree) + + if self.line.strip(): + self._new_line() + + self.line = None + self.line_info = None + + # strip trivial lines + self.code = "\n".join( + line.strip() and line or "" + for line in self.lines + ) + + def _change_indent(self, delta): + self.indent += delta + + def _new_line(self): + if self.line is not None: + self.lines.append(self.line) + self.lines_info.append(self.line_info) + self.line = ' ' * 4 * self.indent + if len(self.blame_stack) == 0: + self.line_info = [] + self.last = None + else: + self.line_info = [(0, self.blame_stack[-1],)] + self.last = self.blame_stack[-1] + + def _write(self, s): + if len(s) == 0: + return + if len(self.blame_stack) == 0: + if self.last is not None: + self.last = None + self.line_info.append((len(self.line), self.last)) + else: + if self.last != self.blame_stack[-1]: + self.last = self.blame_stack[-1] + self.line_info.append((len(self.line), self.last)) + self.line += s + + def flush(self): + if self.line: + self._new_line() + + def visit(self, node): + if node is None: + return None + if type(node) is tuple: + return tuple([self.visit(n) for n in node]) + try: + self.blame_stack.append((node.lineno, node.col_offset,)) + info = True + except AttributeError: + info = False + visitor = getattr(self, 'visit_%s' % node.__class__.__name__, None) + if visitor is None: + raise Exception('No handler for ``%s`` (%s).' % ( + node.__class__.__name__, repr(node))) + ret = visitor(node) + if info: + self.blame_stack.pop() + return ret + + def visit_Module(self, node): + for n in node.body: + self.visit(n) + visit_Interactive = visit_Module + visit_Suite = visit_Module + + def visit_Expression(self, node): + return self.visit(node.body) + + # arguments = (expr* args, identifier? vararg, + # identifier? 
kwarg, expr* defaults) + def visit_arguments(self, node): + first = True + no_default_count = len(node.args) - len(node.defaults) + for i, arg in enumerate(node.args): + if not first: + self._write(', ') + else: + first = False + self.visit(arg) + if i >= no_default_count: + self._write('=') + self.visit(node.defaults[i - no_default_count]) + if getattr(node, 'vararg', None): + if not first: + self._write(', ') + else: + first = False + self._write('*' + node.vararg) + if getattr(node, 'kwarg', None): + if not first: + self._write(', ') + else: + first = False + self._write('**' + node.kwarg) + + def visit_arg(self, node): + self._write(node.arg) + + # FunctionDef(identifier name, arguments args, + # stmt* body, expr* decorators) + def visit_FunctionDef(self, node): + self._new_line() + for decorator in getattr(node, 'decorator_list', ()): + self._new_line() + self._write('@') + self.visit(decorator) + self._new_line() + self._write('def ' + node.name + '(') + self.visit(node.args) + self._write('):') + self._change_indent(1) + for statement in node.body: + self.visit(statement) + self._change_indent(-1) + + # ClassDef(identifier name, expr* bases, stmt* body) + def visit_ClassDef(self, node): + self._new_line() + self._write('class ' + node.name) + if node.bases: + self._write('(') + self.visit(node.bases[0]) + for base in node.bases[1:]: + self._write(', ') + self.visit(base) + self._write(')') + self._write(':') + self._change_indent(1) + for statement in node.body: + self.visit(statement) + self._change_indent(-1) + + # Return(expr? value) + def visit_Return(self, node): + self._new_line() + self._write('return') + if getattr(node, 'value', None): + self._write(' ') + self.visit(node.value) + + # Delete(expr* targets) + def visit_Delete(self, node): + self._new_line() + self._write('del ') + self.visit(node.targets[0]) + for target in node.targets[1:]: + self._write(', ') + self.visit(target) + + # Assign(expr* targets, expr value) + def visit_Assign(self, node): + self._new_line() + for target in node.targets: + self.visit(target) + self._write(' = ') + self.visit(node.value) + + # AugAssign(expr target, operator op, expr value) + def visit_AugAssign(self, node): + self._new_line() + self.visit(node.target) + self._write(' ' + self.binary_operators[node.op.__class__] + '= ') + self.visit(node.value) + + # Print(expr? 
dest, expr* values, bool nl) + def visit_Print(self, node): + self._new_line() + self._write('print') + if getattr(node, 'dest', None): + self._write(' >> ') + self.visit(node.dest) + if getattr(node, 'values', None): + self._write(', ') + else: + self._write(' ') + if getattr(node, 'values', None): + self.visit(node.values[0]) + for value in node.values[1:]: + self._write(', ') + self.visit(value) + if not node.nl: + self._write(',') + + # For(expr target, expr iter, stmt* body, stmt* orelse) + def visit_For(self, node): + self._new_line() + self._write('for ') + self.visit(node.target) + self._write(' in ') + self.visit(node.iter) + self._write(':') + self._change_indent(1) + for statement in node.body: + self.visit(statement) + self._change_indent(-1) + if getattr(node, 'orelse', None): + self._new_line() + self._write('else:') + self._change_indent(1) + for statement in node.orelse: + self.visit(statement) + self._change_indent(-1) + + # While(expr test, stmt* body, stmt* orelse) + def visit_While(self, node): + self._new_line() + self._write('while ') + self.visit(node.test) + self._write(':') + self._change_indent(1) + for statement in node.body: + self.visit(statement) + self._change_indent(-1) + if getattr(node, 'orelse', None): + self._new_line() + self._write('else:') + self._change_indent(1) + for statement in node.orelse: + self.visit(statement) + self._change_indent(-1) + + # If(expr test, stmt* body, stmt* orelse) + def visit_If(self, node): + self._new_line() + self._write('if ') + self.visit(node.test) + self._write(':') + self._change_indent(1) + for statement in node.body: + self.visit(statement) + self._change_indent(-1) + if getattr(node, 'orelse', None): + self._new_line() + self._write('else:') + self._change_indent(1) + for statement in node.orelse: + self.visit(statement) + self._change_indent(-1) + + # With(expr context_expr, expr? optional_vars, stmt* body) + def visit_With(self, node): + self._new_line() + self._write('with ') + self.visit(node.context_expr) + if getattr(node, 'optional_vars', None): + self._write(' as ') + self.visit(node.optional_vars) + self._write(':') + self._change_indent(1) + for statement in node.body: + self.visit(statement) + self._change_indent(-1) + + # Raise(expr? type, expr? inst, expr? 
tback) + def visit_Raise(self, node): + self._new_line() + self._write('raise') + if not getattr(node, "type", None): + exc = getattr(node, "exc", None) + if exc is None: + return + self._write(' ') + return self.visit(exc) + self._write(' ') + self.visit(node.type) + if not node.inst: + return + self._write(', ') + self.visit(node.inst) + if not node.tback: + return + self._write(', ') + self.visit(node.tback) + + # Try(stmt* body, excepthandler* handlers, stmt* orelse, stmt* finalbody) + def visit_Try(self, node): + self._new_line() + self._write('try:') + self._change_indent(1) + for statement in node.body: + self.visit(statement) + self._change_indent(-1) + if getattr(node, 'handlers', None): + for handler in node.handlers: + self.visit(handler) + self._new_line() + + if getattr(node, 'orelse', None): + self._write('else:') + self._change_indent(1) + for statement in node.orelse: + self.visit(statement) + self._change_indent(-1) + + if getattr(node, 'finalbody', None): + self._new_line() + self._write('finally:') + self._change_indent(1) + for statement in node.finalbody: + self.visit(statement) + self._change_indent(-1) + + # TryExcept(stmt* body, excepthandler* handlers, stmt* orelse) + def visit_TryExcept(self, node): + self._new_line() + self._write('try:') + self._change_indent(1) + for statement in node.body: + self.visit(statement) + self._change_indent(-1) + if getattr(node, 'handlers', None): + for handler in node.handlers: + self.visit(handler) + self._new_line() + if getattr(node, 'orelse', None): + self._write('else:') + self._change_indent(1) + for statement in node.orelse: + self.visit(statement) + self._change_indent(-1) + + # excepthandler = (expr? type, expr? name, stmt* body) + def visit_ExceptHandler(self, node): + self._new_line() + self._write('except') + if getattr(node, 'type', None): + self._write(' ') + self.visit(node.type) + if getattr(node, 'name', None): + if sys.version_info[0] == 2: + assert getattr(node, 'type', None) + self._write(', ') + else: + self._write(' as ') + self.visit(node.name) + self._write(':') + self._change_indent(1) + for statement in node.body: + self.visit(statement) + self._change_indent(-1) + visit_excepthandler = visit_ExceptHandler + + # TryFinally(stmt* body, stmt* finalbody) + def visit_TryFinally(self, node): + self._new_line() + self._write('try:') + self._change_indent(1) + for statement in node.body: + self.visit(statement) + self._change_indent(-1) + + if getattr(node, 'finalbody', None): + self._new_line() + self._write('finally:') + self._change_indent(1) + for statement in node.finalbody: + self.visit(statement) + self._change_indent(-1) + + # Assert(expr test, expr? msg) + def visit_Assert(self, node): + self._new_line() + self._write('assert ') + self.visit(node.test) + if getattr(node, 'msg', None): + self._write(', ') + self.visit(node.msg) + + def visit_alias(self, node): + self._write(node.name) + if getattr(node, 'asname', None): + self._write(' as ') + self._write(node.asname) + + # Import(alias* names) + def visit_Import(self, node): + self._new_line() + self._write('import ') + self.visit(node.names[0]) + for name in node.names[1:]: + self._write(', ') + self.visit(name) + + # ImportFrom(identifier module, alias* names, int? level) + def visit_ImportFrom(self, node): + self._new_line() + self._write('from ') + if node.level: + self._write('.' 
* node.level) + self._write(node.module) + self._write(' import ') + self.visit(node.names[0]) + for name in node.names[1:]: + self._write(', ') + self.visit(name) + + # Exec(expr body, expr? globals, expr? locals) + def visit_Exec(self, node): + self._new_line() + self._write('exec ') + self.visit(node.body) + if not node.globals: + return + self._write(', ') + self.visit(node.globals) + if not node.locals: + return + self._write(', ') + self.visit(node.locals) + + # Global(identifier* names) + def visit_Global(self, node): + self._new_line() + self._write('global ') + self.visit(node.names[0]) + for name in node.names[1:]: + self._write(', ') + self.visit(name) + + # Expr(expr value) + def visit_Expr(self, node): + self._new_line() + self.visit(node.value) + + # Pass + def visit_Pass(self, node): + self._new_line() + self._write('pass') + + # Break + def visit_Break(self, node): + self._new_line() + self._write('break') + + # Continue + def visit_Continue(self, node): + self._new_line() + self._write('continue') + + ### EXPRESSIONS + def with_parens(f): + def _f(self, node): + self._write('(') + f(self, node) + self._write(')') + return _f + + bool_operators = {ast.And: 'and', ast.Or: 'or'} + + # BoolOp(boolop op, expr* values) + @with_parens + def visit_BoolOp(self, node): + joiner = ' ' + self.bool_operators[node.op.__class__] + ' ' + self.visit(node.values[0]) + for value in node.values[1:]: + self._write(joiner) + self.visit(value) + + binary_operators = { + ast.Add: '+', + ast.Sub: '-', + ast.Mult: '*', + ast.Div: '/', + ast.Mod: '%', + ast.Pow: '**', + ast.LShift: '<<', + ast.RShift: '>>', + ast.BitOr: '|', + ast.BitXor: '^', + ast.BitAnd: '&', + ast.FloorDiv: '//' + } + + # BinOp(expr left, operator op, expr right) + @with_parens + def visit_BinOp(self, node): + self.visit(node.left) + self._write(' ' + self.binary_operators[node.op.__class__] + ' ') + self.visit(node.right) + + unary_operators = { + ast.Invert: '~', + ast.Not: 'not', + ast.UAdd: '+', + ast.USub: '-', + } + + # UnaryOp(unaryop op, expr operand) + def visit_UnaryOp(self, node): + self._write(self.unary_operators[node.op.__class__] + ' ') + self.visit(node.operand) + + # Lambda(arguments args, expr body) + @with_parens + def visit_Lambda(self, node): + self._write('lambda ') + self.visit(node.args) + self._write(': ') + self.visit(node.body) + + # IfExp(expr test, expr body, expr orelse) + @with_parens + def visit_IfExp(self, node): + self.visit(node.body) + self._write(' if ') + self.visit(node.test) + self._write(' else ') + self.visit(node.orelse) + + # Dict(expr* keys, expr* values) + def visit_Dict(self, node): + self._write('{') + for key, value in zip(node.keys, node.values): + self.visit(key) + self._write(': ') + self.visit(value) + self._write(', ') + self._write('}') + + def visit_Set(self, node): + self._write('{') + elts = list(node.elts) + last = elts.pop() + for elt in elts: + self.visit(elt) + self._write(', ') + self.visit(last) + self._write('}') + + # ListComp(expr elt, comprehension* generators) + def visit_ListComp(self, node): + self._write('[') + self.visit(node.elt) + for generator in node.generators: + # comprehension = (expr target, expr iter, expr* ifs) + self._write(' for ') + self.visit(generator.target) + self._write(' in ') + self.visit(generator.iter) + for ifexpr in generator.ifs: + self._write(' if ') + self.visit(ifexpr) + self._write(']') + + # GeneratorExp(expr elt, comprehension* generators) + def visit_GeneratorExp(self, node): + self._write('(') + self.visit(node.elt) + for 
generator in node.generators: + # comprehension = (expr target, expr iter, expr* ifs) + self._write(' for ') + self.visit(generator.target) + self._write(' in ') + self.visit(generator.iter) + for ifexpr in generator.ifs: + self._write(' if ') + self.visit(ifexpr) + self._write(')') + + # Yield(expr? value) + def visit_Yield(self, node): + self._write('yield') + if getattr(node, 'value', None): + self._write(' ') + self.visit(node.value) + + comparison_operators = { + ast.Eq: '==', + ast.NotEq: '!=', + ast.Lt: '<', + ast.LtE: '<=', + ast.Gt: '>', + ast.GtE: '>=', + ast.Is: 'is', + ast.IsNot: 'is not', + ast.In: 'in', + ast.NotIn: 'not in', + } + + # Compare(expr left, cmpop* ops, expr* comparators) + @with_parens + def visit_Compare(self, node): + self.visit(node.left) + for op, comparator in zip(node.ops, node.comparators): + self._write(' ' + self.comparison_operators[op.__class__] + ' ') + self.visit(comparator) + + # Call(expr func, expr* args, keyword* keywords, + # expr? starargs, expr? kwargs) + def visit_Call(self, node): + self.visit(node.func) + self._write('(') + first = True + for arg in node.args: + if not first: + self._write(', ') + first = False + self.visit(arg) + + for keyword in node.keywords: + if not first: + self._write(', ') + first = False + # keyword = (identifier arg, expr value) + self._write(keyword.arg) + self._write('=') + self.visit(keyword.value) + if getattr(node, 'starargs', None): + if not first: + self._write(', ') + first = False + self._write('*') + self.visit(node.starargs) + + if getattr(node, 'kwargs', None): + if not first: + self._write(', ') + first = False + self._write('**') + self.visit(node.kwargs) + self._write(')') + + # Repr(expr value) + def visit_Repr(self, node): + self._write('`') + self.visit(node.value) + self._write('`') + + # Num(object n) + def visit_Num(self, node): + self._write(repr(node.n)) + + # Str(string s) + def visit_Str(self, node): + self._write(repr(node.s)) + + # Attribute(expr value, identifier attr, expr_context ctx) + def visit_Attribute(self, node): + self.visit(node.value) + self._write('.') + self._write(node.attr) + + # Subscript(expr value, slice slice, expr_context ctx) + def visit_Subscript(self, node): + self.visit(node.value) + self._write('[') + + def _process_slice(node): + if isinstance(node, ast.Ellipsis): + self._write('...') + elif isinstance(node, ast.Slice): + if getattr(node, 'lower', 'None'): + self.visit(node.lower) + self._write(':') + if getattr(node, 'upper', None): + self.visit(node.upper) + if getattr(node, 'step', None): + self._write(':') + self.visit(node.step) + elif isinstance(node, ast.Index): + self.visit(node.value) + elif isinstance(node, ast.ExtSlice): + self.visit(node.dims[0]) + for dim in node.dims[1:]: + self._write(', ') + self.visit(dim) + else: + raise NotImplemented('Slice type not implemented') + _process_slice(node.slice) + self._write(']') + + # Name(identifier id, expr_context ctx) + def visit_Name(self, node): + self._write(node.id) + + # List(expr* elts, expr_context ctx) + def visit_List(self, node): + self._write('[') + for elt in node.elts: + self.visit(elt) + self._write(', ') + self._write(']') + + # Tuple(expr *elts, expr_context ctx) + def visit_Tuple(self, node): + self._write('(') + for elt in node.elts: + self.visit(elt) + self._write(', ') + self._write(')') + + # NameConstant(singleton value) + def visit_NameConstant(self, node): + self._write(str(node.value)) + +class AnnotationAwareVisitor(ast.NodeVisitor): + def visit(self, node): + annotation = 
node_annotations.get(node) + if annotation is not None: + assert hasattr(annotation, '_fields') + node = annotation + + super(AnnotationAwareVisitor, self).visit(node) + + def apply_transform(self, node): + if node not in node_annotations: + result = self.transform(node) + if result is not None and result is not node: + node_annotations[node] = result + + +class NameLookupRewriteVisitor(AnnotationAwareVisitor): + def __init__(self, transform): + self.transform = transform + self.transformed = set() + self.scopes = [set()] + + def __call__(self, node): + self.visit(node) + return self.transformed + + def visit_Name(self, node): + scope = self.scopes[-1] + if isinstance(node.ctx, ast.Param): + scope.add(node.id) + elif node.id not in scope: + self.transformed.add(node.id) + self.apply_transform(node) + + def visit_FunctionDef(self, node): + self.scopes[-1].add(node.name) + + def visit_alias(self, node): + name = node.asname if node.asname is not None else node.name + self.scopes[-1].add(name) + + def visit_Lambda(self, node): + self.scopes.append(set()) + try: + self.visit(node.args) + self.visit(node.body) + finally: + self.scopes.pop() + + +class ItemLookupOnAttributeErrorVisitor(AnnotationAwareVisitor): + def __init__(self, transform): + self.transform = transform + + def visit_Attribute(self, node): + self.generic_visit(node) + self.apply_transform(node) diff --git a/lib/Chameleon-2.22/src/chameleon/benchmark.py b/lib/Chameleon-2.22/src/chameleon/benchmark.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/benchmark.py @@ -0,0 +1,478 @@ +import unittest +import time +import os +import re +from .utils import text_ + +re_amp = re.compile(r'&(?!([A-Za-z]+|#[0-9]+);)') + +BIGTABLE_ZPT = """\ +<table xmlns="http://www.w3.org/1999/xhtml" +xmlns:tal="http://xml.zope.org/namespaces/tal"> +<tr tal:repeat="row python: options['table']"> +<td tal:repeat="c python: row.values()"> +<span tal:define="d python: c + 1" +tal:attributes="class python: 'column-' + str(d)" +tal:content="python: d" /> +</td> +</tr> +</table>""" + +MANY_STRINGS_ZPT = """\ +<table xmlns="http://www.w3.org/1999/xhtml" +xmlns:tal="http://xml.zope.org/namespaces/tal"> +<tr tal:repeat="i python: xrange(1000)"> +<td tal:content="string: number ${i}" /> +</tr> +</table> +""" + +HELLO_WORLD_ZPT = """\ +<html xmlns="http://www.w3.org/1999/xhtml" +xmlns:tal="http://xml.zope.org/namespaces/tal"> +<body> +<h1>Hello, world!</h1> +</body> +</html> +""" + +I18N_ZPT = """\ +<html xmlns="http://www.w3.org/1999/xhtml" +xmlns:tal="http://xml.zope.org/namespaces/tal" +xmlns:i18n="http://xml.zope.org/namespaces/i18n"> + <body> + <div tal:repeat="i python: xrange(10)"> + <div i18n:translate=""> + Hello world! + </div> + <div i18n:translate="hello_world"> + Hello world! 
+ </div> + <div i18n:translate=""> + <sup>Hello world!</sup> + </div> + </div> + </body> +</html> +""" + + +def benchmark(title): + def decorator(f): + def wrapper(*args): + print( + "==========================\n " \ + "%s\n==========================" % \ + title) + return f(*args) + return wrapper + return decorator + + +def timing(func, *args, **kwargs): + t1 = t2 = time.time() + i = 0 + while t2 - t1 < 3: + func(**kwargs) + func(**kwargs) + func(**kwargs) + func(**kwargs) + i += 4 + t2 = time.time() + return float(10 * (t2 - t1)) / i + + +START = 0 +END = 1 +TAG = 2 + + +def yield_tokens(table=None): + index = [] + tag = index.append + _re_amp = re_amp + tag(START) + yield "<", "html", "", ">\n" + for r in table: + tag(START) + yield "<", "tr", "", ">\n" + + for c in r.values(): + d = c + 1 + tag(START) + yield "<", "td", "", ">\n" + + _tmp5 = d + if not isinstance(_tmp5, unicode): + _tmp5 = str(_tmp5) + if ('&' in _tmp5): + if (';' in _tmp5): + _tmp5 = _re_amp.sub('&', _tmp5) + else: + _tmp5 = _tmp5.replace('&', '&') + if ('<' in _tmp5): + _tmp5 = _tmp5.replace('<', '<') + if ('>' in _tmp5): + _tmp5 = _tmp5.replace('>', '>') + if ('"' in _tmp5): + _tmp5 = _tmp5.replace('"', '"') + _tmp5 = "column-%s" % _tmp5 + + _tmp = d + if (_tmp.__class__ not in (str, unicode, int, float, )): + raise + if (_tmp is not None): + if not isinstance(_tmp, unicode): + _tmp = str(_tmp) + if ('&' in _tmp): + if (';' in _tmp): + _tmp = _re_amp.sub('&', _tmp) + else: + _tmp = _tmp.replace('&', '&') + if ('<' in _tmp): + _tmp = _tmp.replace('<', '<') + if ('>' in _tmp): + _tmp = _tmp.replace('>', '>') + tag(START) + + t = ["classicism"] + + yield "<", "span", " ", t[0], '="', _tmp5, '"', ">\n" + tag(END) + yield "</", "span", ">\n" + tag(END) + yield "</", "td", ">\n" + tag(END) + yield "</", "tr", ">\n" + tag(END) + yield "</", "html", ">\n" + + +def yield_tokens_dict_version(**kwargs): + index = [] + tag = index.append + _re_amp = re_amp + tag(START) + yield "<", "html", "", ">\n" + + for r in kwargs['table']: + kwargs['r'] = r + tag(START) + yield "<", "tr", "", ">\n" + + for c in kwargs['r'].values(): + kwargs['d'] = c + 1 + tag(START) + yield "<", "td", "", ">\n" + + _tmp5 = kwargs['d'] + if not isinstance(_tmp5, unicode): + _tmp5 = str(_tmp5) + if ('&' in _tmp5): + if (';' in _tmp5): + _tmp5 = _re_amp.sub('&', _tmp5) + else: + _tmp5 = _tmp5.replace('&', '&') + if ('<' in _tmp5): + _tmp5 = _tmp5.replace('<', '<') + if ('>' in _tmp5): + _tmp5 = _tmp5.replace('>', '>') + if ('"' in _tmp5): + _tmp5 = _tmp5.replace('"', '"') + _tmp5 = "column-%s" % _tmp5 + + _tmp = kwargs['d'] + if (_tmp.__class__ not in (str, unicode, int, float, )): + raise + if (_tmp is not None): + if not isinstance(_tmp, unicode): + _tmp = str(_tmp) + if ('&' in _tmp): + if (';' in _tmp): + _tmp = _re_amp.sub('&', _tmp) + else: + _tmp = _tmp.replace('&', '&') + if ('<' in _tmp): + _tmp = _tmp.replace('<', '<') + if ('>' in _tmp): + _tmp = _tmp.replace('>', '>') + tag(START) + + t = ["classicism"] + + yield "<", "span", " ", t[0], '="', _tmp5, '"', ">\n" + tag(END) + yield "</", "span", ">\n" + tag(END) + yield "</", "td", ">\n" + tag(END) + yield "</", "tr", ">\n" + tag(END) + yield "</", "html", ">\n" + + +def yield_stream(table=None): + _re_amp = re_amp + yield START, ("html", "", "\n"), None + for r in table: + yield START, ("tr", "", "\n"), None + + for c in r.values(): + d = c + 1 + yield START, ("td", "", "\n"), None + + _tmp5 = d + if not isinstance(_tmp5, unicode): + _tmp5 = str(_tmp5) + if ('&' in _tmp5): + if (';' in _tmp5): + 
_tmp5 = _re_amp.sub('&', _tmp5) + else: + _tmp5 = _tmp5.replace('&', '&') + if ('<' in _tmp5): + _tmp5 = _tmp5.replace('<', '<') + if ('>' in _tmp5): + _tmp5 = _tmp5.replace('>', '>') + if ('"' in _tmp5): + _tmp5 = _tmp5.replace('"', '"') + _tmp5 = "column-%s" % _tmp5 + + _tmp = d + if (_tmp.__class__ not in (str, unicode, int, float, )): + raise + if (_tmp is not None): + if not isinstance(_tmp, unicode): + _tmp = str(_tmp) + if ('&' in _tmp): + if (';' in _tmp): + _tmp = _re_amp.sub('&', _tmp) + else: + _tmp = _tmp.replace('&', '&') + if ('<' in _tmp): + _tmp = _tmp.replace('<', '<') + if ('>' in _tmp): + _tmp = _tmp.replace('>', '>') + yield START, ("span", "", _tmp, " ", "class", _tmp5), None + + yield END, ("span", "", "\n"), None + yield END, ("td", "", "\n"), None + yield END, ("tr", "", "\n"), None + yield END, ("html", "", "\n"), None + +from itertools import chain + + +def bigtable_python_tokens(table=None, renderer=None): + iterable = renderer(table=table) + stream = chain(*iterable) + return "".join(stream) + + +def bigtable_python_stream(table=None, renderer=None): + stream = renderer(table=table) + return "".join(stream_output(stream)) + + +def bigtable_python_stream_with_filter(table=None, renderer=None): + stream = renderer(table=table) + return "".join(stream_output(uppercase_filter(stream))) + + +def uppercase_filter(stream): + for kind, data, pos in stream: + if kind is START: + data = (data[0], data[1], data[2].upper(),) + data[3:] + elif kind is END: + data = (data[0], data[1], data[2].upper()) + elif kind is TAG: + raise NotImplemented + yield kind, data, pos + + +def stream_output(stream): + for kind, data, pos in stream: + if kind is START: + tag = data[0] + yield "<%s" % tag + l = len(data) + + # optimize for common cases + if l == 3: + pass + elif l == 6: + yield '%s%s="%s"' % (data[3], data[4], data[5]) + else: + i = 3 + while i < l: + yield '%s%s="%s"' % (data[i], data[i + 1], data[i + 2]) + i += 3 + yield "%s>%s" % (data[1], data[2]) + elif kind is END: + yield "</%s%s>%s" % data + elif kind is TAG: + raise NotImplemented + + +class Benchmarks(unittest.TestCase): + table = [dict(a=1, b=2, c=3, d=4, e=5, f=6, g=7, h=8, i=9, j=10) \ + for x in range(1000)] + + def setUp(self): + # set up i18n component + from zope.i18n import translate + from zope.i18n.interfaces import INegotiator + from zope.i18n.interfaces import ITranslationDomain + from zope.i18n.negotiator import Negotiator + from zope.i18n.simpletranslationdomain import SimpleTranslationDomain + from zope.i18n.tests.test_negotiator import Env + from zope.tales.tales import Context + + self.env = Env(('klingon', 'da', 'en', 'fr', 'no')) + + class ZopeI18NContext(Context): + + def translate(self, msgid, domain=None, context=None, + mapping=None, default=None): + context = self.vars['options']['env'] + return translate(msgid, domain, mapping, + context=context, default=default) + + def _getContext(self, contexts=None, **kwcontexts): + if contexts is not None: + if kwcontexts: + kwcontexts.update(contexts) + else: + kwcontexts = contexts + return ZopeI18NContext(self, kwcontexts) + + def _pt_getEngineContext(namespace): + self = namespace['template'] + engine = self.pt_getEngine() + return _getContext(engine, namespace) + + import zope.component + zope.component.provideUtility(Negotiator(), INegotiator) + catalog = SimpleTranslationDomain('domain') + zope.component.provideUtility(catalog, ITranslationDomain, 'domain') + self.files = os.path.abspath(os.path.join(__file__, '..', 'input')) + + @staticmethod + def 
_chameleon(body, **kwargs): + from .zpt.template import PageTemplate + return PageTemplate(body, **kwargs) + + @staticmethod + def _zope(body): + from zope.pagetemplate.pagetemplatefile import PageTemplate + template = PageTemplate() + template.pt_edit(body, 'text/xhtml') + return template + + @benchmark(text_("BIGTABLE [python]")) + def test_bigtable(self): + options = {'table': self.table} + + t_chameleon = timing(self._chameleon(BIGTABLE_ZPT), options=options) + print("chameleon: %7.2f" % t_chameleon) + + t_chameleon_utf8 = timing( + self._chameleon(BIGTABLE_ZPT, encoding='utf-8'), options=options) + print("chameleon (utf-8): %7.2f" % t_chameleon_utf8) + + t_tokens = timing( + bigtable_python_tokens, table=self.table, renderer=yield_tokens) + print("token: %7.2f" % t_tokens) + + t_tokens_dict_version = timing( + bigtable_python_tokens, table=self.table, + renderer=yield_tokens_dict_version) + print("token (dict): %7.2f" % t_tokens_dict_version) + + t_stream = timing( + bigtable_python_stream, table=self.table, renderer=yield_stream) + print("stream: %7.2f" % t_stream) + + t_zope = timing(self._zope(BIGTABLE_ZPT), table=self.table) + print("zope.pagetemplate: %7.2f" % t_zope) + print(" %7.1fX" % (t_zope / t_chameleon)) + + print("--------------------------") + print("check: %d vs %d" % ( + len(self._chameleon(BIGTABLE_ZPT)(options=options)), + len(self._zope(BIGTABLE_ZPT)(table=self.table)))) + print("--------------------------") + + @benchmark(text_("MANY STRINGS [python]")) + def test_many_strings(self): + t_chameleon = timing(self._chameleon(MANY_STRINGS_ZPT)) + print("chameleon: %7.2f" % t_chameleon) + t_zope = timing(self._zope(MANY_STRINGS_ZPT)) + print("zope.pagetemplate: %7.2f" % t_zope) + print(" %7.1fX" % (t_zope / t_chameleon)) + + print("--------------------------") + print("check: %d vs %d" % ( + len(self._chameleon(MANY_STRINGS_ZPT)()), + len(self._zope(MANY_STRINGS_ZPT)()))) + print("--------------------------") + + @benchmark(text_("HELLO WORLD")) + def test_hello_world(self): + t_chameleon = timing(self._chameleon(HELLO_WORLD_ZPT)) * 1000 + print("chameleon: %7.2f" % t_chameleon) + t_zope = timing(self._zope(HELLO_WORLD_ZPT)) * 1000 + print("zope.pagetemplate: %7.2f" % t_zope) + print(" %7.1fX" % (t_zope / t_chameleon)) + + print("--------------------------") + print("check: %d vs %d" % ( + len(self._chameleon(HELLO_WORLD_ZPT)()), + len(self._zope(HELLO_WORLD_ZPT)()))) + print("--------------------------") + + @benchmark(text_("I18N")) + def test_i18n(self): + from zope.i18n import translate + t_chameleon = timing( + self._chameleon(I18N_ZPT), + translate=translate, + language="klingon") * 1000 + print("chameleon: %7.2f" % t_chameleon) + t_zope = timing(self._zope(I18N_ZPT), env=self.env) * 1000 + print("zope.pagetemplate: %7.2f" % t_zope) + print(" %7.1fX" % (t_zope / t_chameleon)) + + @benchmark(text_("COMPILATION")) + def test_compilation(self): + template = self._chameleon(HELLO_WORLD_ZPT) + + def chameleon_cook_and_render(template=template): + template.cook(HELLO_WORLD_ZPT) + template() + + t_chameleon = timing(chameleon_cook_and_render) * 1000 + print("chameleon: %7.2f" % t_chameleon) + + template = self._zope(HELLO_WORLD_ZPT) + + def zope_cook_and_render(templte=template): + template._cook() + template() + + t_zope = timing(zope_cook_and_render) * 1000 + print("zope.pagetemplate: %7.2f" % t_zope) + print(" %0.3fX" % (t_zope / t_chameleon)) + + +def start(): + result = unittest.TestResult() + test = unittest.makeSuite(Benchmarks) + test.run(result) + + for error in 
result.errors: + print("Error in %s...\n" % error[0]) + print(error[1]) + + for failure in result.failures: + print("Failure in %s...\n" % failure[0]) + print(failure[1]) diff --git a/lib/Chameleon-2.22/src/chameleon/codegen.py b/lib/Chameleon-2.22/src/chameleon/codegen.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/codegen.py @@ -0,0 +1,237 @@ +try: + import ast +except ImportError: + from chameleon import ast25 as ast + +import inspect +try: + getargspec = inspect.getfullargspec +except AttributeError: + getargspec = inspect.getargspec +import textwrap +import types +import copy + +try: + import __builtin__ as builtins +except ImportError: + import builtins + +reverse_builtin_map = {} +for name, value in builtins.__dict__.items(): + try: + hash(value) + except TypeError: + continue + + reverse_builtin_map[value] = name + +try: + basestring +except NameError: + basestring = str + +from .astutil import ASTCodeGenerator +from .astutil import load +from .astutil import store +from .astutil import parse +from .astutil import Builtin +from .astutil import Symbol +from .astutil import node_annotations + +from .exc import CompilationError + + +try: + NATIVE_NUMBERS = int, float, long, bool +except NameError: + NATIVE_NUMBERS = int, float, bool + + +def template(function, mode='exec', **kw): + def wrapper(*vargs, **kwargs): + symbols = dict(zip(args, vargs + defaults)) + symbols.update(kwargs) + + class Visitor(ast.NodeVisitor): + def visit_FunctionDef(self, node): + self.generic_visit(node) + + name = symbols.get(node.name, self) + if name is not self: + node_annotations[node] = ast.FunctionDef( + name=name, + args=node.args, + body=node.body, + decorator_list=getattr(node, "decorator_list", []), + ) + + def visit_Name(self, node): + value = symbols.get(node.id, self) + if value is not self: + if isinstance(value, basestring): + value = load(value) + if isinstance(value, type) or value in reverse_builtin_map: + name = reverse_builtin_map.get(value) + if name is not None: + value = Builtin(name) + else: + value = Symbol(value) + + assert node not in node_annotations + assert hasattr(value, '_fields') + node_annotations[node] = value + + expr = parse(source, mode=mode) + if not isinstance(function, basestring): + expr = expr.body[0] + + Visitor().visit(expr) + return expr.body + + if isinstance(function, basestring): + source = function + defaults = args = () + return wrapper(**kw) + + source = textwrap.dedent(inspect.getsource(function)) + argspec = getargspec(function) + args = argspec[0] + defaults = argspec[3] or () + return wrapper + + +class TemplateCodeGenerator(ASTCodeGenerator): + """Extends the standard Python code generator class with handlers + for the helper node classes: + + - Symbol (an importable value) + - Static (value that can be made global) + - Builtin (from the builtins module) + - Marker (short-hand for a unique static object) + + """ + + names = () + + def __init__(self, tree): + self.imports = {} + self.defines = {} + self.markers = {} + + # Generate code + super(TemplateCodeGenerator, self).__init__(tree) + + def visit_Module(self, node): + super(TemplateCodeGenerator, self).visit_Module(node) + + # Make sure we terminate the line printer + self.flush() + + # Clear lines array for import visits + body = self.lines + self.lines = [] + + while self.defines: + name, node = self.defines.popitem() + assignment = ast.Assign(targets=[store(name)], value=node) + self.visit(assignment) + + # Make sure we terminate the line printer + self.flush() + + # 
Clear lines array for import visits + defines = self.lines + self.lines = [] + + while self.imports: + value, node = self.imports.popitem() + + if isinstance(value, types.ModuleType): + stmt = ast.Import( + names=[ast.alias(name=value.__name__, asname=node.id)]) + elif hasattr(value, '__name__'): + path = reverse_builtin_map.get(value) + if path is None: + path = value.__module__ + name = value.__name__ + stmt = ast.ImportFrom( + module=path, + names=[ast.alias(name=name, asname=node.id)], + level=0, + ) + else: + raise TypeError(value) + + self.visit(stmt) + + # Clear last import + self.flush() + + # Stich together lines + self.lines += defines + body + + def define(self, name, node): + assert node is not None + value = self.defines.get(name) + + if value is node: + pass + elif value is None: + self.defines[name] = node + else: + raise CompilationError( + "Duplicate symbol name for define.", name) + + return load(name) + + def require(self, value): + if value is None: + return load("None") + + if isinstance(value, NATIVE_NUMBERS): + return ast.Num(value) + + node = self.imports.get(value) + if node is None: + # we come up with a unique symbol based on the class name + name = "_%s" % getattr(value, '__name__', str(value)).\ + rsplit('.', 1)[-1] + node = load(name) + self.imports[value] = store(node.id) + + return node + + def visit(self, node): + annotation = node_annotations.get(node) + if annotation is None: + super(TemplateCodeGenerator, self).visit(node) + else: + self.visit(annotation) + + def visit_Comment(self, node): + if node.stmt is None: + self._new_line() + else: + self.visit(node.stmt) + + for line in node.text.replace('\r', '\n').split('\n'): + self._new_line() + self._write("%s#%s" % (node.space, line)) + + def visit_Builtin(self, node): + name = load(node.id) + self.visit(name) + + def visit_Symbol(self, node): + node = self.require(node.value) + self.visit(node) + + def visit_Static(self, node): + if node.name is None: + name = "_static_%s" % str(id(node.value)).replace('-', '_') + else: + name = node.name + + node = self.define(name, node.value) + self.visit(node) diff --git a/lib/Chameleon-2.22/src/chameleon/compiler.py b/lib/Chameleon-2.22/src/chameleon/compiler.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/compiler.py @@ -0,0 +1,1680 @@ +import re +import cgi +import sys +import itertools +import logging +import threading +import functools +import collections +import pickle +import textwrap + +from .astutil import load +from .astutil import store +from .astutil import param +from .astutil import swap +from .astutil import subscript +from .astutil import node_annotations +from .astutil import annotated +from .astutil import NameLookupRewriteVisitor +from .astutil import Comment +from .astutil import Symbol +from .astutil import Builtin +from .astutil import Static + +from .codegen import TemplateCodeGenerator +from .codegen import template + +from .tal import ErrorInfo +from .tal import NAME +from .i18n import simple_translate + +from .nodes import Text +from .nodes import Value +from .nodes import Substitution +from .nodes import Assignment +from .nodes import Module +from .nodes import Context + +from .tokenize import Token +from .config import DEBUG_MODE +from .exc import TranslationError +from .exc import ExpressionError +from .parser import groupdict + +from .utils import DebuggingOutputStream +from .utils import char2entity +from .utils import ListDictProxy +from .utils import native_string +from .utils import byte_string +from 
.utils import string_type +from .utils import unicode_string +from .utils import version +from .utils import ast +from .utils import safe_native +from .utils import builtins +from .utils import decode_htmlentities + + +if version >= (3, 0, 0): + long = int + +log = logging.getLogger('chameleon.compiler') + +COMPILER_INTERNALS_OR_DISALLOWED = set([ + "econtext", + "rcontext", + "str", + "int", + "float", + "long", + "len", + "None", + "True", + "False", + "RuntimeError", + ]) + + +RE_MANGLE = re.compile('[^\w_]') +RE_NAME = re.compile('^%s$' % NAME) + +if DEBUG_MODE: + LIST = template("cls()", cls=DebuggingOutputStream, mode="eval") +else: + LIST = template("[]", mode="eval") + + +def identifier(prefix, suffix=None): + return "__%s_%s" % (prefix, mangle(suffix or id(prefix))) + + +def mangle(string): + return RE_MANGLE.sub('_', str(string)).replace('\n', '').replace('-', '_') + + +def load_econtext(name): + return template("getitem(KEY)", KEY=ast.Str(s=name), mode="eval") + + +def store_econtext(name): + name = native_string(name) + return subscript(name, load("econtext"), ast.Store()) + + +def store_rcontext(name): + name = native_string(name) + return subscript(name, load("rcontext"), ast.Store()) + + +def set_error(token, exception): + try: + line, column = token.location + filename = token.filename + except AttributeError: + line, column = 0, 0 + filename = "<string>" + + string = safe_native(token) + + return template( + "rcontext.setdefault('__error__', [])." + "append((string, line, col, src, exc))", + string=ast.Str(s=string), + line=ast.Num(n=line), + col=ast.Num(n=column), + src=ast.Str(s=filename), + sys=Symbol(sys), + exc=exception, + ) + + +def try_except_wrap(stmts, token): + exception = template( + "exc_info()[1]", exc_info=Symbol(sys.exc_info), mode="eval" + ) + + body = set_error(token, exception) + template("raise") + + return ast.TryExcept( + body=stmts, + handlers=[ast.ExceptHandler(body=body)], + ) + + + at template +def emit_node(node): # pragma: no cover + __append(node) + + + at template +def emit_node_if_non_trivial(node): # pragma: no cover + if node is not None: + __append(node) + + + at template +def emit_bool(target, s, default_marker=None, + default=None): # pragma: no cover + if target is default_marker: + target = default + elif target: + target = s + else: + target = None + + + at template +def emit_convert( + target, encoded=byte_string, str=unicode_string, + long=long, type=type, + default_marker=None, default=None): # pragma: no cover + if target is None: + pass + elif target is default_marker: + target = default + else: + __tt = type(target) + + if __tt is int or __tt is float or __tt is long: + target = str(target) + elif __tt is encoded: + target = decode(target) + elif __tt is not str: + try: + target = target.__html__ + except AttributeError: + __converted = convert(target) + target = str(target) if target is __converted else __converted + else: + target = target() + + + at template +def emit_func_convert( + func, encoded=byte_string, str=unicode_string, + long=long, type=type): # pragma: no cover + def func(target): + if target is None: + return + + __tt = type(target) + + if __tt is int or __tt is float or __tt is long: + target = str(target) + + elif __tt is encoded: + target = decode(target) + + elif __tt is not str: + try: + target = target.__html__ + except AttributeError: + __converted = convert(target) + target = str(target) if target is __converted else __converted + else: + target = target() + + return target + + + at template +def 
emit_translate(target, msgid, default=None): # pragma: no cover + target = translate(msgid, default=default, domain=__i18n_domain, context=__i18n_context) + + + at template +def emit_func_convert_and_escape( + func, str=unicode_string, long=long, + type=type, encoded=byte_string): # pragma: no cover + + def func(target, quote, quote_entity, default, default_marker): + if target is None: + return + + if target is default_marker: + return default + + __tt = type(target) + + if __tt is int or __tt is float or __tt is long: + target = str(target) + else: + if __tt is encoded: + target = decode(target) + elif __tt is not str: + try: + target = target.__html__ + except: + __converted = convert(target) + target = str(target) if target is __converted \ + else __converted + else: + return target() + + if target is not None: + try: + escape = __re_needs_escape(target) is not None + except TypeError: + pass + else: + if escape: + # Character escape + if '&' in target: + target = target.replace('&', '&') + if '<' in target: + target = target.replace('<', '<') + if '>' in target: + target = target.replace('>', '>') + if quote is not None and quote in target: + target = target.replace(quote, quote_entity) + + return target + + +class Interpolator(object): + braces_required_regex = re.compile( + r'(?<!\\)\$({(?P<expression>.*)})', + re.DOTALL) + + braces_optional_regex = re.compile( + r'(?<!\\)\$({(?P<expression>.*)}|(?P<variable>[A-Za-z][A-Za-z0-9_]*))', + re.DOTALL) + + def __init__(self, expression, braces_required, translate=False): + self.expression = expression + self.regex = self.braces_required_regex if braces_required else \ + self.braces_optional_regex + self.translate = translate + + def __call__(self, name, engine): + """The strategy is to find possible expression strings and + call the ``validate`` function of the parser to validate. + + For every possible starting point, the longest possible + expression is tried first, then the second longest and so + forth. + + Example 1: + + ${'expressions use the ${<expression>} format'} + + The entire expression is attempted first and it is also the + only one that validates. + + Example 2: + + ${'Hello'} ${'world!'} + + Validation of the longest possible expression (the entire + string) will fail, while the second round of attempts, + ``${'Hello'}`` and ``${'world!'}`` respectively, validate. + + """ + + body = [] + nodes = [] + text = self.expression + + expr_map = {} + translate = self.translate + + while text: + matched = text + m = self.regex.search(matched) + if m is None: + nodes.append(ast.Str(s=text)) + break + + part = text[:m.start()] + text = text[m.start():] + + if part: + node = ast.Str(s=part) + nodes.append(node) + + if not body: + target = name + else: + target = store("%s_%d" % (name.id, text.pos)) + + while True: + d = groupdict(m, matched) + string = d["expression"] or d.get("variable") or "" + string = decode_htmlentities(string) + + if string: + try: + compiler = engine.parse(string) + body += compiler.assign_text(target) + except ExpressionError: + matched = matched[m.start():m.end() - 1] + m = self.regex.search(matched) + if m is None: + raise + + continue + else: + s = m.group() + assign = ast.Assign(targets=[target], value=ast.Str(s=s)) + body += [assign] + + break + + # If one or more expressions are not simple names, we + # disable translation. 
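                # (Illustrative, hypothetical values: with braces-optional
                # interpolation, "Hello ${name}" keeps translation enabled
                # because "name" is a simple name, whereas "Hello ${user.name}"
                # disables it, since "user.name" does not match the simple
                # name pattern.)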
+ if RE_NAME.match(string) is None: + translate = False + + # if this is the first expression, use the provided + # assignment name; otherwise, generate one (here based + # on the string position) + node = load(target.id) + nodes.append(node) + + expr_map[node] = safe_native(string) + + text = text[len(m.group()):] + + if len(nodes) == 1: + target = nodes[0] + + if translate and isinstance(target, ast.Str): + target = template( + "translate(msgid, domain=__i18n_domain, context=__i18n_context)", + msgid=target, mode="eval", + ) + else: + if translate: + formatting_string = "" + keys = [] + values = [] + + for node in nodes: + if isinstance(node, ast.Str): + formatting_string += node.s + else: + string = expr_map[node] + formatting_string += "${%s}" % string + keys.append(ast.Str(s=string)) + values.append(node) + + target = template( + "translate(msgid, mapping=mapping, domain=__i18n_domain, context=__i18n_context)", + msgid=ast.Str(s=formatting_string), + mapping=ast.Dict(keys=keys, values=values), + mode="eval" + ) + else: + nodes = [ + template( + "NODE if NODE is not None else ''", + NODE=node, mode="eval" + ) + for node in nodes + ] + + target = ast.BinOp( + left=ast.Str(s="%s" * len(nodes)), + op=ast.Mod(), + right=ast.Tuple(elts=nodes, ctx=ast.Load())) + + body += [ast.Assign(targets=[name], value=target)] + return body + + +class ExpressionEngine(object): + """Expression engine. + + This test demonstrates how to configure and invoke the engine. + + >>> from chameleon import tales + >>> parser = tales.ExpressionParser({ + ... 'python': tales.PythonExpr, + ... 'not': tales.NotExpr, + ... 'exists': tales.ExistsExpr, + ... 'string': tales.StringExpr, + ... }, 'python') + + >>> engine = ExpressionEngine(parser) + + An expression evaluation function: + + >>> eval = lambda expression: tales.test( + ... tales.IdentityExpr(expression), engine) + + We have provided 'python' as the default expression type. This + means that when no prefix is given, the expression is evaluated as + a Python expression: + + >>> eval('not False') + True + + Note that the ``type`` prefixes bind left. If ``not`` and + ``exits`` are two expression type prefixes, consider the + following:: + + >>> eval('not: exists: int(None)') + True + + The pipe operator binds right. In the following example, but + arguments are evaluated against ``not: exists: ``. + + >>> eval('not: exists: help') + False + + >>> eval('string:test ${1}${2}') + 'test 12' + + """ + + supported_char_escape_set = set(('&', '<', '>')) + + def __init__(self, parser, char_escape=(), + default=None, default_marker=None): + self._parser = parser + self._char_escape = char_escape + self._default = default + self._default_marker = default_marker + + def __call__(self, string, target): + # BBB: This method is deprecated. Instead, a call should first + # be made to ``parse`` and then one of the assignment methods + # ("value" or "text"). 
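        # (Hypothetical sketch of the preferred pattern, mirroring what this
        # deprecated method does internally:)
        #
        #     compiler = engine.parse(string)
        #     stmts = compiler.assign_value(target)  # or assign_text / assign_bool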
+ + compiler = self.parse(string) + return compiler(string, target) + + def parse(self, string): + expression = self._parser(string) + compiler = self.get_compiler(expression, string) + return ExpressionCompiler(compiler, self) + + def get_compiler(self, expression, string): + def compiler(target, engine, result_type=None, *args): + stmts = expression(target, engine) + + if result_type is not None: + method = getattr(self, '_convert_%s' % result_type) + steps = method(target, *args) + stmts.extend(steps) + + return [try_except_wrap(stmts, string)] + + return compiler + + def _convert_bool(self, target, s): + """Converts value given by ``target`` to a string ``s`` if the + target is a true value, otherwise ``None``. + """ + + return emit_bool( + target, ast.Str(s=s), + default=self._default, + default_marker=self._default_marker + ) + + def _convert_text(self, target): + """Converts value given by ``target`` to text.""" + + if self._char_escape: + # This is a cop-out - we really only support a very select + # set of escape characters + other = set(self._char_escape) - self.supported_char_escape_set + + if other: + for supported in '"', '\'', '': + if supported in self._char_escape: + quote = supported + break + else: + raise RuntimeError( + "Unsupported escape set: %s." % repr(self._char_escape) + ) + else: + quote = '\0' + + entity = char2entity(quote or '\0') + + return template( + "TARGET = __quote(TARGET, QUOTE, Q_ENTITY, DEFAULT, MARKER)", + TARGET=target, + QUOTE=ast.Str(s=quote), + Q_ENTITY=ast.Str(s=entity), + DEFAULT=self._default, + MARKER=self._default_marker, + ) + + return emit_convert( + target, + default=self._default, + default_marker=self._default_marker, + ) + + +class ExpressionCompiler(object): + def __init__(self, compiler, engine): + self.compiler = compiler + self.engine = engine + + def assign_bool(self, target, s): + return self.compiler(target, self.engine, "bool", s) + + def assign_text(self, target): + return self.compiler(target, self.engine, "text") + + def assign_value(self, target): + return self.compiler(target, self.engine) + + +class ExpressionEvaluator(object): + """Evaluates dynamic expression. + + This is not particularly efficient, but supported for legacy + applications. + + >>> from chameleon import tales + >>> parser = tales.ExpressionParser({'python': tales.PythonExpr}, 'python') + >>> engine = functools.partial(ExpressionEngine, parser) + + >>> evaluate = ExpressionEvaluator(engine, { + ... 'foo': 'bar', + ... }) + + The evaluation function is passed the local and remote context, + the expression type and finally the expression. 
+ + >>> evaluate({'boo': 'baz'}, {}, 'python', 'foo + boo') + 'barbaz' + + The cache is now primed: + + >>> evaluate({'boo': 'baz'}, {}, 'python', 'foo + boo') + 'barbaz' + + Note that the call method supports currying of the expression + argument: + + >>> python = evaluate({'boo': 'baz'}, {}, 'python') + >>> python('foo + boo') + 'barbaz' + + """ + + __slots__ = "_engine", "_cache", "_names", "_builtins" + + def __init__(self, engine, builtins): + self._engine = engine + self._names, self._builtins = zip(*builtins.items()) + self._cache = {} + + def __call__(self, econtext, rcontext, expression_type, string=None): + if string is None: + return functools.partial( + self.__call__, econtext, rcontext, expression_type + ) + + expression = "%s:%s" % (expression_type, string) + + try: + evaluate = self._cache[expression] + except KeyError: + assignment = Assignment(["_result"], expression, True) + module = Module("evaluate", Context(assignment)) + + compiler = Compiler( + self._engine, module, ('econtext', 'rcontext') + self._names + ) + + env = {} + exec(compiler.code, env) + evaluate = self._cache[expression] = env["evaluate"] + + evaluate(econtext, rcontext, *self._builtins) + return econtext['_result'] + + +class NameTransform(object): + """ + >>> nt = NameTransform( + ... set(('foo', 'bar', )), {'boo': 'boz'}, + ... ('econtext', ), + ... ) + + >>> def test(node): + ... rewritten = nt(node) + ... module = ast.Module([ast.fix_missing_locations(rewritten)]) + ... codegen = TemplateCodeGenerator(module) + ... return codegen.code + + Any odd name: + + >>> test(load('frobnitz')) + "getitem('frobnitz')" + + A 'builtin' name will first be looked up via ``get`` allowing fall + back to the global builtin value: + + >>> test(load('foo')) + "get('foo', foo)" + + Internal names (with two leading underscores) are left alone: + + >>> test(load('__internal')) + '__internal' + + Compiler internals or disallowed names: + + >>> test(load('econtext')) + 'econtext' + + Aliased names: + + >>> test(load('boo')) + 'boz' + + """ + + def __init__(self, builtins, aliases, internals): + self.builtins = builtins + self.aliases = aliases + self.internals = internals + + def __call__(self, node): + name = node.id + + # Don't rewrite names that begin with an underscore; they are + # internal and can be assumed to be locally defined. This + # policy really should be part of the template program, not + # defined here in the compiler. + if name.startswith('__') or name in self.internals: + return node + + if isinstance(node.ctx, ast.Store): + return store_econtext(name) + + aliased = self.aliases.get(name) + if aliased is not None: + return load(aliased) + + # If the name is a Python global, first try acquiring it from + # the dynamic context, then fall back to the global. + if name in self.builtins: + return template( + "get(key, name)", + mode="eval", + key=ast.Str(s=name), + name=load(name), + ) + + # Otherwise, simply acquire it from the dynamic context. + return load_econtext(name) + + +class ExpressionTransform(object): + """Internal wrapper to transform expression nodes into assignment + statements. + + The node input may use the provided expression engine, but other + expression node types are supported such as ``Builtin`` which + simply resolves a built-in name. + + Used internally be the compiler. 
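    A rough usage sketch (illustrative only; ``engine_factory`` and
    ``visitor`` are assumed to be set up as accepted by ``__init__``):

        transform = ExpressionTransform(engine_factory, {}, visitor)
        stmts = transform("1 + 1", "__out")

    Here ``stmts`` is a list of statement nodes (plus a leading comment
    node) that assign the expression result to ``__out``.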
+ """ + + loads_symbol = Symbol(pickle.loads) + + def __init__(self, engine_factory, cache, visitor, strict=True): + self.engine_factory = engine_factory + self.cache = cache + self.strict = strict + self.visitor = visitor + + def __call__(self, expression, target): + if isinstance(target, string_type): + target = store(target) + + try: + stmts = self.translate(expression, target) + except ExpressionError: + if self.strict: + raise + + exc = sys.exc_info()[1] + p = pickle.dumps(exc, -1) + + stmts = template( + "__exc = loads(p)", loads=self.loads_symbol, p=ast.Str(s=p) + ) + + token = Token(exc.token, exc.offset, filename=exc.filename) + + stmts += set_error(token, load("__exc")) + stmts += [ast.Raise(exc=load("__exc"))] + + # Apply visitor to each statement + for stmt in stmts: + self.visitor(stmt) + + return stmts + + def translate(self, expression, target): + if isinstance(target, string_type): + target = store(target) + + cached = self.cache.get(expression) + + if cached is not None: + stmts = [ast.Assign(targets=[target], value=cached)] + elif isinstance(expression, ast.expr): + stmts = [ast.Assign(targets=[target], value=expression)] + else: + # The engine interface supports simple strings, which + # default to expression nodes + if isinstance(expression, string_type): + expression = Value(expression, True) + + kind = type(expression).__name__ + visitor = getattr(self, "visit_%s" % kind) + stmts = visitor(expression, target) + + # Add comment + target_id = getattr(target, "id", target) + comment = Comment(" %r -> %s" % (expression, target_id)) + stmts.insert(0, comment) + + return stmts + + def visit_Value(self, node, target): + engine = self.engine_factory() + compiler = engine.parse(node.value) + return compiler.assign_value(target) + + def visit_Copy(self, node, target): + return self.translate(node.expression, target) + + def visit_Default(self, node, target): + value = annotated(node.marker) + return [ast.Assign(targets=[target], value=value)] + + def visit_Substitution(self, node, target): + engine = self.engine_factory( + char_escape=node.char_escape, + default=node.default, + ) + compiler = engine.parse(node.value) + return compiler.assign_text(target) + + def visit_Negate(self, node, target): + return self.translate(node.value, target) + \ + template("TARGET = not TARGET", TARGET=target) + + def visit_Identity(self, node, target): + expression = self.translate(node.expression, "__expression") + value = self.translate(node.value, "__value") + + return expression + value + \ + template("TARGET = __expression is __value", TARGET=target) + + def visit_Equality(self, node, target): + expression = self.translate(node.expression, "__expression") + value = self.translate(node.value, "__value") + + return expression + value + \ + template("TARGET = __expression == __value", TARGET=target) + + def visit_Boolean(self, node, target): + engine = self.engine_factory() + compiler = engine.parse(node.value) + return compiler.assign_bool(target, node.s) + + def visit_Interpolation(self, node, target): + expr = node.value + if isinstance(expr, Substitution): + engine = self.engine_factory( + char_escape=expr.char_escape, + default=expr.default, + ) + elif isinstance(expr, Value): + engine = self.engine_factory() + else: + raise RuntimeError("Bad value: %r." 
% node.value) + + interpolator = Interpolator( + expr.value, node.braces_required, node.translation + ) + + compiler = engine.get_compiler(interpolator, expr.value) + return compiler(target, engine) + + def visit_Translate(self, node, target): + if node.msgid is not None: + msgid = ast.Str(s=node.msgid) + else: + msgid = target + return self.translate(node.node, target) + \ + emit_translate(target, msgid, default=target) + + def visit_Static(self, node, target): + value = annotated(node) + return [ast.Assign(targets=[target], value=value)] + + def visit_Builtin(self, node, target): + value = annotated(node) + return [ast.Assign(targets=[target], value=value)] + + +class Compiler(object): + """Generic compiler class. + + Iterates through nodes and yields Python statements which form a + template program. + """ + + exceptions = NameError, \ + ValueError, \ + AttributeError, \ + LookupError, \ + TypeError + + defaults = { + 'translate': Symbol(simple_translate), + 'decode': Builtin("str"), + 'convert': Builtin("str"), + } + + lock = threading.Lock() + + global_builtins = set(builtins.__dict__) + + def __init__(self, engine_factory, node, builtins={}, strict=True): + self._scopes = [set()] + self._expression_cache = {} + self._translations = [] + self._builtins = builtins + self._aliases = [{}] + self._macros = [] + self._current_slot = [] + + internals = COMPILER_INTERNALS_OR_DISALLOWED | \ + set(self.defaults) + + transform = NameTransform( + self.global_builtins | set(builtins), + ListDictProxy(self._aliases), + internals, + ) + + self._visitor = visitor = NameLookupRewriteVisitor(transform) + + self._engine = ExpressionTransform( + engine_factory, + self._expression_cache, + visitor, + strict=strict, + ) + + if isinstance(node_annotations, dict): + self.lock.acquire() + backup = node_annotations.copy() + else: + backup = None + + try: + module = ast.Module([]) + module.body += self.visit(node) + ast.fix_missing_locations(module) + generator = TemplateCodeGenerator(module) + finally: + if backup is not None: + node_annotations.clear() + node_annotations.update(backup) + self.lock.release() + + self.code = generator.code + + def visit(self, node): + if node is None: + return () + kind = type(node).__name__ + visitor = getattr(self, "visit_%s" % kind) + iterator = visitor(node) + return list(iterator) + + def visit_Sequence(self, node): + for item in node.items: + for stmt in self.visit(item): + yield stmt + + def visit_Element(self, node): + for stmt in self.visit(node.start): + yield stmt + + for stmt in self.visit(node.content): + yield stmt + + if node.end is not None: + for stmt in self.visit(node.end): + yield stmt + + def visit_Module(self, node): + body = [] + + body += template("import re") + body += template("import functools") + body += template("from itertools import chain as __chain") + body += template("__marker = object()") + body += template( + r"g_re_amp = re.compile(r'&(?!([A-Za-z]+|#[0-9]+);)')" + ) + body += template( + r"g_re_needs_escape = re.compile(r'[&<>\"\']').search") + + body += template( + r"__re_whitespace = " + r"functools.partial(re.compile('\s+').sub, ' ')", + ) + + # Visit module content + program = self.visit(node.program) + + body += [ast.FunctionDef( + name=node.name, args=ast.arguments( + args=[param(b) for b in self._builtins], + defaults=(), + ), + body=program + )] + + return body + + def visit_MacroProgram(self, node): + functions = [] + + # Visit defined macros + macros = getattr(node, "macros", ()) + names = [] + for macro in macros: + stmts = 
self.visit(macro) + function = stmts[-1] + names.append(function.name) + functions += stmts + + # Return function dictionary + functions += [ast.Return(value=ast.Dict( + keys=[ast.Str(s=name) for name in names], + values=[load(name) for name in names], + ))] + + return functions + + def visit_Context(self, node): + return template("getitem = econtext.__getitem__") + \ + template("get = econtext.get") + \ + self.visit(node.node) + + def visit_Macro(self, node): + body = [] + + # Initialization + body += template("__append = __stream.append") + body += template("__re_amp = g_re_amp") + body += template("__re_needs_escape = g_re_needs_escape") + + body += emit_func_convert("__convert") + body += emit_func_convert_and_escape("__quote") + + # Resolve defaults + for name in self.defaults: + body += template( + "NAME = econtext[KEY]", + NAME=name, KEY=ast.Str(s="__" + name) + ) + + # Internal set of defined slots + self._slots = set() + + # Visit macro body + nodes = itertools.chain(*tuple(map(self.visit, node.body))) + + # Slot resolution + for name in self._slots: + body += template( + "try: NAME = econtext[KEY].pop()\n" + "except: NAME = None", + KEY=ast.Str(s=name), NAME=store(name)) + + # Append visited nodes + body += nodes + + function_name = "render" if node.name is None else \ + "render_%s" % mangle(node.name) + + function = ast.FunctionDef( + name=function_name, args=ast.arguments( + args=[ + param("__stream"), + param("econtext"), + param("rcontext"), + param("__i18n_domain"), + param("__i18n_context"), + ], + defaults=[load("None"), load("None")], + ), + body=body + ) + + yield function + + def visit_Text(self, node): + return emit_node(ast.Str(s=node.value)) + + def visit_Domain(self, node): + backup = "__previous_i18n_domain_%s" % mangle(id(node)) + return template("BACKUP = __i18n_domain", BACKUP=backup) + \ + template("__i18n_domain = NAME", NAME=ast.Str(s=node.name)) + \ + self.visit(node.node) + \ + template("__i18n_domain = BACKUP", BACKUP=backup) + + def visit_TxContext(self, node): + backup = "__previous_i18n_context_%s" % mangle(id(node)) + return template("BACKUP = __i18n_context", BACKUP=backup) + \ + template("__i18n_context = NAME", NAME=ast.Str(s=node.name)) + \ + self.visit(node.node) + \ + template("__i18n_context = BACKUP", BACKUP=backup) + + def visit_OnError(self, node): + body = [] + + fallback = identifier("__fallback") + body += template("fallback = len(__stream)", fallback=fallback) + + self._enter_assignment((node.name, )) + fallback_body = self.visit(node.fallback) + self._leave_assignment((node.name, )) + + error_assignment = template( + "econtext[key] = cls(__exc, rcontext['__error__'][-1][1:3])", + cls=ErrorInfo, + key=ast.Str(s=node.name), + ) + + body += [ast.TryExcept( + body=self.visit(node.node), + handlers=[ast.ExceptHandler( + type=ast.Tuple(elts=[Builtin("Exception")], ctx=ast.Load()), + name=store("__exc"), + body=(error_assignment + \ + template("del __stream[fallback:]", fallback=fallback) + \ + fallback_body + ), + )] + )] + + return body + + def visit_Content(self, node): + name = "__content" + body = self._engine(node.expression, store(name)) + + if node.translate: + body += emit_translate(name, name) + + if node.char_escape: + body += template( + "NAME=__quote(NAME, None, '\255', None, None)", + NAME=name, + ) + else: + body += template("NAME = __convert(NAME)", NAME=name) + + body += template("if NAME is not None: __append(NAME)", NAME=name) + + return body + + def visit_Interpolation(self, node): + name = identifier("content") + return 
self._engine(node, name) + \ + emit_node_if_non_trivial(name) + + def visit_Alias(self, node): + assert len(node.names) == 1 + name = node.names[0] + target = self._aliases[-1][name] = identifier(name, id(node)) + return self._engine(node.expression, target) + + def visit_Assignment(self, node): + for name in node.names: + if name in COMPILER_INTERNALS_OR_DISALLOWED: + raise TranslationError( + "Name disallowed by compiler.", name + ) + + if name.startswith('__'): + raise TranslationError( + "Name disallowed by compiler (double underscore).", + name + ) + + assignment = self._engine(node.expression, store("__value")) + + if len(node.names) != 1: + target = ast.Tuple( + elts=[store_econtext(name) for name in node.names], + ctx=ast.Store(), + ) + else: + target = store_econtext(node.names[0]) + + assignment.append(ast.Assign(targets=[target], value=load("__value"))) + + for name in node.names: + if not node.local: + assignment += template( + "rcontext[KEY] = __value", KEY=ast.Str(s=native_string(name)) + ) + + return assignment + + def visit_Define(self, node): + scope = set(self._scopes[-1]) + self._scopes.append(scope) + self._aliases.append(self._aliases[-1].copy()) + + for assignment in node.assignments: + if assignment.local: + for stmt in self._enter_assignment(assignment.names): + yield stmt + + for stmt in self.visit(assignment): + yield stmt + + for stmt in self.visit(node.node): + yield stmt + + for assignment in node.assignments: + if assignment.local: + for stmt in self._leave_assignment(assignment.names): + yield stmt + + self._scopes.pop() + self._aliases.pop() + + def visit_Omit(self, node): + return self.visit_Condition(node) + + def visit_Condition(self, node): + target = "__condition" + assignment = self._engine(node.expression, target) + + assert assignment + + for stmt in assignment: + yield stmt + + body = self.visit(node.node) or [ast.Pass()] + + orelse = getattr(node, "orelse", None) + if orelse is not None: + orelse = self.visit(orelse) + + test = load(target) + + yield ast.If(test=test, body=body, orelse=orelse) + + def visit_Translate(self, node): + """Translation. + + Visit items and assign output to a default value. + + Finally, compile a translation expression and use either + result or default. 
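        Roughly, the generated code has this shape (simplified and
        illustrative; the real identifiers are mangled per node id, and
        ``mapping`` is a dict of named sub-blocks or ``None``):

            __stream = []
            __append_inner = __stream.append
            # ... visited body appends text into __stream ...
            __msgid = __re_whitespace(''.join(__stream)).strip()
            if __msgid:
                __append(translate(__msgid, mapping=mapping, default=__msgid,
                                   domain=__i18n_domain,
                                   context=__i18n_context))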
+ """ + + body = [] + + # Track the blocks of this translation + self._translations.append(set()) + + # Prepare new stream + append = identifier("append", id(node)) + stream = identifier("stream", id(node)) + body += template("s = new_list", s=stream, new_list=LIST) + \ + template("a = s.append", a=append, s=stream) + + # Visit body to generate the message body + code = self.visit(node.node) + swap(ast.Suite(body=code), load(append), "__append") + body += code + + # Reduce white space and assign as message id + msgid = identifier("msgid", id(node)) + body += template( + "msgid = __re_whitespace(''.join(stream)).strip()", + msgid=msgid, stream=stream + ) + + default = msgid + + # Compute translation block mapping if applicable + names = self._translations[-1] + if names: + keys = [] + values = [] + + for name in names: + stream, append = self._get_translation_identifiers(name) + keys.append(ast.Str(s=name)) + values.append(load(stream)) + + # Initialize value + body.insert( + 0, ast.Assign( + targets=[store(stream)], + value=ast.Str(s=native_string("")))) + + mapping = ast.Dict(keys=keys, values=values) + else: + mapping = None + + # if this translation node has a name, use it as the message id + if node.msgid: + msgid = ast.Str(s=node.msgid) + + # emit the translation expression + body += template( + "if msgid: __append(translate(" + "msgid, mapping=mapping, default=default, domain=__i18n_domain, context=__i18n_context))", + msgid=msgid, default=default, mapping=mapping + ) + + # pop away translation block reference + self._translations.pop() + + return body + + def visit_Start(self, node): + try: + line, column = node.prefix.location + except AttributeError: + line, column = 0, 0 + + yield Comment( + " %s%s ... (%d:%d)\n" + " --------------------------------------------------------" % ( + node.prefix, node.name, line, column)) + + if node.attributes: + for stmt in emit_node(ast.Str(s=node.prefix + node.name)): + yield stmt + + for stmt in self.visit(node.attributes): + yield stmt + + for stmt in emit_node(ast.Str(s=node.suffix)): + yield stmt + else: + for stmt in emit_node( + ast.Str(s=node.prefix + node.name + node.suffix)): + yield stmt + + def visit_End(self, node): + for stmt in emit_node(ast.Str( + s=node.prefix + node.name + node.space + node.suffix)): + yield stmt + + def visit_Attribute(self, node): + attr_format = (node.space + node.name + node.eq + + node.quote + "%s" + node.quote) + + filter_args = list(map(self._engine.cache.get, node.filters)) + + filter_condition = template( + "NAME not in CHAIN", + NAME=ast.Str(s=node.name), + CHAIN=ast.Call( + func=load("__chain"), + args=filter_args, + keywords=[], + starargs=None, + kwargs=None, + ), + mode="eval" + ) + + # Static attributes are just outputted directly + if isinstance(node.expression, ast.Str): + s = attr_format % node.expression.s + if node.filters: + return template( + "if C: __append(S)", C=filter_condition, S=ast.Str(s=s) + ) + else: + return template("__append(S)", S=ast.Str(s=s)) + + target = identifier("attr", node.name) + body = self._engine(node.expression, store(target)) + + condition = template("TARGET is not None", TARGET=target, mode="eval") + + if node.filters: + condition = ast.BoolOp( + values=[condition, filter_condition], + op=ast.And(), + ) + + return body + template( + "if CONDITION: __append(FORMAT % TARGET)", + FORMAT=ast.Str(s=attr_format), + TARGET=target, + CONDITION=condition, + ) + + def visit_DictAttributes(self, node): + target = identifier("attr", id(node)) + body = 
self._engine(node.expression, store(target)) + + exclude = Static(template( + "set(LIST)", LIST=ast.List( + elts=[ast.Str(s=name) for name in node.exclude], + ctx=ast.Load(), + ), mode="eval" + )) + + body += template( + "for name, value in TARGET.items():\n " + "if name not in EXCLUDE and value is not None: __append(" + "' ' + name + '=' + QUOTE + " + "QUOTE_FUNC(value, QUOTE, QUOTE_ENTITY, None, None) + QUOTE" + ")", + TARGET=target, + EXCLUDE=exclude, + QUOTE_FUNC="__quote", + QUOTE=ast.Str(s=node.quote), + QUOTE_ENTITY=ast.Str(s=char2entity(node.quote or '\0')), + ) + + return body + + def visit_Cache(self, node): + body = [] + + for expression in node.expressions: + name = identifier("cache", id(expression)) + target = store(name) + + # Skip re-evaluation + if self._expression_cache.get(expression): + continue + + body += self._engine(expression, target) + self._expression_cache[expression] = target + + body += self.visit(node.node) + + return body + + def visit_Cancel(self, node): + body = [] + + for expression in node.expressions: + name = identifier("cache", id(expression)) + target = store(name) + + if not self._expression_cache.get(expression): + continue + + body.append(ast.Assign([target], load("None"))) + + body += self.visit(node.node) + + return body + + def visit_UseInternalMacro(self, node): + if node.name is None: + render = "render" + else: + render = "render_%s" % mangle(node.name) + + return template( + "f(__stream, econtext.copy(), rcontext, __i18n_domain)", + f=render) + \ + template("econtext.update(rcontext)") + + def visit_DefineSlot(self, node): + name = "__slot_%s" % mangle(node.name) + body = self.visit(node.node) + + self._slots.add(name) + + orelse = template( + "SLOT(__stream, econtext.copy(), rcontext)", + SLOT=name) + test = ast.Compare( + left=load(name), + ops=[ast.Is()], + comparators=[load("None")] + ) + + return [ + ast.If(test=test, body=body or [ast.Pass()], orelse=orelse) + ] + + def visit_Name(self, node): + """Translation name.""" + + if not self._translations: + raise TranslationError( + "Not allowed outside of translation.", node.name) + + if node.name in self._translations[-1]: + raise TranslationError( + "Duplicate translation name: %s.", node.name) + + self._translations[-1].add(node.name) + body = [] + + # prepare new stream + stream, append = self._get_translation_identifiers(node.name) + body += template("s = new_list", s=stream, new_list=LIST) + \ + template("a = s.append", a=append, s=stream) + + # generate code + code = self.visit(node.node) + swap(ast.Suite(body=code), load(append), "__append") + body += code + + # output msgid + text = Text('${%s}' % node.name) + body += self.visit(text) + + # Concatenate stream + body += template("stream = ''.join(stream)", stream=stream) + + return body + + def visit_CodeBlock(self, node): + stmts = template(textwrap.dedent(node.source.strip('\n'))) + + for stmt in stmts: + self._visitor(stmt) + + return [try_except_wrap(stmts, node.source)] + + def visit_UseExternalMacro(self, node): + self._macros.append(node.extend) + + callbacks = [] + for slot in node.slots: + key = "__slot_%s" % mangle(slot.name) + fun = "__fill_%s" % mangle(slot.name) + + self._current_slot.append(slot.name) + + body = template("getitem = econtext.__getitem__") + \ + template("get = econtext.get") + \ + self.visit(slot.node) + + assert self._current_slot.pop() == slot.name + + callbacks.append( + ast.FunctionDef( + name=fun, + args=ast.arguments( + args=[ + param("__stream"), + param("econtext"), + param("rcontext"), + 
param("__i18n_domain"), + param("__i18n_context"), + ], + defaults=[load("__i18n_domain"), load("__i18n_context")], + ), + body=body or [ast.Pass()], + )) + + key = ast.Str(s=key) + + assignment = template( + "_slots = econtext[KEY] = DEQUE((NAME,))", + KEY=key, NAME=fun, DEQUE=Symbol(collections.deque), + ) + + if node.extend: + append = template("_slots.appendleft(NAME)", NAME=fun) + + assignment = [ast.TryExcept( + body=template("_slots = getitem(KEY)", KEY=key), + handlers=[ast.ExceptHandler(body=assignment)], + orelse=append, + )] + + callbacks.extend(assignment) + + assert self._macros.pop() == node.extend + + assignment = self._engine(node.expression, store("__macro")) + + return ( + callbacks + \ + assignment + \ + template( + "__macro.include(__stream, econtext.copy(), " \ + "rcontext, __i18n_domain)") + \ + template("econtext.update(rcontext)") + ) + + def visit_Repeat(self, node): + # Used for loop variable definition and restore + self._scopes.append(set()) + + # Variable assignment and repeat key for single- and + # multi-variable repeat clause + if node.local: + contexts = "econtext", + else: + contexts = "econtext", "rcontext" + + for name in node.names: + if name in COMPILER_INTERNALS_OR_DISALLOWED: + raise TranslationError( + "Name disallowed by compiler.", name + ) + + if len(node.names) > 1: + targets = [ + ast.Tuple(elts=[ + subscript(native_string(name), load(context), ast.Store()) + for name in node.names], ctx=ast.Store()) + for context in contexts + ] + + key = ast.Tuple( + elts=[ast.Str(s=name) for name in node.names], + ctx=ast.Load()) + else: + name = node.names[0] + targets = [ + subscript(native_string(name), load(context), ast.Store()) + for context in contexts + ] + + key = ast.Str(s=node.names[0]) + + index = identifier("__index", id(node)) + assignment = [ast.Assign(targets=targets, value=load("__item"))] + + # Make repeat assignment in outer loop + names = node.names + local = node.local + + outer = self._engine(node.expression, store("__iterator")) + + if local: + outer[:] = list(self._enter_assignment(names)) + outer + + outer += template( + "__iterator, INDEX = getitem('repeat')(key, __iterator)", + key=key, INDEX=index + ) + + # Set a trivial default value for each name assigned to make + # sure we assign a value even if the iteration is empty + outer += [ast.Assign( + targets=[store_econtext(name) + for name in node.names], + value=load("None")) + ] + + # Compute inner body + inner = self.visit(node.node) + + # After each iteration, decrease the index + inner += template("index -= 1", index=index) + + # For items up to N - 1, emit repeat whitespace + inner += template( + "if INDEX > 0: __append(WHITESPACE)", + INDEX=index, WHITESPACE=ast.Str(s=node.whitespace) + ) + + # Main repeat loop + outer += [ast.For( + target=store("__item"), + iter=load("__iterator"), + body=assignment + inner, + )] + + # Finally, clean up assignment if it's local + if outer: + outer += self._leave_assignment(names) + + self._scopes.pop() + + return outer + + def _get_translation_identifiers(self, name): + assert self._translations + prefix = str(id(self._translations[-1])).replace('-', '_') + stream = identifier("stream_%s" % prefix, name) + append = identifier("append_%s" % prefix, name) + return stream, append + + def _enter_assignment(self, names): + for name in names: + for stmt in template( + "BACKUP = get(KEY, __marker)", + BACKUP=identifier("backup_%s" % name, id(names)), + KEY=ast.Str(s=native_string(name)), + ): + yield stmt + + def _leave_assignment(self, names): + 
for name in names: + for stmt in template( + "if BACKUP is __marker: del econtext[KEY]\n" + "else: econtext[KEY] = BACKUP", + BACKUP=identifier("backup_%s" % name, id(names)), + KEY=ast.Str(s=native_string(name)), + ): + yield stmt diff --git a/lib/Chameleon-2.22/src/chameleon/config.py b/lib/Chameleon-2.22/src/chameleon/config.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/config.py @@ -0,0 +1,55 @@ +import os +import logging + +log = logging.getLogger('chameleon.config') +environment = dict( + (k[10:], v) for (k, v) in ( + ((j.lower(), x) for (j, x) in os.environ.items())) + if k.startswith('chameleon_') +) + +# Define which values are read as true +TRUE = ('y', 'yes', 't', 'true', 'on', '1') + +# If eager parsing is enabled, templates are parsed upon +# instantiation, rather than when first called upon; this mode is +# useful for verifying validity of templates across a project +EAGER_PARSING = environment.pop('eager', 'false') +EAGER_PARSING = EAGER_PARSING.lower() in TRUE + +# Debug mode is mostly useful for debugging the template engine +# itself. When enabled, generated source code is written to disk to +# ease step-debugging and some log levels are lowered to increase +# output. Also, the generated source code is available in the +# ``source`` attribute of the template instance if compilation +# succeeded. +DEBUG_MODE = environment.pop('debug', 'false') +DEBUG_MODE = DEBUG_MODE.lower() in TRUE + +# If a cache directory is specified, template source code will be +# persisted on disk and reloaded between sessions +path = environment.pop('cache', None) +if path is not None: + CACHE_DIRECTORY = os.path.abspath(path) + if not os.path.exists(CACHE_DIRECTORY): + raise ValueError( + "Cache directory does not exist: %s." % CACHE_DIRECTORY + ) + log.info("directory cache: %s." % CACHE_DIRECTORY) +else: + CACHE_DIRECTORY = None + +# When auto-reload is enabled, templates are reloaded on file change. +AUTO_RELOAD = environment.pop('reload', 'false') +AUTO_RELOAD = AUTO_RELOAD.lower() in TRUE + +for key in environment: + log.warning( + "unknown environment variable set: \"CHAMELEON_%s\"." % key.upper() + ) + +# This is the slice length of the expression displayed in the +# formatted exception string +SOURCE_EXPRESSION_MARKER_LENGTH = 60 + + diff --git a/lib/Chameleon-2.22/src/chameleon/exc.py b/lib/Chameleon-2.22/src/chameleon/exc.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/exc.py @@ -0,0 +1,299 @@ +# -*- coding: utf-8 -*- + +import traceback + +from .utils import format_kwargs +from .utils import safe_native +from .tokenize import Token +from .config import SOURCE_EXPRESSION_MARKER_LENGTH as LENGTH + + +def compute_source_marker(line, column, expression, size): + """Computes source marker location string. + + >>> def test(l, c, e, s): + ... s, marker = compute_source_marker(l, c, e, s) + ... out = s + '\\n' + marker + ... + ... # Replace dot with middle-dot to work around doctest ellipsis + ... print(out.replace('...', '??????')) + + >>> test('foo bar', 4, 'bar', 7) + foo bar + ^^^ + + >>> test('foo ${bar}', 4, 'bar', 10) + foo ${bar} + ^^^ + + >>> test(' foo bar', 6, 'bar', 6) + ?????? oo bar + ^^^ + + >>> test(' foo bar baz ', 6, 'bar', 6) + ?????? o bar ?????? + ^^^ + + The entire expression is always shown, even if ``size`` does not + accomodate for it. + + >>> test(' foo bar baz ', 6, 'bar baz', 10) + ?????? oo bar baz + ^^^^^^^ + + >>> test(' foo bar', 10, 'bar', 5) + ?????? 
o bar + ^^^ + + >>> test(' foo bar', 10, 'boo', 5) + ?????? o bar + ^ + + """ + + s = line.lstrip() + column -= len(line) - len(s) + s = s.rstrip() + + try: + i = s[column:].index(expression) + except ValueError: + # If we can't find the expression + # (this shouldn't happen), simply + # use a standard size marker + marker = "^" + else: + column += i + marker = "^" * len(expression) + + if len(expression) > size: + offset = column + size = len(expression) + else: + window = (size - len(expression)) / 2.0 + offset = column - window + offset -= min(3, max(0, column + window + len(expression) - len(s))) + offset = int(offset) + + if offset > 0: + s = s[offset:] + r = s.lstrip() + d = len(s) - len(r) + s = "... " + r + column += 4 - d + column -= offset + + # This also adds to the displayed length + size += 4 + + if len(s) > size: + s = s[:size].rstrip() + " ..." + + return s, column * " " + marker + + +def ellipsify(string, limit): + if len(string) > limit: + return "... " + string[-(limit - 4):] + + return string + + +class RenderError(Exception): + """An error raised during rendering. + + This class is used as a mixin which is added to the original + exception. + """ + + +class TemplateError(Exception): + """An error raised by Chameleon. + + >>> from chameleon.tokenize import Token + >>> token = Token('token') + >>> message = 'message' + + Make sure the exceptions can be copied: + + >>> from copy import copy + >>> copy(TemplateError(message, token)) + TemplateError('message', 'token') + + And pickle/unpickled: + + >>> from pickle import dumps, loads + >>> loads(dumps(TemplateError(message, token), -1)) + TemplateError('message', 'token') + + """ + + def __init__(self, msg, token): + if not isinstance(token, Token): + token = Token(token, 0) + + Exception.__init__(self, msg, token) + + def __copy__(self): + inst = Exception.__new__(type(self)) + inst.args = self.args + return inst + + def __str__(self): + text = "%s\n\n" % self.args[0] + text += " - String: \"%s\"" % safe_native(self.token) + + if self.filename: + text += "\n" + text += " - Filename: %s" % self.filename + + line, column = self.location + text += "\n" + text += " - Location: (line %d: col %d)" % (line, column) + + return text + + def __repr__(self): + try: + return "%s('%s', '%s')" % ( + self.__class__.__name__, self.args[0], safe_native(self.token) + ) + except AttributeError: + return object.__repr__(self) + + @property + def token(self): + return self.args[1] + + @property + def filename(self): + return self.token.filename + + @property + def location(self): + return self.token.location + + @property + def offset(self): + return getattr(self.token, "pos", 0) + + +class ParseError(TemplateError): + """An error occurred during parsing. + + Indicates an error on the structural level. + """ + + +class CompilationError(TemplateError): + """An error occurred during compilation. + + Indicates a general compilation error. + """ + + +class TranslationError(TemplateError): + """An error occurred during translation. + + Indicates a general translation error. + """ + + +class LanguageError(CompilationError): + """Language syntax error. + + Indicates a syntactical error due to incorrect usage of the + template language. + """ + + +class ExpressionError(LanguageError): + """An error occurred compiling an expression. + + Indicates a syntactical error in an expression. 
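    Since the hierarchy nests as ``TemplateError`` -> ``CompilationError`` ->
    ``LanguageError`` -> ``ExpressionError``, callers may catch the broader
    class. A hypothetical handler:

        try:
            stmts = compiler.assign_value(target)
        except ExpressionError as exc:
            print(exc.token, exc.location)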
+ """ + + +class ExceptionFormatter(object): + def __init__(self, errors, econtext, rcontext): + kwargs = rcontext.copy() + kwargs.update(econtext) + + for name in tuple(kwargs): + if name.startswith('__'): + del kwargs[name] + + self._errors = errors + self._kwargs = kwargs + + def __call__(self): + # Format keyword arguments; consecutive arguments are indented + # for readability + try: + formatted = format_kwargs(self._kwargs) + except: + # the ``pprint.pformat`` method calls the representation + # method of the arguments; this may fail and since we're + # already in an exception handler, there's no point in + # pursuing this further + formatted = () + + for index, string in enumerate(formatted[1:]): + formatted[index + 1] = " " * 15 + string + + out = [] + seen = set() + + for error in reversed(self._errors): + expression, line, column, filename, exc = error + + if exc in seen: + continue + + seen.add(exc) + + if isinstance(exc, UnicodeDecodeError): + string = safe_native(exc.object) + + s, marker = compute_source_marker( + string, exc.start, string[exc.start:exc.end], LENGTH + ) + + out.append(" - Stream: %s" % s) + out.append(" %s" % marker) + + _filename = ellipsify(filename, 60) if filename else "<string>" + + out.append(" - Expression: \"%s\"" % expression) + out.append(" - Filename: %s" % _filename) + out.append(" - Location: (line %d: col %d)" % (line, column)) + + if filename and line and column: + try: + f = open(filename, 'r') + except IOError: + pass + else: + try: + # Pick out source line and format marker + for i, l in enumerate(f): + if i + 1 == line: + s, marker = compute_source_marker( + l, column, expression, LENGTH + ) + + out.append(" - Source: %s" % s) + out.append(" %s" % marker) + break + finally: + f.close() + + out.append(" - Arguments: %s" % "\n".join(formatted)) + + formatted = traceback.format_exception_only(type(exc), exc)[-1] + formatted_class = "%s:" % type(exc).__name__ + + if formatted.startswith(formatted_class): + formatted = formatted[len(formatted_class):].lstrip() + + return "\n".join(map(safe_native, [formatted] + out)) diff --git a/lib/Chameleon-2.22/src/chameleon/i18n.py b/lib/Chameleon-2.22/src/chameleon/i18n.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/i18n.py @@ -0,0 +1,129 @@ +############################################################################## +# +# Copyright (c) 2001, 2002 Zope Foundation and Contributors. +# All Rights Reserved. +# +# This software is subject to the provisions of the Zope Public License, +# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. +# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED +# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS +# FOR A PARTICULAR PURPOSE. +# +############################################################################## + +import re + +from .exc import CompilationError +from .utils import unicode_string + +NAME_RE = r"[a-zA-Z][-a-zA-Z0-9_]*" + +WHITELIST = frozenset([ + "translate", + "domain", + "context", + "target", + "source", + "attributes", + "data", + "name", + "mode", + "xmlns", + "xml", + "comment", + ]) + +_interp_regex = re.compile(r'(?<!\$)(\$(?:(%(n)s)|{(%(n)s)}))' + % ({'n': NAME_RE})) + + +try: # pragma: no cover + str = unicode +except NameError: + pass + +# BBB: The ``fast_translate`` function here is kept for backwards +# compatibility reasons. Do not use! 
+ +try: # pragma: no cover + from zope.i18n import interpolate + from zope.i18n import translate + from zope.i18nmessageid import Message +except ImportError: # pragma: no cover + pass +else: # pragma: no cover + def fast_translate(msgid, domain=None, mapping=None, context=None, + target_language=None, default=None): + if msgid is None: + return + + if target_language is not None or context is not None: + result = translate( + msgid, domain=domain, mapping=mapping, context=context, + target_language=target_language, default=default) + if result != msgid: + return result + + if isinstance(msgid, Message): + default = msgid.default + mapping = msgid.mapping + + if default is None: + default = str(msgid) + + if not isinstance(default, basestring): + return default + + return interpolate(default, mapping) + + +def simple_translate(msgid, domain=None, mapping=None, context=None, + target_language=None, default=None): + if default is None: + default = getattr(msgid, "default", msgid) + + if mapping is None: + mapping = getattr(msgid, "mapping", None) + + if mapping: + def replace(match): + whole, param1, param2 = match.groups() + return unicode_string(mapping.get(param1 or param2, whole)) + return _interp_regex.sub(replace, default) + + return default + + +def parse_attributes(attrs, xml=True): + d = {} + + # filter out empty items, eg: + # i18n:attributes="value msgid; name msgid2;" + # would result in 3 items where the last one is empty + attrs = [spec for spec in attrs.split(";") if spec] + + for spec in attrs: + if ',' in spec: + raise CompilationError( + "Attribute must not contain comma. Use semicolon to " + "list multiple attributes", spec + ) + parts = spec.split() + if len(parts) == 2: + attr, msgid = parts + elif len(parts) == 1: + attr = parts[0] + msgid = None + else: + raise CompilationError( + "Illegal i18n:attributes specification.", spec) + if not xml: + attr = attr.lower() + attr = attr.strip() + if attr in d: + raise CompilationError( + "Attribute may only be specified once in i18n:attributes", attr) + d[attr] = msgid + + return d diff --git a/lib/Chameleon-2.22/src/chameleon/interfaces.py b/lib/Chameleon-2.22/src/chameleon/interfaces.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/interfaces.py @@ -0,0 +1,102 @@ +from zope.interface import Interface +from zope.interface import Attribute + + +class ITALExpressionErrorInfo(Interface): + + type = Attribute("type", + "The exception class.") + + value = Attribute("value", + "The exception instance.") + + lineno = Attribute("lineno", + "The line number the error occurred on in the source.") + + offset = Attribute("offset", + "The character offset at which the error occurred.") + + +class ITALIterator(Interface): # pragma: no cover + """A TAL iterator + + Not to be confused with a Python iterator. + """ + + def next(): + """Advance to the next value in the iteration, if possible + + Return a true value if it was possible to advance and return + a false value otherwise. + """ + + +class ITALESIterator(ITALIterator): # pragma: no cover + """TAL Iterator provided by TALES + + Values of this iterator are assigned to items in the repeat namespace. + + For example, with a TAL statement like: tal:repeat="item items", + an iterator will be assigned to "repeat/item". The iterator + provides a number of handy methods useful in writing TAL loops. + + The results are undefined of calling any of the methods except + 'length' before the first iteration. 
+ """ + + def index(): + """Return the position (starting with "0") within the iteration + """ + + def number(): + """Return the position (starting with "1") within the iteration + """ + + def even(): + """Return whether the current position is even + """ + + def odd(): + """Return whether the current position is odd + """ + + def parity(): + """Return 'odd' or 'even' depending on the position's parity + + Useful for assigning CSS class names to table rows. + """ + + def start(): + """Return whether the current position is the first position + """ + + def end(): + """Return whether the current position is the last position + """ + + def letter(): + """Return the position (starting with "a") within the iteration + """ + + def Letter(): + """Return the position (starting with "A") within the iteration + """ + + def roman(): + """Return the position (starting with "i") within the iteration + """ + + def Roman(): + """Return the position (starting with "I") within the iteration + """ + + def item(): + """Return the item at the current position + """ + + def length(): + """Return the length of the sequence + + Note that this may fail if the TAL iterator was created on a Python + iterator. + """ diff --git a/lib/Chameleon-2.22/src/chameleon/loader.py b/lib/Chameleon-2.22/src/chameleon/loader.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/loader.py @@ -0,0 +1,174 @@ +import functools +import imp +import logging +import os +import py_compile +import shutil +import sys +import tempfile +import warnings +import pkg_resources + +log = logging.getLogger('chameleon.loader') + +from .utils import string_type +from .utils import encode_string + + +def cache(func): + def load(self, *args, **kwargs): + template = self.registry.get(args) + if template is None: + self.registry[args] = template = func(self, *args, **kwargs) + return template + return load + + +def abspath_from_asset_spec(spec): + pname, filename = spec.split(':', 1) + return pkg_resources.resource_filename(pname, filename) + +if os.name == "nt": + def abspath_from_asset_spec(spec, f=abspath_from_asset_spec): + if spec[1] == ":": + return spec + return f(spec) + + +class TemplateLoader(object): + """Template loader class. + + To load templates using relative filenames, pass a sequence of + paths (or a single path) as ``search_path``. + + To apply a default filename extension to inputs which do not have + an extension already (i.e. no dot), provide this as + ``default_extension`` (e.g. ``'.pt'``). + + Additional keyword-arguments will be passed on to the template + constructor. + """ + + default_extension = None + + def __init__(self, search_path=None, default_extension=None, **kwargs): + if search_path is None: + search_path = [] + if isinstance(search_path, string_type): + search_path = [search_path] + if default_extension is not None: + self.default_extension = ".%s" % default_extension.lstrip('.') + self.search_path = search_path + self.registry = {} + self.kwargs = kwargs + + @cache + def load(self, spec, cls=None): + if cls is None: + raise ValueError("Unbound template loader.") + + spec = spec.strip() + + if self.default_extension is not None and '.' not in spec: + spec += self.default_extension + + if ':' in spec: + spec = abspath_from_asset_spec(spec) + + if not os.path.isabs(spec): + for path in self.search_path: + path = os.path.join(path, spec) + if os.path.exists(path): + spec = path + break + else: + raise ValueError("Template not found: %s." 
% spec) + + return cls(spec, search_path=self.search_path, **self.kwargs) + + def bind(self, cls): + return functools.partial(self.load, cls=cls) + + +class MemoryLoader(object): + def build(self, source, filename): + code = compile(source, filename, 'exec') + env = {} + exec(code, env) + return env + + def get(self, name): + return None + + +class ModuleLoader(object): + def __init__(self, path, remove=False): + self.path = path + self.remove = remove + + def __del__(self, shutil=shutil): + if not self.remove: + return + try: + shutil.rmtree(self.path) + except: + warnings.warn("Could not clean up temporary file path: %s" % (self.path,)) + + def get(self, filename): + path = os.path.join(self.path, filename) + if os.path.exists(path): + log.debug("loading module from cache: %s." % filename) + base, ext = os.path.splitext(filename) + return self._load(base, path) + else: + log.debug('cache miss: %s' % filename) + + def build(self, source, filename): + imp.acquire_lock() + try: + d = self.get(filename) + if d is not None: + return d + + base, ext = os.path.splitext(filename) + name = os.path.join(self.path, base + ".py") + + log.debug("writing source to disk (%d bytes)." % len(source)) + fd, fn = tempfile.mkstemp(prefix=base, suffix='.tmp', dir=self.path) + temp = os.fdopen(fd, 'wb') + encoded = source.encode('utf-8') + header = encode_string("# -*- coding: utf-8 -*-" + "\n") + + try: + try: + temp.write(header) + temp.write(encoded) + finally: + temp.close() + except: + os.remove(fn) + raise + + os.rename(fn, name) + log.debug("compiling %s into byte-code..." % filename) + py_compile.compile(name) + + return self._load(base, name) + finally: + imp.release_lock() + + def _load(self, base, filename): + imp.acquire_lock() + try: + module = sys.modules.get(base) + if module is None: + f = open(filename, 'rb') + try: + assert base not in sys.modules + module = imp.load_source(base, filename, f) + finally: + f.close() + finally: + imp.release_lock() + + return module.__dict__ diff --git a/lib/Chameleon-2.22/src/chameleon/metal.py b/lib/Chameleon-2.22/src/chameleon/metal.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/metal.py @@ -0,0 +1,23 @@ +############################################################################## +# +# Copyright (c) 2001, 2002 Zope Foundation and Contributors. +# All Rights Reserved. +# +# This software is subject to the provisions of the Zope Public License, +# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. +# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED +# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS +# FOR A PARTICULAR PURPOSE. 
+# +############################################################################## + +WHITELIST = frozenset([ + "define-macro", + "extend-macro", + "use-macro", + "define-slot", + "fill-slot", + "xmlns", + "xml" + ]) diff --git a/lib/Chameleon-2.22/src/chameleon/namespaces.py b/lib/Chameleon-2.22/src/chameleon/namespaces.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/namespaces.py @@ -0,0 +1,9 @@ +XML_NS = "http://www.w3.org/XML/1998/namespace" +XMLNS_NS = "http://www.w3.org/2000/xmlns/" +XHTML_NS = "http://www.w3.org/1999/xhtml" +TAL_NS = "http://xml.zope.org/namespaces/tal" +META_NS = "http://xml.zope.org/namespaces/meta" +METAL_NS = "http://xml.zope.org/namespaces/metal" +XI_NS = "http://www.w3.org/2001/XInclude" +I18N_NS = "http://xml.zope.org/namespaces/i18n" +PY_NS = "http://genshi.edgewall.org/" diff --git a/lib/Chameleon-2.22/src/chameleon/nodes.py b/lib/Chameleon-2.22/src/chameleon/nodes.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/nodes.py @@ -0,0 +1,233 @@ +from .astutil import Node + + +class UseExternalMacro(Node): + """Extend external macro.""" + + _fields = "expression", "slots", "extend" + + +class Sequence(Node): + """Element sequence.""" + + _fields = "items", + + def __nonzero__(self): + return bool(self.items) + + +class Content(Node): + """Content substitution.""" + + _fields = "expression", "char_escape", "translate" + + +class Default(Node): + """Represents a default value.""" + + _fields = "marker", + + +class CodeBlock(Node): + _fields = "source", + + +class Value(Node): + """Expression object value.""" + + _fields = "value", + + def __repr__(self): + try: + line, column = self.value.location + except AttributeError: + line, column = 0, 0 + + return "<%s %r (%d:%d)>" % ( + type(self).__name__, self.value, line, column + ) + + +class Substitution(Value): + """Expression value for text substitution.""" + + _fields = "value", "char_escape", "default" + + default = None + + +class Boolean(Value): + _fields = "value", "s" + + +class Negate(Node): + """Wraps an expression with a negation.""" + + _fields = "value", + + +class Element(Node): + """XML element.""" + + _fields = "start", "end", "content" + + +class DictAttributes(Node): + """Element attributes from one or more Python dicts.""" + + _fields = "expression", "char_escape", "quote", "exclude" + + +class Attribute(Node): + """Element attribute.""" + + _fields = "name", "expression", "quote", "eq", "space", "filters" + + +class Start(Node): + """Start-tag.""" + + _fields = "name", "prefix", "suffix", "attributes" + + +class End(Node): + """End-tag.""" + + _fields = "name", "space", "prefix", "suffix" + + +class Condition(Node): + """Node visited only if some condition holds.""" + + _fields = "expression", "node", "orelse" + + +class Identity(Node): + """Condition expression that is true on identity.""" + + _fields = "expression", "value" + + +class Equality(Node): + """Condition expression that is true on equality.""" + + _fields = "expression", "value" + + +class Cache(Node): + """Cache (evaluate only once) the value of ``expression`` inside + ``node``. + """ + + _fields = "expressions", "node" + + +class Cancel(Cache): + pass + + +class Copy(Node): + _fields = "expression", + + +class Assignment(Node): + """Variable assignment.""" + + _fields = "names", "expression", "local" + + +class Alias(Assignment): + """Alias assignment. + + Note that ``expression`` should be a cached or global value. 
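    For orientation (illustrative, based on how the compiler uses these
    nodes): node classes are constructed positionally from ``_fields``, so
    the expression evaluator builds its one-statement program as

        assignment = Assignment(["_result"], expression, True)
        module = Module("evaluate", Context(assignment))

    and an ``Alias`` is an ``Assignment`` whose ``local`` flag is fixed to
    ``False``.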
+ """ + + local = False + + +class Define(Node): + """Variable definition in scope.""" + + _fields = "assignments", "node" + + +class Repeat(Assignment): + """Iterate over provided assignment and repeat body.""" + + _fields = "names", "expression", "local", "whitespace", "node" + + +class Macro(Node): + """Macro definition.""" + + _fields = "name", "body" + + +class Program(Node): + _fields = "name", "body" + + +class Module(Node): + _fields = "name", "program", + + +class Context(Node): + _fields = "node", + + +class Text(Node): + """Static text output.""" + + _fields = "value", + + +class Interpolation(Text): + """String interpolation output.""" + + _fields = "value", "braces_required", "translation" + + +class Translate(Node): + """Translate node.""" + + _fields = "msgid", "node" + + +class Name(Node): + """Translation name.""" + + _fields = "name", "node" + + +class Domain(Node): + """Update translation domain.""" + + _fields = "name", "node" + + +class TxContext(Node): + """Update translation context.""" + + _fields = "name", "node" + + +class OnError(Node): + _fields = "fallback", "name", "node" + + +class UseInternalMacro(Node): + """Use internal macro (defined inside same program).""" + + _fields = "name", + + +class FillSlot(Node): + """Fill a macro slot.""" + + _fields = "name", "node" + + +class DefineSlot(Node): + """Define a macro slot.""" + + _fields = "name", "node" diff --git a/lib/Chameleon-2.22/src/chameleon/parser.py b/lib/Chameleon-2.22/src/chameleon/parser.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/parser.py @@ -0,0 +1,241 @@ +import re +import logging + +try: + from collections import OrderedDict +except ImportError: + from ordereddict import OrderedDict + +from .exc import ParseError +from .namespaces import XML_NS +from .tokenize import Token + +match_tag_prefix_and_name = re.compile( + r'^(?P<prefix></?)(?P<name>([^:\n ]+:)?[^ \n\t>/]+)' + '(?P<suffix>(?P<space>\s*)/?>)?', + re.UNICODE | re.DOTALL) +match_single_attribute = re.compile( + r'(?P<space>\s+)(?!\d)' + r'(?P<name>[^ =/>\n\t]+)' + r'((?P<eq>\s*=\s*)' + r'((?P<quote>[\'"])(?P<value>.*?)(?P=quote)|' + r'(?P<alt_value>[^\s\'">/]+))|' + r'(?P<simple_value>(?![ \\n\\t\\r]*=)))', + re.UNICODE | re.DOTALL) +match_comment = re.compile( + r'^<!--(?P<text>.*)-->$', re.DOTALL) +match_cdata = re.compile( + r'^<!\[CDATA\[(?P<text>.*)\]>$', re.DOTALL) +match_declaration = re.compile( + r'^<!(?P<text>[^>]+)>$', re.DOTALL) +match_processing_instruction = re.compile( + r'^<\?(?P<name>\w+)(?P<text>.*?)\?>', re.DOTALL) +match_xml_declaration = re.compile(r'^<\?xml(?=[ /])', re.DOTALL) + +log = logging.getLogger('chameleon.parser') + + +def substitute(regex, repl, token): + if not isinstance(token, Token): + token = Token(token) + + return Token( + regex.sub(repl, token), + token.pos, + token.source, + token.filename + ) + + +def groups(m, token): + result = [] + for i, group in enumerate(m.groups()): + if group is not None: + j, k = m.span(i + 1) + group = token[j:k] + + result.append(group) + + return tuple(result) + + +def groupdict(m, token): + d = m.groupdict() + for name, value in d.items(): + if value is not None: + i, j = m.span(name) + d[name] = token[i:j] + + return d + + +def match_tag(token, regex=match_tag_prefix_and_name): + m = regex.match(token) + d = groupdict(m, token) + + end = m.end() + token = token[end:] + + attrs = d['attrs'] = [] + for m in match_single_attribute.finditer(token): + attr = groupdict(m, token) + alt_value = attr.pop('alt_value', None) + if alt_value is 
not None: + attr['value'] = alt_value + attr['quote'] = '' + simple_value = attr.pop('simple_value', None) + if simple_value is not None: + attr['quote'] = '' + attr['value'] = '' + attr['eq'] = '' + attrs.append(attr) + d['suffix'] = token[m.end():] + + return d + + +def parse_tag(token, namespace): + node = match_tag(token) + + update_namespace(node['attrs'], namespace) + + if ':' in node['name']: + prefix = node['name'].split(':')[0] + else: + prefix = None + + default = node['namespace'] = namespace.get(prefix, XML_NS) + + node['ns_attrs'] = unpack_attributes( + node['attrs'], namespace, default) + + return node + + +def update_namespace(attributes, namespace): + # possibly update namespaces; we do this in a separate step + # because this assignment is irrespective of order + for attribute in attributes: + name = attribute['name'] + value = attribute['value'] + if name == 'xmlns': + namespace[None] = value + elif name.startswith('xmlns:'): + namespace[name[6:]] = value + + +def unpack_attributes(attributes, namespace, default): + namespaced = OrderedDict() + + for index, attribute in enumerate(attributes): + name = attribute['name'] + value = attribute['value'] + + if ':' in name: + prefix = name.split(':')[0] + name = name[len(prefix) + 1:] + try: + ns = namespace[prefix] + except KeyError: + raise KeyError( + "Undefined namespace prefix: %s." % prefix) + else: + ns = default + namespaced[ns, name] = value + + return namespaced + + +def identify(string): + if string.startswith("<"): + if string.startswith("<!--"): + return "comment" + if string.startswith("<![CDATA["): + return "cdata" + if string.startswith("<!"): + return "declaration" + if string.startswith("<?xml"): + return "xml_declaration" + if string.startswith("<?"): + return "processing_instruction" + if string.startswith("</"): + return "end_tag" + if string.endswith("/>"): + return "empty_tag" + if string.endswith(">"): + return "start_tag" + return "error" + return "text" + + +class ElementParser(object): + """Parses tokens into elements.""" + + def __init__(self, stream, default_namespaces): + self.stream = stream + self.queue = [] + self.index = [] + self.namespaces = [default_namespaces.copy()] + + def __iter__(self): + for token in self.stream: + item = self.parse(token) + self.queue.append(item) + + return iter(self.queue) + + def parse(self, token): + kind = identify(token) + visitor = getattr(self, "visit_%s" % kind, self.visit_default) + return visitor(kind, token) + + def visit_comment(self, kind, token): + return "comment", (token, ) + + def visit_cdata(self, kind, token): + return "cdata", (token, ) + + def visit_default(self, kind, token): + return "default", (token, ) + + def visit_processing_instruction(self, kind, token): + m = match_processing_instruction.match(token) + if m is None: + return self.visit_default(kind, token) + + return "processing_instruction", (groupdict(m, token), ) + + def visit_text(self, kind, token): + return kind, (token, ) + + def visit_start_tag(self, kind, token): + namespace = self.namespaces[-1].copy() + self.namespaces.append(namespace) + node = parse_tag(token, namespace) + self.index.append((node['name'], len(self.queue))) + return kind, (node, ) + + def visit_end_tag(self, kind, token): + try: + namespace = self.namespaces.pop() + except IndexError: + raise ParseError("Unexpected end tag.", token) + + node = parse_tag(token, namespace) + + while self.index: + name, pos = self.index.pop() + if name == node['name']: + start, = self.queue.pop(pos)[1] + children = 
self.queue[pos:] + del self.queue[pos:] + break + else: + raise ParseError("Unexpected end tag.", token) + + return "element", (start, node, children) + + def visit_empty_tag(self, kind, token): + namespace = self.namespaces[-1].copy() + node = parse_tag(token, namespace) + return "element", (node, None, []) diff --git a/lib/Chameleon-2.22/src/chameleon/program.py b/lib/Chameleon-2.22/src/chameleon/program.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/program.py @@ -0,0 +1,38 @@ +try: + str = unicode +except NameError: + long = int + +from .tokenize import iter_xml +from .tokenize import iter_text +from .parser import ElementParser +from .namespaces import XML_NS +from .namespaces import XMLNS_NS + + +class ElementProgram(object): + DEFAULT_NAMESPACES = { + 'xmlns': XMLNS_NS, + 'xml': XML_NS, + } + + tokenizers = { + 'xml': iter_xml, + 'text': iter_text, + } + + def __init__(self, source, mode="xml", filename=None): + tokenizer = self.tokenizers[mode] + tokens = tokenizer(source, filename) + parser = ElementParser(tokens, self.DEFAULT_NAMESPACES) + + self.body = [] + + for kind, args in parser: + node = self.visit(kind, args) + if node is not None: + self.body.append(node) + + def visit(self, kind, args): + visitor = getattr(self, "visit_%s" % kind) + return visitor(*args) diff --git a/lib/Chameleon-2.22/src/chameleon/py25.py b/lib/Chameleon-2.22/src/chameleon/py25.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/py25.py @@ -0,0 +1,36 @@ +import sys + +def lookup_attr(obj, key): + try: + return getattr(obj, key) + except AttributeError: + exc = sys.exc_info()[1] + try: + get = obj.__getitem__ + except AttributeError: + raise exc + try: + return get(key) + except KeyError: + raise exc + +def exec_(code, globs=None, locs=None): + """Execute code in a namespace.""" + if globs is None: + frame = sys._getframe(1) + globs = frame.f_globals + if locs is None: + locs = frame.f_locals + del frame + elif locs is None: + locs = globs + exec("""exec code in globs, locs""") + + +exec_("""def raise_with_traceback(exc, tb): + raise type(exc), exc, tb +""") + + +def next(iter): + return iter.next() diff --git a/lib/Chameleon-2.22/src/chameleon/py26.py b/lib/Chameleon-2.22/src/chameleon/py26.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/py26.py @@ -0,0 +1,15 @@ +import sys + +def lookup_attr(obj, key): + try: + return getattr(obj, key) + except AttributeError: + exc = sys.exc_info()[1] + try: + get = obj.__getitem__ + except AttributeError: + raise exc + try: + return get(key) + except KeyError: + raise exc diff --git a/lib/Chameleon-2.22/src/chameleon/tal.py b/lib/Chameleon-2.22/src/chameleon/tal.py new file mode 100755 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tal.py @@ -0,0 +1,497 @@ +############################################################################## +# +# Copyright (c) 2001, 2002 Zope Foundation and Contributors. +# All Rights Reserved. +# +# This software is subject to the provisions of the Zope Public License, +# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. +# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED +# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS +# FOR A PARTICULAR PURPOSE. 
+# +############################################################################## + +import re +import copy + +from .exc import LanguageError +from .utils import descriptorint +from .utils import descriptorstr +from .namespaces import XMLNS_NS +from .parser import groups + + +try: + next +except NameError: + from chameleon.py25 import next + +try: + # optional library: `zope.interface` + from chameleon import interfaces + import zope.interface +except ImportError: + interfaces = None + + +NAME = r"[a-zA-Z_][-a-zA-Z0-9_]*" +DEFINE_RE = re.compile(r"(?s)\s*(?:(global|local)\s+)?" + + r"(%s|\(%s(?:,\s*%s)*\))\s+(.*)\Z" % (NAME, NAME, NAME), + re.UNICODE) +SUBST_RE = re.compile(r"\s*(?:(text|structure)\s+)?(.*)\Z", re.S | re.UNICODE) +ATTR_RE = re.compile(r"\s*([^\s{}'\"]+)\s+([^\s].*)\Z", re.S | re.UNICODE) + +ENTITY_RE = re.compile(r'(&(#?)(x?)(\d{1,5}|\w{1,8});)') + +WHITELIST = frozenset([ + "define", + "comment", + "condition", + "content", + "replace", + "repeat", + "attributes", + "on-error", + "omit-tag", + "script", + "switch", + "case", + "xmlns", + "xml" + ]) + + +def split_parts(arg): + # Break in pieces at undoubled semicolons and + # change double semicolons to singles: + i = 0 + while i < len(arg): + m = ENTITY_RE.search(arg[i:]) + if m is None: + break + arg = arg[:i + m.end()] + ';' + arg[i + m.end():] + i += m.end() + + arg = arg.replace(";;", "\0") + parts = arg.split(';') + parts = [p.replace("\0", ";") for p in parts] + if len(parts) > 1 and not parts[-1].strip(): + del parts[-1] # It ended in a semicolon + + return parts + + +def parse_attributes(clause): + attrs = [] + seen = set() + for part in split_parts(clause): + m = ATTR_RE.match(part) + if not m: + name, expr = None, part.strip() + else: + name, expr = groups(m, part) + + if name in seen: + raise LanguageError( + "Duplicate attribute name in attributes.", part) + + seen.add(name) + attrs.append((name, expr)) + + return attrs + + +def parse_substitution(clause): + m = SUBST_RE.match(clause) + if m is None: + raise LanguageError( + "Invalid content substitution syntax.", clause) + + key, expression = groups(m, clause) + if not key: + key = "text" + + return key, expression + + +def parse_defines(clause): + """ + Parses a tal:define value. + + # Basic syntax, implicit local + >>> parse_defines('hello lovely') + [('local', ('hello',), 'lovely')] + + # Explicit local + >>> parse_defines('local hello lovely') + [('local', ('hello',), 'lovely')] + + # With global + >>> parse_defines('global hello lovely') + [('global', ('hello',), 'lovely')] + + # Multiple expressions + >>> parse_defines('hello lovely; tea time') + [('local', ('hello',), 'lovely'), ('local', ('tea',), 'time')] + + # With multiple names + >>> parse_defines('(hello, howdy) lovely') + [('local', ['hello', 'howdy'], 'lovely')] + + # With unicode whitespace + >>> try: + ... s = '\xc2\xa0hello lovely'.decode('utf-8') + ... except AttributeError: + ... s = '\xa0hello lovely' + >>> from chameleon.utils import unicode_string + >>> parse_defines(s) == [ + ... ('local', ('hello',), 'lovely') + ... 
] + True + + """ + defines = [] + for part in split_parts(clause): + m = DEFINE_RE.match(part) + if m is None: + raise LanguageError("Invalid define syntax", part) + context, name, expr = groups(m, part) + context = context or "local" + + if name.startswith('('): + names = [n.strip() for n in name.strip('()').split(',')] + else: + names = (name,) + + defines.append((context, names, expr)) + + return defines + + +def prepare_attributes(attrs, dyn_attributes, i18n_attributes, + ns_attributes, drop_ns): + drop = set([attribute['name'] for attribute, (ns, value) + in zip(attrs, ns_attributes) + if ns in drop_ns or ( + ns == XMLNS_NS and + attribute['value'] in drop_ns + ) + ]) + + attributes = [] + normalized = {} + computed = [] + + for attribute in attrs: + name = attribute['name'] + + if name in drop: + continue + + attributes.append(( + name, + attribute['value'], + attribute['quote'], + attribute['space'], + attribute['eq'], + None, + )) + + normalized[name.lower()] = len(attributes) - 1 + + for name, expr in dyn_attributes: + index = normalized.get(name.lower()) if name else None + + if index is not None: + _, text, quote, space, eq, _ = attributes[index] + add = attributes.__setitem__ + else: + text = None + quote = '"' + space = " " + eq = "=" + index = len(attributes) + add = attributes.insert + if name is not None: + normalized[name.lower()] = len(attributes) - 1 + + attribute = name, text, quote, space, eq, expr + add(index, attribute) + + for name in i18n_attributes: + attr = name.lower() + if attr not in normalized: + attributes.append((name, name, '"', " ", "=", None)) + normalized[attr] = len(attributes) - 1 + + return attributes + + +class RepeatItem(object): + __slots__ = "length", "_iterator" + + __allow_access_to_unprotected_subobjects__ = True + + def __init__(self, iterator, length): + self.length = length + self._iterator = iterator + + def __iter__(self): + return self._iterator + + try: + iter(()).__len__ + except AttributeError: + @descriptorint + def index(self): + try: + remaining = self._iterator.__length_hint__() + except AttributeError: + remaining = len(tuple(copy.copy(self._iterator))) + return self.length - remaining - 1 + else: + @descriptorint + def index(self): + remaining = self._iterator.__len__() + return self.length - remaining - 1 + + @descriptorint + def start(self): + return self.index == 0 + + @descriptorint + def end(self): + return self.index == self.length - 1 + + @descriptorint + def number(self): + return self.index + 1 + + @descriptorstr + def odd(self): + """Returns a true value if the item index is odd. + + >>> it = RepeatItem(iter(("apple", "pear")), 2) + + >>> next(it._iterator) + 'apple' + >>> it.odd() + '' + + >>> next(it._iterator) + 'pear' + >>> it.odd() + 'odd' + """ + + return self.index % 2 == 1 and 'odd' or '' + + @descriptorstr + def even(self): + """Returns a true value if the item index is even. + + >>> it = RepeatItem(iter(("apple", "pear")), 2) + + >>> next(it._iterator) + 'apple' + >>> it.even() + 'even' + + >>> next(it._iterator) + 'pear' + >>> it.even() + '' + """ + + return self.index % 2 == 0 and 'even' or '' + + @descriptorstr + def parity(self): + """Return 'odd' or 'even' depending on the position's parity + + Useful for assigning CSS class names to table rows. 
+ """ + + return self.index % 2 == 0 and 'even' or 'odd' + + def next(self): + raise NotImplementedError( + "Method not implemented (can't update local variable).") + + def _letter(self, base=ord('a'), radix=26): + """Get the iterator position as a lower-case letter + + >>> it = RepeatItem(iter(("apple", "pear", "orange")), 3) + >>> next(it._iterator) + 'apple' + >>> it.letter() + 'a' + >>> next(it._iterator) + 'pear' + >>> it.letter() + 'b' + >>> next(it._iterator) + 'orange' + >>> it.letter() + 'c' + """ + + index = self.index + if index < 0: + raise TypeError("No iteration position") + s = "" + while 1: + index, off = divmod(index, radix) + s = chr(base + off) + s + if not index: + return s + + letter = descriptorstr(_letter) + + @descriptorstr + def Letter(self): + """Get the iterator position as an upper-case letter + + >>> it = RepeatItem(iter(("apple", "pear", "orange")), 3) + >>> next(it._iterator) + 'apple' + >>> it.Letter() + 'A' + >>> next(it._iterator) + 'pear' + >>> it.Letter() + 'B' + >>> next(it._iterator) + 'orange' + >>> it.Letter() + 'C' + """ + + return self._letter(base=ord('A')) + + @descriptorstr + def Roman(self, rnvalues=( + (1000, 'M'), (900, 'CM'), (500, 'D'), (400, 'CD'), + (100, 'C'), (90, 'XC'), (50, 'L'), (40, 'XL'), + (10, 'X'), (9, 'IX'), (5, 'V'), (4, 'IV'), (1, 'I'))): + """Get the iterator position as an upper-case roman numeral + + >>> it = RepeatItem(iter(("apple", "pear", "orange")), 3) + >>> next(it._iterator) + 'apple' + >>> it.Roman() + 'I' + >>> next(it._iterator) + 'pear' + >>> it.Roman() + 'II' + >>> next(it._iterator) + 'orange' + >>> it.Roman() + 'III' + """ + + n = self.index + 1 + s = "" + for v, r in rnvalues: + rct, n = divmod(n, v) + s = s + r * rct + return s + + @descriptorstr + def roman(self): + """Get the iterator position as a lower-case roman numeral + + >>> it = RepeatItem(iter(("apple", "pear", "orange")), 3) + >>> next(it._iterator) + 'apple' + >>> it.roman() + 'i' + >>> next(it._iterator) + 'pear' + >>> it.roman() + 'ii' + >>> next(it._iterator) + 'orange' + >>> it.roman() + 'iii' + """ + + return self.Roman().lower() + + +if interfaces is not None: + zope.interface.classImplements(RepeatItem, interfaces.ITALESIterator) + + +class RepeatDict(dict): + """Repeat dictionary implementation. + + >>> repeat = RepeatDict({}) + >>> iterator, length = repeat('numbers', range(5)) + >>> length + 5 + + >>> repeat['numbers'] + <chameleon.tal.RepeatItem object at ...> + + >>> repeat.numbers + <chameleon.tal.RepeatItem object at ...> + + >>> getattr(repeat, 'missing_key', None) is None + True + + >>> try: + ... from chameleon import interfaces + ... interfaces.ITALESIterator(repeat,None) is None + ... except ImportError: + ... True + ... 
+ True + """ + + __slots__ = "__setitem__", "__getitem__" + + def __init__(self, d): + self.__setitem__ = d.__setitem__ + self.__getitem__ = d.__getitem__ + + def __getattr__(self,key): + try: + return self[key] + except KeyError: + raise AttributeError(key) + + + def __call__(self, key, iterable): + """We coerce the iterable to a tuple and return an iterator + after registering it in the repeat dictionary.""" + + iterable = list(iterable) if iterable is not None else () + + length = len(iterable) + iterator = iter(iterable) + + # Insert as repeat item + self[key] = RepeatItem(iterator, length) + + return iterator, length + + +class ErrorInfo(object): + """Information about an exception passed to an on-error handler.""" + + def __init__(self, err, position=(None, None)): + if isinstance(err, Exception): + self.type = err.__class__ + self.value = err + else: + self.type = err + self.value = None + self.lineno = position[0] + self.offset = position[1] + + +if interfaces is not None: + zope.interface.classImplements(ErrorInfo, interfaces.ITALExpressionErrorInfo) diff --git a/lib/Chameleon-2.22/src/chameleon/tales.py b/lib/Chameleon-2.22/src/chameleon/tales.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tales.py @@ -0,0 +1,556 @@ +import re +import sys + +from .astutil import parse +from .astutil import store +from .astutil import load +from .astutil import ItemLookupOnAttributeErrorVisitor +from .codegen import TemplateCodeGenerator +from .codegen import template +from .codegen import reverse_builtin_map +from .astutil import Builtin +from .astutil import Symbol +from .exc import ExpressionError +from .utils import resolve_dotted +from .utils import Markup +from .utils import ast +from .tokenize import Token +from .parser import substitute +from .compiler import Interpolator + +try: + from .py26 import lookup_attr +except SyntaxError: + from .py25 import lookup_attr + + +split_parts = re.compile(r'(?<!\\)\|') +match_prefix = re.compile(r'^\s*([a-z\-_]+):').match +re_continuation = re.compile(r'\\\s*$', re.MULTILINE) + +try: + from __builtin__ import basestring +except ImportError: + basestring = str + + +def resolve_global(value): + name = reverse_builtin_map.get(value) + if name is not None: + return Builtin(name) + + return Symbol(value) + + +def test(expression, engine=None, **env): + if engine is None: + engine = SimpleEngine() + + body = expression(store("result"), engine) + module = ast.Module(body) + module = ast.fix_missing_locations(module) + env['rcontext'] = {} + source = TemplateCodeGenerator(module).code + code = compile(source, '<string>', 'exec') + exec(code, env) + result = env["result"] + + if isinstance(result, basestring): + result = str(result) + + return result + + +def transform_attribute(node): + return template( + "lookup(object, name)", + lookup=Symbol(lookup_attr), + object=node.value, + name=ast.Str(s=node.attr), + mode="eval" + ) + + +class TalesExpr(object): + """Base class. + + This class helps implementations for the Template Attribute + Language Expression Syntax (TALES). + + The syntax evaluates one or more expressions, separated by '|' + (pipe). The first expression that succeeds, is returned. + + Expression: + + expression := (type ':')? line ('|' expression)? + line := .* + + Expression lines may not contain the pipe character unless + escaped. 
It has a special meaning: + + If the expression to the left of the pipe fails (raises one of the + exceptions listed in ``catch_exceptions``), evaluation proceeds to + the expression(s) on the right. + + Subclasses must implement ``translate`` which assigns a value for + a given expression. + + >>> class PythonPipeExpr(TalesExpr): + ... def translate(self, expression, target): + ... compiler = PythonExpr(expression) + ... return compiler(target, None) + + >>> test(PythonPipeExpr('foo | bar | 42')) + 42 + + >>> test(PythonPipeExpr('foo|42')) + 42 + """ + + exceptions = NameError, \ + ValueError, \ + AttributeError, \ + LookupError, \ + TypeError + + ignore_prefix = True + + def __init__(self, expression): + self.expression = expression + + def __call__(self, target, engine): + remaining = self.expression + assignments = [] + + while remaining: + if self.ignore_prefix and match_prefix(remaining) is not None: + compiler = engine.parse(remaining) + assignment = compiler.assign_value(target) + remaining = "" + else: + for m in split_parts.finditer(remaining): + expression = remaining[:m.start()] + remaining = remaining[m.end():] + break + else: + expression = remaining + remaining = "" + + expression = expression.replace('\\|', '|') + assignment = self.translate_proxy(engine, expression, target) + assignments.append(assignment) + + if not assignments: + if not remaining: + raise ExpressionError("No input:", remaining) + + assignments.append( + self.translate_proxy(engine, remaining, target) + ) + + for i, assignment in enumerate(reversed(assignments)): + if i == 0: + body = assignment + else: + body = [ast.TryExcept( + body=assignment, + handlers=[ast.ExceptHandler( + type=ast.Tuple( + elts=map(resolve_global, self.exceptions), + ctx=ast.Load()), + name=None, + body=body, + )], + )] + + return body + + def translate_proxy(self, engine, *args): + """Default implementation delegates to ``translate`` method.""" + + return self.translate(*args) + + def translate(self, expression, target): + """Return statements that assign a value to ``target``.""" + + raise NotImplementedError( + "Must be implemented by a subclass.") + + +class PathExpr(TalesExpr): + """Path expression compiler. + + Syntax:: + + PathExpr ::= Path [ '|' Path ]* + Path ::= variable [ '/' URL_Segment ]* + variable ::= Name + + For example:: + + request/cookies/oatmeal + nothing + here/some-file 2001_02.html.tar.gz/foo + root/to/branch | default + + When a path expression is evaluated, it attempts to traverse + each path, from left to right, until it succeeds or runs out of + paths. To traverse a path, it first fetches the object stored in + the variable. For each path segment, it traverses from the current + object to the subobject named by the path segment. + + Once a path has been successfully traversed, the resulting object + is the value of the expression. If it is a callable object, such + as a method or class, it is called. + + The semantics of traversal (and what it means to be callable) are + implementation-dependent (see the ``translate`` method). + """ + + def translate(self, expression, target): + raise NotImplementedError( + "Path expressions are not yet implemented. " + "It's unclear whether a general implementation " + "can be devised.") + + +class PythonExpr(TalesExpr): + """Python expression compiler. + + >>> test(PythonExpr('2 + 2')) + 4 + + The Python expression is a TALES expression. 
That means we can use + the pipe operator: + + >>> test(PythonExpr('foo | 2 + 2 | 5')) + 4 + + To include a pipe character, use a backslash escape sequence: + + >>> test(PythonExpr('\"\|\"')) + '|' + """ + + transform = ItemLookupOnAttributeErrorVisitor(transform_attribute) + + def parse(self, string): + return parse(string, 'eval').body + + def translate(self, expression, target): + # Strip spaces + string = expression.strip() + + # Conver line continuations to newlines + string = substitute(re_continuation, '\n', string) + + # Convert newlines to spaces + string = string.replace('\n', ' ') + + try: + value = self.parse(string) + except SyntaxError: + exc = sys.exc_info()[1] + raise ExpressionError(exc.msg, string) + + # Transform attribute lookups to allow fallback to item lookup + self.transform.visit(value) + + return [ast.Assign(targets=[target], value=value)] + + +class ImportExpr(object): + re_dotted = re.compile(r'^[A-Za-z.]+$') + + def __init__(self, expression): + self.expression = expression + + def __call__(self, target, engine): + string = self.expression.strip().replace('\n', ' ') + value = template( + "RESOLVE(NAME)", + RESOLVE=Symbol(resolve_dotted), + NAME=ast.Str(s=string), + mode="eval", + ) + return [ast.Assign(targets=[target], value=value)] + + +class NotExpr(object): + """Negates the expression. + + >>> engine = SimpleEngine(PythonExpr) + + >>> test(NotExpr('False'), engine) + True + >>> test(NotExpr('True'), engine) + False + """ + + def __init__(self, expression): + self.expression = expression + + def __call__(self, target, engine): + compiler = engine.parse(self.expression) + body = compiler.assign_value(target) + return body + template("target = not target", target=target) + + +class StructureExpr(object): + """Wraps the expression result as 'structure'. + + >>> engine = SimpleEngine(PythonExpr) + + >>> test(StructureExpr('\"<tt>foo</tt>\"'), engine) + '<tt>foo</tt>' + """ + + wrapper_class = Symbol(Markup) + + def __init__(self, expression): + self.expression = expression + + def __call__(self, target, engine): + compiler = engine.parse(self.expression) + body = compiler.assign_value(target) + return body + template( + "target = wrapper(target)", + target=target, + wrapper=self.wrapper_class + ) + + +class IdentityExpr(object): + """Identity expression. + + Exists to demonstrate the interface. + + >>> test(IdentityExpr('42')) + 42 + """ + + def __init__(self, expression): + self.expression = expression + + def __call__(self, target, engine): + compiler = engine.parse(self.expression) + return compiler.assign_value(target) + + +class StringExpr(object): + """Similar to the built-in ``string.Template``, but uses an + expression engine to support pluggable string substitution + expressions. + + Expr string: + + string := (text | substitution) (string)? + substitution := ('$' variable | '${' expression '}') + text := .* + + In other words, an expression string can contain multiple + substitutions. The text- and substitution parts will be + concatenated back into a string. + + >>> test(StringExpr('Hello ${name}!'), name='world') + 'Hello world!' + + In the default configuration, braces may be omitted if the + expression is an identifier. + + >>> test(StringExpr('Hello $name!'), name='world') + 'Hello world!' + + The ``braces_required`` flag changes this setting: + + >>> test(StringExpr('Hello $name!', True)) + 'Hello $name!' 
+ + We can escape interpolation using the standard escaping + syntax: + + >>> test(StringExpr('\\${name}')) + '\\\${name}' + + Multiple interpolations in one: + + >>> test(StringExpr(\"Hello ${'a'}${'b'}${'c'}!\")) + 'Hello abc!' + + Here's a more involved example taken from a javascript source: + + >>> result = test(StringExpr(\"\"\" + ... function(oid) { + ... $('#' + oid).autocomplete({source: ${'source'}}); + ... } + ... \"\"\")) + + >>> 'source: source' in result + True + + In the above examples, the expression is evaluated using the + dummy engine which just returns the input as a string. + + As an example, we'll implement an expression engine which + instead counts the number of characters in the expresion and + returns an integer result. + + >>> class engine: + ... @staticmethod + ... def parse(expression): + ... class compiler: + ... @staticmethod + ... def assign_text(target): + ... return [ + ... ast.Assign( + ... targets=[target], + ... value=ast.Num(n=len(expression)) + ... )] + ... + ... return compiler + + This will demonstrate how the string expression coerces the + input to a string. + + >>> expr = StringExpr( + ... 'There are ${hello world} characters in \"hello world\"') + + We evaluate the expression using the new engine: + + >>> test(expr, engine) + 'There are 11 characters in \"hello world\"' + """ + + def __init__(self, expression, braces_required=False): + # The code relies on the expression being a token string + if not isinstance(expression, Token): + expression = Token(expression, 0) + + self.translator = Interpolator(expression, braces_required) + + def __call__(self, name, engine): + return self.translator(name, engine) + + +class ProxyExpr(TalesExpr): + braces_required = False + + def __init__(self, name, expression, ignore_prefix=True): + super(ProxyExpr, self).__init__(expression) + self.ignore_prefix = ignore_prefix + self.name = name + + def translate_proxy(self, engine, expression, target): + translator = Interpolator(expression, self.braces_required) + assignment = translator(target, engine) + + return assignment + [ + ast.Assign(targets=[target], value=ast.Call( + func=load(self.name), + args=[target], + keywords=[], + starargs=None, + kwargs=None + )) + ] + + +class ExistsExpr(object): + """Boolean wrapper. + + Return 0 if the expression results in an exception, otherwise 1. 
+ + As a means to generate exceptions, we set up an expression engine + which evaluates the provided expression using Python: + + >>> engine = SimpleEngine(PythonExpr) + + >>> test(ExistsExpr('int(0)'), engine) + 1 + >>> test(ExistsExpr('int(None)'), engine) + 0 + + """ + + exceptions = AttributeError, LookupError, TypeError, NameError, KeyError + + def __init__(self, expression): + self.expression = expression + + def __call__(self, target, engine): + ignore = store("_ignore") + compiler = engine.parse(self.expression) + body = compiler.assign_value(ignore) + + classes = map(resolve_global, self.exceptions) + + return [ + ast.TryExcept( + body=body, + handlers=[ast.ExceptHandler( + type=ast.Tuple(elts=classes, ctx=ast.Load()), + name=None, + body=template("target = 0", target=target), + )], + orelse=template("target = 1", target=target) + ) + ] + + +class ExpressionParser(object): + def __init__(self, factories, default): + self.factories = factories + self.default = default + + def __call__(self, expression): + m = match_prefix(expression) + if m is not None: + prefix = m.group(1) + expression = expression[m.end():] + else: + prefix = self.default + + try: + factory = self.factories[prefix] + except KeyError: + exc = sys.exc_info()[1] + raise LookupError( + "Unknown expression type: %s." % str(exc) + ) + + return factory(expression) + + +class SimpleEngine(object): + expression = PythonExpr + + def __init__(self, expression=None): + if expression is not None: + self.expression = expression + + def parse(self, string): + compiler = self.expression(string) + return SimpleCompiler(compiler, self) + + +class SimpleCompiler(object): + def __init__(self, compiler, engine): + self.compiler = compiler + self.engine = engine + + def assign_text(self, target): + """Assign expression string as a text value.""" + + return self._assign_value_and_coerce(target, "str") + + def assign_value(self, target): + """Assign expression string as object value.""" + + return self.compiler(target, self.engine) + + def _assign_value_and_coerce(self, target, builtin): + return self.assign_value(target) + template( + "target = builtin(target)", + target=target, + builtin=builtin + ) diff --git a/lib/Chameleon-2.22/src/chameleon/template.py b/lib/Chameleon-2.22/src/chameleon/template.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/template.py @@ -0,0 +1,333 @@ +from __future__ import with_statement + +import os +import sys +import hashlib +import logging +import tempfile +import inspect + +pkg_digest = hashlib.sha1(__name__.encode('utf-8')) + +try: + import pkg_resources +except ImportError: + logging.info("Setuptools not installed. 
Unable to determine version.") +else: + for path in sys.path: + for distribution in pkg_resources.find_distributions(path): + if distribution.has_version(): + version = distribution.version.encode('utf-8') + pkg_digest.update(version) + + +from .exc import RenderError +from .exc import TemplateError +from .exc import ExceptionFormatter +from .compiler import Compiler +from .config import DEBUG_MODE +from .config import AUTO_RELOAD +from .config import EAGER_PARSING +from .config import CACHE_DIRECTORY +from .loader import ModuleLoader +from .loader import MemoryLoader +from .nodes import Module +from .utils import DebuggingOutputStream +from .utils import Scope +from .utils import join +from .utils import mangle +from .utils import create_formatted_exception +from .utils import read_bytes +from .utils import raise_with_traceback +from .utils import byte_string + + +log = logging.getLogger('chameleon.template') + + +def _make_module_loader(): + remove = False + if CACHE_DIRECTORY: + path = CACHE_DIRECTORY + else: + path = tempfile.mkdtemp() + remove = True + + return ModuleLoader(path, remove) + + +class BaseTemplate(object): + """Template base class. + + Takes a string input which must be one of the following: + + - a unicode string (or string on Python 3); + - a utf-8 encoded byte string; + - a byte string for an XML document that defines an encoding + in the document premamble; + - an HTML document that specifies the encoding via the META tag. + + Note that the template input is decoded, parsed and compiled on + initialization. + """ + + default_encoding = "utf-8" + + # This attribute is strictly informational in this template class + # and is used in exception formatting. It may be set on + # initialization using the optional ``filename`` keyword argument. + filename = '<string>' + + _cooked = False + + if DEBUG_MODE or CACHE_DIRECTORY: + loader = _make_module_loader() + else: + loader = MemoryLoader() + + if DEBUG_MODE: + output_stream_factory = DebuggingOutputStream + else: + output_stream_factory = list + + debug = DEBUG_MODE + + # The ``builtins`` dictionary can be used by a template class to + # add symbols which may not be redefined and which are (cheaply) + # available in the template variable scope + builtins = {} + + # The ``builtins`` dictionary is updated with this dictionary at + # cook time. Note that it can be provided at class initialization + # using the ``extra_builtins`` keyword argument. + extra_builtins = {} + + # Expression engine must be provided by subclass + engine = None + + # When ``strict`` is set, expressions must be valid at compile + # time. When not set, this is only required at evaluation time. + strict = True + + def __init__(self, body=None, **config): + self.__dict__.update(config) + + if body is not None: + self.write(body) + + # This is only necessary if the ``debug`` flag was passed as a + # keyword argument + if self.__dict__.get('debug') is True: + self.loader = _make_module_loader() + + def __call__(self, **kwargs): + return self.render(**kwargs) + + def __repr__(self): + return "<%s %s>" % (self.__class__.__name__, self.filename) + + @property + def keep_body(self): + # By default, we only save the template body if we're + # in debugging mode (to save memory). + return self.__dict__.get('keep_body', DEBUG_MODE) + + @property + def keep_source(self): + # By default, we only save the generated source code if we're + # in debugging mode (to save memory). 
+ return self.__dict__.get('keep_source', DEBUG_MODE) + + def cook(self, body): + builtins_dict = self.builtins.copy() + builtins_dict.update(self.extra_builtins) + names, builtins = zip(*sorted(builtins_dict.items())) + digest = self.digest(body, names) + program = self._cook(body, digest, names) + + initialize = program['initialize'] + functions = initialize(*builtins) + + for name, function in functions.items(): + setattr(self, "_" + name, function) + + self._cooked = True + + if self.keep_body: + self.body = body + + def cook_check(self): + assert self._cooked + + def parse(self, body): + raise NotImplementedError("Must be implemented by subclass.") + + def render(self, **__kw): + econtext = Scope(__kw) + rcontext = {} + self.cook_check() + stream = self.output_stream_factory() + try: + self._render(stream, econtext, rcontext) + except: + cls, exc, tb = sys.exc_info() + errors = rcontext.get('__error__') + if errors: + formatter = exc.__str__ + if isinstance(formatter, ExceptionFormatter): + if errors is not formatter._errors: + formatter._errors.extend(errors) + raise + + formatter = ExceptionFormatter(errors, econtext, rcontext) + + try: + exc = create_formatted_exception( + exc, cls, formatter, RenderError + ) + except TypeError: + pass + + raise_with_traceback(exc, tb) + + raise + + return join(stream) + + def write(self, body): + if isinstance(body, byte_string): + body, encoding, content_type = read_bytes( + body, self.default_encoding + ) + else: + content_type = body.startswith('<?xml') + encoding = None + + self.content_type = content_type + self.content_encoding = encoding + + self.cook(body) + + def _get_module_name(self, name): + return "%s.py" % name + + def _cook(self, body, name, builtins): + filename = self._get_module_name(name) + cooked = self.loader.get(filename) + if cooked is None: + try: + source = self._make(body, builtins) + if self.debug: + source = "# template: %s\n#\n%s" % ( + self.filename, source) + if self.keep_source: + self.source = source + cooked = self.loader.build(source, filename) + except TemplateError: + exc = sys.exc_info()[1] + exc.token.filename = self.filename + raise + elif self.keep_source: + module = sys.modules.get(cooked.get('__name__')) + if module is not None: + self.source = inspect.getsource(module) + else: + self.source = None + return cooked + + def digest(self, body, names): + class_name = type(self).__name__.encode('utf-8') + sha = pkg_digest.copy() + sha.update(body.encode('utf-8', 'ignore')) + sha.update(class_name) + return sha.hexdigest() + + def _compile(self, program, builtins): + compiler = Compiler(self.engine, program, builtins, strict=self.strict) + return compiler.code + + def _make(self, body, builtins): + program = self.parse(body) + module = Module("initialize", program) + return self._compile(module, builtins) + + +class BaseTemplateFile(BaseTemplate): + """File-based template base class. + + Relative path names are supported only when a template loader is + provided as the ``loader`` parameter. 
+ """ + + # Auto reload is not enabled by default because it's a significant + # performance hit + auto_reload = AUTO_RELOAD + + def __init__(self, filename, auto_reload=None, **config): + # Normalize filename + filename = os.path.abspath( + os.path.normpath(os.path.expanduser(filename)) + ) + + self.filename = filename + + # Override reload setting only if value is provided explicitly + if auto_reload is not None: + self.auto_reload = auto_reload + + super(BaseTemplateFile, self).__init__(**config) + + if EAGER_PARSING: + self.cook_check() + + def cook_check(self): + if self.auto_reload: + mtime = self.mtime() + + if mtime != self._v_last_read: + self._v_last_read = mtime + self._cooked = False + + if self._cooked is False: + body = self.read() + log.debug("cooking %r (%d bytes)..." % (self.filename, len(body))) + self.cook(body) + + def mtime(self): + try: + return os.path.getmtime(self.filename) + except (IOError, OSError): + return 0 + + def read(self): + with open(self.filename, "rb") as f: + data = f.read() + + body, encoding, content_type = read_bytes( + data, self.default_encoding + ) + + # In non-XML mode, we support various platform-specific line + # endings and convert them to the UNIX newline character + if content_type != "text/xml" and '\r' in body: + body = body.replace('\r\n', '\n').replace('\r', '\n') + + self.content_type = content_type + self.content_encoding = encoding + + return body + + def _get_module_name(self, name): + filename = os.path.basename(self.filename) + mangled = mangle(filename) + return "%s_%s.py" % (mangled, name) + + def _get_filename(self): + return self.__dict__.get('filename') + + def _set_filename(self, filename): + self.__dict__['filename'] = filename + self._v_last_read = None + self._cooked = False + + filename = property(_get_filename, _set_filename) diff --git a/lib/Chameleon-2.22/src/chameleon/tests/__init__.py b/lib/Chameleon-2.22/src/chameleon/tests/__init__.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/__init__.py @@ -0,0 +1,1 @@ +# diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/001-interpolation.txt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/001-interpolation.txt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/001-interpolation.txt @@ -0,0 +1,1 @@ +${'<Hello world>'}<&> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/001-interpolation.txt.py b/lib/Chameleon-2.22/src/chameleon/tests/inputs/001-interpolation.txt.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/001-interpolation.txt.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +pass +import sys as _sys +pass +import re +import functools +_marker = object() +g_re_amp = re.compile('&(?!([A-Za-z]+|#[0-9]+);)') +g_re_needs_escape = re.compile('[&<>\\"\\\']').search +re_whitespace = functools.partial(re.compile('\\s+').sub, ' ') + +def render(stream, econtext, rcontext): + append = stream.append + getitem = econtext.__getitem__ + get = econtext.get + _i18n_domain = None + re_amp = g_re_amp + re_needs_escape = g_re_needs_escape + decode = getitem('decode') + convert = getitem('convert') + translate = getitem('translate') + + # <Expression u"'<Hello world>'" (1:2)> -> _content_139955154988272 + try: + _content_139955154988272 = '<Hello world>' + except: + rcontext.setdefault('__error__', []).append((u"'<Hello world>'", 1, 2, '<string>', _sys.exc_info()[1], )) + raise + + if (_content_139955154988272 is not None): + _tt = type(_content_139955154988272) + 
if ((_tt is int) or (_tt is float) or (_tt is long)): + _content_139955154988272 = str(_content_139955154988272) + else: + if (_tt is str): + _content_139955154988272 = decode(_content_139955154988272) + else: + if (_tt is not unicode): + try: + _content_139955154988272 = _content_139955154988272.__html__ + except AttributeError: + _content_139955154988272 = convert(_content_139955154988272) + else: + _content_139955154988272 = _content_139955154988272() + _content_139955154988272 = ('%s%s' % ((_content_139955154988272 if (_content_139955154988272 is not None) else ''), (u'<&>\n' if (u'<&>\n' is not None) else ''), )) + if (_content_139955154988272 is not None): + append(_content_139955154988272) +pass \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/001-variable-scope.html b/lib/Chameleon-2.22/src/chameleon/tests/inputs/001-variable-scope.html new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/001-variable-scope.html @@ -0,0 +1,7 @@ +<html> + <body py:with="text 'Hello world!'"> + ${text} + $text + </body> + ${text | 'Goodbye world!'} +</html> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/001-variable-scope.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/001-variable-scope.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/001-variable-scope.pt @@ -0,0 +1,11 @@ +<html> + <body tal:define="text 'Hello world!'"> + ${text} + </body> + <tal:check condition="exists: text"> + bad + </tal:check> + <tal:check condition="not: exists: text"> + ok + </tal:check> +</html> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/001-variable-scope.pt.py b/lib/Chameleon-2.22/src/chameleon/tests/inputs/001-variable-scope.pt.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/001-variable-scope.pt.py @@ -0,0 +1,207 @@ +# -*- coding: utf-8 -*- +pass +import sys as _sys +pass +_static_35706576 = {} +_static_35782288 = {} +import re +import functools +_marker = object() +g_re_amp = re.compile('&(?!([A-Za-z]+|#[0-9]+);)') +g_re_needs_escape = re.compile('[&<>\\"\\\']').search +re_whitespace = functools.partial(re.compile('\\s+').sub, ' ') + +def render(stream, econtext, rcontext): + append = stream.append + getitem = econtext.__getitem__ + get = econtext.get + _i18n_domain = None + re_amp = g_re_amp + re_needs_escape = g_re_needs_escape + decode = getitem('decode') + convert = getitem('convert') + translate = getitem('translate') + _backup_attrs_35341232 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x221fe90> name=None at 221fb50> -> _value + _value = _static_35782288 + econtext['attrs'] = _value + + # <html ... (1:0) + # -------------------------------------------------------- + append(u'<html>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_text_35704976 = get('text', _marker) + + # <Expression u"'Hello world!'" (2:25)> -> _value + try: + _value = 'Hello world!' + except: + rcontext.setdefault('__error__', []).append((u"'Hello world!'", 2, 25, '<string>', _sys.exc_info()[1], )) + raise + + econtext['text'] = _value + _backup_attrs_35343320 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x220d6d0> name=None at 221f410> -> _value + _value = _static_35706576 + econtext['attrs'] = _value + + # <body ... 
(2:2) + # -------------------------------------------------------- + append(u'<body>') + + # <Expression u'text' (3:6)> -> _content_139955154988272 + try: + _content_139955154988272 = getitem('text') + except: + rcontext.setdefault('__error__', []).append((u'text', 3, 6, '<string>', _sys.exc_info()[1], )) + raise + + if (_content_139955154988272 is None): + pass + else: + if (_content_139955154988272 is False): + _content_139955154988272 = None + else: + _tt = type(_content_139955154988272) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content_139955154988272 = unicode(_content_139955154988272) + else: + try: + if (_tt is str): + _content_139955154988272 = decode(_content_139955154988272) + else: + if (_tt is not unicode): + try: + _content_139955154988272 = _content_139955154988272.__html__ + except: + _content_139955154988272 = convert(_content_139955154988272) + else: + raise RuntimeError + except RuntimeError: + _content_139955154988272 = _content_139955154988272() + else: + if ((_content_139955154988272 is not None) and (re_needs_escape(_content_139955154988272) is not None)): + if ('&' in _content_139955154988272): + if (';' in _content_139955154988272): + _content_139955154988272 = re_amp.sub('&', _content_139955154988272) + else: + _content_139955154988272 = _content_139955154988272.replace('&', '&') + if ('<' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('<', '<') + if ('>' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('>', '>') + if ('\x00' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('\x00', '"') + + # <Expression u'text' (4:5)> -> _content_139955154988272_65 + try: + _content_139955154988272_65 = getitem('text') + except: + rcontext.setdefault('__error__', []).append((u'text', 4, 5, '<string>', _sys.exc_info()[1], )) + raise + + if (_content_139955154988272_65 is None): + pass + else: + if (_content_139955154988272_65 is False): + _content_139955154988272_65 = None + else: + _tt = type(_content_139955154988272_65) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content_139955154988272_65 = unicode(_content_139955154988272_65) + else: + try: + if (_tt is str): + _content_139955154988272_65 = decode(_content_139955154988272_65) + else: + if (_tt is not unicode): + try: + _content_139955154988272_65 = _content_139955154988272_65.__html__ + except: + _content_139955154988272_65 = convert(_content_139955154988272_65) + else: + raise RuntimeError + except RuntimeError: + _content_139955154988272_65 = _content_139955154988272_65() + else: + if ((_content_139955154988272_65 is not None) and (re_needs_escape(_content_139955154988272_65) is not None)): + if ('&' in _content_139955154988272_65): + if (';' in _content_139955154988272_65): + _content_139955154988272_65 = re_amp.sub('&', _content_139955154988272_65) + else: + _content_139955154988272_65 = _content_139955154988272_65.replace('&', '&') + if ('<' in _content_139955154988272_65): + _content_139955154988272_65 = _content_139955154988272_65.replace('<', '<') + if ('>' in _content_139955154988272_65): + _content_139955154988272_65 = _content_139955154988272_65.replace('>', '>') + if ('\x00' in _content_139955154988272_65): + _content_139955154988272_65 = _content_139955154988272_65.replace('\x00', '"') + _content_139955154988272 = ('%s%s%s%s%s' % ((u'\n ' if (u'\n ' is not None) else ''), (_content_139955154988272 if (_content_139955154988272 is not None) else ''), (u'\n 
' if (u'\n ' is not None) else ''), (_content_139955154988272_65 if (_content_139955154988272_65 is not None) else ''), (u'\n ' if (u'\n ' is not None) else ''), )) + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</body>') + if (_backup_attrs_35343320 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_35343320 + if (_backup_text_35704976 is _marker): + del econtext['text'] + else: + econtext['text'] = _backup_text_35704976 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + + # <Expression u'exists: text' (6:24)> -> _condition + try: + try: + _ignore = getitem('text') + except (AttributeError, LookupError, TypeError, NameError, KeyError, ): + _condition = 0 + else: + _condition = 1 + except: + rcontext.setdefault('__error__', []).append((u'exists: text', 6, 24, '<string>', _sys.exc_info()[1], )) + raise + + if _condition: + _content_139955154988272 = u'\n bad\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + + # <Expression u'not: exists: text' (9:24)> -> _condition + try: + try: + _ignore = getitem('text') + except (AttributeError, LookupError, TypeError, NameError, KeyError, ): + _condition = 0 + else: + _condition = 1 + _condition = not _condition + except: + rcontext.setdefault('__error__', []).append((u'not: exists: text', 9, 24, '<string>', _sys.exc_info()[1], )) + raise + + if _condition: + _content_139955154988272 = u'\n ok\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</html>') + if (_backup_attrs_35341232 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_35341232 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) +pass \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/001.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/001.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/001.xml @@ -0,0 +1,4 @@ +<!DOCTYPE doc [ +<!ELEMENT doc (#PCDATA)> +]> +<doc></doc> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/002-repeat-scope.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/002-repeat-scope.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/002-repeat-scope.pt @@ -0,0 +1,8 @@ +<html> + <body> + <div tal:repeat="text ('Hello', 'Goodbye')"> + <span tal:repeat="char ('!', '.')">${text}${char}</span> + </div> + <tal:check condition="not: exists: text">ok</tal:check> + </body> +</html> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/002-repeat-scope.pt.py b/lib/Chameleon-2.22/src/chameleon/tests/inputs/002-repeat-scope.pt.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/002-repeat-scope.pt.py @@ -0,0 +1,255 @@ +# -*- coding: utf-8 -*- +pass +import sys as _sys +pass +_static_35799760 = {} +_static_35781456 = {} +_static_35800848 = {} +_static_35801104 = {} +import re +import functools +_marker = object() +g_re_amp = re.compile('&(?!([A-Za-z]+|#[0-9]+);)') +g_re_needs_escape = re.compile('[&<>\\"\\\']').search +re_whitespace = functools.partial(re.compile('\\s+').sub, ' 
') + +def render(stream, econtext, rcontext): + append = stream.append + getitem = econtext.__getitem__ + get = econtext.get + _i18n_domain = None + re_amp = g_re_amp + re_needs_escape = g_re_needs_escape + decode = getitem('decode') + convert = getitem('convert') + translate = getitem('translate') + _backup_attrs_35876448 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x2224710> name=None at 220dc10> -> _value + _value = _static_35800848 + econtext['attrs'] = _value + + # <html ... (1:0) + # -------------------------------------------------------- + append(u'<html>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_35899944 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x22242d0> name=None at 2224190> -> _value + _value = _static_35799760 + econtext['attrs'] = _value + + # <body ... (2:2) + # -------------------------------------------------------- + append(u'<body>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_text_35782352 = get('text', _marker) + + # <Expression u"('Hello', 'Goodbye')" (3:26)> -> _iterator + try: + _iterator = ('Hello', 'Goodbye', ) + except: + rcontext.setdefault('__error__', []).append((u"('Hello', 'Goodbye')", 3, 26, '<string>', _sys.exc_info()[1], )) + raise + + (_iterator, __index_35781840, ) = getitem('repeat')(u'text', _iterator) + econtext['text'] = None + for _item in _iterator: + econtext['text'] = _item + _backup_attrs_35900664 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x2224810> name=None at 2224a10> -> _value + _value = _static_35801104 + econtext['attrs'] = _value + + # <div ... (3:4) + # -------------------------------------------------------- + append(u'<div>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_char_35801040 = get('char', _marker) + + # <Expression u"('!', '.')" (4:29)> -> _iterator + try: + _iterator = ('!', '.', ) + except: + rcontext.setdefault('__error__', []).append((u"('!', '.')", 4, 29, '<string>', _sys.exc_info()[1], )) + raise + + (_iterator, __index_35780944, ) = getitem('repeat')(u'char', _iterator) + econtext['char'] = None + for _item in _iterator: + econtext['char'] = _item + _backup_attrs_35899800 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x221fb50> name=None at 221fbd0> -> _value + _value = _static_35781456 + econtext['attrs'] = _value + + # <span ... 
(4:6) + # -------------------------------------------------------- + append(u'<span>') + + # <Expression u'text' (4:43)> -> _content_139955154988272 + try: + _content_139955154988272 = getitem('text') + except: + rcontext.setdefault('__error__', []).append((u'text', 4, 43, '<string>', _sys.exc_info()[1], )) + raise + + if (_content_139955154988272 is None): + pass + else: + if (_content_139955154988272 is False): + _content_139955154988272 = None + else: + _tt = type(_content_139955154988272) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content_139955154988272 = unicode(_content_139955154988272) + else: + try: + if (_tt is str): + _content_139955154988272 = decode(_content_139955154988272) + else: + if (_tt is not unicode): + try: + _content_139955154988272 = _content_139955154988272.__html__ + except: + _content_139955154988272 = convert(_content_139955154988272) + else: + raise RuntimeError + except RuntimeError: + _content_139955154988272 = _content_139955154988272() + else: + if ((_content_139955154988272 is not None) and (re_needs_escape(_content_139955154988272) is not None)): + if ('&' in _content_139955154988272): + if (';' in _content_139955154988272): + _content_139955154988272 = re_amp.sub('&', _content_139955154988272) + else: + _content_139955154988272 = _content_139955154988272.replace('&', '&') + if ('<' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('<', '<') + if ('>' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('>', '>') + if ('\x00' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('\x00', '"') + + # <Expression u'char' (4:50)> -> _content_139955154988272_113 + try: + _content_139955154988272_113 = getitem('char') + except: + rcontext.setdefault('__error__', []).append((u'char', 4, 50, '<string>', _sys.exc_info()[1], )) + raise + + if (_content_139955154988272_113 is None): + pass + else: + if (_content_139955154988272_113 is False): + _content_139955154988272_113 = None + else: + _tt = type(_content_139955154988272_113) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content_139955154988272_113 = unicode(_content_139955154988272_113) + else: + try: + if (_tt is str): + _content_139955154988272_113 = decode(_content_139955154988272_113) + else: + if (_tt is not unicode): + try: + _content_139955154988272_113 = _content_139955154988272_113.__html__ + except: + _content_139955154988272_113 = convert(_content_139955154988272_113) + else: + raise RuntimeError + except RuntimeError: + _content_139955154988272_113 = _content_139955154988272_113() + else: + if ((_content_139955154988272_113 is not None) and (re_needs_escape(_content_139955154988272_113) is not None)): + if ('&' in _content_139955154988272_113): + if (';' in _content_139955154988272_113): + _content_139955154988272_113 = re_amp.sub('&', _content_139955154988272_113) + else: + _content_139955154988272_113 = _content_139955154988272_113.replace('&', '&') + if ('<' in _content_139955154988272_113): + _content_139955154988272_113 = _content_139955154988272_113.replace('<', '<') + if ('>' in _content_139955154988272_113): + _content_139955154988272_113 = _content_139955154988272_113.replace('>', '>') + if ('\x00' in _content_139955154988272_113): + _content_139955154988272_113 = _content_139955154988272_113.replace('\x00', '"') + _content_139955154988272 = ('%s%s' % ((_content_139955154988272 if (_content_139955154988272 is not None) else ''), 
(_content_139955154988272_113 if (_content_139955154988272_113 is not None) else ''), )) + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</span>') + if (_backup_attrs_35899800 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_35899800 + __index_35780944 -= 1 + if (__index_35780944 > 0): + append('\n ') + if (_backup_char_35801040 is _marker): + del econtext['char'] + else: + econtext['char'] = _backup_char_35801040 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</div>') + if (_backup_attrs_35900664 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_35900664 + __index_35781840 -= 1 + if (__index_35781840 > 0): + append('\n ') + if (_backup_text_35782352 is _marker): + del econtext['text'] + else: + econtext['text'] = _backup_text_35782352 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + + # <Expression u'not: exists: text' (6:26)> -> _condition + try: + try: + _ignore = getitem('text') + except (AttributeError, LookupError, TypeError, NameError, KeyError, ): + _condition = 0 + else: + _condition = 1 + _condition = not _condition + except: + rcontext.setdefault('__error__', []).append((u'not: exists: text', 6, 26, '<string>', _sys.exc_info()[1], )) + raise + + if _condition: + _content_139955154988272 = u'ok' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</body>') + if (_backup_attrs_35899944 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_35899944 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</html>') + if (_backup_attrs_35876448 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_35876448 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) +pass \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/002.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/002.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/002.xml @@ -0,0 +1,4 @@ +<!DOCTYPE doc [ +<!ELEMENT doc (#PCDATA)> +]> +<doc ></doc> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/003-content.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/003-content.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/003-content.pt @@ -0,0 +1,17 @@ +<html> + <body> + <div tal:content="'Hello world!'" /> + <div tal:content="'Hello world!'" />1 + 2<div tal:content="'Hello world!'" /> + <div tal:content="'Hello world!'" />3 + <div tal:content="'Hello world!'">4</div>5 + 6<div tal:content="'Hello world!'"></div> + <div tal:content="1" /> + <div tal:content="1.0" /> + <div tal:content="True" /> + <div tal:content="False" /> + <div tal:content="0" /> + <div tal:content="None" /> + <div tal:replace="content" /> + </body> +</html> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/003-content.pt.py b/lib/Chameleon-2.22/src/chameleon/tests/inputs/003-content.pt.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/003-content.pt.py @@ -0,0 +1,1158 @@ +# -*- coding: utf-8 -*- +pass 
+from chameleon.utils import Placeholder as _Placeholder +import sys as _sys +pass +_static_35889296 = {} +_static_36669328 = {} +_static_35932752 = {} +_static_35780752 = {} +_static_35829264 = {} +_static_35830160 = {} +_static_36668752 = {} +_static_35800464 = {} +_static_35829776 = {} +_static_36669776 = {} +_static_35831056 = {} +_static_35932624 = {} +_static_35883024 = {} +_static_35889872 = {} +_static_35882896 = {} +_marker_default = _Placeholder() +import re +import functools +_marker = object() +g_re_amp = re.compile('&(?!([A-Za-z]+|#[0-9]+);)') +g_re_needs_escape = re.compile('[&<>\\"\\\']').search +re_whitespace = functools.partial(re.compile('\\s+').sub, ' ') + +def render(stream, econtext, rcontext): + append = stream.append + getitem = econtext.__getitem__ + get = econtext.get + _i18n_domain = None + re_amp = g_re_amp + re_needs_escape = g_re_needs_escape + decode = getitem('decode') + convert = getitem('convert') + translate = getitem('translate') + _backup_attrs_36729128 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x2238790> name=None at 2238590> -> _value + _value = _static_35882896 + econtext['attrs'] = _value + + # <html ... (1:0) + # -------------------------------------------------------- + append(u'<html>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_36718280 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x2244a50> name=None at 2244990> -> _value + _value = _static_35932752 + econtext['attrs'] = _value + + # <body ... (2:2) + # -------------------------------------------------------- + append(u'<body>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_36722808 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x22449d0> name=None at 2244d50> -> _value + _value = _static_35932624 + econtext['attrs'] = _value + + # <div ... (3:4) + # -------------------------------------------------------- + append(u'<div>') + _backup_default_36723816 = get('default', _marker) + + # <Marker name='default' at 2244b90> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u"'Hello world!'" (3:22)> -> _cache_35933712 + try: + _cache_35933712 = 'Hello world!' 
+ except: + rcontext.setdefault('__error__', []).append((u"'Hello world!'", 3, 22, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u"'Hello world!'" (3:22)> value=<Marker name='default' at 2244e50> at 2244d10> -> _condition + _expression = _cache_35933712 + + # <Marker name='default' at 2244e50> -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + pass + else: + _content = _cache_35933712 + if (_content is None): + pass + else: + if (_content is False): + _content = None + else: + _tt = type(_content) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content = unicode(_content) + else: + try: + if (_tt is str): + _content = decode(_content) + else: + if (_tt is not unicode): + try: + _content = _content.__html__ + except: + _content = convert(_content) + else: + raise RuntimeError + except RuntimeError: + _content = _content() + else: + if ((_content is not None) and (re_needs_escape(_content) is not None)): + if ('&' in _content): + if (';' in _content): + _content = re_amp.sub('&', _content) + else: + _content = _content.replace('&', '&') + if ('<' in _content): + _content = _content.replace('<', '<') + if ('>' in _content): + _content = _content.replace('>', '>') + if ('\x00' in _content): + _content = _content.replace('\x00', '"') + if (_content is not None): + append(_content) + if (_backup_default_36723816 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_36723816 + append(u'</div>') + if (_backup_attrs_36722808 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_36722808 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_36725608 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x222b810> name=None at 222b050> -> _value + _value = _static_35829776 + econtext['attrs'] = _value + + # <div ... (4:4) + # -------------------------------------------------------- + append(u'<div>') + _backup_default_36723960 = get('default', _marker) + + # <Marker name='default' at 2244e90> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u"'Hello world!'" (4:22)> -> _cache_35932816 + try: + _cache_35932816 = 'Hello world!' 
+ except: + rcontext.setdefault('__error__', []).append((u"'Hello world!'", 4, 22, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u"'Hello world!'" (4:22)> value=<Marker name='default' at 2244650> at 2244850> -> _condition + _expression = _cache_35932816 + + # <Marker name='default' at 2244650> -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + pass + else: + _content = _cache_35932816 + if (_content is None): + pass + else: + if (_content is False): + _content = None + else: + _tt = type(_content) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content = unicode(_content) + else: + try: + if (_tt is str): + _content = decode(_content) + else: + if (_tt is not unicode): + try: + _content = _content.__html__ + except: + _content = convert(_content) + else: + raise RuntimeError + except RuntimeError: + _content = _content() + else: + if ((_content is not None) and (re_needs_escape(_content) is not None)): + if ('&' in _content): + if (';' in _content): + _content = re_amp.sub('&', _content) + else: + _content = _content.replace('&', '&') + if ('<' in _content): + _content = _content.replace('<', '<') + if ('>' in _content): + _content = _content.replace('>', '>') + if ('\x00' in _content): + _content = _content.replace('\x00', '"') + if (_content is not None): + append(_content) + if (_backup_default_36723960 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_36723960 + append(u'</div>') + if (_backup_attrs_36725608 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_36725608 + _content_139955154988272 = u'1\n 2' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_36728632 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x222b610> name=None at 222b6d0> -> _value + _value = _static_35829264 + econtext['attrs'] = _value + + # <div ... (5:4) + # -------------------------------------------------------- + append(u'<div>') + _backup_default_36726760 = get('default', _marker) + + # <Marker name='default' at 222b510> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u"'Hello world!'" (5:22)> -> _cache_35828624 + try: + _cache_35828624 = 'Hello world!' 
+ except: + rcontext.setdefault('__error__', []).append((u"'Hello world!'", 5, 22, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u"'Hello world!'" (5:22)> value=<Marker name='default' at 222b0d0> at 222b190> -> _condition + _expression = _cache_35828624 + + # <Marker name='default' at 222b0d0> -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + pass + else: + _content = _cache_35828624 + if (_content is None): + pass + else: + if (_content is False): + _content = None + else: + _tt = type(_content) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content = unicode(_content) + else: + try: + if (_tt is str): + _content = decode(_content) + else: + if (_tt is not unicode): + try: + _content = _content.__html__ + except: + _content = convert(_content) + else: + raise RuntimeError + except RuntimeError: + _content = _content() + else: + if ((_content is not None) and (re_needs_escape(_content) is not None)): + if ('&' in _content): + if (';' in _content): + _content = re_amp.sub('&', _content) + else: + _content = _content.replace('&', '&') + if ('<' in _content): + _content = _content.replace('<', '<') + if ('>' in _content): + _content = _content.replace('>', '>') + if ('\x00' in _content): + _content = _content.replace('\x00', '"') + if (_content is not None): + append(_content) + if (_backup_default_36726760 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_36726760 + append(u'</div>') + if (_backup_attrs_36728632 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_36728632 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_35918048 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x222b990> name=None at 222b110> -> _value + _value = _static_35830160 + econtext['attrs'] = _value + + # <div ... (6:4) + # -------------------------------------------------------- + append(u'<div>') + _backup_default_35918624 = get('default', _marker) + + # <Marker name='default' at 222bfd0> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u"'Hello world!'" (6:22)> -> _cache_35828432 + try: + _cache_35828432 = 'Hello world!' 
+ except: + rcontext.setdefault('__error__', []).append((u"'Hello world!'", 6, 22, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u"'Hello world!'" (6:22)> value=<Marker name='default' at 222b090> at 222bad0> -> _condition + _expression = _cache_35828432 + + # <Marker name='default' at 222b090> -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + pass + else: + _content = _cache_35828432 + if (_content is None): + pass + else: + if (_content is False): + _content = None + else: + _tt = type(_content) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content = unicode(_content) + else: + try: + if (_tt is str): + _content = decode(_content) + else: + if (_tt is not unicode): + try: + _content = _content.__html__ + except: + _content = convert(_content) + else: + raise RuntimeError + except RuntimeError: + _content = _content() + else: + if ((_content is not None) and (re_needs_escape(_content) is not None)): + if ('&' in _content): + if (';' in _content): + _content = re_amp.sub('&', _content) + else: + _content = _content.replace('&', '&') + if ('<' in _content): + _content = _content.replace('<', '<') + if ('>' in _content): + _content = _content.replace('>', '>') + if ('\x00' in _content): + _content = _content.replace('\x00', '"') + if (_content is not None): + append(_content) + if (_backup_default_35918624 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_35918624 + append(u'</div>') + if (_backup_attrs_35918048 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_35918048 + _content_139955154988272 = u'3\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_36622920 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x222bd10> name=None at 222b750> -> _value + _value = _static_35831056 + econtext['attrs'] = _value + + # <div ... (7:4) + # -------------------------------------------------------- + append(u'<div>') + _backup_default_35917904 = get('default', _marker) + + # <Marker name='default' at 222b890> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u"'Hello world!'" (7:22)> -> _cache_35830544 + try: + _cache_35830544 = 'Hello world!' 
+ except: + rcontext.setdefault('__error__', []).append((u"'Hello world!'", 7, 22, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u"'Hello world!'" (7:22)> value=<Marker name='default' at 222b910> at 222ba10> -> _condition + _expression = _cache_35830544 + + # <Marker name='default' at 222b910> -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + _content_139955154988272 = u'4' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + else: + _content = _cache_35830544 + if (_content is None): + pass + else: + if (_content is False): + _content = None + else: + _tt = type(_content) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content = unicode(_content) + else: + try: + if (_tt is str): + _content = decode(_content) + else: + if (_tt is not unicode): + try: + _content = _content.__html__ + except: + _content = convert(_content) + else: + raise RuntimeError + except RuntimeError: + _content = _content() + else: + if ((_content is not None) and (re_needs_escape(_content) is not None)): + if ('&' in _content): + if (';' in _content): + _content = re_amp.sub('&', _content) + else: + _content = _content.replace('&', '&') + if ('<' in _content): + _content = _content.replace('<', '<') + if ('>' in _content): + _content = _content.replace('>', '>') + if ('\x00' in _content): + _content = _content.replace('\x00', '"') + if (_content is not None): + append(_content) + if (_backup_default_35917904 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_35917904 + append(u'</div>') + if (_backup_attrs_36622920 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_36622920 + _content_139955154988272 = u'5\n 6' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_36625656 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x2224590> name=None at 22243d0> -> _value + _value = _static_35800464 + econtext['attrs'] = _value + + # <div ... (8:4) + # -------------------------------------------------------- + append(u'<div>') + _backup_default_36625512 = get('default', _marker) + + # <Marker name='default' at 22242d0> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u"'Hello world!'" (8:22)> -> _cache_35799696 + try: + _cache_35799696 = 'Hello world!' 
+ except: + rcontext.setdefault('__error__', []).append((u"'Hello world!'", 8, 22, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u"'Hello world!'" (8:22)> value=<Marker name='default' at 2224150> at 2224fd0> -> _condition + _expression = _cache_35799696 + + # <Marker name='default' at 2224150> -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + pass + else: + _content = _cache_35799696 + if (_content is None): + pass + else: + if (_content is False): + _content = None + else: + _tt = type(_content) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content = unicode(_content) + else: + try: + if (_tt is str): + _content = decode(_content) + else: + if (_tt is not unicode): + try: + _content = _content.__html__ + except: + _content = convert(_content) + else: + raise RuntimeError + except RuntimeError: + _content = _content() + else: + if ((_content is not None) and (re_needs_escape(_content) is not None)): + if ('&' in _content): + if (';' in _content): + _content = re_amp.sub('&', _content) + else: + _content = _content.replace('&', '&') + if ('<' in _content): + _content = _content.replace('<', '<') + if ('>' in _content): + _content = _content.replace('>', '>') + if ('\x00' in _content): + _content = _content.replace('\x00', '"') + if (_content is not None): + append(_content) + if (_backup_default_36625512 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_36625512 + append(u'</div>') + if (_backup_attrs_36625656 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_36625656 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_36629968 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x22f8550> name=None at 22f82d0> -> _value + _value = _static_36668752 + econtext['attrs'] = _value + + # <div ... 
(9:4) + # -------------------------------------------------------- + append(u'<div>') + _backup_default_36638016 = get('default', _marker) + + # <Marker name='default' at 2224dd0> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u'1' (9:22)> -> _cache_35801168 + try: + _cache_35801168 = 1 + except: + rcontext.setdefault('__error__', []).append((u'1', 9, 22, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u'1' (9:22)> value=<Marker name='default' at 22249d0> at 2224990> -> _condition + _expression = _cache_35801168 + + # <Marker name='default' at 22249d0> -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + pass + else: + _content = _cache_35801168 + if (_content is None): + pass + else: + if (_content is False): + _content = None + else: + _tt = type(_content) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content = unicode(_content) + else: + try: + if (_tt is str): + _content = decode(_content) + else: + if (_tt is not unicode): + try: + _content = _content.__html__ + except: + _content = convert(_content) + else: + raise RuntimeError + except RuntimeError: + _content = _content() + else: + if ((_content is not None) and (re_needs_escape(_content) is not None)): + if ('&' in _content): + if (';' in _content): + _content = re_amp.sub('&', _content) + else: + _content = _content.replace('&', '&') + if ('<' in _content): + _content = _content.replace('<', '<') + if ('>' in _content): + _content = _content.replace('>', '>') + if ('\x00' in _content): + _content = _content.replace('\x00', '"') + if (_content is not None): + append(_content) + if (_backup_default_36638016 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_36638016 + append(u'</div>') + if (_backup_attrs_36629968 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_36629968 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_36645704 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x22f8790> name=None at 22f8750> -> _value + _value = _static_36669328 + econtext['attrs'] = _value + + # <div ... 
(10:4) + # -------------------------------------------------------- + append(u'<div>') + _backup_default_36644696 = get('default', _marker) + + # <Marker name='default' at 22f8350> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u'1.0' (10:22)> -> _cache_36667600 + try: + _cache_36667600 = 1.0 + except: + rcontext.setdefault('__error__', []).append((u'1.0', 10, 22, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u'1.0' (10:22)> value=<Marker name='default' at 22f8150> at 22f8490> -> _condition + _expression = _cache_36667600 + + # <Marker name='default' at 22f8150> -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + pass + else: + _content = _cache_36667600 + if (_content is None): + pass + else: + if (_content is False): + _content = None + else: + _tt = type(_content) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content = unicode(_content) + else: + try: + if (_tt is str): + _content = decode(_content) + else: + if (_tt is not unicode): + try: + _content = _content.__html__ + except: + _content = convert(_content) + else: + raise RuntimeError + except RuntimeError: + _content = _content() + else: + if ((_content is not None) and (re_needs_escape(_content) is not None)): + if ('&' in _content): + if (';' in _content): + _content = re_amp.sub('&', _content) + else: + _content = _content.replace('&', '&') + if ('<' in _content): + _content = _content.replace('<', '<') + if ('>' in _content): + _content = _content.replace('>', '>') + if ('\x00' in _content): + _content = _content.replace('\x00', '"') + if (_content is not None): + append(_content) + if (_backup_default_36644696 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_36644696 + append(u'</div>') + if (_backup_attrs_36645704 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_36645704 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_35900664 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x22f8950> name=None at 22f8890> -> _value + _value = _static_36669776 + econtext['attrs'] = _value + + # <div ... 
(11:4) + # -------------------------------------------------------- + append(u'<div>') + _backup_default_35898288 = get('default', _marker) + + # <Marker name='default' at 22f8bd0> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u'True' (11:22)> -> _cache_36671120 + try: + _cache_36671120 = True + except: + rcontext.setdefault('__error__', []).append((u'True', 11, 22, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u'True' (11:22)> value=<Marker name='default' at 22f8f10> at 22f8f50> -> _condition + _expression = _cache_36671120 + + # <Marker name='default' at 22f8f10> -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + pass + else: + _content = _cache_36671120 + if (_content is None): + pass + else: + if (_content is False): + _content = None + else: + _tt = type(_content) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content = unicode(_content) + else: + try: + if (_tt is str): + _content = decode(_content) + else: + if (_tt is not unicode): + try: + _content = _content.__html__ + except: + _content = convert(_content) + else: + raise RuntimeError + except RuntimeError: + _content = _content() + else: + if ((_content is not None) and (re_needs_escape(_content) is not None)): + if ('&' in _content): + if (';' in _content): + _content = re_amp.sub('&', _content) + else: + _content = _content.replace('&', '&') + if ('<' in _content): + _content = _content.replace('<', '<') + if ('>' in _content): + _content = _content.replace('>', '>') + if ('\x00' in _content): + _content = _content.replace('\x00', '"') + if (_content is not None): + append(_content) + if (_backup_default_35898288 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_35898288 + append(u'</div>') + if (_backup_attrs_35900664 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_35900664 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_36634424 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x2238810> name=None at 2238290> -> _value + _value = _static_35883024 + econtext['attrs'] = _value + + # <div ... 
(12:4) + # -------------------------------------------------------- + append(u'<div>') + _backup_default_35900232 = get('default', _marker) + + # <Marker name='default' at 22f84d0> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u'False' (12:22)> -> _cache_36668816 + try: + _cache_36668816 = False + except: + rcontext.setdefault('__error__', []).append((u'False', 12, 22, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u'False' (12:22)> value=<Marker name='default' at 22f8c90> at 22f8c50> -> _condition + _expression = _cache_36668816 + + # <Marker name='default' at 22f8c90> -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + pass + else: + _content = _cache_36668816 + if (_content is None): + pass + else: + if (_content is False): + _content = None + else: + _tt = type(_content) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content = unicode(_content) + else: + try: + if (_tt is str): + _content = decode(_content) + else: + if (_tt is not unicode): + try: + _content = _content.__html__ + except: + _content = convert(_content) + else: + raise RuntimeError + except RuntimeError: + _content = _content() + else: + if ((_content is not None) and (re_needs_escape(_content) is not None)): + if ('&' in _content): + if (';' in _content): + _content = re_amp.sub('&', _content) + else: + _content = _content.replace('&', '&') + if ('<' in _content): + _content = _content.replace('<', '<') + if ('>' in _content): + _content = _content.replace('>', '>') + if ('\x00' in _content): + _content = _content.replace('\x00', '"') + if (_content is not None): + append(_content) + if (_backup_default_35900232 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_35900232 + append(u'</div>') + if (_backup_attrs_36634424 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_36634424 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_35898792 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x221f890> name=None at 221ff90> -> _value + _value = _static_35780752 + econtext['attrs'] = _value + + # <div ... 
(13:4) + # -------------------------------------------------------- + append(u'<div>') + _backup_default_36721512 = get('default', _marker) + + # <Marker name='default' at 221fcd0> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u'0' (13:22)> -> _cache_35708496 + try: + _cache_35708496 = 0 + except: + rcontext.setdefault('__error__', []).append((u'0', 13, 22, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u'0' (13:22)> value=<Marker name='default' at 220ddd0> at 220dd50> -> _condition + _expression = _cache_35708496 + + # <Marker name='default' at 220ddd0> -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + pass + else: + _content = _cache_35708496 + if (_content is None): + pass + else: + if (_content is False): + _content = None + else: + _tt = type(_content) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content = unicode(_content) + else: + try: + if (_tt is str): + _content = decode(_content) + else: + if (_tt is not unicode): + try: + _content = _content.__html__ + except: + _content = convert(_content) + else: + raise RuntimeError + except RuntimeError: + _content = _content() + else: + if ((_content is not None) and (re_needs_escape(_content) is not None)): + if ('&' in _content): + if (';' in _content): + _content = re_amp.sub('&', _content) + else: + _content = _content.replace('&', '&') + if ('<' in _content): + _content = _content.replace('<', '<') + if ('>' in _content): + _content = _content.replace('>', '>') + if ('\x00' in _content): + _content = _content.replace('\x00', '"') + if (_content is not None): + append(_content) + if (_backup_default_36721512 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_36721512 + append(u'</div>') + if (_backup_attrs_35898792 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_35898792 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_35898504 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x223a090> name=None at 223a050> -> _value + _value = _static_35889296 + econtext['attrs'] = _value + + # <div ... 
(14:4) + # -------------------------------------------------------- + append(u'<div>') + _backup_default_35897784 = get('default', _marker) + + # <Marker name='default' at 221f610> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u'None' (14:22)> -> _cache_35781136 + try: + _cache_35781136 = None + except: + rcontext.setdefault('__error__', []).append((u'None', 14, 22, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u'None' (14:22)> value=<Marker name='default' at 221ff10> at 221f490> -> _condition + _expression = _cache_35781136 + + # <Marker name='default' at 221ff10> -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + pass + else: + _content = _cache_35781136 + if (_content is None): + pass + else: + if (_content is False): + _content = None + else: + _tt = type(_content) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content = unicode(_content) + else: + try: + if (_tt is str): + _content = decode(_content) + else: + if (_tt is not unicode): + try: + _content = _content.__html__ + except: + _content = convert(_content) + else: + raise RuntimeError + except RuntimeError: + _content = _content() + else: + if ((_content is not None) and (re_needs_escape(_content) is not None)): + if ('&' in _content): + if (';' in _content): + _content = re_amp.sub('&', _content) + else: + _content = _content.replace('&', '&') + if ('<' in _content): + _content = _content.replace('<', '<') + if ('>' in _content): + _content = _content.replace('>', '>') + if ('\x00' in _content): + _content = _content.replace('\x00', '"') + if (_content is not None): + append(_content) + if (_backup_default_35897784 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_35897784 + append(u'</div>') + if (_backup_attrs_35898504 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_35898504 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_default_35899440 = get('default', _marker) + + # <Marker name='default' at 223a590> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u'content' (15:22)> -> _cache_35890192 + try: + _cache_35890192 = getitem('content') + except: + rcontext.setdefault('__error__', []).append((u'content', 15, 22, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u'content' (15:22)> value=<Marker name='default' at 223a490> at 223a4d0> -> _condition + _expression = _cache_35890192 + + # <Marker name='default' at 223a490> -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + _backup_attrs_35901024 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x223a2d0> name=None at 223a290> -> _value + _value = _static_35889872 + econtext['attrs'] = _value + + # <div ... 
(15:4) + # -------------------------------------------------------- + append(u'<div />') + if (_backup_attrs_35901024 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_35901024 + else: + _content = _cache_35890192 + if (_content is None): + pass + else: + if (_content is False): + _content = None + else: + _tt = type(_content) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content = unicode(_content) + else: + try: + if (_tt is str): + _content = decode(_content) + else: + if (_tt is not unicode): + try: + _content = _content.__html__ + except: + _content = convert(_content) + else: + raise RuntimeError + except RuntimeError: + _content = _content() + else: + if ((_content is not None) and (re_needs_escape(_content) is not None)): + if ('&' in _content): + if (';' in _content): + _content = re_amp.sub('&', _content) + else: + _content = _content.replace('&', '&') + if ('<' in _content): + _content = _content.replace('<', '<') + if ('>' in _content): + _content = _content.replace('>', '>') + if ('\x00' in _content): + _content = _content.replace('\x00', '"') + if (_content is not None): + append(_content) + if (_backup_default_35899440 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_35899440 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</body>') + if (_backup_attrs_36718280 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_36718280 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</html>') + if (_backup_attrs_36729128 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_36729128 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) +pass \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/003.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/003.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/003.xml @@ -0,0 +1,4 @@ +<!DOCTYPE doc [ +<!ELEMENT doc (#PCDATA)> +]> +<doc></doc > diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/004-attributes.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/004-attributes.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/004-attributes.pt @@ -0,0 +1,24 @@ +<html> + <body> + <span tal:attributes="class 'hello'" /> + <span class="goodbye" tal:attributes="class 'hello'" /> + <span CLASS="goodbye" tal:attributes="class 'hello'" /> + <span tal:attributes="class None" /> + <span a="1" b="2" c="3" tal:attributes="a None" /> + <span a="1" b="2" c="3" tal:attributes="b None" /> + <span a="1" b="2" c="3" tal:attributes="c None" /> + <span a="1" b="2" c="3" tal:attributes="b None; c None" /> + <span a="1" b="2" c="3" tal:attributes="b string:;;" /> + <span a="1" b="2" c="3" tal:attributes="b string:&" /> + <span class="hello" tal:attributes="class 'goodbye'" /> + <span class="hello" tal:attributes="class '"goodbye"'" /> + <span class="hello" tal:attributes="class '\'goodbye\''" /> + <span class='hello' tal:attributes="class '\'goodbye\''" /> + <span tal:attributes="{'class': 'goodbye'}" /> + <span class="hello" tal:attributes="{'class': 'goodbye'}" /> + <span a="1" class="hello" tal:attributes="{'class': 'goodbye'}" /> + <span tal:attributes="{'class': '"goodbye"'}" /> + <span 
tal:attributes="class 'hello'; {'class': '"goodbye"'}" /> + <span tal:attributes="{'class': '"goodbye"'}; class 'hello'" /> + </body> +</html> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/004-attributes.pt.py b/lib/Chameleon-2.22/src/chameleon/tests/inputs/004-attributes.pt.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/004-attributes.pt.py @@ -0,0 +1,789 @@ +# -*- coding: utf-8 -*- +pass +import sys as _sys +pass +_static_36671184 = {u'a': u'1', u'c': u'3', u'b': u'2', } +_static_35828944 = {} +_static_35829584 = {u'class': u'None', } +_static_35889552 = {u'a': u'1', u'c': u'3', u'b': u'2', } +_static_35801168 = {u'a': u'1', u'c': u'3', u'b': u'2', } +_static_35707920 = {u'a': u'1', u'c': u'3', u'b': u'2', } +_static_35800848 = {u'a': u'1', u'c': u'3', u'b': u'2', } +_static_35830736 = {u'class': u"'hello'", } +_static_35890768 = {u'class': u'hello', } +_static_36668176 = {u'a': u'1', u'c': u'3', u'b': u'2', } +_static_35828624 = {} +import re +import functools +_marker = object() +g_re_amp = re.compile('&(?!([A-Za-z]+|#[0-9]+);)') +g_re_needs_escape = re.compile('[&<>\\"\\\']').search +re_whitespace = functools.partial(re.compile('\\s+').sub, ' ') + +def render(stream, econtext, rcontext): + append = stream.append + getitem = econtext.__getitem__ + get = econtext.get + _i18n_domain = None + re_amp = g_re_amp + re_needs_escape = g_re_needs_escape + decode = getitem('decode') + convert = getitem('convert') + translate = getitem('translate') + _backup_attrs_39099424 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x222b4d0> name=None at 222b850> -> _value + _value = _static_35828944 + econtext['attrs'] = _value + + # <html ... (1:0) + # -------------------------------------------------------- + append(u'<html>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_36624936 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x222b390> name=None at 222b250> -> _value + _value = _static_35828624 + econtext['attrs'] = _value + + # <body ... (2:2) + # -------------------------------------------------------- + append(u'<body>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_36627376 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x222bbd0> name=None at 222b990> -> _value + _value = _static_35830736 + econtext['attrs'] = _value + + # <span ... 
(3:4) + # -------------------------------------------------------- + append(u'<span') + _backup_default_36629176 = get('default', _marker) + _value = None + econtext['default'] = _value + + # <Expression u"'hello'" (3:32)> -> _attr_class + try: + _attr_class = 'hello' + except: + rcontext.setdefault('__error__', []).append((u"'hello'", 3, 32, '<string>', _sys.exc_info()[1], )) + raise + + if (_attr_class is None): + pass + else: + if (_attr_class is False): + _attr_class = None + else: + _tt = type(_attr_class) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _attr_class = unicode(_attr_class) + else: + try: + if (_tt is str): + _attr_class = decode(_attr_class) + else: + if (_tt is not unicode): + try: + _attr_class = _attr_class.__html__ + except: + _attr_class = convert(_attr_class) + else: + raise RuntimeError + except RuntimeError: + _attr_class = _attr_class() + else: + if ((_attr_class is not None) and (re_needs_escape(_attr_class) is not None)): + if ('&' in _attr_class): + if (';' in _attr_class): + _attr_class = re_amp.sub('&', _attr_class) + else: + _attr_class = _attr_class.replace('&', '&') + if ('<' in _attr_class): + _attr_class = _attr_class.replace('<', '<') + if ('>' in _attr_class): + _attr_class = _attr_class.replace('>', '>') + if ('"' in _attr_class): + _attr_class = _attr_class.replace('"', '"') + if (_attr_class is not None): + append((u' class="%s"' % _attr_class)) + if (_backup_default_36629176 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_36629176 + append(u' />') + if (_backup_attrs_36627376 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_36627376 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_36724032 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x222b750> name=None at 222bf90> -> _value + _value = _static_35829584 + econtext['attrs'] = _value + + # <span ... 
(4:4) + # -------------------------------------------------------- + append(u'<span') + _backup_default_36724464 = get('default', _marker) + _value = None + econtext['default'] = _value + + # <Expression u'None' (4:32)> -> _attr_class + try: + _attr_class = None + except: + rcontext.setdefault('__error__', []).append((u'None', 4, 32, '<string>', _sys.exc_info()[1], )) + raise + + if (_attr_class is None): + pass + else: + if (_attr_class is False): + _attr_class = None + else: + _tt = type(_attr_class) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _attr_class = unicode(_attr_class) + else: + try: + if (_tt is str): + _attr_class = decode(_attr_class) + else: + if (_tt is not unicode): + try: + _attr_class = _attr_class.__html__ + except: + _attr_class = convert(_attr_class) + else: + raise RuntimeError + except RuntimeError: + _attr_class = _attr_class() + else: + if ((_attr_class is not None) and (re_needs_escape(_attr_class) is not None)): + if ('&' in _attr_class): + if (';' in _attr_class): + _attr_class = re_amp.sub('&', _attr_class) + else: + _attr_class = _attr_class.replace('&', '&') + if ('<' in _attr_class): + _attr_class = _attr_class.replace('<', '<') + if ('>' in _attr_class): + _attr_class = _attr_class.replace('>', '>') + if ('"' in _attr_class): + _attr_class = _attr_class.replace('"', '"') + if (_attr_class is not None): + append((u' class="%s"' % _attr_class)) + if (_backup_default_36724464 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_36724464 + append(u' />') + if (_backup_attrs_36724032 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_36724032 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_36721872 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x2224710> name=None at 2224090> -> _value + _value = _static_35800848 + econtext['attrs'] = _value + + # <span ... 
(5:4) + # -------------------------------------------------------- + append(u'<span') + _backup_default_36721584 = get('default', _marker) + _value = u'1' + econtext['default'] = _value + + # <Expression u'None' (5:46)> -> _attr_a + try: + _attr_a = None + except: + rcontext.setdefault('__error__', []).append((u'None', 5, 46, '<string>', _sys.exc_info()[1], )) + raise + + if (_attr_a is None): + pass + else: + if (_attr_a is False): + _attr_a = None + else: + _tt = type(_attr_a) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _attr_a = unicode(_attr_a) + else: + try: + if (_tt is str): + _attr_a = decode(_attr_a) + else: + if (_tt is not unicode): + try: + _attr_a = _attr_a.__html__ + except: + _attr_a = convert(_attr_a) + else: + raise RuntimeError + except RuntimeError: + _attr_a = _attr_a() + else: + if ((_attr_a is not None) and (re_needs_escape(_attr_a) is not None)): + if ('&' in _attr_a): + if (';' in _attr_a): + _attr_a = re_amp.sub('&', _attr_a) + else: + _attr_a = _attr_a.replace('&', '&') + if ('<' in _attr_a): + _attr_a = _attr_a.replace('<', '<') + if ('>' in _attr_a): + _attr_a = _attr_a.replace('>', '>') + if (u'"' in _attr_a): + _attr_a = _attr_a.replace(u'"', '"') + if (_attr_a is not None): + append((u' a="%s"' % _attr_a)) + if (_backup_default_36721584 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_36721584 + _attr_b = u'2' + if (_attr_b is not None): + append((u' b="%s"' % _attr_b)) + _attr_c = u'3' + if (_attr_c is not None): + append((u' c="%s"' % _attr_c)) + append(u' />') + if (_backup_attrs_36721872 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_36721872 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_36722376 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x2224850> name=None at 2224050> -> _value + _value = _static_35801168 + econtext['attrs'] = _value + + # <span ... 
(6:4) + # -------------------------------------------------------- + append(u'<span') + _attr_a = u'1' + if (_attr_a is not None): + append((u' a="%s"' % _attr_a)) + _backup_default_36722520 = get('default', _marker) + _value = u'2' + econtext['default'] = _value + + # <Expression u'None' (6:46)> -> _attr_b + try: + _attr_b = None + except: + rcontext.setdefault('__error__', []).append((u'None', 6, 46, '<string>', _sys.exc_info()[1], )) + raise + + if (_attr_b is None): + pass + else: + if (_attr_b is False): + _attr_b = None + else: + _tt = type(_attr_b) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _attr_b = unicode(_attr_b) + else: + try: + if (_tt is str): + _attr_b = decode(_attr_b) + else: + if (_tt is not unicode): + try: + _attr_b = _attr_b.__html__ + except: + _attr_b = convert(_attr_b) + else: + raise RuntimeError + except RuntimeError: + _attr_b = _attr_b() + else: + if ((_attr_b is not None) and (re_needs_escape(_attr_b) is not None)): + if ('&' in _attr_b): + if (';' in _attr_b): + _attr_b = re_amp.sub('&', _attr_b) + else: + _attr_b = _attr_b.replace('&', '&') + if ('<' in _attr_b): + _attr_b = _attr_b.replace('<', '<') + if ('>' in _attr_b): + _attr_b = _attr_b.replace('>', '>') + if (u'"' in _attr_b): + _attr_b = _attr_b.replace(u'"', '"') + if (_attr_b is not None): + append((u' b="%s"' % _attr_b)) + if (_backup_default_36722520 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_36722520 + _attr_c = u'3' + if (_attr_c is not None): + append((u' c="%s"' % _attr_c)) + append(u' />') + if (_backup_attrs_36722376 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_36722376 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_36723312 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x22f8310> name=None at 22f8650> -> _value + _value = _static_36668176 + econtext['attrs'] = _value + + # <span ... 
(7:4) + # -------------------------------------------------------- + append(u'<span') + _attr_a = u'1' + if (_attr_a is not None): + append((u' a="%s"' % _attr_a)) + _attr_b = u'2' + if (_attr_b is not None): + append((u' b="%s"' % _attr_b)) + _backup_default_36723240 = get('default', _marker) + _value = u'3' + econtext['default'] = _value + + # <Expression u'None' (7:46)> -> _attr_c + try: + _attr_c = None + except: + rcontext.setdefault('__error__', []).append((u'None', 7, 46, '<string>', _sys.exc_info()[1], )) + raise + + if (_attr_c is None): + pass + else: + if (_attr_c is False): + _attr_c = None + else: + _tt = type(_attr_c) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _attr_c = unicode(_attr_c) + else: + try: + if (_tt is str): + _attr_c = decode(_attr_c) + else: + if (_tt is not unicode): + try: + _attr_c = _attr_c.__html__ + except: + _attr_c = convert(_attr_c) + else: + raise RuntimeError + except RuntimeError: + _attr_c = _attr_c() + else: + if ((_attr_c is not None) and (re_needs_escape(_attr_c) is not None)): + if ('&' in _attr_c): + if (';' in _attr_c): + _attr_c = re_amp.sub('&', _attr_c) + else: + _attr_c = _attr_c.replace('&', '&') + if ('<' in _attr_c): + _attr_c = _attr_c.replace('<', '<') + if ('>' in _attr_c): + _attr_c = _attr_c.replace('>', '>') + if (u'"' in _attr_c): + _attr_c = _attr_c.replace(u'"', '"') + if (_attr_c is not None): + append((u' c="%s"' % _attr_c)) + if (_backup_default_36723240 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_36723240 + append(u' />') + if (_backup_attrs_36723312 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_36723312 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_36724248 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x22f8ed0> name=None at 22f8f90> -> _value + _value = _static_36671184 + econtext['attrs'] = _value + + # <span ... 
(8:4) + # -------------------------------------------------------- + append(u'<span') + _attr_a = u'1' + if (_attr_a is not None): + append((u' a="%s"' % _attr_a)) + _backup_default_36723384 = get('default', _marker) + _value = u'2' + econtext['default'] = _value + + # <Expression u'None' (8:46)> -> _attr_b + try: + _attr_b = None + except: + rcontext.setdefault('__error__', []).append((u'None', 8, 46, '<string>', _sys.exc_info()[1], )) + raise + + if (_attr_b is None): + pass + else: + if (_attr_b is False): + _attr_b = None + else: + _tt = type(_attr_b) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _attr_b = unicode(_attr_b) + else: + try: + if (_tt is str): + _attr_b = decode(_attr_b) + else: + if (_tt is not unicode): + try: + _attr_b = _attr_b.__html__ + except: + _attr_b = convert(_attr_b) + else: + raise RuntimeError + except RuntimeError: + _attr_b = _attr_b() + else: + if ((_attr_b is not None) and (re_needs_escape(_attr_b) is not None)): + if ('&' in _attr_b): + if (';' in _attr_b): + _attr_b = re_amp.sub('&', _attr_b) + else: + _attr_b = _attr_b.replace('&', '&') + if ('<' in _attr_b): + _attr_b = _attr_b.replace('<', '<') + if ('>' in _attr_b): + _attr_b = _attr_b.replace('>', '>') + if (u'"' in _attr_b): + _attr_b = _attr_b.replace(u'"', '"') + if (_attr_b is not None): + append((u' b="%s"' % _attr_b)) + if (_backup_default_36723384 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_36723384 + _backup_default_36724680 = get('default', _marker) + _value = u'3' + econtext['default'] = _value + + # <Expression u'None' (8:53)> -> _attr_c + try: + _attr_c = None + except: + rcontext.setdefault('__error__', []).append((u'None', 8, 53, '<string>', _sys.exc_info()[1], )) + raise + + if (_attr_c is None): + pass + else: + if (_attr_c is False): + _attr_c = None + else: + _tt = type(_attr_c) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _attr_c = unicode(_attr_c) + else: + try: + if (_tt is str): + _attr_c = decode(_attr_c) + else: + if (_tt is not unicode): + try: + _attr_c = _attr_c.__html__ + except: + _attr_c = convert(_attr_c) + else: + raise RuntimeError + except RuntimeError: + _attr_c = _attr_c() + else: + if ((_attr_c is not None) and (re_needs_escape(_attr_c) is not None)): + if ('&' in _attr_c): + if (';' in _attr_c): + _attr_c = re_amp.sub('&', _attr_c) + else: + _attr_c = _attr_c.replace('&', '&') + if ('<' in _attr_c): + _attr_c = _attr_c.replace('<', '<') + if ('>' in _attr_c): + _attr_c = _attr_c.replace('>', '>') + if (u'"' in _attr_c): + _attr_c = _attr_c.replace(u'"', '"') + if (_attr_c is not None): + append((u' c="%s"' % _attr_c)) + if (_backup_default_36724680 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_36724680 + append(u' />') + if (_backup_attrs_36724248 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_36724248 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_38641744 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x220dc10> name=None at 220ddd0> -> _value + _value = _static_35707920 + econtext['attrs'] = _value + + # <span ... 
(9:4) + # -------------------------------------------------------- + append(u'<span') + _attr_a = u'1' + if (_attr_a is not None): + append((u' a="%s"' % _attr_a)) + _backup_default_38642032 = get('default', _marker) + _value = u'2' + econtext['default'] = _value + + # <Expression u'string:;' (9:46)> -> _attr_b + try: + _attr_b = u';' + except: + rcontext.setdefault('__error__', []).append((u'string:;', 9, 46, '<string>', _sys.exc_info()[1], )) + raise + + if (_attr_b is None): + pass + else: + if (_attr_b is False): + _attr_b = None + else: + _tt = type(_attr_b) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _attr_b = unicode(_attr_b) + else: + try: + if (_tt is str): + _attr_b = decode(_attr_b) + else: + if (_tt is not unicode): + try: + _attr_b = _attr_b.__html__ + except: + _attr_b = convert(_attr_b) + else: + raise RuntimeError + except RuntimeError: + _attr_b = _attr_b() + else: + if ((_attr_b is not None) and (re_needs_escape(_attr_b) is not None)): + if ('&' in _attr_b): + if (';' in _attr_b): + _attr_b = re_amp.sub('&', _attr_b) + else: + _attr_b = _attr_b.replace('&', '&') + if ('<' in _attr_b): + _attr_b = _attr_b.replace('<', '<') + if ('>' in _attr_b): + _attr_b = _attr_b.replace('>', '>') + if (u'"' in _attr_b): + _attr_b = _attr_b.replace(u'"', '"') + if (_attr_b is not None): + append((u' b="%s"' % _attr_b)) + if (_backup_default_38642032 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_38642032 + _attr_c = u'3' + if (_attr_c is not None): + append((u' c="%s"' % _attr_c)) + append(u' />') + if (_backup_attrs_38641744 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_38641744 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_36721224 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x223a190> name=None at 223a1d0> -> _value + _value = _static_35889552 + econtext['attrs'] = _value + + # <span ... 
(10:4) + # -------------------------------------------------------- + append(u'<span') + _attr_a = u'1' + if (_attr_a is not None): + append((u' a="%s"' % _attr_a)) + _backup_default_36721080 = get('default', _marker) + _value = u'2' + econtext['default'] = _value + + # <Expression u'string:&' (10:46)> -> _attr_b + try: + _attr_b = u'&' + except: + rcontext.setdefault('__error__', []).append((u'string:&', 10, 46, '<string>', _sys.exc_info()[1], )) + raise + + if (_attr_b is None): + pass + else: + if (_attr_b is False): + _attr_b = None + else: + _tt = type(_attr_b) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _attr_b = unicode(_attr_b) + else: + try: + if (_tt is str): + _attr_b = decode(_attr_b) + else: + if (_tt is not unicode): + try: + _attr_b = _attr_b.__html__ + except: + _attr_b = convert(_attr_b) + else: + raise RuntimeError + except RuntimeError: + _attr_b = _attr_b() + else: + if ((_attr_b is not None) and (re_needs_escape(_attr_b) is not None)): + if ('&' in _attr_b): + if (';' in _attr_b): + _attr_b = re_amp.sub('&', _attr_b) + else: + _attr_b = _attr_b.replace('&', '&') + if ('<' in _attr_b): + _attr_b = _attr_b.replace('<', '<') + if ('>' in _attr_b): + _attr_b = _attr_b.replace('>', '>') + if (u'"' in _attr_b): + _attr_b = _attr_b.replace(u'"', '"') + if (_attr_b is not None): + append((u' b="%s"' % _attr_b)) + if (_backup_default_36721080 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_36721080 + _attr_c = u'3' + if (_attr_c is not None): + append((u' c="%s"' % _attr_c)) + append(u' />') + if (_backup_attrs_36721224 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_36721224 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_38642176 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x223a650> name=None at 223a250> -> _value + _value = _static_35890768 + econtext['attrs'] = _value + + # <span ... 
(11:4) + # -------------------------------------------------------- + append(u'<span') + _backup_default_38642104 = get('default', _marker) + _value = u'hello' + econtext['default'] = _value + + # <Expression u"'goodbye'" (11:46)> -> _attr_class + try: + _attr_class = 'goodbye' + except: + rcontext.setdefault('__error__', []).append((u"'goodbye'", 11, 46, '<string>', _sys.exc_info()[1], )) + raise + + if (_attr_class is None): + pass + else: + if (_attr_class is False): + _attr_class = None + else: + _tt = type(_attr_class) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _attr_class = unicode(_attr_class) + else: + try: + if (_tt is str): + _attr_class = decode(_attr_class) + else: + if (_tt is not unicode): + try: + _attr_class = _attr_class.__html__ + except: + _attr_class = convert(_attr_class) + else: + raise RuntimeError + except RuntimeError: + _attr_class = _attr_class() + else: + if ((_attr_class is not None) and (re_needs_escape(_attr_class) is not None)): + if ('&' in _attr_class): + if (';' in _attr_class): + _attr_class = re_amp.sub('&', _attr_class) + else: + _attr_class = _attr_class.replace('&', '&') + if ('<' in _attr_class): + _attr_class = _attr_class.replace('<', '<') + if ('>' in _attr_class): + _attr_class = _attr_class.replace('>', '>') + if (u'"' in _attr_class): + _attr_class = _attr_class.replace(u'"', '"') + if (_attr_class is not None): + append((u' class="%s"' % _attr_class)) + if (_backup_default_38642104 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_38642104 + append(u' />') + if (_backup_attrs_38642176 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_38642176 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</body>') + if (_backup_attrs_36624936 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_36624936 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</html>') + if (_backup_attrs_39099424 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_39099424 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) +pass \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/004.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/004.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/004.xml @@ -0,0 +1,5 @@ +<!DOCTYPE doc [ +<!ELEMENT doc (#PCDATA)> +<!ATTLIST doc a1 CDATA #IMPLIED> +]> +<doc a1="v1"></doc> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/005-default.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/005-default.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/005-default.pt @@ -0,0 +1,12 @@ +<html> + <body> + <img class="default" tal:attributes="class default" /> + <img tal:attributes="class default" /> + <span tal:content="default">Default</span> + <span tal:content="True">Default</span> + <span tal:content="False">Default</span> + <span tal:content="default"> + <em>${'Computed default'}</em> + </span> + </body> +</html> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/005-default.pt.py b/lib/Chameleon-2.22/src/chameleon/tests/inputs/005-default.pt.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/005-default.pt.py @@ -0,0 +1,445 
@@ +# -*- coding: utf-8 -*- +pass +from chameleon.utils import Placeholder as _Placeholder +import sys as _sys +pass +_static_35707984 = {} +_static_35803088 = {} +_static_36671184 = {} +_static_35779600 = {} +_static_36670416 = {u'class': u'default', } +_static_36668432 = {u'class': u'default', } +_static_35802384 = {} +_marker_default = _Placeholder() +import re +import functools +_marker = object() +g_re_amp = re.compile('&(?!([A-Za-z]+|#[0-9]+);)') +g_re_needs_escape = re.compile('[&<>\\"\\\']').search +re_whitespace = functools.partial(re.compile('\\s+').sub, ' ') + +def render(stream, econtext, rcontext): + append = stream.append + getitem = econtext.__getitem__ + get = econtext.get + _i18n_domain = None + re_amp = g_re_amp + re_needs_escape = g_re_needs_escape + decode = getitem('decode') + convert = getitem('convert') + translate = getitem('translate') + _backup_attrs_38643400 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x2224d10> name=None at 2224a10> -> _value + _value = _static_35802384 + econtext['attrs'] = _value + + # <html ... (1:0) + # -------------------------------------------------------- + append(u'<html>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_38494936 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x2224fd0> name=None at 2224290> -> _value + _value = _static_35803088 + econtext['attrs'] = _value + + # <body ... (2:2) + # -------------------------------------------------------- + append(u'<body>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_38642320 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x22f8410> name=None at 2224dd0> -> _value + _value = _static_36668432 + econtext['attrs'] = _value + + # <img ... 
(3:4) + # -------------------------------------------------------- + append(u'<img') + _backup_default_38494504 = get('default', _marker) + _value = u'default' + econtext['default'] = _value + + # <Expression u'default' (3:47)> -> _attr_class + try: + _attr_class = getitem('default') + except: + rcontext.setdefault('__error__', []).append((u'default', 3, 47, '<string>', _sys.exc_info()[1], )) + raise + + if (_attr_class is None): + pass + else: + if (_attr_class is False): + _attr_class = None + else: + _tt = type(_attr_class) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _attr_class = unicode(_attr_class) + else: + try: + if (_tt is str): + _attr_class = decode(_attr_class) + else: + if (_tt is not unicode): + try: + _attr_class = _attr_class.__html__ + except: + _attr_class = convert(_attr_class) + else: + raise RuntimeError + except RuntimeError: + _attr_class = _attr_class() + else: + if ((_attr_class is not None) and (re_needs_escape(_attr_class) is not None)): + if ('&' in _attr_class): + if (';' in _attr_class): + _attr_class = re_amp.sub('&', _attr_class) + else: + _attr_class = _attr_class.replace('&', '&') + if ('<' in _attr_class): + _attr_class = _attr_class.replace('<', '<') + if ('>' in _attr_class): + _attr_class = _attr_class.replace('>', '>') + if (u'"' in _attr_class): + _attr_class = _attr_class.replace(u'"', '"') + if (_attr_class is not None): + append((u' class="%s"' % _attr_class)) + if (_backup_default_38494504 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_38494504 + append(u' />') + if (_backup_attrs_38642320 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_38642320 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_38495800 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x22f8bd0> name=None at 22f8810> -> _value + _value = _static_36670416 + econtext['attrs'] = _value + + # <img ... 
(4:4) + # -------------------------------------------------------- + append(u'<img') + _backup_default_38495728 = get('default', _marker) + _value = None + econtext['default'] = _value + + # <Expression u'default' (4:31)> -> _attr_class + try: + _attr_class = getitem('default') + except: + rcontext.setdefault('__error__', []).append((u'default', 4, 31, '<string>', _sys.exc_info()[1], )) + raise + + if (_attr_class is None): + pass + else: + if (_attr_class is False): + _attr_class = None + else: + _tt = type(_attr_class) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _attr_class = unicode(_attr_class) + else: + try: + if (_tt is str): + _attr_class = decode(_attr_class) + else: + if (_tt is not unicode): + try: + _attr_class = _attr_class.__html__ + except: + _attr_class = convert(_attr_class) + else: + raise RuntimeError + except RuntimeError: + _attr_class = _attr_class() + else: + if ((_attr_class is not None) and (re_needs_escape(_attr_class) is not None)): + if ('&' in _attr_class): + if (';' in _attr_class): + _attr_class = re_amp.sub('&', _attr_class) + else: + _attr_class = _attr_class.replace('&', '&') + if ('<' in _attr_class): + _attr_class = _attr_class.replace('<', '<') + if ('>' in _attr_class): + _attr_class = _attr_class.replace('>', '>') + if ('"' in _attr_class): + _attr_class = _attr_class.replace('"', '"') + if (_attr_class is not None): + append((u' class="%s"' % _attr_class)) + if (_backup_default_38495728 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_38495728 + append(u' />') + if (_backup_attrs_38495800 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_38495800 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_38571832 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x22f8ed0> name=None at 22f8f90> -> _value + _value = _static_36671184 + econtext['attrs'] = _value + + # <span ... 
(5:4) + # -------------------------------------------------------- + append(u'<span>') + _backup_default_37019016 = get('default', _marker) + + # <Marker name='default' at 22f8890> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u'default' (5:23)> -> _cache_36670032 + try: + _cache_36670032 = getitem('default') + except: + rcontext.setdefault('__error__', []).append((u'default', 5, 23, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u'default' (5:23)> value=<Marker name='default' at 22f8990> at 22f89d0> -> _condition + _expression = _cache_36670032 + + # <Marker name='default' at 22f8990> -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + _content_139955154988272 = u'Default' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + else: + _content = _cache_36670032 + if (_content is None): + pass + else: + if (_content is False): + _content = None + else: + _tt = type(_content) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content = unicode(_content) + else: + try: + if (_tt is str): + _content = decode(_content) + else: + if (_tt is not unicode): + try: + _content = _content.__html__ + except: + _content = convert(_content) + else: + raise RuntimeError + except RuntimeError: + _content = _content() + else: + if ((_content is not None) and (re_needs_escape(_content) is not None)): + if ('&' in _content): + if (';' in _content): + _content = re_amp.sub('&', _content) + else: + _content = _content.replace('&', '&') + if ('<' in _content): + _content = _content.replace('<', '<') + if ('>' in _content): + _content = _content.replace('>', '>') + if ('\x00' in _content): + _content = _content.replace('\x00', '"') + if (_content is not None): + append(_content) + if (_backup_default_37019016 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_37019016 + append(u'</span>') + if (_backup_attrs_38571832 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_38571832 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_38604240 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x220dc50> name=None at 22f8e90> -> _value + _value = _static_35707984 + econtext['attrs'] = _value + + # <span ... (6:4) + # -------------------------------------------------------- + append(u'<span>') + _backup_default_38601360 = get('default', _marker) + + # <Marker name='default' at 22f8b50> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u'default' (6:23)> -> _cache_36670672 + try: + _cache_36670672 = getitem('default') + except: + rcontext.setdefault('__error__', []).append((u'default', 6, 23, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u'default' (6:23)> value=<Marker name='default' at 22f88d0> at 22f8590> -> _condition + _expression = _cache_36670672 + + # <Marker name='default' at 22f88d0> -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_38632624 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x221f410> name=None at 221f490> -> _value + _value = _static_35779600 + econtext['attrs'] = _value + + # <em ... 
(7:6) + # -------------------------------------------------------- + append(u'<em>') + + # <Expression u"'Computed default'" (7:12)> -> _content_139955154988272 + try: + _content_139955154988272 = 'Computed default' + except: + rcontext.setdefault('__error__', []).append((u"'Computed default'", 7, 12, '<string>', _sys.exc_info()[1], )) + raise + + if (_content_139955154988272 is None): + pass + else: + if (_content_139955154988272 is False): + _content_139955154988272 = None + else: + _tt = type(_content_139955154988272) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content_139955154988272 = unicode(_content_139955154988272) + else: + try: + if (_tt is str): + _content_139955154988272 = decode(_content_139955154988272) + else: + if (_tt is not unicode): + try: + _content_139955154988272 = _content_139955154988272.__html__ + except: + _content_139955154988272 = convert(_content_139955154988272) + else: + raise RuntimeError + except RuntimeError: + _content_139955154988272 = _content_139955154988272() + else: + if ((_content_139955154988272 is not None) and (re_needs_escape(_content_139955154988272) is not None)): + if ('&' in _content_139955154988272): + if (';' in _content_139955154988272): + _content_139955154988272 = re_amp.sub('&', _content_139955154988272) + else: + _content_139955154988272 = _content_139955154988272.replace('&', '&') + if ('<' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('<', '<') + if ('>' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('>', '>') + if ('\x00' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('\x00', '"') + _content_139955154988272 = _content_139955154988272 + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</em>') + if (_backup_attrs_38632624 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_38632624 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + else: + _content = _cache_36670672 + if (_content is None): + pass + else: + if (_content is False): + _content = None + else: + _tt = type(_content) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content = unicode(_content) + else: + try: + if (_tt is str): + _content = decode(_content) + else: + if (_tt is not unicode): + try: + _content = _content.__html__ + except: + _content = convert(_content) + else: + raise RuntimeError + except RuntimeError: + _content = _content() + else: + if ((_content is not None) and (re_needs_escape(_content) is not None)): + if ('&' in _content): + if (';' in _content): + _content = re_amp.sub('&', _content) + else: + _content = _content.replace('&', '&') + if ('<' in _content): + _content = _content.replace('<', '<') + if ('>' in _content): + _content = _content.replace('>', '>') + if ('\x00' in _content): + _content = _content.replace('\x00', '"') + if (_content is not None): + append(_content) + if (_backup_default_38601360 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_38601360 + append(u'</span>') + if (_backup_attrs_38604240 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_38604240 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</body>') + if (_backup_attrs_38494936 is _marker): + del econtext['attrs'] + else: + 
econtext['attrs'] = _backup_attrs_38494936 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</html>') + if (_backup_attrs_38643400 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_38643400 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) +pass \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/005.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/005.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/005.xml @@ -0,0 +1,5 @@ +<!DOCTYPE doc [ +<!ELEMENT doc (#PCDATA)> +<!ATTLIST doc a1 CDATA #IMPLIED> +]> +<doc a1 = "v1"></doc> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/006-attribute-interpolation.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/006-attribute-interpolation.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/006-attribute-interpolation.pt @@ -0,0 +1,9 @@ +<html> + <body class="ltr" tal:define="hash string:#"> + <img src="${'#'}" alt="copyright (c) ${2010}" /> + <img src="" alt="copyright (c) ${2010}" tal:attributes="src string:$hash" /> + <img src="" alt="copyright (c) ${2010}" tal:attributes="src string:${hash}" /> + <img src="${None}" alt="$ignored" /> + <img src="" alt="${'%stype \'str\'%s' % (chr(60), chr(62))}" /> + </body> +</html> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/006-attribute-interpolation.pt.py b/lib/Chameleon-2.22/src/chameleon/tests/inputs/006-attribute-interpolation.pt.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/006-attribute-interpolation.pt.py @@ -0,0 +1,272 @@ +# -*- coding: utf-8 -*- +pass +import sys as _sys +pass +_static_36670736 = {u'src': u"${'#'}", u'alt': u'copyright (c) ${2010}', } +_static_36669072 = {u'class': u'ltr', } +_static_36669648 = {u'src': u'${None}', } +_static_36667984 = {} +import re +import functools +_marker = object() +g_re_amp = re.compile('&(?!([A-Za-z]+|#[0-9]+);)') +g_re_needs_escape = re.compile('[&<>\\"\\\']').search +re_whitespace = functools.partial(re.compile('\\s+').sub, ' ') + +def render(stream, econtext, rcontext): + append = stream.append + getitem = econtext.__getitem__ + get = econtext.get + _i18n_domain = None + re_amp = g_re_amp + re_needs_escape = g_re_needs_escape + decode = getitem('decode') + convert = getitem('convert') + translate = getitem('translate') + _backup_attrs_36723672 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x22f8250> name=None at 22f8550> -> _value + _value = _static_36667984 + econtext['attrs'] = _value + + # <html ... (1:0) + # -------------------------------------------------------- + append(u'<html>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_36648360 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x22f8690> name=None at 22f80d0> -> _value + _value = _static_36669072 + econtext['attrs'] = _value + + # <body ... 
(2:2) + # -------------------------------------------------------- + append(u'<body') + _attr_class = u'ltr' + if (_attr_class is not None): + append((u' class="%s"' % _attr_class)) + append(u'>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_36648216 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x22f8d10> name=None at 22f8d90> -> _value + _value = _static_36670736 + econtext['attrs'] = _value + + # <img ... (3:4) + # -------------------------------------------------------- + append(u'<img') + _backup_default_36648432 = get('default', _marker) + _value = u"${'#'}" + econtext['default'] = _value + + # <Interpolation value=u"${'#'}" escape=True at 22f8a50> -> _attr_src + + # <Expression u"'#'" (3:16)> -> _attr_src + try: + _attr_src = '#' + except: + rcontext.setdefault('__error__', []).append((u"'#'", 3, 16, '<string>', _sys.exc_info()[1], )) + raise + + _attr_src = _attr_src + if (_attr_src is None): + pass + else: + if (_attr_src is False): + _attr_src = None + else: + _tt = type(_attr_src) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _attr_src = unicode(_attr_src) + else: + try: + if (_tt is str): + _attr_src = decode(_attr_src) + else: + if (_tt is not unicode): + try: + _attr_src = _attr_src.__html__ + except: + _attr_src = convert(_attr_src) + else: + raise RuntimeError + except RuntimeError: + _attr_src = _attr_src() + else: + if ((_attr_src is not None) and (re_needs_escape(_attr_src) is not None)): + if ('&' in _attr_src): + if (';' in _attr_src): + _attr_src = re_amp.sub('&', _attr_src) + else: + _attr_src = _attr_src.replace('&', '&') + if ('<' in _attr_src): + _attr_src = _attr_src.replace('<', '<') + if ('>' in _attr_src): + _attr_src = _attr_src.replace('>', '>') + if (u'"' in _attr_src): + _attr_src = _attr_src.replace(u'"', '"') + if (_attr_src is not None): + append((u' src="%s"' % _attr_src)) + if (_backup_default_36648432 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_36648432 + _backup_default_39035896 = get('default', _marker) + _value = u'copyright (c) ${2010}' + econtext['default'] = _value + + # <Interpolation value=u'copyright (c) ${2010}' escape=True at 22f8090> -> _attr_alt + + # <Expression u'2010' (3:43)> -> _attr_alt + try: + _attr_alt = 2010 + except: + rcontext.setdefault('__error__', []).append((u'2010', 3, 43, '<string>', _sys.exc_info()[1], )) + raise + + _attr_alt = ('%s%s' % ((u'copyright (c) ' if (u'copyright (c) ' is not None) else ''), (_attr_alt if (_attr_alt is not None) else ''), )) + if (_attr_alt is None): + pass + else: + if (_attr_alt is False): + _attr_alt = None + else: + _tt = type(_attr_alt) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _attr_alt = unicode(_attr_alt) + else: + try: + if (_tt is str): + _attr_alt = decode(_attr_alt) + else: + if (_tt is not unicode): + try: + _attr_alt = _attr_alt.__html__ + except: + _attr_alt = convert(_attr_alt) + else: + raise RuntimeError + except RuntimeError: + _attr_alt = _attr_alt() + else: + if ((_attr_alt is not None) and (re_needs_escape(_attr_alt) is not None)): + if ('&' in _attr_alt): + if (';' in _attr_alt): + _attr_alt = re_amp.sub('&', _attr_alt) + else: + _attr_alt = _attr_alt.replace('&', '&') + if ('<' in _attr_alt): + _attr_alt = _attr_alt.replace('<', '<') + if ('>' in _attr_alt): + _attr_alt = _attr_alt.replace('>', '>') + if (u'"' in _attr_alt): + _attr_alt = _attr_alt.replace(u'"', '"') + if (_attr_alt is not 
None): + append((u' alt="%s"' % _attr_alt)) + if (_backup_default_39035896 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_39035896 + append(u' />') + if (_backup_attrs_36648216 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_36648216 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_36650448 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x22f88d0> name=None at 22f8590> -> _value + _value = _static_36669648 + econtext['attrs'] = _value + + # <img ... (4:4) + # -------------------------------------------------------- + append(u'<img') + _backup_default_36650232 = get('default', _marker) + _value = u'${None}' + econtext['default'] = _value + + # <Interpolation value=u'${None}' escape=True at 221fb90> -> _attr_src + + # <Expression u'None' (4:16)> -> _attr_src + try: + _attr_src = None + except: + rcontext.setdefault('__error__', []).append((u'None', 4, 16, '<string>', _sys.exc_info()[1], )) + raise + + _attr_src = _attr_src + if (_attr_src is None): + pass + else: + if (_attr_src is False): + _attr_src = None + else: + _tt = type(_attr_src) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _attr_src = unicode(_attr_src) + else: + try: + if (_tt is str): + _attr_src = decode(_attr_src) + else: + if (_tt is not unicode): + try: + _attr_src = _attr_src.__html__ + except: + _attr_src = convert(_attr_src) + else: + raise RuntimeError + except RuntimeError: + _attr_src = _attr_src() + else: + if ((_attr_src is not None) and (re_needs_escape(_attr_src) is not None)): + if ('&' in _attr_src): + if (';' in _attr_src): + _attr_src = re_amp.sub('&', _attr_src) + else: + _attr_src = _attr_src.replace('&', '&') + if ('<' in _attr_src): + _attr_src = _attr_src.replace('<', '<') + if ('>' in _attr_src): + _attr_src = _attr_src.replace('>', '>') + if (u'"' in _attr_src): + _attr_src = _attr_src.replace(u'"', '"') + if (_attr_src is not None): + append((u' src="%s"' % _attr_src)) + if (_backup_default_36650232 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_36650232 + append(u' />') + if (_backup_attrs_36650448 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_36650448 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</body>') + if (_backup_attrs_36648360 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_36648360 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</html>') + if (_backup_attrs_36723672 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_36723672 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) +pass \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/006.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/006.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/006.xml @@ -0,0 +1,5 @@ +<!DOCTYPE doc [ +<!ELEMENT doc (#PCDATA)> +<!ATTLIST doc a1 CDATA #IMPLIED> +]> +<doc a1='v1'></doc> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/007-content-interpolation.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/007-content-interpolation.pt new file mode 100644 --- /dev/null +++ 
b/lib/Chameleon-2.22/src/chameleon/tests/inputs/007-content-interpolation.pt @@ -0,0 +1,16 @@ +<html> + <body> + ${'Hello world!'} + ${literal} + ${structure: literal.s} + ${"%stype 'str'%s" % (chr(60), chr(62))} + && + ${None} + ${None or + 'Hello world'} + $leftalone + <div>${None}</div> + <div>${1 < 2 and 'Hello world' or None}</div> + <div>${} is ignored.</div> + </body> +</html> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/007-content-interpolation.pt.py b/lib/Chameleon-2.22/src/chameleon/tests/inputs/007-content-interpolation.pt.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/007-content-interpolation.pt.py @@ -0,0 +1,261 @@ +# -*- coding: utf-8 -*- +pass +import sys as _sys +pass +_static_36670096 = {} +_static_35829392 = {} +_static_35708624 = {} +import re +import functools +_marker = object() +g_re_amp = re.compile('&(?!([A-Za-z]+|#[0-9]+);)') +g_re_needs_escape = re.compile('[&<>\\"\\\']').search +re_whitespace = functools.partial(re.compile('\\s+').sub, ' ') + +def render(stream, econtext, rcontext): + append = stream.append + getitem = econtext.__getitem__ + get = econtext.get + _i18n_domain = None + re_amp = g_re_amp + re_needs_escape = g_re_needs_escape + decode = getitem('decode') + convert = getitem('convert') + translate = getitem('translate') + _backup_attrs_37246736 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x220ded0> name=None at 221fb90> -> _value + _value = _static_35708624 + econtext['attrs'] = _value + + # <html ... (1:0) + # -------------------------------------------------------- + append(u'<html>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_38435512 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x222b690> name=None at 222bb50> -> _value + _value = _static_35829392 + econtext['attrs'] = _value + + # <body ... (2:2) + # -------------------------------------------------------- + append(u'<body>') + + # <Expression u"'Hello world!'" (3:6)> -> _content_139955154988272 + try: + _content_139955154988272 = 'Hello world!' 
+ except: + rcontext.setdefault('__error__', []).append((u"'Hello world!'", 3, 6, '<string>', _sys.exc_info()[1], )) + raise + + if (_content_139955154988272 is None): + pass + else: + if (_content_139955154988272 is False): + _content_139955154988272 = None + else: + _tt = type(_content_139955154988272) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content_139955154988272 = unicode(_content_139955154988272) + else: + try: + if (_tt is str): + _content_139955154988272 = decode(_content_139955154988272) + else: + if (_tt is not unicode): + try: + _content_139955154988272 = _content_139955154988272.__html__ + except: + _content_139955154988272 = convert(_content_139955154988272) + else: + raise RuntimeError + except RuntimeError: + _content_139955154988272 = _content_139955154988272() + else: + if ((_content_139955154988272 is not None) and (re_needs_escape(_content_139955154988272) is not None)): + if ('&' in _content_139955154988272): + if (';' in _content_139955154988272): + _content_139955154988272 = re_amp.sub('&', _content_139955154988272) + else: + _content_139955154988272 = _content_139955154988272.replace('&', '&') + if ('<' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('<', '<') + if ('>' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('>', '>') + if ('\x00' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('\x00', '"') + + # <Expression u'literal' (4:6)> -> _content_139955154988272_42 + try: + _content_139955154988272_42 = getitem('literal') + except: + rcontext.setdefault('__error__', []).append((u'literal', 4, 6, '<string>', _sys.exc_info()[1], )) + raise + + if (_content_139955154988272_42 is None): + pass + else: + if (_content_139955154988272_42 is False): + _content_139955154988272_42 = None + else: + _tt = type(_content_139955154988272_42) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content_139955154988272_42 = unicode(_content_139955154988272_42) + else: + try: + if (_tt is str): + _content_139955154988272_42 = decode(_content_139955154988272_42) + else: + if (_tt is not unicode): + try: + _content_139955154988272_42 = _content_139955154988272_42.__html__ + except: + _content_139955154988272_42 = convert(_content_139955154988272_42) + else: + raise RuntimeError + except RuntimeError: + _content_139955154988272_42 = _content_139955154988272_42() + else: + if ((_content_139955154988272_42 is not None) and (re_needs_escape(_content_139955154988272_42) is not None)): + if ('&' in _content_139955154988272_42): + if (';' in _content_139955154988272_42): + _content_139955154988272_42 = re_amp.sub('&', _content_139955154988272_42) + else: + _content_139955154988272_42 = _content_139955154988272_42.replace('&', '&') + if ('<' in _content_139955154988272_42): + _content_139955154988272_42 = _content_139955154988272_42.replace('<', '<') + if ('>' in _content_139955154988272_42): + _content_139955154988272_42 = _content_139955154988272_42.replace('>', '>') + if ('\x00' in _content_139955154988272_42): + _content_139955154988272_42 = _content_139955154988272_42.replace('\x00', '"') + + # <Expression u'None' (5:6)> -> _content_139955154988272_57 + try: + _content_139955154988272_57 = None + except: + rcontext.setdefault('__error__', []).append((u'None', 5, 6, '<string>', _sys.exc_info()[1], )) + raise + + if (_content_139955154988272_57 is None): + pass + else: + if (_content_139955154988272_57 is False): + 
_content_139955154988272_57 = None + else: + _tt = type(_content_139955154988272_57) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content_139955154988272_57 = unicode(_content_139955154988272_57) + else: + try: + if (_tt is str): + _content_139955154988272_57 = decode(_content_139955154988272_57) + else: + if (_tt is not unicode): + try: + _content_139955154988272_57 = _content_139955154988272_57.__html__ + except: + _content_139955154988272_57 = convert(_content_139955154988272_57) + else: + raise RuntimeError + except RuntimeError: + _content_139955154988272_57 = _content_139955154988272_57() + else: + if ((_content_139955154988272_57 is not None) and (re_needs_escape(_content_139955154988272_57) is not None)): + if ('&' in _content_139955154988272_57): + if (';' in _content_139955154988272_57): + _content_139955154988272_57 = re_amp.sub('&', _content_139955154988272_57) + else: + _content_139955154988272_57 = _content_139955154988272_57.replace('&', '&') + if ('<' in _content_139955154988272_57): + _content_139955154988272_57 = _content_139955154988272_57.replace('<', '<') + if ('>' in _content_139955154988272_57): + _content_139955154988272_57 = _content_139955154988272_57.replace('>', '>') + if ('\x00' in _content_139955154988272_57): + _content_139955154988272_57 = _content_139955154988272_57.replace('\x00', '"') + _content_139955154988272 = ('%s%s%s%s%s%s%s' % ((u'\n ' if (u'\n ' is not None) else ''), (_content_139955154988272 if (_content_139955154988272 is not None) else ''), (u'\n ' if (u'\n ' is not None) else ''), (_content_139955154988272_42 if (_content_139955154988272_42 is not None) else ''), (u'\n ' if (u'\n ' is not None) else ''), (_content_139955154988272_57 if (_content_139955154988272_57 is not None) else ''), (u'\n ' if (u'\n ' is not None) else ''), )) + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_38446936 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x22f8a90> name=None at 22f8810> -> _value + _value = _static_36670096 + econtext['attrs'] = _value + + # <div ... 
(6:4) + # -------------------------------------------------------- + append(u'<div>') + + # <Expression u'None' (6:11)> -> _content_139955154988272 + try: + _content_139955154988272 = None + except: + rcontext.setdefault('__error__', []).append((u'None', 6, 11, '<string>', _sys.exc_info()[1], )) + raise + + if (_content_139955154988272 is None): + pass + else: + if (_content_139955154988272 is False): + _content_139955154988272 = None + else: + _tt = type(_content_139955154988272) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content_139955154988272 = unicode(_content_139955154988272) + else: + try: + if (_tt is str): + _content_139955154988272 = decode(_content_139955154988272) + else: + if (_tt is not unicode): + try: + _content_139955154988272 = _content_139955154988272.__html__ + except: + _content_139955154988272 = convert(_content_139955154988272) + else: + raise RuntimeError + except RuntimeError: + _content_139955154988272 = _content_139955154988272() + else: + if ((_content_139955154988272 is not None) and (re_needs_escape(_content_139955154988272) is not None)): + if ('&' in _content_139955154988272): + if (';' in _content_139955154988272): + _content_139955154988272 = re_amp.sub('&', _content_139955154988272) + else: + _content_139955154988272 = _content_139955154988272.replace('&', '&') + if ('<' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('<', '<') + if ('>' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('>', '>') + if ('\x00' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('\x00', '"') + _content_139955154988272 = _content_139955154988272 + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</div>') + if (_backup_attrs_38446936 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_38446936 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</body>') + if (_backup_attrs_38435512 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_38435512 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</html>') + if (_backup_attrs_37246736 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_37246736 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) +pass \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/007.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/007.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/007.xml @@ -0,0 +1,4 @@ +<!DOCTYPE doc [ +<!ELEMENT doc (#PCDATA)> +]> +<doc> </doc> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/008-builtins.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/008-builtins.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/008-builtins.pt @@ -0,0 +1,12 @@ +<html> + <body> + ${attrs} + ${nothing} + <div tal:attributes="class string:dynamic" class="static"> + ${attrs['class']} + </div> + <div tal:define="nothing string:nothing"> + ${nothing} + </div> + </body> +</html> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/008-builtins.pt.py b/lib/Chameleon-2.22/src/chameleon/tests/inputs/008-builtins.pt.py new file mode 100644 --- 
/dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/008-builtins.pt.py @@ -0,0 +1,227 @@ +# -*- coding: utf-8 -*- +pass +import sys as _sys +pass +_static_35829392 = {} +_static_36669456 = {} +_static_36669712 = {u'class': u'static', } +import re +import functools +_marker = object() +g_re_amp = re.compile('&(?!([A-Za-z]+|#[0-9]+);)') +g_re_needs_escape = re.compile('[&<>\\"\\\']').search +re_whitespace = functools.partial(re.compile('\\s+').sub, ' ') + +def render(stream, econtext, rcontext): + append = stream.append + getitem = econtext.__getitem__ + get = econtext.get + _i18n_domain = None + re_amp = g_re_amp + re_needs_escape = g_re_needs_escape + decode = getitem('decode') + convert = getitem('convert') + translate = getitem('translate') + _backup_attrs_35754928 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x222b690> name=None at 222bb50> -> _value + _value = _static_35829392 + econtext['attrs'] = _value + + # <html ... (1:0) + # -------------------------------------------------------- + append(u'<html>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_35872992 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x22f8810> name=None at 22f8210> -> _value + _value = _static_36669456 + econtext['attrs'] = _value + + # <body ... (2:2) + # -------------------------------------------------------- + append(u'<body>') + + # <Expression u'nothing' (3:6)> -> _content_139955154988272 + try: + _content_139955154988272 = getitem('nothing') + except: + rcontext.setdefault('__error__', []).append((u'nothing', 3, 6, '<string>', _sys.exc_info()[1], )) + raise + + if (_content_139955154988272 is None): + pass + else: + if (_content_139955154988272 is False): + _content_139955154988272 = None + else: + _tt = type(_content_139955154988272) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content_139955154988272 = unicode(_content_139955154988272) + else: + try: + if (_tt is str): + _content_139955154988272 = decode(_content_139955154988272) + else: + if (_tt is not unicode): + try: + _content_139955154988272 = _content_139955154988272.__html__ + except: + _content_139955154988272 = convert(_content_139955154988272) + else: + raise RuntimeError + except RuntimeError: + _content_139955154988272 = _content_139955154988272() + else: + if ((_content_139955154988272 is not None) and (re_needs_escape(_content_139955154988272) is not None)): + if ('&' in _content_139955154988272): + if (';' in _content_139955154988272): + _content_139955154988272 = re_amp.sub('&', _content_139955154988272) + else: + _content_139955154988272 = _content_139955154988272.replace('&', '&') + if ('<' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('<', '<') + if ('>' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('>', '>') + if ('\x00' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('\x00', '"') + _content_139955154988272 = ('%s%s%s' % ((u'\n ' if (u'\n ' is not None) else ''), (_content_139955154988272 if (_content_139955154988272 is not None) else ''), (u'\n ' if (u'\n ' is not None) else ''), )) + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_35755576 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x22f8910> name=None at 22f8f10> -> _value + _value = _static_36669712 + econtext['attrs'] = _value + + # 
<div ... (4:4) + # -------------------------------------------------------- + append(u'<div') + _backup_default_35755720 = get('default', _marker) + _value = u'static' + econtext['default'] = _value + + # <Expression u'string:dynamic' (4:31)> -> _attr_class + try: + _attr_class = u'dynamic' + except: + rcontext.setdefault('__error__', []).append((u'string:dynamic', 4, 31, '<string>', _sys.exc_info()[1], )) + raise + + if (_attr_class is None): + pass + else: + if (_attr_class is False): + _attr_class = None + else: + _tt = type(_attr_class) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _attr_class = unicode(_attr_class) + else: + try: + if (_tt is str): + _attr_class = decode(_attr_class) + else: + if (_tt is not unicode): + try: + _attr_class = _attr_class.__html__ + except: + _attr_class = convert(_attr_class) + else: + raise RuntimeError + except RuntimeError: + _attr_class = _attr_class() + else: + if ((_attr_class is not None) and (re_needs_escape(_attr_class) is not None)): + if ('&' in _attr_class): + if (';' in _attr_class): + _attr_class = re_amp.sub('&', _attr_class) + else: + _attr_class = _attr_class.replace('&', '&') + if ('<' in _attr_class): + _attr_class = _attr_class.replace('<', '<') + if ('>' in _attr_class): + _attr_class = _attr_class.replace('>', '>') + if (u'"' in _attr_class): + _attr_class = _attr_class.replace(u'"', '"') + if (_attr_class is not None): + append((u' class="%s"' % _attr_class)) + if (_backup_default_35755720 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_35755720 + append(u'>') + + # <Expression u"attrs['class']" (5:8)> -> _content_139955154988272 + try: + _content_139955154988272 = getitem('attrs')['class'] + except: + rcontext.setdefault('__error__', []).append((u"attrs['class']", 5, 8, '<string>', _sys.exc_info()[1], )) + raise + + if (_content_139955154988272 is None): + pass + else: + if (_content_139955154988272 is False): + _content_139955154988272 = None + else: + _tt = type(_content_139955154988272) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content_139955154988272 = unicode(_content_139955154988272) + else: + try: + if (_tt is str): + _content_139955154988272 = decode(_content_139955154988272) + else: + if (_tt is not unicode): + try: + _content_139955154988272 = _content_139955154988272.__html__ + except: + _content_139955154988272 = convert(_content_139955154988272) + else: + raise RuntimeError + except RuntimeError: + _content_139955154988272 = _content_139955154988272() + else: + if ((_content_139955154988272 is not None) and (re_needs_escape(_content_139955154988272) is not None)): + if ('&' in _content_139955154988272): + if (';' in _content_139955154988272): + _content_139955154988272 = re_amp.sub('&', _content_139955154988272) + else: + _content_139955154988272 = _content_139955154988272.replace('&', '&') + if ('<' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('<', '<') + if ('>' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('>', '>') + if ('\x00' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('\x00', '"') + _content_139955154988272 = ('%s%s%s' % ((u'\n ' if (u'\n ' is not None) else ''), (_content_139955154988272 if (_content_139955154988272 is not None) else ''), (u'\n ' if (u'\n ' is not None) else ''), )) + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</div>') + if 
(_backup_attrs_35755576 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_35755576 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</body>') + if (_backup_attrs_35872992 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_35872992 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</html>') + if (_backup_attrs_35754928 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_35754928 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) +pass \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/008.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/008.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/008.xml @@ -0,0 +1,4 @@ +<!DOCTYPE doc [ +<!ELEMENT doc (#PCDATA)> +]> +<doc>&<>"'</doc> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/009-literals.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/009-literals.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/009-literals.pt @@ -0,0 +1,5 @@ +<html> + <body> + ${literal} + </body> +</html> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/009-literals.pt.py b/lib/Chameleon-2.22/src/chameleon/tests/inputs/009-literals.pt.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/009-literals.pt.py @@ -0,0 +1,108 @@ +# -*- coding: utf-8 -*- +pass +import sys as _sys +pass +_static_36667792 = {} +_static_36669264 = {} +import re +import functools +_marker = object() +g_re_amp = re.compile('&(?!([A-Za-z]+|#[0-9]+);)') +g_re_needs_escape = re.compile('[&<>\\"\\\']').search +re_whitespace = functools.partial(re.compile('\\s+').sub, ' ') + +def render(stream, econtext, rcontext): + append = stream.append + getitem = econtext.__getitem__ + get = econtext.get + _i18n_domain = None + re_amp = g_re_amp + re_needs_escape = g_re_needs_escape + decode = getitem('decode') + convert = getitem('convert') + translate = getitem('translate') + _backup_attrs_37031304 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x22f8750> name=None at 22f8a90> -> _value + _value = _static_36669264 + econtext['attrs'] = _value + + # <html ... (1:0) + # -------------------------------------------------------- + append(u'<html>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_37004352 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x22f8190> name=None at 22f8bd0> -> _value + _value = _static_36667792 + econtext['attrs'] = _value + + # <body ... 
(2:2) + # -------------------------------------------------------- + append(u'<body>') + + # <Expression u'literal' (3:6)> -> _content_139955154988272 + try: + _content_139955154988272 = getitem('literal') + except: + rcontext.setdefault('__error__', []).append((u'literal', 3, 6, '<string>', _sys.exc_info()[1], )) + raise + + if (_content_139955154988272 is None): + pass + else: + if (_content_139955154988272 is False): + _content_139955154988272 = None + else: + _tt = type(_content_139955154988272) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content_139955154988272 = unicode(_content_139955154988272) + else: + try: + if (_tt is str): + _content_139955154988272 = decode(_content_139955154988272) + else: + if (_tt is not unicode): + try: + _content_139955154988272 = _content_139955154988272.__html__ + except: + _content_139955154988272 = convert(_content_139955154988272) + else: + raise RuntimeError + except RuntimeError: + _content_139955154988272 = _content_139955154988272() + else: + if ((_content_139955154988272 is not None) and (re_needs_escape(_content_139955154988272) is not None)): + if ('&' in _content_139955154988272): + if (';' in _content_139955154988272): + _content_139955154988272 = re_amp.sub('&', _content_139955154988272) + else: + _content_139955154988272 = _content_139955154988272.replace('&', '&') + if ('<' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('<', '<') + if ('>' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('>', '>') + if ('\x00' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('\x00', '"') + _content_139955154988272 = ('%s%s%s' % ((u'\n ' if (u'\n ' is not None) else ''), (_content_139955154988272 if (_content_139955154988272 is not None) else ''), (u'\n ' if (u'\n ' is not None) else ''), )) + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</body>') + if (_backup_attrs_37004352 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_37004352 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</html>') + if (_backup_attrs_37031304 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_37031304 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) +pass \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/009.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/009.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/009.xml @@ -0,0 +1,4 @@ +<!DOCTYPE doc [ +<!ELEMENT doc (#PCDATA)> +]> +<doc> </doc> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/010-structure.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/010-structure.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/010-structure.pt @@ -0,0 +1,9 @@ +<html> + <body> + <div tal:content="text string:1 < 2" /> + <div tal:content="structure string:2 < 3, 2&3, 2<3, 2>3" /> + <div tal:content="structure string:3 ${'<'} 4" /> + <div tal:content="structure '%d < %d' % (4, 5)" /> + <div tal:replace="structure content" /> + </body> +</html> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/010-structure.pt.py b/lib/Chameleon-2.22/src/chameleon/tests/inputs/010-structure.pt.py new file mode 100644 --- /dev/null +++ 
b/lib/Chameleon-2.22/src/chameleon/tests/inputs/010-structure.pt.py @@ -0,0 +1,401 @@ +# -*- coding: utf-8 -*- +pass +from chameleon.utils import Placeholder as _Placeholder +import sys as _sys +pass +_static_38579792 = {} +_static_38552272 = {} +_static_35801168 = {} +_static_38555600 = {} +_static_35799376 = {} +_static_38553168 = {} +_static_38553552 = {} +_marker_default = _Placeholder() +import re +import functools +_marker = object() +g_re_amp = re.compile('&(?!([A-Za-z]+|#[0-9]+);)') +g_re_needs_escape = re.compile('[&<>\\"\\\']').search +re_whitespace = functools.partial(re.compile('\\s+').sub, ' ') + +def render(stream, econtext, rcontext): + append = stream.append + getitem = econtext.__getitem__ + get = econtext.get + _i18n_domain = None + re_amp = g_re_amp + re_needs_escape = g_re_needs_escape + decode = getitem('decode') + convert = getitem('convert') + translate = getitem('translate') + _backup_attrs_39070032 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24c4650> name=None at 223a290> -> _value + _value = _static_38553168 + econtext['attrs'] = _value + + # <html ... (1:0) + # -------------------------------------------------------- + append(u'<html>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_39067944 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24c4fd0> name=None at 24c4f90> -> _value + _value = _static_38555600 + econtext['attrs'] = _value + + # <body ... (2:2) + # -------------------------------------------------------- + append(u'<body>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_39069312 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24c47d0> name=None at 24c4b90> -> _value + _value = _static_38553552 + econtext['attrs'] = _value + + # <div ... 
(3:4) + # -------------------------------------------------------- + append(u'<div>') + _backup_default_39069744 = get('default', _marker) + + # <Marker name='default' at 24c4610> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u'string:1 < 2' (3:27)> -> _cache_38555024 + try: + _cache_38555024 = u'1 < 2' + except: + rcontext.setdefault('__error__', []).append((u'string:1 < 2', 3, 27, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u'string:1 < 2' (3:27)> value=<Marker name='default' at 24c4e10> at 24c4950> -> _condition + _expression = _cache_38555024 + + # <Marker name='default' at 24c4e10> -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + pass + else: + _content = _cache_38555024 + if (_content is None): + pass + else: + if (_content is False): + _content = None + else: + _tt = type(_content) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content = unicode(_content) + else: + try: + if (_tt is str): + _content = decode(_content) + else: + if (_tt is not unicode): + try: + _content = _content.__html__ + except: + _content = convert(_content) + else: + raise RuntimeError + except RuntimeError: + _content = _content() + else: + if ((_content is not None) and (re_needs_escape(_content) is not None)): + if ('&' in _content): + if (';' in _content): + _content = re_amp.sub('&', _content) + else: + _content = _content.replace('&', '&') + if ('<' in _content): + _content = _content.replace('<', '<') + if ('>' in _content): + _content = _content.replace('>', '>') + if ('\x00' in _content): + _content = _content.replace('\x00', '"') + if (_content is not None): + append(_content) + if (_backup_default_39069744 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_39069744 + append(u'</div>') + if (_backup_attrs_39069312 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_39069312 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_39070608 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24c42d0> name=None at 24c4290> -> _value + _value = _static_38552272 + econtext['attrs'] = _value + + # <div ... 
(4:4) + # -------------------------------------------------------- + append(u'<div>') + _backup_default_39044592 = get('default', _marker) + + # <Marker name='default' at 24c49d0> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u'string:2 < 3' (4:32)> -> _cache_38552400 + try: + _cache_38552400 = u'2 < 3' + except: + rcontext.setdefault('__error__', []).append((u'string:2 < 3', 4, 32, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u'string:2 < 3' (4:32)> value=<Marker name='default' at 24c4890> at 24c4250> -> _condition + _expression = _cache_38552400 + + # <Marker name='default' at 24c4890> -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + pass + else: + _content = _cache_38552400 + if (_content is not None): + _tt = type(_content) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content = str(_content) + else: + if (_tt is str): + _content = decode(_content) + else: + if (_tt is not unicode): + try: + _content = _content.__html__ + except AttributeError: + _content = convert(_content) + else: + _content = _content() + if (_content is not None): + append(_content) + if (_backup_default_39044592 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_39044592 + append(u'</div>') + if (_backup_attrs_39070608 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_39070608 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_37107256 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x2224850> name=None at 22241d0> -> _value + _value = _static_35801168 + econtext['attrs'] = _value + + # <div ... 
(5:4) + # -------------------------------------------------------- + append(u'<div>') + _backup_default_35897424 = get('default', _marker) + + # <Marker name='default' at 2224250> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u"string:3 ${'<'} 4" (5:32)> -> _cache_38551888 + try: + _cache_38551888 = '<' + _cache_38551888 = ('%s%s%s' % ((u'3 ' if (u'3 ' is not None) else ''), (_cache_38551888 if (_cache_38551888 is not None) else ''), (u' 4' if (u' 4' is not None) else ''), )) + except: + rcontext.setdefault('__error__', []).append((u"string:3 ${'<'} 4", 5, 32, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u"string:3 ${'<'} 4" (5:32)> value=<Marker name='default' at 24c4110> at 24c40d0> -> _condition + _expression = _cache_38551888 + + # <Marker name='default' at 24c4110> -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + pass + else: + _content = _cache_38551888 + if (_content is not None): + _tt = type(_content) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content = str(_content) + else: + if (_tt is str): + _content = decode(_content) + else: + if (_tt is not unicode): + try: + _content = _content.__html__ + except AttributeError: + _content = convert(_content) + else: + _content = _content() + if (_content is not None): + append(_content) + if (_backup_default_35897424 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_35897424 + append(u'</div>') + if (_backup_attrs_37107256 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_37107256 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_39055304 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x2224150> name=None at 2224dd0> -> _value + _value = _static_35799376 + econtext['attrs'] = _value + + # <div ... 
(6:4) + # -------------------------------------------------------- + append(u'<div>') + _backup_default_39086560 = get('default', _marker) + + # <Marker name='default' at 2224a10> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u"'%d < %d' % (4, 5)" (6:32)> -> _cache_35801680 + try: + _cache_35801680 = ('%d < %d' % (4, 5, )) + except: + rcontext.setdefault('__error__', []).append((u"'%d < %d' % (4, 5)", 6, 32, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u"'%d < %d' % (4, 5)" (6:32)> value=<Marker name='default' at 2224cd0> at 2224ed0> -> _condition + _expression = _cache_35801680 + + # <Marker name='default' at 2224cd0> -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + pass + else: + _content = _cache_35801680 + if (_content is not None): + _tt = type(_content) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content = str(_content) + else: + if (_tt is str): + _content = decode(_content) + else: + if (_tt is not unicode): + try: + _content = _content.__html__ + except AttributeError: + _content = convert(_content) + else: + _content = _content() + if (_content is not None): + append(_content) + if (_backup_default_39086560 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_39086560 + append(u'</div>') + if (_backup_attrs_39055304 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_39055304 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_default_39076928 = get('default', _marker) + + # <Marker name='default' at 24ca810> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u'content' (7:32)> -> _cache_38578128 + try: + _cache_38578128 = getitem('content') + except: + rcontext.setdefault('__error__', []).append((u'content', 7, 32, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u'content' (7:32)> value=<Marker name='default' at 24cab50> at 24ca610> -> _condition + _expression = _cache_38578128 + + # <Marker name='default' at 24cab50> -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + _backup_attrs_39051992 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24cae50> name=None at 24cac10> -> _value + _value = _static_38579792 + econtext['attrs'] = _value + + # <div ... 
(7:4) + # -------------------------------------------------------- + append(u'<div />') + if (_backup_attrs_39051992 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_39051992 + else: + _content = _cache_38578128 + if (_content is not None): + _tt = type(_content) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content = str(_content) + else: + if (_tt is str): + _content = decode(_content) + else: + if (_tt is not unicode): + try: + _content = _content.__html__ + except AttributeError: + _content = convert(_content) + else: + _content = _content() + if (_content is not None): + append(_content) + if (_backup_default_39076928 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_39076928 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</body>') + if (_backup_attrs_39067944 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_39067944 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</html>') + if (_backup_attrs_39070032 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_39070032 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) +pass \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/010.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/010.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/010.xml @@ -0,0 +1,5 @@ +<!DOCTYPE doc [ +<!ELEMENT doc (#PCDATA)> +<!ATTLIST doc a1 CDATA #IMPLIED> +]> +<doc a1="v1" ></doc> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/011-messages.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/011-messages.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/011-messages.pt @@ -0,0 +1,9 @@ +<html> + <body> + <div tal:content="text message" /> + <div tal:content="structure message" /> + <div tal:content="text string:${message}" /> + <div tal:content="structure string:${message}" /> + ${message} + </body> +</html> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/011-messages.pt-en.py b/lib/Chameleon-2.22/src/chameleon/tests/inputs/011-messages.pt-en.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/011-messages.pt-en.py @@ -0,0 +1,406 @@ +# -*- coding: utf-8 -*- +pass +from chameleon.utils import Placeholder as _Placeholder +import sys as _sys +pass +_static_38588816 = {} +_static_35799696 = {} +_static_38462608 = {} +_static_38590736 = {} +_static_38589968 = {} +_static_35802320 = {} +_marker_default = _Placeholder() +import re +import functools +_marker = object() +g_re_amp = re.compile('&(?!([A-Za-z]+|#[0-9]+);)') +g_re_needs_escape = re.compile('[&<>\\"\\\']').search +re_whitespace = functools.partial(re.compile('\\s+').sub, ' ') + +def render(stream, econtext, rcontext): + append = stream.append + getitem = econtext.__getitem__ + get = econtext.get + _i18n_domain = None + re_amp = g_re_amp + re_needs_escape = g_re_needs_escape + decode = getitem('decode') + convert = getitem('convert') + translate = getitem('translate') + _backup_attrs_35342528 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24cd910> name=None at 24cd410> -> _value + _value = _static_38590736 + econtext['attrs'] = _value + + # <html ... 
(1:0) + # -------------------------------------------------------- + append(u'<html>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_35343752 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24cd610> name=None at 24cd8d0> -> _value + _value = _static_38589968 + econtext['attrs'] = _value + + # <body ... (2:2) + # -------------------------------------------------------- + append(u'<body>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_35343680 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24cd190> name=None at 24cdfd0> -> _value + _value = _static_38588816 + econtext['attrs'] = _value + + # <div ... (3:4) + # -------------------------------------------------------- + append(u'<div>') + _backup_default_35343392 = get('default', _marker) + + # <Marker name='default' at 24cd710> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u'message' (3:27)> -> _cache_38588624 + try: + _cache_38588624 = getitem('message') + except: + rcontext.setdefault('__error__', []).append((u'message', 3, 27, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u'message' (3:27)> value=<Marker name='default' at 24cd590> at 24cd490> -> _condition + _expression = _cache_38588624 + + # <Marker name='default' at 24cd590> -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + pass + else: + _content = _cache_38588624 + if (_content is None): + pass + else: + if (_content is False): + _content = None + else: + _tt = type(_content) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content = unicode(_content) + else: + try: + if (_tt is str): + _content = decode(_content) + else: + if (_tt is not unicode): + try: + _content = _content.__html__ + except: + _content = convert(_content) + else: + raise RuntimeError + except RuntimeError: + _content = _content() + else: + if ((_content is not None) and (re_needs_escape(_content) is not None)): + if ('&' in _content): + if (';' in _content): + _content = re_amp.sub('&', _content) + else: + _content = _content.replace('&', '&') + if ('<' in _content): + _content = _content.replace('<', '<') + if ('>' in _content): + _content = _content.replace('>', '>') + if ('\x00' in _content): + _content = _content.replace('\x00', '"') + if (_content is not None): + append(_content) + if (_backup_default_35343392 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_35343392 + append(u'</div>') + if (_backup_attrs_35343680 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_35343680 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_35343176 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24ae490> name=None at 24ae810> -> _value + _value = _static_38462608 + econtext['attrs'] = _value + + # <div ... 
(4:4) + # -------------------------------------------------------- + append(u'<div>') + _backup_default_35344112 = get('default', _marker) + + # <Marker name='default' at 24aebd0> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u'message' (4:32)> -> _cache_38461520 + try: + _cache_38461520 = getitem('message') + except: + rcontext.setdefault('__error__', []).append((u'message', 4, 32, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u'message' (4:32)> value=<Marker name='default' at 24ae910> at 24ae210> -> _condition + _expression = _cache_38461520 + + # <Marker name='default' at 24ae910> -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + pass + else: + _content = _cache_38461520 + if (_content is not None): + _tt = type(_content) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content = str(_content) + else: + if (_tt is str): + _content = decode(_content) + else: + if (_tt is not unicode): + try: + _content = _content.__html__ + except AttributeError: + _content = convert(_content) + else: + _content = _content() + if (_content is not None): + append(_content) + if (_backup_default_35344112 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_35344112 + append(u'</div>') + if (_backup_attrs_35343176 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_35343176 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_38452616 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x2224290> name=None at 2224050> -> _value + _value = _static_35799696 + econtext['attrs'] = _value + + # <div ... 
(5:4) + # -------------------------------------------------------- + append(u'<div>') + _backup_default_35340656 = get('default', _marker) + + # <Marker name='default' at 24ae110> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u'string:${message}' (5:27)> -> _cache_38464720 + try: + _cache_38464720 = getitem('message') + _cache_38464720 = _cache_38464720 + except: + rcontext.setdefault('__error__', []).append((u'string:${message}', 5, 27, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u'string:${message}' (5:27)> value=<Marker name='default' at 24ae690> at 24ae990> -> _condition + _expression = _cache_38464720 + + # <Marker name='default' at 24ae690> -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + pass + else: + _content = _cache_38464720 + if (_content is None): + pass + else: + if (_content is False): + _content = None + else: + _tt = type(_content) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content = unicode(_content) + else: + try: + if (_tt is str): + _content = decode(_content) + else: + if (_tt is not unicode): + try: + _content = _content.__html__ + except: + _content = convert(_content) + else: + raise RuntimeError + except RuntimeError: + _content = _content() + else: + if ((_content is not None) and (re_needs_escape(_content) is not None)): + if ('&' in _content): + if (';' in _content): + _content = re_amp.sub('&', _content) + else: + _content = _content.replace('&', '&') + if ('<' in _content): + _content = _content.replace('<', '<') + if ('>' in _content): + _content = _content.replace('>', '>') + if ('\x00' in _content): + _content = _content.replace('\x00', '"') + if (_content is not None): + append(_content) + if (_backup_default_35340656 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_35340656 + append(u'</div>') + if (_backup_attrs_38452616 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_38452616 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_37030800 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x2224cd0> name=None at 22244d0> -> _value + _value = _static_35802320 + econtext['attrs'] = _value + + # <div ... 
(6:4) + # -------------------------------------------------------- + append(u'<div>') + _backup_default_35875584 = get('default', _marker) + + # <Marker name='default' at 2224d10> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u'string:${message}' (6:32)> -> _cache_35802448 + try: + _cache_35802448 = getitem('message') + _cache_35802448 = _cache_35802448 + except: + rcontext.setdefault('__error__', []).append((u'string:${message}', 6, 32, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u'string:${message}' (6:32)> value=<Marker name='default' at 22243d0> at 2224090> -> _condition + _expression = _cache_35802448 + + # <Marker name='default' at 22243d0> -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + pass + else: + _content = _cache_35802448 + if (_content is not None): + _tt = type(_content) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content = str(_content) + else: + if (_tt is str): + _content = decode(_content) + else: + if (_tt is not unicode): + try: + _content = _content.__html__ + except AttributeError: + _content = convert(_content) + else: + _content = _content() + if (_content is not None): + append(_content) + if (_backup_default_35875584 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_35875584 + append(u'</div>') + if (_backup_attrs_37030800 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_37030800 + + # <Expression u'message' (7:6)> -> _content_139955154988272 + try: + _content_139955154988272 = getitem('message') + except: + rcontext.setdefault('__error__', []).append((u'message', 7, 6, '<string>', _sys.exc_info()[1], )) + raise + + if (_content_139955154988272 is None): + pass + else: + if (_content_139955154988272 is False): + _content_139955154988272 = None + else: + _tt = type(_content_139955154988272) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content_139955154988272 = unicode(_content_139955154988272) + else: + try: + if (_tt is str): + _content_139955154988272 = decode(_content_139955154988272) + else: + if (_tt is not unicode): + try: + _content_139955154988272 = _content_139955154988272.__html__ + except: + _content_139955154988272 = convert(_content_139955154988272) + else: + raise RuntimeError + except RuntimeError: + _content_139955154988272 = _content_139955154988272() + else: + if ((_content_139955154988272 is not None) and (re_needs_escape(_content_139955154988272) is not None)): + if ('&' in _content_139955154988272): + if (';' in _content_139955154988272): + _content_139955154988272 = re_amp.sub('&', _content_139955154988272) + else: + _content_139955154988272 = _content_139955154988272.replace('&', '&') + if ('<' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('<', '<') + if ('>' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('>', '>') + if ('\x00' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('\x00', '"') + _content_139955154988272 = ('%s%s%s' % ((u'\n ' if (u'\n ' is not None) else ''), (_content_139955154988272 if (_content_139955154988272 is not None) else ''), (u'\n ' if (u'\n ' is not None) else ''), )) + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</body>') + if (_backup_attrs_35343752 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = 
_backup_attrs_35343752 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</html>') + if (_backup_attrs_35342528 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_35342528 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) +pass \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/011-messages.pt.py b/lib/Chameleon-2.22/src/chameleon/tests/inputs/011-messages.pt.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/011-messages.pt.py @@ -0,0 +1,406 @@ +# -*- coding: utf-8 -*- +pass +from chameleon.utils import Placeholder as _Placeholder +import sys as _sys +pass +_static_38464336 = {} +_static_38462992 = {} +_static_35800464 = {} +_static_38589648 = {} +_static_35799184 = {} +_static_38591312 = {} +_marker_default = _Placeholder() +import re +import functools +_marker = object() +g_re_amp = re.compile('&(?!([A-Za-z]+|#[0-9]+);)') +g_re_needs_escape = re.compile('[&<>\\"\\\']').search +re_whitespace = functools.partial(re.compile('\\s+').sub, ' ') + +def render(stream, econtext, rcontext): + append = stream.append + getitem = econtext.__getitem__ + get = econtext.get + _i18n_domain = None + re_amp = g_re_amp + re_needs_escape = g_re_needs_escape + decode = getitem('decode') + convert = getitem('convert') + translate = getitem('translate') + _backup_attrs_38435944 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x2224590> name=None at 2224e10> -> _value + _value = _static_35800464 + econtext['attrs'] = _value + + # <html ... (1:0) + # -------------------------------------------------------- + append(u'<html>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_35756224 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x2224090> name=None at 22243d0> -> _value + _value = _static_35799184 + econtext['attrs'] = _value + + # <body ... (2:2) + # -------------------------------------------------------- + append(u'<body>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_35757952 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24cdb50> name=None at 24cde50> -> _value + _value = _static_38591312 + econtext['attrs'] = _value + + # <div ... 
(3:4) + # -------------------------------------------------------- + append(u'<div>') + _backup_default_35754928 = get('default', _marker) + + # <Marker name='default' at 24cd810> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u'message' (3:27)> -> _cache_35800272 + try: + _cache_35800272 = getitem('message') + except: + rcontext.setdefault('__error__', []).append((u'message', 3, 27, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u'message' (3:27)> value=<Marker name='default' at 2317990> at 2317810> -> _condition + _expression = _cache_35800272 + + # <Marker name='default' at 2317990> -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + pass + else: + _content = _cache_35800272 + if (_content is None): + pass + else: + if (_content is False): + _content = None + else: + _tt = type(_content) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content = unicode(_content) + else: + try: + if (_tt is str): + _content = decode(_content) + else: + if (_tt is not unicode): + try: + _content = _content.__html__ + except: + _content = convert(_content) + else: + raise RuntimeError + except RuntimeError: + _content = _content() + else: + if ((_content is not None) and (re_needs_escape(_content) is not None)): + if ('&' in _content): + if (';' in _content): + _content = re_amp.sub('&', _content) + else: + _content = _content.replace('&', '&') + if ('<' in _content): + _content = _content.replace('<', '<') + if ('>' in _content): + _content = _content.replace('>', '>') + if ('\x00' in _content): + _content = _content.replace('\x00', '"') + if (_content is not None): + append(_content) + if (_backup_default_35754928 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_35754928 + append(u'</div>') + if (_backup_attrs_35757952 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_35757952 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_35754136 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24cd4d0> name=None at 24cd390> -> _value + _value = _static_38589648 + econtext['attrs'] = _value + + # <div ... 
(4:4) + # -------------------------------------------------------- + append(u'<div>') + _backup_default_35755144 = get('default', _marker) + + # <Marker name='default' at 24cd590> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u'message' (4:32)> -> _cache_38591632 + try: + _cache_38591632 = getitem('message') + except: + rcontext.setdefault('__error__', []).append((u'message', 4, 32, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u'message' (4:32)> value=<Marker name='default' at 24cd7d0> at 24cde90> -> _condition + _expression = _cache_38591632 + + # <Marker name='default' at 24cd7d0> -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + pass + else: + _content = _cache_38591632 + if (_content is not None): + _tt = type(_content) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content = str(_content) + else: + if (_tt is str): + _content = decode(_content) + else: + if (_tt is not unicode): + try: + _content = _content.__html__ + except AttributeError: + _content = convert(_content) + else: + _content = _content() + if (_content is not None): + append(_content) + if (_backup_default_35755144 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_35755144 + append(u'</div>') + if (_backup_attrs_35754136 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_35754136 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_35756728 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24aeb50> name=None at 24aead0> -> _value + _value = _static_38464336 + econtext['attrs'] = _value + + # <div ... 
(5:4) + # -------------------------------------------------------- + append(u'<div>') + _backup_default_35756008 = get('default', _marker) + + # <Marker name='default' at 24aeb90> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u'string:${message}' (5:27)> -> _cache_38592208 + try: + _cache_38592208 = getitem('message') + _cache_38592208 = _cache_38592208 + except: + rcontext.setdefault('__error__', []).append((u'string:${message}', 5, 27, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u'string:${message}' (5:27)> value=<Marker name='default' at 222be10> at 222bd50> -> _condition + _expression = _cache_38592208 + + # <Marker name='default' at 222be10> -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + pass + else: + _content = _cache_38592208 + if (_content is None): + pass + else: + if (_content is False): + _content = None + else: + _tt = type(_content) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content = unicode(_content) + else: + try: + if (_tt is str): + _content = decode(_content) + else: + if (_tt is not unicode): + try: + _content = _content.__html__ + except: + _content = convert(_content) + else: + raise RuntimeError + except RuntimeError: + _content = _content() + else: + if ((_content is not None) and (re_needs_escape(_content) is not None)): + if ('&' in _content): + if (';' in _content): + _content = re_amp.sub('&', _content) + else: + _content = _content.replace('&', '&') + if ('<' in _content): + _content = _content.replace('<', '<') + if ('>' in _content): + _content = _content.replace('>', '>') + if ('\x00' in _content): + _content = _content.replace('\x00', '"') + if (_content is not None): + append(_content) + if (_backup_default_35756008 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_35756008 + append(u'</div>') + if (_backup_attrs_35756728 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_35756728 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_39036616 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24ae610> name=None at 24ae490> -> _value + _value = _static_38462992 + econtext['attrs'] = _value + + # <div ... 
(6:4) + # -------------------------------------------------------- + append(u'<div>') + _backup_default_37197080 = get('default', _marker) + + # <Marker name='default' at 24ae410> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u'string:${message}' (6:32)> -> _cache_38465488 + try: + _cache_38465488 = getitem('message') + _cache_38465488 = _cache_38465488 + except: + rcontext.setdefault('__error__', []).append((u'string:${message}', 6, 32, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u'string:${message}' (6:32)> value=<Marker name='default' at 24ae3d0> at 24ae190> -> _condition + _expression = _cache_38465488 + + # <Marker name='default' at 24ae3d0> -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + pass + else: + _content = _cache_38465488 + if (_content is not None): + _tt = type(_content) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content = str(_content) + else: + if (_tt is str): + _content = decode(_content) + else: + if (_tt is not unicode): + try: + _content = _content.__html__ + except AttributeError: + _content = convert(_content) + else: + _content = _content() + if (_content is not None): + append(_content) + if (_backup_default_37197080 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_37197080 + append(u'</div>') + if (_backup_attrs_39036616 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_39036616 + + # <Expression u'message' (7:6)> -> _content_139955154988272 + try: + _content_139955154988272 = getitem('message') + except: + rcontext.setdefault('__error__', []).append((u'message', 7, 6, '<string>', _sys.exc_info()[1], )) + raise + + if (_content_139955154988272 is None): + pass + else: + if (_content_139955154988272 is False): + _content_139955154988272 = None + else: + _tt = type(_content_139955154988272) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content_139955154988272 = unicode(_content_139955154988272) + else: + try: + if (_tt is str): + _content_139955154988272 = decode(_content_139955154988272) + else: + if (_tt is not unicode): + try: + _content_139955154988272 = _content_139955154988272.__html__ + except: + _content_139955154988272 = convert(_content_139955154988272) + else: + raise RuntimeError + except RuntimeError: + _content_139955154988272 = _content_139955154988272() + else: + if ((_content_139955154988272 is not None) and (re_needs_escape(_content_139955154988272) is not None)): + if ('&' in _content_139955154988272): + if (';' in _content_139955154988272): + _content_139955154988272 = re_amp.sub('&', _content_139955154988272) + else: + _content_139955154988272 = _content_139955154988272.replace('&', '&') + if ('<' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('<', '<') + if ('>' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('>', '>') + if ('\x00' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('\x00', '"') + _content_139955154988272 = ('%s%s%s' % ((u'\n ' if (u'\n ' is not None) else ''), (_content_139955154988272 if (_content_139955154988272 is not None) else ''), (u'\n ' if (u'\n ' is not None) else ''), )) + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</body>') + if (_backup_attrs_35756224 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = 
_backup_attrs_35756224 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</html>') + if (_backup_attrs_38435944 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_38435944 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) +pass \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/011.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/011.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/011.xml @@ -0,0 +1,5 @@ +<!DOCTYPE doc [ +<!ELEMENT doc (#PCDATA)> +<!ATTLIST doc a1 CDATA #IMPLIED a2 CDATA #IMPLIED> +]> +<doc a1="v1" a2="v2"></doc> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/012-translation.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/012-translation.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/012-translation.pt @@ -0,0 +1,22 @@ +<html> + <body> + <div i18n:translate=""></div> + <div i18n:translate=""> + Hello world! + </div> + <div i18n:translate="hello_world"> + Hello world! + </div> + <div i18n:translate=""> + <sup>Hello world!</sup> + </div> + <div i18n:translate=""> + Hello <em i18n:name="first">${'world'}</em>! + Goodbye <em i18n:name="second">${'planet'}</em>! + </div> + <div i18n:translate="hello_goodbye"> + Hello <em i18n:name="first">${'world'}</em>! + Goodbye <em i18n:name="second">${'planet'}</em>! + </div> + </body> +</html> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/012-translation.pt-en.py b/lib/Chameleon-2.22/src/chameleon/tests/inputs/012-translation.pt-en.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/012-translation.pt-en.py @@ -0,0 +1,491 @@ +# -*- coding: utf-8 -*- +pass +import sys as _sys +from chameleon.utils import DebuggingOutputStream as _DebuggingOutputStream +pass +_static_38488016 = {} +_static_38579984 = {} +_static_38488272 = {} +_static_38487184 = {} +_static_38579536 = {} +_static_35831120 = {} +_static_38489936 = {} +_static_38487632 = {} +_static_35883600 = {} +_static_36668432 = {} +_static_38576848 = {} +_static_35331024 = {} +import re +import functools +_marker = object() +g_re_amp = re.compile('&(?!([A-Za-z]+|#[0-9]+);)') +g_re_needs_escape = re.compile('[&<>\\"\\\']').search +re_whitespace = functools.partial(re.compile('\\s+').sub, ' ') + +def render(stream, econtext, rcontext): + append = stream.append + getitem = econtext.__getitem__ + get = econtext.get + _i18n_domain = None + re_amp = g_re_amp + re_needs_escape = g_re_needs_escape + decode = getitem('decode') + convert = getitem('convert') + translate = getitem('translate') + _backup_attrs_38523248 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x22f8410> name=None at 2372990> -> _value + _value = _static_36668432 + econtext['attrs'] = _value + + # <html ... (1:0) + # -------------------------------------------------------- + append(u'<html>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_38625792 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x2238a50> name=None at 2238690> -> _value + _value = _static_35883600 + econtext['attrs'] = _value + + # <body ... 
(2:2) + # -------------------------------------------------------- + append(u'<body>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_38624936 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x222bd50> name=None at 222be10> -> _value + _value = _static_35831120 + econtext['attrs'] = _value + + # <div ... (3:4) + # -------------------------------------------------------- + append(u'<div>') + _stream_35884752 = _DebuggingOutputStream() + _append_35884752 = _stream_35884752.append + _content_139955154988272 = u'\n Hello world!\n ' + if (_content_139955154988272 is not None): + _append_35884752(_content_139955154988272) + _msgid_35884752 = re_whitespace(''.join(_stream_35884752)).strip() + append(translate(_msgid_35884752, mapping=None, default=_msgid_35884752, domain=_i18n_domain)) + append(u'</div>') + if (_backup_attrs_38624936 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_38624936 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_38619904 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x21b1bd0> name=None at 23171d0> -> _value + _value = _static_35331024 + econtext['attrs'] = _value + + # <div ... (6:4) + # -------------------------------------------------------- + append(u'<div>') + _stream_36796624 = _DebuggingOutputStream() + _append_36796624 = _stream_36796624.append + _content_139955154988272 = u'\n Hello world!\n ' + if (_content_139955154988272 is not None): + _append_36796624(_content_139955154988272) + _msgid_36796624 = re_whitespace(''.join(_stream_36796624)).strip() + append(translate(u'hello_world', mapping=None, default=_msgid_36796624, domain=_i18n_domain)) + append(u'</div>') + if (_backup_attrs_38619904 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_38619904 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_38620264 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24b4f50> name=None at 24b4d10> -> _value + _value = _static_38489936 + econtext['attrs'] = _value + + # <div ... (9:4) + # -------------------------------------------------------- + append(u'<div>') + _stream_38487504 = _DebuggingOutputStream() + _append_38487504 = _stream_38487504.append + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + _append_38487504(_content_139955154988272) + _backup_attrs_38617888 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24b48d0> name=None at 24b4e10> -> _value + _value = _static_38488272 + econtext['attrs'] = _value + + # <sup ... (10:6) + # -------------------------------------------------------- + _append_38487504(u'<sup>') + _content_139955154988272 = u'Hello world!' 
+ if (_content_139955154988272 is not None): + _append_38487504(_content_139955154988272) + _append_38487504(u'</sup>') + if (_backup_attrs_38617888 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_38617888 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + _append_38487504(_content_139955154988272) + _msgid_38487504 = re_whitespace(''.join(_stream_38487504)).strip() + append(translate(_msgid_38487504, mapping=None, default=_msgid_38487504, domain=_i18n_domain)) + append(u'</div>') + if (_backup_attrs_38620264 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_38620264 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_38616672 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24b47d0> name=None at 24b4890> -> _value + _value = _static_38488016 + econtext['attrs'] = _value + + # <div ... (12:4) + # -------------------------------------------------------- + append(u'<div>') + _stream_35215016_first = '' + _stream_35215016_second = '' + _stream_38487888 = _DebuggingOutputStream() + _append_38487888 = _stream_38487888.append + _content_139955154988272 = u'\n Hello ' + if (_content_139955154988272 is not None): + _append_38487888(_content_139955154988272) + _stream_35215016_first = _DebuggingOutputStream() + _append_35215016_first = _stream_35215016_first.append + _backup_attrs_39096120 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24b4650> name=None at 24b42d0> -> _value + _value = _static_38487632 + econtext['attrs'] = _value + + # <em ... (13:12) + # -------------------------------------------------------- + _append_35215016_first(u'<em>') + + # <Expression u"'world'" (13:36)> -> _content_139955154988272 + try: + _content_139955154988272 = 'world' + except: + rcontext.setdefault('__error__', []).append((u"'world'", 13, 36, '<string>', _sys.exc_info()[1], )) + raise + + if (_content_139955154988272 is None): + pass + else: + if (_content_139955154988272 is False): + _content_139955154988272 = None + else: + _tt = type(_content_139955154988272) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content_139955154988272 = unicode(_content_139955154988272) + else: + try: + if (_tt is str): + _content_139955154988272 = decode(_content_139955154988272) + else: + if (_tt is not unicode): + try: + _content_139955154988272 = _content_139955154988272.__html__ + except: + _content_139955154988272 = convert(_content_139955154988272) + else: + raise RuntimeError + except RuntimeError: + _content_139955154988272 = _content_139955154988272() + else: + if ((_content_139955154988272 is not None) and (re_needs_escape(_content_139955154988272) is not None)): + if ('&' in _content_139955154988272): + if (';' in _content_139955154988272): + _content_139955154988272 = re_amp.sub('&', _content_139955154988272) + else: + _content_139955154988272 = _content_139955154988272.replace('&', '&') + if ('<' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('<', '<') + if ('>' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('>', '>') + if ('\x00' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('\x00', '"') + _content_139955154988272 = _content_139955154988272 + if (_content_139955154988272 is not None): + _append_35215016_first(_content_139955154988272) + 
_append_35215016_first(u'</em>') + if (_backup_attrs_39096120 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_39096120 + _append_38487888(u'${first}') + _stream_35215016_first = ''.join(_stream_35215016_first) + _content_139955154988272 = u'!\n Goodbye ' + if (_content_139955154988272 is not None): + _append_38487888(_content_139955154988272) + _stream_35215016_second = _DebuggingOutputStream() + _append_35215016_second = _stream_35215016_second.append + _backup_attrs_39090152 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24b4490> name=None at 24b4150> -> _value + _value = _static_38487184 + econtext['attrs'] = _value + + # <em ... (14:14) + # -------------------------------------------------------- + _append_35215016_second(u'<em>') + + # <Expression u"'planet'" (14:39)> -> _content_139955154988272 + try: + _content_139955154988272 = 'planet' + except: + rcontext.setdefault('__error__', []).append((u"'planet'", 14, 39, '<string>', _sys.exc_info()[1], )) + raise + + if (_content_139955154988272 is None): + pass + else: + if (_content_139955154988272 is False): + _content_139955154988272 = None + else: + _tt = type(_content_139955154988272) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content_139955154988272 = unicode(_content_139955154988272) + else: + try: + if (_tt is str): + _content_139955154988272 = decode(_content_139955154988272) + else: + if (_tt is not unicode): + try: + _content_139955154988272 = _content_139955154988272.__html__ + except: + _content_139955154988272 = convert(_content_139955154988272) + else: + raise RuntimeError + except RuntimeError: + _content_139955154988272 = _content_139955154988272() + else: + if ((_content_139955154988272 is not None) and (re_needs_escape(_content_139955154988272) is not None)): + if ('&' in _content_139955154988272): + if (';' in _content_139955154988272): + _content_139955154988272 = re_amp.sub('&', _content_139955154988272) + else: + _content_139955154988272 = _content_139955154988272.replace('&', '&') + if ('<' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('<', '<') + if ('>' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('>', '>') + if ('\x00' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('\x00', '"') + _content_139955154988272 = _content_139955154988272 + if (_content_139955154988272 is not None): + _append_35215016_second(_content_139955154988272) + _append_35215016_second(u'</em>') + if (_backup_attrs_39090152 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_39090152 + _append_38487888(u'${second}') + _stream_35215016_second = ''.join(_stream_35215016_second) + _content_139955154988272 = u'!\n ' + if (_content_139955154988272 is not None): + _append_38487888(_content_139955154988272) + _msgid_38487888 = re_whitespace(''.join(_stream_38487888)).strip() + append(translate(_msgid_38487888, mapping={u'second': _stream_35215016_second, u'first': _stream_35215016_first, }, default=_msgid_38487888, domain=_i18n_domain)) + append(u'</div>') + if (_backup_attrs_38616672 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_38616672 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_39091376 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24caf10> name=None at 24ca850> -> _value + 
_value = _static_38579984 + econtext['attrs'] = _value + + # <div ... (16:4) + # -------------------------------------------------------- + append(u'<div>') + _stream_35215016_first = '' + _stream_35215016_second = '' + _stream_38486672 = _DebuggingOutputStream() + _append_38486672 = _stream_38486672.append + _content_139955154988272 = u'\n Hello ' + if (_content_139955154988272 is not None): + _append_38486672(_content_139955154988272) + _stream_35215016_first = _DebuggingOutputStream() + _append_35215016_first = _stream_35215016_first.append + _backup_attrs_39080520 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24ca2d0> name=None at 24cafd0> -> _value + _value = _static_38576848 + econtext['attrs'] = _value + + # <em ... (17:12) + # -------------------------------------------------------- + _append_35215016_first(u'<em>') + + # <Expression u"'world'" (17:36)> -> _content_139955154988272 + try: + _content_139955154988272 = 'world' + except: + rcontext.setdefault('__error__', []).append((u"'world'", 17, 36, '<string>', _sys.exc_info()[1], )) + raise + + if (_content_139955154988272 is None): + pass + else: + if (_content_139955154988272 is False): + _content_139955154988272 = None + else: + _tt = type(_content_139955154988272) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content_139955154988272 = unicode(_content_139955154988272) + else: + try: + if (_tt is str): + _content_139955154988272 = decode(_content_139955154988272) + else: + if (_tt is not unicode): + try: + _content_139955154988272 = _content_139955154988272.__html__ + except: + _content_139955154988272 = convert(_content_139955154988272) + else: + raise RuntimeError + except RuntimeError: + _content_139955154988272 = _content_139955154988272() + else: + if ((_content_139955154988272 is not None) and (re_needs_escape(_content_139955154988272) is not None)): + if ('&' in _content_139955154988272): + if (';' in _content_139955154988272): + _content_139955154988272 = re_amp.sub('&', _content_139955154988272) + else: + _content_139955154988272 = _content_139955154988272.replace('&', '&') + if ('<' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('<', '<') + if ('>' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('>', '>') + if ('\x00' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('\x00', '"') + _content_139955154988272 = _content_139955154988272 + if (_content_139955154988272 is not None): + _append_35215016_first(_content_139955154988272) + _append_35215016_first(u'</em>') + if (_backup_attrs_39080520 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_39080520 + _append_38486672(u'${first}') + _stream_35215016_first = ''.join(_stream_35215016_first) + _content_139955154988272 = u'!\n Goodbye ' + if (_content_139955154988272 is not None): + _append_38486672(_content_139955154988272) + _stream_35215016_second = _DebuggingOutputStream() + _append_35215016_second = _stream_35215016_second.append + _backup_attrs_39080592 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24cad50> name=None at 24ca110> -> _value + _value = _static_38579536 + econtext['attrs'] = _value + + # <em ... 
(18:14) + # -------------------------------------------------------- + _append_35215016_second(u'<em>') + + # <Expression u"'planet'" (18:39)> -> _content_139955154988272 + try: + _content_139955154988272 = 'planet' + except: + rcontext.setdefault('__error__', []).append((u"'planet'", 18, 39, '<string>', _sys.exc_info()[1], )) + raise + + if (_content_139955154988272 is None): + pass + else: + if (_content_139955154988272 is False): + _content_139955154988272 = None + else: + _tt = type(_content_139955154988272) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content_139955154988272 = unicode(_content_139955154988272) + else: + try: + if (_tt is str): + _content_139955154988272 = decode(_content_139955154988272) + else: + if (_tt is not unicode): + try: + _content_139955154988272 = _content_139955154988272.__html__ + except: + _content_139955154988272 = convert(_content_139955154988272) + else: + raise RuntimeError + except RuntimeError: + _content_139955154988272 = _content_139955154988272() + else: + if ((_content_139955154988272 is not None) and (re_needs_escape(_content_139955154988272) is not None)): + if ('&' in _content_139955154988272): + if (';' in _content_139955154988272): + _content_139955154988272 = re_amp.sub('&', _content_139955154988272) + else: + _content_139955154988272 = _content_139955154988272.replace('&', '&') + if ('<' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('<', '<') + if ('>' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('>', '>') + if ('\x00' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('\x00', '"') + _content_139955154988272 = _content_139955154988272 + if (_content_139955154988272 is not None): + _append_35215016_second(_content_139955154988272) + _append_35215016_second(u'</em>') + if (_backup_attrs_39080592 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_39080592 + _append_38486672(u'${second}') + _stream_35215016_second = ''.join(_stream_35215016_second) + _content_139955154988272 = u'!\n ' + if (_content_139955154988272 is not None): + _append_38486672(_content_139955154988272) + _msgid_38486672 = re_whitespace(''.join(_stream_38486672)).strip() + append(translate(u'hello_goodbye', mapping={u'second': _stream_35215016_second, u'first': _stream_35215016_first, }, default=_msgid_38486672, domain=_i18n_domain)) + append(u'</div>') + if (_backup_attrs_39091376 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_39091376 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</body>') + if (_backup_attrs_38625792 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_38625792 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</html>') + if (_backup_attrs_38523248 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_38523248 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) +pass \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/012-translation.pt.py b/lib/Chameleon-2.22/src/chameleon/tests/inputs/012-translation.pt.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/012-translation.pt.py @@ -0,0 +1,491 @@ +# -*- 
coding: utf-8 -*- +pass +import sys as _sys +from chameleon.utils import DebuggingOutputStream as _DebuggingOutputStream +pass +_static_38489680 = {} +_static_38487248 = {} +_static_38578448 = {} +_static_38486544 = {} +_static_35828816 = {} +_static_38488720 = {} +_static_36670992 = {} +_static_38579280 = {} +_static_35884240 = {} +_static_38486480 = {} +_static_38578832 = {} +_static_38486800 = {} +import re +import functools +_marker = object() +g_re_amp = re.compile('&(?!([A-Za-z]+|#[0-9]+);)') +g_re_needs_escape = re.compile('[&<>\\"\\\']').search +re_whitespace = functools.partial(re.compile('\\s+').sub, ' ') + +def render(stream, econtext, rcontext): + append = stream.append + getitem = econtext.__getitem__ + get = econtext.get + _i18n_domain = None + re_amp = g_re_amp + re_needs_escape = g_re_needs_escape + decode = getitem('decode') + convert = getitem('convert') + translate = getitem('translate') + _backup_attrs_38432352 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x222b450> name=None at 222bfd0> -> _value + _value = _static_35828816 + econtext['attrs'] = _value + + # <html ... (1:0) + # -------------------------------------------------------- + append(u'<html>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_38429976 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24b4310> name=None at 24b4a50> -> _value + _value = _static_38486800 + econtext['attrs'] = _value + + # <body ... (2:2) + # -------------------------------------------------------- + append(u'<body>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_38429832 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24b44d0> name=None at 24b45d0> -> _value + _value = _static_38487248 + econtext['attrs'] = _value + + # <div ... (3:4) + # -------------------------------------------------------- + append(u'<div>') + _stream_38489104 = _DebuggingOutputStream() + _append_38489104 = _stream_38489104.append + _content_139955154988272 = u'\n Hello world!\n ' + if (_content_139955154988272 is not None): + _append_38489104(_content_139955154988272) + _msgid_38489104 = re_whitespace(''.join(_stream_38489104)).strip() + append(translate(_msgid_38489104, mapping=None, default=_msgid_38489104, domain=_i18n_domain)) + append(u'</div>') + if (_backup_attrs_38429832 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_38429832 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_38412088 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24b4210> name=None at 24b4510> -> _value + _value = _static_38486544 + econtext['attrs'] = _value + + # <div ... 
(6:4) + # -------------------------------------------------------- + append(u'<div>') + _stream_38487760 = _DebuggingOutputStream() + _append_38487760 = _stream_38487760.append + _content_139955154988272 = u'\n Hello world!\n ' + if (_content_139955154988272 is not None): + _append_38487760(_content_139955154988272) + _msgid_38487760 = re_whitespace(''.join(_stream_38487760)).strip() + append(translate(u'hello_world', mapping=None, default=_msgid_38487760, domain=_i18n_domain)) + append(u'</div>') + if (_backup_attrs_38412088 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_38412088 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_38411152 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24b4e50> name=None at 24b4950> -> _value + _value = _static_38489680 + econtext['attrs'] = _value + + # <div ... (9:4) + # -------------------------------------------------------- + append(u'<div>') + _stream_38487888 = _DebuggingOutputStream() + _append_38487888 = _stream_38487888.append + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + _append_38487888(_content_139955154988272) + _backup_attrs_38409280 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24b4a90> name=None at 24b4f10> -> _value + _value = _static_38488720 + econtext['attrs'] = _value + + # <sup ... (10:6) + # -------------------------------------------------------- + _append_38487888(u'<sup>') + _content_139955154988272 = u'Hello world!' + if (_content_139955154988272 is not None): + _append_38487888(_content_139955154988272) + _append_38487888(u'</sup>') + if (_backup_attrs_38409280 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_38409280 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + _append_38487888(_content_139955154988272) + _msgid_38487888 = re_whitespace(''.join(_stream_38487888)).strip() + append(translate(_msgid_38487888, mapping=None, default=_msgid_38487888, domain=_i18n_domain)) + append(u'</div>') + if (_backup_attrs_38411152 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_38411152 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_38408848 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24b41d0> name=None at 24b4350> -> _value + _value = _static_38486480 + econtext['attrs'] = _value + + # <div ... (12:4) + # -------------------------------------------------------- + append(u'<div>') + _stream_35215016_first = '' + _stream_35215016_second = '' + _stream_38489040 = _DebuggingOutputStream() + _append_38489040 = _stream_38489040.append + _content_139955154988272 = u'\n Hello ' + if (_content_139955154988272 is not None): + _append_38489040(_content_139955154988272) + _stream_35215016_first = _DebuggingOutputStream() + _append_35215016_first = _stream_35215016_first.append + _backup_attrs_38460520 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24cac50> name=None at 24ca050> -> _value + _value = _static_38579280 + econtext['attrs'] = _value + + # <em ... 
(13:12) + # -------------------------------------------------------- + _append_35215016_first(u'<em>') + + # <Expression u"'world'" (13:36)> -> _content_139955154988272 + try: + _content_139955154988272 = 'world' + except: + rcontext.setdefault('__error__', []).append((u"'world'", 13, 36, '<string>', _sys.exc_info()[1], )) + raise + + if (_content_139955154988272 is None): + pass + else: + if (_content_139955154988272 is False): + _content_139955154988272 = None + else: + _tt = type(_content_139955154988272) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content_139955154988272 = unicode(_content_139955154988272) + else: + try: + if (_tt is str): + _content_139955154988272 = decode(_content_139955154988272) + else: + if (_tt is not unicode): + try: + _content_139955154988272 = _content_139955154988272.__html__ + except: + _content_139955154988272 = convert(_content_139955154988272) + else: + raise RuntimeError + except RuntimeError: + _content_139955154988272 = _content_139955154988272() + else: + if ((_content_139955154988272 is not None) and (re_needs_escape(_content_139955154988272) is not None)): + if ('&' in _content_139955154988272): + if (';' in _content_139955154988272): + _content_139955154988272 = re_amp.sub('&', _content_139955154988272) + else: + _content_139955154988272 = _content_139955154988272.replace('&', '&') + if ('<' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('<', '<') + if ('>' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('>', '>') + if ('\x00' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('\x00', '"') + _content_139955154988272 = _content_139955154988272 + if (_content_139955154988272 is not None): + _append_35215016_first(_content_139955154988272) + _append_35215016_first(u'</em>') + if (_backup_attrs_38460520 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_38460520 + _append_38489040(u'${first}') + _stream_35215016_first = ''.join(_stream_35215016_first) + _content_139955154988272 = u'!\n Goodbye ' + if (_content_139955154988272 is not None): + _append_38489040(_content_139955154988272) + _stream_35215016_second = _DebuggingOutputStream() + _append_35215016_second = _stream_35215016_second.append + _backup_attrs_38461384 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24ca910> name=None at 24cae10> -> _value + _value = _static_38578448 + econtext['attrs'] = _value + + # <em ... 
(14:14) + # -------------------------------------------------------- + _append_35215016_second(u'<em>') + + # <Expression u"'planet'" (14:39)> -> _content_139955154988272 + try: + _content_139955154988272 = 'planet' + except: + rcontext.setdefault('__error__', []).append((u"'planet'", 14, 39, '<string>', _sys.exc_info()[1], )) + raise + + if (_content_139955154988272 is None): + pass + else: + if (_content_139955154988272 is False): + _content_139955154988272 = None + else: + _tt = type(_content_139955154988272) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content_139955154988272 = unicode(_content_139955154988272) + else: + try: + if (_tt is str): + _content_139955154988272 = decode(_content_139955154988272) + else: + if (_tt is not unicode): + try: + _content_139955154988272 = _content_139955154988272.__html__ + except: + _content_139955154988272 = convert(_content_139955154988272) + else: + raise RuntimeError + except RuntimeError: + _content_139955154988272 = _content_139955154988272() + else: + if ((_content_139955154988272 is not None) and (re_needs_escape(_content_139955154988272) is not None)): + if ('&' in _content_139955154988272): + if (';' in _content_139955154988272): + _content_139955154988272 = re_amp.sub('&', _content_139955154988272) + else: + _content_139955154988272 = _content_139955154988272.replace('&', '&') + if ('<' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('<', '<') + if ('>' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('>', '>') + if ('\x00' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('\x00', '"') + _content_139955154988272 = _content_139955154988272 + if (_content_139955154988272 is not None): + _append_35215016_second(_content_139955154988272) + _append_35215016_second(u'</em>') + if (_backup_attrs_38461384 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_38461384 + _append_38489040(u'${second}') + _stream_35215016_second = ''.join(_stream_35215016_second) + _content_139955154988272 = u'!\n ' + if (_content_139955154988272 is not None): + _append_38489040(_content_139955154988272) + _msgid_38489040 = re_whitespace(''.join(_stream_38489040)).strip() + append(translate(_msgid_38489040, mapping={u'second': _stream_35215016_second, u'first': _stream_35215016_first, }, default=_msgid_38489040, domain=_i18n_domain)) + append(u'</div>') + if (_backup_attrs_38408848 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_38408848 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_38459368 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24caa90> name=None at 24ca6d0> -> _value + _value = _static_38578832 + econtext['attrs'] = _value + + # <div ... 
(16:4) + # -------------------------------------------------------- + append(u'<div>') + _stream_35215016_first = '' + _stream_35215016_second = '' + _stream_38578576 = _DebuggingOutputStream() + _append_38578576 = _stream_38578576.append + _content_139955154988272 = u'\n Hello ' + if (_content_139955154988272 is not None): + _append_38578576(_content_139955154988272) + _stream_35215016_first = _DebuggingOutputStream() + _append_35215016_first = _stream_35215016_first.append + _backup_attrs_37105888 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x2238cd0> name=None at 2238f90> -> _value + _value = _static_35884240 + econtext['attrs'] = _value + + # <em ... (17:12) + # -------------------------------------------------------- + _append_35215016_first(u'<em>') + + # <Expression u"'world'" (17:36)> -> _content_139955154988272 + try: + _content_139955154988272 = 'world' + except: + rcontext.setdefault('__error__', []).append((u"'world'", 17, 36, '<string>', _sys.exc_info()[1], )) + raise + + if (_content_139955154988272 is None): + pass + else: + if (_content_139955154988272 is False): + _content_139955154988272 = None + else: + _tt = type(_content_139955154988272) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content_139955154988272 = unicode(_content_139955154988272) + else: + try: + if (_tt is str): + _content_139955154988272 = decode(_content_139955154988272) + else: + if (_tt is not unicode): + try: + _content_139955154988272 = _content_139955154988272.__html__ + except: + _content_139955154988272 = convert(_content_139955154988272) + else: + raise RuntimeError + except RuntimeError: + _content_139955154988272 = _content_139955154988272() + else: + if ((_content_139955154988272 is not None) and (re_needs_escape(_content_139955154988272) is not None)): + if ('&' in _content_139955154988272): + if (';' in _content_139955154988272): + _content_139955154988272 = re_amp.sub('&', _content_139955154988272) + else: + _content_139955154988272 = _content_139955154988272.replace('&', '&') + if ('<' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('<', '<') + if ('>' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('>', '>') + if ('\x00' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('\x00', '"') + _content_139955154988272 = _content_139955154988272 + if (_content_139955154988272 is not None): + _append_35215016_first(_content_139955154988272) + _append_35215016_first(u'</em>') + if (_backup_attrs_37105888 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_37105888 + _append_38578576(u'${first}') + _stream_35215016_first = ''.join(_stream_35215016_first) + _content_139955154988272 = u'!\n Goodbye ' + if (_content_139955154988272 is not None): + _append_38578576(_content_139955154988272) + _stream_35215016_second = _DebuggingOutputStream() + _append_35215016_second = _stream_35215016_second.append + _backup_attrs_37108840 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x22f8e10> name=None at 22f8510> -> _value + _value = _static_36670992 + econtext['attrs'] = _value + + # <em ... 
(18:14) + # -------------------------------------------------------- + _append_35215016_second(u'<em>') + + # <Expression u"'planet'" (18:39)> -> _content_139955154988272 + try: + _content_139955154988272 = 'planet' + except: + rcontext.setdefault('__error__', []).append((u"'planet'", 18, 39, '<string>', _sys.exc_info()[1], )) + raise + + if (_content_139955154988272 is None): + pass + else: + if (_content_139955154988272 is False): + _content_139955154988272 = None + else: + _tt = type(_content_139955154988272) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content_139955154988272 = unicode(_content_139955154988272) + else: + try: + if (_tt is str): + _content_139955154988272 = decode(_content_139955154988272) + else: + if (_tt is not unicode): + try: + _content_139955154988272 = _content_139955154988272.__html__ + except: + _content_139955154988272 = convert(_content_139955154988272) + else: + raise RuntimeError + except RuntimeError: + _content_139955154988272 = _content_139955154988272() + else: + if ((_content_139955154988272 is not None) and (re_needs_escape(_content_139955154988272) is not None)): + if ('&' in _content_139955154988272): + if (';' in _content_139955154988272): + _content_139955154988272 = re_amp.sub('&', _content_139955154988272) + else: + _content_139955154988272 = _content_139955154988272.replace('&', '&') + if ('<' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('<', '<') + if ('>' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('>', '>') + if ('\x00' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('\x00', '"') + _content_139955154988272 = _content_139955154988272 + if (_content_139955154988272 is not None): + _append_35215016_second(_content_139955154988272) + _append_35215016_second(u'</em>') + if (_backup_attrs_37108840 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_37108840 + _append_38578576(u'${second}') + _stream_35215016_second = ''.join(_stream_35215016_second) + _content_139955154988272 = u'!\n ' + if (_content_139955154988272 is not None): + _append_38578576(_content_139955154988272) + _msgid_38578576 = re_whitespace(''.join(_stream_38578576)).strip() + append(translate(u'hello_goodbye', mapping={u'second': _stream_35215016_second, u'first': _stream_35215016_first, }, default=_msgid_38578576, domain=_i18n_domain)) + append(u'</div>') + if (_backup_attrs_38459368 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_38459368 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</body>') + if (_backup_attrs_38429976 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_38429976 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</html>') + if (_backup_attrs_38432352 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_38432352 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) +pass \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/012.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/012.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/012.xml @@ -0,0 +1,5 @@ +<!DOCTYPE doc [ +<!ELEMENT doc (#PCDATA)> 
+<!ATTLIST doc : CDATA #IMPLIED> +]> +<doc :="v1"></doc> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/013-repeat-nested.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/013-repeat-nested.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/013-repeat-nested.pt @@ -0,0 +1,11 @@ +<html> + <body> + <table> + <tr tal:repeat="i (1,2)"> + <td tal:repeat="j (1,2)"> + [${i},${j}] + </td> + </tr> + </table> + </body> +</html> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/013-repeat-nested.pt.py b/lib/Chameleon-2.22/src/chameleon/tests/inputs/013-repeat-nested.pt.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/013-repeat-nested.pt.py @@ -0,0 +1,255 @@ +# -*- coding: utf-8 -*- +pass +import sys as _sys +pass +_static_36668880 = {} +_static_38578832 = {} +_static_38579728 = {} +_static_38579280 = {} +_static_35883664 = {} +import re +import functools +_marker = object() +g_re_amp = re.compile('&(?!([A-Za-z]+|#[0-9]+);)') +g_re_needs_escape = re.compile('[&<>\\"\\\']').search +re_whitespace = functools.partial(re.compile('\\s+').sub, ' ') + +def render(stream, econtext, rcontext): + append = stream.append + getitem = econtext.__getitem__ + get = econtext.get + _i18n_domain = None + re_amp = g_re_amp + re_needs_escape = g_re_needs_escape + decode = getitem('decode') + convert = getitem('convert') + translate = getitem('translate') + _backup_attrs_35898288 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24cac50> name=None at 24ca050> -> _value + _value = _static_38579280 + econtext['attrs'] = _value + + # <html ... (1:0) + # -------------------------------------------------------- + append(u'<html>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_38449232 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24cae10> name=None at 24cadd0> -> _value + _value = _static_38579728 + econtext['attrs'] = _value + + # <body ... (2:2) + # -------------------------------------------------------- + append(u'<body>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_38451104 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24caa90> name=None at 24ca6d0> -> _value + _value = _static_38578832 + econtext['attrs'] = _value + + # <table ... (3:4) + # -------------------------------------------------------- + append(u'<table>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_i_38402704 = get('i', _marker) + + # <Expression u'(1,2)' (4:24)> -> _iterator + try: + _iterator = (1, 2, ) + except: + rcontext.setdefault('__error__', []).append((u'(1,2)', 4, 24, '<string>', _sys.exc_info()[1], )) + raise + + (_iterator, __index_35889808, ) = getitem('repeat')(u'i', _iterator) + econtext['i'] = None + for _item in _iterator: + econtext['i'] = _item + _backup_attrs_38450384 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x22f85d0> name=None at 2372f50> -> _value + _value = _static_36668880 + econtext['attrs'] = _value + + # <tr ... 
(4:6) + # -------------------------------------------------------- + append(u'<tr>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_j_34805456 = get('j', _marker) + + # <Expression u'(1,2)' (5:26)> -> _iterator + try: + _iterator = (1, 2, ) + except: + rcontext.setdefault('__error__', []).append((u'(1,2)', 5, 26, '<string>', _sys.exc_info()[1], )) + raise + + (_iterator, __index_35883600, ) = getitem('repeat')(u'j', _iterator) + econtext['j'] = None + for _item in _iterator: + econtext['j'] = _item + _backup_attrs_38450744 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x2238a90> name=None at 2238a10> -> _value + _value = _static_35883664 + econtext['attrs'] = _value + + # <td ... (5:8) + # -------------------------------------------------------- + append(u'<td>') + + # <Expression u'i' (6:13)> -> _content_139955154988272 + try: + _content_139955154988272 = getitem('i') + except: + rcontext.setdefault('__error__', []).append((u'i', 6, 13, '<string>', _sys.exc_info()[1], )) + raise + + if (_content_139955154988272 is None): + pass + else: + if (_content_139955154988272 is False): + _content_139955154988272 = None + else: + _tt = type(_content_139955154988272) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content_139955154988272 = unicode(_content_139955154988272) + else: + try: + if (_tt is str): + _content_139955154988272 = decode(_content_139955154988272) + else: + if (_tt is not unicode): + try: + _content_139955154988272 = _content_139955154988272.__html__ + except: + _content_139955154988272 = convert(_content_139955154988272) + else: + raise RuntimeError + except RuntimeError: + _content_139955154988272 = _content_139955154988272() + else: + if ((_content_139955154988272 is not None) and (re_needs_escape(_content_139955154988272) is not None)): + if ('&' in _content_139955154988272): + if (';' in _content_139955154988272): + _content_139955154988272 = re_amp.sub('&', _content_139955154988272) + else: + _content_139955154988272 = _content_139955154988272.replace('&', '&') + if ('<' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('<', '<') + if ('>' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('>', '>') + if ('\x00' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('\x00', '"') + + # <Expression u'j' (6:18)> -> _content_139955154988272_110 + try: + _content_139955154988272_110 = getitem('j') + except: + rcontext.setdefault('__error__', []).append((u'j', 6, 18, '<string>', _sys.exc_info()[1], )) + raise + + if (_content_139955154988272_110 is None): + pass + else: + if (_content_139955154988272_110 is False): + _content_139955154988272_110 = None + else: + _tt = type(_content_139955154988272_110) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content_139955154988272_110 = unicode(_content_139955154988272_110) + else: + try: + if (_tt is str): + _content_139955154988272_110 = decode(_content_139955154988272_110) + else: + if (_tt is not unicode): + try: + _content_139955154988272_110 = _content_139955154988272_110.__html__ + except: + _content_139955154988272_110 = convert(_content_139955154988272_110) + else: + raise RuntimeError + except RuntimeError: + _content_139955154988272_110 = _content_139955154988272_110() + else: + if ((_content_139955154988272_110 is not None) and (re_needs_escape(_content_139955154988272_110) is not None)): + 
if ('&' in _content_139955154988272_110): + if (';' in _content_139955154988272_110): + _content_139955154988272_110 = re_amp.sub('&', _content_139955154988272_110) + else: + _content_139955154988272_110 = _content_139955154988272_110.replace('&', '&') + if ('<' in _content_139955154988272_110): + _content_139955154988272_110 = _content_139955154988272_110.replace('<', '<') + if ('>' in _content_139955154988272_110): + _content_139955154988272_110 = _content_139955154988272_110.replace('>', '>') + if ('\x00' in _content_139955154988272_110): + _content_139955154988272_110 = _content_139955154988272_110.replace('\x00', '"') + _content_139955154988272 = ('%s%s%s%s%s' % ((u'\n [' if (u'\n [' is not None) else ''), (_content_139955154988272 if (_content_139955154988272 is not None) else ''), (u',' if (u',' is not None) else ''), (_content_139955154988272_110 if (_content_139955154988272_110 is not None) else ''), (u']\n ' if (u']\n ' is not None) else ''), )) + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</td>') + if (_backup_attrs_38450744 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_38450744 + __index_35883600 -= 1 + if (__index_35883600 > 0): + append('\n ') + if (_backup_j_34805456 is _marker): + del econtext['j'] + else: + econtext['j'] = _backup_j_34805456 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</tr>') + if (_backup_attrs_38450384 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_38450384 + __index_35889808 -= 1 + if (__index_35889808 > 0): + append('\n ') + if (_backup_i_38402704 is _marker): + del econtext['i'] + else: + econtext['i'] = _backup_i_38402704 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</table>') + if (_backup_attrs_38451104 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_38451104 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</body>') + if (_backup_attrs_38449232 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_38449232 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</html>') + if (_backup_attrs_35898288 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_35898288 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) +pass \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/013.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/013.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/013.xml @@ -0,0 +1,5 @@ +<!DOCTYPE doc [ +<!ELEMENT doc (#PCDATA)> +<!ATTLIST doc _.-0123456789 CDATA #IMPLIED> +]> +<doc _.-0123456789="v1"></doc> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/014-repeat-nested-similar.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/014-repeat-nested-similar.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/014-repeat-nested-similar.pt @@ -0,0 +1,7 @@ +<html> + <body> + <span tal:repeat="i (3,4)"> + <span tal:repeat="j (3,4)">[${i},${j}]</span> + </span> + </body> +</html> diff --git 
a/lib/Chameleon-2.22/src/chameleon/tests/inputs/014-repeat-nested-similar.pt.py b/lib/Chameleon-2.22/src/chameleon/tests/inputs/014-repeat-nested-similar.pt.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/014-repeat-nested-similar.pt.py @@ -0,0 +1,234 @@ +# -*- coding: utf-8 -*- +pass +import sys as _sys +pass +_static_36670928 = {} +_static_35883856 = {} +_static_38579280 = {} +_static_38578448 = {} +import re +import functools +_marker = object() +g_re_amp = re.compile('&(?!([A-Za-z]+|#[0-9]+);)') +g_re_needs_escape = re.compile('[&<>\\"\\\']').search +re_whitespace = functools.partial(re.compile('\\s+').sub, ' ') + +def render(stream, econtext, rcontext): + append = stream.append + getitem = econtext.__getitem__ + get = econtext.get + _i18n_domain = None + re_amp = g_re_amp + re_needs_escape = g_re_needs_escape + decode = getitem('decode') + convert = getitem('convert') + translate = getitem('translate') + _backup_attrs_38460808 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x2238b50> name=None at 2238f90> -> _value + _value = _static_35883856 + econtext['attrs'] = _value + + # <html ... (1:0) + # -------------------------------------------------------- + append(u'<html>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_38457496 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x22f8dd0> name=None at 22f8610> -> _value + _value = _static_36670928 + econtext['attrs'] = _value + + # <body ... (2:2) + # -------------------------------------------------------- + append(u'<body>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_i_38402704 = get('i', _marker) + + # <Expression u'(3,4)' (3:24)> -> _iterator + try: + _iterator = (3, 4, ) + except: + rcontext.setdefault('__error__', []).append((u'(3,4)', 3, 24, '<string>', _sys.exc_info()[1], )) + raise + + (_iterator, __index_38579664, ) = getitem('repeat')(u'i', _iterator) + econtext['i'] = None + for _item in _iterator: + econtext['i'] = _item + _backup_attrs_38524328 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24cac50> name=None at 24ca050> -> _value + _value = _static_38579280 + econtext['attrs'] = _value + + # <span ... (3:4) + # -------------------------------------------------------- + append(u'<span>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_j_35779152 = get('j', _marker) + + # <Expression u'(3,4)' (4:26)> -> _iterator + try: + _iterator = (3, 4, ) + except: + rcontext.setdefault('__error__', []).append((u'(3,4)', 4, 26, '<string>', _sys.exc_info()[1], )) + raise + + (_iterator, __index_38578064, ) = getitem('repeat')(u'j', _iterator) + econtext['j'] = None + for _item in _iterator: + econtext['j'] = _item + _backup_attrs_38460304 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24ca910> name=None at 24cad10> -> _value + _value = _static_38578448 + econtext['attrs'] = _value + + # <span ... 
(4:6) + # -------------------------------------------------------- + append(u'<span>') + + # <Expression u'i' (4:36)> -> _content_139955154988272 + try: + _content_139955154988272 = getitem('i') + except: + rcontext.setdefault('__error__', []).append((u'i', 4, 36, '<string>', _sys.exc_info()[1], )) + raise + + if (_content_139955154988272 is None): + pass + else: + if (_content_139955154988272 is False): + _content_139955154988272 = None + else: + _tt = type(_content_139955154988272) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content_139955154988272 = unicode(_content_139955154988272) + else: + try: + if (_tt is str): + _content_139955154988272 = decode(_content_139955154988272) + else: + if (_tt is not unicode): + try: + _content_139955154988272 = _content_139955154988272.__html__ + except: + _content_139955154988272 = convert(_content_139955154988272) + else: + raise RuntimeError + except RuntimeError: + _content_139955154988272 = _content_139955154988272() + else: + if ((_content_139955154988272 is not None) and (re_needs_escape(_content_139955154988272) is not None)): + if ('&' in _content_139955154988272): + if (';' in _content_139955154988272): + _content_139955154988272 = re_amp.sub('&', _content_139955154988272) + else: + _content_139955154988272 = _content_139955154988272.replace('&', '&') + if ('<' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('<', '<') + if ('>' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('>', '>') + if ('\x00' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('\x00', '"') + + # <Expression u'j' (4:41)> -> _content_139955154988272_87 + try: + _content_139955154988272_87 = getitem('j') + except: + rcontext.setdefault('__error__', []).append((u'j', 4, 41, '<string>', _sys.exc_info()[1], )) + raise + + if (_content_139955154988272_87 is None): + pass + else: + if (_content_139955154988272_87 is False): + _content_139955154988272_87 = None + else: + _tt = type(_content_139955154988272_87) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content_139955154988272_87 = unicode(_content_139955154988272_87) + else: + try: + if (_tt is str): + _content_139955154988272_87 = decode(_content_139955154988272_87) + else: + if (_tt is not unicode): + try: + _content_139955154988272_87 = _content_139955154988272_87.__html__ + except: + _content_139955154988272_87 = convert(_content_139955154988272_87) + else: + raise RuntimeError + except RuntimeError: + _content_139955154988272_87 = _content_139955154988272_87() + else: + if ((_content_139955154988272_87 is not None) and (re_needs_escape(_content_139955154988272_87) is not None)): + if ('&' in _content_139955154988272_87): + if (';' in _content_139955154988272_87): + _content_139955154988272_87 = re_amp.sub('&', _content_139955154988272_87) + else: + _content_139955154988272_87 = _content_139955154988272_87.replace('&', '&') + if ('<' in _content_139955154988272_87): + _content_139955154988272_87 = _content_139955154988272_87.replace('<', '<') + if ('>' in _content_139955154988272_87): + _content_139955154988272_87 = _content_139955154988272_87.replace('>', '>') + if ('\x00' in _content_139955154988272_87): + _content_139955154988272_87 = _content_139955154988272_87.replace('\x00', '"') + _content_139955154988272 = ('%s%s%s%s%s' % ((u'[' if (u'[' is not None) else ''), (_content_139955154988272 if (_content_139955154988272 is not None) else ''), (u',' if (u',' is not 
None) else ''), (_content_139955154988272_87 if (_content_139955154988272_87 is not None) else ''), (u']' if (u']' is not None) else ''), )) + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</span>') + if (_backup_attrs_38460304 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_38460304 + __index_38578064 -= 1 + if (__index_38578064 > 0): + append('\n ') + if (_backup_j_35779152 is _marker): + del econtext['j'] + else: + econtext['j'] = _backup_j_35779152 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</span>') + if (_backup_attrs_38524328 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_38524328 + __index_38579664 -= 1 + if (__index_38579664 > 0): + append('\n ') + if (_backup_i_38402704 is _marker): + del econtext['i'] + else: + econtext['i'] = _backup_i_38402704 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</body>') + if (_backup_attrs_38457496 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_38457496 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</html>') + if (_backup_attrs_38460808 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_38460808 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) +pass \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/014.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/014.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/014.xml @@ -0,0 +1,5 @@ +<!DOCTYPE doc [ +<!ELEMENT doc (#PCDATA)> +<!ATTLIST doc abcdefghijklmnopqrstuvwxyz CDATA #IMPLIED> +]> +<doc abcdefghijklmnopqrstuvwxyz="v1"></doc> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/015-translation-nested.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/015-translation-nested.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/015-translation-nested.pt @@ -0,0 +1,10 @@ +<html> + <body> + <div i18n:translate=""> + Price: + <span i18n:name="price" i18n:translate=""> + Per kilo <em i18n:name="amount">${12.5}</em> + </span> + </div> + </body> +</html> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/015-translation-nested.pt-en.py b/lib/Chameleon-2.22/src/chameleon/tests/inputs/015-translation-nested.pt-en.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/015-translation-nested.pt-en.py @@ -0,0 +1,190 @@ +# -*- coding: utf-8 -*- +pass +import sys as _sys +from chameleon.utils import DebuggingOutputStream as _DebuggingOutputStream +pass +_static_36670928 = {} +_static_38428304 = {} +_static_38579280 = {} +_static_38488592 = {} +_static_38578448 = {} +import re +import functools +_marker = object() +g_re_amp = re.compile('&(?!([A-Za-z]+|#[0-9]+);)') +g_re_needs_escape = re.compile('[&<>\\"\\\']').search +re_whitespace = functools.partial(re.compile('\\s+').sub, ' ') + +def render(stream, econtext, rcontext): + append = stream.append + getitem = econtext.__getitem__ + get = econtext.get + _i18n_domain = None + re_amp = g_re_amp + re_needs_escape = g_re_needs_escape + decode = getitem('decode') + convert = getitem('convert') + translate = getitem('translate') + 
_backup_attrs_38960088 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x22f8dd0> name=None at 22f8610> -> _value + _value = _static_36670928 + econtext['attrs'] = _value + + # <html ... (1:0) + # -------------------------------------------------------- + append(u'<html>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_38430264 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24cac50> name=None at 24ca050> -> _value + _value = _static_38579280 + econtext['attrs'] = _value + + # <body ... (2:2) + # -------------------------------------------------------- + append(u'<body>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_39079448 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24ca910> name=None at 24cad10> -> _value + _value = _static_38578448 + econtext['attrs'] = _value + + # <div ... (3:4) + # -------------------------------------------------------- + append(u'<div>') + _stream_35215016_price = '' + _stream_38577872 = _DebuggingOutputStream() + _append_38577872 = _stream_38577872.append + _content_139955154988272 = u'\n Price:\n ' + if (_content_139955154988272 is not None): + _append_38577872(_content_139955154988272) + _stream_35215016_price = _DebuggingOutputStream() + _append_35215016_price = _stream_35215016_price.append + _backup_attrs_39124432 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24b4a10> name=None at 24b4d50> -> _value + _value = _static_38488592 + econtext['attrs'] = _value + + # <span ... (5:6) + # -------------------------------------------------------- + _append_35215016_price(u'<span>') + _stream_35215248_amount = '' + _stream_38576464 = _DebuggingOutputStream() + _append_38576464 = _stream_38576464.append + _content_139955154988272 = u'\n Per kilo ' + if (_content_139955154988272 is not None): + _append_38576464(_content_139955154988272) + _stream_35215248_amount = _DebuggingOutputStream() + _append_35215248_amount = _stream_35215248_amount.append + _backup_attrs_38958432 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24a5e90> name=None at 24a5bd0> -> _value + _value = _static_38428304 + econtext['attrs'] = _value + + # <em ... 
(6:17) + # -------------------------------------------------------- + _append_35215248_amount(u'<em>') + + # <Expression u'12.5' (6:42)> -> _content_139955154988272 + try: + _content_139955154988272 = 12.5 + except: + rcontext.setdefault('__error__', []).append((u'12.5', 6, 42, '<string>', _sys.exc_info()[1], )) + raise + + if (_content_139955154988272 is None): + pass + else: + if (_content_139955154988272 is False): + _content_139955154988272 = None + else: + _tt = type(_content_139955154988272) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content_139955154988272 = unicode(_content_139955154988272) + else: + try: + if (_tt is str): + _content_139955154988272 = decode(_content_139955154988272) + else: + if (_tt is not unicode): + try: + _content_139955154988272 = _content_139955154988272.__html__ + except: + _content_139955154988272 = convert(_content_139955154988272) + else: + raise RuntimeError + except RuntimeError: + _content_139955154988272 = _content_139955154988272() + else: + if ((_content_139955154988272 is not None) and (re_needs_escape(_content_139955154988272) is not None)): + if ('&' in _content_139955154988272): + if (';' in _content_139955154988272): + _content_139955154988272 = re_amp.sub('&', _content_139955154988272) + else: + _content_139955154988272 = _content_139955154988272.replace('&', '&') + if ('<' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('<', '<') + if ('>' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('>', '>') + if ('\x00' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('\x00', '"') + _content_139955154988272 = _content_139955154988272 + if (_content_139955154988272 is not None): + _append_35215248_amount(_content_139955154988272) + _append_35215248_amount(u'</em>') + if (_backup_attrs_38958432 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_38958432 + _append_38576464(u'${amount}') + _stream_35215248_amount = ''.join(_stream_35215248_amount) + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + _append_38576464(_content_139955154988272) + _msgid_38576464 = re_whitespace(''.join(_stream_38576464)).strip() + _append_35215016_price(translate(_msgid_38576464, mapping={u'amount': _stream_35215248_amount, }, default=_msgid_38576464, domain=_i18n_domain)) + _append_35215016_price(u'</span>') + if (_backup_attrs_39124432 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_39124432 + _append_38577872(u'${price}') + _stream_35215016_price = ''.join(_stream_35215016_price) + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + _append_38577872(_content_139955154988272) + _msgid_38577872 = re_whitespace(''.join(_stream_38577872)).strip() + append(translate(_msgid_38577872, mapping={u'price': _stream_35215016_price, }, default=_msgid_38577872, domain=_i18n_domain)) + append(u'</div>') + if (_backup_attrs_39079448 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_39079448 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</body>') + if (_backup_attrs_38430264 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_38430264 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</html>') + if 
(_backup_attrs_38960088 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_38960088 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) +pass \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/015-translation-nested.pt.py b/lib/Chameleon-2.22/src/chameleon/tests/inputs/015-translation-nested.pt.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/015-translation-nested.pt.py @@ -0,0 +1,190 @@ +# -*- coding: utf-8 -*- +pass +import sys as _sys +from chameleon.utils import DebuggingOutputStream as _DebuggingOutputStream +pass +_static_38579280 = {} +_static_38425872 = {} +_static_38427536 = {} +_static_36668688 = {} +_static_38576400 = {} +import re +import functools +_marker = object() +g_re_amp = re.compile('&(?!([A-Za-z]+|#[0-9]+);)') +g_re_needs_escape = re.compile('[&<>\\"\\\']').search +re_whitespace = functools.partial(re.compile('\\s+').sub, ' ') + +def render(stream, econtext, rcontext): + append = stream.append + getitem = econtext.__getitem__ + get = econtext.get + _i18n_domain = None + re_amp = g_re_amp + re_needs_escape = g_re_needs_escape + decode = getitem('decode') + convert = getitem('convert') + translate = getitem('translate') + _backup_attrs_38408776 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24a5510> name=None at 24a59d0> -> _value + _value = _static_38425872 + econtext['attrs'] = _value + + # <html ... (1:0) + # -------------------------------------------------------- + append(u'<html>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_36681056 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24a5b90> name=None at 24a5490> -> _value + _value = _static_38427536 + econtext['attrs'] = _value + + # <body ... (2:2) + # -------------------------------------------------------- + append(u'<body>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_37011112 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24cac50> name=None at 24ca050> -> _value + _value = _static_38579280 + econtext['attrs'] = _value + + # <div ... (3:4) + # -------------------------------------------------------- + append(u'<div>') + _stream_35215016_price = '' + _stream_38579984 = _DebuggingOutputStream() + _append_38579984 = _stream_38579984.append + _content_139955154988272 = u'\n Price:\n ' + if (_content_139955154988272 is not None): + _append_38579984(_content_139955154988272) + _stream_35215016_price = _DebuggingOutputStream() + _append_35215016_price = _stream_35215016_price.append + _backup_attrs_38411512 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24ca110> name=None at 24cad90> -> _value + _value = _static_38576400 + econtext['attrs'] = _value + + # <span ... 
(5:6) + # -------------------------------------------------------- + _append_35215016_price(u'<span>') + _stream_35215248_amount = '' + _stream_38578448 = _DebuggingOutputStream() + _append_38578448 = _stream_38578448.append + _content_139955154988272 = u'\n Per kilo ' + if (_content_139955154988272 is not None): + _append_38578448(_content_139955154988272) + _stream_35215248_amount = _DebuggingOutputStream() + _append_35215248_amount = _stream_35215248_amount.append + _backup_attrs_37011184 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x22f8510> name=None at 24cacd0> -> _value + _value = _static_36668688 + econtext['attrs'] = _value + + # <em ... (6:17) + # -------------------------------------------------------- + _append_35215248_amount(u'<em>') + + # <Expression u'12.5' (6:42)> -> _content_139955154988272 + try: + _content_139955154988272 = 12.5 + except: + rcontext.setdefault('__error__', []).append((u'12.5', 6, 42, '<string>', _sys.exc_info()[1], )) + raise + + if (_content_139955154988272 is None): + pass + else: + if (_content_139955154988272 is False): + _content_139955154988272 = None + else: + _tt = type(_content_139955154988272) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content_139955154988272 = unicode(_content_139955154988272) + else: + try: + if (_tt is str): + _content_139955154988272 = decode(_content_139955154988272) + else: + if (_tt is not unicode): + try: + _content_139955154988272 = _content_139955154988272.__html__ + except: + _content_139955154988272 = convert(_content_139955154988272) + else: + raise RuntimeError + except RuntimeError: + _content_139955154988272 = _content_139955154988272() + else: + if ((_content_139955154988272 is not None) and (re_needs_escape(_content_139955154988272) is not None)): + if ('&' in _content_139955154988272): + if (';' in _content_139955154988272): + _content_139955154988272 = re_amp.sub('&', _content_139955154988272) + else: + _content_139955154988272 = _content_139955154988272.replace('&', '&') + if ('<' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('<', '<') + if ('>' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('>', '>') + if ('\x00' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('\x00', '"') + _content_139955154988272 = _content_139955154988272 + if (_content_139955154988272 is not None): + _append_35215248_amount(_content_139955154988272) + _append_35215248_amount(u'</em>') + if (_backup_attrs_37011184 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_37011184 + _append_38578448(u'${amount}') + _stream_35215248_amount = ''.join(_stream_35215248_amount) + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + _append_38578448(_content_139955154988272) + _msgid_38578448 = re_whitespace(''.join(_stream_38578448)).strip() + _append_35215016_price(translate(_msgid_38578448, mapping={u'amount': _stream_35215248_amount, }, default=_msgid_38578448, domain=_i18n_domain)) + _append_35215016_price(u'</span>') + if (_backup_attrs_38411512 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_38411512 + _append_38579984(u'${price}') + _stream_35215016_price = ''.join(_stream_35215016_price) + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + _append_38579984(_content_139955154988272) + _msgid_38579984 = re_whitespace(''.join(_stream_38579984)).strip() + 
append(translate(_msgid_38579984, mapping={u'price': _stream_35215016_price, }, default=_msgid_38579984, domain=_i18n_domain)) + append(u'</div>') + if (_backup_attrs_37011112 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_37011112 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</body>') + if (_backup_attrs_36681056 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_36681056 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</html>') + if (_backup_attrs_38408776 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_38408776 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) +pass \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/015.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/015.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/015.xml @@ -0,0 +1,5 @@ +<!DOCTYPE doc [ +<!ELEMENT doc (#PCDATA)> +<!ATTLIST doc ABCDEFGHIJKLMNOPQRSTUVWXYZ CDATA #IMPLIED> +]> +<doc ABCDEFGHIJKLMNOPQRSTUVWXYZ="v1"></doc> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/016-explicit-translation.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/016-explicit-translation.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/016-explicit-translation.pt @@ -0,0 +1,11 @@ +<html> + <body> + <div i18n:translate="" tal:content="string:Hello world!"> + Hello world! + </div> + <img alt="${'Hello world!'}" i18n:attributes="alt" /> + <img alt="${'Hello world!'}" i18n:attributes="alt hello_world" /> + <img tal:attributes="alt 'Hello world!'" i18n:attributes="alt" /> + <img tal:attributes="alt 'Hello world!'" i18n:attributes="alt hello_world" /> + </body> +</html> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/016-explicit-translation.pt-en.py b/lib/Chameleon-2.22/src/chameleon/tests/inputs/016-explicit-translation.pt-en.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/016-explicit-translation.pt-en.py @@ -0,0 +1,419 @@ +# -*- coding: utf-8 -*- +pass +from chameleon.utils import Placeholder as _Placeholder +import sys as _sys +pass +_static_37011728 = {u'alt': u"${'Hello world!'}", } +_static_38463824 = {u'alt': u"'Hello world!'", } +_static_38579664 = {} +_static_38427088 = {} +_static_38579728 = {} +_static_38425616 = {u'alt': u"${'Hello world!'}", } +_static_38465488 = {u'alt': u"'Hello world!'", } +_marker_default = _Placeholder() +import re +import functools +_marker = object() +g_re_amp = re.compile('&(?!([A-Za-z]+|#[0-9]+);)') +g_re_needs_escape = re.compile('[&<>\\"\\\']').search +re_whitespace = functools.partial(re.compile('\\s+').sub, ' ') + +def render(stream, econtext, rcontext): + append = stream.append + getitem = econtext.__getitem__ + get = econtext.get + _i18n_domain = None + re_amp = g_re_amp + re_needs_escape = g_re_needs_escape + decode = getitem('decode') + convert = getitem('convert') + translate = getitem('translate') + _backup_attrs_36729560 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24cadd0> name=None at 22f8e50> -> _value + _value = _static_38579664 + econtext['attrs'] = _value + + # <html ... 
(1:0) + # -------------------------------------------------------- + append(u'<html>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_36729920 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24cae10> name=None at 24caf10> -> _value + _value = _static_38579728 + econtext['attrs'] = _value + + # <body ... (2:2) + # -------------------------------------------------------- + append(u'<body>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_35804152 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24a59d0> name=None at 24a5690> -> _value + _value = _static_38427088 + econtext['attrs'] = _value + + # <div ... (3:4) + # -------------------------------------------------------- + append(u'<div>') + _backup_default_36732008 = get('default', _marker) + + # <Marker name='default' at 24ca410> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u'string:Hello world!' (3:40)> -> _cache_38578448 + try: + _cache_38578448 = u'Hello world!' + except: + rcontext.setdefault('__error__', []).append((u'string:Hello world!', 3, 40, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u'string:Hello world!' (3:40)> value=<Marker name='default' at 24cae90> at 24cacd0> -> _condition + _expression = _cache_38578448 + + # <Marker name='default' at 24cae90> -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + _content_139955154988272 = u'\n Hello world!\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + else: + _content = _cache_38578448 + _content = translate(_content, default=None, domain=_i18n_domain) + if (_content is not None): + append(_content) + if (_backup_default_36732008 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_36732008 + append(u'</div>') + if (_backup_attrs_35804152 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_35804152 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_36729776 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24a5410> name=None at 24a5b90> -> _value + _value = _static_38425616 + econtext['attrs'] = _value + + # <img ... (6:4) + # -------------------------------------------------------- + append(u'<img') + _backup_default_36729128 = get('default', _marker) + _value = u"${'Hello world!'}" + econtext['default'] = _value + + # <Translate msgid=None node=<Interpolation value=u"${'Hello world!'}" escape=True at 24a5bd0> at 24a5a90> -> _attr_alt + + # <Interpolation value=u"${'Hello world!'}" escape=True at 24a5bd0> -> _attr_alt + + # <Expression u"'Hello world!'" (6:16)> -> _attr_alt + try: + _attr_alt = 'Hello world!' 
+ except: + rcontext.setdefault('__error__', []).append((u"'Hello world!'", 6, 16, '<string>', _sys.exc_info()[1], )) + raise + + _attr_alt = _attr_alt + _attr_alt = translate(_attr_alt, default=_attr_alt, domain=_i18n_domain) + if (_attr_alt is None): + pass + else: + if (_attr_alt is False): + _attr_alt = None + else: + _tt = type(_attr_alt) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _attr_alt = unicode(_attr_alt) + else: + try: + if (_tt is str): + _attr_alt = decode(_attr_alt) + else: + if (_tt is not unicode): + try: + _attr_alt = _attr_alt.__html__ + except: + _attr_alt = convert(_attr_alt) + else: + raise RuntimeError + except RuntimeError: + _attr_alt = _attr_alt() + else: + if ((_attr_alt is not None) and (re_needs_escape(_attr_alt) is not None)): + if ('&' in _attr_alt): + if (';' in _attr_alt): + _attr_alt = re_amp.sub('&', _attr_alt) + else: + _attr_alt = _attr_alt.replace('&', '&') + if ('<' in _attr_alt): + _attr_alt = _attr_alt.replace('<', '<') + if ('>' in _attr_alt): + _attr_alt = _attr_alt.replace('>', '>') + if (u'"' in _attr_alt): + _attr_alt = _attr_alt.replace(u'"', '"') + if (_attr_alt is not None): + append((u' alt="%s"' % _attr_alt)) + if (_backup_default_36729128 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_36729128 + append(u' />') + if (_backup_attrs_36729776 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_36729776 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_35874000 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x234c110> name=None at 234ce10> -> _value + _value = _static_37011728 + econtext['attrs'] = _value + + # <img ... (7:4) + # -------------------------------------------------------- + append(u'<img') + _backup_default_35876808 = get('default', _marker) + _value = u"${'Hello world!'}" + econtext['default'] = _value + + # <Translate msgid=u'hello_world' node=<Interpolation value=u"${'Hello world!'}" escape=True at 234ce50> at 234c9d0> -> _attr_alt + + # <Interpolation value=u"${'Hello world!'}" escape=True at 234ce50> -> _attr_alt + + # <Expression u"'Hello world!'" (7:16)> -> _attr_alt + try: + _attr_alt = 'Hello world!' 
+ except: + rcontext.setdefault('__error__', []).append((u"'Hello world!'", 7, 16, '<string>', _sys.exc_info()[1], )) + raise + + _attr_alt = _attr_alt + _attr_alt = translate(u'hello_world', default=_attr_alt, domain=_i18n_domain) + if (_attr_alt is None): + pass + else: + if (_attr_alt is False): + _attr_alt = None + else: + _tt = type(_attr_alt) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _attr_alt = unicode(_attr_alt) + else: + try: + if (_tt is str): + _attr_alt = decode(_attr_alt) + else: + if (_tt is not unicode): + try: + _attr_alt = _attr_alt.__html__ + except: + _attr_alt = convert(_attr_alt) + else: + raise RuntimeError + except RuntimeError: + _attr_alt = _attr_alt() + else: + if ((_attr_alt is not None) and (re_needs_escape(_attr_alt) is not None)): + if ('&' in _attr_alt): + if (';' in _attr_alt): + _attr_alt = re_amp.sub('&', _attr_alt) + else: + _attr_alt = _attr_alt.replace('&', '&') + if ('<' in _attr_alt): + _attr_alt = _attr_alt.replace('<', '<') + if ('>' in _attr_alt): + _attr_alt = _attr_alt.replace('>', '>') + if (u'"' in _attr_alt): + _attr_alt = _attr_alt.replace(u'"', '"') + if (_attr_alt is not None): + append((u' alt="%s"' % _attr_alt)) + if (_backup_default_35876808 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_35876808 + append(u' />') + if (_backup_attrs_35874000 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_35874000 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_35875152 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24aefd0> name=None at 24aebd0> -> _value + _value = _static_38465488 + econtext['attrs'] = _value + + # <img ... (8:4) + # -------------------------------------------------------- + append(u'<img') + _backup_default_35874072 = get('default', _marker) + _value = None + econtext['default'] = _value + + # <Translate msgid=None node=<Expression u"'Hello world!'" (8:29)> at 24aead0> -> _attr_alt + + # <Expression u"'Hello world!'" (8:29)> -> _attr_alt + try: + _attr_alt = 'Hello world!' 
+ except: + rcontext.setdefault('__error__', []).append((u"'Hello world!'", 8, 29, '<string>', _sys.exc_info()[1], )) + raise + + _attr_alt = translate(_attr_alt, default=_attr_alt, domain=_i18n_domain) + if (_attr_alt is None): + pass + else: + if (_attr_alt is False): + _attr_alt = None + else: + _tt = type(_attr_alt) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _attr_alt = unicode(_attr_alt) + else: + try: + if (_tt is str): + _attr_alt = decode(_attr_alt) + else: + if (_tt is not unicode): + try: + _attr_alt = _attr_alt.__html__ + except: + _attr_alt = convert(_attr_alt) + else: + raise RuntimeError + except RuntimeError: + _attr_alt = _attr_alt() + else: + if ((_attr_alt is not None) and (re_needs_escape(_attr_alt) is not None)): + if ('&' in _attr_alt): + if (';' in _attr_alt): + _attr_alt = re_amp.sub('&', _attr_alt) + else: + _attr_alt = _attr_alt.replace('&', '&') + if ('<' in _attr_alt): + _attr_alt = _attr_alt.replace('<', '<') + if ('>' in _attr_alt): + _attr_alt = _attr_alt.replace('>', '>') + if ('"' in _attr_alt): + _attr_alt = _attr_alt.replace('"', '"') + if (_attr_alt is not None): + append((u' alt="%s"' % _attr_alt)) + if (_backup_default_35874072 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_35874072 + append(u' />') + if (_backup_attrs_35875152 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_35875152 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_35872848 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24ae950> name=None at 24ae910> -> _value + _value = _static_38463824 + econtext['attrs'] = _value + + # <img ... (9:4) + # -------------------------------------------------------- + append(u'<img') + _backup_default_35873208 = get('default', _marker) + _value = None + econtext['default'] = _value + + # <Translate msgid=u'hello_world' node=<Expression u"'Hello world!'" (9:29)> at 24ae890> -> _attr_alt + + # <Expression u"'Hello world!'" (9:29)> -> _attr_alt + try: + _attr_alt = 'Hello world!' 
+ except: + rcontext.setdefault('__error__', []).append((u"'Hello world!'", 9, 29, '<string>', _sys.exc_info()[1], )) + raise + + _attr_alt = translate(u'hello_world', default=_attr_alt, domain=_i18n_domain) + if (_attr_alt is None): + pass + else: + if (_attr_alt is False): + _attr_alt = None + else: + _tt = type(_attr_alt) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _attr_alt = unicode(_attr_alt) + else: + try: + if (_tt is str): + _attr_alt = decode(_attr_alt) + else: + if (_tt is not unicode): + try: + _attr_alt = _attr_alt.__html__ + except: + _attr_alt = convert(_attr_alt) + else: + raise RuntimeError + except RuntimeError: + _attr_alt = _attr_alt() + else: + if ((_attr_alt is not None) and (re_needs_escape(_attr_alt) is not None)): + if ('&' in _attr_alt): + if (';' in _attr_alt): + _attr_alt = re_amp.sub('&', _attr_alt) + else: + _attr_alt = _attr_alt.replace('&', '&') + if ('<' in _attr_alt): + _attr_alt = _attr_alt.replace('<', '<') + if ('>' in _attr_alt): + _attr_alt = _attr_alt.replace('>', '>') + if ('"' in _attr_alt): + _attr_alt = _attr_alt.replace('"', '"') + if (_attr_alt is not None): + append((u' alt="%s"' % _attr_alt)) + if (_backup_default_35873208 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_35873208 + append(u' />') + if (_backup_attrs_35872848 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_35872848 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</body>') + if (_backup_attrs_36729920 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_36729920 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</html>') + if (_backup_attrs_36729560 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_36729560 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) +pass \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/016-explicit-translation.pt.py b/lib/Chameleon-2.22/src/chameleon/tests/inputs/016-explicit-translation.pt.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/016-explicit-translation.pt.py @@ -0,0 +1,419 @@ +# -*- coding: utf-8 -*- +pass +from chameleon.utils import Placeholder as _Placeholder +import sys as _sys +pass +_static_38461520 = {u'alt': u"'Hello world!'", } +_static_38577424 = {} +_static_38427984 = {} +_static_38578896 = {u'alt': u"${'Hello world!'}", } +_static_38401104 = {u'alt': u"'Hello world!'", } +_static_38462480 = {u'alt': u"${'Hello world!'}", } +_static_38428112 = {} +_marker_default = _Placeholder() +import re +import functools +_marker = object() +g_re_amp = re.compile('&(?!([A-Za-z]+|#[0-9]+);)') +g_re_needs_escape = re.compile('[&<>\\"\\\']').search +re_whitespace = functools.partial(re.compile('\\s+').sub, ' ') + +def render(stream, econtext, rcontext): + append = stream.append + getitem = econtext.__getitem__ + get = econtext.get + _i18n_domain = None + re_amp = g_re_amp + re_needs_escape = g_re_needs_escape + decode = getitem('decode') + convert = getitem('convert') + translate = getitem('translate') + _backup_attrs_37150016 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24a5dd0> name=None at 24a59d0> -> _value + _value = _static_38428112 + econtext['attrs'] = _value + + # <html ... 
(1:0) + # -------------------------------------------------------- + append(u'<html>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_36785184 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24a5d50> name=None at 24a5a50> -> _value + _value = _static_38427984 + econtext['attrs'] = _value + + # <body ... (2:2) + # -------------------------------------------------------- + append(u'<body>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_38523176 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24ca510> name=None at 24ca9d0> -> _value + _value = _static_38577424 + econtext['attrs'] = _value + + # <div ... (3:4) + # -------------------------------------------------------- + append(u'<div>') + _backup_default_36784176 = get('default', _marker) + + # <Marker name='default' at 24cae10> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u'string:Hello world!' (3:40)> -> _cache_38578576 + try: + _cache_38578576 = u'Hello world!' + except: + rcontext.setdefault('__error__', []).append((u'string:Hello world!', 3, 40, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u'string:Hello world!' (3:40)> value=<Marker name='default' at 24ca890> at 24ca110> -> _condition + _expression = _cache_38578576 + + # <Marker name='default' at 24ca890> -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + _content_139955154988272 = u'\n Hello world!\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + else: + _content = _cache_38578576 + _content = translate(_content, default=None, domain=_i18n_domain) + if (_content is not None): + append(_content) + if (_backup_default_36784176 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_36784176 + append(u'</div>') + if (_backup_attrs_38523176 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_38523176 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_36793448 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24caad0> name=None at 24cae90> -> _value + _value = _static_38578896 + econtext['attrs'] = _value + + # <img ... (6:4) + # -------------------------------------------------------- + append(u'<img') + _backup_default_36792152 = get('default', _marker) + _value = u"${'Hello world!'}" + econtext['default'] = _value + + # <Translate msgid=None node=<Interpolation value=u"${'Hello world!'}" escape=True at 24ae090> at 24ae190> -> _attr_alt + + # <Interpolation value=u"${'Hello world!'}" escape=True at 24ae090> -> _attr_alt + + # <Expression u"'Hello world!'" (6:16)> -> _attr_alt + try: + _attr_alt = 'Hello world!' 
+ except: + rcontext.setdefault('__error__', []).append((u"'Hello world!'", 6, 16, '<string>', _sys.exc_info()[1], )) + raise + + _attr_alt = _attr_alt + _attr_alt = translate(_attr_alt, default=_attr_alt, domain=_i18n_domain) + if (_attr_alt is None): + pass + else: + if (_attr_alt is False): + _attr_alt = None + else: + _tt = type(_attr_alt) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _attr_alt = unicode(_attr_alt) + else: + try: + if (_tt is str): + _attr_alt = decode(_attr_alt) + else: + if (_tt is not unicode): + try: + _attr_alt = _attr_alt.__html__ + except: + _attr_alt = convert(_attr_alt) + else: + raise RuntimeError + except RuntimeError: + _attr_alt = _attr_alt() + else: + if ((_attr_alt is not None) and (re_needs_escape(_attr_alt) is not None)): + if ('&' in _attr_alt): + if (';' in _attr_alt): + _attr_alt = re_amp.sub('&', _attr_alt) + else: + _attr_alt = _attr_alt.replace('&', '&') + if ('<' in _attr_alt): + _attr_alt = _attr_alt.replace('<', '<') + if ('>' in _attr_alt): + _attr_alt = _attr_alt.replace('>', '>') + if (u'"' in _attr_alt): + _attr_alt = _attr_alt.replace(u'"', '"') + if (_attr_alt is not None): + append((u' alt="%s"' % _attr_alt)) + if (_backup_default_36792152 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_36792152 + append(u' />') + if (_backup_attrs_36793448 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_36793448 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_37151448 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24ae410> name=None at 24ae310> -> _value + _value = _static_38462480 + econtext['attrs'] = _value + + # <img ... (7:4) + # -------------------------------------------------------- + append(u'<img') + _backup_default_37154184 = get('default', _marker) + _value = u"${'Hello world!'}" + econtext['default'] = _value + + # <Translate msgid=u'hello_world' node=<Interpolation value=u"${'Hello world!'}" escape=True at 24ae9d0> at 24aecd0> -> _attr_alt + + # <Interpolation value=u"${'Hello world!'}" escape=True at 24ae9d0> -> _attr_alt + + # <Expression u"'Hello world!'" (7:16)> -> _attr_alt + try: + _attr_alt = 'Hello world!' 
+ except: + rcontext.setdefault('__error__', []).append((u"'Hello world!'", 7, 16, '<string>', _sys.exc_info()[1], )) + raise + + _attr_alt = _attr_alt + _attr_alt = translate(u'hello_world', default=_attr_alt, domain=_i18n_domain) + if (_attr_alt is None): + pass + else: + if (_attr_alt is False): + _attr_alt = None + else: + _tt = type(_attr_alt) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _attr_alt = unicode(_attr_alt) + else: + try: + if (_tt is str): + _attr_alt = decode(_attr_alt) + else: + if (_tt is not unicode): + try: + _attr_alt = _attr_alt.__html__ + except: + _attr_alt = convert(_attr_alt) + else: + raise RuntimeError + except RuntimeError: + _attr_alt = _attr_alt() + else: + if ((_attr_alt is not None) and (re_needs_escape(_attr_alt) is not None)): + if ('&' in _attr_alt): + if (';' in _attr_alt): + _attr_alt = re_amp.sub('&', _attr_alt) + else: + _attr_alt = _attr_alt.replace('&', '&') + if ('<' in _attr_alt): + _attr_alt = _attr_alt.replace('<', '<') + if ('>' in _attr_alt): + _attr_alt = _attr_alt.replace('>', '>') + if (u'"' in _attr_alt): + _attr_alt = _attr_alt.replace(u'"', '"') + if (_attr_alt is not None): + append((u' alt="%s"' % _attr_alt)) + if (_backup_default_37154184 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_37154184 + append(u' />') + if (_backup_attrs_37151448 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_37151448 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_37152528 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24ae050> name=None at 24ae210> -> _value + _value = _static_38461520 + econtext['attrs'] = _value + + # <img ... (8:4) + # -------------------------------------------------------- + append(u'<img') + _backup_default_37151520 = get('default', _marker) + _value = None + econtext['default'] = _value + + # <Translate msgid=None node=<Expression u"'Hello world!'" (8:29)> at 24ae750> -> _attr_alt + + # <Expression u"'Hello world!'" (8:29)> -> _attr_alt + try: + _attr_alt = 'Hello world!' 
+ except: + rcontext.setdefault('__error__', []).append((u"'Hello world!'", 8, 29, '<string>', _sys.exc_info()[1], )) + raise + + _attr_alt = translate(_attr_alt, default=_attr_alt, domain=_i18n_domain) + if (_attr_alt is None): + pass + else: + if (_attr_alt is False): + _attr_alt = None + else: + _tt = type(_attr_alt) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _attr_alt = unicode(_attr_alt) + else: + try: + if (_tt is str): + _attr_alt = decode(_attr_alt) + else: + if (_tt is not unicode): + try: + _attr_alt = _attr_alt.__html__ + except: + _attr_alt = convert(_attr_alt) + else: + raise RuntimeError + except RuntimeError: + _attr_alt = _attr_alt() + else: + if ((_attr_alt is not None) and (re_needs_escape(_attr_alt) is not None)): + if ('&' in _attr_alt): + if (';' in _attr_alt): + _attr_alt = re_amp.sub('&', _attr_alt) + else: + _attr_alt = _attr_alt.replace('&', '&') + if ('<' in _attr_alt): + _attr_alt = _attr_alt.replace('<', '<') + if ('>' in _attr_alt): + _attr_alt = _attr_alt.replace('>', '>') + if ('"' in _attr_alt): + _attr_alt = _attr_alt.replace('"', '"') + if (_attr_alt is not None): + append((u' alt="%s"' % _attr_alt)) + if (_backup_default_37151520 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_37151520 + append(u' />') + if (_backup_attrs_37152528 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_37152528 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_37151088 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x249f450> name=None at 249fcd0> -> _value + _value = _static_38401104 + econtext['attrs'] = _value + + # <img ... (9:4) + # -------------------------------------------------------- + append(u'<img') + _backup_default_37154472 = get('default', _marker) + _value = None + econtext['default'] = _value + + # <Translate msgid=u'hello_world' node=<Expression u"'Hello world!'" (9:29)> at 249fc50> -> _attr_alt + + # <Expression u"'Hello world!'" (9:29)> -> _attr_alt + try: + _attr_alt = 'Hello world!' 
+ except: + rcontext.setdefault('__error__', []).append((u"'Hello world!'", 9, 29, '<string>', _sys.exc_info()[1], )) + raise + + _attr_alt = translate(u'hello_world', default=_attr_alt, domain=_i18n_domain) + if (_attr_alt is None): + pass + else: + if (_attr_alt is False): + _attr_alt = None + else: + _tt = type(_attr_alt) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _attr_alt = unicode(_attr_alt) + else: + try: + if (_tt is str): + _attr_alt = decode(_attr_alt) + else: + if (_tt is not unicode): + try: + _attr_alt = _attr_alt.__html__ + except: + _attr_alt = convert(_attr_alt) + else: + raise RuntimeError + except RuntimeError: + _attr_alt = _attr_alt() + else: + if ((_attr_alt is not None) and (re_needs_escape(_attr_alt) is not None)): + if ('&' in _attr_alt): + if (';' in _attr_alt): + _attr_alt = re_amp.sub('&', _attr_alt) + else: + _attr_alt = _attr_alt.replace('&', '&') + if ('<' in _attr_alt): + _attr_alt = _attr_alt.replace('<', '<') + if ('>' in _attr_alt): + _attr_alt = _attr_alt.replace('>', '>') + if ('"' in _attr_alt): + _attr_alt = _attr_alt.replace('"', '"') + if (_attr_alt is not None): + append((u' alt="%s"' % _attr_alt)) + if (_backup_default_37154472 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_37154472 + append(u' />') + if (_backup_attrs_37151088 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_37151088 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</body>') + if (_backup_attrs_36785184 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_36785184 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</html>') + if (_backup_attrs_37150016 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_37150016 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) +pass \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/016.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/016.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/016.xml @@ -0,0 +1,4 @@ +<!DOCTYPE doc [ +<!ELEMENT doc (#PCDATA)> +]> +<doc><?pi?></doc> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/017-omit-tag.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/017-omit-tag.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/017-omit-tag.pt @@ -0,0 +1,12 @@ +<html> + <body> + <div tal:omit-tag="">Hello world!</div> + <div tal:omit-tag="">1 + Hello world! 
+ 2</div>3 + 4<div tal:omit-tag="True">Hello world!</div> + <div tal:omit-tag="False">Hello world!</div> + <div class="omitted" tal:omit-tag="True">Hello world!</div> + <div class="${'omitted'}" tal:omit-tag="True">Hello world!</div> + </body> +</html> \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/017-omit-tag.pt.py b/lib/Chameleon-2.22/src/chameleon/tests/inputs/017-omit-tag.pt.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/017-omit-tag.pt.py @@ -0,0 +1,278 @@ +# -*- coding: utf-8 -*- +pass +import sys as _sys +pass +_static_38461840 = {} +_static_38461520 = {} +_static_38576336 = {u'class': u"${'omitted'}", } +_static_38578640 = {u'class': u'omitted', } +_static_38464592 = {} +_static_38400400 = {} +import re +import functools +_marker = object() +g_re_amp = re.compile('&(?!([A-Za-z]+|#[0-9]+);)') +g_re_needs_escape = re.compile('[&<>\\"\\\']').search +re_whitespace = functools.partial(re.compile('\\s+').sub, ' ') + +def render(stream, econtext, rcontext): + append = stream.append + getitem = econtext.__getitem__ + get = econtext.get + _i18n_domain = None + re_amp = g_re_amp + re_needs_escape = g_re_needs_escape + decode = getitem('decode') + convert = getitem('convert') + translate = getitem('translate') + _backup_attrs_38457568 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24ae190> name=None at 24aebd0> -> _value + _value = _static_38461840 + econtext['attrs'] = _value + + # <html ... (1:0) + # -------------------------------------------------------- + append(u'<html>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_36649800 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24aec50> name=None at 24ae510> -> _value + _value = _static_38464592 + econtext['attrs'] = _value + + # <body ... (2:2) + # -------------------------------------------------------- + append(u'<body>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _content_139955154988272 = u'Hello world!' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _content_139955154988272 = u'1\n Hello world!\n 2' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _content_139955154988272 = u'3\n 4' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + + # <Negate value=<Expression u'True' (7:23)> at 249f690> -> _cache_38401680 + + # <Expression u'True' (7:23)> -> _cache_38401680 + try: + _cache_38401680 = True + except: + rcontext.setdefault('__error__', []).append((u'True', 7, 23, '<string>', _sys.exc_info()[1], )) + raise + + _cache_38401680 = not _cache_38401680 + _backup_attrs_38602080 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24ae050> name=None at 24ae210> -> _value + _value = _static_38461520 + econtext['attrs'] = _value + _condition = _cache_38401680 + if _condition: + + # <div ... (7:4) + # -------------------------------------------------------- + append(u'<div>') + _content_139955154988272 = u'Hello world!' 
+ if (_content_139955154988272 is not None): + append(_content_139955154988272) + _condition = _cache_38401680 + if _condition: + append(u'</div>') + if (_backup_attrs_38602080 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_38602080 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + + # <Negate value=<Expression u'False' (8:23)> at 249f750> -> _cache_38401872 + + # <Expression u'False' (8:23)> -> _cache_38401872 + try: + _cache_38401872 = False + except: + rcontext.setdefault('__error__', []).append((u'False', 8, 23, '<string>', _sys.exc_info()[1], )) + raise + + _cache_38401872 = not _cache_38401872 + _backup_attrs_37106464 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x249f190> name=None at 249f950> -> _value + _value = _static_38400400 + econtext['attrs'] = _value + _condition = _cache_38401872 + if _condition: + + # <div ... (8:4) + # -------------------------------------------------------- + append(u'<div>') + _content_139955154988272 = u'Hello world!' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _condition = _cache_38401872 + if _condition: + append(u'</div>') + if (_backup_attrs_37106464 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_37106464 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + + # <Negate value=<Expression u'True' (9:39)> at 24cae10> -> _cache_38579728 + + # <Expression u'True' (9:39)> -> _cache_38579728 + try: + _cache_38579728 = True + except: + rcontext.setdefault('__error__', []).append((u'True', 9, 39, '<string>', _sys.exc_info()[1], )) + raise + + _cache_38579728 = not _cache_38579728 + _backup_attrs_37107904 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24ca9d0> name=None at 24ca490> -> _value + _value = _static_38578640 + econtext['attrs'] = _value + _condition = _cache_38579728 + if _condition: + + # <div ... (9:4) + # -------------------------------------------------------- + append(u'<div') + _attr_class = u'omitted' + if (_attr_class is not None): + append((u' class="%s"' % _attr_class)) + append(u'>') + _content_139955154988272 = u'Hello world!' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _condition = _cache_38579728 + if _condition: + append(u'</div>') + if (_backup_attrs_37107904 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_37107904 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + + # <Negate value=<Expression u'True' (10:44)> at 24a5f90> -> _cache_38428560 + + # <Expression u'True' (10:44)> -> _cache_38428560 + try: + _cache_38428560 = True + except: + rcontext.setdefault('__error__', []).append((u'True', 10, 44, '<string>', _sys.exc_info()[1], )) + raise + + _cache_38428560 = not _cache_38428560 + _backup_attrs_37108768 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24ca0d0> name=None at 24caad0> -> _value + _value = _static_38576336 + econtext['attrs'] = _value + _condition = _cache_38428560 + if _condition: + + # <div ... 
(10:4) + # -------------------------------------------------------- + append(u'<div') + _backup_default_36682712 = get('default', _marker) + _value = u"${'omitted'}" + econtext['default'] = _value + + # <Interpolation value=u"${'omitted'}" escape=True at 24ca050> -> _attr_class + + # <Expression u"'omitted'" (10:18)> -> _attr_class + try: + _attr_class = 'omitted' + except: + rcontext.setdefault('__error__', []).append((u"'omitted'", 10, 18, '<string>', _sys.exc_info()[1], )) + raise + + _attr_class = _attr_class + if (_attr_class is None): + pass + else: + if (_attr_class is False): + _attr_class = None + else: + _tt = type(_attr_class) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _attr_class = unicode(_attr_class) + else: + try: + if (_tt is str): + _attr_class = decode(_attr_class) + else: + if (_tt is not unicode): + try: + _attr_class = _attr_class.__html__ + except: + _attr_class = convert(_attr_class) + else: + raise RuntimeError + except RuntimeError: + _attr_class = _attr_class() + else: + if ((_attr_class is not None) and (re_needs_escape(_attr_class) is not None)): + if ('&' in _attr_class): + if (';' in _attr_class): + _attr_class = re_amp.sub('&', _attr_class) + else: + _attr_class = _attr_class.replace('&', '&') + if ('<' in _attr_class): + _attr_class = _attr_class.replace('<', '<') + if ('>' in _attr_class): + _attr_class = _attr_class.replace('>', '>') + if (u'"' in _attr_class): + _attr_class = _attr_class.replace(u'"', '"') + if (_attr_class is not None): + append((u' class="%s"' % _attr_class)) + if (_backup_default_36682712 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_36682712 + append(u'>') + _content_139955154988272 = u'Hello world!' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _condition = _cache_38428560 + if _condition: + append(u'</div>') + if (_backup_attrs_37108768 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_37108768 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</body>') + if (_backup_attrs_36649800 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_36649800 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</html>') + if (_backup_attrs_38457568 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_38457568 +pass \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/017.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/017.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/017.xml @@ -0,0 +1,4 @@ +<!DOCTYPE doc [ +<!ELEMENT doc (#PCDATA)> +]> +<doc><?pi some data ? 
> <??></doc> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/018-translation-nested-dynamic.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/018-translation-nested-dynamic.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/018-translation-nested-dynamic.pt @@ -0,0 +1,13 @@ +<div xmlns="http://www.w3.org/1999/xhtml" + xmlns:i18n="http://xml.zope.org/namespaces/i18n"> + <div i18n:translate="" tal:omit-tag=""> + <span i18n:name="monthname" + i18n:translate="" + tal:content="'october'" + tal:omit-tag="">monthname</span> + <span i18n:name="year" + i18n:translate="" + tal:content="1982" + tal:omit-tag="">year</span> + </div> +</div> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/018-translation-nested-dynamic.pt-en.py b/lib/Chameleon-2.22/src/chameleon/tests/inputs/018-translation-nested-dynamic.pt-en.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/018-translation-nested-dynamic.pt-en.py @@ -0,0 +1,142 @@ +# -*- coding: utf-8 -*- +pass +from chameleon.utils import Placeholder as _Placeholder +import sys as _sys +from chameleon.utils import DebuggingOutputStream as _DebuggingOutputStream +pass +_static_38577168 = {u'xmlns': u'http://www.w3.org/1999/xhtml', } +_marker_default = _Placeholder() +import re +import functools +_marker = object() +g_re_amp = re.compile('&(?!([A-Za-z]+|#[0-9]+);)') +g_re_needs_escape = re.compile('[&<>\\"\\\']').search +re_whitespace = functools.partial(re.compile('\\s+').sub, ' ') + +def render(stream, econtext, rcontext): + append = stream.append + getitem = econtext.__getitem__ + get = econtext.get + _i18n_domain = None + re_amp = g_re_amp + re_needs_escape = g_re_needs_escape + decode = getitem('decode') + convert = getitem('convert') + translate = getitem('translate') + _backup_attrs_38450168 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24ca410> name=None at 24ca490> -> _value + _value = _static_38577168 + econtext['attrs'] = _value + + # <div ... 
(1:0) + # -------------------------------------------------------- + append(u'<div') + _attr_xmlns = u'http://www.w3.org/1999/xhtml' + if (_attr_xmlns is not None): + append((u' xmlns="%s"' % _attr_xmlns)) + append(u'>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _stream_35214320_year = '' + _stream_35214320_monthname = '' + _stream_38579664 = _DebuggingOutputStream() + _append_38579664 = _stream_38579664.append + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + _append_38579664(_content_139955154988272) + _stream_35214320_monthname = _DebuggingOutputStream() + _append_35214320_monthname = _stream_35214320_monthname.append + _backup_default_38618392 = get('default', _marker) + + # <Marker name='default' at 24ae690> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u"'october'" (6:25)> -> _cache_38463888 + try: + _cache_38463888 = 'october' + except: + rcontext.setdefault('__error__', []).append((u"'october'", 6, 25, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u"'october'" (6:25)> value=<Marker name='default' at 24ae150> at 24ae890> -> _condition + _expression = _cache_38463888 + + # <Marker name='default' at 24ae150> -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + _content_139955154988272 = u'monthname' + if (_content_139955154988272 is not None): + _append_35214320_monthname(_content_139955154988272) + else: + _content = _cache_38463888 + _content = translate(_content, default=None, domain=_i18n_domain) + if (_content is not None): + _append_35214320_monthname(_content) + if (_backup_default_38618392 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_38618392 + _append_38579664(u'${monthname}') + _stream_35214320_monthname = ''.join(_stream_35214320_monthname) + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + _append_38579664(_content_139955154988272) + _stream_35214320_year = _DebuggingOutputStream() + _append_35214320_year = _stream_35214320_year.append + _backup_default_36748968 = get('default', _marker) + + # <Marker name='default' at 24a5dd0> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u'1982' (10:25)> -> _cache_38427600 + try: + _cache_38427600 = 1982 + except: + rcontext.setdefault('__error__', []).append((u'1982', 10, 25, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u'1982' (10:25)> value=<Marker name='default' at 24a5410> at 24a5f90> -> _condition + _expression = _cache_38427600 + + # <Marker name='default' at 24a5410> -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + _content_139955154988272 = u'year' + if (_content_139955154988272 is not None): + _append_35214320_year(_content_139955154988272) + else: + _content = _cache_38427600 + _content = translate(_content, default=None, domain=_i18n_domain) + if (_content is not None): + _append_35214320_year(_content) + if (_backup_default_36748968 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_36748968 + _append_38579664(u'${year}') + _stream_35214320_year = ''.join(_stream_35214320_year) + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + _append_38579664(_content_139955154988272) + _msgid_38579664 = re_whitespace(''.join(_stream_38579664)).strip() 
+ append(translate(_msgid_38579664, mapping={u'monthname': _stream_35214320_monthname, u'year': _stream_35214320_year, }, default=_msgid_38579664, domain=_i18n_domain)) + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</div>') + if (_backup_attrs_38450168 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_38450168 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) +pass \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/018-translation-nested-dynamic.pt.py b/lib/Chameleon-2.22/src/chameleon/tests/inputs/018-translation-nested-dynamic.pt.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/018-translation-nested-dynamic.pt.py @@ -0,0 +1,142 @@ +# -*- coding: utf-8 -*- +pass +from chameleon.utils import Placeholder as _Placeholder +import sys as _sys +from chameleon.utils import DebuggingOutputStream as _DebuggingOutputStream +pass +_static_38400080 = {u'xmlns': u'http://www.w3.org/1999/xhtml', } +_marker_default = _Placeholder() +import re +import functools +_marker = object() +g_re_amp = re.compile('&(?!([A-Za-z]+|#[0-9]+);)') +g_re_needs_escape = re.compile('[&<>\\"\\\']').search +re_whitespace = functools.partial(re.compile('\\s+').sub, ' ') + +def render(stream, econtext, rcontext): + append = stream.append + getitem = econtext.__getitem__ + get = econtext.get + _i18n_domain = None + re_amp = g_re_amp + re_needs_escape = g_re_needs_escape + decode = getitem('decode') + convert = getitem('convert') + translate = getitem('translate') + _backup_attrs_37147352 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x249f050> name=None at 249f250> -> _value + _value = _static_38400080 + econtext['attrs'] = _value + + # <div ... 
(1:0) + # -------------------------------------------------------- + append(u'<div') + _attr_xmlns = u'http://www.w3.org/1999/xhtml' + if (_attr_xmlns is not None): + append((u' xmlns="%s"' % _attr_xmlns)) + append(u'>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _stream_35214320_year = '' + _stream_35214320_monthname = '' + _stream_38401104 = _DebuggingOutputStream() + _append_38401104 = _stream_38401104.append + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + _append_38401104(_content_139955154988272) + _stream_35214320_monthname = _DebuggingOutputStream() + _append_35214320_monthname = _stream_35214320_monthname.append + _backup_default_38434648 = get('default', _marker) + + # <Marker name='default' at 24ca150> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u"'october'" (6:25)> -> _cache_38577296 + try: + _cache_38577296 = 'october' + except: + rcontext.setdefault('__error__', []).append((u"'october'", 6, 25, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u"'october'" (6:25)> value=<Marker name='default' at 24ca410> at 24ca510> -> _condition + _expression = _cache_38577296 + + # <Marker name='default' at 24ca410> -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + _content_139955154988272 = u'monthname' + if (_content_139955154988272 is not None): + _append_35214320_monthname(_content_139955154988272) + else: + _content = _cache_38577296 + _content = translate(_content, default=None, domain=_i18n_domain) + if (_content is not None): + _append_35214320_monthname(_content) + if (_backup_default_38434648 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_38434648 + _append_38401104(u'${monthname}') + _stream_35214320_monthname = ''.join(_stream_35214320_monthname) + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + _append_38401104(_content_139955154988272) + _stream_35214320_year = _DebuggingOutputStream() + _append_35214320_year = _stream_35214320_year.append + _backup_default_38436304 = get('default', _marker) + + # <Marker name='default' at 24ca050> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u'1982' (10:25)> -> _cache_38577872 + try: + _cache_38577872 = 1982 + except: + rcontext.setdefault('__error__', []).append((u'1982', 10, 25, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u'1982' (10:25)> value=<Marker name='default' at 24caa10> at 24cacd0> -> _condition + _expression = _cache_38577872 + + # <Marker name='default' at 24caa10> -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + _content_139955154988272 = u'year' + if (_content_139955154988272 is not None): + _append_35214320_year(_content_139955154988272) + else: + _content = _cache_38577872 + _content = translate(_content, default=None, domain=_i18n_domain) + if (_content is not None): + _append_35214320_year(_content) + if (_backup_default_38436304 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_38436304 + _append_38401104(u'${year}') + _stream_35214320_year = ''.join(_stream_35214320_year) + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + _append_38401104(_content_139955154988272) + _msgid_38401104 = re_whitespace(''.join(_stream_38401104)).strip() 
+ append(translate(_msgid_38401104, mapping={u'monthname': _stream_35214320_monthname, u'year': _stream_35214320_year, }, default=_msgid_38401104, domain=_i18n_domain)) + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</div>') + if (_backup_attrs_37147352 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_37147352 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) +pass \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/018.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/018.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/018.xml @@ -0,0 +1,4 @@ +<!DOCTYPE doc [ +<!ELEMENT doc (#PCDATA)> +]> +<doc><![CDATA[<foo>]]></doc> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/019-replace.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/019-replace.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/019-replace.pt @@ -0,0 +1,13 @@ +<html> + <body> + <div tal:replace="'Hello world!'" /> + <div tal:replace="'Hello world!'" />1 + 2<div tal:replace="'Hello world!'" /> + <div tal:replace="'Hello world!'" />3 + <div tal:replace="'Hello world!'">4</div>5 + 6<div tal:replace="'Hello world!'"></div> + <div tal:replace="1" /> + <div tal:replace="1.0" /> + <div tal:replace="True" /> + </body> +</html> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/019-replace.pt.py b/lib/Chameleon-2.22/src/chameleon/tests/inputs/019-replace.pt.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/019-replace.pt.py @@ -0,0 +1,809 @@ +# -*- coding: utf-8 -*- +pass +from chameleon.utils import Placeholder as _Placeholder +import sys as _sys +pass +_static_36788688 = {} +_static_36744848 = {} +_static_38465168 = {} +_static_38401424 = {} +_static_38427088 = {} +_static_38579856 = {} +_static_37141712 = {} +_static_37140688 = {} +_static_36790160 = {} +_static_36789584 = {} +_static_37141776 = {} +_marker_default = _Placeholder() +import re +import functools +_marker = object() +g_re_amp = re.compile('&(?!([A-Za-z]+|#[0-9]+);)') +g_re_needs_escape = re.compile('[&<>\\"\\\']').search +re_whitespace = functools.partial(re.compile('\\s+').sub, ' ') + +def render(stream, econtext, rcontext): + append = stream.append + getitem = econtext.__getitem__ + get = econtext.get + _i18n_domain = None + re_amp = g_re_amp + re_needs_escape = g_re_needs_escape + decode = getitem('decode') + convert = getitem('convert') + translate = getitem('translate') + _backup_attrs_39046608 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24aee90> name=None at 24ae750> -> _value + _value = _static_38465168 + econtext['attrs'] = _value + + # <html ... (1:0) + # -------------------------------------------------------- + append(u'<html>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_35794168 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24a59d0> name=None at 24a5690> -> _value + _value = _static_38427088 + econtext['attrs'] = _value + + # <body ... 
(2:2) + # -------------------------------------------------------- + append(u'<body>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_default_38452904 = get('default', _marker) + + # <Marker name='default' at 236b550> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u"'Hello world!'" (3:22)> -> _cache_36742672 + try: + _cache_36742672 = 'Hello world!' + except: + rcontext.setdefault('__error__', []).append((u"'Hello world!'", 3, 22, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u"'Hello world!'" (3:22)> value=<Marker name='default' at 230a750> at 230ac90> -> _condition + _expression = _cache_36742672 + + # <Marker name='default' at 230a750> -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + _backup_attrs_35754136 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x230ae90> name=None at 24a5e90> -> _value + _value = _static_36744848 + econtext['attrs'] = _value + + # <div ... (3:4) + # -------------------------------------------------------- + append(u'<div />') + if (_backup_attrs_35754136 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_35754136 + else: + _content = _cache_36742672 + if (_content is None): + pass + else: + if (_content is False): + _content = None + else: + _tt = type(_content) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content = unicode(_content) + else: + try: + if (_tt is str): + _content = decode(_content) + else: + if (_tt is not unicode): + try: + _content = _content.__html__ + except: + _content = convert(_content) + else: + raise RuntimeError + except RuntimeError: + _content = _content() + else: + if ((_content is not None) and (re_needs_escape(_content) is not None)): + if ('&' in _content): + if (';' in _content): + _content = re_amp.sub('&', _content) + else: + _content = _content.replace('&', '&') + if ('<' in _content): + _content = _content.replace('<', '<') + if ('>' in _content): + _content = _content.replace('>', '>') + if ('\x00' in _content): + _content = _content.replace('\x00', '"') + if (_content is not None): + append(_content) + if (_backup_default_38452904 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_38452904 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_default_39065936 = get('default', _marker) + + # <Marker name='default' at 236b910> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u"'Hello world!'" (4:22)> -> _cache_37138960 + try: + _cache_37138960 = 'Hello world!' + except: + rcontext.setdefault('__error__', []).append((u"'Hello world!'", 4, 22, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u"'Hello world!'" (4:22)> value=<Marker name='default' at 236bf10> at 236b250> -> _condition + _expression = _cache_37138960 + + # <Marker name='default' at 236bf10> -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + _backup_attrs_39064568 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x236bcd0> name=None at 236b5d0> -> _value + _value = _static_37141712 + econtext['attrs'] = _value + + # <div ... 
(4:4) + # -------------------------------------------------------- + append(u'<div />') + if (_backup_attrs_39064568 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_39064568 + else: + _content = _cache_37138960 + if (_content is None): + pass + else: + if (_content is False): + _content = None + else: + _tt = type(_content) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content = unicode(_content) + else: + try: + if (_tt is str): + _content = decode(_content) + else: + if (_tt is not unicode): + try: + _content = _content.__html__ + except: + _content = convert(_content) + else: + raise RuntimeError + except RuntimeError: + _content = _content() + else: + if ((_content is not None) and (re_needs_escape(_content) is not None)): + if ('&' in _content): + if (';' in _content): + _content = re_amp.sub('&', _content) + else: + _content = _content.replace('&', '&') + if ('<' in _content): + _content = _content.replace('<', '<') + if ('>' in _content): + _content = _content.replace('>', '>') + if ('\x00' in _content): + _content = _content.replace('\x00', '"') + if (_content is not None): + append(_content) + if (_backup_default_39065936 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_39065936 + _content_139955154988272 = u'1\n 2' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_default_39065000 = get('default', _marker) + + # <Marker name='default' at 236b650> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u"'Hello world!'" (5:22)> -> _cache_37140112 + try: + _cache_37140112 = 'Hello world!' + except: + rcontext.setdefault('__error__', []).append((u"'Hello world!'", 5, 22, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u"'Hello world!'" (5:22)> value=<Marker name='default' at 236bb90> at 236b3d0> -> _condition + _expression = _cache_37140112 + + # <Marker name='default' at 236bb90> -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + _backup_attrs_39067304 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x236b8d0> name=None at 236bbd0> -> _value + _value = _static_37140688 + econtext['attrs'] = _value + + # <div ... 
(5:4) + # -------------------------------------------------------- + append(u'<div />') + if (_backup_attrs_39067304 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_39067304 + else: + _content = _cache_37140112 + if (_content is None): + pass + else: + if (_content is False): + _content = None + else: + _tt = type(_content) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content = unicode(_content) + else: + try: + if (_tt is str): + _content = decode(_content) + else: + if (_tt is not unicode): + try: + _content = _content.__html__ + except: + _content = convert(_content) + else: + raise RuntimeError + except RuntimeError: + _content = _content() + else: + if ((_content is not None) and (re_needs_escape(_content) is not None)): + if ('&' in _content): + if (';' in _content): + _content = re_amp.sub('&', _content) + else: + _content = _content.replace('&', '&') + if ('<' in _content): + _content = _content.replace('<', '<') + if ('>' in _content): + _content = _content.replace('>', '>') + if ('\x00' in _content): + _content = _content.replace('\x00', '"') + if (_content is not None): + append(_content) + if (_backup_default_39065000 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_39065000 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_default_39095472 = get('default', _marker) + + # <Marker name='default' at 249f290> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u"'Hello world!'" (6:22)> -> _cache_38401040 + try: + _cache_38401040 = 'Hello world!' + except: + rcontext.setdefault('__error__', []).append((u"'Hello world!'", 6, 22, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u"'Hello world!'" (6:22)> value=<Marker name='default' at 249f750> at 249f210> -> _condition + _expression = _cache_38401040 + + # <Marker name='default' at 249f750> -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + _backup_attrs_39095400 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x236bd10> name=None at 236b590> -> _value + _value = _static_37141776 + econtext['attrs'] = _value + + # <div ... 
(6:4) + # -------------------------------------------------------- + append(u'<div />') + if (_backup_attrs_39095400 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_39095400 + else: + _content = _cache_38401040 + if (_content is None): + pass + else: + if (_content is False): + _content = None + else: + _tt = type(_content) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content = unicode(_content) + else: + try: + if (_tt is str): + _content = decode(_content) + else: + if (_tt is not unicode): + try: + _content = _content.__html__ + except: + _content = convert(_content) + else: + raise RuntimeError + except RuntimeError: + _content = _content() + else: + if ((_content is not None) and (re_needs_escape(_content) is not None)): + if ('&' in _content): + if (';' in _content): + _content = re_amp.sub('&', _content) + else: + _content = _content.replace('&', '&') + if ('<' in _content): + _content = _content.replace('<', '<') + if ('>' in _content): + _content = _content.replace('>', '>') + if ('\x00' in _content): + _content = _content.replace('\x00', '"') + if (_content is not None): + append(_content) + if (_backup_default_39095472 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_39095472 + _content_139955154988272 = u'3\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_default_35897496 = get('default', _marker) + + # <Marker name='default' at 24caa90> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u"'Hello world!'" (7:22)> -> _cache_38403600 + try: + _cache_38403600 = 'Hello world!' + except: + rcontext.setdefault('__error__', []).append((u"'Hello world!'", 7, 22, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u"'Hello world!'" (7:22)> value=<Marker name='default' at 249fc50> at 249f950> -> _condition + _expression = _cache_38403600 + + # <Marker name='default' at 249fc50> -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + _backup_attrs_35900880 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x249f590> name=None at 249f450> -> _value + _value = _static_38401424 + econtext['attrs'] = _value + + # <div ... 
(7:4) + # -------------------------------------------------------- + append(u'<div>') + _content_139955154988272 = u'4' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</div>') + if (_backup_attrs_35900880 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_35900880 + else: + _content = _cache_38403600 + if (_content is None): + pass + else: + if (_content is False): + _content = None + else: + _tt = type(_content) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content = unicode(_content) + else: + try: + if (_tt is str): + _content = decode(_content) + else: + if (_tt is not unicode): + try: + _content = _content.__html__ + except: + _content = convert(_content) + else: + raise RuntimeError + except RuntimeError: + _content = _content() + else: + if ((_content is not None) and (re_needs_escape(_content) is not None)): + if ('&' in _content): + if (';' in _content): + _content = re_amp.sub('&', _content) + else: + _content = _content.replace('&', '&') + if ('<' in _content): + _content = _content.replace('<', '<') + if ('>' in _content): + _content = _content.replace('>', '>') + if ('\x00' in _content): + _content = _content.replace('\x00', '"') + if (_content is not None): + append(_content) + if (_backup_default_35897496 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_35897496 + _content_139955154988272 = u'5\n 6' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_default_38618392 = get('default', _marker) + + # <Marker name='default' at 24ca150> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u"'Hello world!'" (8:22)> -> _cache_38578576 + try: + _cache_38578576 = 'Hello world!' + except: + rcontext.setdefault('__error__', []).append((u"'Hello world!'", 8, 22, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u"'Hello world!'" (8:22)> value=<Marker name='default' at 24cadd0> at 24caa10> -> _condition + _expression = _cache_38578576 + + # <Marker name='default' at 24cadd0> -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + _backup_attrs_35899152 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x24cae90> name=None at 24ca510> -> _value + _value = _static_38579856 + econtext['attrs'] = _value + + # <div ... 
(8:4) + # -------------------------------------------------------- + append(u'<div>') + append(u'</div>') + if (_backup_attrs_35899152 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_35899152 + else: + _content = _cache_38578576 + if (_content is None): + pass + else: + if (_content is False): + _content = None + else: + _tt = type(_content) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content = unicode(_content) + else: + try: + if (_tt is str): + _content = decode(_content) + else: + if (_tt is not unicode): + try: + _content = _content.__html__ + except: + _content = convert(_content) + else: + raise RuntimeError + except RuntimeError: + _content = _content() + else: + if ((_content is not None) and (re_needs_escape(_content) is not None)): + if ('&' in _content): + if (';' in _content): + _content = re_amp.sub('&', _content) + else: + _content = _content.replace('&', '&') + if ('<' in _content): + _content = _content.replace('<', '<') + if ('>' in _content): + _content = _content.replace('>', '>') + if ('\x00' in _content): + _content = _content.replace('\x00', '"') + if (_content is not None): + append(_content) + if (_backup_default_38618392 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_38618392 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_default_38617960 = get('default', _marker) + + # <Marker name='default' at 2315510> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u'1' (9:22)> -> _cache_36790096 + try: + _cache_36790096 = 1 + except: + rcontext.setdefault('__error__', []).append((u'1', 9, 22, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u'1' (9:22)> value=<Marker name='default' at 2315690> at 23155d0> -> _condition + _expression = _cache_36790096 + + # <Marker name='default' at 2315690> -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + _backup_attrs_38618320 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x2315f90> name=None at 23154d0> -> _value + _value = _static_36790160 + econtext['attrs'] = _value + + # <div ... 
(9:4) + # -------------------------------------------------------- + append(u'<div />') + if (_backup_attrs_38618320 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_38618320 + else: + _content = _cache_36790096 + if (_content is None): + pass + else: + if (_content is False): + _content = None + else: + _tt = type(_content) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content = unicode(_content) + else: + try: + if (_tt is str): + _content = decode(_content) + else: + if (_tt is not unicode): + try: + _content = _content.__html__ + except: + _content = convert(_content) + else: + raise RuntimeError + except RuntimeError: + _content = _content() + else: + if ((_content is not None) and (re_needs_escape(_content) is not None)): + if ('&' in _content): + if (';' in _content): + _content = re_amp.sub('&', _content) + else: + _content = _content.replace('&', '&') + if ('<' in _content): + _content = _content.replace('<', '<') + if ('>' in _content): + _content = _content.replace('>', '>') + if ('\x00' in _content): + _content = _content.replace('\x00', '"') + if (_content is not None): + append(_content) + if (_backup_default_38617960 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_38617960 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_default_38631256 = get('default', _marker) + + # <Marker name='default' at 2315dd0> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u'1.0' (10:22)> -> _cache_36789200 + try: + _cache_36789200 = 1.0 + except: + rcontext.setdefault('__error__', []).append((u'1.0', 10, 22, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u'1.0' (10:22)> value=<Marker name='default' at 2315c10> at 2315a10> -> _condition + _expression = _cache_36789200 + + # <Marker name='default' at 2315c10> -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + _backup_attrs_38619832 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x2315d50> name=None at 2315190> -> _value + _value = _static_36789584 + econtext['attrs'] = _value + + # <div ... 
(10:4) + # -------------------------------------------------------- + append(u'<div />') + if (_backup_attrs_38619832 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_38619832 + else: + _content = _cache_36789200 + if (_content is None): + pass + else: + if (_content is False): + _content = None + else: + _tt = type(_content) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content = unicode(_content) + else: + try: + if (_tt is str): + _content = decode(_content) + else: + if (_tt is not unicode): + try: + _content = _content.__html__ + except: + _content = convert(_content) + else: + raise RuntimeError + except RuntimeError: + _content = _content() + else: + if ((_content is not None) and (re_needs_escape(_content) is not None)): + if ('&' in _content): + if (';' in _content): + _content = re_amp.sub('&', _content) + else: + _content = _content.replace('&', '&') + if ('<' in _content): + _content = _content.replace('<', '<') + if ('>' in _content): + _content = _content.replace('>', '>') + if ('\x00' in _content): + _content = _content.replace('\x00', '"') + if (_content is not None): + append(_content) + if (_backup_default_38631256 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_38631256 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_default_38647928 = get('default', _marker) + + # <Marker name='default' at 2315290> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u'True' (11:22)> -> _cache_36789072 + try: + _cache_36789072 = True + except: + rcontext.setdefault('__error__', []).append((u'True', 11, 22, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u'True' (11:22)> value=<Marker name='default' at 23157d0> at 2315b90> -> _condition + _expression = _cache_36789072 + + # <Marker name='default' at 23157d0> -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + _backup_attrs_39044664 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x23159d0> name=None at 2315110> -> _value + _value = _static_36788688 + econtext['attrs'] = _value + + # <div ... 
(11:4) + # -------------------------------------------------------- + append(u'<div />') + if (_backup_attrs_39044664 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_39044664 + else: + _content = _cache_36789072 + if (_content is None): + pass + else: + if (_content is False): + _content = None + else: + _tt = type(_content) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content = unicode(_content) + else: + try: + if (_tt is str): + _content = decode(_content) + else: + if (_tt is not unicode): + try: + _content = _content.__html__ + except: + _content = convert(_content) + else: + raise RuntimeError + except RuntimeError: + _content = _content() + else: + if ((_content is not None) and (re_needs_escape(_content) is not None)): + if ('&' in _content): + if (';' in _content): + _content = re_amp.sub('&', _content) + else: + _content = _content.replace('&', '&') + if ('<' in _content): + _content = _content.replace('<', '<') + if ('>' in _content): + _content = _content.replace('>', '>') + if ('\x00' in _content): + _content = _content.replace('\x00', '"') + if (_content is not None): + append(_content) + if (_backup_default_38647928 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_38647928 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</body>') + if (_backup_attrs_35794168 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_35794168 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</html>') + if (_backup_attrs_39046608 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_39046608 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) +pass \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/019.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/019.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/019.xml @@ -0,0 +1,4 @@ +<!DOCTYPE doc [ +<!ELEMENT doc (#PCDATA)> +]> +<doc><![CDATA[<&]]></doc> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/020-on-error.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/020-on-error.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/020-on-error.pt @@ -0,0 +1,10 @@ +<html> + <body> + <div id="test" tal:attributes="class python: 'abc' + 2" tal:on-error="nothing" /> + <div tal:on-error="string:${type(error.value).__name__} thrown at ${error.lineno}:${error.offset}."> + <div tal:content="undefined" /> + </div> + <div tal:replace="undefined" tal:on-error="nothing" /> + <div tal:content="undefined" tal:on-error="nothing" /> + </body> +</html> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/020-on-error.pt.py b/lib/Chameleon-2.22/src/chameleon/tests/inputs/020-on-error.pt.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/020-on-error.pt.py @@ -0,0 +1,180 @@ +# -*- coding: utf-8 -*- +pass +from chameleon.utils import Placeholder as _Placeholder +import sys as _sys +pass +_static_37141392 = {} +_static_37138960 = {} +_static_36743056 = {} +_marker_default = _Placeholder() +import re +import functools +_marker = object() +g_re_amp = re.compile('&(?!([A-Za-z]+|#[0-9]+);)') +g_re_needs_escape = re.compile('[&<>\\"\\\']').search 
+re_whitespace = functools.partial(re.compile('\\s+').sub, ' ')
+
+def render(stream, econtext, rcontext):
+ append = stream.append
+ getitem = econtext.__getitem__
+ get = econtext.get
+ _i18n_domain = None
+ re_amp = g_re_amp
+ re_needs_escape = g_re_needs_escape
+ decode = getitem('decode')
+ convert = getitem('convert')
+ translate = getitem('translate')
+ _backup_attrs_40147424 = get('attrs', _marker)
+
+ # <Static value=<_ast.Dict object at 0x230a790> name=None at 230a490> -> _value
+ _value = _static_36743056
+ econtext['attrs'] = _value
+
+ # <html ... (1:0)
+ # --------------------------------------------------------
+ append(u'<html>')
+ _content_139955154988272 = u'\n '
+ if (_content_139955154988272 is not None):
+ append(_content_139955154988272)
+ __fallback_34354496 = len(stream)
+ try:
+ _backup_attrs_40147568 = get('attrs', _marker)
+
+ # <Static value=<_ast.Dict object at 0x236b210> name=None at 236b490> -> _value
+ _value = _static_37138960
+ econtext['attrs'] = _value
+
+ # <body ... (2:2)
+ # --------------------------------------------------------
+ append(u'<body>')
+ _content_139955154988272 = u'\n '
+ if (_content_139955154988272 is not None):
+ append(_content_139955154988272)
+ _backup_attrs_40167832 = get('attrs', _marker)
+
+ # <Static value=<_ast.Dict object at 0x236bb90> name=None at 236b3d0> -> _value
+ _value = _static_37141392
+ econtext['attrs'] = _value
+
+ # <div ... (3:4)
+ # --------------------------------------------------------
+ append(u'<div>')
+ _backup_default_40145120 = get('default', _marker)
+
+ # <Marker name='default' at 236b690> -> _value
+ _value = _marker_default
+ econtext['default'] = _value
+
+ # <Expression u'undefined' (3:22)> -> _cache_37138512
+ try:
+ _cache_37138512 = getitem('undefined')
+ except:
+ rcontext.setdefault('__error__', []).append((u'undefined', 3, 22, '<string>', _sys.exc_info()[1], ))
+ raise
+
+
+ # <Identity expression=<Expression u'undefined' (3:22)> value=<Marker name='default' at 236b7d0> at 236bfd0> -> _condition
+ _expression = _cache_37138512
+
+ # <Marker name='default' at 236b7d0> -> _value
+ _value = _marker_default
+ _condition = (_expression is _value)
+ if _condition:
+ pass
+ else:
+ _content = _cache_37138512
+ if (_content is None):
+ pass
+ else:
+ if (_content is False):
+ _content = None
+ else:
+ _tt = type(_content)
+ if ((_tt is int) or (_tt is float) or (_tt is long)):
+ _content = unicode(_content)
+ else:
+ try:
+ if (_tt is str):
+ _content = decode(_content)
+ else:
+ if (_tt is not unicode):
+ try:
+ _content = _content.__html__
+ except:
+ _content = convert(_content)
+ else:
+ raise RuntimeError
+ except RuntimeError:
+ _content = _content()
+ else:
+ if ((_content is not None) and (re_needs_escape(_content) is not None)):
+ if ('&' in _content):
+ if (';' in _content):
+ _content = re_amp.sub('&amp;', _content)
+ else:
+ _content = _content.replace('&', '&amp;')
+ if ('<' in _content):
+ _content = _content.replace('<', '&lt;')
+ if ('>' in _content):
+ _content = _content.replace('>', '&gt;')
+ if ('\x00' in _content):
+ _content = _content.replace('\x00', '"')
+ if (_content is not None):
+ append(_content)
+ if (_backup_default_40145120 is _marker):
+ del econtext['default']
+ else:
+ econtext['default'] = _backup_default_40145120
+ append(u'</div>')
+ if (_backup_attrs_40167832 is _marker):
+ del econtext['attrs']
+ else:
+ econtext['attrs'] = _backup_attrs_40167832
+ _content_139955154988272 = u'\n '
+ if (_content_139955154988272 is not None):
+ append(_content_139955154988272)
+
append(u'</body>') + if (_backup_attrs_40147568 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_40147568 + except (NameError, ValueError, AttributeError, LookupError, TypeError, ): + del stream[__fallback_34354496:] + + # <Expression u'string:error' (2:22)> -> _content + try: + _content = u'error' + except: + rcontext.setdefault('__error__', []).append((u'string:error', 2, 22, '<string>', _sys.exc_info()[1], )) + raise + + if (_content is not None): + _tt = type(_content) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content = str(_content) + else: + if (_tt is str): + _content = decode(_content) + else: + if (_tt is not unicode): + try: + _content = _content.__html__ + except AttributeError: + _content = convert(_content) + else: + _content = _content() + if (_content is not None): + append(_content) + + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</html>') + if (_backup_attrs_40147424 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_40147424 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) +pass \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/020.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/020.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/020.xml @@ -0,0 +1,4 @@ +<!DOCTYPE doc [ +<!ELEMENT doc (#PCDATA)> +]> +<doc><![CDATA[<&]>]]]></doc> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/021-translation-domain.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/021-translation-domain.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/021-translation-domain.pt @@ -0,0 +1,16 @@ +<html> + <body i18n:domain="old"> + <div i18n:domain="new" i18n:translate=""> + Hello world! + </div> + <div i18n:translate=""> + Hello world! + </div> + <div class="test" i18n:domain="new" i18n:attributes="class"> + Hello world! + </div> + <div class="test" i18n:domain="new" i18n:attributes="class test_msgid"> + Hello world! + </div> + </body> +</html> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/021-translation-domain.pt-en.py b/lib/Chameleon-2.22/src/chameleon/tests/inputs/021-translation-domain.pt-en.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/021-translation-domain.pt-en.py @@ -0,0 +1,268 @@ +# -*- coding: utf-8 -*- +pass +from chameleon.utils import DebuggingOutputStream as _DebuggingOutputStream +pass +_static_37140048 = {} +_static_38400656 = {} +_static_36787600 = {u'class': u'test', } +_static_37140688 = {} +_static_38402256 = {u'class': u'test', } +_static_37142352 = {} +import re +import functools +_marker = object() +g_re_amp = re.compile('&(?!([A-Za-z]+|#[0-9]+);)') +g_re_needs_escape = re.compile('[&<>\\"\\\']').search +re_whitespace = functools.partial(re.compile('\\s+').sub, ' ') + +def render(stream, econtext, rcontext): + append = stream.append + getitem = econtext.__getitem__ + get = econtext.get + _i18n_domain = None + re_amp = g_re_amp + re_needs_escape = g_re_needs_escape + decode = getitem('decode') + convert = getitem('convert') + translate = getitem('translate') + _backup_attrs_39931128 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x236bf50> name=None at 236be90> -> _value + _value = _static_37142352 + econtext['attrs'] = _value + + # <html ... 
(1:0) + # -------------------------------------------------------- + append(u'<html>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _previous_i18n_domain_36787856 = _i18n_domain + _i18n_domain = u'old' + _backup_attrs_39848848 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x236b8d0> name=None at 236b150> -> _value + _value = _static_37140688 + econtext['attrs'] = _value + + # <body ... (2:2) + # -------------------------------------------------------- + append(u'<body>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _previous_i18n_domain_37142224 = _i18n_domain + _i18n_domain = u'new' + _backup_attrs_40301704 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x236b650> name=None at 236b050> -> _value + _value = _static_37140048 + econtext['attrs'] = _value + + # <div ... (3:4) + # -------------------------------------------------------- + append(u'<div>') + _stream_37142416 = _DebuggingOutputStream() + _append_37142416 = _stream_37142416.append + _content_139955154988272 = u'\n Hello world!\n ' + if (_content_139955154988272 is not None): + _append_37142416(_content_139955154988272) + _msgid_37142416 = re_whitespace(''.join(_stream_37142416)).strip() + append(translate(_msgid_37142416, mapping=None, default=_msgid_37142416, domain=_i18n_domain)) + append(u'</div>') + if (_backup_attrs_40301704 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_40301704 + _i18n_domain = _previous_i18n_domain_37142224 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_39862784 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x249f290> name=None at 249f3d0> -> _value + _value = _static_38400656 + econtext['attrs'] = _value + + # <div ... (6:4) + # -------------------------------------------------------- + append(u'<div>') + _stream_38576208 = _DebuggingOutputStream() + _append_38576208 = _stream_38576208.append + _content_139955154988272 = u'\n Hello world!\n ' + if (_content_139955154988272 is not None): + _append_38576208(_content_139955154988272) + _msgid_38576208 = re_whitespace(''.join(_stream_38576208)).strip() + append(translate(_msgid_38576208, mapping=None, default=_msgid_38576208, domain=_i18n_domain)) + append(u'</div>') + if (_backup_attrs_39862784 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_39862784 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _previous_i18n_domain_36789456 = _i18n_domain + _i18n_domain = u'new' + _backup_attrs_38619904 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x249f8d0> name=None at 249fe10> -> _value + _value = _static_38402256 + econtext['attrs'] = _value + + # <div ... 
(9:4)
+ # --------------------------------------------------------
+ append(u'<div')
+ _backup_default_38620624 = get('default', _marker)
+ _value = u'test'
+ econtext['default'] = _value
+
+ # <Translate msgid=None node=<_ast.Str object at 0x249fc10> at 249f190> -> _attr_class
+ _attr_class = u'test'
+ _attr_class = translate(_attr_class, default=_attr_class, domain=_i18n_domain)
+ if (_attr_class is None):
+ pass
+ else:
+ if (_attr_class is False):
+ _attr_class = None
+ else:
+ _tt = type(_attr_class)
+ if ((_tt is int) or (_tt is float) or (_tt is long)):
+ _attr_class = unicode(_attr_class)
+ else:
+ try:
+ if (_tt is str):
+ _attr_class = decode(_attr_class)
+ else:
+ if (_tt is not unicode):
+ try:
+ _attr_class = _attr_class.__html__
+ except:
+ _attr_class = convert(_attr_class)
+ else:
+ raise RuntimeError
+ except RuntimeError:
+ _attr_class = _attr_class()
+ else:
+ if ((_attr_class is not None) and (re_needs_escape(_attr_class) is not None)):
+ if ('&' in _attr_class):
+ if (';' in _attr_class):
+ _attr_class = re_amp.sub('&amp;', _attr_class)
+ else:
+ _attr_class = _attr_class.replace('&', '&amp;')
+ if ('<' in _attr_class):
+ _attr_class = _attr_class.replace('<', '&lt;')
+ if ('>' in _attr_class):
+ _attr_class = _attr_class.replace('>', '&gt;')
+ if (u'"' in _attr_class):
+ _attr_class = _attr_class.replace(u'"', '&quot;')
+ if (_attr_class is not None):
+ append((u' class="%s"' % _attr_class))
+ if (_backup_default_38620624 is _marker):
+ del econtext['default']
+ else:
+ econtext['default'] = _backup_default_38620624
+ append(u'>')
+ _content_139955154988272 = u'\n Hello world!\n '
+ if (_content_139955154988272 is not None):
+ append(_content_139955154988272)
+ append(u'</div>')
+ if (_backup_attrs_38619904 is _marker):
+ del econtext['attrs']
+ else:
+ econtext['attrs'] = _backup_attrs_38619904
+ _i18n_domain = _previous_i18n_domain_36789456
+ _content_139955154988272 = u'\n '
+ if (_content_139955154988272 is not None):
+ append(_content_139955154988272)
+ _previous_i18n_domain_36786320 = _i18n_domain
+ _i18n_domain = u'new'
+ _backup_attrs_38620120 = get('attrs', _marker)
+
+ # <Static value=<_ast.Dict object at 0x2315590> name=None at 23151d0> -> _value
+ _value = _static_36787600
+ econtext['attrs'] = _value
+
+ # <div ...
(12:4)
+ # --------------------------------------------------------
+ append(u'<div')
+ _backup_default_38618464 = get('default', _marker)
+ _value = u'test'
+ econtext['default'] = _value
+
+ # <Translate msgid=u'test_msgid' node=<_ast.Str object at 0x2315050> at 2315f10> -> _attr_class
+ _attr_class = u'test'
+ _attr_class = translate(u'test_msgid', default=_attr_class, domain=_i18n_domain)
+ if (_attr_class is None):
+ pass
+ else:
+ if (_attr_class is False):
+ _attr_class = None
+ else:
+ _tt = type(_attr_class)
+ if ((_tt is int) or (_tt is float) or (_tt is long)):
+ _attr_class = unicode(_attr_class)
+ else:
+ try:
+ if (_tt is str):
+ _attr_class = decode(_attr_class)
+ else:
+ if (_tt is not unicode):
+ try:
+ _attr_class = _attr_class.__html__
+ except:
+ _attr_class = convert(_attr_class)
+ else:
+ raise RuntimeError
+ except RuntimeError:
+ _attr_class = _attr_class()
+ else:
+ if ((_attr_class is not None) and (re_needs_escape(_attr_class) is not None)):
+ if ('&' in _attr_class):
+ if (';' in _attr_class):
+ _attr_class = re_amp.sub('&amp;', _attr_class)
+ else:
+ _attr_class = _attr_class.replace('&', '&amp;')
+ if ('<' in _attr_class):
+ _attr_class = _attr_class.replace('<', '&lt;')
+ if ('>' in _attr_class):
+ _attr_class = _attr_class.replace('>', '&gt;')
+ if (u'"' in _attr_class):
+ _attr_class = _attr_class.replace(u'"', '&quot;')
+ if (_attr_class is not None):
+ append((u' class="%s"' % _attr_class))
+ if (_backup_default_38618464 is _marker):
+ del econtext['default']
+ else:
+ econtext['default'] = _backup_default_38618464
+ append(u'>')
+ _content_139955154988272 = u'\n Hello world!\n '
+ if (_content_139955154988272 is not None):
+ append(_content_139955154988272)
+ append(u'</div>')
+ if (_backup_attrs_38620120 is _marker):
+ del econtext['attrs']
+ else:
+ econtext['attrs'] = _backup_attrs_38620120
+ _i18n_domain = _previous_i18n_domain_36786320
+ _content_139955154988272 = u'\n '
+ if (_content_139955154988272 is not None):
+ append(_content_139955154988272)
+ append(u'</body>')
+ if (_backup_attrs_39848848 is _marker):
+ del econtext['attrs']
+ else:
+ econtext['attrs'] = _backup_attrs_39848848
+ _i18n_domain = _previous_i18n_domain_36787856
+ _content_139955154988272 = u'\n'
+ if (_content_139955154988272 is not None):
+ append(_content_139955154988272)
+ append(u'</html>')
+ if (_backup_attrs_39931128 is _marker):
+ del econtext['attrs']
+ else:
+ econtext['attrs'] = _backup_attrs_39931128
+ _content_139955154988272 = u'\n'
+ if (_content_139955154988272 is not None):
+ append(_content_139955154988272)
+pass
\ No newline at end of file
diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/021-translation-domain.pt.py b/lib/Chameleon-2.22/src/chameleon/tests/inputs/021-translation-domain.pt.py
new file mode 100644
--- /dev/null
+++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/021-translation-domain.pt.py
@@ -0,0 +1,268 @@
+# -*- coding: utf-8 -*-
+pass
+from chameleon.utils import DebuggingOutputStream as _DebuggingOutputStream
+pass
+_static_37141584 = {u'class': u'test', }
+_static_38400080 = {}
+_static_36787472 = {}
+_static_36787536 = {}
+_static_37142224 = {u'class': u'test', }
+_static_38401744 = {}
+import re
+import functools
+_marker = object()
+g_re_amp = re.compile('&(?!([A-Za-z]+|#[0-9]+);)')
+g_re_needs_escape = re.compile('[&<>\\"\\\']').search
+re_whitespace = functools.partial(re.compile('\\s+').sub, ' ')
+
+def render(stream, econtext, rcontext):
+ append = stream.append
+ getitem = econtext.__getitem__
+ get = econtext.get
+ _i18n_domain = None
+
re_amp = g_re_amp + re_needs_escape = g_re_needs_escape + decode = getitem('decode') + convert = getitem('convert') + translate = getitem('translate') + _backup_attrs_35899728 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x249f050> name=None at 249fdd0> -> _value + _value = _static_38400080 + econtext['attrs'] = _value + + # <html ... (1:0) + # -------------------------------------------------------- + append(u'<html>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _previous_i18n_domain_37140368 = _i18n_domain + _i18n_domain = u'old' + _backup_attrs_35899296 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x249f6d0> name=None at 249f850> -> _value + _value = _static_38401744 + econtext['attrs'] = _value + + # <body ... (2:2) + # -------------------------------------------------------- + append(u'<body>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _previous_i18n_domain_36787664 = _i18n_domain + _i18n_domain = u'new' + _backup_attrs_35899656 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x2315550> name=None at 2315d90> -> _value + _value = _static_36787536 + econtext['attrs'] = _value + + # <div ... (3:4) + # -------------------------------------------------------- + append(u'<div>') + _stream_36789456 = _DebuggingOutputStream() + _append_36789456 = _stream_36789456.append + _content_139955154988272 = u'\n Hello world!\n ' + if (_content_139955154988272 is not None): + _append_36789456(_content_139955154988272) + _msgid_36789456 = re_whitespace(''.join(_stream_36789456)).strip() + append(translate(_msgid_36789456, mapping=None, default=_msgid_36789456, domain=_i18n_domain)) + append(u'</div>') + if (_backup_attrs_35899656 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_35899656 + _i18n_domain = _previous_i18n_domain_36787664 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_38614584 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x2315510> name=None at 23153d0> -> _value + _value = _static_36787472 + econtext['attrs'] = _value + + # <div ... (6:4) + # -------------------------------------------------------- + append(u'<div>') + _stream_36787280 = _DebuggingOutputStream() + _append_36787280 = _stream_36787280.append + _content_139955154988272 = u'\n Hello world!\n ' + if (_content_139955154988272 is not None): + _append_36787280(_content_139955154988272) + _msgid_36787280 = re_whitespace(''.join(_stream_36787280)).strip() + append(translate(_msgid_36787280, mapping=None, default=_msgid_36787280, domain=_i18n_domain)) + append(u'</div>') + if (_backup_attrs_38614584 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_38614584 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _previous_i18n_domain_37139408 = _i18n_domain + _i18n_domain = u'new' + _backup_attrs_36648648 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x236bc50> name=None at 24ae750> -> _value + _value = _static_37141584 + econtext['attrs'] = _value + + # <div ... 
(9:4) + # -------------------------------------------------------- + append(u'<div') + _backup_default_36650880 = get('default', _marker) + _value = u'test' + econtext['default'] = _value + + # <Translate msgid=None node=<_ast.Str object at 0x236bf50> at 236b210> -> _attr_class + _attr_class = u'test' + _attr_class = translate(_attr_class, default=_attr_class, domain=_i18n_domain) + if (_attr_class is None): + pass + else: + if (_attr_class is False): + _attr_class = None + else: + _tt = type(_attr_class) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _attr_class = unicode(_attr_class) + else: + try: + if (_tt is str): + _attr_class = decode(_attr_class) + else: + if (_tt is not unicode): + try: + _attr_class = _attr_class.__html__ + except: + _attr_class = convert(_attr_class) + else: + raise RuntimeError + except RuntimeError: + _attr_class = _attr_class() + else: + if ((_attr_class is not None) and (re_needs_escape(_attr_class) is not None)): + if ('&' in _attr_class): + if (';' in _attr_class): + _attr_class = re_amp.sub('&', _attr_class) + else: + _attr_class = _attr_class.replace('&', '&') + if ('<' in _attr_class): + _attr_class = _attr_class.replace('<', '<') + if ('>' in _attr_class): + _attr_class = _attr_class.replace('>', '>') + if (u'"' in _attr_class): + _attr_class = _attr_class.replace(u'"', '"') + if (_attr_class is not None): + append((u' class="%s"' % _attr_class)) + if (_backup_default_36650880 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_36650880 + append(u'>') + _content_139955154988272 = u'\n Hello world!\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</div>') + if (_backup_attrs_36648648 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_36648648 + _i18n_domain = _previous_i18n_domain_37139408 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _previous_i18n_domain_37139344 = _i18n_domain + _i18n_domain = u'new' + _backup_attrs_39044808 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x236bed0> name=None at 236b4d0> -> _value + _value = _static_37142224 + econtext['attrs'] = _value + + # <div ... 
(12:4) + # -------------------------------------------------------- + append(u'<div') + _backup_default_39044664 = get('default', _marker) + _value = u'test' + econtext['default'] = _value + + # <Translate msgid=u'test_msgid' node=<_ast.Str object at 0x236b650> at 236b7d0> -> _attr_class + _attr_class = u'test' + _attr_class = translate(u'test_msgid', default=_attr_class, domain=_i18n_domain) + if (_attr_class is None): + pass + else: + if (_attr_class is False): + _attr_class = None + else: + _tt = type(_attr_class) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _attr_class = unicode(_attr_class) + else: + try: + if (_tt is str): + _attr_class = decode(_attr_class) + else: + if (_tt is not unicode): + try: + _attr_class = _attr_class.__html__ + except: + _attr_class = convert(_attr_class) + else: + raise RuntimeError + except RuntimeError: + _attr_class = _attr_class() + else: + if ((_attr_class is not None) and (re_needs_escape(_attr_class) is not None)): + if ('&' in _attr_class): + if (';' in _attr_class): + _attr_class = re_amp.sub('&', _attr_class) + else: + _attr_class = _attr_class.replace('&', '&') + if ('<' in _attr_class): + _attr_class = _attr_class.replace('<', '<') + if ('>' in _attr_class): + _attr_class = _attr_class.replace('>', '>') + if (u'"' in _attr_class): + _attr_class = _attr_class.replace(u'"', '"') + if (_attr_class is not None): + append((u' class="%s"' % _attr_class)) + if (_backup_default_39044664 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_39044664 + append(u'>') + _content_139955154988272 = u'\n Hello world!\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</div>') + if (_backup_attrs_39044808 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_39044808 + _i18n_domain = _previous_i18n_domain_37139344 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</body>') + if (_backup_attrs_35899296 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_35899296 + _i18n_domain = _previous_i18n_domain_37140368 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</html>') + if (_backup_attrs_35899728 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_35899728 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) +pass \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/021.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/021.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/021.xml @@ -0,0 +1,4 @@ +<!DOCTYPE doc [ +<!ELEMENT doc (#PCDATA)> +]> +<doc><!-- a comment --></doc> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/022-switch.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/022-switch.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/022-switch.pt @@ -0,0 +1,21 @@ +<html> + <body> + <div tal:switch="True"> + <span tal:case="False">bad</span> + <span tal:case="True">ok</span> + <span tal:case="True">ok</span> + <span tal:case="default">bad</span> + <span tal:case="True">bad</span> + ${default|string:ok} + </div> + <div tal:switch="True"> + <span tal:case="False">bad</span> + <span tal:case="default">ok</span> + </div> + <div tal:switch="3"> + <span 
tal:case="1">bad</span> + <span tal:case="2">bad</span> + <span tal:case="default">ok</span> + </div> + </body> +</html> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/022-switch.pt.py b/lib/Chameleon-2.22/src/chameleon/tests/inputs/022-switch.pt.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/022-switch.pt.py @@ -0,0 +1,327 @@ +# -*- coding: utf-8 -*- +pass +import sys as _sys +pass +_static_37142480 = {} +_static_38891664 = {} +_static_37142352 = {} +_static_38894800 = {} +_static_38893584 = {} +_static_36788624 = {} +_static_37141584 = {} +_static_36789456 = {} +_static_38400400 = {} +import re +import functools +_marker = object() +g_re_amp = re.compile('&(?!([A-Za-z]+|#[0-9]+);)') +g_re_needs_escape = re.compile('[&<>\\"\\\']').search +re_whitespace = functools.partial(re.compile('\\s+').sub, ' ') + +def render(stream, econtext, rcontext): + append = stream.append + getitem = econtext.__getitem__ + get = econtext.get + _i18n_domain = None + re_amp = g_re_amp + re_needs_escape = g_re_needs_escape + decode = getitem('decode') + convert = getitem('convert') + translate = getitem('translate') + _backup_attrs_39791864 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x2315cd0> name=None at 2315090> -> _value + _value = _static_36789456 + econtext['attrs'] = _value + + # <html ... (1:0) + # -------------------------------------------------------- + append(u'<html>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_39094824 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x2315990> name=None at 2315f90> -> _value + _value = _static_36788624 + econtext['attrs'] = _value + + # <body ... (2:2) + # -------------------------------------------------------- + append(u'<body>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + + # <Expression u'True' (3:21)> -> _cache_37139408 + try: + _cache_37139408 = True + except: + rcontext.setdefault('__error__', []).append((u'True', 3, 21, '<string>', _sys.exc_info()[1], )) + raise + + _backup_attrs_38450528 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x236bc50> name=None at 236b150> -> _value + _value = _static_37141584 + econtext['attrs'] = _value + + # <div ... (3:4) + # -------------------------------------------------------- + append(u'<div>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_default_35792944 = get('default', _marker) + _value = _cache_37139408 + econtext['default'] = _value + + # <Equality expression=<Expression u'True' (3:21)> value=<Expression u'False' (4:22)> at 236bcd0> -> _condition + _expression = _cache_37139408 + + # <Expression u'False' (4:22)> -> _value + try: + _value = False + except: + rcontext.setdefault('__error__', []).append((u'False', 4, 22, '<string>', _sys.exc_info()[1], )) + raise + + _condition = (_expression == _value) + if _condition: + _backup_attrs_35791864 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x236bf50> name=None at 236b210> -> _value + _value = _static_37142352 + econtext['attrs'] = _value + + # <span ... 
(4:6) + # -------------------------------------------------------- + append(u'<span>') + _content_139955154988272 = u'bad' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</span>') + if (_backup_attrs_35791864 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_35791864 + if (_backup_default_35792944 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_35792944 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_default_35794240 = get('default', _marker) + _value = _cache_37139408 + econtext['default'] = _value + + # <Equality expression=<Expression u'True' (3:21)> value=<Expression u'True' (5:22)> at 249f050> -> _condition + _expression = _cache_37139408 + + # <Expression u'True' (5:22)> -> _value + try: + _value = True + except: + rcontext.setdefault('__error__', []).append((u'True', 5, 22, '<string>', _sys.exc_info()[1], )) + raise + + _condition = (_expression == _value) + if _condition: + _backup_attrs_35794024 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x236bfd0> name=None at 236bb10> -> _value + _value = _static_37142480 + econtext['attrs'] = _value + + # <span ... (5:6) + # -------------------------------------------------------- + append(u'<span>') + _content_139955154988272 = u'ok' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</span>') + if (_backup_attrs_35794024 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_35794024 + if (_backup_default_35794240 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_35794240 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_default_35792224 = get('default', _marker) + _value = _cache_37139408 + econtext['default'] = _value + + # <Equality expression=<Expression u'True' (3:21)> value=<Expression u'not not True' (6:22)> at 249fc50> -> _condition + _expression = _cache_37139408 + + # <Expression u'not not True' (6:22)> -> _value + try: + _value = not not True + except: + rcontext.setdefault('__error__', []).append((u'not not True', 6, 22, '<string>', _sys.exc_info()[1], )) + raise + + _condition = (_expression == _value) + if _condition: + _backup_attrs_35794312 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x249f190> name=None at 249f950> -> _value + _value = _static_38400400 + econtext['attrs'] = _value + + # <span ... 
(6:6) + # -------------------------------------------------------- + append(u'<span>') + _content_139955154988272 = u'ok' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</span>') + if (_backup_attrs_35794312 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_35794312 + if (_backup_default_35792224 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_35792224 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</div>') + if (_backup_attrs_38450528 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_38450528 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + + # <Expression u'True' (8:21)> -> _cache_38892304 + try: + _cache_38892304 = True + except: + rcontext.setdefault('__error__', []).append((u'True', 8, 21, '<string>', _sys.exc_info()[1], )) + raise + + _backup_attrs_39792936 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x2517090> name=None at 2517150> -> _value + _value = _static_38891664 + econtext['attrs'] = _value + + # <div ... (8:4) + # -------------------------------------------------------- + append(u'<div>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_default_39865448 = get('default', _marker) + _value = _cache_38892304 + econtext['default'] = _value + + # <Equality expression=<Expression u'True' (8:21)> value=<Expression u'False' (9:22)> at 2517590> -> _condition + _expression = _cache_38892304 + + # <Expression u'False' (9:22)> -> _value + try: + _value = False + except: + rcontext.setdefault('__error__', []).append((u'False', 9, 22, '<string>', _sys.exc_info()[1], )) + raise + + _condition = (_expression == _value) + if _condition: + _backup_attrs_39863216 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x2517810> name=None at 2517690> -> _value + _value = _static_38893584 + econtext['attrs'] = _value + + # <span ... (9:6) + # -------------------------------------------------------- + append(u'<span>') + _content_139955154988272 = u'bad' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</span>') + if (_backup_attrs_39863216 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_39863216 + if (_backup_default_39865448 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_39865448 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_default_39804224 = get('default', _marker) + _value = _cache_38892304 + econtext['default'] = _value + + # <Equality expression=<Expression u'True' (8:21)> value=<Expression u'default' (10:22)> at 2517a50> -> _condition + _expression = _cache_38892304 + + # <Expression u'default' (10:22)> -> _value + try: + _value = getitem('default') + except: + rcontext.setdefault('__error__', []).append((u'default', 10, 22, '<string>', _sys.exc_info()[1], )) + raise + + _condition = (_expression == _value) + if _condition: + _backup_attrs_39865808 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x2517cd0> name=None at 2517b90> -> _value + _value = _static_38894800 + econtext['attrs'] = _value + + # <span ... 
(10:6) + # -------------------------------------------------------- + append(u'<span>') + _content_139955154988272 = u'ok' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</span>') + if (_backup_attrs_39865808 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_39865808 + if (_backup_default_39804224 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_39804224 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</div>') + if (_backup_attrs_39792936 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_39792936 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</body>') + if (_backup_attrs_39094824 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_39094824 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</html>') + if (_backup_attrs_39791864 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_39791864 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) +pass \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/022.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/022.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/022.xml @@ -0,0 +1,4 @@ +<!DOCTYPE doc [ +<!ELEMENT doc (#PCDATA)> +]> +<doc><!-- a comment ->--></doc> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/023-condition.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/023-condition.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/023-condition.pt @@ -0,0 +1,6 @@ +<html> + <body tal:condition="True"> + <span tal:define="selector False" tal:condition="selector">bad</span> + <span tal:condition="True">ok</span> + </body> +</html> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/023-condition.pt.py b/lib/Chameleon-2.22/src/chameleon/tests/inputs/023-condition.pt.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/023-condition.pt.py @@ -0,0 +1,147 @@ +# -*- coding: utf-8 -*- +pass +import sys as _sys +pass +_static_37141392 = {} +_static_38401104 = {} +_static_38400656 = {} +_static_37139024 = {} +import re +import functools +_marker = object() +g_re_amp = re.compile('&(?!([A-Za-z]+|#[0-9]+);)') +g_re_needs_escape = re.compile('[&<>\\"\\\']').search +re_whitespace = functools.partial(re.compile('\\s+').sub, ' ') + +def render(stream, econtext, rcontext): + append = stream.append + getitem = econtext.__getitem__ + get = econtext.get + _i18n_domain = None + re_amp = g_re_amp + re_needs_escape = g_re_needs_escape + decode = getitem('decode') + convert = getitem('convert') + translate = getitem('translate') + _backup_attrs_38619040 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x249f290> name=None at 249fc10> -> _value + _value = _static_38400656 + econtext['attrs'] = _value + + # <html ... 
(1:0) + # -------------------------------------------------------- + append(u'<html>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + + # <Expression u'True' (2:23)> -> _condition + try: + _condition = True + except: + rcontext.setdefault('__error__', []).append((u'True', 2, 23, '<string>', _sys.exc_info()[1], )) + raise + + if _condition: + _backup_attrs_39065432 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x249f450> name=None at 249f850> -> _value + _value = _static_38401104 + econtext['attrs'] = _value + + # <body ... (2:2) + # -------------------------------------------------------- + append(u'<body>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_selector_38402576 = get('selector', _marker) + + # <Expression u'False' (3:31)> -> _value + try: + _value = False + except: + rcontext.setdefault('__error__', []).append((u'False', 3, 31, '<string>', _sys.exc_info()[1], )) + raise + + econtext['selector'] = _value + + # <Expression u'selector' (3:53)> -> _condition + try: + _condition = getitem('selector') + except: + rcontext.setdefault('__error__', []).append((u'selector', 3, 53, '<string>', _sys.exc_info()[1], )) + raise + + if _condition: + _backup_attrs_39065576 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x236b250> name=None at 236b450> -> _value + _value = _static_37139024 + econtext['attrs'] = _value + + # <span ... (3:4) + # -------------------------------------------------------- + append(u'<span>') + _content_139955154988272 = u'bad' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</span>') + if (_backup_attrs_39065576 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_39065576 + if (_backup_selector_38402576 is _marker): + del econtext['selector'] + else: + econtext['selector'] = _backup_selector_38402576 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + + # <Expression u'True' (4:25)> -> _condition + try: + _condition = True + except: + rcontext.setdefault('__error__', []).append((u'True', 4, 25, '<string>', _sys.exc_info()[1], )) + raise + + if _condition: + _backup_attrs_39064784 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x236bb90> name=None at 236bbd0> -> _value + _value = _static_37141392 + econtext['attrs'] = _value + + # <span ... 
(4:4) + # -------------------------------------------------------- + append(u'<span>') + _content_139955154988272 = u'ok' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</span>') + if (_backup_attrs_39064784 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_39064784 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</body>') + if (_backup_attrs_39065432 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_39065432 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</html>') + if (_backup_attrs_38619040 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_38619040 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) +pass \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/023.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/023.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/023.xml @@ -0,0 +1,5 @@ +<!DOCTYPE doc [ +<!ELEMENT doc (#PCDATA)> +<!ENTITY e ""> +]> +<doc>&e;</doc> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/024-namespace-elements.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/024-namespace-elements.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/024-namespace-elements.pt @@ -0,0 +1,16 @@ +<html> + <body> + <tal:first> + <tal:second> + ${'first'} + </tal:second> + second + </tal:first> + <tal:block condition="True"> + ok + </tal:block> + <tal:block condition="False"> + bad + </tal:block> + </body> +</html> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/024-namespace-elements.pt.py b/lib/Chameleon-2.22/src/chameleon/tests/inputs/024-namespace-elements.pt.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/024-namespace-elements.pt.py @@ -0,0 +1,150 @@ +# -*- coding: utf-8 -*- +pass +import sys as _sys +pass +_static_37140624 = {} +_static_37139664 = {} +import re +import functools +_marker = object() +g_re_amp = re.compile('&(?!([A-Za-z]+|#[0-9]+);)') +g_re_needs_escape = re.compile('[&<>\\"\\\']').search +re_whitespace = functools.partial(re.compile('\\s+').sub, ' ') + +def render(stream, econtext, rcontext): + append = stream.append + getitem = econtext.__getitem__ + get = econtext.get + _i18n_domain = None + re_amp = g_re_amp + re_needs_escape = g_re_needs_escape + decode = getitem('decode') + convert = getitem('convert') + translate = getitem('translate') + _backup_attrs_37108192 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x236b4d0> name=None at 236b510> -> _value + _value = _static_37139664 + econtext['attrs'] = _value + + # <html ... (1:0) + # -------------------------------------------------------- + append(u'<html>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_38451104 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x236b890> name=None at 236b8d0> -> _value + _value = _static_37140624 + econtext['attrs'] = _value + + # <body ... 
(2:2)
+ # --------------------------------------------------------
+ append(u'<body>')
+ _content_139955154988272 = u'\n '
+ if (_content_139955154988272 is not None):
+ append(_content_139955154988272)
+ _content_139955154988272 = u'\n '
+ if (_content_139955154988272 is not None):
+ append(_content_139955154988272)
+
+ # <Expression u"'first'" (5:10)> -> _content_139955154988272
+ try:
+ _content_139955154988272 = 'first'
+ except:
+ rcontext.setdefault('__error__', []).append((u"'first'", 5, 10, '<string>', _sys.exc_info()[1], ))
+ raise
+
+ if (_content_139955154988272 is None):
+ pass
+ else:
+ if (_content_139955154988272 is False):
+ _content_139955154988272 = None
+ else:
+ _tt = type(_content_139955154988272)
+ if ((_tt is int) or (_tt is float) or (_tt is long)):
+ _content_139955154988272 = unicode(_content_139955154988272)
+ else:
+ try:
+ if (_tt is str):
+ _content_139955154988272 = decode(_content_139955154988272)
+ else:
+ if (_tt is not unicode):
+ try:
+ _content_139955154988272 = _content_139955154988272.__html__
+ except:
+ _content_139955154988272 = convert(_content_139955154988272)
+ else:
+ raise RuntimeError
+ except RuntimeError:
+ _content_139955154988272 = _content_139955154988272()
+ else:
+ if ((_content_139955154988272 is not None) and (re_needs_escape(_content_139955154988272) is not None)):
+ if ('&' in _content_139955154988272):
+ if (';' in _content_139955154988272):
+ _content_139955154988272 = re_amp.sub('&amp;', _content_139955154988272)
+ else:
+ _content_139955154988272 = _content_139955154988272.replace('&', '&amp;')
+ if ('<' in _content_139955154988272):
+ _content_139955154988272 = _content_139955154988272.replace('<', '&lt;')
+ if ('>' in _content_139955154988272):
+ _content_139955154988272 = _content_139955154988272.replace('>', '&gt;')
+ if ('\x00' in _content_139955154988272):
+ _content_139955154988272 = _content_139955154988272.replace('\x00', '"')
+ _content_139955154988272 = ('%s%s%s' % ((u'\n ' if (u'\n ' is not None) else ''), (_content_139955154988272 if (_content_139955154988272 is not None) else ''), (u'\n ' if (u'\n ' is not None) else ''), ))
+ if (_content_139955154988272 is not None):
+ append(_content_139955154988272)
+ _content_139955154988272 = u'\n second\n '
+ if (_content_139955154988272 is not None):
+ append(_content_139955154988272)
+ _content_139955154988272 = u'\n '
+ if (_content_139955154988272 is not None):
+ append(_content_139955154988272)
+
+ # <Expression u'True' (9:26)> -> _condition
+ try:
+ _condition = True
+ except:
+ rcontext.setdefault('__error__', []).append((u'True', 9, 26, '<string>', _sys.exc_info()[1], ))
+ raise
+
+ if _condition:
+ _content_139955154988272 = u'\n ok\n '
+ if (_content_139955154988272 is not None):
+ append(_content_139955154988272)
+ _content_139955154988272 = u'\n '
+ if (_content_139955154988272 is not None):
+ append(_content_139955154988272)
+
+ # <Expression u'False' (12:26)> -> _condition
+ try:
+ _condition = False
+ except:
+ rcontext.setdefault('__error__', []).append((u'False', 12, 26, '<string>', _sys.exc_info()[1], ))
+ raise
+
+ if _condition:
+ _content_139955154988272 = u'\n bad\n '
+ if (_content_139955154988272 is not None):
+ append(_content_139955154988272)
+ _content_139955154988272 = u'\n '
+ if (_content_139955154988272 is not None):
+ append(_content_139955154988272)
+ append(u'</body>')
+ if (_backup_attrs_38451104 is _marker):
+ del econtext['attrs']
+ else:
+ econtext['attrs'] = _backup_attrs_38451104
+ _content_139955154988272 = u'\n'
+ if (_content_139955154988272 is
not None): + append(_content_139955154988272) + append(u'</html>') + if (_backup_attrs_37108192 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_37108192 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) +pass \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/024.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/024.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/024.xml @@ -0,0 +1,6 @@ +<!DOCTYPE doc [ +<!ELEMENT doc (foo)> +<!ELEMENT foo (#PCDATA)> +<!ENTITY e "<foo></foo>"> +]> +<doc>&e;</doc> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/025-repeat-whitespace.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/025-repeat-whitespace.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/025-repeat-whitespace.pt @@ -0,0 +1,15 @@ +<html> + <body> + <ul> + <li tal:repeat="i (1, 2, 3)" tal:content="i" /> + <tal:item repeat="i (1, 2, 3)"><li tal:content="i" /></tal:item> + <span tal:omit-tag="" tal:repeat="j (1, 2, 3)"><li tal:content="j" /></span> + <tal:count> + <tal:count-loop repeat="count (1, 2, 3)"> + <span tal:replace="count" + /><tal:comma condition="not repeat['count'].end">,</tal:comma> + </tal:count-loop> + </tal:count>. + </ul> + </body> +</html> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/025-repeat-whitespace.pt.py b/lib/Chameleon-2.22/src/chameleon/tests/inputs/025-repeat-whitespace.pt.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/025-repeat-whitespace.pt.py @@ -0,0 +1,421 @@ +# -*- coding: utf-8 -*- +pass +from chameleon.utils import Placeholder as _Placeholder +import sys as _sys +pass +_static_39102032 = {} +_static_39102544 = {} +_static_37140880 = {} +_static_39103376 = {} +_static_37139664 = {} +_static_37139600 = {} +_marker_default = _Placeholder() +import re +import functools +_marker = object() +g_re_amp = re.compile('&(?!([A-Za-z]+|#[0-9]+);)') +g_re_needs_escape = re.compile('[&<>\\"\\\']').search +re_whitespace = functools.partial(re.compile('\\s+').sub, ' ') + +def render(stream, econtext, rcontext): + append = stream.append + getitem = econtext.__getitem__ + get = econtext.get + _i18n_domain = None + re_amp = g_re_amp + re_needs_escape = g_re_needs_escape + decode = getitem('decode') + convert = getitem('convert') + translate = getitem('translate') + _backup_attrs_39095400 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x236b4d0> name=None at 236b310> -> _value + _value = _static_37139664 + econtext['attrs'] = _value + + # <html ... (1:0) + # -------------------------------------------------------- + append(u'<html>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_39815792 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x236b990> name=None at 236b890> -> _value + _value = _static_37140880 + econtext['attrs'] = _value + + # <body ... (2:2) + # -------------------------------------------------------- + append(u'<body>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_39828080 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x236b490> name=None at 236b390> -> _value + _value = _static_37139600 + econtext['attrs'] = _value + + # <ul ... 
(3:4) + # -------------------------------------------------------- + append(u'<ul>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_i_35883472 = get('i', _marker) + + # <Expression u'(1, 2, 3)' (4:26)> -> _iterator + try: + _iterator = (1, 2, 3, ) + except: + rcontext.setdefault('__error__', []).append((u'(1, 2, 3)', 4, 26, '<string>', _sys.exc_info()[1], )) + raise + + (_iterator, __index_39101968, ) = getitem('repeat')(u'i', _iterator) + econtext['i'] = None + for _item in _iterator: + econtext['i'] = _item + _backup_attrs_40288912 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x254a650> name=None at 254a190> -> _value + _value = _static_39102032 + econtext['attrs'] = _value + + # <li ... (4:37) + # -------------------------------------------------------- + append(u'<li>') + _backup_default_39802640 = get('default', _marker) + + # <Marker name='default' at 254a510> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u'i' (4:54)> -> _cache_39101904 + try: + _cache_39101904 = getitem('i') + except: + rcontext.setdefault('__error__', []).append((u'i', 4, 54, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u'i' (4:54)> value=<Marker name='default' at 254a110> at 254a2d0> -> _condition + _expression = _cache_39101904 + + # <Marker name='default' at 254a110> -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + pass + else: + _content = _cache_39101904 + if (_content is None): + pass + else: + if (_content is False): + _content = None + else: + _tt = type(_content) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content = unicode(_content) + else: + try: + if (_tt is str): + _content = decode(_content) + else: + if (_tt is not unicode): + try: + _content = _content.__html__ + except: + _content = convert(_content) + else: + raise RuntimeError + except RuntimeError: + _content = _content() + else: + if ((_content is not None) and (re_needs_escape(_content) is not None)): + if ('&' in _content): + if (';' in _content): + _content = re_amp.sub('&', _content) + else: + _content = _content.replace('&', '&') + if ('<' in _content): + _content = _content.replace('<', '<') + if ('>' in _content): + _content = _content.replace('>', '>') + if ('\x00' in _content): + _content = _content.replace('\x00', '"') + if (_content is not None): + append(_content) + if (_backup_default_39802640 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_39802640 + append(u'</li>') + if (_backup_attrs_40288912 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_40288912 + __index_39101968 -= 1 + if (__index_39101968 > 0): + append('\n ') + if (_backup_i_35883472 is _marker): + del econtext['i'] + else: + econtext['i'] = _backup_i_35883472 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_j_37194704 = get('j', _marker) + + # <Expression u'(1, 2, 3)' (5:42)> -> _iterator + try: + _iterator = (1, 2, 3, ) + except: + rcontext.setdefault('__error__', []).append((u'(1, 2, 3)', 5, 42, '<string>', _sys.exc_info()[1], )) + raise + + (_iterator, __index_39100944, ) = getitem('repeat')(u'j', _iterator) + econtext['j'] = None + for _item in _iterator: + econtext['j'] = _item + _backup_attrs_40302712 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x254ab90> 
name=None at 254af90> -> _value + _value = _static_39103376 + econtext['attrs'] = _value + + # <li ... (5:53) + # -------------------------------------------------------- + append(u'<li>') + _backup_default_40289920 = get('default', _marker) + + # <Marker name='default' at 254a890> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u'j' (5:70)> -> _cache_39103632 + try: + _cache_39103632 = getitem('j') + except: + rcontext.setdefault('__error__', []).append((u'j', 5, 70, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u'j' (5:70)> value=<Marker name='default' at 254aa90> at 254a390> -> _condition + _expression = _cache_39103632 + + # <Marker name='default' at 254aa90> -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + pass + else: + _content = _cache_39103632 + if (_content is None): + pass + else: + if (_content is False): + _content = None + else: + _tt = type(_content) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content = unicode(_content) + else: + try: + if (_tt is str): + _content = decode(_content) + else: + if (_tt is not unicode): + try: + _content = _content.__html__ + except: + _content = convert(_content) + else: + raise RuntimeError + except RuntimeError: + _content = _content() + else: + if ((_content is not None) and (re_needs_escape(_content) is not None)): + if ('&' in _content): + if (';' in _content): + _content = re_amp.sub('&', _content) + else: + _content = _content.replace('&', '&') + if ('<' in _content): + _content = _content.replace('<', '<') + if ('>' in _content): + _content = _content.replace('>', '>') + if ('\x00' in _content): + _content = _content.replace('\x00', '"') + if (_content is not None): + append(_content) + if (_backup_default_40289920 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_40289920 + append(u'</li>') + if (_backup_attrs_40302712 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_40302712 + __index_39100944 -= 1 + if (__index_39100944 > 0): + append('\n ') + if (_backup_j_37194704 is _marker): + del econtext['j'] + else: + econtext['j'] = _backup_j_37194704 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_count_37019152 = get('count', _marker) + + # <Expression u'(1, 2, 3)' (7:38)> -> _iterator + try: + _iterator = (1, 2, 3, ) + except: + rcontext.setdefault('__error__', []).append((u'(1, 2, 3)', 7, 38, '<string>', _sys.exc_info()[1], )) + raise + + (_iterator, __index_39104016, ) = getitem('repeat')(u'count', _iterator) + econtext['count'] = None + for _item in _iterator: + econtext['count'] = _item + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_default_39927960 = get('default', _marker) + + # <Marker name='default' at 251bf10> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u'count' (8:29)> -> _cache_38911312 + try: + _cache_38911312 = getitem('count') + except: + rcontext.setdefault('__error__', []).append((u'count', 8, 29, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u'count' (8:29)> value=<Marker name='default' at 251bc10> at 251bc90> -> _condition + _expression = _cache_38911312 + + # <Marker 
name='default' at 251bc10> -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + _backup_attrs_39930912 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x254a850> name=None at 254ae90> -> _value + _value = _static_39102544 + econtext['attrs'] = _value + + # <span ... (8:10) + # -------------------------------------------------------- + append(u'<span\n />') + if (_backup_attrs_39930912 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_39930912 + else: + _content = _cache_38911312 + if (_content is None): + pass + else: + if (_content is False): + _content = None + else: + _tt = type(_content) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content = unicode(_content) + else: + try: + if (_tt is str): + _content = decode(_content) + else: + if (_tt is not unicode): + try: + _content = _content.__html__ + except: + _content = convert(_content) + else: + raise RuntimeError + except RuntimeError: + _content = _content() + else: + if ((_content is not None) and (re_needs_escape(_content) is not None)): + if ('&' in _content): + if (';' in _content): + _content = re_amp.sub('&', _content) + else: + _content = _content.replace('&', '&') + if ('<' in _content): + _content = _content.replace('<', '<') + if ('>' in _content): + _content = _content.replace('>', '>') + if ('\x00' in _content): + _content = _content.replace('\x00', '"') + if (_content is not None): + append(_content) + if (_backup_default_39927960 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_39927960 + + # <Expression u"not repeat['count'].end" (9:40)> -> _condition + try: + _condition = not getitem('repeat')['count'].end + except: + rcontext.setdefault('__error__', []).append((u"not repeat['count'].end", 9, 40, '<string>', _sys.exc_info()[1], )) + raise + + if _condition: + _content_139955154988272 = u',' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + __index_39104016 -= 1 + if (__index_39104016 > 0): + append('\n ') + if (_backup_count_37019152 is _marker): + del econtext['count'] + else: + econtext['count'] = _backup_count_37019152 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _content_139955154988272 = u'.\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</ul>') + if (_backup_attrs_39828080 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_39828080 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</body>') + if (_backup_attrs_39815792 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_39815792 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</html>') + if (_backup_attrs_39095400 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_39095400 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) +pass \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/025.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/025.xml new file mode 100644 --- /dev/null +++ 
b/lib/Chameleon-2.22/src/chameleon/tests/inputs/025.xml
@@ -0,0 +1,5 @@
+<!DOCTYPE doc [
+<!ELEMENT doc (foo*)>
+<!ELEMENT foo (#PCDATA)>
+]>
+<doc><foo/><foo></foo></doc>
diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/026-repeat-variable.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/026-repeat-variable.pt
new file mode 100644
--- /dev/null
+++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/026-repeat-variable.pt
@@ -0,0 +1,13 @@
+<div xmlns="http://www.w3.org/1999/xhtml"
+     xmlns:tal="http://xml.zope.org/namespaces/tal">
+  <ul>
+    <li tal:attributes="class repeat['i'].even()+repeat['i'].odd()" name="${i}-${repeat.i.index}" tal:repeat="i range(3)"><span tal:replace="i" /></li>
+  </ul>
+  <ul>
+    <li tal:attributes="class repeat['i'].even+repeat['i'].odd"
+        tal:repeat="i range(3)"><span tal:replace="i" /></li>
+  </ul>
+  <ul>
+    <li tal:repeat="i range(3)"><span tal:condition="repeat['i'].even" tal:replace="repeat['i'].even" /><span tal:condition="repeat['i'].odd" tal:replace="repeat['i'].odd" /></li>
+  </ul>
+</div>
diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/026-repeat-variable.pt.py b/lib/Chameleon-2.22/src/chameleon/tests/inputs/026-repeat-variable.pt.py
new file mode 100644
--- /dev/null
+++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/026-repeat-variable.pt.py
@@ -0,0 +1,733 @@
+# -*- coding: utf-8 -*-
+pass
+from chameleon.utils import Placeholder as _Placeholder
+from chameleon.py26 import lookup_attr as _lookup_attr
+import sys as _sys
+pass
+_static_38933456 = {}
+_static_39103952 = {u'class': u"repeat['i'].even+repeat['i'].odd", }
+_static_38935056 = {}
+_static_39102992 = {}
+_static_38908624 = {u'xmlns': u'http://www.w3.org/1999/xhtml', }
+_static_39102672 = {}
+_static_37140880 = {}
+_static_38936208 = {}
+_static_38934416 = {}
+_static_37140112 = {u'name': u'${i}-${repeat.i.index}', u'class': u"repeat['i'].even()+repeat['i'].odd()", }
+_static_38935312 = {}
+_marker_default = _Placeholder()
+import re
+import functools
+_marker = object()
+g_re_amp = re.compile('&(?!([A-Za-z]+|#[0-9]+);)')
+g_re_needs_escape = re.compile('[&<>\\"\\\']').search
+re_whitespace = functools.partial(re.compile('\\s+').sub, ' ')
+
+def render(stream, econtext, rcontext):
+    append = stream.append
+    getitem = econtext.__getitem__
+    get = econtext.get
+    _i18n_domain = None
+    re_amp = g_re_amp
+    re_needs_escape = g_re_needs_escape
+    decode = getitem('decode')
+    convert = getitem('convert')
+    translate = getitem('translate')
+    _backup_attrs_38631760 = get('attrs', _marker)
+
+    # <Static value=<_ast.Dict object at 0x251b2d0> name=None at 251b610> -> _value
+    _value = _static_38908624
+    econtext['attrs'] = _value
+
+    # <div ... (1:0)
+    # --------------------------------------------------------
+    append(u'<div')
+    _attr_xmlns = u'http://www.w3.org/1999/xhtml'
+    if (_attr_xmlns is not None):
+        append((u' xmlns="%s"' % _attr_xmlns))
+    append(u'>')
+    _content_139955154988272 = u'\n '
+    if (_content_139955154988272 is not None):
+        append(_content_139955154988272)
+    _backup_attrs_39790784 = get('attrs', _marker)
+
+    # <Static value=<_ast.Dict object at 0x236b990> name=None at 236b890> -> _value
+    _value = _static_37140880
+    econtext['attrs'] = _value
+
+    # <ul ...
(3:2) + # -------------------------------------------------------- + append(u'<ul>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_i_35883472 = get('i', _marker) + + # <Expression u'range(3)' (4:112)> -> _iterator + try: + _iterator = get('range', range)(3) + except: + rcontext.setdefault('__error__', []).append((u'range(3)', 4, 112, '<string>', _sys.exc_info()[1], )) + raise + + (_iterator, __index_39101072, ) = getitem('repeat')(u'i', _iterator) + econtext['i'] = None + for _item in _iterator: + econtext['i'] = _item + _backup_attrs_35757664 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x236b690> name=None at 236b490> -> _value + _value = _static_37140112 + econtext['attrs'] = _value + + # <li ... (4:4) + # -------------------------------------------------------- + append(u'<li') + _backup_default_35754928 = get('default', _marker) + _value = u'${i}-${repeat.i.index}' + econtext['default'] = _value + + # <Interpolation value=u'${i}-${repeat.i.index}' escape=True at 236b2d0> -> _attr_name + + # <Expression u'i' (4:76)> -> _attr_name + try: + _attr_name = getitem('i') + except: + rcontext.setdefault('__error__', []).append((u'i', 4, 76, '<string>', _sys.exc_info()[1], )) + raise + + + # <Expression u'repeat.i.index' (4:81)> -> _attr_name_181 + try: + _attr_name_181 = _lookup_attr(getitem('repeat'), 'i').index + except: + rcontext.setdefault('__error__', []).append((u'repeat.i.index', 4, 81, '<string>', _sys.exc_info()[1], )) + raise + + _attr_name = ('%s%s%s' % ((_attr_name if (_attr_name is not None) else ''), (u'-' if (u'-' is not None) else ''), (_attr_name_181 if (_attr_name_181 is not None) else ''), )) + if (_attr_name is None): + pass + else: + if (_attr_name is False): + _attr_name = None + else: + _tt = type(_attr_name) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _attr_name = unicode(_attr_name) + else: + try: + if (_tt is str): + _attr_name = decode(_attr_name) + else: + if (_tt is not unicode): + try: + _attr_name = _attr_name.__html__ + except: + _attr_name = convert(_attr_name) + else: + raise RuntimeError + except RuntimeError: + _attr_name = _attr_name() + else: + if ((_attr_name is not None) and (re_needs_escape(_attr_name) is not None)): + if ('&' in _attr_name): + if (';' in _attr_name): + _attr_name = re_amp.sub('&', _attr_name) + else: + _attr_name = _attr_name.replace('&', '&') + if ('<' in _attr_name): + _attr_name = _attr_name.replace('<', '<') + if ('>' in _attr_name): + _attr_name = _attr_name.replace('>', '>') + if (u'"' in _attr_name): + _attr_name = _attr_name.replace(u'"', '"') + if (_attr_name is not None): + append((u' name="%s"' % _attr_name)) + if (_backup_default_35754928 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_35754928 + _backup_default_35754064 = get('default', _marker) + _value = None + econtext['default'] = _value + + # <Expression u"repeat['i'].even()+repeat['i'].odd()" (4:30)> -> _attr_class + try: + _attr_class = (getitem('repeat')['i'].even() + getitem('repeat')['i'].odd()) + except: + rcontext.setdefault('__error__', []).append((u"repeat['i'].even()+repeat['i'].odd()", 4, 30, '<string>', _sys.exc_info()[1], )) + raise + + if (_attr_class is None): + pass + else: + if (_attr_class is False): + _attr_class = None + else: + _tt = type(_attr_class) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _attr_class = unicode(_attr_class) + else: + try: + if (_tt is str): + _attr_class = 
decode(_attr_class) + else: + if (_tt is not unicode): + try: + _attr_class = _attr_class.__html__ + except: + _attr_class = convert(_attr_class) + else: + raise RuntimeError + except RuntimeError: + _attr_class = _attr_class() + else: + if ((_attr_class is not None) and (re_needs_escape(_attr_class) is not None)): + if ('&' in _attr_class): + if (';' in _attr_class): + _attr_class = re_amp.sub('&', _attr_class) + else: + _attr_class = _attr_class.replace('&', '&') + if ('<' in _attr_class): + _attr_class = _attr_class.replace('<', '<') + if ('>' in _attr_class): + _attr_class = _attr_class.replace('>', '>') + if ('"' in _attr_class): + _attr_class = _attr_class.replace('"', '"') + if (_attr_class is not None): + append((u' class="%s"' % _attr_class)) + if (_backup_default_35754064 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_35754064 + append(u'>') + _backup_default_35756584 = get('default', _marker) + + # <Marker name='default' at 254aed0> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u'i' (4:141)> -> _cache_39102928 + try: + _cache_39102928 = getitem('i') + except: + rcontext.setdefault('__error__', []).append((u'i', 4, 141, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u'i' (4:141)> value=<Marker name='default' at 254ab90> at 254aa50> -> _condition + _expression = _cache_39102928 + + # <Marker name='default' at 254ab90> -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + _backup_attrs_35754712 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x254a8d0> name=None at 254a110> -> _value + _value = _static_39102672 + econtext['attrs'] = _value + + # <span ... (4:122) + # -------------------------------------------------------- + append(u'<span />') + if (_backup_attrs_35754712 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_35754712 + else: + _content = _cache_39102928 + if (_content is None): + pass + else: + if (_content is False): + _content = None + else: + _tt = type(_content) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content = unicode(_content) + else: + try: + if (_tt is str): + _content = decode(_content) + else: + if (_tt is not unicode): + try: + _content = _content.__html__ + except: + _content = convert(_content) + else: + raise RuntimeError + except RuntimeError: + _content = _content() + else: + if ((_content is not None) and (re_needs_escape(_content) is not None)): + if ('&' in _content): + if (';' in _content): + _content = re_amp.sub('&', _content) + else: + _content = _content.replace('&', '&') + if ('<' in _content): + _content = _content.replace('<', '<') + if ('>' in _content): + _content = _content.replace('>', '>') + if ('\x00' in _content): + _content = _content.replace('\x00', '"') + if (_content is not None): + append(_content) + if (_backup_default_35756584 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_35756584 + append(u'</li>') + if (_backup_attrs_35757664 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_35757664 + __index_39101072 -= 1 + if (__index_39101072 > 0): + append('\n ') + if (_backup_i_35883472 is _marker): + del econtext['i'] + else: + econtext['i'] = _backup_i_35883472 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</ul>') + if (_backup_attrs_39790784 is _marker): + del 
econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_39790784 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_35754496 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x254aa10> name=None at 254ac90> -> _value + _value = _static_39102992 + econtext['attrs'] = _value + + # <ul ... (6:2) + # -------------------------------------------------------- + append(u'<ul>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_i_40028112 = get('i', _marker) + + # <Expression u'range(3)' (8:22)> -> _iterator + try: + _iterator = get('range', range)(3) + except: + rcontext.setdefault('__error__', []).append((u'range(3)', 8, 22, '<string>', _sys.exc_info()[1], )) + raise + + (_iterator, __index_38403088, ) = getitem('repeat')(u'i', _iterator) + econtext['i'] = None + for _item in _iterator: + econtext['i'] = _item + _backup_attrs_35756368 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x254add0> name=None at 254aad0> -> _value + _value = _static_39103952 + econtext['attrs'] = _value + + # <li ... (7:4) + # -------------------------------------------------------- + append(u'<li') + _backup_default_35757088 = get('default', _marker) + _value = None + econtext['default'] = _value + + # <Expression u"repeat['i'].even+repeat['i'].odd" (7:30)> -> _attr_class + try: + _attr_class = (getitem('repeat')['i'].even + getitem('repeat')['i'].odd) + except: + rcontext.setdefault('__error__', []).append((u"repeat['i'].even+repeat['i'].odd", 7, 30, '<string>', _sys.exc_info()[1], )) + raise + + if (_attr_class is None): + pass + else: + if (_attr_class is False): + _attr_class = None + else: + _tt = type(_attr_class) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _attr_class = unicode(_attr_class) + else: + try: + if (_tt is str): + _attr_class = decode(_attr_class) + else: + if (_tt is not unicode): + try: + _attr_class = _attr_class.__html__ + except: + _attr_class = convert(_attr_class) + else: + raise RuntimeError + except RuntimeError: + _attr_class = _attr_class() + else: + if ((_attr_class is not None) and (re_needs_escape(_attr_class) is not None)): + if ('&' in _attr_class): + if (';' in _attr_class): + _attr_class = re_amp.sub('&', _attr_class) + else: + _attr_class = _attr_class.replace('&', '&') + if ('<' in _attr_class): + _attr_class = _attr_class.replace('<', '<') + if ('>' in _attr_class): + _attr_class = _attr_class.replace('>', '>') + if ('"' in _attr_class): + _attr_class = _attr_class.replace('"', '"') + if (_attr_class is not None): + append((u' class="%s"' % _attr_class)) + if (_backup_default_35757088 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_35757088 + append(u'>') + _backup_default_36681632 = get('default', _marker) + + # <Marker name='default' at 2521fd0> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u'i' (8:51)> -> _cache_38935952 + try: + _cache_38935952 = getitem('i') + except: + rcontext.setdefault('__error__', []).append((u'i', 8, 51, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u'i' (8:51)> value=<Marker name='default' at 2521950> at 2521d10> -> _condition + _expression = _cache_38935952 + + # <Marker name='default' at 2521950> -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + _backup_attrs_36679832 = get('attrs', 
_marker) + + # <Static value=<_ast.Dict object at 0x2521e90> name=None at 249f050> -> _value + _value = _static_38936208 + econtext['attrs'] = _value + + # <span ... (8:32) + # -------------------------------------------------------- + append(u'<span />') + if (_backup_attrs_36679832 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_36679832 + else: + _content = _cache_38935952 + if (_content is None): + pass + else: + if (_content is False): + _content = None + else: + _tt = type(_content) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content = unicode(_content) + else: + try: + if (_tt is str): + _content = decode(_content) + else: + if (_tt is not unicode): + try: + _content = _content.__html__ + except: + _content = convert(_content) + else: + raise RuntimeError + except RuntimeError: + _content = _content() + else: + if ((_content is not None) and (re_needs_escape(_content) is not None)): + if ('&' in _content): + if (';' in _content): + _content = re_amp.sub('&', _content) + else: + _content = _content.replace('&', '&') + if ('<' in _content): + _content = _content.replace('<', '<') + if ('>' in _content): + _content = _content.replace('>', '>') + if ('\x00' in _content): + _content = _content.replace('\x00', '"') + if (_content is not None): + append(_content) + if (_backup_default_36681632 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_36681632 + append(u'</li>') + if (_backup_attrs_35756368 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_35756368 + __index_38403088 -= 1 + if (__index_38403088 > 0): + append('\n ') + if (_backup_i_40028112 is _marker): + del econtext['i'] + else: + econtext['i'] = _backup_i_40028112 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</ul>') + if (_backup_attrs_35754496 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_35754496 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_36680552 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x2521b10> name=None at 2521f90> -> _value + _value = _static_38935312 + econtext['attrs'] = _value + + # <ul ... (10:2) + # -------------------------------------------------------- + append(u'<ul>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_i_35203664 = get('i', _marker) + + # <Expression u'range(3)' (11:22)> -> _iterator + try: + _iterator = get('range', range)(3) + except: + rcontext.setdefault('__error__', []).append((u'range(3)', 11, 22, '<string>', _sys.exc_info()[1], )) + raise + + (_iterator, __index_38934352, ) = getitem('repeat')(u'i', _iterator) + econtext['i'] = None + for _item in _iterator: + econtext['i'] = _item + _backup_attrs_36746592 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x2521a10> name=None at 2521550> -> _value + _value = _static_38935056 + econtext['attrs'] = _value + + # <li ... 
(11:4) + # -------------------------------------------------------- + append(u'<li>') + + # <Expression u"repeat['i'].even" (11:53)> -> _condition + try: + _condition = getitem('repeat')['i'].even + except: + rcontext.setdefault('__error__', []).append((u"repeat['i'].even", 11, 53, '<string>', _sys.exc_info()[1], )) + raise + + if _condition: + _backup_default_39092096 = get('default', _marker) + + # <Marker name='default' at 2521150> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u"repeat['i'].even" (11:84)> -> _cache_38932752 + try: + _cache_38932752 = getitem('repeat')['i'].even + except: + rcontext.setdefault('__error__', []).append((u"repeat['i'].even", 11, 84, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u"repeat['i'].even" (11:84)> value=<Marker name='default' at 2521390> at 25214d0> -> _condition + _expression = _cache_38932752 + + # <Marker name='default' at 2521390> -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + _backup_attrs_39088424 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x2521790> name=None at 25217d0> -> _value + _value = _static_38934416 + econtext['attrs'] = _value + + # <span ... (11:32) + # -------------------------------------------------------- + append(u'<span />') + if (_backup_attrs_39088424 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_39088424 + else: + _content = _cache_38932752 + if (_content is None): + pass + else: + if (_content is False): + _content = None + else: + _tt = type(_content) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content = unicode(_content) + else: + try: + if (_tt is str): + _content = decode(_content) + else: + if (_tt is not unicode): + try: + _content = _content.__html__ + except: + _content = convert(_content) + else: + raise RuntimeError + except RuntimeError: + _content = _content() + else: + if ((_content is not None) and (re_needs_escape(_content) is not None)): + if ('&' in _content): + if (';' in _content): + _content = re_amp.sub('&', _content) + else: + _content = _content.replace('&', '&') + if ('<' in _content): + _content = _content.replace('<', '<') + if ('>' in _content): + _content = _content.replace('>', '>') + if ('\x00' in _content): + _content = _content.replace('\x00', '"') + if (_content is not None): + append(_content) + if (_backup_default_39092096 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_39092096 + + # <Expression u"repeat['i'].odd" (11:125)> -> _condition + try: + _condition = getitem('repeat')['i'].odd + except: + rcontext.setdefault('__error__', []).append((u"repeat['i'].odd", 11, 125, '<string>', _sys.exc_info()[1], )) + raise + + if _condition: + _backup_default_39089000 = get('default', _marker) + + # <Marker name='default' at 262c590> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u"repeat['i'].odd" (11:155)> -> _cache_38936400 + try: + _cache_38936400 = getitem('repeat')['i'].odd + except: + rcontext.setdefault('__error__', []).append((u"repeat['i'].odd", 11, 155, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u"repeat['i'].odd" (11:155)> value=<Marker name='default' at 262c0d0> at 262c850> -> _condition + _expression = _cache_38936400 + + # <Marker name='default' at 262c0d0> -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + 
_backup_attrs_39091592 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x25213d0> name=None at 2521050> -> _value + _value = _static_38933456 + econtext['attrs'] = _value + + # <span ... (11:104) + # -------------------------------------------------------- + append(u'<span />') + if (_backup_attrs_39091592 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_39091592 + else: + _content = _cache_38936400 + if (_content is None): + pass + else: + if (_content is False): + _content = None + else: + _tt = type(_content) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content = unicode(_content) + else: + try: + if (_tt is str): + _content = decode(_content) + else: + if (_tt is not unicode): + try: + _content = _content.__html__ + except: + _content = convert(_content) + else: + raise RuntimeError + except RuntimeError: + _content = _content() + else: + if ((_content is not None) and (re_needs_escape(_content) is not None)): + if ('&' in _content): + if (';' in _content): + _content = re_amp.sub('&', _content) + else: + _content = _content.replace('&', '&') + if ('<' in _content): + _content = _content.replace('<', '<') + if ('>' in _content): + _content = _content.replace('>', '>') + if ('\x00' in _content): + _content = _content.replace('\x00', '"') + if (_content is not None): + append(_content) + if (_backup_default_39089000 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_39089000 + append(u'</li>') + if (_backup_attrs_36746592 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_36746592 + __index_38934352 -= 1 + if (__index_38934352 > 0): + append('\n ') + if (_backup_i_35203664 is _marker): + del econtext['i'] + else: + econtext['i'] = _backup_i_35203664 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</ul>') + if (_backup_attrs_36680552 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_36680552 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</div>') + if (_backup_attrs_38631760 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_38631760 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) +pass \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/026.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/026.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/026.xml @@ -0,0 +1,5 @@ +<!DOCTYPE doc [ +<!ELEMENT doc (foo*)> +<!ELEMENT foo EMPTY> +]> +<doc><foo/><foo></foo></doc> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/027-attribute-replacement.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/027-attribute-replacement.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/027-attribute-replacement.pt @@ -0,0 +1,11 @@ +<div xmlns="http://www.w3.org/1999/xhtml" + xmlns:tal="http://xml.zope.org/namespaces/tal"> + <span id="test" + class="dummy" + onClick="" + tal:define="a 'abc'" + tal:attributes="class 'def' + a + default; style 'hij'; onClick 'alert();;'" + tal:content="a + 'ghi'" /> + <span tal:replace="'Hello World!'">Hello <b>Universe</b>!</span> + <span tal:replace="'Hello World!'"><b>Hello Universe!</b></span> +</div> diff --git 
a/lib/Chameleon-2.22/src/chameleon/tests/inputs/027-attribute-replacement.pt.py b/lib/Chameleon-2.22/src/chameleon/tests/inputs/027-attribute-replacement.pt.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/027-attribute-replacement.pt.py @@ -0,0 +1,518 @@ +# -*- coding: utf-8 -*- +pass +from chameleon.utils import Placeholder as _Placeholder +import sys as _sys +pass +_static_38936400 = {} +_static_38932624 = {} +_static_39101904 = {u'xmlns': u'http://www.w3.org/1999/xhtml', } +_static_39103376 = {u'style': u"'hij'", u'id': u'test', u'onClick': u'', u'class': u'dummy', } +_static_38935952 = {} +_static_38934352 = {} +_marker_default = _Placeholder() +import re +import functools +_marker = object() +g_re_amp = re.compile('&(?!([A-Za-z]+|#[0-9]+);)') +g_re_needs_escape = re.compile('[&<>\\"\\\']').search +re_whitespace = functools.partial(re.compile('\\s+').sub, ' ') + +def render(stream, econtext, rcontext): + append = stream.append + getitem = econtext.__getitem__ + get = econtext.get + _i18n_domain = None + re_amp = g_re_amp + re_needs_escape = g_re_needs_escape + decode = getitem('decode') + convert = getitem('convert') + translate = getitem('translate') + _backup_attrs_39046104 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x254a5d0> name=None at 249f050> -> _value + _value = _static_39101904 + econtext['attrs'] = _value + + # <div ... (1:0) + # -------------------------------------------------------- + append(u'<div') + _attr_xmlns = u'http://www.w3.org/1999/xhtml' + if (_attr_xmlns is not None): + append((u' xmlns="%s"' % _attr_xmlns)) + append(u'>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_a_35883472 = get('a', _marker) + + # <Expression u"'abc'" (6:22)> -> _value + try: + _value = 'abc' + except: + rcontext.setdefault('__error__', []).append((u"'abc'", 6, 22, '<string>', _sys.exc_info()[1], )) + raise + + econtext['a'] = _value + _backup_attrs_38523464 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x254ab90> name=None at 254aa50> -> _value + _value = _static_39103376 + econtext['attrs'] = _value + + # <span ... 
(3:2) + # -------------------------------------------------------- + append(u'<span') + _attr_id = u'test' + if (_attr_id is not None): + append((u' id="%s"' % _attr_id)) + _backup_default_39046968 = get('default', _marker) + _value = u'dummy' + econtext['default'] = _value + + # <Expression u"'def' + a + default" (7:30)> -> _attr_class + try: + _attr_class = (('def' + getitem('a')) + getitem('default')) + except: + rcontext.setdefault('__error__', []).append((u"'def' + a + default", 7, 30, '<string>', _sys.exc_info()[1], )) + raise + + if (_attr_class is None): + pass + else: + if (_attr_class is False): + _attr_class = None + else: + _tt = type(_attr_class) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _attr_class = unicode(_attr_class) + else: + try: + if (_tt is str): + _attr_class = decode(_attr_class) + else: + if (_tt is not unicode): + try: + _attr_class = _attr_class.__html__ + except: + _attr_class = convert(_attr_class) + else: + raise RuntimeError + except RuntimeError: + _attr_class = _attr_class() + else: + if ((_attr_class is not None) and (re_needs_escape(_attr_class) is not None)): + if ('&' in _attr_class): + if (';' in _attr_class): + _attr_class = re_amp.sub('&', _attr_class) + else: + _attr_class = _attr_class.replace('&', '&') + if ('<' in _attr_class): + _attr_class = _attr_class.replace('<', '<') + if ('>' in _attr_class): + _attr_class = _attr_class.replace('>', '>') + if (u'"' in _attr_class): + _attr_class = _attr_class.replace(u'"', '"') + if (_attr_class is not None): + append((u'\n class="%s"' % _attr_class)) + if (_backup_default_39046968 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_39046968 + _backup_default_38526416 = get('default', _marker) + _value = u'' + econtext['default'] = _value + + # <Expression u"'alert();'" (7:70)> -> _attr_onClick + try: + _attr_onClick = 'alert();' + except: + rcontext.setdefault('__error__', []).append((u"'alert();'", 7, 70, '<string>', _sys.exc_info()[1], )) + raise + + if (_attr_onClick is None): + pass + else: + if (_attr_onClick is False): + _attr_onClick = None + else: + _tt = type(_attr_onClick) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _attr_onClick = unicode(_attr_onClick) + else: + try: + if (_tt is str): + _attr_onClick = decode(_attr_onClick) + else: + if (_tt is not unicode): + try: + _attr_onClick = _attr_onClick.__html__ + except: + _attr_onClick = convert(_attr_onClick) + else: + raise RuntimeError + except RuntimeError: + _attr_onClick = _attr_onClick() + else: + if ((_attr_onClick is not None) and (re_needs_escape(_attr_onClick) is not None)): + if ('&' in _attr_onClick): + if (';' in _attr_onClick): + _attr_onClick = re_amp.sub('&', _attr_onClick) + else: + _attr_onClick = _attr_onClick.replace('&', '&') + if ('<' in _attr_onClick): + _attr_onClick = _attr_onClick.replace('<', '<') + if ('>' in _attr_onClick): + _attr_onClick = _attr_onClick.replace('>', '>') + if (u'"' in _attr_onClick): + _attr_onClick = _attr_onClick.replace(u'"', '"') + if (_attr_onClick is not None): + append((u'\n onClick="%s"' % _attr_onClick)) + if (_backup_default_38526416 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_38526416 + _backup_default_38524760 = get('default', _marker) + _value = None + econtext['default'] = _value + + # <Expression u"'hij'" (7:56)> -> _attr_style + try: + _attr_style = 'hij' + except: + rcontext.setdefault('__error__', []).append((u"'hij'", 7, 56, '<string>', _sys.exc_info()[1], )) + raise + + if 
(_attr_style is None): + pass + else: + if (_attr_style is False): + _attr_style = None + else: + _tt = type(_attr_style) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _attr_style = unicode(_attr_style) + else: + try: + if (_tt is str): + _attr_style = decode(_attr_style) + else: + if (_tt is not unicode): + try: + _attr_style = _attr_style.__html__ + except: + _attr_style = convert(_attr_style) + else: + raise RuntimeError + except RuntimeError: + _attr_style = _attr_style() + else: + if ((_attr_style is not None) and (re_needs_escape(_attr_style) is not None)): + if ('&' in _attr_style): + if (';' in _attr_style): + _attr_style = re_amp.sub('&', _attr_style) + else: + _attr_style = _attr_style.replace('&', '&') + if ('<' in _attr_style): + _attr_style = _attr_style.replace('<', '<') + if ('>' in _attr_style): + _attr_style = _attr_style.replace('>', '>') + if ('"' in _attr_style): + _attr_style = _attr_style.replace('"', '"') + if (_attr_style is not None): + append((u' style="%s"' % _attr_style)) + if (_backup_default_38524760 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_38524760 + append('>') + _backup_default_38525480 = get('default', _marker) + + # <Marker name='default' at 254a9d0> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u"a + 'ghi'" (8:21)> -> _cache_39100816 + try: + _cache_39100816 = (getitem('a') + 'ghi') + except: + rcontext.setdefault('__error__', []).append((u"a + 'ghi'", 8, 21, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u"a + 'ghi'" (8:21)> value=<Marker name='default' at 254a890> at 254a090> -> _condition + _expression = _cache_39100816 + + # <Marker name='default' at 254a890> -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + pass + else: + _content = _cache_39100816 + if (_content is None): + pass + else: + if (_content is False): + _content = None + else: + _tt = type(_content) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content = unicode(_content) + else: + try: + if (_tt is str): + _content = decode(_content) + else: + if (_tt is not unicode): + try: + _content = _content.__html__ + except: + _content = convert(_content) + else: + raise RuntimeError + except RuntimeError: + _content = _content() + else: + if ((_content is not None) and (re_needs_escape(_content) is not None)): + if ('&' in _content): + if (';' in _content): + _content = re_amp.sub('&', _content) + else: + _content = _content.replace('&', '&') + if ('<' in _content): + _content = _content.replace('<', '<') + if ('>' in _content): + _content = _content.replace('>', '>') + if ('\x00' in _content): + _content = _content.replace('\x00', '"') + if (_content is not None): + append(_content) + if (_backup_default_38525480 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_38525480 + append(u'</span>') + if (_backup_attrs_38523464 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_38523464 + if (_backup_a_35883472 is _marker): + del econtext['a'] + else: + econtext['a'] = _backup_a_35883472 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_default_39596544 = get('default', _marker) + + # <Marker name='default' at 2521f90> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u"'Hello World!'" (9:21)> -> _cache_38934928 + try: + _cache_38934928 
= 'Hello World!' + except: + rcontext.setdefault('__error__', []).append((u"'Hello World!'", 9, 21, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u"'Hello World!'" (9:21)> value=<Marker name='default' at 2521c50> at 2521b50> -> _condition + _expression = _cache_38934928 + + # <Marker name='default' at 2521c50> -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + _backup_attrs_38526488 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x2521d90> name=None at 2521e10> -> _value + _value = _static_38935952 + econtext['attrs'] = _value + + # <span ... (9:2) + # -------------------------------------------------------- + append(u'<span>') + _content_139955154988272 = u'Hello ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_36682352 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x2521750> name=None at 2521ed0> -> _value + _value = _static_38934352 + econtext['attrs'] = _value + + # <b ... (9:43) + # -------------------------------------------------------- + append(u'<b>') + _content_139955154988272 = u'Universe' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</b>') + if (_backup_attrs_36682352 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_36682352 + _content_139955154988272 = u'!' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</span>') + if (_backup_attrs_38526488 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_38526488 + else: + _content = _cache_38934928 + if (_content is None): + pass + else: + if (_content is False): + _content = None + else: + _tt = type(_content) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content = unicode(_content) + else: + try: + if (_tt is str): + _content = decode(_content) + else: + if (_tt is not unicode): + try: + _content = _content.__html__ + except: + _content = convert(_content) + else: + raise RuntimeError + except RuntimeError: + _content = _content() + else: + if ((_content is not None) and (re_needs_escape(_content) is not None)): + if ('&' in _content): + if (';' in _content): + _content = re_amp.sub('&', _content) + else: + _content = _content.replace('&', '&') + if ('<' in _content): + _content = _content.replace('<', '<') + if ('>' in _content): + _content = _content.replace('>', '>') + if ('\x00' in _content): + _content = _content.replace('\x00', '"') + if (_content is not None): + append(_content) + if (_backup_default_39596544 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_39596544 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_default_36682856 = get('default', _marker) + + # <Marker name='default' at 2521710> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u"'Hello World!'" (10:21)> -> _cache_38932752 + try: + _cache_38932752 = 'Hello World!' 
+ except: + rcontext.setdefault('__error__', []).append((u"'Hello World!'", 10, 21, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u"'Hello World!'" (10:21)> value=<Marker name='default' at 2521410> at 25214d0> -> _condition + _expression = _cache_38932752 + + # <Marker name='default' at 2521410> -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + _backup_attrs_36682928 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x2521090> name=None at 25215d0> -> _value + _value = _static_38932624 + econtext['attrs'] = _value + + # <span ... (10:2) + # -------------------------------------------------------- + append(u'<span>') + _backup_attrs_35804656 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x2521f50> name=None at 2521490> -> _value + _value = _static_38936400 + econtext['attrs'] = _value + + # <b ... (10:37) + # -------------------------------------------------------- + append(u'<b>') + _content_139955154988272 = u'Hello Universe!' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</b>') + if (_backup_attrs_35804656 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_35804656 + append(u'</span>') + if (_backup_attrs_36682928 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_36682928 + else: + _content = _cache_38932752 + if (_content is None): + pass + else: + if (_content is False): + _content = None + else: + _tt = type(_content) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content = unicode(_content) + else: + try: + if (_tt is str): + _content = decode(_content) + else: + if (_tt is not unicode): + try: + _content = _content.__html__ + except: + _content = convert(_content) + else: + raise RuntimeError + except RuntimeError: + _content = _content() + else: + if ((_content is not None) and (re_needs_escape(_content) is not None)): + if ('&' in _content): + if (';' in _content): + _content = re_amp.sub('&', _content) + else: + _content = _content.replace('&', '&') + if ('<' in _content): + _content = _content.replace('<', '<') + if ('>' in _content): + _content = _content.replace('>', '>') + if ('\x00' in _content): + _content = _content.replace('\x00', '"') + if (_content is not None): + append(_content) + if (_backup_default_36682856 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_36682856 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</div>') + if (_backup_attrs_39046104 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_39046104 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) +pass \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/027.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/027.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/027.xml @@ -0,0 +1,5 @@ +<!DOCTYPE doc [ +<!ELEMENT doc (foo*)> +<!ELEMENT foo ANY> +]> +<doc><foo/><foo></foo></doc> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/028-attribute-toggle.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/028-attribute-toggle.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/028-attribute-toggle.pt @@ -0,0 +1,6 @@ +<div 
xmlns="http://www.w3.org/1999/xhtml" + xmlns:tal="http://xml.zope.org/namespaces/tal"> + <option tal:attributes="selected True"></option> + <option tal:attributes="selected False"></option> + <option tal:attributes="selected None"></option> +</div> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/028-attribute-toggle.pt.py b/lib/Chameleon-2.22/src/chameleon/tests/inputs/028-attribute-toggle.pt.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/028-attribute-toggle.pt.py @@ -0,0 +1,333 @@ +# -*- coding: utf-8 -*- +pass +import sys as _sys +pass +_static_39101904 = {u'checked': u'True', } +_static_39101136 = {u'checked': u'False', } +_static_38932624 = {u'selected': u'None', } +_static_38934992 = {u'selected': u'True', } +_static_38934864 = {u'xmlns': u'http://www.w3.org/1999/xhtml', } +import re +import functools +_marker = object() +g_re_amp = re.compile('&(?!([A-Za-z]+|#[0-9]+);)') +g_re_needs_escape = re.compile('[&<>\\"\\\']').search +re_whitespace = functools.partial(re.compile('\\s+').sub, ' ') + +def render(stream, econtext, rcontext): + append = stream.append + getitem = econtext.__getitem__ + get = econtext.get + _i18n_domain = None + re_amp = g_re_amp + re_needs_escape = g_re_needs_escape + decode = getitem('decode') + convert = getitem('convert') + translate = getitem('translate') + _backup_attrs_39545024 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x2521950> name=None at 2521e90> -> _value + _value = _static_38934864 + econtext['attrs'] = _value + + # <div ... (1:0) + # -------------------------------------------------------- + append(u'<div') + _attr_xmlns = u'http://www.w3.org/1999/xhtml' + if (_attr_xmlns is not None): + append((u' xmlns="%s"' % _attr_xmlns)) + append(u'>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_39545672 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x25219d0> name=None at 2521610> -> _value + _value = _static_38934992 + econtext['attrs'] = _value + + # <option ... 
(3:2) + # -------------------------------------------------------- + append(u'<option') + _backup_default_39545096 = get('default', _marker) + _value = None + econtext['default'] = _value + + # <Expression u'True' (3:35)> -> _attr_selected + try: + _attr_selected = True + except: + rcontext.setdefault('__error__', []).append((u'True', 3, 35, '<string>', _sys.exc_info()[1], )) + raise + + if (_attr_selected is None): + pass + else: + if (_attr_selected is False): + _attr_selected = None + else: + _tt = type(_attr_selected) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _attr_selected = unicode(_attr_selected) + else: + try: + if (_tt is str): + _attr_selected = decode(_attr_selected) + else: + if (_tt is not unicode): + try: + _attr_selected = _attr_selected.__html__ + except: + _attr_selected = convert(_attr_selected) + else: + raise RuntimeError + except RuntimeError: + _attr_selected = _attr_selected() + else: + if ((_attr_selected is not None) and (re_needs_escape(_attr_selected) is not None)): + if ('&' in _attr_selected): + if (';' in _attr_selected): + _attr_selected = re_amp.sub('&', _attr_selected) + else: + _attr_selected = _attr_selected.replace('&', '&') + if ('<' in _attr_selected): + _attr_selected = _attr_selected.replace('<', '<') + if ('>' in _attr_selected): + _attr_selected = _attr_selected.replace('>', '>') + if ('"' in _attr_selected): + _attr_selected = _attr_selected.replace('"', '"') + if (_attr_selected is not None): + append((u' selected="%s"' % _attr_selected)) + if (_backup_default_39545096 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_39545096 + append(u'>') + append(u'</option>') + if (_backup_attrs_39545672 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_39545672 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_39594032 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x2521090> name=None at 25215d0> -> _value + _value = _static_38932624 + econtext['attrs'] = _value + + # <option ... 
(4:2) + # -------------------------------------------------------- + append(u'<option') + _backup_default_39594608 = get('default', _marker) + _value = None + econtext['default'] = _value + + # <Expression u'None' (4:35)> -> _attr_selected + try: + _attr_selected = None + except: + rcontext.setdefault('__error__', []).append((u'None', 4, 35, '<string>', _sys.exc_info()[1], )) + raise + + if (_attr_selected is None): + pass + else: + if (_attr_selected is False): + _attr_selected = None + else: + _tt = type(_attr_selected) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _attr_selected = unicode(_attr_selected) + else: + try: + if (_tt is str): + _attr_selected = decode(_attr_selected) + else: + if (_tt is not unicode): + try: + _attr_selected = _attr_selected.__html__ + except: + _attr_selected = convert(_attr_selected) + else: + raise RuntimeError + except RuntimeError: + _attr_selected = _attr_selected() + else: + if ((_attr_selected is not None) and (re_needs_escape(_attr_selected) is not None)): + if ('&' in _attr_selected): + if (';' in _attr_selected): + _attr_selected = re_amp.sub('&', _attr_selected) + else: + _attr_selected = _attr_selected.replace('&', '&') + if ('<' in _attr_selected): + _attr_selected = _attr_selected.replace('<', '<') + if ('>' in _attr_selected): + _attr_selected = _attr_selected.replace('>', '>') + if ('"' in _attr_selected): + _attr_selected = _attr_selected.replace('"', '"') + if (_attr_selected is not None): + append((u' selected="%s"' % _attr_selected)) + if (_backup_default_39594608 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_39594608 + append(u'>') + append(u'</option>') + if (_backup_attrs_39594032 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_39594032 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_39595400 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x254a5d0> name=None at 254a750> -> _value + _value = _static_39101904 + econtext['attrs'] = _value + + # <input ... 
(5:2) + # -------------------------------------------------------- + append(u'<input') + _backup_default_40174800 = get('default', _marker) + _value = None + econtext['default'] = _value + + # <Expression u'True' (5:33)> -> _attr_checked + try: + _attr_checked = True + except: + rcontext.setdefault('__error__', []).append((u'True', 5, 33, '<string>', _sys.exc_info()[1], )) + raise + + if (_attr_checked is None): + pass + else: + if (_attr_checked is False): + _attr_checked = None + else: + _tt = type(_attr_checked) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _attr_checked = unicode(_attr_checked) + else: + try: + if (_tt is str): + _attr_checked = decode(_attr_checked) + else: + if (_tt is not unicode): + try: + _attr_checked = _attr_checked.__html__ + except: + _attr_checked = convert(_attr_checked) + else: + raise RuntimeError + except RuntimeError: + _attr_checked = _attr_checked() + else: + if ((_attr_checked is not None) and (re_needs_escape(_attr_checked) is not None)): + if ('&' in _attr_checked): + if (';' in _attr_checked): + _attr_checked = re_amp.sub('&', _attr_checked) + else: + _attr_checked = _attr_checked.replace('&', '&') + if ('<' in _attr_checked): + _attr_checked = _attr_checked.replace('<', '<') + if ('>' in _attr_checked): + _attr_checked = _attr_checked.replace('>', '>') + if ('"' in _attr_checked): + _attr_checked = _attr_checked.replace('"', '"') + if (_attr_checked is not None): + append((u' checked="%s"' % _attr_checked)) + if (_backup_default_40174800 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_40174800 + append(u' />') + if (_backup_attrs_39595400 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_39595400 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_39593744 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x254a2d0> name=None at 254a210> -> _value + _value = _static_39101136 + econtext['attrs'] = _value + + # <input ... 
(6:2) + # -------------------------------------------------------- + append(u'<input') + _backup_default_39066584 = get('default', _marker) + _value = None + econtext['default'] = _value + + # <Expression u'False' (6:33)> -> _attr_checked + try: + _attr_checked = False + except: + rcontext.setdefault('__error__', []).append((u'False', 6, 33, '<string>', _sys.exc_info()[1], )) + raise + + if (_attr_checked is None): + pass + else: + if (_attr_checked is False): + _attr_checked = None + else: + _tt = type(_attr_checked) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _attr_checked = unicode(_attr_checked) + else: + try: + if (_tt is str): + _attr_checked = decode(_attr_checked) + else: + if (_tt is not unicode): + try: + _attr_checked = _attr_checked.__html__ + except: + _attr_checked = convert(_attr_checked) + else: + raise RuntimeError + except RuntimeError: + _attr_checked = _attr_checked() + else: + if ((_attr_checked is not None) and (re_needs_escape(_attr_checked) is not None)): + if ('&' in _attr_checked): + if (';' in _attr_checked): + _attr_checked = re_amp.sub('&', _attr_checked) + else: + _attr_checked = _attr_checked.replace('&', '&') + if ('<' in _attr_checked): + _attr_checked = _attr_checked.replace('<', '<') + if ('>' in _attr_checked): + _attr_checked = _attr_checked.replace('>', '>') + if ('"' in _attr_checked): + _attr_checked = _attr_checked.replace('"', '"') + if (_attr_checked is not None): + append((u' checked="%s"' % _attr_checked)) + if (_backup_default_39066584 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_39066584 + append(u' />') + if (_backup_attrs_39593744 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_39593744 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</div>') + if (_backup_attrs_39545024 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_39545024 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) +pass \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/028.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/028.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/028.xml @@ -0,0 +1,5 @@ +<?xml version="1.0"?> +<!DOCTYPE doc [ +<!ELEMENT doc (#PCDATA)> +]> +<doc></doc> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/029-attribute-ordering.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/029-attribute-ordering.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/029-attribute-ordering.pt @@ -0,0 +1,5 @@ +<div xmlns="http://www.w3.org/1999/xhtml" + xmlns:tal="http://xml.zope.org/namespaces/tal"> + <a rel="self" href="http://repoze.org" id="link-id" + tal:attributes="href 'http://python.org'" /> +</div> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/029-attribute-ordering.pt.py b/lib/Chameleon-2.22/src/chameleon/tests/inputs/029-attribute-ordering.pt.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/029-attribute-ordering.pt.py @@ -0,0 +1,124 @@ +# -*- coding: utf-8 -*- +pass +import sys as _sys +pass +_static_39103696 = {u'href': u'http://repoze.org', u'id': u'link-id', u'rel': u'self', } +_static_39100880 = {u'xmlns': u'http://www.w3.org/1999/xhtml', } +import re +import functools +_marker = object() +g_re_amp = 
re.compile('&(?!([A-Za-z]+|#[0-9]+);)') +g_re_needs_escape = re.compile('[&<>\\"\\\']').search +re_whitespace = functools.partial(re.compile('\\s+').sub, ' ') + +def render(stream, econtext, rcontext): + append = stream.append + getitem = econtext.__getitem__ + get = econtext.get + _i18n_domain = None + re_amp = g_re_amp + re_needs_escape = g_re_needs_escape + decode = getitem('decode') + convert = getitem('convert') + translate = getitem('translate') + _backup_attrs_37098488 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x254a1d0> name=None at 254a7d0> -> _value + _value = _static_39100880 + econtext['attrs'] = _value + + # <div ... (1:0) + # -------------------------------------------------------- + append(u'<div') + _attr_xmlns = u'http://www.w3.org/1999/xhtml' + if (_attr_xmlns is not None): + append((u' xmlns="%s"' % _attr_xmlns)) + append(u'>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_36810832 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x254acd0> name=None at 254ab10> -> _value + _value = _static_39103696 + econtext['attrs'] = _value + + # <a ... (3:2) + # -------------------------------------------------------- + append(u'<a') + _attr_rel = u'self' + if (_attr_rel is not None): + append((u' rel="%s"' % _attr_rel)) + _backup_default_36710736 = get('default', _marker) + _value = u'http://repoze.org' + econtext['default'] = _value + + # <Expression u"'http://python.org'" (4:26)> -> _attr_href + try: + _attr_href = 'http://python.org' + except: + rcontext.setdefault('__error__', []).append((u"'http://python.org'", 4, 26, '<string>', _sys.exc_info()[1], )) + raise + + if (_attr_href is None): + pass + else: + if (_attr_href is False): + _attr_href = None + else: + _tt = type(_attr_href) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _attr_href = unicode(_attr_href) + else: + try: + if (_tt is str): + _attr_href = decode(_attr_href) + else: + if (_tt is not unicode): + try: + _attr_href = _attr_href.__html__ + except: + _attr_href = convert(_attr_href) + else: + raise RuntimeError + except RuntimeError: + _attr_href = _attr_href() + else: + if ((_attr_href is not None) and (re_needs_escape(_attr_href) is not None)): + if ('&' in _attr_href): + if (';' in _attr_href): + _attr_href = re_amp.sub('&', _attr_href) + else: + _attr_href = _attr_href.replace('&', '&') + if ('<' in _attr_href): + _attr_href = _attr_href.replace('<', '<') + if ('>' in _attr_href): + _attr_href = _attr_href.replace('>', '>') + if (u'"' in _attr_href): + _attr_href = _attr_href.replace(u'"', '"') + if (_attr_href is not None): + append((u' href="%s"' % _attr_href)) + if (_backup_default_36710736 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_36710736 + _attr_id = u'link-id' + if (_attr_id is not None): + append((u' id="%s"' % _attr_id)) + append(u' />') + if (_backup_attrs_36810832 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_36810832 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</div>') + if (_backup_attrs_37098488 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_37098488 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) +pass \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/029.xml 
b/lib/Chameleon-2.22/src/chameleon/tests/inputs/029.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/029.xml @@ -0,0 +1,5 @@ +<?xml version='1.0'?> +<!DOCTYPE doc [ +<!ELEMENT doc (#PCDATA)> +]> +<doc></doc> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/030-repeat-tuples.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/030-repeat-tuples.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/030-repeat-tuples.pt @@ -0,0 +1,7 @@ +<html> + <body> + <div tal:repeat="(i, j) ((1, 2), (3, 4))"> + ${repeat['i', 'j'].number}, ${i}, ${j} + </div> + </body> +</html> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/030-repeat-tuples.pt.py b/lib/Chameleon-2.22/src/chameleon/tests/inputs/030-repeat-tuples.pt.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/030-repeat-tuples.pt.py @@ -0,0 +1,242 @@ +# -*- coding: utf-8 -*- +pass +import sys as _sys +pass +_static_38403088 = {} +_static_36668752 = {} +_static_40208144 = {} +import re +import functools +_marker = object() +g_re_amp = re.compile('&(?!([A-Za-z]+|#[0-9]+);)') +g_re_needs_escape = re.compile('[&<>\\"\\\']').search +re_whitespace = functools.partial(re.compile('\\s+').sub, ' ') + +def render(stream, econtext, rcontext): + append = stream.append + getitem = econtext.__getitem__ + get = econtext.get + _i18n_domain = None + re_amp = g_re_amp + re_needs_escape = g_re_needs_escape + decode = getitem('decode') + convert = getitem('convert') + translate = getitem('translate') + _backup_attrs_35947656 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x22f8550> name=None at 230a350> -> _value + _value = _static_36668752 + econtext['attrs'] = _value + + # <html ... (1:0) + # -------------------------------------------------------- + append(u'<html>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_35343464 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x249fc10> name=None at 249fa90> -> _value + _value = _static_38403088 + econtext['attrs'] = _value + + # <body ... (2:2) + # -------------------------------------------------------- + append(u'<body>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_i_35341160 = get('i', _marker) + _backup_j_35341160 = get('j', _marker) + + # <Expression u'((1, 2), (3, 4))' (3:28)> -> _iterator + try: + _iterator = ((1, 2, ), (3, 4, ), ) + except: + rcontext.setdefault('__error__', []).append((u'((1, 2), (3, 4))', 3, 28, '<string>', _sys.exc_info()[1], )) + raise + + (_iterator, __index_40209040, ) = getitem('repeat')((u'i', u'j', ), _iterator) + econtext['i'] = econtext['j'] = None + for _item in _iterator: + (econtext['i'], econtext['j'], ) = _item + _backup_attrs_35343248 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x2658710> name=None at 2658750> -> _value + _value = _static_40208144 + econtext['attrs'] = _value + + # <div ... 
(3:4) + # -------------------------------------------------------- + append(u'<div>') + + # <Expression u"repeat['i', 'j'].number" (4:8)> -> _content_139955154988272 + try: + _content_139955154988272 = getitem('repeat')[('i', 'j', )].number + except: + rcontext.setdefault('__error__', []).append((u"repeat['i', 'j'].number", 4, 8, '<string>', _sys.exc_info()[1], )) + raise + + if (_content_139955154988272 is None): + pass + else: + if (_content_139955154988272 is False): + _content_139955154988272 = None + else: + _tt = type(_content_139955154988272) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content_139955154988272 = unicode(_content_139955154988272) + else: + try: + if (_tt is str): + _content_139955154988272 = decode(_content_139955154988272) + else: + if (_tt is not unicode): + try: + _content_139955154988272 = _content_139955154988272.__html__ + except: + _content_139955154988272 = convert(_content_139955154988272) + else: + raise RuntimeError + except RuntimeError: + _content_139955154988272 = _content_139955154988272() + else: + if ((_content_139955154988272 is not None) and (re_needs_escape(_content_139955154988272) is not None)): + if ('&' in _content_139955154988272): + if (';' in _content_139955154988272): + _content_139955154988272 = re_amp.sub('&', _content_139955154988272) + else: + _content_139955154988272 = _content_139955154988272.replace('&', '&') + if ('<' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('<', '<') + if ('>' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('>', '>') + if ('\x00' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('\x00', '"') + + # <Expression u'i' (4:36)> -> _content_139955154988272_97 + try: + _content_139955154988272_97 = getitem('i') + except: + rcontext.setdefault('__error__', []).append((u'i', 4, 36, '<string>', _sys.exc_info()[1], )) + raise + + if (_content_139955154988272_97 is None): + pass + else: + if (_content_139955154988272_97 is False): + _content_139955154988272_97 = None + else: + _tt = type(_content_139955154988272_97) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content_139955154988272_97 = unicode(_content_139955154988272_97) + else: + try: + if (_tt is str): + _content_139955154988272_97 = decode(_content_139955154988272_97) + else: + if (_tt is not unicode): + try: + _content_139955154988272_97 = _content_139955154988272_97.__html__ + except: + _content_139955154988272_97 = convert(_content_139955154988272_97) + else: + raise RuntimeError + except RuntimeError: + _content_139955154988272_97 = _content_139955154988272_97() + else: + if ((_content_139955154988272_97 is not None) and (re_needs_escape(_content_139955154988272_97) is not None)): + if ('&' in _content_139955154988272_97): + if (';' in _content_139955154988272_97): + _content_139955154988272_97 = re_amp.sub('&', _content_139955154988272_97) + else: + _content_139955154988272_97 = _content_139955154988272_97.replace('&', '&') + if ('<' in _content_139955154988272_97): + _content_139955154988272_97 = _content_139955154988272_97.replace('<', '<') + if ('>' in _content_139955154988272_97): + _content_139955154988272_97 = _content_139955154988272_97.replace('>', '>') + if ('\x00' in _content_139955154988272_97): + _content_139955154988272_97 = _content_139955154988272_97.replace('\x00', '"') + + # <Expression u'j' (4:42)> -> _content_139955154988272_103 + try: + _content_139955154988272_103 = getitem('j') + 
except: + rcontext.setdefault('__error__', []).append((u'j', 4, 42, '<string>', _sys.exc_info()[1], )) + raise + + if (_content_139955154988272_103 is None): + pass + else: + if (_content_139955154988272_103 is False): + _content_139955154988272_103 = None + else: + _tt = type(_content_139955154988272_103) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content_139955154988272_103 = unicode(_content_139955154988272_103) + else: + try: + if (_tt is str): + _content_139955154988272_103 = decode(_content_139955154988272_103) + else: + if (_tt is not unicode): + try: + _content_139955154988272_103 = _content_139955154988272_103.__html__ + except: + _content_139955154988272_103 = convert(_content_139955154988272_103) + else: + raise RuntimeError + except RuntimeError: + _content_139955154988272_103 = _content_139955154988272_103() + else: + if ((_content_139955154988272_103 is not None) and (re_needs_escape(_content_139955154988272_103) is not None)): + if ('&' in _content_139955154988272_103): + if (';' in _content_139955154988272_103): + _content_139955154988272_103 = re_amp.sub('&', _content_139955154988272_103) + else: + _content_139955154988272_103 = _content_139955154988272_103.replace('&', '&') + if ('<' in _content_139955154988272_103): + _content_139955154988272_103 = _content_139955154988272_103.replace('<', '<') + if ('>' in _content_139955154988272_103): + _content_139955154988272_103 = _content_139955154988272_103.replace('>', '>') + if ('\x00' in _content_139955154988272_103): + _content_139955154988272_103 = _content_139955154988272_103.replace('\x00', '"') + _content_139955154988272 = ('%s%s%s%s%s%s%s' % ((u'\n ' if (u'\n ' is not None) else ''), (_content_139955154988272 if (_content_139955154988272 is not None) else ''), (u', ' if (u', ' is not None) else ''), (_content_139955154988272_97 if (_content_139955154988272_97 is not None) else ''), (u', ' if (u', ' is not None) else ''), (_content_139955154988272_103 if (_content_139955154988272_103 is not None) else ''), (u'\n ' if (u'\n ' is not None) else ''), )) + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</div>') + if (_backup_attrs_35343248 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_35343248 + __index_40209040 -= 1 + if (__index_40209040 > 0): + append('\n ') + if (_backup_i_35341160 is _marker): + del econtext['i'] + else: + econtext['i'] = _backup_i_35341160 + if (_backup_j_35341160 is _marker): + del econtext['j'] + else: + econtext['j'] = _backup_j_35341160 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</body>') + if (_backup_attrs_35343464 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_35343464 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</html>') + if (_backup_attrs_35947656 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_35947656 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) +pass \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/030.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/030.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/030.xml @@ -0,0 +1,5 @@ +<?xml version = "1.0"?> +<!DOCTYPE doc [ +<!ELEMENT doc (#PCDATA)> +]> +<doc></doc> diff --git 
a/lib/Chameleon-2.22/src/chameleon/tests/inputs/031-namespace-with-tal.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/031-namespace-with-tal.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/031-namespace-with-tal.pt @@ -0,0 +1,7 @@ +<div> + <tal:example replace="'Hello World!'" /> + <tal:example tal:replace="'Hello World!'" /> + <tal:div content="'Hello World!'" /> + <tal:multiple repeat="i range(3)" replace="i" /> + <tal:div condition="True">True</tal:div> +</div> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/031-namespace-with-tal.pt.py b/lib/Chameleon-2.22/src/chameleon/tests/inputs/031-namespace-with-tal.pt.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/031-namespace-with-tal.pt.py @@ -0,0 +1,356 @@ +# -*- coding: utf-8 -*- +pass +from chameleon.utils import Placeholder as _Placeholder +import sys as _sys +pass +_static_38401872 = {} +_marker_default = _Placeholder() +import re +import functools +_marker = object() +g_re_amp = re.compile('&(?!([A-Za-z]+|#[0-9]+);)') +g_re_needs_escape = re.compile('[&<>\\"\\\']').search +re_whitespace = functools.partial(re.compile('\\s+').sub, ' ') + +def render(stream, econtext, rcontext): + append = stream.append + getitem = econtext.__getitem__ + get = econtext.get + _i18n_domain = None + re_amp = g_re_amp + re_needs_escape = g_re_needs_escape + decode = getitem('decode') + convert = getitem('convert') + translate = getitem('translate') + _backup_attrs_35793880 = get('attrs', _marker) + + # <Static value=<_ast.Dict object at 0x249f750> name=None at 249f810> -> _value + _value = _static_38401872 + econtext['attrs'] = _value + + # <div ... (1:0) + # -------------------------------------------------------- + append(u'<div>') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_default_40161008 = get('default', _marker) + + # <Marker name='default' at 2658ad0> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u"'Hello World!'" (2:24)> -> _cache_40208016 + try: + _cache_40208016 = 'Hello World!' 
+ except: + rcontext.setdefault('__error__', []).append((u"'Hello World!'", 2, 24, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u"'Hello World!'" (2:24)> value=<Marker name='default' at 2658710> at 2658990> -> _condition + _expression = _cache_40208016 + + # <Marker name='default' at 2658710> -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + pass + else: + _content = _cache_40208016 + if (_content is None): + pass + else: + if (_content is False): + _content = None + else: + _tt = type(_content) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content = unicode(_content) + else: + try: + if (_tt is str): + _content = decode(_content) + else: + if (_tt is not unicode): + try: + _content = _content.__html__ + except: + _content = convert(_content) + else: + raise RuntimeError + except RuntimeError: + _content = _content() + else: + if ((_content is not None) and (re_needs_escape(_content) is not None)): + if ('&' in _content): + if (';' in _content): + _content = re_amp.sub('&', _content) + else: + _content = _content.replace('&', '&') + if ('<' in _content): + _content = _content.replace('<', '<') + if ('>' in _content): + _content = _content.replace('>', '>') + if ('\x00' in _content): + _content = _content.replace('\x00', '"') + if (_content is not None): + append(_content) + if (_backup_default_40161008 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_40161008 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_default_35794744 = get('default', _marker) + + # <Marker name='default' at 2521610> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u"'Hello World!'" (3:28)> -> _cache_36787408 + try: + _cache_36787408 = 'Hello World!' 
+ except: + rcontext.setdefault('__error__', []).append((u"'Hello World!'", 3, 28, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u"'Hello World!'" (3:28)> value=<Marker name='default' at 2521f90> at 2521cd0> -> _condition + _expression = _cache_36787408 + + # <Marker name='default' at 2521f90> -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + pass + else: + _content = _cache_36787408 + if (_content is None): + pass + else: + if (_content is False): + _content = None + else: + _tt = type(_content) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content = unicode(_content) + else: + try: + if (_tt is str): + _content = decode(_content) + else: + if (_tt is not unicode): + try: + _content = _content.__html__ + except: + _content = convert(_content) + else: + raise RuntimeError + except RuntimeError: + _content = _content() + else: + if ((_content is not None) and (re_needs_escape(_content) is not None)): + if ('&' in _content): + if (';' in _content): + _content = re_amp.sub('&', _content) + else: + _content = _content.replace('&', '&') + if ('<' in _content): + _content = _content.replace('<', '<') + if ('>' in _content): + _content = _content.replace('>', '>') + if ('\x00' in _content): + _content = _content.replace('\x00', '"') + if (_content is not None): + append(_content) + if (_backup_default_35794744 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_35794744 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_default_35792944 = get('default', _marker) + + # <Marker name='default' at 25219d0> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u"'Hello World!'" (4:20)> -> _cache_40209232 + try: + _cache_40209232 = 'Hello World!' 
+ except: + rcontext.setdefault('__error__', []).append((u"'Hello World!'", 4, 20, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u"'Hello World!'" (4:20)> value=<Marker name='default' at 2521810> at 2521910> -> _condition + _expression = _cache_40209232 + + # <Marker name='default' at 2521810> -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + pass + else: + _content = _cache_40209232 + if (_content is None): + pass + else: + if (_content is False): + _content = None + else: + _tt = type(_content) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content = unicode(_content) + else: + try: + if (_tt is str): + _content = decode(_content) + else: + if (_tt is not unicode): + try: + _content = _content.__html__ + except: + _content = convert(_content) + else: + raise RuntimeError + except RuntimeError: + _content = _content() + else: + if ((_content is not None) and (re_needs_escape(_content) is not None)): + if ('&' in _content): + if (';' in _content): + _content = re_amp.sub('&', _content) + else: + _content = _content.replace('&', '&') + if ('<' in _content): + _content = _content.replace('<', '<') + if ('>' in _content): + _content = _content.replace('>', '>') + if ('\x00' in _content): + _content = _content.replace('\x00', '"') + if (_content is not None): + append(_content) + if (_backup_default_35792944 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_35792944 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_i_40210256 = get('i', _marker) + + # <Expression u'range(3)' (5:26)> -> _iterator + try: + _iterator = get('range', range)(3) + except: + rcontext.setdefault('__error__', []).append((u'range(3)', 5, 26, '<string>', _sys.exc_info()[1], )) + raise + + (_iterator, __index_38934736, ) = getitem('repeat')(u'i', _iterator) + econtext['i'] = None + for _item in _iterator: + econtext['i'] = _item + _backup_default_40162008 = get('default', _marker) + + # <Marker name='default' at 2521f50> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u'i' (5:45)> -> _cache_38932816 + try: + _cache_38932816 = getitem('i') + except: + rcontext.setdefault('__error__', []).append((u'i', 5, 45, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u'i' (5:45)> value=<Marker name='default' at 2521050> at 25213d0> -> _condition + _expression = _cache_38932816 + + # <Marker name='default' at 2521050> -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + pass + else: + _content = _cache_38932816 + if (_content is None): + pass + else: + if (_content is False): + _content = None + else: + _tt = type(_content) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content = unicode(_content) + else: + try: + if (_tt is str): + _content = decode(_content) + else: + if (_tt is not unicode): + try: + _content = _content.__html__ + except: + _content = convert(_content) + else: + raise RuntimeError + except RuntimeError: + _content = _content() + else: + if ((_content is not None) and (re_needs_escape(_content) is not None)): + if ('&' in _content): + if (';' in _content): + _content = re_amp.sub('&', _content) + else: + _content = _content.replace('&', '&') + if ('<' in _content): + _content = _content.replace('<', '<') + if ('>' in _content): + _content = _content.replace('>', '>') + if ('\x00' in 
_content): + _content = _content.replace('\x00', '"') + if (_content is not None): + append(_content) + if (_backup_default_40162008 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_40162008 + __index_38934736 -= 1 + if (__index_38934736 > 0): + append('\n ') + if (_backup_i_40210256 is _marker): + del econtext['i'] + else: + econtext['i'] = _backup_i_40210256 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + + # <Expression u'True' (6:22)> -> _condition + try: + _condition = True + except: + rcontext.setdefault('__error__', []).append((u'True', 6, 22, '<string>', _sys.exc_info()[1], )) + raise + + if _condition: + _content_139955154988272 = u'True' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'</div>') + if (_backup_attrs_35793880 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_35793880 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) +pass \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/031.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/031.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/031.xml @@ -0,0 +1,5 @@ +<?xml version='1.0' encoding="UTF-8"?> +<!DOCTYPE doc [ +<!ELEMENT doc (#PCDATA)> +]> +<doc></doc> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/032-master-template.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/032-master-template.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/032-master-template.pt @@ -0,0 +1,20 @@ +<html i18n:domain="master" metal:define-macro="main" tal:define="content nothing"> + <head> + <title metal:define-slot="title" + metal:define-macro="title" + tal:define="has_title exists: title" + tal:content="title if has_title else default">Master template + + +
          + + + +
          + + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/032-master-template.pt.py b/lib/Chameleon-2.22/src/chameleon/tests/inputs/032-master-template.pt.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/032-master-template.pt.py @@ -0,0 +1,378 @@ +# -*- coding: utf-8 -*- +pass +from chameleon.utils import Placeholder as _Placeholder +import sys as _sys +pass +_static_39973456 = {u'id': u'content', } +_static_38402704 = {} +_static_40208784 = {} +_static_38935504 = {} +_static_38935440 = {} +_static_39975056 = {u'id': u'footer', } +_marker_default = _Placeholder() +import re +import functools +_marker = object() +g_re_amp = re.compile('&(?!([A-Za-z]+|#[0-9]+);)') +g_re_needs_escape = re.compile('[&<>\\"\\\']').search +re_whitespace = functools.partial(re.compile('\\s+').sub, ' ') + +def render_main(stream, econtext, rcontext): + try: + _slot_title = getitem(u'_slot_title').pop() + except: + _slot_title = None + + try: + _slot_content = getitem(u'_slot_content').pop() + except: + _slot_content = None + + try: + _slot_body_footer = getitem(u'_slot_body_footer').pop() + except: + _slot_body_footer = None + + append = stream.append + getitem = econtext.__getitem__ + get = econtext.get + _i18n_domain = None + re_amp = g_re_amp + re_needs_escape = g_re_needs_escape + decode = getitem('decode') + convert = getitem('convert') + translate = getitem('translate') + _backup_content_36741456 = get('content', _marker) + + # -> _value + try: + _value = getitem('nothing') + except: + rcontext.setdefault('__error__', []).append((u'nothing', 1, 52, '', _sys.exc_info()[1], )) + raise + + econtext['content'] = _value + _backup_attrs_39812776 = get('attrs', _marker) + + # name=None at 2658710> -> _value + _value = _static_40208784 + econtext['attrs'] = _value + + # ') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_38435368 = get('attrs', _marker) + + # name=None at 2521f90> -> _value + _value = _static_38935504 + econtext['attrs'] = _value + + # ') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + if (_slot_title is None): + _backup_has_title_38932944 = get('has_title', _marker) + + # -> _value + try: + try: + _ignore = getitem('title') + except (AttributeError, LookupError, TypeError, NameError, KeyError, ): + _value = 0 + else: + _value = 1 + except: + rcontext.setdefault('__error__', []).append((u'exists: title', 4, 33, '', _sys.exc_info()[1], )) + raise + + econtext['has_title'] = _value + _backup_attrs_36811840 = get('attrs', _marker) + + # name=None at 2521050> -> _value + _value = _static_38935440 + econtext['attrs'] = _value + + # ') + _backup_default_38436088 = get('default', _marker) + + # <Marker name='default' at 2521490> -> _value + _value = _marker_default + econtext['default'] = _value + + # <Expression u'title if has_title else default' (5:24)> -> _cache_38935120 + try: + _cache_38935120 = (getitem('title') if getitem('has_title') else getitem('default')) + except: + rcontext.setdefault('__error__', []).append((u'title if has_title else default', 5, 24, '<string>', _sys.exc_info()[1], )) + raise + + + # <Identity expression=<Expression u'title if has_title else default' (5:24)> value=<Marker name='default' at 2521f50> at 2521790> -> _condition + _expression = _cache_38935120 + + # <Marker name='default' at 2521f50> -> _value + _value = _marker_default + _condition = (_expression is 
_value) + if _condition: + _content_139955154988272 = u'Master template' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + else: + _content = _cache_38935120 + if (_content is None): + pass + else: + if (_content is False): + _content = None + else: + _tt = type(_content) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content = unicode(_content) + else: + try: + if (_tt is str): + _content = decode(_content) + else: + if (_tt is not unicode): + try: + _content = _content.__html__ + except: + _content = convert(_content) + else: + raise RuntimeError + except RuntimeError: + _content = _content() + else: + if ((_content is not None) and (re_needs_escape(_content) is not None)): + if ('&' in _content): + if (';' in _content): + _content = re_amp.sub('&', _content) + else: + _content = _content.replace('&', '&') + if ('<' in _content): + _content = _content.replace('<', '<') + if ('>' in _content): + _content = _content.replace('>', '>') + if ('\x00' in _content): + _content = _content.replace('\x00', '"') + if (_content is not None): + append(_content) + if (_backup_default_38436088 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_38436088 + append(u'') + if (_backup_attrs_36811840 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_36811840 + if (_backup_has_title_38932944 is _marker): + del econtext['has_title'] + else: + econtext['has_title'] = _backup_has_title_38932944 + else: + _slot_title(stream, econtext.copy(), econtext) + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'') + if (_backup_attrs_38435368 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_38435368 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_36813640 = get('attrs', _marker) + + # name=None at 249fc10> -> _value + _value = _static_38402704 + econtext['attrs'] = _value + + # ') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_36813496 = get('attrs', _marker) + + # name=None at 261f210> -> _value + _value = _static_39973456 + econtext['attrs'] = _value + + #
          ') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + if (_slot_content is None): + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + else: + _slot_content(stream, econtext.copy(), econtext) + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'
          ') + if (_backup_attrs_36813496 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_36813496 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + _backup_attrs_36812632 = get('attrs', _marker) + + # name=None at 261fad0> -> _value + _value = _static_39975056 + econtext['attrs'] = _value + + #
          ') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + if (_slot_body_footer is None): + _backup_default_36811120 = get('default', _marker) + + # -> _value + _value = _marker_default + econtext['default'] = _value + + # -> _cache_39976784 + try: + _cache_39976784 = getitem('nothing') + except: + rcontext.setdefault('__error__', []).append((u'nothing', 14, 59, '', _sys.exc_info()[1], )) + raise + + + # value= at 261f6d0> -> _condition + _expression = _cache_39976784 + + # -> _value + _value = _marker_default + _condition = (_expression is _value) + if _condition: + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'') + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + else: + _content = _cache_39976784 + if (_content is None): + pass + else: + if (_content is False): + _content = None + else: + _tt = type(_content) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content = unicode(_content) + else: + try: + if (_tt is str): + _content = decode(_content) + else: + if (_tt is not unicode): + try: + _content = _content.__html__ + except: + _content = convert(_content) + else: + raise RuntimeError + except RuntimeError: + _content = _content() + else: + if ((_content is not None) and (re_needs_escape(_content) is not None)): + if ('&' in _content): + if (';' in _content): + _content = re_amp.sub('&', _content) + else: + _content = _content.replace('&', '&') + if ('<' in _content): + _content = _content.replace('<', '<') + if ('>' in _content): + _content = _content.replace('>', '>') + if ('\x00' in _content): + _content = _content.replace('\x00', '"') + if (_content is not None): + append(_content) + if (_backup_default_36811120 is _marker): + del econtext['default'] + else: + econtext['default'] = _backup_default_36811120 + else: + _slot_body_footer(stream, econtext.copy(), econtext) + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'
          ') + if (_backup_attrs_36812632 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_36812632 + _content_139955154988272 = u'\n ' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'') + if (_backup_attrs_36813640 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_36813640 + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'') + if (_backup_attrs_39812776 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_39812776 + if (_backup_content_36741456 is _marker): + del econtext['content'] + else: + econtext['content'] = _backup_content_36741456 + +def render(stream, econtext, rcontext): + append = stream.append + getitem = econtext.__getitem__ + get = econtext.get + _i18n_domain = None + re_amp = g_re_amp + re_needs_escape = g_re_needs_escape + decode = getitem('decode') + convert = getitem('convert') + translate = getitem('translate') + render_main(stream, econtext.copy(), rcontext) + econtext.update(rcontext) + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) +pass \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/032.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/032.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/032.xml @@ -0,0 +1,5 @@ + + +]> + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/033-use-macro-trivial.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/033-use-macro-trivial.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/033-use-macro-trivial.pt @@ -0,0 +1,1 @@ + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/033-use-macro-trivial.pt.py b/lib/Chameleon-2.22/src/chameleon/tests/inputs/033-use-macro-trivial.pt.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/033-use-macro-trivial.pt.py @@ -0,0 +1,35 @@ +# -*- coding: utf-8 -*- +pass +import sys as _sys +pass +import re +import functools +_marker = object() +g_re_amp = re.compile('&(?!([A-Za-z]+|#[0-9]+);)') +g_re_needs_escape = re.compile('[&<>\\"\\\']').search +re_whitespace = functools.partial(re.compile('\\s+').sub, ' ') + +def render(stream, econtext, rcontext): + append = stream.append + getitem = econtext.__getitem__ + get = econtext.get + _i18n_domain = None + re_amp = g_re_amp + re_needs_escape = g_re_needs_escape + decode = getitem('decode') + convert = getitem('convert') + translate = getitem('translate') + + # -> _macro + try: + _macro = getitem('load')('032-master-template.pt').macros['main'] + except: + rcontext.setdefault('__error__', []).append((u"load('032-master-template.pt').macros['main']", 1, 23, '', _sys.exc_info()[1], )) + raise + + _macro.include(stream, econtext.copy(), rcontext) + econtext.update(rcontext) + _content_139955154988272 = u'\n' + if (_content_139955154988272 is not None): + append(_content_139955154988272) +pass \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/033.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/033.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/033.xml @@ -0,0 +1,5 @@ + + +]> + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/034-use-template-as-macro.pt 
b/lib/Chameleon-2.22/src/chameleon/tests/inputs/034-use-template-as-macro.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/034-use-template-as-macro.pt @@ -0,0 +1,1 @@ + \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/034-use-template-as-macro.pt.py b/lib/Chameleon-2.22/src/chameleon/tests/inputs/034-use-template-as-macro.pt.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/034-use-template-as-macro.pt.py @@ -0,0 +1,32 @@ +# -*- coding: utf-8 -*- +pass +import sys as _sys +pass +import re +import functools +_marker = object() +g_re_amp = re.compile('&(?!([A-Za-z]+|#[0-9]+);)') +g_re_needs_escape = re.compile('[&<>\\"\\\']').search +re_whitespace = functools.partial(re.compile('\\s+').sub, ' ') + +def render(stream, econtext, rcontext): + append = stream.append + getitem = econtext.__getitem__ + get = econtext.get + _i18n_domain = None + re_amp = g_re_amp + re_needs_escape = g_re_needs_escape + decode = getitem('decode') + convert = getitem('convert') + translate = getitem('translate') + + # -> _macro + try: + _macro = getitem('load')('032-master-template.pt') + except: + rcontext.setdefault('__error__', []).append((u"load('032-master-template.pt')", 1, 23, '', _sys.exc_info()[1], )) + raise + + _macro.include(stream, econtext.copy(), rcontext) + econtext.update(rcontext) +pass \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/034.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/034.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/034.xml @@ -0,0 +1,4 @@ + +]> + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/035-use-macro-with-fill-slot.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/035-use-macro-with-fill-slot.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/035-use-macro-with-fill-slot.pt @@ -0,0 +1,5 @@ + + + ${kind} title + + \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/035-use-macro-with-fill-slot.pt.py b/lib/Chameleon-2.22/src/chameleon/tests/inputs/035-use-macro-with-fill-slot.pt.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/035-use-macro-with-fill-slot.pt.py @@ -0,0 +1,118 @@ +# -*- coding: utf-8 -*- +pass +import sys as _sys +pass +_static_39975056 = {} +import re +import functools +_marker = object() +g_re_amp = re.compile('&(?!([A-Za-z]+|#[0-9]+);)') +g_re_needs_escape = re.compile('[&<>\\"\\\']').search +re_whitespace = functools.partial(re.compile('\\s+').sub, ' ') + +def render(stream, econtext, rcontext): + append = stream.append + getitem = econtext.__getitem__ + get = econtext.get + _i18n_domain = None + re_amp = g_re_amp + re_needs_escape = g_re_needs_escape + decode = getitem('decode') + convert = getitem('convert') + translate = getitem('translate') + + def _slot_title(stream, econtext, rcontext, _i18n_domain=_i18n_domain): + getitem = econtext.__getitem__ + get = econtext.get + _backup_kind_37140176 = get('kind', _marker) + + # -> _value + try: + _value = 'New' + except: + rcontext.setdefault('__error__', []).append((u"'New'", 2, 50, '', _sys.exc_info()[1], )) + raise + + econtext['kind'] = _value + _backup_attrs_38600784 = get('attrs', _marker) + + # name=None at 261fc50> -> _value + _value = _static_39975056 + econtext['attrs'] = _value + + # ') + + # <Expression u'kind' (3:6)> -> _content_139955154988272 + try: + 
_content_139955154988272 = getitem('kind') + except: + rcontext.setdefault('__error__', []).append((u'kind', 3, 6, '<string>', _sys.exc_info()[1], )) + raise + + if (_content_139955154988272 is None): + pass + else: + if (_content_139955154988272 is False): + _content_139955154988272 = None + else: + _tt = type(_content_139955154988272) + if ((_tt is int) or (_tt is float) or (_tt is long)): + _content_139955154988272 = unicode(_content_139955154988272) + else: + try: + if (_tt is str): + _content_139955154988272 = decode(_content_139955154988272) + else: + if (_tt is not unicode): + try: + _content_139955154988272 = _content_139955154988272.__html__ + except: + _content_139955154988272 = convert(_content_139955154988272) + else: + raise RuntimeError + except RuntimeError: + _content_139955154988272 = _content_139955154988272() + else: + if ((_content_139955154988272 is not None) and (re_needs_escape(_content_139955154988272) is not None)): + if ('&' in _content_139955154988272): + if (';' in _content_139955154988272): + _content_139955154988272 = re_amp.sub('&', _content_139955154988272) + else: + _content_139955154988272 = _content_139955154988272.replace('&', '&') + if ('<' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('<', '<') + if ('>' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('>', '>') + if ('\x00' in _content_139955154988272): + _content_139955154988272 = _content_139955154988272.replace('\x00', '"') + _content_139955154988272 = ('%s%s%s' % ((u'\n ' if (u'\n ' is not None) else ''), (_content_139955154988272 if (_content_139955154988272 is not None) else ''), (u' title\n ' if (u' title\n ' is not None) else ''), )) + if (_content_139955154988272 is not None): + append(_content_139955154988272) + append(u'') + if (_backup_attrs_38600784 is _marker): + del econtext['attrs'] + else: + econtext['attrs'] = _backup_attrs_38600784 + if (_backup_kind_37140176 is _marker): + del econtext['kind'] + else: + econtext['kind'] = _backup_kind_37140176 + try: + _slots = getitem(u'_slot_title') + except: + _slots = econtext[u'_slot_title'] = [_slot_title, ] + else: + _slots.append(_slot_title) + + # -> _macro + try: + _macro = getitem('load')('032-master-template.pt').macros['main'] + except: + rcontext.setdefault('__error__', []).append((u"load('032-master-template.pt').macros['main']", 1, 23, '', _sys.exc_info()[1], )) + raise + + _macro.include(stream, econtext.copy(), rcontext) + econtext.update(rcontext) +pass \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/035.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/035.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/035.xml @@ -0,0 +1,4 @@ + +]> + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/036-use-macro-inherits-dynamic-scope.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/036-use-macro-inherits-dynamic-scope.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/036-use-macro-inherits-dynamic-scope.pt @@ -0,0 +1,2 @@ + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/036.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/036.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/036.xml @@ -0,0 +1,5 @@ + +]> + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/037-use-macro-local-variable-scope.pt 
b/lib/Chameleon-2.22/src/chameleon/tests/inputs/037-use-macro-local-variable-scope.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/037-use-macro-local-variable-scope.pt @@ -0,0 +1,5 @@ + + + ok + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/037.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/037.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/037.xml @@ -0,0 +1,6 @@ + +]> + + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/038-use-macro-globals.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/038-use-macro-globals.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/038-use-macro-globals.pt @@ -0,0 +1,6 @@ + + + + ok + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/038.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/038.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/038.xml @@ -0,0 +1,6 @@ + + +]> + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/039-globals.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/039-globals.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/039-globals.pt @@ -0,0 +1,1 @@ + \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/039.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/039.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/039.xml @@ -0,0 +1,5 @@ + + +]> + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/040-macro-using-template-symbol.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/040-macro-using-template-symbol.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/040-macro-using-template-symbol.pt @@ -0,0 +1,20 @@ + + + + + ${foo} +
          + +
          + + + +
          + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/040.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/040.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/040.xml @@ -0,0 +1,5 @@ + + +]> + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/041-translate-nested-names.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/041-translate-nested-names.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/041-translate-nested-names.pt @@ -0,0 +1,22 @@ + + +
          + Hello + + world! + +
          +
          + Hello + + world! + +
          +
          + Goodbye + + world! + +
          + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/041.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/041.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/041.xml @@ -0,0 +1,5 @@ + + +]> + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/042-use-macro-fill-footer.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/042-use-macro-fill-footer.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/042-use-macro-fill-footer.pt @@ -0,0 +1,3 @@ + + New footer + \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/042.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/042.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/042.xml @@ -0,0 +1,4 @@ + +]> +A diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/043-macro-nested-dynamic-vars.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/043-macro-nested-dynamic-vars.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/043-macro-nested-dynamic-vars.pt @@ -0,0 +1,19 @@ + + + + + + + + + + ${title} + + + + +
          + + \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/043.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/043.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/043.xml @@ -0,0 +1,6 @@ + + +]> + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/044-tuple-define.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/044-tuple-define.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/044-tuple-define.pt @@ -0,0 +1,5 @@ + + + ${a}, ${b} + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/044.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/044.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/044.xml @@ -0,0 +1,10 @@ + + + +]> + + + + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/045-namespaces.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/045-namespaces.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/045-namespaces.pt @@ -0,0 +1,13 @@ + + +]> + + + ZZZ YYY XXX + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/045.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/045.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/045.xml @@ -0,0 +1,6 @@ + + + +]> + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/046-extend-macro.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/046-extend-macro.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/046-extend-macro.pt @@ -0,0 +1,6 @@ + + + New footer + + \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/046.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/046.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/046.xml @@ -0,0 +1,6 @@ + + + +]> + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/047-use-extended-macro.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/047-use-extended-macro.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/047-use-extended-macro.pt @@ -0,0 +1,3 @@ + + Extended + \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/047.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/047.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/047.xml @@ -0,0 +1,5 @@ + +]> +X +Y diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/048-use-extended-macro-fill-original.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/048-use-extended-macro-fill-original.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/048-use-extended-macro-fill-original.pt @@ -0,0 +1,5 @@ + + + Extended footer + + \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/048.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/048.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/048.xml @@ -0,0 +1,4 @@ + +]> +] diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/049-entities-in-attributes.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/049-entities-in-attributes.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/049-entities-in-attributes.pt @@ -0,0 +1,11 @@ + + +
          +    
          +    
          +  
          +  
          +    
          +  
          +
          \ No newline at end of file
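[Editor's note, not part of the changeset: the 049-entities-in-attributes fixture above arrives with its markup stripped in this archive copy. For orientation only, a minimal sketch of the escaping behaviour this group of fixtures exercises, using Chameleon's public PageTemplate API; the template string below is illustrative and is not the fixture itself:

    from chameleon import PageTemplate

    # tal:content escapes inserted text by default; the "structure" prefix
    # inserts it verbatim (compare the expected output in outputs/049.pt later in this diff).
    template = PageTemplate(
        '<div>'
        '<span tal:content="value">placeholder</span>'
        '<span tal:content="structure value">placeholder</span>'
        '</div>'
    )
    print(template(value='amp=& lt=<'))
    # first span renders escaped:  amp=&amp; lt=&lt;
    # second span renders verbatim: amp=& lt=<
]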
          diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/058.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/058.xml
          new file mode 100644
          --- /dev/null
          +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/058.xml
          @@ -0,0 +1,5 @@
          +
          +
          +]>
          +
          diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/059-embedded-javascript.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/059-embedded-javascript.pt
          new file mode 100644
          --- /dev/null
          +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/059-embedded-javascript.pt
          @@ -0,0 +1,6 @@
          +
          +  
          +    test
          +    test
          +  
          +
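[Editor's note, not part of the changeset: 059-embedded-javascript.pt above lost its tags in this archive copy; judging by its name and the surviving "test" text it exercises ${...} interpolation inside an embedded script block. A hedged sketch of that behaviour with the public PageTemplate API — the markup here is an assumption, not the fixture:

    from chameleon import PageTemplate

    # ${...} interpolation applies to ordinary text content,
    # which includes the body of a <script> element.
    template = PageTemplate(
        '<html><body>'
        '<script type="text/javascript">var msg = "${label}";</script>'
        '</body></html>'
    )
    print(template(label='test'))   # script body renders as: var msg = "test";
]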
          diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/059.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/059.xml
          new file mode 100644
          --- /dev/null
          +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/059.xml
          @@ -0,0 +1,10 @@
          +
          +
          +
          +]>
          +
          +
          +
          +
          +
          diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/060-macro-with-multiple-same-slots.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/060-macro-with-multiple-same-slots.pt
          new file mode 100644
          --- /dev/null
          +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/060-macro-with-multiple-same-slots.pt
          @@ -0,0 +1,8 @@
          +
          +  
          +    <metal:title define-slot="title">Untitled</metal:title>
          +  
          +  
          +    

          Untitled

          + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/060.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/060.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/060.xml @@ -0,0 +1,4 @@ + +]> +X Y diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/061-fill-one-slot-but-two-defined.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/061-fill-one-slot-but-two-defined.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/061-fill-one-slot-but-two-defined.pt @@ -0,0 +1,3 @@ + + My document + \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/061.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/061.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/061.xml @@ -0,0 +1,4 @@ + +]> +£ diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/062-comments-and-expressions.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/062-comments-and-expressions.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/062-comments-and-expressions.pt @@ -0,0 +1,27 @@ +
          + +
          + +
          + +
          + +
          + +
          + +
          + +
          + +
          + +
          + + + + \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/062.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/062.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/062.xml @@ -0,0 +1,4 @@ + +]> +เจม?????? diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/063-continuation.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/063-continuation.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/063-continuation.pt @@ -0,0 +1,4 @@ +
          + ${foo} +
          \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/063.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/063.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/063.xml @@ -0,0 +1,4 @@ + +]> + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/064-tags-and-special-characters.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/064-tags-and-special-characters.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/064-tags-and-special-characters.pt @@ -0,0 +1,4 @@ + +
          +
          +
          \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/064.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/064.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/064.xml @@ -0,0 +1,4 @@ + +]> +𐀀􏿽 diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/065-use-macro-in-fill.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/065-use-macro-in-fill.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/065-use-macro-in-fill.pt @@ -0,0 +1,6 @@ + + + <div metal:fill-slot="content">Content</div> +</html> \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/065.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/065.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/065.xml @@ -0,0 +1,5 @@ +<!DOCTYPE doc [ +<!ENTITY e "<"> +<!ELEMENT doc (#PCDATA)> +]> +<doc></doc> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/066-load-expression.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/066-load-expression.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/066-load-expression.pt @@ -0,0 +1,1 @@ +<html tal:define="hello_world load: hello_world.pt" metal:use-macro="hello_world" /> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/066.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/066.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/066.xml @@ -0,0 +1,7 @@ +<!DOCTYPE doc [ +<!ELEMENT doc (#PCDATA)> +<!ATTLIST doc a1 CDATA #IMPLIED> +<!-- 34 is double quote --> +<!ENTITY e1 """> +]> +<doc a1="&e1;"></doc> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/067-attribute-decode.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/067-attribute-decode.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/067-attribute-decode.pt @@ -0,0 +1,6 @@ +<html> + <body> + <img src="#" tal:attributes="class 1 > 0 and 'up' or 0 < 1 and 'down';" /> + <img src="#" tal:attributes="class 0 > 1 and 'up' or 0 < 1 and 'down';" /> + </body> +</html> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/067.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/067.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/067.xml @@ -0,0 +1,4 @@ +<!DOCTYPE doc [ +<!ELEMENT doc (#PCDATA)> +]> +<doc> </doc> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/068-less-than-greater-than-in-attributes.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/068-less-than-greater-than-in-attributes.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/068-less-than-greater-than-in-attributes.pt @@ -0,0 +1,8 @@ +<html> + <body> + <span tal:content="string:0 < 1 or 0 > 1" /> + <span tal:content="structure string:0 < 1 or 0 > 1" /> + <span class="0 < 1 or 0 > 1" /> + <span>0 < 1 or 0 > 1</span> + </body> +</html> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/068.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/068.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/068.xml @@ -0,0 +1,5 @@ +<!DOCTYPE doc [ +<!ELEMENT doc (#PCDATA)> +<!ENTITY e " "> +]> +<doc>&e;</doc> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/069-translation-domain-and-macro.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/069-translation-domain-and-macro.pt new file mode 100644 --- 
/dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/069-translation-domain-and-macro.pt @@ -0,0 +1,3 @@ +<html metal:use-macro="load('032-master-template.pt').macros['main']"> + <title metal:fill-slot="title" i18n:domain="test" i18n:translate="title">Title + \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/069.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/069.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/069.xml @@ -0,0 +1,5 @@ + + +]> + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/070-translation-domain-and-use-macro.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/070-translation-domain-and-use-macro.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/070-translation-domain-and-use-macro.pt @@ -0,0 +1,3 @@ + + Title + \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/070.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/070.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/070.xml @@ -0,0 +1,5 @@ +"> +%e; +]> + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/071-html-attribute-defaults.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/071-html-attribute-defaults.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/071-html-attribute-defaults.pt @@ -0,0 +1,12 @@ + + + + + + + + + + + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/071.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/071.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/071.xml @@ -0,0 +1,5 @@ + + +]> + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/072-repeat-interpolation.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/072-repeat-interpolation.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/072-repeat-interpolation.pt @@ -0,0 +1,13 @@ + + +
            +
          • ${i}
          • +
          +
            +
          • ${i}
          • +
          +
            +
          • ${i}
          • +
          + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/072.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/072.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/072.xml @@ -0,0 +1,5 @@ + + +]> + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/073-utf8-encoded.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/073-utf8-encoded.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/073-utf8-encoded.pt @@ -0,0 +1,5 @@ + + +${'my title'} ??? ${'my site'} + + \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/073.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/073.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/073.xml @@ -0,0 +1,5 @@ + + +]> + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/074-encoded-template.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/074-encoded-template.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/074-encoded-template.pt @@ -0,0 +1,5 @@ + + +${'my title'} ? ${'my site'} + + \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/074.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/074.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/074.xml @@ -0,0 +1,5 @@ + + +]> + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/075-nested-macros.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/075-nested-macros.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/075-nested-macros.pt @@ -0,0 +1,11 @@ + + + + + + foo + + + + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/075.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/075.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/075.xml @@ -0,0 +1,5 @@ + + +]> + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/076-nested-macro-override.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/076-nested-macro-override.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/076-nested-macro-override.pt @@ -0,0 +1,3 @@ + + bar + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/076.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/076.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/076.xml @@ -0,0 +1,7 @@ + + + + +]> + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/077-i18n-attributes.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/077-i18n-attributes.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/077-i18n-attributes.pt @@ -0,0 +1,1 @@ + \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/077.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/077.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/077.xml @@ -0,0 +1,5 @@ + + +]> + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/078-tags-and-newlines.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/078-tags-and-newlines.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/078-tags-and-newlines.pt @@ -0,0 +1,23 @@ + + + + + + , + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/078.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/078.xml new file mode 100644 --- /dev/null +++ 
b/lib/Chameleon-2.22/src/chameleon/tests/inputs/078.xml @@ -0,0 +1,5 @@ + + +]> + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/079-implicit-i18n.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/079-implicit-i18n.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/079-implicit-i18n.pt @@ -0,0 +1,16 @@ + + + Welcome + + +

          Welcome

          + An edge case: ${. + Site logo + Site logo +
          + boo foo. +
          + bar. +
          + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/079.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/079.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/079.xml @@ -0,0 +1,5 @@ + + +]> + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/080-xmlns-namespace-on-tal.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/080-xmlns-namespace-on-tal.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/080-xmlns-namespace-on-tal.pt @@ -0,0 +1,6 @@ + + Hello world + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/080.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/080.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/080.xml @@ -0,0 +1,5 @@ + + +]> + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/081-load-spec.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/081-load-spec.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/081-load-spec.pt @@ -0,0 +1,1 @@ + \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/081.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/081.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/081.xml @@ -0,0 +1,7 @@ + + + + +]> + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/082-load-spec-computed.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/082-load-spec-computed.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/082-load-spec-computed.pt @@ -0,0 +1,1 @@ + \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/082.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/082.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/082.xml @@ -0,0 +1,5 @@ + + +]> + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/083-template-dict-to-macro.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/083-template-dict-to-macro.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/083-template-dict-to-macro.pt @@ -0,0 +1,2 @@ + \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/083.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/083.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/083.xml @@ -0,0 +1,5 @@ + + +]> + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/084-interpolation-in-cdata.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/084-interpolation-in-cdata.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/084-interpolation-in-cdata.pt @@ -0,0 +1,9 @@ + + + + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/084.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/084.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/084.xml @@ -0,0 +1,1 @@ +]> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/085-nested-translation.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/085-nested-translation.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/085-nested-translation.pt @@ -0,0 +1,11 @@ + + + Welcome + + +

          Welcome

          +

          + Click here to continue. +

          + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/085.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/085.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/085.xml @@ -0,0 +1,6 @@ + +"> + +]> +&e; diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/086-self-closing.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/086-self-closing.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/086-self-closing.pt @@ -0,0 +1,10 @@ + + +
          +
          + Chart +
          +
          +
          + + \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/086.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/086.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/086.xml @@ -0,0 +1,6 @@ + + +"> +]> +&e; diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/087-code-blocks.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/087-code-blocks.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/087-code-blocks.pt @@ -0,0 +1,28 @@ + + +
            +
          • +
          + + + +
            +
          • +
          + +
          + + Please input a number from the range ${", ".join(numbers)}. +
          + +
          + + 41 + 1 = ${function(41)}. +
          \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/087.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/087.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/087.xml @@ -0,0 +1,6 @@ + + + +]> +&e; diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/088-python-newlines.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/088-python-newlines.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/088-python-newlines.pt @@ -0,0 +1,2 @@ + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/088.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/088.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/088.xml @@ -0,0 +1,5 @@ + +"> +]> +&e; diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/089-load-fallback.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/089-load-fallback.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/089-load-fallback.pt @@ -0,0 +1,3 @@ + \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/089.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/089.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/089.xml @@ -0,0 +1,5 @@ + + +]> +&e; diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/090-tuple-expression.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/090-tuple-expression.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/090-tuple-expression.pt @@ -0,0 +1,8 @@ + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/090.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/090.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/090.xml @@ -0,0 +1,7 @@ + + + + +]> + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/091-repeat-none.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/091-repeat-none.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/091-repeat-none.pt @@ -0,0 +1,5 @@ + + +
          error
          + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/091.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/091.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/091.xml @@ -0,0 +1,7 @@ + + + + +]> + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/092.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/092.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/092.xml @@ -0,0 +1,10 @@ + + +]> + + + + + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/093.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/093.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/093.xml @@ -0,0 +1,5 @@ + +]> + + \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/094.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/094.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/094.xml @@ -0,0 +1,6 @@ + + + +]> + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/095.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/095.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/095.xml @@ -0,0 +1,6 @@ + + + +]> + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/096.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/096.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/096.xml @@ -0,0 +1,5 @@ + + +]> + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/097.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/097.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/097.xml @@ -0,0 +1,8 @@ + + + +%e; + +]> + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/098.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/098.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/098.xml @@ -0,0 +1,5 @@ + +]> + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/099.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/099.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/099.xml @@ -0,0 +1,5 @@ + + +]> + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/100.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/100.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/100.xml @@ -0,0 +1,5 @@ + + +]> + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/101-unclosed-tags.html b/lib/Chameleon-2.22/src/chameleon/tests/inputs/101-unclosed-tags.html new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/101-unclosed-tags.html @@ -0,0 +1,5 @@ + + +



          Hello world

          + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/101.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/101.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/101.xml @@ -0,0 +1,5 @@ + + +]> + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/102-unquoted-attributes.html b/lib/Chameleon-2.22/src/chameleon/tests/inputs/102-unquoted-attributes.html new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/102-unquoted-attributes.html @@ -0,0 +1,5 @@ + + +

          Hello world

          + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/102.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/102.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/102.xml @@ -0,0 +1,5 @@ + + +]> + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/103-simple-attribute.html b/lib/Chameleon-2.22/src/chameleon/tests/inputs/103-simple-attribute.html new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/103-simple-attribute.html @@ -0,0 +1,8 @@ + + + + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/103.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/103.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/103.xml @@ -0,0 +1,4 @@ + +]> +<doc> diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/104.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/104.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/104.xml @@ -0,0 +1,5 @@ + + +]> + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/105.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/105.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/105.xml @@ -0,0 +1,5 @@ + + +]> + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/106.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/106.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/106.xml @@ -0,0 +1,5 @@ + + +]> + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/107.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/107.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/107.xml @@ -0,0 +1,5 @@ + + +]> + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/108.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/108.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/108.xml @@ -0,0 +1,7 @@ + + + +]> + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/109.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/109.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/109.xml @@ -0,0 +1,5 @@ + + +]> + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/110.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/110.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/110.xml @@ -0,0 +1,6 @@ + + + +]> + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/111.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/111.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/111.xml @@ -0,0 +1,5 @@ + + +]> + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/112.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/112.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/112.xml @@ -0,0 +1,5 @@ + + +]> +
          diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/113.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/113.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/113.xml @@ -0,0 +1,5 @@ + + +]> + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/114.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/114.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/114.xml @@ -0,0 +1,5 @@ + +"> +]> +&e; diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/115.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/115.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/115.xml @@ -0,0 +1,6 @@ + + + +]> +&e1; diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/116.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/116.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/116.xml @@ -0,0 +1,5 @@ + +]> + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/117.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/117.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/117.xml @@ -0,0 +1,5 @@ + + +]> +] diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/118.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/118.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/118.xml @@ -0,0 +1,5 @@ + + +]> +] diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/119.xml b/lib/Chameleon-2.22/src/chameleon/tests/inputs/119.xml new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/119.xml @@ -0,0 +1,4 @@ + +]> + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/120-translation-context.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/120-translation-context.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/120-translation-context.pt @@ -0,0 +1,13 @@ + + +
          + Hello world! +
          + +
          + Tab +
          + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/121-translation-comment.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/121-translation-comment.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/121-translation-comment.pt @@ -0,0 +1,7 @@ + + +

          + Hello world! +

          + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/greeting.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/greeting.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/greeting.pt @@ -0,0 +1,1 @@ +
          Hello, ${name | 'undefined'}.
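[Editor's note, not part of the changeset: greeting.pt above (its surrounding tags are missing in this archive copy) shows Chameleon's expression fallback operator. When the first expression in ${name | 'undefined'} raises a NameError, the next alternative is evaluated instead. A minimal sketch using the text-template flavour of the same API:

    from chameleon import PageTextTemplate

    # The pipe tries expressions left to right; an undefined "name"
    # falls back to the string literal.
    template = PageTextTemplate("Hello, ${name | 'undefined'}.")
    print(template(name='world'))   # Hello, world.
    print(template())               # Hello, undefined.
]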
diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/hello_world.pt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/hello_world.pt
new file mode 100644
--- /dev/null
+++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/hello_world.pt
@@ -0,0 +1,5 @@
+
+
+ ${'Hello world!'}
+
+
\ No newline at end of file
diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/hello_world.txt b/lib/Chameleon-2.22/src/chameleon/tests/inputs/hello_world.txt
new file mode 100644
--- /dev/null
+++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/hello_world.txt
@@ -0,0 +1,1 @@
+${'Hello world!'}
diff --git a/lib/Chameleon-2.22/src/chameleon/tests/inputs/hello_world.txt.py b/lib/Chameleon-2.22/src/chameleon/tests/inputs/hello_world.txt.py
new file mode 100644
--- /dev/null
+++ b/lib/Chameleon-2.22/src/chameleon/tests/inputs/hello_world.txt.py
@@ -0,0 +1,48 @@
+# -*- coding: utf-8 -*-
+pass
+import sys as _sys
+pass
+import re
+import functools
+_marker = object()
+g_re_amp = re.compile('&(?!([A-Za-z]+|#[0-9]+);)')
+g_re_needs_escape = re.compile('[&<>\\"\\\']').search
+re_whitespace = functools.partial(re.compile('\\s+').sub, ' ')
+
+def render(stream, econtext, rcontext):
+    append = stream.append
+    getitem = econtext.__getitem__
+    get = econtext.get
+    _i18n_domain = None
+    re_amp = g_re_amp
+    re_needs_escape = g_re_needs_escape
+    decode = getitem('decode')
+    convert = getitem('convert')
+    translate = getitem('translate')
+
+    # -> _content_139955154988272
+    try:
+        _content_139955154988272 = 'Hello world!'
+    except:
+        rcontext.setdefault('__error__', []).append((u"'Hello world!'", 1, 2, '', _sys.exc_info()[1], ))
+        raise
+
+    if (_content_139955154988272 is not None):
+        _tt = type(_content_139955154988272)
+        if ((_tt is int) or (_tt is float) or (_tt is long)):
+            _content_139955154988272 = str(_content_139955154988272)
+        else:
+            if (_tt is str):
+                _content_139955154988272 = decode(_content_139955154988272)
+            else:
+                if (_tt is not unicode):
+                    try:
+                        _content_139955154988272 = _content_139955154988272.__html__
+                    except AttributeError:
+                        _content_139955154988272 = convert(_content_139955154988272)
+                    else:
+                        _content_139955154988272 = _content_139955154988272()
+    _content_139955154988272 = ('%s%s' % ((_content_139955154988272 if (_content_139955154988272 is not None) else ''), (u'\n' if (u'\n' is not None) else ''), ))
+    if (_content_139955154988272 is not None):
+        append(_content_139955154988272)
+pass
\ No newline at end of file
diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/001.html b/lib/Chameleon-2.22/src/chameleon/tests/outputs/001.html
new file mode 100644
--- /dev/null
+++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/001.html
@@ -0,0 +1,7 @@
+
+
+ Hello world!
+ Hello world!
+
+ Goodbye world!
+
diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/001.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/001.pt
new file mode 100644
--- /dev/null
+++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/001.pt
@@ -0,0 +1,9 @@
+
+
+ Hello world!
+
+
+
+ ok
+
+
diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/001.txt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/001.txt
new file mode 100644
--- /dev/null
+++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/001.txt
@@ -0,0 +1,1 @@
+<&>
diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/002.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/002.pt
new file mode 100644
--- /dev/null
+++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/002.pt
@@ -0,0 +1,13 @@
+
+
          + Hello! + Hello. +
          +
          + Goodbye! + Goodbye. +
          + ok + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/003.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/003.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/003.pt @@ -0,0 +1,17 @@ + + +
          Hello world!
          +
          Hello world!
          1 + 2
          Hello world!
          +
          Hello world!
          3 +
          Hello world!
          5 + 6
          Hello world!
          +
          1
          +
          1.0
          +
          True
          +
          False
          +
          0
          +
          + <div>Hello world!</div> + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/004.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/004.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/004.pt @@ -0,0 +1,24 @@ + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/005.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/005.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/005.pt @@ -0,0 +1,12 @@ + + + + + Default + True + False + + Computed default + + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/006.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/006.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/006.pt @@ -0,0 +1,9 @@ + + + copyright (c) 2010 + copyright (c) 2010 + copyright (c) 2010 + $ignored + <type 'str'> + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/007.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/007.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/007.pt @@ -0,0 +1,15 @@ + + + Hello world! +
          Hello world!
          +
          Hello world!
          + <type 'str'> + && + + Hello world + $leftalone +
          +
          Hello world
          +
          ${} is ignored.
          + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/008.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/008.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/008.pt @@ -0,0 +1,12 @@ + + + {} + +
          + static +
          +
          + nothing +
          + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/009.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/009.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/009.pt @@ -0,0 +1,5 @@ + + +
          Hello world!
          + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/010.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/010.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/010.pt @@ -0,0 +1,9 @@ + + +
          1 < 2
          +
          2 < 3, 2&3, 2<3, 2>3
          +
          3 < 4
          +
          4 < 5
          +
          Hello world!
          + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/011-en.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/011-en.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/011-en.pt @@ -0,0 +1,9 @@ + + +
          Message ('message' translation into 'en')
          +
          Message ('message' translation into 'en')
          +
          Message ('message' translation into 'en')
          +
          Message ('message' translation into 'en')
          + Message ('message' translation into 'en') + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/011.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/011.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/011.pt @@ -0,0 +1,9 @@ + + +
          Message
          +
          Message
          +
          Message
          +
          Message
          + Message + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/012-en.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/012-en.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/012-en.pt @@ -0,0 +1,10 @@ + + +
          +
          Hello world! ('Hello world!' translation into 'en')
          +
          Hello world! ('hello_world' translation into 'en')
          +
          Hello world! ('Hello world!' translation into 'en')
          +
          Hello world! Goodbye planet! ('Hello ${first}! Goodbye ${second}!' translation into 'en')
          +
          Hello world! Goodbye planet! ('hello_goodbye' translation into 'en')
          + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/012.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/012.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/012.pt @@ -0,0 +1,10 @@ + + +
          +
          Hello world!
          +
          Hello world!
          +
          Hello world!
          +
          Hello world! Goodbye planet!
          +
          Hello world! Goodbye planet!
          + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/013.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/013.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/013.pt @@ -0,0 +1,22 @@ + + + + + + + + + + + +
          + [1,1] + + [1,2] +
          + [2,1] + + [2,2] +
          + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/014.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/014.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/014.pt @@ -0,0 +1,12 @@ + + + + [3,3] + [3,4] + + + [4,3] + [4,4] + + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/015-en.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/015-en.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/015-en.pt @@ -0,0 +1,5 @@ + + +
          Price: Per kilo 12.5 ('Per kilo ${amount}' translation into 'en') ('Price: ${price}' translation into 'en')
          + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/015.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/015.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/015.pt @@ -0,0 +1,5 @@ + + +
          Price: Per kilo 12.5
          + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/016-en.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/016-en.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/016-en.pt @@ -0,0 +1,9 @@ + + +
          Hello world! ('Hello world!' translation into 'en')
          + Hello world! ('Hello world!' translation into 'en') + Hello world! ('hello_world' translation into 'en') + Hello world! ('Hello world!' translation into 'en') + Hello world! ('hello_world' translation into 'en') + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/016.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/016.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/016.pt @@ -0,0 +1,9 @@ + + +
          Hello world!
          + Hello world! + Hello world! + Hello world! + Hello world! + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/017.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/017.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/017.pt @@ -0,0 +1,12 @@ + + + Hello world! + 1 + Hello world! + 23 + 4Hello world! +
          Hello world!
          + Hello world! + Hello world! + + \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/018-en.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/018-en.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/018-en.pt @@ -0,0 +1,3 @@ +
          + october ('october' translation into 'en') 1982 ('1982' translation into 'en') ('${monthname} ${year}' translation into 'en') +
          diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/018.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/018.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/018.pt @@ -0,0 +1,3 @@ +
          + october 1982 +
          diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/019.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/019.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/019.pt @@ -0,0 +1,13 @@ + + + Hello world! + Hello world!1 + 2Hello world! + Hello world!3 + Hello world!5 + 6Hello world! + 1 + 1.0 + True + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/020.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/020.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/020.pt @@ -0,0 +1,8 @@ + + +
          +
          NameError thrown at 5:24.
          +
          +
          + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/021-en.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/021-en.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/021-en.pt @@ -0,0 +1,12 @@ + + +
          Hello world! ('Hello world!' translation into 'en' with domain 'new')
          +
          Hello world! ('Hello world!' translation into 'en' with domain 'old')
          +
          + Hello world! +
          +
          + Hello world! +
          + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/021.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/021.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/021.pt @@ -0,0 +1,12 @@ + + +
          Hello world!
          +
          Hello world!
          +
          + Hello world! +
          +
          + Hello world! +
          + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/022.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/022.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/022.pt @@ -0,0 +1,21 @@ + + +
          + + ok + + + + ok +
          +
          + + ok +
          +
          + + + ok +
          + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/023.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/023.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/023.pt @@ -0,0 +1,6 @@ + + + + ok + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/024.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/024.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/024.pt @@ -0,0 +1,14 @@ + + + + + first + + second + + + ok + + + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/025.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/025.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/025.pt @@ -0,0 +1,22 @@ + + +
            +
          • 1
          • +
          • 2
          • +
          • 3
          • +
          • 1
          • 2
          • 3
          • +
          • 1
          • +
          • 2
          • +
          • 3
          • + + + 1, + + 2, + + 3 + + . +
          + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/026.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/026.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/026.pt @@ -0,0 +1,17 @@ +
          +
            +
          • 0
          • +
          • 1
          • +
          • 2
          • +
          +
            +
          • 0
          • +
          • 1
          • +
          • 2
          • +
          +
            +
          • even
          • +
          • odd
          • +
          • even
          • +
          +
          diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/027.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/027.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/027.pt @@ -0,0 +1,7 @@ +
          + abcghi + Hello World! + Hello World! +
          diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/028.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/028.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/028.pt @@ -0,0 +1,5 @@ +
          + + + +
          diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/029.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/029.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/029.pt @@ -0,0 +1,3 @@ +
          + +
          diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/030.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/030.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/030.pt @@ -0,0 +1,10 @@ + + +
          + 1, 1, 2 +
          +
          + 2, 3, 4 +
          + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/031.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/031.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/031.pt @@ -0,0 +1,7 @@ +
          + Hello World! + Hello World! + Hello World! + 012 + True +
          diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/032.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/032.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/032.pt @@ -0,0 +1,15 @@ + + + Master template + + +
          + + + +
          + + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/033.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/033.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/033.pt @@ -0,0 +1,15 @@ + + + Master template + + +
          + + + +
          + + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/034.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/034.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/034.pt @@ -0,0 +1,15 @@ + + + Master template + + +
          + + + +
          + + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/035.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/035.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/035.pt @@ -0,0 +1,17 @@ + + + + New title + + + +
          + + + +
          + + + \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/036.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/036.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/036.pt @@ -0,0 +1,15 @@ + + + New title + + +
          + + + +
          + + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/037.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/037.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/037.pt @@ -0,0 +1,15 @@ + + + Master template + + +
          + + ok + +
          + + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/038.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/038.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/038.pt @@ -0,0 +1,6 @@ + + + + ok + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/039.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/039.pt new file mode 100644 diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/040.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/040.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/040.pt @@ -0,0 +1,15 @@ + + + + + foo + + + + + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/041.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/041.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/041.pt @@ -0,0 +1,7 @@ + + +
          Hello world!
          +
          Hello world!
          +
          Goodbye
          + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/042.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/042.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/042.pt @@ -0,0 +1,15 @@ + + + Master template + + +
          + + + +
          + + + \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/043.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/043.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/043.pt @@ -0,0 +1,11 @@ + + + + + + + My title + + + + \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/044.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/044.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/044.pt @@ -0,0 +1,5 @@ + + + a, b + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/045.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/045.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/045.pt @@ -0,0 +1,12 @@ + + +]> + + + ZZZ YYY XXX + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/046.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/046.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/046.pt @@ -0,0 +1,17 @@ + + + Master template + + +
          + + + +
          + + + \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/047.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/047.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/047.pt @@ -0,0 +1,17 @@ + + + Master template + + +
          + + + +
          + + + \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/048.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/048.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/048.pt @@ -0,0 +1,17 @@ + + + Master template + + +
          + + + +
          + + + \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/049.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/049.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/049.pt @@ -0,0 +1,11 @@ + + +
          amp=&amp; lt=&lt;
          +
          amp=& lt=<
          + + + + + + \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/059.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/059.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/059.pt @@ -0,0 +1,6 @@ + + + test + test + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/060.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/060.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/060.pt @@ -0,0 +1,8 @@ + + + Untitled + + +

          Untitled

          + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/061.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/061.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/061.pt @@ -0,0 +1,8 @@ + + + My document + + +

          My document

          + + \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/062.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/062.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/062.pt @@ -0,0 +1,27 @@ +
          + +
          + +
          + +
          + +
          + +
          + +
          + +
          + +
          + +
          + + + + \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/063.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/063.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/063.pt @@ -0,0 +1,3 @@ +
          + 2 +
          \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/064.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/064.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/064.pt @@ -0,0 +1,3 @@ + +
          +
          diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/065.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/065.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/065.pt @@ -0,0 +1,13 @@ + + + Title + + +
          +
          Content
          +
          + + + \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/066.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/066.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/066.pt @@ -0,0 +1,5 @@ + + + Hello world! + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/067.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/067.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/067.pt @@ -0,0 +1,6 @@ + + + + + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/068.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/068.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/068.pt @@ -0,0 +1,8 @@ + + + 0 < 1 or 0 > 1 + 0 < 1 or 0 > 1 + + 0 < 1 or 0 > 1 + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/069-en.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/069-en.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/069-en.pt @@ -0,0 +1,15 @@ + + + Title ('title' translation into 'en' with domain 'test') + + +
          + + + +
          + + + \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/069.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/069.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/069.pt @@ -0,0 +1,15 @@ + + + Title + + +
          + + + +
          + + + \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/070-en.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/070-en.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/070-en.pt @@ -0,0 +1,15 @@ + + + Title ('title' translation into 'en' with domain 'test') + + +
          + + + +
          + + + \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/070.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/070.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/070.pt @@ -0,0 +1,15 @@ + + + Title + + +
          + + + +
          + + + \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/071.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/071.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/071.pt @@ -0,0 +1,12 @@ + + + + + + + + + + + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/072.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/072.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/072.pt @@ -0,0 +1,19 @@ + + +
            +
          • 1
          • +
          • 2
          • +
          • 3
          • +
          +
            +
          • 1
          • +
          • 2
          • +
          • 3
          • +
          +
            +
          • 1
          • +
          • 2
          • +
          • 3
          • +
          + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/073.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/073.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/073.pt @@ -0,0 +1,5 @@ + + +my title ??? my site + + \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/074.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/074.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/074.pt @@ -0,0 +1,5 @@ + + +my title ? my site + + \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/075.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/075.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/075.pt @@ -0,0 +1,19 @@ + + + + Master template + + +
          + + + foo + + +
          + + + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/076.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/076.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/076.pt @@ -0,0 +1,17 @@ + + + + Master template + + +
          + + bar + +
          + + + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/077-en.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/077-en.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/077-en.pt @@ -0,0 +1,1 @@ + \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/077.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/077.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/077.pt @@ -0,0 +1,1 @@ + \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/078.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/078.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/078.pt @@ -0,0 +1,9 @@ + + + + + + 1, 2, 3 + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/079-en.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/079-en.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/079-en.pt @@ -0,0 +1,16 @@ + + + Welcome ('Welcome' translation into 'en') + + +

          Welcome ('Welcome' translation into 'en')

          + An edge case: ${. ('An edge case: ${.' translation into 'en') + Site logo ('Site logo' translation into 'en') + Site logo ('Site logo' translation into 'en') +
          + boo foo. ('boo foo.' translation into 'en') +
          + bar. ('bar.' translation into 'en') +
          + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/079.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/079.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/079.pt @@ -0,0 +1,16 @@ + + + Welcome + + +

          Welcome

          + An edge case: ${. + Site logo + Site logo +
          + boo foo. +
          + bar. +
          + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/080.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/080.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/080.pt @@ -0,0 +1,3 @@ + + Hello world + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/081.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/081.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/081.pt @@ -0,0 +1,5 @@ + + + Hello world! + + \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/082.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/082.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/082.pt @@ -0,0 +1,5 @@ + + + Hello world! + + \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/083.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/083.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/083.pt @@ -0,0 +1,15 @@ + + + Master template + + +
          + + + +
          + + + \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/084.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/084.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/084.pt @@ -0,0 +1,9 @@ + + + + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/085-en.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/085-en.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/085-en.pt @@ -0,0 +1,9 @@ + + + Welcome + + +

          Welcome

          +

          Click here ('Click here' translation into 'en' with domain 'new') to continue. ('${click_here} to continue.' translation into 'en' with domain 'new')

          + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/085.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/085.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/085.pt @@ -0,0 +1,9 @@ + + + Welcome + + +

          Welcome

          +

          Click here to continue.

          + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/086.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/086.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/086.pt @@ -0,0 +1,18 @@ + + + Master template + + +
          +
          +
          + Chart +
          +
          +
          +
          + + + \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/087.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/087.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/087.pt @@ -0,0 +1,25 @@ + + +
            +
          • 1
          • +
          • 2
          • +
          • 3
          • +
          + + + +
            +
          • 5
          • +
          • 7
          • +
          • 9
          • +
          + +
          + + Please input a number from the range 1, 2, 3, 4, 5, 6, 7, 8, 9. +
          + +
          + + 41 + 1 = 42. +
          \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/088.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/088.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/088.pt @@ -0,0 +1,1 @@ +a, b, c diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/089.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/089.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/089.pt @@ -0,0 +1,5 @@ + + + Hello world! + + \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/090.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/090.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/090.pt @@ -0,0 +1,14 @@ + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/091.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/091.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/091.pt @@ -0,0 +1,5 @@ + + + + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/101.html b/lib/Chameleon-2.22/src/chameleon/tests/outputs/101.html new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/101.html @@ -0,0 +1,5 @@ + + +



          Hello world

          + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/102.html b/lib/Chameleon-2.22/src/chameleon/tests/outputs/102.html new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/102.html @@ -0,0 +1,5 @@ + + +

          Hello world

          + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/103.html b/lib/Chameleon-2.22/src/chameleon/tests/outputs/103.html new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/103.html @@ -0,0 +1,8 @@ + + + + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/120-en.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/120-en.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/120-en.pt @@ -0,0 +1,9 @@ + + +
          Hello world! ('Hello world!' translation into 'en')
          + + + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/120.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/120.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/120.pt @@ -0,0 +1,9 @@ + + +
          Hello world!
          + +
          + Tab +
          + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/121.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/121.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/121.pt @@ -0,0 +1,5 @@ + + +

          Hello world!

          + + diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/greeting.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/greeting.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/greeting.pt @@ -0,0 +1,1 @@ +
          Hello, undefined.
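
A minimal usage sketch, not part of the changeset, of how fixture templates under tests/inputs/ are resolved against a search path; it mirrors the loader tests (test_load_relative, test_consecutive_loads) added in the test_loader.py hunk below, and the fixture directory path is an assumption for illustration:

    import os
    from chameleon.loader import TemplateLoader
    from chameleon.zpt.template import PageTemplateFile

    # Resolve a template name against a search path, as the loader tests do.
    here = os.path.join(os.path.dirname(__file__), "inputs")   # assumed fixture directory
    loader = TemplateLoader(search_path=[here])
    template = loader.load("hello_world.pt", PageTemplateFile)
    print(template.filename)   # absolute path of the resolved template

Loading the same name twice returns the same cached template object, which is what test_consecutive_loads asserts with an identity check.
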
          diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/hello_world.pt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/hello_world.pt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/hello_world.pt @@ -0,0 +1,5 @@ + + + Hello world! + + \ No newline at end of file diff --git a/lib/Chameleon-2.22/src/chameleon/tests/outputs/hello_world.txt b/lib/Chameleon-2.22/src/chameleon/tests/outputs/hello_world.txt new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/outputs/hello_world.txt @@ -0,0 +1,1 @@ +Hello world! diff --git a/lib/Chameleon-2.22/src/chameleon/tests/test_doctests.py b/lib/Chameleon-2.22/src/chameleon/tests/test_doctests.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/test_doctests.py @@ -0,0 +1,40 @@ +import unittest +import doctest + +OPTIONFLAGS = (doctest.ELLIPSIS | + doctest.REPORT_ONLY_FIRST_FAILURE) + + +class DoctestCase(unittest.TestCase): + def __new__(self, test): + return getattr(self, test)() + + @classmethod + def test_tal(cls): + from chameleon import tal + return doctest.DocTestSuite( + tal, optionflags=OPTIONFLAGS) + + @classmethod + def test_tales(cls): + from chameleon import tales + return doctest.DocTestSuite( + tales, optionflags=OPTIONFLAGS) + + @classmethod + def test_utils(cls): + from chameleon import utils + return doctest.DocTestSuite( + utils, optionflags=OPTIONFLAGS) + + @classmethod + def test_exc(cls): + from chameleon import exc + return doctest.DocTestSuite( + exc, optionflags=OPTIONFLAGS) + + @classmethod + def test_compiler(cls): + from chameleon import compiler + return doctest.DocTestSuite( + compiler, optionflags=OPTIONFLAGS) diff --git a/lib/Chameleon-2.22/src/chameleon/tests/test_exc.py b/lib/Chameleon-2.22/src/chameleon/tests/test_exc.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/test_exc.py @@ -0,0 +1,13 @@ +from unittest import TestCase + +class TestTemplateError(TestCase): + + def test_keep_token_location_info(self): + # tokens should not lose information when passed to a TemplateError + from chameleon import exc, tokenize, utils + token = tokenize.Token('stuff', 5, 'more\nstuff', 'mystuff.txt') + error = exc.TemplateError('message', token) + s = str(error) + self.assertTrue( + '- Location: (line 2: col 0)' in s, + 'No location data found\n%s' % s) diff --git a/lib/Chameleon-2.22/src/chameleon/tests/test_loader.py b/lib/Chameleon-2.22/src/chameleon/tests/test_loader.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/test_loader.py @@ -0,0 +1,110 @@ +import unittest + + +class LoadTests: + def _makeOne(self, search_path=None, **kwargs): + klass = self._getTargetClass() + return klass(search_path, **kwargs) + + def _getTargetClass(self): + from chameleon.loader import TemplateLoader + return TemplateLoader + + def test_load_relative(self): + import os + here = os.path.join(os.path.dirname(__file__), "inputs") + loader = self._makeOne(search_path=[here]) + result = self._load(loader, 'hello_world.pt') + self.assertEqual(result.filename, os.path.join(here, 'hello_world.pt')) + + def test_consecutive_loads(self): + import os + here = os.path.join(os.path.dirname(__file__), "inputs") + loader = self._makeOne(search_path=[here]) + + self.assertTrue( + self._load(loader, 'hello_world.pt') is \ + self._load(loader, 'hello_world.pt')) + + def test_load_relative_badpath_in_searchpath(self): + import os + here = os.path.join(os.path.dirname(__file__), "inputs") + 
loader = self._makeOne(search_path=[os.path.join(here, 'none'), here]) + result = self._load(loader, 'hello_world.pt') + self.assertEqual(result.filename, os.path.join(here, 'hello_world.pt')) + + def test_load_abs(self): + import os + here = os.path.join(os.path.dirname(__file__), "inputs") + loader = self._makeOne() + abs = os.path.join(here, 'hello_world.pt') + result = self._load(loader, abs) + self.assertEqual(result.filename, abs) + + +class LoadPageTests(unittest.TestCase, LoadTests): + def _load(self, loader, filename): + from chameleon.zpt import template + return loader.load(filename, template.PageTemplateFile) + + +class ModuleLoadTests(unittest.TestCase): + def _makeOne(self, *args, **kwargs): + from chameleon.loader import ModuleLoader + return ModuleLoader(*args, **kwargs) + + def test_build(self): + import tempfile + path = tempfile.mkdtemp() + loader = self._makeOne(path) + source = "def function(): return %r" % "\xc3\xa6\xc3\xb8\xc3\xa5" + try: + source = source.decode('utf-8') + except AttributeError: + import sys + self.assertTrue(sys.version_info[0] > 2) + + module = loader.build(source, "test.xml") + result1 = module['function']() + d = {} + code = compile(source, 'test.py', 'exec') + exec(code, d) + result2 = d['function']() + self.assertEqual(result1, result2) + + import os + self.assertTrue("test.py" in os.listdir(path)) + + import shutil + shutil.rmtree(path) + + +class ZPTLoadTests(unittest.TestCase): + def _makeOne(self, *args, **kwargs): + import os + here = os.path.join(os.path.dirname(__file__), "inputs") + from chameleon.zpt import loader + return loader.TemplateLoader(here, **kwargs) + + def test_load_xml(self): + loader = self._makeOne() + template = loader.load("hello_world.pt", "xml") + from chameleon.zpt.template import PageTemplateFile + self.assertTrue(isinstance(template, PageTemplateFile)) + + def test_load_text(self): + loader = self._makeOne() + template = loader.load("hello_world.txt", "text") + from chameleon.zpt.template import PageTextTemplateFile + self.assertTrue(isinstance(template, PageTextTemplateFile)) + + def test_load_getitem_gets_xml_file(self): + loader = self._makeOne() + template = loader["hello_world.pt"] + from chameleon.zpt.template import PageTemplateFile + self.assertTrue(isinstance(template, PageTemplateFile)) + + +def test_suite(): + import sys + return unittest.findTestCases(sys.modules[__name__]) diff --git a/lib/Chameleon-2.22/src/chameleon/tests/test_parser.py b/lib/Chameleon-2.22/src/chameleon/tests/test_parser.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/test_parser.py @@ -0,0 +1,92 @@ +from __future__ import with_statement + +import sys + +from unittest import TestCase + +from ..namespaces import XML_NS +from ..namespaces import XMLNS_NS +from ..namespaces import PY_NS + + +class ParserTest(TestCase): + def test_sample_files(self): + import os + import traceback + path = os.path.join(os.path.dirname(__file__), "inputs") + for filename in os.listdir(path): + if not filename.endswith('.html'): + continue + + with open(os.path.join(path, filename), 'rb') as f: + source = f.read() + + from ..utils import read_encoded + try: + want = read_encoded(source) + except UnicodeDecodeError: + exc = sys.exc_info()[1] + self.fail("%s - %s" % (exc, filename)) + + from ..tokenize import iter_xml + from ..parser import ElementParser + try: + tokens = iter_xml(want) + parser = ElementParser(tokens, { + 'xmlns': XMLNS_NS, + 'xml': XML_NS, + 'py': PY_NS, + }) + elements = tuple(parser) + except: + 
self.fail(traceback.format_exc()) + + output = [] + + def render(kind, args): + if kind == 'element': + # start tag + tag, end, children = args + output.append("%(prefix)s%(name)s" % tag) + + for attr in tag['attrs']: + output.append( + "%(space)s%(name)s%(eq)s%(quote)s%(value)s%(quote)s" % \ + attr + ) + + output.append("%(suffix)s" % tag) + + # children + for item in children: + render(*item) + + # end tag + output.append( + "%(prefix)s%(name)s%(space)s%(suffix)s" % end + ) + elif kind == 'text': + text = args[0] + output.append(text) + elif kind == 'start_tag': + node = args[0] + output.append( + "%(prefix)s%(name)s%(space)s%(suffix)s" % node + ) + else: + raise RuntimeError("Not implemented: %s." % kind) + + for kind, args in elements: + render(kind, args) + + got = "".join(output) + + from doctest import OutputChecker + checker = OutputChecker() + + if checker.check_output(want, got, 0) is False: + from doctest import Example + example = Example(f.name, want) + diff = checker.output_difference( + example, got, 0) + self.fail("(%s) - \n%s" % (f.name, diff)) diff --git a/lib/Chameleon-2.22/src/chameleon/tests/test_sniffing.py b/lib/Chameleon-2.22/src/chameleon/tests/test_sniffing.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/test_sniffing.py @@ -0,0 +1,124 @@ +from __future__ import with_statement + +import os +import unittest +import tempfile +import shutil + +from chameleon.utils import unicode_string +from chameleon.utils import encode_string + + +class TypeSniffingTestCase(unittest.TestCase): + def setUp(self): + self.tempdir = tempfile.mkdtemp(prefix='chameleon-tests') + + def tearDown(self): + shutil.rmtree(self.tempdir) + + def _get_temporary_file(self): + filename = os.path.join(self.tempdir, 'template.py') + assert not os.path.exists(filename) + f = open(filename, 'w') + f.flush() + f.close() + return filename + + def get_template(self, text): + fn = self._get_temporary_file() + + with open(fn, 'wb') as tmpfile: + tmpfile.write(text) + + from chameleon.template import BaseTemplateFile + + class DummyTemplateFile(BaseTemplateFile): + def cook(self, body): + self.body = body + + template = DummyTemplateFile(fn) + template.cook_check() + return template + + def check_content_type(self, text, expected_type): + from chameleon.utils import read_bytes + content_type = read_bytes(text, 'ascii')[2] + self.assertEqual(content_type, expected_type) + + def test_xml_encoding(self): + from chameleon.utils import xml_prefixes + + document1 = unicode_string( + "" + ) + document2 = unicode_string( + "" + ) + + for bom, encoding in xml_prefixes: + try: + "".encode(encoding) + except LookupError: + # System does not support this encoding + continue + + self.check_content_type(document1.encode(encoding), "text/xml") + self.check_content_type(document2.encode(encoding), "text/xml") + + HTML_PUBLIC_ID = "-//W3C//DTD HTML 4.01 Transitional//EN" + HTML_SYSTEM_ID = "http://www.w3.org/TR/html4/loose.dtd" + + # Couldn't find the code that handles this... yet. + # def test_sniffer_html_ascii(self): + # self.check_content_type( + # "" + # % self.HTML_SYSTEM_ID, + # "text/html") + # self.check_content_type( + # "sample document", + # "text/html") + + # TODO: This reflects a case that simply isn't handled by the + # sniffer; there are many, but it gets it right more often than + # before. 
+ def donttest_sniffer_xml_simple(self): + self.check_content_type("", "text/xml") + + def test_html_default_encoding(self): + body = encode_string( + '' \ + '\xc3\x90\xc2\xa2\xc3\x90\xc2\xb5' \ + '\xc3\x91\xc2\x81\xc3\x91\xc2\x82' \ + '') + + template = self.get_template(body) + self.assertEqual(template.body, body.decode('utf-8')) + + def test_html_encoding_by_meta(self): + body = encode_string( + '' \ + '\xc3\x92\xc3\xa5\xc3\xb1\xc3\xb2' \ + '' \ + "") + + template = self.get_template(body) + self.assertEqual(template.body, body.decode('windows-1251')) + + def test_xhtml(self): + body = encode_string( + '' \ + '\xc3\x92\xc3\xa5\xc3\xb1\xc3\xb2' \ + '' \ + "") + + template = self.get_template(body) + self.assertEqual(template.body, body.decode('windows-1251')) + + +def test_suite(): + return unittest.makeSuite(TypeSniffingTestCase) + +if __name__ == "__main__": + unittest.main(defaultTest="test_suite") diff --git a/lib/Chameleon-2.22/src/chameleon/tests/test_templates.py b/lib/Chameleon-2.22/src/chameleon/tests/test_templates.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/test_templates.py @@ -0,0 +1,698 @@ +# -*- coding: utf-8 -*- + +from __future__ import with_statement + +import re +import os +import sys +import shutil +import tempfile + +from functools import wraps +from functools import partial + +try: + from unittest2 import TestCase +except ImportError: + from unittest import TestCase + + +from chameleon.utils import byte_string +from chameleon.exc import RenderError + + +class Message(object): + def __str__(self): + return "message" + + +class ImportTestCase(TestCase): + def test_pagetemplates(self): + from chameleon import PageTemplate + from chameleon import PageTemplateFile + from chameleon import PageTemplateLoader + + def test_pagetexttemplates(self): + from chameleon import PageTextTemplate + from chameleon import PageTextTemplateFile + + +class TemplateFileTestCase(TestCase): + @property + def _class(self): + from chameleon.template import BaseTemplateFile + + class TestTemplateFile(BaseTemplateFile): + cook_count = 0 + + def cook(self, body): + self.cook_count += 1 + self._cooked = True + + return TestTemplateFile + + def setUp(self): + self.tempdir = tempfile.mkdtemp(prefix='chameleon-tests') + + def tearDown(self): + shutil.rmtree(self.tempdir) + + def _get_temporary_file(self): + filename = os.path.join(self.tempdir, 'template.py') + assert not os.path.exists(filename) + f = open(filename, 'w') + f.flush() + f.close() + return filename + + def test_cook_check(self): + fn = self._get_temporary_file() + template = self._class(fn) + template.cook_check() + self.assertEqual(template.cook_count, 1) + + def test_auto_reload(self): + fn = self._get_temporary_file() + + # set time in past + os.utime(fn, (0, 0)) + + template = self._class(fn, auto_reload=True) + template.cook_check() + + # a second cook check makes no difference + template.cook_check() + self.assertEqual(template.cook_count, 1) + + # set current time on file + os.utime(fn, None) + + # file is reloaded + template.cook_check() + self.assertEqual(template.cook_count, 2) + + def test_relative_is_expanded_to_cwd(self): + template = self._class("___does_not_exist___") + try: + template.cook_check() + except IOError: + exc = sys.exc_info()[1] + self.assertEqual( + os.getcwd(), + os.path.dirname(exc.filename) + ) + else: + self.fail("Expected OSError.") + + +class RenderTestCase(TestCase): + root = os.path.dirname(__file__) + + def find_files(self, ext): + inputs = 
os.path.join(self.root, "inputs") + outputs = os.path.join(self.root, "outputs") + for filename in sorted(os.listdir(inputs)): + name, extension = os.path.splitext(filename) + if extension != ext: + continue + path = os.path.join(inputs, filename) + + # if there's no output file, treat document as static and + # expect intput equal to output + import glob + globbed = tuple(glob.iglob(os.path.join( + outputs, "%s*%s" % (name.split('-', 1)[0], ext)))) + + if not globbed: + self.fail("Missing output for: %s." % name) + + for output in globbed: + name, ext = os.path.splitext(output) + basename = os.path.basename(name) + if '-' in basename: + language = basename.split('-')[1] + else: + language = None + + yield path, output, language + + +class ZopePageTemplatesTest(RenderTestCase): + @property + def from_string(body): + from ..zpt.template import PageTemplate + return partial(PageTemplate, keep_source=True) + + @property + def from_file(body): + from ..zpt.template import PageTemplateFile + return partial(PageTemplateFile, keep_source=True) + + def template(body): + def decorator(func): + @wraps(func) + def wrapper(self): + template = self.from_string(body) + return func(self, template) + + return wrapper + return decorator + + def error(body): + def decorator(func): + @wraps(func) + def wrapper(self): + from chameleon.exc import TemplateError + try: + template = self.from_string(body) + except TemplateError: + exc = sys.exc_info()[1] + return func(self, body, exc) + else: + self.fail("Expected exception.") + + return wrapper + return decorator + + def test_syntax_error_in_strict_mode(self): + from chameleon.exc import ExpressionError + + self.assertRaises( + ExpressionError, + self.from_string, + """""", + strict=True + ) + + def test_syntax_error_in_non_strict_mode(self): + from chameleon.exc import ExpressionError + + body = """""" + template = self.from_string(body, strict=False) + + try: + template() + except ExpressionError: + exc = sys.exc_info()[1] + self.assertTrue(body[exc.offset:].startswith('bad ///')) + else: + self.fail("Expected exception") + + @error("""""") + def test_attributes_on_tal_tag_fails(self, body, exc): + self.assertTrue(body[exc.offset:].startswith('dummy')) + + @error("""""") + def test_i18n_attributes_with_non_identifiers(self, body, exc): + self.assertTrue(body[exc.offset:].startswith('foo,')) + + @error("""""") + def test_repeat_syntax_error_message(self, body, exc): + self.assertTrue(body[exc.offset:].startswith('key,value')) + + @error('''

          + +

          ''') + def test_repeat_i18n_name_error(self, body, exc): + self.assertTrue(body[exc.offset:].startswith('repeat'), body[exc.offset:]) + + @error(''' + + ''') + def test_i18n_name_not_in_translation_error(self, body, exc): + self.assertTrue(body[exc.offset:].startswith('not_in_translation')) + + def test_encoded(self): + filename = '074-encoded-template.pt' + with open(os.path.join(self.root, 'inputs', filename), 'rb') as f: + body = f.read() + + self.from_string(body) + + def test_utf8_encoded(self): + filename = '073-utf8-encoded.pt' + with open(os.path.join(self.root, 'inputs', filename), 'rb') as f: + body = f.read() + + self.from_string(body) + + def test_unicode_decode_error(self): + template = self.from_file( + os.path.join(self.root, 'inputs', 'greeting.pt') + ) + + string = native = "the artist formerly known as ????????????" + try: + string = string.decode('utf-8') + except AttributeError: + pass + + class name: + @staticmethod + def __html__(): + # This raises a decoding exception + string.encode('utf-8').decode('ascii') + + self.fail("Expected exception raised.") + + try: + template(name=name) + except UnicodeDecodeError: + exc = sys.exc_info()[1] + formatted = str(exc) + + # There's a marker under the expression that has the + # unicode decode error + self.assertTrue('^^^^^' in formatted) + self.assertTrue(native in formatted) + else: + self.fail("expected error") + + def test_custom_encoding_for_str_or_bytes_in_content(self): + string = '
          ????????${text}
          ' + try: + string = string.decode('utf-8') + except AttributeError: + pass + + template = self.from_string(string, encoding="windows-1251") + + text = '????????' + + try: + text = text.decode('utf-8') + except AttributeError: + pass + + rendered = template(text=text.encode('windows-1251')) + + self.assertEqual( + rendered, + string.replace('${text}', text) + ) + + def test_custom_encoding_for_str_or_bytes_in_attributes(self): + string = '' + try: + string = string.decode('utf-8') + except AttributeError: + pass + + template = self.from_string(string, encoding="windows-1251") + + text = '????????' + + try: + text = text.decode('utf-8') + except AttributeError: + pass + + rendered = template(text=text.encode('windows-1251')) + + self.assertEqual( + rendered, + string.replace('${text}', text) + ) + + def test_null_translate_function(self): + template = self.from_string('${test}', translate=None) + rendered = template(test=object()) + self.assertTrue('object' in rendered) + + def test_object_substitution_coerce_to_str(self): + template = self.from_string('${test}', translate=None) + + class dummy(object): + def __repr__(inst): + self.fail("call not expected") + + def __str__(inst): + return '' + + rendered = template(test=dummy()) + self.assertEqual(rendered, '<dummy>') + + def test_repr(self): + template = self.from_file( + os.path.join(self.root, 'inputs', 'hello_world.pt') + ) + self.assertTrue(template.filename in repr(template)) + + def test_underscore_variable(self): + template = self.from_string( + "
          ${_dummy}
          " + ) + self.assertTrue(template(), "
          foo
          ") + + def test_trim_attribute_space(self): + document = '''
          ''' + + result1 = self.from_string( + document)() + + result2 = self.from_string( + document, trim_attribute_space=True)() + + self.assertEqual(result1.count(" "), 49) + self.assertEqual(result2.count(" "), 4) + self.assertTrue(" />" in result1) + self.assertTrue(" />" in result2) + + def test_exception(self): + from traceback import format_exception_only + + template = self.from_string( + "
          ${dummy}
          " + ) + try: + template() + except Exception as exc: + self.assertIn(RenderError, type(exc).__bases__) + exc = sys.exc_info()[1] + formatted = str(exc) + self.assertFalse('NameError:' in formatted) + self.assertTrue('foo' in formatted) + self.assertTrue('(line 1: col 23)' in formatted) + + formatted_exc = "\n".join(format_exception_only(type(exc), exc)) + self.assertTrue('NameError: foo' in formatted_exc) + else: + self.fail("expected error") + + def test_create_formatted_exception(self): + from chameleon.utils import create_formatted_exception + + exc = create_formatted_exception(NameError('foo'), NameError, str) + self.assertEqual(exc.args, ('foo', )) + + class MyNameError(NameError): + def __init__(self, boo): + NameError.__init__(self, boo) + self.bar = boo + + exc = create_formatted_exception(MyNameError('foo'), MyNameError, str) + self.assertEqual(exc.args, ('foo', )) + self.assertEqual(exc.bar, 'foo') + + def test_create_formatted_exception_no_subclass(self): + from chameleon.utils import create_formatted_exception + + class DifficultMetaClass(type): + def __init__(self, class_name, bases, namespace): + if not bases == (BaseException, ): + raise TypeError(bases) + + Difficult = DifficultMetaClass('Difficult', (BaseException, ), {'args': ()}) + + exc = create_formatted_exception(Difficult(), Difficult, str) + self.assertEqual(exc.args, ()) + + def test_error_handler_makes_safe_copy(self): + calls = [] + + class TestException(Exception): + def __init__(self, *args, **kwargs): + calls.append((args, kwargs)) + + def _render(stream, econtext, rcontext): + exc = TestException('foo', bar='baz') + rcontext['__error__'] = ('expression', 1, 42, 'test.pt', exc), + raise exc + + template = self.from_string("") + template._render = _render + try: + template() + except TestException: + self.assertEqual(calls, [(('foo', ), {'bar': 'baz'})]) + exc = sys.exc_info()[1] + formatted = str(exc) + self.assertTrue('TestException' in formatted) + self.assertTrue('"expression"' in formatted) + self.assertTrue('(line 1: col 42)' in formatted) + else: + self.fail("unexpected error") + + def test_double_underscore_variable(self): + from chameleon.exc import TranslationError + self.assertRaises( + TranslationError, self.from_string, + "
          ${__dummy}
          ", + ) + + def test_compiler_internals_are_disallowed(self): + from chameleon.compiler import COMPILER_INTERNALS_OR_DISALLOWED + from chameleon.exc import TranslationError + + for name in COMPILER_INTERNALS_OR_DISALLOWED: + body = "${%s}" % (name, name) + self.assertRaises(TranslationError, self.from_string, body) + + def test_simple_translate_mapping(self): + template = self.from_string( + '
          ' + 'foo' + '
          ') + + self.assertEqual(template(), '
          foo
          ') + + def test_translate_is_not_an_internal(self): + macro = self.from_string('bar') + template = self.from_string( + ''' + + foo + + + ''') + + result = template(macro=macro) + self.assertTrue('foo' in result) + self.assertTrue('foo' in result) + + def test_literal_false(self): + template = self.from_string( + '' + '' + '' + '', + literal_false=True, + ) + + self.assertEqual( + template(), + '' + '' + '' + '', + template.source + ) + + def test_boolean_attributes(self): + template = self.from_string( + '' + '' + '' + '' + '' + '', + boolean_attributes=set(['checked']) + ) + + self.assertEqual( + template(), + '' + '' + '' + '' + '' + '', + template.source + ) + + def test_default_debug_flag(self): + from chameleon.config import DEBUG_MODE + template = self.from_file( + os.path.join(self.root, 'inputs', 'hello_world.pt'), + ) + self.assertEqual(template.debug, DEBUG_MODE) + self.assertTrue('debug' not in template.__dict__) + + def test_debug_flag_on_string(self): + from chameleon.loader import ModuleLoader + + with open(os.path.join(self.root, 'inputs', 'hello_world.pt')) as f: + source = f.read() + + template = self.from_string(source, debug=True) + + self.assertTrue(template.debug) + self.assertTrue(isinstance(template.loader, ModuleLoader)) + + def test_debug_flag_on_file(self): + from chameleon.loader import ModuleLoader + template = self.from_file( + os.path.join(self.root, 'inputs', 'hello_world.pt'), + debug=True, + ) + self.assertTrue(template.debug) + self.assertTrue(isinstance(template.loader, ModuleLoader)) + + def test_tag_mismatch(self): + from chameleon.exc import ParseError + + try: + self.from_string(""" +
          +
          +
          + """) + except ParseError: + exc = sys.exc_info()[1] + self.assertTrue("" in str(exc)) + else: + self.fail("Expected error.") + + +class ZopeTemplatesTestSuite(RenderTestCase): + def setUp(self): + self.temp_path = temp_path = tempfile.mkdtemp() + + @self.addCleanup + def cleanup(path=temp_path): + shutil.rmtree(path) + + def test_pt_files(self): + from ..zpt.template import PageTemplateFile + + class Literal(object): + def __init__(self, s): + self.s = s + + def __html__(self): + return self.s + + def __str__(self): + raise RuntimeError( + "%r is a literal." % self.s) + + from chameleon.loader import TemplateLoader + loader = TemplateLoader(os.path.join(self.root, "inputs")) + + self.execute( + ".pt", PageTemplateFile, + literal=Literal("
          Hello world!
          "), + content="
          Hello world!
          ", + message=Message(), + load=loader.bind(PageTemplateFile), + ) + + def test_txt_files(self): + from ..zpt.template import PageTextTemplateFile + self.execute(".txt", PageTextTemplateFile) + + def execute(self, ext, factory, **kwargs): + def translate(msgid, domain=None, mapping=None, context=None, + target_language=None, default=None): + if default is None: + default = str(msgid) + + if isinstance(msgid, Message): + default = "Message" + + if mapping: + default = re.sub(r'\${([a-z_]+)}', r'%(\1)s', default) % \ + mapping + + if target_language is None: + return default + + if domain is None: + with_domain = "" + else: + with_domain = " with domain '%s'" % domain + + if context is None: + with_context = "" + else: + with_context = ", context '%s'" % context + + stripped = default.rstrip('\n ') + return "%s ('%s' translation into '%s'%s%s)%s" % ( + stripped, msgid, target_language, with_domain, with_context, + default[len(stripped):] + ) + + for input_path, output_path, language in self.find_files(ext): + # Make friendly title so we can locate the generated + # source when debugging + self.shortDescription = lambda: input_path + + # When input path contaiins the string 'implicit-i18n', we + # enable "implicit translation". + implicit_i18n = 'implicit-i18n' in input_path + implicit_i18n_attrs = ("alt", "title") if implicit_i18n else () + + template = factory( + input_path, + keep_source=True, + strict=False, + implicit_i18n_translate=implicit_i18n, + implicit_i18n_attributes=implicit_i18n_attrs, + ) + + params = kwargs.copy() + params.update({ + 'translate': translate, + 'target_language': language, + }) + + template.cook_check() + + try: + got = template.render(**params) + except: + import traceback + e = traceback.format_exc() + self.fail("%s\n\n Example source:\n\n%s" % (e, "\n".join( + ["%#03.d%s" % (lineno + 1, line and " " + line or "") + for (lineno, line) in + enumerate(template.source.split( + '\n'))]))) + + if isinstance(got, byte_string): + got = got.decode('utf-8') + + from doctest import OutputChecker + checker = OutputChecker() + + if not os.path.exists(output_path): + output = template.body + else: + with open(output_path, 'rb') as f: + output = f.read() + + from chameleon.utils import read_xml_encoding + from chameleon.utils import detect_encoding + + if template.content_type == 'text/xml': + encoding = read_xml_encoding(output) or \ + template.default_encoding + else: + content_type, encoding = detect_encoding( + output, template.default_encoding) + + want = output.decode(encoding) + + if checker.check_output(want, got, 0) is False: + from doctest import Example + example = Example(input_path, want) + diff = checker.output_difference( + example, got, 0) + self.fail("(%s) - \n%s\n\nCode:\n%s" % ( + input_path, diff.rstrip('\n'), + template.source.encode('utf-8'))) diff --git a/lib/Chameleon-2.22/src/chameleon/tests/test_tokenizer.py b/lib/Chameleon-2.22/src/chameleon/tests/test_tokenizer.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tests/test_tokenizer.py @@ -0,0 +1,47 @@ +import sys + +from unittest import TestCase + + +class TokenizerTest(TestCase): + def test_sample_files(self): + import os + import traceback + path = os.path.join(os.path.dirname(__file__), "inputs") + for filename in os.listdir(path): + if not filename.endswith('.xml'): + continue + f = open(os.path.join(path, filename), 'rb') + source = f.read() + f.close() + + from ..utils import read_encoded + try: + want = read_encoded(source) + except UnicodeDecodeError: + 
exc = sys.exc_info()[1] + self.fail("%s - %s" % (exc, filename)) + + from ..tokenize import iter_xml + try: + tokens = iter_xml(want) + got = "".join(tokens) + except: + self.fail(traceback.format_exc()) + + from doctest import OutputChecker + checker = OutputChecker() + + if checker.check_output(want, got, 0) is False: + from doctest import Example + example = Example(f.name, want) + diff = checker.output_difference( + example, got, 0) + self.fail("(%s) - \n%s" % (f.name, diff)) + + def test_token(self): + from chameleon.tokenize import Token + token = Token("abc", 1) + + self.assertTrue(isinstance(token[1:], Token)) + self.assertEqual(token[1:].pos, 2) diff --git a/lib/Chameleon-2.22/src/chameleon/tokenize.py b/lib/Chameleon-2.22/src/chameleon/tokenize.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/tokenize.py @@ -0,0 +1,144 @@ +# http://code.activestate.com/recipes/65125-xml-lexing-shallow-parsing/ +# by Paul Prescod +# licensed under the PSF License +# +# modified to capture all non-overlapping parts of tokens + +import re + +try: + str = unicode +except NameError: + pass + +class recollector: + def __init__(self): + self.res = {} + + def add(self, name, reg ): + re.compile(reg) # check that it is valid + self.res[name] = reg % self.res + +collector = recollector() +a = collector.add + +a("TextSE", "[^<]+") +a("UntilHyphen", "[^-]*-") +a("Until2Hyphens", "%(UntilHyphen)s(?:[^-]%(UntilHyphen)s)*-") +a("CommentCE", "%(Until2Hyphens)s>?") +a("UntilRSBs", "[^\\]]*](?:[^\\]]+])*]+") +a("CDATA_CE", "%(UntilRSBs)s(?:[^\\]>]%(UntilRSBs)s)*>" ) +a("S", "[ \\n\\t\\r]+") +a("Simple", "[^\"'>/]+") +a("NameStrt", "[A-Za-z_:]|[^\\x00-\\x7F]") +a("NameChar", "[A-Za-z0-9_:.-]|[^\\x00-\\x7F]") +a("Name", "(?:%(NameStrt)s)(?:%(NameChar)s)*") +a("QuoteSE", "\"[^\"]*\"|'[^']*'") +a("DT_IdentSE" , "%(S)s%(Name)s(?:%(S)s(?:%(Name)s|%(QuoteSE)s))*" ) +a("MarkupDeclCE" , "(?:[^\\]\"'><]+|%(QuoteSE)s)*>" ) +a("S1", "[\\n\\r\\t ]") +a("UntilQMs", "[^?]*\\?+") +a("PI_Tail" , "\\?>|%(S1)s%(UntilQMs)s(?:[^>?]%(UntilQMs)s)*>" ) +a("DT_ItemSE", + "<(?:!(?:--%(Until2Hyphens)s>|[^-]%(MarkupDeclCE)s)|" + "\\?%(Name)s(?:%(PI_Tail)s))|%%%(Name)s;|%(S)s" +) +a("DocTypeCE" , +"%(DT_IdentSE)s(?:%(S)s)?(?:\\[(?:%(DT_ItemSE)s)*](?:%(S)s)?)?>?" 
) +a("DeclCE", + "--(?:%(CommentCE)s)?|\\[CDATA\\[(?:%(CDATA_CE)s)?|" + "DOCTYPE(?:%(DocTypeCE)s)?") +a("PI_CE", "%(Name)s(?:%(PI_Tail)s)?") +a("EndTagCE", "%(Name)s(?:%(S)s)?>?") +a("AttValSE", "\"[^\"]*\"|'[^']*'") +a("ElemTagCE", + "(%(Name)s)(?:(%(S)s)(%(Name)s)(((?:%(S)s)?=(?:%(S)s)?)" + "(?:%(AttValSE)s|%(Simple)s)|(?!(?:%(S)s)?=)))*(?:%(S)s)?(/?>)?") +a("MarkupSPE", + "<(?:!(?:%(DeclCE)s)?|" + "\\?(?:%(PI_CE)s)?|/(?:%(EndTagCE)s)?|(?:%(ElemTagCE)s)?)") +a("XML_SPE", "%(TextSE)s|%(MarkupSPE)s") +a("XML_MARKUP_ONLY_SPE", "%(MarkupSPE)s") +a("ElemTagSPE", "<|%(Name)s") + +re_xml_spe = re.compile(collector.res['XML_SPE']) +re_markup_only_spe = re.compile(collector.res['XML_MARKUP_ONLY_SPE']) + + +def iter_xml(body, filename=None): + for match in re_xml_spe.finditer(body): + string = match.group() + pos = match.start() + yield Token(string, pos, body, filename) + + +def iter_text(body, filename=None): + yield Token(body, 0, body, filename) + + +class Token(str): + __slots__ = "pos", "source", "filename" + + def __new__(cls, string, pos=0, source=None, filename=None): + inst = str.__new__(cls, string) + inst.pos = pos + inst.source = source + inst.filename = filename or "" + return inst + + def __getslice__(self, i, j): + slice = str.__getslice__(self, i, j) + return Token(slice, self.pos + i, self.source, self.filename) + + def __getitem__(self, index): + s = str.__getitem__(self, index) + if isinstance(index, slice): + return Token( + s, self.pos + (index.start or 0), self.source, self.filename) + return s + + def __add__(self, other): + if other is None: + return self + + return Token( + str.__add__(self, other), self.pos, self.source, self.filename) + + def __eq__(self, other): + return str.__eq__(self, other) + + def __hash__(self): + return str.__hash__(self) + + def replace(self, *args): + s = str.replace(self, *args) + return Token(s, self.pos, self.source, self.filename) + + def split(self, *args): + l = str.split(self, *args) + pos = self.pos + for i, s in enumerate(l): + l[i] = Token(s, pos, self.source, self.filename) + pos += len(s) + return l + + def strip(self, *args): + return self.lstrip(*args).rstrip(*args) + + def lstrip(self, *args): + s = str.lstrip(self, *args) + return Token( + s, self.pos + len(self) - len(s), self.source, self.filename) + + def rstrip(self, *args): + s = str.rstrip(self, *args) + return Token(s, self.pos, self.source, self.filename) + + @property + def location(self): + if self.source is None: + return 0, self.pos + + body = self.source[:self.pos] + line = body.count('\n') + return line + 1, self.pos - body.rfind('\n', 0) - 1 diff --git a/lib/Chameleon-2.22/src/chameleon/utils.py b/lib/Chameleon-2.22/src/chameleon/utils.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/utils.py @@ -0,0 +1,435 @@ +import os +import re +import sys +import codecs +import logging + +from copy import copy + +version = sys.version_info[:3] + +try: + import ast as _ast +except ImportError: + from chameleon import ast25 as _ast + + +class ASTProxy(object): + aliases = { + # Python 3.3 + 'TryExcept': 'Try', + 'TryFinally': 'Try', + } + + def __getattr__(self, name): + return _ast.__dict__.get(name) or getattr(_ast, self.aliases[name]) + + +ast = ASTProxy() + +log = logging.getLogger('chameleon.utils') + +# Python 2 +if version < (3, 0, 0): + import htmlentitydefs + import __builtin__ as builtins + + from .py25 import raise_with_traceback + + chr = unichr + native_string = str + decode_string = unicode + encode_string = str + unicode_string = 
unicode + string_type = basestring + byte_string = str + + def safe_native(s, encoding='utf-8'): + if not isinstance(s, unicode): + s = decode_string(s, encoding, 'replace') + + return s.encode(encoding) + +# Python 3 +else: + from html import entities as htmlentitydefs + import builtins + + byte_string = bytes + string_type = str + native_string = str + decode_string = bytes.decode + encode_string = lambda s: bytes(s, 'utf-8') + unicode_string = str + + def safe_native(s, encoding='utf-8'): + if not isinstance(s, str): + s = decode_string(s, encoding, 'replace') + + return s + + def raise_with_traceback(exc, tb): + exc.__traceback__ = tb + raise exc + +def text_(s, encoding='latin-1', errors='strict'): + """ If ``s`` is an instance of ``byte_string``, return + ``s.decode(encoding, errors)``, otherwise return ``s``""" + if isinstance(s, byte_string): + return s.decode(encoding, errors) + return s + +entity_re = re.compile(r'&(#?)(x?)(\d{1,5}|\w{1,8});') + +module_cache = {} + +xml_prefixes = ( + (codecs.BOM_UTF8, 'utf-8-sig'), + (codecs.BOM_UTF16_BE, 'utf-16-be'), + (codecs.BOM_UTF16_LE, 'utf-16-le'), + (codecs.BOM_UTF16, 'utf-16'), + (codecs.BOM_UTF32_BE, 'utf-32-be'), + (codecs.BOM_UTF32_LE, 'utf-32-le'), + (codecs.BOM_UTF32, 'utf-32'), + ) + + +def _has_encoding(encoding): + try: + "".encode(encoding) + except LookupError: + return False + else: + return True + + +# Precomputed prefix table +_xml_prefixes = tuple( + (bom, str('\s*', + re.IGNORECASE + ) + +RE_ENCODING = re.compile( + r'encoding\s*=\s*(?:"|\')(?P[\w\-]+)(?:"|\')'.encode('ascii'), + re.IGNORECASE + ) + + +def read_encoded(data): + return read_bytes(data, "utf-8")[0] + + +def read_bytes(body, default_encoding): + for bom, prefix, encoding in _xml_prefixes: + if body.startswith(bom): + document = body.decode(encoding) + return document, encoding, \ + "text/xml" if document.startswith(">> mangle('hello_world.pt') + 'hello_world' + + >>> mangle('foo.bar.baz.pt') + 'foo_bar_baz' + + >>> mangle('foo-bar-baz.pt') + 'foo_bar_baz' + + """ + + base, ext = os.path.splitext(filename) + return base.replace('.', '_').replace('-', '_') + + +def char2entity(c): + cp = ord(c) + name = htmlentitydefs.codepoint2name.get(cp) + return '&%s;' % name if name is not None else '&#%d;' % cp + + +def substitute_entity(match, n2cp=htmlentitydefs.name2codepoint): + ent = match.group(3) + + if match.group(1) == "#": + if match.group(2) == '': + return chr(int(ent)) + elif match.group(2) == 'x': + return chr(int('0x' + ent, 16)) + else: + cp = n2cp.get(ent) + + if cp: + return chr(cp) + else: + return match.group() + + +def create_formatted_exception(exc, cls, formatter, base=Exception): + try: + try: + new = type(cls.__name__, (cls, base), { + '__str__': formatter, + '__new__': BaseException.__new__, + '__module__': cls.__module__, + }) + except TypeError: + new = cls + + try: + inst = BaseException.__new__(new) + except TypeError: + inst = cls.__new__(new) + + BaseException.__init__(inst, *exc.args) + inst.__dict__ = exc.__dict__ + + return inst + except ValueError: + name = type(exc).__name__ + log.warn("Unable to copy exception of type '%s'." % name) + raise TypeError(exc) + + +def unescape(string): + for name in ('lt', 'gt', 'quot'): + cp = htmlentitydefs.name2codepoint[name] + string = string.replace('&%s;' % name, chr(cp)) + + return string + + +_concat = unicode_string("").join + + +def join(stream): + """Concatenate stream. 
+ + >>> print(join(('Hello', ' ', 'world'))) + Hello world + + >>> join(('Hello', 0)) + Traceback (most recent call last): + ... + TypeError: ... expected ... + + """ + + try: + return _concat(stream) + except: + # Loop through stream and coerce each element into unicode; + # this should raise an exception + for element in stream: + unicode_string(element) + + # In case it didn't, re-raise the original exception + raise + + +def decode_htmlentities(string): + """ + >>> native_string(decode_htmlentities('&amp;')) + '&' + + """ + + decoded = entity_re.subn(substitute_entity, string)[0] + + # preserve input token data + return string.replace(string, decoded) + + +# Taken from zope.dottedname +def _resolve_dotted(name, module=None): + name = name.split('.') + if not name[0]: + if module is None: + raise ValueError("relative name without base module") + module = module.split('.') + name.pop(0) + while not name[0]: + module.pop() + name.pop(0) + name = module + name + + used = name.pop(0) + found = __import__(used) + for n in name: + used += '.' + n + try: + found = getattr(found, n) + except AttributeError: + __import__(used) + found = getattr(found, n) + + return found + + +def resolve_dotted(dotted): + if not dotted in module_cache: + resolved = _resolve_dotted(dotted) + module_cache[dotted] = resolved + return module_cache[dotted] + + +def limit_string(s, max_length=53): + if len(s) > max_length: + return s[:max_length - 3] + '...' + + return s + + +def format_kwargs(kwargs): + items = [] + for name, value in kwargs.items(): + if isinstance(value, string_type): + short = limit_string(value) + items.append((name, short.replace('\n', '\\n'))) + elif isinstance(value, (int, float)): + items.append((name, value)) + elif isinstance(value, dict): + items.append((name, '{...} (%d)' % len(value))) + else: + items.append((name, + "<%s %s at %s>" % ( + type(value).__name__, + getattr(value, '__name__', "-"), + hex(abs(id(value)))))) + + return ["%s: %s" % item for item in items] + + +class callablestr(str): + __slots__ = () + + def __call__(self): + return self + + +class callableint(int): + __slots__ = () + + def __call__(self): + return self + + +class descriptorstr(object): + __slots__ = "function", "__name__" + + def __init__(self, function): + self.function = function + self.__name__ = function.__name__ + + def __get__(self, context, cls): + return callablestr(self.function(context)) + + +class descriptorint(object): + __slots__ = "function", "__name__" + + def __init__(self, function): + self.function = function + self.__name__ = function.__name__ + + def __get__(self, context, cls): + return callableint(self.function(context)) + + +class DebuggingOutputStream(list): + def append(self, value): + if not isinstance(value, string_type): + raise TypeError(value) + + unicode_string(value) + list.append(self, value) + + +class Scope(dict): + set_local = setLocal = dict.__setitem__ + + __slots__ = "set_global", + + def __new__(cls, *args): + inst = dict.__new__(cls, *args) + inst.set_global = inst.__setitem__ + return inst + + def __getitem__(self, key): + try: + return dict.__getitem__(self, key) + except KeyError: + raise NameError(key) + + @property + def vars(self): + return self + + def copy(self): + inst = Scope(self) + inst.set_global = self.set_global + return inst + + +class ListDictProxy(object): + def __init__(self, l): + self._l = l + + def get(self, key): + return self._l[-1].get(key) + + +class Markup(unicode_string): + """Wraps a string to always render as structure. + + >>> Markup('
          ') + s'
          ' + """ + + def __html__(self): + return unicode_string(self) + + def __repr__(self): + return "s'%s'" % self diff --git a/lib/Chameleon-2.22/src/chameleon/zpt/__init__.py b/lib/Chameleon-2.22/src/chameleon/zpt/__init__.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/zpt/__init__.py @@ -0,0 +1,1 @@ +# diff --git a/lib/Chameleon-2.22/src/chameleon/zpt/loader.py b/lib/Chameleon-2.22/src/chameleon/zpt/loader.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/zpt/loader.py @@ -0,0 +1,30 @@ +from chameleon.loader import TemplateLoader as BaseLoader +from chameleon.zpt import template + + +class TemplateLoader(BaseLoader): + formats = { + "xml": template.PageTemplateFile, + "text": template.PageTextTemplateFile, + } + + default_format = "xml" + + def __init__(self, *args, **kwargs): + formats = kwargs.pop('formats', None) + if formats is not None: + self.formats = formats + + super(TemplateLoader, self).__init__(*args, **kwargs) + + def load(self, filename, format=None): + """Load and return a template file. + + The format parameter determines will parse the file. Valid + options are `xml` and `text`. + """ + + cls = self.formats[format or self.default_format] + return super(TemplateLoader, self).load(filename, cls) + + __getitem__ = load diff --git a/lib/Chameleon-2.22/src/chameleon/zpt/program.py b/lib/Chameleon-2.22/src/chameleon/zpt/program.py new file mode 100644 --- /dev/null +++ b/lib/Chameleon-2.22/src/chameleon/zpt/program.py @@ -0,0 +1,798 @@ +import re + +try: + import ast +except ImportError: + from chameleon import ast25 as ast + +try: + str = unicode +except NameError: + long = int + +from functools import partial +from copy import copy + +from ..program import ElementProgram + +from ..namespaces import XML_NS +from ..namespaces import XMLNS_NS +from ..namespaces import I18N_NS as I18N +from ..namespaces import TAL_NS as TAL +from ..namespaces import METAL_NS as METAL +from ..namespaces import META_NS as META + +from ..astutil import Static +from ..astutil import parse +from ..astutil import marker + +from .. import tal +from .. import metal +from .. import i18n +from .. import nodes + +from ..exc import LanguageError +from ..exc import ParseError +from ..exc import CompilationError + +from ..utils import decode_htmlentities + +try: + str = unicode +except NameError: + long = int + + +missing = object() + +re_trim = re.compile(r'($\s+|\s+^)', re.MULTILINE) + +EMPTY_DICT = Static(ast.Dict(keys=[], values=[])) + + +def skip(node): + return node + + +def wrap(node, *wrappers): + for wrapper in reversed(wrappers): + node = wrapper(node) + return node + + +def validate_attributes(attributes, namespace, whitelist): + for ns, name in attributes: + if ns == namespace and name not in whitelist: + raise CompilationError( + "Bad attribute for namespace '%s'" % ns, name + ) + + +class MacroProgram(ElementProgram): + """Visitor class that generates a program for the ZPT language.""" + + DEFAULT_NAMESPACES = { + 'xmlns': XMLNS_NS, + 'xml': XML_NS, + 'tal': TAL, + 'metal': METAL, + 'i18n': I18N, + 'meta': META, + } + + DROP_NS = TAL, METAL, I18N, META + + VARIABLE_BLACKLIST = "default", "repeat", "nothing", \ + "convert", "decode", "translate" + + _interpolation_enabled = True + _whitespace = "\n" + _last = "" + + # Macro name (always trivial for a macro program) + name = None + + # This default marker value has the semantics that if an + # expression evaluates to that value, the expression default value + # is returned. 
For an attribute, if there is no default, this + # means that the attribute is dropped. + default_marker = None + + # Escape mode (true value means XML-escape) + escape = True + + # Attributes which should have boolean behavior (on true, the + # value takes the attribute name, on false, the attribute is + # dropped) + boolean_attributes = set() + + # If provided, this should be a set of attributes for implicit + # translation. Any attribute whose name is included in the set + # will be translated even without explicit markup. Note that all + # values should be lowercase strings. + implicit_i18n_attributes = set() + + # If set, text will be translated even without explicit markup. + implicit_i18n_translate = False + + # If set, additional attribute whitespace will be stripped. + trim_attribute_space = False + + def __init__(self, *args, **kwargs): + # Internal array for switch statements + self._switches = [] + + # Internal array for current use macro level + self._use_macro = [] + + # Internal array for current interpolation status + self._interpolation = [True] + + # Internal dictionary of macro definitions + self._macros = {} + + # Apply default values from **kwargs to self + self._pop_defaults( + kwargs, + 'boolean_attributes', + 'default_marker', + 'escape', + 'implicit_i18n_translate', + 'implicit_i18n_attributes', + 'trim_attribute_space', + ) + + super(MacroProgram, self).__init__(*args, **kwargs) + + @property + def macros(self): + macros = list(self._macros.items()) + macros.append((None, nodes.Sequence(self.body))) + + return tuple( + nodes.Macro(name, [nodes.Context(node)]) + for name, node in macros + ) + + def visit_default(self, node): + return nodes.Text(node) + + def visit_element(self, start, end, children): + ns = start['ns_attrs'] + + for (prefix, attr), encoded in tuple(ns.items()): + if prefix == TAL: + ns[prefix, attr] = decode_htmlentities(encoded) + + # Validate namespace attributes + validate_attributes(ns, TAL, tal.WHITELIST) + validate_attributes(ns, METAL, metal.WHITELIST) + validate_attributes(ns, I18N, i18n.WHITELIST) + + # Check attributes for language errors + self._check_attributes(start['namespace'], ns) + + # Remember whitespace for item repetition + if self._last is not None: + self._whitespace = "\n" + " " * len(self._last.rsplit('\n', 1)[-1]) + + # Set element-local whitespace + whitespace = self._whitespace + + # Set up switch + try: + clause = ns[TAL, 'switch'] + except KeyError: + switch = None + else: + value = nodes.Value(clause) + switch = value, nodes.Copy(value) + + self._switches.append(switch) + + body = [] + + # Include macro + use_macro = ns.get((METAL, 'use-macro')) + extend_macro = ns.get((METAL, 'extend-macro')) + if use_macro or extend_macro: + omit = True + slots = [] + self._use_macro.append(slots) + + if use_macro: + inner = nodes.UseExternalMacro( + nodes.Value(use_macro), slots, False + ) + else: + inner = nodes.UseExternalMacro( + nodes.Value(extend_macro), slots, True + ) + # -or- include tag + else: + content = nodes.Sequence(body) + + # tal:content + try: + clause = ns[TAL, 'content'] + except KeyError: + pass + else: + key, value = tal.parse_substitution(clause) + xlate = True if ns.get((I18N, 'translate')) == '' else False + content = self._make_content_node(value, content, key, xlate) + + if end is None: + # Make sure start-tag has opening suffix. + start['suffix'] = ">" + + # Explicitly set end-tag. 
+ end = { + 'prefix': '' + } + + # i18n:translate + try: + clause = ns[I18N, 'translate'] + except KeyError: + pass + else: + dynamic = ns.get((TAL, 'content')) or ns.get((TAL, 'replace')) + + if not dynamic: + content = nodes.Translate(clause, content) + + # tal:attributes + try: + clause = ns[TAL, 'attributes'] + except KeyError: + TAL_ATTRIBUTES = [] + else: + TAL_ATTRIBUTES = tal.parse_attributes(clause) + + # i18n:attributes + try: + clause = ns[I18N, 'attributes'] + except KeyError: + I18N_ATTRIBUTES = {} + else: + I18N_ATTRIBUTES = i18n.parse_attributes(clause) + + # Prepare attributes from TAL language + prepared = tal.prepare_attributes( + start['attrs'], TAL_ATTRIBUTES, + I18N_ATTRIBUTES, ns, self.DROP_NS + ) + + # Create attribute nodes + STATIC_ATTRIBUTES = self._create_static_attributes(prepared) + ATTRIBUTES = self._create_attributes_nodes( + prepared, I18N_ATTRIBUTES, STATIC_ATTRIBUTES + ) + + # Start- and end nodes + start_tag = nodes.Start( + start['name'], + self._maybe_trim(start['prefix']), + self._maybe_trim(start['suffix']), + ATTRIBUTES + ) + + end_tag = nodes.End( + end['name'], + end['space'], + self._maybe_trim(end['prefix']), + self._maybe_trim(end['suffix']), + ) if end is not None else None + + # tal:omit-tag + try: + clause = ns[TAL, 'omit-tag'] + except KeyError: + omit = False + else: + clause = clause.strip() + + if clause == "": + omit = True + else: + expression = nodes.Negate(nodes.Value(clause)) + omit = expression + + # Wrap start- and end-tags in condition + start_tag = nodes.Condition(expression, start_tag) + + if end_tag is not None: + end_tag = nodes.Condition(expression, end_tag) + + if omit is True or start['namespace'] in self.DROP_NS: + inner = content + else: + inner = nodes.Element( + start_tag, + end_tag, + content, + ) + + # Assign static attributes dictionary to "attrs" value + inner = nodes.Define( + [nodes.Alias(["attrs"], STATIC_ATTRIBUTES or EMPTY_DICT)], + inner, + ) + + if omit is not False: + inner = nodes.Cache([omit], inner) + + # tal:replace + try: + clause = ns[TAL, 'replace'] + except KeyError: + pass + else: + key, value = tal.parse_substitution(clause) + xlate = True if ns.get((I18N, 'translate')) == '' else False + inner = self._make_content_node(value, inner, key, xlate) + + # metal:define-slot + try: + clause = ns[METAL, 'define-slot'] + except KeyError: + DEFINE_SLOT = skip + else: + DEFINE_SLOT = partial(nodes.DefineSlot, clause) + + # tal:define + try: + clause = ns[TAL, 'define'] + except KeyError: + DEFINE = skip + else: + defines = tal.parse_defines(clause) + if defines is None: + raise ParseError("Invalid define syntax.", clause) + + DEFINE = partial( + nodes.Define, + [nodes.Assignment( + names, nodes.Value(expr), context == "local") + for (context, names, expr) in defines], + ) + + # tal:case + try: + clause = ns[TAL, 'case'] + except KeyError: + CASE = skip + else: + value = nodes.Value(clause) + for switch in reversed(self._switches): + if switch is not None: + break + else: + raise LanguageError( + "Must define switch on a parent element.", clause + ) + + CASE = lambda node: nodes.Define( + [nodes.Alias(["default"], switch[1], False)], + nodes.Condition( + nodes.Equality(switch[0], value), + nodes.Cancel([switch[0]], node), + )) + + # tal:repeat + try: + clause = ns[TAL, 'repeat'] + except KeyError: + REPEAT = skip + else: + defines = tal.parse_defines(clause) + assert len(defines) == 1 + context, names, expr = defines[0] + + expression = nodes.Value(expr) + + if start['namespace'] == TAL: + self._last = None + 
self._whitespace = whitespace.lstrip('\n') + whitespace = "" + + REPEAT = partial( + nodes.Repeat, + names, + expression, + context == "local", + whitespace + ) + + # tal:condition + try: + clause = ns[TAL, 'condition'] + except KeyError: + CONDITION = skip + else: + expression = nodes.Value(clause) + CONDITION = partial(nodes.Condition, expression) + + # tal:switch + if switch is None: + SWITCH = skip + else: + SWITCH = partial(nodes.Cache, list(switch)) + + # i18n:domain + try: + clause = ns[I18N, 'domain'] + except KeyError: + DOMAIN = skip + else: + DOMAIN = partial(nodes.Domain, clause) + + # i18n:context + try: + clause = ns[I18N, 'context'] + except KeyError: + CONTEXT = skip + else: + CONTEXT = partial(nodes.TxContext, clause) + + # i18n:name + try: + clause = ns[I18N, 'name'] + except KeyError: + NAME = skip + else: + if not clause.strip(): + NAME = skip + else: + NAME = partial(nodes.Name, clause) + + # The "slot" node next is the first node level that can serve + # as a macro slot + slot = wrap( + inner, + DEFINE_SLOT, + DEFINE, + CASE, + CONDITION, + REPEAT, + SWITCH, + DOMAIN, + CONTEXT, + ) + + # metal:fill-slot + try: + clause = ns[METAL, 'fill-slot'] + except KeyError: + pass + else: + if not clause.strip(): + raise LanguageError( + "Must provide a non-trivial string for metal:fill-slot.", + clause + ) + + index = -(1 + int(bool(use_macro or extend_macro))) + + try: + slots = self._use_macro[index] + except IndexError: + raise LanguageError( + "Cannot use metal:fill-slot without metal:use-macro.", + clause + ) + + slots = self._use_macro[index] + slots.append(nodes.FillSlot(clause, slot)) + + # metal:define-macro + try: + clause = ns[METAL, 'define-macro'] + except KeyError: + pass + else: + self._macros[clause] = slot + slot = nodes.UseInternalMacro(clause) + + slot = wrap( + slot, + NAME + ) + + # tal:on-error + try: + clause = ns[TAL, 'on-error'] + except KeyError: + ON_ERROR = skip + else: + key, value = tal.parse_substitution(clause) + translate = True if ns.get((I18N, 'translate')) == '' else False + + fallback = self._make_content_node(value, None, key, translate) + + if omit is False and start['namespace'] not in self.DROP_NS: + start_tag = copy(start_tag) + + start_tag.attributes = nodes.Sequence( + start_tag.attributes.extract( + lambda attribute: + isinstance(attribute, nodes.Attribute) and + isinstance(attribute.expression, ast.Str) + ) + ) + + if end_tag is None: + # Make sure start-tag has opening suffix. We don't + # allow self-closing element here. + start_tag.suffix = ">" + + # Explicitly set end-tag. 
+                    end_tag = nodes.End(start_tag.name, '', '',)
+
+                fallback = nodes.Element(
+                    start_tag,
+                    end_tag,
+                    fallback,
+                    )
+
+            ON_ERROR = partial(nodes.OnError, fallback, 'error')
+
+        clause = ns.get((META, 'interpolation'))
+        if clause in ('false', 'off'):
+            INTERPOLATION = False
+        elif clause in ('true', 'on'):
+            INTERPOLATION = True
+        elif clause is None:
+            INTERPOLATION = self._interpolation[-1]
+        else:
+            raise LanguageError("Bad interpolation setting.", clause)
+
+        self._interpolation.append(INTERPOLATION)
+
+        # Visit content body
+        for child in children:
+            body.append(self.visit(*child))
+
+        self._switches.pop()
+        self._interpolation.pop()
+
+        if use_macro:
+            self._use_macro.pop()
+
+        return wrap(
+            slot,
+            ON_ERROR
+            )
+
+    def visit_start_tag(self, start):
+        return self.visit_element(start, None, [])
+
+    def visit_cdata(self, node):
+        if not self._interpolation[-1] or not '${' in node:
+            return nodes.Text(node)
+
+        expr = nodes.Substitution(node, ())
+        return nodes.Interpolation(expr, True, False)
+
+    def visit_comment(self, node):
+        if node.startswith('
 _d + + -test + + + -32 + $(BuildPath)python$(PyDebugExt).exe
@@ -129,17 +135,17 @@
 $([msbuild]::Multiply($(MicroVersionNumber), 1000)) )) )) + $([msbuild]::Add($(Field3Value), 9000)) python$(MajorVersionNumber)$(MinorVersionNumber)$(PyDebugExt) - .cp$(MajorVersionNumber)$(MinorVersionNumber)-win32 - .cp$(MajorVersionNumber)$(MinorVersionNumber)-win_amd64 + .cp$(MajorVersionNumber)$(MinorVersionNumber)-win32 + .cp$(MajorVersionNumber)$(MinorVersionNumber)-win_amd64 - $(MajorVersionNumber).$(MinorVersionNumber) - $(SysWinVer)-32 + $(MajorVersionNumber).$(MinorVersionNumber)$(PyArchExt)$(PyTestExt)
@@ -148,5 +154,7 @@
 + +
diff --git a/Tools/msi/build.bat b/Tools/msi/build.bat
--- a/Tools/msi/build.bat
+++ b/Tools/msi/build.bat
@@ -6,16 +6,18 @@
 set BUILDX86=
 set BUILDX64=
 set BUILDDOC=
-set BUILDPX=
+set BUILDTEST=--test-marker
 set BUILDPACK=
+set REBUILD=
 
 :CheckOpts
 if "%~1" EQU "-h" goto Help
 if "%~1" EQU "-x86" (set BUILDX86=1) && shift && goto CheckOpts
 if "%~1" EQU "-x64" (set BUILDX64=1) && shift && goto CheckOpts
 if "%~1" EQU "--doc" (set BUILDDOC=1) && shift && goto CheckOpts
-if "%~1" EQU "--test-marker" (set BUILDPX=1) && shift && goto CheckOpts
+if "%~1" EQU "--no-test-marker" (set BUILDTEST=) && shift && goto CheckOpts
 if "%~1" EQU "--pack" (set BUILDPACK=1) && shift && goto CheckOpts
+if "%~1" EQU "-r" (set REBUILD=-r) && shift && goto CheckOpts
 
 if not defined BUILDX86 if not defined BUILDX64 (set BUILDX86=1) && (set BUILDX64=1)
 
@@ -24,15 +26,15 @@
 call "%PCBUILD%env.bat" x86
 
 if defined BUILDX86 (
-    call "%PCBUILD%build.bat" -d -e
+    call "%PCBUILD%build.bat" -d -e %REBUILD% %BUILDTEST%
     if errorlevel 1 goto :eof
-    call "%PCBUILD%build.bat" -e
+    call "%PCBUILD%build.bat" -e %REBUILD% %BUILDTEST%
     if errorlevel 1 goto :eof
 )
 if defined BUILDX64 (
-    call "%PCBUILD%build.bat" -p x64 -d -e
+    call "%PCBUILD%build.bat" -p x64 -d -e %REBUILD% %BUILDTEST%
     if errorlevel 1 goto :eof
-    call "%PCBUILD%build.bat" -p x64 -e
+    call "%PCBUILD%build.bat" -p x64 -e %REBUILD% %BUILDTEST%
     if errorlevel 1 goto :eof
 )
 
@@ -42,12 +44,15 @@
 )
 
 set BUILD_CMD="%D%bundle\snapshot.wixproj"
-if defined BUILDPX (
+if defined BUILDTEST (
    set BUILD_CMD=%BUILD_CMD% /p:UseTestMarker=true
 )
 if defined BUILDPACK (
    set BUILD_CMD=%BUILD_CMD% /p:Pack=true
 )
+if defined REBUILD (
+   set BUILD_CMD=%BUILD_CMD% /t:Rebuild
+)
 
 if defined BUILDX86 (
    msbuild %BUILD_CMD%
@@ -61,10 +66,11 @@
 exit /B 0
 
 :Help
-echo build.bat [-x86] [-x64] [--doc] [-h] [--test-marker] [--pack]
+echo build.bat [-x86] [-x64] [--doc] [-h] [--no-test-marker] [--pack] [-r]
 echo.
 echo    -x86                Build x86 installers
 echo    -x64                Build x64 installers
 echo    --doc               Build CHM documentation
-echo    --test-marker       Build installers with 'x' markers
+echo    --no-test-marker    Build without test markers
 echo    --pack              Embed core MSIs into installer
+echo    -r                  Rebuild rather than incremental build

diff --git a/Tools/msi/bundle/Default.thm b/Tools/msi/bundle/Default.thm
--- a/Tools/msi/bundle/Default.thm
+++ b/Tools/msi/bundle/Default.thm
@@ -64,7 +64,7 @@
 #(loc.Include_testLabel)
 #(loc.Include_testHelpLabel)
- #(loc.Include_launcherLabel)
+ #(loc.Include_launcherLabel) #(loc.InstallLauncherAllUsersLabel)
 #(loc.Include_launcherHelpLabel)
diff --git a/Tools/msi/bundle/bundle.targets b/Tools/msi/bundle/bundle.targets
--- a/Tools/msi/bundle/bundle.targets
+++ b/Tools/msi/bundle/bundle.targets
@@ -88,8 +88,12 @@
 - - +
diff --git a/Tools/msi/bundle/bundle.wxs b/Tools/msi/bundle/bundle.wxs
--- a/Tools/msi/bundle/bundle.wxs
+++ b/Tools/msi/bundle/bundle.wxs
@@ -22,22 +22,26 @@
 - - + + + + + + - + - + - - - + + + VersionNT > 600 - - + + VersionNT > 600 - - + + VersionNT > 600 - + VersionNT > 600 - +
@@ -31,14 +31,14 @@
 VersionNT = 600 - + VersionNT = 600 - +

-- 
Repository URL: https://hg.python.org/cpython

From python-checkins at python.org Sat Oct 31 16:08:06 2015
From: python-checkins at python.org (steve.dower)
Date: Sat, 31 Oct 2015 20:08:06 +0000
Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?=
 =?utf-8?q?=29=3A_Merge_from_3=2E5?=
Message-ID: <20151031200806.100264.77039@psf.io>

https://hg.python.org/cpython/rev/9beeb4c1375d
changeset:   98919:9beeb4c1375d
parent:      98916:cc75b40a27ba
parent:      98918:df12c29882b1
user:        Steve Dower
date:        Sat Oct 31 13:07:44 2015 -0700
summary:
  Merge from 3.5

files:
  Misc/NEWS                             |   2 +
  PCbuild/build.bat                     |   3 +
  PCbuild/pyproject.props               |   4 +-
  PCbuild/python.props                  |  16 +++++-
  Tools/msi/build.bat                   |  24 ++++++---
  Tools/msi/bundle/Default.thm          |   2 +-
  Tools/msi/bundle/bundle.targets       |   8 ++-
  Tools/msi/bundle/bundle.wxs           |  27 ++++++++---
  Tools/msi/common.wxs                  |   4 +-
  Tools/msi/doc/doc.wxs                 |   3 +-
  Tools/msi/exe/exe.wxs                 |   3 +-
  Tools/msi/launcher/launcher_files.wxs |   8 +-
  Tools/msi/launcher/launcher_reg.wxs   |  34 +++++++-------
  Tools/msi/msi.props                   |   8 +--
  Tools/msi/msi.targets                 |   2 +-
  Tools/msi/tcltk/tcltk.wxs             |   6 +-
  Tools/msi/tcltk/tcltk_reg.wxs         |  16 +++---
 17 files changed, 103 insertions(+), 67 deletions(-)

diff --git a/Misc/NEWS b/Misc/NEWS
--- a/Misc/NEWS
+++ b/Misc/NEWS
@@ -519,6 +519,8 @@
 Windows
 -------
 
+- Issue #25450: Updates shortcuts to start Python in installation directory.
+
 - Issue #25164: Changes default all-users install directory to match per-user
   directory.
 
diff --git a/PCbuild/build.bat b/PCbuild/build.bat
--- a/PCbuild/build.bat
+++ b/PCbuild/build.bat
@@ -38,6 +38,7 @@
 echo.                  Set the platform (default: Win32)
 echo.  -t Build ^| Rebuild ^| Clean ^| CleanAll
 echo.                  Set the target manually
+echo.  --test-marker   Enable the test marker within the build.
 exit /b 127
 
 :Run
@@ -62,6 +63,7 @@
 if "%~1"=="-M" (set parallel=) & shift & goto CheckOpts
 if "%~1"=="-v" (set verbose=/v:n) & shift & goto CheckOpts
 if "%~1"=="-k" (set kill=true) & shift & goto CheckOpts
+if "%~1"=="--test-marker" (set UseTestMarker=true) & shift & goto CheckOpts
 if "%~1"=="-V" shift & goto Version
 rem These use the actual property names used by MSBuild.  We could just let
 rem them in through the environment, but we specify them on the command line
@@ -93,6 +95,7 @@
     /p:Configuration=%conf% /p:Platform=%platf%^
     /p:IncludeExternals=%IncludeExternals%^
     /p:IncludeSSL=%IncludeSSL% /p:IncludeTkinter=%IncludeTkinter%^
+    /p:UseTestMarker=%UseTestMarker%^
     %1 %2 %3 %4 %5 %6 %7 %8 %9
 
 @goto :eof

diff --git a/PCbuild/pyproject.props b/PCbuild/pyproject.props
--- a/PCbuild/pyproject.props
+++ b/PCbuild/pyproject.props
@@ -128,8 +128,8 @@
 - - + +
 _d + + -test + + + -32 + $(BuildPath)python$(PyDebugExt).exe
@@ -129,17 +135,17 @@
 $([msbuild]::Multiply($(MicroVersionNumber), 1000)) )) )) + $([msbuild]::Add($(Field3Value), 9000)) python$(MajorVersionNumber)$(MinorVersionNumber)$(PyDebugExt) - .cp$(MajorVersionNumber)$(MinorVersionNumber)-win32 - .cp$(MajorVersionNumber)$(MinorVersionNumber)-win_amd64 + .cp$(MajorVersionNumber)$(MinorVersionNumber)-win32 + .cp$(MajorVersionNumber)$(MinorVersionNumber)-win_amd64 - $(MajorVersionNumber).$(MinorVersionNumber) - $(SysWinVer)-32 + $(MajorVersionNumber).$(MinorVersionNumber)$(PyArchExt)$(PyTestExt)
@@ -148,5 +154,7 @@
 + +
diff --git a/Tools/msi/build.bat b/Tools/msi/build.bat
--- a/Tools/msi/build.bat
+++ b/Tools/msi/build.bat
@@ -6,16 +6,18 @@
 set BUILDX86=
 set BUILDX64=
 set BUILDDOC=
-set BUILDPX=
+set BUILDTEST=--test-marker
 set BUILDPACK=
+set REBUILD=
 
 :CheckOpts
 if "%~1" EQU "-h" goto Help
 if "%~1" EQU "-x86" (set BUILDX86=1) && shift && goto CheckOpts
 if "%~1" EQU "-x64" (set BUILDX64=1) && shift && goto CheckOpts
 if "%~1" EQU "--doc" (set BUILDDOC=1) && shift && goto CheckOpts
-if "%~1" EQU "--test-marker" (set BUILDPX=1) && shift && goto CheckOpts
+if "%~1" EQU "--no-test-marker" (set BUILDTEST=) && shift && goto CheckOpts
 if "%~1" EQU "--pack" (set BUILDPACK=1) && shift && goto CheckOpts
+if "%~1" EQU "-r" (set REBUILD=-r) && shift && goto CheckOpts
 
 if not defined BUILDX86 if not defined BUILDX64 (set BUILDX86=1) && (set BUILDX64=1)
 
@@ -24,15 +26,15 @@
 call "%PCBUILD%env.bat" x86
 
 if defined BUILDX86 (
-    call "%PCBUILD%build.bat" -d -e
+    call "%PCBUILD%build.bat" -d -e %REBUILD% %BUILDTEST%
     if errorlevel 1 goto :eof
-    call "%PCBUILD%build.bat" -e
+    call "%PCBUILD%build.bat" -e %REBUILD% %BUILDTEST%
     if errorlevel 1 goto :eof
 )
 if defined BUILDX64 (
-    call "%PCBUILD%build.bat" -p x64 -d -e
+    call "%PCBUILD%build.bat" -p x64 -d -e %REBUILD% %BUILDTEST%
     if errorlevel 1 goto :eof
-    call "%PCBUILD%build.bat" -p x64 -e
+    call "%PCBUILD%build.bat" -p x64 -e %REBUILD% %BUILDTEST%
     if errorlevel 1 goto :eof
 )
 
@@ -42,12 +44,15 @@
 )
 
 set BUILD_CMD="%D%bundle\snapshot.wixproj"
-if defined BUILDPX (
+if defined BUILDTEST (
    set BUILD_CMD=%BUILD_CMD% /p:UseTestMarker=true
 )
 if defined BUILDPACK (
    set BUILD_CMD=%BUILD_CMD% /p:Pack=true
 )
+if defined REBUILD (
+   set BUILD_CMD=%BUILD_CMD% /t:Rebuild
+)
 
 if defined BUILDX86 (
    msbuild %BUILD_CMD%
@@ -61,10 +66,11 @@
 exit /B 0
 
 :Help
-echo build.bat [-x86] [-x64] [--doc] [-h] [--test-marker] [--pack]
+echo build.bat [-x86] [-x64] [--doc] [-h] [--no-test-marker] [--pack] [-r]
 echo.
 echo    -x86                Build x86 installers
 echo    -x64                Build x64 installers
 echo    --doc               Build CHM documentation
-echo    --test-marker       Build installers with 'x' markers
+echo    --no-test-marker    Build without test markers
 echo    --pack              Embed core MSIs into installer
+echo    -r                  Rebuild rather than incremental build

diff --git a/Tools/msi/bundle/Default.thm b/Tools/msi/bundle/Default.thm
--- a/Tools/msi/bundle/Default.thm
+++ b/Tools/msi/bundle/Default.thm
@@ -64,7 +64,7 @@
 #(loc.Include_testLabel)
 #(loc.Include_testHelpLabel)
- #(loc.Include_launcherLabel)
+ #(loc.Include_launcherLabel) #(loc.InstallLauncherAllUsersLabel)
 #(loc.Include_launcherHelpLabel)
diff --git a/Tools/msi/bundle/bundle.targets b/Tools/msi/bundle/bundle.targets
--- a/Tools/msi/bundle/bundle.targets
+++ b/Tools/msi/bundle/bundle.targets
@@ -88,8 +88,12 @@
 - - +
diff --git a/Tools/msi/bundle/bundle.wxs b/Tools/msi/bundle/bundle.wxs
--- a/Tools/msi/bundle/bundle.wxs
+++ b/Tools/msi/bundle/bundle.wxs
@@ -22,22 +22,26 @@
 - - + + + + + + - + - + - - - + + + VersionNT > 600 - - + + VersionNT > 600 - - + + VersionNT > 600 - + VersionNT > 600 - +
@@ -31,14 +31,14 @@
 VersionNT = 600 - + VersionNT = 600 - +

-- 
Repository URL: https://hg.python.org/cpython

From python-checkins at python.org Sat Oct 31 17:55:25 2015
From: python-checkins at python.org (berker.peksag)
Date: Sat, 31 Oct 2015 21:55:25 +0000
Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E5=29=3A_Fix_typo_notic?=
 =?utf-8?q?ed_by_Alec_Nunn?=
Message-ID: <20151031215525.24889.70629@psf.io>

https://hg.python.org/cpython/rev/f884590ee620
changeset:   98920:f884590ee620
branch:      3.5
parent:      98918:df12c29882b1
user:        Berker Peksag
date:        Sun Nov 01 00:55:12 2015 +0300
summary:
  Fix typo noticed by Alec Nunn

files:
  Doc/library/readline.rst |  2 +-
 1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/Doc/library/readline.rst b/Doc/library/readline.rst
--- a/Doc/library/readline.rst
+++ b/Doc/library/readline.rst
@@ -222,7 +222,7 @@
 
    import atexit
    import os
-   import realine
+   import readline
 
    histfile = os.path.join(os.path.expanduser("~"), ".python_history")
    try:

-- 
Repository URL: https://hg.python.org/cpython

From python-checkins at python.org Sat Oct 31 17:55:25 2015
From: python-checkins at python.org (berker.peksag)
Date: Sat, 31 Oct 2015 21:55:25 +0000
Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?=
 =?utf-8?q?=29=3A_Fix_typo_noticed_by_Alec_Nunn?=
Message-ID: <20151031215525.24895.47743@psf.io>

https://hg.python.org/cpython/rev/f7db966c9fee
changeset:   98921:f7db966c9fee
parent:      98919:9beeb4c1375d
parent:      98920:f884590ee620
user:        Berker Peksag
date:        Sun Nov 01 00:55:31 2015 +0300
summary:
  Fix typo noticed by Alec Nunn

files:
  Doc/library/readline.rst |  2 +-
 1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/Doc/library/readline.rst b/Doc/library/readline.rst
--- a/Doc/library/readline.rst
+++ b/Doc/library/readline.rst
@@ -222,7 +222,7 @@
 
    import atexit
    import os
-   import realine
+   import readline
 
    histfile = os.path.join(os.path.expanduser("~"), ".python_history")
    try:

-- 
Repository URL: https://hg.python.org/cpython
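For context, the hunk above only fixes the import name in the history-file example of the readline documentation; the module itself is unchanged. A minimal, self-contained version of that pattern (a sketch: the body of the try block and the atexit registration are assumptions here, since the quoted hunk cuts off after "try:") looks like this:

    import atexit
    import os
    import readline

    histfile = os.path.join(os.path.expanduser("~"), ".python_history")
    try:
        # Load any previously saved interactive history.
        readline.read_history_file(histfile)
        # Default history length is -1 (unlimited); cap it so the file stays small.
        readline.set_history_length(1000)
    except FileNotFoundError:
        # First run: no history file yet.
        pass

    # Write the (possibly updated) history back out when the interpreter exits.
    atexit.register(readline.write_history_file, histfile)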