[Python-checkins] cpython (merge 3.3 -> default): Issue #18873: The tokenize module, IDLE, 2to3, and the findnocoding.py script

serhiy.storchaka python-checkins at python.org
Mon Sep 16 23:05:26 CEST 2013


http://hg.python.org/cpython/rev/6b747ad4a99a
changeset:   85732:6b747ad4a99a
parent:      85730:46c1c2b34e2b
parent:      85731:2dfe8262093c
user:        Serhiy Storchaka <storchaka at gmail.com>
date:        Mon Sep 16 23:57:00 2013 +0300
summary:
  Issue #18873: The tokenize module, IDLE, 2to3, and the findnocoding.py script
now detect Python source code encoding only in comment lines.

files:
  Lib/idlelib/IOBinding.py                               |  16 +++++----
  Lib/lib2to3/pgen2/tokenize.py                          |   9 ++---
  Lib/lib2to3/tests/data/false_encoding.py               |   2 +
  Lib/lib2to3/tests/test_refactor.py                     |   4 ++
  Lib/test/test_importlib/source/test_source_encoding.py |   6 +-
  Lib/test/test_tokenize.py                              |   7 ++++
  Lib/tokenize.py                                        |   8 ++--
  Misc/NEWS                                              |  12 +++++++
  Tools/scripts/findnocoding.py                          |   6 +-
  9 files changed, 48 insertions(+), 22 deletions(-)
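
The heart of the change is the same in all four places: the loose cookie
regex, which search()/findall() could match anywhere on a line (including
inside string literals), is replaced by an anchored pattern that only
matches when the line itself is a comment. A minimal standalone sketch
contrasting the two (the names old_re/new_re are illustrative, not from
the patch):

    import re

    old_re = re.compile(r"coding[:=]\s*([-\w.]+)")                           # pre-patch
    new_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)  # post-patch

    line = 'print("#coding=fake")'          # cookie-shaped text inside a string literal
    print(old_re.search(line).group(1))     # -> 'fake'  (false positive)
    print(new_re.match(line))               # -> None    (not a comment line)

    comment = '# -*- coding: koi8-r -*-'
    print(new_re.match(comment).group(1))   # -> 'koi8-r'

The added re.ASCII flag restricts \w to ASCII word characters, since
encoding names are plain ASCII.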


diff --git a/Lib/idlelib/IOBinding.py b/Lib/idlelib/IOBinding.py
--- a/Lib/idlelib/IOBinding.py
+++ b/Lib/idlelib/IOBinding.py
@@ -63,7 +63,7 @@
 encoding = locale_encoding  ### KBK 07Sep07  This is used all over IDLE, check!
                             ### 'encoding' is used below in encode(), check!
 
-coding_re = re.compile("coding[:=]\s*([-\w_.]+)")
+coding_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)
 
 def coding_spec(data):
     """Return the encoding declaration according to PEP 263.
@@ -84,14 +84,16 @@
         lines = data
     # consider only the first two lines
     if '\n' in lines:
-        lst = lines.split('\n')[:2]
+        lst = lines.split('\n', 2)[:2]
     elif '\r' in lines:
-        lst = lines.split('\r')[:2]
+        lst = lines.split('\r', 2)[:2]
     else:
-        lst = list(lines)
-    str = '\n'.join(lst)
-    match = coding_re.search(str)
-    if not match:
+        lst = [lines]
+    for line in lst:
+        match = coding_re.match(line)
+        if match is not None:
+            break
+    else:
         return None
     name = match.group(1)
     try:
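
The IOBinding rewrite also fixes a latent bug: for input containing no
newline at all, the old code did lst = list(lines), splitting the text
into individual characters before the joined search, so a cookie on a
lone line could never match. The new code checks each of the first two
physical lines with coding_re.match(), using for/else so that falling
off the loop means "no cookie found". A rough standalone equivalent of
the new logic for '\n' line endings (first_two_line_cookie is a
hypothetical helper, not IDLE code):

    import re

    coding_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)

    def first_two_line_cookie(text):
        # PEP 263: only the first two lines may carry the declaration.
        for line in text.split('\n', 2)[:2]:
            match = coding_re.match(line)
            if match is not None:
                return match.group(1)
        return None

    print(first_two_line_cookie('#!/usr/bin/env python\n# coding: latin-1\n'))  # latin-1
    print(first_two_line_cookie("print('#coding=0')\n"))                        # None
    print(first_two_line_cookie('x = 1\ny = 2\n# coding: latin-1\n'))           # None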
diff --git a/Lib/lib2to3/pgen2/tokenize.py b/Lib/lib2to3/pgen2/tokenize.py
--- a/Lib/lib2to3/pgen2/tokenize.py
+++ b/Lib/lib2to3/pgen2/tokenize.py
@@ -236,7 +236,7 @@
                 startline = False
             toks_append(tokval)
 
-cookie_re = re.compile("coding[:=]\s*([-\w.]+)")
+cookie_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)
 
 def _get_normal_name(orig_enc):
     """Imitates get_normal_name in tokenizer.c."""
@@ -281,11 +281,10 @@
             line_string = line.decode('ascii')
         except UnicodeDecodeError:
             return None
-
-        matches = cookie_re.findall(line_string)
-        if not matches:
+        match = cookie_re.match(line_string)
+        if not match:
             return None
-        encoding = _get_normal_name(matches[0])
+        encoding = _get_normal_name(match.group(1))
         try:
             codec = lookup(encoding)
         except LookupError:
diff --git a/Lib/lib2to3/tests/data/false_encoding.py b/Lib/lib2to3/tests/data/false_encoding.py
new file mode 100644
--- /dev/null
+++ b/Lib/lib2to3/tests/data/false_encoding.py
@@ -0,0 +1,2 @@
+#!/usr/bin/env python
+print '#coding=0'
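
The new fixture is deliberately a Python 2 file whose only cookie-shaped
text sits inside a string literal. With the old findall()-based
detection, lib2to3 would have extracted "0" as the file's encoding and
the subsequent codec lookup would have failed; the test added below
checks that the file now refactors cleanly. A quick demonstration of the
old false positive (standalone, not patch code):

    import re

    old_cookie_re = re.compile(r"coding[:=]\s*([-\w.]+)")
    print(old_cookie_re.findall("print '#coding=0'"))  # ['0'] -- bogus encoding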
diff --git a/Lib/lib2to3/tests/test_refactor.py b/Lib/lib2to3/tests/test_refactor.py
--- a/Lib/lib2to3/tests/test_refactor.py
+++ b/Lib/lib2to3/tests/test_refactor.py
@@ -271,6 +271,10 @@
         fn = os.path.join(TEST_DATA_DIR, "different_encoding.py")
         self.check_file_refactoring(fn)
 
+    def test_false_file_encoding(self):
+        fn = os.path.join(TEST_DATA_DIR, "false_encoding.py")
+        data = self.check_file_refactoring(fn)
+
     def test_bom(self):
         fn = os.path.join(TEST_DATA_DIR, "bom.py")
         data = self.check_file_refactoring(fn)
diff --git a/Lib/test/test_importlib/source/test_source_encoding.py b/Lib/test/test_importlib/source/test_source_encoding.py
--- a/Lib/test/test_importlib/source/test_source_encoding.py
+++ b/Lib/test/test_importlib/source/test_source_encoding.py
@@ -10,7 +10,7 @@
 import unittest
 
 
-CODING_RE = re.compile(r'coding[:=]\s*([-\w.]+)')
+CODING_RE = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)
 
 
 class EncodingTest(unittest.TestCase):
@@ -41,7 +41,7 @@
 
     def create_source(self, encoding):
         encoding_line = "# coding={0}".format(encoding)
-        assert CODING_RE.search(encoding_line)
+        assert CODING_RE.match(encoding_line)
         source_lines = [encoding_line.encode('utf-8')]
         source_lines.append(self.source_line.encode(encoding))
         return b'\n'.join(source_lines)
@@ -50,7 +50,7 @@
         # Make sure that an encoding that has never been a standard one for
         # Python works.
         encoding_line = "# coding=koi8-r"
-        assert CODING_RE.search(encoding_line)
+        assert CODING_RE.match(encoding_line)
         source = "{0}\na=42\n".format(encoding_line).encode("koi8-r")
         self.run_test(source)
 
diff --git a/Lib/test/test_tokenize.py b/Lib/test/test_tokenize.py
--- a/Lib/test/test_tokenize.py
+++ b/Lib/test/test_tokenize.py
@@ -946,6 +946,13 @@
         readline = self.get_readline((b'# coding: bad\n',))
         self.assertRaises(SyntaxError, detect_encoding, readline)
 
+    def test_false_encoding(self):
+        # Issue 18873: "Encoding" detected in non-comment lines
+        readline = self.get_readline((b'print("#coding=fake")',))
+        encoding, consumed_lines = detect_encoding(readline)
+        self.assertEqual(encoding, 'utf-8')
+        self.assertEqual(consumed_lines, [b'print("#coding=fake")'])
+
     def test_open(self):
         filename = support.TESTFN + '.py'
         self.addCleanup(support.unlink, filename)
diff --git a/Lib/tokenize.py b/Lib/tokenize.py
--- a/Lib/tokenize.py
+++ b/Lib/tokenize.py
@@ -31,7 +31,7 @@
 from codecs import lookup, BOM_UTF8
 import collections
 from io import TextIOWrapper
-cookie_re = re.compile("coding[:=]\s*([-\w.]+)")
+cookie_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)
 
 import token
 __all__ = token.__all__ + ["COMMENT", "tokenize", "detect_encoding",
@@ -372,10 +372,10 @@
                 msg = '{} for {!r}'.format(msg, filename)
             raise SyntaxError(msg)
 
-        matches = cookie_re.findall(line_string)
-        if not matches:
+        match = cookie_re.match(line_string)
+        if not match:
             return None
-        encoding = _get_normal_name(matches[0])
+        encoding = _get_normal_name(match.group(1))
         try:
             codec = lookup(encoding)
         except LookupError:
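
The visible effect on the public tokenize API, as a standalone check
(not part of the patch):

    import io
    import tokenize

    # Cookie-shaped text inside a string literal is no longer detected.
    enc, consumed = tokenize.detect_encoding(
        io.BytesIO(b'print("#coding=fake")\n').readline)
    print(enc)  # 'utf-8' (the default)

    # A real PEP 263 comment cookie still works.
    enc, consumed = tokenize.detect_encoding(
        io.BytesIO(b'# -*- coding: latin-1 -*-\nx = 1\n').readline)
    print(enc)  # 'iso-8859-1' (the normalized name for latin-1)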
diff --git a/Misc/NEWS b/Misc/NEWS
--- a/Misc/NEWS
+++ b/Misc/NEWS
@@ -12,6 +12,9 @@
 Library
 -------
 
+- Issue #18873: The tokenize module now detects Python source code encoding
+  only in comment lines.
+
 - Issue #17764: Enable http.server to bind to a user specified network
   interface.  Patch contributed by Malte Swart.
 
@@ -47,6 +50,9 @@
 IDLE
 ----
 
+- Issue #18873: IDLE now detects Python source code encoding only in comment
+  lines.
+
 - Issue #18988: The "Tab" key now works when a word is already autocompleted.
 
 Documentation
@@ -55,6 +61,12 @@
 - Issue #17003: Unified the size argument names in the io module with common
   practice.
 
+Tools/Demos
+-----------
+
+- Issue #18873: 2to3 and the findnocoding.py script now detect Python source
+  code encoding only in comment lines.
+
 
 What's New in Python 3.4.0 Alpha 2?
 ===================================
diff --git a/Tools/scripts/findnocoding.py b/Tools/scripts/findnocoding.py
--- a/Tools/scripts/findnocoding.py
+++ b/Tools/scripts/findnocoding.py
@@ -32,13 +32,13 @@
                          "no sophisticated Python source file search will be done.", file=sys.stderr)
 
 
-decl_re = re.compile(rb"coding[=:]\s*([-\w.]+)")
+decl_re = re.compile(rb'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)')
 
 def get_declaration(line):
-    match = decl_re.search(line)
+    match = decl_re.match(line)
     if match:
         return match.group(1)
-    return ''
+    return b''
 
 def has_correct_encoding(text, codec):
     try:
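
findnocoding.py scans raw bytes, so its pattern stays a bytes regex (no
re.ASCII needed: \w is ASCII-only in bytes patterns), and the no-match
return value becomes b'' so the function always returns bytes. A quick
standalone check of the patched helper:

    import re

    decl_re = re.compile(rb'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)')

    def get_declaration(line):
        match = decl_re.match(line)
        if match:
            return match.group(1)
        return b''

    print(get_declaration(b'# -*- coding: utf-8 -*-\n'))  # b'utf-8'
    print(get_declaration(b"print '#coding=0'\n"))        # b'' (not a comment line)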

-- 
Repository URL: http://hg.python.org/cpython

