[Python-checkins] cpython: Basic support for PEP 414 without docs or tests.

armin.ronacher python-checkins at python.org
Sun Mar 4 13:35:01 CET 2012


http://hg.python.org/cpython/rev/ed9497d6a216
changeset:   75381:ed9497d6a216
user:        Armin Ronacher <armin.ronacher at active-4.com>
date:        Sun Mar 04 12:04:06 2012 +0000
summary:
  Basic support for PEP 414 without docs or tests.

files:
  Lib/tokenize.py    |  30 ++++++++++++++++++++++--------
  Parser/tokenizer.c |  10 +++++++---
  Python/ast.c       |   3 +++
  3 files changed, 32 insertions(+), 11 deletions(-)

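PEP 414 reintroduces the 2.x-style u'' prefix on str literals in Python 3.3 as a no-op, purely to ease porting of existing 2.x code. A minimal sketch of what this change enables (assuming the prefix is accepted and ignored, as the patch below implements):

    # After this change, the legacy 2.x prefix parses again and has no effect:
    s = u'hello'
    assert isinstance(s, str) and s == 'hello'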

diff --git a/Lib/tokenize.py b/Lib/tokenize.py
--- a/Lib/tokenize.py
+++ b/Lib/tokenize.py
@@ -135,10 +135,10 @@
 Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
 # Tail end of """ string.
 Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
-Triple = group("[bB]?[rR]?'''", '[bB]?[rR]?"""')
+Triple = group("[bBuU]?[rR]?'''", '[bBuU]?[rR]?"""')
 # Single-line ' or " string.
-String = group(r"[bB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
-               r'[bB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
+String = group(r"[bBuU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
+               r'[bBuU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
 
 # Because of leftmost-then-longest match semantics, be sure to put the
 # longest operators first (e.g., if = came before ==, == would get
@@ -156,9 +156,9 @@
 Token = Ignore + PlainToken
 
 # First (or only) line of ' or " string.
-ContStr = group(r"[bB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
+ContStr = group(r"[bBuU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
                 group("'", r'\\\r?\n'),
-                r'[bB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
+                r'[bBuU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
                 group('"', r'\\\r?\n'))
 PseudoExtras = group(r'\\\r?\n', Comment, Triple)
 PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
@@ -176,21 +176,35 @@
            "bR'''": Single3, 'bR"""': Double3,
            "Br'''": Single3, 'Br"""': Double3,
            "BR'''": Single3, 'BR"""': Double3,
-           'r': None, 'R': None, 'b': None, 'B': None}
+           "u'''": Single3, 'u"""': Double3,
+           "ur'''": Single3, 'ur"""': Double3,
+           "R'''": Single3, 'R"""': Double3,
+           "U'''": Single3, 'U"""': Double3,
+           "uR'''": Single3, 'uR"""': Double3,
+           "Ur'''": Single3, 'Ur"""': Double3,
+           "UR'''": Single3, 'UR"""': Double3,
+           'r': None, 'R': None, 'b': None, 'B': None,
+           'u': None, 'U': None}
 
 triple_quoted = {}
 for t in ("'''", '"""',
           "r'''", 'r"""', "R'''", 'R"""',
           "b'''", 'b"""', "B'''", 'B"""',
           "br'''", 'br"""', "Br'''", 'Br"""',
-          "bR'''", 'bR"""', "BR'''", 'BR"""'):
+          "bR'''", 'bR"""', "BR'''", 'BR"""',
+          "u'''", 'u"""', "U'''", 'U"""',
+          "ur'''", 'ur"""', "Ur'''", 'Ur"""',
+          "uR'''", 'uR"""', "UR'''", 'UR"""'):
     triple_quoted[t] = t
 single_quoted = {}
 for t in ("'", '"',
           "r'", 'r"', "R'", 'R"',
           "b'", 'b"', "B'", 'B"',
           "br'", 'br"', "Br'", 'Br"',
-          "bR'", 'bR"', "BR'", 'BR"' ):
+          "bR'", 'bR"', "BR'", 'BR"' ,
+          "u'", 'u"', "U'", 'U"',
+          "ur'", 'ur"', "Ur'", 'Ur"',
+          "uR'", 'uR"', "UR'", 'UR"' ):
     single_quoted[t] = t
 
 tabsize = 8
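
The tokenize.py hunk above only widens the prefix letters accepted by the regexes and the triple-/single-quote prefix tables. A quick, hedged way to check the effect from Python is to run the pure-Python tokenizer over a u-prefixed literal and confirm it comes back as a single STRING token rather than a NAME followed by a string:

    import io
    import tokenize

    source = b"x = u'spam'\n"
    for tok in tokenize.tokenize(io.BytesIO(source).readline):
        if tok.type == tokenize.STRING:
            print(tok.string)   # expected: u'spam'
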
diff --git a/Parser/tokenizer.c b/Parser/tokenizer.c
--- a/Parser/tokenizer.c
+++ b/Parser/tokenizer.c
@@ -1412,11 +1412,15 @@
     /* Identifier (most frequent token!) */
     nonascii = 0;
     if (is_potential_identifier_start(c)) {
-        /* Process b"", r"", br"" and rb"" */
-        int saw_b = 0, saw_r = 0;
+        /* Process b"", r"", u"", br"", rb"" and ur"" */
+        int saw_b = 0, saw_r = 0, saw_u = 0;
         while (1) {
-            if (!saw_b && (c == 'b' || c == 'B'))
+            if (!(saw_b || saw_u) && (c == 'b' || c == 'B'))
                 saw_b = 1;
+            /* Since this is a backwards-compatibility literal, we don't want to
+               support arbitrary prefix orderings the way byte literals allow. */
+            else if (!(saw_b || saw_u || saw_r) && (c == 'u' || c == 'U'))
+                saw_u = 1;
             else if (!saw_r && (c == 'r' || c == 'R'))
                 saw_r = 1;
             else
diff --git a/Python/ast.c b/Python/ast.c
--- a/Python/ast.c
+++ b/Python/ast.c
@@ -3796,6 +3796,9 @@
                 quote = *++s;
                 *bytesmode = 1;
             }
+            else if (quote == 'u' || quote == 'U') {
+                quote = *++s;
+            }
             else if (quote == 'r' || quote == 'R') {
                 quote = *++s;
                 rawmode = 1;
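
In ast.c's string-literal parser (parsestr), the new branch just consumes the 'u'/'U' character without setting bytesmode or rawmode, so a u-prefixed literal should compile to exactly the same AST as the bare literal. A small hedged check:

    import ast

    # Expected to print True once the prefix is accepted and ignored.
    print(ast.dump(ast.parse("u'abc'")) == ast.dump(ast.parse("'abc'")))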

-- 
Repository URL: http://hg.python.org/cpython
