[Python-checkins] CVS: python/dist/src/Lib tokenize.py,1.16,1.17

Ka-Ping Yee <ping@users.sourceforge.net>
Mon, 15 Jan 2001 14:04:32 -0800


Update of /cvsroot/python/python/dist/src/Lib
In directory usw-pr-cvs1:/tmp/cvs-serv29727

Modified Files:
	tokenize.py 
Log Message:
Add tokenizer support and tests for u'', U"", uR'', Ur"", etc.


Index: tokenize.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/tokenize.py,v
retrieving revision 1.16
retrieving revision 1.17
diff -C2 -r1.16 -r1.17
*** tokenize.py	2001/01/15 03:26:36	1.16
--- tokenize.py	2001/01/15 22:04:30	1.17
***************
*** 55,62 ****
  # Tail end of """ string.
  Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
! Triple = group("[rR]?'''", '[rR]?"""')
  # Single-line ' or " string.
! String = group(r"[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
!                r'[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
  
  # Because of leftmost-then-longest match semantics, be sure to put the
--- 55,62 ----
  # Tail end of """ string.
  Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
! Triple = group("[uU]?[rR]?'''", '[uU]?[rR]?"""')
  # Single-line ' or " string.
! String = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
!                r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
  
  # Because of leftmost-then-longest match semantics, be sure to put the
***************
*** 75,80 ****
  
  # First (or only) line of ' or " string.
! ContStr = group(r"[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" + group("'", r'\\\r?\n'),
!                 r'[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' + group('"', r'\\\r?\n'))
  PseudoExtras = group(r'\\\r?\n', Comment, Triple)
  PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
--- 75,82 ----
  
  # First (or only) line of ' or " string.
! ContStr = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
!                 group("'", r'\\\r?\n'),
!                 r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
!                 group('"', r'\\\r?\n'))
  PseudoExtras = group(r'\\\r?\n', Comment, Triple)
  PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
***************
*** 85,89 ****
              "'''": single3prog, '"""': double3prog,
              "r'''": single3prog, 'r"""': double3prog,
!             "R'''": single3prog, 'R"""': double3prog, 'r': None, 'R': None}
  
  tabsize = 8
--- 87,98 ----
              "'''": single3prog, '"""': double3prog,
              "r'''": single3prog, 'r"""': double3prog,
!             "u'''": single3prog, 'u"""': double3prog,
!             "ur'''": single3prog, 'ur"""': double3prog,
!             "R'''": single3prog, 'R"""': double3prog,
!             "U'''": single3prog, 'U"""': double3prog,
!             "uR'''": single3prog, 'uR"""': double3prog,
!             "Ur'''": single3prog, 'Ur"""': double3prog,
!             "UR'''": single3prog, 'UR"""': double3prog,
!             'r': None, 'R': None, 'u': None, 'U': None}
  
  tabsize = 8
***************
*** 173,177 ****
                      tokeneater(COMMENT, token, spos, epos, line)
                  elif token in ("'''", '"""',               # triple-quoted
!                                "r'''", 'r"""', "R'''", 'R"""'):
                      endprog = endprogs[token]
                      endmatch = endprog.match(line, pos)
--- 182,189 ----
                      tokeneater(COMMENT, token, spos, epos, line)
                  elif token in ("'''", '"""',               # triple-quoted
!                                "r'''", 'r"""', "R'''", 'R"""',
!                                "u'''", 'u"""', "U'''", 'U"""',
!                                "ur'''", 'ur"""', "Ur'''", 'Ur"""',
!                                "uR'''", 'uR"""', "UR'''", 'UR"""'):
                      endprog = endprogs[token]
                      endmatch = endprog.match(line, pos)
***************
*** 186,193 ****
                          break
                  elif initial in ("'", '"') or \
!                     token[:2] in ("r'", 'r"', "R'", 'R"'):
                      if token[-1] == '\n':                  # continued string
                          strstart = (lnum, start)
!                         endprog = endprogs[initial] or endprogs[token[1]]
                          contstr, needcont = line[start:], 1
                          contline = line
--- 198,209 ----
                          break
                  elif initial in ("'", '"') or \
!                     token[:2] in ("r'", 'r"', "R'", 'R"',
!                                   "u'", 'u"', "U'", 'U"') or \
!                     token[:3] in ("ur'", 'ur"', "Ur'", 'Ur"',
!                                   "uR'", 'uR"', "UR'", 'UR"' ):
                      if token[-1] == '\n':                  # continued string
                          strstart = (lnum, start)
!                         endprog = (endprogs[initial] or endprogs[token[1]] or
!                                    endprogs[token[2]])
                          contstr, needcont = line[start:], 1
                          contline = line