[Python-checkins] python/dist/src/Parser tokenizer.c,2.61,2.62

loewis@users.sourceforge.net
Wed, 07 Aug 2002 05:33:20 -0700


Update of /cvsroot/python/python/dist/src/Parser
In directory usw-pr-cvs1:/tmp/cvs-serv1896/Parser

Modified Files:
	tokenizer.c 
Log Message:
Fix the PEP 263 code for --without-unicode builds. Fixes #591943.
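
The change wraps the Unicode-dependent parts of the coding-spec handling in
#ifdef Py_USING_UNICODE blocks, so a --without-unicode build compiles cleanly
and simply skips the decoding step. The fragment below is a minimal standalone
sketch of that conditional-compilation pattern only; the function name
handle_coding_spec and the printf placeholder are illustrative and do not
appear in tokenizer.c.

#include <stdio.h>

/* Normally Py_USING_UNICODE comes from pyconfig.h: defined for a regular
   build, left undefined when Python is configured --without-unicode.
   It is defined here only so the sketch is self-contained; comment it
   out to simulate a non-Unicode build. */
#define Py_USING_UNICODE

/* Illustrative stand-in for the coding-spec handling in the tokenizer. */
static int
handle_coding_spec(const char *cs)
{
#ifdef Py_USING_UNICODE
	/* With Unicode support, the declared encoding selects a decoder
	   that recodes the source to UTF-8 before tokenizing. */
	printf("would set up a decoder for '%s'\n", cs);
	return 1;
#else
	/* Without Unicode support there can be no Unicode literals, so
	   the coding spec can safely be ignored rather than rejected. */
	(void)cs;
	return 1;
#endif
}

int
main(void)
{
	return handle_coding_spec("iso-8859-1") ? 0 : 1;
}

The same guard is applied below to fp_readl() and translate_into_utf8(),
which exist only to feed decoded UTF-8 to the tokenizer and are unreachable
in a non-Unicode build.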


Index: tokenizer.c
===================================================================
RCS file: /cvsroot/python/python/dist/src/Parser/tokenizer.c,v
retrieving revision 2.61
retrieving revision 2.62
diff -C2 -d -r2.61 -r2.62
*** tokenizer.c	5 Aug 2002 14:14:05 -0000	2.61
--- tokenizer.c	7 Aug 2002 12:33:18 -0000	2.62
***************
*** 257,260 ****
--- 257,261 ----
  				tok->encoding = cs;
  			} else {
+ #ifdef Py_USING_UNICODE
  				r = set_readline(tok, cs);
  				if (r) {
***************
*** 262,265 ****
--- 263,272 ----
  					tok->decoding_state = -1;
  				}
+ #else
+                                 /* Without Unicode support, we cannot
+                                    process the coding spec. Since there
+                                    won't be any Unicode literals, that
+                                    won't matter. */
+ #endif
  			}
  		} else {	/* then, compare cs with BOM */
***************
*** 318,321 ****
--- 325,332 ----
  fp_readl(char *s, int size, struct tok_state *tok)
  {
+ #ifndef Py_USING_UNICODE
+ 	/* In a non-Unicode build, this should never be called. */
+ 	abort();
+ #else
  	PyObject* utf8;
  	PyObject* buf = tok->decoding_buffer;
***************
*** 339,342 ****
--- 350,354 ----
  		return s;
  	}
+ #endif
  }
  
***************
*** 488,491 ****
--- 500,504 ----
     C byte string STR, which is encoded with ENC. */
  
+ #ifdef Py_USING_UNICODE
  static PyObject *
  translate_into_utf8(const char* str, const char* enc) {
***************
*** 498,501 ****
--- 511,515 ----
  	return utf8;
  }
+ #endif
  
  /* Decode a byte string STR for use as the buffer of TOK.
***************
*** 515,518 ****
--- 529,533 ----
  	str = tok->str;		/* string after BOM if any */
  	assert(str);
+ #ifdef Py_USING_UNICODE
  	if (tok->enc != NULL) {
  		utf8 = translate_into_utf8(str, tok->enc);
***************
*** 521,524 ****
--- 536,540 ----
  		str = PyString_AsString(utf8);
  	}
+ #endif
  	for (s = str;; s++) {
  		if (*s == '\0') break;
***************
*** 531,534 ****
--- 547,551 ----
  	if (!check_coding_spec(str, s - str, tok, buf_setreadl))
  		return NULL;
+ #ifdef Py_USING_UNICODE
  	if (tok->enc != NULL) {
  		assert(utf8 == NULL);
***************
*** 538,541 ****
--- 555,559 ----
  		str = PyString_AsString(utf8);
  	}
+ #endif
  	assert(tok->decoding_buffer == NULL);
  	tok->decoding_buffer = utf8; /* CAUTION */