[Python-checkins] CVS: python/dist/src/Tools/unicode makeunicodedata.py,1.9,1.10
Fredrik Lundh
effbot@users.sourceforge.net
Sun, 21 Jan 2001 14:41:10 -0800
Update of /cvsroot/python/python/dist/src/Tools/unicode
In directory usw-pr-cvs1:/tmp/cvs-serv4331/Tools/unicode
Modified Files:
makeunicodedata.py
Log Message:
compress unicode decomposition tables (this saves another 55k)
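
[Editor's note, not part of the checkin: the patch below packs each decomposition
into the flat decomp_data array as one header word, prefix-tag index in the low
byte and code-point count in the high bits (prefix + (len << 8)), followed by the
code points themselves. A minimal decoding sketch, assuming the decomp_data /
decomp_prefix / decomp_index arrays built by the new script; the function name
and signature are invented for illustration only:

    def decode_decomposition(char, decomp_data, decomp_prefix, decomp_index):
        # A zero index means "no decomposition" for this character.
        i = decomp_index[char]
        if not i:
            return None
        # Header word: prefix tag in the low byte, length in the high bits.
        header = decomp_data[i]
        prefix = decomp_prefix[header & 0xFF]     # e.g. "<compat>" or ""
        count = header >> 8
        codepoints = decomp_data[i + 1:i + 1 + count]
        fields = ["%04X" % cp for cp in codepoints]
        if prefix:
            fields.insert(0, prefix)
        # Same shape as field 5 of UnicodeData.txt, e.g. "<compat> 0020 0308".
        return " ".join(fields)
]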
Index: makeunicodedata.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Tools/unicode/makeunicodedata.py,v
retrieving revision 1.9
retrieving revision 1.10
diff -C2 -r1.9 -r1.10
*** makeunicodedata.py 2001/01/21 17:01:31 1.9
--- makeunicodedata.py 2001/01/21 22:41:08 1.10
***************
*** 13,18 ****
# 2000-11-03 fl expand first/last ranges
# 2001-01-19 fl added character name tables (2.1)
#
! # written by Fredrik Lundh (fredrik@pythonware.com), September 2000
#
--- 13,19 ----
# 2000-11-03 fl expand first/last ranges
# 2001-01-19 fl added character name tables (2.1)
+ # 2001-01-21 fl added decomp compression; dynamic phrasebook threshold
#
! # written by Fredrik Lundh (fredrik@pythonware.com)
#
***************
*** 51,57 ****
print len(filter(None, unicode.table)), "characters"
makeunicodedata(unicode, trace)
! makeunicodetype(unicode, trace)
! makeunicodename(unicode, trace)
# --------------------------------------------------------------------
--- 52,58 ----
print len(filter(None, unicode.table)), "characters"
+ # makeunicodename(unicode, trace)
makeunicodedata(unicode, trace)
! # makeunicodetype(unicode, trace)
# --------------------------------------------------------------------
***************
*** 91,99 ****
# 2) decomposition data
! # FIXME: <fl> using the encoding stuff from unidb would save
! # another 50k or so, but I'll leave that for 2.1...
!
! decomp_data = [""]
decomp_index = [0] * len(unicode.chars)
for char in unicode.chars:
--- 92,99 ----
# 2) decomposition data
! decomp_data = [0]
! decomp_prefix = [""]
decomp_index = [0] * len(unicode.chars)
+ decomp_size = 0
for char in unicode.chars:
***************
*** 101,109 ****
if record:
if record[5]:
try:
! i = decomp_data.index(record[5])
except ValueError:
i = len(decomp_data)
! decomp_data.append(record[5])
else:
i = 0
--- 101,126 ----
if record:
if record[5]:
+ decomp = string.split(record[5])
+ # prefix
+ if decomp[0][0] == "<":
+ prefix = decomp.pop(0)
+ else:
+ prefix = ""
+ try:
+ i = decomp_prefix.index(prefix)
+ except ValueError:
+ i = len(decomp_prefix)
+ decomp_prefix.append(prefix)
+ prefix = i
+ assert prefix < 256
+ # content
+ decomp = [prefix + (len(decomp)<<8)] +\
+ map(lambda s: int(s, 16), decomp)
try:
! i = decomp_data.index(decomp)
except ValueError:
i = len(decomp_data)
! decomp_data.extend(decomp)
! decomp_size = decomp_size + len(decomp) * 2
else:
i = 0
***************
*** 111,115 ****
print len(table), "unique properties"
! print len(decomp_data), "unique decomposition entries"
print "--- Writing", FILE, "..."
--- 128,134 ----
print len(table), "unique properties"
! print len(decomp_prefix), "unique decomposition prefixes"
! print len(decomp_data), "unique decomposition entries:",
! print decomp_size, "bytes"
print "--- Writing", FILE, "..."
***************
*** 142,147 ****
print >>fp, "};"
! print >>fp, "static const char *decomp_data[] = {"
! for name in decomp_data:
print >>fp, " \"%s\"," % name
print >>fp, " NULL"
--- 161,166 ----
print >>fp, "};"
! print >>fp, "static const char *decomp_prefix[] = {"
! for name in decomp_prefix:
print >>fp, " \"%s\"," % name
print >>fp, " NULL"
***************
*** 153,166 ****
print >>fp, "/* index tables for the database records */"
print >>fp, "#define SHIFT", shift
! Array("index1", index1).dump(fp)
! Array("index2", index2).dump(fp)
# split decomposition index table
index1, index2, shift = splitbins(decomp_index, trace)
print >>fp, "/* index tables for the decomposition data */"
print >>fp, "#define DECOMP_SHIFT", shift
! Array("decomp_index1", index1).dump(fp)
! Array("decomp_index2", index2).dump(fp)
fp.close()
--- 172,188 ----
print >>fp, "/* index tables for the database records */"
print >>fp, "#define SHIFT", shift
! Array("index1", index1).dump(fp, trace)
! Array("index2", index2).dump(fp, trace)
# split decomposition index table
index1, index2, shift = splitbins(decomp_index, trace)
+ print >>fp, "/* decomposition data */"
+ Array("decomp_data", decomp_data).dump(fp, trace)
+
print >>fp, "/* index tables for the decomposition data */"
print >>fp, "#define DECOMP_SHIFT", shift
! Array("decomp_index1", index1).dump(fp, trace)
! Array("decomp_index2", index2).dump(fp, trace)
fp.close()
***************
*** 251,256 ****
print >>fp, "/* type indexes */"
print >>fp, "#define SHIFT", shift
! Array("index1", index1).dump(fp)
! Array("index2", index2).dump(fp)
fp.close()
--- 273,278 ----
print >>fp, "/* type indexes */"
print >>fp, "#define SHIFT", shift
! Array("index1", index1).dump(fp, trace)
! Array("index2", index2).dump(fp, trace)
fp.close()
***************
*** 303,316 ****
wordlist.sort(lambda a, b: len(b[1])-len(a[1]))
# statistics
n = 0
! for i in range(128):
n = n + len(wordlist[i][1])
! print n, "short words (7-bit indices)"
! # pick the 128 most commonly used words, and sort the rest on
! # falling length (to maximize overlap)
! wordlist, wordtail = wordlist[:128], wordlist[128:]
wordtail.sort(lambda a, b: len(b[0])-len(a[0]))
wordlist.extend(wordtail)
--- 325,350 ----
wordlist.sort(lambda a, b: len(b[1])-len(a[1]))
+ # figure out how many phrasebook escapes we need
+ escapes = 0
+ while escapes * 256 < len(wordlist):
+ escapes = escapes + 1
+ print escapes, "escapes"
+
+ short = 256 - escapes
+
+ assert short > 0
+
+ print short, "short indexes in lexicon"
+
# statistics
n = 0
! for i in range(short):
n = n + len(wordlist[i][1])
! print n, "short indexes in phrasebook"
! # pick the most commonly used words, and sort the rest on falling
! # length (to maximize overlap)
! wordlist, wordtail = wordlist[:short], wordlist[short:]
wordtail.sort(lambda a, b: len(b[0])-len(a[0]))
wordlist.extend(wordtail)
***************
*** 335,344 ****
offset = offset + len(w)
words[w] = len(lexicon_offset)
! lexicon_offset.append(offset)
- print len(words), "words in lexicon;", len(lexicon), "bytes"
-
- assert len(words) < 32768 # 15-bit word indices
-
lexicon = map(ord, lexicon)
--- 369,374 ----
offset = offset + len(w)
words[w] = len(lexicon_offset)
! lexicon_offset.append(o)
lexicon = map(ord, lexicon)
***************
*** 353,362 ****
for w in w:
i = words[w]
! if i < 128:
! phrasebook.append(128+i)
else:
! phrasebook.append(i>>8)
phrasebook.append(i&255)
#
# unicode name hash table
--- 383,395 ----
for w in w:
i = words[w]
! if i < short:
! phrasebook.append(i)
else:
! # store as two bytes
! phrasebook.append((i>>8) + short)
phrasebook.append(i&255)
+ assert getsize(phrasebook) == 1
+
#
# unicode name hash table
***************
*** 385,390 ****
print >>fp
print >>fp, "/* lexicon */"
! Array("lexicon", lexicon).dump(fp)
! Array("lexicon_offset", lexicon_offset).dump(fp)
# split decomposition index table
--- 418,423 ----
print >>fp
print >>fp, "/* lexicon */"
! Array("lexicon", lexicon).dump(fp, trace)
! Array("lexicon_offset", lexicon_offset).dump(fp, trace)
# split decomposition index table
***************
*** 393,403 ****
print >>fp, "/* code->name phrasebook */"
print >>fp, "#define phrasebook_shift", shift
! Array("phrasebook", phrasebook).dump(fp)
! Array("phrasebook_offset1", offset1).dump(fp)
! Array("phrasebook_offset2", offset2).dump(fp)
print >>fp, "/* name->code dictionary */"
! codehash.dump(fp)
fp.close()
--- 426,437 ----
print >>fp, "/* code->name phrasebook */"
print >>fp, "#define phrasebook_shift", shift
+ print >>fp, "#define phrasebook_short", short
! Array("phrasebook", phrasebook).dump(fp, trace)
! Array("phrasebook_offset1", offset1).dump(fp, trace)
! Array("phrasebook_offset2", offset2).dump(fp, trace)
print >>fp, "/* name->code dictionary */"
! codehash.dump(fp, trace)
fp.close()
***************
*** 528,534 ****
self.poly = poly
! def dump(self, file):
# write data to file, as a C array
! self.data.dump(file)
file.write("#define %s_magic %d\n" % (self.name, self.magic))
file.write("#define %s_size %d\n" % (self.name, self.size))
--- 562,568 ----
self.poly = poly
! def dump(self, file, trace):
# write data to file, as a C array
! self.data.dump(file, trace)
file.write("#define %s_magic %d\n" % (self.name, self.magic))
file.write("#define %s_size %d\n" % (self.name, self.size))
***************
*** 543,550 ****
self.data = data
! def dump(self, file):
# write data to file, as a C array
size = getsize(self.data)
! # print >>sys.stderr, self.name+":", size*len(self.data), "bytes"
file.write("static ")
if size == 1:
--- 577,585 ----
self.data = data
! def dump(self, file, trace=0):
# write data to file, as a C array
size = getsize(self.data)
! if trace:
! print >>sys.stderr, self.name+":", size*len(self.data), "bytes"
file.write("static ")
if size == 1:
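
[Editor's note, not part of the checkin: with the dynamic phrasebook threshold
above, a phrasebook byte below `short` is a complete one-byte word index, and a
byte at or above `short` is an escape carrying the high bits of a two-byte index,
with the low 8 bits in the following byte. A minimal decoding sketch, assuming
the phrasebook array and phrasebook_short value this script emits; the helper
name is illustrative and the real consumer lives on the C side:

    def decode_phrasebook_entry(phrasebook, pos, short):
        # Return (word_index, next_pos) for the entry starting at pos.
        b = phrasebook[pos]
        if b < short:
            return b, pos + 1                     # one-byte index
        # Escape byte: high bits are (b - short), low bits follow next.
        return ((b - short) << 8) + phrasebook[pos + 1], pos + 2
]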