[pypy-commit] pypy win32-cleanup2: merge from default
mattip
noreply at buildbot.pypy.org
Thu Apr 26 20:26:23 CEST 2012
Author: Matti Picus <matti.picus at gmail.com>
Branch: win32-cleanup2
Changeset: r54768:6740d398e7e2
Date: 2012-04-26 21:24 +0300
http://bitbucket.org/pypy/pypy/changeset/6740d398e7e2/
Log: merge from default
diff --git a/lib-python/modified-2.7/test/test_peepholer.py b/lib-python/modified-2.7/test/test_peepholer.py
--- a/lib-python/modified-2.7/test/test_peepholer.py
+++ b/lib-python/modified-2.7/test/test_peepholer.py
@@ -145,12 +145,15 @@
def test_binary_subscr_on_unicode(self):
# valid code get optimized
- asm = dis_single('u"foo"[0]')
- self.assertIn("(u'f')", asm)
- self.assertNotIn('BINARY_SUBSCR', asm)
- asm = dis_single('u"\u0061\uffff"[1]')
- self.assertIn("(u'\\uffff')", asm)
- self.assertNotIn('BINARY_SUBSCR', asm)
+ # XXX for now we always disable this optimization
+ # XXX see CPython's issue5057
+ if 0:
+ asm = dis_single('u"foo"[0]')
+ self.assertIn("(u'f')", asm)
+ self.assertNotIn('BINARY_SUBSCR', asm)
+ asm = dis_single('u"\u0061\uffff"[1]')
+ self.assertIn("(u'\\uffff')", asm)
+ self.assertNotIn('BINARY_SUBSCR', asm)
# invalid code doesn't get optimized
# out of range
diff --git a/pypy/doc/cppyy.rst b/pypy/doc/cppyy.rst
--- a/pypy/doc/cppyy.rst
+++ b/pypy/doc/cppyy.rst
@@ -80,7 +80,7 @@
void SetMyInt(int i) { m_myint = i; }
public:
- int m_myint;
+ int m_myint;
};
Then, generate the bindings using ``genreflex`` (part of ROOT), and compile the
@@ -174,10 +174,10 @@
$ cat MyAdvanced.xml
<lcgdict>
- <class pattern="Base?" />
- <class name="Derived" />
- <class name="std::string" />
- <function name="BaseFactory" />
+ <class pattern="Base?" />
+ <class name="Derived" />
+ <class name="std::string" />
+ <function name="BaseFactory" />
</lcgdict>
.. _`selection file`: http://root.cern.ch/drupal/content/generating-reflex-dictionaries
@@ -353,6 +353,9 @@
using classes that themselves are templates (etc.) in the arguments.
All classes must already exist in the loaded reflection info.
+* **typedefs**: Are simple python references to the actual classes to which
+ they refer.
+
* **unary operators**: Are supported if a python equivalent exists, and if the
operator is defined in the C++ class.
@@ -370,6 +373,107 @@
Only that one specific method can not be used.
+Templates
+=========
+
+A bit of special care needs to be taken for the use of templates.
+For a templated class to be completely available, it must be guaranteed that
+said class is fully instantiated, and hence all executable C++ code is
+generated and compiled in.
+The easiest way to fulfill that guarantee is by explicit instantiation in the
+header file that is handed to ``genreflex``.
+The following example should make that clear::
+
+ $ cat MyTemplate.h
+ #include <vector>
+
+ class MyClass {
+ public:
+ MyClass(int i = -99) : m_i(i) {}
+ MyClass(const MyClass& s) : m_i(s.m_i) {}
+ MyClass& operator=(const MyClass& s) { m_i = s.m_i; return *this; }
+ ~MyClass() {}
+ int m_i;
+ };
+
+ template class std::vector<MyClass>;
+
+If you know for certain that all symbols will be linked in from other sources,
+you can also declare the explicit template instantiation ``extern``.
+
+Unfortunately, this is not enough for gcc.
+The iterators, if they are going to be used, need to be instantiated as well,
+as do the comparison operators on those iterators, as these live in an
+internal namespace, rather than in the iterator classes.
+One way to handle this, is to deal with this once in a macro, then reuse that
+macro for all ``vector`` classes.
+Thus, the header above needs this, instead of just the explicit instantiation
+of the ``vector<MyClass>``::
+
+ #define STLTYPES_EXPLICIT_INSTANTIATION_DECL(STLTYPE, TTYPE) \
+ template class std::STLTYPE< TTYPE >; \
+ template class __gnu_cxx::__normal_iterator<TTYPE*, std::STLTYPE< TTYPE > >; \
+ template class __gnu_cxx::__normal_iterator<const TTYPE*, std::STLTYPE< TTYPE > >;\
+ namespace __gnu_cxx { \
+ template bool operator==(const std::STLTYPE< TTYPE >::iterator&, \
+ const std::STLTYPE< TTYPE >::iterator&); \
+ template bool operator!=(const std::STLTYPE< TTYPE >::iterator&, \
+ const std::STLTYPE< TTYPE >::iterator&); \
+ }
+
+ STLTYPES_EXPLICIT_INSTANTIATION_DECL(vector, MyClass)
+
+Then, still for gcc, the selection file needs to contain the full hierarchy as
+well as the global overloads for comparisons for the iterators::
+
+ $ cat MyTemplate.xml
+ <lcgdict>
+ <class pattern="std::vector<*>" />
+ <class pattern="__gnu_cxx::__normal_iterator<*>" />
+ <class pattern="__gnu_cxx::new_allocator<*>" />
+ <class pattern="std::_Vector_base<*>" />
+ <class pattern="std::_Vector_base<*>::_Vector_impl" />
+ <class pattern="std::allocator<*>" />
+ <function name="__gnu_cxx::operator=="/>
+ <function name="__gnu_cxx::operator!="/>
+
+ <class name="MyClass" />
+ </lcgdict>
+
+Run the normal ``genreflex`` and compilation steps::
+
+    $ genreflex MyTemplate.h --selection=MyTemplate.xml
+ $ g++ -fPIC -rdynamic -O2 -shared -I$ROOTSYS/include MyTemplate_rflx.cpp -o libTemplateDict.so
+
+Note: this is a dirty corner that clearly could do with some automation,
+even if the macro already helps.
+Such automation is planned.
+In fact, in the cling world, the backend can perform the template
+instantiations and generate the reflection info on the fly, and none of the
+above will any longer be necessary.
+
+Subsequent use should be as expected.
+Note the meta-class style of "instantiating" the template::
+
+ >>>> import cppyy
+ >>>> cppyy.load_reflection_info("libTemplateDict.so")
+ >>>> std = cppyy.gbl.std
+ >>>> MyClass = cppyy.gbl.MyClass
+ >>>> v = std.vector(MyClass)()
+ >>>> v += [MyClass(1), MyClass(2), MyClass(3)]
+ >>>> for m in v:
+ .... print m.m_i,
+ ....
+ 1 2 3
+ >>>>
+
+Other templates work similarly.
+The arguments to the template instantiation can either be a string with the
+full list of arguments, or the explicit classes.
+The latter makes for easier code writing if the classes passed to the
+instantiation are themselves templates.
+
+
The fast lane
=============
diff --git a/pypy/interpreter/astcompiler/optimize.py b/pypy/interpreter/astcompiler/optimize.py
--- a/pypy/interpreter/astcompiler/optimize.py
+++ b/pypy/interpreter/astcompiler/optimize.py
@@ -304,14 +304,19 @@
# produce compatible pycs.
if (self.space.isinstance_w(w_obj, self.space.w_unicode) and
self.space.isinstance_w(w_const, self.space.w_unicode)):
- unistr = self.space.unicode_w(w_const)
- if len(unistr) == 1:
- ch = ord(unistr[0])
- else:
- ch = 0
- if (ch > 0xFFFF or
- (MAXUNICODE == 0xFFFF and 0xD800 <= ch <= 0xDFFF)):
- return subs
+ #unistr = self.space.unicode_w(w_const)
+ #if len(unistr) == 1:
+ # ch = ord(unistr[0])
+ #else:
+ # ch = 0
+ #if (ch > 0xFFFF or
+ # (MAXUNICODE == 0xFFFF and 0xD800 <= ch <= 0xDFFF)):
+ # --XXX-- for now we always disable optimization of
+ # u'...'[constant] because the tests above are not
+ # enough to fix issue5057 (CPython has the same
+ # problem as of April 24, 2012).
+ # See test_const_fold_unicode_subscr
+ return subs
return ast.Const(w_const, subs.lineno, subs.col_offset)
diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py
--- a/pypy/interpreter/astcompiler/test/test_compiler.py
+++ b/pypy/interpreter/astcompiler/test/test_compiler.py
@@ -844,7 +844,8 @@
return u"abc"[0]
"""
counts = self.count_instructions(source)
- assert counts == {ops.LOAD_CONST: 1, ops.RETURN_VALUE: 1}
+ if 0: # xxx later?
+ assert counts == {ops.LOAD_CONST: 1, ops.RETURN_VALUE: 1}
# getitem outside of the BMP should not be optimized
source = """def f():
@@ -854,12 +855,20 @@
assert counts == {ops.LOAD_CONST: 2, ops.BINARY_SUBSCR: 1,
ops.RETURN_VALUE: 1}
+ source = """def f():
+ return u"\U00012345abcdef"[3]
+ """
+ counts = self.count_instructions(source)
+ assert counts == {ops.LOAD_CONST: 2, ops.BINARY_SUBSCR: 1,
+ ops.RETURN_VALUE: 1}
+
monkeypatch.setattr(optimize, "MAXUNICODE", 0xFFFF)
source = """def f():
return u"\uE01F"[0]
"""
counts = self.count_instructions(source)
- assert counts == {ops.LOAD_CONST: 1, ops.RETURN_VALUE: 1}
+ if 0: # xxx later?
+ assert counts == {ops.LOAD_CONST: 1, ops.RETURN_VALUE: 1}
monkeypatch.undo()
# getslice is not yet optimized.
diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py
--- a/pypy/module/cpyext/api.py
+++ b/pypy/module/cpyext/api.py
@@ -112,8 +112,8 @@
""".split()
for name in constant_names:
setattr(CConfig_constants, name, rffi_platform.ConstantInteger(name))
-udir.join('pypy_decl.h').write("/* Will be filled later */")
-udir.join('pypy_macros.h').write("/* Will be filled later */")
+udir.join('pypy_decl.h').write("/* Will be filled later */\n")
+udir.join('pypy_macros.h').write("/* Will be filled later */\n")
globals().update(rffi_platform.configure(CConfig_constants))
def copy_header_files(dstdir):
diff --git a/pypy/rlib/rposix.py b/pypy/rlib/rposix.py
--- a/pypy/rlib/rposix.py
+++ b/pypy/rlib/rposix.py
@@ -79,13 +79,13 @@
else:
separate_module_sources = []
export_symbols = []
-eci = ExternalCompilationInfo(
+errno_eci = ExternalCompilationInfo(
includes=['errno.h','stdio.h'],
separate_module_sources = separate_module_sources,
export_symbols = export_symbols,
)
-_get_errno, _set_errno = CExternVariable(INT, 'errno', eci,
+_get_errno, _set_errno = CExternVariable(INT, 'errno', errno_eci,
CConstantErrno, sandboxsafe=True,
_nowrapper=True, c_type='int')
# the default wrapper for set_errno is not suitable for use in critical places
@@ -100,7 +100,7 @@
if os.name == 'nt':
_validate_fd = rffi.llexternal(
"_PyVerify_fd", [rffi.INT], rffi.INT,
- compilation_info=eci,
+ compilation_info=errno_eci,
)
@jit.dont_look_inside
def validate_fd(fd):
diff --git a/pypy/rpython/lltypesystem/lltype.py b/pypy/rpython/lltypesystem/lltype.py
--- a/pypy/rpython/lltypesystem/lltype.py
+++ b/pypy/rpython/lltypesystem/lltype.py
@@ -1167,7 +1167,7 @@
try:
return self._lookup_adtmeth(field_name)
except AttributeError:
- raise AttributeError("%r instance has no field %r" % (self._T._name,
+ raise AttributeError("%r instance has no field %r" % (self._T,
field_name))
def __setattr__(self, field_name, val):
diff --git a/pypy/rpython/tool/rffi_platform.py b/pypy/rpython/tool/rffi_platform.py
--- a/pypy/rpython/tool/rffi_platform.py
+++ b/pypy/rpython/tool/rffi_platform.py
@@ -379,7 +379,7 @@
self.name = name
def prepare_code(self):
- yield 'if ((%s) < 0) {' % (self.name,)
+ yield 'if ((%s) <= 0) {' % (self.name,)
yield ' long long x = (long long)(%s);' % (self.name,)
yield ' printf("value: %lld\\n", x);'
yield '} else {'
@@ -401,7 +401,7 @@
def prepare_code(self):
yield '#ifdef %s' % self.macro
yield 'dump("defined", 1);'
- yield 'if ((%s) < 0) {' % (self.macro,)
+ yield 'if ((%s) <= 0) {' % (self.macro,)
yield ' long long x = (long long)(%s);' % (self.macro,)
yield ' printf("value: %lld\\n", x);'
yield '} else {'
diff --git a/pypy/tool/compare_last_builds.py b/pypy/tool/compare_last_builds.py
new file mode 100644
--- /dev/null
+++ b/pypy/tool/compare_last_builds.py
@@ -0,0 +1,122 @@
+import os
+import urllib2
+import json
+import sys
+import md5
+
+wanted = sys.argv[1:]
+if not wanted:
+ wanted = ['default']
+base = "http://buildbot.pypy.org/json/builders/"
+
+cachedir = os.environ.get('PYPY_BUILDS_CACHE')
+if cachedir and not os.path.exists(cachedir):
+ os.makedirs(cachedir)
+
+
+
+def get_json(url, cache=cachedir):
+ return json.loads(get_data(url, cache))
+
+
+def get_data(url, cache=cachedir):
+ url = str(url)
+ if cache:
+ digest = md5.md5()
+ digest.update(url)
+ digest = digest.hexdigest()
+ cachepath = os.path.join(cachedir, digest)
+ if os.path.exists(cachepath):
+ with open(cachepath) as fp:
+ return fp.read()
+
+ print 'GET', url
+ fp = urllib2.urlopen(url)
+ try:
+ data = fp.read()
+ if cache:
+ with open(cachepath, 'wb') as cp:
+ cp.write(data)
+ return data
+ finally:
+ fp.close()
+
+def parse_log(log):
+ items = []
+ for v in log.splitlines(1):
+ if not v[0].isspace() and v[1].isspace():
+ items.append(v)
+ return sorted(items) #sort cause testrunner order is non-deterministic
+
+def gather_logdata(build):
+ logdata = get_data(str(build['log']) + '?as_text=1')
+ logdata = logdata.replace('</span><span class="stdout">', '')
+ logdata = logdata.replace('</span></pre>', '')
+ del build['log']
+ build['log'] = parse_log(logdata)
+
+
+def branch_mapping(l):
+ keep = 3 - len(wanted)
+ d = {}
+ for x in reversed(l):
+ gather_logdata(x)
+ if not x['log']:
+ continue
+ b = x['branch']
+ if b not in d:
+ d[b] = []
+ d[b].insert(0, x)
+ if len(d[b]) > keep:
+ d[b].pop()
+ return d
+
+def cleanup_build(d):
+ for a in 'times eta steps slave reason sourceStamp blame currentStep text'.split():
+ del d[a]
+
+ props = d.pop(u'logs')
+ for name, val in props:
+ if name == u'pytestLog':
+ d['log'] = val
+ props = d.pop(u'properties')
+ for name, val, _ in props:
+ if name == u'branch':
+ d['branch'] = val or 'default'
+ return d
+
+def collect_builds(d):
+ name = str(d['basedir'])
+ builds = d['cachedBuilds']
+ l = []
+ for build in builds:
+ d = get_json(base + '%s/builds/%s' % (name, build))
+ cleanup_build(d)
+ l.append(d)
+
+ l = [x for x in l if x['branch'] in wanted and 'log' in x]
+ d = branch_mapping(l)
+ return [x for lst in d.values() for x in lst]
+
+
+def only_linux32(d):
+ return d['own-linux-x86-32']
+
+
+own_builds = get_json(base, cache=False)['own-linux-x86-32']
+
+builds = collect_builds(own_builds)
+
+
+builds.sort(key=lambda x: (wanted.index(x['branch']), x['number']))
+logs = [x.pop('log') for x in builds]
+for b, s in zip(builds, logs):
+ b['resultset'] = len(s)
+import pprint
+pprint.pprint(builds)
+
+from difflib import Differ
+
+for x in Differ().compare(*logs):
+ if x[0]!=' ':
+ sys.stdout.write(x)
diff --git a/pypy/translator/c/src/cjkcodecs/cjkcodecs.h b/pypy/translator/c/src/cjkcodecs/cjkcodecs.h
--- a/pypy/translator/c/src/cjkcodecs/cjkcodecs.h
+++ b/pypy/translator/c/src/cjkcodecs/cjkcodecs.h
@@ -210,15 +210,15 @@
#define BEGIN_CODECS_LIST /* empty */
#define _CODEC(name) \
- static const MultibyteCodec _pypy_cjkcodec_##name; \
- const MultibyteCodec *pypy_cjkcodec_##name(void) { \
+ static MultibyteCodec _pypy_cjkcodec_##name; \
+ MultibyteCodec *pypy_cjkcodec_##name(void) { \
if (_pypy_cjkcodec_##name.codecinit != NULL) { \
int r = _pypy_cjkcodec_##name.codecinit(_pypy_cjkcodec_##name.config); \
assert(r == 0); \
} \
return &_pypy_cjkcodec_##name; \
} \
- static const MultibyteCodec _pypy_cjkcodec_##name
+ static MultibyteCodec _pypy_cjkcodec_##name
#define _STATEFUL_METHODS(enc) \
enc##_encode, \
enc##_encode_init, \
diff --git a/pypy/translator/c/src/cjkcodecs/multibytecodec.h b/pypy/translator/c/src/cjkcodecs/multibytecodec.h
--- a/pypy/translator/c/src/cjkcodecs/multibytecodec.h
+++ b/pypy/translator/c/src/cjkcodecs/multibytecodec.h
@@ -131,7 +131,7 @@
/* list of codecs defined in the .c files */
#define DEFINE_CODEC(name) \
- const MultibyteCodec *pypy_cjkcodec_##name(void);
+ MultibyteCodec *pypy_cjkcodec_##name(void);
// _codecs_cn
DEFINE_CODEC(gb2312)
More information about the pypy-commit
mailing list