[pypy-svn] r39697 - pypy/dist/pypy/rpython/microbench

antocuni at codespeak.net
Fri Mar 2 16:06:05 CET 2007


Author: antocuni
Date: Fri Mar  2 16:06:01 2007
New Revision: 39697

Added:
   pypy/dist/pypy/rpython/microbench/
   pypy/dist/pypy/rpython/microbench/__init__.py   (contents, props changed)
   pypy/dist/pypy/rpython/microbench/autopath.py
      - copied unchanged from r39485, pypy/dist/pypy/bin/autopath.py
   pypy/dist/pypy/rpython/microbench/list.py   (contents, props changed)
   pypy/dist/pypy/rpython/microbench/microbench.py   (contents, props changed)
Log:
(antocuni, pedronis)

A tool for microbenchmarking RPython snippets.

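The tool compiles the collected benchmarks with both the C and the CLI
backend and prints the timings side by side.  Presumably it is run from the
microbench directory; module arguments are optional (with no arguments,
every *.py module there except __init__.py, autopath.py and microbench.py
is picked up):

    python microbench.py           # run all benchmark modules
    python microbench.py list.py   # run only the list benchmarks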


Added: pypy/dist/pypy/rpython/microbench/__init__.py
==============================================================================

Added: pypy/dist/pypy/rpython/microbench/list.py
==============================================================================
--- (empty file)
+++ pypy/dist/pypy/rpython/microbench/list.py	Fri Mar  2 16:06:01 2007
@@ -0,0 +1,51 @@
+from pypy.rpython.microbench.microbench import MetaBench
+
+class ListAppend:
+    __metaclass__ = MetaBench
+    def init():
+        return []
+    args = ['obj', 'i']
+    def loop(obj, i):
+        obj.append(i)
+    
+class ListGetItem:
+    __metaclass__ = MetaBench
+    LOOPS = 100000000
+    def init():
+        obj = []
+        for i in xrange(1000):
+            obj.append(i)
+        return obj
+    args = ['obj', 'i']
+    def loop(obj, i):
+        return obj[i%1000]
+
+class ListSetItem:
+    __metaclass__ = MetaBench
+    LOOPS = 100000000
+    def init():
+        obj = []
+        for i in xrange(1000):
+            obj.append(i)
+        return obj
+    args = ['obj', 'i']
+    def loop(obj, i):
+        obj[i%1000] = i
+
+class FixedListGetItem:
+    __metaclass__ = MetaBench
+    LOOPS = 100000000
+    def init():
+        return [0] * 1000
+    args = ['obj', 'i']
+    def loop(obj, i):
+        return obj[i%1000]
+
+class FixedListSetItem:
+    __metaclass__ = MetaBench
+    LOOPS = 100000000
+    def init():
+        return [0] * 1000
+    args = ['obj', 'i']
+    def loop(obj, i):
+        obj[i%1000] = i

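For reference, the MetaBench metaclass (defined in microbench.py below)
replaces each of these classes with a generated timing function; for
ListAppend the generated code comes out roughly as:

    def ListAppend():
        obj = init()             # the class's init(), here: return []
        start = clock()
        for i in xrange(LOOPS):  # LOOPS defaults to 10000000
            loop(obj, i)         # the class's loop(), marked dont_inline
        return clock() - start
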
Added: pypy/dist/pypy/rpython/microbench/microbench.py
==============================================================================
--- (empty file)
+++ pypy/dist/pypy/rpython/microbench/microbench.py	Fri Mar  2 16:06:01 2007
@@ -0,0 +1,106 @@
+import sys
+import autopath
+from time import clock
+from py.compat import subprocess
+from pypy.translator.interactive import Translation
+
+LOOPS = 10000000
+
+class MetaBench(type):
+    def __new__(self, cls_name, bases, cls_dict):
+        loop = cls_dict['loop']
+        loop.dont_inline = True
+        myglob = {
+            'init': cls_dict['init'],
+            'loop': loop,
+            'LOOPS': cls_dict.get('LOOPS', LOOPS),
+            'clock': clock,
+            }
+        args = ', '.join(cls_dict['args'])
+        source = """
+def %(cls_name)s():
+    obj = init()
+    start = clock()
+    for i in xrange(LOOPS):
+        loop(%(args)s)
+    return clock() - start
+""" % locals()
+        exec source in myglob
+        func = myglob[cls_name]
+        func.benchmark = True
+        return func
+
+
+def run_benchmark(exe):
+    from pypy.translator.cli.test.runtest import CliFunctionWrapper
+    if isinstance(exe, CliFunctionWrapper):
+        stdout, stderr, retval = exe.run()
+    else:
+        assert isinstance(exe, str)
+        bench = subprocess.Popen(exe, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        stdout, stderr = bench.communicate()
+        retval = bench.wait()
+
+    if retval != 0:
+        print 'Running benchmark failed'
+        print 'Standard Output:'
+        print stdout
+        print '-' * 40
+        print 'Standard Error:'
+        print stderr
+        raise SystemExit(-1)
+
+    mydict = {}
+    for line in stdout.splitlines():
+        name, res = line.split(':')
+        mydict[name.strip()] = float(res)
+    return mydict
+
+def import_benchmarks():
+    modules = sys.argv[1:]
+    if len(modules) == 0:
+        # import all the microbenchmark modules
+        from glob import glob
+        for module in glob('*.py'):
+            if module not in ('__init__.py', 'autopath.py', 'microbench.py'):
+                modules.append(module)
+
+    for module in modules:
+        if module.endswith('.py'):
+            module = module[:-3]   # drop the '.py' extension
+        exec 'from %s import *' % module in globals()
+
+def main():
+    import_benchmarks()
+    benchmarks = []
+    for name, thing in globals().iteritems():
+        if getattr(thing, 'benchmark', False):
+            benchmarks.append((name, thing))
+    benchmarks.sort()
+    
+    def entry_point(argv):
+        for name, func in benchmarks:
+            print name, ':', func()
+        return 0
+
+    t = Translation(entry_point, standalone=True, backend='c')
+    c_exe = t.compile()
+    t = Translation(entry_point, standalone=True, backend='cli')
+    cli_exe = t.compile()
+
+    c_res = run_benchmark(c_exe)
+    cli_res = run_benchmark(cli_exe)
+
+    print 'benchmark                       genc     gencli       ratio'
+    print
+    for name, _ in benchmarks:
+        c_time = c_res[name]
+        cli_time = cli_res[name]
+        if c_time == 0:
+            ratio = '%10s' % '---'
+        else:
+            ratio = '%10.2f' % (cli_time/c_time)
+        print '%-25s %10.2f %10.2f %s' % (name, c_time, cli_time, ratio)
+
+if __name__ == '__main__':
+    main()

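Defining further benchmarks only requires following the same pattern; a
minimal hypothetical module (file and class names made up, not part of
this commit):

    # dict_bench.py -- hypothetical example
    from pypy.rpython.microbench.microbench import MetaBench

    class DictSetItem:
        __metaclass__ = MetaBench
        def init():
            return {}
        args = ['obj', 'i']
        def loop(obj, i):
            obj[i] = i

For each benchmark the driver prints the genc time, the gencli time and
their ratio; '---' is printed when the C time rounds to zero.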