[pypy-svn] r70120 - pypy/trunk/pypy/translator/benchmark
arigo at codespeak.net
arigo at codespeak.net
Tue Dec 15 10:31:14 CET 2009
Author: arigo
Date: Tue Dec 15 10:31:13 2009
New Revision: 70120
Modified:
pypy/trunk/pypy/translator/benchmark/bench-custom.py
pypy/trunk/pypy/translator/benchmark/benchmarks.py
pypy/trunk/pypy/translator/benchmark/result.py
Log:
* Add the --size-factor=N option, which runs the benchmarks for N times
longer. This should let us see the effect of JIT compilation on the total
runtime.
* Record and display (if --verbose) the output of the process. This
should make it convenient to grab and parse the JIT summary information.
Modified: pypy/trunk/pypy/translator/benchmark/bench-custom.py
==============================================================================
--- pypy/trunk/pypy/translator/benchmark/bench-custom.py (original)
+++ pypy/trunk/pypy/translator/benchmark/bench-custom.py Tue Dec 15 10:31:13 2009
@@ -29,6 +29,8 @@
if not b.check():
print "can't run %s benchmark for some reason"%(b.name,)
else:
+ if int(options.sizefactor) > 1:
+ b = b * int(options.sizefactor)
benchmarks.append(b)
exes = get_executables(args)
@@ -46,11 +48,11 @@
if not options.nocpython:
exes = full_pythons + exes
- for i in range(int(options.runcount)) + [None]:
+ for i in range(int(options.runcount)) or [None]:
if i is not None:
for exe in exes:
for b in benchmarks:
- benchmark_result.result(exe, allowcreate=True).run_benchmark(b, verbose=True)
+ benchmark_result.result(exe, allowcreate=True).run_benchmark(b, verbose=options.verbose)
pickle.dump(benchmark_result, open(options.picklefile, 'wb'))
@@ -106,5 +108,9 @@
'--no-cpython', action='store_true', dest='nocpython',
default=None,
)
+ parser.add_option(
+ '--size-factor', dest='sizefactor',
+ default='1',
+ )
options, args = parser.parse_args(sys.argv[1:])
main(options, args)
Modified: pypy/trunk/pypy/translator/benchmark/benchmarks.py
==============================================================================
--- pypy/trunk/pypy/translator/benchmark/benchmarks.py (original)
+++ pypy/trunk/pypy/translator/benchmark/benchmarks.py Tue Dec 15 10:31:13 2009
@@ -20,17 +20,30 @@
return float(line.split()[len(pattern.split())])
class Benchmark(object):
- def __init__(self, name, runner, asc_good, units, check=lambda:True):
- self.name = name
+ def __init__(self, name, runner, asc_good, units,
+ check=lambda:True, sizefactor=1):
+ if sizefactor > 1:
+ self.name = name + '*%d' % sizefactor
+ else:
+ self.name = name
+ self._basename = name
self._run = runner
self.asc_good = asc_good
self.units = units
self.check = check
+ self.sizefactor = sizefactor
+ def __mul__(self, n):
+ return Benchmark(self._basename, self._run, self.asc_good, self.units,
+ self.check, self.sizefactor * n)
def run(self, exe):
+ global latest_output
+ latest_output = ''
try:
- return self._run(exe)
- except BenchmarkFailed:
- return '-FAILED-'
+ result = self._run(exe, self.sizefactor)
+ except BenchmarkFailed, e:
+ result = '-FAILED-'
+ self.latest_output = latest_output
+ return result
def external_dependency(dirname, svnurl, revision):
"""Check out (if necessary) a given fixed revision of a svn url."""
@@ -54,24 +67,26 @@
return True
def run_cmd(cmd):
+ global latest_output
#print "running", cmd
pipe = os.popen(cmd + ' 2>&1')
r = pipe.read()
status = pipe.close()
+ latest_output = r
if status:
raise BenchmarkFailed(status)
return r
-def run_pystone(executable='/usr/local/bin/python', n=''):
+def run_pystone(executable='/usr/local/bin/python', sizefactor=1):
from pypy.tool import autopath
distdir = py.path.local(autopath.pypydir).dirpath()
pystone = py.path.local(autopath.libpythondir).join('test', 'pystone.py')
- txt = run_cmd('"%s" "%s" %s' % (executable, pystone, n))
+ txt = run_cmd('"%s" "%s" %d' % (executable, pystone, 50000 * sizefactor))
return get_result(txt, PYSTONE_PATTERN)
-def run_richards(executable='/usr/local/bin/python', n=5):
+def run_richards(executable='/usr/local/bin/python', sizefactor=1):
richards = py.path.local(__file__).dirpath().dirpath().join('goal').join('richards.py')
- txt = run_cmd('"%s" %s %s' % (executable, richards, n))
+ txt = run_cmd('"%s" %s %d' % (executable, richards, 5 * sizefactor))
return get_result(txt, RICHARDS_PATTERN)
def run_translate(executable='/usr/local/bin/python'):
@@ -117,7 +132,7 @@
# 'svn://svn.berlios.de/docutils/trunk/docutils',
# 4821)
-def run_templess(executable='/usr/local/bin/python'):
+def run_templess(executable='/usr/local/bin/python', sizefactor=1):
""" run some script in the templess package
templess is some simple templating language, to check out use
@@ -127,66 +142,54 @@
pypath = os.path.dirname(os.path.dirname(py.__file__))
templessdir = here.join('templess')
testscript = templessdir.join('test/oneshot.py')
- command = 'PYTHONPATH="%s:%s" "%s" "%s" 100' % (here, pypath,
- executable, testscript)
+ command = 'PYTHONPATH="%s:%s" "%s" "%s" %d' % (here, pypath,
+ executable, testscript,
+ 100 * sizefactor)
txt = run_cmd(command)
- try:
- result = float([line for line in txt.split('\n') if line.strip()][-1])
- except ValueError:
+ for line in txt.split('\n'):
+ if '.' in line:
+ try:
+ return float(line) / sizefactor
+ except ValueError:
+ pass
+ else:
raise BenchmarkFailed
- return result
def check_templess():
return external_dependency('templess',
'http://johnnydebris.net/templess/trunk',
100)
-def run_gadfly(executable='/usr/local/bin/python'):
+def run_gadfly(executable='/usr/local/bin/python', sizefactor=1):
""" run some tests in the gadfly pure Python database """
here = py.path.local(__file__).dirpath()
gadfly = here.join('gadfly')
testscript = gadfly.join('test', 'testsubset.py')
- command = 'PYTHONPATH="%s" "%s" "%s"' % (gadfly, executable, testscript)
+ command = 'PYTHONPATH="%s" "%s" "%s" %d' % (gadfly, executable, testscript,
+ sizefactor)
txt = run_cmd(command)
- lines = [line for line in txt.split('\n') if line.strip()]
- if lines[-1].strip() != 'OK':
- raise BenchmarkFailed
- lastword = lines[-2].split()[-1]
- if not lastword.endswith('s'):
- raise BenchmarkFailed
- try:
- result = float(lastword[:-1])
- except ValueError:
- raise BenchmarkFailed
- return result
+ return get_result(txt, 'Total running time:') / sizefactor
def check_gadfly():
return external_dependency('gadfly',
'http://codespeak.net/svn/user/arigo/hack/pypy-hack/gadflyZip',
- 54470)
+ 70117)
-def run_mako(executable='/usr/local/bin/python'):
+def run_mako(executable='/usr/local/bin/python', sizefactor=1):
""" run some tests in the mako templating system """
here = py.path.local(__file__).dirpath()
mako = here.join('mako')
testscript = mako.join('examples', 'bench', 'basic.py')
- command = 'PYTHONPATH="%s" "%s" "%s" mako' % (mako.join('lib'),
- executable, testscript)
+ command = 'PYTHONPATH="%s" "%s" "%s" -n%d mako' % (mako.join('lib'),
+ executable, testscript,
+ 2000 * sizefactor)
txt = run_cmd(command)
- lines = [line for line in txt.split('\n') if line.strip()]
- words = lines[-1].split()
- if words[0] != 'Mako:':
- raise BenchmarkFailed
- try:
- result = float(words[1])
- except ValueError:
- raise BenchmarkFailed
- return result
+ return get_result(txt, 'Mako:')
def check_mako():
return external_dependency('mako',
'http://codespeak.net/svn/user/arigo/hack/pypy-hack/mako',
- 40235)
+ 70118)
def check_translate():
return False # XXX what should we do about the dependency on ctypes?
Modified: pypy/trunk/pypy/translator/benchmark/result.py
==============================================================================
--- pypy/trunk/pypy/translator/benchmark/result.py (original)
+++ pypy/trunk/pypy/translator/benchmark/result.py Tue Dec 15 10:31:13 2009
@@ -89,12 +89,15 @@
self.asc_goods[benchmark.name] = benchmark.asc_good
if self.run_counts.get(benchmark.name, 0) > self.max_results:
return
- if verbose:
- print 'running', benchmark.name, 'for', self.exe_name,
- sys.stdout.flush()
+ print 'running', benchmark.name, 'for', self.exe_name,
+ sys.stdout.flush()
new_result = benchmark.run(self.exe_name)
+ print new_result
if verbose:
- print new_result
+ print '{'
+ for line in benchmark.latest_output.splitlines(False):
+ print '\t' + line
+ print '}'
self.run_counts[benchmark.name] = self.run_counts.get(benchmark.name, 0) + 1
if new_result == '-FAILED-':
return
@@ -142,6 +145,8 @@
elif stat.startswith('bench:'):
from pypy.translator.benchmark import benchmarks
statkind, statdetail = stat.split(':', 1)
+ if '*' in statdetail:
+ statdetail = statdetail.split('*')[0]
b = benchmarks.BENCHMARKS_BY_NAME[statdetail]
return "%8.2f%s"%(statvalue, b.units), 1
elif stat == 'pypy_rev':
More information about the Pypy-commit
mailing list