[pypy-commit] benchmarks default: improve reporting

fijal noreply at buildbot.pypy.org
Tue Apr 23 13:17:41 CEST 2013


Author: Maciej Fijalkowski <fijall at gmail.com>
Branch: 
Changeset: r206:842036652c02
Date: 2013-04-23 13:05 +0200
http://bitbucket.org/pypy/benchmarks/changeset/842036652c02/

Log:	improve reporting

diff --git a/unladen_swallow/perf.py b/unladen_swallow/perf.py
--- a/unladen_swallow/perf.py
+++ b/unladen_swallow/perf.py
@@ -445,7 +445,7 @@
         base_python: path to the reference Python binary.
         changed_python: path to the experimental Python binary.
         options: optparse.Values instance.
-        *args, **kwargs: will be passed through to benchmark_function. 
+        *args, **kwargs: will be passed through to benchmark_function.
 
     Returns:
         An object representing differences between the two benchmark runs.
@@ -743,7 +743,7 @@
     Returns:
         (stdout, mem_usage), where stdout is the captured stdout as a string;
         mem_usage is a list of memory usage samples in kilobytes (if
-        track_memory is False, mem_usage is None). 
+        track_memory is False, mem_usage is None).
 
     Raises:
         RuntimeError: if the command failed. The value of the exception will
@@ -758,7 +758,9 @@
         future = MemoryUsageFuture(subproc.pid)
     result, err = subproc.communicate()
     if subproc.returncode != 0:
-        raise RuntimeError("Benchmark died: " + err)
+        print result
+        raise RuntimeError("Benchmark died (returncode: %d): %s" %
+                           (subproc.returncode, err))
     if track_memory:
         mem_usage = future.GetMemoryUsage()
     return result, mem_usage
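
This hunk is the substance of the commit: when a benchmark subprocess exits with a non-zero status, perf.py now echoes the child's captured stdout and includes the return code in the RuntimeError, instead of raising with stderr alone. A minimal standalone sketch of that error path (the run_command name here is illustrative, not the actual perf.py helper):

    # Illustrative sketch, not the real perf.py helper: the improved error
    # path when a benchmark subprocess dies.
    import subprocess

    def run_command(command):
        """Run `command` and return its stdout, failing loudly with context."""
        subproc = subprocess.Popen(command,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   universal_newlines=True)
        result, err = subproc.communicate()
        if subproc.returncode != 0:
            # Echo the child's stdout so partial output is not lost, then
            # report the exit status alongside stderr.
            print(result)
            raise RuntimeError("Benchmark died (returncode: %d): %s"
                               % (subproc.returncode, err))
        return result

Printing the captured stdout before raising means a failing benchmark's partial output is no longer discarded. The remaining hunks, like the two above, only strip trailing whitespace.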
@@ -1440,7 +1442,7 @@
 BENCH_FUNCS = _FindAllBenchmarks(globals())
 
 # Benchmark groups. The "default" group is what's run if no -b option is
-# specified. 
+# specified.
 # If you update the default group, be sure to update the module docstring, too.
 # An "all" group which includes every benchmark perf.py knows about is generated
 # automatically.
@@ -1568,7 +1570,7 @@
                             " Valid benchmarks are: " +
                             ", ".join(bench_groups.keys() + all_benchmarks)))
     parser.add_option("--inherit_env", metavar="ENVVARS", type="string", action="callback",
-                      callback=ParseEnvVars, default=[],                      
+                      callback=ParseEnvVars, default=[],
                       help=("Comma-separated list of environment variable names"
                             " that are inherited from the parent environment"
                             " when running benchmarking subprocesses."))

