[Python-checkins] benchmarks: Issue #25774: Add raw mode to perf.py.
zach.ware
python-checkins at python.org
Wed Feb 3 15:19:46 EST 2016
https://hg.python.org/benchmarks/rev/9923b81a1d34
changeset: 239:9923b81a1d34
user: Zachary Ware <zachary.ware at gmail.com>
date: Wed Feb 03 14:19:18 2016 -0600
summary:
Issue #25774: Add raw mode to perf.py.
Raw mode runs the benchmark(s) on a single interpreter and returns
the result from main() as a dict.
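
As a rough illustration of the new mode (a sketch only, not part of this
changeset; the benchmark name and interpreter path are placeholders, and it
assumes main() accepts an argv-style list, as parser.parse_args(argv) in the
patch suggests):

    import perf

    # Hypothetical call: raw mode takes a single interpreter instead of a
    # baseline/changed pair and skips the comparison step entirely.
    results = perf.main(['--raw', '--benchmarks=2to3', '/usr/bin/python3'])

    # main() returns a dict mapping benchmark name to a RawBenchmarkResult,
    # itself a dict with 'average' plus 'min', 'max' and 'std_dev' when the
    # benchmark produced more than one timing.
    for name, result in sorted(results.items()):
        print(name, result['average'])
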
files:
perf.py | 75 ++++++++++++++++++++++++++++++++++++++++++--
1 files changed, 71 insertions(+), 4 deletions(-)
diff --git a/perf.py b/perf.py
--- a/perf.py
+++ b/perf.py
@@ -818,6 +818,8 @@
     try:
         changed_data = benchmark_function(changed_python, options,
                                           *args, **kwargs)
+        if options.raw:
+            return FormatRawData(changed_data, options)
         base_data = benchmark_function(base_python, options,
                                        *args, **kwargs)
     except subprocess.CalledProcessError as e:
@@ -1113,6 +1115,33 @@
                            std_changed, delta_std, significant, timeline_link)


+class RawBenchmarkResult(dict):
+
+    def __init__(self, average, min=None, max=None, std_dev=None):
+        self['average'] = average
+        if min is not None:
+            self['min'] = min
+        if max is not None:
+            self['max'] = max
+        if std_dev is not None:
+            self['std_dev'] = std_dev
+
+    def __str__(self):
+        return '\n'.join('%s: %s' % i for i in sorted(self.items()))
+
+
+def FormatRawData(bm_data, options):
+    # XXX implement track_memory handling?
+    times = sorted(bm_data.runtimes)
+    average = avg(times)
+    mn = mx = std = None
+    if len(times) > 1:
+        mn = times[0]
+        mx = times[-1]
+        std = SampleStdDev(times)
+    return RawBenchmarkResult(average, mn, mx, std)
+
+
 def CompareBenchmarkData(base_data, exp_data, options):
     """Compare performance and memory usage.
@@ -1665,6 +1694,8 @@
     try:
         changed_data = MeasureSpitfire(changed_python, options,
                                        spitfire_env, extra_args)
+        if options.raw:
+            return FormatRawData(changed_data, options)
         base_data = MeasureSpitfire(base_python, options,
                                     spitfire_env, extra_args)
     except subprocess.CalledProcessError as e:
@@ -2069,6 +2100,8 @@
     opts = []
     changed_data = MeasureStartup(changed_python, opts, num_loops,
                                   options.track_memory, options.inherit_env)
+    if options.raw:
+        return FormatRawData(changed_data, options)
     base_data = MeasureStartup(base_python, opts, num_loops,
                                options.track_memory, options.inherit_env)
@@ -2086,6 +2119,8 @@
     opts = ["-S"]
     changed_data = MeasureStartup(changed_python, opts, num_loops,
                                   options.track_memory, options.inherit_env)
+    if options.raw:
+        return FormatRawData(changed_data, options)
     base_data = MeasureStartup(base_python, opts, num_loops,
                                options.track_memory, options.inherit_env)
@@ -2370,6 +2405,8 @@
 SLOW_BENCHMARKS = ["hexiom2"]

+NON_RAW_BENCHMARKS = ["pybench"]
+

 def _ExpandBenchmarkName(bm_name, bench_groups):
     """Recursively expand name benchmark names.
@@ -2389,7 +2426,7 @@
         yield bm_name


-def ParseBenchmarksOption(benchmarks_opt, bench_groups, fast=False):
+def ParseBenchmarksOption(benchmarks_opt, bench_groups, fast=False, raw=False):
     """Parses and verifies the --benchmarks option.

     Args:
@@ -2431,6 +2468,12 @@
             logging.info("Skipping slow benchmarks (%s) in fast mode",
                          ', '.join(sorted(to_skip)))
             should_run = should_run - to_skip
+    if raw:
+        to_skip = should_run & set(NON_RAW_BENCHMARKS)
+        if to_skip:
+            logging.info("Skipping raw-incompatible benchmarks (%s) "
+                         "in raw mode", ', '.join(sorted(to_skip)))
+            should_run = should_run - to_skip

     return should_run
@@ -2581,6 +2624,10 @@
                              " Unladen Swallow binaries. This is useful for"
                              " examining many benchmarks for optimization"
                              " effects."))
+    parser.add_option("--raw", action="store_true",
+                      help="Run the benchmarks on only one interpreter and "
+                           "return the timing information for each benchmark. "
+                           "Provide only baseline_python, not changed_python.")
     parser.add_option("--affinity", metavar="CPU_LIST", default=None,
                       help=("Specify CPU affinity for benchmark runs. This "
                             "way, benchmarks can be forced to run on a given "
@@ -2589,9 +2636,13 @@
     options, args = parser.parse_args(argv)

-    if len(args) != 2:
+    expected = 1 if options.raw else 2
+    if len(args) != expected:
         parser.error("incorrect number of arguments")
-    base, changed = args
+    if expected == 1:
+        base = changed = args[0]
+    else:
+        base, changed = args
     options.base_binary = base
     options.changed_binary = changed
@@ -2601,6 +2652,16 @@
         options.experiment_label = options.changed_binary

     base_args, changed_args = ParsePythonArgsOption(options.args)
+    if options.raw:
+        if base_args != changed_args:
+            parser.error('provide args for only one interpreter in raw mode')
+        if options.track_memory:
+            # XXX this might be worth fixing someday?
+            parser.error('raw mode is not compatible with memory tracking')
+        if options.diff_instrumentation:
+            parser.error('raw mode is not compatible with instrumentation')
+        if options.csv:
+            parser.error('raw mode does not support csv output')
     base_cmd_prefix = [base] + base_args
     changed_cmd_prefix = [changed] + changed_args
@@ -2633,7 +2694,7 @@
         info("Automatically selected timer: %s", options.timer)

     should_run = ParseBenchmarksOption(options.benchmarks, bench_groups,
-                                       options.fast)
+                                       options.fast, options.raw)

     should_run = FilterBenchmarks(should_run, bench_funcs,
                                   base_cmd_prefix, changed_cmd_prefix)
@@ -2653,6 +2714,12 @@
     print("Report on %s" % " ".join(platform.uname()))
     if multiprocessing:
         print("Total CPU cores:", multiprocessing.cpu_count())
+    if options.raw:
+        for name, result in results:
+            print()
+            print("###", name, "###")
+            print(result)
+        return dict(results)
     hidden = []
     if not options.verbose:
         shown = []
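
To make the shape of a raw result concrete, here is a small sketch of the
RawBenchmarkResult type added above (the timings are invented, purely for
illustration):

    from perf import RawBenchmarkResult

    # Made-up numbers; a real result comes from FormatRawData(bm_data, options).
    result = RawBenchmarkResult(0.125, min=0.120, max=0.131, std_dev=0.004)
    print(result)
    # __str__ joins the sorted items, one 'key: value' pair per line:
    #   average: 0.125
    #   max: 0.131
    #   min: 0.120
    #   std_dev: 0.004

When invoked from the command line, main() also prints each result under a
"### <benchmark> ###" header before returning the dict, as the last hunk
above shows.
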
--
Repository URL: https://hg.python.org/benchmarks