[Python-checkins] benchmarks: Allow logging results to a CSV file
antoine.pitrou
python-checkins at python.org
Mon Jan 17 21:47:35 CET 2011
antoine.pitrou pushed 62e754c57a7f to benchmarks:
http://hg.python.org/benchmarks/rev/62e754c57a7f
changeset: 149:62e754c57a7f
tag: tip
user: Antoine Pitrou <solipsis at pitrou.net>
date: Mon Jan 17 21:47:32 2011 +0100
summary:
Allow logging results to a CSV file
files:
perf.py
diff --git a/perf.py b/perf.py
--- a/perf.py
+++ b/perf.py
@@ -52,6 +52,7 @@
__author__ = "jyasskin at google.com (Jeffrey Yasskin)"
+import csv
import contextlib
import logging
import math
@@ -393,6 +394,10 @@
" %(delta_std)s" + self.get_timeline())
% self.__dict__)
+ def as_csv(self):
+ # Min base, min changed
+ return ["%f" % self.min_base, "%f" % self.min_changed]
+
class BenchmarkError(object):
"""Object representing the error from a failed benchmark run."""
@@ -427,6 +432,10 @@
" %(delta_max)s" + self.get_usage_over_time())
% self.__dict__)
+ def as_csv(self):
+ # Max base, max changed
+ return ["%.3f" % self.max_base, "%.3f" % self.max_changed]
+
class SimpleBenchmarkResult(object):
"""Object representing result data from a successful benchmark run."""
@@ -442,6 +451,10 @@
return ("%(base_time)f -> %(changed_time)f: %(time_delta)s"
% self.__dict__)
+ def as_csv(self):
+ # Base, changed
+ return ["%f" % self.base_time, "%f" % self.changed_time]
+
class InstrumentationResult(object):
"""Object representing a --diff_instrumentation result."""
@@ -2073,6 +2086,11 @@
help=("What style the benchmark output should take."
" Valid options are 'normal' and 'table'."
" Default is '%default'."))
+ parser.add_option("--csv", metavar="CSV_FILE", type="string",
+ action="store", default=None,
+ help=("Name of a file the results will be written to,"
+ " as a three-column CSV file containing minimum"
+ " runtimes for each benchmark."))
parser.add_option("-C", "--control_label", metavar="LABEL", type="string",
action="store", default="",
help="Optional label for the control binary")
@@ -2152,6 +2170,13 @@
else:
raise ValueError("Invalid output_style: %r" % options.output_style)
+ if options.csv:
+ with open(options.csv, "wb") as f:
+ writer = csv.writer(f)
+ writer.writerow(['Benchmark', 'Base', 'Changed'])
+ for name, result in results:
+ writer.writerow([name] + result.as_csv())
+
if hidden:
print()
print("The following not significant results are hidden, "
--
Repository URL: http://hg.python.org/benchmarks
More information about the Python-checkins
mailing list