[pypy-commit] extradoc extradoc: add first (bad) results for JIT in our benchmarks
Raemi
noreply at buildbot.pypy.org
Wed May 28 14:59:34 CEST 2014
Author: Remi Meier <remi.meier at inf.ethz.ch>
Branch: extradoc
Changeset: r5277:2308d810d1bf
Date: 2014-05-28 15:00 +0200
http://bitbucket.org/pypy/extradoc/changeset/2308d810d1bf/
Log: add first (bad) results for JIT in our benchmarks
diff --git a/talk/dls2014/paper/paper.tex b/talk/dls2014/paper/paper.tex
--- a/talk/dls2014/paper/paper.tex
+++ b/talk/dls2014/paper/paper.tex
@@ -950,7 +950,7 @@
Here, we will not go into detail about the integration of our STM
system with PyPy's JIT. In fact, we will disable it for all benchmarks
-except those in section \ref{sec:real-world-bench}. We would like to
+except those in section \ref{sec:performance-bench}. We would like to
regard it as a simple performance enhancement, but that is not what
happens in reality. First, since the JIT is a tracing
JIT\remi{explain?} running in multiple threads, it may compile
@@ -962,7 +962,7 @@
exposes the overhead of STM more by speeding up all the rest.
Overall, we believe that disabling it on all benchmarks except the
-real-world benchmarks in section \ref{sec:real-world-bench} is better
+performance benchmarks in section \ref{sec:performance-bench} is better
because we can minimise non-determinism. We also do not want to depend
on the capabilities of the JIT in these experiments.
@@ -1089,7 +1089,13 @@
\end{figure}
-\subsection{Real-World Benchmarks\label{sec:real-world-bench}}
+\subsection{Performance Benchmarks\label{sec:performance-bench}}
+
+\remi{For performance we first look at the no-JIT behaviour of STM.
+Since without the JIT we cannot compete well even with CPython, we
+later show JIT benchmarks, where we see the unstable performance but
+also that we can still scale. (With more work, our STM system could
+parallelise jitted code too.)}
More real-world benchmarks comparing multiple implementations:
\begin{itemize}[noitemsep]
@@ -1102,6 +1108,18 @@
\end{itemize}
+% TODO: Jython
+\remi{Some benchmarks (figure \ref{fig:performance-jit}) with the JIT
+enabled show that we can be competitive with the other solutions. They
+also show that more work is needed in that area to make performance
+more stable.}
+
+\begin{figure}[h]
+  \centering
+  \includegraphics[width=1\columnwidth]{plots/performance.pdf}
+  \caption{Comparing runtime between interpreters with the JIT
+    enabled\label{fig:performance-jit}}
+\end{figure}
+
\section{Related Work}
diff --git a/talk/dls2014/paper/plots/performance.pdf b/talk/dls2014/paper/plots/performance.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..99fa8230f1a046efb2fe92225955fc00178fdaa0
GIT binary patch
[cut]
diff --git a/talk/dls2014/paper/plots/plot_performance.py b/talk/dls2014/paper/plots/plot_performance.py
new file mode 100755
--- /dev/null
+++ b/talk/dls2014/paper/plots/plot_performance.py
@@ -0,0 +1,227 @@
+#!/usr/bin/python
+
+# benchmarks-repo at 0d81c9b1ec8e
+
+# for now: avg & stddev of the best
+
+# pypy-c-paper-jit bench.py -k5 raytrace/raytrace.py 1-4
+# pypy-c-paper-jit bench.py -k5 btree/btree.py 1-4
+# pypy-c-paper-jit bench.py -k5 skiplist/skiplist.py 1-4
+# pypy-c-paper-jit bench.py -k5 threadworms/threadworms.py 1-4
+# pypy-c-paper-jit bench.py -k5 mandelbrot/mandelbrot.py 1-4 64
+# pypy-c-paper-jit multithread-richards.py 10000 1-4 # report runtime
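+# ("1-4" above presumably sweeps the thread count from one to four,
+# matching ts = range(1, 5) below; each list in `benchs` holds the
+# recorded runtimes per thread count.)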
+
+
+
+import matplotlib
+matplotlib.use('gtkagg')  # select the backend before pyplot is imported
+
+from matplotlib import rc
+#rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
+## for Palatino and other serif fonts use:
+rc('font', **{'family': 'serif', 'serif': ['Palatino']})
+rc('text', usetex=True)
+
+import matplotlib.pyplot as plt
+# import pprint - slow as hell
+
+# thread counts on the x axis
+ts = range(1, 5)
+
+interps_styles = {
+    "pypy-stm-jit": 'r-',
+    "pypy-jit": 'b--',
+    "best": "k:"
+}
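+# (matplotlib format strings: red solid for pypy-stm-jit, blue dashed
+# for pypy-jit, and a black dotted line marking the best runtime)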
+
+benchs = {
+    "raytrace": {
+        "pypy-stm-jit": [
+            [3.91, 3.87],
+            [2.53, 2.52],
+            [2.23],
+            [2.46, 2.6]
+        ],
+        "pypy-jit": [
+            [1.6],
+            [2.17],
+            [3.33],
+            [4.16]
+        ]},
+
+    "btree": {
+        "pypy-stm-jit": [
+            [1.68],
+            [1.3],
+            [1.39],
+            [1.66]
+        ],
+        "pypy-jit": [
+            [1.6],
+            [3.3],
+            [5.1],
+            [5.8]
+        ]},
+
+    "skiplist": {
+        "pypy-stm-jit": [
+            [2.9],
+            [3.0],
+            [3.4],
+            [3.8]
+        ],
+        "pypy-jit": [
+            [2.14],
+            [4.5],
+            [6.2],
+            [6.58]
+        ]},
+
+    "threadworms": {
+        "pypy-stm-jit": [
+            [4.23],
+            [3.4],
+            [3.16],
+            [3.4, 3.3]
+        ],
+        "pypy-jit": [
+            [4.14],
+            [12.5],
+            [16],
+            [20]
+        ]},
+
+    "mandelbrot": {
+        "pypy-stm-jit": [
+            [18.5],
+            [9.9],
+            [8.4],
+            [7.2]
+        ],
+        "pypy-jit": [
+            [13.5],
+            [14.3],
+            [14.5],
+            [14.1]
+        ]},
+
+    "richards": {
+        "pypy-stm-jit": [
+            [63.4],
+            [33.1],
+            [24.9, 36],
+            [27, 39, 63]
+        ],
+        "pypy-jit": [
+            [30.7],
+            [31.4],
+            [33],
+            [32.0]
+        ]}
+}
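+
+# A minimal sketch (not used by the plotting code below) of how each
+# cell of `benchs` becomes one plotted point: the mean of the recorded
+# runtimes, with their standard deviation as the error bar.
+def point_for(runs):
+    import numpy as np
+    return np.mean(runs), np.std(runs)  # e.g. [2.46, 2.6] -> (2.53, 0.07)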
+
+
+
+
+def plot_speedups(plt):
+    import numpy as np
+    from collections import OrderedDict
+    fig = plt.figure()
+
+    legend = OrderedDict()
+    w, h = 2, 3  # subplot grid: w columns times h rows, one plot per benchmark
+    axs = {}
+    for i, (name, contestants) in enumerate(benchs.items()):
+        # share the x axis with the subplot one row above
+        if i >= w:
+            sharex = axs[i - w]
+        else:
+            sharex = None
+        ax = fig.add_subplot(h, w, i+1, sharex=sharex)
+        axs[i] = ax
+        max_y = 0
+        best_y = 9999999  # lowest mean runtime seen in this benchmark
+        for interp, runs in contestants.items():
+            y = []
+            yerr = []
+            for r in runs:
+                # one point per thread count: mean runtime, stddev as error bar
+                new_y = np.mean(r)
+                y.append(new_y)
+                yerr.append(np.std(r))
+                if new_y > max_y:
+                    max_y = new_y
+                if new_y < best_y:
+                    best_y = new_y
+
+            artist = ax.errorbar(ts, y, yerr=yerr,
+                                 fmt=interps_styles[interp])
+            if interp not in legend:
+                legend[interp] = artist
+
+        # horizontal reference line marking the best runtime of any interpreter
+        legend["best"], = ax.plot(ts, [best_y] * len(ts),
+                                  interps_styles["best"])
+
+        if i // w == h-1:  # bottom row gets the x axis labels
+            ax.set_xlim(0, 5)
+            ax.set_xlabel("Threads")
+        ax.set_ylim(0, max_y * 1.1)
+        if i % w == 0:  # leftmost column gets the y axis labels
+            ax.set_ylabel("Runtime [s]")
+        ax.set_title(name)
+
+    return axs[w*(h-1)].legend(tuple(legend.values()), tuple(legend.keys()),
+                               ncol=3,
+                               loc=(0,-0.4))
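+
+# The legend is placed below the bottom-left subplot (loc=(0,-0.4)),
+# outside its axes, so main() passes it to savefig() via
+# bbox_extra_artists; otherwise it would be clipped from the PDF.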
+
+
+def main():
+    print "Draw..."
+    legend = plot_speedups(plt)
+
+    #axs[0].set_ylim(0, len(x))
+    #ax.set_yticks([r+0.5 for r in range(len(logs))])
+    #ax.set_yticklabels(range(1, len(logs)+1))
+    #axs[0].set_xticks([])
+
+    # def label_format(x, pos):
+    #     return "%.2f" % (abs((x - left) * 1e-6), )
+    # major_formatter = matplotlib.ticker.FuncFormatter(label_format)
+    # axs[0].xaxis.set_major_formatter(major_formatter)
+
+    #ax.set_title("Memory Usage in Richards")
+
+    plt.draw()
+    #plt.show()
+    print "Drawn."
+
+    file_name = "performance.pdf"
+    plt.savefig(file_name, format='pdf',
+                bbox_extra_artists=(legend,),
+                bbox_inches='tight', pad_inches=0)
+
+
+if __name__ == "__main__":
+    import argparse
+    parser = argparse.ArgumentParser(
+        description='Plot benchmark runtimes')
+    parser.add_argument('--figure-size', default='7x8',
+                        help='set figure size in inches: format=6x4')
+    parser.add_argument('--font-size', default='10.0',
+                        help='set font size in pts: 10.0')
+    parser.add_argument('--png-dpi', default='300',
+                        help='set dpi of png output: 300')
+
+    args = parser.parse_args()
+    matplotlib.rcParams.update(
+        {'figure.figsize': tuple(map(int, args.figure_size.split('x'))),
+         'font.size': float(args.font_size),
+         'savefig.dpi': int(args.png_dpi),
+         })
+
+    main()
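+
+# Usage sketch (assumes a matplotlib build with the gtkagg backend):
+#   ./plot_performance.py --figure-size 7x8 --font-size 10.0
+# The plot is written to performance.pdf in the current directory.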