[pypy-svn] r59609 - pypy/build/benchmem

fijal at codespeak.net
Fri Oct 31 21:13:54 CET 2008


Author: fijal
Date: Fri Oct 31 21:13:52 2008
New Revision: 59609

Modified:
   pypy/build/benchmem/report.py
   pypy/build/benchmem/report_graphic.py
Log:
move main to report.py


Modified: pypy/build/benchmem/report.py
==============================================================================
--- pypy/build/benchmem/report.py	(original)
+++ pypy/build/benchmem/report.py	Fri Oct 31 21:13:52 2008
@@ -7,11 +7,15 @@
 
 """
 import py, os
-import smaps, runbench
+import smaps, runbench, numpy
 
 parser = py.compat.optparse.OptionParser(usage=__doc__)
 parser.add_option("-l", "--benchlog", action="store", dest="benchlog", default="bench.log", 
                   help="logfile to read results from")
+parser.add_option("-g", "--store-graphs", action="store", dest="basepath",
+                  default="", help="optional path to store picture output")
+parser.add_option("-t", "--no-text", action="store_true", dest="notext",
+                  help="disable text reporting")
 
 def asciitable(table):
     colmax = []
@@ -72,39 +76,6 @@
         self.tw.sep("=", "private RSS at checkpoints: %s" %(name,))
         self.tw.line(asciitable(rows))
 
-class Gnuplot(TableReporter):
-    def begin(self):
-        self.output = []
-        self.pythons = []
-
-    def report(self, name, rows):
-        if not self.pythons:
-            self.pythons.extend(rows[0][1:])
-        self.output.append(rows[2][1:])
-   
-    def end(self):
-        pythons = self.pythons
-        output = self.output
-        
-        runbench.mydir.join("gnuplotdata").write(
-            "\n".join([" ".join([str(j) for j in i]) for i in output]))
-        
-        def new_cmd(num, name):
-            s = ("plot 'gnuplotdata' using ($%d) with histograms title '%s'" %
-                 (num + 1,name))
-            if num > 0:
-                s = "re" + s
-            return s
-
-        plotcmds = "\n".join([new_cmd(i, name) for i, name in enumerate(pythons)])
-        runbench.mydir.join("gnuplotcmd").write(
-            """set terminal postscript color
-            set output 'output.ps'
-            %s
-            """ % plotcmds
-         )
-        os.system("gnuplot gnuplotcmd")
-
 class IncrementalSizePerBench:
     def __init__(self, resultset):
         self.resultset = resultset.filter(benchtype="objsizes")
@@ -143,6 +114,23 @@
                 #row.append(incsize)
             rows.append(row)
         tw.line(asciitable(rows))
+
+    def run_graphic(self, plotter):        
+        """ This function creates series of graphs for showing incremental
+        private memory consumed for object allocation (without base interpreter
+        size)
+        """
+        resultset = self.resultset
+        if not resultset.results:
+            return
+        names = [name for name, _ in resultset.getname2results()]
+        results = resultset.getname2results()
+        benchresults = [[] for _ in range(len(results[0]))]
+        executables = [result.executable for result in results[0][1]]
+        for _, results in results:
+            for i, result in enumerate(results):
+                benchresults[i].append(self.get_incremental_size(result))
+        plotter.plot_objsizes(benchresults, names, executables)
  
     def get_incremental_size(self, result):
         # checkpoint 0: state before benchmark
@@ -153,6 +141,9 @@
         return inbench - basesize
 
 class BaseSizeOfInterpreters:
+    HEAP = runbench.Mappings.HEAP
+    CODE = runbench.Mappings.CODE
+    DATA = runbench.Mappings.DATA
     def __init__(self, resultset):
         self.resultset = resultset.filter(benchtype="basesize")
 
@@ -174,44 +165,68 @@
             for result in results:
                 row.append(mapping_func(result))
             rows.append(row)
-        HEAP = runbench.Mappings.HEAP
-        CODE = runbench.Mappings.CODE
-        DATA = runbench.Mappings.DATA
         makerow("HEAP-RSS", lambda result: 
-                result.snapshot.filter(group=HEAP).rss)
+                result.snapshot.filter(group=self.HEAP).rss)
         makerow("HEAP-private_clean", lambda result:
-                result.snapshot.filter(group=HEAP).private_clean)
+                result.snapshot.filter(group=self.HEAP).private_clean)
         makerow("HEAP-private_dirty", lambda result:
-                result.snapshot.filter(group=HEAP).private_dirty)
+                result.snapshot.filter(group=self.HEAP).private_dirty)
         # we only show the clean bits of the code, this might not really work
         # out if the code is not compiled position-independently, likewise the often
         # seen dirty code page might be a dispatch table (correct term?) of the linker
         makerow("IP-CODE-shared_clean", lambda result:
-                result.snapshot.filter(group=result.executable, kind=CODE).shared_clean)
+                result.snapshot.filter(group=result.executable, kind=self.CODE).shared_clean)
         makerow("IP-CODE-private_clean", lambda result:
-                result.snapshot.filter(group=result.executable, kind=CODE).private_clean)
+                result.snapshot.filter(group=result.executable, kind=self.CODE).private_clean)
         # whole static data of the process in memory, also including e.g. shared data with other processes
         makerow("IP-DATA-RSS", lambda result:
-                result.snapshot.filter(group=result.executable, kind=DATA).rss)
+                result.snapshot.filter(group=result.executable, kind=self.DATA).rss)
         # static data that is not shared with another process and was not modified by the process
         # can be easily shared with another process
         makerow("IP-DATA-private_clean", lambda result:
-                result.snapshot.filter(group=result.executable, kind=DATA).private_clean)
+                result.snapshot.filter(group=result.executable, kind=self.DATA).private_clean)
         # static data that is not shared with another process and was modified by the process
         makerow("IP-DATA-private_dirty", lambda result:
-                result.snapshot.filter(group=result.executable, kind=DATA).private_dirty)
+                result.snapshot.filter(group=result.executable, kind=self.DATA).private_dirty)
         # rest includes other shared libraries that are neither the interpreter nor the heap
         makerow("REST-private_clean", lambda result:
                 result.snapshot.filter(group=result.executable, inv=True).
-                filter(group=HEAP, inv=True).private_clean)
+                filter(group=self.HEAP, inv=True).private_clean)
         makerow("REST-private_dirty", lambda result:
                 result.snapshot.filter(group=result.executable, inv=True).
-                filter(group=HEAP, inv=True).private_dirty)
+                filter(group=self.HEAP, inv=True).private_dirty)
         makerow("REST-RSS", lambda result:
                 result.snapshot.filter(group=result.executable, inv=True).
-                filter(group=HEAP, inv=True).rss)
+                filter(group=self.HEAP, inv=True).rss)
         tw.line(asciitable(rows))
 
+    def run_graphic(self, plotter):
+        """ This function plots base interpreter sizes of various interpreters
+        with bars specifying:
+
+        * heap private
+        * ip code
+        * ip data
+        * private rest
+        """
+        results = self.resultset.results
+        if not results:
+            return
+        heap_private = numpy.array([result.snapshot.filter(group=self.HEAP).private
+                        for result in results])
+        ip_code = numpy.array([result.snapshot.filter(group=result.executable,
+                                                      kind=self.CODE).
+                               private for result in results])
+        ip_data = numpy.array([result.snapshot.filter(group=result.executable,
+                                                      kind=self.DATA).
+                               private for result in results])
+        rest = numpy.array([result.snapshot.filter(group=result.executable,
+                                                   inv=True).
+                            filter(group=self.HEAP, inv=True).private for result in
+                            results])
+        return plotter.plot_baseint_sizes(heap_private, ip_code, ip_data, rest, results)
+
+
 class BaseTimeOfInterpreters:
     def __init__(self, resultset):
         self.resultset = resultset.filter(benchtype="basetime")
@@ -237,6 +252,13 @@
                             timing['real'], timing['user'], timing['sys']])
         tw.line(asciitable(rows))
 
+class AppProfiles(object):
+    def __init__(self, resultset):
+        self.name2result = resultset.filter(benchtype="appprofiles").getname2results()
+
+    def run_graphic(self, plotter, totals):
+        plotter.plot_appprofiles(self.name2result, totals)
+
 if __name__ == "__main__":
     options, args = parser.parse_args()
 
@@ -245,10 +267,19 @@
     resultset.parse(benchlog)
 
     #maxtable_overview(reader)
-    CheckpointDetails(resultset).run()
-    IncrementalSizePerBench(resultset).run()
-    BaseSizeOfInterpreters(resultset).run()
-    BaseTimeOfInterpreters(resultset).run()
+    if not options.notext:
+        CheckpointDetails(resultset).run()
+        IncrementalSizePerBench(resultset).run()
+        BaseSizeOfInterpreters(resultset).run()
+        BaseTimeOfInterpreters(resultset).run()
+
+    if options.basepath:
+        from report_graphic import Plotter
+        plotter = Plotter(options.basepath)
+        #plotter = Plotter(None, True)
+        IncrementalSizePerBench(resultset).run_graphic(plotter)
+        totals = BaseSizeOfInterpreters(resultset).run_graphic(plotter)
+        AppProfiles(resultset).run_graphic(plotter, totals)
     
     #for name, results in reader.name2results.items():
     #    tw.sep("=", name)
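
As a usage sketch of the options added above (the bench.log and graphs/ paths
are illustrative only, not part of the commit):

    python report.py --benchlog=bench.log --no-text --store-graphs=graphs/

Note that numpy is now imported unconditionally at the top of report.py, while
report_graphic (and with it the plotting backend) is only imported when
--store-graphs is given.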

Modified: pypy/build/benchmem/report_graphic.py
==============================================================================
--- pypy/build/benchmem/report_graphic.py	(original)
+++ pypy/build/benchmem/report_graphic.py	Fri Oct 31 21:13:52 2008
@@ -117,48 +117,3 @@
                 pylab.savefig(self.basepath + 'appprofiles_%s.ps' % name)
             if self.show:
                 pylab.show()
-
-def process_baseint_sizes(results, plotter):
-    """ This function plots base interpreter sizes of various interpreters
-    with bars specifying:
-
-    * heap private
-    * ip code
-    * ip data
-    * private rest
-    """
-    if not results:
-        return
-    heap_private = numpy.array([result.snapshot.filter(group=HEAP).private
-                    for result in results])
-    ip_code = numpy.array([result.snapshot.filter(group=result.executable,
-                                                  kind=CODE).
-                           private for result in results])
-    ip_data = numpy.array([result.snapshot.filter(group=result.executable,
-                                                  kind=DATA).
-                           private for result in results])
-    rest = numpy.array([result.snapshot.filter(group=result.executable,
-                                               inv=True).
-                        filter(group=HEAP, inv=True).private for result in
-                        results])
-    return plotter.plot_baseint_sizes(heap_private, ip_code, ip_data, rest, results)
-
-def process_objsizes(resultset, plotter):
-    """ This function creates series of graphs for showing incremental
-    private memory consumed for object allocation (without base interpreter
-    size)
-    """
-    if not resultset.results:
-        return
-    def incremental_private(result):
-        return (result.snapshots[1].heap_private() -
-                result.snapshots[0].heap_private())
-    
-    names = [name for name, _ in resultset.getname2results()]
-    results = resultset.getname2results()
-    benchresults = [[] for _ in range(len(results[0]))]
-    executables = [result.executable for result in results[0][1]]
-    for _, results in results:
-        for i, result in enumerate(results):
-            benchresults[i].append(incremental_private(result))
-    plotter.plot_objsizes(benchresults, names, executables)
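
For context, a minimal sketch of the Plotter interface that the moved
run_graphic methods rely on, assuming it is implemented with pylab as the tail
of report_graphic.py above suggests. The basepath/show attributes and the
plot_objsizes signature are taken from the diff; the bar layout and the
'objsizes.ps' filename are assumptions, not the actual implementation:

    import numpy, pylab

    class Plotter(object):
        def __init__(self, basepath, show=False):
            self.basepath = basepath
            self.show = show

        def plot_objsizes(self, benchresults, names, executables):
            # grouped bar chart: one bar group per benchmark, one bar per
            # executable, height = incremental private memory (sketch only)
            width = 0.8 / len(executables)
            for i, (exe, sizes) in enumerate(zip(executables, benchresults)):
                pylab.bar(numpy.arange(len(names)) + i * width, sizes,
                          width=width, label=exe)
            pylab.xticks(numpy.arange(len(names)) + 0.4, names, rotation=90)
            pylab.legend()
            if self.basepath:
                pylab.savefig(self.basepath + 'objsizes.ps')  # assumed filename
            if self.show:
                pylab.show()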


