[Python-checkins] r46704 - sandbox/trunk/pybch/pybch.py

sean.reifschneider python-checkins at python.org
Wed Jun 7 08:20:15 CEST 2006


Author: sean.reifschneider
Date: Wed Jun  7 08:20:14 2006
New Revision: 46704

Modified:
   sandbox/trunk/pybch/pybch.py
Log:
Closer to working, but just need to figure out some comparison math.
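
One plausible reading of the comparison math this revision is moving toward, sketched outside the diff: each result entry is [rounds, [per-pass times]], and timings from runs calibrated to different round counts have to be rescaled before their minimums are compared. The function name, variable names, and sample numbers below are illustrative only, not part of pybch.

    def compare_entry(source, compare):
        # source and compare are result entries of the form [rounds, [times...]]
        source_rounds, source_times = source
        compare_rounds, compare_times = compare

        # Rescale the comparison timings as if they had run source_rounds
        # rounds, so the two runs are directly comparable.
        factor = float(source_rounds) / float(compare_rounds)
        normalized = [t * factor for t in compare_times]

        # A positive difference means the comparison run was slower.
        return min(normalized) - min(source_times)

    # e.g. source calibrated to 1000 rounds, comparison to 2000 rounds
    print compare_entry([1000, [1.20, 1.18, 1.22]], [2000, [2.50, 2.44, 2.48]])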


Modified: sandbox/trunk/pybch/pybch.py
==============================================================================
--- sandbox/trunk/pybch/pybch.py	(original)
+++ sandbox/trunk/pybch/pybch.py	Wed Jun  7 08:20:14 2006
@@ -52,11 +52,11 @@
     print ('Comparing %(version)s (%(build)s)'
             % compareResults['environment'])
     print ('       to %(version)s (%(build)s)'
-            % compareResults['environment'])
+            % testResults['environment'])
     print ('Comparing [%(environment)s] on %(host)s'
             % compareResults['environment'])
     print ('       to [%(environment)s] on %(host)s'
-            % compareResults['environment'])
+            % testResults['environment'])
     print
 
     #  get list of tests
@@ -69,18 +69,23 @@
     overallSpeedups = 0.0
     overallSlowdowns = 0.0
     for testSource in testList:
-        testCompare = [ None, None, compareResults['results'][testSource[1]] ]
-        sourceAverage = (reduce(lambda x,y: x+y, testSource[2], 0)
+        compareResults = compareResults['results'][testSource[1]]
+        testCompare = [ None, None, [compareResults[0], compareResults[1]] ]
+        sourceAverage = (reduce(lambda x,y: x+y, testSource[2][1], 0)
                 / len(testSource[2]))
-        compareAverage = (reduce(lambda x,y: x+y, testCompare[2], 0)
+        compareAverage = (reduce(lambda x,y: x+y, testCompare[2][1], 0)
                 / len(testCompare[2]))
 
+        #  calculate normalization
+        normalizationFactor = float(testCompare[2][0]) / float(testSource[2][0])
+        print normalizationFactor  #@@@
+
         #  calculate averages
         sourceAverages = []
-        for n in testSource[2]:
+        for n in testSource[2][1]:
             sourceAverages.append(n / sourceAverage * 100.0)
         compareAverages = []
-        for n in testCompare[2]:
+        for n in testCompare[2][1]:
             compareAverages.append(n / compareAverage * 100.0)
 
         sourceAveragesStr = ' '.join(map(lambda x: '%5.1f%%'
@@ -88,7 +93,7 @@
         compareAveragesStr = ' '.join(map(lambda x: '%5.1f%%'
                 % x, compareAverages))
 
-        difference = min(testCompare[2]) - min(testSource[2])
+        difference = min(testCompare[2][1]) - min(testSource[2][1])
         overallDiff = overallDiff + difference
         if difference > 0:
             overallSlowdowns = overallSlowdowns + difference
@@ -162,29 +167,28 @@
             test.cowlibrate()
             if options.verbose >= 3:
                 print 'Calibrated to %d rounds' % test.rounds
-            print '%s.rounds = %d' % ( testClass, test.rounds )
-            continue
 
             if options.verbose >= 2: print 'Running tests...'
             first = None
-            passResults = []
-            while len(passResults) < 1:
+            passResults = [test.rounds, []]
+            while len(passResults[1]) < 5:
                 latest = test.run()
-                passResults.append(latest)
+                passResults[1].append(latest)
                 if first == None: first = latest
                 if options.verbose >= 3:
                     print '   %3.2f' % ((first / latest) * 100)
             testResults[( moduleName, str(testClass) )] = passResults
 
+    environment = getEnvironmentInfo()
+    testData = { 'environment' : environment, 'results' : testResults }
+
     #  save results to a file
     if options.compareDestFileSave:
-        environment = getEnvironmentInfo()
-        pickle.dump({ 'environment' : environment, 'results' : testResults },
-                open(options.compareDestFileSave, 'w'))
+        pickle.dump(testData, open(options.compareDestFileSave, 'w'))
 
 #  deal with results
 if options.compareSrcFileLoad:
-    compareResults(testResults, options.verbose,
+    compareResults(testData, options.verbose,
             pickle.load(open(options.compareSrcFileLoad, 'r')))
 
 sys.exit(0)
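
For context, a rough sketch of the data shape pybch now pickles per run and later reloads for comparison, based on the dictionary keys and result layout used in the diff above; the concrete version, host, and timing values are made up for illustration.

    import pickle

    testData = {
        'environment': {
            # keys used by the report formatting in compareResults()
            'version': '2.5a2',
            'build': 'example build',
            'environment': 'example environment',
            'host': 'example.host',
        },
        # keyed by (moduleName, str(testClass)); each value is
        # [calibrated rounds, [per-pass timings]]
        'results': {
            ('stringtests', 'ConcatTest'): [1000, [1.21, 1.19, 1.22, 1.20, 1.18]],
        },
    }

    pickle.dump(testData, open('pybch-results.pickle', 'w'))
    loaded = pickle.load(open('pybch-results.pickle', 'r'))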

