[Scipy-svn] r3029 - in trunk/Lib/sandbox: . lobpcg

scipy-svn at scipy.org
Tue May 22 05:56:16 EDT 2007


Author: rc
Date: 2007-05-22 04:56:10 -0500 (Tue, 22 May 2007)
New Revision: 3029

Added:
   trunk/Lib/sandbox/lobpcg/
   trunk/Lib/sandbox/lobpcg/README
   trunk/Lib/sandbox/lobpcg/X.txt
   trunk/Lib/sandbox/lobpcg/__init__.py
   trunk/Lib/sandbox/lobpcg/info.py
   trunk/Lib/sandbox/lobpcg/lobpcg.py
   trunk/Lib/sandbox/lobpcg/setup.py
Log:
initial release

Added: trunk/Lib/sandbox/lobpcg/README
===================================================================
--- trunk/Lib/sandbox/lobpcg/README	2007-05-22 09:19:14 UTC (rev 3028)
+++ trunk/Lib/sandbox/lobpcg/README	2007-05-22 09:56:10 UTC (rev 3029)
@@ -0,0 +1,12 @@
+Pure SciPy implementation of the Locally Optimal Block Preconditioned
+Conjugate Gradient Method (LOBPCG); see
+http://www-math.cudenver.edu/~aknyazev/software/BLOPEX/
+
+The lobpcg.py code was written by Robert Cimrman. Many thanks to Andrew
+Knyazev, the author of the algorithm, for his advice and support.
+
+The LOBPCG algorithm is described in detail in:
+
+A. V. Knyazev, Toward the Optimal Preconditioned Eigensolver: Locally Optimal Block Preconditioned Conjugate Gradient Method. SIAM Journal on Scientific Computing 23 (2001), no. 2, pp. 517-541. http://dx.doi.org/10.1137/S1064827500366124
+
+A. V. Knyazev, I. Lashuk, M. E. Argentati, and E. Ovchinnikov, Block Locally Optimal Preconditioned Eigenvalue Xolvers (BLOPEX) in hypre and PETSc  (2007). http://arxiv.org/abs/0705.2626
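
A minimal call might look like the following sketch; it assumes the sandbox
package has been enabled so that it is importable as scipy.sandbox.lobpcg,
and that symeig is installed:

    import numpy as nm
    from scipy.sparse import spdiags
    from scipy.sandbox.lobpcg import lobpcg

    n = 100
    # Diagonal test operator with eigenvalues 1, 2, ..., n.
    operatorA = spdiags( [nm.arange( n, dtype = nm.float64 ) + 1], 0, n, n )
    operatorB = nm.eye( n )      # identity "mass" matrix
    X = nm.random.rand( n, 3 )   # random initial block of three vectors

    # Approximate the three smallest eigenpairs of A x = lambda B x.
    eigs, vecs = lobpcg( X, operatorA, operatorB,
                         residualTolerance = 1e-4, maxIterations = 40,
                         largest = False )
    print eigs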

Added: trunk/Lib/sandbox/lobpcg/X.txt
===================================================================
--- trunk/Lib/sandbox/lobpcg/X.txt	2007-05-22 09:19:14 UTC (rev 3028)
+++ trunk/Lib/sandbox/lobpcg/X.txt	2007-05-22 09:56:10 UTC (rev 3029)
@@ -0,0 +1,100 @@
+   9.4578390e-01   8.1748307e-01   5.2405098e-01
+   3.4042417e-01   2.0205362e-01   1.0183770e-01
+   6.0552424e-01   6.2714124e-01   2.8453380e-01
+   6.6734584e-01   7.9436448e-01   1.9751938e-02
+   8.2265186e-01   2.6926755e-01   3.6840847e-01
+   3.1469418e-01   3.0379238e-01   2.0080037e-01
+   2.4413695e-01   1.4521520e-01   7.6558574e-01
+   2.3406188e-02   7.7812449e-01   2.2445593e-01
+   3.7968672e-01   3.5230883e-03   8.7364788e-01
+   3.0544127e-01   5.5824667e-01   1.6885245e-01
+   6.0105945e-01   1.0270848e-01   4.2079194e-01
+   9.7298628e-01   8.9559614e-01   6.9914607e-01
+   6.3173944e-01   3.6783941e-02   8.7291415e-01
+   1.2850335e-02   6.7239405e-01   6.1485979e-02
+   2.2492574e-01   2.2410178e-01   6.9610368e-02
+   7.0026143e-01   3.2974585e-01   4.7714450e-01
+   4.6216202e-01   7.1810038e-01   1.3842199e-01
+   1.6075778e-01   2.8434470e-01   9.3491343e-01
+   5.9920565e-01   7.7861398e-01   5.5881318e-01
+   2.5610599e-01   4.8772724e-01   8.2174995e-01
+   8.4599049e-02   6.7666816e-01   6.7358438e-01
+   1.7276306e-01   3.3672791e-01   7.1731543e-01
+   9.3638799e-01   2.8648356e-01   3.3202492e-01
+   7.8378312e-01   1.8876724e-01   5.2164755e-01
+   1.3125747e-01   7.3192144e-01   1.4458665e-01
+   3.5898101e-01   2.5542739e-01   1.3423188e-01
+   6.6653581e-01   5.3403161e-01   6.1206489e-01
+   1.6424754e-01   8.5812639e-01   8.0589966e-01
+   1.2393097e-01   6.4786459e-01   1.0513908e-01
+   5.7704459e-01   2.7393541e-01   3.5526243e-01
+   9.7196693e-01   2.4268395e-01   7.7594090e-01
+   9.2527201e-01   7.5413139e-01   2.6376718e-01
+   7.1146287e-01   2.6500795e-01   4.7268878e-01
+   8.3386510e-02   1.9721810e-01   2.1390330e-01
+   8.8205593e-01   3.1468590e-01   1.4032796e-01
+   6.1270690e-01   1.7594654e-01   8.0526177e-01
+   8.1338010e-01   3.2610889e-02   6.9759916e-01
+   6.8591045e-01   1.8629491e-01   5.5777308e-01
+   1.9653411e-01   7.4549104e-01   1.0317975e-01
+   5.1337714e-01   6.4618762e-01   4.1435988e-02
+   1.0545021e-01   2.1482454e-01   3.5563984e-02
+   4.9898768e-01   1.9196579e-01   1.7861081e-01
+   5.2622726e-01   7.6245394e-01   2.3911878e-01
+   2.3876524e-01   7.3847955e-01   3.8956799e-01
+   5.2627999e-01   7.3547286e-01   5.5896867e-01
+   1.3614202e-01   1.5962335e-01   5.8394916e-01
+   2.8810635e-01   1.6800977e-01   1.6392705e-01
+   2.5869014e-01   3.8838829e-01   3.2601187e-01
+   7.9839770e-01   9.2825768e-01   1.7308552e-01
+   9.6538231e-01   7.3628476e-01   4.8175362e-01
+   2.7278367e-01   7.1852217e-01   1.9240004e-01
+   4.2377830e-01   6.1651846e-01   6.0603278e-01
+   7.1786841e-01   7.1124239e-01   6.4650000e-01
+   7.7306094e-01   5.9903451e-01   3.7940119e-01
+   5.8775950e-01   1.5754180e-01   3.3226044e-01
+   8.5990052e-02   1.1271291e-01   1.5367891e-01
+   9.7392075e-01   7.7935865e-01   4.0554907e-01
+   4.3989414e-01   6.0471517e-02   1.3175170e-01
+   8.4232543e-01   3.1856703e-01   7.5886305e-01
+   7.0553803e-01   2.0975170e-01   8.4424784e-01
+   2.6808711e-01   6.6512384e-01   7.9348594e-01
+   7.4909146e-01   3.1300856e-01   3.6137227e-01
+   5.3318446e-01   7.4007881e-01   1.3786857e-01
+   7.0134730e-01   9.9235966e-01   9.0322375e-01
+   2.1945692e-01   5.3450916e-01   6.4340685e-02
+   4.4056297e-01   7.2382493e-01   2.0988534e-01
+   2.3244923e-01   9.7651014e-01   4.6969657e-01
+   1.2116127e-02   9.4207183e-01   2.0084696e-01
+   3.4609005e-01   3.2863497e-01   3.5739789e-02
+   3.4162931e-01   4.0051132e-01   7.9403726e-01
+   5.5939743e-01   3.6049040e-01   3.5666314e-01
+   2.3202965e-01   2.4620348e-01   3.2548306e-01
+   2.4248651e-01   3.6670017e-01   4.8815216e-01
+   5.4151465e-01   1.9926619e-01   1.1374385e-01
+   3.8399210e-01   1.7141648e-01   5.9665625e-01
+   2.1168085e-01   6.3049642e-01   9.7777921e-01
+   4.3105094e-01   2.2652553e-02   6.8658420e-01
+   6.7361827e-02   7.8998130e-01   4.1351694e-02
+   7.2217423e-01   3.9178923e-02   7.9755060e-01
+   4.3727445e-01   9.7835298e-01   2.8601743e-01
+   8.4763941e-01   9.2475650e-01   9.9795252e-01
+   6.2537016e-01   2.8203512e-02   3.7445021e-01
+   4.8265728e-01   4.7061796e-01   2.7402319e-01
+   3.1011489e-01   5.7781953e-01   9.3695011e-01
+   7.8857881e-01   1.5002163e-01   8.0323307e-02
+   9.0247724e-01   8.8659951e-01   3.1252615e-01
+   5.5738324e-01   5.5138175e-02   1.1495353e-02
+   9.9828118e-01   3.9716206e-01   8.6223948e-01
+   3.7224336e-01   7.5698087e-01   4.7226552e-01
+   4.1067449e-01   3.6324903e-01   5.5833599e-01
+   2.0115001e-01   3.0476498e-01   9.6969943e-01
+   1.4328644e-01   5.9892441e-02   1.8394223e-01
+   8.6694382e-01   4.8836797e-01   9.3343161e-01
+   3.7118176e-01   4.0550509e-01   3.7320004e-02
+   2.9918396e-01   2.8109670e-01   2.2387914e-01
+   9.3042764e-01   6.9574924e-02   9.8663726e-01
+   7.6750765e-01   9.6047464e-01   1.5928122e-01
+   1.5166860e-01   5.8205830e-01   7.4238213e-01
+   2.5812146e-01   5.4972680e-01   6.7403608e-01
+   5.1000251e-01   8.3706633e-02   5.1344360e-01

Added: trunk/Lib/sandbox/lobpcg/__init__.py
===================================================================
--- trunk/Lib/sandbox/lobpcg/__init__.py	2007-05-22 09:19:14 UTC (rev 3028)
+++ trunk/Lib/sandbox/lobpcg/__init__.py	2007-05-22 09:56:10 UTC (rev 3029)
@@ -0,0 +1,11 @@
+"LOBPCG"
+
+from info import __doc__
+import lobpcg
+__doc__ = '\n\n'.join( (lobpcg.__doc__, __doc__) )
+del lobpcg
+
+from lobpcg import *
+
+from numpy.testing import NumpyTest
+test = NumpyTest().test

Added: trunk/Lib/sandbox/lobpcg/info.py
===================================================================
--- trunk/Lib/sandbox/lobpcg/info.py	2007-05-22 09:19:14 UTC (rev 3028)
+++ trunk/Lib/sandbox/lobpcg/info.py	2007-05-22 09:56:10 UTC (rev 3029)
@@ -0,0 +1,16 @@
+"""
+The LOBPCG algorithm is described in detail in:
+
+A. V. Knyazev, Toward the Optimal Preconditioned Eigensolver: Locally Optimal Block Preconditioned Conjugate Gradient Method. SIAM Journal on Scientific Computing 23 (2001), no. 2, pp. 517-541. http://dx.doi.org/10.1137/S1064827500366124
+
+A. V. Knyazev, I. Lashuk, M. E. Argentati, and E. Ovchinnikov, Block Locally Optimal Preconditioned Eigenvalue Xolvers (BLOPEX) in hypre and PETSc  (2007). http://arxiv.org/abs/0705.2626
+
+
+Depends upon symeig (http://mdp-toolkit.sourceforge.net/symeig.html) for the
+moment, as symmetric eigenvalue solvers are not yet available in scipy.
+
+Usage: XXXXX
+
+
+"""
+postpone_import = 1
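
The commented-out call to la.eig in lobpcg.py hints at a dense alternative to
symeig; a rough, untested stand-in with the same basic call signature could
look like the sketch below (the name symeig_fallback and the B-normalization
step are illustrative assumptions, not something the package provides):

    import numpy as nm
    import scipy.linalg as la

    def symeig_fallback( mtxA, mtxB = None ):
        """Return eigenvalues in ascending order and B-normalized
        eigenvectors; assumes dense symmetric mtxA and s.p.d. mtxB."""
        if mtxB is None:
            mtxB = nm.eye( mtxA.shape[0], dtype = mtxA.dtype )
        w, v = la.eig( mtxA, mtxB )
        ii = nm.argsort( w.real )
        w = w[ii].real
        v = nm.asarray( v[:,ii].real )
        # B-normalize the eigenvectors, as a *sygv-based symeig would.
        norms = nm.sqrt( nm.sum( v * nm.dot( nm.asarray( mtxB ), v ), 0 ) )
        return w, v / norms[nm.newaxis,:]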

Added: trunk/Lib/sandbox/lobpcg/lobpcg.py
===================================================================
--- trunk/Lib/sandbox/lobpcg/lobpcg.py	2007-05-22 09:19:14 UTC (rev 3028)
+++ trunk/Lib/sandbox/lobpcg/lobpcg.py	2007-05-22 09:56:10 UTC (rev 3029)
@@ -0,0 +1,480 @@
+"""
+Pure SciPy implementation of the Locally Optimal Block Preconditioned
+Conjugate Gradient Method (LOBPCG); see
+http://www-math.cudenver.edu/~aknyazev/software/BLOPEX/
+
+License: BSD
+
+(c) Robert Cimrman, Andrew Knyazev
+"""
+
+import numpy as nm
+import scipy as sc
+import scipy.sparse as sp
+import scipy.linalg as la
+import scipy.io as io
+import types
+from symeig import symeig
+
+def pause():
+    raw_input()
+
+def save( ar, fileName ):
+    io.write_array( fileName, ar, precision = 8 )
+
+##
+# 21.05.2007, c
+def as2d( ar ):
+    if ar.ndim == 2:
+        return ar
+    else: # Assume ar is 1D.
+        aux = nm.array( ar, copy = False )
+        aux.shape = (ar.shape[0], 1)
+        return aux
+
+##
+# 05.04.2007, c
+# 10.04.2007
+def makeOperator( operatorInput, expectedShape ):
+    class Operator( object ):
+        def __call__( self, vec ):
+            return self.call( vec )
+    
+    operator = Operator()
+    operator.obj = operatorInput
+    
+    if hasattr( operatorInput, 'shape' ):
+        operator.shape = operatorInput.shape
+        operator.dtype = operatorInput.dtype
+        if operator.shape != expectedShape:
+            raise ValueError, 'bad operator shape %s != %s' \
+                  % (expectedShape, operator.shape)
+        if sp.issparse( operatorInput ):
+            def call( vec ):
+                out = operator.obj * vec
+                if sp.issparse( out ):
+                    out = out.toarray()
+                return as2d( out )
+        else:
+            def call( vec ):
+                return as2d( nm.asarray( sc.dot( operator.obj, vec ) ) )
+        operator.call = call
+
+    elif isinstance( operatorInput, types.FunctionType ) or \
+         isinstance( operatorInput, types.BuiltinFunctionType ):
+        operator.shape = expectedShape
+        operator.dtype = nm.float64
+        operator.call = operatorInput
+
+    return operator
+
+##
+# 05.04.2007, c
+def applyConstraints( blockVectorV, factYBY, blockVectorBY, blockVectorY ):
+    """Changes blockVectorV in place."""
+    gramYBV = sc.dot( blockVectorBY.T, blockVectorV )
+    tmp = la.cho_solve( factYBY, gramYBV )
+    blockVectorV -= sc.dot( blockVectorY, tmp )
+
+##
+# 05.04.2007, c
+def b_orthonormalize( operatorB, blockVectorV,
+                      blockVectorBV = None, retInvR = False ):
+
+    if blockVectorBV is None:
+        if operatorB is not None:
+            blockVectorBV = operatorB( blockVectorV )
+        else:
+            blockVectorBV = blockVectorV # Shared data!!!
+    gramVBV = sc.dot( blockVectorV.T, blockVectorBV )
+    gramVBV = la.cholesky( gramVBV )
+    gramVBV = la.inv( gramVBV, overwrite_a = True )
+    # gramVBV is now R^{-1}.
+    blockVectorV = sc.dot( blockVectorV, gramVBV )
+    if operatorB is not None:
+        blockVectorBV = sc.dot( blockVectorBV, gramVBV )
+
+    if retInvR:
+        return blockVectorV, blockVectorBV, gramVBV
+    else:
+        return blockVectorV, blockVectorBV
+
+##
+# 04.04.2007, c
+# 05.04.2007
+# 06.04.2007
+# 10.04.2007
+def lobpcg( blockVectorX, operatorA,
+            operatorB = None, operatorT = None, blockVectorY = None,
+            residualTolerance = None, maxIterations = 20,
+            largest = True, verbosityLevel = 0,
+            retLambdaHistory = False, retResidualNormsHistory = False ):
+
+    exitFlag = 0
+
+    if blockVectorY is not None:
+        sizeY = blockVectorY.shape[1]
+    else:
+        sizeY = 0
+
+    # Block size.
+    n, sizeX = blockVectorX.shape
+    if sizeX > n:
+        raise ValueError,\
+              'the first input argument blockVectorX must be tall, not fat' +\
+              ' (%d, %d)' % blockVectorX.shape
+
+    if n < 1:
+        raise ValueError,\
+              'the matrix size is wrong (%d)' % n
+        
+    operatorA = makeOperator( operatorA, (n, n) )
+
+    if operatorB is not None:
+        operatorB = makeOperator( operatorB, (n, n) )
+
+    if operatorT is not None:
+        operatorT = makeOperator( operatorT, (n, n) )
+##     if n != operatorA.shape[0]:
+##         aux = 'The size (%d, %d) of operatorA is not the same as\n'+\
+##               '%d - the number of rows of blockVectorX' % operatorA.shape + (n,)
+##         raise ValueError, aux
+
+##     if operatorA.shape[0] != operatorA.shape[1]:
+##         raise ValueError, 'operatorA must be a square matrix (%d, %d)' %\
+##               operatorA.shape
+
+    if residualTolerance is None:
+        residualTolerance = nm.sqrt( 1e-15 ) * n
+
+    maxIterations = min( n, maxIterations )
+
+    if verbosityLevel:
+        aux = "Solving "
+        if operatorB is None:
+            aux += "standard"
+        else:
+            aux += "generalized"
+        aux += " eigenvalue problem with"
+        if operatorT is None:
+            aux += "out"
+        aux += " preconditioning\n\n"
+        aux += "matrix size %d\n" % n
+        aux += "block size %d\n\n" % sizeX
+        if blockVectorY is None:
+            aux += "No constraints\n\n"
+        else:
+            if sizeY > 1:
+                aux += "%d constraints\n\n" % sizeY
+            else:
+                aux += "%d constraint\n\n" % sizeY
+        print aux
+
+    ##
+    # Apply constraints to X.
+    if blockVectorY is not None:
+
+        if operatorB is not None:
+            blockVectorBY = operatorB( blockVectorY )
+        else:
+            blockVectorBY = blockVectorY
+    
+        # gramYBY is a dense array.
+        gramYBY = sc.dot( blockVectorY.T, blockVectorBY )
+        try:
+            # gramYBY is a Cholesky factor from now on...
+            gramYBY = la.cho_factor( gramYBY )
+        except:
+            print 'cannot handle linearly dependent constraints'
+            raise
+
+        applyConstraints( blockVectorX, gramYBY, blockVectorBY, blockVectorY )
+
+    ##
+    # B-orthonormalize X.
+    blockVectorX, blockVectorBX = b_orthonormalize( operatorB, blockVectorX )
+
+    ##
+    # Compute the initial Ritz vectors: solve the eigenproblem.
+    blockVectorAX = operatorA( blockVectorX )
+    gramXAX = sc.dot( blockVectorX.T, blockVectorAX )
+    # gramXBX is X^T * X.
+    gramXBX = sc.dot( blockVectorX.T, blockVectorX )
+    _lambda, eigBlockVector = symeig( gramXAX )
+    ii = nm.argsort( _lambda )[:sizeX]
+    if largest:
+        ii = ii[::-1]
+    _lambda = _lambda[ii]
+    eigBlockVector = nm.asarray( eigBlockVector[:,ii] )
+#    pause()
+    blockVectorX = sc.dot( blockVectorX, eigBlockVector )
+    blockVectorAX = sc.dot( blockVectorAX, eigBlockVector )
+    if operatorB is not None:
+        blockVectorBX = sc.dot( blockVectorBX, eigBlockVector )
+    
+    ##
+    # Active index set.
+    activeMask = nm.ones( (sizeX,), dtype = nm.bool )
+
+    lambdaHistory = [_lambda]
+    residualNormsHistory = []
+
+    previousBlockSize = sizeX
+    ident = nm.eye( sizeX, dtype = operatorA.dtype )
+    ident0 = nm.eye( sizeX, dtype = operatorA.dtype )
+    
+    ##
+    # Main iteration loop.
+    for iterationNumber in xrange( maxIterations ):
+        if verbosityLevel > 0:
+            print 'iteration %d' %  iterationNumber
+
+        aux = blockVectorBX * _lambda[nm.newaxis,:]
+        blockVectorR = blockVectorAX - aux
+
+        aux = nm.sum( blockVectorR.conjugate() * blockVectorR, 0 )
+        residualNorms = nm.sqrt( aux )
+
+        
+##         if iterationNumber == 2:
+##             print blockVectorAX
+##             print blockVectorBX
+##             print blockVectorR
+##             pause()
+
+        residualNormsHistory.append( residualNorms )
+
+        ii = nm.where( residualNorms > residualTolerance, True, False )
+        activeMask = activeMask & ii
+        if verbosityLevel > 2:
+            print activeMask
+
+        currentBlockSize = activeMask.sum()
+        if currentBlockSize != previousBlockSize:
+            previousBlockSize = currentBlockSize
+            ident = nm.eye( currentBlockSize, dtype = operatorA.dtype )
+
+
+        if currentBlockSize == 0:
+            failureFlag = 0 # All eigenpairs converged.
+            break
+
+        if verbosityLevel > 0:
+            print 'current block size:', currentBlockSize
+            print 'eigenvalues:', _lambda
+            print 'residual norms:', residualNorms
+        if verbosityLevel > 10:
+            print eigBlockVector
+
+        activeBlockVectorR = as2d( blockVectorR[:,activeMask] )
+        
+        if iterationNumber > 0:
+            activeBlockVectorP = as2d( blockVectorP[:,activeMask] )
+            activeBlockVectorAP = as2d( blockVectorAP[:,activeMask] )
+            activeBlockVectorBP = as2d( blockVectorBP[:,activeMask] )
+
+#        print activeBlockVectorR
+        if operatorT is not None:
+            ##
+            # Apply preconditioner T to the active residuals.
+            activeBlockVectorR = operatorT( activeBlockVectorR )
+
+#        assert nm.all( blockVectorR == activeBlockVectorR )
+
+        ##
+        # Apply constraints to the preconditioned residuals.
+        if blockVectorY is not None:
+            applyConstraints( activeBlockVectorR, gramYBY, blockVectorBY, blockVectorY )
+
+#        assert nm.all( blockVectorR == activeBlockVectorR )
+
+        ##
+        # B-orthonormalize the preconditioned residuals.
+#        print activeBlockVectorR
+
+        aux = b_orthonormalize( operatorB, activeBlockVectorR )
+        activeBlockVectorR, activeBlockVectorBR = aux
+#        print activeBlockVectorR
+
+        activeBlockVectorAR = operatorA( activeBlockVectorR )
+
+        if iterationNumber > 0:
+            aux = b_orthonormalize( operatorB, activeBlockVectorP,
+                                    activeBlockVectorBP, retInvR = True )
+            activeBlockVectorP, activeBlockVectorBP, invR = aux
+            activeBlockVectorAP = sc.dot( activeBlockVectorAP, invR )
+
+        ##
+        # Perform the Rayleigh-Ritz procedure:
+        # Compute symmetric Gram matrices:
+
+        xaw = sc.dot( blockVectorX.T, activeBlockVectorAR )
+        waw = sc.dot( activeBlockVectorR.T, activeBlockVectorAR )
+        xbw = sc.dot( blockVectorX.T, activeBlockVectorBR )
+        
+        if iterationNumber > 0:
+            xap = sc.dot( blockVectorX.T, activeBlockVectorAP )
+            wap = sc.dot( activeBlockVectorR.T, activeBlockVectorAP )
+            pap = sc.dot( activeBlockVectorP.T, activeBlockVectorAP )
+            xbp = sc.dot( blockVectorX.T, activeBlockVectorBP )
+            wbp = sc.dot( activeBlockVectorR.T, activeBlockVectorBP )
+            
+            gramA = nm.bmat( [[nm.diag( _lambda ), xaw, xap],
+                              [xaw.T, waw, wap],
+                              [xap.T, wap.T, pap]] )
+            try:
+                gramB = nm.bmat( [[ident0, xbw, xbp],
+                                  [xbw.T, ident, wbp],
+                                  [xbp.T, wbp.T, ident]] )
+            except:
+                print ident
+                print xbw
+                raise
+        else:
+            gramA = nm.bmat( [[nm.diag( _lambda ), xaw],
+                              [xaw.T, waw]] )
+            gramB = nm.bmat( [[ident0, xbw],
+                              [xbw.T, ident]] )
+        try:
+            assert nm.allclose( gramA.T, gramA )
+        except:
+            print gramA.T - gramA
+            raise
+
+        try:
+            assert nm.allclose( gramB.T, gramB )
+        except:
+            print gramB.T - gramB
+            raise
+
+##         print nm.diag( _lambda )
+##         print xaw
+##         print waw
+##         print xbw
+##         try:
+##             print xap
+##             print wap
+##             print pap
+##             print xbp
+##             print wbp
+##         except:
+##             pass
+##         pause()
+
+        if verbosityLevel > 10:
+            save( gramA, 'gramA' )
+            save( gramB, 'gramB' )
+        ##
+        # Solve the generalized eigenvalue problem.
+#        _lambda, eigBlockVector = la.eig( gramA, gramB )
+        _lambda, eigBlockVector = symeig( gramA, gramB )
+        ii = nm.argsort( _lambda )[:sizeX]
+        if largest:
+            ii = ii[::-1]
+        if verbosityLevel > 10:
+            print ii
+        
+        _lambda = _lambda[ii].astype( nm.float64 )
+        eigBlockVector = nm.asarray( eigBlockVector[:,ii].astype( nm.float64 ) )
+        if verbosityLevel > 10:
+            print 'lambda:', _lambda
+##         # Normalize eigenvectors!
+##         aux = nm.sum( eigBlockVector.conjugate() * eigBlockVector, 0 )
+##         eigVecNorms = nm.sqrt( aux )
+##         eigBlockVector = eigBlockVector / eigVecNorms[nm.newaxis,:]
+#        eigBlockVector, aux = b_orthonormalize( operatorB, eigBlockVector )
+
+        if verbosityLevel > 10:
+            print eigBlockVector
+            pause()
+        ##
+        # Compute Ritz vectors.
+        if iterationNumber > 0:
+            eigBlockVectorX = eigBlockVector[:sizeX]
+            eigBlockVectorR = eigBlockVector[sizeX:sizeX+currentBlockSize]
+            eigBlockVectorP = eigBlockVector[sizeX+currentBlockSize:]
+
+            pp = sc.dot( activeBlockVectorR, eigBlockVectorR )\
+                 + sc.dot( activeBlockVectorP, eigBlockVectorP )
+
+            app = sc.dot( activeBlockVectorAR, eigBlockVectorR )\
+                  + sc.dot( activeBlockVectorAP, eigBlockVectorP )
+
+            bpp = sc.dot( activeBlockVectorBR, eigBlockVectorR )\
+                  + sc.dot( activeBlockVectorBP, eigBlockVectorP )
+        else:
+            eigBlockVectorX = eigBlockVector[:sizeX]
+            eigBlockVectorR = eigBlockVector[sizeX:]
+
+            pp = sc.dot( activeBlockVectorR, eigBlockVectorR )
+
+            app = sc.dot( activeBlockVectorAR, eigBlockVectorR )
+
+            bpp = sc.dot( activeBlockVectorBR, eigBlockVectorR )
+
+        if verbosityLevel > 10:
+            print pp
+            print app
+            print bpp
+            pause()
+#        print pp.shape, app.shape, bpp.shape
+
+        blockVectorX = sc.dot( blockVectorX, eigBlockVectorX ) + pp
+        blockVectorAX = sc.dot( blockVectorAX, eigBlockVectorX ) + app
+        blockVectorBX = sc.dot( blockVectorBX, eigBlockVectorX ) + bpp
+
+        blockVectorP, blockVectorAP, blockVectorBP = pp, app, bpp
+
+    aux = blockVectorBX * _lambda[nm.newaxis,:]
+    blockVectorR = blockVectorAX - aux
+
+    aux = nm.sum( blockVectorR.conjugate() * blockVectorR, 0 )
+    residualNorms = nm.sqrt( aux )
+
+
+    if verbosityLevel > 0:
+        print 'final eigenvalues:', _lambda
+        print 'final residual norms:', residualNorms
+
+
+    return _lambda, blockVectorX
+
+###########################################################################
+if __name__ == '__main__':
+    from scipy.sparse import spdiags, speye
+    import time
+
+##     def operatorB( vec ):
+##         return vec
+
+    n = 100
+    vals = [nm.arange( n, dtype = nm.float64 ) + 1]
+    operatorA = spdiags( vals, 0, n, n )
+    operatorB = speye( n, n )
+#    operatorB[0,0] = 0
+    operatorB = nm.eye( n, n )
+    Y = nm.eye( n, 3 )
+
+
+##    X = sc.rand( n, 3 )
+    xfile = {100 : 'X.txt', 1000 : 'X2.txt', 10000 : 'X3.txt'}
+    X = nm.fromfile( xfile[n], dtype = nm.float64, sep = ' ' )
+    X.shape = (n, 3)
+
+    ivals = [1./vals[0]]
+    def precond( x ):
+        invA = spdiags( ivals, 0, n, n )
+        y = invA  * x
+        if sp.issparse( y ):
+            y = y.toarray()
+
+        return as2d( y )
+
+    tt = time.clock()
+    eigs, vecs = lobpcg( X, operatorA, operatorB, blockVectorY = Y,
+                         operatorT = precond,
+                         residualTolerance = 1e-4, maxIterations = 40,
+                         largest = False, verbosityLevel = 1 )
+    print 'solution time:', time.clock() - tt
+    print eigs
+    
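
Because makeOperator also accepts plain Python functions, the same demo
problem can be posed matrix-free; a sketch (assuming the package namespace
exports lobpcg and as2d, as the __init__.py above suggests):

    import numpy as nm
    from scipy.sandbox.lobpcg import lobpcg, as2d

    n = 100
    diagA = nm.arange( n, dtype = nm.float64 ) + 1

    def operatorA( vec ):
        # Matrix-free application of the diagonal test operator.
        return as2d( diagA[:,nm.newaxis] * as2d( vec ) )

    def precond( vec ):
        # Jacobi preconditioner: divide by the diagonal of A.
        return as2d( as2d( vec ) / diagA[:,nm.newaxis] )

    X = nm.random.rand( n, 3 )
    eigs, vecs = lobpcg( X, operatorA, nm.eye( n ), operatorT = precond,
                         residualTolerance = 1e-4, maxIterations = 40,
                         largest = False )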

Added: trunk/Lib/sandbox/lobpcg/setup.py
===================================================================
--- trunk/Lib/sandbox/lobpcg/setup.py	2007-05-22 09:19:14 UTC (rev 3028)
+++ trunk/Lib/sandbox/lobpcg/setup.py	2007-05-22 09:56:10 UTC (rev 3029)
@@ -0,0 +1,16 @@
+#!/usr/bin/env python
+from os.path import join
+import sys
+
+def configuration(parent_package='',top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    from numpy.distutils.system_info import get_info
+
+    config = Configuration('lobpcg',parent_package,top_path)
+#    config.add_data_dir('tests')
+
+    return config
+
+if __name__ == '__main__':
+    from numpy.distutils.core import setup
+    setup(**configuration(top_path='').todict())


Property changes on: trunk/Lib/sandbox/lobpcg/setup.py
___________________________________________________________________
Name: svn:executable
   + *



