[Scipy-svn] r3837 - in trunk/scipy/sandbox/multigrid: . examples

scipy-svn at scipy.org
Tue Jan 15 08:54:57 EST 2008


Author: wnbell
Date: 2008-01-15 07:54:53 -0600 (Tue, 15 Jan 2008)
New Revision: 3837

Added:
   trunk/scipy/sandbox/multigrid/examples/
   trunk/scipy/sandbox/multigrid/examples/adaptive.py
   trunk/scipy/sandbox/multigrid/examples/dec_example.py
   trunk/scipy/sandbox/multigrid/examples/simple_example.py
Modified:
   trunk/scipy/sandbox/multigrid/adaptive.py
Log:
updating adaptive SA code to use BSR
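
For reference, BSR (Block Sparse Row) stores a sparse matrix as dense
R-by-C blocks, with indptr/indices addressing block rows and block
columns.  A minimal standalone sketch of the bsr_matrix constructor that
the new make_bridge below builds on (values here are illustrative only):

    from numpy import zeros, arange
    from scipy.sparse import bsr_matrix

    # three 2x2 identity blocks on the diagonal of a 6x6 matrix
    data    = zeros((3,2,2))    # data[k] is the k-th dense block
    data[:] = [[1,0],[0,1]]
    indices = arange(3)         # block column of each block
    indptr  = arange(4)         # block row k owns blocks indptr[k]:indptr[k+1]
    A = bsr_matrix((data,indices,indptr), shape=(6,6))
    assert A.blocksize == (2,2)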


Modified: trunk/scipy/sandbox/multigrid/adaptive.py
===================================================================
--- trunk/scipy/sandbox/multigrid/adaptive.py	2008-01-15 13:09:03 UTC (rev 3836)
+++ trunk/scipy/sandbox/multigrid/adaptive.py	2008-01-15 13:54:53 UTC (rev 3837)
@@ -1,4 +1,5 @@
 import numpy,scipy,scipy.sparse
+
 from numpy import sqrt, ravel, diff, zeros, zeros_like, inner, concatenate, \
                   asarray, hstack, ascontiguousarray, isinf, dot
 from numpy.random import randn
@@ -11,89 +12,8 @@
 
 
 
-def augment_candidates(AggOp, old_Q, old_R, new_candidate):
-    #TODO update P and A also
 
-    K = old_R.shape[1]
 
-    #determine blocksizes
-    if new_candidate.shape[0] == old_Q.shape[0]:
-        #then this is the first prolongator
-        old_bs = (1,K)
-        new_bs = (1,K+1)
-    else:
-        old_bs = (K,K)
-        new_bs = (K+1,K+1)
-
-    AggOp = expand_into_blocks(AggOp,new_bs[0],1).tocsr() #TODO switch to block matrix
-
-
-    # tentative prolongator
-    #TODO USE BSR
-    Q_indptr  = (K+1)*AggOp.indptr
-    Q_indices = ((K+1)*AggOp.indices).repeat(K+1)
-    for i in range(K+1):
-        Q_indices[i::K+1] += i
-    Q_data = zeros((AggOp.indptr[-1]/new_bs[0],) + new_bs)
-    Q_data[:,:old_bs[0],:old_bs[1]] = old_Q.data.reshape((-1,) + old_bs)   #TODO BSR change
-
-    # coarse candidates
-    R = zeros((AggOp.shape[1],K+1,K+1))
-    R[:,:K,:K] = old_R.reshape(-1,K,K)
-
-    c = new_candidate.reshape(-1)[diff(AggOp.indptr) == 1]  #eliminate DOFs that aggregation misses
-    threshold = 1e-10 * abs(c).max()   # cutoff for small basis functions
-
-    X = csr_matrix((c,AggOp.indices,AggOp.indptr),shape=AggOp.shape)
-
-    #orthogonalize X against previous
-    for i in range(K):
-        old_c = ascontiguousarray(Q_data[:,:,i].reshape(-1))
-        D_AtX = csr_matrix((old_c*X.data,X.indices,X.indptr),shape=X.shape).sum(axis=0).A.flatten() #same as diagonal of A.T * X
-        R[:,i,K] = D_AtX
-        X.data -= D_AtX[X.indices] * old_c
-
-    #normalize X
-    D_XtX = csr_matrix((X.data**2,X.indices,X.indptr),shape=X.shape).sum(axis=0).A.flatten() #same as diagonal of X.T * X
-    col_norms = sqrt(D_XtX)
-    mask = col_norms < threshold  # find small basis functions
-    col_norms[mask] = 0           # and set them to zero
-
-    R[:,K,K] = col_norms      # store diagonal entry into R
-
-    col_norms = 1.0/col_norms
-    col_norms[mask] = 0
-    X.data *= col_norms[X.indices]
-    Q_data[:,:,-1] = X.data.reshape(-1,new_bs[0])
-
-    Q_data = Q_data.reshape(-1)  #TODO BSR change
-    R = R.reshape(-1,K+1)
-
-    Q = csr_matrix((Q_data,Q_indices,Q_indptr),shape=(AggOp.shape[0],(K+1)*AggOp.shape[1]))
-
-    return Q,R
-
-
-
-
-
-
-def smoothed_prolongator(P,A):
-    #just use Richardson for now
-    #omega = 4.0/(3.0*approximate_spectral_radius(A))
-    #return P - omega*(A*P)
-    #return P  #TEST
-
-    D = diag_sparse(A)
-    D_inv_A = diag_sparse(1.0/D)*A
-    omega = 4.0/(3.0*approximate_spectral_radius(D_inv_A))
-    print "spectral radius",approximate_spectral_radius(D_inv_A) #TODO remove this
-    D_inv_A *= omega
-
-    return P - D_inv_A*P
-
-
-
 def sa_hierarchy(A,B,AggOps):
     """
     Construct multilevel hierarchy using Smoothed Aggregation
@@ -114,7 +34,7 @@
 
     for AggOp in AggOps:
         P,B = sa_fit_candidates(AggOp,B)
-        I   = smoothed_prolongator(P,A)
+        I   = sa_smoothed_prolongator(P,A)
         A   = I.T.tocsr() * A * I
         As.append(A)
         Ts.append(P)
@@ -148,13 +68,13 @@
             x = self.__develop_new_candidate(As,Ps,Ts,Bs,AggOps,mu=mu)
 
             #TODO which is faster?
-            As,Ps,Ts,Bs = self.__augment_cycle(As,Ps,Ts,Bs,AggOps,x)
+            #As,Ps,Ts,Bs = self.__augment_cycle(As,Ps,Ts,Bs,AggOps,x)
 
-            #B = hstack((Bs[0],x))
-            #As,Ps,Ts,Bs = sa_hierarchy(A,B,AggOps)
+            B = hstack((Bs[0],x))
+            As,Ps,Ts,Bs = sa_hierarchy(A,B,AggOps)
 
         #improve candidates?
-        if True:
+        if False:
             print "improving candidates"
             B = Bs[0]
             for i in range(max_candidates):
@@ -199,7 +119,7 @@
             else:
                 W_l   = aggregation[len(AggOps)]
             P_l,x = sa_fit_candidates(W_l,x)                             #step 4c
-            I_l   = smoothed_prolongator(P_l,A_l)                          #step 4d
+            I_l   = sa_smoothed_prolongator(P_l,A_l)                          #step 4d
             A_l   = I_l.T.tocsr() * A_l * I_l                              #step 4e
 
             blocks = None #not needed on subsequent levels
@@ -244,24 +164,25 @@
         temp_Ps = []
         temp_As = [A]
 
-        def make_bridge(P,K):
-            indptr = P.indptr[:-1].reshape(-1,K-1)
-            indptr = hstack((indptr,indptr[:,-1].reshape(-1,1)))
-            indptr = indptr.reshape(-1)
-            indptr = hstack((indptr,indptr[-1:])) #duplicate last element
-            return csr_matrix((P.data,P.indices,indptr),shape=(K*P.shape[0]/(K-1),P.shape[1]))
+        def make_bridge(P):
+            M,N  = P.shape
+            K    = P.blocksize[0]
+            bnnz = P.indptr[-1]
+            data = zeros( (bnnz, K+1, K), dtype=P.dtype )
+            data[:,:-1,:-1] = P.data
+            return bsr_matrix( (data, P.indices, P.indptr), shape=( (K+1)*(M/K), N) )
 
         for i in range(len(As) - 2):
-            T,R = augment_candidates(AggOps[i], Ts[i], Bs[i+1], x)
+            B = zeros( (x.shape[0], Bs[i+1].shape[1] + 1), dtype=x.dtype)
+            T,R = sa_fit_candidates(AggOps[i],B)
 
-            P = smoothed_prolongator(T,A)
+            P = sa_smoothed_prolongator(T,A)
             A = P.T.tocsr() * A * P
 
             temp_Ps.append(P)
             temp_As.append(A)
 
-            #TODO USE BSR (K,K) -> (K,K-1)
-            bridge = make_bridge(Ps[i+1],R.shape[1])
+            bridge = make_bridge(Ps[i+1])
 
             solver = multilevel_solver( [A] + As[i+2:], [bridge] + Ps[i+2:] )
 
@@ -274,111 +195,87 @@
 
         return x
 
-    def __augment_cycle(self,As,Ps,Ts,Bs,AggOps,x):
-        A = As[0]
+#    def __augment_cycle(self,As,Ps,Ts,Bs,AggOps,x):
+#        A = As[0]
+#
+#        new_As = [A]
+#        new_Ps = []
+#        new_Ts = []
+#        new_Bs = [ hstack((Bs[0],x)) ]
+#
+#        for i in range(len(As) - 1):
+#            T,R = augment_candidates(AggOps[i], Ts[i], Bs[i+1], x)
+#
+#            P = sa_smoothed_prolongator(T,A)
+#            A = P.T.tocsr() * A * P
+#
+#            new_As.append(A)
+#            new_Ps.append(P)
+#            new_Ts.append(T)
+#            new_Bs.append(R)
+#
+#            x = R[:,-1].reshape(-1,1)
+#
+#        return new_As,new_Ps,new_Ts,new_Bs
 
-        new_As = [A]
-        new_Ps = []
-        new_Ts = []
-        new_Bs = [ hstack((Bs[0],x)) ]
 
-        for i in range(len(As) - 1):
-            T,R = augment_candidates(AggOps[i], Ts[i], Bs[i+1], x)
 
-            P = smoothed_prolongator(T,A)
-            A = P.T.tocsr() * A * P
 
-            new_As.append(A)
-            new_Ps.append(P)
-            new_Ts.append(T)
-            new_Bs.append(R)
 
-            x = R[:,-1].reshape(-1,1)
+#def augment_candidates(AggOp, old_Q, old_R, new_candidate):
+#    #TODO update P and A also
+#
+#    K = old_R.shape[1]
+#
+#    #determine blocksizes
+#    if new_candidate.shape[0] == old_Q.shape[0]:
+#        #then this is the first prolongator
+#        old_bs = (1,K)
+#        new_bs = (1,K+1)
+#    else:
+#        old_bs = (K,K)
+#        new_bs = (K+1,K+1)
+#
+#    #AggOp = expand_into_blocks(AggOp,new_bs[0],1).tocsr() #TODO switch to block matrix
+#
+#    # tentative prolongator
+#    Q_data = zeros( (AggOp.indptr[-1],) + new_bs)
+#    Q_data[:,:old_bs[0],:old_bs[1]] = old_Q.data.reshape((-1,) + old_bs)
+#
+#    # coarse candidates
+#    R = zeros((AggOp.shape[1],K+1,K+1))
+#    R[:,:K,:K] = old_R.reshape(-1,K,K)
+#
+#    c = new_candidate.reshape(-1)[diff(AggOp.indptr) == 1]  #eliminate DOFs that aggregation misses
+#    threshold = 1e-10 * abs(c).max()   # cutoff for small basis functions
+#
+#    X = bsr_matrix((c,AggOp.indices,AggOp.indptr),shape=AggOp.shape)
+#
+#    #orthogonalize X against previous
+#    for i in range(K):
+#        old_c = ascontiguousarray(Q_data[:,:,i].reshape(-1))
+#        D_AtX = csr_matrix((old_c*X.data,X.indices,X.indptr),shape=X.shape).sum(axis=0).A.flatten() #same as diagonal of A.T * X
+#        R[:,i,K] = D_AtX
+#        X.data -= D_AtX[X.indices] * old_c
+#
+#    #normalize X
+#    D_XtX = csr_matrix((X.data**2,X.indices,X.indptr),shape=X.shape).sum(axis=0).A.flatten() #same as diagonal of X.T * X
+#    col_norms = sqrt(D_XtX)
+#    mask = col_norms < threshold  # find small basis functions
+#    col_norms[mask] = 0           # and set them to zero
+#
+#    R[:,K,K] = col_norms      # store diagonal entry into R
+#
+#    col_norms = 1.0/col_norms
+#    col_norms[mask] = 0
+#    X.data *= col_norms[X.indices]
+#    Q_data[:,:,-1] = X.data.reshape(-1,new_bs[0])
+#
+#    Q_data = Q_data.reshape(-1)  #TODO BSR change
+#    R = R.reshape(-1,K+1)
+#
+#    Q = csr_matrix((Q_data,Q_indices,Q_indptr),shape=(AggOp.shape[0],(K+1)*AggOp.shape[1]))
+#
+#    return Q,R
 
-        return new_As,new_Ps,new_Ts,new_Bs
 
-
-if __name__ == '__main__':
-    from scipy import *
-    from utils import diag_sparse
-    from multilevel import poisson_problem1D,poisson_problem2D
-
-    blocks = None
-    aggregation = None
-
-    #A = poisson_problem2D(200,1e-2)
-    #aggregation = [ sa_constant_interpolation(A*A*A,epsilon=0.0) ]
-
-    #A = io.mmread("tests/sample_data/laplacian_41_3dcube.mtx").tocsr()
-    #A = io.mmread("laplacian_40_3dcube.mtx").tocsr()
-    #A = io.mmread("/home/nathan/Desktop/9pt/9pt-100x100.mtx").tocsr()
-    #A = io.mmread("/home/nathan/Desktop/BasisShift_W_EnergyMin_Luke/9pt-5x5.mtx").tocsr()
-
-
-    #D = diag_sparse(1.0/sqrt(10**(12*rand(A.shape[0])-6))).tocsr()
-    #A = D * A * D
-
-    A = io.mmread("tests/sample_data/elas30_A.mtx").tocsr()
-    blocks = arange(A.shape[0]/2).repeat(2)
-
-    from time import clock; start = clock()
-    asa = adaptive_sa_solver(A,max_candidates=3,mu=5,blocks=blocks,aggregation=aggregation)
-    print "Adaptive Solver Construction: %s seconds" % (clock() - start); del start
-
-    scipy.random.seed(0)  #make tests repeatable
-    x = randn(A.shape[0])
-    b = A*randn(A.shape[0])
-    #b = zeros(A.shape[0])
-
-
-    print "solving"
-    if False:
-        x_sol,residuals = asa.solver.solve(b,x0=x,maxiter=20,tol=1e-12,return_residuals=True)
-    else:
-        residuals = []
-        def add_resid(x):
-            residuals.append(linalg.norm(b - A*x))
-        A.psolve = asa.solver.psolve
-        x_sol = linalg.cg(A,b,x0=x,maxiter=30,tol=1e-12,callback=add_resid)[0]
-
-    residuals = array(residuals)/residuals[0]
-
-    print "residuals ",residuals
-    print "mean convergence factor",(residuals[-1]/residuals[0])**(1.0/len(residuals))
-    print "last convergence factor",residuals[-1]/residuals[-2]
-
-    print
-    print asa.solver
-
-    print "constant Rayleigh quotient",dot(ones(A.shape[0]),A*ones(A.shape[0]))/float(A.shape[0])
-
-    def plot2d_arrows(x):
-        from pylab import figure,quiver,show
-        x = x.reshape(-1)
-        N = (len(x)/2)**0.5
-        assert(2 * N * N == len(x))
-        X = linspace(-1,1,N).reshape(1,N).repeat(N,0).reshape(-1)
-        Y = linspace(-1,1,N).reshape(1,N).repeat(N,0).T.reshape(-1)
-
-        dX = x[0::2]
-        dY = x[1::2]
-
-        figure()
-        quiver(X,Y,dX,dY)
-        show()
-
-    def plot2d(x):
-        from pylab import pcolor,figure,show
-        figure()
-        pcolor(x.reshape(sqrt(len(x)),sqrt(len(x))))
-        show()
-
-
-    for c in asa.Bs[0].T:
-        #plot2d(c)
-        plot2d_arrows(c)
-        print "candidate Rayleigh quotient",dot(c,A*c)/dot(c,c)
-
-
-    ##W = asa.AggOps[0]*asa.AggOps[1]
-    ##pcolor((W * rand(W.shape[1])).reshape((200,200)))
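
The deleted smoothed_prolongator above is replaced by sa_smoothed_prolongator
from the sa module.  A standalone sketch of the damped-Jacobi smoothing the
deleted body performed, using the same sandbox utils it called (the helper
name here is illustrative, not the sa module's API):

    from scipy.sandbox.multigrid.utils import diag_sparse, approximate_spectral_radius

    def jacobi_smoothed_prolongator(P, A):
        # I = (Id - omega*D^-1*A)*P  with  omega = 4/(3*rho(D^-1*A))
        D_inv_A = diag_sparse(1.0/diag_sparse(A)) * A
        omega = 4.0/(3.0*approximate_spectral_radius(D_inv_A))
        return P - omega*(D_inv_A*P)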

Added: trunk/scipy/sandbox/multigrid/examples/adaptive.py
===================================================================
--- trunk/scipy/sandbox/multigrid/examples/adaptive.py	2008-01-15 13:09:03 UTC (rev 3836)
+++ trunk/scipy/sandbox/multigrid/examples/adaptive.py	2008-01-15 13:54:53 UTC (rev 3837)
@@ -0,0 +1,88 @@
+
+import scipy
+from scipy import *
+from scipy.sandbox.multigrid.utils import diag_sparse
+from scipy.sandbox.multigrid.gallery import poisson, linear_elasticity
+from scipy.sandbox.multigrid.adaptive import adaptive_sa_solver
+
+
+#A = poisson( (200,200), spacing=(1,1e-2) )
+#aggregation = [ sa_constant_interpolation(A*A*A,epsilon=0.0) ]
+
+#A = io.mmread("tests/sample_data/laplacian_41_3dcube.mtx").tocsr()
+#A = io.mmread("laplacian_40_3dcube.mtx").tocsr()
+#A = io.mmread("/home/nathan/Desktop/9pt/9pt-100x100.mtx").tocsr()
+#A = io.mmread("/home/nathan/Desktop/BasisShift_W_EnergyMin_Luke/9pt-5x5.mtx").tocsr()
+
+
+#D = diag_sparse(1.0/sqrt(10**(12*rand(A.shape[0])-6))).tocsr()
+#A = D * A * D
+
+#A = io.mmread("tests/sample_data/elas30_A.mtx").tocsr()
+
+A,B = linear_elasticity( (100,100) )
+
+blocks = arange(A.shape[0]/2).repeat(2)  #two degrees of freedom per grid node
+aggregation = None                       #let the solver construct the aggregation
+from time import clock; start = clock()
+asa = adaptive_sa_solver(A,max_candidates=3,mu=5,blocks=blocks,aggregation=aggregation)
+print "Adaptive Solver Construction: %s seconds" % (clock() - start); del start
+
+scipy.random.seed(0)  #make tests repeatable
+x = randn(A.shape[0])
+b = A*randn(A.shape[0])
+#b = zeros(A.shape[0])
+
+
+print "solving"
+if False:
+    x_sol,residuals = asa.solver.solve(b,x0=x,maxiter=20,tol=1e-12,return_residuals=True)
+else:
+    residuals = []
+    def add_resid(x):
+        residuals.append(linalg.norm(b - A*x))
+    A.psolve = asa.solver.psolve
+    x_sol = linalg.cg(A,b,x0=x,maxiter=30,tol=1e-12,callback=add_resid)[0]
+
+residuals = array(residuals)/residuals[0]
+
+print "residuals ",residuals
+print "mean convergence factor",(residuals[-1]/residuals[0])**(1.0/len(residuals))
+print "last convergence factor",residuals[-1]/residuals[-2]
+
+print
+print asa.solver
+
+print "constant Rayleigh quotient",dot(ones(A.shape[0]),A*ones(A.shape[0]))/float(A.shape[0])
+
+def plot2d_arrows(x):
+    from pylab import figure,quiver,show
+    x = x.reshape(-1)
+    N = (len(x)/2)**0.5
+    assert(2 * N * N == len(x))
+    X = linspace(-1,1,N).reshape(1,N).repeat(N,0).reshape(-1)
+    Y = linspace(-1,1,N).reshape(1,N).repeat(N,0).T.reshape(-1)
+
+    dX = x[0::2]
+    dY = x[1::2]
+
+    figure()
+    quiver(X,Y,dX,dY)
+    show()
+
+def plot2d(x):
+    from pylab import pcolor,figure,show
+    figure()
+    pcolor(x.reshape(sqrt(len(x)),sqrt(len(x))))
+    show()
+
+
+for c in asa.Bs[0].T:
+    #plot2d(c)
+    plot2d_arrows(c)
+    print "candidate Rayleigh quotient",dot(c,A*c)/dot(c,c)
+
+
+##W = asa.AggOps[0]*asa.AggOps[1]
+##pcolor((W * rand(W.shape[1])).reshape((200,200)))
+
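
The CG branch above attaches the multigrid cycle through the old A.psolve
hook.  In later SciPy releases the same preconditioning is expressed with an
explicit operator; a sketch assuming modern scipy.sparse.linalg (which this
2008 revision predates):

    from scipy.sparse.linalg import cg, LinearOperator

    M = LinearOperator(A.shape, matvec=asa.solver.psolve)  # one MG cycle as M ~= A^-1
    x_sol, info = cg(A, b, x0=x, maxiter=30, M=M)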

Added: trunk/scipy/sandbox/multigrid/examples/dec_example.py
===================================================================
--- trunk/scipy/sandbox/multigrid/examples/dec_example.py	2008-01-15 13:09:03 UTC (rev 3836)
+++ trunk/scipy/sandbox/multigrid/examples/dec_example.py	2008-01-15 13:54:53 UTC (rev 3837)
@@ -0,0 +1,241 @@
+
+from scipy import *
+from scipy.sparse import *
+from pydec import *
+from pydec.multigrid.discrete_laplacian import boundary_hierarchy, discrete_laplacian_solver, hodge_solver
+
+from scipy.sandbox.multigrid import smoothed_aggregation_solver,multigridtools,multilevel_solver
+from scipy.sandbox.multigrid.adaptive import adaptive_sa_solver
+from scipy.sandbox.multigrid.sa import sa_smoothed_prolongator
+from scipy.sandbox.multigrid.utils import expand_into_blocks
+
+
+## Load mesh from file
+mesh_path = '../../../../../hirani_group/wnbell/meshes/'
+#mesh = read_mesh(mesh_path + 'rocket/rocket.xml')
+#mesh = read_mesh(mesh_path + 'genus3/genus3_168k.xml')
+#mesh = read_mesh(mesh_path + 'genus3/genus3_455k.xml')
+#mesh = read_mesh(mesh_path + '/torus/torus.xml')
+mesh = read_mesh(mesh_path + '/sq14tri/sq14tri.xml')
+for i in range(5):
+    mesh['vertices'],mesh['elements'] = loop_subdivision(mesh['vertices'],mesh['elements'])
+cmplx = simplicial_complex(mesh['vertices'],mesh['elements'])
+
+## Construct mesh manually
+#bitmap = ones((60,60),dtype='bool')
+#bitmap[1::10,1::10] = False
+#bitmap[100:150,100:400] = False
+#cmplx = regular_cube_complex(regular_cube_mesh(bitmap))
+
+def curl_curl_prolongator(D_nodal,vertices):
+    if not isspmatrix_csr(D_nodal):
+        raise TypeError('expected csr_matrix')
+
+    A = D_nodal.T.tocsr() * D_nodal
+    aggs = multigridtools.sa_get_aggregates(A.shape[0],A.indptr,A.indices)
+
+    num_edges = D_nodal.shape[0]
+    num_basis = vertices.shape[1]
+    num_aggs  = aggs.max() + 1
+
+    # TODO: replace with a direct CSR construction and eliminate duplicates:
+    #indptr  = (2*num_basis) * arange(num_edges+1)
+    ## equivalent to the COO construction below:
+    #csr_matrix((data,indices,indptr),shape=(num_edges,num_aggs))
+
+    row  = arange(num_edges).repeat(2*num_basis)
+    col  = (num_basis*aggs[D_nodal.indices]).repeat(num_basis)
+    col = col.reshape(-1,num_basis) + arange(num_basis)
+    col = col.reshape(-1)
+    data = tile(0.5 * (D_nodal*vertices),(1,2)).reshape(-1)
+
+    return coo_matrix((data,(row,col)),shape=(num_edges,num_basis*num_aggs)).tocsr()
+
+
+
+
+
+def whitney_innerproduct_cache(cmplx,k):
+    h = hash(cmplx.vertices.tostring()) ^ hash(cmplx.simplices.tostring()) ^ hash(k)
+
+    filename = "/home/nathan/.pydec/cache/whitney_" + str(h) + ".mtx"
+
+    try:
+        import pydec
+        M = pydec.io.read_array(filename)
+    except:
+        import pydec
+        M = whitney_innerproduct(cmplx,k)
+        pydec.io.write_array(filename,M)
+
+    return M
+
+
+
+def cube_innerproduct_cache(cmplx,k):
+    h = hash(cmplx.mesh.bitmap.tostring()) ^ hash(cmplx.mesh.bitmap.shape) ^ hash(k)
+
+    filename = "/home/nathan/.pydec/cache/cube_" + str(h) + ".mtx"
+
+    try:
+        import pydec
+        M = pydec.io.read_array(filename)
+    except:
+        import pydec
+        M = regular_cube_innerproduct(cmplx,k)
+        pydec.io.write_array(filename,M)
+
+    return M
+
+
+
+#solve d_k d_k problem for all reasonable k
+#from pylab import semilogy,show,xlabel,ylabel,legend,ylim,xlim
+#from matplotlib.font_manager import fontManager, FontProperties
+
+cochain_complex = cmplx.cochain_complex()
+
+for i in [1]: #range(len(cochain_complex)-1):
+    print "computing mass matrix"
+
+    if isinstance(cmplx,simplicial_complex):
+        Mi = whitney_innerproduct_cache(cmplx,i+1)
+    else:
+        Mi = regular_cube_innerproduct(cmplx,i+1)
+
+
+    dimension = mesh['vertices'].shape[1]
+
+    if True:
+
+        d0 = cmplx[0].d
+        d1 = cmplx[1].d
+
+        #A = (d1.T.tocsr() * d1 + d0 * d0.T.tocsr()).astype('d')
+        A = (d1.T.tocsr()  * d1).astype('d')
+
+        P = curl_curl_prolongator(d0,mesh['vertices'])
+
+        num_blocks = P.shape[1]/dimension
+        blocks = arange(num_blocks).repeat(dimension)
+
+        P = sa_smoothed_prolongator(A,P,epsilon=0,omega=4.0/3.0)
+
+        PAP = P.T.tocsr() * A * P
+
+        candidates = None
+        candidates = zeros((num_blocks,dimension,dimension))
+        for n in range(dimension):
+            candidates[:,n,n] = 1.0
+        candidates = candidates.reshape(-1,dimension)
+
+        ml = smoothed_aggregation_solver(PAP,epsilon=0.0,candidates=candidates,blocks=blocks)
+        #A = PAP
+        ml = multilevel_solver([A] + ml.As, [P] + ml.Ps)
+    else:
+
+        bh = boundary_hierarchy(cochain_complex)
+        while len(bh) < 3:
+            bh.coarsen()
+        print repr(bh)
+
+        N = len(cochain_complex) - 1
+
+        B =  bh[0][N - i].B
+
+        A = (B.T.tocsr() * B).astype('d')
+        #A = B.T.tocsr() * Mi * B
+
+        constant_prolongators = [lvl[N - i].I for lvl in bh[:-1]]
+
+        method = 'aSA'
+
+        if method == 'RS':
+            As = [A]
+            Ps = []
+            for T in constant_prolongators:
+                Ps.append( sa_smoothed_prolongator(As[-1],T,epsilon=0.0,omega=4.0/3.0) )
+                As.append(Ps[-1].T.tocsr() * As[-1] * Ps[-1])
+            ml = multilevel_solver(As,Ps)
+
+        else:
+            if method == 'BSA':
+                if i == 0:
+                    candidates = None
+                else:
+                    candidates = cmplx[0].d * mesh['vertices']
+                    K = candidates.shape[1]
+
+                    constant_prolongators = [constant_prolongators[0]] + \
+                            [expand_into_blocks(T,K,1).tocsr() for T in constant_prolongators[1:] ]
+
+                    ml = smoothed_aggregation_solver(A,candidates,aggregation=constant_prolongators)
+            elif method == 'aSA':
+                asa = adaptive_sa_solver(A,aggregation=constant_prolongators,max_candidates=dimension,epsilon=0.0)
+                ml = asa.solver
+            else:
+                raise ValueError,'unknown method'
+
+        #ml = smoothed_aggregation_solver(A,candidates)
+
+    #x = d0 * mesh['vertices'][:,0]
+    x = rand(A.shape[0])
+    b = zeros_like(x)
+    #b = A*rand(A.shape[0])
+
+    if True:
+        x_sol,residuals = ml.solve(b,x0=x,maxiter=50,tol=1e-12,return_residuals=True)
+    else:
+        residuals = []
+        def add_resid(x):
+            residuals.append(linalg.norm(b - A*x))
+        A.psolve = ml.psolve
+
+        from pydec import cg
+        x_sol = cg(A,b,x0=x,maxiter=40,tol=1e-8,callback=add_resid)[0]
+
+
+    residuals = array(residuals)/residuals[0]
+    avg_convergence_ratio = residuals[-1]**(1.0/len(residuals))
+    print "average convergence ratio",avg_convergence_ratio
+    print "last convergence ratio",residuals[-1]/residuals[-2]
+
+    print residuals
+
+
+
+
+##candidates = None
+##blocks = None
+##
+##
+##
+##A = io.mmread('tests/sample_data/elas30_A.mtx').tocsr()
+##candidates = io.mmread('tests/sample_data/elas30_nullspace.mtx')
+##candidates = [ array(candidates[:,x]) for x in range(candidates.shape[1]) ]
+##blocks = arange(A.shape[0]/2).repeat(2)
+##
+##ml = smoothed_aggregation_solver(A,candidates,blocks=blocks,epsilon=0,max_coarse=10,max_levels=10)
+###ml = ruge_stuben_solver(A)
+##
+##x = rand(A.shape[0])
+###b = zeros_like(x)
+##b = A*rand(A.shape[0])
+##
+##if True:
+##    x_sol,residuals = ml.solve(b,x0=x,maxiter=30,tol=1e-12,return_residuals=True)
+##else:
+##    residuals = []
+##    def add_resid(x):
+##        residuals.append(linalg.norm(b - A*x))
+##    A.psolve = ml.psolve
+##    x_sol = linalg.cg(A,b,x0=x,maxiter=25,tol=1e-12,callback=add_resid)[0]
+##
+##
+##residuals = array(residuals)/residuals[0]
+##avg_convergence_ratio = residuals[-1]**(1.0/len(residuals))
+##print "average convergence ratio",avg_convergence_ratio
+##print "last convergence ratio",residuals[-1]/residuals[-2]
+##
+##print residuals
+##
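
curl_curl_prolongator above assembles an edge-based prolongator from
sa_get_aggregates output with the coo_matrix((data,(row,col)),shape=...)
constructor.  In the simpler nodal case the same pattern yields the standard
piecewise-constant tentative prolongator; an illustrative sketch (not the
sa module's implementation):

    from numpy import ones, arange
    from scipy.sparse import coo_matrix

    def constant_tentative_prolongator(aggs):
        # aggs[i] = aggregate (coarse dof) owning fine dof i, the
        # format returned by sa_get_aggregates; one unit entry per
        # row interpolates constant vectors exactly
        n = len(aggs)
        return coo_matrix((ones(n),(arange(n),aggs)), shape=(n,aggs.max()+1)).tocsr()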

Added: trunk/scipy/sandbox/multigrid/examples/simple_example.py
===================================================================
--- trunk/scipy/sandbox/multigrid/examples/simple_example.py	2008-01-15 13:09:03 UTC (rev 3836)
+++ trunk/scipy/sandbox/multigrid/examples/simple_example.py	2008-01-15 13:54:53 UTC (rev 3837)
@@ -0,0 +1,28 @@
+from scipy import *
+from scipy.sandbox.multigrid.sa import *
+from scipy.sandbox.multigrid import *
+from scipy.sandbox.multigrid.utils import *
+from time import clock
+
+mats = io.loadmat('/home/nathan/Work/ogroup/matrices/elasticity/simple_grid_2d/elasticity_500x500.mat')
+A = mats['A'].tobsr(blocksize=(2,2))
+B = mats['B']
+
+#A = poisson_problem2D(50)
+#B = None
+
+start = clock()
+sa = smoothed_aggregation_solver(A,B=B)
+print "constructed solver in %s seconds" % (clock() - start)
+
+x0 = rand(A.shape[0])
+b = zeros_like(x0)
+start = clock()
+x,residuals = sa.solve(b,x0=x0,return_residuals=True)
+print "solved in %s seconds" % (clock() - start)
+
+residuals = array(residuals)/residuals[0]
+avg_convergence_ratio = residuals[-1]**(1.0/len(residuals))
+print "average convergence ratio",avg_convergence_ratio
+print "last convergence ratio",residuals[-1]/residuals[-2]
+print 'residuals',residuals



