[Scipy-svn] r3253 - in trunk/scipy/sandbox/multigrid: . multigridtools tests

scipy-svn at scipy.org
Wed Aug 22 11:55:17 EDT 2007


Author: wnbell
Date: 2007-08-22 10:55:13 -0500 (Wed, 22 Aug 2007)
New Revision: 3253

Modified:
   trunk/scipy/sandbox/multigrid/
   trunk/scipy/sandbox/multigrid/multigrid.py
   trunk/scipy/sandbox/multigrid/multigridtools/
   trunk/scipy/sandbox/multigrid/multigridtools/multigridtools.i
   trunk/scipy/sandbox/multigrid/multigridtools/multigridtools_wrap.cxx
   trunk/scipy/sandbox/multigrid/multilevel.py
   trunk/scipy/sandbox/multigrid/simple_test.py
   trunk/scipy/sandbox/multigrid/tests/
Log:
changed properties




Property changes on: trunk/scipy/sandbox/multigrid
___________________________________________________________________
Name: svn:ignore
   + *.so
*.bak
*.pyc
ignore.txt



Modified: trunk/scipy/sandbox/multigrid/multigrid.py
===================================================================
--- trunk/scipy/sandbox/multigrid/multigrid.py	2007-08-21 22:12:51 UTC (rev 3252)
+++ trunk/scipy/sandbox/multigrid/multigrid.py	2007-08-22 15:55:13 UTC (rev 3253)
@@ -8,6 +8,19 @@
     
 from pydec import gauss_seidel,diag_sparse,inf_norm
 
+
+def poisson_problem1D(N):
+    """
+    Return a sparse matrix for the 1d Poisson problem on N unknowns
+    with the standard 3-point finite difference stencil
+    """
+    D = 2*numpy.ones(N)
+    O =  -numpy.ones(N)
+    return scipy.sparse.spdiags([D,O,O],[0,-1,1],N,N)
+
+
+
+
 def poisson_problem(N):
     """
     Return a sparse CSC matrix for the 2d N*N poisson problem
@@ -51,6 +64,17 @@
     return scipy.sparse.csr_matrix((Sx,Sj,Sp),A.shape)
 
 
+def sa_no_threshold(A):
+    if not scipy.sparse.isspmatrix_csr(A): raise TypeError('expected sparse.csr_matrix')
+    
+    #tentative (non-smooth) interpolation operator I
+    Ij = multigridtools.sa_get_aggregates(A.shape[0],A.indptr,A.indices)
+    Ip = numpy.arange(len(Ij)+1)
+    Ix = numpy.ones(len(Ij))
+    
+    return scipy.sparse.csr_matrix((Ix,Ij,Ip))
+
+
 def sa_constant_interpolation(A,epsilon=0.08):
     if not scipy.sparse.isspmatrix_csr(A): raise TypeError('expected sparse.csr_matrix')
     

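As a minimal sketch of what the new poisson_problem1D helper builds (not part
of the committed code), the same tridiagonal 1d Poisson operator, 2 on the
main diagonal and -1 on the first sub- and super-diagonals, can be constructed
standalone with numpy and scipy.sparse:

    import numpy
    import scipy.sparse

    N = 5
    D = 2*numpy.ones(N)                                    # main diagonal
    O = -numpy.ones(N)                                     # off diagonals
    A = scipy.sparse.spdiags([D,O,O],[0,-1,1],N,N).tocsr()
    print(A.todense())                                     # rows follow the [-1, 2, -1] stencil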

Property changes on: trunk/scipy/sandbox/multigrid/multigridtools
___________________________________________________________________
Name: svn:ignore
   + *.so
*.bak
*.pyc
ignore.txt



Modified: trunk/scipy/sandbox/multigrid/multigridtools/multigridtools.i
===================================================================
--- trunk/scipy/sandbox/multigrid/multigridtools/multigridtools.i	2007-08-21 22:12:51 UTC (rev 3252)
+++ trunk/scipy/sandbox/multigrid/multigridtools/multigridtools.i	2007-08-22 15:55:13 UTC (rev 3253)
@@ -16,7 +16,7 @@
 
 %feature("autodoc", "1");
 
-%include "../../../sparse/sparsetools/numpy.i"
+%include "numpy.i"
 
 %init %{
     import_array();
@@ -61,7 +61,7 @@
 %enddef
 
 
-I_IN_ARRAY1( int  )
+I_IN_ARRAY1( int         )
 T_IN_ARRAY1( float       )
 T_IN_ARRAY1( double      )
 
@@ -70,8 +70,7 @@
  /*
   * OUT types
   */
-%define I_ARRAY_ARGOUT( ctype, atype )
-VEC_ARRAY_ARGOUT( ctype, atype )
+%define I_ARRAY_ARGOUT( ctype )
 %apply std::vector<ctype>* array_argout {
     std::vector<ctype>* Ap,
     std::vector<ctype>* Ai,
@@ -91,8 +90,7 @@
 };
 %enddef
 
-%define T_ARRAY_ARGOUT( ctype, atype )
-VEC_ARRAY_ARGOUT( ctype, atype )
+%define T_ARRAY_ARGOUT( ctype )
 %apply std::vector<ctype>* array_argout {
     std::vector<ctype>* Ax, 
     std::vector<ctype>* Bx,
@@ -104,9 +102,9 @@
 };
 %enddef
 
-I_ARRAY_ARGOUT( int,   INT)
-T_ARRAY_ARGOUT( float,       FLOAT   )
-T_ARRAY_ARGOUT( double,      DOUBLE  )
+I_ARRAY_ARGOUT( int    )
+T_ARRAY_ARGOUT( float  )
+T_ARRAY_ARGOUT( double )
 
 
 

Modified: trunk/scipy/sandbox/multigrid/multigridtools/multigridtools_wrap.cxx
===================================================================
--- trunk/scipy/sandbox/multigrid/multigridtools/multigridtools_wrap.cxx	2007-08-21 22:12:51 UTC (rev 3252)
+++ trunk/scipy/sandbox/multigrid/multigridtools/multigridtools_wrap.cxx	2007-08-22 15:55:13 UTC (rev 3253)
@@ -2576,7 +2576,9 @@
 #endif
 #include "stdio.h"
 #include <numpy/arrayobject.h>
+#include "complex_ops.h"
 
+
 /* The following code originally appeared in enthought/kiva/agg/src/numeric.i,
  * author unknown.  It was translated from C++ to C by John Hunter.  Bill
  * Spotz has modified it slightly to fix some minor bugs, add some comments
@@ -3089,7 +3091,7 @@
   resultobj = SWIG_Py_Void();
   {
     int length = (arg4)->size(); 
-    PyObject *obj = PyArray_FromDims(1, &length, PyArray_INT); 
+    PyObject *obj = PyArray_FromDims(1, &length,PyArray_INT); 
     memcpy(PyArray_DATA(obj),&((*(arg4))[0]),sizeof(int)*length);	 
     delete arg4; 
     resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); 
@@ -3192,21 +3194,21 @@
   resultobj = SWIG_Py_Void();
   {
     int length = (arg6)->size(); 
-    PyObject *obj = PyArray_FromDims(1, &length, PyArray_INT); 
+    PyObject *obj = PyArray_FromDims(1, &length,PyArray_INT); 
     memcpy(PyArray_DATA(obj),&((*(arg6))[0]),sizeof(int)*length);	 
     delete arg6; 
     resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); 
   }
   {
     int length = (arg7)->size(); 
-    PyObject *obj = PyArray_FromDims(1, &length, PyArray_INT); 
+    PyObject *obj = PyArray_FromDims(1, &length,PyArray_INT); 
     memcpy(PyArray_DATA(obj),&((*(arg7))[0]),sizeof(int)*length);	 
     delete arg7; 
     resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); 
   }
   {
     int length = (arg8)->size(); 
-    PyObject *obj = PyArray_FromDims(1, &length, PyArray_FLOAT); 
+    PyObject *obj = PyArray_FromDims(1, &length,PyArray_FLOAT); 
     memcpy(PyArray_DATA(obj),&((*(arg8))[0]),sizeof(float)*length);	 
     delete arg8; 
     resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); 
@@ -3315,21 +3317,21 @@
   resultobj = SWIG_Py_Void();
   {
     int length = (arg6)->size(); 
-    PyObject *obj = PyArray_FromDims(1, &length, PyArray_INT); 
+    PyObject *obj = PyArray_FromDims(1, &length,PyArray_INT); 
     memcpy(PyArray_DATA(obj),&((*(arg6))[0]),sizeof(int)*length);	 
     delete arg6; 
     resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); 
   }
   {
     int length = (arg7)->size(); 
-    PyObject *obj = PyArray_FromDims(1, &length, PyArray_INT); 
+    PyObject *obj = PyArray_FromDims(1, &length,PyArray_INT); 
     memcpy(PyArray_DATA(obj),&((*(arg7))[0]),sizeof(int)*length);	 
     delete arg7; 
     resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); 
   }
   {
     int length = (arg8)->size(); 
-    PyObject *obj = PyArray_FromDims(1, &length, PyArray_DOUBLE); 
+    PyObject *obj = PyArray_FromDims(1, &length,PyArray_DOUBLE); 
     memcpy(PyArray_DATA(obj),&((*(arg8))[0]),sizeof(double)*length);	 
     delete arg8; 
     resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); 
@@ -3580,21 +3582,21 @@
   resultobj = SWIG_Py_Void();
   {
     int length = (arg11)->size(); 
-    PyObject *obj = PyArray_FromDims(1, &length, PyArray_INT); 
+    PyObject *obj = PyArray_FromDims(1, &length,PyArray_INT); 
     memcpy(PyArray_DATA(obj),&((*(arg11))[0]),sizeof(int)*length);	 
     delete arg11; 
     resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); 
   }
   {
     int length = (arg12)->size(); 
-    PyObject *obj = PyArray_FromDims(1, &length, PyArray_INT); 
+    PyObject *obj = PyArray_FromDims(1, &length,PyArray_INT); 
     memcpy(PyArray_DATA(obj),&((*(arg12))[0]),sizeof(int)*length);	 
     delete arg12; 
     resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); 
   }
   {
     int length = (arg13)->size(); 
-    PyObject *obj = PyArray_FromDims(1, &length, PyArray_FLOAT); 
+    PyObject *obj = PyArray_FromDims(1, &length,PyArray_FLOAT); 
     memcpy(PyArray_DATA(obj),&((*(arg13))[0]),sizeof(float)*length);	 
     delete arg13; 
     resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); 
@@ -3802,21 +3804,21 @@
   resultobj = SWIG_Py_Void();
   {
     int length = (arg11)->size(); 
-    PyObject *obj = PyArray_FromDims(1, &length, PyArray_INT); 
+    PyObject *obj = PyArray_FromDims(1, &length,PyArray_INT); 
     memcpy(PyArray_DATA(obj),&((*(arg11))[0]),sizeof(int)*length);	 
     delete arg11; 
     resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); 
   }
   {
     int length = (arg12)->size(); 
-    PyObject *obj = PyArray_FromDims(1, &length, PyArray_INT); 
+    PyObject *obj = PyArray_FromDims(1, &length,PyArray_INT); 
     memcpy(PyArray_DATA(obj),&((*(arg12))[0]),sizeof(int)*length);	 
     delete arg12; 
     resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); 
   }
   {
     int length = (arg13)->size(); 
-    PyObject *obj = PyArray_FromDims(1, &length, PyArray_DOUBLE); 
+    PyObject *obj = PyArray_FromDims(1, &length,PyArray_DOUBLE); 
     memcpy(PyArray_DATA(obj),&((*(arg13))[0]),sizeof(double)*length);	 
     delete arg13; 
     resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); 
@@ -4088,21 +4090,21 @@
   resultobj = SWIG_Py_Void();
   {
     int length = (arg6)->size(); 
-    PyObject *obj = PyArray_FromDims(1, &length, PyArray_INT); 
+    PyObject *obj = PyArray_FromDims(1, &length,PyArray_INT); 
     memcpy(PyArray_DATA(obj),&((*(arg6))[0]),sizeof(int)*length);	 
     delete arg6; 
     resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); 
   }
   {
     int length = (arg7)->size(); 
-    PyObject *obj = PyArray_FromDims(1, &length, PyArray_INT); 
+    PyObject *obj = PyArray_FromDims(1, &length,PyArray_INT); 
     memcpy(PyArray_DATA(obj),&((*(arg7))[0]),sizeof(int)*length);	 
     delete arg7; 
     resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); 
   }
   {
     int length = (arg8)->size(); 
-    PyObject *obj = PyArray_FromDims(1, &length, PyArray_FLOAT); 
+    PyObject *obj = PyArray_FromDims(1, &length,PyArray_FLOAT); 
     memcpy(PyArray_DATA(obj),&((*(arg8))[0]),sizeof(float)*length);	 
     delete arg8; 
     resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); 
@@ -4211,21 +4213,21 @@
   resultobj = SWIG_Py_Void();
   {
     int length = (arg6)->size(); 
-    PyObject *obj = PyArray_FromDims(1, &length, PyArray_INT); 
+    PyObject *obj = PyArray_FromDims(1, &length,PyArray_INT); 
     memcpy(PyArray_DATA(obj),&((*(arg6))[0]),sizeof(int)*length);	 
     delete arg6; 
     resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); 
   }
   {
     int length = (arg7)->size(); 
-    PyObject *obj = PyArray_FromDims(1, &length, PyArray_INT); 
+    PyObject *obj = PyArray_FromDims(1, &length,PyArray_INT); 
     memcpy(PyArray_DATA(obj),&((*(arg7))[0]),sizeof(int)*length);	 
     delete arg7; 
     resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); 
   }
   {
     int length = (arg8)->size(); 
-    PyObject *obj = PyArray_FromDims(1, &length, PyArray_DOUBLE); 
+    PyObject *obj = PyArray_FromDims(1, &length,PyArray_DOUBLE); 
     memcpy(PyArray_DATA(obj),&((*(arg8))[0]),sizeof(double)*length);	 
     delete arg8; 
     resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); 
@@ -4449,21 +4451,21 @@
   resultobj = SWIG_Py_Void();
   {
     int length = (arg9)->size(); 
-    PyObject *obj = PyArray_FromDims(1, &length, PyArray_INT); 
+    PyObject *obj = PyArray_FromDims(1, &length,PyArray_INT); 
     memcpy(PyArray_DATA(obj),&((*(arg9))[0]),sizeof(int)*length);	 
     delete arg9; 
     resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); 
   }
   {
     int length = (arg10)->size(); 
-    PyObject *obj = PyArray_FromDims(1, &length, PyArray_INT); 
+    PyObject *obj = PyArray_FromDims(1, &length,PyArray_INT); 
     memcpy(PyArray_DATA(obj),&((*(arg10))[0]),sizeof(int)*length);	 
     delete arg10; 
     resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); 
   }
   {
     int length = (arg11)->size(); 
-    PyObject *obj = PyArray_FromDims(1, &length, PyArray_FLOAT); 
+    PyObject *obj = PyArray_FromDims(1, &length,PyArray_FLOAT); 
     memcpy(PyArray_DATA(obj),&((*(arg11))[0]),sizeof(float)*length);	 
     delete arg11; 
     resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); 
@@ -4626,21 +4628,21 @@
   resultobj = SWIG_Py_Void();
   {
     int length = (arg9)->size(); 
-    PyObject *obj = PyArray_FromDims(1, &length, PyArray_INT); 
+    PyObject *obj = PyArray_FromDims(1, &length,PyArray_INT); 
     memcpy(PyArray_DATA(obj),&((*(arg9))[0]),sizeof(int)*length);	 
     delete arg9; 
     resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); 
   }
   {
     int length = (arg10)->size(); 
-    PyObject *obj = PyArray_FromDims(1, &length, PyArray_INT); 
+    PyObject *obj = PyArray_FromDims(1, &length,PyArray_INT); 
     memcpy(PyArray_DATA(obj),&((*(arg10))[0]),sizeof(int)*length);	 
     delete arg10; 
     resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); 
   }
   {
     int length = (arg11)->size(); 
-    PyObject *obj = PyArray_FromDims(1, &length, PyArray_DOUBLE); 
+    PyObject *obj = PyArray_FromDims(1, &length,PyArray_DOUBLE); 
     memcpy(PyArray_DATA(obj),&((*(arg11))[0]),sizeof(double)*length);	 
     delete arg11; 
     resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); 

Modified: trunk/scipy/sandbox/multigrid/multilevel.py
===================================================================
--- trunk/scipy/sandbox/multigrid/multilevel.py	2007-08-21 22:12:51 UTC (rev 3252)
+++ trunk/scipy/sandbox/multigrid/multilevel.py	2007-08-22 15:55:13 UTC (rev 3253)
@@ -1,11 +1,12 @@
-from scipy import ones,zeros,rand,array,array_split,hstack,transpose,sum,ones_like,sqrt
-from scipy.sparse import spidentity
+from scipy import ones,zeros,rand,array,array_split,hstack,transpose,sum,ones_like,sqrt,concatenate
+from scipy.sparse import spidentity,csr_matrix,coo_matrix
 from numpy.linalg import norm
+from numpy import zeros_like,arange,inner,diff,ravel
 
 import pydec
 from pydec import diag_sparse,inf_norm, mls_polynomial_coeffs,polynomial_smoother
      
-from multigrid import sa_interpolation,rs_interpolation
+from multigrid import sa_interpolation,rs_interpolation,sa_constant_interpolation,sa_no_threshold
 import multigrid
 import multigridtools
 from relaxation import gauss_seidel,jacobi 
@@ -312,7 +313,6 @@
         
         self[-1].coarse_solver = coarse_grid_solver(A,self.opts.sub_options('coarse:'))
         self[-1].A = A
-
         if self.opts['smoother: type'] == 'jacobi':
             omegas = []
             for lvl in self:
@@ -324,4 +324,400 @@
             self.opts['smoother: omega'] = omegas
         
 
+class multilevel_solver2:
+    def __init__(self,As,Ps,options=None):
+        self.As = As
+        self.Ps = Ps
+        self.ops = options
+            
+    def solve(self,b, x0=None, tol=1e-5, maxiter=100, callback=None, return_residuals=False):
 
+        if x0 is None:
+            x = zeros(b.shape,max(self.As[0].dtype,b.dtype))
+        else:
+            x = array(x0)
+
+        self.__solve(0,x,b)
+
+        return x
+        
+    def __solve(self,lvl,x,b):
+
+        A = self.As[lvl]
+        
+        if len(self.As) == 1:
+            x[:] = scipy.linalg.solve(A.todense(),b)
+            return x
+
+
+        self.__smooth(lvl,x,b,which='pre')
+            
+        residual = b - A*x
+
+        coarse_x = zeros((self.As[lvl+1].shape[0]))
+        coarse_b = self.Ps[lvl].T * residual
+        
+        if lvl == len(self.As) - 2:
+            coarse_x[:] = scipy.linalg.solve(self.As[-1].todense(),coarse_b)
+            #coarse_x[:] = self[-1].coarse_solver.solve(coarse_b)   #next level is coarsest 
+        else:   
+            self.__solve(lvl+1,coarse_x,coarse_b)
+                
+        x += self.Ps[lvl] * coarse_x
+
+        self.__smooth(lvl,x,b,which='post')
+
+
+    def __smooth(self,lvl,x,b,which):
+        A = self.As[lvl]
+        if which == 'pre':
+            gauss_seidel(A,x,b,iterations=1,sweep="forward")
+        else:
+            gauss_seidel(A,x,b,iterations=1,sweep="backward")
+
+
+def inf_norm(A):
+    return abs(A).sum(axis=1).max()  #max abs row sum
+
+def fit_candidate(I,x):
+    """
+    For each aggregate in I (i.e. each column of I) compute vector R and 
+    sparse matrix Q (having the sparsity of I) such that the following holds:
+
+        Q*R = x     and   Q^T*Q = I
+
+    In other words, find a prolongator Q with orthonormal columns so that
+    x is represented exactly on the coarser level by R.
+    """
+    Q = csr_matrix((x.copy(),I.indices,I.indptr),dims=I.shape,check=False)
+    R = sqrt(numpy.ravel(csr_matrix((x*x,I.indices,I.indptr),dims=I.shape,check=False).sum(axis=0)))  #column 2-norms  
+    Q.data *= (1.0/R)[Q.indices]
+    print "norm(Q*R - x)",linalg.norm(Q*R - x)
+    return Q,R
+
+
+def scaled_columns_csr(A,scales):
+    scales = numpy.ravel(scales)
+    A = A.copy()
+    A.data *= scales[A.indices]
+    return A
+
+def orthonormalize_candidate(I,x,basis):
+    Px = csr_matrix((x,I.indices,I.indptr),dims=I.shape,check=False) 
+    Rs = []
+    #orthogonalize columns of Px against other candidates 
+    for b in basis:
+        Pb = csr_matrix((b,I.indices,I.indptr),dims=I.shape,check=False)
+        R  = ravel(csr_matrix((Pb.data*Px.data,I.indices,I.indptr),dims=I.shape,check=False).sum(axis=0)) # columnwise projection of Px on Pb
+        Px.data -= R[I.indices] * Pb.data   #subtract component in b direction
+        Rs.append(R)
+
+    #filter columns here, set unused cols to 0, add to mask
+    
+    #normalize columns of Px
+    R = ravel(sqrt(csr_matrix((x*x,I.indices,I.indptr),dims=I.shape,check=False).sum(axis=0)))  #column 2-norms
+    Px.data *= (1.0/R)[I.indices]
+    Rs.append(R.reshape(-1,1))
+    return Rs
+
+def hstack_csr(A,B):
+    #OPTIMIZE THIS
+    assert(A.shape[0] == B.shape[0])
+    A = A.tocoo()
+    B = B.tocoo()
+    I = concatenate((A.row,B.row))
+    J = concatenate((A.col,B.col+A.shape[1]))
+    V = concatenate((A.data,B.data))
+    return coo_matrix((V,(I,J)),dims=(A.shape[0],A.shape[1]+B.shape[1])).tocsr()
+
+
+def vstack_csr(A,B):
+    #OPTIMIZE THIS
+    assert(A.shape[1] == B.shape[1])
+    A = A.tocoo()
+    B = B.tocoo()
+    I = concatenate((A.row,B.row+A.shape[0]))
+    J = concatenate((A.col,B.col))
+    V = concatenate((A.data,B.data))
+    return coo_matrix((V,(I,J)),dims=(A.shape[0]+B.shape[0],A.shape[1])).tocsr()
+    
+
+
+def orthonormalize_prolongator(P_l,x_l,W_l,W_m):
+    """
+    Expand the prolongator P_l using the candidate x_l (which has the
+    sparsity pattern of W_l): orthogonalize the candidate block against the
+    existing columns of P_l, normalize the surviving columns, and return the
+    enlarged prolongator together with the coarse representation x_m of x_l
+    and the aggregation operators W_l, W_m (currently passed through unchanged).
+    """
+    X = csr_matrix((x_l,W_l.indices,W_l.indptr),dims=W_l.shape,check=False)  #candidate prolongator (assumes every value from x is used)
+    
+    R = (P_l.T.tocsr() * X)  # R has at most 1 nz per row
+    X = X - P_l*R            # orthogonalize X against P_l
+    
+    #DROP REDUNDANT COLUMNS FROM P (AND R?) HERE (NULL OUT R ACCORDINGLY?)    
+    #REMOVE CORRESPONDING COLUMNS FROM W_l AND ROWS FROM A_m ALSO
+    W_l_new = W_l
+    W_m_new = W_m
+
+    #normalize surviving columns of X
+    col_norms = ravel(sqrt(csr_matrix((X.data*X.data,X.indices,X.indptr),dims=X.shape,check=False).sum(axis=0)))
+    print "zero cols",sum(col_norms == 0)
+    print "small cols",sum(col_norms < 1e-8)
+    Xcopy = X.copy()
+    X.data *= (1.0/col_norms)[X.indices]
+
+    P_l_new = hstack_csr(P_l,X)
+
+
+    #check orthonormality
+    print "norm(P.T*P - I) ",scipy.linalg.norm((P_l_new.T * P_l_new - scipy.sparse.spidentity(P_l_new.shape[1])).data)
+    #assert(scipy.linalg.norm((P_l_new.T * P_l_new - scipy.sparse.spidentity(P_l_new.shape[1])).data)<1e-8)
+    
+    x_m = zeros(P_l_new.shape[1],dtype=x_l.dtype)
+    x_m[:P_l.shape[1]][diff(R.indptr).astype('bool')] = R.data
+    x_m[P_l.shape[1]:] = col_norms
+
+    print "||x_l - P_l*x_m||",scipy.linalg.norm(P_l_new* x_m - x_l) #see if x_l is represented exactly
+
+    return P_l_new,x_m,W_l,W_m
+
+
+
+def prolongation_smoother(A):
+    omega = (4.0/3.0)/inf_norm(A)
+    S = (spidentity(A.shape[0]).T  - omega*A)
+    return S
+
+
+def smoothed_prolongator(P,A):
+    #just use Richardson for now
+    omega = 4.0/(3.0*inf_norm(A))
+    return P - omega*(A*P)
+ 
+
+
+
+def sa_hierarchy(A,Ws,x):
+    """
+    Construct multilevel hierarchy using Smoothed Aggregation
+        Inputs:
+          A  - matrix
+          Ws - list of constant prolongators (aggregation operators)
+          x  - "candidate" basis function to be approximated
+        Outputs:
+          (As,Is,Ps) - tuple of lists
+                  - As - coarse operators: As[i+1] = Is[i].T * As[i] * Is[i]
+                  - Is - smoothed prolongators 
+                  - Ps - tentative prolongators
+    """
+    Ps = []
+    Is = []
+    As = [A]
+
+    for W in Ws:
+        P,x = fit_candidate(W,x)
+        I   = smoothed_prolongator(P,A)  
+        A   = I.T.tocsr() * A * I
+        As.append(A)
+        Ps.append(P)
+        Is.append(I)
+    return As,Is,Ps
+         
+def make_bridge(I,N):
+    tail = I.indptr[-1].repeat(N - I.shape[0])
+    ptr = concatenate((I.indptr,tail))
+    return csr_matrix((I.data,I.indices,ptr),dims=(N,I.shape[1]),check=False)
+
+class adaptive_sa_solver:
+    def __init__(self,A,options=None):
+        self.A = A
+
+        self.Rs = [] 
+        self.__construct_hierarchy(A)
+
+    def __construct_hierarchy(self,A):
+        #if self.A.shape[0] <= self.opts['coarse: max size']:
+        #    raise ValueError,'small matrices not handled yet'
+        
+        x,AggOps = self.__initialization_stage(A) #first candidate
+        Ws = AggOps
+        
+        #x[:] = 1 #TEST 
+
+        self.candidates = [x]
+        
+        #create SA using x here
+        As,Is,Ps = sa_hierarchy(A,Ws,x)
+
+        for i in range(0):
+            x           = self.__develop_candidate(A,As,Is,Ps,Ws,AggOps)
+            #x[:]  = arange(x.shape[0])
+            #x[x.shape[0]/2:] = 2*x[x.shape[0]/2] - x[x.shape[0]/2:]
+            As,Is,Ps,Ws = self.__augment_cycle(A,As,Ps,Ws,AggOps,x)
+             
+            self.candidates.append(x)
+        
+        #As,Is,Ps = sa_hierarchy(A,AggOps,x)  #TESTING
+        self.Ps = Ps 
+        self.solver = multilevel_solver2(As,Is)
+                
+
+
+    def __develop_candidate(self,A,As,Is,Ps,Ws,AggOps):
+        x = rand(A.shape[0])
+        b = zeros_like(x)
+
+        #x[:] = 1 #TEST
+
+        mu = 5
+ 
+        solver = multilevel_solver2(As,Is)
+
+        for n in range(mu):
+            x = solver.solve(b, x0=x, tol=1e-8, maxiter=1)
+        #TEST FOR CONVERGENCE HERE
+ 
+        A_l,P_l,W_l,x_l = As[0],Ps[0],Ws[0],x
+        
+        temp_Is = []
+        for i in range(len(As) - 2):
+            P_l_new, x_m, W_l_new, W_m_new = orthonormalize_prolongator(P_l, x_l, W_l, AggOps[i+1])    
+ 
+            I_l_new = smoothed_prolongator(P_l_new,A_l)
+            A_m_new = I_l_new.T.tocsr() * A_l * I_l_new
+            bridge = make_bridge(Is[i+1],A_m_new.shape[0]) 
+ 
+            temp_solver = multilevel_solver2( [A_m_new] + As[i+2:], [bridge] + Is[i+2:] )
+ 
+            for n in range(mu):
+                x_m = temp_solver.solve(zeros_like(x_m), x0=x_m, tol=1e-8, maxiter=1)
+ 
+            temp_Is.append(I_l_new)
+ 
+            W_l = vstack_csr(Ws[i+1],W_m_new)  #prepare for next iteration
+            A_l = A_m_new
+            x_l = x_m
+            P_l = make_bridge(Ps[i+1],A_m_new.shape[0])
+ 
+        x = x_l
+        for I in reversed(temp_Is):
+            x = I*x
+        
+        return x
+           
+
+    def __augment_cycle(self,A,As,Ps,Ws,AggOps,x):
+        #As,Is,Ps,Ws = self.__augment_cycle(A,Ps,Ws,AggOps,x)
+
+        #make a new cycle using the new candidate
+        A_l,P_l,W_l,x_l = As[0],Ps[0],AggOps[0],x            
+        
+        new_As,new_Is,new_Ps,new_Ws = [A],[],[],[AggOps[0]]
+
+        for i in range(len(As) - 2):
+            P_l_new, x_m, W_l_new, W_m_new = orthonormalize_prolongator(P_l, x_l, W_l, AggOps[i+1])
+
+            I_l_new = smoothed_prolongator(P_l_new,A_l)
+            A_m_new = I_l_new.T.tocsr() * A_l * I_l_new
+            W_m_new = vstack_csr(Ws[i+1],W_m_new)
+
+            new_As.append(A_m_new)
+            new_Ws.append(W_m_new)
+            new_Is.append(I_l_new)
+            new_Ps.append(P_l_new)
+
+            #prepare for next iteration
+            W_l = W_m_new 
+            A_l = A_m_new
+            x_l = x_m
+            P_l = make_bridge(Ps[i+1],A_m_new.shape[0]) 
+        
+        P_l_new, x_m, W_l_new, W_m_new = orthonormalize_prolongator(P_l, x_l, W_l, csr_matrix((P_l.shape[1],1)))
+        I_l_new = smoothed_prolongator(P_l_new,A_l)
+        A_m_new = I_l_new.T.tocsr() * A_l * I_l_new
+
+        new_As.append(A_m_new)
+        new_Is.append(I_l_new)
+        new_Ps.append(P_l_new)
+
+        return new_As,new_Is,new_Ps,new_Ws
+
+
+    def __initialization_stage(self,A):
+        max_levels = 10
+        max_coarse = 50
+
+        AggOps = []
+        Is     = []
+
+        # aSA parameters 
+        mu      = 5    # number of test relaxation iterations
+        epsilon = 0.1  # minimum acceptable relaxation convergence factor 
+
+        #step 1
+        A_l = A
+        x   = scipy.rand(A_l.shape[0])
+        skip_f_to_i = False
+        
+        #step 2
+        b = zeros_like(x)
+        gauss_seidel(A_l,x,b,iterations=mu)
+        #step 3
+        #test convergence rate here 
+       
+        while len(AggOps) < max_levels  and A_l.shape[0] > max_coarse:
+            W_l   = sa_constant_interpolation(A_l)                        #step 4b 
+            #W_l   = sa_no_threshold(A_l)                        #step 4b  TEST
+            P_l,x = fit_candidate(W_l,x)                                  #step 4c  
+            I_l   = smoothed_prolongator(P_l,A_l)                         #step 4d
+            A_l   = I_l.T.tocsr() * A_l * I_l                             #step 4e
+            
+            AggOps.append(W_l)
+            Is.append(I_l)
+
+            if A_l.shape[0] <= max_coarse:  break
+
+            if not skip_f_to_i:
+                print "."
+                x_hat = x.copy()                                        #step 4g
+                gauss_seidel(A_l,x,zeros_like(x),iterations=mu)         #step 4h
+                x_A_x = inner(x,A_l*x) 
+                if (x_A_x/inner(x_hat,A_l*x_hat))**(1.0/mu) < epsilon:  #step 4i
+                    print "sufficient convergence, skipping"
+                    skip_f_to_i = True
+                    if x_A_x == 0:       
+                        x = x_hat  #need to restore x
+        
+        #update fine-level candidate
+        for I in reversed(Is):
+            x = I * x
+                
+        #gauss_seidel(A,x,zeros_like(x),iterations=mu)    #TEST
+    
+        #x[:] = 1 #TEST
+    
+        return x,AggOps  #first candidate,aggregation
+
+
+
+from scipy import *
+from pydec import diag_sparse
+from multigrid import poisson_problem,poisson_problem1D
+#A = poisson_problem(100).T
+A = poisson_problem1D(100).T
+D = diag_sparse(1.0/sqrt(10**(12*rand(A.shape[0])-6))).tocsr()
+A = D * A * D
+#A = A*A
+#A = io.mmread("nos2.mtx").tocsr()
+asa = adaptive_sa_solver(A)
+x = rand(A.shape[0])
+b = zeros_like(x)
+
+resid = []
+
+for n in range(50):
+    x = asa.solver.solve(b,x)
+    resid.append(linalg.norm(A*x))
+
+
+
+
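The fit_candidate and smoothed_prolongator routines above carry the core of
the new adaptive SA setup: the candidate vector is split per aggregate into an
orthonormal tentative-prolongator column and a coarse-level value, and the
tentative prolongator is then improved by one Richardson sweep. A minimal
standalone sketch of both ideas, using hypothetical values rather than the
committed code:

    import numpy

    # fit_candidate idea on a single aggregate: factor the candidate values
    # x_agg into a unit-norm column q and a coarse value r, so q*r == x_agg.
    x_agg = numpy.array([1.0, 2.0, 2.0])        # hypothetical candidate values
    r = numpy.sqrt(numpy.dot(x_agg, x_agg))     # coarse value (column 2-norm)
    q = x_agg / r                               # orthonormal prolongator column
    assert numpy.allclose(q*r, x_agg)           # Q*R = x
    assert abs(numpy.dot(q, q) - 1.0) < 1e-12   # Q^T*Q = I (single column)

    # smoothed_prolongator idea: one Richardson sweep, P <- P - omega*A*P
    # with omega = 4/(3*||A||_inf), applied to the tentative prolongator.
    A = numpy.array([[ 2.0, -1.0,  0.0],
                     [-1.0,  2.0, -1.0],
                     [ 0.0, -1.0,  2.0]])
    P = q.reshape(-1, 1)                        # 3 fine dofs -> 1 coarse dof
    omega = 4.0/(3.0*numpy.abs(A).sum(axis=1).max())
    I_smooth = P - omega*numpy.dot(A, P)        # smoothed prolongator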

Modified: trunk/scipy/sandbox/multigrid/simple_test.py
===================================================================
--- trunk/scipy/sandbox/multigrid/simple_test.py	2007-08-21 22:12:51 UTC (rev 3252)
+++ trunk/scipy/sandbox/multigrid/simple_test.py	2007-08-22 15:55:13 UTC (rev 3253)
@@ -2,7 +2,7 @@
 from multigrid import *
 from scipy import *
 
-A = poisson_problem(300).T
+A = poisson_problem(100).T
 s = scalar_solver(A)
 b = rand(A.shape[0])
 x,res = s.solve(b,return_residuals=True)


Property changes on: trunk/scipy/sandbox/multigrid/tests
___________________________________________________________________
Name: svn:ignore
   + *.so
*.bak
*.pyc
ignore.txt





