From scipy-svn at scipy.org Wed Aug 1 04:08:03 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 1 Aug 2007 03:08:03 -0500 (CDT) Subject: [Scipy-svn] r3211 - trunk/Lib/io Message-ID: <20070801080803.C265139C140@new.scipy.org> Author: cdavid Date: 2007-08-01 03:07:47 -0500 (Wed, 01 Aug 2007) New Revision: 3211 Modified: trunk/Lib/io/mio.py Log: - correct wrong variable name for relative path in find_mat_file in mio.py (should fix #473) - make other cosmetic changes to make pylint happy in mio.py Modified: trunk/Lib/io/mio.py =================================================================== --- trunk/Lib/io/mio.py 2007-07-31 05:44:04 UTC (rev 3210) +++ trunk/Lib/io/mio.py 2007-08-01 08:07:47 UTC (rev 3211) @@ -10,7 +10,7 @@ from scipy.io.mio4 import MatFile4Reader, MatFile4Writer from scipy.io.mio5 import MatFile5Reader, MatFile5Writer -__all__ = ['find_mat_file','mat_reader_factory','loadmat', 'savemat'] +__all__ = ['find_mat_file', 'mat_reader_factory', 'loadmat', 'savemat'] def find_mat_file(file_name, appendmat=True): ''' Try to find .mat file on system path @@ -21,14 +21,14 @@ if appendmat and file_name[-4:] == ".mat": file_name = file_name[:-4] if os.sep in file_name: - full_file_name = file_name + full_name = file_name if appendmat: full_name = file_name + ".mat" else: full_name = None - junk,file_name = os.path.split(file_name) + junk, file_name = os.path.split(file_name) for path in sys.path: - test_name = os.path.join(path,file_name) + test_name = os.path.join(path, file_name) if appendmat: test_name += ".mat" try: @@ -117,7 +117,8 @@ try: file_name.write('') except AttributeError: - raise IOError, 'Writer needs file name or writeable file-like object' + raise IOError, 'Writer needs file name or writeable '\ + 'file-like object' file_stream = file_name MW = MatFile4Writer(file_stream) From scipy-svn at scipy.org Wed Aug 1 07:40:14 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 1 Aug 2007 06:40:14 -0500 (CDT) Subject: [Scipy-svn] r3212 - trunk/Lib/optimize Message-ID: <20070801114014.C591F39C1B6@new.scipy.org> Author: dmitrey.kroshko Date: 2007-08-01 06:39:07 -0500 (Wed, 01 Aug 2007) New Revision: 3212 Modified: trunk/Lib/optimize/minpack.py Log: "apply" removed from minpack.py (Nils Wagner ticket) Modified: trunk/Lib/optimize/minpack.py =================================================================== --- trunk/Lib/optimize/minpack.py 2007-08-01 08:07:47 UTC (rev 3211) +++ trunk/Lib/optimize/minpack.py 2007-08-01 11:39:07 UTC (rev 3212) @@ -8,8 +8,7 @@ __all__ = ['fsolve', 'leastsq', 'newton', 'fixed_point','bisection'] def check_func(thefunc, x0, args, numinputs, output_shape=None): - args = (x0[:numinputs],) + args - res = atleast_1d(apply(thefunc,args)) + res = atleast_1d(thefunc(*((x0[:numinputs],)+args))) if (output_shape != None) and (shape(res) != output_shape): if (output_shape[0] != 1): if len(output_shape) > 1: @@ -392,8 +391,8 @@ else: # Secant method p0 = x0 p1 = x0*(1+1e-4) - q0 = apply(func,(p0,)+args) - q1 = apply(func,(p1,)+args) + q0 = func(*((p0,)+args)) + q1 = func(*((p1,)+args)) for iter in range(maxiter): if q1 == q0: if p1 != p0: @@ -406,7 +405,7 @@ p0 = p1 q0 = q1 p1 = p - q1 = apply(func,(p1,)+args) + q1 = func(*((p1,)+args)) raise RuntimeError, "Failed to converge after %d iterations, value is %s" % (maxiter,p) @@ -438,8 +437,8 @@ p0 = x0 for iter in range(maxiter): - p1 = apply(func,(p0,)+args) - p2 = apply(func,(p1,)+args) + p1 = func(*((p0,)+args)) + p2 = func(*((p1,)+args)) d = p2 - 2.0 * p1 + p0 if d == 
0.0: print "Warning: Difference in estimates is %g" % (abs(p2-p1)) From scipy-svn at scipy.org Wed Aug 1 09:10:15 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 1 Aug 2007 08:10:15 -0500 (CDT) Subject: [Scipy-svn] r3213 - in trunk/Lib/fftpack: . src Message-ID: <20070801131015.39E3539C074@new.scipy.org> Author: cdavid Date: 2007-08-01 08:09:45 -0500 (Wed, 01 Aug 2007) New Revision: 3213 Added: trunk/Lib/fftpack/src/drfft_djbfft.c trunk/Lib/fftpack/src/drfft_fftpack.c trunk/Lib/fftpack/src/drfft_fftw.c trunk/Lib/fftpack/src/drfft_fftw3.c Modified: trunk/Lib/fftpack/setup.py trunk/Lib/fftpack/src/drfft.c Log: Clean fft code for real input Modified: trunk/Lib/fftpack/setup.py =================================================================== --- trunk/Lib/fftpack/setup.py 2007-08-01 11:39:07 UTC (rev 3212) +++ trunk/Lib/fftpack/setup.py 2007-08-01 13:09:45 UTC (rev 3213) @@ -31,7 +31,9 @@ libraries=['dfftpack'], extra_info=[fft_opt_info, djbfft_info], depends=['src/zfft_djbfft.c', 'src/zfft_fftpack.c', 'src/zfft_fftw.c', - 'src/zfft_fftw3.c', 'src/zfft_mkl.c'], + 'src/zfft_fftw3.c', 'src/zfft_mkl.c', + 'src/drfft_djbfft.c', 'src/drfft_fftpack.c', + 'src/drfft_fftw3.c', 'src/drfft_fftw.c'], ) config.add_extension('convolve', Modified: trunk/Lib/fftpack/src/drfft.c =================================================================== --- trunk/Lib/fftpack/src/drfft.c 2007-08-01 11:39:07 UTC (rev 3212) +++ trunk/Lib/fftpack/src/drfft.c 2007-08-01 13:09:45 UTC (rev 3213) @@ -6,211 +6,66 @@ #include "fftpack.h" -/**************** DJBFFT *****************************/ -#ifdef WITH_DJBFFT -GEN_CACHE(ddjbfft,(int n) - ,unsigned int* f; - double* ptr; - ,caches_ddjbfft[i].n==n - ,caches_ddjbfft[id].f = (unsigned int*)malloc(sizeof(unsigned int)*(n)); - caches_ddjbfft[id].ptr = (double*)malloc(sizeof(double)*n); - fftfreq_rtable(caches_ddjbfft[id].f,n); - ,free(caches_ddjbfft[id].f); - free(caches_ddjbfft[id].ptr); - ,10) -#endif - -#if defined WITH_FFTW3 -/**************** FFTW3 *****************************/ -GEN_CACHE(drfftw,(int n,int d,int flags) - ,int direction; - int flags; - fftw_plan plan; - double *ptr; - ,((caches_drfftw[i].n==n) && - (caches_drfftw[i].direction==d) && - (caches_drfftw[i].flags==flags)) - ,caches_drfftw[id].direction = d; - caches_drfftw[id].flags = flags; - caches_drfftw[id].ptr = (double*)fftw_malloc(sizeof(double)*(n)); - caches_drfftw[id].plan = fftw_plan_r2r_1d(n,caches_drfftw[id].ptr, - caches_drfftw[id].ptr,(d>0?FFTW_R2HC:FFTW_HC2R),flags); - ,fftw_destroy_plan(caches_drfftw[id].plan); - fftw_free(caches_drfftw[id].ptr); - ,10) -#elif defined WITH_FFTW -/**************** FFTW2 *****************************/ -GEN_CACHE(drfftw,(int n,int d,int flags) - ,int direction; - int flags; - rfftw_plan plan; - double *ptr; - ,((caches_drfftw[i].n==n) && - (caches_drfftw[i].direction==d) && - (caches_drfftw[i].flags==flags)) - ,caches_drfftw[id].direction = d; - caches_drfftw[id].flags = flags; - caches_drfftw[id].plan = rfftw_create_plan(n, - (d>0?FFTW_REAL_TO_COMPLEX:FFTW_COMPLEX_TO_REAL),flags); - caches_drfftw[id].ptr = (double*)malloc(sizeof(double)*(n)); - ,rfftw_destroy_plan(caches_drfftw[id].plan); - free(caches_drfftw[id].ptr); - ,10) -#else -/**************** FFTPACK ZFFT **********************/ -extern void F_FUNC(dfftf,DFFTF)(int*,double*,double*); -extern void F_FUNC(dfftb,DFFTB)(int*,double*,double*); -extern void F_FUNC(dffti,DFFTI)(int*,double*); -GEN_CACHE(dfftpack,(int n) - ,double* wsave; - ,(caches_dfftpack[i].n==n) - 
,caches_dfftpack[id].wsave = (double*)malloc(sizeof(double)*(2*n+15)); - F_FUNC(dffti,DFFTI)(&n,caches_dfftpack[id].wsave); - ,free(caches_dfftpack[id].wsave); - ,10) -#endif - -extern void destroy_drfft_cache(void) { -#ifdef WITH_DJBFFT - destroy_ddjbfft_caches(); -#endif -#if defined(WITH_FFTW3) || defined(WITH_FFTW) - destroy_drfftw_caches(); -#else - destroy_dfftpack_caches(); -#endif +/* The following macro convert private backend specific function to the public + * functions exported by the module */ +#define GEN_PUBLIC_API(name) \ +void destroy_drfft_cache(void)\ +{\ + destroy_dr##name##_caches();\ +}\ +\ +void drfft(double *inout, int n, \ + int direction, int howmany, int normalize)\ +{\ + drfft_##name(inout, n, direction, howmany, normalize);\ } -/**************** DRFFT function **********************/ +/* ************** Definition of backend specific functions ********* */ +/* + * To add a backend : + * - create a file drfft_name.c, where you define a function drfft_name where + * name is the name of your backend. If you do not use the GEN_CACHE macro, + * you will need to define a function void destroy_drname_caches(void), + * which can do nothing + * - in drfft.c, include the drfft_name.c file, and add the 3 following lines + * just after it: + * #ifndef WITH_DJBFFT + * GEN_PUBLIC_API(name) + * #endif + */ -extern void drfft(double *inout, - int n,int direction,int howmany,int normalize) { - int i; - double *ptr = inout; -#if defined(WITH_FFTW3) || defined(WITH_FFTW) || defined(WITH_DJBFFT) - double *ptrc = NULL; -#endif -#if defined WITH_FFTW3 - fftw_plan plan = NULL; +#ifdef WITH_FFTW3 + #include "drfft_fftw3.c" + #ifndef WITH_DJBFFT + GEN_PUBLIC_API(fftw3) + #endif #elif defined WITH_FFTW - rfftw_plan plan = NULL; -#else - double* wsave = NULL; + #include "drfft_fftw.c" + #ifndef WITH_DJBFFT + GEN_PUBLIC_API(fftw) + #endif +#else /* Use fftpack by default */ + #include "drfft_fftpack.c" + #ifndef WITH_DJBFFT + GEN_PUBLIC_API(fftpack) + #endif #endif -#ifdef WITH_DJBFFT - unsigned int *f = NULL; -#endif +/* + * djbfft must be used at the end, because it needs another backend (defined + * above) for non 2^n * size + */ #ifdef WITH_DJBFFT - switch (n) { - case 2:;case 4:;case 8:;case 16:;case 32:;case 64:;case 128:;case 256:; - case 512:;case 1024:;case 2048:;case 4096:;case 8192: - i = get_cache_id_ddjbfft(n); - f = caches_ddjbfft[i].f; - ptrc = caches_ddjbfft[i].ptr; - } - if (f==NULL) -#endif -#ifdef WITH_FFTW3 + #include "drfft_djbfft.c" + void destroy_drfft_cache(void) { - i = get_cache_id_drfftw(n,direction,FFTW_ESTIMATE); - plan = caches_drfftw[i].plan; - ptrc = caches_drfftw[i].ptr; + destroy_drdjbfft_caches(); + drfft_def_destroy_cache(); } -#elif defined WITH_FFTW + void drfft(double *inout, int n, + int direction, int howmany, int normalize) { - i = get_cache_id_drfftw(n,direction,FFTW_IN_PLACE|FFTW_ESTIMATE); - plan = caches_drfftw[i].plan; - ptrc = caches_drfftw[i].ptr; + drfft_djbfft(inout, n, direction, howmany, normalize); } -#else - wsave = caches_dfftpack[get_cache_id_dfftpack(n)].wsave; #endif - - switch (direction) { - - case 1: - for (i=0;i=0;--i) - (*(ptr++)) *= d; - } -} - - - Added: trunk/Lib/fftpack/src/drfft_djbfft.c =================================================================== --- trunk/Lib/fftpack/src/drfft_djbfft.c 2007-08-01 11:39:07 UTC (rev 3212) +++ trunk/Lib/fftpack/src/drfft_djbfft.c 2007-08-01 13:09:45 UTC (rev 3213) @@ -0,0 +1,131 @@ +/* + * Last Change: Wed Aug 01 08:00 PM 2007 J + * + * Original code by Pearu Peterson. 
+ */ + +/* + * DJBFFT only implements size 2^N ! + * + * drfft_def and drfft_def_destroy_cache are the functions used for size different + * than 2^N + */ +#ifdef WITH_FFTW3 +#define drfft_def drfft_fftw3 +#define drfft_def_destroy_cache destroy_drfftw3_caches +#elif defined WITH_FFTW +#define drfft_def drfft_fftw +#define drfft_def_destroy_cache destroy_drfftw_caches +#else +#define drfft_def drfft_fftpack +#define drfft_def_destroy_cache destroy_drfftpack_caches +#endif + +GEN_CACHE(drdjbfft, (int n) + , unsigned int *f; + double *ptr;, + caches_drdjbfft[i].n == n, + caches_drdjbfft[id].f = (unsigned int *) malloc(sizeof(unsigned int) * (n)); + caches_drdjbfft[id].ptr = (double *) malloc(sizeof(double) * n); + fftfreq_rtable(caches_drdjbfft[id].f, n);, + free(caches_drdjbfft[id].f); + free(caches_drdjbfft[id].ptr);, + 10) + +/**************** ZFFT function **********************/ +static void drfft_djbfft(double * inout, + int n, int direction, int howmany, int normalize) +{ + int i; + double *ptr = inout; + double *ptrc = NULL; + unsigned int *f = NULL; + + switch (n) { + case 2:; + case 4:; + case 8:; + case 16:; + case 32:; + case 64:; + case 128:; + case 256:; + case 512:; + case 1024:; + case 2048:; + case 4096:; + case 8192: + i = get_cache_id_drdjbfft(n); + f = caches_drdjbfft[i].f; + ptrc = caches_drdjbfft[i].ptr; + } + if (f == NULL) { + drfft_def(inout, n, direction, howmany, normalize); + } + + switch (direction) { + case 1: + for (i = 0; i < howmany; ++i, ptr += n) { + if (f != NULL) { + COPYSTD2DJB(ptr, ptrc, n); + switch (n) { +#define TMPCASE(N) case N: fftr8_##N(ptrc); break + TMPCASE(2); + TMPCASE(4); + TMPCASE(8); + TMPCASE(16); + TMPCASE(32); + TMPCASE(64); + TMPCASE(128); + TMPCASE(256); + TMPCASE(512); + TMPCASE(1024); + TMPCASE(2048); + TMPCASE(4096); + TMPCASE(8192); +#undef TMPCASE + } + COPYDJB2STD(ptrc, ptr, f, n); + } + } + break; + + case -1: + for (i = 0; i < howmany; ++i, ptr += n) { + if (f != NULL) { + COPYINVSTD2DJB(ptr, ptrc, normalize, f, n); + switch (n) { + +#define TMPCASE(N)case N:if(normalize)fftr8_scale##N(ptrc);fftr8_un##N(ptrc);break + TMPCASE(2); + TMPCASE(4); + TMPCASE(8); + TMPCASE(16); + TMPCASE(32); + TMPCASE(64); + TMPCASE(128); + TMPCASE(256); + TMPCASE(512); + TMPCASE(1024); + TMPCASE(2048); + TMPCASE(4096); + TMPCASE(8192); +#undef TMPCASE + } + COPYINVDJB2STD(ptrc, ptr, n); + } + } + break; + + default: + fprintf(stderr, "drfft: invalid direction=%d\n", direction); + } + + if (normalize && f != NULL && direction == 1) { + double d = 1.0 / n; + ptr = inout; + for (i = n * howmany - 1; i >= 0; --i) { + (*(ptr++)) *= d; + } + } +} Added: trunk/Lib/fftpack/src/drfft_fftpack.c =================================================================== --- trunk/Lib/fftpack/src/drfft_fftpack.c 2007-08-01 11:39:07 UTC (rev 3212) +++ trunk/Lib/fftpack/src/drfft_fftpack.c 2007-08-01 13:09:45 UTC (rev 3213) @@ -0,0 +1,54 @@ +/* + * Last Change: Wed Aug 01 07:00 PM 2007 J + * + * FFTPACK implementation + * + * Original code by Pearu Peterson. 
+ */ + +extern void F_FUNC(dfftf, DFFTF) (int *, double *, double *); +extern void F_FUNC(dfftb, DFFTB) (int *, double *, double *); +extern void F_FUNC(dffti, DFFTI) (int *, double *); +GEN_CACHE(drfftpack, (int n) + , double *wsave; + , (caches_drfftpack[i].n == n) + , caches_drfftpack[id].wsave = + (double *) malloc(sizeof(double) * (2 * n + 15)); + F_FUNC(dffti, DFFTI) (&n, caches_drfftpack[id].wsave); + , free(caches_drfftpack[id].wsave); + , 10) + +static void drfft_fftpack(double *inout, int n, int direction, int howmany, + int normalize) +{ + int i; + double *ptr = inout; + double *wsave = NULL; + wsave = caches_drfftpack[get_cache_id_drfftpack(n)].wsave; + + + switch (direction) { + case 1: + for (i = 0; i < howmany; ++i, ptr += n) { + dfftf_(&n, ptr, wsave); + } + break; + + case -1: + for (i = 0; i < howmany; ++i, ptr += n) { + dfftb_(&n, ptr, wsave); + } + break; + + default: + fprintf(stderr, "drfft: invalid direction=%d\n", direction); + } + + if (normalize) { + double d = 1.0 / n; + ptr = inout; + for (i = n * howmany - 1; i >= 0; --i) { + (*(ptr++)) *= d; + } + } +} Added: trunk/Lib/fftpack/src/drfft_fftw.c =================================================================== --- trunk/Lib/fftpack/src/drfft_fftw.c 2007-08-01 11:39:07 UTC (rev 3212) +++ trunk/Lib/fftpack/src/drfft_fftw.c 2007-08-01 13:09:45 UTC (rev 3213) @@ -0,0 +1,70 @@ +/* + * Last Change: Wed Aug 01 07:00 PM 2007 J + * + * FFTW2 implementation + * + * Original code by Pearu Peterson. + */ + +GEN_CACHE(drfftw, (int n, int d, int flags) + , int direction; + int flags; + rfftw_plan plan; + double *ptr;, ((caches_drfftw[i].n == n) && + (caches_drfftw[i].direction == d) && + (caches_drfftw[i].flags == flags)) + , caches_drfftw[id].direction = d; + caches_drfftw[id].flags = flags; + caches_drfftw[id].plan = rfftw_create_plan(n, + (d > + 0 ? + FFTW_REAL_TO_COMPLEX + : + FFTW_COMPLEX_TO_REAL), + flags); + caches_drfftw[id].ptr = + (double *) malloc(sizeof(double) * (n));, + rfftw_destroy_plan(caches_drfftw[id].plan); + free(caches_drfftw[id].ptr);, 10) + +static void drfft_fftw(double *inout, int n, int dir, int + howmany, int normalize) +{ + int i; + double *ptr = inout; + double *ptrc = NULL; + rfftw_plan plan = NULL; + + i = get_cache_id_drfftw(n, dir, FFTW_IN_PLACE | FFTW_ESTIMATE); + plan = caches_drfftw[i].plan; + ptrc = caches_drfftw[i].ptr; + + switch (dir) { + case 1: + for (i = 0; i < howmany; ++i, ptr += n) { + memcpy(ptrc, ptr, sizeof(double) * n); + rfftw(plan, 1, (fftw_real *) ptrc, 1, 1, NULL, 1, 1); + COPYRFFTW2STD(ptrc, ptr, n); + } + break; + + case -1: + for (i = 0; i < howmany; ++i, ptr += n) { + COPYINVRFFTW2STD(ptr, ptrc, n); + rfftw(plan, 1, (fftw_real *) ptrc, 1, 1, NULL, 1, 1); + memcpy(ptr, ptrc, sizeof(double) * n); + } + break; + + default: + fprintf(stderr, "drfft: invalid direction=%d\n", dir); + } + + if (normalize) { + double d = 1.0 / n; + ptr = inout; + for (i = n * howmany - 1; i >= 0; --i) { + (*(ptr++)) *= d; + } + } +} Added: trunk/Lib/fftpack/src/drfft_fftw3.c =================================================================== --- trunk/Lib/fftpack/src/drfft_fftw3.c 2007-08-01 11:39:07 UTC (rev 3212) +++ trunk/Lib/fftpack/src/drfft_fftw3.c 2007-08-01 13:09:45 UTC (rev 3213) @@ -0,0 +1,65 @@ +/* + * Last Change: Wed Aug 01 07:00 PM 2007 J + * + * FFTW3 implementation + * + * Original code by Pearu Peterson. 
+ */ + +GEN_CACHE(drfftw3, (int n, int d, int flags) + , int direction; + int flags; + fftw_plan plan; + double *ptr;, ((caches_drfftw3[i].n == n) && + (caches_drfftw3[i].direction == d) && + (caches_drfftw3[i].flags == flags)) + , caches_drfftw3[id].direction = d; + caches_drfftw3[id].flags = flags; + caches_drfftw3[id].ptr = + (double *) fftw_malloc(sizeof(double) * (n)); + caches_drfftw3[id].plan = + fftw_plan_r2r_1d(n, caches_drfftw3[id].ptr, caches_drfftw3[id].ptr, + (d > 0 ? FFTW_R2HC : FFTW_HC2R), flags);, + fftw_destroy_plan(caches_drfftw3[id].plan); + fftw_free(caches_drfftw3[id].ptr);, 10) + +static void drfft_fftw3(double *inout, int n, int direction, int + howmany, int normalize) +{ + int i; + double *ptr = inout; + + double *ptrc = NULL; + fftw_plan plan = NULL; + + i = get_cache_id_drfftw3(n, direction, (1U << 6)); + plan = caches_drfftw3[i].plan; + ptrc = caches_drfftw3[i].ptr; + switch (direction) { + case 1: + for (i = 0; i < howmany; ++i, ptr += n) { + memcpy(ptrc, ptr, sizeof(double) * n); + fftw_execute(plan); + COPYRFFTW2STD(ptrc, ptr, n); + } + break; + + case -1: + for (i = 0; i < howmany; ++i, ptr += n) { + COPYINVRFFTW2STD(ptr, ptrc, n); + fftw_execute(plan); + memcpy(ptr, ptrc, sizeof(double) * n); + } + break; + default: + fprintf(stderr, "drfft: invalid direction=%d\n", direction); + } + + if (normalize) { + double d = 1.0 / n; + ptr = inout; + for (i = n * howmany - 1; i >= 0; --i) { + (*(ptr++)) *= d; + } + } +} From scipy-svn at scipy.org Wed Aug 1 09:13:32 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 1 Aug 2007 08:13:32 -0500 (CDT) Subject: [Scipy-svn] r3214 - trunk/Lib/optimize Message-ID: <20070801131332.0876D39C192@new.scipy.org> Author: dmitrey.kroshko Date: 2007-08-01 08:13:03 -0500 (Wed, 01 Aug 2007) New Revision: 3214 Modified: trunk/Lib/optimize/optimize.py Log: minor changes (atleast_1d in some scipy.optimize funcs have been added to prevent errors like ticket 416) Modified: trunk/Lib/optimize/optimize.py =================================================================== --- trunk/Lib/optimize/optimize.py 2007-08-01 13:09:45 UTC (rev 3213) +++ trunk/Lib/optimize/optimize.py 2007-08-01 13:13:03 UTC (rev 3214) @@ -156,7 +156,7 @@ """ fcalls, func = wrap_function(func, args) - x0 = asfarray(x0) + x0 = atleast_1d(asfarray(x0)) N = len(x0) rank = len(x0.shape) if not -1 < rank < 2: @@ -523,7 +523,8 @@ Outputs: (alpha, fc, gc) """ - + + xk = atleast_1d(xk) fc = 0 phi0 = old_fval # compute f(xk) -- done in past loop phi_a0 = f(*((xk+alpha0*pk,)+args)) @@ -828,7 +829,7 @@ fixed_point -- scalar fixed-point finder """ - x0 = asarray(x0) + x0 = atleast_1d(asarray(x0)) if maxiter is None: maxiter = len(x0)*200 func_calls, f = wrap_function(f, args) @@ -996,7 +997,7 @@ fixed_point -- scalar fixed-point finder """ - x0 = asarray(x0) + x0 = atleast_1d(asarray(x0)) fcalls, f = wrap_function(f, args) gcalls, fprime = wrap_function(fprime, args) hcalls = 0 @@ -1661,7 +1662,7 @@ # we need to use a mutable object here that we can update in the # wrapper function fcalls, func = wrap_function(func, args) - x = asarray(x0) + x = atleast_1d(asarray(x0)) if retall: allvecs = [x] N = len(x) From scipy-svn at scipy.org Wed Aug 1 09:41:55 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 1 Aug 2007 08:41:55 -0500 (CDT) Subject: [Scipy-svn] r3215 - in trunk/Lib/sparse: .
tests Message-ID: <20070801134155.F18CB39C1F1@new.scipy.org> Author: stefan Date: 2007-08-01 08:41:33 -0500 (Wed, 01 Aug 2007) New Revision: 3215 Modified: trunk/Lib/sparse/sparse.py trunk/Lib/sparse/tests/test_sparse.py Log: Add scalar addition and point-wise multiplication to lil_matrix. Modified: trunk/Lib/sparse/sparse.py =================================================================== --- trunk/Lib/sparse/sparse.py 2007-08-01 13:13:03 UTC (rev 3214) +++ trunk/Lib/sparse/sparse.py 2007-08-01 13:41:33 UTC (rev 3215) @@ -10,7 +10,6 @@ 'spdiags','speye','spidentity', 'isspmatrix','issparse','isspmatrix_csc','isspmatrix_csr', 'isspmatrix_lil','isspmatrix_dok' ] - import warnings @@ -2413,13 +2412,53 @@ else: return self.dot(other) + def multiply(self, other): + """Point-wise multiplication by another lil_matrix. + + """ + if isscalar(other): + return self.__mul__(other) + + if isspmatrix_lil(other): + reference,target = self,other + + if reference.shape != target.shape: + raise ValueError("Dimensions do not match.") + + if len(reference.data) > len(target.data): + reference,target = target,reference + + new = lil_matrix(reference.shape) + for r,row in enumerate(reference.rows): + tr = target.rows[r] + td = target.data[r] + rd = reference.data[r] + L = len(tr) + for c,column in enumerate(row): + ix = bisect_left(tr,column) + if ix < L and tr[ix] == column: + new.rows[r].append(column) + new.data[r].append(rd[c] * td[ix]) + return new + else: + raise ValueError("Point-wise multiplication only allowed " + "with another lil_matrix.") + def copy(self): new = lil_matrix(self.shape, dtype=self.dtype) new.data = copy.deepcopy(self.data) new.rows = copy.deepcopy(self.rows) return new - - + + def __add__(self, other): + if isscalar(other): + new = self.copy() + new.data = numpy.array([[val+other for val in rowvals] for + rowvals in new.data], dtype=object) + return new + else: + return spmatrix.__add__(self, other) + def __rmul__(self, other): # other * self if isscalarlike(other): # Multiplication by a scalar is symmetric Modified: trunk/Lib/sparse/tests/test_sparse.py =================================================================== --- trunk/Lib/sparse/tests/test_sparse.py 2007-08-01 13:13:03 UTC (rev 3214) +++ trunk/Lib/sparse/tests/test_sparse.py 2007-08-01 13:41:33 UTC (rev 3215) @@ -779,7 +779,7 @@ class test_lil(_test_cs, _test_horiz_slicing, NumpyTestCase): spmatrix = lil_matrix - def check_mult(self): + def check_dot(self): A = matrix(zeros((10,10))) A[0,3] = 10 A[5,6] = 20 @@ -829,7 +829,43 @@ D = lil_matrix(C) assert_array_equal(C.A, D.A) + def check_scalar_add(self): + a = lil_matrix((3,3)) + a[0,0] = 1 + a[0,1] = 2 + a[1,1] = 3 + a[2,1] = 4 + a[2,2] = 5 + assert_array_equal((a-5).todense(), + [[-4,-3,0], + [ 0,-2,0], + [ 0,-1,0]]) + + def check_point_wise_multiply(self): + l = lil_matrix((4,3)) + l[0,0] = 1 + l[1,1] = 2 + l[2,2] = 3 + l[3,1] = 4 + + m = lil_matrix((4,3)) + m[0,0] = 1 + m[0,1] = 2 + m[2,2] = 3 + m[3,1] = 4 + m[3,2] = 4 + + assert_array_equal(l.multiply(m).todense(), + m.multiply(l).todense()) + + assert_array_equal(l.multiply(m).todense(), + [[1,0,0], + [0,0,0], + [0,0,9], + [0,16,0]]) + + class test_construct_utils(NumpyTestCase): def check_identity(self): a = spidentity(3) From scipy-svn at scipy.org Wed Aug 1 10:00:50 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 1 Aug 2007 09:00:50 -0500 (CDT) Subject: [Scipy-svn] r3216 - trunk/Lib/fftpack/src Message-ID: <20070801140050.CA9AF39C24A@new.scipy.org> Author: cdavid Date: 2007-08-01 09:00:45 
-0500 (Wed, 01 Aug 2007) New Revision: 3216 Modified: trunk/Lib/fftpack/src/drfft_fftw3.c Log: Set correct enum value instead of generated value by the preprocessor for FFTW3 real wrapper Modified: trunk/Lib/fftpack/src/drfft_fftw3.c =================================================================== --- trunk/Lib/fftpack/src/drfft_fftw3.c 2007-08-01 13:41:33 UTC (rev 3215) +++ trunk/Lib/fftpack/src/drfft_fftw3.c 2007-08-01 14:00:45 UTC (rev 3216) @@ -32,7 +32,7 @@ double *ptrc = NULL; fftw_plan plan = NULL; - i = get_cache_id_drfftw3(n, direction, (1U << 6)); + i = get_cache_id_drfftw3(n, direction, FFTW_ESTIMATE); plan = caches_drfftw3[i].plan; ptrc = caches_drfftw3[i].ptr; switch (direction) { From scipy-svn at scipy.org Wed Aug 1 10:39:49 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 1 Aug 2007 09:39:49 -0500 (CDT) Subject: [Scipy-svn] r3217 - trunk/Lib/fftpack/src Message-ID: <20070801143949.AC20539C1ED@new.scipy.org> Author: cdavid Date: 2007-08-01 09:39:21 -0500 (Wed, 01 Aug 2007) New Revision: 3217 Modified: trunk/Lib/fftpack/src/zfft_fftw3.c Log: Avoid unnecessary copies for complex fft with fftw3 Modified: trunk/Lib/fftpack/src/zfft_fftw3.c =================================================================== --- trunk/Lib/fftpack/src/zfft_fftw3.c 2007-08-01 14:00:45 UTC (rev 3216) +++ trunk/Lib/fftpack/src/zfft_fftw3.c 2007-08-01 14:39:21 UTC (rev 3217) @@ -1,6 +1,70 @@ + +// This cache uses FFTW_MEASURE for the plans, and do not copy the data. GEN_CACHE(zfftw3,(int n,int d) ,int direction; fftw_plan plan; + fftw_complex *wrk; + ,((caches_zfftw3[i].n==n) && + (caches_zfftw3[i].direction==d)) + ,caches_zfftw3[id].direction = d; + // This working buffer is only used to compute the plan: we need it + // since FFTW_MEASURE destroys its input when computing a plan + caches_zfftw3[id].wrk = fftw_malloc(n * sizeof(double) * 2); + caches_zfftw3[id].plan = fftw_plan_dft_1d(n, + caches_zfftw3[id].wrk, + caches_zfftw3[id].wrk, + (d>0?FFTW_FORWARD:FFTW_BACKWARD), + FFTW_ESTIMATE | FFTW_UNALIGNED); + ,//fftw_print_plan(caches_zfftw3[id].plan); + fftw_destroy_plan(caches_zfftw3[id].plan); + fftw_free(caches_zfftw3[id].wrk); + //fflush(stdout); + //fprintf(stderr, "aligned count %d\n", countaligned); + ,10) + +static void zfft_fftw3(complex_double * inout, int n, int dir, int +howmany, int normalize) +{ + fftw_complex *ptr = (fftw_complex*)inout; + fftw_complex *ptrm; + fftw_plan plan = NULL; + double factor = 1./n; + + int i; + + plan = caches_zfftw3[get_cache_id_zfftw3(n, dir)].plan; + + switch (dir) { + case 1: + for (i = 0; i < howmany; ++i, ptr += n) { + fftw_execute_dft(plan, ptr, ptr); + } + break; + + case -1: + for (i = 0; i < howmany; ++i, ptr += n) { + fftw_execute_dft(plan, ptr, ptr); + } + break; + + default: + fprintf(stderr, "zfft: invalid dir=%d\n", dir); + } + + if (normalize) { + ptr =(fftw_complex*)inout; + for (i = n * howmany - 1; i >= 0; --i) { + *((double *) (ptr)) *= factor; + *((double *) (ptr++) + 1) *= factor; + //*((double *) (ptr)) /= n; + //*((double *) (ptr++) + 1) /= n; + } + } +} +#if 0 +GEN_CACHE(zfftw3,(int n,int d) + ,int direction; + fftw_plan plan; fftw_complex* ptr; ,((caches_zfftw3[i].n==n) && (caches_zfftw3[i].direction==d)) @@ -58,3 +122,4 @@ } } } +#endif From scipy-svn at scipy.org Wed Aug 1 11:51:01 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 1 Aug 2007 10:51:01 -0500 (CDT) Subject: [Scipy-svn] r3218 - trunk/Lib/optimize Message-ID: <20070801155101.1C6F139C25E@new.scipy.org> Author: dmitrey.kroshko 
Date: 2007-08-01 10:50:42 -0500 (Wed, 01 Aug 2007) New Revision: 3218 Modified: trunk/Lib/optimize/optimize.py Log: minor changes in optimize.py Modified: trunk/Lib/optimize/optimize.py =================================================================== --- trunk/Lib/optimize/optimize.py 2007-08-01 14:39:21 UTC (rev 3217) +++ trunk/Lib/optimize/optimize.py 2007-08-01 15:50:42 UTC (rev 3218) @@ -156,7 +156,7 @@ """ fcalls, func = wrap_function(func, args) - x0 = atleast_1d(asfarray(x0)) + x0 = asfarray(x0).flatten() N = len(x0) rank = len(x0.shape) if not -1 < rank < 2: @@ -829,7 +829,7 @@ fixed_point -- scalar fixed-point finder """ - x0 = atleast_1d(asarray(x0)) + x0 = asarray(x0).flatten() if maxiter is None: maxiter = len(x0)*200 func_calls, f = wrap_function(f, args) @@ -997,7 +997,7 @@ fixed_point -- scalar fixed-point finder """ - x0 = atleast_1d(asarray(x0)) + x0 = asarray(x0).flatten() fcalls, f = wrap_function(f, args) gcalls, fprime = wrap_function(fprime, args) hcalls = 0 @@ -1662,7 +1662,7 @@ # we need to use a mutable object here that we can update in the # wrapper function fcalls, func = wrap_function(func, args) - x = atleast_1d(asarray(x0)) + x = asarray(x0).flatten() if retall: allvecs = [x] N = len(x) From scipy-svn at scipy.org Wed Aug 1 12:19:15 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 1 Aug 2007 11:19:15 -0500 (CDT) Subject: [Scipy-svn] r3219 - in trunk/Lib/sparse: . tests Message-ID: <20070801161915.2B2E739C1D4@new.scipy.org> Author: stefan Date: 2007-08-01 11:18:38 -0500 (Wed, 01 Aug 2007) New Revision: 3219 Modified: trunk/Lib/sparse/sparse.py trunk/Lib/sparse/tests/test_sparse.py Log: Add lil_eye. Modified: trunk/Lib/sparse/sparse.py =================================================================== --- trunk/Lib/sparse/sparse.py 2007-08-01 15:50:42 UTC (rev 3218) +++ trunk/Lib/sparse/sparse.py 2007-08-01 16:18:38 UTC (rev 3219) @@ -9,14 +9,14 @@ 'lil_matrix','dok_matrix', 'spdiags','speye','spidentity', 'isspmatrix','issparse','isspmatrix_csc','isspmatrix_csr', - 'isspmatrix_lil','isspmatrix_dok' ] + 'isspmatrix_lil','isspmatrix_dok', 'lil_eye' ] import warnings from numpy import zeros, isscalar, real, imag, asarray, asmatrix, matrix, \ ndarray, amax, amin, rank, conj, searchsorted, ndarray, \ less, where, greater, array, transpose, empty, ones, \ - arange, shape, intc + arange, shape, intc, clip import numpy from scipy.sparse.sparsetools import cscmux, csrmux, \ cootocsr, csrtocoo, cootocsc, csctocoo, csctocsr, csrtocsc, \ @@ -2654,7 +2654,26 @@ diags = ones((1, n), dtype = dtype) return spdiags(diags, k, n, m) +def lil_eye((r,c), k=0, dtype=float): + """Generate a lil_matrix of dimensions (r,c) with the k-th + diagonal set to 1. + :Parameters: + r,c : int + Row and column-dimensions of the output. + k : int + Diagonal offset. In the output matrix, + out[m,m+k] == 1 for all m. + dtype : dtype + Data-type of the output array. 
+ + """ + out = lil_matrix((r,c),dtype=dtype) + for c in xrange(clip(k,0,c),clip(r+k,0,c)): + out.rows[c-k].append(c) + out.data[c-k].append(1) + return out + def issequence(t): return isinstance(t, (list, tuple)) Modified: trunk/Lib/sparse/tests/test_sparse.py =================================================================== --- trunk/Lib/sparse/tests/test_sparse.py 2007-08-01 15:50:42 UTC (rev 3218) +++ trunk/Lib/sparse/tests/test_sparse.py 2007-08-01 16:18:38 UTC (rev 3219) @@ -22,7 +22,7 @@ from numpy.testing import * set_package_path() from scipy.sparse import csc_matrix, csr_matrix, dok_matrix, coo_matrix, \ - spidentity, speye, lil_matrix + spidentity, speye, lil_matrix, lil_eye from scipy.linsolve import splu restore_path() @@ -865,7 +865,14 @@ [0,0,9], [0,16,0]]) + def check_lil_eye(self): + for dim in [(3,5),(5,3)]: + for k in range(-5,5): + r,c = dim + assert_array_equal(lil_eye(dim,k).todense(), + speye(r,c,k).todense()) + class test_construct_utils(NumpyTestCase): def check_identity(self): a = spidentity(3) From scipy-svn at scipy.org Wed Aug 1 13:08:23 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 1 Aug 2007 12:08:23 -0500 (CDT) Subject: [Scipy-svn] r3220 - in trunk/Lib/sparse: . tests Message-ID: <20070801170823.9834939C064@new.scipy.org> Author: stefan Date: 2007-08-01 12:07:49 -0500 (Wed, 01 Aug 2007) New Revision: 3220 Modified: trunk/Lib/sparse/sparse.py trunk/Lib/sparse/tests/test_sparse.py Log: Add lil_diags. Modified: trunk/Lib/sparse/sparse.py =================================================================== --- trunk/Lib/sparse/sparse.py 2007-08-01 16:18:38 UTC (rev 3219) +++ trunk/Lib/sparse/sparse.py 2007-08-01 17:07:49 UTC (rev 3220) @@ -9,7 +9,7 @@ 'lil_matrix','dok_matrix', 'spdiags','speye','spidentity', 'isspmatrix','issparse','isspmatrix_csc','isspmatrix_csr', - 'isspmatrix_lil','isspmatrix_dok', 'lil_eye' ] + 'isspmatrix_lil','isspmatrix_dok', 'lil_eye', 'lil_diags' ] import warnings @@ -2674,6 +2674,51 @@ out.data[c-k].append(1) return out +def lil_diags(diags,offsets,(m,n),dtype=float): + """Generate a lil_matrix with the given diagonals. + + :Parameters: + diags : list of list of values e.g. [[1,2,3],[4,5]] + Values to be placed on each indicated diagonal. + offsets : list of ints + Diagonal offsets. This indicates the diagonal on which + the given values should be placed. + (r,c) : tuple of ints + Row and column dimensions of the output. + dtype : dtype + Output data-type. + + Example: + ------- + + >>> lil_diags([[1,2,3],[4,5],[6]],[0,1,2],(3,3)).todense() + matrix([[ 1., 4., 6.], + [ 0., 2., 5.], + [ 0., 0., 3.]]) + + """ + offsets_unsorted = list(offsets) + diags_unsorted = list(diags) + if len(diags) != len(offsets): + raise ValueError("Number of diagonals provided should " + "agree with offsets.") + + sort_indices = numpy.argsort(offsets_unsorted) + diags = [diags_unsorted[k] for k in sort_indices] + offsets = [offsets_unsorted[k] for k in sort_indices] + + for i,k in enumerate(offsets): + if len(diags[i]) < m-abs(k): + raise ValueError("Not enough values specified to fill " + "diagonal %s." 
% k) + + out = lil_matrix((m,n),dtype=dtype) + for k,diag in itertools.izip(offsets,diags): + for ix,c in enumerate(xrange(clip(k,0,n),clip(m+k,0,n))): + out.rows[c-k].append(c) + out.data[c-k].append(diag[ix]) + return out + def issequence(t): return isinstance(t, (list, tuple)) Modified: trunk/Lib/sparse/tests/test_sparse.py =================================================================== --- trunk/Lib/sparse/tests/test_sparse.py 2007-08-01 16:18:38 UTC (rev 3219) +++ trunk/Lib/sparse/tests/test_sparse.py 2007-08-01 17:07:49 UTC (rev 3220) @@ -22,7 +22,7 @@ from numpy.testing import * set_package_path() from scipy.sparse import csc_matrix, csr_matrix, dok_matrix, coo_matrix, \ - spidentity, speye, lil_matrix, lil_eye + spidentity, speye, lil_matrix, lil_eye, lil_diags from scipy.linsolve import splu restore_path() @@ -32,7 +32,7 @@ self.dat = matrix([[1,0,0,2],[3,0,1,0],[0,2,0,0]],'d') self.datsp = self.spmatrix(self.dat) - def check_getelement(self): + def check_getelement(self): assert_equal(self.datsp[0,0],1) assert_equal(self.datsp[0,1],0) assert_equal(self.datsp[1,0],3) @@ -872,7 +872,37 @@ assert_array_equal(lil_eye(dim,k).todense(), speye(r,c,k).todense()) + def check_lil_diags(self): + assert_array_equal(lil_diags([[1,2,3],[4,5],[6]], + [0,1,2],(3,3)).todense(), + [[1,4,6], + [0,2,5], + [0,0,3]]) + assert_array_equal(lil_diags([[6],[4,5],[1,2,3]], + [2,1,0],(3,3)).todense(), + [[1,4,6], + [0,2,5], + [0,0,3]]) + + assert_array_equal(lil_diags([[6,7,8],[4,5],[1,2,3]], + [2,1,0],(3,3)).todense(), + [[1,4,6], + [0,2,5], + [0,0,3]]) + + assert_array_equal(lil_diags([[1,2,3],[4,5],[6]], + [0,-1,-2],(3,3)).todense(), + [[1,0,0], + [4,2,0], + [6,5,3]]) + + assert_array_equal(lil_diags([[6,7,8],[4,5]], + [-2,-1],(3,3)).todense(), + [[0,0,0], + [4,0,0], + [6,5,0]]) + class test_construct_utils(NumpyTestCase): def check_identity(self): a = spidentity(3) From scipy-svn at scipy.org Sun Aug 5 10:51:01 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sun, 5 Aug 2007 09:51:01 -0500 (CDT) Subject: [Scipy-svn] r3221 - trunk/Lib/sandbox/timeseries/src Message-ID: <20070805145101.BA22139C100@new.scipy.org> Author: mattknox_ca Date: 2007-08-05 09:50:56 -0500 (Sun, 05 Aug 2007) New Revision: 3221 Modified: trunk/Lib/sandbox/timeseries/src/c_tdates.c Log: changed date properties to show end of period values for time data for lower frequencies (eg. the "hour" for an annual date will be 23 and the "minute" will be 59, etc...). This is more consistent with the rest of the date module Modified: trunk/Lib/sandbox/timeseries/src/c_tdates.c =================================================================== --- trunk/Lib/sandbox/timeseries/src/c_tdates.c 2007-08-01 17:07:49 UTC (rev 3220) +++ trunk/Lib/sandbox/timeseries/src/c_tdates.c 2007-08-05 14:50:56 UTC (rev 3221) @@ -1033,7 +1033,7 @@ periodsPerDay = 24*60*60; break; default: - return 0; + return 24*60*60 - 1; } startOfDay = asfreq_DtoHIGHFREQ(dailyDate, 'B', periodsPerDay); From scipy-svn at scipy.org Tue Aug 7 19:56:22 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 7 Aug 2007 18:56:22 -0500 (CDT) Subject: [Scipy-svn] r3222 - in trunk/Lib/signal: . tests Message-ID: <20070807235622.2F70139C044@new.scipy.org> Author: stefan Date: 2007-08-07 18:55:56 -0500 (Tue, 07 Aug 2007) New Revision: 3222 Modified: trunk/Lib/signal/signaltools.py trunk/Lib/signal/tests/test_signaltools.py Log: Fix order_filter. 
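An order filter slides the non-zero footprint of domain over the input, sorts the values selected around each sample, and emits the entry at position rank (0 = minimum, highest = maximum). The patch below renames the third argument from order to rank -- the name the function body actually uses -- and corrects the sigtools call to _order_filterND. A minimal usage sketch mirroring the new regression test (the edge values are consistent with zero padding at the boundaries):

    from scipy import signal
    # domain [1,0,1] selects the two neighbours of each sample, not the
    # sample itself; rank=1 then picks the larger of the two sorted values.
    out = signal.order_filter([1, 2, 3], [1, 0, 1], 1)
    # out == [2, 3, 2], exactly what the test added in this revision asserts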
Modified: trunk/Lib/signal/signaltools.py =================================================================== --- trunk/Lib/signal/signaltools.py 2007-08-05 14:50:56 UTC (rev 3221) +++ trunk/Lib/signal/signaltools.py 2007-08-07 23:55:56 UTC (rev 3222) @@ -157,7 +157,7 @@ return sigtools._correlateND(volume,kernel[slice_obj],val) -def order_filter(a, domain, order): +def order_filter(a, domain, rank): """Perform an order filter on an N-dimensional array. Description: @@ -188,7 +188,7 @@ for k in range(len(size)): if (size[k] % 2) != 1: raise ValueError, "Each dimension of domain argument should have an odd number of elements." - return sigtools._orderfilterND(a, domain, rank) + return sigtools._order_filterND(a, domain, rank) def medfilt(volume,kernel_size=None): Modified: trunk/Lib/signal/tests/test_signaltools.py =================================================================== --- trunk/Lib/signal/tests/test_signaltools.py 2007-08-05 14:50:56 UTC (rev 3221) +++ trunk/Lib/signal/tests/test_signaltools.py 2007-08-07 23:55:56 UTC (rev 3222) @@ -40,6 +40,10 @@ # make sure interpolated values are on knot points assert_array_almost_equal(y2[::10], y, decimal=5) +class test_order_filt(NumpyTestCase): + def check_basic(self): + assert_array_equal(signal.order_filter([1,2,3],[1,0,1],1), + [2,3,2]) if __name__ == "__main__": NumpyTest().run() From scipy-svn at scipy.org Wed Aug 8 02:40:21 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 8 Aug 2007 01:40:21 -0500 (CDT) Subject: [Scipy-svn] r3223 - in trunk/Lib/fftpack: . src Message-ID: <20070808064021.EB74A39C0CA@new.scipy.org> Author: cdavid Date: 2007-08-08 01:39:47 -0500 (Wed, 08 Aug 2007) New Revision: 3223 Added: trunk/Lib/fftpack/src/zfftnd_fftpack.c trunk/Lib/fftpack/src/zfftnd_fftw.c trunk/Lib/fftpack/src/zfftnd_fftw3.c trunk/Lib/fftpack/src/zfftnd_mkl.c Modified: trunk/Lib/fftpack/setup.py trunk/Lib/fftpack/src/zfftnd.c Log: Cleaning fft sources for Multi dimensional fft Modified: trunk/Lib/fftpack/setup.py =================================================================== --- trunk/Lib/fftpack/setup.py 2007-08-07 23:55:56 UTC (rev 3222) +++ trunk/Lib/fftpack/setup.py 2007-08-08 06:39:47 UTC (rev 3223) @@ -33,7 +33,10 @@ depends=['src/zfft_djbfft.c', 'src/zfft_fftpack.c', 'src/zfft_fftw.c', 'src/zfft_fftw3.c', 'src/zfft_mkl.c', 'src/drfft_djbfft.c', 'src/drfft_fftpack.c', - 'src/drfft_fftw3.c', 'src/drfft_fftw.c'], + 'src/drfft_fftw3.c', 'src/drfft_fftw.c', + 'src/zfftnd_fftpack.c', 'src/zfftnd_fftw.c', + 'src/zfftnd_fftw3.c', 'src/zfftnd_mkl.c', + ], ) config.add_extension('convolve', Modified: trunk/Lib/fftpack/src/zfftnd.c =================================================================== --- trunk/Lib/fftpack/src/zfftnd.c 2007-08-07 23:55:56 UTC (rev 3222) +++ trunk/Lib/fftpack/src/zfftnd.c 2007-08-08 06:39:47 UTC (rev 3223) @@ -5,6 +5,20 @@ */ #include "fftpack.h" +/* The following macro convert private backend specific function to the public + * functions exported by the module */ +#define GEN_PUBLIC_API(name) \ +void destroy_zfftnd_cache(void)\ +{\ + destroy_zfftnd_##name##_caches();\ +}\ +\ +void zfftnd(complex_double * inout, int rank,\ + int *dims, int direction, int howmany, int normalize)\ +{\ + zfftnd_##name(inout, rank, dims, direction, howmany, normalize);\ +} + #if defined(WITH_FFTW) || defined(WITH_MKL) static int equal_dims(int rank,int *dims1,int *dims2) { @@ -15,6 +29,22 @@ return 1; } #endif + +#ifdef WITH_FFTW3 + #include "zfftnd_fftw3.c" + GEN_PUBLIC_API(fftw3) +#elif defined WITH_FFTW + 
#include "zfftnd_fftw.c" + GEN_PUBLIC_API(fftw) +#elif defined WITH_MKL + #include "zfftnd_mkl.c" + GEN_PUBLIC_API(mkl) +#else /* Use fftpack by default */ + #include "zfftnd_fftpack.c" + GEN_PUBLIC_API(fftpack) +#endif + +#if 0 /**************** INTEL MKL **************************/ #ifdef WITH_MKL long* convert_dims(int n, int *dims) @@ -93,43 +123,6 @@ } #if defined(WITH_FFTW) || defined(WITH_FFTW3) || defined(WITH_MKL) #else -static -/*inline : disabled because MSVC6.0 fails to compile it. */ -int next_comb(int *ia,int *da,int m) { - while (m>=0 && ia[m]==da[m]) ia[m--] = 0; - if (m<0) return 0; - ia[m]++; - return 1; -} -static -void flatten(complex_double *dest,complex_double *src, - int rank,int strides_axis,int dims_axis,int unflat,int *tmp) { - int *new_strides = tmp+rank; - int *new_dims = tmp+2*rank; - int *ia = tmp+3*rank; - int rm1=rank-1,rm2=rank-2; - int i,j,k; - for (i=0;i= 0 && ia[m] == da[m]) { + ia[m--] = 0; + } + if (m < 0) { + return 0; + } + ia[m]++; + return 1; +} + +static +void flatten(complex_double * dest, complex_double * src, + int rank, int strides_axis, int dims_axis, int unflat, + int *tmp) +{ + int *new_strides = tmp + rank; + int *new_dims = tmp + 2 * rank; + int *ia = tmp + 3 * rank; + int rm1 = rank - 1, rm2 = rank - 2; + int i, j, k; + for (i = 0; i < rm2; ++i) + ia[i] = 0; + ia[rm2] = -1; + j = 0; + if (unflat) { + while (next_comb(ia, new_dims, rm2)) { + k = 0; + for (i = 0; i < rm1; ++i) { + k += ia[i] * new_strides[i]; + } + for (i = 0; i < dims_axis; ++i) { + *(dest + k + i * strides_axis) = *(src + j++); + } + } + } else { + while (next_comb(ia, new_dims, rm2)) { + k = 0; + for (i = 0; i < rm1; ++i) { + k += ia[i] * new_strides[i]; + } + for (i = 0; i < dims_axis; ++i) { + *(dest + j++) = *(src + k + i * strides_axis); + } + } + } +} + +extern void zfft(complex_double * inout, + int n, int direction, int howmany, int normalize); + +extern void zfftnd_fftpack(complex_double * inout, int rank, + int *dims, int direction, int howmany, + int normalize) +{ + int i, sz; + complex_double *ptr = inout; + int axis; + complex_double *tmp; + int *itmp; + int k, j; + + sz = 1; + for (i = 0; i < rank; ++i) { + sz *= dims[i]; + } + //zfft_fftpack(ptr, dims[rank - 1], direction, howmany * sz / dims[rank - 1], + // normalize); + zfft(ptr, dims[rank - 1], direction, howmany * sz / dims[rank - 1], + normalize); + + i = get_cache_id_zfftnd_fftpack(sz, rank); + tmp = caches_zfftnd_fftpack[i].ptr; + itmp = caches_zfftnd_fftpack[i].iptr; + + itmp[rank - 1] = 1; + for (i = 2; i <= rank; ++i) { + itmp[rank - i] = itmp[rank - i + 1] * dims[rank - i + 1]; + } + + for (i = 0; i < howmany; ++i, ptr += sz) { + for (axis = 0; axis < rank - 1; ++axis) { + for (k = j = 0; k < rank; ++k) { + if (k != axis) { + *(itmp + rank + j) = itmp[k]; + *(itmp + 2 * rank + j++) = dims[k] - 1; + } + } + flatten(tmp, ptr, rank, itmp[axis], dims[axis], 0, itmp); + //zfft_fftpack(tmp, dims[axis], direction, sz / dims[axis], normalize); + zfft(tmp, dims[axis], direction, sz / dims[axis], normalize); + flatten(ptr, tmp, rank, itmp[axis], dims[axis], 1, itmp); + } + } + +} Added: trunk/Lib/fftpack/src/zfftnd_fftw.c =================================================================== --- trunk/Lib/fftpack/src/zfftnd_fftw.c 2007-08-07 23:55:56 UTC (rev 3222) +++ trunk/Lib/fftpack/src/zfftnd_fftw.c 2007-08-08 06:39:47 UTC (rev 3223) @@ -0,0 +1,53 @@ +/* + * fftw2 backend for multi dimensional fft + * + * Original code by Pearu Peaterson + * + * Last Change: Wed Aug 08 03:00 PM 2007 J + */ + 
+GEN_CACHE(zfftnd_fftw, (int n, int *dims, int d, int flags) + , int direction; + int *dims; + fftwnd_plan plan;, ((caches_zfftnd_fftw[i].n == n) && + (caches_zfftnd_fftw[i].direction == d) && + (equal_dims + (n, caches_zfftnd_fftw[i].dims, dims))) + , caches_zfftnd_fftw[id].direction = d; + caches_zfftnd_fftw[id].n = n; + caches_zfftnd_fftw[id].dims = (int *) malloc(sizeof(int) * n); + memcpy(caches_zfftnd_fftw[id].dims, dims, sizeof(int) * n); + caches_zfftnd_fftw[id].plan = + fftwnd_create_plan(n, dims, + (d > 0 ? FFTW_FORWARD : FFTW_BACKWARD), + flags);, + fftwnd_destroy_plan(caches_zfftnd_fftw[id].plan); + free(caches_zfftnd_fftw[id].dims);, 10) + + +extern void zfftnd_mkl(complex_double * inout, int rank, + int *dims, int direction, int howmany, + int normalize) +{ + int i, sz; + complex_double *ptr = inout; + fftwnd_plan plan = NULL; + + sz = 1; + for (i = 0; i < rank; ++i) { + sz *= dims[i]; + } + i = get_cache_id_zfftnd_fftw(rank, dims, direction, + FFTW_IN_PLACE | FFTW_ESTIMATE); + plan = caches_zfftnd_fftw[i].plan; + for (i = 0; i < howmany; ++i, ptr += sz) { + fftwnd_one(plan, (fftw_complex *) ptr, NULL); + } + if (normalize) { + ptr = inout; + for (i = sz * howmany - 1; i >= 0; --i) { + *((double *) (ptr)) /= sz; + *((double *) (ptr++) + 1) /= sz; + } + } +} Added: trunk/Lib/fftpack/src/zfftnd_fftw3.c =================================================================== --- trunk/Lib/fftpack/src/zfftnd_fftw3.c 2007-08-07 23:55:56 UTC (rev 3222) +++ trunk/Lib/fftpack/src/zfftnd_fftw3.c 2007-08-08 06:39:47 UTC (rev 3223) @@ -0,0 +1,40 @@ +/* + * fftw3 backend for multi dimensional fft + * + * Original code by Pearu Peaterson + * + * Last Change: Wed Aug 08 02:00 PM 2007 J + */ + +/* stub because fftw3 has no cache mechanism (yet) */ +static void destroy_zfftnd_fftw3_caches(void) {} + +extern void zfftnd_fftw3(complex_double * inout, int rank, + int *dims, int direction, int howmany, + int normalize) +{ + int i, sz; + complex_double *ptr = inout; + + fftw_plan plan = NULL; + sz = 1; + for (i = 0; i < rank; ++i) { + sz *= dims[i]; + } + plan = fftw_plan_many_dft(rank, dims, howmany, + (fftw_complex *) ptr, NULL, 1, sz, + (fftw_complex *) ptr, NULL, 1, sz, + (direction > + 0 ? FFTW_FORWARD : FFTW_BACKWARD), + FFTW_ESTIMATE); + fftw_execute(plan); + fftw_destroy_plan(plan); + + if (normalize) { + ptr = inout; + for (i = sz * howmany - 1; i >= 0; --i) { + *((double *) (ptr)) /= sz; + *((double *) (ptr++) + 1) /= sz; + } + } +} Added: trunk/Lib/fftpack/src/zfftnd_mkl.c =================================================================== --- trunk/Lib/fftpack/src/zfftnd_mkl.c 2007-08-07 23:55:56 UTC (rev 3222) +++ trunk/Lib/fftpack/src/zfftnd_mkl.c 2007-08-08 06:39:47 UTC (rev 3223) @@ -0,0 +1,66 @@ +/* + * MKL backend for multi dimensional fft + * + * Original code by David M. 
Cooke + * + * Last Change: Wed Aug 08 03:00 PM 2007 J + */ + +GEN_CACHE(zfftnd_mkl, (int n, int *dims) + , DFTI_DESCRIPTOR_HANDLE desc_handle; + int *dims; + long *ndims;, ((caches_zfftnd_mkl[i].n == n) && + (equal_dims(n, caches_zfftnd_mkl[i].dims, dims))) + , caches_zfftnd_mkl[id].ndims = convert_dims(n, dims); + caches_zfftnd_mkl[id].n = n; + caches_zfftnd_mkl[id].dims = (int *) malloc(sizeof(int) * n); + memcpy(caches_zfftnd_mkl[id].dims, dims, sizeof(int) * n); + DftiCreateDescriptor(&caches_zfftnd_mkl[id].desc_handle, + DFTI_DOUBLE, DFTI_COMPLEX, (long) n, + caches_zfftnd_mkl[id].ndims); + DftiCommitDescriptor(caches_zfftnd_mkl[id].desc_handle);, + DftiFreeDescriptor(&caches_zfftnd_mkl[id].desc_handle); + free(caches_zfftnd_mkl[id].dims); + free(caches_zfftnd_mkl[id].ndims);, 10) + +static long *convert_dims(int n, int *dims) +{ + long *ndim; + int i; + ndim = (long *) malloc(sizeof(long) * n); + for (i = 0; i < n; i++) { + ndim[i] = (long) dims[i]; + } + return ndim; +} + +extern void zfftnd_mkl(complex_double * inout, int rank, + int *dims, int direction, int howmany, + int normalize) +{ + int i, sz; + complex_double *ptr = inout; + + DFTI_DESCRIPTOR_HANDLE desc_handle; + sz = 1; + for (i = 0; i < rank; ++i) { + sz *= dims[i]; + } + + desc_handle = + caches_zmklfftnd[get_cache_id_zmklfftnd(rank, dims)].desc_handle; + for (i = 0; i < howmany; ++i, ptr += sz) { + if (direction == 1) { + DftiComputeForward(desc_handle, (double *) ptr); + } else if (direction == -1) { + DftiComputeBackward(desc_handle, (double *) ptr); + } + } + if (normalize) { + ptr = inout; + for (i = sz * howmany - 1; i >= 0; --i) { + *((double *) (ptr)) /= sz; + *((double *) (ptr++) + 1) /= sz; + } + } +} From scipy-svn at scipy.org Wed Aug 8 02:46:33 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 8 Aug 2007 01:46:33 -0500 (CDT) Subject: [Scipy-svn] r3224 - trunk/Lib/fftpack/src Message-ID: <20070808064633.1F19C39C0B6@new.scipy.org> Author: cdavid Date: 2007-08-08 01:46:12 -0500 (Wed, 08 Aug 2007) New Revision: 3224 Modified: trunk/Lib/fftpack/src/zfftnd.c Log: Forgot to remove unused, older code for multi dimensional fft Modified: trunk/Lib/fftpack/src/zfftnd.c =================================================================== --- trunk/Lib/fftpack/src/zfftnd.c 2007-08-08 06:39:47 UTC (rev 3223) +++ trunk/Lib/fftpack/src/zfftnd.c 2007-08-08 06:46:12 UTC (rev 3224) @@ -43,177 +43,3 @@ #include "zfftnd_fftpack.c" GEN_PUBLIC_API(fftpack) #endif - -#if 0 -/**************** INTEL MKL **************************/ -#ifdef WITH_MKL -long* convert_dims(int n, int *dims) -{ - long * ndim; - int i; - ndim = (long*)malloc(sizeof(long)*n); - for(i=0;i0?FFTW_FORWARD:FFTW_BACKWARD),flags); - ,fftwnd_destroy_plan(caches_zfftwnd[id].plan); - free(caches_zfftwnd[id].dims); - ,10) -#else -GEN_CACHE(zfftnd,(int n,int rank) - ,complex_double *ptr; - int *iptr; - int rank; - ,((caches_zfftnd[i].n==n)&&(caches_zfftnd[i].rank==rank)) - ,caches_zfftnd[id].n = n; - caches_zfftnd[id].ptr = (complex_double*)malloc(2*sizeof(double)*n); - caches_zfftnd[id].iptr = (int*)malloc(4*rank*sizeof(int)); - ,free(caches_zfftnd[id].ptr); - free(caches_zfftnd[id].iptr); - ,10) -#endif - -extern void destroy_zfftnd_cache(void) { -#ifdef WITH_MKL - destroy_zmklfftnd_caches(); -#elif defined WITH_FFTW3 -#elif defined WITH_FFTW - destroy_zfftwnd_caches(); -#else - destroy_zfftnd_caches(); -#endif -} -#if defined(WITH_FFTW) || defined(WITH_FFTW3) || defined(WITH_MKL) -#else -#endif -/**************** ZFFTND function 
**********************/ -extern void zfftnd(complex_double *inout,int rank, - int *dims,int direction,int howmany,int normalize) { - int i,sz; - complex_double *ptr = inout; -#if defined WITH_MKL - DFTI_DESCRIPTOR_HANDLE desc_handle; -#elif defined WITH_FFTW3 - fftw_plan plan = NULL; -#elif defined WITH_FFTW - fftwnd_plan plan = NULL; -#else - int axis; - complex_double *tmp; - int *itmp; - int k,j; -#endif - sz = 1; - for(i=0;i=0;--i) { - *((double*)(ptr)) /= sz; - *((double*)(ptr++)+1) /= sz; - } - } -#elif defined WITH_FFTW3 - plan = fftw_plan_many_dft(rank,dims,howmany, - (fftw_complex*)ptr,NULL,1,sz, - (fftw_complex*)ptr,NULL,1,sz, - (direction>0?FFTW_FORWARD:FFTW_BACKWARD), - FFTW_ESTIMATE); - fftw_execute(plan); - fftw_destroy_plan(plan); - /* note that fftw_malloc of array *could* lead - * to faster fft here for processors with SIMD acceleration, - * but would require more memory and an array memcpy - */ - if (normalize) { - ptr = inout; - for (i=sz*howmany-1;i>=0;--i) { - *((double*)(ptr)) /= sz; - *((double*)(ptr++)+1) /= sz; - } - } -#elif defined WITH_FFTW - i = get_cache_id_zfftwnd(rank,dims,direction,FFTW_IN_PLACE|FFTW_ESTIMATE); - plan = caches_zfftwnd[i].plan; - for (i=0;i=0;--i) { - *((double*)(ptr)) /= sz; - *((double*)(ptr++)+1) /= sz; - } - } -#else - zfft(ptr,dims[rank-1],direction,howmany*sz/dims[rank-1],normalize); - - i = get_cache_id_zfftnd(sz,rank); /*Get cache*/ - tmp = caches_zfftnd[i].ptr; - itmp = caches_zfftnd[i].iptr; - - itmp[rank-1] = 1; /*Calculate strides*/ - for (i=2;i<=rank;++i) - itmp[rank-i] = itmp[rank-i+1]*dims[rank-i+1]; - - for (i=0;i Author: cdavid Date: 2007-08-08 10:15:24 -0500 (Wed, 08 Aug 2007) New Revision: 3225 Modified: trunk/Lib/fftpack/src/zfftnd_mkl.c Log: Fix typo in mkl backend for nd fft Modified: trunk/Lib/fftpack/src/zfftnd_mkl.c =================================================================== --- trunk/Lib/fftpack/src/zfftnd_mkl.c 2007-08-08 06:46:12 UTC (rev 3224) +++ trunk/Lib/fftpack/src/zfftnd_mkl.c 2007-08-08 15:15:24 UTC (rev 3225) @@ -6,6 +6,17 @@ * Last Change: Wed Aug 08 03:00 PM 2007 J */ +static long *convert_dims(int n, int *dims) +{ + long *ndim; + int i; + ndim = (long *) malloc(sizeof(long) * n); + for (i = 0; i < n; i++) { + ndim[i] = (long) dims[i]; + } + return ndim; +} + GEN_CACHE(zfftnd_mkl, (int n, int *dims) , DFTI_DESCRIPTOR_HANDLE desc_handle; int *dims; @@ -23,17 +34,6 @@ free(caches_zfftnd_mkl[id].dims); free(caches_zfftnd_mkl[id].ndims);, 10) -static long *convert_dims(int n, int *dims) -{ - long *ndim; - int i; - ndim = (long *) malloc(sizeof(long) * n); - for (i = 0; i < n; i++) { - ndim[i] = (long) dims[i]; - } - return ndim; -} - extern void zfftnd_mkl(complex_double * inout, int rank, int *dims, int direction, int howmany, int normalize) @@ -48,7 +48,7 @@ } desc_handle = - caches_zmklfftnd[get_cache_id_zmklfftnd(rank, dims)].desc_handle; + caches_zfftnd_mkl[get_cache_id_zfftnd_mkl(rank, dims)].desc_handle; for (i = 0; i < howmany; ++i, ptr += sz) { if (direction == 1) { DftiComputeForward(desc_handle, (double *) ptr); From scipy-svn at scipy.org Fri Aug 10 20:08:27 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Fri, 10 Aug 2007 19:08:27 -0500 (CDT) Subject: [Scipy-svn] r3226 - in trunk/Lib/signal: . tests Message-ID: <20070811000827.8107139C033@new.scipy.org> Author: stefan Date: 2007-08-10 19:08:07 -0500 (Fri, 10 Aug 2007) New Revision: 3226 Added: trunk/Lib/signal/tests/test_wavelets.py Modified: trunk/Lib/signal/wavelets.py Log: Fix wavelet module. Add tests. 
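Background for the tests added below: daub(p) returns the 2*p FIR low-pass coefficients of the order-p Daubechies wavelet, qmf derives the matching high-pass (quadrature mirror) filter, and cascade(hk, J) evaluates the scaling function phi and the wavelet psi on the dyadic grid x = K/2**J, so each returned vector has (len(hk) - 1) * 2**J points. A minimal sketch of that invariant, assuming the fixed module below (J=5 is an arbitrary choice):

    from scipy.signal import wavelets
    lp = wavelets.daub(3)                  # 6 low-pass coefficients
    x, phi, psi = wavelets.cascade(lp, 5)  # phi and psi sampled at K/2**5
    assert len(x) == len(phi) == len(psi) == (len(lp) - 1) * 2**5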
Added: trunk/Lib/signal/tests/test_wavelets.py =================================================================== --- trunk/Lib/signal/tests/test_wavelets.py 2007-08-08 15:15:24 UTC (rev 3225) +++ trunk/Lib/signal/tests/test_wavelets.py 2007-08-11 00:08:07 UTC (rev 3226) @@ -0,0 +1,26 @@ +import numpy as N +from numpy.testing import * + +set_package_path() +from scipy.signal import wavelets +restore_path() + +class test_wavelets(NumpyTestCase): + def check_qmf(self): + assert_array_equal(wavelets.qmf([1,1]),[1,-1]) + + def check_daub(self): + for i in xrange(1,15): + assert_equal(len(wavelets.daub(i)),i*2) + + def check_cascade(self): + for J in xrange(1,7): + for i in xrange(1,5): + lpcoef = wavelets.daub(i) + k = len(lpcoef) + x,phi,psi = wavelets.cascade(lpcoef,J) + assert len(x) == len(phi) == len(psi) + assert_equal(len(x),(k-1)*2**J) + +if __name__ == "__main__": + NumpyTest().run() Modified: trunk/Lib/signal/wavelets.py =================================================================== --- trunk/Lib/signal/wavelets.py 2007-08-08 15:15:24 UTC (rev 3225) +++ trunk/Lib/signal/wavelets.py 2007-08-11 00:08:07 UTC (rev 3226) @@ -1,3 +1,4 @@ +__all__ = ['daub','qmf','cascade'] import numpy as sb from numpy.dual import eig @@ -3,5 +4,4 @@ from scipy.misc import comb - def daub(p): """The coefficients for the FIR low-pass filter producing Daubechies wavelets. @@ -47,7 +47,8 @@ if (abs(z1)) < 1: z1 = const - part q = q * [1,-z1] - q = sb.real(q) * c + + q = c * sb.real(q) # Normalize result q = q / sb.sum(q) * sqrt(2) return q.c[::-1] @@ -74,7 +75,7 @@ J -- values will be computed at grid points $K/2^J$ Outputs: - x -- the dyadic points $K/2^J$ for $K=0...N*2^J-1$ + x -- the dyadic points $K/2^J$ for $K=0...N*(2^J)-1$ where len(hk)=len(gk)=N+1 phi -- the scaling function phi(x) at x $\phi(x) = \sum_{k=0}^{N} h_k \phi(2x-k)$ @@ -118,7 +119,7 @@ m *= s2 # construct the grid of points - x = sb.arange(0,N*(1< Author: stefan Date: 2007-08-10 19:56:45 -0500 (Fri, 10 Aug 2007) New Revision: 3227 Modified: trunk/Lib/signal/tests/test_wavelets.py trunk/Lib/signal/wavelets.py Log: Add complex Morlet wavelet (based on contribution by iCy-fLaME). Modified: trunk/Lib/signal/tests/test_wavelets.py =================================================================== --- trunk/Lib/signal/tests/test_wavelets.py 2007-08-11 00:08:07 UTC (rev 3226) +++ trunk/Lib/signal/tests/test_wavelets.py 2007-08-11 00:56:45 UTC (rev 3227) @@ -22,5 +22,14 @@ assert len(x) == len(phi) == len(psi) assert_equal(len(x),(k-1)*2**J) + def check_morlet(self): + x = wavelets.morlet(50,4.1,complete=True) + y = wavelets.morlet(50,4.1,complete=False) + assert_equal(len(x),len(y)) + + x = wavelets.morlet(10,50,complete=False) + y = wavelets.morlet(10,50,complete=True) + assert_equal(x,y) + if __name__ == "__main__": NumpyTest().run() Modified: trunk/Lib/signal/wavelets.py =================================================================== --- trunk/Lib/signal/wavelets.py 2007-08-11 00:08:07 UTC (rev 3226) +++ trunk/Lib/signal/wavelets.py 2007-08-11 00:56:45 UTC (rev 3227) @@ -1,8 +1,9 @@ -__all__ = ['daub','qmf','cascade'] +__all__ = ['daub','qmf','cascade','morlet'] import numpy as sb from numpy.dual import eig from scipy.misc import comb +from scipy import linspace, pi, exp, zeros def daub(p): """The coefficients for the FIR low-pass filter producing Daubechies wavelets. @@ -167,3 +168,50 @@ prevkeys = newkeys return x, phi, psi + +def morlet(M, w=5.0, s=1.0, complete=True): + """Complex Morlet wavelet. 
+ + :Parameters: + M : int + Length of the wavelet. + w : float + Omega0 + s : float + Scaling factor, windowed from -s*2*pi to +s*2*pi. + complete : bool + Whether to use the complete or the standard version. + + Notes: + ------ + + The standard version: + pi**-0.25 * exp(1j*w*x) * exp(-0.5*(x**2)) + + This commonly used wavelet is often referred to simply as the + Morlet wavelet. Note that, this simplified version can cause + admissibility problems at low values of w. + + The complete version: + pi**-0.25 * (exp(1j*w*x) - exp(-0.5*(w**2))) * exp(-0.5*(x**2)) + + The complete version of the Morlet wavelet, with a correction + term to improve admissibility. For w greater than 5, the + correction term is negligible. + + Note that the energy of the return wavelet is not normalised + according to s. + + The fundamental frequency of this wavelet in Hz is given + by f = 2*s*w*r / M where r is the sampling rate. + + """ + x = linspace(-s*2*pi,s*2*pi,M) + output = exp(1j*w*x) + + if complete: + x -= exp(-0.5*(w**2)) + + output *= exp(-0.5*(x**2)) * pi**(-0.25) + + return output From scipy-svn at scipy.org Mon Aug 13 09:14:52 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Mon, 13 Aug 2007 08:14:52 -0500 (CDT) Subject: [Scipy-svn] r3228 - in trunk/Lib/sandbox/maskedarray: . tests Message-ID: <20070813131452.CF1F639C034@new.scipy.org> Author: pierregm Date: 2007-08-13 08:14:46 -0500 (Mon, 13 Aug 2007) New Revision: 3228 Modified: trunk/Lib/sandbox/maskedarray/core.py trunk/Lib/sandbox/maskedarray/tests/test_core.py Log: core : fixed a pb w/ maximum/minimum on multiD arrays (thx to Eric Firing) Modified: trunk/Lib/sandbox/maskedarray/core.py =================================================================== --- trunk/Lib/sandbox/maskedarray/core.py 2007-08-11 00:56:45 UTC (rev 3227) +++ trunk/Lib/sandbox/maskedarray/core.py 2007-08-13 13:14:46 UTC (rev 3228) @@ -2017,7 +2017,8 @@ else: kargs = {} target = target.ravel() - + if not (m is nomask): + m = m.ravel() if m is nomask: t = self.ufunc.reduce(target, **kargs) else: Modified: trunk/Lib/sandbox/maskedarray/tests/test_core.py =================================================================== --- trunk/Lib/sandbox/maskedarray/tests/test_core.py 2007-08-11 00:56:45 UTC (rev 3227) +++ trunk/Lib/sandbox/maskedarray/tests/test_core.py 2007-08-13 13:14:46 UTC (rev 3228) @@ -301,6 +301,10 @@ assert_equal(maximum(x,y), where(greater(x,y), x, y)) assert minimum(x) == 0 assert maximum(x) == 4 + # + x = arange(4).reshape(2,2) + x[-1,-1] = masked + assert_equal(maximum(x), 2) def check_minmax_methods(self): "Additional tests on max/min" From scipy-svn at scipy.org Mon Aug 13 15:16:55 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Mon, 13 Aug 2007 14:16:55 -0500 (CDT) Subject: [Scipy-svn] r3229 - trunk/Lib/weave/examples Message-ID: <20070813191655.3A14F39C12F@new.scipy.org> Author: eric Date: 2007-08-13 14:16:51 -0500 (Mon, 13 Aug 2007) New Revision: 3229 Modified: trunk/Lib/weave/examples/binary_search.py trunk/Lib/weave/examples/object.py Log: cleaned up a few weave demos Modified: trunk/Lib/weave/examples/binary_search.py =================================================================== --- trunk/Lib/weave/examples/binary_search.py 2007-08-13 13:14:46 UTC (rev 3228) +++ trunk/Lib/weave/examples/binary_search.py 2007-08-13 19:16:51 UTC (rev 3229) @@ -104,7 +104,7 @@ { if (max < min ) { - return_val = PyInt_FromLong(-1); + return_val = -1; break; } m = (min + max) / 2; @@ -115,7 +115,7 @@ max = m - 1; else { - 
return_val = PyInt_FromLong(m); + return_val = m; break; } } Modified: trunk/Lib/weave/examples/object.py =================================================================== --- trunk/Lib/weave/examples/object.py 2007-08-13 13:14:46 UTC (rev 3228) +++ trunk/Lib/weave/examples/object.py 2007-08-13 19:16:51 UTC (rev 3229) @@ -1,8 +1,8 @@ -# h:\wrk\scipy\weave\examples>python object.py -# initial val: 1 -# inc result: 2 -# after set attr: 5 +""" Attribute and method access on Python objects from C++. + Note: std::cout type operations currently crash python... + Not sure what is up with this... +""" import scipy.weave as weave #---------------------------------------------------------------------------- @@ -13,24 +13,29 @@ def __init__(self): self.val = 1 def inc(self,amount): - self.val += 1 + self.val += amount return self.val obj = foo() code = """ + py::tuple result(3); + int i = obj.attr("val"); - std::cout << "initial val: " << i << std::endl; + result[0] = i; py::tuple args(1); args[0] = 2; i = obj.mcall("inc",args); - std::cout << "inc result: " << i << std::endl; - + result[1] = i; + obj.set_attr("val",5); i = obj.attr("val"); - std::cout << "after set attr: " << i << std::endl; + result[2] = i; + + return_val = result; """ -weave.inline(code,['obj']) +print 'initial, inc(2), set(5)/get:', weave.inline(code,['obj']) + #---------------------------------------------------------------------------- # indexing of values. #---------------------------------------------------------------------------- @@ -38,11 +43,11 @@ obj = UserList([1,[1,2],"hello"]) code = """ int i; - // find obj length and accesss each of its items - std::cout << "UserList items: "; - for(i = 0; i < obj.length(); i++) - std::cout << obj[i].str() << " "; - std::cout << std::endl; + // find obj length and access each of its items + //std::cout << "UserList items: "; + //for(i = 0; i < obj.length(); i++) + // std::cout << obj[i].str() << " "; + //std::cout << std::endl; // assign new values to each of its items for(i = 0; i < obj.length(); i++) obj[i] = "goodbye"; From scipy-svn at scipy.org Tue Aug 14 13:01:58 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 14 Aug 2007 12:01:58 -0500 (CDT) Subject: [Scipy-svn] r3230 - trunk/Lib/io Message-ID: <20070814170158.9505039C24C@new.scipy.org> Author: oliphant Date: 2007-08-14 12:01:55 -0500 (Tue, 14 Aug 2007) New Revision: 3230 Added: trunk/Lib/io/wavfile.py Log: Add wavfile.py to read and write basic .wav files. Added: trunk/Lib/io/wavfile.py =================================================================== --- trunk/Lib/io/wavfile.py 2007-08-13 19:16:51 UTC (rev 3229) +++ trunk/Lib/io/wavfile.py 2007-08-14 17:01:55 UTC (rev 3230) @@ -0,0 +1,109 @@ +import numpy +import struct + +# assumes file pointer is immediately +# after the 'fmt ' id +def _read_fmt_chunk(fid): + res = struct.unpack('lhHLLHH',fid.read(20)) + size, comp, noc, rate, sbytes, ba, bits = res + if (comp != 1 or size > 16): + print "Warning: unfamiliar format bytes..." 
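    # aside, not part of the patch: struct.unpack('lhHLLHH', fid.read(20)) above
    # assumes the canonical PCM 'fmt ' layout -- 4-byte chunk size, 2-byte
    # compression code (1 = uncompressed PCM), 2-byte channel count, 4-byte
    # sample rate, 4-byte average bytes/sec, 2-byte block align, 2-byte bits
    # per sample; any payload beyond the first 16 bytes is skipped just below.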
+ if (size>16): + fid.read(size-16) + return size, comp, noc, rate, sbytes, ba, bits + +# assumes file pointer is immediately +# after the 'data' id +def _read_data_chunk(fid, noc, bits): + size = struct.unpack('l',fid.read(4))[0] + if bits == 8: + data = numpy.fromfile(fid, dtype=numpy.ubyte, count=size) + if noc > 1: + data = data.reshape(-1,noc) + else: + bytes = bits//8 + dtype = 'i%d' % bytes + data = numpy.fromfile(fid, dtype=dtype, count=size//bytes) + if noc > 1: + data = data.reshape(-1,noc) + return data + +def _read_riff_chunk(fid): + str1 = fid.read(4) + fsize = struct.unpack('L', fid.read(4))[0] + 8 + str2 = fid.read(4) + if (str1 != 'RIFF' or str2 != 'WAVE'): + raise ValueError, "Not a WAV file." + return fsize + +# open a wave-file +def read(file): + """Return the sample rate (in samples/sec) and data from a WAV file + + The file can be an open file or a filename. + The returned sample rate is a Python integer + The data is returned as a numpy array with a + data-type determined from the file. + """ + if hasattr(file,'read'): + fid = file + else: + fid = open(file, 'rb') + + fsize = _read_riff_chunk(fid) + noc = 1 + bits = 8 + while (fid.tell() < fsize): + # read the next chunk + chunk_id = fid.read(4) + if chunk_id == 'fmt ': + print "Reading fmt chunk" + size, comp, noc, rate, sbytes, ba, bits = _read_fmt_chunk(fid) + elif chunk_id == 'data': + print "Reading data chunk" + data = _read_data_chunk(fid, noc, bits) + else: + print "Warning: %s chunk not understood" + size = struct.unpack('L',fid.read(4))[0] + bytes = fid.read(size) + fid.close() + return rate, data + +# Write a wave-file +# sample rate, data +def write(filename, rate, data): + """Write a numpy array as a WAV file + + filename -- The name of the file to write (will be over-written) + rate -- The sample rate (in samples/sec). + data -- A 1-d or 2-d numpy array of integer data-type. + The bits-per-sample will be determined by the data-type + To write multiple-channels, use a 2-d array of shape + (Nsamples, Nchannels) + + Writes a simple uncompressed WAV file. + """ + fid = open(filename, 'wb') + fid.write('RIFF') + fid.write('\x00\x00\x00\x00') + fid.write('WAVE') + # fmt chunk + fid.write('fmt ') + if data.ndim == 1: + noc = 1 + else: + noc = data.shape[1] + bits = data.dtype.itemsize * 8 + sbytes = rate*(bits / 8)*noc + ba = noc * (bits / 8) + fid.write(struct.pack('lhHLLHH', 16, 1, noc, rate, sbytes, ba, bits)) + # data chunk + fid.write('data') + fid.write(struct.pack('l', data.nbytes)) + data.tofile(fid) + # Determine file size and place it in correct + # position at start of the file. + size = fid.tell() + fid.seek(4) + fid.write(struct.pack('l', size-8)) + fid.close() From scipy-svn at scipy.org Tue Aug 14 13:45:28 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 14 Aug 2007 12:45:28 -0500 (CDT) Subject: [Scipy-svn] r3231 - trunk/Lib/sandbox/timeseries Message-ID: <20070814174528.E48D239C142@new.scipy.org> Author: mattknox_ca Date: 2007-08-14 12:45:19 -0500 (Tue, 14 Aug 2007) New Revision: 3231 Modified: trunk/Lib/sandbox/timeseries/tseries.py Log: added tolist method Modified: trunk/Lib/sandbox/timeseries/tseries.py =================================================================== --- trunk/Lib/sandbox/timeseries/tseries.py 2007-08-14 17:01:55 UTC (rev 3230) +++ trunk/Lib/sandbox/timeseries/tseries.py 2007-08-14 17:45:19 UTC (rev 3231) @@ -919,6 +919,17 @@ TimeSeries.tofile = tofile #............................................ 
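    # aside, not part of the patch: the tolist added below pairs each date with
    # its value, turning a series into [(datetime, value), ...]; note that
    # fill_value is accepted but never applied in this version, so masked
    # entries typically come back as None from _series.tolist().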
+def tolist(self, fill_value=None): + """Copies the date and data portion of the time series to a hierarchical +python list and returns that list. Data items are converted to the nearest +compatible Python type. Dates are converted to standard Python datetime +objects. Masked values are filled with `fill_value`""" + return [(d.datetime, v) for (d,v) in \ + zip(self.dates, self._series.tolist())] +TimeSeries.tolist = tolist + +#............................................ + def asrecords(series): """Returns the masked time series as a recarray. Fields are `_dates`, `_data` and _`mask`. From scipy-svn at scipy.org Tue Aug 14 14:29:03 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 14 Aug 2007 13:29:03 -0500 (CDT) Subject: [Scipy-svn] r3232 - trunk/Lib/sandbox/timeseries/lib Message-ID: <20070814182903.ED2A139C0C6@new.scipy.org> Author: pierregm Date: 2007-08-14 13:28:58 -0500 (Tue, 14 Aug 2007) New Revision: 3232 Modified: trunk/Lib/sandbox/timeseries/lib/moving_funcs.py Log: maskedarray mrecords : fixed a pb when creating mrecords from a list of tuples timeseries lib.moving_funcs : fixed a pb with the mov_xxx functions: the original data was affected in certain cases Modified: trunk/Lib/sandbox/timeseries/lib/moving_funcs.py =================================================================== --- trunk/Lib/sandbox/timeseries/lib/moving_funcs.py 2007-08-14 17:45:19 UTC (rev 3231) +++ trunk/Lib/sandbox/timeseries/lib/moving_funcs.py 2007-08-14 18:28:58 UTC (rev 3232) @@ -33,15 +33,18 @@ def _process_result_dict(orig_data, result_dict): "process the results from the c function" + rarray = result_dict['array'] rtype = result_dict['array'].dtype rmask = result_dict['mask'] # makes a copy of the appropriate type - data = orig_data.astype(rtype) - data[:] = result_dict['array'] + data = orig_data.astype(rtype).copy() + data.flat = result_dict['array'].ravel() + if not hasattr(data, '__setmask__'): + data = data.view(MA.MaskedArray) + data.__setmask__(rmask) + return data - return marray(data, mask=rmask, copy=False, subok=True) - def _moving_func(data, cfunc, kwargs): if data.ndim == 1: @@ -327,3 +330,29 @@ fdoc = fdoc.replace('$$'+prm+'$$', dc) fdoc += mov_result_doc _g[fn].func_doc = fdoc + + +############################################################################### +if __name__ == '__main__': + from timeseries import time_series, today + from maskedarray.testutils import assert_equal, assert_almost_equal + # + series = time_series(N.arange(10),start_date=today('D')) + # + filtered = mov_sum(series,3) + assert_equal(filtered, [0,1,3,6,9,12,15,18,21,24]) + assert_equal(filtered._mask, [1,1,0,0,0,0,0,0,0,0]) + assert_equal(filtered._dates, series._dates) + assert_equal(series, N.arange(10)) + # + filtered = mov_average(series,3) + assert_equal(filtered, [0,1,1,2,3,4,5,6,7,8]) + assert_equal(filtered._mask, [1,1,0,0,0,0,0,0,0,0]) + assert_equal(filtered._dates, series._dates) + assert_equal(series, N.arange(10)) + # + filtered = mov_average(series._data,3) + assert_equal(filtered, [0,1,1,2,3,4,5,6,7,8]) + assert_equal(filtered._mask, [1,1,0,0,0,0,0,0,0,0]) + assert_equal(series, N.arange(10)) + From scipy-svn at scipy.org Tue Aug 14 14:29:08 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 14 Aug 2007 13:29:08 -0500 (CDT) Subject: [Scipy-svn] r3233 - trunk/Lib/sandbox/maskedarray Message-ID: <20070814182908.4351339C11D@new.scipy.org> Author: pierregm Date: 2007-08-14 13:29:06 -0500 (Tue, 14 Aug 2007) New Revision: 3233 Modified: 
trunk/Lib/sandbox/maskedarray/mrecords.py Log: maskedarray mrecords : fixed a pb when creating mrecords from a list of tuples timeseries lib.moving_funcs : fixed a pb with the mov_xxx functions: the original data was affected in certain cases Modified: trunk/Lib/sandbox/maskedarray/mrecords.py =================================================================== --- trunk/Lib/sandbox/maskedarray/mrecords.py 2007-08-14 18:28:58 UTC (rev 3232) +++ trunk/Lib/sandbox/maskedarray/mrecords.py 2007-08-14 18:29:06 UTC (rev 3233) @@ -135,11 +135,22 @@ else: _fieldmask = mask else: - _data = recarray(shape, dtype=descr) - _fieldmask = recarray(shape, dtype=mdescr) - for (n,v) in zip(_names, data): - _data[n] = numeric.asarray(v).view(ndarray) - _fieldmask[n] = getmaskarray(v) + try: + data = numeric.array(data, dtype=descr).view(recarray) + _data = data + if mask is nomask: + _fieldmask = data.astype(mdescr) + _fieldmask.flat = tuple([False]*len(mdescr)) + else: + _fieldmask = mask + except: + _data = recarray(shape, dtype=descr) + _fieldmask = recarray(shape, dtype=mdescr) + for (n,v) in zip(_names, data): + print n, v + print _data[n] + _data[n] = numeric.asarray(v).view(ndarray) + _fieldmask[n] = getmaskarray(v) #........................................ _data = _data.view(cls) _data._fieldmask = _fieldmask @@ -653,7 +664,7 @@ if __name__ == '__main__': import numpy as N from maskedarray.testutils import assert_equal - if 1: + if 0: d = N.arange(5) m = MA.make_mask([1,0,0,1,1]) base_d = N.r_[d,d[::-1]].reshape(2,-1).T From scipy-svn at scipy.org Tue Aug 14 16:32:40 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 14 Aug 2007 15:32:40 -0500 (CDT) Subject: [Scipy-svn] r3234 - branches Message-ID: <20070814203240.85AAE39C0B9@new.scipy.org> Author: jarrod.millman Date: 2007-08-14 15:32:36 -0500 (Tue, 14 Aug 2007) New Revision: 3234 Added: branches/0.5.2.x/ Log: branching from the 0.5.2 tag Copied: branches/0.5.2.x (from rev 3233, tags/0.5.2) From scipy-svn at scipy.org Tue Aug 14 16:52:39 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 14 Aug 2007 15:52:39 -0500 (CDT) Subject: [Scipy-svn] r3235 - branches/0.5.2.x/Lib Message-ID: <20070814205239.E272739C068@new.scipy.org> Author: jarrod.millman Date: 2007-08-14 15:52:37 -0500 (Tue, 14 Aug 2007) New Revision: 3235 Modified: branches/0.5.2.x/Lib/version.py Log: This is the development code for the 0.5.2.1 release Modified: branches/0.5.2.x/Lib/version.py =================================================================== --- branches/0.5.2.x/Lib/version.py 2007-08-14 20:32:36 UTC (rev 3234) +++ branches/0.5.2.x/Lib/version.py 2007-08-14 20:52:37 UTC (rev 3235) @@ -1,5 +1,5 @@ -version = '0.5.2' -release=True +version = '0.5.2.1' +release=False if not release: import os From scipy-svn at scipy.org Tue Aug 14 16:54:37 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 14 Aug 2007 15:54:37 -0500 (CDT) Subject: [Scipy-svn] r3236 - in branches/0.5.2.x/Lib: . 
fftpack fftpack/tests integrate integrate/tests interpolate interpolate/tests io io/tests lib lib/blas lib/blas/tests lib/lapack lib/lapack/tests linalg linalg/tests linsolve linsolve/umfpack linsolve/umfpack/tests maxentropy maxentropy/tests misc ndimage ndimage/tests optimize optimize/tests sandbox/arpack/tests sandbox/arraysetops/tests sandbox/cdavid/tests sandbox/exmplpackage/tests sandbox/exmplpackage/yyy/tests sandbox/models/tests sandbox/montecarlo/tests sandbox/spline sandbox/spline/tests signal signal/tests sparse sparse/tests special special/tests stats stats/tests weave weave/tests Message-ID: <20070814205437.D5C3E39C068@new.scipy.org> Author: jarrod.millman Date: 2007-08-14 15:53:46 -0500 (Tue, 14 Aug 2007) New Revision: 3236 Modified: branches/0.5.2.x/Lib/__init__.py branches/0.5.2.x/Lib/fftpack/__init__.py branches/0.5.2.x/Lib/fftpack/tests/test_basic.py branches/0.5.2.x/Lib/fftpack/tests/test_helper.py branches/0.5.2.x/Lib/fftpack/tests/test_pseudo_diffs.py branches/0.5.2.x/Lib/integrate/__init__.py branches/0.5.2.x/Lib/integrate/tests/test_integrate.py branches/0.5.2.x/Lib/integrate/tests/test_quadpack.py branches/0.5.2.x/Lib/integrate/tests/test_quadrature.py branches/0.5.2.x/Lib/interpolate/__init__.py branches/0.5.2.x/Lib/interpolate/tests/test_fitpack.py branches/0.5.2.x/Lib/interpolate/tests/test_interpolate.py branches/0.5.2.x/Lib/io/__init__.py branches/0.5.2.x/Lib/io/tests/test_array_import.py branches/0.5.2.x/Lib/io/tests/test_mio.py branches/0.5.2.x/Lib/io/tests/test_mmio.py branches/0.5.2.x/Lib/io/tests/test_recaster.py branches/0.5.2.x/Lib/lib/__init__.py branches/0.5.2.x/Lib/lib/blas/__init__.py branches/0.5.2.x/Lib/lib/blas/tests/test_blas.py branches/0.5.2.x/Lib/lib/blas/tests/test_fblas.py branches/0.5.2.x/Lib/lib/lapack/__init__.py branches/0.5.2.x/Lib/lib/lapack/tests/test_lapack.py branches/0.5.2.x/Lib/linalg/__init__.py branches/0.5.2.x/Lib/linalg/tests/test_basic.py branches/0.5.2.x/Lib/linalg/tests/test_blas.py branches/0.5.2.x/Lib/linalg/tests/test_decomp.py branches/0.5.2.x/Lib/linalg/tests/test_fblas.py branches/0.5.2.x/Lib/linalg/tests/test_lapack.py branches/0.5.2.x/Lib/linalg/tests/test_matfuncs.py branches/0.5.2.x/Lib/linsolve/__init__.py branches/0.5.2.x/Lib/linsolve/umfpack/__init__.py branches/0.5.2.x/Lib/linsolve/umfpack/tests/test_umfpack.py branches/0.5.2.x/Lib/maxentropy/__init__.py branches/0.5.2.x/Lib/maxentropy/tests/test_maxentropy.py branches/0.5.2.x/Lib/misc/__init__.py branches/0.5.2.x/Lib/misc/ppimport.py branches/0.5.2.x/Lib/ndimage/__init__.py branches/0.5.2.x/Lib/ndimage/tests/test_ndimage.py branches/0.5.2.x/Lib/optimize/__init__.py branches/0.5.2.x/Lib/optimize/tests/test_cobyla.py branches/0.5.2.x/Lib/optimize/tests/test_optimize.py branches/0.5.2.x/Lib/optimize/tests/test_zeros.py branches/0.5.2.x/Lib/sandbox/arpack/tests/test_arpack.py branches/0.5.2.x/Lib/sandbox/arpack/tests/test_speigs.py branches/0.5.2.x/Lib/sandbox/arraysetops/tests/test_arraysetops.py branches/0.5.2.x/Lib/sandbox/cdavid/tests/test_autocorr.py branches/0.5.2.x/Lib/sandbox/cdavid/tests/test_lpc.py branches/0.5.2.x/Lib/sandbox/exmplpackage/tests/test_foo.py branches/0.5.2.x/Lib/sandbox/exmplpackage/yyy/tests/test_yyy.py branches/0.5.2.x/Lib/sandbox/models/tests/test_formula.py branches/0.5.2.x/Lib/sandbox/montecarlo/tests/test_dictsampler.py branches/0.5.2.x/Lib/sandbox/montecarlo/tests/test_intsampler.py branches/0.5.2.x/Lib/sandbox/spline/__init__.py branches/0.5.2.x/Lib/sandbox/spline/tests/test_fitpack.py 
branches/0.5.2.x/Lib/sandbox/spline/tests/test_interpolate.py branches/0.5.2.x/Lib/signal/__init__.py branches/0.5.2.x/Lib/signal/tests/test_signaltools.py branches/0.5.2.x/Lib/sparse/__init__.py branches/0.5.2.x/Lib/sparse/tests/test_sparse.py branches/0.5.2.x/Lib/special/__init__.py branches/0.5.2.x/Lib/special/tests/test_basic.py branches/0.5.2.x/Lib/stats/__init__.py branches/0.5.2.x/Lib/stats/tests/test_distributions.py branches/0.5.2.x/Lib/stats/tests/test_morestats.py branches/0.5.2.x/Lib/stats/tests/test_stats.py branches/0.5.2.x/Lib/weave/__init__.py branches/0.5.2.x/Lib/weave/tests/test_ast_tools.py branches/0.5.2.x/Lib/weave/tests/test_blitz_tools.py branches/0.5.2.x/Lib/weave/tests/test_build_tools.py branches/0.5.2.x/Lib/weave/tests/test_c_spec.py branches/0.5.2.x/Lib/weave/tests/test_catalog.py branches/0.5.2.x/Lib/weave/tests/test_ext_tools.py branches/0.5.2.x/Lib/weave/tests/test_inline_tools.py branches/0.5.2.x/Lib/weave/tests/test_scxx.py branches/0.5.2.x/Lib/weave/tests/test_scxx_dict.py branches/0.5.2.x/Lib/weave/tests/test_scxx_object.py branches/0.5.2.x/Lib/weave/tests/test_scxx_sequence.py branches/0.5.2.x/Lib/weave/tests/test_size_check.py branches/0.5.2.x/Lib/weave/tests/test_slice_handler.py branches/0.5.2.x/Lib/weave/tests/test_standard_array_spec.py branches/0.5.2.x/Lib/weave/tests/test_wx_spec.py Log: removed use of deprecated scipytest in favor of numpytest Modified: branches/0.5.2.x/Lib/__init__.py =================================================================== --- branches/0.5.2.x/Lib/__init__.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/__init__.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -71,10 +71,10 @@ def test(level=1, verbosity=1): """ Run Scipy tests suite with level and verbosity.""" - from numpy.testing import ScipyTest + from numpy.testing import NumpyTest import scipy scipy.pkgload() - return ScipyTest(scipy).test(level, verbosity) + return NumpyTest(scipy).test(level, verbosity) __doc__ += """ Modified: branches/0.5.2.x/Lib/fftpack/__init__.py =================================================================== --- branches/0.5.2.x/Lib/fftpack/__init__.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/fftpack/__init__.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -17,5 +17,5 @@ del k, register_func -from numpy.testing import ScipyTest -test = ScipyTest().test +from numpy.testing import NumpyTest +test = NumpyTest().test Modified: branches/0.5.2.x/Lib/fftpack/tests/test_basic.py =================================================================== --- branches/0.5.2.x/Lib/fftpack/tests/test_basic.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/fftpack/tests/test_basic.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -90,7 +90,7 @@ x1[0] = x[0] return direct_idft(x1).real -class test_fft(ScipyTestCase): +class test_fft(NumpyTestCase): def check_definition(self): x = [1,2,3,4+1j,1,2,3,4+2j] @@ -162,7 +162,7 @@ print ' (secs for %s calls)' % (repeat) sys.stdout.flush() -class test_ifft(ScipyTestCase): +class test_ifft(NumpyTestCase): def check_definition(self): x = [1,2,3,4+1j,1,2,3,4+2j] @@ -237,7 +237,7 @@ print ' (secs for %s calls)' % (repeat) sys.stdout.flush() -class test_rfft(ScipyTestCase): +class test_rfft(NumpyTestCase): def check_definition(self): x = [1,2,3,4,1,2,3,4] @@ -292,7 +292,7 @@ print ' (secs for %s calls)' % (repeat) sys.stdout.flush() -class test_irfft(ScipyTestCase): +class test_irfft(NumpyTestCase): def check_definition(self): x = [1,2,3,4,1,2,3,4] @@ -368,7 +368,7 @@ sys.stdout.flush() -class 
test_fftn(ScipyTestCase): +class test_fftn(NumpyTestCase): def check_definition(self): x = [[1,2,3],[4,5,6],[7,8,9]] @@ -529,7 +529,7 @@ sys.stdout.flush() -class test_ifftn(ScipyTestCase): +class test_ifftn(NumpyTestCase): def check_definition(self): x = [[1,2,3],[4,5,6],[7,8,9]] @@ -547,4 +547,4 @@ assert_array_almost_equal (fftn(ifftn(x)),x) if __name__ == "__main__": - ScipyTest().run() + NumpyTest().run() Modified: branches/0.5.2.x/Lib/fftpack/tests/test_helper.py =================================================================== --- branches/0.5.2.x/Lib/fftpack/tests/test_helper.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/fftpack/tests/test_helper.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -22,7 +22,7 @@ def random(size): return rand(*size) -class test_fftshift(ScipyTestCase): +class test_fftshift(NumpyTestCase): def check_definition(self): x = [0,1,2,3,4,-4,-3,-2,-1] @@ -39,7 +39,7 @@ x = random((n,)) assert_array_almost_equal(ifftshift(fftshift(x)),x) -class test_fftfreq(ScipyTestCase): +class test_fftfreq(NumpyTestCase): def check_definition(self): x = [0,1,2,3,4,-4,-3,-2,-1] @@ -49,7 +49,7 @@ assert_array_almost_equal(10*fftfreq(10),x) assert_array_almost_equal(10*pi*fftfreq(10,pi),x) -class test_rfftfreq(ScipyTestCase): +class test_rfftfreq(NumpyTestCase): def check_definition(self): x = [0,1,1,2,2,3,3,4,4] @@ -60,4 +60,4 @@ assert_array_almost_equal(10*pi*rfftfreq(10,pi),x) if __name__ == "__main__": - ScipyTest().run() + NumpyTest().run() Modified: branches/0.5.2.x/Lib/fftpack/tests/test_pseudo_diffs.py =================================================================== --- branches/0.5.2.x/Lib/fftpack/tests/test_pseudo_diffs.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/fftpack/tests/test_pseudo_diffs.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -77,7 +77,7 @@ return ifft(fft(x)*exp(k*a)).real -class test_diff(ScipyTestCase): +class test_diff(NumpyTestCase): def check_definition(self): for n in [16,17,64,127,32]: @@ -216,7 +216,7 @@ print ' (secs for %s calls)' % (repeat) -class test_tilbert(ScipyTestCase): +class test_tilbert(NumpyTestCase): def check_definition(self): for h in [0.1,0.5,1,5.5,10]: @@ -277,7 +277,7 @@ sys.stdout.flush() print ' (secs for %s calls)' % (repeat) -class test_itilbert(ScipyTestCase): +class test_itilbert(NumpyTestCase): def check_definition(self): for h in [0.1,0.5,1,5.5,10]: @@ -291,7 +291,7 @@ assert_array_almost_equal(itilbert(sin(2*x),h), direct_itilbert(sin(2*x),h)) -class test_hilbert(ScipyTestCase): +class test_hilbert(NumpyTestCase): def check_definition(self): for n in [16,17,64,127]: @@ -360,7 +360,7 @@ sys.stdout.flush() print ' (secs for %s calls)' % (repeat) -class test_ihilbert(ScipyTestCase): +class test_ihilbert(NumpyTestCase): def check_definition(self): for n in [16,17,64,127]: @@ -381,7 +381,7 @@ y2 = itilbert(f,h=10) assert_array_almost_equal (y,y2) -class test_shift(ScipyTestCase): +class test_shift(NumpyTestCase): def check_definition(self): for n in [18,17,64,127,32,2048,256]: @@ -430,4 +430,4 @@ print ' (secs for %s calls)' % (repeat) if __name__ == "__main__": - ScipyTest('fftpack.pseudo_diffs').run() + NumpyTest('fftpack.pseudo_diffs').run() Modified: branches/0.5.2.x/Lib/integrate/__init__.py =================================================================== --- branches/0.5.2.x/Lib/integrate/__init__.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/integrate/__init__.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -12,5 +12,5 @@ from ode import * __all__ = filter(lambda s:not 
s.startswith('_'),dir()) -from numpy.testing import ScipyTest -test = ScipyTest().test +from numpy.testing import NumpyTest +test = NumpyTest().test Modified: branches/0.5.2.x/Lib/integrate/tests/test_integrate.py =================================================================== --- branches/0.5.2.x/Lib/integrate/tests/test_integrate.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/integrate/tests/test_integrate.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -14,7 +14,7 @@ from scipy.integrate import odeint restore_path() -class test_odeint(ScipyTestCase): +class test_odeint(NumpyTestCase): """ Test odeint: free vibration of a simple oscillator m \ddot{u} + k u = 0, u(0) = u_0 \dot{u}(0) \dot{u}_0 @@ -51,4 +51,4 @@ assert res < 1.0e-6 if __name__ == "__main__": - ScipyTest().run() + NumpyTest().run() Modified: branches/0.5.2.x/Lib/integrate/tests/test_quadpack.py =================================================================== --- branches/0.5.2.x/Lib/integrate/tests/test_quadpack.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/integrate/tests/test_quadpack.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -10,7 +10,7 @@ if errTol is not None: assert err < errTol, (err, errTol) -class test_quad(ScipyTestCase): +class test_quad(NumpyTestCase): def check_typical(self): # 1) Typical function with two extra arguments: def myfunc(x,n,z): # Bessel function integrand @@ -106,4 +106,4 @@ 8/3.0 * (b**4.0 - a**4.0)) if __name__ == "__main__": - ScipyTest().run() + NumpyTest().run() Modified: branches/0.5.2.x/Lib/integrate/tests/test_quadrature.py =================================================================== --- branches/0.5.2.x/Lib/integrate/tests/test_quadrature.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/integrate/tests/test_quadrature.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -7,7 +7,7 @@ from scipy.integrate import quadrature, romberg, romb restore_path() -class test_quadrature(ScipyTestCase): +class test_quadrature(NumpyTestCase): def quad(self, x, a, b, args): raise NotImplementedError @@ -31,4 +31,4 @@ assert_equal(romb(numpy.arange(17)),128) if __name__ == "__main__": - ScipyTest().run() + NumpyTest().run() Modified: branches/0.5.2.x/Lib/interpolate/__init__.py =================================================================== --- branches/0.5.2.x/Lib/interpolate/__init__.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/interpolate/__init__.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -11,5 +11,5 @@ from fitpack2 import * __all__ = filter(lambda s:not s.startswith('_'),dir()) -from numpy.testing import ScipyTest -test = ScipyTest().test +from numpy.testing import NumpyTest +test = NumpyTest().test Modified: branches/0.5.2.x/Lib/interpolate/tests/test_fitpack.py =================================================================== --- branches/0.5.2.x/Lib/interpolate/tests/test_fitpack.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/interpolate/tests/test_fitpack.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -22,7 +22,7 @@ RectBivariateSpline restore_path() -class test_UnivariateSpline(ScipyTestCase): +class test_UnivariateSpline(NumpyTestCase): def check_linear_constant(self): x = [1,2,3] y = [3,3,3] @@ -41,7 +41,7 @@ assert_almost_equal(lut.get_residual(),0.0) assert_array_almost_equal(lut([1,1.5,2]),[0,1,2]) -class test_LSQBivariateSpline(ScipyTestCase): +class test_LSQBivariateSpline(NumpyTestCase): def check_linear_constant(self): x = [1,1,1,2,2,2,3,3,3] y = [1,2,3,1,2,3,1,2,3] @@ -54,7 +54,7 @@ #print lut.get_coeffs() #print 
lut.get_residual() -class test_SmoothBivariateSpline(ScipyTestCase): +class test_SmoothBivariateSpline(NumpyTestCase): def check_linear_constant(self): x = [1,1,1,2,2,2,3,3,3] y = [1,2,3,1,2,3,1,2,3] @@ -75,7 +75,7 @@ assert_almost_equal(lut.get_residual(),0.0) assert_array_almost_equal(lut([1,1.5,2],[1,1.5]),[[0,0],[1,1],[2,2]]) -class test_RectBivariateSpline(ScipyTestCase): +class test_RectBivariateSpline(NumpyTestCase): def check_defaults(self): x = array([1,2,3,4,5]) y = array([1,2,3,4,5]) @@ -84,4 +84,4 @@ assert_array_almost_equal(lut(x,y),z) if __name__ == "__main__": - ScipyTest().run() + NumpyTest().run() Modified: branches/0.5.2.x/Lib/interpolate/tests/test_interpolate.py =================================================================== --- branches/0.5.2.x/Lib/interpolate/tests/test_interpolate.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/interpolate/tests/test_interpolate.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -7,7 +7,7 @@ restore_path() -class test_interp2d(ScipyTestCase): +class test_interp2d(NumpyTestCase): def test_interp2d(self): y, x = mgrid[0:pi:20j, 0:pi:21j] z = sin(x+y) @@ -18,7 +18,7 @@ assert_almost_equal(I(u.ravel(), v.ravel()), sin(v+u), decimal=2) -class test_interp1d(ScipyTestCase): +class test_interp1d(NumpyTestCase): def setUp(self): self.x10 = np.arange(10.) @@ -195,4 +195,4 @@ if __name__ == "__main__": - ScipyTest().run() + NumpyTest().run() Modified: branches/0.5.2.x/Lib/io/__init__.py =================================================================== --- branches/0.5.2.x/Lib/io/__init__.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/io/__init__.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -16,5 +16,5 @@ from mmio import mminfo,mmread,mmwrite __all__ = filter(lambda s:not s.startswith('_'),dir()) -from numpy.testing import ScipyTest -test = ScipyTest().test +from numpy.testing import NumpyTest +test = NumpyTest().test Modified: branches/0.5.2.x/Lib/io/tests/test_array_import.py =================================================================== --- branches/0.5.2.x/Lib/io/tests/test_array_import.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/io/tests/test_array_import.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -16,7 +16,7 @@ import numpy.oldnumeric as N import tempfile -class test_numpyio(ScipyTestCase): +class test_numpyio(NumpyTestCase): def check_basic(self): # Generate some data a = 255*rand(20) @@ -34,7 +34,7 @@ assert(N.product(a.astype(N.Int16) == b,axis=0)) os.remove(fname) -class test_read_array(ScipyTestCase): +class test_read_array(NumpyTestCase): def check_complex(self): a = rand(13,4) + 1j*rand(13,4) fname = tempfile.mktemp('.dat') @@ -61,4 +61,4 @@ os.remove(fname) if __name__ == "__main__": - ScipyTest().run() + NumpyTest().run() Modified: branches/0.5.2.x/Lib/io/tests/test_mio.py =================================================================== --- branches/0.5.2.x/Lib/io/tests/test_mio.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/io/tests/test_mio.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -4,7 +4,7 @@ from glob import glob from cStringIO import StringIO from tempfile import mkstemp -from numpy.testing import set_package_path, restore_path, ScipyTestCase, ScipyTest +from numpy.testing import set_package_path, restore_path, NumpyTestCase, NumpyTest from numpy.testing import assert_equal, assert_array_almost_equal from numpy import arange, array, eye, pi, cos, exp, sin, sqrt, ndarray, \ zeros, reshape, transpose, empty @@ -23,7 +23,7 @@ test_data_path = 
os.path.join(os.path.dirname(__file__), './data') -class test_mio_array(ScipyTestCase): +class test_mio_array(NumpyTestCase): def __init__(self, *args, **kwargs): super(test_mio_array, self).__init__(*args, **kwargs) @@ -233,5 +233,5 @@ if __name__ == "__main__": - ScipyTest().run() + NumpyTest().run() Modified: branches/0.5.2.x/Lib/io/tests/test_mmio.py =================================================================== --- branches/0.5.2.x/Lib/io/tests/test_mmio.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/io/tests/test_mmio.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -8,7 +8,7 @@ from io.mmio import mminfo,mmread,mmwrite restore_path() -class test_mmio_array(ScipyTestCase): +class test_mmio_array(NumpyTestCase): def check_simple(self): a = [[1,2],[3,4]] @@ -135,7 +135,7 @@ 5 5 1.200e+01 ''' -class test_mmio_coordinate(ScipyTestCase): +class test_mmio_coordinate(NumpyTestCase): def check_simple_todense(self): fn = mktemp() @@ -152,4 +152,4 @@ assert_array_almost_equal(a,b) if __name__ == "__main__": - ScipyTest().run() + NumpyTest().run() Modified: branches/0.5.2.x/Lib/io/tests/test_recaster.py =================================================================== --- branches/0.5.2.x/Lib/io/tests/test_recaster.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/io/tests/test_recaster.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -10,7 +10,7 @@ except: pass -class test_recaster(ScipyTestCase): +class test_recaster(NumpyTestCase): def setUp(self): self.valid_types = [N.int32, N.complex128, N.float64] self.recaster = Recaster(self.valid_types) Modified: branches/0.5.2.x/Lib/lib/__init__.py =================================================================== --- branches/0.5.2.x/Lib/lib/__init__.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/lib/__init__.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -1,5 +1,5 @@ from info import __doc__, __all__ -from numpy.testing import ScipyTest -test = ScipyTest().test +from numpy.testing import NumpyTest +test = NumpyTest().test Modified: branches/0.5.2.x/Lib/lib/blas/__init__.py =================================================================== --- branches/0.5.2.x/Lib/lib/blas/__init__.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/lib/blas/__init__.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -57,5 +57,5 @@ funcs.append(func) return tuple(funcs) -from numpy.testing import ScipyTest -test = ScipyTest().test +from numpy.testing import NumpyTest +test = NumpyTest().test Modified: branches/0.5.2.x/Lib/lib/blas/tests/test_blas.py =================================================================== --- branches/0.5.2.x/Lib/lib/blas/tests/test_blas.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/lib/blas/tests/test_blas.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -24,7 +24,7 @@ from blas import get_blas_funcs restore_path() -class test_cblas1_simple(ScipyTestCase): +class test_cblas1_simple(NumpyTestCase): def check_axpy(self): for p in 'sd': @@ -36,7 +36,7 @@ if f is None: continue assert_array_almost_equal(f([1,2j,3],[2,-1,3],a=5),[7,10j-1,18]) -class test_fblas1_simple(ScipyTestCase): +class test_fblas1_simple(NumpyTestCase): def check_axpy(self): for p in 'sd': @@ -122,7 +122,7 @@ assert_equal(f([-5,4+3j,6]),1) #XXX: need tests for rot,rotm,rotg,rotmg -class test_fblas2_simple(ScipyTestCase): +class test_fblas2_simple(NumpyTestCase): def check_gemv(self): for p in 'sd': @@ -170,7 +170,7 @@ 2j, 3j],[3j,4j]),[[6,8],[12,16],[18,24]]) -class test_fblas3_simple(ScipyTestCase): +class 
test_fblas3_simple(NumpyTestCase): def check_gemm(self): for p in 'sd': @@ -195,7 +195,7 @@ assert_array_almost_equal(f(1,[[1,2]],[[3],[4]]),[[11]]) assert_array_almost_equal(f(1,[[1,2],[1,2]],[[3],[4]]),[[11],[11]]) -class test_blas(ScipyTestCase): +class test_blas(NumpyTestCase): def check_blas(self): a = array([[1,1,1]]) @@ -226,4 +226,4 @@ """ if __name__ == "__main__": - ScipyTest().run() + NumpyTest().run() Modified: branches/0.5.2.x/Lib/lib/blas/tests/test_fblas.py =================================================================== --- branches/0.5.2.x/Lib/lib/blas/tests/test_fblas.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/lib/blas/tests/test_fblas.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -40,7 +40,7 @@ ################################################## ### Test blas ?axpy -class base_axpy(ScipyTestCase): +class base_axpy(NumpyTestCase): def check_default_a(self): x = arange(3.,dtype=self.dtype) y = arange(3.,dtype=x.dtype) @@ -114,7 +114,7 @@ ################################################## ### Test blas ?scal -class base_scal(ScipyTestCase): +class base_scal(NumpyTestCase): def check_simple(self): x = arange(3.,dtype=self.dtype) real_x = x*3. @@ -159,7 +159,7 @@ ################################################## ### Test blas ?copy -class base_copy(ScipyTestCase): +class base_copy(NumpyTestCase): def check_simple(self): x = arange(3.,dtype=self.dtype) y = zeros(shape(x),x.dtype) @@ -228,7 +228,7 @@ ################################################## ### Test blas ?swap -class base_swap(ScipyTestCase): +class base_swap(NumpyTestCase): def check_simple(self): x = arange(3.,dtype=self.dtype) y = zeros(shape(x),x.dtype) @@ -304,7 +304,7 @@ ### Test blas ?gemv ### This will be a mess to test all cases. -class base_gemv(ScipyTestCase): +class base_gemv(NumpyTestCase): def get_data(self,x_stride=1,y_stride=1): mult = array(1, dtype = self.dtype) if self.dtype in [complex64, complex128]: @@ -409,7 +409,7 @@ ### Test blas ?ger ### This will be a mess to test all cases. 
-class base_ger(ScipyTestCase): +class base_ger(NumpyTestCase): def get_data(self,x_stride=1,y_stride=1): from numpy.random import normal alpha = array(1., dtype = self.dtype) @@ -518,4 +518,4 @@ """ if __name__ == "__main__": - ScipyTest().run() + NumpyTest().run() Modified: branches/0.5.2.x/Lib/lib/lapack/__init__.py =================================================================== --- branches/0.5.2.x/Lib/lib/lapack/__init__.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/lib/lapack/__init__.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -89,5 +89,5 @@ func_code = %(func_name)s.func_code ''' -from numpy.testing import ScipyTest -test = ScipyTest().test +from numpy.testing import NumpyTest +test = NumpyTest().test Modified: branches/0.5.2.x/Lib/lib/lapack/tests/test_lapack.py =================================================================== --- branches/0.5.2.x/Lib/lib/lapack/tests/test_lapack.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/lib/lapack/tests/test_lapack.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -27,7 +27,7 @@ #class _test_ev: pass -class _test_lapack(ScipyTestCase, +class _test_lapack(NumpyTestCase, _test_ev, _test_gev): @@ -123,4 +123,4 @@ decimal = 12 if __name__ == "__main__": - ScipyTest().run() + NumpyTest().run() Modified: branches/0.5.2.x/Lib/linalg/__init__.py =================================================================== --- branches/0.5.2.x/Lib/linalg/__init__.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/linalg/__init__.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -28,5 +28,5 @@ del k, register_func -from numpy.testing import ScipyTest -test = ScipyTest().test +from numpy.testing import NumpyTest +test = NumpyTest().test Modified: branches/0.5.2.x/Lib/linalg/tests/test_basic.py =================================================================== --- branches/0.5.2.x/Lib/linalg/tests/test_basic.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/linalg/tests/test_basic.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -39,7 +39,7 @@ data = add.outer(data,data) return data -class test_solve_banded(ScipyTestCase): +class test_solve_banded(NumpyTestCase): def check_simple(self): @@ -54,7 +54,7 @@ x = solve_banded((l,u),ab,b) assert_array_almost_equal(numpy.dot(a,x),b) -class test_solve(ScipyTestCase): +class test_solve(NumpyTestCase): def check_20Feb04_bug(self): a = [[1,1],[1.0,0]] # ok @@ -193,7 +193,7 @@ print ' (secs for %s calls)' % (repeat) -class test_inv(ScipyTestCase): +class test_inv(NumpyTestCase): def check_simple(self): a = [[1,2],[3,4]] @@ -265,7 +265,7 @@ print ' (secs for %s calls)' % (repeat) -class test_det(ScipyTestCase): +class test_det(NumpyTestCase): def check_simple(self): a = [[1,2],[3,4]] @@ -340,7 +340,7 @@ b1 = dot(at, b) return solve(a1, b1) -class test_lstsq(ScipyTestCase): +class test_lstsq(NumpyTestCase): def check_random_overdet_large(self): #bug report: Nils Wagner n = 200 @@ -510,7 +510,7 @@ y = hankel([1,2,3],[3,4,5]) assert_array_equal(y,[[1,2,3],[2,3,4],[3,4,5]]) -class test_pinv(ScipyTestCase): +class test_pinv(NumpyTestCase): def check_simple(self): a=array([[1,2,3],[4,5,6.],[7,8,10]]) @@ -538,4 +538,4 @@ assert_array_almost_equal(a_pinv,a_pinv2) if __name__ == "__main__": - ScipyTest().run() + NumpyTest().run() Modified: branches/0.5.2.x/Lib/linalg/tests/test_blas.py =================================================================== --- branches/0.5.2.x/Lib/linalg/tests/test_blas.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/linalg/tests/test_blas.py 2007-08-14 
20:53:46 UTC (rev 3236) @@ -23,7 +23,7 @@ from linalg import cblas restore_path() -class test_cblas1_simple(ScipyTestCase): +class test_cblas1_simple(NumpyTestCase): def check_axpy(self): for p in 'sd': @@ -35,7 +35,7 @@ if f is None: continue assert_array_almost_equal(f(5,[1,2j,3],[2,-1,3]),[7,10j-1,18]) -class test_fblas1_simple(ScipyTestCase): +class test_fblas1_simple(NumpyTestCase): def check_axpy(self): for p in 'sd': @@ -121,7 +121,7 @@ assert_equal(f([-5,4+3j,6]),1) #XXX: need tests for rot,rotm,rotg,rotmg -class test_fblas2_simple(ScipyTestCase): +class test_fblas2_simple(NumpyTestCase): def check_gemv(self): for p in 'sd': @@ -169,7 +169,7 @@ 2j, 3j],[3j,4j]),[[6,8],[12,16],[18,24]]) -class test_fblas3_simple(ScipyTestCase): +class test_fblas3_simple(NumpyTestCase): def check_gemm(self): for p in 'sd': @@ -183,7 +183,7 @@ assert_array_almost_equal(f(3j,[3-4j],[-4]),[[-48-36j]]) assert_array_almost_equal(f(3j,[3-4j],[-4],3,[5j]),[-48-21j]) -class test_blas(ScipyTestCase): +class test_blas(NumpyTestCase): def check_fblas(self): if hasattr(fblas,'empty_module'): @@ -208,4 +208,4 @@ """ if __name__ == "__main__": - ScipyTest().run() + NumpyTest().run() Modified: branches/0.5.2.x/Lib/linalg/tests/test_decomp.py =================================================================== --- branches/0.5.2.x/Lib/linalg/tests/test_decomp.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/linalg/tests/test_decomp.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -32,7 +32,7 @@ def random(size): return rand(*size) -class test_eigvals(ScipyTestCase): +class test_eigvals(NumpyTestCase): def check_simple(self): a = [[1,2,3],[1,2,3],[2,5,6]] @@ -78,7 +78,7 @@ print ' (secs for %s calls)' % (repeat) -class test_eig(ScipyTestCase): +class test_eig(NumpyTestCase): def check_simple(self): a = [[1,2,3],[1,2,3],[2,5,6]] @@ -111,10 +111,10 @@ -class test_eig_banded(ScipyTestCase): +class test_eig_banded(NumpyTestCase): def __init__(self, *args): - ScipyTestCase.__init__(self, *args) + NumpyTestCase.__init__(self, *args) self.create_bandmat() @@ -396,7 +396,7 @@ -class test_lu(ScipyTestCase): +class test_lu(NumpyTestCase): def check_simple(self): a = [[1,2,3],[1,2,3],[2,5,6]] @@ -414,7 +414,7 @@ #XXX: need more tests -class test_lu_solve(ScipyTestCase): +class test_lu_solve(NumpyTestCase): def check_lu(self): a = random((10,10)) b = random((10,)) @@ -426,7 +426,7 @@ assert_array_equal(x1,x2) -class test_svd(ScipyTestCase): +class test_svd(NumpyTestCase): def check_simple(self): a = [[1,2,3],[1,20,3],[2,5,6]] @@ -499,7 +499,7 @@ for i in range(len(s)): sigma[i,i] = s[i] assert_array_almost_equal(dot(dot(u,sigma),vh),a) -class test_svdvals(ScipyTestCase): +class test_svdvals(NumpyTestCase): def check_simple(self): a = [[1,2,3],[1,2,3],[2,5,6]] @@ -537,12 +537,12 @@ assert len(s)==2 assert s[0]>=s[1] -class test_diagsvd(ScipyTestCase): +class test_diagsvd(NumpyTestCase): def check_simple(self): assert_array_almost_equal(diagsvd([1,0,0],3,3),[[1,0,0],[0,0,0],[0,0,0]]) -class test_cholesky(ScipyTestCase): +class test_cholesky(NumpyTestCase): def check_simple(self): a = [[8,2,3],[2,9,3],[3,3,6]] @@ -591,7 +591,7 @@ assert_array_almost_equal(cholesky(a,lower=1),c) -class test_qr(ScipyTestCase): +class test_qr(NumpyTestCase): def check_simple(self): a = [[8,2,3],[2,9,3],[5,3,6]] @@ -677,7 +677,7 @@ transp = transpose any = sometrue -class test_schur(ScipyTestCase): +class test_schur(NumpyTestCase): def check_simple(self): a = [[8,12,3],[2,9,3],[10,3,6]] @@ -689,7 +689,7 @@ tc2,zc2 = rsf2csf(tc,zc) 
assert_array_almost_equal(dot(dot(zc2,tc2),transp(conj(zc2))),a) -class test_hessenberg(ScipyTestCase): +class test_hessenberg(NumpyTestCase): def check_simple(self): a = [[-149, -50,-154], @@ -737,4 +737,4 @@ assert_array_almost_equal(h1,h) if __name__ == "__main__": - ScipyTest().run() + NumpyTest().run() Modified: branches/0.5.2.x/Lib/linalg/tests/test_fblas.py =================================================================== --- branches/0.5.2.x/Lib/linalg/tests/test_fblas.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/linalg/tests/test_fblas.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -40,7 +40,7 @@ ################################################## ### Test blas ?axpy -class base_axpy(ScipyTestCase): +class base_axpy(NumpyTestCase): def check_default_a(self): x = arange(3.,dtype=self.dtype) y = arange(3.,dtype=x.dtype) @@ -114,7 +114,7 @@ ################################################## ### Test blas ?scal -class base_scal(ScipyTestCase): +class base_scal(NumpyTestCase): def check_simple(self): x = arange(3.,dtype=self.dtype) real_x = x*3. @@ -159,7 +159,7 @@ ################################################## ### Test blas ?copy -class base_copy(ScipyTestCase): +class base_copy(NumpyTestCase): def check_simple(self): x = arange(3.,dtype=self.dtype) y = zeros(shape(x),x.dtype) @@ -228,7 +228,7 @@ ################################################## ### Test blas ?swap -class base_swap(ScipyTestCase): +class base_swap(NumpyTestCase): def check_simple(self): x = arange(3.,dtype=self.dtype) y = zeros(shape(x),x.dtype) @@ -304,7 +304,7 @@ ### Test blas ?gemv ### This will be a mess to test all cases. -class base_gemv(ScipyTestCase): +class base_gemv(NumpyTestCase): def get_data(self,x_stride=1,y_stride=1): mult = array(1, dtype = self.dtype) if self.dtype in [complex64, complex128]: @@ -409,7 +409,7 @@ ### Test blas ?ger ### This will be a mess to test all cases. -class base_ger(ScipyTestCase): +class base_ger(NumpyTestCase): def get_data(self,x_stride=1,y_stride=1): from numpy.random import normal alpha = array(1., dtype = self.dtype) @@ -518,4 +518,4 @@ """ if __name__ == "__main__": - ScipyTest().run() + NumpyTest().run() Modified: branches/0.5.2.x/Lib/linalg/tests/test_lapack.py =================================================================== --- branches/0.5.2.x/Lib/linalg/tests/test_lapack.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/linalg/tests/test_lapack.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -20,7 +20,7 @@ from linalg import clapack restore_path() -class test_flapack_simple(ScipyTestCase): +class test_flapack_simple(NumpyTestCase): def check_gebal(self): a = [[1,2,3],[4,5,6],[7,8,9]] @@ -52,7 +52,7 @@ ht,tau,info = f(a) assert not info,`info` -class test_lapack(ScipyTestCase): +class test_lapack(NumpyTestCase): def check_flapack(self): if hasattr(flapack,'empty_module'): @@ -77,4 +77,4 @@ """ if __name__ == "__main__": - ScipyTest().run() + NumpyTest().run() Modified: branches/0.5.2.x/Lib/linalg/tests/test_matfuncs.py =================================================================== --- branches/0.5.2.x/Lib/linalg/tests/test_matfuncs.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/linalg/tests/test_matfuncs.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -25,7 +25,7 @@ from linalg import signm,logm,funm, sqrtm, expm, expm2, expm3 restore_path() -class test_signm(ScipyTestCase): +class test_signm(NumpyTestCase): def check_nils(self): a = array([[ 29.2, -24.2, 69.5, 49.8, 7. 
], @@ -67,7 +67,7 @@ r = signm(a) #XXX: what would be the correct result? -class test_logm(ScipyTestCase): +class test_logm(NumpyTestCase): def check_nils(self): a = array([[ -2., 25., 0., 0., 0., 0., 0.], @@ -81,7 +81,7 @@ logm(m) -class test_sqrtm(ScipyTestCase): +class test_sqrtm(NumpyTestCase): def check_bad(self): # See http://www.maths.man.ac.uk/~nareports/narep336.ps.gz e = 2**-5 @@ -98,7 +98,7 @@ esa = sqrtm(a) assert_array_almost_equal(dot(esa,esa),a) -class test_expm(ScipyTestCase): +class test_expm(NumpyTestCase): def check_zero(self): a = array([[0.,0],[0,0]]) assert_array_almost_equal(expm(a),[[1,0],[0,1]]) @@ -106,4 +106,4 @@ assert_array_almost_equal(expm3(a),[[1,0],[0,1]]) if __name__ == "__main__": - ScipyTest().run() + NumpyTest().run() Modified: branches/0.5.2.x/Lib/linsolve/__init__.py =================================================================== --- branches/0.5.2.x/Lib/linsolve/__init__.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/linsolve/__init__.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -9,5 +9,5 @@ from linsolve import * __all__ = filter(lambda s:not s.startswith('_'),dir()) -from numpy.testing import ScipyTest -test = ScipyTest().test +from numpy.testing import NumpyTest +test = NumpyTest().test Modified: branches/0.5.2.x/Lib/linsolve/umfpack/__init__.py =================================================================== --- branches/0.5.2.x/Lib/linsolve/umfpack/__init__.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/linsolve/umfpack/__init__.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -3,5 +3,5 @@ from umfpack import * __all__ = filter(lambda s:not s.startswith('_'),dir()) -from numpy.testing import ScipyTest -test = ScipyTest().test +from numpy.testing import NumpyTest +test = NumpyTest().test Modified: branches/0.5.2.x/Lib/linsolve/umfpack/tests/test_umfpack.py =================================================================== --- branches/0.5.2.x/Lib/linsolve/umfpack/tests/test_umfpack.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/linsolve/umfpack/tests/test_umfpack.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -18,7 +18,7 @@ restore_path() -class test_solvers(ScipyTestCase): +class test_solvers(NumpyTestCase): """Tests inverting a sparse linear system""" def check_solve_complex_without_umfpack(self): @@ -72,7 +72,7 @@ -class test_factorization(ScipyTestCase): +class test_factorization(NumpyTestCase): """Tests factorizing a sparse linear system""" def check_complex_lu(self): @@ -132,4 +132,4 @@ if __name__ == "__main__": - ScipyTest().run() + NumpyTest().run() Modified: branches/0.5.2.x/Lib/maxentropy/__init__.py =================================================================== --- branches/0.5.2.x/Lib/maxentropy/__init__.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/maxentropy/__init__.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -8,5 +8,5 @@ from info import __doc__ from maxentropy import * -from numpy.testing import ScipyTest -test = ScipyTest().test +from numpy.testing import NumpyTest +test = NumpyTest().test Modified: branches/0.5.2.x/Lib/maxentropy/tests/test_maxentropy.py =================================================================== --- branches/0.5.2.x/Lib/maxentropy/tests/test_maxentropy.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/maxentropy/tests/test_maxentropy.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -16,7 +16,7 @@ import unittest -class test_maxentropy(ScipyTestCase): +class test_maxentropy(NumpyTestCase): """Test whether logsumexp() function correctly 
handles large inputs. """ @@ -41,4 +41,4 @@ if __name__ == "__main__": - ScipyTest().run() + NumpyTest().run() Modified: branches/0.5.2.x/Lib/misc/__init__.py =================================================================== --- branches/0.5.2.x/Lib/misc/__init__.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/misc/__init__.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -21,5 +21,5 @@ __all__ += common.__all__ -from numpy.testing import ScipyTest -test = ScipyTest().test +from numpy.testing import NumpyTest +test = NumpyTest().test Modified: branches/0.5.2.x/Lib/misc/ppimport.py =================================================================== --- branches/0.5.2.x/Lib/misc/ppimport.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/misc/ppimport.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -242,8 +242,8 @@ self.__dict__['_ppimport_p_frame'] = p_frame if location != 'sys.path': - from numpy.test.testing import ScipyTest - self.__dict__['test'] = ScipyTest(self).test + from numpy.test.testing import NumpyTest + self.__dict__['test'] = NumpyTest(self).test # install loader sys.modules[name] = self @@ -283,8 +283,8 @@ self.__dict__['_ppimport_module'] = module # XXX: Should we check the existence of module.test? Warn? - from numpy.test.testing import ScipyTest - module.test = ScipyTest(module).test + from numpy.test.testing import NumpyTest + module.test = NumpyTest(module).test return module Modified: branches/0.5.2.x/Lib/ndimage/__init__.py =================================================================== --- branches/0.5.2.x/Lib/ndimage/__init__.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/ndimage/__init__.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -36,6 +36,6 @@ from morphology import * from info import __doc__ -from numpy.testing import ScipyTest -test = ScipyTest().test +from numpy.testing import NumpyTest +test = NumpyTest().test Modified: branches/0.5.2.x/Lib/ndimage/tests/test_ndimage.py =================================================================== --- branches/0.5.2.x/Lib/ndimage/tests/test_ndimage.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/ndimage/tests/test_ndimage.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -62,7 +62,7 @@ return math.sqrt(t) -class test_ndimage(ScipyTestCase): +class test_ndimage(NumpyTestCase): def setUp(self): # list of numarray data types @@ -5523,4 +5523,4 @@ if __name__ == "__main__": #unittest.main() - ScipyTest().run() + NumpyTest().run() Modified: branches/0.5.2.x/Lib/optimize/__init__.py =================================================================== --- branches/0.5.2.x/Lib/optimize/__init__.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/optimize/__init__.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -13,5 +13,5 @@ from cobyla import fmin_cobyla __all__ = filter(lambda s:not s.startswith('_'),dir()) -from numpy.testing import ScipyTest -test = ScipyTest().test +from numpy.testing import NumpyTest +test = NumpyTest().test Modified: branches/0.5.2.x/Lib/optimize/tests/test_cobyla.py =================================================================== --- branches/0.5.2.x/Lib/optimize/tests/test_cobyla.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/optimize/tests/test_cobyla.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -6,7 +6,7 @@ restore_path() import math -class test_cobyla(ScipyTestCase): +class test_cobyla(NumpyTestCase): def check_simple(self, level=1): function = lambda x: x[0]**2 + abs(x[1])**3 @@ -20,4 +20,4 @@ assert_almost_equal(x, [x0, x1], decimal=5) if 
__name__ == "__main__": - ScipyTest().run() + NumpyTest().run() Modified: branches/0.5.2.x/Lib/optimize/tests/test_optimize.py =================================================================== --- branches/0.5.2.x/Lib/optimize/tests/test_optimize.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/optimize/tests/test_optimize.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -11,7 +11,7 @@ restore_path() -class test_optimize(ScipyTestCase): +class test_optimize(NumpyTestCase): """ Test case for a simple constrained entropy maximization problem (the machine translation example of Berger et al in Computational Linguistics, vol 22, num 1, pp 39--72, 1996.) @@ -124,4 +124,4 @@ if __name__ == "__main__": - ScipyTest().run() + NumpyTest().run() Modified: branches/0.5.2.x/Lib/optimize/tests/test_zeros.py =================================================================== --- branches/0.5.2.x/Lib/optimize/tests/test_zeros.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/optimize/tests/test_zeros.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -51,7 +51,7 @@ functions = [f2,f3,f4,f5,f6] fstrings = ['f2','f3','f4','f5','f6'] -class test_basic(ScipyTestCase) : +class test_basic(NumpyTestCase) : def run_test(self, method, name): a = .5 b = sqrt(3) @@ -93,4 +93,4 @@ print '\n\n' if __name__ == '__main__' : - ScipyTest().run() + NumpyTest().run() Modified: branches/0.5.2.x/Lib/sandbox/arpack/tests/test_arpack.py =================================================================== --- branches/0.5.2.x/Lib/sandbox/arpack/tests/test_arpack.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/sandbox/arpack/tests/test_arpack.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -19,7 +19,7 @@ import numpy from scipy.linalg import eig,eigh,norm -class test_eigen_nonsymmetric(ScipyTestCase): +class test_eigen_nonsymmetric(NumpyTestCase): def get_a1(self,typ): mat=numpy.array([[-2., -8., 1., 2., -5.], @@ -121,7 +121,7 @@ -class test_eigen_complex_nonsymmetric(ScipyTestCase): +class test_eigen_complex_nonsymmetric(NumpyTestCase): def get_a1(self,typ): mat=numpy.array([[-2., -8., 1., 2., -5.], @@ -218,7 +218,7 @@ -class test_eigen_symmetric(ScipyTestCase): +class test_eigen_symmetric(NumpyTestCase): def get_a1(self,typ): mat_a1=numpy.array([[ 2., 0., 0., -1., 0., -1.], @@ -290,7 +290,7 @@ self.end_eigenvalues(typ,k) -class test_eigen_complex_symmetric(ScipyTestCase): +class test_eigen_complex_symmetric(NumpyTestCase): def get_a1(self,typ): mat_a1=numpy.array([[ 2., 0., 0., -1., 0., -1.], @@ -352,4 +352,4 @@ if __name__ == "__main__": - ScipyTest().run() + NumpyTest().run() Modified: branches/0.5.2.x/Lib/sandbox/arpack/tests/test_speigs.py =================================================================== --- branches/0.5.2.x/Lib/sandbox/arpack/tests/test_speigs.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/sandbox/arpack/tests/test_speigs.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -8,7 +8,7 @@ import numpy as N -class test_eigs(ScipyTestCase): +class test_eigs(NumpyTestCase): def test(self): maxn=15 # Dimension of square matrix to be solved # Use a PDP^-1 factorisation to construct matrix with known @@ -36,7 +36,7 @@ assert_array_almost_equal(calc_vecs, N.array(vecs)[:,0:nev], decimal=7) -# class test_geneigs(ScipyTestCase): +# class test_geneigs(NumpyTestCase): # def test(self): # import pickle # import scipy.linsolve @@ -50,5 +50,5 @@ # 94.646308846854879, 95.30841709116271], decimal=11) if __name__ == "__main__": - ScipyTest().run() + NumpyTest().run() Modified: 
branches/0.5.2.x/Lib/sandbox/arraysetops/tests/test_arraysetops.py =================================================================== --- branches/0.5.2.x/Lib/sandbox/arraysetops/tests/test_arraysetops.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/sandbox/arraysetops/tests/test_arraysetops.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -24,7 +24,7 @@ from scipy.arraysetops import * restore_path() -class test_aso( ScipyTestCase ): +class test_aso( NumpyTestCase ): def chech_all(): test_unique1d() @@ -37,4 +37,4 @@ test_manyways() if __name__ == "__main__": - ScipyTest().run() + NumpyTest().run() Modified: branches/0.5.2.x/Lib/sandbox/cdavid/tests/test_autocorr.py =================================================================== --- branches/0.5.2.x/Lib/sandbox/cdavid/tests/test_autocorr.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/sandbox/cdavid/tests/test_autocorr.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -290,7 +290,7 @@ assert_array_equal(yt, yr) if __name__ == "__main__": - ScipyTest().run() + NumpyTest().run() #class test_autocorr_2d(NumpyTestCase): # def check_double(self): Modified: branches/0.5.2.x/Lib/sandbox/cdavid/tests/test_lpc.py =================================================================== --- branches/0.5.2.x/Lib/sandbox/cdavid/tests/test_lpc.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/sandbox/cdavid/tests/test_lpc.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -184,4 +184,4 @@ assert_array_almost_equal(k, kt) if __name__ == "__main__": - ScipyTest().run() + NumpyTest().run() Modified: branches/0.5.2.x/Lib/sandbox/exmplpackage/tests/test_foo.py =================================================================== --- branches/0.5.2.x/Lib/sandbox/exmplpackage/tests/test_foo.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/sandbox/exmplpackage/tests/test_foo.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -22,15 +22,15 @@ from exmplpackage.foo import * del sys.path[0] -class test_foo_bar(ScipyTestCase): +class test_foo_bar(NumpyTestCase): def check_simple(self, level=1): assert exmplpackage_foo_bar()=='Hello from exmplpackage_foo_bar' -class test_foo_gun(ScipyTestCase): +class test_foo_gun(NumpyTestCase): def check_simple(self, level=1): assert foo_gun()=='Hello from foo_gun' if __name__ == "__main__": - ScipyTest().run() + NumpyTest().run() Modified: branches/0.5.2.x/Lib/sandbox/exmplpackage/yyy/tests/test_yyy.py =================================================================== --- branches/0.5.2.x/Lib/sandbox/exmplpackage/yyy/tests/test_yyy.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/sandbox/exmplpackage/yyy/tests/test_yyy.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -7,10 +7,10 @@ from yyy import fun del sys.path[0] -class test_fun(ScipyTestCase): +class test_fun(NumpyTestCase): def check_simple(self, level=1): assert fun()=='Hello from yyy.fun' #... 
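For orientation, every test-module hunk in this commit applies the same mechanical rename: the base class ScipyTestCase becomes NumpyTestCase and the runner ScipyTest becomes NumpyTest, both imported from numpy.testing. A minimal sketch of the converted skeleton these files converge on (the function under test is a hypothetical stand-in; the pre-nose, Python 2-era numpy.testing API is assumed):

    from numpy.testing import NumpyTest, NumpyTestCase

    def fun():
        # hypothetical stand-in for the package code under test
        return 'Hello from yyy.fun'

    class test_fun(NumpyTestCase):        # was: class test_fun(ScipyTestCase)
        def check_simple(self, level=1):  # check_* methods are auto-collected
            assert fun() == 'Hello from yyy.fun'

    if __name__ == "__main__":
        NumpyTest().run()                 # was: ScipyTest().run()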
if __name__ == "__main__": - ScipyTest().run() + NumpyTest().run() Modified: branches/0.5.2.x/Lib/sandbox/models/tests/test_formula.py =================================================================== --- branches/0.5.2.x/Lib/sandbox/models/tests/test_formula.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/sandbox/models/tests/test_formula.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -43,7 +43,7 @@ f = intercept * t1 self.assertEqual(str(f), str(formula.formula(t1))) -class test_formula(ScipyTestCase): +class test_formula(NumpyTestCase): def setUp(self): self.X = R.standard_normal((40,10)) Modified: branches/0.5.2.x/Lib/sandbox/montecarlo/tests/test_dictsampler.py =================================================================== --- branches/0.5.2.x/Lib/sandbox/montecarlo/tests/test_dictsampler.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/sandbox/montecarlo/tests/test_dictsampler.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -21,7 +21,7 @@ import unittest -class test_dictsampler(ScipyTestCase): +class test_dictsampler(NumpyTestCase): def check_simple(self): """ # Sample from this discrete distribution: @@ -76,4 +76,4 @@ if __name__ == "__main__": - ScipyTest().run() + NumpyTest().run() Modified: branches/0.5.2.x/Lib/sandbox/montecarlo/tests/test_intsampler.py =================================================================== --- branches/0.5.2.x/Lib/sandbox/montecarlo/tests/test_intsampler.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/sandbox/montecarlo/tests/test_intsampler.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -30,7 +30,7 @@ import unittest -class test_intsampler(ScipyTestCase): +class test_intsampler(NumpyTestCase): def check_simple(self): # Sample from a Poisson distribution, P(lambda = 10.0) lam = 10.0 @@ -71,4 +71,4 @@ if __name__ == "__main__": - ScipyTest().run() + NumpyTest().run() Modified: branches/0.5.2.x/Lib/sandbox/spline/__init__.py =================================================================== --- branches/0.5.2.x/Lib/sandbox/spline/__init__.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/sandbox/spline/__init__.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -10,5 +10,5 @@ from spline import * __all__ = filter(lambda s:not s.startswith('_'),dir()) -from numpy.testing import ScipyTest -test = ScipyTest().test +from numpy.testing import NumpyTest +test = NumpyTest().test Modified: branches/0.5.2.x/Lib/sandbox/spline/tests/test_fitpack.py =================================================================== --- branches/0.5.2.x/Lib/sandbox/spline/tests/test_fitpack.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/sandbox/spline/tests/test_fitpack.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -22,7 +22,7 @@ RectBivariateSpline restore_path() -class test_UnivariateSpline(ScipyTestCase): +class test_UnivariateSpline(NumpyTestCase): def check_linear_constant(self): x = [1,2,3] y = [3,3,3] @@ -41,7 +41,7 @@ assert_almost_equal(lut.get_residual(),0.0) assert_array_almost_equal(lut([1,1.5,2]),[0,1,2]) -class test_LSQBivariateSpline(ScipyTestCase): +class test_LSQBivariateSpline(NumpyTestCase): def check_linear_constant(self): x = [1,1,1,2,2,2,3,3,3] y = [1,2,3,1,2,3,1,2,3] @@ -54,7 +54,7 @@ #print lut.get_coeffs() #print lut.get_residual() -class test_SmoothBivariateSpline(ScipyTestCase): +class test_SmoothBivariateSpline(NumpyTestCase): def check_linear_constant(self): x = [1,1,1,2,2,2,3,3,3] y = [1,2,3,1,2,3,1,2,3] @@ -75,7 +75,7 @@ assert_almost_equal(lut.get_residual(),0.0) 
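The package __init__.py hunks in this commit (linsolve, maxentropy, misc, optimize, signal, sparse, spline, stats, weave) all carry the same two-line hook after the public names are exported. Shown in isolation, with Python 2 semantics assumed (filter returns a list):

    # export everything public, then bind a test() entry point on the package
    __all__ = filter(lambda s: not s.startswith('_'), dir())
    from numpy.testing import NumpyTest   # was: ScipyTest
    test = NumpyTest().test               # was: ScipyTest().test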
assert_array_almost_equal(lut([1,1.5,2],[1,1.5]),[[0,0],[1,1],[2,2]]) -class test_RectBivariateSpline(ScipyTestCase): +class test_RectBivariateSpline(NumpyTestCase): def check_defaults(self): x = array([1,2,3,4,5]) y = array([1,2,3,4,5]) @@ -84,4 +84,4 @@ assert_array_almost_equal(lut(x,y),z) if __name__ == "__main__": - ScipyTest().run() + NumpyTest().run() Modified: branches/0.5.2.x/Lib/sandbox/spline/tests/test_interpolate.py =================================================================== --- branches/0.5.2.x/Lib/sandbox/spline/tests/test_interpolate.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/sandbox/spline/tests/test_interpolate.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -7,7 +7,7 @@ restore_path() -class test_interp2d(ScipyTestCase): +class test_interp2d(NumpyTestCase): def test_interp2d(self): y, x = mgrid[0:pi:20j, 0:pi:21j] z = sin(x+y) @@ -18,7 +18,7 @@ assert_almost_equal(I(u.ravel(), v.ravel()), sin(v+u), decimal=2) -class test_interp1d(ScipyTestCase): +class test_interp1d(NumpyTestCase): def setUp(self): self.x10 = np.arange(10.) @@ -195,4 +195,4 @@ if __name__ == "__main__": - ScipyTest().run() + NumpyTest().run() Modified: branches/0.5.2.x/Lib/signal/__init__.py =================================================================== --- branches/0.5.2.x/Lib/signal/__init__.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/signal/__init__.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -13,5 +13,5 @@ from wavelets import * __all__ = filter(lambda s:not s.startswith('_'),dir()) -from numpy.testing import ScipyTest -test = ScipyTest().test +from numpy.testing import NumpyTest +test = NumpyTest().test Modified: branches/0.5.2.x/Lib/signal/tests/test_signaltools.py =================================================================== --- branches/0.5.2.x/Lib/signal/tests/test_signaltools.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/signal/tests/test_signaltools.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -7,27 +7,27 @@ from numpy import array, arange -class test_convolve(ScipyTestCase): +class test_convolve(NumpyTestCase): def check_basic(self): a = [3,4,5,6,5,4] b = [1,2,3] c = signal.convolve(a,b) assert_array_equal(c,array([3,10,22,28,32,32,23,12])) -class test_medfilt(ScipyTestCase): +class test_medfilt(NumpyTestCase): def check_basic(self): f = [[3,4,5],[2,3,4],[1,2,5]] d = signal.medfilt(f) assert_array_equal(d, [[0,3,0],[2,3,3],[0,2,0]]) -class test_wiener(ScipyTestCase): +class test_wiener(NumpyTestCase): def check_basic(self): g = array([[5,6,4,3],[3,5,6,2],[2,3,5,6],[1,6,9,7]],'d') correct = array([[2.16374269,3.2222222222, 2.8888888889, 1.6666666667],[2.666666667, 4.33333333333, 4.44444444444, 2.8888888888],[2.222222222, 4.4444444444, 5.4444444444, 4.801066874837],[1.33333333333, 3.92735042735, 6.0712560386, 5.0404040404]]) h = signal.wiener(g) assert_array_almost_equal(h,correct,decimal=6) -class test_cspline1d_eval(ScipyTestCase): +class test_cspline1d_eval(NumpyTestCase): def check_basic(self): y=array([1,2,3,4,3,2,1,2,3.0]) x=arange(len(y)) @@ -42,4 +42,4 @@ if __name__ == "__main__": - ScipyTest().run() + NumpyTest().run() Modified: branches/0.5.2.x/Lib/sparse/__init__.py =================================================================== --- branches/0.5.2.x/Lib/sparse/__init__.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/sparse/__init__.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -5,5 +5,5 @@ from sparse import * __all__ = filter(lambda s:not s.startswith('_'),dir()) -from numpy.testing import ScipyTest -test = 
ScipyTest().test +from numpy.testing import NumpyTest +test = NumpyTest().test Modified: branches/0.5.2.x/Lib/sparse/tests/test_sparse.py =================================================================== --- branches/0.5.2.x/Lib/sparse/tests/test_sparse.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/sparse/tests/test_sparse.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -364,7 +364,7 @@ -class test_csr(_test_cs, _test_horiz_slicing, ScipyTestCase): +class test_csr(_test_cs, _test_horiz_slicing, NumpyTestCase): spmatrix = csr_matrix def check_constructor1(self): @@ -417,7 +417,7 @@ assert(e.A.dtype.type == mytype) -class test_csc(_test_cs, _test_vert_slicing, ScipyTestCase): +class test_csc(_test_cs, _test_vert_slicing, NumpyTestCase): spmatrix = csc_matrix def check_constructor1(self): @@ -462,7 +462,7 @@ assert(e.A.dtype.type == mytype) -class test_dok(_test_cs, ScipyTestCase): +class test_dok(_test_cs, NumpyTestCase): spmatrix = dok_matrix def check_mult(self): @@ -573,7 +573,7 @@ assert caught == 6 -class test_lil(_test_cs, _test_horiz_slicing, ScipyTestCase): +class test_lil(_test_cs, _test_horiz_slicing, NumpyTestCase): spmatrix = lil_matrix def check_mult(self): A = matrix(zeros((10,10))) @@ -616,7 +616,7 @@ assert_array_equal(C.A, D.A) -class test_construct_utils(ScipyTestCase): +class test_construct_utils(NumpyTestCase): def check_identity(self): a = spidentity(3) b = array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype='d') @@ -638,7 +638,7 @@ b = array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype='d') assert_array_equal(a.toarray(), b) -class test_coo(ScipyTestCase): +class test_coo(NumpyTestCase): def check_normalize( self ): @@ -672,4 +672,4 @@ if __name__ == "__main__": - ScipyTest().run() + NumpyTest().run() Modified: branches/0.5.2.x/Lib/special/__init__.py =================================================================== --- branches/0.5.2.x/Lib/special/__init__.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/special/__init__.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -18,5 +18,5 @@ register_func('i0',i0) del register_func -from numpy.testing import ScipyTest -test = ScipyTest().test +from numpy.testing import NumpyTest +test = NumpyTest().test Modified: branches/0.5.2.x/Lib/special/tests/test_basic.py =================================================================== --- branches/0.5.2.x/Lib/special/tests/test_basic.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/special/tests/test_basic.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -41,7 +41,7 @@ restore_path() -class test_cephes(ScipyTestCase): +class test_cephes(NumpyTestCase): def check_airy(self): cephes.airy(0) def check_airye(self): @@ -455,7 +455,7 @@ def check_wofz(self): cephes.wofz(0) -class test_airy(ScipyTestCase): +class test_airy(NumpyTestCase): def check_airy(self): #This tests the airy function to ensure 8 place accuracy in computation @@ -467,7 +467,7 @@ x = airy(-.36) assert_array_almost_equal(x,array([0.44508477,-0.23186773,0.44939534,0.48105354]),8) -class test_airye(ScipyTestCase): +class test_airye(NumpyTestCase): def check_airye(self): a = airye(0.01) @@ -479,7 +479,7 @@ b1[n] = b[n]*exp(-abs(real(2.0/3.0*0.01*sqrt(0.01)))) assert_array_almost_equal(a,b1,6) -class test_arange(ScipyTestCase): +class test_arange(NumpyTestCase): def check_arange(self): numstring = arange(0,2.21,.1) @@ -498,7 +498,7 @@ assert_array_equal(numstringc,array([3.3,7.3,11.3,15.3, 19.3,23.3])) -class test_ai_zeros(ScipyTestCase): +class test_ai_zeros(NumpyTestCase): def check_ai_zeros(self): ai = 
ai_zeros(1) @@ -507,7 +507,7 @@ array([ 0.5357]), array([ 0.7012])),4) -class test_array(ScipyTestCase): +class test_array(NumpyTestCase): def check_array(self): x = array([1,2,3,4]) @@ -517,7 +517,7 @@ a = arange(1,5,1) assert_array_equal(a,x) -class test_assoc_laguerre(ScipyTestCase): +class test_assoc_laguerre(NumpyTestCase): def check_assoc_laguerre(self): a1 = genlaguerre(11,1) @@ -526,36 +526,36 @@ a2 = assoc_laguerre(1,11,1) assert_array_almost_equal(a2,a1(1),8) -class test_besselpoly(ScipyTestCase): +class test_besselpoly(NumpyTestCase): def check_besselpoly(self): pass -class test_bei(ScipyTestCase): +class test_bei(NumpyTestCase): def check_bei(self): mbei = bei(2) assert_almost_equal(mbei, 0.9722916273066613,5)#this may not be exact -class test_beip(ScipyTestCase): +class test_beip(NumpyTestCase): def check_beip(self): mbeip = beip(2) assert_almost_equal(mbeip,0.91701361338403631,5)#this may not be exact -class test_ber(ScipyTestCase): +class test_ber(NumpyTestCase): def check_ber(self): mber = ber(2) assert_almost_equal(mber,0.75173418271380821,5)#this may not be exact -class test_berp(ScipyTestCase): +class test_berp(NumpyTestCase): def check_berp(self): mberp = berp(2) assert_almost_equal(mberp,-0.49306712470943909,5)#this may not be exact -class test_bei_zeros(ScipyTestCase): +class test_bei_zeros(NumpyTestCase): def check_bei_zeros(self): bi = bi_zeros(5) @@ -584,7 +584,7 @@ 0.929983638568022]),11) -class test_beip_zeros(ScipyTestCase): +class test_beip_zeros(NumpyTestCase): def check_beip_zeros(self): bip = beip_zeros(5) @@ -593,7 +593,7 @@ 12.742147523633703, 17.193431752512542, 21.641143941167325]),4) -class test_ber_zeros(ScipyTestCase): +class test_ber_zeros(NumpyTestCase): def check_ber_zeros(self): ber = ber_zeros(5) @@ -603,7 +603,7 @@ 16.11356, 20.55463]),4) -class test_bernoulli(ScipyTestCase): +class test_bernoulli(NumpyTestCase): def check_bernoulli(self): brn = bernoulli(5) @@ -614,7 +614,7 @@ -0.0333, 0.0000]),4) -class test_berp_zeros(ScipyTestCase): +class test_berp_zeros(NumpyTestCase): def check_berp_zeros(self): brp = berp_zeros(5) @@ -623,34 +623,34 @@ 14.96844, 19.41758, 23.86430]),4) -class test_beta(ScipyTestCase): +class test_beta(NumpyTestCase): def check_beta(self): bet = beta(2,4) betg = (gamma(2)*gamma(4))/gamma(6) assert_almost_equal(bet,betg,8) -class test_betaln(ScipyTestCase): +class test_betaln(NumpyTestCase): def check_betaln(self): betln = betaln(2,4) bet = log(abs(beta(2,4))) assert_almost_equal(betln,bet,8) -class test_betainc(ScipyTestCase): +class test_betainc(NumpyTestCase): def check_betainc(self): btinc = betainc(1,1,.2) assert_almost_equal(btinc,0.2,8) -class test_betaincinv(ScipyTestCase): +class test_betaincinv(NumpyTestCase): def check_betaincinv(self): y = betaincinv(2,4,.5) comp = betainc(2,4,y) assert_almost_equal(comp,.5,5) -class test_bi_zeros(ScipyTestCase): +class test_bi_zeros(NumpyTestCase): def check_bi_zeros(self): bi = bi_zeros(2) @@ -660,7 +660,7 @@ array([ 0.60195789 , -0.76031014])) assert_array_almost_equal(bi,bia,4) -class test_chebyc(ScipyTestCase): +class test_chebyc(NumpyTestCase): def check_chebyc(self): C0 = chebyc(0) @@ -677,7 +677,7 @@ assert_array_almost_equal(C4.c,[1,0,-4,0,2],13) assert_array_almost_equal(C5.c,[1,0,-5,0,5,0],13) -class test_chebys(ScipyTestCase): +class test_chebys(NumpyTestCase): def check_chebys(self): S0 = chebys(0) @@ -693,7 +693,7 @@ assert_array_almost_equal(S4.c,[1,0,-3,0,1],13) assert_array_almost_equal(S5.c,[1,0,-4,0,3,0],13) -class test_chebyt(ScipyTestCase): +class 
test_chebyt(NumpyTestCase): def check_chebyt(self): T0 = chebyt(0) @@ -709,7 +709,7 @@ assert_array_almost_equal(T4.c,[8,0,-8,0,1],13) assert_array_almost_equal(T5.c,[16,0,-20,0,5,0],13) -class test_chebyu(ScipyTestCase): +class test_chebyu(NumpyTestCase): def check_chebyu(self): U0 = chebyu(0) @@ -725,14 +725,14 @@ assert_array_almost_equal(U4.c,[16,0,-12,0,1],13) assert_array_almost_equal(U5.c,[32,0,-32,0,6,0],13) -class test_choose(ScipyTestCase): +class test_choose(NumpyTestCase): def check_choose(self): carray = [1,3,2,4,6,5] chose = choose([1,3,5],carray) assert_array_equal(chose,array([3,4,5])) -class test_cbrt(ScipyTestCase): +class test_cbrt(NumpyTestCase): def check_cbrt(self): cb = cbrt(27) @@ -744,7 +744,7 @@ cbrl1 = 27.9**(1.0/3.0) assert_almost_equal(cb1,cbrl1,8) -class test_cosdg(ScipyTestCase): +class test_cosdg(NumpyTestCase): def check_cosdg(self): cdg = cosdg(90) @@ -756,14 +756,14 @@ cdgmrl = cos(pi/6.0) assert_almost_equal(cdgm,cdgmrl,8) -class test_cosm1(ScipyTestCase): +class test_cosm1(NumpyTestCase): def check_cosm1(self): cs = (cosm1(0),cosm1(.3),cosm1(pi/10)) csrl = (cos(0)-1,cos(.3)-1,cos(pi/10)-1) assert_array_almost_equal(cs,csrl,8) -class test_cotdg(ScipyTestCase): +class test_cotdg(NumpyTestCase): def check_cotdg(self): ct = cotdg(30) @@ -790,20 +790,20 @@ assert_almost_equal(cotdg(-315), 1.0, 14) assert_almost_equal(cotdg(765), 1.0, 14) -class test_ellipj(ScipyTestCase): +class test_ellipj(NumpyTestCase): def check_ellipj(self): el = ellipj(0.2,0) rel = [sin(0.2),cos(0.2),1.0,0.20] assert_array_almost_equal(el,rel,13) -class test_ellipk(ScipyTestCase): +class test_ellipk(NumpyTestCase): def check_ellipk(self): elk = ellipk(.2) assert_almost_equal(elk,1.659623598610528,11) -class test_ellipkinc(ScipyTestCase): +class test_ellipkinc(NumpyTestCase): def check_ellipkinc(self): elkinc = ellipkinc(pi/2,.2) @@ -817,13 +817,13 @@ # From pg. 
614 of A & S -class test_ellipe(ScipyTestCase): +class test_ellipe(NumpyTestCase): def check_ellipe(self): ele = ellipe(.2) assert_almost_equal(ele,1.4890350580958529,8) -class test_ellipeinc(ScipyTestCase): +class test_ellipeinc(NumpyTestCase): def check_ellipeinc(self): eleinc = ellipeinc(pi/2,.2) @@ -836,13 +836,13 @@ assert_almost_equal(eleinc, 0.58823065, 8) -class test_erf(ScipyTestCase): +class test_erf(NumpyTestCase): def check_erf(self): er = erf(.25) assert_almost_equal(er,0.2763263902,8) -class test_erf_zeros(ScipyTestCase): +class test_erf_zeros(NumpyTestCase): def check_erf_zeros(self): erz = erf_zeros(5) @@ -853,19 +853,19 @@ 3.76900557+4.06069723j]) assert_array_almost_equal(erz,erzr,4) -class test_erfcinv(ScipyTestCase): +class test_erfcinv(NumpyTestCase): def check_erfcinv(self): i = erfcinv(1) assert_equal(i,0) -class test_erfinv(ScipyTestCase): +class test_erfinv(NumpyTestCase): def check_erfinv(self): i = erfinv(0) assert_equal(i,0) -class test_errprint(ScipyTestCase): +class test_errprint(NumpyTestCase): def check_errprint(self): a = errprint() @@ -876,7 +876,7 @@ assert_equal(d,b) #makes sure state was returned #assert_equal(d,1-a) -class test_euler(ScipyTestCase): +class test_euler(NumpyTestCase): def check_euler(self): eu0 = euler(0) @@ -899,7 +899,7 @@ errmax = max(err) assert_almost_equal(errmax, 0.0, 14) -class test_exp2(ScipyTestCase): +class test_exp2(NumpyTestCase): def check_exp2(self): ex = exp2(2) @@ -911,7 +911,7 @@ exmrl = 2**(2.5) assert_almost_equal(exm,exmrl,8) -class test_exp10(ScipyTestCase): +class test_exp10(NumpyTestCase): def check_exp10(self): ex = exp10(2) @@ -923,7 +923,7 @@ exmrl = 10**(2.5) assert_almost_equal(exm,exmrl,8) -class test_expm1(ScipyTestCase): +class test_expm1(NumpyTestCase): def check_expm1(self): ex = (expm1(2),expm1(3),expm1(4)) @@ -935,13 +935,13 @@ exrl1 = (exp(2)-1,exp(2.1)-1,exp(2.2)-1) assert_array_almost_equal(ex1,exrl1,8) -class test_fresnel(ScipyTestCase): +class test_fresnel(NumpyTestCase): def check_fresnel(self): frs = array(fresnel(.5)) assert_array_almost_equal(frs,array([0.064732432859999287, 0.49234422587144644]),8) -class test_fresnel_zeros(ScipyTestCase): +class test_fresnel_zeros(NumpyTestCase): # values from pg 329 Table 7.11 of A & S # slightly corrected in 4th decimal place @@ -975,48 +975,48 @@ assert_array_almost_equal(frs,szo,12) -class test_gamma(ScipyTestCase): +class test_gamma(NumpyTestCase): def check_gamma(self): gam = gamma(5) assert_equal(gam,24.0) -class test_gammaln(ScipyTestCase): +class test_gammaln(NumpyTestCase): def check_gammaln(self): gamln = gammaln(3) lngam = log(gamma(3)) assert_almost_equal(gamln,lngam,8) -class test_gammainc(ScipyTestCase): +class test_gammainc(NumpyTestCase): def check_gammainc(self): gama = gammainc(.5,.5) assert_almost_equal(gama,.7,1) -class test_gammaincc(ScipyTestCase): +class test_gammaincc(NumpyTestCase): def check_gammaincc(self): gicc = gammaincc(.5,.5) greal = 1 - gammainc(.5,.5) assert_almost_equal(gicc,greal,8) -class test_gammainccinv(ScipyTestCase): +class test_gammainccinv(NumpyTestCase): def check_gammainccinv(self): gccinv = gammainccinv(.5,.5) gcinv = gammaincinv(.5,.5) assert_almost_equal(gccinv,gcinv,8) -class test_gammaincinv(ScipyTestCase): +class test_gammaincinv(NumpyTestCase): def check_gammaincinv(self): y = gammaincinv(.4,.4) x = gammainc(.4,y) assert_almost_equal(x,0.4,1) -class test_hankel1(ScipyTestCase): +class test_hankel1(NumpyTestCase): def check_negv(self): assert_almost_equal(hankel1(-3,2), -hankel1(3,2), 14) @@ -1025,7 
+1025,7 @@ hankrl = (jv(1,.1)+yv(1,.1)*1j) assert_almost_equal(hank1,hankrl,8) -class test_hankel1e(ScipyTestCase): +class test_hankel1e(NumpyTestCase): def check_negv(self): assert_almost_equal(hankel1e(-3,2), -hankel1e(3,2), 14) @@ -1034,7 +1034,7 @@ hankrle = hankel1(1,.1)*exp(-.1j) assert_almost_equal(hank1e,hankrle,8) -class test_hankel2(ScipyTestCase): +class test_hankel2(NumpyTestCase): def check_negv(self): assert_almost_equal(hankel2(-3,2), -hankel2(3,2), 14) @@ -1043,7 +1043,7 @@ hankrl2 = (jv(1,.1)-yv(1,.1)*1j) assert_almost_equal(hank2,hankrl2,8) -class test_hankel2e(ScipyTestCase): +class test_hankel2e(NumpyTestCase): def check_negv(self): assert_almost_equal(hankel2e(-3,2), -hankel2e(3,2), 14) @@ -1052,7 +1052,7 @@ hankrl2e = hankel2e(1,.1) assert_almost_equal(hank2e,hankrl2e,8) -class test_hermite(ScipyTestCase): +class test_hermite(NumpyTestCase): def check_hermite(self): H0 = hermite(0) @@ -1093,7 +1093,7 @@ _gam = cephes.gamma -class test_gegenbauer(ScipyTestCase): +class test_gegenbauer(NumpyTestCase): def check_gegenbauer(self): a = 5*rand()-0.5 @@ -1116,7 +1116,7 @@ 0,15*poch(a,3),0])/15.0,11) -class test_h1vp(ScipyTestCase): +class test_h1vp(NumpyTestCase): def check_h1vp(self): @@ -1124,36 +1124,36 @@ h1real = (jvp(1,.1)+yvp(1,.1)*1j) assert_almost_equal(h1,h1real,8) -class test_h2vp(ScipyTestCase): +class test_h2vp(NumpyTestCase): def check_h2vp(self): h2 = h2vp(1,.1) h2real = (jvp(1,.1)-yvp(1,.1)*1j) assert_almost_equal(h2,h2real,8) -class test_hyp0f1(ScipyTestCase): +class test_hyp0f1(NumpyTestCase): def check_hyp0f1(self): pass -class test_hyp1f1(ScipyTestCase): +class test_hyp1f1(NumpyTestCase): def check_hyp1f1(self): hyp1 = hyp1f1(.1,.1,.3) assert_almost_equal(hyp1, 1.3498588075760032,7) -class test_hyp1f2(ScipyTestCase): +class test_hyp1f2(NumpyTestCase): def check_hyp1f2(self): pass -class test_hyp2f0(ScipyTestCase): +class test_hyp2f0(NumpyTestCase): def check_hyp2f0(self): pass -class test_hyp2f1(ScipyTestCase): +class test_hyp2f1(NumpyTestCase): def check_hyp2f1(self): # a collection of special cases taken from AMS 55 values = [[0.5, 1, 1.5, 0.2**2, 0.5/0.2*log((1+0.2)/(1-0.2))], @@ -1174,12 +1174,12 @@ cv = hyp2f1(a, b, c, x) assert_almost_equal(cv, v, 8, err_msg='test #%d' % i) -class test_hyp3f0(ScipyTestCase): +class test_hyp3f0(NumpyTestCase): def check_hyp3f0(self): pass -class test_hyperu(ScipyTestCase): +class test_hyperu(NumpyTestCase): def check_hyperu(self): val1 = hyperu(1,0.1,100) @@ -1194,7 +1194,7 @@ /(gamma(a)*gamma(2-b))) assert_array_almost_equal(hypu,hprl,12) -class test_i0(ScipyTestCase): +class test_i0(NumpyTestCase): def check_i0(self): values = [[0.0, 1.0], [1e-10, 1.0], @@ -1209,14 +1209,14 @@ cv = i0(x) * exp(-x) assert_almost_equal(cv, v, 8, err_msg='test #%d' % i) -class test_i0e(ScipyTestCase): +class test_i0e(NumpyTestCase): def check_i0e(self): oize = i0e(.1) oizer = ive(0,.1) assert_almost_equal(oize,oizer,8) -class test_i1(ScipyTestCase): +class test_i1(NumpyTestCase): def check_i1(self): values = [[0.0, 0.0], @@ -1231,38 +1231,38 @@ cv = i1(x) * exp(-x) assert_almost_equal(cv, v, 8, err_msg='test #%d' % i) -class test_i1e(ScipyTestCase): +class test_i1e(NumpyTestCase): def check_i1e(self): oi1e = i1e(.1) oi1er = ive(1,.1) assert_almost_equal(oi1e,oi1er,8) -class test_iti0k0(ScipyTestCase): +class test_iti0k0(NumpyTestCase): def check_iti0k0(self): iti0 = array(iti0k0(5)) assert_array_almost_equal(iti0,array([31.848667776169801, 1.5673873907283657]),5) -class test_it2i0k0(ScipyTestCase): +class test_it2i0k0(NumpyTestCase): 
def check_it2i0k0(self): it2k = it2i0k0(.1) assert_array_almost_equal(it2k,array([0.0012503906973464409, 3.3309450354686687]),6) -class test_itj0y0(ScipyTestCase): +class test_itj0y0(NumpyTestCase): def check_itj0y0(self): it0 = array(itj0y0(.2)) assert_array_almost_equal(it0,array([0.19933433254006822, -0.34570883800412566]),8) -class test_it2j0y0(ScipyTestCase): +class test_it2j0y0(NumpyTestCase): def check_it2j0y0(self): it2 = array(it2j0y0(.2)) assert_array_almost_equal(it2,array([0.0049937546274601858, -0.43423067011231614]),8) -class test_iv(ScipyTestCase): +class test_iv(NumpyTestCase): def check_negv(self): assert_equal(iv(3,2), iv(-3,2)) @@ -1270,7 +1270,7 @@ iv1 = iv(0,.1)*exp(-.1) assert_almost_equal(iv1,0.90710092578230106,10) -class test_ive(ScipyTestCase): +class test_ive(NumpyTestCase): def check_negv(self): assert_equal(ive(3,2), ive(-3,2)) @@ -1279,7 +1279,7 @@ iv1 = iv(0,.1)*exp(-.1) assert_almost_equal(ive1,iv1,10) -class test_ivp(ScipyTestCase): +class test_ivp(NumpyTestCase): def check_ivp0(self): assert_almost_equal(iv(1,2), ivp(0,2), 10) @@ -1288,21 +1288,21 @@ x = ivp(1,2) assert_almost_equal(x,y,10) -class test_j0(ScipyTestCase): +class test_j0(NumpyTestCase): def check_j0(self): oz = j0(.1) ozr = jn(0,.1) assert_almost_equal(oz,ozr,8) -class test_j1(ScipyTestCase): +class test_j1(NumpyTestCase): def check_j1(self): o1 = j1(.1) o1r = jn(1,.1) assert_almost_equal(o1,o1r,8) -class test_jacobi(ScipyTestCase): +class test_jacobi(NumpyTestCase): def check_jacobi(self): a = 5*rand() - 1 @@ -1323,13 +1323,13 @@ assert_array_almost_equal(P3.c,array(p3c)/48.0,13) -class test_jn(ScipyTestCase): +class test_jn(NumpyTestCase): def check_jn(self): jnnr = jn(1,.2) assert_almost_equal(jnnr,0.099500832639235995,8) -class test_jv(ScipyTestCase): +class test_jv(NumpyTestCase): def check_negv(self): assert_almost_equal(jv(-3,2), -jv(3,2), 14) @@ -1344,7 +1344,7 @@ yc = jv(v, x) assert_almost_equal(yc, y, 8, err_msg='test #%d' % i) -class test_jve(ScipyTestCase): +class test_jve(NumpyTestCase): def check_negv(self): assert_almost_equal(jve(-3,2), -jve(3,2), 14) @@ -1356,7 +1356,7 @@ jvexpr = jv(1,z)*exp(-abs(z.imag)) assert_almost_equal(jvexp1,jvexpr,8) -class test_jn_zeros(ScipyTestCase): +class test_jn_zeros(NumpyTestCase): def check_jn_zeros(self): jn0 = jn_zeros(0,5) @@ -1372,7 +1372,7 @@ 13.32369, 16.47063]),4) -class test_jnjnp_zeros(ScipyTestCase): +class test_jnjnp_zeros(NumpyTestCase): def check_jnjnp_zeros(self): pass @@ -1381,7 +1381,7 @@ #I don't think specfun jdzo is working properly the outputs do not seem to correlate #to the inputs -class test_jnp_zeros(ScipyTestCase): +class test_jnp_zeros(NumpyTestCase): def check_jnp_zeros(self): jnp = jnp_zeros(1,5) @@ -1391,7 +1391,7 @@ 11.70600, 14.86359]),4) -class test_jnyn_zeros(ScipyTestCase): +class test_jnyn_zeros(NumpyTestCase): def check_jnyn_zeros(self): jnz = jnyn_zeros(1,5) @@ -1416,48 +1416,48 @@ 13.28576, 16.44006])),4) -class test_jvp(ScipyTestCase): +class test_jvp(NumpyTestCase): def check_jvp(self): jvprim = jvp(2,2) jv0 = (jv(1,2)-jv(3,2))/2 assert_almost_equal(jvprim,jv0,10) -class test_k0(ScipyTestCase): +class test_k0(NumpyTestCase): def check_k0(self): ozk = k0(.1) ozkr = kv(0,.1) assert_almost_equal(ozk,ozkr,8) -class test_k0e(ScipyTestCase): +class test_k0e(NumpyTestCase): def check_k0e(self): ozke = k0e(.1) ozker = kve(0,.1) assert_almost_equal(ozke,ozker,8) -class test_k1(ScipyTestCase): +class test_k1(NumpyTestCase): def check_k1(self): o1k = k1(.1) o1kr = kv(1,.1) assert_almost_equal(o1k,o1kr,8) -class 
test_k1e(ScipyTestCase): +class test_k1e(NumpyTestCase): def check_k1e(self): o1ke = k1e(.1) o1ker = kve(1,.1) assert_almost_equal(o1ke,o1ker,8) -class test_kei(ScipyTestCase): +class test_kei(NumpyTestCase): def check_kei(self): mkei = kei(2) assert_almost_equal(mkei,-0.20240006776470432,5) -class test_kelvin(ScipyTestCase): +class test_kelvin(NumpyTestCase): def check_kelvin(self): mkelv = kelvin(2) @@ -1466,25 +1466,25 @@ berp(2)+beip(2)*1j, kerp(2)+keip(2)*1j),8) -class test_keip(ScipyTestCase): +class test_keip(NumpyTestCase): def check_keip(self): mkeip = keip(2) assert_almost_equal(mkeip,0.21980790991960536,5) -class test_ker(ScipyTestCase): +class test_ker(NumpyTestCase): def check_ker(self): mker = ker(2) assert_almost_equal(mker,-0.041664513991509472,5) -class test_kerp(ScipyTestCase): +class test_kerp(NumpyTestCase): def check_kerp(self): mkerp = kerp(2) assert_almost_equal(mkerp,-0.10660096588105264,5) -class test_kei_zeros(ScipyTestCase): +class test_kei_zeros(NumpyTestCase): def check_kei_zeros(self): kei = kei_zeros(5) @@ -1494,7 +1494,7 @@ 17.22314, 21.66464]),4) -class test_keip_zeros(ScipyTestCase): +class test_keip_zeros(NumpyTestCase): def check_keip_zeros(self): keip = keip_zeros(5) @@ -1506,7 +1506,7 @@ -class test_kelvin_zeros(ScipyTestCase): +class test_kelvin_zeros(NumpyTestCase): # numbers come from 9.9 of A&S pg. 381 def check_kelvin_zeros(self): @@ -1555,7 +1555,7 @@ 18.30717, 22.75379]),4) -class test_ker_zeros(ScipyTestCase): +class test_ker_zeros(NumpyTestCase): def check_ker_zeros(self): ker = ker_zeros(5) @@ -1565,7 +1565,7 @@ 15.00269, 19.44381]),4) -class test_kerp_zeros(ScipyTestCase): +class test_kerp_zeros(NumpyTestCase): def check_kerp_zeros(self): kerp = kerp_zeros(5) @@ -1575,13 +1575,13 @@ 16.08312, 20.53068]),4) -class test_kn(ScipyTestCase): +class test_kn(NumpyTestCase): def check_kn(self): kn1 = kn(0,.2) assert_almost_equal(kn1,1.7527038555281462,8) -class test_kv(ScipyTestCase): +class test_kv(NumpyTestCase): def check_negv(self): assert_equal(kv(3.0, 2.2), kv(-3.0, 2.2)) @@ -1596,7 +1596,7 @@ assert_almost_equal(kv2, 49.51242928773287, 10) -class test_kve(ScipyTestCase): +class test_kve(NumpyTestCase): def check_negv(self): assert_equal(kve(3.0, 2.2), kve(-3.0, 2.2)) @@ -1609,7 +1609,7 @@ kv2 = kv(0,z)*exp(z) assert_almost_equal(kve2,kv2,8) -class test_kvp(ScipyTestCase): +class test_kvp(NumpyTestCase): def check_kvp_v0n1(self): z = 2.2 assert_almost_equal(-kv(1,z), kvp(0,z, n=1), 10) @@ -1628,7 +1628,7 @@ x = kvp(v, z, n=2) assert_almost_equal(xc, x, 10) -class test_laguerre(ScipyTestCase): +class test_laguerre(NumpyTestCase): def check_laguerre(self): lag0 = laguerre(0) @@ -1657,7 +1657,7 @@ # Base polynomials come from Abrahmowitz and Stegan -class test_legendre(ScipyTestCase): +class test_legendre(NumpyTestCase): def check_legendre(self): leg0 = legendre(0) @@ -1674,7 +1674,7 @@ assert_almost_equal(leg5.c,array([63,0,-70,0,15,0])/8.0) -class test_lmbda(ScipyTestCase): +class test_lmbda(NumpyTestCase): def check_lmbda(self): lam = lmbda(1,.1) @@ -1682,7 +1682,7 @@ array([jvp(0,.1), -2*jv(1,.1)/.01 + 2*jvp(1,.1)/.1])) assert_array_almost_equal(lam,lamr,8) -class test_log1p(ScipyTestCase): +class test_log1p(NumpyTestCase): def check_log1p(self): l1p = (log1p(10),log1p(11),log1p(12)) @@ -1694,7 +1694,7 @@ l1pmrl = (log(2),log(2.1),log(2.2)) assert_array_almost_equal(l1pm,l1pmrl,8) -class test_lpmn(ScipyTestCase): +class test_lpmn(NumpyTestCase): def check_lpmn(self): @@ -1706,7 +1706,7 @@ 1.00000 , 1.50000]])),4) -class 
test_lpn(ScipyTestCase): +class test_lpn(NumpyTestCase): def check_lpn(self): lpnf = lpn(2,.5) @@ -1717,13 +1717,13 @@ 1.00000 , 1.50000])),4) -class test_lpmv(ScipyTestCase): +class test_lpmv(NumpyTestCase): def check_lpmv(self): lp = lpmv(0,2,.5) assert_almost_equal(lp,-0.125,3) -class test_lqmn(ScipyTestCase): +class test_lqmn(NumpyTestCase): def check_lqmn(self): lqmnf = lqmn(0,2,.5) @@ -1735,41 +1735,41 @@ -class test_lqn(ScipyTestCase): +class test_lqn(NumpyTestCase): def check_lqn(self): lqf = lqn(2,.5) assert_array_almost_equal(lqf,(array([ 0.5493, -0.7253, -0.8187]), array([ 1.3333, 1.216 , -0.8427])),4) -class test_mathieu_a(ScipyTestCase): +class test_mathieu_a(NumpyTestCase): def check_mathieu_a(self): pass -class test_mathieu_even_coef(ScipyTestCase): +class test_mathieu_even_coef(NumpyTestCase): def check_mathieu_even_coef(self): mc = mathieu_even_coef(2,5) #Q not defined broken and cannot figure out proper reporting order -class test_mathieu_odd_coef(ScipyTestCase): +class test_mathieu_odd_coef(NumpyTestCase): def check_mathieu_odd_coef(self): pass #same problem as above -class test_modfresnelp(ScipyTestCase): +class test_modfresnelp(NumpyTestCase): def check_modfresnelp(self): pass -class test_modfresnelm(ScipyTestCase): +class test_modfresnelm(NumpyTestCase): def check_modfresnelm(self): pass -class test_obl_cv_seq(ScipyTestCase): +class test_obl_cv_seq(NumpyTestCase): def check_obl_cv_seq(self): obl = obl_cv_seq(0,3,1) @@ -1778,7 +1778,7 @@ 5.486800, 11.492120]),5) -class test_pbdn_seq(ScipyTestCase): +class test_pbdn_seq(NumpyTestCase): def check_pbdn_seq(self): pb = pbdn_seq(1,.1) @@ -1787,25 +1787,25 @@ array([-0.0499, 0.9925])),4) -class test_pbdv(ScipyTestCase): +class test_pbdv(NumpyTestCase): def check_pbdv(self): pbv = pbdv(1,.2) derrl = 1/2*(.2)*pbdv(1,.2)[0] - pbdv(0,.2)[0] -class _test_pbdv_seq(ScipyTestCase): +class _test_pbdv_seq(NumpyTestCase): def check_pbdv_seq(self): pbn = pbdn_seq(1,.1) pbv = pbdv_seq(1,.1) assert_array_almost_equal(pbv,(real(pbn[0]),real(pbn[1])),4) -class test_pbvv_seq(ScipyTestCase): +class test_pbvv_seq(NumpyTestCase): def check_pbvv_seq(self): pass -class test_polygamma(ScipyTestCase): +class test_polygamma(NumpyTestCase): # from Table 6.2 (pg. 
271) of A&S def check_polygamma(self): @@ -1814,7 +1814,7 @@ assert_almost_equal(poly2,-2.4041138063,10) assert_almost_equal(poly3,6.4939394023,10) -class test_pro_cv_seq(ScipyTestCase): +class test_pro_cv_seq(NumpyTestCase): def check_pro_cv_seq(self): prol = pro_cv_seq(0,3,1) @@ -1823,13 +1823,13 @@ 6.533471, 12.514462]),5) -class test_psi(ScipyTestCase): +class test_psi(NumpyTestCase): def check_psi(self): ps = psi(1) assert_almost_equal(ps,-0.57721566490153287,8) -class test_radian(ScipyTestCase): +class test_radian(NumpyTestCase): def check_radian(self): rad = radian(90,0,0) @@ -1839,7 +1839,7 @@ rad1 = radian(90,1,60) assert_almost_equal(rad1,pi/2+0.0005816135199345904,5) -class test_reshape(ScipyTestCase): +class test_reshape(NumpyTestCase): def check_reshape(self): a = (array([1,2,3]),array([4,5,6])) @@ -1851,28 +1851,28 @@ [3, 4], [5, 6]])) -class test_rgamma(ScipyTestCase): +class test_rgamma(NumpyTestCase): def check_rgamma(self): rgam = rgamma(8) rlgam = 1/gamma(8) assert_almost_equal(rgam,rlgam,8) -class test_riccati_jn(ScipyTestCase): +class test_riccati_jn(NumpyTestCase): def check_riccati_jn(self): jnrl = (sph_jn(1,.2)[0]*.2,sph_jn(1,.2)[0]+sph_jn(1,.2)[1]*.2) ricjn = riccati_jn(1,.2) assert_array_almost_equal(ricjn,jnrl,8) -class test_riccati_yn(ScipyTestCase): +class test_riccati_yn(NumpyTestCase): def check_riccati_yn(self): ynrl = (sph_yn(1,.2)[0]*.2,sph_yn(1,.2)[0]+sph_yn(1,.2)[1]*.2) ricyn = riccati_yn(1,.2) assert_array_almost_equal(ricyn,ynrl,8) -class test_round(ScipyTestCase): +class test_round(NumpyTestCase): def check_round(self): rnd = map(int,(round(10.1),round(10.4),round(10.5),round(10.6))) @@ -1885,7 +1885,7 @@ rndrl = (10,10,10,11) assert_array_equal(rnd,rndrl) -class _test_sh_legendre(ScipyTestCase): +class _test_sh_legendre(NumpyTestCase): def check_sh_legendre(self): # P*_n(x) = P_n(2x-1) @@ -1909,7 +1909,7 @@ assert_array_almost_equal(Ps4.c,pse4.c,12) assert_array_almost_equal(Ps5.c,pse5.c,12) -class _test_sh_chebyt(ScipyTestCase): +class _test_sh_chebyt(NumpyTestCase): def check_sh_chebyt(self): # T*_n(x) = T_n(2x-1) @@ -1934,7 +1934,7 @@ assert_array_almost_equal(Ts5.c,tse5.c,12) -class _test_sh_chebyu(ScipyTestCase): +class _test_sh_chebyu(NumpyTestCase): def check_sh_chebyu(self): # U*_n(x) = U_n(2x-1) @@ -1958,7 +1958,7 @@ assert_array_almost_equal(Us4.c,use4.c,12) assert_array_almost_equal(Us5.c,use5.c,11) -class _test_sh_jacobi(ScipyTestCase): +class _test_sh_jacobi(NumpyTestCase): def check_sh_jacobi(self): # G^(p,q)_n(x) = n! 
gamma(n+p)/gamma(2*n+p) * P^(p-q,q-1)_n(2*x-1) @@ -1988,7 +1988,7 @@ assert_array_almost_equal(G5.c,ge5.c,13) -class test_sinc(ScipyTestCase): +class test_sinc(NumpyTestCase): def check_sinc(self): c = arange(-2,2,.1) @@ -2000,7 +2000,7 @@ x = 0.0 assert_equal(sinc(x),1.0) -class test_sindg(ScipyTestCase): +class test_sindg(NumpyTestCase): def check_sindg(self): sn = sindg(90) @@ -2014,13 +2014,13 @@ snmrl1 = sin(pi/4.0) assert_almost_equal(snm1,snmrl1,8) -class test_sph_harm(ScipyTestCase): +class test_sph_harm(NumpyTestCase): def check_sph_harm(self): pass -class test_sph_in(ScipyTestCase): +class test_sph_in(NumpyTestCase): def check_sph_in(self): i1n = sph_in(1,.2) @@ -2030,14 +2030,14 @@ 0.066933714568029540839]),12) assert_array_almost_equal(i1n[1],[inp0,inp1],12) -class test_sph_inkn(ScipyTestCase): +class test_sph_inkn(NumpyTestCase): def check_sph_inkn(self): spikn = r_[sph_in(1,.2)+sph_kn(1,.2)] inkn = r_[sph_inkn(1,.2)] assert_array_almost_equal(inkn,spikn,10) -class test_sph_jn(ScipyTestCase): +class test_sph_jn(NumpyTestCase): def check_sph_jn(self): s1 = sph_jn(2,.2) @@ -2049,14 +2049,14 @@ 0.0026590560795273856680],12) assert_array_almost_equal(s1[1],[s10,s11,s12],12) -class test_sph_jnyn(ScipyTestCase): +class test_sph_jnyn(NumpyTestCase): def check_sph_jnyn(self): jnyn = r_[sph_jn(1,.2) + sph_yn(1,.2)] # tuple addition jnyn1 = r_[sph_jnyn(1,.2)] assert_array_almost_equal(jnyn1,jnyn,9) -class test_sph_kn(ScipyTestCase): +class test_sph_kn(NumpyTestCase): def check_sph_kn(self): kn = sph_kn(2,.2) @@ -2068,7 +2068,7 @@ 585.15696310385559829],12) assert_array_almost_equal(kn[1],[kn0,kn1,kn2],9) -class test_sph_yn(ScipyTestCase): +class test_sph_yn(NumpyTestCase): def check_sph_yn(self): sy1 = sph_yn(2,.2)[0][2] @@ -2079,14 +2079,14 @@ sy3 = sph_yn(1,.2)[1][1] assert_almost_equal(sy3,sphpy,4) #compare correct derivative val. (correct =-system val). 
-class test_take(ScipyTestCase): +class test_take(NumpyTestCase): def check_take(self): a = array([0,1,2,3,4,5,6,7,8]) tka = take(a,(0,4,5,8),axis=0) assert_array_equal(tka,array([0,4,5,8])) -class test_tandg(ScipyTestCase): +class test_tandg(NumpyTestCase): def check_tandg(self): tn = tandg(30) @@ -2114,21 +2114,21 @@ assert_almost_equal(tandg(315), -1.0, 14) assert_almost_equal(tandg(-315), 1.0, 14) -class test_y0(ScipyTestCase): +class test_y0(NumpyTestCase): def check_y0(self): oz = y0(.1) ozr = yn(0,.1) assert_almost_equal(oz,ozr,8) -class test_y1(ScipyTestCase): +class test_y1(NumpyTestCase): def check_y1(self): o1 = y1(.1) o1r = yn(1,.1) assert_almost_equal(o1,o1r,8) -class test_y0_zeros(ScipyTestCase): +class test_y0_zeros(NumpyTestCase): def check_y0_zeros(self): yo,ypo = y0_zeros(2) @@ -2139,37 +2139,37 @@ assert_array_almost_equal(abs(yv(1,all)-allval),0.0,11) -class test_y1_zeros(ScipyTestCase): +class test_y1_zeros(NumpyTestCase): def check_y1_zeros(self): y1 = y1_zeros(1) assert_array_almost_equal(y1,(array([2.19714]),array([0.52079])),5) -class test_y1p_zeros(ScipyTestCase): +class test_y1p_zeros(NumpyTestCase): def check_y1p_zeros(self): y1p = y1p_zeros(1,complex=1) assert_array_almost_equal(y1p,(array([ 0.5768+0.904j]), array([-0.7635+0.5892j])),3) -class test_yn_zeros(ScipyTestCase): +class test_yn_zeros(NumpyTestCase): def check_yn_zeros(self): an = yn_zeros(4,2) assert_array_almost_equal(an,array([ 5.64515, 9.36162]),5) -class test_ynp_zeros(ScipyTestCase): +class test_ynp_zeros(NumpyTestCase): def check_ynp_zeros(self): ao = ynp_zeros(0,2) assert_array_almost_equal(ao,array([ 2.19714133, 5.42968104]),6) -class test_yn(ScipyTestCase): +class test_yn(NumpyTestCase): def check_yn(self): yn2n = yn(1,.2) assert_almost_equal(yn2n,-3.3238249881118471,8) -class test_yv(ScipyTestCase): +class test_yv(NumpyTestCase): def check_negv(self): assert_almost_equal(yv(-3,2), -yv(3,2), 14) @@ -2177,7 +2177,7 @@ yv2 = yv(1,.2) assert_almost_equal(yv2,-3.3238249881118471,8) -class test_yve(ScipyTestCase): +class test_yve(NumpyTestCase): def check_negv(self): assert_almost_equal(yve(-3,2), -yve(3,2), 14) @@ -2188,14 +2188,14 @@ yve22 = yve(1,.2+1j) assert_almost_equal(yve22,yve2r,8) -class test_yvp(ScipyTestCase): +class test_yvp(NumpyTestCase): def check_yvp(self): yvpr = (yv(1,.2) - yv(3,.2))/2.0 yvp1 = yvp(2,.2) assert_array_almost_equal(yvp1,yvpr,10) -class test_zeros(ScipyTestCase): +class test_zeros(NumpyTestCase): def check_zeros(self): b = zeros((1,11)) @@ -2205,4 +2205,4 @@ [0, 0]])) if __name__ == "__main__": - ScipyTest().run() + NumpyTest().run() Modified: branches/0.5.2.x/Lib/stats/__init__.py =================================================================== --- branches/0.5.2.x/Lib/stats/__init__.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/stats/__init__.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -11,5 +11,5 @@ from kde import gaussian_kde __all__ = filter(lambda s:not s.startswith('_'),dir()) -from numpy.testing import ScipyTest -test = ScipyTest().test +from numpy.testing import NumpyTest +test = NumpyTest().test Modified: branches/0.5.2.x/Lib/stats/tests/test_distributions.py =================================================================== --- branches/0.5.2.x/Lib/stats/tests/test_distributions.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/stats/tests/test_distributions.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -59,7 +59,7 @@ else: args = str(tuple(1.0+rand(nargs))) exstr = r""" -class test_%s(ScipyTestCase): +class 
test_%s(NumpyTestCase): def check_cdf(self): D,pval = stats.kstest('%s','',args=%s,N=30) if (pval < %f): @@ -71,7 +71,7 @@ exec exstr -class test_randint(ScipyTestCase): +class test_randint(NumpyTestCase): def check_rvs(self): vals = stats.randint.rvs(5,30,size=100) assert(numpy.all(vals < 30) & numpy.all(vals >= 5)) @@ -97,7 +97,7 @@ vals = stats.randint.cdf(x,5,30) assert_array_almost_equal(vals, out, decimal=12) -class test_binom(ScipyTestCase): +class test_binom(NumpyTestCase): def check_rvs(self): vals = stats.binom.rvs(10, 0.75, size=(2, 50)) assert(numpy.all(vals >= 0) & numpy.all(vals <= 10)) @@ -108,7 +108,7 @@ assert(val.dtype.char in typecodes['AllInteger']) -class test_bernoulli(ScipyTestCase): +class test_bernoulli(NumpyTestCase): def check_rvs(self): vals = stats.bernoulli.rvs(0.75, size=(2, 50)) assert(numpy.all(vals >= 0) & numpy.all(vals <= 1)) @@ -118,7 +118,7 @@ assert(isinstance(val, numpy.ndarray)) assert(val.dtype.char in typecodes['AllInteger']) -class test_nbinom(ScipyTestCase): +class test_nbinom(NumpyTestCase): def check_rvs(self): vals = stats.nbinom.rvs(10, 0.75, size=(2, 50)) assert(numpy.all(vals >= 0)) @@ -128,7 +128,7 @@ assert(isinstance(val, numpy.ndarray)) assert(val.dtype.char in typecodes['AllInteger']) -class test_geom(ScipyTestCase): +class test_geom(NumpyTestCase): def check_rvs(self): vals = stats.geom.rvs(0.75, size=(2, 50)) assert(numpy.all(vals >= 0)) @@ -138,7 +138,7 @@ assert(isinstance(val, numpy.ndarray)) assert(val.dtype.char in typecodes['AllInteger']) -class test_hypergeom(ScipyTestCase): +class test_hypergeom(NumpyTestCase): def check_rvs(self): vals = stats.hypergeom.rvs(20, 10, 3, size=(2, 50)) assert(numpy.all(vals >= 0) & @@ -149,7 +149,7 @@ assert(isinstance(val, numpy.ndarray)) assert(val.dtype.char in typecodes['AllInteger']) -class test_logser(ScipyTestCase): +class test_logser(NumpyTestCase): def check_rvs(self): vals = stats.logser.rvs(0.75, size=(2, 50)) assert(numpy.all(vals >= 1)) @@ -159,7 +159,7 @@ assert(isinstance(val, numpy.ndarray)) assert(val.dtype.char in typecodes['AllInteger']) -class test_poisson(ScipyTestCase): +class test_poisson(NumpyTestCase): def check_rvs(self): vals = stats.poisson.rvs(0.5, size=(2, 50)) assert(numpy.all(vals >= 0)) @@ -169,7 +169,7 @@ assert(isinstance(val, numpy.ndarray)) assert(val.dtype.char in typecodes['AllInteger']) -class test_zipf(ScipyTestCase): +class test_zipf(NumpyTestCase): def check_rvs(self): vals = stats.zipf.rvs(1.5, size=(2, 50)) assert(numpy.all(vals >= 1)) @@ -179,7 +179,7 @@ assert(isinstance(val, numpy.ndarray)) assert(val.dtype.char in typecodes['AllInteger']) -class test_dlaplace(ScipyTestCase): +class test_dlaplace(NumpyTestCase): def check_rvs(self): vals = stats.dlaplace.rvs(1.5 , size=(2, 50)) assert(numpy.shape(vals) == (2, 50)) @@ -189,4 +189,4 @@ assert(val.dtype.char in typecodes['AllInteger']) if __name__ == "__main__": - ScipyTest('stats.distributions').run() + NumpyTest('stats.distributions').run() Modified: branches/0.5.2.x/Lib/stats/tests/test_morestats.py =================================================================== --- branches/0.5.2.x/Lib/stats/tests/test_morestats.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/stats/tests/test_morestats.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -22,7 +22,7 @@ g9 = [1.002, 0.998, 0.996, 0.995, 0.996, 1.004, 1.004, 0.998, 0.999, 0.991] g10= [0.991, 0.995, 0.984, 0.994, 0.997, 0.997, 0.991, 0.998, 1.004, 0.997] -class test_shapiro(ScipyTestCase): +class test_shapiro(NumpyTestCase): def 
check_basic(self): x1 = [0.11,7.87,4.61,10.14,7.95,3.14,0.46, 4.43,0.21,4.75,0.71,1.52,3.24, @@ -37,7 +37,7 @@ assert_almost_equal(w,0.9590270,6) assert_almost_equal(pw,0.52460,3) -class test_anderson(ScipyTestCase): +class test_anderson(NumpyTestCase): def check_normal(self): x1 = scipy.stats.expon.rvs(size=50) x2 = scipy.stats.norm.rvs(size=50) @@ -58,7 +58,7 @@ A,crit,sig = scipy.stats.anderson(x2,'expon') assert_array_less(crit[:-1], A) -class test_ansari(ScipyTestCase): +class test_ansari(NumpyTestCase): def check_small(self): x = [1,2,3,3,4] y = [3,2,6,1,6,1,4,1] @@ -80,7 +80,7 @@ assert_almost_equal(W,10.0,11) assert_almost_equal(pval,0.533333333333333333,7) -class test_bartlett(ScipyTestCase): +class test_bartlett(NumpyTestCase): def check_data(self): args = [] for k in range(1,11): @@ -89,7 +89,7 @@ assert_almost_equal(T,20.78587342806484,7) assert_almost_equal(pval,0.0136358632781,7) -class test_levene(ScipyTestCase): +class test_levene(NumpyTestCase): def check_data(self): args = [] for k in range(1,11): @@ -98,7 +98,7 @@ assert_almost_equal(W,1.7059176930008939,7) assert_almost_equal(pval,0.0990829755522,7) -class test_binom_test(ScipyTestCase): +class test_binom_test(NumpyTestCase): def check_data(self): pval = stats.binom_test(100,250) assert_almost_equal(pval,0.0018833009350757682,11) @@ -107,7 +107,7 @@ pval = stats.binom_test([682,243],p=3.0/4) assert_almost_equal(pval,0.38249155957481695,11) -class test_find_repeats(ScipyTestCase): +class test_find_repeats(NumpyTestCase): def check_basic(self): a = [1,2,3,4,1,2,3,4,1,2,5] res,nums = scipy.stats.find_repeats(a) @@ -115,4 +115,4 @@ assert_array_equal(nums,[3,3,2,2]) if __name__ == "__main__": - ScipyTest().run() + NumpyTest().run() Modified: branches/0.5.2.x/Lib/stats/tests/test_stats.py =================================================================== --- branches/0.5.2.x/Lib/stats/tests/test_stats.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/stats/tests/test_stats.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -48,7 +48,7 @@ X8 = X7 * X X9 = X8 * X -class test_round(ScipyTestCase): +class test_round(NumpyTestCase): """ W.II. ROUND You should get the numbers 1 to 9. Many language compilers, @@ -98,7 +98,7 @@ y=(int(round((3-numpy.exp(numpy.log(numpy.sqrt(2.0)*numpy.sqrt(2.0))))))) assert_equal(y,1) -class test_basicstats(ScipyTestCase): +class test_basicstats(NumpyTestCase): """ W.II.C. Compute basic statistic on all the variables. The means should be the fifth value of all the variables (case FIVE). @@ -184,7 +184,7 @@ y = scipy.stats.std(ROUND) assert_approx_equal(y, 2.738612788) -class test_corr(ScipyTestCase): +class test_corr(NumpyTestCase): """ W.II.D. Compute a correlation matrix on all the variables. All the correlations, except for ZERO and MISS, shoud be exactly 1. @@ -371,7 +371,7 @@ ### I need to figure out how to do this one. -class test_regression(ScipyTestCase): +class test_regression(NumpyTestCase): def check_linregressBIGX(self): """ W.II.F. Regress BIG on X. 
@@ -436,7 +436,7 @@ ################################################## ### Test for sum -class test_gmean(ScipyTestCase): +class test_gmean(NumpyTestCase): def check_1D_list(self): a = (1,2,3,4) @@ -475,7 +475,7 @@ desired = array((v,v,v)) assert_array_almost_equal(desired,actual,decimal=14) -class test_hmean(ScipyTestCase): +class test_hmean(NumpyTestCase): def check_1D_list(self): a = (1,2,3,4) actual= stats.hmean(a) @@ -515,7 +515,7 @@ assert_array_almost_equal(desired1,actual1,decimal=14) -class test_mean(ScipyTestCase): +class test_mean(NumpyTestCase): def check_basic(self): a = [3,4,5,10,-3,-5,6] af = [3.,4,5,10,-3,-5,-6] @@ -552,7 +552,7 @@ A += val assert_almost_equal(stats.mean(a,axis=None),A/(5*3.0*5)) -class test_median(ScipyTestCase): +class test_median(NumpyTestCase): def check_basic(self): a1 = [3,4,5,10,-3,-5,6] a2 = [3,-6,-2,8,7,4,2,1] @@ -561,7 +561,7 @@ assert_equal(stats.median(a2),2.5) assert_equal(stats.median(a3),3.5) -class test_percentile(ScipyTestCase): +class test_percentile(NumpyTestCase): def setUp(self): self.a1 = [3,4,5,10,-3,-5,6] self.a2 = [3,-6,-2,8,7,4,2,1] @@ -578,7 +578,7 @@ assert_equal(stats.scoreatpercentile(x, 100), 3.5) assert_equal(stats.scoreatpercentile(x, 50), 1.75) -class test_std(ScipyTestCase): +class test_std(NumpyTestCase): def check_basic(self): a = [3,4,5,10,-3,-5,6] b = [3,4,5,10,-3,-5,-6] @@ -597,21 +597,21 @@ assert_array_almost_equal(stats.std(a,axis=1),b2,11) -class test_cmedian(ScipyTestCase): +class test_cmedian(NumpyTestCase): def check_basic(self): data = [1,2,3,1,5,3,6,4,3,2,4,3,5,2.0] assert_almost_equal(stats.cmedian(data,5),3.2916666666666665) assert_almost_equal(stats.cmedian(data,3),3.083333333333333) assert_almost_equal(stats.cmedian(data),3.0020020020020022) -class test_median(ScipyTestCase): +class test_median(NumpyTestCase): def check_basic(self): data1 = [1,3,5,2,3,1,19,-10,2,4.0] data2 = [3,5,1,10,23,-10,3,-2,6,8,15] assert_almost_equal(stats.median(data1),2.5) assert_almost_equal(stats.median(data2),5) -class test_mode(ScipyTestCase): +class test_mode(NumpyTestCase): def check_basic(self): data1 = [3,5,1,10,23,3,2,6,8,6,10,6] vals = stats.mode(data1) @@ -619,7 +619,7 @@ assert_almost_equal(vals[1][0],3) -class test_variability(ScipyTestCase): +class test_variability(NumpyTestCase): """ Comparison numbers are found using R v.1.5.1 note that length(testcase) = 4 """ @@ -699,7 +699,7 @@ -class test_moments(ScipyTestCase): +class test_moments(NumpyTestCase): """ Comparison numbers are found using R v.1.5.1 note that length(testcase) = 4 @@ -759,7 +759,7 @@ y = scipy.stats.kurtosis(self.testcase,0,0) assert_approx_equal(y,1.64) -class test_threshold(ScipyTestCase): +class test_threshold(NumpyTestCase): def check_basic(self): a = [-1,2,3,4,5,-1,-2] assert_array_equal(stats.threshold(a),a) @@ -771,4 +771,4 @@ [0,2,3,4,0,0,0]) if __name__ == "__main__": - ScipyTest().run() + NumpyTest().run() Modified: branches/0.5.2.x/Lib/weave/__init__.py =================================================================== --- branches/0.5.2.x/Lib/weave/__init__.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/weave/__init__.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -18,5 +18,5 @@ except: pass -from numpy.testing import ScipyTest -test = ScipyTest().test +from numpy.testing import NumpyTest +test = NumpyTest().test Modified: branches/0.5.2.x/Lib/weave/tests/test_ast_tools.py =================================================================== --- branches/0.5.2.x/Lib/weave/tests/test_ast_tools.py 2007-08-14 20:52:37 UTC (rev 
3235) +++ branches/0.5.2.x/Lib/weave/tests/test_ast_tools.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -8,7 +8,7 @@ from weave_test_utils import * restore_path() -class test_harvest_variables(ScipyTestCase): +class test_harvest_variables(NumpyTestCase): """ Not much testing going on here, but at least it is a flame test. """ @@ -28,4 +28,4 @@ self.generic_test(expr,desired) if __name__ == "__main__": - ScipyTest().run() + NumpyTest().run() Modified: branches/0.5.2.x/Lib/weave/tests/test_blitz_tools.py =================================================================== --- branches/0.5.2.x/Lib/weave/tests/test_blitz_tools.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/weave/tests/test_blitz_tools.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -14,7 +14,7 @@ restore_path() -class test_ast_to_blitz_expr(ScipyTestCase): +class test_ast_to_blitz_expr(NumpyTestCase): def generic_test(self,expr,desired): import parser @@ -57,7 +57,7 @@ '-hy(_all,blitz::Range(1,_end),blitz::Range(_beg,Nhy(2)-1-1)));' self.generic_test(expr,desired) -class test_blitz(ScipyTestCase): +class test_blitz(NumpyTestCase): """* These are long running tests... I'd like to benchmark these things somehow. @@ -174,4 +174,4 @@ self.generic_2d(expr,complex128) if __name__ == "__main__": - ScipyTest().run() + NumpyTest().run() Modified: branches/0.5.2.x/Lib/weave/tests/test_build_tools.py =================================================================== --- branches/0.5.2.x/Lib/weave/tests/test_build_tools.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/weave/tests/test_build_tools.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -12,7 +12,7 @@ def is_writable(val): return os.access(val,os.W_OK) -class test_configure_build_dir(ScipyTestCase): +class test_configure_build_dir(NumpyTestCase): def check_default(self): " default behavior is to return current directory " d = build_tools.configure_build_dir() @@ -46,7 +46,7 @@ assert(d == tempfile.gettempdir()) assert(is_writable(d)) -class test_configure_sys_argv(ScipyTestCase): +class test_configure_sys_argv(NumpyTestCase): def check_simple(self): build_dir = 'build_dir' temp_dir = 'temp_dir' @@ -63,4 +63,4 @@ assert(pre_argv == sys.argv[:]) if __name__ == "__main__": - ScipyTest().run() + NumpyTest().run() Modified: branches/0.5.2.x/Lib/weave/tests/test_c_spec.py =================================================================== --- branches/0.5.2.x/Lib/weave/tests/test_c_spec.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/weave/tests/test_c_spec.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -48,7 +48,7 @@ # Scalar conversion test classes # int, float, complex #---------------------------------------------------------------------------- -class test_int_converter(ScipyTestCase): +class test_int_converter(NumpyTestCase): compiler = '' def check_type_match_string(self,level=5): s = c_spec.int_converter() @@ -103,7 +103,7 @@ assert( c == 3) -class test_float_converter(ScipyTestCase): +class test_float_converter(NumpyTestCase): compiler = '' def check_type_match_string(self,level=5): s = c_spec.float_converter() @@ -158,7 +158,7 @@ c = test(b) assert( c == 3.) 
-class test_complex_converter(ScipyTestCase): +class test_complex_converter(NumpyTestCase): compiler = '' def check_type_match_string(self,level=5): s = c_spec.complex_converter() @@ -216,7 +216,7 @@ # File conversion tests #---------------------------------------------------------------------------- -class test_file_converter(ScipyTestCase): +class test_file_converter(NumpyTestCase): compiler = '' def check_py_to_file(self,level=5): import tempfile @@ -250,14 +250,14 @@ # Instance conversion tests #---------------------------------------------------------------------------- -class test_instance_converter(ScipyTestCase): +class test_instance_converter(NumpyTestCase): pass #---------------------------------------------------------------------------- # Callable object conversion tests #---------------------------------------------------------------------------- -class test_callable_converter(ScipyTestCase): +class test_callable_converter(NumpyTestCase): compiler='' def check_call_function(self,level=5): import string @@ -277,7 +277,7 @@ desired = func(search_str,sub_str) assert(desired == actual) -class test_sequence_converter(ScipyTestCase): +class test_sequence_converter(NumpyTestCase): compiler = '' def check_convert_to_dict(self,level=5): d = {} @@ -292,7 +292,7 @@ t = () inline_tools.inline("",['t'],compiler=self.compiler,force=1) -class test_string_converter(ScipyTestCase): +class test_string_converter(NumpyTestCase): compiler = '' def check_type_match_string(self,level=5): s = c_spec.string_converter() @@ -347,7 +347,7 @@ c = test(b) assert( c == 'hello') -class test_list_converter(ScipyTestCase): +class test_list_converter(NumpyTestCase): compiler = '' def check_type_match_bad(self,level=5): s = c_spec.list_converter() @@ -458,7 +458,7 @@ print 'python:', t2 - t1 assert( sum1 == sum2 and sum1 == sum3) -class test_tuple_converter(ScipyTestCase): +class test_tuple_converter(NumpyTestCase): compiler = '' def check_type_match_bad(self,level=5): s = c_spec.tuple_converter() @@ -511,7 +511,7 @@ assert( c == ('hello',None)) -class test_dict_converter(ScipyTestCase): +class test_dict_converter(NumpyTestCase): def check_type_match_bad(self,level=5): s = c_spec.dict_converter() objs = [[],(),'',1,1.,1+1j] @@ -674,4 +674,4 @@ if _n[:9]=='test_gcc_': exec 'del '+_n if __name__ == "__main__": - ScipyTest('weave.c_spec').run() + NumpyTest('weave.c_spec').run() Modified: branches/0.5.2.x/Lib/weave/tests/test_catalog.py =================================================================== --- branches/0.5.2.x/Lib/weave/tests/test_catalog.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/weave/tests/test_catalog.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -11,7 +11,7 @@ restore_path() -class test_default_dir(ScipyTestCase): +class test_default_dir(NumpyTestCase): def check_is_writable(self): path = catalog.default_dir() name = os.path.join(path,'dummy_catalog') @@ -22,10 +22,10 @@ test_file.close() os.remove(name) -class test_os_dependent_catalog_name(ScipyTestCase): +class test_os_dependent_catalog_name(NumpyTestCase): pass -class test_catalog_path(ScipyTestCase): +class test_catalog_path(NumpyTestCase): def check_default(self): in_path = catalog.default_dir() path = catalog.catalog_path(in_path) @@ -64,7 +64,7 @@ path = catalog.catalog_path(in_path) assert (path is None) -class test_get_catalog(ScipyTestCase): +class test_get_catalog(NumpyTestCase): """ This only tests whether new catalogs are created correctly. And whether non-existent return None correctly with read mode. 
Putting catalogs in the right place is all tested with @@ -98,7 +98,7 @@ self.remove_dir(pardir) assert(cat is not None) -class test_catalog(ScipyTestCase): +class test_catalog(NumpyTestCase): def clear_environ(self): if os.environ.has_key('PYTHONCOMPILED'): @@ -331,4 +331,4 @@ if __name__ == '__main__': - ScipyTest('weave.catalog').run() + NumpyTest('weave.catalog').run() Modified: branches/0.5.2.x/Lib/weave/tests/test_ext_tools.py =================================================================== --- branches/0.5.2.x/Lib/weave/tests/test_ext_tools.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/weave/tests/test_ext_tools.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -17,7 +17,7 @@ build_dir = empty_temp_dir() print 'building extensions here:', build_dir -class test_ext_module(ScipyTestCase): +class test_ext_module(NumpyTestCase): #should really do some testing of where modules end up def check_simple(self,level=5): """ Simplest possible module """ @@ -94,7 +94,7 @@ c,d = ext_return_tuple.test(a) assert(c==a and d == a+1) -class test_ext_function(ScipyTestCase): +class test_ext_function(NumpyTestCase): #should really do some testing of where modules end up def check_simple(self,level=5): """ Simplest possible function """ @@ -107,7 +107,7 @@ import simple_ext_function simple_ext_function.test() -class test_assign_variable_types(ScipyTestCase): +class test_assign_variable_types(NumpyTestCase): def check_assign_variable_types(self): try: from numpy.numerix import arange, Float32, Float64 @@ -135,4 +135,4 @@ print_assert_equal(expr,actual,desired) if __name__ == "__main__": - ScipyTest().run() + NumpyTest().run() Modified: branches/0.5.2.x/Lib/weave/tests/test_inline_tools.py =================================================================== --- branches/0.5.2.x/Lib/weave/tests/test_inline_tools.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/weave/tests/test_inline_tools.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -9,7 +9,7 @@ from test_scxx import * restore_path() -class test_inline(ScipyTestCase): +class test_inline(NumpyTestCase): """ These are long running tests... I'd like to benchmark these things somehow. 
@@ -43,4 +43,4 @@ pass if __name__ == "__main__": - ScipyTest().run() + NumpyTest().run() Modified: branches/0.5.2.x/Lib/weave/tests/test_scxx.py =================================================================== --- branches/0.5.2.x/Lib/weave/tests/test_scxx.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/weave/tests/test_scxx.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -12,4 +12,4 @@ restore_path() if __name__ == "__main__": - ScipyTest().run() + NumpyTest().run() Modified: branches/0.5.2.x/Lib/weave/tests/test_scxx_dict.py =================================================================== --- branches/0.5.2.x/Lib/weave/tests/test_scxx_dict.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/weave/tests/test_scxx_dict.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -9,7 +9,7 @@ restore_path() -class test_dict_construct(ScipyTestCase): +class test_dict_construct(NumpyTestCase): #------------------------------------------------------------------------ # Check that construction from basic types is allowed and have correct # reference counts @@ -25,7 +25,7 @@ assert res == {} -class test_dict_has_key(ScipyTestCase): +class test_dict_has_key(NumpyTestCase): def check_obj(self,level=5): class foo: pass @@ -89,7 +89,7 @@ res = inline_tools.inline(code,['a']) assert not res -class test_dict_get_item_op(ScipyTestCase): +class test_dict_get_item_op(NumpyTestCase): def generic_get(self,code,args=['a']): a = {} @@ -132,7 +132,7 @@ except KeyError: pass -class test_dict_set_operator(ScipyTestCase): +class test_dict_set_operator(NumpyTestCase): def generic_new(self,key,val): # test that value is set correctly and that reference counts # on dict, key, and val are being handled correctly. @@ -199,7 +199,7 @@ key,val = foo(),12345 self.generic_overwrite(key,val) -class test_dict_del(ScipyTestCase): +class test_dict_del(NumpyTestCase): def generic(self,key): # test that value is set correctly and that reference counts # on dict, key, are being handled correctly. after deletion, @@ -233,7 +233,7 @@ key = foo() self.generic(key) -class test_dict_others(ScipyTestCase): +class test_dict_others(NumpyTestCase): def check_clear(self,level=5): a = {} a["hello"] = 1 @@ -262,4 +262,4 @@ assert a == b if __name__ == "__main__": - ScipyTest().run() + NumpyTest().run() Modified: branches/0.5.2.x/Lib/weave/tests/test_scxx_object.py =================================================================== --- branches/0.5.2.x/Lib/weave/tests/test_scxx_object.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/weave/tests/test_scxx_object.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -8,7 +8,7 @@ from weave import inline_tools restore_path() -class test_object_construct(ScipyTestCase): +class test_object_construct(NumpyTestCase): #------------------------------------------------------------------------ # Check that construction from basic types is allowed and have correct # reference counts @@ -66,7 +66,7 @@ assert sys.getrefcount(res) == 2 assert res == "hello" -class test_object_print(ScipyTestCase): +class test_object_print(NumpyTestCase): #------------------------------------------------------------------------ # Check the object print protocol. 
#------------------------------------------------------------------------ @@ -101,7 +101,7 @@ ## pass -class test_object_cast(ScipyTestCase): +class test_object_cast(NumpyTestCase): def check_int_cast(self,level=5): code = """ py::object val = 1; @@ -147,7 +147,7 @@ def __str__(self): return "b" -class test_object_hasattr(ScipyTestCase): +class test_object_hasattr(NumpyTestCase): def check_string(self,level=5): a = foo() a.b = 12345 @@ -203,7 +203,7 @@ res = inline_tools.inline(code,['a']) assert res -class test_object_attr(ScipyTestCase): +class test_object_attr(NumpyTestCase): def generic_attr(self,code,args=['a']): a = foo() @@ -261,7 +261,7 @@ assert res == "bar results" assert first == second -class test_object_set_attr(ScipyTestCase): +class test_object_set_attr(NumpyTestCase): def generic_existing(self, code, desired): args = ['a'] @@ -325,7 +325,7 @@ """ self.generic_existing(code,"hello") -class test_object_del(ScipyTestCase): +class test_object_del(NumpyTestCase): def generic(self, code): args = ['a'] a = foo() @@ -348,7 +348,7 @@ """ self.generic(code) -class test_object_cmp(ScipyTestCase): +class test_object_cmp(NumpyTestCase): def check_equal(self,level=5): a,b = 1,1 res = inline_tools.inline('return_val = (a == b);',['a','b']) @@ -411,7 +411,7 @@ res = inline_tools.inline(code,['a']) assert res == (a == "hello") -class test_object_repr(ScipyTestCase): +class test_object_repr(NumpyTestCase): def check_repr(self,level=5): class foo: def __str__(self): @@ -427,7 +427,7 @@ assert first == second assert res == "repr return" -class test_object_str(ScipyTestCase): +class test_object_str(NumpyTestCase): def check_str(self,level=5): class foo: def __str__(self): @@ -444,7 +444,7 @@ print res assert res == "str return" -class test_object_unicode(ScipyTestCase): +class test_object_unicode(NumpyTestCase): # This ain't going to win awards for test of the year... 
def check_unicode(self,level=5): class foo: @@ -461,7 +461,7 @@ assert first == second assert res == "unicode" -class test_object_is_callable(ScipyTestCase): +class test_object_is_callable(NumpyTestCase): def check_true(self,level=5): class foo: def __call__(self): @@ -476,7 +476,7 @@ res = inline_tools.inline('return_val = a.is_callable();',['a']) assert not res -class test_object_call(ScipyTestCase): +class test_object_call(NumpyTestCase): def check_noargs(self,level=5): def foo(): return (1,2,3) @@ -532,7 +532,7 @@ # first should == second, but the weird refcount error assert second == third -class test_object_mcall(ScipyTestCase): +class test_object_mcall(NumpyTestCase): def check_noargs(self,level=5): a = foo() res = inline_tools.inline('return_val = a.mcall("bar");',['a']) @@ -626,7 +626,7 @@ # first should == second, but the weird refcount error assert second == third -class test_object_hash(ScipyTestCase): +class test_object_hash(NumpyTestCase): def check_hash(self,level=5): class foo: def __hash__(self): @@ -636,7 +636,7 @@ print 'hash:', res assert res == 123 -class test_object_is_true(ScipyTestCase): +class test_object_is_true(NumpyTestCase): def check_true(self,level=5): class foo: pass @@ -648,7 +648,7 @@ res = inline_tools.inline('return_val = a.is_true();',['a']) assert res == 0 -class test_object_is_true(ScipyTestCase): +class test_object_is_true(NumpyTestCase): def check_false(self,level=5): class foo: pass @@ -660,7 +660,7 @@ res = inline_tools.inline('return_val = a.not();',['a']) assert res == 1 -class test_object_type(ScipyTestCase): +class test_object_type(NumpyTestCase): def check_type(self,level=5): class foo: pass @@ -668,7 +668,7 @@ res = inline_tools.inline('return_val = a.type();',['a']) assert res == type(a) -class test_object_size(ScipyTestCase): +class test_object_size(NumpyTestCase): def check_size(self,level=5): class foo: def __len__(self): @@ -692,7 +692,7 @@ assert res == len(a) from UserList import UserList -class test_object_set_item_op_index(ScipyTestCase): +class test_object_set_item_op_index(NumpyTestCase): def check_list_refcount(self,level=5): a = UserList([1,2,3]) # temporary refcount fix until I understand why it incs by one. 
@@ -727,7 +727,7 @@ assert a[1] == 1+1j from UserDict import UserDict -class test_object_set_item_op_key(ScipyTestCase): +class test_object_set_item_op_key(NumpyTestCase): def check_key_refcount(self,level=5): a = UserDict() code = """ @@ -818,4 +818,4 @@ assert a['first'] == a['second'] if __name__ == "__main__": - ScipyTest().run() + NumpyTest().run() Modified: branches/0.5.2.x/Lib/weave/tests/test_scxx_sequence.py =================================================================== --- branches/0.5.2.x/Lib/weave/tests/test_scxx_sequence.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/weave/tests/test_scxx_sequence.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -20,7 +20,7 @@ from UserList import UserList -class _test_sequence_base(ScipyTestCase): +class _test_sequence_base(NumpyTestCase): seq_type = None def check_conversion(self,level=5): @@ -435,4 +435,4 @@ assert b == desired if __name__ == "__main__": - ScipyTest().run() + NumpyTest().run() Modified: branches/0.5.2.x/Lib/weave/tests/test_size_check.py =================================================================== --- branches/0.5.2.x/Lib/weave/tests/test_size_check.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/weave/tests/test_size_check.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -11,7 +11,7 @@ empty = array(()) -class test_make_same_length(ScipyTestCase): +class test_make_same_length(NumpyTestCase): def generic_test(self,x,y,desired): actual = size_check.make_same_length(x,y) @@ -39,7 +39,7 @@ desired = array((1,2,3)),array((1,1,2)) self.generic_test(x,y,desired) -class test_binary_op_size(ScipyTestCase): +class test_binary_op_size(NumpyTestCase): def generic_test(self,x,y,desired): actual = size_check.binary_op_size(x,y) desired = desired @@ -115,7 +115,7 @@ def desired_type(self,val): return size_check.dummy_array(array(val),1) -class test_dummy_array_indexing(ScipyTestCase): +class test_dummy_array_indexing(NumpyTestCase): def generic_test(self,ary,expr,desired): a = size_check.dummy_array(ary) actual = eval(expr).shape @@ -269,7 +269,7 @@ except IndexError: pass -class test_reduction(ScipyTestCase): +class test_reduction(NumpyTestCase): def check_1d_0(self): a = ones((5,)) actual = size_check.reduction(a,0) @@ -303,7 +303,7 @@ except ValueError: pass -class test_expressions(ScipyTestCase): +class test_expressions(NumpyTestCase): def generic_test(self,expr,desired,**kw): import parser ast_list = parser.expr(expr).tolist() @@ -372,4 +372,4 @@ if __name__ == "__main__": - ScipyTest('weave.size_check').run() + NumpyTest('weave.size_check').run() Modified: branches/0.5.2.x/Lib/weave/tests/test_slice_handler.py =================================================================== --- branches/0.5.2.x/Lib/weave/tests/test_slice_handler.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/weave/tests/test_slice_handler.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -22,7 +22,7 @@ pprint.pprint(desired,msg) raise AssertionError, msg.getvalue() -class test_build_slice_atom(ScipyTestCase): +class test_build_slice_atom(NumpyTestCase): def generic_test(self,slice_vars,desired): pos = slice_vars['pos'] ast_list = slice_handler.build_slice_atom(slice_vars,pos) @@ -34,7 +34,7 @@ desired = 'slice(1,2-1)' self.generic_test(slice_vars,desired) -class test_slice(ScipyTestCase): +class test_slice(NumpyTestCase): def generic_test(self,suite_string,desired): import parser @@ -135,7 +135,7 @@ out = string.replace(out,"\n","") return out -class test_transform_slices(ScipyTestCase): +class 
test_transform_slices(NumpyTestCase): def generic_test(self,suite_string,desired): import parser ast_list = parser.suite(suite_string).tolist() @@ -164,4 +164,4 @@ if __name__ == "__main__": - ScipyTest('weave.slice_handler').run() + NumpyTest('weave.slice_handler').run() Modified: branches/0.5.2.x/Lib/weave/tests/test_standard_array_spec.py =================================================================== --- branches/0.5.2.x/Lib/weave/tests/test_standard_array_spec.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/weave/tests/test_standard_array_spec.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -28,7 +28,7 @@ pprint.pprint(desired,msg) raise AssertionError, msg.getvalue() -class test_array_converter(ScipyTestCase): +class test_array_converter(NumpyTestCase): def check_type_match_string(self): s = standard_array_spec.array_converter() assert( not s.type_match('string') ) @@ -40,4 +40,4 @@ assert(s.type_match(arange(4))) if __name__ == "__main__": - ScipyTest().run() + NumpyTest().run() Modified: branches/0.5.2.x/Lib/weave/tests/test_wx_spec.py =================================================================== --- branches/0.5.2.x/Lib/weave/tests/test_wx_spec.py 2007-08-14 20:52:37 UTC (rev 3235) +++ branches/0.5.2.x/Lib/weave/tests/test_wx_spec.py 2007-08-14 20:53:46 UTC (rev 3236) @@ -16,7 +16,7 @@ import wxPython import wxPython.wx -class test_wx_converter(ScipyTestCase): +class test_wx_converter(NumpyTestCase): def check_type_match_string(self,level=5): s = wx_spec.wx_converter() assert(not s.type_match('string') ) @@ -91,4 +91,4 @@ assert( c == 'hello') if __name__ == "__main__": - ScipyTest().run() + NumpyTest().run() From scipy-svn at scipy.org Tue Aug 14 17:33:54 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 14 Aug 2007 16:33:54 -0500 (CDT) Subject: [Scipy-svn] r3237 - branches/0.5.2.x/Lib/io Message-ID: <20070814213354.76C0D39C1E4@new.scipy.org> Author: jarrod.millman Date: 2007-08-14 16:33:51 -0500 (Tue, 14 Aug 2007) New Revision: 3237 Modified: branches/0.5.2.x/Lib/io/mio5.py Log: Fix mio5 for new behavior of rank-0 record arrays as per revision 2893. 
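[Annotation: the .item() calls in the diff below track a numpy behavior change for rank-0 record arrays — indexing a field of such an array now yields a 0-d ndarray rather than a Python scalar, so values that feed into bit arithmetic (mdtype >> 16) or string methods (.strip()) have to be unwrapped explicitly. A minimal sketch of the distinction, assuming a numpy recent enough to show the new behavior; the field names are taken from the 'tag_full' dtype used in the diff:

    import numpy as np

    # Rank-0 record array, analogous to the 'tag_full' tag header read by mio5.py.
    tag = np.zeros((), dtype=[('mdtype', '<u4'), ('byte_count', '<u4')])

    field  = tag['mdtype']          # 0-d ndarray, not a plain integer
    scalar = tag['mdtype'].item()   # Python int, safe for 'mdtype >> 16' and .strip()
]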
Modified: branches/0.5.2.x/Lib/io/mio5.py =================================================================== --- branches/0.5.2.x/Lib/io/mio5.py 2007-08-14 20:53:46 UTC (rev 3236) +++ branches/0.5.2.x/Lib/io/mio5.py 2007-08-14 21:33:51 UTC (rev 3237) @@ -157,7 +157,7 @@ tag = ndarray(shape=(), dtype=self.dtypes['tag_full'], buffer = raw_tag) - mdtype = tag['mdtype'] + mdtype = tag['mdtype'].item() byte_count = mdtype >> 16 if byte_count: # small data element format if byte_count > 4: @@ -168,7 +168,7 @@ return ndarray(shape=(el_count,), dtype=dt, buffer=raw_tag[4:]) - byte_count = tag['byte_count'] + byte_count = tag['byte_count'].item() if mdtype == miMATRIX: return self.current_getter(byte_count).get_array() if mdtype in self.codecs: # encoded char data @@ -193,8 +193,8 @@ def matrix_getter_factory(self): ''' Returns reader for next matrix at top level ''' tag = self.read_dtype(self.dtypes['tag_full']) - mdtype = tag['mdtype'] - byte_count = tag['byte_count'] + mdtype = tag['mdtype'].item() + byte_count = tag['byte_count'].item() next_pos = self.mat_stream.tell() + byte_count if mdtype == miCOMPRESSED: getter = Mat5ZArrayReader(self, byte_count).matrix_getter_factory() @@ -507,7 +507,7 @@ ''' Read in mat 5 file header ''' hdict = {} hdr = self.read_dtype(self.dtypes['file_header']) - hdict['__header__'] = hdr['description'].strip(' \t\n\000') + hdict['__header__'] = hdr['description'].item().strip(' \t\n\000') v_major = hdr['version'] >> 8 v_minor = hdr['version'] & 0xFF hdict['__version__'] = '%d.%d' % (v_major, v_minor) From scipy-svn at scipy.org Tue Aug 14 17:46:58 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 14 Aug 2007 16:46:58 -0500 (CDT) Subject: [Scipy-svn] r3238 - branches/0.5.2.x/Lib/cluster Message-ID: <20070814214658.7D7EC39C1C9@new.scipy.org> Author: jarrod.millman Date: 2007-08-14 16:46:55 -0500 (Tue, 14 Aug 2007) New Revision: 3238 Modified: branches/0.5.2.x/Lib/cluster/__init__.py Log: Add missing test definition in scipy.cluster as per revision 2941 Modified: branches/0.5.2.x/Lib/cluster/__init__.py =================================================================== --- branches/0.5.2.x/Lib/cluster/__init__.py 2007-08-14 21:33:51 UTC (rev 3237) +++ branches/0.5.2.x/Lib/cluster/__init__.py 2007-08-14 21:46:55 UTC (rev 3238) @@ -7,3 +7,5 @@ __all__ = ['vq'] import vq +from numpy.testing import NumpyTest +test = NumpyTest().test From scipy-svn at scipy.org Tue Aug 14 18:09:30 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 14 Aug 2007 17:09:30 -0500 (CDT) Subject: [Scipy-svn] r3239 - branches/0.5.2.x/Lib/odr Message-ID: <20070814220930.AF50539C299@new.scipy.org> Author: jarrod.millman Date: 2007-08-14 17:09:26 -0500 (Tue, 14 Aug 2007) New Revision: 3239 Modified: branches/0.5.2.x/Lib/odr/__init__.py branches/0.5.2.x/Lib/odr/models.py branches/0.5.2.x/Lib/odr/odrpack.py branches/0.5.2.x/Lib/odr/setup.py Log: trying to synch odr with trunk since odr is broken in 0.5.2 Modified: branches/0.5.2.x/Lib/odr/__init__.py =================================================================== --- branches/0.5.2.x/Lib/odr/__init__.py 2007-08-14 21:46:55 UTC (rev 3238) +++ branches/0.5.2.x/Lib/odr/__init__.py 2007-08-14 22:09:26 UTC (rev 3239) @@ -1,47 +1,9 @@ -""" Orthogonal Distance Regression +# +# odr - Orthogonal Distance Regression +# -Introduction -============ +from info import __doc__ -Why Orthogonal Distance Regression (ODR)? Sometimes one has measurement errors -in the explanatory variable, not just the response variable. 
Ordinary Least -Squares (OLS) fitting procedures treat the data for explanatory variables as -fixed. Furthermore, OLS procedures require that the response variable be an -explicit function of the explanatory variables; sometimes making the equation -explicit is unwieldy and introduces errors. ODR can handle both of these cases -with ease and can even reduce to the OLS case if necessary. - -ODRPACK is a FORTRAN-77 library for performing ODR with possibly non-linear -fitting functions. It uses a modified trust-region Levenberg-Marquardt-type -algorithm to estimate the function parameters. The fitting functions are -provided by Python functions operating on NumPy arrays. The required derivatives -may be provided by Python functions as well or may be numerically estimated. -ODRPACK can do explicit or implicit ODR fits or can do OLS. Input and output -variables may be multi-dimensional. Weights can be provided to account for -different variances of the observations (even covariances between dimensions of -the variables). - -odr provides two interfaces: a single function and a set of high-level classes -that wrap that function. Please refer to their docstrings for more information. -While the docstring of the function, odr, does not have a full explanation of -its arguments, the classes do, and the arguments with the same name usually have -the same requirements. Furthermore, it is highly suggested that one at least -skim the ODRPACK User's Guide. Know Thy Algorithm. - - -Use -=== - -See the docstrings of odr.odrpack and the functions and classes for -usage instructions. The ODRPACK User's Guide is also quite helpful. It can be -found on one of the ODRPACK's original author's website: - - http://www.boulder.nist.gov/mcsd/Staff/JRogers/odrpack.html - -Robert Kern -robert.kern at gmail.com -""" - __version__ = '0.7' __author__ = 'Robert Kern ' __date__ = '2006-09-21' @@ -59,4 +21,6 @@ __all__ = ['odr', 'odr_error', 'odr_stop', 'Data', 'RealData', 'Model', 'Output', 'ODR', 'odrpack'] +from numpy.testing import NumpyTest +test = NumpyTest().test #### EOF ####################################################################### Modified: branches/0.5.2.x/Lib/odr/models.py =================================================================== --- branches/0.5.2.x/Lib/odr/models.py 2007-08-14 21:46:55 UTC (rev 3238) +++ branches/0.5.2.x/Lib/odr/models.py 2007-08-14 22:09:26 UTC (rev 3239) @@ -1,9 +1,8 @@ """ Collection of Model instances for use with the odrpack fitting package. """ -# Scipy imports. -from scipy.sandbox.odr.odrpack import Model import numpy as np +from scipy.odr.odrpack import Model def _lin_fcn(B, x): Modified: branches/0.5.2.x/Lib/odr/odrpack.py =================================================================== --- branches/0.5.2.x/Lib/odr/odrpack.py 2007-08-14 21:46:55 UTC (rev 3238) +++ branches/0.5.2.x/Lib/odr/odrpack.py 2007-08-14 22:09:26 UTC (rev 3239) @@ -100,7 +100,7 @@ """ import numpy -from scipy.sandbox.odr import __odrpack +from scipy.odr import __odrpack odr = __odrpack.odr odr_error = __odrpack.odr_error @@ -140,7 +140,7 @@ 'Sum of squares convergence', 'Parameter convergence', 'Both sum of squares and parameter convergence', - 'Iteration limit reached')[info % 10] + 'Iteration limit reached')[info % 5] if info >= 5: # questionable results or fatal error @@ -320,7 +320,7 @@ covx and covy are arrays of covariance matrices and are converted to weights by performing a matrix inversion on each observation's covariance matrix. - E.g. 
we[i] = scipy.linalg.inv(covy[i]) # i in range(len(covy)) + E.g. we[i] = numpy.linalg.inv(covy[i]) # i in range(len(covy)) # if covy.shape == (n,q,q) These arguments follow the same structured argument conventions as wd and we @@ -376,15 +376,15 @@ """ Convert covariance matrix(-ices) to weights. """ - from scipy import linalg + from numpy.dual import inv if len(cov.shape) == 2: - return linalg.inv(cov) + return inv(cov) else: weights = numpy.zeros(cov.shape, float) for i in range(cov.shape[-1]): # n - weights[:,:,i] = linalg.inv(cov[:,:,i]) + weights[:,:,i] = inv(cov[:,:,i]) return weights Modified: branches/0.5.2.x/Lib/odr/setup.py =================================================================== --- branches/0.5.2.x/Lib/odr/setup.py 2007-08-14 21:46:55 UTC (rev 3238) +++ branches/0.5.2.x/Lib/odr/setup.py 2007-08-14 22:09:26 UTC (rev 3239) @@ -1,18 +1,11 @@ #!/usr/bin/env python -import os,sys,re -from distutils import dep_util -from glob import glob -import warnings +from os.path import join -from numpy.distutils.core import Extension -from numpy.distutils.misc_util import get_path, Configuration, dot_join - -from numpy.distutils.system_info import get_info,dict_append,\ - AtlasNotFoundError,LapackNotFoundError,BlasNotFoundError,\ - LapackSrcNotFoundError,BlasSrcNotFoundError - def configuration(parent_package='', top_path=None): + import warnings + from numpy.distutils.misc_util import Configuration + from numpy.distutils.system_info import get_info, BlasNotFoundError config = Configuration('odr', parent_package, top_path) libodr_files = ['d_odr.f', @@ -26,7 +19,7 @@ warnings.warn(BlasNotFoundError.__doc__) libodr_files.append('d_lpkbls.f') - libodr = [os.path.join('odrpack', x) for x in libodr_files] + libodr = [join('odrpack', x) for x in libodr_files] config.add_library('odrpack', sources=libodr) sources = ['__odrpack.c'] libraries = ['odrpack'] + blas_info.pop('libraries', []) @@ -38,6 +31,7 @@ **blas_info ) + config.add_data_dir('tests') return config if __name__ == '__main__': From scipy-svn at scipy.org Tue Aug 14 20:29:44 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 14 Aug 2007 19:29:44 -0500 (CDT) Subject: [Scipy-svn] r3240 - branches/0.5.2.x/Lib/linsolve/umfpack Message-ID: <20070815002944.CB7CA39C1B8@new.scipy.org> Author: jarrod.millman Date: 2007-08-14 19:29:42 -0500 (Tue, 14 Aug 2007) New Revision: 3240 Modified: branches/0.5.2.x/Lib/linsolve/umfpack/umfpack.i Log: resynching with trunk (updates for SWIG > 1.3.29 and fixes memory leak of type 'void *') Modified: branches/0.5.2.x/Lib/linsolve/umfpack/umfpack.i =================================================================== --- branches/0.5.2.x/Lib/linsolve/umfpack/umfpack.i 2007-08-14 22:09:26 UTC (rev 3239) +++ branches/0.5.2.x/Lib/linsolve/umfpack/umfpack.i 2007-08-15 00:29:42 UTC (rev 3240) @@ -1,3 +1,6 @@ +/* -*- C -*- */ +#ifdef SWIGPYTHON + %module _umfpack /* @@ -91,7 +94,7 @@ - 30.11.2005, c */ #define ARRAY_IN( rtype, ctype, atype ) \ -%typemap( python, in ) (ctype *array) { \ +%typemap( in ) (ctype *array) { \ PyArrayObject *obj; \ obj = helper_getCArrayObject( $input, PyArray_##atype, 1, 1 ); \ if (!obj) return NULL; \ @@ -104,7 +107,7 @@ - 30.11.2005, c */ #define CONF_IN( arSize ) \ -%typemap( python, in ) (double conf [arSize]) { \ +%typemap( in ) (double conf [arSize]) { \ PyArrayObject *obj; \ obj = helper_getCArrayObject( $input, PyArray_DOUBLE, 1, 1 ); \ if (!obj) return NULL; \ @@ -114,6 +117,7 @@ return NULL; \ } \ $1 = (double *) obj->data; \ + Py_DECREF( obj ); \ }; /*! 
@@ -122,12 +126,12 @@ - 02.12.2005 */ #define OPAQUE_ARGOUT( ttype ) \ -%typemap( python, in, numinputs=0 ) ttype* opaque_argout( ttype tmp ) { \ +%typemap( in, numinputs=0 ) ttype* opaque_argout( ttype tmp ) { \ $1 = &tmp; \ }; \ -%typemap( python, argout ) ttype* opaque_argout { \ +%typemap( argout ) ttype* opaque_argout { \ PyObject *obj; \ - obj = SWIG_NewPointerObj( (ttype) (*$1), $*1_descriptor, 1 ); \ + obj = SWIG_NewPointerObj( (ttype) (*$1), $*1_descriptor, 0 ); \ $result = helper_appendToTuple( $result, obj ); \ }; @@ -136,14 +140,14 @@ - 02.12.2005, c */ #define OPAQUE_ARGINOUT( ttype ) \ -%typemap( python, in ) ttype* opaque_arginout( ttype tmp ) { \ +%typemap( in ) ttype* opaque_arginout( ttype tmp ) { \ if ((SWIG_ConvertPtr( $input,(void **) &tmp, $*1_descriptor, \ SWIG_POINTER_EXCEPTION)) == -1) return NULL; \ $1 = &tmp; \ }; \ -%typemap( python, argout ) ttype* opaque_arginout { \ +%typemap( argout ) ttype* opaque_arginout { \ PyObject *obj; \ - obj = SWIG_NewPointerObj( (ttype) (*$1), $*1_descriptor, 1 ); \ + obj = SWIG_NewPointerObj( (ttype) (*$1), $*1_descriptor, 0 ); \ $result = helper_appendToTuple( $result, obj ); \ }; @@ -266,3 +270,5 @@ }; %apply int *OUTPUT { int *do_recip}; %include + +#endif From scipy-svn at scipy.org Tue Aug 14 20:52:44 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 14 Aug 2007 19:52:44 -0500 (CDT) Subject: [Scipy-svn] r3241 - branches/0.5.2.x/Lib/odr/tests Message-ID: <20070815005244.E383B39C1B8@new.scipy.org> Author: jarrod.millman Date: 2007-08-14 19:52:41 -0500 (Tue, 14 Aug 2007) New Revision: 3241 Added: branches/0.5.2.x/Lib/odr/tests/test_odr.py Removed: branches/0.5.2.x/Lib/odr/tests/test_odrpack.py Log: synching with trunk Copied: branches/0.5.2.x/Lib/odr/tests/test_odr.py (from rev 3240, branches/0.5.2.x/Lib/odr/tests/test_odrpack.py) Deleted: branches/0.5.2.x/Lib/odr/tests/test_odrpack.py =================================================================== --- branches/0.5.2.x/Lib/odr/tests/test_odrpack.py 2007-08-15 00:29:42 UTC (rev 3240) +++ branches/0.5.2.x/Lib/odr/tests/test_odrpack.py 2007-08-15 00:52:41 UTC (rev 3241) @@ -1,318 +0,0 @@ - -# Standard library imports. -import cPickle -import unittest - -# Scipy imports. -import numpy as np -from numpy import pi -from numpy.testing import assert_array_almost_equal -from scipy.sandbox.odr import Data, Model, ODR, RealData, odr_stop - - -class ODRTestCase(unittest.TestCase): - - # Explicit Example - - def explicit_fcn(self, B, x): - ret = B[0] + B[1] * np.power(np.exp(B[2]*x) - 1.0, 2) - return ret - - def explicit_fjd(self, B, x): - eBx = np.exp(B[2]*x) - ret = B[1] * 2.0 * (eBx-1.0) * B[2] * eBx - return ret - - def explicit_fjb(self, B, x): - eBx = np.exp(B[2]*x) - res = np.vstack([np.ones(x.shape[-1]), - np.power(eBx-1.0, 2), - B[1]*2.0*(eBx-1.0)*eBx*x]) - return res - - def test_explicit(self): - explicit_mod = Model( - self.explicit_fcn, - fjacb=self.explicit_fjb, - fjacd=self.explicit_fjd, - meta=dict(name='Sample Explicit Model', - ref='ODRPACK UG, pg. 
39'), - ) - explicit_dat = Data([0.,0.,5.,7.,7.5,10.,16.,26.,30.,34.,34.5,100.], - [1265.,1263.6,1258.,1254.,1253.,1249.8,1237.,1218.,1220.6, - 1213.8,1215.5,1212.]) - explicit_odr = ODR(explicit_dat, explicit_mod, beta0=[1500.0, -50.0, -0.1], - ifixx=[0,0,1,1,1,1,1,1,1,1,1,0]) - explicit_odr.set_job(deriv=2) - - out = explicit_odr.run() - assert_array_almost_equal( - out.beta, - np.array([ 1.2646548050648876e+03, -5.4018409956678255e+01, - -8.7849712165253724e-02]), - ) - assert_array_almost_equal( - out.sd_beta, - np.array([ 1.0349270280543437, 1.583997785262061 , 0.0063321988657267]), - ) - assert_array_almost_equal( - out.cov_beta, - np.array([[ 4.4949592379003039e-01, -3.7421976890364739e-01, - -8.0978217468468912e-04], - [ -3.7421976890364739e-01, 1.0529686462751804e+00, - -1.9453521827942002e-03], - [ -8.0978217468468912e-04, -1.9453521827942002e-03, - 1.6827336938454476e-05]]), - ) - - - # Implicit Example - - def implicit_fcn(self, B, x): - return (B[2]*np.power(x[0]-B[0], 2) + - 2.0*B[3]*(x[0]-B[0])*(x[1]-B[1]) + - B[4]*np.power(x[1]-B[1], 2) - 1.0) - - def test_implicit(self): - implicit_mod = Model( - self.implicit_fcn, - implicit=1, - meta=dict(name='Sample Implicit Model', - ref='ODRPACK UG, pg. 49'), - ) - implicit_dat = Data([ - [0.5,1.2,1.6,1.86,2.12,2.36,2.44,2.36,2.06,1.74,1.34,0.9,-0.28, - -0.78,-1.36,-1.9,-2.5,-2.88,-3.18,-3.44], - [-0.12,-0.6,-1.,-1.4,-2.54,-3.36,-4.,-4.75,-5.25,-5.64,-5.97,-6.32, - -6.44,-6.44,-6.41,-6.25,-5.88,-5.5,-5.24,-4.86]], - 1, - ) - implicit_odr = ODR(implicit_dat, implicit_mod, - beta0=[-1.0, -3.0, 0.09, 0.02, 0.08]) - - out = implicit_odr.run() - assert_array_almost_equal( - out.beta, - np.array([-0.9993809167281279, -2.9310484652026476, 0.0875730502693354, - 0.0162299708984738, 0.0797537982976416]), - ) - assert_array_almost_equal( - out.sd_beta, - np.array([ 0.1113840353364371, 0.1097673310686467, 0.0041060738314314, - 0.0027500347539902, 0.0034962501532468]), - ) - assert_array_almost_equal( - out.cov_beta, - np.array([[ 2.1089274602333052e+00, -1.9437686411979040e+00, - 7.0263550868344446e-02, -4.7175267373474862e-02, - 5.2515575927380355e-02], - [ -1.9437686411979040e+00, 2.0481509222414456e+00, - -6.1600515853057307e-02, 4.6268827806232933e-02, - -5.8822307501391467e-02], - [ 7.0263550868344446e-02, -6.1600515853057307e-02, - 2.8659542561579308e-03, -1.4628662260014491e-03, - 1.4528860663055824e-03], - [ -4.7175267373474862e-02, 4.6268827806232933e-02, - -1.4628662260014491e-03, 1.2855592885514335e-03, - -1.2692942951415293e-03], - [ 5.2515575927380355e-02, -5.8822307501391467e-02, - 1.4528860663055824e-03, -1.2692942951415293e-03, - 2.0778813389755596e-03]]), - ) - - - # Multi-variable Example - - def multi_fcn(self, B, x): - if (x < 0.0).any(): - raise odr_stop - theta = pi*B[3]/2. - ctheta = np.cos(theta) - stheta = np.sin(theta) - omega = np.power(2.*pi*x*np.exp(-B[2]), B[3]) - phi = np.arctan2((omega*stheta), (1.0 + omega*ctheta)) - r = (B[0] - B[1]) * np.power(np.sqrt(np.power(1.0 + omega*ctheta, 2) + - np.power(omega*stheta, 2)), -B[4]) - ret = np.vstack([B[1] + r*np.cos(B[4]*phi), - r*np.sin(B[4]*phi)]) - return ret - - def test_multi(self): - multi_mod = Model( - self.multi_fcn, - meta=dict(name='Sample Multi-Response Model', - ref='ODRPACK UG, pg. 
56'), - ) - - multi_x = np.array([30.0, 50.0, 70.0, 100.0, 150.0, 200.0, 300.0, 500.0, - 700.0, 1000.0, 1500.0, 2000.0, 3000.0, 5000.0, 7000.0, 10000.0, - 15000.0, 20000.0, 30000.0, 50000.0, 70000.0, 100000.0, 150000.0]) - multi_y = np.array([ - [4.22, 4.167, 4.132, 4.038, 4.019, 3.956, 3.884, 3.784, 3.713, - 3.633, 3.54, 3.433, 3.358, 3.258, 3.193, 3.128, 3.059, 2.984, - 2.934, 2.876, 2.838, 2.798, 2.759], - [0.136, 0.167, 0.188, 0.212, 0.236, 0.257, 0.276, 0.297, 0.309, - 0.311, 0.314, 0.311, 0.305, 0.289, 0.277, 0.255, 0.24, 0.218, - 0.202, 0.182, 0.168, 0.153, 0.139], - ]) - n = len(multi_x) - multi_we = np.zeros((2, 2, n), dtype=float) - multi_ifixx = np.ones(n, dtype=int) - multi_delta = np.zeros(n, dtype=float) - - multi_we[0,0,:] = 559.6 - multi_we[1,0,:] = multi_we[0,1,:] = -1634.0 - multi_we[1,1,:] = 8397.0 - - for i in range(n): - if multi_x[i] < 100.0: - multi_ifixx[i] = 0 - elif multi_x[i] <= 150.0: - pass # defaults are fine - elif multi_x[i] <= 1000.0: - multi_delta[i] = 25.0 - elif multi_x[i] <= 10000.0: - multi_delta[i] = 560.0 - elif multi_x[i] <= 100000.0: - multi_delta[i] = 9500.0 - else: - multi_delta[i] = 144000.0 - if multi_x[i] == 100.0 or multi_x[i] == 150.0: - multi_we[:,:,i] = 0.0 - - multi_dat = Data(multi_x, multi_y, wd=1e-4/np.power(multi_x, 2), - we=multi_we) - multi_odr = ODR(multi_dat, multi_mod, beta0=[4.,2.,7.,.4,.5], - delta0=multi_delta, ifixx=multi_ifixx) - multi_odr.set_job(deriv=1, del_init=1) - - out = multi_odr.run() - assert_array_almost_equal( - out.beta, - np.array([ 4.3799880305938963, 2.4333057577497703, 8.0028845899503978, - 0.5101147161764654, 0.5173902330489161]), - ) - assert_array_almost_equal( - out.sd_beta, - np.array([ 0.0130625231081944, 0.0130499785273277, 0.1167085962217757, - 0.0132642749596149, 0.0288529201353984]), - ) - assert_array_almost_equal( - out.cov_beta, - np.array([[ 0.0064918418231375, 0.0036159705923791, 0.0438637051470406, - -0.0058700836512467, 0.011281212888768 ], - [ 0.0036159705923791, 0.0064793789429006, 0.0517610978353126, - -0.0051181304940204, 0.0130726943624117], - [ 0.0438637051470406, 0.0517610978353126, 0.5182263323095322, - -0.0563083340093696, 0.1269490939468611], - [-0.0058700836512467, -0.0051181304940204, -0.0563083340093696, - 0.0066939246261263, -0.0140184391377962], - [ 0.011281212888768 , 0.0130726943624117, 0.1269490939468611, - -0.0140184391377962, 0.0316733013820852]]), - ) - - - # Pearson's Data - # K. 
Pearson, Philosophical Magazine, 2, 559 (1901) - - def pearson_fcn(self, B, x): - return B[0] + B[1]*x - - def test_pearson(self): - p_x = np.array([0.,.9,1.8,2.6,3.3,4.4,5.2,6.1,6.5,7.4]) - p_y = np.array([5.9,5.4,4.4,4.6,3.5,3.7,2.8,2.8,2.4,1.5]) - p_sx = np.array([.03,.03,.04,.035,.07,.11,.13,.22,.74,1.]) - p_sy = np.array([1.,.74,.5,.35,.22,.22,.12,.12,.1,.04]) - - p_dat = RealData(p_x, p_y, sx=p_sx, sy=p_sy) - - # Reverse the data to test invariance of results - pr_dat = RealData(p_y, p_x, sx=p_sy, sy=p_sx) - - p_mod = Model(self.pearson_fcn, meta=dict(name='Uni-linear Fit')) - - p_odr = ODR(p_dat, p_mod, beta0=[1.,1.]) - pr_odr = ODR(pr_dat, p_mod, beta0=[1.,1.]) - - out = p_odr.run() - assert_array_almost_equal( - out.beta, - np.array([ 5.4767400299231674, -0.4796082367610305]), - ) - assert_array_almost_equal( - out.sd_beta, - np.array([ 0.3590121690702467, 0.0706291186037444]), - ) - assert_array_almost_equal( - out.cov_beta, - np.array([[ 0.0854275622946333, -0.0161807025443155], - [-0.0161807025443155, 0.003306337993922 ]]), - ) - - rout = pr_odr.run() - assert_array_almost_equal( - rout.beta, - np.array([ 11.4192022410781231, -2.0850374506165474]), - ) - assert_array_almost_equal( - rout.sd_beta, - np.array([ 0.9820231665657161, 0.3070515616198911]), - ) - assert_array_almost_equal( - rout.cov_beta, - np.array([[ 0.6391799462548782, -0.1955657291119177], - [-0.1955657291119177, 0.0624888159223392]]), - ) - - # Lorentz Peak - # The data is taken from one of the undergraduate physics labs I performed. - - def lorentz(self, beta, x): - return (beta[0]*beta[1]*beta[2] / np.sqrt(np.power(x*x - - beta[2]*beta[2], 2.0) + np.power(beta[1]*x, 2.0))) - - def test_lorentz(self): - l_sy = np.array([.29]*18) - l_sx = np.array([.000972971,.000948268,.000707632,.000706679, - .000706074, .000703918,.000698955,.000456856, - .000455207,.000662717,.000654619,.000652694, - .000000859202,.00106589,.00106378,.00125483, .00140818,.00241839]) - - l_dat = RealData( - [3.9094, 3.85945, 3.84976, 3.84716, 3.84551, 3.83964, 3.82608, - 3.78847, 3.78163, 3.72558, 3.70274, 3.6973, 3.67373, 3.65982, - 3.6562, 3.62498, 3.55525, 3.41886], - [652, 910.5, 984, 1000, 1007.5, 1053, 1160.5, 1409.5, 1430, 1122, - 957.5, 920, 777.5, 709.5, 698, 578.5, 418.5, 275.5], - sx=l_sx, - sy=l_sy, - ) - l_mod = Model(self.lorentz, meta=dict(name='Lorentz Peak')) - l_odr = ODR(l_dat, l_mod, beta0=(1000., .1, 3.8)) - - out = l_odr.run() - assert_array_almost_equal( - out.beta, - np.array([ 1.4306780846149925e+03, 1.3390509034538309e-01, - 3.7798193600109009e+00]), - ) - assert_array_almost_equal( - out.sd_beta, - np.array([ 7.3621186811330963e-01, 3.5068899941471650e-04, - 2.4451209281408992e-04]), - ) - assert_array_almost_equal( - out.cov_beta, - np.array([[ 2.4714409064597873e-01, -6.9067261911110836e-05, - -3.1236953270424990e-05], - [ -6.9067261911110836e-05, 5.6077531517333009e-08, - 3.6133261832722601e-08], - [ -3.1236953270424990e-05, 3.6133261832722601e-08, - 2.7261220025171730e-08]]), - ) - - -if __name__ == '__main__': - unittest.main() - -#### EOF ####################################################################### From scipy-svn at scipy.org Tue Aug 14 20:53:16 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 14 Aug 2007 19:53:16 -0500 (CDT) Subject: [Scipy-svn] r3242 - branches/0.5.2.x/Lib/odr/tests Message-ID: <20070815005316.BD80139C29B@new.scipy.org> Author: jarrod.millman Date: 2007-08-14 19:53:14 -0500 (Tue, 14 Aug 2007) New Revision: 3242 Modified: 
branches/0.5.2.x/Lib/odr/tests/test_odr.py Log: more synching with trunk Modified: branches/0.5.2.x/Lib/odr/tests/test_odr.py =================================================================== --- branches/0.5.2.x/Lib/odr/tests/test_odr.py 2007-08-15 00:52:41 UTC (rev 3241) +++ branches/0.5.2.x/Lib/odr/tests/test_odr.py 2007-08-15 00:53:14 UTC (rev 3242) @@ -1,16 +1,14 @@ - # Standard library imports. import cPickle -import unittest # Scipy imports. import numpy as np from numpy import pi -from numpy.testing import assert_array_almost_equal -from scipy.sandbox.odr import Data, Model, ODR, RealData, odr_stop +from numpy.testing import NumpyTest, NumpyTestCase, assert_array_almost_equal +from scipy.odr import Data, Model, ODR, RealData, odr_stop -class ODRTestCase(unittest.TestCase): +class test_odr(NumpyTestCase): # Explicit Example @@ -312,7 +310,7 @@ ) -if __name__ == '__main__': - unittest.main() +if __name__ == "__main__": + NumpyTest().run() #### EOF ####################################################################### From scipy-svn at scipy.org Wed Aug 15 01:44:26 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 15 Aug 2007 00:44:26 -0500 (CDT) Subject: [Scipy-svn] r3243 - in trunk/Lib/sandbox/maskedarray: . tests Message-ID: <20070815054426.94CFA39C2AE@new.scipy.org> Author: pierregm Date: 2007-08-15 00:44:07 -0500 (Wed, 15 Aug 2007) New Revision: 3243 Modified: trunk/Lib/sandbox/maskedarray/core.py trunk/Lib/sandbox/maskedarray/mrecords.py trunk/Lib/sandbox/maskedarray/tests/test_mrecords.py Log: mrecords : * fixed a pb w/ numpy.void * returns 'masked' when accessing a masked attribute from a unique record. core : * modified .tolist() so that fill_value=None now outputs None for masked values Modified: trunk/Lib/sandbox/maskedarray/core.py =================================================================== --- trunk/Lib/sandbox/maskedarray/core.py 2007-08-15 00:53:14 UTC (rev 3242) +++ trunk/Lib/sandbox/maskedarray/core.py 2007-08-15 05:44:07 UTC (rev 3243) @@ -1,2660 +0,0 @@ -# pylint: disable-msg=E1002 -"""MA: a facility for dealing with missing observations -MA is generally used as a numpy.array look-alike. -by Paul F. Dubois. - -Copyright 1999, 2000, 2001 Regents of the University of California. -Released for unlimited redistribution. -Adapted for numpy_core 2005 by Travis Oliphant and -(mainly) Paul Dubois. - -Subclassing of the base ndarray 2006 by Pierre Gerard-Marchant. 
-pgmdevlist_AT_gmail_DOT_com -Improvements suggested by Reggie Dugard (reggie_AT_merfinllc_DOT_com) - -:author: Pierre Gerard-Marchant -:contact: pierregm_at_uga_dot_edu -:version: $Id$ -""" -__author__ = "Pierre GF Gerard-Marchant ($Author$)" -__version__ = '1.0' -__revision__ = "$Revision$" -__date__ = '$Date$' - -__all__ = ['MAError', 'MaskType', 'MaskedArray', - 'bool_', 'complex_', 'float_', 'int_', 'object_', - 'abs', 'absolute', 'add', 'all', 'allclose', 'allequal', 'alltrue', - 'amax', 'amin', 'anom', 'anomalies', 'any', 'arange', - 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2', - 'arctanh', 'argmax', 'argmin', 'argsort', 'around', - 'array', 'asarray', - 'bitwise_and', 'bitwise_or', 'bitwise_xor', - 'ceil', 'choose', 'compressed', 'concatenate', 'conjugate', - 'cos', 'cosh', 'count', - 'diagonal', 'divide', 'dump', 'dumps', - 'empty', 'empty_like', 'equal', 'exp', - 'fabs', 'fmod', 'filled', 'floor', 'floor_divide', - 'getmask', 'getmaskarray', 'greater', 'greater_equal', 'hypot', - 'ids', 'inner', 'innerproduct', - 'isMA', 'isMaskedArray', 'is_mask', 'is_masked', 'isarray', - 'left_shift', 'less', 'less_equal', 'load', 'loads', 'log', 'log10', - 'logical_and', 'logical_not', 'logical_or', 'logical_xor', - 'make_mask', 'make_mask_none', 'mask_or', 'masked', - 'masked_array', 'masked_equal', 'masked_greater', - 'masked_greater_equal', 'masked_inside', 'masked_less', - 'masked_less_equal', 'masked_not_equal', 'masked_object', - 'masked_outside', 'masked_print_option', 'masked_singleton', - 'masked_values', 'masked_where', 'max', 'maximum', 'mean', 'min', - 'minimum', 'multiply', - 'negative', 'nomask', 'nonzero', 'not_equal', - 'ones', 'outer', 'outerproduct', - 'power', 'product', 'ptp', 'put', 'putmask', - 'rank', 'ravel', 'remainder', 'repeat', 'reshape', 'resize', - 'right_shift', 'round_', - 'shape', 'sin', 'sinh', 'size', 'sometrue', 'sort', 'sqrt', 'std', - 'subtract', 'sum', 'swapaxes', - 'take', 'tan', 'tanh', 'transpose', 'true_divide', - 'var', 'where', - 'zeros'] - -import sys -import types -import cPickle -# -import numpy -from numpy import bool_, complex_, float_, int_, object_, str_ - -import numpy.core.umath as umath -import numpy.core.fromnumeric as fromnumeric -import numpy.core.numeric as numeric -import numpy.core.numerictypes as ntypes -from numpy import bool_, dtype, typecodes, amax, amin, ndarray -from numpy import expand_dims as n_expand_dims -import warnings - - -MaskType = bool_ -nomask = MaskType(0) - -divide_tolerance = 1.e-35 -numpy.seterr(all='ignore') - -# TODO: There's still a problem with N.add.reduce not working... -# TODO: ...neither does N.add.accumulate - -#####-------------------------------------------------------------------------- -#---- --- Exceptions --- -#####-------------------------------------------------------------------------- -class MAError(Exception): - "Class for MA related errors." - def __init__ (self, args=None): - "Creates an exception." - Exception.__init__(self,args) - self.args = args - def __str__(self): - "Calculates the string representation." 
- return str(self.args) - __repr__ = __str__ - -#####-------------------------------------------------------------------------- -#---- --- Filling options --- -#####-------------------------------------------------------------------------- -# b: boolean - c: complex - f: floats - i: integer - O: object - S: string -default_filler = {'b': True, - 'c' : 1.e20 + 0.0j, - 'f' : 1.e20, - 'i' : 999999, - 'O' : '?', - 'S' : 'N/A', - 'u' : 999999, - 'V' : '???', - } -max_filler = ntypes._minvals -max_filler.update([(k,-numeric.inf) for k in [numpy.float32, numpy.float64]]) -min_filler = ntypes._maxvals -min_filler.update([(k,numeric.inf) for k in [numpy.float32, numpy.float64]]) -if 'float128' in ntypes.typeDict: - max_filler.update([(numpy.float128,-numeric.inf)]) - min_filler.update([(numpy.float128, numeric.inf)]) - - -def default_fill_value(obj): - "Calculates the default fill value for an object `obj`." - if hasattr(obj,'dtype'): - defval = default_filler[obj.dtype.kind] - elif isinstance(obj, numeric.dtype): - defval = default_filler[obj.kind] - elif isinstance(obj, float): - defval = default_filler['f'] - elif isinstance(obj, int) or isinstance(obj, long): - defval = default_filler['i'] - elif isinstance(obj, str): - defval = default_filler['S'] - elif isinstance(obj, complex): - defval = default_filler['c'] - else: - defval = default_filler['O'] - return defval - -def minimum_fill_value(obj): - "Calculates the default fill value suitable for taking the minimum of `obj`." - if hasattr(obj, 'dtype'): - objtype = obj.dtype - filler = min_filler[objtype] - if filler is None: - raise TypeError, 'Unsuitable type for calculating minimum.' - return filler - elif isinstance(obj, float): - return min_filler[ntypes.typeDict['float_']] - elif isinstance(obj, int): - return min_filler[ntypes.typeDict['int_']] - elif isinstance(obj, long): - return min_filler[ntypes.typeDict['uint']] - elif isinstance(obj, numeric.dtype): - return min_filler[obj] - else: - raise TypeError, 'Unsuitable type for calculating minimum.' - -def maximum_fill_value(obj): - "Calculates the default fill value suitable for taking the maximum of `obj`." - if hasattr(obj, 'dtype'): - objtype = obj.dtype - filler = max_filler[objtype] - if filler is None: - raise TypeError, 'Unsuitable type for calculating minimum.' - return filler - elif isinstance(obj, float): - return max_filler[ntypes.typeDict['float_']] - elif isinstance(obj, int): - return max_filler[ntypes.typeDict['int_']] - elif isinstance(obj, long): - return max_filler[ntypes.typeDict['uint']] - elif isinstance(obj, numeric.dtype): - return max_filler[obj] - else: - raise TypeError, 'Unsuitable type for calculating minimum.' - -def set_fill_value(a, fill_value): - "Sets the fill value of `a` if it is a masked array." - if isinstance(a, MaskedArray): - a.set_fill_value(fill_value) - -def get_fill_value(a): - """Returns the fill value of `a`, if any. - Otherwise, returns the default fill value for that type. - """ - if isinstance(a, MaskedArray): - result = a.fill_value - else: - result = default_fill_value(a) - return result - -def common_fill_value(a, b): - "Returns the common fill_value of `a` and `b`, if any, or `None`." - t1 = get_fill_value(a) - t2 = get_fill_value(b) - if t1 == t2: - return t1 - return None - -#................................................ -def filled(a, value = None): - """Returns `a` as an array with masked data replaced by `value`. -If `value` is `None` or the special element `masked`, `get_fill_value(a)` -is used instead. 
- -If `a` is already a contiguous numeric array, `a` itself is returned. - -`filled(a)` can be used to be sure that the result is numeric when passing -an object a to other software ignorant of MA, in particular to numpy itself. - """ - if hasattr(a, 'filled'): - return a.filled(value) - elif isinstance(a, ndarray): # and a.flags['CONTIGUOUS']: - return a - elif isinstance(a, dict): - return numeric.array(a, 'O') - else: - return numeric.array(a) - -def get_masked_subclass(*arrays): - """Returns the youngest subclass of MaskedArray from a list of arrays, - or MaskedArray. In case of siblings, the first takes over.""" - if len(arrays) == 1: - arr = arrays[0] - if isinstance(arr, MaskedArray): - rcls = type(arr) - else: - rcls = MaskedArray - else: - arrcls = [type(a) for a in arrays] - rcls = arrcls[0] - if not issubclass(rcls, MaskedArray): - rcls = MaskedArray - for cls in arrcls[1:]: - if issubclass(cls, rcls): - rcls = cls - return rcls - -#####-------------------------------------------------------------------------- -#---- --- Ufuncs --- -#####-------------------------------------------------------------------------- -ufunc_domain = {} -ufunc_fills = {} - -class domain_check_interval: - """Defines a valid interval, -so that `domain_check_interval(a,b)(x) = true` where `x < a` or `x > b`.""" - def __init__(self, a, b): - "domain_check_interval(a,b)(x) = true where x < a or y > b" - if (a > b): - (a, b) = (b, a) - self.a = a - self.b = b - - def __call__ (self, x): - "Execute the call behavior." - return umath.logical_or(umath.greater (x, self.b), - umath.less(x, self.a)) -#............................ -class domain_tan: - """Defines a valid interval for the `tan` function, -so that `domain_tan(eps) = True where `abs(cos(x)) < eps`""" - def __init__(self, eps): - "domain_tan(eps) = true where abs(cos(x)) < eps)" - self.eps = eps - def __call__ (self, x): - "Execute the call behavior." - return umath.less(umath.absolute(umath.cos(x)), self.eps) -#............................ -class domain_safe_divide: - """defines a domain for safe division.""" - def __init__ (self, tolerance=divide_tolerance): - self.tolerance = tolerance - def __call__ (self, a, b): - return umath.absolute(a) * self.tolerance >= umath.absolute(b) -#............................ -class domain_greater: - "domain_greater(v)(x) = true where x <= v" - def __init__(self, critical_value): - "domain_greater(v)(x) = true where x <= v" - self.critical_value = critical_value - - def __call__ (self, x): - "Execute the call behavior." - return umath.less_equal(x, self.critical_value) -#............................ -class domain_greater_equal: - "domain_greater_equal(v)(x) = true where x < v" - def __init__(self, critical_value): - "domain_greater_equal(v)(x) = true where x < v" - self.critical_value = critical_value - - def __call__ (self, x): - "Execute the call behavior." - return umath.less(x, self.critical_value) -#.............................................................................. -class masked_unary_operation: - """Defines masked version of unary operations, -where invalid values are pre-masked. - -:IVariables: - - `f` : function. - - `fill` : Default filling value *[0]*. - - `domain` : Default domain *[None]*. - """ - def __init__ (self, mufunc, fill=0, domain=None): - """ masked_unary_operation(aufunc, fill=0, domain=None) - aufunc(fill) must be defined - self(x) returns aufunc(x) - with masked values where domain(x) is true or getmask(x) is true. 
- """ - self.f = mufunc - self.fill = fill - self.domain = domain - self.__doc__ = getattr(mufunc, "__doc__", str(mufunc)) - self.__name__ = getattr(mufunc, "__name__", str(mufunc)) - ufunc_domain[mufunc] = domain - ufunc_fills[mufunc] = fill - # - def __call__ (self, a, *args, **kwargs): - "Execute the call behavior." -# numeric tries to return scalars rather than arrays when given scalars. - m = getmask(a) - d1 = filled(a, self.fill) - if self.domain is not None: - m = mask_or(m, numeric.asarray(self.domain(d1))) - # Take care of the masked singletong first ... - if m.ndim == 0 and m: - return masked - # Get the result.... - if isinstance(a, MaskedArray): - result = self.f(d1, *args, **kwargs).view(type(a)) - else: - result = self.f(d1, *args, **kwargs).view(MaskedArray) - # Fix the mask if we don't have a scalar - if result.ndim > 0: - result._mask = m - return result - # - def __str__ (self): - return "Masked version of %s. [Invalid values are masked]" % str(self.f) -#.............................................................................. -class masked_binary_operation: - """Defines masked version of binary operations, -where invalid values are pre-masked. - -:IVariables: - - `f` : function. - - `fillx` : Default filling value for first array*[0]*. - - `filly` : Default filling value for second array*[0]*. - - `domain` : Default domain *[None]*. - """ - def __init__ (self, mbfunc, fillx=0, filly=0): - """abfunc(fillx, filly) must be defined. - abfunc(x, filly) = x for all x to enable reduce. - """ - self.f = mbfunc - self.fillx = fillx - self.filly = filly - self.__doc__ = getattr(mbfunc, "__doc__", str(mbfunc)) - self.__name__ = getattr(mbfunc, "__name__", str(mbfunc)) - ufunc_domain[mbfunc] = None - ufunc_fills[mbfunc] = (fillx, filly) - # - def __call__ (self, a, b, *args, **kwargs): - "Execute the call behavior." - m = mask_or(getmask(a), getmask(b)) - if (not m.ndim) and m: - return masked - d1 = filled(a, self.fillx) - d2 = filled(b, self.filly) -# CHECK : Do we really need to fill the arguments ? Pro'ly not -# result = self.f(a, b, *args, **kwargs).view(get_masked_subclass(a,b)) - result = self.f(d1, d2, *args, **kwargs).view(get_masked_subclass(a,b)) - if result.ndim > 0: - result._mask = m - return result - # - def reduce (self, target, axis=0, dtype=None): - """Reduces `target` along the given `axis`.""" - if isinstance(target, MaskedArray): - tclass = type(target) - else: - tclass = MaskedArray - m = getmask(target) - t = filled(target, self.filly) - if t.shape == (): - t = t.reshape(1) - if m is not nomask: - m = make_mask(m, copy=1) - m.shape = (1,) - if m is nomask: - return self.f.reduce(t, axis).view(tclass) - t = t.view(tclass) - t._mask = m - # XXX: "or t.dtype" below is a workaround for what appears - # XXX: to be a bug in reduce. - tr = self.f.reduce(filled(t, self.filly), axis, dtype=dtype or t.dtype) - mr = umath.logical_and.reduce(m, axis) - tr = tr.view(tclass) - if mr.ndim > 0: - tr._mask = mr - return tr - elif mr: - return masked - return tr - - def outer (self, a, b): - "Returns the function applied to the outer product of a and b." 
- ma = getmask(a) - mb = getmask(b) - if ma is nomask and mb is nomask: - m = nomask - else: - ma = getmaskarray(a) - mb = getmaskarray(b) - m = umath.logical_or.outer(ma, mb) - if (not m.ndim) and m: - return masked - rcls = get_masked_subclass(a,b) - d = self.f.outer(filled(a, self.fillx), filled(b, self.filly)).view(rcls) - if d.ndim > 0: - d._mask = m - return d - - def accumulate (self, target, axis=0): - """Accumulates `target` along `axis` after filling with y fill value.""" - if isinstance(target, MaskedArray): - tclass = type(target) - else: - tclass = masked_array - t = filled(target, self.filly) - return self.f.accumulate(t, axis).view(tclass) - - def __str__ (self): - return "Masked version of " + str(self.f) -#.............................................................................. -class domained_binary_operation: - """Defines binary operations that have a domain, like divide. - -These are complicated so they are a separate class. -They have no reduce, outer or accumulate. - -:IVariables: - - `f` : function. - - `fillx` : Default filling value for first array*[0]*. - - `filly` : Default filling value for second array*[0]*. - - `domain` : Default domain *[None]*. - """ - def __init__ (self, dbfunc, domain, fillx=0, filly=0): - """abfunc(fillx, filly) must be defined. - abfunc(x, filly) = x for all x to enable reduce. - """ - self.f = dbfunc - self.domain = domain - self.fillx = fillx - self.filly = filly - self.__doc__ = getattr(dbfunc, "__doc__", str(dbfunc)) - self.__name__ = getattr(dbfunc, "__name__", str(dbfunc)) - ufunc_domain[dbfunc] = domain - ufunc_fills[dbfunc] = (fillx, filly) - - def __call__(self, a, b): - "Execute the call behavior." - ma = getmask(a) - mb = getmask(b) - d1 = filled(a, self.fillx) - d2 = filled(b, self.filly) - t = numeric.asarray(self.domain(d1, d2)) - - if fromnumeric.sometrue(t, None): - d2 = numeric.where(t, self.filly, d2) - mb = mask_or(mb, t) - m = mask_or(ma, mb) - if (not m.ndim) and m: - return masked - result = self.f(d1, d2).view(get_masked_subclass(a,b)) - if result.ndim > 0: - result._mask = m - return result - - def __str__ (self): - return "Masked version of " + str(self.f) - -#.............................................................................. 
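# Usage sketch (illustrative, not part of the diff above): the effect of
# domained_binary_operation for divide, shown against numpy.ma, which keeps
# this machinery. Offending divisors come back masked instead of raising.
import numpy as np

a = np.ma.array([1.0, 2.0, 3.0])
b = np.ma.array([2.0, 0.0, 1.0])
np.ma.divide(a, b)   # masked_array([0.5, --, 3.0]): domain_safe_divide
                     # masked the zero divisor at index 1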
-# Unary ufuncs -exp = masked_unary_operation(umath.exp) -conjugate = masked_unary_operation(umath.conjugate) -sin = masked_unary_operation(umath.sin) -cos = masked_unary_operation(umath.cos) -tan = masked_unary_operation(umath.tan) -arctan = masked_unary_operation(umath.arctan) -arcsinh = masked_unary_operation(umath.arcsinh) -sinh = masked_unary_operation(umath.sinh) -cosh = masked_unary_operation(umath.cosh) -tanh = masked_unary_operation(umath.tanh) -abs = absolute = masked_unary_operation(umath.absolute) -fabs = masked_unary_operation(umath.fabs) -negative = masked_unary_operation(umath.negative) -floor = masked_unary_operation(umath.floor) -ceil = masked_unary_operation(umath.ceil) -around = masked_unary_operation(fromnumeric.round_) -logical_not = masked_unary_operation(umath.logical_not) -# Domained unary ufuncs -sqrt = masked_unary_operation(umath.sqrt, 0.0, domain_greater_equal(0.0)) -log = masked_unary_operation(umath.log, 1.0, domain_greater(0.0)) -log10 = masked_unary_operation(umath.log10, 1.0, domain_greater(0.0)) -tan = masked_unary_operation(umath.tan, 0.0, domain_tan(1.e-35)) -arcsin = masked_unary_operation(umath.arcsin, 0.0, - domain_check_interval(-1.0, 1.0)) -arccos = masked_unary_operation(umath.arccos, 0.0, - domain_check_interval(-1.0, 1.0)) -arccosh = masked_unary_operation(umath.arccosh, 1.0, domain_greater_equal(1.0)) -arctanh = masked_unary_operation(umath.arctanh, 0.0, - domain_check_interval(-1.0+1e-15, 1.0-1e-15)) -# Binary ufuncs -add = masked_binary_operation(umath.add) -subtract = masked_binary_operation(umath.subtract) -multiply = masked_binary_operation(umath.multiply, 1, 1) -arctan2 = masked_binary_operation(umath.arctan2, 0.0, 1.0) -equal = masked_binary_operation(umath.equal) -equal.reduce = None -not_equal = masked_binary_operation(umath.not_equal) -not_equal.reduce = None -less_equal = masked_binary_operation(umath.less_equal) -less_equal.reduce = None -greater_equal = masked_binary_operation(umath.greater_equal) -greater_equal.reduce = None -less = masked_binary_operation(umath.less) -less.reduce = None -greater = masked_binary_operation(umath.greater) -greater.reduce = None -logical_and = masked_binary_operation(umath.logical_and) -alltrue = masked_binary_operation(umath.logical_and, 1, 1).reduce -logical_or = masked_binary_operation(umath.logical_or) -sometrue = logical_or.reduce -logical_xor = masked_binary_operation(umath.logical_xor) -bitwise_and = masked_binary_operation(umath.bitwise_and) -bitwise_or = masked_binary_operation(umath.bitwise_or) -bitwise_xor = masked_binary_operation(umath.bitwise_xor) -hypot = masked_binary_operation(umath.hypot) -# Domained binary ufuncs -divide = domained_binary_operation(umath.divide, domain_safe_divide(), 0, 1) -true_divide = domained_binary_operation(umath.true_divide, - domain_safe_divide(), 0, 1) -floor_divide = domained_binary_operation(umath.floor_divide, - domain_safe_divide(), 0, 1) -remainder = domained_binary_operation(umath.remainder, - domain_safe_divide(), 0, 1) -fmod = domained_binary_operation(umath.fmod, domain_safe_divide(), 0, 1) - - -#####-------------------------------------------------------------------------- -#---- --- Mask creation functions --- -#####-------------------------------------------------------------------------- -def getmask(a): - """Returns the mask of `a`, if any, or `nomask`. -Returns `nomask` if `a` is not a masked array. 
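# Usage sketch (illustrative, not part of the diff above): the domained unary
# ufuncs instantiated here, exercised through numpy.ma, which keeps the same
# wrappers. Out-of-domain inputs come back masked rather than as NaN.
import numpy as np

x = np.array([-1.0, 0.0, 1.0, 10.0])
np.ma.log(x)    # [-- -- 0.0 2.302...]: domain_greater(0.0) masks x <= 0
np.ma.sqrt(x)   # [-- 0.0 1.0 3.162...]: domain_greater_equal(0.0) masks x < 0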
-To get an array for sure use getmaskarray.""" - if hasattr(a, "_mask"): - return a._mask - else: - return nomask - -def getmaskarray(a): - """Returns the mask of `a`, if any. -Otherwise, returns an array of `False`, with the same shape as `a`. - """ - m = getmask(a) - if m is nomask: - return make_mask_none(fromnumeric.shape(a)) - else: - return m - -def is_mask(m): - """Returns `True` if `m` is a legal mask. -Does not check contents, only type. - """ - try: - return m.dtype.type is MaskType - except AttributeError: - return False -# -def make_mask(m, copy=False, small_mask=True, flag=None): - """make_mask(m, copy=0, small_mask=0) -Returns `m` as a mask, creating a copy if necessary or requested. -The function can accept any sequence of integers or `nomask`. -Does not check that contents must be 0s and 1s. -If `small_mask=True`, returns `nomask` if `m` contains no true elements. - -:Parameters: - - `m` (ndarray) : Mask. - - `copy` (boolean, *[False]*) : Returns a copy of `m` if true. - - `small_mask` (boolean, *[False]*): Flattens mask to `nomask` if `m` is all false. - """ - if flag is not None: - warnings.warn("The flag 'flag' is now called 'small_mask'!", - DeprecationWarning) - small_mask = flag - if m is nomask: - return nomask - elif isinstance(m, ndarray): - m = filled(m, True) - if m.dtype.type is MaskType: - if copy: - result = numeric.array(m, dtype=MaskType, copy=copy) - else: - result = m - else: - result = numeric.array(m, dtype=MaskType) - else: - result = numeric.array(filled(m, True), dtype=MaskType) - # Bas les masques ! - if small_mask and not result.any(): - return nomask - else: - return result - -def make_mask_none(s): - "Returns a mask of shape `s`, filled with `False`." - result = numeric.zeros(s, dtype=MaskType) - return result - -def mask_or (m1, m2, copy=False, small_mask=True): - """Returns the combination of two masks `m1` and `m2`. -The masks are combined with the `logical_or` operator, treating `nomask` as false. -The result may equal m1 or m2 if the other is nomask. - -:Parameters: - - `m` (ndarray) : Mask. - - `copy` (boolean, *[False]*) : Returns a copy of `m` if true. - - `small_mask` (boolean, *[False]*): Flattens mask to `nomask` if `m` is all false. - """ - if m1 is nomask: - return make_mask(m2, copy=copy, small_mask=small_mask) - if m2 is nomask: - return make_mask(m1, copy=copy, small_mask=small_mask) - if m1 is m2 and is_mask(m1): - return m1 - return make_mask(umath.logical_or(m1, m2), copy=copy, small_mask=small_mask) - -#####-------------------------------------------------------------------------- -#--- --- Masking functions --- -#####-------------------------------------------------------------------------- -def masked_where(condition, a, copy=True): - """Returns `x` as an array masked where `condition` is true. -Masked values of `x` or `condition` are kept. - -:Parameters: - - `condition` (ndarray) : Masking condition. - - `x` (ndarray) : Array to mask. - - `copy` (boolean, *[False]*) : Returns a copy of `m` if true. - """ - cond = filled(condition,1) - a = numeric.array(a, copy=copy, subok=True) - if hasattr(a, '_mask'): - cond = mask_or(cond, a._mask) - cls = type(a) - else: - cls = MaskedArray - result = a.view(cls) - result._mask = cond - return result - -def masked_greater(x, value, copy=1): - "Shortcut to `masked_where`, with ``condition = (x > value)``." - return masked_where(greater(x, value), x, copy=copy) - -def masked_greater_equal(x, value, copy=1): - "Shortcut to `masked_where`, with ``condition = (x >= value)``." 
- return masked_where(greater_equal(x, value), x, copy=copy) - -def masked_less(x, value, copy=True): - "Shortcut to `masked_where`, with ``condition = (x < value)``." - return masked_where(less(x, value), x, copy=copy) - -def masked_less_equal(x, value, copy=True): - "Shortcut to `masked_where`, with ``condition = (x <= value)``." - return masked_where(less_equal(x, value), x, copy=copy) - -def masked_not_equal(x, value, copy=True): - "Shortcut to `masked_where`, with ``condition = (x != value)``." - return masked_where((x != value), x, copy=copy) - -# -def masked_equal(x, value, copy=True): - """Shortcut to `masked_where`, with ``condition = (x == value)``. -For floating point, consider `masked_values(x, value)` instead. - """ - return masked_where((x == value), x, copy=copy) -# d = filled(x, 0) -# c = umath.equal(d, value) -# m = mask_or(c, getmask(x)) -# return array(d, mask=m, copy=copy) - -def masked_inside(x, v1, v2, copy=True): - """Shortcut to `masked_where`, where `condition` is True for x inside -the interval `[v1,v2]` ``(v1 <= x <= v2)``. -The boundaries `v1` and `v2` can be given in either order. - """ - if v2 < v1: - (v1, v2) = (v2, v1) - xf = filled(x) - condition = (xf >= v1) & (xf <= v2) - return masked_where(condition, x, copy=copy) - -def masked_outside(x, v1, v2, copy=True): - """Shortcut to `masked_where`, where `condition` is True for x outside -the interval `[v1,v2]` ``(x < v1)|(x > v2)``. -The boundaries `v1` and `v2` can be given in either order. - """ - if v2 < v1: - (v1, v2) = (v2, v1) - xf = filled(x) - condition = (xf < v1) | (xf > v2) - return masked_where(condition, x, copy=copy) - -# -def masked_object(x, value, copy=True): - """Masks the array `x` where the data are exactly equal to `value`. -This function is suitable only for `object` arrays: for floating point, -please use `masked_values` instead. -The mask is set to `nomask` if posible. - -:parameter copy (Boolean, *[True]*): Returns a copy of `x` if true. """ - if isMaskedArray(x): - condition = umath.equal(x._data, value) - mask = x._mask - else: - condition = umath.equal(fromnumeric.asarray(x), value) - mask = nomask - mask = mask_or(mask, make_mask(condition, small_mask=True)) - return masked_array(x, mask=mask, copy=copy, fill_value=value) - -def masked_values(x, value, rtol=1.e-5, atol=1.e-8, copy=True): - """Masks the array `x` where the data are approximately equal to `value` -(that is, ``abs(x - value) <= atol+rtol*abs(value)``). -Suitable only for floating points. For integers, please use `masked_equal`. -The mask is set to `nomask` if posible. - -:Parameters: - - `rtol` (Float, *[1e-5]*): Tolerance parameter. - - `atol` (Float, *[1e-8]*): Tolerance parameter. - - `copy` (boolean, *[False]*) : Returns a copy of `x` if True. 
- """ - abs = umath.absolute - xnew = filled(x, value) - if issubclass(xnew.dtype.type, numeric.floating): - condition = umath.less_equal(abs(xnew-value), atol+rtol*abs(value)) - try: - mask = x._mask - except AttributeError: - mask = nomask - else: - condition = umath.equal(xnew, value) - mask = nomask - mask = mask_or(mask, make_mask(condition, small_mask=True)) - return masked_array(xnew, mask=mask, copy=copy, fill_value=value) - -#####-------------------------------------------------------------------------- -#---- --- Printing options --- -#####-------------------------------------------------------------------------- -class _MaskedPrintOption: - """Handles the string used to represent missing data in a masked array.""" - def __init__ (self, display): - "Creates the masked_print_option object." - self._display = display - self._enabled = True - - def display(self): - "Displays the string to print for masked values." - return self._display - - def set_display (self, s): - "Sets the string to print for masked values." - self._display = s - - def enabled(self): - "Is the use of the display value enabled?" - return self._enabled - - def enable(self, small_mask=1): - "Set the enabling small_mask to `small_mask`." - self._enabled = small_mask - - def __str__ (self): - return str(self._display) - - __repr__ = __str__ - -#if you single index into a masked location you get this object. -masked_print_option = _MaskedPrintOption('--') - -#####-------------------------------------------------------------------------- -#---- --- MaskedArray class --- -#####-------------------------------------------------------------------------- -##def _getoptions(a_out, a_in): -## "Copies standards options of a_in to a_out." -## for att in ['] -#class _mathmethod(object): -# """Defines a wrapper for arithmetic methods. -#Instead of directly calling a ufunc, the corresponding method of the `array._data` -#object is called instead. -# """ -# def __init__ (self, methodname, fill_self=0, fill_other=0, domain=None): -# """ -#:Parameters: -# - `methodname` (String) : Method name. -# - `fill_self` (Float *[0]*) : Fill value for the instance. -# - `fill_other` (Float *[0]*) : Fill value for the target. -# - `domain` (Domain object *[None]*) : Domain of non-validity. -# """ -# self.methodname = methodname -# self.fill_self = fill_self -# self.fill_other = fill_other -# self.domain = domain -# self.obj = None -# self.__doc__ = self.getdoc() -# # -# def getdoc(self): -# "Returns the doc of the function (from the doc of the method)." -# try: -# return getattr(MaskedArray, self.methodname).__doc__ -# except: -# return getattr(ndarray, self.methodname).__doc__ -# # -# def __get__(self, obj, objtype=None): -# self.obj = obj -# return self -# # -# def __call__ (self, other, *args): -# "Execute the call behavior." -# instance = self.obj -# m_self = instance._mask -# m_other = getmask(other) -# base = instance.filled(self.fill_self) -# target = filled(other, self.fill_other) -# if self.domain is not None: -# # We need to force the domain to a ndarray only. -# if self.fill_other > self.fill_self: -# domain = self.domain(base, target) -# else: -# domain = self.domain(target, base) -# if domain.any(): -# #If `other` is a subclass of ndarray, `filled` must have the -# # same subclass, else we'll lose some info. -# #The easiest then is to fill `target` instead of creating -# # a pure ndarray. -# #Oh, and we better make a copy! 
-# if isinstance(other, ndarray): -# # We don't want to modify other: let's copy target, then -# target = target.copy() -# target[fromnumeric.asarray(domain)] = self.fill_other -# else: -# target = numeric.where(fromnumeric.asarray(domain), -# self.fill_other, target) -# m_other = mask_or(m_other, domain) -# m = mask_or(m_self, m_other) -# method = getattr(base, self.methodname) -# result = method(target, *args).view(type(instance)) -# try: -# result._mask = m -# except AttributeError: -# if m: -# result = masked -# return result -#............................................................................... -class _arraymethod(object): - """Defines a wrapper for basic array methods. -Upon call, returns a masked array, where the new `_data` array is the output -of the corresponding method called on the original `_data`. - -If `onmask` is True, the new mask is the output of the method calld on the initial mask. -If `onmask` is False, the new mask is just a reference to the initial mask. - -:Parameters: - `funcname` : String - Name of the function to apply on data. - `onmask` : Boolean *[True]* - Whether the mask must be processed also (True) or left alone (False). - """ - def __init__(self, funcname, onmask=True): - self._name = funcname - self._onmask = onmask - self.obj = None - self.__doc__ = self.getdoc() - # - def getdoc(self): - "Returns the doc of the function (from the doc of the method)." - methdoc = getattr(ndarray, self._name, None) - methdoc = getattr(numpy, self._name, methdoc) -# methdoc = getattr(MaskedArray, self._name, methdoc) - if methdoc is not None: - return methdoc.__doc__ -# try: -# return getattr(MaskedArray, self._name).__doc__ -# except: -# try: -# return getattr(numpy, self._name).__doc__ -# except: -# return getattr(ndarray, self._name).__doc - # - def __get__(self, obj, objtype=None): - self.obj = obj - return self - # - def __call__(self, *args, **params): - methodname = self._name - data = self.obj._data - mask = self.obj._mask - cls = type(self.obj) - result = getattr(data, methodname)(*args, **params).view(cls) - result._smallmask = self.obj._smallmask - if result.ndim: - if not self._onmask: - result._mask = mask - elif mask is not nomask: - result.__setmask__(getattr(mask, methodname)(*args, **params)) - return result -#.......................................................... - -class flatiter(object): - "Defines an interator." - def __init__(self, ma): - self.ma = ma - self.ma_iter = numpy.asarray(ma).flat - - if ma._mask is nomask: - self.maskiter = None - else: - self.maskiter = ma._mask.flat - - def __iter__(self): - return self - - ### This won't work is ravel makes a copy - def __setitem__(self, index, value): - a = self.ma.ravel() - a[index] = value - - def next(self): - d = self.ma_iter.next() - if self.maskiter is not None and self.maskiter.next(): - d = masked - return d - - -class MaskedArray(numeric.ndarray): - """Arrays with possibly masked values. -Masked values of True exclude the corresponding element from any computation. - -Construction: - x = array(data, dtype=None, copy=True, order=False, - mask = nomask, fill_value=None, small_mask=True) - -If copy=False, every effort is made not to copy the data: -If `data` is a MaskedArray, and argument mask=nomask, then the candidate data -is `data._data` and the mask used is `data._mask`. -If `data` is a numeric array, it is used as the candidate raw data. -If `dtype` is not None and is different from data.dtype.char then a data copy is required. -Otherwise, the candidate is used. 
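# Usage sketch (illustrative, not part of the diff above): _arraymethod
# wrappers apply a method to data and mask alike, and the flatiter yields the
# masked singleton for masked slots. Shown against numpy.ma (same behavior).
import numpy as np

x = np.ma.array([[1, 2], [3, 4]], mask=[[0, 1], [0, 0]])
x.transpose()           # data and mask are transposed together
[v for v in x.flat]     # [1, masked, 3, 4]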
- -If a data copy is required, the raw (unmasked) data stored is the result of: -numeric.array(data, dtype=dtype.char, copy=copy) - -If `mask` is `nomask` there are no masked values. -Otherwise mask must be convertible to an array of booleans with the same shape as x. -If `small_mask` is True, a mask consisting of zeros (False) only is compressed to `nomask`. -Otherwise, the mask is not compressed. - -fill_value is used to fill in masked values when necessary, such as when -printing and in method/function filled(). -The fill_value is not used for computation within this module. - """ - __array_priority__ = 10.1 - _defaultmask = nomask - _defaulthardmask = False - _baseclass = numeric.ndarray - def __new__(cls, data=None, mask=nomask, dtype=None, copy=False, fill_value=None, - keep_mask=True, small_mask=True, hard_mask=False, flag=None, - subok=True, **options): - """array(data, dtype=None, copy=True, mask=nomask, fill_value=None) - -If `data` is already a ndarray, its dtype becomes the default value of dtype. - """ - if flag is not None: - warnings.warn("The flag 'flag' is now called 'small_mask'!", - DeprecationWarning) - small_mask = flag - # Process data............ - _data = numeric.array(data, dtype=dtype, copy=copy, subok=subok) - _baseclass = getattr(data, '_baseclass', type(_data)) - _basedict = getattr(data, '_basedict', getattr(data, '__dict__', None)) - if not isinstance(data, MaskedArray): - _data = _data.view(cls) - elif not subok: - _data = data.view(cls) - else: - _data = _data.view(type(data)) - # Backwards compat ....... - if hasattr(data,'_mask') and not isinstance(data, ndarray): - _data._mask = data._mask - _sharedmask = True - # Process mask ........... - if mask is nomask: - if not keep_mask: - _data._mask = nomask - if copy: - _data._mask = _data._mask.copy() - else: - mask = numeric.array(mask, dtype=MaskType, copy=copy) - if mask.shape != _data.shape: - (nd, nm) = (_data.size, mask.size) - if nm == 1: - mask = numeric.resize(mask, _data.shape) - elif nm == nd: - mask = fromnumeric.reshape(mask, _data.shape) - else: - msg = "Mask and data not compatible: data size is %i, "+\ - "mask size is %i." - raise MAError, msg % (nd, nm) - if _data._mask is nomask: - _data._mask = mask - _data._sharedmask = True - else: - # Make a copy of the mask to avoid propagation - _data._sharedmask = False - if not keep_mask: - _data._mask = mask - else: - _data._mask = umath.logical_or(mask, _data._mask) - - - # Update fill_value....... - _data._fill_value = getattr(data, '_fill_value', fill_value) - if _data._fill_value is None: - _data._fill_value = default_fill_value(_data) - # Process extra options .. - _data._hardmask = hard_mask - _data._smallmask = small_mask - _data._baseclass = _baseclass - _data._basedict = _basedict - return _data - #........................ - def __array_finalize__(self,obj): - """Finalizes the masked array. - """ - # Finalize mask ............... - self._mask = getattr(obj, '_mask', nomask) - if self._mask is not nomask: - self._mask.shape = self.shape - # Get the remaining options ... - self._hardmask = getattr(obj, '_hardmask', self._defaulthardmask) - self._smallmask = getattr(obj, '_smallmask', True) - self._sharedmask = True - self._baseclass = getattr(obj, '_baseclass', type(obj)) - self._fill_value = getattr(obj, '_fill_value', None) - # Update special attributes ... 
- self._basedict = getattr(obj, '_basedict', getattr(obj, '__dict__', None)) - if self._basedict is not None: - self.__dict__.update(self._basedict) - return - #.................................. - def __array_wrap__(self, obj, context=None): - """Special hook for ufuncs. -Wraps the numpy array and sets the mask according to context. - """ - #TODO : Should we check for type result - result = obj.view(type(self)) - #.......... - if context is not None: - result._mask = result._mask.copy() - (func, args, _) = context - m = reduce(mask_or, [getmask(arg) for arg in args]) - # Get domain mask - domain = ufunc_domain.get(func, None) - if domain is not None: - if len(args) > 2: - d = reduce(domain, args) - else: - d = domain(*args) - if m is nomask: - if d is not nomask: - m = d - else: - m |= d - if not m.ndim and m: - if m: - if result.shape == (): - return masked - result._mask = numeric.ones(result.shape, bool_) - else: - result._mask = m - #.... -# result._mask = m - result._fill_value = self._fill_value - result._hardmask = self._hardmask - result._smallmask = self._smallmask - result._baseclass = self._baseclass - return result - #............................................. - def __getitem__(self, indx): - """x.__getitem__(y) <==> x[y] -Returns the item described by i. Not a copy as in previous versions. - """ - # This test is useful, but we should keep things light... -# if getmask(indx) is not nomask: -# msg = "Masked arrays must be filled before they can be used as indices!" -# raise IndexError, msg - # super() can't work here if the underlying data is a matrix... - dout = (self._data).__getitem__(indx) - m = self._mask - if hasattr(dout, 'shape') and len(dout.shape) > 0: - # Not a scalar: make sure that dout is a MA - dout = dout.view(type(self)) - dout._smallmask = self._smallmask - if m is not nomask: - # use _set_mask to take care of the shape - dout.__setmask__(m[indx]) - elif m is not nomask and m[indx]: - return masked - return dout - #........................ - def __setitem__(self, indx, value): - """x.__setitem__(i, y) <==> x[i]=y -Sets item described by index. If value is masked, masks those locations. - """ - if self is masked: - raise MAError, 'Cannot alter the masked element.' -# if getmask(indx) is not nomask: -# msg = "Masked arrays must be filled before they can be used as indices!" -# raise IndexError, msg - #.... - if value is masked: - m = self._mask - if m is nomask: - m = make_mask_none(self.shape) -# else: -# m = m.copy() - m[indx] = True - self.__setmask__(m) - return - #.... - dval = numeric.asarray(value).astype(self.dtype) - valmask = getmask(value) - if self._mask is nomask: - if valmask is not nomask: - self._mask = make_mask_none(self.shape) - self._mask[indx] = valmask - elif not self._hardmask: - _mask = self._mask.copy() - if valmask is nomask: - _mask[indx] = False - else: - _mask[indx] = valmask - self._set_mask(_mask) - elif hasattr(indx, 'dtype') and (indx.dtype==bool_): - indx = indx * umath.logical_not(self._mask) - else: - mindx = mask_or(self._mask[indx], valmask, copy=True) - dindx = self._data[indx] - if dindx.size > 1: - dindx[~mindx] = dval - elif mindx is nomask: - dindx = dval - dval = dindx - self._mask[indx] = mindx - # Set data .......... - #dval = filled(value).astype(self.dtype) - ndarray.__setitem__(self._data,indx,dval) - #............................................ - def __getslice__(self, i, j): - """x.__getslice__(i, j) <==> x[i:j] -Returns the slice described by i, j. 
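# Usage sketch (illustrative, not part of the diff above): __getitem__ and
# __setitem__ semantics, shown against numpy.ma (same behavior).
import numpy as np

x = np.ma.array([1, 2, 3], mask=[0, 1, 0])
x[1]                  # masked: a masked scalar comes back as the singleton
x[0] = np.ma.masked   # assigning the singleton only sets the mask
print(x)              # [-- -- 3]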
-The use of negative indices is not supported.""" - return self.__getitem__(slice(i,j)) - #........................ - def __setslice__(self, i, j, value): - """x.__setslice__(i, j, value) <==> x[i:j]=value -Sets a slice i:j to `value`. -If `value` is masked, masks those locations.""" - self.__setitem__(slice(i,j), value) - #............................................ - def __setmask__(self, mask, copy=False): - newmask = make_mask(mask, copy=copy, small_mask=self._smallmask) -# self.unshare_mask() - if self._mask is nomask: - self._mask = newmask - elif self._hardmask: - if newmask is not nomask: - self._mask.__ior__(newmask) - else: - # This one is tricky: if we set the mask that way, we may break the - # propagation. But if we don't, we end up with a mask full of False - # and a test on nomask fails... - if newmask is nomask: - self._mask = nomask - else: - self._mask.flat = newmask - if self._mask.shape: - self._mask = numeric.reshape(self._mask, self.shape) - _set_mask = __setmask__ - - def _get_mask(self): - """Returns the current mask.""" - return self._mask - - mask = property(fget=_get_mask, fset=__setmask__, doc="Mask") - #............................................ - def harden_mask(self): - "Forces the mask to hard." - self._hardmask = True - - def soften_mask(self): - "Forces the mask to soft." - self._hardmask = False - - def unshare_mask(self): - "Copies the mask and set the sharedmask flag to False." - if self._sharedmask: - self._mask = self._mask.copy() - self._sharedmask = False - - #............................................ - def _get_data(self): - "Returns the current data (as a view of the original underlying data)>" - return self.view(self._baseclass) - _data = property(fget=_get_data) - #............................................ - def _get_flat(self): - """Calculates the flat value. - """ - return flatiter(self) - # - def _set_flat (self, value): - "x.flat = value" - y = self.ravel() - y[:] = value - # - flat = property(fget=_get_flat, fset=_set_flat, doc="Flat version") - #............................................ - def get_fill_value(self): - "Returns the filling value." - if self._fill_value is None: - self._fill_value = default_fill_value(self) - return self._fill_value - - def set_fill_value(self, value=None): - """Sets the filling value to `value`. -If None, uses the default, based on the data type.""" - if value is None: - value = default_fill_value(self) - self._fill_value = value - - fill_value = property(fget=get_fill_value, fset=set_fill_value, - doc="Filling value") - - def filled(self, fill_value=None): - """Returns an array of the same class as `_data`, - with masked values filled with `fill_value`. -Subclassing is preserved. - -If `fill_value` is None, uses self.fill_value. - """ - m = self._mask - if m is nomask or not m.any(): - return self._data - # - if fill_value is None: - fill_value = self.fill_value - # - if self is masked_singleton: - result = numeric.asanyarray(fill_value) - else: - result = self._data.copy() - try: - result[m] = fill_value - except (TypeError, AttributeError): - fill_value = numeric.array(fill_value, dtype=object) - d = result.astype(object) - result = fromnumeric.choose(m, (d, fill_value)) - except IndexError: - #ok, if scalar - if self._data.shape: - raise - elif m: - result = numeric.array(fill_value, dtype=self.dtype) - else: - result = self._data - return result - - def compressed(self): - "A 1-D array of all the non-masked data." 
- d = self.ravel() - if self._mask is nomask: - return d - elif not self._smallmask and not self._mask.any(): - return d - else: - return d[numeric.logical_not(d._mask)] - #............................................ - def __str__(self): - """x.__str__() <==> str(x) -Calculates the string representation, using masked for fill if it is enabled. -Otherwise, fills with fill value. - """ - if masked_print_option.enabled(): - f = masked_print_option - if self is masked: - return str(f) - m = self._mask - if m is nomask: - res = self._data - else: - if m.shape == (): - if m: - return str(f) - else: - return str(self._data) - # convert to object array to make filled work -#CHECK: the two lines below seem more robust than the self._data.astype -# res = numeric.empty(self._data.shape, object_) -# numeric.putmask(res,~m,self._data) - res = self._data.astype("|O8") - res[m] = f - else: - res = self.filled(self.fill_value) - return str(res) - - def __repr__(self): - """x.__repr__() <==> repr(x) -Calculates the repr representation, using masked for fill if it is enabled. -Otherwise fill with fill value. - """ - with_mask = """\ -masked_%(name)s(data = - %(data)s, - mask = - %(mask)s, - fill_value=%(fill)s) -""" - with_mask1 = """\ -masked_%(name)s(data = %(data)s, - mask = %(mask)s, - fill_value=%(fill)s) -""" - n = len(self.shape) - name = repr(self._data).split('(')[0] - if n <= 1: - return with_mask1 % { - 'name': name, - 'data': str(self), - 'mask': str(self._mask), - 'fill': str(self.fill_value), - } - return with_mask % { - 'name': name, - 'data': str(self), - 'mask': str(self._mask), - 'fill': str(self.fill_value), - } - #............................................ - def __iadd__(self, other): - "Adds other to self in place." - ndarray.__iadd__(self._data,other) - m = getmask(other) - if self._mask is nomask: - self._mask = m - elif m is not nomask: - self._mask += m - return self - #.... - def __isub__(self, other): - "Subtracts other from self in place." - ndarray.__isub__(self._data,other) - m = getmask(other) - if self._mask is nomask: - self._mask = m - elif m is not nomask: - self._mask += m - return self - #.... - def __imul__(self, other): - "Multiplies self by other in place." - ndarray.__imul__(self._data,other) - m = getmask(other) - if self._mask is nomask: - self._mask = m - elif m is not nomask: - self._mask += m - return self - #.... - def __idiv__(self, other): - "Divides self by other in place." - dom_mask = domain_safe_divide().__call__(self, filled(other,1)) - other_mask = getmask(other) - new_mask = mask_or(other_mask, dom_mask) - ndarray.__idiv__(self._data, other) - self._mask = mask_or(self._mask, new_mask) - return self - #............................................ - def __float__(self): - "Converts self to float." - if self._mask is not nomask: - warnings.warn("Warning: converting a masked element to nan.") - return numpy.nan - #raise MAError, 'Cannot convert masked element to a Python float.' - return float(self.item()) - - def __int__(self): - "Converts self to int." - if self._mask is not nomask: - raise MAError, 'Cannot convert masked element to a Python int.' - return int(self.item()) - #............................................ - def count(self, axis=None): - """Counts the non-masked elements of the array along a given axis, -and returns a masked array where the mask is True where all data are masked. 
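# Usage sketch (illustrative, not part of the diff above): filled() and
# compressed(), shown against numpy.ma (same methods).
import numpy as np

x = np.ma.array([1, 2, 3, 4], mask=[0, 1, 0, 1])
x.filled(-1)      # array([ 1, -1,  3, -1]): plain ndarray, mask replaced
x.compressed()    # array([1, 3]): 1-D array of the unmasked data only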
-If `axis` is None, counts all the non-masked elements, and returns either a -scalar or the masked singleton.""" - m = self._mask - s = self.shape - ls = len(s) - if m is nomask: - if ls == 0: - return 1 - if ls == 1: - return s[0] - if axis is None: - return self.size - else: - n = s[axis] - t = list(s) - del t[axis] - return numeric.ones(t) * n - n1 = fromnumeric.size(m, axis) - n2 = m.astype(int_).sum(axis) - if axis is None: - return (n1-n2) - else: - return masked_array(n1 - n2) - #............................................ - def reshape (self, *s): - """Reshapes the array to shape s. -Returns a new masked array. -If you want to modify the shape in place, please use `a.shape = s`""" - result = self._data.reshape(*s).view(type(self)) - result.__dict__.update(self.__dict__) - if result._mask is not nomask: - result._mask = self._mask.copy() - result._mask.shape = result.shape - return result - # - repeat = _arraymethod('repeat') - # - def resize(self, newshape, refcheck=True, order=False): - """Attempts to modify size and shape of self inplace. - The array must own its own memory and not be referenced by other arrays. - Returns None. - """ - try: - self._data.resize(newshape, refcheck, order) - if self.mask is not nomask: - self._mask.resize(newshape, refcheck, order) - except ValueError: - raise ValueError("Cannot resize an array that has been referenced " - "or is referencing another array in this way.\n" - "Use the resize function.") - return None - # - flatten = _arraymethod('flatten') - # - def put(self, indices, values, mode='raise'): - """Sets storage-indexed locations to corresponding values. -a.put(values, indices, mode) sets a.flat[n] = values[n] for each n in indices. -`values` can be scalar or an array shorter than indices, and it will be repeated, -if necessary. -If `values` has some masked values, the initial mask is updated in consequence, -else the corresponding values are unmasked. - """ - m = self._mask - # Hard mask: Get rid of the values/indices that fall on masked data - if self._hardmask and self._mask is not nomask: - mask = self._mask[indices] - indices = numeric.asarray(indices) - values = numeric.asanyarray(values) - values.resize(indices.shape) - indices = indices[~mask] - values = values[~mask] - #.... - self._data.put(indices, values, mode=mode) - #.... - if m is nomask: - m = getmask(values) - else: - m = m.copy() - if getmask(values) is nomask: - m.put(indices, False, mode=mode) - else: - m.put(indices, values._mask, mode=mode) - m = make_mask(m, copy=False, small_mask=True) - self._mask = m - #............................................ - def ids (self): - """Return the address of the data and mask areas.""" - return (self.ctypes.data, self._mask.ctypes.data) - #............................................ - def all(self, axis=None, out=None): - """a.all(axis) returns True if all entries along the axis are True. - Returns False otherwise. If axis is None, uses the flatten array. - Masked data are considered as True during computation. - Outputs a masked array, where the mask is True if all data are masked along the axis. - Note: the out argument is not really operational... - """ - d = self.filled(True).all(axis=axis, out=out).view(type(self)) - if d.ndim > 0: - d.__setmask__(self._mask.all(axis)) - return d - - def any(self, axis=None, out=None): - """a.any(axis) returns True if some or all entries along the axis are True. - Returns False otherwise. If axis is None, uses the flatten array. - Masked data are considered as False during computation. 
- Outputs a masked array, where the mask is True if all data are masked along the axis. - Note: the out argument is not really operational... - """ - d = self.filled(False).any(axis=axis, out=out).view(type(self)) - if d.ndim > 0: - d.__setmask__(self._mask.all(axis)) - return d - - def nonzero(self): - """a.nonzero() returns a tuple of arrays - - Returns a tuple of arrays, one for each dimension of a, - containing the indices of the non-zero elements in that - dimension. The corresponding non-zero values can be obtained - with - a[a.nonzero()]. - - To group the indices by element, rather than dimension, use - transpose(a.nonzero()) - instead. The result of this is always a 2d array, with a row for - each non-zero element.""" - return numeric.asarray(self.filled(0)).nonzero() - #............................................ - def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None): - """a.trace(offset=0, axis1=0, axis2=1, dtype=None, out=None) -Returns the sum along the offset diagonal of the array's indicated `axis1` and `axis2`. - """ - # TODO: What are we doing with `out`? - m = self._mask - if m is nomask: - result = super(MaskedArray, self).trace(offset=offset, axis1=axis1, - axis2=axis2, out=out) - return result.astype(dtype) - else: - D = self.diagonal(offset=offset, axis1=axis1, axis2=axis2) - return D.astype(dtype).sum(axis=None) - #............................................ - def sum(self, axis=None, dtype=None): - """a.sum(axis=None, dtype=None) -Sums the array `a` over the given axis `axis`. -Masked values are set to 0. -If `axis` is None, applies to a flattened version of the array. - """ - if self._mask is nomask: - mask = nomask - else: - mask = self._mask.all(axis) - if (not mask.ndim) and mask: - return masked - result = self.filled(0).sum(axis, dtype=dtype).view(type(self)) - if result.ndim > 0: - result.__setmask__(mask) - return result - - def cumsum(self, axis=None, dtype=None): - """a.cumprod(axis=None, dtype=None) -Returns the cumulative sum of the elements of array `a` along the given axis `axis`. -Masked values are set to 0. -If `axis` is None, applies to a flattened version of the array. - """ - result = self.filled(0).cumsum(axis=axis, dtype=dtype).view(type(self)) - result.__setmask__(self.mask) - return result - - def prod(self, axis=None, dtype=None): - """a.prod(axis=None, dtype=None) -Returns the product of the elements of array `a` along the given axis `axis`. -Masked elements are set to 1. -If `axis` is None, applies to a flattened version of the array. - """ - if self._mask is nomask: - mask = nomask - else: - mask = self._mask.all(axis) - if (not mask.ndim) and mask: - return masked - result = self.filled(1).prod(axis=axis, dtype=dtype).view(type(self)) - if result.ndim: - result.__setmask__(mask) - return result - product = prod - - def cumprod(self, axis=None, dtype=None): - """a.cumprod(axis=None, dtype=None) -Returns the cumulative product of ethe lements of array `a` along the given axis `axis`. -Masked values are set to 1. -If `axis` is None, applies to a flattened version of the array. - """ - result = self.filled(1).cumprod(axis=axis, dtype=dtype).view(type(self)) - result.__setmask__(self.mask) - return result - - def mean(self, axis=None, dtype=None): - """a.mean(axis=None, dtype=None) - - Averages the array over the given axis. If the axis is None, - averages over all dimensions of the array. Equivalent to - - a.sum(axis, dtype) / size(a, axis). 
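# Usage sketch (illustrative, not part of the diff above): count(), sum() and
# mean() cooperating, shown against numpy.ma (same methods).
import numpy as np

x = np.ma.array([[1.0, 2.0], [3.0, 4.0]], mask=[[0, 1], [0, 0]])
x.count(axis=0)   # [2 1]: unmasked entries per column
x.sum(axis=0)     # [4.0 4.0]: masked entries contribute the fill value 0
x.mean(axis=0)    # [2.0 4.0]: sum/count, so the mask shrinks the divisor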
- - The optional dtype argument is the data type for intermediate - calculations in the sum. - - Returns a masked array, of the same class as a. - """ - if self._mask is nomask: - return super(MaskedArray, self).mean(axis=axis, dtype=dtype) - else: - dsum = self.sum(axis=axis, dtype=dtype) - cnt = self.count(axis=axis) - return dsum*1./cnt - - def anom(self, axis=None, dtype=None): - """a.anom(axis=None, dtype=None) - Returns the anomalies, or deviation from the average. - """ - m = self.mean(axis, dtype) - if not axis: - return (self - m) - else: - return (self - expand_dims(m,axis)) - - def var(self, axis=None, dtype=None): - """a.var(axis=None, dtype=None) -Returns the variance, a measure of the spread of a distribution. - -The variance is the average of the squared deviations from the mean, -i.e. var = mean((x - x.mean())**2). - """ - if self._mask is nomask: - # TODO: Do we keep super, or var _data and take a view ? - return super(MaskedArray, self).var(axis=axis, dtype=dtype) - else: - cnt = self.count(axis=axis) - danom = self.anom(axis=axis, dtype=dtype) - danom *= danom - dvar = numeric.array(danom.sum(axis) / cnt).view(type(self)) - if axis is not None: - dvar._mask = mask_or(self._mask.all(axis), (cnt==1)) - return dvar - - def std(self, axis=None, dtype=None): - """a.std(axis=None, dtype=None) -Returns the standard deviation, a measure of the spread of a distribution. - -The standard deviation is the square root of the average of the squared -deviations from the mean, i.e. std = sqrt(mean((x - x.mean())**2)). - """ - dvar = self.var(axis,dtype) - if axis is not None or dvar is not masked: - dvar = sqrt(dvar) - return dvar - #............................................ - def argsort(self, axis=None, fill_value=None, kind='quicksort', - order=None): - """Returns an array of indices that sort 'a' along the specified axis. - Masked values are filled beforehand to `fill_value`. - If `fill_value` is None, uses the default for the data type. - Returns a numpy array. - -:Keywords: - `axis` : Integer *[None]* - Axis to be indirectly sorted (default -1) - `kind` : String *['quicksort']* - Sorting algorithm (default 'quicksort') - Possible values: 'quicksort', 'mergesort', or 'heapsort' - - Returns: array of indices that sort 'a' along the specified axis. - - This method executes an indirect sort along the given axis using the - algorithm specified by the kind keyword. It returns an array of indices of - the same shape as 'a' that index data along the given axis in sorted order. - - The various sorts are characterized by average speed, worst case - performance, need for work space, and whether they are stable. A stable - sort keeps items with the same key in the same relative order. The three - available algorithms have the following properties: - - |------------------------------------------------------| - | kind | speed | worst case | work space | stable| - |------------------------------------------------------| - |'quicksort'| 1 | O(n^2) | 0 | no | - |'mergesort'| 2 | O(n*log(n)) | ~n/2 | yes | - |'heapsort' | 3 | O(n*log(n)) | 0 | no | - |------------------------------------------------------| - - All the sort algorithms make temporary copies of the data when the sort is not - along the last axis. Consequently, sorts along the last axis are faster and use - less space than sorts along other axis. 
- """ - if fill_value is None: - fill_value = default_fill_value(self) - d = self.filled(fill_value).view(ndarray) - return d.argsort(axis=axis, kind=kind, order=order) - #........................ - def argmin(self, axis=None, fill_value=None): - """Returns a ndarray of indices for the minimum values of `a` along the - specified axis. - Masked values are treated as if they had the value `fill_value`. - If `fill_value` is None, the default for the data type is used. - Returns a numpy array. - -:Keywords: - `axis` : Integer *[None]* - Axis to be indirectly sorted (default -1) - `fill_value` : var *[None]* - Default filling value. If None, uses the minimum default for the data type. - """ - if fill_value is None: - fill_value = minimum_fill_value(self) - d = self.filled(fill_value).view(ndarray) - return d.argmin(axis) - #........................ - def argmax(self, axis=None, fill_value=None): - """Returns the array of indices for the maximum values of `a` along the - specified axis. - Masked values are treated as if they had the value `fill_value`. - If `fill_value` is None, the maximum default for the data type is used. - Returns a numpy array. - -:Keywords: - `axis` : Integer *[None]* - Axis to be indirectly sorted (default -1) - `fill_value` : var *[None]* - Default filling value. If None, uses the data type default. - """ - if fill_value is None: - fill_value = maximum_fill_value(self._data) - d = self.filled(fill_value).view(ndarray) - return d.argmax(axis) - - def sort(self, axis=-1, kind='quicksort', order=None, - endwith=True, fill_value=None): - """ - Sort a along the given axis. - - Keyword arguments: - - axis -- axis to be sorted (default -1) - kind -- sorting algorithm (default 'quicksort') - Possible values: 'quicksort', 'mergesort', or 'heapsort'. - order -- If a has fields defined, then the order keyword can be the - field name to sort on or a list (or tuple) of field names - to indicate the order that fields should be used to define - the sort. - endwith--Boolean flag indicating whether missing values (if any) should - be forced in the upper indices (at the end of the array) or - lower indices (at the beginning). - - Returns: None. - - This method sorts 'a' in place along the given axis using the algorithm - specified by the kind keyword. - - The various sorts may characterized by average speed, worst case - performance, need for work space, and whether they are stable. A stable - sort keeps items with the same key in the same relative order and is most - useful when used with argsort where the key might differ from the items - being sorted. 
The three available algorithms have the following properties: - - |------------------------------------------------------| - | kind | speed | worst case | work space | stable| - |------------------------------------------------------| - |'quicksort'| 1 | O(n^2) | 0 | no | - |'mergesort'| 2 | O(n*log(n)) | ~n/2 | yes | - |'heapsort' | 3 | O(n*log(n)) | 0 | no | - |------------------------------------------------------| - - """ - if self._mask is nomask: - ndarray.sort(self,axis=axis, kind=kind, order=order) - else: - if fill_value is None: - if endwith: - filler = minimum_fill_value(self) - else: - filler = maximum_fill_value(self) - else: - filler = fill_value - idx = numpy.indices(self.shape) - idx[axis] = self.filled(filler).argsort(axis=axis,kind=kind,order=order) - idx_l = idx.tolist() - tmp_mask = self._mask[idx_l].flat - tmp_data = self._data[idx_l].flat - self.flat = tmp_data - self._mask.flat = tmp_mask - return - #............................................ - def min(self, axis=None, fill_value=None): - """Returns the minimum/a along the given axis. -If `axis` is None, applies to the flattened array. Masked values are filled -with `fill_value` during processing. If `fill_value is None, it is set to the -maximum_fill_value corresponding to the data type.""" - mask = self._mask - # Check all/nothing case ...... - if mask is nomask: - return super(MaskedArray, self).min(axis=axis) - elif (not mask.ndim) and mask: - return masked - # Get the mask ................ - if axis is None: - mask = umath.logical_and.reduce(mask.flat) - else: - mask = umath.logical_and.reduce(mask, axis=axis) - # Get the fil value ........... - if fill_value is None: - fill_value = minimum_fill_value(self) - # Get the data ................ - result = self.filled(fill_value).min(axis=axis).view(type(self)) - if result.ndim > 0: - result._mask = mask - return result - #........................ - def max(self, axis=None, fill_value=None): - """Returns the maximum/a along the given axis. -If `axis` is None, applies to the flattened array. Masked values are filled -with `fill_value` during processing. If `fill_value is None, it is set to the -maximum_fill_value corresponding to the data type.""" - mask = self._mask - # Check all/nothing case ...... - if mask is nomask: - return super(MaskedArray, self).max(axis=axis) - elif (not mask.ndim) and mask: - return masked - # Check the mask .............. - if axis is None: - mask = umath.logical_and.reduce(mask.flat) - else: - mask = umath.logical_and.reduce(mask, axis=axis) - # Get the fill value .......... - if fill_value is None: - fill_value = maximum_fill_value(self) - # Get the data ................ - result = self.filled(fill_value).max(axis=axis).view(type(self)) - if result.ndim > 0: - result._mask = mask - return result - #........................ - def ptp(self, axis=None, fill_value=None): - """Returns the visible data range (max-min) along the given axis. -If the axis is `None`, applies on a flattened array. Masked values are filled -with `fill_value` for processing. 
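# Usage sketch (illustrative, not part of the diff above): in-place sort()
# with endwith=True pushes masked entries to the top indices, while min() and
# max() ignore masked slots via the extreme fill values. Shown against numpy.ma.
import numpy as np

x = np.ma.array([3, -1, 2, 5], mask=[0, 1, 0, 0])
x.sort(endwith=True)
print(x)    # [2 3 5 --]: the mask travelled with the data
x.max()     # 5; likewise x.min() gives 2, never the masked -1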
If `fill_value` is None, the maximum is uses -the maximum default, the minimum uses the minimum default.""" - return self.max(axis, fill_value) - self.min(axis, fill_value) - - # Array methods --------------------------------------- - conj = conjugate = _arraymethod('conjugate') - copy = _arraymethod('copy') - diagonal = _arraymethod('diagonal') - take = _arraymethod('take') - ravel = _arraymethod('ravel') - transpose = _arraymethod('transpose') - T = property(fget=lambda self:self.transpose()) - swapaxes = _arraymethod('swapaxes') - clip = _arraymethod('clip', onmask=False) - compress = _arraymethod('compress') - copy = _arraymethod('copy') - squeeze = _arraymethod('squeeze') - #-------------------------------------------- - def tolist(self, fill_value=None): - """Copies the data portion of the array to a hierarchical python list and - returns that list. Data items are converted to the nearest compatible Python - type. Masked values are filled with `fill_value`""" - return self.filled(fill_value).tolist() - #........................ - def tostring(self, fill_value=None): - """a.tostring(order='C', fill_value=None) -> raw copy of array data as a Python string. - - Keyword arguments: - order : order of the data item in the copy {"C","F","A"} (default "C") - fill_value : value used in lieu of missing data - - Construct a Python string containing the raw bytes in the array. The order - of the data in arrays with ndim > 1 is specified by the 'order' keyword and - this keyword overrides the order of the array. The - choices are: - - "C" -- C order (row major) - "Fortran" -- Fortran order (column major) - "Any" -- Current order of array. - None -- Same as "Any" - - Masked data are filled with fill_value. If fill_value is None, the data-type- - dependent default is used.""" - return self.filled(fill_value).tostring() - #-------------------------------------------- - # Backwards Compatibility. Heck... - @property - def data(self): - """Returns the `_data` part of the MaskedArray.""" - return self._data - def raw_data(self): - """Returns the `_data` part of the MaskedArray. -You should really use `data` instead...""" - return self._data - #-------------------------------------------- - # Pickling - def __getstate__(self): - "Returns the internal state of the masked array, for pickling purposes." - state = (1, - self.shape, - self.dtype, - self.flags.fnc, - self._data.tostring(), - getmaskarray(self).tostring(), - self._fill_value, - ) - return state - # - def __setstate__(self, state): - """Restores the internal state of the masked array, for pickling purposes. - `state` is typically the output of the ``__getstate__`` output, and is a 5-tuple: - - - class name - - a tuple giving the shape of the data - - a typecode for the data - - a binary string for the data - - a binary string for the mask. 
- """ - (ver, shp, typ, isf, raw, msk, flv) = state - ndarray.__setstate__(self, (shp, typ, isf, raw)) - self._mask.__setstate__((shp, dtype(bool), isf, msk)) - self.fill_value = flv - # - def __reduce__(self): - """Returns a 3-tuple for pickling a MaskedArray.""" - return (_mareconstruct, - (self.__class__, self._baseclass, (0,), 'b', ), - self.__getstate__()) - - -def _mareconstruct(subtype, baseclass, baseshape, basetype,): - """Internal function that builds a new MaskedArray from the information stored -in a pickle.""" - _data = ndarray.__new__(baseclass, baseshape, basetype) - _mask = ndarray.__new__(ndarray, baseshape, 'b1') - return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype, small_mask=False) -#MaskedArray.__dump__ = dump -#MaskedArray.__dumps__ = dumps - - - -#####-------------------------------------------------------------------------- -#---- --- Shortcuts --- -#####--------------------------------------------------------------------------- -def isMaskedArray(x): - "Is x a masked array, that is, an instance of MaskedArray?" - return isinstance(x, MaskedArray) -isarray = isMaskedArray -isMA = isMaskedArray #backward compatibility -#masked = MaskedArray(0, int, mask=1) -masked_singleton = MaskedArray(0, dtype=int_, mask=True) -masked = masked_singleton - -masked_array = MaskedArray -def array(data, dtype=None, copy=False, order=False, mask=nomask, subok=True, - keep_mask=True, small_mask=True, hard_mask=None, fill_value=None): - """array(data, dtype=None, copy=True, order=False, mask=nomask, - keep_mask=True, small_mask=True, fill_value=None) -Acts as shortcut to MaskedArray, with options in a different order for convenience. -And backwards compatibility... - """ - #TODO: we should try to put 'order' somwehere - return MaskedArray(data, mask=mask, dtype=dtype, copy=copy, subok=subok, - keep_mask=keep_mask, small_mask=small_mask, - hard_mask=hard_mask, fill_value=fill_value) - -def is_masked(x): - """Returns whether x has some masked values.""" - m = getmask(x) - if m is nomask: - return False - elif m.any(): - return True - return False - - -#####--------------------------------------------------------------------------- -#---- --- Extrema functions --- -#####--------------------------------------------------------------------------- -class _extrema_operation(object): - "Generic class for maximum/minimum functions." - def __call__(self, a, b=None): - "Executes the call behavior." - if b is None: - return self.reduce(a) - return where(self.compare(a, b), a, b) - #......... - def reduce(self, target, axis=None): - """Reduces target along the given axis.""" - m = getmask(target) - if axis is not None: - kargs = { 'axis' : axis } - else: - kargs = {} - target = target.ravel() - if not (m is nomask): - m = m.ravel() - if m is nomask: - t = self.ufunc.reduce(target, **kargs) - else: - target = target.filled(self.fill_value_func(target)).view(type(target)) - t = self.ufunc.reduce(target, **kargs) - m = umath.logical_and.reduce(m, **kargs) - if hasattr(t, '_mask'): - t._mask = m - elif m: - t = masked - return t - #......... - def outer (self, a, b): - "Returns the function applied to the outer product of a and b." - ma = getmask(a) - mb = getmask(b) - if ma is nomask and mb is nomask: - m = nomask - else: - ma = getmaskarray(a) - mb = getmaskarray(b) - m = logical_or.outer(ma, mb) - result = self.ufunc.outer(filled(a), filled(b)) - result._mask = m - return result -#............................ 
-class _minimum_operation(_extrema_operation): - "Object to calculate minima" - def __init__ (self): - """minimum(a, b) or minimum(a) -In one argument case, returns the scalar minimum. - """ - self.ufunc = umath.minimum - self.afunc = amin - self.compare = less - self.fill_value_func = minimum_fill_value -#............................ -class _maximum_operation(_extrema_operation): - "Object to calculate maxima" - def __init__ (self): - """maximum(a, b) or maximum(a) - In one argument case returns the scalar maximum. - """ - self.ufunc = umath.maximum - self.afunc = amax - self.compare = greater - self.fill_value_func = maximum_fill_value -#.......................................................... -def min(array, axis=None, out=None): - """Returns the minima along the given axis. -If `axis` is None, applies to the flattened array.""" - if out is not None: - raise TypeError("Output arrays Unsupported for masked arrays") - if axis is None: - return minimum(array) - else: - return minimum.reduce(array, axis) -#............................ -def max(obj, axis=None, out=None): - """Returns the maxima along the given axis. -If `axis` is None, applies to the flattened array.""" - if out is not None: - raise TypeError("Output arrays Unsupported for masked arrays") - if axis is None: - return maximum(obj) - else: - return maximum.reduce(obj, axis) -#............................. -def ptp(obj, axis=None): - """a.ptp(axis=None) = a.max(axis)-a.min(axis)""" - try: - return obj.max(axis)-obj.min(axis) - except AttributeError: - return max(obj, axis=axis) - min(obj, axis=axis) - - -#####--------------------------------------------------------------------------- -#---- --- Definition of functions from the corresponding methods --- -#####--------------------------------------------------------------------------- -class _frommethod: - """Defines functions from existing MaskedArray methods. -:ivar _methodname (String): Name of the method to transform. - """ - def __init__(self, methodname): - self._methodname = methodname - self.__doc__ = self.getdoc() - def getdoc(self): - "Returns the doc of the function (from the doc of the method)." - try: - return getattr(MaskedArray, self._methodname).__doc__ - except: - return getattr(numpy, self._methodname).__doc__ - def __call__(self, a, *args, **params): - if isinstance(a, MaskedArray): - return getattr(a, self._methodname).__call__(*args, **params) - #FIXME ---- - #As x is not a MaskedArray, we transform it to a ndarray with asarray - #... and call the corresponding method. - #Except that sometimes it doesn't work (try reshape([1,2,3,4],(2,2))) - #we end up with a "SystemError: NULL result without error in PyObject_Call" - #A dirty trick is then to call the initial numpy function... 
- method = getattr(fromnumeric.asarray(a), self._methodname) - try: - return method(*args, **params) - except SystemError: - return getattr(numpy,self._methodname).__call__(a, *args, **params) - -all = _frommethod('all') -anomalies = anom = _frommethod('anom') -any = _frommethod('any') -conjugate = _frommethod('conjugate') -ids = _frommethod('ids') -nonzero = _frommethod('nonzero') -diagonal = _frommethod('diagonal') -maximum = _maximum_operation() -mean = _frommethod('mean') -minimum = _minimum_operation () -product = _frommethod('prod') -ptp = _frommethod('ptp') -ravel = _frommethod('ravel') -repeat = _frommethod('repeat') -std = _frommethod('std') -sum = _frommethod('sum') -swapaxes = _frommethod('swapaxes') -take = _frommethod('take') -var = _frommethod('var') - -#.............................................................................. -def power(a, b, third=None): - """Computes a**b elementwise. - Masked values are set to 1.""" - if third is not None: - raise MAError, "3-argument power not supported." - ma = getmask(a) - mb = getmask(b) - m = mask_or(ma, mb) - fa = filled(a, 1) - fb = filled(b, 1) - if fb.dtype.char in typecodes["Integer"]: - return masked_array(umath.power(fa, fb), m) - md = make_mask((fa < 0), small_mask=1) - m = mask_or(m, md) - if m is nomask: - return masked_array(umath.power(fa, fb)) - else: - fa[m] = 1 - return masked_array(umath.power(fa, fb), m) - -#.............................................................................. -def argsort(a, axis=None, kind='quicksort', order=None, fill_value=None): - """Returns an array of indices that sort 'a' along the specified axis. - Masked values are filled beforehand to `fill_value`. - If `fill_value` is None, uses the default for the data type. - Returns a numpy array. - -:Keywords: - `axis` : Integer *[None]* - Axis to be indirectly sorted (default -1) - `kind` : String *['quicksort']* - Sorting algorithm (default 'quicksort') - Possible values: 'quicksort', 'mergesort', or 'heapsort' - - Returns: array of indices that sort 'a' along the specified axis. - - This method executes an indirect sort along the given axis using the - algorithm specified by the kind keyword. It returns an array of indices of - the same shape as 'a' that index data along the given axis in sorted order. - - The various sorts are characterized by average speed, worst case - performance, need for work space, and whether they are stable. A stable - sort keeps items with the same key in the same relative order. The three - available algorithms have the following properties: - - |------------------------------------------------------| - | kind | speed | worst case | work space | stable| - |------------------------------------------------------| - |'quicksort'| 1 | O(n^2) | 0 | no | - |'mergesort'| 2 | O(n*log(n)) | ~n/2 | yes | - |'heapsort' | 3 | O(n*log(n)) | 0 | no | - |------------------------------------------------------| - - All the sort algorithms make temporary copies of the data when the sort is not - along the last axis. Consequently, sorts along the last axis are faster and use - less space than sorts along other axis. - """ - if fill_value is None: - fill_value = default_fill_value(a) - d = filled(a, fill_value) - if axis is None: - return d.argsort(kind=kind, order=order) - return d.argsort(axis, kind=kind, order=order) - -def argmin(a, axis=None, fill_value=None): - """Returns the array of indices for the minimum values of `a` along the - specified axis. - Masked values are treated as if they had the value `fill_value`. 
-    If `fill_value` is None, the default for the data type is used.
-    Returns a numpy array.
-
-:Keywords:
-    `axis` : Integer *[None]*
-        Axis along which to operate. If None, uses the flattened array.
-    `fill_value` : var *[None]*
-        Default filling value. If None, uses the data type default.
-    """
-    if fill_value is None:
-        fill_value = default_fill_value(a)
-    d = filled(a, fill_value)
-    return d.argmin(axis=axis)
-
-def argmax(a, axis=None, fill_value=None):
-    """Returns the array of indices for the maximum values of `a` along the
-    specified axis.
-    Masked values are treated as if they had the value `fill_value`.
-    If `fill_value` is None, the negative of the default for the data type is used.
-    Returns a numpy array.
-
-:Keywords:
-    `axis` : Integer *[None]*
-        Axis along which to operate. If None, uses the flattened array.
-    `fill_value` : var *[None]*
-        Default filling value. If None, uses the negative of the data type default.
-    """
-    if fill_value is None:
-        fill_value = default_fill_value(a)
-        try:
-            fill_value = - fill_value
-        except:
-            pass
-    d = filled(a, fill_value)
-    return d.argmax(axis=axis)
-
-def sort(a, axis=-1, kind='quicksort', order=None, endwith=True, fill_value=None):
-    """
-    Sorts `a` along the given axis and returns the sorted array.
-
-Keyword arguments:
-
-axis  -- axis to be sorted (default -1)
-kind  -- sorting algorithm (default 'quicksort')
-         Possible values: 'quicksort', 'mergesort', or 'heapsort'.
-order -- If a has fields defined, then the order keyword can be the
-         field name to sort on or a list (or tuple) of field names
-         to indicate the order that fields should be used to define
-         the sort.
-endwith -- Boolean flag indicating whether missing values (if any) should
-         be forced in the upper indices (at the end of the array) or
-         lower indices (at the beginning).
-
-Returns: the sorted array. Masked values are filled beforehand, with
-minimum_fill_value if `endwith` is true and maximum_fill_value otherwise,
-so that they end up in the upper or lower indices respectively.
-
-This function sorts a copy of 'a' along the given axis using the algorithm
-specified by the kind keyword.
-
-The various sorts may be characterized by average speed, worst case
-performance, need for work space, and whether they are stable. A stable
-sort keeps items with the same key in the same relative order and is most
-useful when used with argsort, where the key might differ from the items
-being sorted. The three available algorithms have the following properties:
-
-|------------------------------------------------------|
-|    kind   | speed |  worst case | work space | stable|
-|------------------------------------------------------|
-|'quicksort'|   1   | O(n^2)      |     0      |   no  |
-|'mergesort'|   2   | O(n*log(n)) |    ~n/2    |   yes |
-|'heapsort' |   3   | O(n*log(n)) |     0      |   no  |
-|------------------------------------------------------|
-
-All the sort algorithms make temporary copies of the data when the sort is
-not along the last axis. Consequently, sorts along the last axis are faster
-and use less space than sorts along other axes.
-
-"""
-    a = numeric.asanyarray(a)
-    if fill_value is None:
-        if endwith:
-            filler = minimum_fill_value(a)
-        else:
-            filler = maximum_fill_value(a)
-    else:
-        filler = fill_value
-    indx = numpy.indices(a.shape).tolist()
-    indx[axis] = filled(a, filler).argsort(axis=axis, kind=kind, order=order)
-    return a[indx]
-
-def compressed(x):
-    """Returns a compressed version of a masked array (or the array itself
-    if it has no masked values)."""
-    if getmask(x) is nomask:
-        return x
-    else:
-        return x.compressed()
-
-def count(a, axis = None):
-    "Count of the non-masked elements in a, or along a certain axis."
- a = masked_array(a) - return a.count(axis) - -def concatenate(arrays, axis=0): - "Concatenates the arrays along the given axis" - d = numeric.concatenate([filled(a) for a in arrays], axis) - rcls = get_masked_subclass(*arrays) - data = d.view(rcls) - for x in arrays: - if getmask(x) is not nomask: - break - else: - return data - dm = numeric.concatenate([getmaskarray(a) for a in arrays], axis) - dm = make_mask(dm, copy=False, small_mask=True) - data._mask = dm - return data - -def expand_dims(x,axis): - """Expand the shape of a by including newaxis before given axis.""" - result = n_expand_dims(x,axis) - if isinstance(x, MaskedArray): - new_shape = result.shape - result = x.view() - result.shape = new_shape - if result._mask is not nomask: - result._mask.shape = new_shape - return result - -#...................................... -def left_shift (a, n): - "Left shift n bits" - m = getmask(a) - if m is nomask: - d = umath.left_shift(filled(a), n) - return masked_array(d) - else: - d = umath.left_shift(filled(a, 0), n) - return masked_array(d, mask=m) - -def right_shift (a, n): - "Right shift n bits" - m = getmask(a) - if m is nomask: - d = umath.right_shift(filled(a), n) - return masked_array(d) - else: - d = umath.right_shift(filled(a, 0), n) - return masked_array(d, mask=m) -#...................................... -def put(a, indices, values, mode='raise'): - """Sets storage-indexed locations to corresponding values. - Values and indices are filled if necessary.""" - # We can't use 'frommethod', the order of arguments is different - try: - return a.put(indices, values, mode=mode) - except AttributeError: - return fromnumeric.asarray(a).put(indices, values, mode=mode) - -def putmask(a, mask, values): #, mode='raise'): - """`putmask(a, mask, v)` results in `a = v` for all places where `mask` is true. -If `v` is shorter than `mask`, it will be repeated as necessary. -In particular `v` can be a scalar or length 1 array.""" - # We can't use 'frommethod', the order of arguments is different - try: - return a.putmask(values, mask) - except AttributeError: - return fromnumeric.asarray(a).putmask(values, mask) - -def transpose(a,axes=None): - """Returns a view of the array with dimensions permuted according to axes. -If `axes` is None (default), returns array with dimensions reversed. - """ - #We can't use 'frommethod', as 'transpose' doesn't take keywords - try: - return a.transpose(axes) - except AttributeError: - return fromnumeric.asarray(a).transpose(axes) - -def reshape(a, new_shape): - """Changes the shape of the array `a` to `new_shape`.""" - #We can't use 'frommethod', it whine about some parameters. Dmmit. - try: - return a.reshape(new_shape) - except AttributeError: - return fromnumeric.asarray(a).reshape(new_shape) - -def resize(x, new_shape): - """resize(a,new_shape) returns a new array with the specified shape. - The total size of the original array can be any size. - The new array is filled with repeated copies of a. If a was masked, the new - array will be masked, and the new mask will be a repetition of the old one. - """ - # We can't use _frommethods here, as N.resize is notoriously whiny. - m = getmask(x) - if m is not nomask: - m = fromnumeric.resize(m, new_shape) - result = fromnumeric.resize(x, new_shape).view(get_masked_subclass(x)) - if result.ndim: - result._mask = m - return result - - -#................................................ 
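A quick sketch of what the mask bookkeeping in these helpers buys you,
again assuming the sandbox package imports as `maskedarray` (`_mask` is
the internal attribute used throughout this module):

    import maskedarray as MA

    x = MA.array([1, 2, 3, 4], mask=[0, 0, 1, 0])
    y = MA.resize(x, (2, 4))
    # The data is tiled to the new shape and, because x carried a mask,
    # the mask is tiled right along with it:
    print(y._mask)
    # [[False False  True False]
    #  [False False  True False]]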
-def rank(obj):
-    """Gets the rank of the sequence `obj` (the number of dimensions, not a matrix rank).
-The rank of a scalar is zero."""
-    return fromnumeric.rank(filled(obj))
-#
-def shape(obj):
-    """Returns the shape of `obj` (as a function call which also works on nested sequences).
-    """
-    return fromnumeric.shape(filled(obj))
-#
-def size(obj, axis=None):
-    """Returns the number of elements in the array along the given axis,
-or in the sequence if `axis` is None.
-    """
-    return fromnumeric.size(filled(obj), axis)
-#................................................
-
-#####--------------------------------------------------------------------------
-#---- --- Extra functions ---
-#####--------------------------------------------------------------------------
-def where (condition, x, y):
-    """where(condition, x, y) is x where condition is nonzero, y otherwise.
-       condition must be convertible to an integer array.
-       The answer always has the shape of condition; its dtype depends on
-       x and y. The result is masked where x or y is masked at the selected
-       positions, or where condition itself is masked.
-    """
-    fc = filled(not_equal(condition, 0), 0)
-    xv = filled(x)
-    xm = getmask(x)
-    yv = filled(y)
-    ym = getmask(y)
-    d = numeric.choose(fc, (yv, xv))
-    md = numeric.choose(fc, (ym, xm))
-    m = getmask(condition)
-    m = make_mask(mask_or(m, md), copy=False, small_mask=True)
-    return masked_array(d, mask=m)
-
-def choose (indices, t, out=None, mode='raise'):
-    "Returns an array shaped like indices, with elements chosen from t."
-    #TODO: implement options `out` and `mode`, if possible.
-    def fmask (x):
-        "Returns the filled array, or True if ``masked``."
-        if x is masked:
-            return 1
-        return filled(x)
-    def nmask (x):
-        "Returns the mask, True if ``masked``, False if ``nomask``."
-        if x is masked:
-            return 1
-        m = getmask(x)
-        if m is nomask:
-            return 0
-        return m
-    c = filled(indices, 0)
-    masks = [nmask(x) for x in t]
-    a = [fmask(x) for x in t]
-    d = numeric.choose(c, a)
-    m = numeric.choose(c, masks)
-    m = make_mask(mask_or(m, getmask(indices)), copy=0, small_mask=1)
-    return masked_array(d, mask=m)
-
-def round_(a, decimals=0, out=None):
-    """Returns a copy of `a`, rounded to 'decimals' places.
-
-    Keyword arguments:
-        decimals -- number of decimals to round to (default 0). May be negative.
-        out -- existing array to use for output (default: a copy of a).
-
-    Return:
-        Reference to out, where None specifies a copy of the original array a.
-
-    Round to the specified number of decimals. When 'decimals' is negative, it
-    specifies the number of positions to the left of the decimal point. The
-    real and imaginary parts of complex numbers are rounded separately.
-    Nothing is done if the array is not of float type and 'decimals' is greater
-    than or equal to 0."""
-    result = fromnumeric.round_(filled(a), decimals, out)
-    if isinstance(a,MaskedArray):
-        result = result.view(type(a))
-        result._mask = a._mask
-    else:
-        result = result.view(MaskedArray)
-    return result
-
-def arange(start, stop=None, step=1, dtype=None):
-    """Just like range() except it returns an array whose type can be specified
-    by the keyword argument dtype.
-    """
-    return array(numeric.arange(start, stop, step, dtype), mask=nomask)
-
-def inner(a, b):
-    """inner(a,b) returns the dot product of two arrays, which has
-    shape a.shape[:-1] + b.shape[:-1] with elements computed by summing the
-    product of the elements from the last dimensions of a and b.
-    Masked elements are replaced by zeros.
- """ - fa = filled(a, 0) - fb = filled(b, 0) - if len(fa.shape) == 0: - fa.shape = (1,) - if len(fb.shape) == 0: - fb.shape = (1,) - return masked_array(numeric.inner(fa, fb)) -innerproduct = inner - -def outer(a, b): - """outer(a,b) = {a[i]*b[j]}, has shape (len(a),len(b))""" - fa = filled(a, 0).ravel() - fb = filled(b, 0).ravel() - d = numeric.outer(fa, fb) - ma = getmask(a) - mb = getmask(b) - if ma is nomask and mb is nomask: - return masked_array(d) - ma = getmaskarray(a) - mb = getmaskarray(b) - m = make_mask(1-numeric.outer(1-ma, 1-mb), copy=0) - return masked_array(d, mask=m) -outerproduct = outer - -def allequal (a, b, fill_value=True): - """ -Returns `True` if all entries of a and b are equal, using -fill_value as a truth value where either or both are masked. - """ - m = mask_or(getmask(a), getmask(b)) - if m is nomask: - x = filled(a) - y = filled(b) - d = umath.equal(x, y) - return d.all() - elif fill_value: - x = filled(a) - y = filled(b) - d = umath.equal(x, y) - dm = array(d, mask=m, copy=False) - return dm.filled(True).all(None) - else: - return False - -def allclose (a, b, fill_value=True, rtol=1.e-5, atol=1.e-8): - """ Returns `True` if all elements of `a` and `b` are equal subject to given tolerances. -If `fill_value` is True, masked values are considered equal. -If `fill_value` is False, masked values considered unequal. -The relative error rtol should be positive and << 1.0 -The absolute error `atol` comes into play for those elements of `b` - that are very small or zero; it says how small `a` must be also. - """ - m = mask_or(getmask(a), getmask(b)) - d1 = filled(a) - d2 = filled(b) - x = filled(array(d1, copy=0, mask=m), fill_value).astype(float) - y = filled(array(d2, copy=0, mask=m), 1).astype(float) - d = umath.less_equal(umath.absolute(x-y), atol + rtol * umath.absolute(y)) - return fromnumeric.alltrue(fromnumeric.ravel(d)) - -#.............................................................................. -def asarray(a, dtype=None): - """asarray(data, dtype) = array(data, dtype, copy=0) -Returns `a` as an masked array. -No copy is performed if `a` is already an array. -Subclasses are converted to base class MaskedArray. - """ - return masked_array(a, dtype=dtype, copy=False, keep_mask=True) - -def empty(new_shape, dtype=float): - """empty((d1,...,dn),dtype=float,order='C') -Returns a new array of shape (d1,...,dn) and given type with all its -entries uninitialized. This can be faster than zeros.""" - return numeric.empty(new_shape, dtype).view(MaskedArray) - -def empty_like(a): - """empty_like(a) -Returns an empty (uninitialized) array of the shape and typecode of a. -Note that this does NOT initialize the returned array. -If you require your array to be initialized, you should use zeros_like().""" - return numeric.empty_like(a).view(MaskedArray) - -def ones(new_shape, dtype=float): - """ones(shape, dtype=None) -Returns an array of the given dimensions, initialized to all ones.""" - return numeric.ones(new_shape, dtype).view(MaskedArray) - -def zeros(new_shape, dtype=float): - """zeros(new_shape, dtype=None) -Returns an array of the given dimensions, initialized to all zeros.""" - return numeric.zeros(new_shape, dtype).view(MaskedArray) - -#####-------------------------------------------------------------------------- -#---- --- Pickling --- -#####-------------------------------------------------------------------------- -def dump(a,F): - """Pickles the MaskedArray `a` to the file `F`. 
-`F` can either be the handle of an exiting file, or a string representing a file name. - """ - if not hasattr(F,'readline'): - F = open(F,'w') - return cPickle.dump(a,F) - -def dumps(a): - """Returns a string corresponding to the pickling of the MaskedArray.""" - return cPickle.dumps(a) - -def load(F): - """Wrapper around ``cPickle.load`` which accepts either a file-like object or - a filename.""" - if not hasattr(F, 'readline'): - F = open(F,'r') - return cPickle.load(F) - -def loads(strg): - "Loads a pickle from the current string.""" - return cPickle.loads(strg) - - -################################################################################ - -if __name__ == '__main__': - if 1: - x = arange(10) - assert(x.ctypes.data == x.filled().ctypes.data) - if 0: - a = array([1,2,3,4],mask=[0,0,0,0],small_mask=True) - a[1] = masked - a[1] = 1 - assert(a.ravel()._mask, [0,0,0,0]) - assert(a.compressed(), a) - a[0] = masked - assert(a.compressed()._mask, [0,0,0]) - if 1: - x = array(0, mask=0) - I = x.ctypes.data - J = x.filled().ctypes.data - print (I,J) - x = array([0,0], mask=0) - (I,J) = (x.ctypes.data, x.filled().ctypes.data) - print (I,J) - \ No newline at end of file Modified: trunk/Lib/sandbox/maskedarray/mrecords.py =================================================================== --- trunk/Lib/sandbox/maskedarray/mrecords.py 2007-08-15 00:53:14 UTC (rev 3242) +++ trunk/Lib/sandbox/maskedarray/mrecords.py 2007-08-15 05:44:07 UTC (rev 3243) @@ -116,580 +116,5 @@ descr = _checknames(descr,names) _names = descr.names mdescr = [(n,'|b1') for n in _names] - # - shape = numeric.asarray(data[0]).shape - if isinstance(shape, int): - shape = (shape,) - # Construct the _data recarray .......... - if isinstance(data, record): - _data = numeric.asarray(data).view(recarray) - _fieldmask = mask - elif isinstance(data, MaskedRecords): - _data = data._data - _fieldmask = data._fieldmask - elif isinstance(data, recarray): - _data = data - if mask is nomask: - _fieldmask = data.astype(mdescr) - _fieldmask.flat = tuple([False]*len(mdescr)) - else: - _fieldmask = mask - else: - try: - data = numeric.array(data, dtype=descr).view(recarray) - _data = data - if mask is nomask: - _fieldmask = data.astype(mdescr) - _fieldmask.flat = tuple([False]*len(mdescr)) - else: - _fieldmask = mask - except: - _data = recarray(shape, dtype=descr) - _fieldmask = recarray(shape, dtype=mdescr) - for (n,v) in zip(_names, data): - print n, v - print _data[n] - _data[n] = numeric.asarray(v).view(ndarray) - _fieldmask[n] = getmaskarray(v) - #........................................ - _data = _data.view(cls) - _data._fieldmask = _fieldmask - _data._hardmask = hard_mask - if fill_value is None: - _data._fill_value = [default_fill_value(numeric.dtype(d[1])) - for d in descr.descr] - else: - _data._fill_value = fill_value - return _data - - def __array_finalize__(self,obj): - if isinstance(obj, MaskedRecords): - self.__dict__.update(_fieldmask=obj._fieldmask, - _hardmask=obj._hardmask, - _fill_value=obj._fill_value - ) - else: - self.__dict__.update(_fieldmask = nomask, - _hardmask = False, - fill_value = None - ) - return - - def _getdata(self): - "Returns the data as a recarray." - return self.view(recarray) - _data = property(fget=_getdata) - - #...................................................... - def __getattribute__(self, attr): - try: - # Returns a generic attribute - return object.__getattribute__(self,attr) - except AttributeError: - # OK, so attr must be a field name - pass - # Get the list of fields ...... 
- _names = self.dtype.names - if attr in _names: - _data = self._data - _mask = self._fieldmask - obj = numeric.asarray(_data.__getattribute__(attr)).view(MaskedArray) - obj.__setmask__(_mask.__getattribute__(attr)) - return obj - raise AttributeError,"No attribute '%s' !" % attr - - def __setattr__(self, attr, val): - newattr = attr not in self.__dict__ - try: - # Is attr a generic attribute ? - ret = object.__setattr__(self, attr, val) - except: - # Not a generic attribute: exit if it's not a valid field - fielddict = self.dtype.names or {} - if attr not in fielddict: - exctype, value = sys.exc_info()[:2] - raise exctype, value - else: - if attr not in list(self.dtype.names) + ['_mask']: - return ret - if newattr: # We just added this one - try: # or this setattr worked on an internal - # attribute. - object.__delattr__(self, attr) - except: - return ret - # Case #1.: Basic field ............ - base_fmask = self._fieldmask - _names = self.dtype.names - if attr in _names: - fval = filled(val) - mval = getmaskarray(val) - if self._hardmask: - mval = mask_or(mval, base_fmask.__getattr__(attr)) - self._data.__setattr__(attr, fval) - base_fmask.__setattr__(attr, mval) - return - elif attr == '_mask': - self.__setmask__(val) - return - #............................................ - def __getitem__(self, indx): - """Returns all the fields sharing the same fieldname base. - The fieldname base is either `_data` or `_mask`.""" - _localdict = self.__dict__ - _data = self._data - # We want a field ........ - if isinstance(indx, str): - obj = _data[indx].view(MaskedArray) - obj._set_mask(_localdict['_fieldmask'][indx]) - return obj - # We want some elements .. - obj = ndarray.__getitem__(self, indx).view(type(self)) - obj._fieldmask = _localdict['_fieldmask'][indx] - return obj - #............................................ - def __setitem__(self, indx, value): - """Sets the given record to value.""" - MaskedArray.__setitem__(self, indx, value) - -# def __getslice__(self, i, j): -# """Returns the slice described by [i,j].""" -# _localdict = self.__dict__ -# return MaskedRecords(_localdict['_data'][i:j], -# mask=_localdict['_fieldmask'][i:j], -# dtype=self.dtype) -# - def __setslice__(self, i, j, value): - """Sets the slice described by [i,j] to `value`.""" - _localdict = self.__dict__ - d = self._data - m = _localdict['_fieldmask'] - names = self.dtype.names - if value is masked: - for n in names: - m[i:j][n] = True - elif not self._hardmask: - fval = filled(value) - mval = getmaskarray(value) - for n in names: - d[n][i:j] = fval - m[n][i:j] = mval - else: - mindx = getmaskarray(self)[i:j] - dval = numeric.asarray(value) - valmask = getmask(value) - if valmask is nomask: - for n in names: - mval = mask_or(m[n][i:j], valmask) - d[n][i:j][~mval] = value - elif valmask.size > 1: - for n in names: - mval = mask_or(m[n][i:j], valmask) - d[n][i:j][~mval] = dval[~mval] - m[n][i:j] = mask_or(m[n][i:j], mval) - self._fieldmask = m - - #..................................................... 
- def __setmask__(self, mask): - names = self.dtype.names - fmask = self.__dict__['_fieldmask'] - newmask = make_mask(mask, copy=False) -# self.unshare_mask() - if self._hardmask: - for n in names: - fmask[n].__ior__(newmask) - else: - for n in names: - fmask[n].flat = newmask - - def _getmask(self): - """Returns the mask of the mrecord: a record is masked when all the fields -are masked.""" - if self.size > 1: - return self._fieldmask.view((bool_, len(self.dtype))).all(1) - - _setmask = __setmask__ - _mask = property(fget=_getmask, fset=_setmask) - - #...................................................... - def __str__(self): - """x.__str__() <==> str(x) -Calculates the string representation, using masked for fill if it is enabled. -Otherwise, fills with fill value. - """ - if self.size > 1: - mstr = ["(%s)" % ",".join([str(i) for i in s]) - for s in zip(*[getattr(self,f) for f in self.dtype.names])] - return "[%s]" % ", ".join(mstr) - else: - mstr = numeric.asarray(self._data.item(), dtype=object_) - mstr[list(self._fieldmask)] = masked_print_option - return str(mstr) - - def __repr__(self): - """x.__repr__() <==> repr(x) -Calculates the repr representation, using masked for fill if it is enabled. -Otherwise fill with fill value. - """ - _names = self.dtype.names - fmt = "%%%is : %%s" % (max([len(n) for n in _names])+4,) - reprstr = [fmt % (f,getattr(self,f)) for f in self.dtype.names] - reprstr.insert(0,'masked_records(') - reprstr.extend([fmt % (' fill_value', self._fill_value), - ' )']) - return str("\n".join(reprstr)) - #...................................................... - def view(self, obj): - """Returns a view of the mrecarray.""" - try: - if issubclass(obj, ndarray): - return ndarray.view(self, obj) - except TypeError: - pass - dtype = numeric.dtype(obj) - if dtype.fields is None: - return self.__array__().view(dtype) - return ndarray.view(self, obj) - #...................................................... - def filled(self, fill_value=None): - """Returns an array of the same class as `_data`, - with masked values filled with `fill_value`. -Subclassing is preserved. - -If `fill_value` is None, uses self.fill_value. - """ - _localdict = self.__dict__ - d = self._data - fm = _localdict['_fieldmask'] - if not numeric.asarray(fm, dtype=bool_).any(): - return d - # - if fill_value is None: - value = _localdict['_fill_value'] - else: - value = fill_value - if numeric.size(value) == 1: - value = [value,] * len(self.dtype) - # - if self is masked: - result = numeric.asanyarray(value) - else: - result = d.copy() - for (n, v) in zip(d.dtype.names, value): - numpy.putmask(numeric.asarray(result[n]), - numeric.asarray(fm[n]), v) - return result - #............................................ - def harden_mask(self): - "Forces the mask to hard" - self._hardmask = True - def soften_mask(self): - "Forces the mask to soft" - self._hardmask = False - #............................................. - def copy(self): - """Returns a copy of the masked record.""" - _localdict = self.__dict__ - return MaskedRecords(self._data.copy(), - mask=_localdict['_fieldmask'].copy(), - dtype=self.dtype) - #............................................. 
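Before the constructors below, a short usage sketch of MaskedRecords
(assuming the sandbox package is on the path as `maskedarray`, with numpy
imported as N per this module's convention; `fromarrays` is defined in the
next section):

    import numpy as N
    import maskedarray as MA
    from maskedarray.mrecords import fromarrays

    a = MA.array([1, 2, 3], mask=[0, 1, 0])
    b = MA.array([1.1, 2.2, 3.3], mask=[1, 0, 0])
    rec = fromarrays([a, b], dtype=[('x', N.int_), ('y', N.float_)])
    # Each field keeps its own mask...
    print(rec.x)        # [1 -- 3]
    # ...and a whole record only counts as masked when every field is:
    print(rec._mask)    # [False False False]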
- - -#####--------------------------------------------------------------------------- -#---- --- Constructors --- -#####--------------------------------------------------------------------------- - -def fromarrays(arraylist, dtype=None, shape=None, formats=None, - names=None, titles=None, aligned=False, byteorder=None): - """Creates a mrecarray from a (flat) list of masked arrays. - -:Parameters: - - `arraylist` : Sequence - A list of (masked) arrays. Each element of the sequence is first converted - to a masked array if needed. If a 2D array is passed as argument, it is - processed line by line - - `dtype` : numeric.dtype - Data type descriptor. - - `shape` : Integer *[None]* - Number of records. If None, `shape` is defined from the shape of the first - array in the list. - - `formats` : - (Description to write) - - `names` : - (description to write) - - `titles`: - (Description to write) - - `aligned`: Boolen *[False]* - (Description to write, not used anyway) - - `byteorder`: Boolen *[None]* - (Description to write, not used anyway) - - - """ - arraylist = [MA.asarray(x) for x in arraylist] - # Define/check the shape..................... - if shape is None or shape == 0: - shape = arraylist[0].shape - if isinstance(shape, int): - shape = (shape,) - # Define formats from scratch ............... - if formats is None and dtype is None: - formats = _getformats(arraylist) - # Define the dtype .......................... - if dtype is not None: - descr = numeric.dtype(dtype) - _names = descr.names - else: - parsed = format_parser(formats, names, titles, aligned, byteorder) - _names = parsed._names - descr = parsed._descr - # Determine shape from data-type............. - if len(descr) != len(arraylist): - msg = "Mismatch between the number of fields (%i) and the number of "\ - "arrays (%i)" - raise ValueError, msg % (len(descr), len(arraylist)) - d0 = descr[0].shape - nn = len(d0) - if nn > 0: - shape = shape[:-nn] - # Make sure the shape is the correct one .... - for k, obj in enumerate(arraylist): - nn = len(descr[k].shape) - testshape = obj.shape[:len(obj.shape)-nn] - if testshape != shape: - raise ValueError, "Array-shape mismatch in array %d" % k - # Reconstruct the descriptor, by creating a _data and _mask version - return MaskedRecords(arraylist, dtype=descr) -#.............................................................................. -def fromrecords(reclist, dtype=None, shape=None, formats=None, names=None, - titles=None, aligned=False, byteorder=None): - """Creates a MaskedRecords from a list of records. - - The data in the same field can be heterogeneous, they will be promoted - to the highest data type. This method is intended for creating - smaller record arrays. If used to create large array without formats - defined, it can be slow. - - If formats is None, then this will auto-detect formats. Use a list of - tuples rather than a list of lists for faster processing. - """ - # reclist is in fact a mrecarray ................. - if isinstance(reclist, MaskedRecords): - mdescr = reclist.dtype - shape = reclist.shape - return MaskedRecords(reclist, dtype=mdescr) - # No format, no dtype: create from to arrays ..... 
- nfields = len(reclist[0]) - if formats is None and dtype is None: # slower - if isinstance(reclist, recarray): - arrlist = [reclist.field(i) for i in range(len(reclist.dtype))] - if names is None: - names = reclist.dtype.names - else: - obj = numeric.array(reclist,dtype=object) - arrlist = [numeric.array(obj[...,i].tolist()) - for i in xrange(nfields)] - return MaskedRecords(arrlist, formats=formats, names=names, - titles=titles, aligned=aligned, byteorder=byteorder) - # Construct the descriptor ....................... - if dtype is not None: - descr = numeric.dtype(dtype) - _names = descr.names - else: - parsed = format_parser(formats, names, titles, aligned, byteorder) - _names = parsed._names - descr = parsed._descr - - try: - retval = numeric.array(reclist, dtype = descr).view(recarray) - except TypeError: # list of lists instead of list of tuples - if (shape is None or shape == 0): - shape = len(reclist)*2 - if isinstance(shape, (int, long)): - shape = (shape*2,) - if len(shape) > 1: - raise ValueError, "Can only deal with 1-d array." - retval = recarray(shape, mdescr) - for k in xrange(retval.size): - retval[k] = tuple(reclist[k]) - return MaskedRecords(retval, dtype=descr) - else: - if shape is not None and retval.shape != shape: - retval.shape = shape - # - return MaskedRecords(retval, dtype=descr) - -def _guessvartypes(arr): - """Tries to guess the dtypes of the str_ ndarray `arr`, by testing element-wise - conversion. Returns a list of dtypes. - The array is first converted to ndarray. If the array is 2D, the test is - performed on the first line. An exception is raised if the file is 3D or more. - """ - vartypes = [] - arr = numeric.asarray(arr) - if len(arr.shape) == 2 : - arr = arr[0] - elif len(arr.shape) > 2: - raise ValueError, "The array should be 2D at most!" - # Start the conversion loop ....... - for f in arr: - try: - val = int(f) - except ValueError: - try: - val = float(f) - except ValueError: - try: - val = complex(f) - except ValueError: - vartypes.append(arr.dtype) - else: - vartypes.append(complex_) - else: - vartypes.append(float_) - else: - vartypes.append(int_) - return vartypes - -def openfile(fname): - "Opens the file handle of file `fname`" - # A file handle ................... - if hasattr(fname, 'readline'): - return fname - # Try to open the file and guess its type - try: - f = open(fname) - except IOError: - raise IOError, "No such file: '%s'" % fname - if f.readline()[:2] != "\\x": - f.seek(0,0) - return f - raise NotImplementedError, "Wow, binary file" - - -def fromtextfile(fname, delimitor=None, commentchar='#', missingchar='', - varnames=None, vartypes=None): - """Creates a mrecarray from data stored in the file `filename`. - -:Parameters: - - `filename` : file name/handle - Handle of an opened file. - - `delimitor` : Character *None* - Alphanumeric character used to separate columns in the file. - If None, any (group of) white spacestring(s) will be used. - - `commentchar` : String *['#']* - Alphanumeric character used to mark the start of a comment. - - `missingchar` : String *['']* - String indicating missing data, and used to create the masks. - - `varnames` : Sequence *[None]* - Sequence of the variable names. If None, a list will be created from - the first non empty line of the file. - - `vartypes` : Sequence *[None]* - Sequence of the variables dtypes. If None, the sequence will be estimated - from the first non-commented line. - - - Ultra simple: the varnames are in the header, one line""" - # Try to open the file ...................... 
- f = openfile(fname) - # Get the first non-empty line as the varnames - while True: - line = f.readline() - firstline = line[:line.find(commentchar)].strip() - _varnames = firstline.split(delimitor) - if len(_varnames) > 1: - break - if varnames is None: - varnames = _varnames - # Get the data .............................. - _variables = MA.asarray([line.strip().split(delimitor) for line in f - if line[0] != commentchar and len(line) > 1]) - (_, nfields) = _variables.shape - # Try to guess the dtype .................... - if vartypes is None: - vartypes = _guessvartypes(_variables[0]) - else: - vartypes = [numeric.dtype(v) for v in vartypes] - if len(vartypes) != nfields: - msg = "Attempting to %i dtypes for %i fields!" - msg += " Reverting to default." - warnings.warn(msg % (len(vartypes), nfields)) - vartypes = _guessvartypes(_variables[0]) - # Construct the descriptor .................. - mdescr = [(n,f) for (n,f) in zip(varnames, vartypes)] - # Get the data and the mask ................. - # We just need a list of masked_arrays. It's easier to create it like that: - _mask = (_variables.T == missingchar) - _datalist = [masked_array(a,mask=m,dtype=t) - for (a,m,t) in zip(_variables.T, _mask, vartypes)] - return MaskedRecords(_datalist, dtype=mdescr) - -#.................................................................... -def addfield(mrecord, newfield, newfieldname=None): - """Adds a new field to the masked record array, using `newfield` as data -and `newfieldname` as name. If `newfieldname` is None, the new field name is -set to 'fi', where `i` is the number of existing fields. - """ - _data = mrecord._data - _mask = mrecord._fieldmask - if newfieldname is None or newfieldname in reserved_fields: - newfieldname = 'f%i' % len(_data.dtype) - newfield = MA.asarray(newfield) - # Get the new data ............ - # Create a new empty recarray - newdtype = numeric.dtype(_data.dtype.descr + \ - [(newfieldname, newfield.dtype)]) - newdata = recarray(_data.shape, newdtype) - # Add the exisintg field - [newdata.setfield(_data.getfield(*f),*f) - for f in _data.dtype.fields.values()] - # Add the new field - newdata.setfield(newfield._data, *newdata.dtype.fields[newfieldname]) - newdata = newdata.view(MaskedRecords) - # Get the new mask ............. 
- # Create a new empty recarray - newmdtype = numeric.dtype([(n,bool_) for n in newdtype.names]) - newmask = recarray(_data.shape, newmdtype) - # Add the old masks - [newmask.setfield(_mask.getfield(*f),*f) - for f in _mask.dtype.fields.values()] - # Add the mask of the new field - newmask.setfield(getmaskarray(newfield), - *newmask.dtype.fields[newfieldname]) - newdata._fieldmask = newmask - return newdata - -################################################################################ -if __name__ == '__main__': - import numpy as N - from maskedarray.testutils import assert_equal - if 0: - d = N.arange(5) - m = MA.make_mask([1,0,0,1,1]) - base_d = N.r_[d,d[::-1]].reshape(2,-1).T - base_m = N.r_[[m, m[::-1]]].T - base = MA.array(base_d, mask=base_m) - mrecord = fromarrays(base.T,dtype=[('a',N.float_),('b',N.float_)]) - mrec = MaskedRecords(mrecord) - # - mrec.a[3:] = 5 - assert_equal(mrec.a, [0,1,2,5,5]) - assert_equal(mrec.a._mask, [1,0,0,0,0]) - # - mrec.b[3:] = masked - assert_equal(mrec.b, [4,3,2,1,0]) - assert_equal(mrec.b._mask, [1,1,0,1,1]) - # - mrec[:2] = masked - assert_equal(mrec._mask, [1,1,0,0,0]) - mrec[-1] = masked - assert_equal(mrec._mask, [1,1,0,0,1]) - - if 1: - x = [(1.,10.,'a'),(2.,20,'b'),(3.14,30,'c'),(5.55,40,'d')] - desc = [('ffloat', N.float_), ('fint', N.int_), ('fstr', 'S10')] - mr = MaskedRecords(x,dtype=desc) - mr[0] = masked - mr.ffloat[-1] = masked - \ No newline at end of file + # get the shape ......................... + \ No newline at end of file Modified: trunk/Lib/sandbox/maskedarray/tests/test_mrecords.py =================================================================== --- trunk/Lib/sandbox/maskedarray/tests/test_mrecords.py 2007-08-15 00:53:14 UTC (rev 3242) +++ trunk/Lib/sandbox/maskedarray/tests/test_mrecords.py 2007-08-15 05:44:07 UTC (rev 3243) @@ -50,7 +50,7 @@ mrec = mrec.copy() assert_equal(mrec.a, MA.array(d,mask=m)) assert_equal(mrec.b, MA.array(d[::-1],mask=m[::-1])) - assert((mrec._fieldmask == N.core.records.fromarrays([m, m[::-1]])).all()) + assert((mrec._fieldmask == N.core.records.fromarrays([m, m[::-1]], dtype=mrec._fieldmask.dtype)).all()) assert_equal(mrec._mask, N.r_[[m,m[::-1]]].all(0)) assert_equal(mrec.a[1], mrec[1].a) # @@ -138,7 +138,7 @@ mrecfr = fromrecords(tmp) assert_equal(mrecfr.a, mrec.a[::-1]) #.................... - mrecfr = fromrecords(nrec.tolist()) + mrecfr = fromrecords(nrec.tolist(), names=nrec.dtype.names) assert_equal(mrecfr.a, mrec.a) assert_equal(mrecfr.dtype, mrec.dtype) From scipy-svn at scipy.org Wed Aug 15 02:04:44 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 15 Aug 2007 01:04:44 -0500 (CDT) Subject: [Scipy-svn] r3244 - trunk/Lib/interpolate Message-ID: <20070815060444.9BA5539C2AE@new.scipy.org> Author: oliphant Date: 2007-08-15 01:04:30 -0500 (Wed, 15 Aug 2007) New Revision: 3244 Modified: trunk/Lib/interpolate/interpolate.py Log: Fix interpolate.interp1d so that it works for higher order splines. 
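As a hedged sketch of what this change enables (names as they appear in the
diff below; the printed values are illustrative, not verified output):

    import numpy as np
    from scipy.interpolate import interp1d

    x = np.linspace(0, 10, 11)
    y = np.sin(x)
    # String kinds still map to spline orders; with this fix an integer
    # kind is accepted directly as the spline order:
    f3 = interp1d(x, y, kind='cubic')   # order-3 spline
    f5 = interp1d(x, y, kind=5)         # order-5 spline, previously rejected
    print(f3(2.5), f5(2.5))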
Modified: trunk/Lib/interpolate/interpolate.py =================================================================== --- trunk/Lib/interpolate/interpolate.py 2007-08-15 05:44:07 UTC (rev 3243) +++ trunk/Lib/interpolate/interpolate.py 2007-08-15 06:04:30 UTC (rev 3244) @@ -197,16 +197,15 @@ self.bounds_error = bounds_error self.fill_value = fill_value - - if isinstance(kind, int): - kind = {0:'zero', - 1:'slinear', - 2:'quadratic', - 3:'cubic'}.get(kind,'none') - - if kind not in ['zero', 'linear', 'slinear', 'quadratic', 'cubic']: - raise NotImplementedError("%d is unsupported: Use fitpack "\ - "routines for other types.") + if kind in ['zero', 'slinear', 'quadratic', 'cubic']: + order = {'zero':0,'slinear':1,'quadratic':2, 'cubic':3}[kind] + kind = 'spline' + elif isinstance(kind, int): + order = kind + kind = 'spline' + elif kind != 'linear': + raise NotImplementedError("%s is unsupported: Use fitpack "\ + "routines for other types." % kind) x = array(x, copy=self.copy) y = array(y, copy=self.copy) @@ -228,7 +227,6 @@ self._call = self._call_linear else: oriented_y = y.swapaxes(0, axis) - order = {'zero':0,'slinear':1,'quadratic':2, 'cubic':3}[kind] minval = order + 1 len_y = oriented_y.shape[0] self._call = self._call_spline @@ -275,7 +273,7 @@ return y_new def _call_spline(self, x_new): - x_new = asarray(x_new) + x_new =np.asarray(x_new) result = spleval(self._spline,x_new.ravel()) return result.reshape(x_new.shape+result.shape[1:]) @@ -716,7 +714,7 @@ res = np.empty(xx.shape + sh) for index in np.ndindex(*sh): sl = (slice(None),)+index - res[sl] = _fitpack._bspleval(xx,xk,cvals[sl],k,deriv) + res[sl] = _fitpack._bspleval(xx,xj,cvals[sl],k,deriv) res.shape = oldshape + sh return res From scipy-svn at scipy.org Wed Aug 15 09:38:23 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 15 Aug 2007 08:38:23 -0500 (CDT) Subject: [Scipy-svn] r3245 - in trunk/Lib/sandbox/maskedarray: . tests Message-ID: <20070815133823.353F639C04A@new.scipy.org> Author: pierregm Date: 2007-08-15 08:38:19 -0500 (Wed, 15 Aug 2007) New Revision: 3245 Modified: trunk/Lib/sandbox/maskedarray/core.py trunk/Lib/sandbox/maskedarray/mrecords.py trunk/Lib/sandbox/maskedarray/tests/test_core.py Log: mrecords : * fixed a pb w/ numpy.void * returns 'masked' when accessing a masked attribute from a unique record. core : * modified .tolist() so that fill_value=None now outputs None for masked values Modified: trunk/Lib/sandbox/maskedarray/core.py =================================================================== --- trunk/Lib/sandbox/maskedarray/core.py 2007-08-15 06:04:30 UTC (rev 3244) +++ trunk/Lib/sandbox/maskedarray/core.py 2007-08-15 13:38:19 UTC (rev 3245) @@ -0,0 +1,2698 @@ +# pylint: disable-msg=E1002 +"""MA: a facility for dealing with missing observations +MA is generally used as a numpy.array look-alike. +by Paul F. Dubois. + +Copyright 1999, 2000, 2001 Regents of the University of California. +Released for unlimited redistribution. +Adapted for numpy_core 2005 by Travis Oliphant and +(mainly) Paul Dubois. + +Subclassing of the base ndarray 2006 by Pierre Gerard-Marchant. 
+pgmdevlist_AT_gmail_DOT_com +Improvements suggested by Reggie Dugard (reggie_AT_merfinllc_DOT_com) + +:author: Pierre Gerard-Marchant +:contact: pierregm_at_uga_dot_edu +:version: $Id$ +""" +__author__ = "Pierre GF Gerard-Marchant ($Author$)" +__version__ = '1.0' +__revision__ = "$Revision$" +__date__ = '$Date$' + +__all__ = ['MAError', 'MaskType', 'MaskedArray', + 'bool_', 'complex_', 'float_', 'int_', 'object_', + 'abs', 'absolute', 'add', 'all', 'allclose', 'allequal', 'alltrue', + 'amax', 'amin', 'anom', 'anomalies', 'any', 'arange', + 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2', + 'arctanh', 'argmax', 'argmin', 'argsort', 'around', + 'array', 'asarray', + 'bitwise_and', 'bitwise_or', 'bitwise_xor', + 'ceil', 'choose', 'compressed', 'concatenate', 'conjugate', + 'cos', 'cosh', 'count', + 'diagonal', 'divide', 'dump', 'dumps', + 'empty', 'empty_like', 'equal', 'exp', + 'fabs', 'fmod', 'filled', 'floor', 'floor_divide', + 'getmask', 'getmaskarray', 'greater', 'greater_equal', 'hypot', + 'ids', 'inner', 'innerproduct', + 'isMA', 'isMaskedArray', 'is_mask', 'is_masked', 'isarray', + 'left_shift', 'less', 'less_equal', 'load', 'loads', 'log', 'log10', + 'logical_and', 'logical_not', 'logical_or', 'logical_xor', + 'make_mask', 'make_mask_none', 'mask_or', 'masked', + 'masked_array', 'masked_equal', 'masked_greater', + 'masked_greater_equal', 'masked_inside', 'masked_less', + 'masked_less_equal', 'masked_not_equal', 'masked_object', + 'masked_outside', 'masked_print_option', 'masked_singleton', + 'masked_values', 'masked_where', 'max', 'maximum', 'mean', 'min', + 'minimum', 'multiply', + 'negative', 'nomask', 'nonzero', 'not_equal', + 'ones', 'outer', 'outerproduct', + 'power', 'product', 'ptp', 'put', 'putmask', + 'rank', 'ravel', 'remainder', 'repeat', 'reshape', 'resize', + 'right_shift', 'round_', + 'shape', 'sin', 'sinh', 'size', 'sometrue', 'sort', 'sqrt', 'std', + 'subtract', 'sum', 'swapaxes', + 'take', 'tan', 'tanh', 'transpose', 'true_divide', + 'var', 'where', + 'zeros'] + +import sys +import types +import cPickle +import operator +# +import numpy +from numpy import bool_, complex_, float_, int_, object_, str_ + +import numpy.core.umath as umath +import numpy.core.fromnumeric as fromnumeric +import numpy.core.numeric as numeric +import numpy.core.numerictypes as ntypes +from numpy import bool_, dtype, typecodes, amax, amin, ndarray +from numpy import expand_dims as n_expand_dims +import warnings + + +MaskType = bool_ +nomask = MaskType(0) + +divide_tolerance = 1.e-35 +numpy.seterr(all='ignore') + +# TODO: There's still a problem with N.add.reduce not working... +# TODO: ...neither does N.add.accumulate + +#####-------------------------------------------------------------------------- +#---- --- Exceptions --- +#####-------------------------------------------------------------------------- +class MAError(Exception): + "Class for MA related errors." + def __init__ (self, args=None): + "Creates an exception." + Exception.__init__(self,args) + self.args = args + def __str__(self): + "Calculates the string representation." 
+        return str(self.args)
+    __repr__ = __str__
+
+#####--------------------------------------------------------------------------
+#---- --- Filling options ---
+#####--------------------------------------------------------------------------
+# b: boolean - c: complex - f: floats - i: integer - O: object - S: string
+default_filler = {'b': True,
+                  'c' : 1.e20 + 0.0j,
+                  'f' : 1.e20,
+                  'i' : 999999,
+                  'O' : '?',
+                  'S' : 'N/A',
+                  'u' : 999999,
+                  'V' : '???',
+                  }
+max_filler = ntypes._minvals
+max_filler.update([(k,-numeric.inf) for k in [numpy.float32, numpy.float64]])
+min_filler = ntypes._maxvals
+min_filler.update([(k,numeric.inf) for k in [numpy.float32, numpy.float64]])
+if 'float128' in ntypes.typeDict:
+    max_filler.update([(numpy.float128,-numeric.inf)])
+    min_filler.update([(numpy.float128, numeric.inf)])
+
+
+def default_fill_value(obj):
+    "Calculates the default fill value for an object `obj`."
+    if hasattr(obj,'dtype'):
+        defval = default_filler[obj.dtype.kind]
+    elif isinstance(obj, numeric.dtype):
+        defval = default_filler[obj.kind]
+    elif isinstance(obj, float):
+        defval = default_filler['f']
+    elif isinstance(obj, int) or isinstance(obj, long):
+        defval = default_filler['i']
+    elif isinstance(obj, str):
+        defval = default_filler['S']
+    elif isinstance(obj, complex):
+        defval = default_filler['c']
+    else:
+        defval = default_filler['O']
+    return defval
+
+def minimum_fill_value(obj):
+    "Calculates the default fill value suitable for taking the minimum of `obj`."
+    if hasattr(obj, 'dtype'):
+        objtype = obj.dtype
+        filler = min_filler[objtype]
+        if filler is None:
+            raise TypeError, 'Unsuitable type for calculating minimum.'
+        return filler
+    elif isinstance(obj, float):
+        return min_filler[ntypes.typeDict['float_']]
+    elif isinstance(obj, int):
+        return min_filler[ntypes.typeDict['int_']]
+    elif isinstance(obj, long):
+        return min_filler[ntypes.typeDict['uint']]
+    elif isinstance(obj, numeric.dtype):
+        return min_filler[obj]
+    else:
+        raise TypeError, 'Unsuitable type for calculating minimum.'
+
+def maximum_fill_value(obj):
+    "Calculates the default fill value suitable for taking the maximum of `obj`."
+    if hasattr(obj, 'dtype'):
+        objtype = obj.dtype
+        filler = max_filler[objtype]
+        if filler is None:
+            raise TypeError, 'Unsuitable type for calculating maximum.'
+        return filler
+    elif isinstance(obj, float):
+        return max_filler[ntypes.typeDict['float_']]
+    elif isinstance(obj, int):
+        return max_filler[ntypes.typeDict['int_']]
+    elif isinstance(obj, long):
+        return max_filler[ntypes.typeDict['uint']]
+    elif isinstance(obj, numeric.dtype):
+        return max_filler[obj]
+    else:
+        raise TypeError, 'Unsuitable type for calculating maximum.'
+
+def set_fill_value(a, fill_value):
+    "Sets the fill value of `a` if it is a masked array."
+    if isinstance(a, MaskedArray):
+        a.set_fill_value(fill_value)
+
+def get_fill_value(a):
+    """Returns the fill value of `a`, if any.
+    Otherwise, returns the default fill value for that type.
+    """
+    if isinstance(a, MaskedArray):
+        result = a.fill_value
+    else:
+        result = default_fill_value(a)
+    return result
+
+def common_fill_value(a, b):
+    "Returns the common fill_value of `a` and `b`, if any, or `None`."
+    t1 = get_fill_value(a)
+    t2 = get_fill_value(b)
+    if t1 == t2:
+        return t1
+    return None
+
+#................................................
+def filled(a, value = None):
+    """Returns `a` as an array with masked data replaced by `value`.
+If `value` is `None` or the special element `masked`, `get_fill_value(a)`
+is used instead.
+ +If `a` is already a contiguous numeric array, `a` itself is returned. + +`filled(a)` can be used to be sure that the result is numeric when passing +an object a to other software ignorant of MA, in particular to numpy itself. + """ + if hasattr(a, 'filled'): + return a.filled(value) + elif isinstance(a, ndarray): # and a.flags['CONTIGUOUS']: + return a + elif isinstance(a, dict): + return numeric.array(a, 'O') + else: + return numeric.array(a) + +def get_masked_subclass(*arrays): + """Returns the youngest subclass of MaskedArray from a list of arrays, + or MaskedArray. In case of siblings, the first takes over.""" + if len(arrays) == 1: + arr = arrays[0] + if isinstance(arr, MaskedArray): + rcls = type(arr) + else: + rcls = MaskedArray + else: + arrcls = [type(a) for a in arrays] + rcls = arrcls[0] + if not issubclass(rcls, MaskedArray): + rcls = MaskedArray + for cls in arrcls[1:]: + if issubclass(cls, rcls): + rcls = cls + return rcls + +#####-------------------------------------------------------------------------- +#---- --- Ufuncs --- +#####-------------------------------------------------------------------------- +ufunc_domain = {} +ufunc_fills = {} + +class domain_check_interval: + """Defines a valid interval, +so that `domain_check_interval(a,b)(x) = true` where `x < a` or `x > b`.""" + def __init__(self, a, b): + "domain_check_interval(a,b)(x) = true where x < a or y > b" + if (a > b): + (a, b) = (b, a) + self.a = a + self.b = b + + def __call__ (self, x): + "Execute the call behavior." + return umath.logical_or(umath.greater (x, self.b), + umath.less(x, self.a)) +#............................ +class domain_tan: + """Defines a valid interval for the `tan` function, +so that `domain_tan(eps) = True where `abs(cos(x)) < eps`""" + def __init__(self, eps): + "domain_tan(eps) = true where abs(cos(x)) < eps)" + self.eps = eps + def __call__ (self, x): + "Execute the call behavior." + return umath.less(umath.absolute(umath.cos(x)), self.eps) +#............................ +class domain_safe_divide: + """defines a domain for safe division.""" + def __init__ (self, tolerance=divide_tolerance): + self.tolerance = tolerance + def __call__ (self, a, b): + return umath.absolute(a) * self.tolerance >= umath.absolute(b) +#............................ +class domain_greater: + "domain_greater(v)(x) = true where x <= v" + def __init__(self, critical_value): + "domain_greater(v)(x) = true where x <= v" + self.critical_value = critical_value + + def __call__ (self, x): + "Execute the call behavior." + return umath.less_equal(x, self.critical_value) +#............................ +class domain_greater_equal: + "domain_greater_equal(v)(x) = true where x < v" + def __init__(self, critical_value): + "domain_greater_equal(v)(x) = true where x < v" + self.critical_value = critical_value + + def __call__ (self, x): + "Execute the call behavior." + return umath.less(x, self.critical_value) +#.............................................................................. +class masked_unary_operation: + """Defines masked version of unary operations, +where invalid values are pre-masked. + +:IVariables: + - `f` : function. + - `fill` : Default filling value *[0]*. + - `domain` : Default domain *[None]*. + """ + def __init__ (self, mufunc, fill=0, domain=None): + """ masked_unary_operation(aufunc, fill=0, domain=None) + aufunc(fill) must be defined + self(x) returns aufunc(x) + with masked values where domain(x) is true or getmask(x) is true. 
+ """ + self.f = mufunc + self.fill = fill + self.domain = domain + self.__doc__ = getattr(mufunc, "__doc__", str(mufunc)) + self.__name__ = getattr(mufunc, "__name__", str(mufunc)) + ufunc_domain[mufunc] = domain + ufunc_fills[mufunc] = fill + # + def __call__ (self, a, *args, **kwargs): + "Execute the call behavior." +# numeric tries to return scalars rather than arrays when given scalars. + m = getmask(a) + d1 = filled(a, self.fill) + if self.domain is not None: + m = mask_or(m, numeric.asarray(self.domain(d1))) + # Take care of the masked singletong first ... + if m.ndim == 0 and m: + return masked + # Get the result.... + if isinstance(a, MaskedArray): + result = self.f(d1, *args, **kwargs).view(type(a)) + else: + result = self.f(d1, *args, **kwargs).view(MaskedArray) + # Fix the mask if we don't have a scalar + if result.ndim > 0: + result._mask = m + return result + # + def __str__ (self): + return "Masked version of %s. [Invalid values are masked]" % str(self.f) +#.............................................................................. +class masked_binary_operation: + """Defines masked version of binary operations, +where invalid values are pre-masked. + +:IVariables: + - `f` : function. + - `fillx` : Default filling value for first array*[0]*. + - `filly` : Default filling value for second array*[0]*. + - `domain` : Default domain *[None]*. + """ + def __init__ (self, mbfunc, fillx=0, filly=0): + """abfunc(fillx, filly) must be defined. + abfunc(x, filly) = x for all x to enable reduce. + """ + self.f = mbfunc + self.fillx = fillx + self.filly = filly + self.__doc__ = getattr(mbfunc, "__doc__", str(mbfunc)) + self.__name__ = getattr(mbfunc, "__name__", str(mbfunc)) + ufunc_domain[mbfunc] = None + ufunc_fills[mbfunc] = (fillx, filly) + # + def __call__ (self, a, b, *args, **kwargs): + "Execute the call behavior." + m = mask_or(getmask(a), getmask(b)) + if (not m.ndim) and m: + return masked + d1 = filled(a, self.fillx) + d2 = filled(b, self.filly) +# CHECK : Do we really need to fill the arguments ? Pro'ly not +# result = self.f(a, b, *args, **kwargs).view(get_masked_subclass(a,b)) + result = self.f(d1, d2, *args, **kwargs).view(get_masked_subclass(a,b)) + if result.ndim > 0: + result._mask = m + return result + # + def reduce (self, target, axis=0, dtype=None): + """Reduces `target` along the given `axis`.""" + if isinstance(target, MaskedArray): + tclass = type(target) + else: + tclass = MaskedArray + m = getmask(target) + t = filled(target, self.filly) + if t.shape == (): + t = t.reshape(1) + if m is not nomask: + m = make_mask(m, copy=1) + m.shape = (1,) + if m is nomask: + return self.f.reduce(t, axis).view(tclass) + t = t.view(tclass) + t._mask = m + # XXX: "or t.dtype" below is a workaround for what appears + # XXX: to be a bug in reduce. + tr = self.f.reduce(filled(t, self.filly), axis, dtype=dtype or t.dtype) + mr = umath.logical_and.reduce(m, axis) + tr = tr.view(tclass) + if mr.ndim > 0: + tr._mask = mr + return tr + elif mr: + return masked + return tr + + def outer (self, a, b): + "Returns the function applied to the outer product of a and b." 
+ ma = getmask(a) + mb = getmask(b) + if ma is nomask and mb is nomask: + m = nomask + else: + ma = getmaskarray(a) + mb = getmaskarray(b) + m = umath.logical_or.outer(ma, mb) + if (not m.ndim) and m: + return masked + rcls = get_masked_subclass(a,b) + d = self.f.outer(filled(a, self.fillx), filled(b, self.filly)).view(rcls) + if d.ndim > 0: + d._mask = m + return d + + def accumulate (self, target, axis=0): + """Accumulates `target` along `axis` after filling with y fill value.""" + if isinstance(target, MaskedArray): + tclass = type(target) + else: + tclass = masked_array + t = filled(target, self.filly) + return self.f.accumulate(t, axis).view(tclass) + + def __str__ (self): + return "Masked version of " + str(self.f) +#.............................................................................. +class domained_binary_operation: + """Defines binary operations that have a domain, like divide. + +These are complicated so they are a separate class. +They have no reduce, outer or accumulate. + +:IVariables: + - `f` : function. + - `fillx` : Default filling value for first array*[0]*. + - `filly` : Default filling value for second array*[0]*. + - `domain` : Default domain *[None]*. + """ + def __init__ (self, dbfunc, domain, fillx=0, filly=0): + """abfunc(fillx, filly) must be defined. + abfunc(x, filly) = x for all x to enable reduce. + """ + self.f = dbfunc + self.domain = domain + self.fillx = fillx + self.filly = filly + self.__doc__ = getattr(dbfunc, "__doc__", str(dbfunc)) + self.__name__ = getattr(dbfunc, "__name__", str(dbfunc)) + ufunc_domain[dbfunc] = domain + ufunc_fills[dbfunc] = (fillx, filly) + + def __call__(self, a, b): + "Execute the call behavior." + ma = getmask(a) + mb = getmask(b) + d1 = filled(a, self.fillx) + d2 = filled(b, self.filly) + t = numeric.asarray(self.domain(d1, d2)) + + if fromnumeric.sometrue(t, None): + d2 = numeric.where(t, self.filly, d2) + mb = mask_or(mb, t) + m = mask_or(ma, mb) + if (not m.ndim) and m: + return masked + result = self.f(d1, d2).view(get_masked_subclass(a,b)) + if result.ndim > 0: + result._mask = m + return result + + def __str__ (self): + return "Masked version of " + str(self.f) + +#.............................................................................. 
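The classes above all share one fill-then-compute pattern: evaluate the domain on filled data, replace out-of-domain entries by the fill value, run the raw ufunc, then re-attach the combined mask. A minimal standalone sketch of that pattern using plain numpy (masked_divide is a hypothetical name for illustration, not part of this patch):

    import numpy as np

    def masked_divide(a, b, filly=1.0, tolerance=1e-35):
        a = np.asarray(a, dtype=float)
        b = np.asarray(b, dtype=float)
        # domain_safe_divide: an entry is invalid where |a|*tolerance >= |b|
        invalid = np.absolute(a) * tolerance >= np.absolute(b)
        # fill the invalid spots of b before computing, as
        # domained_binary_operation.__call__ does above
        data = a / np.where(invalid, filly, b)
        return data, invalid

    data, mask = masked_divide([1., 2., 3.], [2., 0., 4.])
    # data -> [0.5, 2.0, 0.75]; mask -> [False, True, False]: 2/0 is masked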
+# Unary ufuncs
+exp = masked_unary_operation(umath.exp)
+conjugate = masked_unary_operation(umath.conjugate)
+sin = masked_unary_operation(umath.sin)
+cos = masked_unary_operation(umath.cos)
+arctan = masked_unary_operation(umath.arctan)
+arcsinh = masked_unary_operation(umath.arcsinh)
+sinh = masked_unary_operation(umath.sinh)
+cosh = masked_unary_operation(umath.cosh)
+tanh = masked_unary_operation(umath.tanh)
+abs = absolute = masked_unary_operation(umath.absolute)
+fabs = masked_unary_operation(umath.fabs)
+negative = masked_unary_operation(umath.negative)
+floor = masked_unary_operation(umath.floor)
+ceil = masked_unary_operation(umath.ceil)
+around = masked_unary_operation(fromnumeric.round_)
+logical_not = masked_unary_operation(umath.logical_not)
+# Domained unary ufuncs (tan is defined here, with its domain, rather than above)
+sqrt = masked_unary_operation(umath.sqrt, 0.0, domain_greater_equal(0.0))
+log = masked_unary_operation(umath.log, 1.0, domain_greater(0.0))
+log10 = masked_unary_operation(umath.log10, 1.0, domain_greater(0.0))
+tan = masked_unary_operation(umath.tan, 0.0, domain_tan(1.e-35))
+arcsin = masked_unary_operation(umath.arcsin, 0.0,
+                                domain_check_interval(-1.0, 1.0))
+arccos = masked_unary_operation(umath.arccos, 0.0,
+                                domain_check_interval(-1.0, 1.0))
+arccosh = masked_unary_operation(umath.arccosh, 1.0, domain_greater_equal(1.0))
+arctanh = masked_unary_operation(umath.arctanh, 0.0,
+                                 domain_check_interval(-1.0+1e-15, 1.0-1e-15))
+# Binary ufuncs
+add = masked_binary_operation(umath.add)
+subtract = masked_binary_operation(umath.subtract)
+multiply = masked_binary_operation(umath.multiply, 1, 1)
+arctan2 = masked_binary_operation(umath.arctan2, 0.0, 1.0)
+equal = masked_binary_operation(umath.equal)
+equal.reduce = None
+not_equal = masked_binary_operation(umath.not_equal)
+not_equal.reduce = None
+less_equal = masked_binary_operation(umath.less_equal)
+less_equal.reduce = None
+greater_equal = masked_binary_operation(umath.greater_equal)
+greater_equal.reduce = None
+less = masked_binary_operation(umath.less)
+less.reduce = None
+greater = masked_binary_operation(umath.greater)
+greater.reduce = None
+logical_and = masked_binary_operation(umath.logical_and)
+alltrue = masked_binary_operation(umath.logical_and, 1, 1).reduce
+logical_or = masked_binary_operation(umath.logical_or)
+sometrue = logical_or.reduce
+logical_xor = masked_binary_operation(umath.logical_xor)
+bitwise_and = masked_binary_operation(umath.bitwise_and)
+bitwise_or = masked_binary_operation(umath.bitwise_or)
+bitwise_xor = masked_binary_operation(umath.bitwise_xor)
+hypot = masked_binary_operation(umath.hypot)
+# Domained binary ufuncs
+divide = domained_binary_operation(umath.divide, domain_safe_divide(), 0, 1)
+true_divide = domained_binary_operation(umath.true_divide,
+                                        domain_safe_divide(), 0, 1)
+floor_divide = domained_binary_operation(umath.floor_divide,
+                                         domain_safe_divide(), 0, 1)
+remainder = domained_binary_operation(umath.remainder,
+                                      domain_safe_divide(), 0, 1)
+fmod = domained_binary_operation(umath.fmod, domain_safe_divide(), 0, 1)
+
+
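With these definitions, applying a domained ufunc to values outside its domain yields masked entries instead of warnings or nans. A hedged usage sketch (shown with numpy.ma, which follows the same design as this module):

    import numpy.ma as ma
    x = ma.array([1.0, -1.0, 100.0])
    y = ma.log10(x)          # -1.0 lies outside domain_greater(0.0)
    # y.mask -> [False, True, False]; y[0] == 0.0, y[2] == 2.0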
+#####--------------------------------------------------------------------------
+#---- --- Mask creation functions ---
+#####--------------------------------------------------------------------------
+def getmask(a):
+    """Returns the mask of `a`, if any, or `nomask`.
+Returns `nomask` if `a` is not a masked array.
+To always get an ndarray, use `getmaskarray` instead."""
+    if hasattr(a, "_mask"):
+        return a._mask
+    else:
+        return nomask
+
+def getmaskarray(a):
+    """Returns the mask of `a`, if any.
+Otherwise, returns an array of `False`, with the same shape as `a`.
+    """
+    m = getmask(a)
+    if m is nomask:
+        return make_mask_none(fromnumeric.shape(a))
+    else:
+        return m
+
+def is_mask(m):
+    """Returns `True` if `m` is a legal mask.
+Does not check the contents, only the type.
+    """
+    try:
+        return m.dtype.type is MaskType
+    except AttributeError:
+        return False
+#
+def make_mask(m, copy=False, small_mask=True, flag=None):
+    """make_mask(m, copy=False, small_mask=True)
+Returns `m` as a mask, creating a copy if necessary or requested.
+The function can accept any sequence of integers or `nomask`.
+Does not check that the contents are 0s and 1s.
+If `small_mask=True`, returns `nomask` if `m` contains no true elements.
+
+:Parameters:
+    - `m` (ndarray) : Mask.
+    - `copy` (boolean, *[False]*) : Returns a copy of `m` if true.
+    - `small_mask` (boolean, *[True]*): Flattens mask to `nomask` if `m` is all false.
+    """
+    if flag is not None:
+        warnings.warn("The flag 'flag' is now called 'small_mask'!",
+                      DeprecationWarning)
+        small_mask = flag
+    if m is nomask:
+        return nomask
+    elif isinstance(m, ndarray):
+        m = filled(m, True)
+        if m.dtype.type is MaskType:
+            if copy:
+                result = numeric.array(m, dtype=MaskType, copy=copy)
+            else:
+                result = m
+        else:
+            result = numeric.array(m, dtype=MaskType)
+    else:
+        result = numeric.array(filled(m, True), dtype=MaskType)
+    # Shrink an all-False mask to nomask if requested
+    if small_mask and not result.any():
+        return nomask
+    else:
+        return result
+
+def make_mask_none(s):
+    "Returns a mask of shape `s`, filled with `False`."
+    result = numeric.zeros(s, dtype=MaskType)
+    return result
+
+def mask_or (m1, m2, copy=False, small_mask=True):
+    """Returns the combination of two masks `m1` and `m2`.
+The masks are combined with the `logical_or` operator, treating `nomask` as false.
+The result may equal m1 or m2 if the other is nomask.
+
+:Parameters:
+    - `m1` (ndarray) : First mask.
+    - `m2` (ndarray) : Second mask.
+    - `copy` (boolean, *[False]*) : Returns a copy of the mask if true.
+    - `small_mask` (boolean, *[True]*): Flattens the result to `nomask` if it is all false.
+    """
+    if m1 is nomask:
+        return make_mask(m2, copy=copy, small_mask=small_mask)
+    if m2 is nomask:
+        return make_mask(m1, copy=copy, small_mask=small_mask)
+    if m1 is m2 and is_mask(m1):
+        return m1
+    return make_mask(umath.logical_or(m1, m2), copy=copy, small_mask=small_mask)
+
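A hedged sketch of the helpers above (numpy.ma exposes the same functions; its later releases rename the `small_mask` keyword to `shrink`):

    import numpy.ma as ma
    m1 = ma.make_mask([0, 0, 1])   # -> array([False, False, True])
    m2 = ma.make_mask([0, 0, 0])   # an all-False mask collapses to nomask
    m2 is ma.nomask                # -> True
    ma.mask_or(m1, m2)             # nomask counts as all-False -> m1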
+#####--------------------------------------------------------------------------
+#--- --- Masking functions ---
+#####--------------------------------------------------------------------------
+def masked_where(condition, a, copy=True):
+    """Returns `a` as an array masked where `condition` is true.
+Masked values of `a` or `condition` are kept.
+
+:Parameters:
+    - `condition` (ndarray) : Masking condition.
+    - `a` (ndarray) : Array to mask.
+    - `copy` (boolean, *[True]*) : Returns a copy of `a` if true.
+    """
+    cond = filled(condition,1)
+    a = numeric.array(a, copy=copy, subok=True)
+    if hasattr(a, '_mask'):
+        cond = mask_or(cond, a._mask)
+        cls = type(a)
+    else:
+        cls = MaskedArray
+    result = a.view(cls)
+    result._mask = cond
+    return result
+
+def masked_greater(x, value, copy=True):
+    "Shortcut to `masked_where`, with ``condition = (x > value)``."
+    return masked_where(greater(x, value), x, copy=copy)
+
+def masked_greater_equal(x, value, copy=True):
+    "Shortcut to `masked_where`, with ``condition = (x >= value)``."
+    return masked_where(greater_equal(x, value), x, copy=copy)
+
+def masked_less(x, value, copy=True):
+    "Shortcut to `masked_where`, with ``condition = (x < value)``."
+    return masked_where(less(x, value), x, copy=copy)
+
+def masked_less_equal(x, value, copy=True):
+    "Shortcut to `masked_where`, with ``condition = (x <= value)``."
+    return masked_where(less_equal(x, value), x, copy=copy)
+
+def masked_not_equal(x, value, copy=True):
+    "Shortcut to `masked_where`, with ``condition = (x != value)``."
+    return masked_where((x != value), x, copy=copy)
+
+#
+def masked_equal(x, value, copy=True):
+    """Shortcut to `masked_where`, with ``condition = (x == value)``.
+For floating point, consider `masked_values(x, value)` instead.
+    """
+    return masked_where((x == value), x, copy=copy)
+#    d = filled(x, 0)
+#    c = umath.equal(d, value)
+#    m = mask_or(c, getmask(x))
+#    return array(d, mask=m, copy=copy)
+
+def masked_inside(x, v1, v2, copy=True):
+    """Shortcut to `masked_where`, where `condition` is True for x inside
+the interval `[v1,v2]` ``(v1 <= x <= v2)``.
+The boundaries `v1` and `v2` can be given in either order.
+    """
+    if v2 < v1:
+        (v1, v2) = (v2, v1)
+    xf = filled(x)
+    condition = (xf >= v1) & (xf <= v2)
+    return masked_where(condition, x, copy=copy)
+
+def masked_outside(x, v1, v2, copy=True):
+    """Shortcut to `masked_where`, where `condition` is True for x outside
+the interval `[v1,v2]` ``(x < v1)|(x > v2)``.
+The boundaries `v1` and `v2` can be given in either order.
+    """
+    if v2 < v1:
+        (v1, v2) = (v2, v1)
+    xf = filled(x)
+    condition = (xf < v1) | (xf > v2)
+    return masked_where(condition, x, copy=copy)
+
+#
+def masked_object(x, value, copy=True):
+    """Masks the array `x` where the data are exactly equal to `value`.
+This function is suitable only for `object` arrays: for floating point,
+please use `masked_values` instead.
+The mask is set to `nomask` if possible.
+
+:parameter copy (Boolean, *[True]*):  Returns a copy of `x` if true. """
+    if isMaskedArray(x):
+        condition = umath.equal(x._data, value)
+        mask = x._mask
+    else:
+        condition = umath.equal(fromnumeric.asarray(x), value)
+        mask = nomask
+    mask = mask_or(mask, make_mask(condition, small_mask=True))
+    return masked_array(x, mask=mask, copy=copy, fill_value=value)
+
+def masked_values(x, value, rtol=1.e-5, atol=1.e-8, copy=True):
+    """Masks the array `x` where the data are approximately equal to `value`
+(that is, ``abs(x - value) <= atol+rtol*abs(value)``).
+Suitable only for floating-point data; for integers, please use `masked_equal`.
+The mask is set to `nomask` if possible.
+
+:Parameters:
+    - `rtol` (Float, *[1e-5]*): Tolerance parameter.
+    - `atol` (Float, *[1e-8]*): Tolerance parameter.
+    - `copy` (boolean, *[True]*) : Returns a copy of `x` if True.
+ """ + abs = umath.absolute + xnew = filled(x, value) + if issubclass(xnew.dtype.type, numeric.floating): + condition = umath.less_equal(abs(xnew-value), atol+rtol*abs(value)) + try: + mask = x._mask + except AttributeError: + mask = nomask + else: + condition = umath.equal(xnew, value) + mask = nomask + mask = mask_or(mask, make_mask(condition, small_mask=True)) + return masked_array(xnew, mask=mask, copy=copy, fill_value=value) + +#####-------------------------------------------------------------------------- +#---- --- Printing options --- +#####-------------------------------------------------------------------------- +class _MaskedPrintOption: + """Handles the string used to represent missing data in a masked array.""" + def __init__ (self, display): + "Creates the masked_print_option object." + self._display = display + self._enabled = True + + def display(self): + "Displays the string to print for masked values." + return self._display + + def set_display (self, s): + "Sets the string to print for masked values." + self._display = s + + def enabled(self): + "Is the use of the display value enabled?" + return self._enabled + + def enable(self, small_mask=1): + "Set the enabling small_mask to `small_mask`." + self._enabled = small_mask + + def __str__ (self): + return str(self._display) + + __repr__ = __str__ + +#if you single index into a masked location you get this object. +masked_print_option = _MaskedPrintOption('--') + +#####-------------------------------------------------------------------------- +#---- --- MaskedArray class --- +#####-------------------------------------------------------------------------- +##def _getoptions(a_out, a_in): +## "Copies standards options of a_in to a_out." +## for att in ['] +#class _mathmethod(object): +# """Defines a wrapper for arithmetic methods. +#Instead of directly calling a ufunc, the corresponding method of the `array._data` +#object is called instead. +# """ +# def __init__ (self, methodname, fill_self=0, fill_other=0, domain=None): +# """ +#:Parameters: +# - `methodname` (String) : Method name. +# - `fill_self` (Float *[0]*) : Fill value for the instance. +# - `fill_other` (Float *[0]*) : Fill value for the target. +# - `domain` (Domain object *[None]*) : Domain of non-validity. +# """ +# self.methodname = methodname +# self.fill_self = fill_self +# self.fill_other = fill_other +# self.domain = domain +# self.obj = None +# self.__doc__ = self.getdoc() +# # +# def getdoc(self): +# "Returns the doc of the function (from the doc of the method)." +# try: +# return getattr(MaskedArray, self.methodname).__doc__ +# except: +# return getattr(ndarray, self.methodname).__doc__ +# # +# def __get__(self, obj, objtype=None): +# self.obj = obj +# return self +# # +# def __call__ (self, other, *args): +# "Execute the call behavior." +# instance = self.obj +# m_self = instance._mask +# m_other = getmask(other) +# base = instance.filled(self.fill_self) +# target = filled(other, self.fill_other) +# if self.domain is not None: +# # We need to force the domain to a ndarray only. +# if self.fill_other > self.fill_self: +# domain = self.domain(base, target) +# else: +# domain = self.domain(target, base) +# if domain.any(): +# #If `other` is a subclass of ndarray, `filled` must have the +# # same subclass, else we'll lose some info. +# #The easiest then is to fill `target` instead of creating +# # a pure ndarray. +# #Oh, and we better make a copy! 
+#            if isinstance(other, ndarray):
+#                # We don't want to modify other: let's copy target, then
+#                target = target.copy()
+#                target[fromnumeric.asarray(domain)] = self.fill_other
+#            else:
+#                target = numeric.where(fromnumeric.asarray(domain),
+#                                       self.fill_other, target)
+#            m_other = mask_or(m_other, domain)
+#        m = mask_or(m_self, m_other)
+#        method = getattr(base, self.methodname)
+#        result = method(target, *args).view(type(instance))
+#        try:
+#            result._mask = m
+#        except AttributeError:
+#            if m:
+#                result = masked
+#        return result
+#...............................................................................
+class _arraymethod(object):
+    """Defines a wrapper for basic array methods.
+Upon call, returns a masked array, where the new `_data` array is the output
+of the corresponding method called on the original `_data`.
+
+If `onmask` is True, the new mask is the output of the method called on the initial mask.
+If `onmask` is False, the new mask is just a reference to the initial mask.
+
+:Parameters:
+    `funcname` : String
+        Name of the function to apply on data.
+    `onmask` : Boolean *[True]*
+        Whether the mask must be processed also (True) or left alone (False).
+    """
+    def __init__(self, funcname, onmask=True):
+        self._name = funcname
+        self._onmask = onmask
+        self.obj = None
+        self.__doc__ = self.getdoc()
+    #
+    def getdoc(self):
+        "Returns the doc of the function (from the doc of the method)."
+        methdoc = getattr(ndarray, self._name, None)
+        methdoc = getattr(numpy, self._name, methdoc)
+#        methdoc = getattr(MaskedArray, self._name, methdoc)
+        if methdoc is not None:
+            return methdoc.__doc__
+#        try:
+#            return getattr(MaskedArray, self._name).__doc__
+#        except:
+#            try:
+#                return getattr(numpy, self._name).__doc__
+#            except:
+#                return getattr(ndarray, self._name).__doc
+    #
+    def __get__(self, obj, objtype=None):
+        self.obj = obj
+        return self
+    #
+    def __call__(self, *args, **params):
+        methodname = self._name
+        data = self.obj._data
+        mask = self.obj._mask
+        cls = type(self.obj)
+        result = getattr(data, methodname)(*args, **params).view(cls)
+        result._smallmask = self.obj._smallmask
+        if result.ndim:
+            if not self._onmask:
+                result._mask = mask
+            elif mask is not nomask:
+                result.__setmask__(getattr(mask, methodname)(*args, **params))
+        return result
+#..........................................................
+
+class flatiter(object):
+    "Defines a flat iterator over a masked array."
+    def __init__(self, ma):
+        self.ma = ma
+        self.ma_iter = numpy.asarray(ma).flat
+
+        if ma._mask is nomask:
+            self.maskiter = None
+        else:
+            self.maskiter = ma._mask.flat
+
+    def __iter__(self):
+        return self
+
+    ### This won't work if ravel makes a copy
+    def __setitem__(self, index, value):
+        a = self.ma.ravel()
+        a[index] = value
+
+    def next(self):
+        d = self.ma_iter.next()
+        if self.maskiter is not None and self.maskiter.next():
+            d = masked
+        return d
+
+
+class MaskedArray(numeric.ndarray):
+    """Arrays with possibly masked values.
+Elements whose mask value is True are excluded from any computation.
+
+Construction:
+    x = array(data, dtype=None, copy=True, order=False,
+              mask = nomask, fill_value=None, small_mask=True)
+
+If copy=False, every effort is made not to copy the data:
+If `data` is a MaskedArray, and argument mask=nomask, then the candidate data
+is `data._data` and the mask used is `data._mask`.
+If `data` is a numeric array, it is used as the candidate raw data.
+If `dtype` is not None and is different from data.dtype.char then a data copy is required.
+Otherwise, the candidate is used.
+ +If a data copy is required, the raw (unmasked) data stored is the result of: +numeric.array(data, dtype=dtype.char, copy=copy) + +If `mask` is `nomask` there are no masked values. +Otherwise mask must be convertible to an array of booleans with the same shape as x. +If `small_mask` is True, a mask consisting of zeros (False) only is compressed to `nomask`. +Otherwise, the mask is not compressed. + +fill_value is used to fill in masked values when necessary, such as when +printing and in method/function filled(). +The fill_value is not used for computation within this module. + """ + __array_priority__ = 10.1 + _defaultmask = nomask + _defaulthardmask = False + _baseclass = numeric.ndarray + def __new__(cls, data=None, mask=nomask, dtype=None, copy=False, fill_value=None, + keep_mask=True, small_mask=True, hard_mask=False, flag=None, + subok=True, **options): + """array(data, dtype=None, copy=True, mask=nomask, fill_value=None) + +If `data` is already a ndarray, its dtype becomes the default value of dtype. + """ + if flag is not None: + warnings.warn("The flag 'flag' is now called 'small_mask'!", + DeprecationWarning) + small_mask = flag + # Process data............ + _data = numeric.array(data, dtype=dtype, copy=copy, subok=subok) + _baseclass = getattr(data, '_baseclass', type(_data)) + _basedict = getattr(data, '_basedict', getattr(data, '__dict__', None)) + if not isinstance(data, MaskedArray): + _data = _data.view(cls) + elif not subok: + _data = data.view(cls) + else: + _data = _data.view(type(data)) + # Backwards compat ....... + if hasattr(data,'_mask') and not isinstance(data, ndarray): + _data._mask = data._mask + _sharedmask = True + # Process mask ........... + if mask is nomask: + if not keep_mask: + _data._mask = nomask + if copy: + _data._mask = _data._mask.copy() + else: + mask = numeric.array(mask, dtype=MaskType, copy=copy) + if mask.shape != _data.shape: + (nd, nm) = (_data.size, mask.size) + if nm == 1: + mask = numeric.resize(mask, _data.shape) + elif nm == nd: + mask = fromnumeric.reshape(mask, _data.shape) + else: + msg = "Mask and data not compatible: data size is %i, "+\ + "mask size is %i." + raise MAError, msg % (nd, nm) + if _data._mask is nomask: + _data._mask = mask + _data._sharedmask = True + else: + # Make a copy of the mask to avoid propagation + _data._sharedmask = False + if not keep_mask: + _data._mask = mask + else: + _data._mask = umath.logical_or(mask, _data._mask) + + + # Update fill_value....... + _data._fill_value = getattr(data, '_fill_value', fill_value) + if _data._fill_value is None: + _data._fill_value = default_fill_value(_data) + # Process extra options .. + _data._hardmask = hard_mask + _data._smallmask = small_mask + _data._baseclass = _baseclass + _data._basedict = _basedict + return _data + #........................ + def __array_finalize__(self,obj): + """Finalizes the masked array. + """ + # Finalize mask ............... + self._mask = getattr(obj, '_mask', nomask) + if self._mask is not nomask: + self._mask.shape = self.shape + # Get the remaining options ... + self._hardmask = getattr(obj, '_hardmask', self._defaulthardmask) + self._smallmask = getattr(obj, '_smallmask', True) + self._sharedmask = True + self._baseclass = getattr(obj, '_baseclass', type(obj)) + self._fill_value = getattr(obj, '_fill_value', None) + # Update special attributes ... 
+ self._basedict = getattr(obj, '_basedict', getattr(obj, '__dict__', None)) + if self._basedict is not None: + self.__dict__.update(self._basedict) + return + #.................................. + def __array_wrap__(self, obj, context=None): + """Special hook for ufuncs. +Wraps the numpy array and sets the mask according to context. + """ + #TODO : Should we check for type result + result = obj.view(type(self)) + #.......... + if context is not None: + result._mask = result._mask.copy() + (func, args, _) = context + m = reduce(mask_or, [getmask(arg) for arg in args]) + # Get domain mask + domain = ufunc_domain.get(func, None) + if domain is not None: + if len(args) > 2: + d = reduce(domain, args) + else: + d = domain(*args) + if m is nomask: + if d is not nomask: + m = d + else: + m |= d + if not m.ndim and m: + if m: + if result.shape == (): + return masked + result._mask = numeric.ones(result.shape, bool_) + else: + result._mask = m + #.... +# result._mask = m + result._fill_value = self._fill_value + result._hardmask = self._hardmask + result._smallmask = self._smallmask + result._baseclass = self._baseclass + return result + #............................................. + def __getitem__(self, indx): + """x.__getitem__(y) <==> x[y] +Returns the item described by i. Not a copy as in previous versions. + """ + # This test is useful, but we should keep things light... +# if getmask(indx) is not nomask: +# msg = "Masked arrays must be filled before they can be used as indices!" +# raise IndexError, msg + # super() can't work here if the underlying data is a matrix... + dout = (self._data).__getitem__(indx) + m = self._mask + if hasattr(dout, 'shape') and len(dout.shape) > 0: + # Not a scalar: make sure that dout is a MA + dout = dout.view(type(self)) + dout._smallmask = self._smallmask + if m is not nomask: + # use _set_mask to take care of the shape + dout.__setmask__(m[indx]) + elif m is not nomask and m[indx]: + return masked + return dout + #........................ + def __setitem__(self, indx, value): + """x.__setitem__(i, y) <==> x[i]=y +Sets item described by index. If value is masked, masks those locations. + """ + if self is masked: + raise MAError, 'Cannot alter the masked element.' +# if getmask(indx) is not nomask: +# msg = "Masked arrays must be filled before they can be used as indices!" +# raise IndexError, msg + #.... + if value is masked: + m = self._mask + if m is nomask: + m = make_mask_none(self.shape) +# else: +# m = m.copy() + m[indx] = True + self.__setmask__(m) + return + #.... + dval = numeric.asarray(value).astype(self.dtype) + valmask = getmask(value) + if self._mask is nomask: + if valmask is not nomask: + self._mask = make_mask_none(self.shape) + self._mask[indx] = valmask + elif not self._hardmask: + _mask = self._mask.copy() + if valmask is nomask: + _mask[indx] = False + else: + _mask[indx] = valmask + self._set_mask(_mask) + elif hasattr(indx, 'dtype') and (indx.dtype==bool_): + indx = indx * umath.logical_not(self._mask) + else: + mindx = mask_or(self._mask[indx], valmask, copy=True) + dindx = self._data[indx] + if dindx.size > 1: + dindx[~mindx] = dval + elif mindx is nomask: + dindx = dval + dval = dindx + self._mask[indx] = mindx + # Set data .......... + #dval = filled(value).astype(self.dtype) + ndarray.__setitem__(self._data,indx,dval) + #............................................ + def __getslice__(self, i, j): + """x.__getslice__(i, j) <==> x[i:j] +Returns the slice described by i, j. 
+The use of negative indices is not supported.""" + return self.__getitem__(slice(i,j)) + #........................ + def __setslice__(self, i, j, value): + """x.__setslice__(i, j, value) <==> x[i:j]=value +Sets a slice i:j to `value`. +If `value` is masked, masks those locations.""" + self.__setitem__(slice(i,j), value) + #............................................ + def __setmask__(self, mask, copy=False): + newmask = make_mask(mask, copy=copy, small_mask=self._smallmask) +# self.unshare_mask() + if self._mask is nomask: + self._mask = newmask + elif self._hardmask: + if newmask is not nomask: + self._mask.__ior__(newmask) + else: + # This one is tricky: if we set the mask that way, we may break the + # propagation. But if we don't, we end up with a mask full of False + # and a test on nomask fails... + if newmask is nomask: + self._mask = nomask + else: + self._mask.flat = newmask + if self._mask.shape: + self._mask = numeric.reshape(self._mask, self.shape) + _set_mask = __setmask__ + + def _get_mask(self): + """Returns the current mask.""" + return self._mask + + mask = property(fget=_get_mask, fset=__setmask__, doc="Mask") + #............................................ + def harden_mask(self): + "Forces the mask to hard." + self._hardmask = True + + def soften_mask(self): + "Forces the mask to soft." + self._hardmask = False + + def unshare_mask(self): + "Copies the mask and set the sharedmask flag to False." + if self._sharedmask: + self._mask = self._mask.copy() + self._sharedmask = False + + #............................................ + def _get_data(self): + "Returns the current data (as a view of the original underlying data)>" + return self.view(self._baseclass) + _data = property(fget=_get_data) + #............................................ + def _get_flat(self): + """Calculates the flat value. + """ + return flatiter(self) + # + def _set_flat (self, value): + "x.flat = value" + y = self.ravel() + y[:] = value + # + flat = property(fget=_get_flat, fset=_set_flat, doc="Flat version") + #............................................ + def get_fill_value(self): + "Returns the filling value." + if self._fill_value is None: + self._fill_value = default_fill_value(self) + return self._fill_value + + def set_fill_value(self, value=None): + """Sets the filling value to `value`. +If None, uses the default, based on the data type.""" + if value is None: + value = default_fill_value(self) + self._fill_value = value + + fill_value = property(fget=get_fill_value, fset=set_fill_value, + doc="Filling value") + + def filled(self, fill_value=None): + """Returns an array of the same class as `_data`, + with masked values filled with `fill_value`. +Subclassing is preserved. + +If `fill_value` is None, uses self.fill_value. + """ + m = self._mask + if m is nomask or not m.any(): + return self._data + # + if fill_value is None: + fill_value = self.fill_value + # + if self is masked_singleton: + result = numeric.asanyarray(fill_value) + else: + result = self._data.copy() + try: + result[m] = fill_value + except (TypeError, AttributeError): + fill_value = numeric.array(fill_value, dtype=object) + d = result.astype(object) + result = fromnumeric.choose(m, (d, fill_value)) + except IndexError: + #ok, if scalar + if self._data.shape: + raise + elif m: + result = numeric.array(fill_value, dtype=self.dtype) + else: + result = self._data + return result + + def compressed(self): + "A 1-D array of all the non-masked data." 
+ d = self.ravel() + if self._mask is nomask: + return d + elif not self._smallmask and not self._mask.any(): + return d + else: + return d[numeric.logical_not(d._mask)] + #............................................ + def __str__(self): + """x.__str__() <==> str(x) +Calculates the string representation, using masked for fill if it is enabled. +Otherwise, fills with fill value. + """ + if masked_print_option.enabled(): + f = masked_print_option + if self is masked: + return str(f) + m = self._mask + if m is nomask: + res = self._data + else: + if m.shape == (): + if m: + return str(f) + else: + return str(self._data) + # convert to object array to make filled work +#CHECK: the two lines below seem more robust than the self._data.astype +# res = numeric.empty(self._data.shape, object_) +# numeric.putmask(res,~m,self._data) + res = self._data.astype("|O8") + res[m] = f + else: + res = self.filled(self.fill_value) + return str(res) + + def __repr__(self): + """x.__repr__() <==> repr(x) +Calculates the repr representation, using masked for fill if it is enabled. +Otherwise fill with fill value. + """ + with_mask = """\ +masked_%(name)s(data = + %(data)s, + mask = + %(mask)s, + fill_value=%(fill)s) +""" + with_mask1 = """\ +masked_%(name)s(data = %(data)s, + mask = %(mask)s, + fill_value=%(fill)s) +""" + n = len(self.shape) + name = repr(self._data).split('(')[0] + if n <= 1: + return with_mask1 % { + 'name': name, + 'data': str(self), + 'mask': str(self._mask), + 'fill': str(self.fill_value), + } + return with_mask % { + 'name': name, + 'data': str(self), + 'mask': str(self._mask), + 'fill': str(self.fill_value), + } + #............................................ + def __iadd__(self, other): + "Adds other to self in place." + ndarray.__iadd__(self._data,other) + m = getmask(other) + if self._mask is nomask: + self._mask = m + elif m is not nomask: + self._mask += m + return self + #.... + def __isub__(self, other): + "Subtracts other from self in place." + ndarray.__isub__(self._data,other) + m = getmask(other) + if self._mask is nomask: + self._mask = m + elif m is not nomask: + self._mask += m + return self + #.... + def __imul__(self, other): + "Multiplies self by other in place." + ndarray.__imul__(self._data,other) + m = getmask(other) + if self._mask is nomask: + self._mask = m + elif m is not nomask: + self._mask += m + return self + #.... + def __idiv__(self, other): + "Divides self by other in place." + dom_mask = domain_safe_divide().__call__(self, filled(other,1)) + other_mask = getmask(other) + new_mask = mask_or(other_mask, dom_mask) + ndarray.__idiv__(self._data, other) + self._mask = mask_or(self._mask, new_mask) + return self + #............................................ + def __float__(self): + "Converts self to float." + if self._mask is not nomask: + warnings.warn("Warning: converting a masked element to nan.") + return numpy.nan + #raise MAError, 'Cannot convert masked element to a Python float.' + return float(self.item()) + + def __int__(self): + "Converts self to int." + if self._mask is not nomask: + raise MAError, 'Cannot convert masked element to a Python int.' + return int(self.item()) + #............................................ + def count(self, axis=None): + """Counts the non-masked elements of the array along a given axis, +and returns a masked array where the mask is True where all data are masked. 
+If `axis` is None, counts all the non-masked elements, and returns either a +scalar or the masked singleton.""" + m = self._mask + s = self.shape + ls = len(s) + if m is nomask: + if ls == 0: + return 1 + if ls == 1: + return s[0] + if axis is None: + return self.size + else: + n = s[axis] + t = list(s) + del t[axis] + return numeric.ones(t) * n + n1 = fromnumeric.size(m, axis) + n2 = m.astype(int_).sum(axis) + if axis is None: + return (n1-n2) + else: + return masked_array(n1 - n2) + #............................................ + def reshape (self, *s): + """Reshapes the array to shape s. +Returns a new masked array. +If you want to modify the shape in place, please use `a.shape = s`""" + result = self._data.reshape(*s).view(type(self)) + result.__dict__.update(self.__dict__) + if result._mask is not nomask: + result._mask = self._mask.copy() + result._mask.shape = result.shape + return result + # + repeat = _arraymethod('repeat') + # + def resize(self, newshape, refcheck=True, order=False): + """Attempts to modify size and shape of self inplace. + The array must own its own memory and not be referenced by other arrays. + Returns None. + """ + try: + self._data.resize(newshape, refcheck, order) + if self.mask is not nomask: + self._mask.resize(newshape, refcheck, order) + except ValueError: + raise ValueError("Cannot resize an array that has been referenced " + "or is referencing another array in this way.\n" + "Use the resize function.") + return None + # + flatten = _arraymethod('flatten') + # + def put(self, indices, values, mode='raise'): + """Sets storage-indexed locations to corresponding values. +a.put(values, indices, mode) sets a.flat[n] = values[n] for each n in indices. +`values` can be scalar or an array shorter than indices, and it will be repeated, +if necessary. +If `values` has some masked values, the initial mask is updated in consequence, +else the corresponding values are unmasked. + """ + m = self._mask + # Hard mask: Get rid of the values/indices that fall on masked data + if self._hardmask and self._mask is not nomask: + mask = self._mask[indices] + indices = numeric.asarray(indices) + values = numeric.asanyarray(values) + values.resize(indices.shape) + indices = indices[~mask] + values = values[~mask] + #.... + self._data.put(indices, values, mode=mode) + #.... + if m is nomask: + m = getmask(values) + else: + m = m.copy() + if getmask(values) is nomask: + m.put(indices, False, mode=mode) + else: + m.put(indices, values._mask, mode=mode) + m = make_mask(m, copy=False, small_mask=True) + self._mask = m + #............................................ + def ids (self): + """Return the address of the data and mask areas.""" + return (self.ctypes.data, self._mask.ctypes.data) + #............................................ + def all(self, axis=None, out=None): + """a.all(axis) returns True if all entries along the axis are True. + Returns False otherwise. If axis is None, uses the flatten array. + Masked data are considered as True during computation. + Outputs a masked array, where the mask is True if all data are masked along the axis. + Note: the out argument is not really operational... + """ + d = self.filled(True).all(axis=axis, out=out).view(type(self)) + if d.ndim > 0: + d.__setmask__(self._mask.all(axis)) + return d + + def any(self, axis=None, out=None): + """a.any(axis) returns True if some or all entries along the axis are True. + Returns False otherwise. If axis is None, uses the flatten array. + Masked data are considered as False during computation. 
+        Outputs a masked array, where the mask is True if all data are masked along the axis.
+        Note: the out argument is not really operational...
+        """
+        d = self.filled(False).any(axis=axis, out=out).view(type(self))
+        if d.ndim > 0:
+            d.__setmask__(self._mask.all(axis))
+        return d
+
+    def nonzero(self):
+        """a.nonzero() returns a tuple of arrays
+
+        Returns a tuple of arrays, one for each dimension of a,
+        containing the indices of the non-zero elements in that
+        dimension.  The corresponding non-zero values can be obtained
+        with
+            a[a.nonzero()].
+
+        To group the indices by element, rather than dimension, use
+            transpose(a.nonzero())
+        instead. The result of this is always a 2d array, with a row for
+        each non-zero element."""
+        return numeric.asarray(self.filled(0)).nonzero()
+    #............................................
+    def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None):
+        """a.trace(offset=0, axis1=0, axis2=1, dtype=None, out=None)
+Returns the sum along the offset diagonal of the array's indicated `axis1` and `axis2`.
+        """
+        # TODO: What are we doing with `out`?
+        m = self._mask
+        if m is nomask:
+            result = super(MaskedArray, self).trace(offset=offset, axis1=axis1,
+                                                    axis2=axis2, out=out)
+            return result.astype(dtype)
+        else:
+            D = self.diagonal(offset=offset, axis1=axis1, axis2=axis2)
+            return D.astype(dtype).sum(axis=None)
+    #............................................
+    def sum(self, axis=None, dtype=None):
+        """a.sum(axis=None, dtype=None)
+Sums the array `a` over the given axis `axis`.
+Masked values are set to 0.
+If `axis` is None, applies to a flattened version of the array.
+        """
+        if self._mask is nomask:
+            mask = nomask
+        else:
+            mask = self._mask.all(axis)
+            if (not mask.ndim) and mask:
+                return masked
+        result = self.filled(0).sum(axis, dtype=dtype).view(type(self))
+        if result.ndim > 0:
+            result.__setmask__(mask)
+        return result
+
+    def cumsum(self, axis=None, dtype=None):
+        """a.cumsum(axis=None, dtype=None)
+Returns the cumulative sum of the elements of array `a` along the given axis `axis`.
+Masked values are set to 0.
+If `axis` is None, applies to a flattened version of the array.
+        """
+        result = self.filled(0).cumsum(axis=axis, dtype=dtype).view(type(self))
+        result.__setmask__(self.mask)
+        return result
+
+    def prod(self, axis=None, dtype=None):
+        """a.prod(axis=None, dtype=None)
+Returns the product of the elements of array `a` along the given axis `axis`.
+Masked elements are set to 1.
+If `axis` is None, applies to a flattened version of the array.
+        """
+        if self._mask is nomask:
+            mask = nomask
+        else:
+            mask = self._mask.all(axis)
+            if (not mask.ndim) and mask:
+                return masked
+        result = self.filled(1).prod(axis=axis, dtype=dtype).view(type(self))
+        if result.ndim:
+            result.__setmask__(mask)
+        return result
+    product = prod
+
+    def cumprod(self, axis=None, dtype=None):
+        """a.cumprod(axis=None, dtype=None)
+Returns the cumulative product of the elements of array `a` along the given axis `axis`.
+Masked values are set to 1.
+If `axis` is None, applies to a flattened version of the array.
+        """
+        result = self.filled(1).cumprod(axis=axis, dtype=dtype).view(type(self))
+        result.__setmask__(self.mask)
+        return result
+
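The reductions above fill masked entries with the operation's identity (0 for sums, 1 for products) before deferring to the plain ndarray method. A hedged sketch of the resulting behavior (numpy.ma, which inherited this code, behaves the same way):

    import numpy.ma as ma
    x = ma.array([1.0, 2.0, 3.0], mask=[False, True, False])
    x.sum()     # -> 4.0 (the masked 2.0 is filled with 0)
    x.prod()    # -> 3.0 (the masked 2.0 is filled with 1)
    x.mean()    # -> 2.0 (sum of 4.0 over a count of 2)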
+    def mean(self, axis=None, dtype=None):
+        """a.mean(axis=None, dtype=None)
+
+    Averages the array over the given axis.  If the axis is None,
+    averages over all dimensions of the array.  Equivalent to
+
+      a.sum(axis, dtype) / size(a, axis).
+
+    The optional dtype argument is the data type for intermediate
+    calculations in the sum.
+
+    Returns a masked array, of the same class as a.
+        """
+        if self._mask is nomask:
+            return super(MaskedArray, self).mean(axis=axis, dtype=dtype)
+        else:
+            dsum = self.sum(axis=axis, dtype=dtype)
+            cnt = self.count(axis=axis)
+            return dsum*1./cnt
+
+    def anom(self, axis=None, dtype=None):
+        """a.anom(axis=None, dtype=None)
+    Returns the anomalies, i.e. the deviations from the mean.
+        """
+        m = self.mean(axis, dtype)
+        if not axis:
+            return (self - m)
+        else:
+            return (self - expand_dims(m,axis))
+
+    def var(self, axis=None, dtype=None):
+        """a.var(axis=None, dtype=None)
+Returns the variance, a measure of the spread of a distribution.
+
+The variance is the average of the squared deviations from the mean,
+i.e. var = mean((x - x.mean())**2).
+        """
+        if self._mask is nomask:
+            # TODO: Do we keep super, or var _data and take a view ?
+            return super(MaskedArray, self).var(axis=axis, dtype=dtype)
+        else:
+            cnt = self.count(axis=axis)
+            danom = self.anom(axis=axis, dtype=dtype)
+            danom *= danom
+            dvar = numeric.array(danom.sum(axis) / cnt).view(type(self))
+            if axis is not None:
+                dvar._mask = mask_or(self._mask.all(axis), (cnt==1))
+            return dvar
+
+    def std(self, axis=None, dtype=None):
+        """a.std(axis=None, dtype=None)
+Returns the standard deviation, a measure of the spread of a distribution.
+
+The standard deviation is the square root of the average of the squared
+deviations from the mean, i.e. std = sqrt(mean((x - x.mean())**2)).
+        """
+        dvar = self.var(axis,dtype)
+        if axis is not None or dvar is not masked:
+            dvar = sqrt(dvar)
+        return dvar
+    #............................................
+    def argsort(self, axis=None, fill_value=None, kind='quicksort',
+                order=None):
+        """Returns an array of indices that sort 'a' along the specified axis.
+    Masked values are filled beforehand to `fill_value`.
+    If `fill_value` is None, uses the default for the data type.
+    Returns a numpy array.
+
+:Keywords:
+    `axis` : Integer *[None]*
+        Axis to be indirectly sorted (None sorts the flattened array)
+    `kind` : String *['quicksort']*
+        Sorting algorithm (default 'quicksort')
+        Possible values: 'quicksort', 'mergesort', or 'heapsort'
+
+    Returns: array of indices that sort 'a' along the specified axis.
+
+    This method executes an indirect sort along the given axis using the
+    algorithm specified by the kind keyword. It returns an array of indices of
+    the same shape as 'a' that index data along the given axis in sorted order.
+
+    The various sorts are characterized by average speed, worst case
+    performance, need for work space, and whether they are stable. A stable
+    sort keeps items with the same key in the same relative order. The three
+    available algorithms have the following properties:
+
+    |------------------------------------------------------|
+    |    kind   | speed |  worst case | work space | stable|
+    |------------------------------------------------------|
+    |'quicksort'|   1   | O(n^2)      |     0      |   no  |
+    |'mergesort'|   2   | O(n*log(n)) |    ~n/2    |   yes |
+    |'heapsort' |   3   | O(n*log(n)) |     0      |   no  |
+    |------------------------------------------------------|
+
+    All the sort algorithms make temporary copies of the data when the sort is not
+    along the last axis. Consequently, sorts along the last axis are faster and use
+    less space than sorts along other axes.
+ """ + if fill_value is None: + fill_value = default_fill_value(self) + d = self.filled(fill_value).view(ndarray) + return d.argsort(axis=axis, kind=kind, order=order) + #........................ + def argmin(self, axis=None, fill_value=None): + """Returns a ndarray of indices for the minimum values of `a` along the + specified axis. + Masked values are treated as if they had the value `fill_value`. + If `fill_value` is None, the default for the data type is used. + Returns a numpy array. + +:Keywords: + `axis` : Integer *[None]* + Axis to be indirectly sorted (default -1) + `fill_value` : var *[None]* + Default filling value. If None, uses the minimum default for the data type. + """ + if fill_value is None: + fill_value = minimum_fill_value(self) + d = self.filled(fill_value).view(ndarray) + return d.argmin(axis) + #........................ + def argmax(self, axis=None, fill_value=None): + """Returns the array of indices for the maximum values of `a` along the + specified axis. + Masked values are treated as if they had the value `fill_value`. + If `fill_value` is None, the maximum default for the data type is used. + Returns a numpy array. + +:Keywords: + `axis` : Integer *[None]* + Axis to be indirectly sorted (default -1) + `fill_value` : var *[None]* + Default filling value. If None, uses the data type default. + """ + if fill_value is None: + fill_value = maximum_fill_value(self._data) + d = self.filled(fill_value).view(ndarray) + return d.argmax(axis) + + def sort(self, axis=-1, kind='quicksort', order=None, + endwith=True, fill_value=None): + """ + Sort a along the given axis. + + Keyword arguments: + + axis -- axis to be sorted (default -1) + kind -- sorting algorithm (default 'quicksort') + Possible values: 'quicksort', 'mergesort', or 'heapsort'. + order -- If a has fields defined, then the order keyword can be the + field name to sort on or a list (or tuple) of field names + to indicate the order that fields should be used to define + the sort. + endwith--Boolean flag indicating whether missing values (if any) should + be forced in the upper indices (at the end of the array) or + lower indices (at the beginning). + + Returns: None. + + This method sorts 'a' in place along the given axis using the algorithm + specified by the kind keyword. + + The various sorts may characterized by average speed, worst case + performance, need for work space, and whether they are stable. A stable + sort keeps items with the same key in the same relative order and is most + useful when used with argsort where the key might differ from the items + being sorted. 
The three available algorithms have the following properties:
+
+    |------------------------------------------------------|
+    |    kind   | speed |  worst case | work space | stable|
+    |------------------------------------------------------|
+    |'quicksort'|   1   | O(n^2)      |     0      |   no  |
+    |'mergesort'|   2   | O(n*log(n)) |    ~n/2    |   yes |
+    |'heapsort' |   3   | O(n*log(n)) |     0      |   no  |
+    |------------------------------------------------------|
+
+        """
+        if self._mask is nomask:
+            ndarray.sort(self,axis=axis, kind=kind, order=order)
+        else:
+            if fill_value is None:
+                if endwith:
+                    filler = minimum_fill_value(self)
+                else:
+                    filler = maximum_fill_value(self)
+            else:
+                filler = fill_value
+            idx = numpy.indices(self.shape)
+            idx[axis] = self.filled(filler).argsort(axis=axis,kind=kind,order=order)
+            idx_l = idx.tolist()
+            tmp_mask = self._mask[idx_l].flat
+            tmp_data = self._data[idx_l].flat
+            self.flat = tmp_data
+            self._mask.flat = tmp_mask
+        return
+    #............................................
+    def min(self, axis=None, fill_value=None):
+        """Returns the minimum along the given axis.
+If `axis` is None, applies to the flattened array. Masked values are filled
+with `fill_value` during processing. If `fill_value` is None, it is set to the
+minimum_fill_value corresponding to the data type."""
+        mask = self._mask
+        # Check all/nothing case ......
+        if mask is nomask:
+            return super(MaskedArray, self).min(axis=axis)
+        elif (not mask.ndim) and mask:
+            return masked
+        # Get the mask ................
+        if axis is None:
+            mask = umath.logical_and.reduce(mask.flat)
+        else:
+            mask = umath.logical_and.reduce(mask, axis=axis)
+        # Get the fill value ..........
+        if fill_value is None:
+            fill_value = minimum_fill_value(self)
+        # Get the data ................
+        result = self.filled(fill_value).min(axis=axis).view(type(self))
+        if result.ndim > 0:
+            result._mask = mask
+        return result
+    #........................
+    def max(self, axis=None, fill_value=None):
+        """Returns the maximum along the given axis.
+If `axis` is None, applies to the flattened array. Masked values are filled
+with `fill_value` during processing. If `fill_value` is None, it is set to the
+maximum_fill_value corresponding to the data type."""
+        mask = self._mask
+        # Check all/nothing case ......
+        if mask is nomask:
+            return super(MaskedArray, self).max(axis=axis)
+        elif (not mask.ndim) and mask:
+            return masked
+        # Check the mask ..............
+        if axis is None:
+            mask = umath.logical_and.reduce(mask.flat)
+        else:
+            mask = umath.logical_and.reduce(mask, axis=axis)
+        # Get the fill value ..........
+        if fill_value is None:
+            fill_value = maximum_fill_value(self)
+        # Get the data ................
+        result = self.filled(fill_value).max(axis=axis).view(type(self))
+        if result.ndim > 0:
+            result._mask = mask
+        return result
+    #........................
+    def ptp(self, axis=None, fill_value=None):
+        """Returns the visible data range (max-min) along the given axis.
+If the axis is `None`, applies on a flattened array. Masked values are filled
+with `fill_value` for processing. If `fill_value` is None, the maximum uses
+the maximum default and the minimum uses the minimum default."""
+        return self.max(axis, fill_value) - self.min(axis, fill_value)
+
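A hedged sketch of the extrema methods above: filling with the appropriate extreme keeps masked cells from winning the comparison, and a fully masked row stays masked (illustrated with numpy.ma, which keeps this behavior):

    import numpy.ma as ma
    x = ma.array([[1, 8], [4, 2]], mask=[[0, 0], [1, 1]])
    x.min(axis=1)   # -> [1, --] : the second row is fully masked
    x.ptp(axis=1)   # -> [7, --] : max - min, per row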
+    # Array methods ---------------------------------------
+    conj = conjugate = _arraymethod('conjugate')
+    copy = _arraymethod('copy')
+    diagonal = _arraymethod('diagonal')
+    take = _arraymethod('take')
+    ravel = _arraymethod('ravel')
+    transpose = _arraymethod('transpose')
+    T = property(fget=lambda self:self.transpose())
+    swapaxes = _arraymethod('swapaxes')
+    clip = _arraymethod('clip', onmask=False)
+    compress = _arraymethod('compress')
+    squeeze = _arraymethod('squeeze')
+    #--------------------------------------------
+    def tolist(self, fill_value=None):
+        """Copies the data portion of the array to a hierarchical python list and
+    returns that list. Data items are converted to the nearest compatible Python
+    type.
+    Masked values are converted to `fill_value`. If `fill_value` is None, the
+    corresponding entries in the output list will be None.
+        """
+        if fill_value is not None:
+            return self.filled(fill_value).tolist()
+        result = self.filled().tolist()
+        if self._mask is nomask:
+            return result
+        if self.ndim == 0:
+            return [None]
+        elif self.ndim == 1:
+            maskedidx = self._mask.nonzero()[0].tolist()
+            [operator.setitem(result,i,None) for i in maskedidx]
+        else:
+            for idx in zip(*[i.tolist() for i in self._mask.nonzero()]):
+                tmp = result
+                for i in idx[:-1]:
+                    tmp = tmp[i]
+                tmp[idx[-1]] = None
+        return result
+
+
+    #........................
+    def tostring(self, fill_value=None):
+        """a.tostring(fill_value=None) -> raw copy of array data as a Python string.
+
+    Keyword arguments:
+        fill_value : value used in lieu of missing data
+
+    Construct a Python string containing the raw bytes of the filled array,
+    laid out in the array's current order.
+    Masked data are filled with fill_value. If fill_value is None, the
+    data-type-dependent default is used."""
+        return self.filled(fill_value).tostring()
+    #--------------------------------------------
+    # Backwards Compatibility. Heck...
+    @property
+    def data(self):
+        """Returns the `_data` part of the MaskedArray."""
+        return self._data
+    def raw_data(self):
+        """Returns the `_data` part of the MaskedArray.
+You should really use `data` instead..."""
+        return self._data
+    #--------------------------------------------
+    # Pickling
+    def __getstate__(self):
+        "Returns the internal state of the masked array, for pickling purposes."
+        state = (1,
+                 self.shape,
+                 self.dtype,
+                 self.flags.fnc,
+                 self._data.tostring(),
+                 getmaskarray(self).tostring(),
+                 self._fill_value,
+                 )
+        return state
+    #
+    def __setstate__(self, state):
+        """Restores the internal state of the masked array, for pickling purposes.
+    `state` is typically the output of the ``__getstate__`` method, and is a 7-tuple:
+
+        - a version number (currently 1)
+        - a tuple giving the shape of the data
+        - the data type of the array
+        - a boolean Fortran-order flag
+        - a binary string for the data
+        - a binary string for the mask
+        - the fill value
+        """
+        (ver, shp, typ, isf, raw, msk, flv) = state
+        ndarray.__setstate__(self, (shp, typ, isf, raw))
+        self._mask.__setstate__((shp, dtype(bool), isf, msk))
+        self.fill_value = flv
+    #
+    def __reduce__(self):
+        """Returns a 3-tuple for pickling a MaskedArray."""
+        return (_mareconstruct,
+                (self.__class__, self._baseclass, (0,), 'b', ),
+                self.__getstate__())
+
+
+def _mareconstruct(subtype, baseclass, baseshape, basetype,):
+    """Internal function that builds a new MaskedArray from the information stored
+in a pickle."""
+    _data = ndarray.__new__(baseclass, baseshape, basetype)
+    _mask = ndarray.__new__(ndarray, baseshape, 'b1')
+    return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype, small_mask=False)
+#MaskedArray.__dump__ = dump
+#MaskedArray.__dumps__ = dumps
+
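The three pickling hooks cooperate: __reduce__ points pickle at _mareconstruct, and __setstate__ unpacks the 7-tuple produced by __getstate__, so data, mask and fill value all survive a round trip. A hedged sketch (numpy.ma uses the same protocol):

    import pickle
    import numpy.ma as ma
    x = ma.array([1, 2, 3], mask=[0, 1, 0], fill_value=-999)
    y = pickle.loads(pickle.dumps(x))
    # y has the same data, the same mask and the same fill_value as x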
+
+
+#####--------------------------------------------------------------------------
+#---- --- Shortcuts ---
+#####---------------------------------------------------------------------------
+def isMaskedArray(x):
+    "Is x a masked array, that is, an instance of MaskedArray?"
+    return isinstance(x, MaskedArray)
+isarray = isMaskedArray
+isMA = isMaskedArray  #backward compatibility
+#masked = MaskedArray(0, int, mask=1)
+masked_singleton = MaskedArray(0, dtype=int_, mask=True)
+masked = masked_singleton
+
+masked_array = MaskedArray
+def array(data, dtype=None, copy=False, order=False, mask=nomask, subok=True,
+          keep_mask=True, small_mask=True, hard_mask=None, fill_value=None):
+    """array(data, dtype=None, copy=True, order=False, mask=nomask,
+             keep_mask=True, small_mask=True, fill_value=None)
+Acts as a shortcut to MaskedArray, with options in a different order for
+convenience and for backwards compatibility.
+    """
+    #TODO: we should try to put 'order' somewhere
+    return MaskedArray(data, mask=mask, dtype=dtype, copy=copy, subok=subok,
+                       keep_mask=keep_mask, small_mask=small_mask,
+                       hard_mask=hard_mask, fill_value=fill_value)
+
+def is_masked(x):
+    """Returns whether x has some masked values."""
+    m = getmask(x)
+    if m is nomask:
+        return False
+    elif m.any():
+        return True
+    return False
+
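The `masked` singleton defined above is what indexing a masked cell returns, which makes "is it masked?" tests cheap identity checks. A hedged sketch with numpy.ma:

    import numpy.ma as ma
    x = ma.array([1, 2, 3], mask=[0, 1, 0])
    x[1] is ma.masked                  # -> True: the shared singleton
    ma.is_masked(x)                    # -> True: at least one masked entry
    ma.is_masked(ma.array([1, 2, 3]))  # -> False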
+
+#####---------------------------------------------------------------------------
+#---- --- Extrema functions ---
+#####---------------------------------------------------------------------------
+class _extrema_operation(object):
+    "Generic class for maximum/minimum functions."
+    def __call__(self, a, b=None):
+        "Executes the call behavior."
+        if b is None:
+            return self.reduce(a)
+        return where(self.compare(a, b), a, b)
+    #.........
+    def reduce(self, target, axis=None):
+        """Reduces target along the given axis."""
+        m = getmask(target)
+        if axis is not None:
+            kargs = { 'axis' : axis }
+        else:
+            kargs = {}
+            target = target.ravel()
+            if not (m is nomask):
+                m = m.ravel()
+        if m is nomask:
+            t = self.ufunc.reduce(target, **kargs)
+        else:
+            target = target.filled(self.fill_value_func(target)).view(type(target))
+            t = self.ufunc.reduce(target, **kargs)
+            m = umath.logical_and.reduce(m, **kargs)
+            if hasattr(t, '_mask'):
+                t._mask = m
+            elif m:
+                t = masked
+        return t
+    #.........
+    def outer (self, a, b):
+        "Returns the function applied to the outer product of a and b."
+        ma = getmask(a)
+        mb = getmask(b)
+        if ma is nomask and mb is nomask:
+            m = nomask
+        else:
+            ma = getmaskarray(a)
+            mb = getmaskarray(b)
+            m = logical_or.outer(ma, mb)
+        # View as a MaskedArray so that the mask can be attached
+        result = self.ufunc.outer(filled(a), filled(b)).view(MaskedArray)
+        result._mask = m
+        return result
+#............................
+class _minimum_operation(_extrema_operation):
+    "Object to calculate minima"
+    def __init__ (self):
+        """minimum(a, b) or minimum(a)
+In the one-argument case, returns the scalar minimum.
+        """
+        self.ufunc = umath.minimum
+        self.afunc = amin
+        self.compare = less
+        self.fill_value_func = minimum_fill_value
+#............................
+class _maximum_operation(_extrema_operation):
+    "Object to calculate maxima"
+    def __init__ (self):
+        """maximum(a, b) or maximum(a)
+In the one-argument case, returns the scalar maximum.
+        """
+        self.ufunc = umath.maximum
+        self.afunc = amax
+        self.compare = greater
+        self.fill_value_func = maximum_fill_value
+#..........................................................
+def min(array, axis=None, out=None):
+    """Returns the minima along the given axis.
+If `axis` is None, applies to the flattened array."""
+    if out is not None:
+        raise TypeError("Output arrays unsupported for masked arrays")
+    if axis is None:
+        return minimum(array)
+    else:
+        return minimum.reduce(array, axis)
+#............................
+def max(obj, axis=None, out=None):
+    """Returns the maxima along the given axis.
+If `axis` is None, applies to the flattened array."""
+    if out is not None:
+        raise TypeError("Output arrays unsupported for masked arrays")
+    if axis is None:
+        return maximum(obj)
+    else:
+        return maximum.reduce(obj, axis)
+#.............................
+def ptp(obj, axis=None):
+    """a.ptp(axis=None) =  a.max(axis)-a.min(axis)"""
+    try:
+        return obj.max(axis)-obj.min(axis)
+    except AttributeError:
+        return max(obj, axis=axis) - min(obj, axis=axis)
+
+
+#####---------------------------------------------------------------------------
+#---- --- Definition of functions from the corresponding methods ---
+#####---------------------------------------------------------------------------
+class _frommethod:
+    """Defines functions from existing MaskedArray methods.
+:ivar _methodname (String): Name of the method to transform.
+    """
+    def __init__(self, methodname):
+        self._methodname = methodname
+        self.__doc__ = self.getdoc()
+    def getdoc(self):
+        "Returns the doc of the function (from the doc of the method)."
+        try:
+            return getattr(MaskedArray, self._methodname).__doc__
+        except:
+            return getattr(numpy, self._methodname).__doc__
+    def __call__(self, a, *args, **params):
+        if isinstance(a, MaskedArray):
+            return getattr(a, self._methodname).__call__(*args, **params)
+        #FIXME ----
+        #As `a` is not a MaskedArray, we transform it to a ndarray with asarray
+        #... and call the corresponding method.
+        #Except that sometimes it doesn't work (try reshape([1,2,3,4],(2,2)))
+        #we end up with a "SystemError: NULL result without error in PyObject_Call"
+        #A dirty trick is then to call the initial numpy function...
+ method = getattr(fromnumeric.asarray(a), self._methodname) + try: + return method(*args, **params) + except SystemError: + return getattr(numpy,self._methodname).__call__(a, *args, **params) + +all = _frommethod('all') +anomalies = anom = _frommethod('anom') +any = _frommethod('any') +conjugate = _frommethod('conjugate') +ids = _frommethod('ids') +nonzero = _frommethod('nonzero') +diagonal = _frommethod('diagonal') +maximum = _maximum_operation() +mean = _frommethod('mean') +minimum = _minimum_operation () +product = _frommethod('prod') +ptp = _frommethod('ptp') +ravel = _frommethod('ravel') +repeat = _frommethod('repeat') +std = _frommethod('std') +sum = _frommethod('sum') +swapaxes = _frommethod('swapaxes') +take = _frommethod('take') +var = _frommethod('var') + +#.............................................................................. +def power(a, b, third=None): + """Computes a**b elementwise. + Masked values are set to 1.""" + if third is not None: + raise MAError, "3-argument power not supported." + ma = getmask(a) + mb = getmask(b) + m = mask_or(ma, mb) + fa = filled(a, 1) + fb = filled(b, 1) + if fb.dtype.char in typecodes["Integer"]: + return masked_array(umath.power(fa, fb), m) + md = make_mask((fa < 0), small_mask=1) + m = mask_or(m, md) + if m is nomask: + return masked_array(umath.power(fa, fb)) + else: + fa[m] = 1 + return masked_array(umath.power(fa, fb), m) + +#.............................................................................. +def argsort(a, axis=None, kind='quicksort', order=None, fill_value=None): + """Returns an array of indices that sort 'a' along the specified axis. + Masked values are filled beforehand to `fill_value`. + If `fill_value` is None, uses the default for the data type. + Returns a numpy array. + +:Keywords: + `axis` : Integer *[None]* + Axis to be indirectly sorted (default -1) + `kind` : String *['quicksort']* + Sorting algorithm (default 'quicksort') + Possible values: 'quicksort', 'mergesort', or 'heapsort' + + Returns: array of indices that sort 'a' along the specified axis. + + This method executes an indirect sort along the given axis using the + algorithm specified by the kind keyword. It returns an array of indices of + the same shape as 'a' that index data along the given axis in sorted order. + + The various sorts are characterized by average speed, worst case + performance, need for work space, and whether they are stable. A stable + sort keeps items with the same key in the same relative order. The three + available algorithms have the following properties: + + |------------------------------------------------------| + | kind | speed | worst case | work space | stable| + |------------------------------------------------------| + |'quicksort'| 1 | O(n^2) | 0 | no | + |'mergesort'| 2 | O(n*log(n)) | ~n/2 | yes | + |'heapsort' | 3 | O(n*log(n)) | 0 | no | + |------------------------------------------------------| + + All the sort algorithms make temporary copies of the data when the sort is not + along the last axis. Consequently, sorts along the last axis are faster and use + less space than sorts along other axis. + """ + if fill_value is None: + fill_value = default_fill_value(a) + d = filled(a, fill_value) + if axis is None: + return d.argsort(kind=kind, order=order) + return d.argsort(axis, kind=kind, order=order) + +def argmin(a, axis=None, fill_value=None): + """Returns the array of indices for the minimum values of `a` along the + specified axis. + Masked values are treated as if they had the value `fill_value`. 
+ If `fill_value` is None, the default for the data type is used. + Returns a numpy array. + +:Keywords: + `axis` : Integer *[None]* + Axis to be indirectly sorted (default -1) + `fill_value` : var *[None]* + Default filling value. If None, uses the data type default. + """ + if fill_value is None: + fill_value = default_fill_value(a) + d = filled(a, fill_value) + return d.argmin(axis=axis) + +def argmax(a, axis=None, fill_value=None): + """Returns the array of indices for the maximum values of `a` along the + specified axis. + Masked values are treated as if they had the value `fill_value`. + If `fill_value` is None, the default for the data type is used. + Returns a numpy array. + +:Keywords: + `axis` : Integer *[None]* + Axis to be indirectly sorted (default -1) + `fill_value` : var *[None]* + Default filling value. If None, uses the data type default. + """ + if fill_value is None: + fill_value = default_fill_value(a) + try: + fill_value = - fill_value + except: + pass + d = filled(a, fill_value) + return d.argmax(axis=axis) + +def sort(a, axis=-1, kind='quicksort', order=None, endwith=True, fill_value=None): + """ + Sort a along the given axis. + +Keyword arguments: + +axis -- axis to be sorted (default -1) +kind -- sorting algorithm (default 'quicksort') + Possible values: 'quicksort', 'mergesort', or 'heapsort'. +order -- If a has fields defined, then the order keyword can be the + field name to sort on or a list (or tuple) of field names + to indicate the order that fields should be used to define + the sort. +endwith--Boolean flag indicating whether missing values (if any) should + be forced in the upper indices (at the end of the array) or + lower indices (at the beginning). + +Returns: None. + +This method sorts 'a' in place along the given axis using the algorithm +specified by the kind keyword. + +The various sorts may characterized by average speed, worst case +performance, need for work space, and whether they are stable. A stable +sort keeps items with the same key in the same relative order and is most +useful when used with argsort where the key might differ from the items +being sorted. The three available algorithms have the following properties: + +|------------------------------------------------------| +| kind | speed | worst case | work space | stable| +|------------------------------------------------------| +|'quicksort'| 1 | O(n^2) | 0 | no | +|'mergesort'| 2 | O(n*log(n)) | ~n/2 | yes | +|'heapsort' | 3 | O(n*log(n)) | 0 | no | +|------------------------------------------------------| + +All the sort algorithms make temporary copies of the data when the sort is +not along the last axis. Consequently, sorts along the last axis are faster +and use less space than sorts along other axis. + +""" + a = numeric.asanyarray(a) + if fill_value is None: + if endwith: + filler = minimum_fill_value(a) + else: + filler = maximum_fill_value(a) + else: + filler = fill_value +# return + indx = numpy.indices(a.shape).tolist() + indx[axis] = filled(a,filler).argsort(axis=axis,kind=kind,order=order) + return a[indx] + +def compressed(x): + """Returns a compressed version of a masked array (or just the array if it + wasn't masked first).""" + if getmask(x) is None: + return x + else: + return x.compressed() + +def count(a, axis = None): + "Count of the non-masked elements in a, or along a certain axis." 
+ a = masked_array(a) + return a.count(axis) + +def concatenate(arrays, axis=0): + "Concatenates the arrays along the given axis" + d = numeric.concatenate([filled(a) for a in arrays], axis) + rcls = get_masked_subclass(*arrays) + data = d.view(rcls) + for x in arrays: + if getmask(x) is not nomask: + break + else: + return data + dm = numeric.concatenate([getmaskarray(a) for a in arrays], axis) + dm = make_mask(dm, copy=False, small_mask=True) + data._mask = dm + return data + +def expand_dims(x,axis): + """Expand the shape of a by including newaxis before given axis.""" + result = n_expand_dims(x,axis) + if isinstance(x, MaskedArray): + new_shape = result.shape + result = x.view() + result.shape = new_shape + if result._mask is not nomask: + result._mask.shape = new_shape + return result + +#...................................... +def left_shift (a, n): + "Left shift n bits" + m = getmask(a) + if m is nomask: + d = umath.left_shift(filled(a), n) + return masked_array(d) + else: + d = umath.left_shift(filled(a, 0), n) + return masked_array(d, mask=m) + +def right_shift (a, n): + "Right shift n bits" + m = getmask(a) + if m is nomask: + d = umath.right_shift(filled(a), n) + return masked_array(d) + else: + d = umath.right_shift(filled(a, 0), n) + return masked_array(d, mask=m) +#...................................... +def put(a, indices, values, mode='raise'): + """Sets storage-indexed locations to corresponding values. + Values and indices are filled if necessary.""" + # We can't use 'frommethod', the order of arguments is different + try: + return a.put(indices, values, mode=mode) + except AttributeError: + return fromnumeric.asarray(a).put(indices, values, mode=mode) + +def putmask(a, mask, values): #, mode='raise'): + """`putmask(a, mask, v)` results in `a = v` for all places where `mask` is true. +If `v` is shorter than `mask`, it will be repeated as necessary. +In particular `v` can be a scalar or length 1 array.""" + # We can't use 'frommethod', the order of arguments is different + try: + return a.putmask(values, mask) + except AttributeError: + return fromnumeric.asarray(a).putmask(values, mask) + +def transpose(a,axes=None): + """Returns a view of the array with dimensions permuted according to axes. +If `axes` is None (default), returns array with dimensions reversed. + """ + #We can't use 'frommethod', as 'transpose' doesn't take keywords + try: + return a.transpose(axes) + except AttributeError: + return fromnumeric.asarray(a).transpose(axes) + +def reshape(a, new_shape): + """Changes the shape of the array `a` to `new_shape`.""" + #We can't use 'frommethod', it whine about some parameters. Dmmit. + try: + return a.reshape(new_shape) + except AttributeError: + return fromnumeric.asarray(a).reshape(new_shape) + +def resize(x, new_shape): + """resize(a,new_shape) returns a new array with the specified shape. + The total size of the original array can be any size. + The new array is filled with repeated copies of a. If a was masked, the new + array will be masked, and the new mask will be a repetition of the old one. + """ + # We can't use _frommethods here, as N.resize is notoriously whiny. + m = getmask(x) + if m is not nomask: + m = fromnumeric.resize(m, new_shape) + result = fromnumeric.resize(x, new_shape).view(get_masked_subclass(x)) + if result.ndim: + result._mask = m + return result + + +#................................................ 
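[A quick illustration of the mask handling in concatenate() above; a sketch
with illustrative values, assuming the core module is importable:

    from maskedarray.core import array, concatenate

    a = array([1, 2, 3], mask=[0, 1, 0])
    b = array([4, 5, 6])          # unmasked
    c = concatenate((a, b))
    # The data parts are concatenated first; the masks are only gathered
    # and concatenated if at least one input actually carries a mask.
    assert c._mask.tolist() == [False, True, False, False, False, False]
]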
+def rank(obj): + """Gets the rank of sequence a (the number of dimensions, not a matrix rank) +The rank of a scalar is zero.""" + return fromnumeric.rank(filled(obj)) +# +def shape(obj): + """Returns the shape of `a` (as a function call which also works on nested sequences). + """ + return fromnumeric.shape(filled(obj)) +# +def size(obj, axis=None): + """Returns the number of elements in the array along the given axis, +or in the sequence if `axis` is None. + """ + return fromnumeric.size(filled(obj), axis) +#................................................ + +#####-------------------------------------------------------------------------- +#---- --- Extra functions --- +#####-------------------------------------------------------------------------- +def where (condition, x, y): + """where(condition, x, y) is x where condition is nonzero, y otherwise. + condition must be convertible to an integer array. + Answer is always the shape of condition. + The type depends on x and y. It is integer if both x and y are + the value masked. + """ + fc = filled(not_equal(condition, 0), 0) + xv = filled(x) + xm = getmask(x) + yv = filled(y) + ym = getmask(y) + d = numeric.choose(fc, (yv, xv)) + md = numeric.choose(fc, (ym, xm)) + m = getmask(condition) + m = make_mask(mask_or(m, md), copy=False, small_mask=True) + return masked_array(d, mask=m) + +def choose (indices, t, out=None, mode='raise'): + "Returns array shaped like indices with elements chosen from t" + #TODO: implement options `out` and `mode`, if possible. + def fmask (x): + "Returns the filled array, or True if ``masked``." + if x is masked: + return 1 + return filled(x) + def nmask (x): + "Returns the mask, True if ``masked``, False if ``nomask``." + if x is masked: + return 1 + m = getmask(x) + if m is nomask: + return 0 + return m + c = filled(indices, 0) + masks = [nmask(x) for x in t] + a = [fmask(x) for x in t] + d = numeric.choose(c, a) + m = numeric.choose(c, masks) + m = make_mask(mask_or(m, getmask(indices)), copy=0, small_mask=1) + return masked_array(d, mask=m) + +def round_(a, decimals=0, out=None): + """Returns reference to result. Copies a and rounds to 'decimals' places. + + Keyword arguments: + decimals -- number of decimals to round to (default 0). May be negative. + out -- existing array to use for output (default copy of a). + + Return: + Reference to out, where None specifies a copy of the original array a. + + Round to the specified number of decimals. When 'decimals' is negative it + specifies the number of positions to the left of the decimal point. The + real and imaginary parts of complex numbers are rounded separately. + Nothing is done if the array is not of float type and 'decimals' is greater + than or equal to 0.""" + result = fromnumeric.round_(filled(a), decimals, out) + if isinstance(a,MaskedArray): + result = result.view(type(a)) + result._mask = a._mask + else: + result = result.view(MaskedArray) + return result + +def arange(start, stop=None, step=1, dtype=None): + """Just like range() except it returns a array whose type can be specified + by the keyword argument dtype. + """ + return array(numeric.arange(start, stop, step, dtype),mask=nomask) + +def inner(a, b): + """inner(a,b) returns the dot product of two arrays, which has + shape a.shape[:-1] + b.shape[:-1] with elements computed by summing the + product of the elements from the last dimensions of a and b. + Masked elements are replace by zeros. 
+ """ + fa = filled(a, 0) + fb = filled(b, 0) + if len(fa.shape) == 0: + fa.shape = (1,) + if len(fb.shape) == 0: + fb.shape = (1,) + return masked_array(numeric.inner(fa, fb)) +innerproduct = inner + +def outer(a, b): + """outer(a,b) = {a[i]*b[j]}, has shape (len(a),len(b))""" + fa = filled(a, 0).ravel() + fb = filled(b, 0).ravel() + d = numeric.outer(fa, fb) + ma = getmask(a) + mb = getmask(b) + if ma is nomask and mb is nomask: + return masked_array(d) + ma = getmaskarray(a) + mb = getmaskarray(b) + m = make_mask(1-numeric.outer(1-ma, 1-mb), copy=0) + return masked_array(d, mask=m) +outerproduct = outer + +def allequal (a, b, fill_value=True): + """ +Returns `True` if all entries of a and b are equal, using +fill_value as a truth value where either or both are masked. + """ + m = mask_or(getmask(a), getmask(b)) + if m is nomask: + x = filled(a) + y = filled(b) + d = umath.equal(x, y) + return d.all() + elif fill_value: + x = filled(a) + y = filled(b) + d = umath.equal(x, y) + dm = array(d, mask=m, copy=False) + return dm.filled(True).all(None) + else: + return False + +def allclose (a, b, fill_value=True, rtol=1.e-5, atol=1.e-8): + """ Returns `True` if all elements of `a` and `b` are equal subject to given tolerances. +If `fill_value` is True, masked values are considered equal. +If `fill_value` is False, masked values considered unequal. +The relative error rtol should be positive and << 1.0 +The absolute error `atol` comes into play for those elements of `b` + that are very small or zero; it says how small `a` must be also. + """ + m = mask_or(getmask(a), getmask(b)) + d1 = filled(a) + d2 = filled(b) + x = filled(array(d1, copy=0, mask=m), fill_value).astype(float) + y = filled(array(d2, copy=0, mask=m), 1).astype(float) + d = umath.less_equal(umath.absolute(x-y), atol + rtol * umath.absolute(y)) + return fromnumeric.alltrue(fromnumeric.ravel(d)) + +#.............................................................................. +def asarray(a, dtype=None): + """asarray(data, dtype) = array(data, dtype, copy=0) +Returns `a` as an masked array. +No copy is performed if `a` is already an array. +Subclasses are converted to base class MaskedArray. + """ + return masked_array(a, dtype=dtype, copy=False, keep_mask=True) + +def empty(new_shape, dtype=float): + """empty((d1,...,dn),dtype=float,order='C') +Returns a new array of shape (d1,...,dn) and given type with all its +entries uninitialized. This can be faster than zeros.""" + return numeric.empty(new_shape, dtype).view(MaskedArray) + +def empty_like(a): + """empty_like(a) +Returns an empty (uninitialized) array of the shape and typecode of a. +Note that this does NOT initialize the returned array. +If you require your array to be initialized, you should use zeros_like().""" + return numeric.empty_like(a).view(MaskedArray) + +def ones(new_shape, dtype=float): + """ones(shape, dtype=None) +Returns an array of the given dimensions, initialized to all ones.""" + return numeric.ones(new_shape, dtype).view(MaskedArray) + +def zeros(new_shape, dtype=float): + """zeros(new_shape, dtype=None) +Returns an array of the given dimensions, initialized to all zeros.""" + return numeric.zeros(new_shape, dtype).view(MaskedArray) + +#####-------------------------------------------------------------------------- +#---- --- Pickling --- +#####-------------------------------------------------------------------------- +def dump(a,F): + """Pickles the MaskedArray `a` to the file `F`. 
+`F` can either be the handle of an exiting file, or a string representing a file name. + """ + if not hasattr(F,'readline'): + F = open(F,'w') + return cPickle.dump(a,F) + +def dumps(a): + """Returns a string corresponding to the pickling of the MaskedArray.""" + return cPickle.dumps(a) + +def load(F): + """Wrapper around ``cPickle.load`` which accepts either a file-like object or + a filename.""" + if not hasattr(F, 'readline'): + F = open(F,'r') + return cPickle.load(F) + +def loads(strg): + "Loads a pickle from the current string.""" + return cPickle.loads(strg) + + +################################################################################ + +if __name__ == '__main__': + from testutils import assert_equal, assert_almost_equal + if 1: + x = arange(10) + assert(x.ctypes.data == x.filled().ctypes.data) + if 0: + a = array([1,2,3,4],mask=[0,0,0,0],small_mask=True) + a[1] = masked + a[1] = 1 + assert(a.ravel()._mask, [0,0,0,0]) + assert(a.compressed(), a) + a[0] = masked + assert(a.compressed()._mask, [0,0,0]) + if 0: + x = array(0, mask=0) + I = x.ctypes.data + J = x.filled().ctypes.data + print (I,J) + x = array([0,0], mask=0) + (I,J) = (x.ctypes.data, x.filled().ctypes.data) + print (I,J) + if 1: + x = array(numpy.arange(12)) + x[[1,-2]] = masked + xlist = x.tolist() + assert(xlist[1] is None) + assert(xlist[-2] is None) + # + x.shape = (3,4) + xlist = x.tolist() + # + assert_equal(xlist[0],[0,None,2,3]) + assert_equal(xlist[1],[4,5,6,7]) + assert_equal(xlist[2],[8,9,None,11]) + + + \ No newline at end of file Modified: trunk/Lib/sandbox/maskedarray/mrecords.py =================================================================== --- trunk/Lib/sandbox/maskedarray/mrecords.py 2007-08-15 06:04:30 UTC (rev 3244) +++ trunk/Lib/sandbox/maskedarray/mrecords.py 2007-08-15 13:38:19 UTC (rev 3245) @@ -117,4 +117,615 @@ _names = descr.names mdescr = [(n,'|b1') for n in _names] # get the shape ......................... - \ No newline at end of file + try: + shape = numeric.asarray(data[0]).shape + except IndexError: + shape = len(data.dtype) + if isinstance(shape, int): + shape = (shape,) + # Construct the _data recarray .......... + if isinstance(data, record): + _data = numeric.asarray(data).view(recarray) + _fieldmask = mask + elif isinstance(data, MaskedRecords): + _data = data._data + _fieldmask = data._fieldmask + elif isinstance(data, recarray): + _data = data + if mask is nomask: + _fieldmask = data.astype(mdescr) + _fieldmask.flat = tuple([False]*len(mdescr)) + else: + _fieldmask = mask + elif (isinstance(data, (tuple, numpy.void)) or\ + hasattr(data,'__len__') and isinstance(data[0], (tuple, numpy.void))): + data = numeric.array(data, dtype=descr).view(recarray) + _data = data + if mask is nomask: + _fieldmask = data.astype(mdescr) + _fieldmask.flat = tuple([False]*len(mdescr)) + else: + _fieldmask = mask + else: + _data = recarray(shape, dtype=descr) + _fieldmask = recarray(shape, dtype=mdescr) + for (n,v) in zip(_names, data): + _data[n] = numeric.asarray(v).view(ndarray) + _fieldmask[n] = getmaskarray(v) + #........................................ 
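    # (Summary of the branch above: whatever form `data` took -- record,
    #  MaskedRecords, recarray, sequence of tuples, or list of columns --
    #  we now have `_data`, a plain recarray of values, and `_fieldmask`,
    #  a parallel recarray with one boolean per field.)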
+ _data = _data.view(cls) + _data._fieldmask = _fieldmask + _data._hardmask = hard_mask + if fill_value is None: + _data._fill_value = [default_fill_value(numeric.dtype(d[1])) + for d in descr.descr] + else: + _data._fill_value = fill_value + return _data + + def __array_finalize__(self,obj): + if isinstance(obj, MaskedRecords): + self.__dict__.update(_fieldmask=obj._fieldmask, + _hardmask=obj._hardmask, + _fill_value=obj._fill_value + ) + else: + self.__dict__.update(_fieldmask = nomask, + _hardmask = False, + fill_value = None + ) + return + + def _getdata(self): + "Returns the data as a recarray." + return self.view(recarray) + _data = property(fget=_getdata) + + #...................................................... + def __getattribute__(self, attr): + try: + # Returns a generic attribute + return object.__getattribute__(self,attr) + except AttributeError: + # OK, so attr must be a field name + pass + # Get the list of fields ...... + _names = self.dtype.names + if attr in _names: + _data = self._data + _mask = self._fieldmask + obj = numeric.asarray(_data.__getattribute__(attr)).view(MaskedArray) + obj.__setmask__(_mask.__getattribute__(attr)) + if (obj.ndim == 0) and obj._mask: + return masked + return obj + raise AttributeError,"No attribute '%s' !" % attr + + def __setattr__(self, attr, val): + newattr = attr not in self.__dict__ + try: + # Is attr a generic attribute ? + ret = object.__setattr__(self, attr, val) + except: + # Not a generic attribute: exit if it's not a valid field + fielddict = self.dtype.names or {} + if attr not in fielddict: + exctype, value = sys.exc_info()[:2] + raise exctype, value + else: + if attr not in list(self.dtype.names) + ['_mask']: + return ret + if newattr: # We just added this one + try: # or this setattr worked on an internal + # attribute. + object.__delattr__(self, attr) + except: + return ret + # Case #1.: Basic field ............ + base_fmask = self._fieldmask + _names = self.dtype.names + if attr in _names: + fval = filled(val) + mval = getmaskarray(val) + if self._hardmask: + mval = mask_or(mval, base_fmask.__getattr__(attr)) + self._data.__setattr__(attr, fval) + base_fmask.__setattr__(attr, mval) + return + elif attr == '_mask': + self.__setmask__(val) + return + #............................................ + def __getitem__(self, indx): + """Returns all the fields sharing the same fieldname base. + The fieldname base is either `_data` or `_mask`.""" + _localdict = self.__dict__ + _data = self._data + # We want a field ........ + if isinstance(indx, str): + obj = _data[indx].view(MaskedArray) + obj._set_mask(_localdict['_fieldmask'][indx]) + return obj + # We want some elements .. + # First, the data ........ + obj = ndarray.__getitem__(self, indx) + if isinstance(obj, numpy.void): + obj = self.__class__(obj, dtype=self.dtype) + else: + obj = obj.view(type(self)) + obj._fieldmask = numpy.asarray(_localdict['_fieldmask'][indx]).view(recarray) + return obj + #............................................ 
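[To make the attribute machinery above concrete, a short sketch mirroring the
self-test at the bottom of this module (illustrative values):

    import numpy as N
    from maskedarray.mrecords import MaskedRecords
    from maskedarray.core import masked

    x = [(1., 10., 'a'), (2., 20, 'b'), (3.14, 30, 'c'), (5.55, 40, 'd')]
    desc = [('ffloat', N.float_), ('fint', N.int_), ('fstr', 'S10')]
    mr = MaskedRecords(x, dtype=desc)
    mr.ffloat[-1] = masked
    # __getattribute__ returns the field as a MaskedArray carrying the
    # corresponding column of _fieldmask:
    assert mr.ffloat._mask.tolist() == [False, False, False, True]
    # Indexing a record whose field is masked hands back the `masked`
    # constant for that field (mr[-1].ffloat compares equal to masked).
]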
+ def __setitem__(self, indx, value): + """Sets the given record to value.""" + MaskedArray.__setitem__(self, indx, value) + +# def __getslice__(self, i, j): +# """Returns the slice described by [i,j].""" +# _localdict = self.__dict__ +# return MaskedRecords(_localdict['_data'][i:j], +# mask=_localdict['_fieldmask'][i:j], +# dtype=self.dtype) +# + def __setslice__(self, i, j, value): + """Sets the slice described by [i,j] to `value`.""" + _localdict = self.__dict__ + d = self._data + m = _localdict['_fieldmask'] + names = self.dtype.names + if value is masked: + for n in names: + m[i:j][n] = True + elif not self._hardmask: + fval = filled(value) + mval = getmaskarray(value) + for n in names: + d[n][i:j] = fval + m[n][i:j] = mval + else: + mindx = getmaskarray(self)[i:j] + dval = numeric.asarray(value) + valmask = getmask(value) + if valmask is nomask: + for n in names: + mval = mask_or(m[n][i:j], valmask) + d[n][i:j][~mval] = value + elif valmask.size > 1: + for n in names: + mval = mask_or(m[n][i:j], valmask) + d[n][i:j][~mval] = dval[~mval] + m[n][i:j] = mask_or(m[n][i:j], mval) + self._fieldmask = m + + #..................................................... + def __setmask__(self, mask): + names = self.dtype.names + fmask = self.__dict__['_fieldmask'] + newmask = make_mask(mask, copy=False) +# self.unshare_mask() + if self._hardmask: + for n in names: + fmask[n].__ior__(newmask) + else: + for n in names: + fmask[n].flat = newmask + + def _getmask(self): + """Returns the mask of the mrecord: a record is masked when all the fields +are masked.""" + if self.size > 1: + return self._fieldmask.view((bool_, len(self.dtype))).all(1) + + _setmask = __setmask__ + _mask = property(fget=_getmask, fset=_setmask) + + #...................................................... + def __str__(self): + """x.__str__() <==> str(x) +Calculates the string representation, using masked for fill if it is enabled. +Otherwise, fills with fill value. + """ + if self.size > 1: + mstr = ["(%s)" % ",".join([str(i) for i in s]) + for s in zip(*[getattr(self,f) for f in self.dtype.names])] + return "[%s]" % ", ".join(mstr) + else: + mstr = numeric.asarray(self._data.item(), dtype=object_) + mstr[list(self._fieldmask)] = masked_print_option + return str(mstr) + + def __repr__(self): + """x.__repr__() <==> repr(x) +Calculates the repr representation, using masked for fill if it is enabled. +Otherwise fill with fill value. + """ + _names = self.dtype.names + fmt = "%%%is : %%s" % (max([len(n) for n in _names])+4,) + reprstr = [fmt % (f,getattr(self,f)) for f in self.dtype.names] + reprstr.insert(0,'masked_records(') + reprstr.extend([fmt % (' fill_value', self._fill_value), + ' )']) + return str("\n".join(reprstr)) + #...................................................... + def view(self, obj): + """Returns a view of the mrecarray.""" + try: + if issubclass(obj, ndarray): + return ndarray.view(self, obj) + except TypeError: + pass + dtype = numeric.dtype(obj) + if dtype.fields is None: + return self.__array__().view(dtype) + return ndarray.view(self, obj) + #...................................................... + def filled(self, fill_value=None): + """Returns an array of the same class as `_data`, + with masked values filled with `fill_value`. +Subclassing is preserved. + +If `fill_value` is None, uses self.fill_value. 
+ """ + _localdict = self.__dict__ + d = self._data + fm = _localdict['_fieldmask'] + if not numeric.asarray(fm, dtype=bool_).any(): + return d + # + if fill_value is None: + value = _localdict['_fill_value'] + else: + value = fill_value + if numeric.size(value) == 1: + value = [value,] * len(self.dtype) + # + if self is masked: + result = numeric.asanyarray(value) + else: + result = d.copy() + for (n, v) in zip(d.dtype.names, value): + numpy.putmask(numeric.asarray(result[n]), + numeric.asarray(fm[n]), v) + return result + #............................................ + def harden_mask(self): + "Forces the mask to hard" + self._hardmask = True + def soften_mask(self): + "Forces the mask to soft" + self._hardmask = False + #............................................. + def copy(self): + """Returns a copy of the masked record.""" + _localdict = self.__dict__ + return MaskedRecords(self._data.copy(), + mask=_localdict['_fieldmask'].copy(), + dtype=self.dtype) + #............................................. + + +#####--------------------------------------------------------------------------- +#---- --- Constructors --- +#####--------------------------------------------------------------------------- + +def fromarrays(arraylist, dtype=None, shape=None, formats=None, + names=None, titles=None, aligned=False, byteorder=None): + """Creates a mrecarray from a (flat) list of masked arrays. + +:Parameters: + - `arraylist` : Sequence + A list of (masked) arrays. Each element of the sequence is first converted + to a masked array if needed. If a 2D array is passed as argument, it is + processed line by line + - `dtype` : numeric.dtype + Data type descriptor. + - `shape` : Integer *[None]* + Number of records. If None, `shape` is defined from the shape of the first + array in the list. + - `formats` : + (Description to write) + - `names` : + (description to write) + - `titles`: + (Description to write) + - `aligned`: Boolen *[False]* + (Description to write, not used anyway) + - `byteorder`: Boolen *[None]* + (Description to write, not used anyway) + + + """ + arraylist = [MA.asarray(x) for x in arraylist] + # Define/check the shape..................... + if shape is None or shape == 0: + shape = arraylist[0].shape + if isinstance(shape, int): + shape = (shape,) + # Define formats from scratch ............... + if formats is None and dtype is None: + formats = _getformats(arraylist) + # Define the dtype .......................... + if dtype is not None: + descr = numeric.dtype(dtype) + _names = descr.names + else: + parsed = format_parser(formats, names, titles, aligned, byteorder) + _names = parsed._names + descr = parsed._descr + # Determine shape from data-type............. + if len(descr) != len(arraylist): + msg = "Mismatch between the number of fields (%i) and the number of "\ + "arrays (%i)" + raise ValueError, msg % (len(descr), len(arraylist)) + d0 = descr[0].shape + nn = len(d0) + if nn > 0: + shape = shape[:-nn] + # Make sure the shape is the correct one .... + for k, obj in enumerate(arraylist): + nn = len(descr[k].shape) + testshape = obj.shape[:len(obj.shape)-nn] + if testshape != shape: + raise ValueError, "Array-shape mismatch in array %d" % k + # Reconstruct the descriptor, by creating a _data and _mask version + return MaskedRecords(arraylist, dtype=descr) +#.............................................................................. 
+def fromrecords(reclist, dtype=None, shape=None, formats=None, names=None, + titles=None, aligned=False, byteorder=None): + """Creates a MaskedRecords from a list of records. + + The data in the same field can be heterogeneous, they will be promoted + to the highest data type. This method is intended for creating + smaller record arrays. If used to create large array without formats + defined, it can be slow. + + If formats is None, then this will auto-detect formats. Use a list of + tuples rather than a list of lists for faster processing. + """ + # reclist is in fact a mrecarray ................. + if isinstance(reclist, MaskedRecords): + mdescr = reclist.dtype + shape = reclist.shape + return MaskedRecords(reclist, dtype=mdescr) + # No format, no dtype: create from to arrays ..... + nfields = len(reclist[0]) + if formats is None and dtype is None: # slower + if isinstance(reclist, recarray): + arrlist = [reclist.field(i) for i in range(len(reclist.dtype))] + if names is None: + names = reclist.dtype.names + else: + obj = numeric.array(reclist,dtype=object) + arrlist = [numeric.array(obj[...,i].tolist()) + for i in xrange(nfields)] + return MaskedRecords(arrlist, formats=formats, names=names, + titles=titles, aligned=aligned, byteorder=byteorder) + # Construct the descriptor ....................... + if dtype is not None: + descr = numeric.dtype(dtype) + _names = descr.names + else: + parsed = format_parser(formats, names, titles, aligned, byteorder) + _names = parsed._names + descr = parsed._descr + + try: + retval = numeric.array(reclist, dtype = descr).view(recarray) + except TypeError: # list of lists instead of list of tuples + if (shape is None or shape == 0): + shape = len(reclist)*2 + if isinstance(shape, (int, long)): + shape = (shape*2,) + if len(shape) > 1: + raise ValueError, "Can only deal with 1-d array." + retval = recarray(shape, mdescr) + for k in xrange(retval.size): + retval[k] = tuple(reclist[k]) + return MaskedRecords(retval, dtype=descr) + else: + if shape is not None and retval.shape != shape: + retval.shape = shape + # + return MaskedRecords(retval, dtype=descr) + +def _guessvartypes(arr): + """Tries to guess the dtypes of the str_ ndarray `arr`, by testing element-wise + conversion. Returns a list of dtypes. + The array is first converted to ndarray. If the array is 2D, the test is + performed on the first line. An exception is raised if the file is 3D or more. + """ + vartypes = [] + arr = numeric.asarray(arr) + if len(arr.shape) == 2 : + arr = arr[0] + elif len(arr.shape) > 2: + raise ValueError, "The array should be 2D at most!" + # Start the conversion loop ....... + for f in arr: + try: + val = int(f) + except ValueError: + try: + val = float(f) + except ValueError: + try: + val = complex(f) + except ValueError: + vartypes.append(arr.dtype) + else: + vartypes.append(complex_) + else: + vartypes.append(float_) + else: + vartypes.append(int_) + return vartypes + +def openfile(fname): + "Opens the file handle of file `fname`" + # A file handle ................... + if hasattr(fname, 'readline'): + return fname + # Try to open the file and guess its type + try: + f = open(fname) + except IOError: + raise IOError, "No such file: '%s'" % fname + if f.readline()[:2] != "\\x": + f.seek(0,0) + return f + raise NotImplementedError, "Wow, binary file" + + +def fromtextfile(fname, delimitor=None, commentchar='#', missingchar='', + varnames=None, vartypes=None): + """Creates a mrecarray from data stored in the file `filename`. 
+ +:Parameters: + - `filename` : file name/handle + Handle of an opened file. + - `delimitor` : Character *None* + Alphanumeric character used to separate columns in the file. + If None, any (group of) white spacestring(s) will be used. + - `commentchar` : String *['#']* + Alphanumeric character used to mark the start of a comment. + - `missingchar` : String *['']* + String indicating missing data, and used to create the masks. + - `varnames` : Sequence *[None]* + Sequence of the variable names. If None, a list will be created from + the first non empty line of the file. + - `vartypes` : Sequence *[None]* + Sequence of the variables dtypes. If None, the sequence will be estimated + from the first non-commented line. + + + Ultra simple: the varnames are in the header, one line""" + # Try to open the file ...................... + f = openfile(fname) + # Get the first non-empty line as the varnames + while True: + line = f.readline() + firstline = line[:line.find(commentchar)].strip() + _varnames = firstline.split(delimitor) + if len(_varnames) > 1: + break + if varnames is None: + varnames = _varnames + # Get the data .............................. + _variables = MA.asarray([line.strip().split(delimitor) for line in f + if line[0] != commentchar and len(line) > 1]) + (_, nfields) = _variables.shape + # Try to guess the dtype .................... + if vartypes is None: + vartypes = _guessvartypes(_variables[0]) + else: + vartypes = [numeric.dtype(v) for v in vartypes] + if len(vartypes) != nfields: + msg = "Attempting to %i dtypes for %i fields!" + msg += " Reverting to default." + warnings.warn(msg % (len(vartypes), nfields)) + vartypes = _guessvartypes(_variables[0]) + # Construct the descriptor .................. + mdescr = [(n,f) for (n,f) in zip(varnames, vartypes)] + # Get the data and the mask ................. + # We just need a list of masked_arrays. It's easier to create it like that: + _mask = (_variables.T == missingchar) + _datalist = [masked_array(a,mask=m,dtype=t) + for (a,m,t) in zip(_variables.T, _mask, vartypes)] + return MaskedRecords(_datalist, dtype=mdescr) + +#.................................................................... +def addfield(mrecord, newfield, newfieldname=None): + """Adds a new field to the masked record array, using `newfield` as data +and `newfieldname` as name. If `newfieldname` is None, the new field name is +set to 'fi', where `i` is the number of existing fields. + """ + _data = mrecord._data + _mask = mrecord._fieldmask + if newfieldname is None or newfieldname in reserved_fields: + newfieldname = 'f%i' % len(_data.dtype) + newfield = MA.asarray(newfield) + # Get the new data ............ + # Create a new empty recarray + newdtype = numeric.dtype(_data.dtype.descr + \ + [(newfieldname, newfield.dtype)]) + newdata = recarray(_data.shape, newdtype) + # Add the exisintg field + [newdata.setfield(_data.getfield(*f),*f) + for f in _data.dtype.fields.values()] + # Add the new field + newdata.setfield(newfield._data, *newdata.dtype.fields[newfieldname]) + newdata = newdata.view(MaskedRecords) + # Get the new mask ............. 
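    # (The block below repeats the setfield dance for the masks: copy the
    #  existing field masks into a fresh recarray of booleans, then append
    #  the mask of the new field.)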
+ # Create a new empty recarray + newmdtype = numeric.dtype([(n,bool_) for n in newdtype.names]) + newmask = recarray(_data.shape, newmdtype) + # Add the old masks + [newmask.setfield(_mask.getfield(*f),*f) + for f in _mask.dtype.fields.values()] + # Add the mask of the new field + newmask.setfield(getmaskarray(newfield), + *newmask.dtype.fields[newfieldname]) + newdata._fieldmask = newmask + return newdata + +################################################################################ +if __name__ == '__main__': + import numpy as N + from maskedarray.testutils import assert_equal + if 1: + d = N.arange(5) + m = MA.make_mask([1,0,0,1,1]) + base_d = N.r_[d,d[::-1]].reshape(2,-1).T + base_m = N.r_[[m, m[::-1]]].T + base = MA.array(base_d, mask=base_m).T + mrecord = fromarrays(base,dtype=[('a',N.float_),('b',N.float_)]) + mrec = MaskedRecords(mrecord.copy()) + # + mrec.a[3:] = 5 + assert_equal(mrec.a, [0,1,2,5,5]) + assert_equal(mrec.a._mask, [1,0,0,0,0]) + # + mrec.b[3:] = masked + assert_equal(mrec.b, [4,3,2,1,0]) + assert_equal(mrec.b._mask, [1,1,0,1,1]) + # + mrec[:2] = masked + assert_equal(mrec._mask, [1,1,0,0,0]) + mrec[-1] = masked + assert_equal(mrec._mask, [1,1,0,0,1]) + if 1: + nrec = N.core.records.fromarrays(N.r_[[d,d[::-1]]], + dtype=[('a',N.float_),('b',N.float_)]) + mrec = mrecord + #.................... + mrecfr = fromrecords(nrec) + assert_equal(mrecfr.a, mrec.a) + assert_equal(mrecfr.dtype, mrec.dtype) + #.................... + tmp = mrec[::-1] #.tolist() + mrecfr = fromrecords(tmp) + assert_equal(mrecfr.a, mrec.a[::-1]) + #.................... + mrecfr = fromrecords(nrec.tolist(), names=nrec.dtype.names) + assert_equal(mrecfr.a, mrec.a) + assert_equal(mrecfr.dtype, mrec.dtype) + if 1: + assert_equal(mrec.a, MA.array(d,mask=m)) + assert_equal(mrec.b, MA.array(d[::-1],mask=m[::-1])) + assert((mrec._fieldmask == N.core.records.fromarrays([m, m[::-1]])).all()) + assert_equal(mrec._mask, N.r_[[m,m[::-1]]].all(0)) + assert_equal(mrec.a[1], mrec[1].a) + + if 1: + x = [(1.,10.,'a'),(2.,20,'b'),(3.14,30,'c'),(5.55,40,'d')] + desc = [('ffloat', N.float_), ('fint', N.int_), ('fstr', 'S10')] + mr = MaskedRecords(x,dtype=desc) + mr[0] = masked + mr.ffloat[-1] = masked + # + mrlast = mr[-1] + assert(isinstance(mrlast,MaskedRecords)) + assert(hasattr(mrlast,'ffloat')) + assert_equal(mrlast.ffloat, masked) + + \ No newline at end of file Modified: trunk/Lib/sandbox/maskedarray/tests/test_core.py =================================================================== --- trunk/Lib/sandbox/maskedarray/tests/test_core.py 2007-08-15 06:04:30 UTC (rev 3244) +++ trunk/Lib/sandbox/maskedarray/tests/test_core.py 2007-08-15 13:38:19 UTC (rev 3245) @@ -12,10 +12,11 @@ import types -import numpy as N +import numpy import numpy.core.fromnumeric as fromnumeric from numpy.testing import NumpyTest, NumpyTestCase from numpy.testing.utils import build_err_msg +from numpy import array as narray import maskedarray.testutils from maskedarray.testutils import * @@ -23,7 +24,7 @@ import maskedarray.core as coremodule from maskedarray.core import * -pi = N.pi +pi = numpy.pi #.............................................................................. class test_ma(NumpyTestCase): @@ -34,16 +35,16 @@ def setUp (self): "Base data definition." 
- x = N.array([1.,1.,1.,-2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) - y = N.array([5.,0.,3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) + x = narray([1.,1.,1.,-2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) + y = narray([5.,0.,3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) a10 = 10. m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0 ,0, 1] xm = masked_array(x, mask=m1) ym = masked_array(y, mask=m2) - z = N.array([-.5, 0., .5, .8]) + z = narray([-.5, 0., .5, .8]) zm = masked_array(z, mask=[0,1,0,0]) - xf = N.where(m1, 1.e+20, x) + xf = numpy.where(m1, 1.e+20, x) xm.set_fill_value(1.e+20) self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf) #........................ @@ -55,7 +56,7 @@ assert((xm-ym).filled(0).any()) fail_if_equal(xm.mask.astype(int_), ym.mask.astype(int_)) s = x.shape - assert_equal(N.shape(xm), s) + assert_equal(numpy.shape(xm), s) assert_equal(xm.shape, s) assert_equal(xm.dtype, x.dtype) assert_equal(zm.dtype, z.dtype) @@ -115,14 +116,14 @@ assert_equal(x**2, xm**2) assert_equal(abs(x)**2.5, abs(xm) **2.5) assert_equal(x**y, xm**ym) - assert_equal(N.add(x,y), add(xm, ym)) - assert_equal(N.subtract(x,y), subtract(xm, ym)) - assert_equal(N.multiply(x,y), multiply(xm, ym)) - assert_equal(N.divide(x,y), divide(xm, ym)) + assert_equal(numpy.add(x,y), add(xm, ym)) + assert_equal(numpy.subtract(x,y), subtract(xm, ym)) + assert_equal(numpy.multiply(x,y), multiply(xm, ym)) + assert_equal(numpy.divide(x,y), divide(xm, ym)) #........................ def check_mixed_arithmetic(self): "Tests mixed arithmetics." - na = N.array([1]) + na = narray([1]) ma = array([1]) self.failUnless(isinstance(na + ma, MaskedArray)) self.failUnless(isinstance(ma + na, MaskedArray)) @@ -246,28 +247,28 @@ def check_basic_ufuncs (self): "Test various functions such as sin, cos." 
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d - assert_equal(N.cos(x), cos(xm)) - assert_equal(N.cosh(x), cosh(xm)) - assert_equal(N.sin(x), sin(xm)) - assert_equal(N.sinh(x), sinh(xm)) - assert_equal(N.tan(x), tan(xm)) - assert_equal(N.tanh(x), tanh(xm)) - assert_equal(N.sqrt(abs(x)), sqrt(xm)) - assert_equal(N.log(abs(x)), log(xm)) - assert_equal(N.log10(abs(x)), log10(xm)) - assert_equal(N.exp(x), exp(xm)) - assert_equal(N.arcsin(z), arcsin(zm)) - assert_equal(N.arccos(z), arccos(zm)) - assert_equal(N.arctan(z), arctan(zm)) - assert_equal(N.arctan2(x, y), arctan2(xm, ym)) - assert_equal(N.absolute(x), absolute(xm)) - assert_equal(N.equal(x,y), equal(xm, ym)) - assert_equal(N.not_equal(x,y), not_equal(xm, ym)) - assert_equal(N.less(x,y), less(xm, ym)) - assert_equal(N.greater(x,y), greater(xm, ym)) - assert_equal(N.less_equal(x,y), less_equal(xm, ym)) - assert_equal(N.greater_equal(x,y), greater_equal(xm, ym)) - assert_equal(N.conjugate(x), conjugate(xm)) + assert_equal(numpy.cos(x), cos(xm)) + assert_equal(numpy.cosh(x), cosh(xm)) + assert_equal(numpy.sin(x), sin(xm)) + assert_equal(numpy.sinh(x), sinh(xm)) + assert_equal(numpy.tan(x), tan(xm)) + assert_equal(numpy.tanh(x), tanh(xm)) + assert_equal(numpy.sqrt(abs(x)), sqrt(xm)) + assert_equal(numpy.log(abs(x)), log(xm)) + assert_equal(numpy.log10(abs(x)), log10(xm)) + assert_equal(numpy.exp(x), exp(xm)) + assert_equal(numpy.arcsin(z), arcsin(zm)) + assert_equal(numpy.arccos(z), arccos(zm)) + assert_equal(numpy.arctan(z), arctan(zm)) + assert_equal(numpy.arctan2(x, y), arctan2(xm, ym)) + assert_equal(numpy.absolute(x), absolute(xm)) + assert_equal(numpy.equal(x,y), equal(xm, ym)) + assert_equal(numpy.not_equal(x,y), not_equal(xm, ym)) + assert_equal(numpy.less(x,y), less(xm, ym)) + assert_equal(numpy.greater(x,y), greater(xm, ym)) + assert_equal(numpy.less_equal(x,y), less_equal(xm, ym)) + assert_equal(numpy.greater_equal(x,y), greater_equal(xm, ym)) + assert_equal(numpy.conjugate(x), conjugate(xm)) #........................ def check_count_func (self): "Tests count" @@ -286,7 +287,7 @@ def check_minmax_func (self): "Tests minimum and maximum." (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d - xr = N.ravel(x) #max doesn't work if shaped + xr = numpy.ravel(x) #max doesn't work if shaped xmr = ravel(xm) assert_equal(max(xr), maximum(xmr)) #true because of careful selection of data assert_equal(min(xr), minimum(xmr)) #true because of careful selection of data @@ -326,50 +327,50 @@ def check_addsumprod (self): "Tests add, sum, product." 
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d - assert_equal(N.add.reduce(x), add.reduce(x)) - assert_equal(N.add.accumulate(x), add.accumulate(x)) + assert_equal(numpy.add.reduce(x), add.reduce(x)) + assert_equal(numpy.add.accumulate(x), add.accumulate(x)) assert_equal(4, sum(array(4),axis=0)) assert_equal(4, sum(array(4), axis=0)) - assert_equal(N.sum(x,axis=0), sum(x,axis=0)) - assert_equal(N.sum(filled(xm,0),axis=0), sum(xm,axis=0)) - assert_equal(N.sum(x,0), sum(x,0)) - assert_equal(N.product(x,axis=0), product(x,axis=0)) - assert_equal(N.product(x,0), product(x,0)) - assert_equal(N.product(filled(xm,1),axis=0), product(xm,axis=0)) + assert_equal(numpy.sum(x,axis=0), sum(x,axis=0)) + assert_equal(numpy.sum(filled(xm,0),axis=0), sum(xm,axis=0)) + assert_equal(numpy.sum(x,0), sum(x,0)) + assert_equal(numpy.product(x,axis=0), product(x,axis=0)) + assert_equal(numpy.product(x,0), product(x,0)) + assert_equal(numpy.product(filled(xm,1),axis=0), product(xm,axis=0)) s = (3,4) x.shape = y.shape = xm.shape = ym.shape = s if len(s) > 1: - assert_equal(N.concatenate((x,y),1), concatenate((xm,ym),1)) - assert_equal(N.add.reduce(x,1), add.reduce(x,1)) - assert_equal(N.sum(x,1), sum(x,1)) - assert_equal(N.product(x,1), product(x,1)) + assert_equal(numpy.concatenate((x,y),1), concatenate((xm,ym),1)) + assert_equal(numpy.add.reduce(x,1), add.reduce(x,1)) + assert_equal(numpy.sum(x,1), sum(x,1)) + assert_equal(numpy.product(x,1), product(x,1)) #......................... def check_concat(self): "Tests concatenations." (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d # basic concatenation - assert_equal(N.concatenate((x,y)), concatenate((xm,ym))) - assert_equal(N.concatenate((x,y)), concatenate((x,y))) - assert_equal(N.concatenate((x,y)), concatenate((xm,y))) - assert_equal(N.concatenate((x,y,x)), concatenate((x,ym,x))) + assert_equal(numpy.concatenate((x,y)), concatenate((xm,ym))) + assert_equal(numpy.concatenate((x,y)), concatenate((x,y))) + assert_equal(numpy.concatenate((x,y)), concatenate((xm,y))) + assert_equal(numpy.concatenate((x,y,x)), concatenate((x,ym,x))) # Concatenation along an axis s = (3,4) x.shape = y.shape = xm.shape = ym.shape = s - assert_equal(xm.mask, N.reshape(m1, s)) - assert_equal(ym.mask, N.reshape(m2, s)) + assert_equal(xm.mask, numpy.reshape(m1, s)) + assert_equal(ym.mask, numpy.reshape(m2, s)) xmym = concatenate((xm,ym),1) - assert_equal(N.concatenate((x,y),1), xmym) - assert_equal(N.concatenate((xm.mask,ym.mask),1), xmym._mask) + assert_equal(numpy.concatenate((x,y),1), xmym) + assert_equal(numpy.concatenate((xm.mask,ym.mask),1), xmym._mask) #........................ 
def check_indexing(self): "Tests conversions and indexing" - x1 = N.array([1,2,4,3]) + x1 = numpy.array([1,2,4,3]) x2 = array(x1, mask=[1,0,0,0]) x3 = array(x1, mask=[0,1,0,1]) x4 = array(x1) # test conversion to strings junk, garbage = str(x2), repr(x2) - assert_equal(N.sort(x1),sort(x2,endwith=False)) + assert_equal(numpy.sort(x1),sort(x2,endwith=False)) # tests of indexing assert type(x2[1]) is type(x1[1]) assert x1[1] == x2[1] @@ -396,14 +397,14 @@ x4[:] = masked_array([1,2,3,4],[0,1,1,0]) assert allequal(getmask(x4), array([0,1,1,0])) assert allequal(x4, array([1,2,3,4])) - x1 = N.arange(5)*1.0 + x1 = numpy.arange(5)*1.0 x2 = masked_values(x1, 3.0) assert_equal(x1,x2) assert allequal(array([0,0,0,1,0],MaskType), x2.mask) #FIXME: Well, eh, fill_value is now a property assert_equal(3.0, x2.fill_value()) assert_equal(3.0, x2.fill_value) x1 = array([1,'hello',2,3],object) - x2 = N.array([1,'hello',2,3],object) + x2 = numpy.array([1,'hello',2,3],object) s1 = x1[1] s2 = x2[1] assert_equal(type(s2), str) @@ -420,7 +421,7 @@ m3 = make_mask(m, copy=1) assert(m is not m3) - x1 = N.arange(5) + x1 = numpy.arange(5) y1 = array(x1, mask=m) #assert( y1._data is x1) assert_equal(y1._data.__array_interface__, x1.__array_interface__) @@ -585,15 +586,15 @@ def check_TakeTransposeInnerOuter(self): "Test of take, transpose, inner, outer products" x = arange(24) - y = N.arange(24) + y = numpy.arange(24) x[5:6] = masked x = x.reshape(2,3,4) y = y.reshape(2,3,4) - assert_equal(N.transpose(y,(2,0,1)), transpose(x,(2,0,1))) - assert_equal(N.take(y, (2,0,1), 1), take(x, (2,0,1), 1)) - assert_equal(N.inner(filled(x,0),filled(y,0)), + assert_equal(numpy.transpose(y,(2,0,1)), transpose(x,(2,0,1))) + assert_equal(numpy.take(y, (2,0,1), 1), take(x, (2,0,1), 1)) + assert_equal(numpy.inner(filled(x,0),filled(y,0)), inner(x, y)) - assert_equal(N.outer(filled(x,0),filled(y,0)), + assert_equal(numpy.outer(filled(x,0),filled(y,0)), outer(x, y)) y = array(['abc', 1, 'def', 2, 3], object) y[2] = masked @@ -642,7 +643,7 @@ assert_equal(1, int(array([[[1]]]))) assert_equal(1.0, float(array([[1]]))) self.failUnlessRaises(ValueError, float, array([1,1])) - assert N.isnan(float(array([1],mask=[1]))) + assert numpy.isnan(float(array([1],mask=[1]))) #TODO: Check how bool works... #TODO: self.failUnless(bool(array([0,1]))) #TODO: self.failUnless(bool(array([0,0],mask=[0,1]))) @@ -722,11 +723,11 @@ assert_equal(a_pickled._data, a._data) assert_equal(a_pickled.fill_value, 999) # - a = array(N.matrix(range(10)), mask=[1,0,1,0,0]*2) + a = array(numpy.matrix(range(10)), mask=[1,0,1,0,0]*2) a_pickled = cPickle.loads(a.dumps()) assert_equal(a_pickled._mask, a._mask) assert_equal(a_pickled, a) - assert(isinstance(a_pickled._data,N.matrix)) + assert(isinstance(a_pickled._data,numpy.matrix)) #............................................................................... @@ -795,7 +796,7 @@ "Test class for miscellaneous MaskedArrays methods." def setUp(self): "Base data definition." 
- x = N.array([ 8.375, 7.545, 8.828, 8.5 , 1.757, 5.928, + x = numpy.array([ 8.375, 7.545, 8.828, 8.5 , 1.757, 5.928, 8.43 , 7.78 , 9.865, 5.878, 8.979, 4.732, 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, 6.04 , 9.63 , 7.712, 3.382, 4.489, 6.479, @@ -804,7 +805,7 @@ X = x.reshape(6,6) XX = x.reshape(3,2,2,3) - m = N.array([0, 1, 0, 1, 0, 0, + m = numpy.array([0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, @@ -814,7 +815,7 @@ mX = array(data=X,mask=m.reshape(X.shape)) mXX = array(data=XX,mask=m.reshape(XX.shape)) - m2 = N.array([1, 1, 0, 1, 0, 0, + m2 = numpy.array([1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, @@ -847,8 +848,8 @@ (x,X,XX,m,mx,mX,mXX,m2x,m2X,m2XX) = self.d (n,m) = X.shape assert_equal(mx.ptp(),mx.compressed().ptp()) - rows = N.zeros(n,N.float_) - cols = N.zeros(m,N.float_) + rows = numpy.zeros(n,numpy.float_) + cols = numpy.zeros(m,numpy.float_) for k in range(m): cols[k] = mX[:,k].compressed().ptp() for k in range(n): @@ -888,7 +889,7 @@ for k in range(6): assert_almost_equal(mXvar1[k],mX[k].compressed().var()) assert_almost_equal(mXvar0[k],mX[:,k].compressed().var()) - assert_almost_equal(N.sqrt(mXvar0[k]), mX[:,k].compressed().std()) + assert_almost_equal(numpy.sqrt(mXvar0[k]), mX[:,k].compressed().std()) def check_argmin(self): "Tests argmin & argmax on MaskedArrays." @@ -969,16 +970,16 @@ #........................ def check_anyall(self): """Checks the any/all methods/functions.""" - x = N.array([[ 0.13, 0.26, 0.90], + x = numpy.array([[ 0.13, 0.26, 0.90], [ 0.28, 0.33, 0.63], [ 0.31, 0.87, 0.70]]) - m = N.array([[ True, False, False], + m = numpy.array([[ True, False, False], [False, False, False], - [True, True, False]], dtype=N.bool_) + [True, True, False]], dtype=numpy.bool_) mx = masked_array(x, mask=m) - xbig = N.array([[False, False, True], + xbig = numpy.array([[False, False, True], [False, False, True], - [False, True, True]], dtype=N.bool_) + [False, True, True]], dtype=numpy.bool_) mxbig = (mx > 0.5) mxsmall = (mx < 0.5) # @@ -996,24 +997,24 @@ assert_equal(mxsmall.any(0), [True, True, False]) assert_equal(mxsmall.any(1), [True, True, False]) # - X = N.matrix(x) + X = numpy.matrix(x) mX = masked_array(X, mask=m) mXbig = (mX > 0.5) mXsmall = (mX < 0.5) # assert (mXbig.all()==False) assert (mXbig.any()==True) - assert_equal(mXbig.all(0), N.matrix([False, False, True])) - assert_equal(mXbig.all(1), N.matrix([False, False, True]).T) - assert_equal(mXbig.any(0), N.matrix([False, False, True])) - assert_equal(mXbig.any(1), N.matrix([ True, True, True]).T) + assert_equal(mXbig.all(0), numpy.matrix([False, False, True])) + assert_equal(mXbig.all(1), numpy.matrix([False, False, True]).T) + assert_equal(mXbig.any(0), numpy.matrix([False, False, True])) + assert_equal(mXbig.any(1), numpy.matrix([ True, True, True]).T) # assert (mXsmall.all()==False) assert (mXsmall.any()==True) - assert_equal(mXsmall.all(0), N.matrix([True, True, False])) - assert_equal(mXsmall.all(1), N.matrix([False, False, False]).T) - assert_equal(mXsmall.any(0), N.matrix([True, True, False])) - assert_equal(mXsmall.any(1), N.matrix([True, True, False]).T) + assert_equal(mXsmall.all(0), numpy.matrix([True, True, False])) + assert_equal(mXsmall.all(1), numpy.matrix([False, False, False]).T) + assert_equal(mXsmall.any(0), numpy.matrix([True, True, False])) + assert_equal(mXsmall.any(1), numpy.matrix([True, True, False]).T) def check_keepmask(self): "Tests the keep mask flag" @@ -1110,7 +1111,7 @@ def check_sort(self): "Test sort" - x = 
array([1,4,2,3],mask=[0,1,0,0],dtype=N.uint8) + x = array([1,4,2,3],mask=[0,1,0,0],dtype=numpy.uint8) # sortedx = sort(x) assert_equal(sortedx._data,[1,2,3,4]) @@ -1124,7 +1125,7 @@ assert_equal(x._data,[1,2,3,4]) assert_equal(x._mask,[0,0,0,1]) # - x = array([1,4,2,3],mask=[0,1,0,0],dtype=N.uint8) + x = array([1,4,2,3],mask=[0,1,0,0],dtype=numpy.uint8) x.sort(endwith=False) assert_equal(x._data, [4,1,2,3]) assert_equal(x._mask, [1,0,0,0]) @@ -1133,10 +1134,10 @@ sortedx = sort(x) assert(not isinstance(sorted, MaskedArray)) # - x = array([0,1,-1,-2,2], mask=nomask, dtype=N.int8) + x = array([0,1,-1,-2,2], mask=nomask, dtype=numpy.int8) sortedx = sort(x, endwith=False) assert_equal(sortedx._data, [-2,-1,0,1,2]) - x = array([0,1,-1,-2,2], mask=[0,1,0,0,1], dtype=N.int8) + x = array([0,1,-1,-2,2], mask=[0,1,0,0,1], dtype=numpy.int8) sortedx = sort(x, endwith=False) assert_equal(sortedx._data, [1,2,-2,-1,0]) assert_equal(sortedx._mask, [1,1,0,0,0]) @@ -1190,7 +1191,7 @@ a = array([0,0], mask=[1,1]) aravel = a.ravel() assert_equal(a._mask.shape, a.shape) - a = array(N.matrix([1,2,3,4,5]), mask=[[0,1,0,0,0]]) + a = array(numpy.matrix([1,2,3,4,5]), mask=[[0,1,0,0,0]]) aravel = a.ravel() assert_equal(a.shape,(1,5)) assert_equal(a._mask.shape, a.shape) @@ -1223,6 +1224,21 @@ assert_equal(b._data, [2,3,4]) assert_equal(b._mask, nomask) + def check_tolist(self): + "Tests to list" + x = array(numpy.arange(12)) + x[[1,-2]] = masked + xlist = x.tolist() + assert(xlist[1] is None) + assert(xlist[-2] is None) + # + x.shape = (3,4) + xlist = x.tolist() + # + assert_equal(xlist[0],[0,None,2,3]) + assert_equal(xlist[1],[4,5,6,7]) + assert_equal(xlist[2],[8,9,None,11]) + #.............................................................................. ############################################################################### From scipy-svn at scipy.org Sat Aug 18 07:21:19 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 18 Aug 2007 06:21:19 -0500 (CDT) Subject: [Scipy-svn] r3246 - in trunk/Lib/stats: . tests Message-ID: <20070818112119.7B21839C1C7@new.scipy.org> Author: stefan Date: 2007-08-18 06:20:58 -0500 (Sat, 18 Aug 2007) New Revision: 3246 Modified: trunk/Lib/stats/distributions.py trunk/Lib/stats/tests/test_distributions.py Log: Fix rv_discrete.rvs for values with 0 probability. 
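[For context: rv_discrete inverts an internal cdf mapping with reverse_dict,
and values carrying zero probability produce colliding entries, so which one
survived used to depend on dict iteration order. Sorting the keys (the change
below) makes the inversion deterministic. A sketch mirroring the regression
test added in this commit:

    import numpy
    from scipy import stats

    states      = [-1, 0, 1, 2, 3, 4]
    probability = [0.0, 0.3, 0.4, 0.0, 0.3, 0.0]
    r = stats.rv_discrete(name='sample', values=(states, probability))
    x = r.rvs(size=1000)
    # After the fix, no draw lands on a state with zero probability.
    assert not numpy.any(x == -1)
    assert not numpy.any(x == 2)
    assert not numpy.any(x == 4)
]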
Modified: trunk/Lib/stats/distributions.py =================================================================== --- trunk/Lib/stats/distributions.py 2007-08-15 13:38:19 UTC (rev 3245) +++ trunk/Lib/stats/distributions.py 2007-08-18 11:20:58 UTC (rev 3246) @@ -20,6 +20,7 @@ import numpy.random as mtrand from numpy import flatnonzero as nonzero from scipy.special import gammaln as gamln +from copy import copy __all__ = [ 'rv_continuous', @@ -3289,7 +3290,9 @@ def reverse_dict(dict): newdict = {} - for key in dict.keys(): + sorted_keys = copy(dict.keys()) + sorted_keys.sort() + for key in sorted_keys[::-1]: newdict[dict[key]] = key return newdict Modified: trunk/Lib/stats/tests/test_distributions.py =================================================================== --- trunk/Lib/stats/tests/test_distributions.py 2007-08-15 13:38:19 UTC (rev 3245) +++ trunk/Lib/stats/tests/test_distributions.py 2007-08-18 11:20:58 UTC (rev 3246) @@ -200,5 +200,16 @@ assert(isinstance(val, numpy.ndarray)) assert(val.dtype.char in typecodes['AllInteger']) +class test_rv_discrete(NumpyTestCase): + def check_rvs(self): + states = [-1,0,1,2,3,4] + probability = [0.0,0.3,0.4,0.0,0.3,0.0] + samples = 1000 + r = stats.rv_discrete(name='sample',values=(states,probability)) + x = r.rvs(size=samples) + + for s,p in zip(states,probability): + assert abs(sum(x == s)/float(samples) - p) < 0.05 + if __name__ == "__main__": NumpyTest('stats.distributions').run() From scipy-svn at scipy.org Sat Aug 18 14:46:50 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 18 Aug 2007 13:46:50 -0500 (CDT) Subject: [Scipy-svn] r3247 - tags Message-ID: <20070818184650.30E2539C107@new.scipy.org> Author: jarrod.millman Date: 2007-08-18 13:46:47 -0500 (Sat, 18 Aug 2007) New Revision: 3247 Added: tags/0.5.2.1/ Log: Tag tree for 0.5.2.1 release Copied: tags/0.5.2.1 (from rev 3246, branches/0.5.2.x) From scipy-svn at scipy.org Sat Aug 18 14:53:27 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 18 Aug 2007 13:53:27 -0500 (CDT) Subject: [Scipy-svn] r3248 - branches/0.5.2.x/Lib Message-ID: <20070818185327.EE32539C107@new.scipy.org> Author: jarrod.millman Date: 2007-08-18 13:53:26 -0500 (Sat, 18 Aug 2007) New Revision: 3248 Modified: branches/0.5.2.x/Lib/version.py Log: Update version number on 0.5.2.x branch. Modified: branches/0.5.2.x/Lib/version.py =================================================================== --- branches/0.5.2.x/Lib/version.py 2007-08-18 18:46:47 UTC (rev 3247) +++ branches/0.5.2.x/Lib/version.py 2007-08-18 18:53:26 UTC (rev 3248) @@ -1,4 +1,4 @@ -version = '0.5.2.1' +version = '0.5.2.2' release=False if not release: From scipy-svn at scipy.org Sat Aug 18 14:57:47 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 18 Aug 2007 13:57:47 -0500 (CDT) Subject: [Scipy-svn] r3249 - tags/0.5.2.1/Lib Message-ID: <20070818185747.07EE139C107@new.scipy.org> Author: jarrod.millman Date: 2007-08-18 13:57:46 -0500 (Sat, 18 Aug 2007) New Revision: 3249 Modified: tags/0.5.2.1/Lib/version.py Log: Make 0.5.2.1 tag a version release. 
Modified: tags/0.5.2.1/Lib/version.py =================================================================== --- tags/0.5.2.1/Lib/version.py 2007-08-18 18:53:26 UTC (rev 3248) +++ tags/0.5.2.1/Lib/version.py 2007-08-18 18:57:46 UTC (rev 3249) @@ -1,5 +1,5 @@ version = '0.5.2.1' -release=False +release=True if not release: import os From scipy-svn at scipy.org Tue Aug 21 18:02:36 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 21 Aug 2007 17:02:36 -0500 (CDT) Subject: [Scipy-svn] r3250 - trunk Message-ID: <20070821220236.14E8C39C013@new.scipy.org> Author: jarrod.millman Date: 2007-08-21 17:02:33 -0500 (Tue, 21 Aug 2007) New Revision: 3250 Added: trunk/scipy/ Removed: trunk/Lib/ Modified: trunk/setup.py Log: rename trunk/Lib to trunk/scipy to conform to convention (see ticket #483) Copied: trunk/scipy (from rev 3249, trunk/Lib) Modified: trunk/setup.py =================================================================== --- trunk/setup.py 2007-08-18 18:57:46 UTC (rev 3249) +++ trunk/setup.py 2007-08-21 22:02:33 UTC (rev 3250) @@ -16,10 +16,10 @@ delegate_options_to_subpackages=True, quiet=True) - config.add_subpackage('Lib') + config.add_subpackage('scipy') config.add_data_files(('scipy','*.txt')) - config.get_version('Lib/version.py') # sets config.version + config.get_version('scipy/version.py') # sets config.version return config @@ -32,7 +32,7 @@ local_path = os.path.dirname(os.path.abspath(sys.argv[0])) os.chdir(local_path) sys.path.insert(0,local_path) - sys.path.insert(0,os.path.join(local_path,'Lib')) # to retrive version + sys.path.insert(0,os.path.join(local_path,'scipy')) # to retrive version try: from version import version as version From scipy-svn at scipy.org Tue Aug 21 18:09:14 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 21 Aug 2007 17:09:14 -0500 (CDT) Subject: [Scipy-svn] r3251 - trunk/scipy Message-ID: <20070821220914.B698739C013@new.scipy.org> Author: jarrod.millman Date: 2007-08-21 17:09:12 -0500 (Tue, 21 Aug 2007) New Revision: 3251 Modified: trunk/scipy/version.py Log: next release is milestone:0.6 Modified: trunk/scipy/version.py =================================================================== --- trunk/scipy/version.py 2007-08-21 22:02:33 UTC (rev 3250) +++ trunk/scipy/version.py 2007-08-21 22:09:12 UTC (rev 3251) @@ -1,4 +1,4 @@ -version = '0.5.3' +version = '0.6' release=False if not release: From scipy-svn at scipy.org Tue Aug 21 18:12:53 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 21 Aug 2007 17:12:53 -0500 (CDT) Subject: [Scipy-svn] r3252 - trunk/scipy/maxentropy Message-ID: <20070821221253.E6B8139C013@new.scipy.org> Author: jarrod.millman Date: 2007-08-21 17:12:51 -0500 (Tue, 21 Aug 2007) New Revision: 3252 Modified: trunk/scipy/maxentropy/setup.py Log: there is no docs directory Modified: trunk/scipy/maxentropy/setup.py =================================================================== --- trunk/scipy/maxentropy/setup.py 2007-08-21 22:09:12 UTC (rev 3251) +++ trunk/scipy/maxentropy/setup.py 2007-08-21 22:12:51 UTC (rev 3252) @@ -10,7 +10,6 @@ config.add_data_dir('tests') config.add_data_dir('examples') - config.add_data_dir('doc') return config From scipy-svn at scipy.org Wed Aug 22 11:55:17 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 22 Aug 2007 10:55:17 -0500 (CDT) Subject: [Scipy-svn] r3253 - in trunk/scipy/sandbox/multigrid: . 
multigridtools tests Message-ID: <20070822155517.931E239C014@new.scipy.org> Author: wnbell Date: 2007-08-22 10:55:13 -0500 (Wed, 22 Aug 2007) New Revision: 3253 Modified: trunk/scipy/sandbox/multigrid/ trunk/scipy/sandbox/multigrid/multigrid.py trunk/scipy/sandbox/multigrid/multigridtools/ trunk/scipy/sandbox/multigrid/multigridtools/multigridtools.i trunk/scipy/sandbox/multigrid/multigridtools/multigridtools_wrap.cxx trunk/scipy/sandbox/multigrid/multilevel.py trunk/scipy/sandbox/multigrid/simple_test.py trunk/scipy/sandbox/multigrid/tests/ Log: changed properties Property changes on: trunk/scipy/sandbox/multigrid ___________________________________________________________________ Name: svn:ignore + *.so *.bak *.pyc ignore.txt Modified: trunk/scipy/sandbox/multigrid/multigrid.py =================================================================== --- trunk/scipy/sandbox/multigrid/multigrid.py 2007-08-21 22:12:51 UTC (rev 3252) +++ trunk/scipy/sandbox/multigrid/multigrid.py 2007-08-22 15:55:13 UTC (rev 3253) @@ -8,6 +8,19 @@ from pydec import gauss_seidel,diag_sparse,inf_norm + +def poisson_problem1D(N): + """ + Return a sparse CSC matrix for the 2d N*N poisson problem + with standard 5-point finite difference stencil + """ + D = 2*numpy.ones(N) + O = -numpy.ones(N) + return scipy.sparse.spdiags([D,O,O],[0,-1,1],N,N) + + + + def poisson_problem(N): """ Return a sparse CSC matrix for the 2d N*N poisson problem @@ -51,6 +64,17 @@ return scipy.sparse.csr_matrix((Sx,Sj,Sp),A.shape) +def sa_no_threshold(A): + if not scipy.sparse.isspmatrix_csr(A): raise TypeError('expected sparse.csr_matrix') + + #tentative (non-smooth) interpolation operator I + Ij = multigridtools.sa_get_aggregates(A.shape[0],A.indptr,A.indices) + Ip = numpy.arange(len(Ij)+1) + Ix = numpy.ones(len(Ij)) + + return scipy.sparse.csr_matrix((Ix,Ij,Ip)) + + def sa_constant_interpolation(A,epsilon=0.08): if not scipy.sparse.isspmatrix_csr(A): raise TypeError('expected sparse.csr_matrix') Property changes on: trunk/scipy/sandbox/multigrid/multigridtools ___________________________________________________________________ Name: svn:ignore + *.so *.bak *.pyc ignore.txt Modified: trunk/scipy/sandbox/multigrid/multigridtools/multigridtools.i =================================================================== --- trunk/scipy/sandbox/multigrid/multigridtools/multigridtools.i 2007-08-21 22:12:51 UTC (rev 3252) +++ trunk/scipy/sandbox/multigrid/multigridtools/multigridtools.i 2007-08-22 15:55:13 UTC (rev 3253) @@ -16,7 +16,7 @@ %feature("autodoc", "1"); -%include "../../../sparse/sparsetools/numpy.i" +%include "numpy.i" %init %{ import_array(); @@ -61,7 +61,7 @@ %enddef -I_IN_ARRAY1( int ) +I_IN_ARRAY1( int ) T_IN_ARRAY1( float ) T_IN_ARRAY1( double ) @@ -70,8 +70,7 @@ /* * OUT types */ -%define I_ARRAY_ARGOUT( ctype, atype ) -VEC_ARRAY_ARGOUT( ctype, atype ) +%define I_ARRAY_ARGOUT( ctype ) %apply std::vector* array_argout { std::vector* Ap, std::vector* Ai, @@ -91,8 +90,7 @@ }; %enddef -%define T_ARRAY_ARGOUT( ctype, atype ) -VEC_ARRAY_ARGOUT( ctype, atype ) +%define T_ARRAY_ARGOUT( ctype ) %apply std::vector* array_argout { std::vector* Ax, std::vector* Bx, @@ -104,9 +102,9 @@ }; %enddef -I_ARRAY_ARGOUT( int, INT) -T_ARRAY_ARGOUT( float, FLOAT ) -T_ARRAY_ARGOUT( double, DOUBLE ) +I_ARRAY_ARGOUT( int ) +T_ARRAY_ARGOUT( float ) +T_ARRAY_ARGOUT( double ) Modified: trunk/scipy/sandbox/multigrid/multigridtools/multigridtools_wrap.cxx =================================================================== --- 
trunk/scipy/sandbox/multigrid/multigridtools/multigridtools_wrap.cxx 2007-08-21 22:12:51 UTC (rev 3252) +++ trunk/scipy/sandbox/multigrid/multigridtools/multigridtools_wrap.cxx 2007-08-22 15:55:13 UTC (rev 3253) @@ -2576,7 +2576,9 @@ #endif #include "stdio.h" #include +#include "complex_ops.h" + /* The following code originally appeared in enthought/kiva/agg/src/numeric.i, * author unknown. It was translated from C++ to C by John Hunter. Bill * Spotz has modified it slightly to fix some minor bugs, add some comments @@ -3089,7 +3091,7 @@ resultobj = SWIG_Py_Void(); { int length = (arg4)->size(); - PyObject *obj = PyArray_FromDims(1, &length, PyArray_INT); + PyObject *obj = PyArray_FromDims(1, &length,PyArray_INT); memcpy(PyArray_DATA(obj),&((*(arg4))[0]),sizeof(int)*length); delete arg4; resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); @@ -3192,21 +3194,21 @@ resultobj = SWIG_Py_Void(); { int length = (arg6)->size(); - PyObject *obj = PyArray_FromDims(1, &length, PyArray_INT); + PyObject *obj = PyArray_FromDims(1, &length,PyArray_INT); memcpy(PyArray_DATA(obj),&((*(arg6))[0]),sizeof(int)*length); delete arg6; resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); } { int length = (arg7)->size(); - PyObject *obj = PyArray_FromDims(1, &length, PyArray_INT); + PyObject *obj = PyArray_FromDims(1, &length,PyArray_INT); memcpy(PyArray_DATA(obj),&((*(arg7))[0]),sizeof(int)*length); delete arg7; resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); } { int length = (arg8)->size(); - PyObject *obj = PyArray_FromDims(1, &length, PyArray_FLOAT); + PyObject *obj = PyArray_FromDims(1, &length,PyArray_FLOAT); memcpy(PyArray_DATA(obj),&((*(arg8))[0]),sizeof(float)*length); delete arg8; resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); @@ -3315,21 +3317,21 @@ resultobj = SWIG_Py_Void(); { int length = (arg6)->size(); - PyObject *obj = PyArray_FromDims(1, &length, PyArray_INT); + PyObject *obj = PyArray_FromDims(1, &length,PyArray_INT); memcpy(PyArray_DATA(obj),&((*(arg6))[0]),sizeof(int)*length); delete arg6; resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); } { int length = (arg7)->size(); - PyObject *obj = PyArray_FromDims(1, &length, PyArray_INT); + PyObject *obj = PyArray_FromDims(1, &length,PyArray_INT); memcpy(PyArray_DATA(obj),&((*(arg7))[0]),sizeof(int)*length); delete arg7; resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); } { int length = (arg8)->size(); - PyObject *obj = PyArray_FromDims(1, &length, PyArray_DOUBLE); + PyObject *obj = PyArray_FromDims(1, &length,PyArray_DOUBLE); memcpy(PyArray_DATA(obj),&((*(arg8))[0]),sizeof(double)*length); delete arg8; resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); @@ -3580,21 +3582,21 @@ resultobj = SWIG_Py_Void(); { int length = (arg11)->size(); - PyObject *obj = PyArray_FromDims(1, &length, PyArray_INT); + PyObject *obj = PyArray_FromDims(1, &length,PyArray_INT); memcpy(PyArray_DATA(obj),&((*(arg11))[0]),sizeof(int)*length); delete arg11; resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); } { int length = (arg12)->size(); - PyObject *obj = PyArray_FromDims(1, &length, PyArray_INT); + PyObject *obj = PyArray_FromDims(1, &length,PyArray_INT); memcpy(PyArray_DATA(obj),&((*(arg12))[0]),sizeof(int)*length); delete arg12; resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); } { int length = (arg13)->size(); - PyObject *obj = PyArray_FromDims(1, &length, PyArray_FLOAT); + PyObject *obj = PyArray_FromDims(1, &length,PyArray_FLOAT); 
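/* (a note on the recurring pattern in these hunks: each one allocates a
 1-D numpy array sized to the std::vector result, memcpys the vector's
 contents into it, deletes the vector, and appends the array to the
 returned output tuple; the +/- churn here differs only in whitespace,
 apparently because the SWIG wrapper was regenerated) */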
memcpy(PyArray_DATA(obj),&((*(arg13))[0]),sizeof(float)*length); delete arg13; resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); @@ -3802,21 +3804,21 @@ resultobj = SWIG_Py_Void(); { int length = (arg11)->size(); - PyObject *obj = PyArray_FromDims(1, &length, PyArray_INT); + PyObject *obj = PyArray_FromDims(1, &length,PyArray_INT); memcpy(PyArray_DATA(obj),&((*(arg11))[0]),sizeof(int)*length); delete arg11; resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); } { int length = (arg12)->size(); - PyObject *obj = PyArray_FromDims(1, &length, PyArray_INT); + PyObject *obj = PyArray_FromDims(1, &length,PyArray_INT); memcpy(PyArray_DATA(obj),&((*(arg12))[0]),sizeof(int)*length); delete arg12; resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); } { int length = (arg13)->size(); - PyObject *obj = PyArray_FromDims(1, &length, PyArray_DOUBLE); + PyObject *obj = PyArray_FromDims(1, &length,PyArray_DOUBLE); memcpy(PyArray_DATA(obj),&((*(arg13))[0]),sizeof(double)*length); delete arg13; resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); @@ -4088,21 +4090,21 @@ resultobj = SWIG_Py_Void(); { int length = (arg6)->size(); - PyObject *obj = PyArray_FromDims(1, &length, PyArray_INT); + PyObject *obj = PyArray_FromDims(1, &length,PyArray_INT); memcpy(PyArray_DATA(obj),&((*(arg6))[0]),sizeof(int)*length); delete arg6; resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); } { int length = (arg7)->size(); - PyObject *obj = PyArray_FromDims(1, &length, PyArray_INT); + PyObject *obj = PyArray_FromDims(1, &length,PyArray_INT); memcpy(PyArray_DATA(obj),&((*(arg7))[0]),sizeof(int)*length); delete arg7; resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); } { int length = (arg8)->size(); - PyObject *obj = PyArray_FromDims(1, &length, PyArray_FLOAT); + PyObject *obj = PyArray_FromDims(1, &length,PyArray_FLOAT); memcpy(PyArray_DATA(obj),&((*(arg8))[0]),sizeof(float)*length); delete arg8; resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); @@ -4211,21 +4213,21 @@ resultobj = SWIG_Py_Void(); { int length = (arg6)->size(); - PyObject *obj = PyArray_FromDims(1, &length, PyArray_INT); + PyObject *obj = PyArray_FromDims(1, &length,PyArray_INT); memcpy(PyArray_DATA(obj),&((*(arg6))[0]),sizeof(int)*length); delete arg6; resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); } { int length = (arg7)->size(); - PyObject *obj = PyArray_FromDims(1, &length, PyArray_INT); + PyObject *obj = PyArray_FromDims(1, &length,PyArray_INT); memcpy(PyArray_DATA(obj),&((*(arg7))[0]),sizeof(int)*length); delete arg7; resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); } { int length = (arg8)->size(); - PyObject *obj = PyArray_FromDims(1, &length, PyArray_DOUBLE); + PyObject *obj = PyArray_FromDims(1, &length,PyArray_DOUBLE); memcpy(PyArray_DATA(obj),&((*(arg8))[0]),sizeof(double)*length); delete arg8; resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); @@ -4449,21 +4451,21 @@ resultobj = SWIG_Py_Void(); { int length = (arg9)->size(); - PyObject *obj = PyArray_FromDims(1, &length, PyArray_INT); + PyObject *obj = PyArray_FromDims(1, &length,PyArray_INT); memcpy(PyArray_DATA(obj),&((*(arg9))[0]),sizeof(int)*length); delete arg9; resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); } { int length = (arg10)->size(); - PyObject *obj = PyArray_FromDims(1, &length, PyArray_INT); + PyObject *obj = PyArray_FromDims(1, &length,PyArray_INT); memcpy(PyArray_DATA(obj),&((*(arg10))[0]),sizeof(int)*length); delete arg10; resultobj = 
helper_appendToTuple( resultobj, (PyObject *)obj ); } { int length = (arg11)->size(); - PyObject *obj = PyArray_FromDims(1, &length, PyArray_FLOAT); + PyObject *obj = PyArray_FromDims(1, &length,PyArray_FLOAT); memcpy(PyArray_DATA(obj),&((*(arg11))[0]),sizeof(float)*length); delete arg11; resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); @@ -4626,21 +4628,21 @@ resultobj = SWIG_Py_Void(); { int length = (arg9)->size(); - PyObject *obj = PyArray_FromDims(1, &length, PyArray_INT); + PyObject *obj = PyArray_FromDims(1, &length,PyArray_INT); memcpy(PyArray_DATA(obj),&((*(arg9))[0]),sizeof(int)*length); delete arg9; resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); } { int length = (arg10)->size(); - PyObject *obj = PyArray_FromDims(1, &length, PyArray_INT); + PyObject *obj = PyArray_FromDims(1, &length,PyArray_INT); memcpy(PyArray_DATA(obj),&((*(arg10))[0]),sizeof(int)*length); delete arg10; resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); } { int length = (arg11)->size(); - PyObject *obj = PyArray_FromDims(1, &length, PyArray_DOUBLE); + PyObject *obj = PyArray_FromDims(1, &length,PyArray_DOUBLE); memcpy(PyArray_DATA(obj),&((*(arg11))[0]),sizeof(double)*length); delete arg11; resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); Modified: trunk/scipy/sandbox/multigrid/multilevel.py =================================================================== --- trunk/scipy/sandbox/multigrid/multilevel.py 2007-08-21 22:12:51 UTC (rev 3252) +++ trunk/scipy/sandbox/multigrid/multilevel.py 2007-08-22 15:55:13 UTC (rev 3253) @@ -1,11 +1,12 @@ -from scipy import ones,zeros,rand,array,array_split,hstack,transpose,sum,ones_like,sqrt -from scipy.sparse import spidentity +from scipy import ones,zeros,rand,array,array_split,hstack,transpose,sum,ones_like,sqrt,concatenate +from scipy.sparse import spidentity,csr_matrix,coo_matrix from numpy.linalg import norm +from numpy import zeros_like,arange,inner,diff,ravel import pydec from pydec import diag_sparse,inf_norm, mls_polynomial_coeffs,polynomial_smoother -from multigrid import sa_interpolation,rs_interpolation +from multigrid import sa_interpolation,rs_interpolation,sa_constant_interpolation,sa_no_threshold import multigrid import multigridtools from relaxation import gauss_seidel,jacobi @@ -312,7 +313,6 @@ self[-1].coarse_solver = coarse_grid_solver(A,self.opts.sub_options('coarse:')) self[-1].A = A - if self.opts['smoother: type'] == 'jacobi': omegas = [] for lvl in self: @@ -324,4 +324,400 @@ self.opts['smoother: omega'] = omegas +class multilevel_solver2: + def __init__(self,As,Ps,options=None): + self.As = As + self.Ps = Ps + self.ops = options + + def solve(self,b, x0=None, tol=1e-5, maxiter=100, callback=None, return_residuals=False): + if x0 is None: + x = zeros(b.shape,max(self.A.dtype,b.dtype)) + else: + x = array(x0) + + self.__solve(0,x,b) + + return x + + def __solve(self,lvl,x,b): + + A = self.As[lvl] + + if len(self.As) == 1: + x[:] = scipy.linalg.solve(A.todense(),b) + return x + + + self.__smooth(lvl,x,b,which='pre') + + residual = b - A*x + + coarse_x = zeros((self.As[lvl+1].shape[0])) + coarse_b = self.Ps[lvl].T * residual + + if lvl == len(self.As) - 2: + pass + coarse_x[:] = scipy.linalg.solve(self.As[-1].todense(),coarse_b) + #coarse_x[:] = self[-1].coarse_solver.solve(coarse_b) #next level is coarsest + else: + self.__solve(lvl+1,coarse_x,coarse_b) + + x += self.Ps[lvl] * coarse_x + + self.__smooth(lvl,x,b,which='post') + + + def __smooth(self,lvl,x,b,which): + A = self.As[lvl] + if which 
== 'pre': + gauss_seidel(A,x,b,iterations=1,sweep="forward") + else: + gauss_seidel(A,x,b,iterations=1,sweep="backward") + + +def inf_norm(A): + return abs(A).sum(axis=1).max() #max abs row sum + +def fit_candidate(I,x): + """ + For each aggregate in I (i.e. each column of I) compute vector R and + sparse matrix Q (having the sparsity of I) such that the following holds: + + Q*R = x and Q^T*Q = I + + In otherwords, find a prolongator Q with orthonormal columns so that + x is represented exactly on the coarser level by R. + """ + Q = csr_matrix((x.copy(),I.indices,I.indptr),dims=I.shape,check=False) + R = sqrt(numpy.ravel(csr_matrix((x*x,I.indices,I.indptr),dims=I.shape,check=False).sum(axis=0))) #column 2-norms + Q.data *= (1.0/R)[Q.indices] + print "norm(Q*R - x)",linalg.norm(Q*R - x) + return Q,R + + +def scaled_columns_csr(A,scales): + scales = numpy.ravel(scales) + A = A.copy() + A.data *= scales[A.indices] + return A + +def orthonormalize_candidate(I,x,basis): + Px = csr_matrix((x,I.indices,I.indptr),dims=I.shape,check=False) + Rs = [] + #othogonalize columns of Px against other candidates + for b in basis: + Pb = csr_matrix((b,I.indices,I.indptr),dims=I.shape,check=False) + R = ravel(csr_matrix((Pb.data*Px.data,I.indices,I.indptr),dims=I.shape,check=False).sum(axis=0)) # columnwise projection of Px on Pb + Px.data -= R[I.indices] * Pb.data #subtract component in b direction + Rs.append(R) + + #filter columns here, set unused cols to 0, add to mask + + #normalize columns of Px + R = ravel(csr_matrix((x**x,I.indices,I.indptr),dims=I.shape,check=False).sum(axis=0)) + Px.data *= (1.0/R)[I.indices] + Rs.append(R.reshape(-1,1)) + return Rs + +def hstack_csr(A,B): + #OPTIMIZE THIS + assert(A.shape[0] == B.shape[0]) + A = A.tocoo() + B = B.tocoo() + I = concatenate((A.row,B.row)) + J = concatenate((A.col,B.col+A.shape[1])) + V = concatenate((A.data,B.data)) + return coo_matrix((V,(I,J)),dims=(A.shape[0],A.shape[1]+B.shape[1])).tocsr() + + +def vstack_csr(A,B): + #OPTIMIZE THIS + assert(A.shape[1] == B.shape[1]) + A = A.tocoo() + B = B.tocoo() + I = concatenate((A.row,B.row+A.shape[0])) + J = concatenate((A.col,B.col)) + V = concatenate((A.data,B.data)) + return coo_matrix((V,(I,J)),dims=(A.shape[0]+B.shape[0],A.shape[1])).tocsr() + + + +def orthonormalize_prolongator(P_l,x_l,W_l,W_m): + """ + + """ + X = csr_matrix((x_l,W_l.indices,W_l.indptr),dims=W_l.shape,check=False) #candidate prolongator (assumes every value from x is used) + + R = (P_l.T.tocsr() * X) # R has at most 1 nz per row + X = X - P_l*R # othogonalize X against P_l + + #DROP REDUNDANT COLUMNS FROM P (AND R?) HERE (NULL OUT R ACCORDINGLY?) 
+ #REMOVE CORRESPONDING COLUMNS FROM W_l AND ROWS FROM A_m ALSO + W_l_new = W_l + W_m_new = W_m + + #normalize surviving columns of X + col_norms = ravel(sqrt(csr_matrix((X.data*X.data,X.indices,X.indptr),dims=X.shape,check=False).sum(axis=0))) + print "zero cols",sum(col_norms == 0) + print "small cols",sum(col_norms < 1e-8) + Xcopy = X.copy() + X.data *= (1.0/col_norms)[X.indices] + + P_l_new = hstack_csr(P_l,X) + + + #check orthonormality + print "norm(P.T*P - I) ",scipy.linalg.norm((P_l_new.T * P_l_new - scipy.sparse.spidentity(P_l_new.shape[1])).data) + #assert(scipy.linalg.norm((P_l_new.T * P_l_new - scipy.sparse.spidentity(P_l_new.shape[1])).data)<1e-8) + + x_m = zeros(P_l_new.shape[1],dtype=x_l.dtype) + x_m[:P_l.shape[1]][diff(R.indptr).astype('bool')] = R.data + x_m[P_l.shape[1]:] = col_norms + + print "||x_l - P_l*x_m||",scipy.linalg.norm(P_l_new* x_m - x_l) #see if x_l is represented exactly + + return P_l_new,x_m,W_l,W_m + + + +def prolongation_smoother(A): + omega = (4.0/3.0)/inf_norm(A) + S = (spidentity(A.shape[0]).T - omega*A) + return S + + +def smoothed_prolongator(P,A): + #just use Richardson for now + omega = 4.0/(3.0*inf_norm(A)) + return P - omega*(A*P) + + + + +def sa_hierarchy(A,Ws,x): + """ + Construct multilevel hierarchy using Smoothed Aggregation + Inputs: + A - matrix + Is - list of constant prolongators + x - "candidate" basis function to be approximated + Ouputs: + (As,Is,Ps) - tuple of lists + - As - [A, Ps[0].T*A*Ps[0], Ps[1].T*A*Ps[1], ... ] + - Is - smoothed prolongators + - Ps - tentative prolongators + """ + Ps = [] + Is = [] + As = [A] + + for W in Ws: + P,x = fit_candidate(W,x) + I = smoothed_prolongator(P,A) + A = I.T.tocsr() * A * I + As.append(A) + Ps.append(P) + Is.append(I) + return As,Is,Ps + +def make_bridge(I,N): + tail = I.indptr[-1].repeat(N - I.shape[0]) + ptr = concatenate((I.indptr,tail)) + return csr_matrix((I.data,I.indices,ptr),dims=(N,I.shape[1]),check=False) + +class adaptive_sa_solver: + def __init__(self,A,options=None): + self.A = A + + self.Rs = [] + self.__construct_hierarchy(A) + + def __construct_hierarchy(self,A): + #if self.A.shape[0] <= self.opts['coarse: max size']: + # raise ValueError,'small matrices not handled yet' + + x,AggOps = self.__initialization_stage(A) #first candidate + Ws = AggOps + + #x[:] = 1 #TEST + + self.candidates = [x] + + #create SA using x here + As,Is,Ps = sa_hierarchy(A,Ws,x) + + for i in range(0): + x = self.__develop_candidate(A,As,Is,Ps,Ws,AggOps) + #x[:] = arange(x.shape[0]) + #x[x.shape[0]/2:] = 2*x[x.shape[0]/2] - x[x.shape[0]/2:] + As,Is,Ps,Ws = self.__augment_cycle(A,As,Ps,Ws,AggOps,x) + + self.candidates.append(x) + + #As,Is,Ps = sa_hierarchy(A,AggOps,x) #TESTING + self.Ps = Ps + self.solver = multilevel_solver2(As,Is) + + + + def __develop_candidate(self,A,As,Is,Ps,Ws,AggOps): + x = rand(A.shape[0]) + b = zeros_like(x) + + #x[:] = 1 #TEST + + mu = 5 + + solver = multilevel_solver2(As,Is) + + for n in range(mu): + x = solver.solve(b, x0=x, tol=1e-8, maxiter=1) + #TEST FOR CONVERGENCE HERE + + A_l,P_l,W_l,x_l = As[0],Ps[0],Ws[0],x + + temp_Is = [] + for i in range(len(As) - 2): + P_l_new, x_m, W_l_new, W_m_new = orthonormalize_prolongator(P_l, x_l, W_l, AggOps[i+1]) + + I_l_new = smoothed_prolongator(P_l_new,A_l) + A_m_new = I_l_new.T.tocsr() * A_l * I_l_new + bridge = make_bridge(Is[i+1],A_m_new.shape[0]) + + temp_solver = multilevel_solver2( [A_m_new] + As[i+2:], [bridge] + Is[i+2:] ) + + for n in range(mu): + x_m = temp_solver.solve(zeros_like(x_m), x0=x_m, tol=1e-8, maxiter=1) + + 
temp_Is.append(I_l_new) + + W_l = vstack_csr(Ws[i+1],W_m_new) #prepare for next iteration + A_l = A_m_new + x_l = x_m + P_l = make_bridge(Ps[i+1],A_m_new.shape[0]) + + x = x_l + for I in reversed(temp_Is): + x = I*x + + return x + + + def __augment_cycle(self,A,As,Ps,Ws,AggOps,x): + #As,Is,Ps,Ws = self.__augment_cycle(A,Ps,Ws,AggOps,x) + + #make a new cycle using the new candidate + A_l,P_l,W_l,x_l = As[0],Ps[0],AggOps[0],x + + new_As,new_Is,new_Ps,new_Ws = [A],[],[],[AggOps[0]] + + for i in range(len(As) - 2): + P_l_new, x_m, W_l_new, W_m_new = orthonormalize_prolongator(P_l, x_l, W_l, AggOps[i+1]) + + I_l_new = smoothed_prolongator(P_l_new,A_l) + A_m_new = I_l_new.T.tocsr() * A_l * I_l_new + W_m_new = vstack_csr(Ws[i+1],W_m_new) + + new_As.append(A_m_new) + new_Ws.append(W_m_new) + new_Is.append(I_l_new) + new_Ps.append(P_l_new) + + #prepare for next iteration + W_l = W_m_new + A_l = A_m_new + x_l = x_m + P_l = make_bridge(Ps[i+1],A_m_new.shape[0]) + + P_l_new, x_m, W_l_new, W_m_new = orthonormalize_prolongator(P_l, x_l, W_l, csr_matrix((P_l.shape[1],1))) + I_l_new = smoothed_prolongator(P_l_new,A_l) + A_m_new = I_l_new.T.tocsr() * A_l * I_l_new + + new_As.append(A_m_new) + new_Is.append(I_l_new) + new_Ps.append(P_l_new) + + return new_As,new_Is,new_Ps,new_Ws + + + def __initialization_stage(self,A): + max_levels = 10 + max_coarse = 50 + + AggOps = [] + Is = [] + + # aSA parameters + mu = 5 # number of test relaxation iterations + epsilon = 0.1 # minimum acceptable relaxation convergence factor + + #step 1 + A_l = A + x = scipy.rand(A_l.shape[0]) + skip_f_to_i = False + + #step 2 + b = zeros_like(x) + gauss_seidel(A_l,x,b,iterations=mu) + #step 3 + #test convergence rate here + + while len(AggOps) < max_levels and A_l.shape[0] > max_coarse: + W_l = sa_constant_interpolation(A_l) #step 4b + #W_l = sa_no_threshold(A_l) #step 4b TEST + P_l,x = fit_candidate(W_l,x) #step 4c + I_l = smoothed_prolongator(P_l,A_l) #step 4d + A_l = I_l.T.tocsr() * A_l * I_l #step 4e + + AggOps.append(W_l) + Is.append(I_l) + + if A_l.shape <= max_coarse: break + + if not skip_f_to_i: + print "." 
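+ #(steps 4g-4i below: save x, relax mu further sweeps on A_l*x = 0,
+ #and compare energies; (inner(x,A_l*x)/inner(x_hat,A_l*x_hat))**(1./mu)
+ #estimates the per-sweep relaxation convergence factor, and once it
+ #falls below epsilon, relaxation alone is judged fast enough and the
+ #remaining fine-to-intermediate tests are skipped)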
+ x_hat = x.copy() #step 4g + gauss_seidel(A_l,x,zeros_like(x),iterations=mu) #step 4h + x_A_x = inner(x,A_l*x) + if (x_A_x/inner(x_hat,A_l*x_hat))**(1.0/mu) < epsilon: #step 4i + print "sufficient convergence, skipping" + skip_f_to_i = True + if x_A_x == 0: + x = x_hat #need to restore x + + #update fine-level candidate + for I in reversed(Is): + x = I * x + + #gauss_seidel(A,x,zeros_like(x),iterations=mu) #TEST + + #x[:] = 1 #TEST + + return x,AggOps #first candidate,aggregation + + + +from scipy import * +from pydec import diag_sparse +from multigrid import poisson_problem,poisson_problem1D +#A = poisson_problem(100).T +A = poisson_problem1D(100).T +D = diag_sparse(1.0/sqrt(10**(12*rand(A.shape[0])-6))).tocsr() +A = D * A * D +#A = A*A +#A = io.mmread("nos2.mtx").tocsr() +asa = adaptive_sa_solver(A) +x = rand(A.shape[0]) +b = zeros_like(x) + +resid = [] + +for n in range(50): + x = asa.solver.solve(b,x) + resid.append(linalg.norm(A*x)) + + + + Modified: trunk/scipy/sandbox/multigrid/simple_test.py =================================================================== --- trunk/scipy/sandbox/multigrid/simple_test.py 2007-08-21 22:12:51 UTC (rev 3252) +++ trunk/scipy/sandbox/multigrid/simple_test.py 2007-08-22 15:55:13 UTC (rev 3253) @@ -2,7 +2,7 @@ from multigrid import * from scipy import * -A = poisson_problem(300).T +A = poisson_problem(100).T s = scalar_solver(A) b = rand(A.shape[0]) x,res = s.solve(b,return_residuals=True) Property changes on: trunk/scipy/sandbox/multigrid/tests ___________________________________________________________________ Name: svn:ignore + *.so *.bak *.pyc ignore.txt From scipy-svn at scipy.org Wed Aug 22 14:26:14 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 22 Aug 2007 13:26:14 -0500 (CDT) Subject: [Scipy-svn] r3254 - in trunk/scipy/sandbox/multigrid: . 
multigridtools Message-ID: <20070822182614.61B2D39C10C@new.scipy.org> Author: wnbell Date: 2007-08-22 13:26:02 -0500 (Wed, 22 Aug 2007) New Revision: 3254 Added: trunk/scipy/sandbox/multigrid/multigridtools/complex_ops.h trunk/scipy/sandbox/multigrid/multigridtools/numpy.i trunk/scipy/sandbox/multigrid/multigridtools/relaxation.h Modified: trunk/scipy/sandbox/multigrid/multigrid.py trunk/scipy/sandbox/multigrid/multigridtools/smoothed_aggregation.h trunk/scipy/sandbox/multigrid/multilevel.py Log: simplified multilevel_solver Modified: trunk/scipy/sandbox/multigrid/multigrid.py =================================================================== --- trunk/scipy/sandbox/multigrid/multigrid.py 2007-08-22 15:55:13 UTC (rev 3253) +++ trunk/scipy/sandbox/multigrid/multigrid.py 2007-08-22 18:26:02 UTC (rev 3254) @@ -3,36 +3,10 @@ import multigridtools import scipy import numpy -#import scipy.linsolve.umfpack as um - -from pydec import gauss_seidel,diag_sparse,inf_norm +from pydec import diag_sparse,inf_norm -def poisson_problem1D(N): - """ - Return a sparse CSC matrix for the 2d N*N poisson problem - with standard 5-point finite difference stencil - """ - D = 2*numpy.ones(N) - O = -numpy.ones(N) - return scipy.sparse.spdiags([D,O,O],[0,-1,1],N,N) - - - - -def poisson_problem(N): - """ - Return a sparse CSC matrix for the 2d N*N poisson problem - with standard 5-point finite difference stencil - """ - D = 4*numpy.ones(N*N) - T = -numpy.ones(N*N) - O = -numpy.ones(N*N) - T[N-1::N] = 0 - return scipy.sparse.spdiags([D,O,T,T,O],[0,-N,-1,1,N],N*N,N*N) - - def rs_strong_connections(A,theta): if not scipy.sparse.isspmatrix_csr(A): raise TypeError('expected sparse.csr_matrix') @@ -47,8 +21,6 @@ T = S.T.tocsr() - print "RS on A ",A.shape - Ip,Ij,Ix = multigridtools.rs_interpolation(A.shape[0],\ A.indptr,A.indices,A.data,\ S.indptr,S.indices,S.data,\ @@ -63,18 +35,6 @@ Sp,Sj,Sx = multigridtools.sa_strong_connections(A.shape[0],epsilon,A.indptr,A.indices,A.data) return scipy.sparse.csr_matrix((Sx,Sj,Sp),A.shape) - -def sa_no_threshold(A): - if not scipy.sparse.isspmatrix_csr(A): raise TypeError('expected sparse.csr_matrix') - - #tentative (non-smooth) interpolation operator I - Ij = multigridtools.sa_get_aggregates(A.shape[0],A.indptr,A.indices) - Ip = numpy.arange(len(Ij)+1) - Ix = numpy.ones(len(Ij)) - - return scipy.sparse.csr_matrix((Ix,Ij,Ip)) - - def sa_constant_interpolation(A,epsilon=0.08): if not scipy.sparse.isspmatrix_csr(A): raise TypeError('expected sparse.csr_matrix') @@ -91,44 +51,16 @@ def sa_interpolation(A,epsilon=0.08,omega=4.0/3.0): if not scipy.sparse.isspmatrix_csr(A): raise TypeError('expected sparse.csr_matrix') - print "SA on A ",A.shape - I = sa_constant_interpolation(A,epsilon) D_inv = diag_sparse(1.0/diag_sparse(A)) D_inv_A = D_inv * A D_inv_A *= -omega/inf_norm(D_inv_A) - - #S = (scipy.sparse.spidentity(A.shape[0]).T + D_inv_A) - #P = S*I - P = I + (D_inv_A*I) #same as P=S*I, but faster + P = I + (D_inv_A*I) #same as P=S*I, (faster?) 
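+ #(D_inv_A was scaled above to -(omega/inf_norm(D_inv_A))*D^-1*A, so
+ #this line computes P = (Id - (omega/rho)*D^-1*A)*I: damped-Jacobi
+ #smoothing of the tentative prolongator I, where rho is an infinity-
+ #norm bound on the spectral radius of D^-1*A; Id is never formed
+ #explicitly)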
- return P,I + return P -##def sa_interpolation(A,epsilon=0.08,omega=4.0/3.0): -## if not scipy.sparse.isspmatrix_csr(A): raise TypeError('expected sparse.csr_matrix') - -## S = sa_strong_connections(A,epsilon) - -## print "SA on A ",A.shape -## #tentative (non-smooth) interpolation operator I -## Ij = multigridtools.sa_get_aggregates(A.shape[0],S.indptr,S.indices) -## Ip = numpy.arange(len(Ij)+1) -## Ix = numpy.ones(len(Ij)) - -## I = scipy.sparse.csr_matrix((Ix,Ij,Ip)) - -## # (I - \omega D^-1 Af) -## Jp,Jj,Jx = multigridtools.sa_smoother(A.shape[0],omega, -## A.indptr,A.indices,A.data, -## S.indptr,S.indices,S.data) - -## J = scipy.sparse.csr_matrix((Jx,Jj,Jp)) - -## return J*I - - Added: trunk/scipy/sandbox/multigrid/multigridtools/complex_ops.h =================================================================== --- trunk/scipy/sandbox/multigrid/multigridtools/complex_ops.h 2007-08-22 15:55:13 UTC (rev 3253) +++ trunk/scipy/sandbox/multigrid/multigridtools/complex_ops.h 2007-08-22 18:26:02 UTC (rev 3254) @@ -0,0 +1 @@ +link ../../../sparse/sparsetools/complex_ops.h \ No newline at end of file Property changes on: trunk/scipy/sandbox/multigrid/multigridtools/complex_ops.h ___________________________________________________________________ Name: svn:special + * Added: trunk/scipy/sandbox/multigrid/multigridtools/numpy.i =================================================================== --- trunk/scipy/sandbox/multigrid/multigridtools/numpy.i 2007-08-22 15:55:13 UTC (rev 3253) +++ trunk/scipy/sandbox/multigrid/multigridtools/numpy.i 2007-08-22 18:26:02 UTC (rev 3254) @@ -0,0 +1 @@ +link ../../../sparse/sparsetools/numpy.i \ No newline at end of file Property changes on: trunk/scipy/sandbox/multigrid/multigridtools/numpy.i ___________________________________________________________________ Name: svn:special + * Added: trunk/scipy/sandbox/multigrid/multigridtools/relaxation.h =================================================================== --- trunk/scipy/sandbox/multigrid/multigridtools/relaxation.h 2007-08-22 15:55:13 UTC (rev 3253) +++ trunk/scipy/sandbox/multigrid/multigridtools/relaxation.h 2007-08-22 18:26:02 UTC (rev 3254) @@ -0,0 +1,75 @@ +#ifndef RELAXATION_H +#define RELAXATION_H + +#include +#include + +template +void gauss_seidel(const I n_row, + const I Ap[], + const I Aj[], + const T Ax[], + T x[], + const T b[], + const I row_start, + const I row_stop, + const I row_step) +{ + for(I i = row_start; i != row_stop; i += row_step) { + I start = Ap[i]; + I end = Ap[i+1]; + T rsum = 0; + T diag = 0; + + for(I jj = start; jj < end; jj++){ + I j = Aj[jj]; + if (i == j) + diag = Ax[jj]; + else + rsum += Ax[jj]*x[j]; + } + + assert(diag != 0); + + x[i] = (b[i] - rsum)/diag; + } +} + +template +void jacobi(const I n_row, + const I Ap[], + const I Aj[], + const T Ax[], + T x[], + const T b[], + T temp[], + const I row_start, + const I row_stop, + const I row_step, + const T omega) +{ + std::copy(x,x+n_row,temp); + + for(I i = row_start; i != row_stop; i += row_step) { + I start = Ap[i]; + I end = Ap[i+1]; + T rsum = 0; + T diag = 0; + + for(I jj = start; jj < end; jj++){ + I j = Aj[jj]; + if (i == j) + diag = Ax[jj]; + else + rsum += Ax[jj]*temp[j]; + } + + assert(diag != 0); + + x[i] = (1 - omega) * temp[i] + omega * ((b[i] - rsum)/diag); + } +} + + +#endif + Modified: trunk/scipy/sandbox/multigrid/multigridtools/smoothed_aggregation.h =================================================================== --- trunk/scipy/sandbox/multigrid/multigridtools/smoothed_aggregation.h 
2007-08-22 15:55:13 UTC (rev 3253) +++ trunk/scipy/sandbox/multigrid/multigridtools/smoothed_aggregation.h 2007-08-22 18:26:02 UTC (rev 3254) @@ -7,7 +7,7 @@ #include -#define DEBUG +//#define DEBUG template Modified: trunk/scipy/sandbox/multigrid/multilevel.py =================================================================== --- trunk/scipy/sandbox/multigrid/multilevel.py 2007-08-22 15:55:13 UTC (rev 3253) +++ trunk/scipy/sandbox/multigrid/multilevel.py 2007-08-22 18:26:02 UTC (rev 3254) @@ -1,723 +1,166 @@ -from scipy import ones,zeros,rand,array,array_split,hstack,transpose,sum,ones_like,sqrt,concatenate -from scipy.sparse import spidentity,csr_matrix,coo_matrix from numpy.linalg import norm -from numpy import zeros_like,arange,inner,diff,ravel - -import pydec -from pydec import diag_sparse,inf_norm, mls_polynomial_coeffs,polynomial_smoother - -from multigrid import sa_interpolation,rs_interpolation,sa_constant_interpolation,sa_no_threshold -import multigrid -import multigridtools -from relaxation import gauss_seidel,jacobi - +from numpy import zeros_like import scipy import numpy +from multigrid import sa_interpolation,rs_interpolation +from relaxation import gauss_seidel,jacobi -## import scipy.sandbox.arpack as arpack -## eigs,vecs = arpack.eigen(A,maxiter=10) -## raise ValueError -## return eigs.max() - -def avg_work_per_digit(ml_solver,residuals): - digits = numpy.log(residuals[0]/residuals[-1])/numpy.log(10) - return (ml_solver.cycle_complexity() * len(residuals)) / digits +def poisson_problem1D(N): + """ + Return a sparse CSC matrix for the 1d poisson problem + with standard 3-point finite difference stencil on a + grid with N points. + """ + D = 2*numpy.ones(N) + O = -numpy.ones(N) + return scipy.sparse.spdiags([D,O,O],[0,-1,1],N,N) -def avg_convergence_rate(residuals): - return (residuals[-1]/residuals[0]) ** (1.0/len(residuals)) +def poisson_problem2D(N): + """ + Return a sparse CSC matrix for the 2d poisson problem + with standard 5-point finite difference stencil on a + square N-by-N grid. 
+ """ + D = 4*numpy.ones(N*N) + T = -numpy.ones(N*N) + O = -numpy.ones(N*N) + T[N-1::N] = 0 + return scipy.sparse.spdiags([D,O,T,T,O],[0,-N,-1,1,N],N*N,N*N) +def ruge_stuben_solver(A,max_levels=10,max_coarse=500): + As = [A] + Ps = [] + + while len(As) < max_levels and A.shape[0] > max_coarse: + P = rs_interpolation(A) + + A = (P.T.tocsr() * A) * P #galerkin operator -def asym_work_per_digit(ml_solver,residuals): - digits = numpy.log(residuals[-2]/residuals[-1])/numpy.log(10) - return (ml_solver.cycle_complexity()) / digits + As.append(A) + Ps.append(P) + + return multilevel_solver(As,Ps) - - - -class coarse_grid_solver: - def __init__(self,A,options): - self.opts = options +def smoothed_aggregation_solver(A,max_levels=10,max_coarse=500): + As = [A] + Ps = [] + + while len(As) < max_levels and A.shape[0] > max_coarse: + P = sa_interpolation(A) - self.A = A + A = (P.T.tocsr() * A) * P #galerkin operator - solver = self.opts['coarse: type'] + As.append(A) + Ps.append(P) - if solver == 'pinv': - self.pinv = scipy.linalg.pinv(self.A.todense()) - self.nnz = self.pinv.size - self.__solve = lambda b : numpy.dot(self.pinv,b) - elif solver == 'pinv2': - self.pinv = scipy.linalg.pinv2(self.A.todense()) - self.nnz = self.pinv.size - self.__solve = lambda b : numpy.dot(self.pinv,b) - elif solver == 'splu': - import scipy.linsolve.umfpack as um - self.umfpack = um.UmfpackContext() - self.umfpack.numeric( self.A ) - self.nnz = self.umfpack.info[um.umfDefines['UMFPACK_LU_ENTRIES']] - self.__solve = lambda b : self.umfpack.solve( um.UMFPACK_A, self.A, b, autoTranspose = True ) - elif solver in ['bicg','bicgstab','cg','cgs','gmres','qmr']: - #self.__solve = lambda b : scipy.linalg.cg(self.A,b,tol=1e-12,maxiter=100)[0] - #it_solver = getattr(scipy.linalg.iterative,solver) - - it_solver = pydec.numerical.iterative.cg - self.__solve = lambda b : it_solver(self.A,b,tol=1e-12)[0] - else: - raise ValueError,('unknown solver: %s' % solver) - - def solve(self,b): - #M = self.A.todense() - #val,vec = scipy.linalg.eig(M) - #pet = vec[:,val < 1e-8][:,0] - #print pet - #return self.__solve(b) + pet - return self.__solve(b) + return multilevel_solver(As,Ps) - def nnz(self): - return self.nnz - -class multilevel_solver(list): - class grid_data: - pass - - class options(dict): - def __repr__(self): - keys = sorted([k for k in self.keys() if ':' not in k]) - keys += sorted([k for k in self.keys() if ':' in k]) +class multilevel_solver: + def __init__(self,As,Ps): + self.As = As + self.Ps = Ps - output = "solver options:\n" - for k in keys: - output += " %-25s %-30s\n" % (k,self[k]) - return output - def sub_options(self,sub_opt): - """ - Filter options with a given prefix - - Example: - opts.sub_options('smoother:') - - """ - return dict([ (k,v) for (k,v) in self.iteritems() if k.startswith(sub_opt)]) - - def __init__(self,A,options=None): - assert(False) #should not instantiated - - def __repr__(self): - output = '%s\n'% type(self).__name__ - output += 'Number of Levels: %d (max: %d)\n' % (len(self),self.opts['max levels']) + output = 'multilevel_solver\n' + output += 'Number of Levels: %d\n' % len(self.As) output += 'Operator Complexity: %6.3f\n' % self.operator_complexity() output += 'Grid Complexity: %6.3f\n' % self.grid_complexity() - output += 'Cycle Complexity: %6.3f\n' % self.cycle_complexity() - total_nnz = sum([lvl.A.nnz for lvl in self]) + total_nnz = sum([A.nnz for A in self.As]) - for lvl,data in enumerate(self): - output += ' [level %2d] unknowns: %10d nnz: %5.2f%%\n' % 
(lvl,data.A.shape[1],(100*float(data.A.nnz)/float(total_nnz))) + for n,A in enumerate(self.As): + output += ' [level %2d] unknowns: %10d nnz: %5.2f%%\n' % (n,A.shape[1],(100*float(A.nnz)/float(total_nnz))) - #output += '\n' + repr(self.opts) return output - - def operator_complexity(self): """number of nonzeros on all levels / number of nonzeros on the finest level""" - return sum([lvl.A.nnz for lvl in self])/float(self[0].A.nnz) + return sum([A.nnz for A in self.As])/float(self.As[0].nnz) + def grid_complexity(self): """number of unknowns on all levels / number of unknowns on the finest level""" - return sum([lvl.A.shape[0] for lvl in self])/float(self[0].A.shape[0]) - def cycle_complexity(self): - """total FLOPs in one MG cycle / FLOPs in single smoother sweep on the finest level""" - return self.cycle_flops()/float(self[0].A.nnz) - def cycle_flops(self): - """total FLOPs in one MG cycle""" - total_flops = 0 - - gamma = self.opts['cycle: gamma'] - passes = self.opts['smoother: passes'] - - if self.opts['smoother: type'] in ['jacobi','symmetric gauss-seidel','richardson']: - passes *= 2 - passes += 1 #residual computation + return sum([A.shape[0] for A in self.As])/float(self.As[0].shape[0]) + - if self.opts['smoother: type'] in ['polynomial']: - print "poly degree:",len(self.opts['smoother: omega'][-1]) - passes *= 2*len(self.opts['smoother: omega'][-1]) - #residual computation already factored in - + def solve(self, b, x0=None, tol=1e-5, maxiter=100, callback=None, return_residuals=False): + """ + TODO + """ - for n,lvl in enumerate(self): - total_flops += (gamma**n)*lvl.A.nnz*passes - - #account for iterative solver using this as a preconditioner - if self.opts['solver: type'] != 'standalone': - total_flops += self.A.nnz - - return total_flops - - def solve(self,b, x0=None, tol=1e-5, maxiter=100, callback=None, return_residuals=False, precond=False): - if x0 is None: - x = zeros(b.shape,max(self.A.dtype,b.dtype)) + x = zeros_like(b) else: - x = x0.copy() + x = array(x0) - #was invoked as a preconditioner - if precond: - #return b #no precond + #TODO change use of tol (relative tolerance) to agree with other iterative solvers + A = self.As[0] + residuals = [norm(b-A*x,2)] + + while len(residuals) <= maxiter and residuals[-1]/residuals[0] > tol: self.__solve(0,x,b) - return x + residuals.append(scipy.linalg.norm(b-A*x,2)) - if self.opts['solver: type'] == 'standalone': - residuals = [norm(b-self[0].A*x,2)] + if callback is not None: + callback(x) - while len(residuals) <= maxiter and residuals[-1]/residuals[0] > tol: - self.__solve(0,x,b) - - residuals.append(scipy.linalg.norm(b-self[0].A*x,2)) - - if callback is not None: - callback(x) - - else: - #using acceleration - - #residuals = [scipy.linalg.norm(b-self[0].A*x,2)] - #callback = lambda x_k : residuals.append(scipy.linalg.norm(b-self[0].A*x_k,2)) - #solver = getattr(scipy.linalg.iterative,self.opts['solver: type']) - - assert(self.opts['solver: type'] == 'cg') #only CG supported now - solver = pydec.iterative.cg - - mtx = self[0].A - mtx.psolve = lambda b : self.solve(b,precond=True) - - x,residuals = solver(mtx,b,x0=x,tol=tol,maxiter=maxiter,callback=callback) - if return_residuals: return x,residuals else: return x - - - - - def __smooth(self,lvl,x,b,which): - smoother_type = self.opts['smoother: type'] - smoother_passes = self.opts['smoother: passes'] - - A = self[lvl].A - if smoother_type == 'jacobi': - omega = self.opts['smoother: omega'][lvl] - jacobi(A,x,b,iterations=smoother_passes,omega=omega) - elif smoother_type == 
'richardson': - omega = self.opts['smoother: omega'][lvl] - x += omega*(b - A*x) - elif smoother_type == 'polynomial': - coeffs = self.opts['smoother: omega'][lvl] - polynomial_smoother(A,x,b,coeffs) - elif smoother_type == 'symmetric gauss-seidel': - if which == 'pre': - gauss_seidel(A,x,b,iterations=smoother_passes,sweep="forward") - else: - gauss_seidel(A,x,b,iterations=smoother_passes,sweep="backward") - else: - raise ValueError,'unknown smoother' def __solve(self,lvl,x,b): - - if len(self) == 1: - x[:] = self[0].coarse_solver.solve(b) - return - - A = self[lvl].A - - self.__smooth(lvl,x,b,which='pre') - - residual = b - A*x - - coarse_x = zeros((self[lvl+1].A.shape[0])) - coarse_b = self[lvl].P.T * residual - - if lvl == len(self) - 2: - coarse_x[:] = self[-1].coarse_solver.solve(coarse_b) - else: - for i in range(self.opts['cycle: gamma']): - self.__solve(lvl+1,coarse_x,coarse_b) - - x += self[lvl].P * coarse_x - - self.__smooth(lvl,x,b,which='post') - - - - - - -class scalar_solver(multilevel_solver): - def __init__(self,A,options=None): - self.A = A - - if options is None: - self.opts = scalar_solver.default_options() - else: - self.opts = options - - self.__construct_hierarchy() - - def default_options(): - opts = multilevel_solver.options() - opts['max levels'] = 8 - opts['cycle: gamma'] = 1 - opts['coarse: type'] = 'splu' - opts['coarse: max size'] = 2000 - opts['aggregation: type'] = 'SA' - opts['aggregation: epsilon'] = 0.05 - opts['smoother: passes'] = 1 - opts['smoother: type'] = 'symmetric gauss-seidel' -# opts['smoother: type'] = 'jacobi' - opts['solver: type'] = 'cg' - return opts - default_options = staticmethod(default_options) - - def __construct_hierarchy(self): - A = self.A - - agg_type = self.opts['aggregation: type'] - max_levels = self.opts['max levels'] - max_coarse = self.opts['coarse: max size'] - - while len(self) < max_levels and A.shape[0] > max_coarse: - self.append(self.grid_data()) - - if agg_type == 'SA': - P,I = sa_interpolation(A) - elif agg_type == 'RS': - P = rs_interpolation(A) - else: - raise ValueError,'unknown aggregation type: %s' % agg_type - - self[-1].A = A - self[-1].P = P - - A = (P.T.tocsr() * A) * P - - self.append(self.grid_data()) - - self[-1].coarse_solver = coarse_grid_solver(A,self.opts.sub_options('coarse:')) - self[-1].A = A - if self.opts['smoother: type'] == 'jacobi': - omegas = [] - for lvl in self: - A = lvl.A - D_inv = diag_sparse(1.0/diag_sparse(A)) - - D_inv_A = D_inv * A - omegas.append((4.0/3.0)/inf_norm(D_inv_A)) - self.opts['smoother: omega'] = omegas - - -class multilevel_solver2: - def __init__(self,As,Ps,options=None): - self.As = As - self.Ps = Ps - self.ops = options - - def solve(self,b, x0=None, tol=1e-5, maxiter=100, callback=None, return_residuals=False): - - if x0 is None: - x = zeros(b.shape,max(self.A.dtype,b.dtype)) - else: - x = array(x0) - - self.__solve(0,x,b) - - return x - - def __solve(self,lvl,x,b): - A = self.As[lvl] if len(self.As) == 1: x[:] = scipy.linalg.solve(A.todense(),b) return x - - self.__smooth(lvl,x,b,which='pre') + self.presmoother(A,x,b) - residual = b - A*x + residual = b - A*x coarse_x = zeros((self.As[lvl+1].shape[0])) coarse_b = self.Ps[lvl].T * residual if lvl == len(self.As) - 2: - pass coarse_x[:] = scipy.linalg.solve(self.As[-1].todense(),coarse_b) - #coarse_x[:] = self[-1].coarse_solver.solve(coarse_b) #next level is coarsest else: self.__solve(lvl+1,coarse_x,coarse_b) - x += self.Ps[lvl] * coarse_x + x += self.Ps[lvl] * coarse_x #coarse grid correction - 
self.__smooth(lvl,x,b,which='post') + self.postsmoother(A,x,b) - def __smooth(self,lvl,x,b,which): - A = self.As[lvl] - if which == 'pre': - gauss_seidel(A,x,b,iterations=1,sweep="forward") - else: - gauss_seidel(A,x,b,iterations=1,sweep="backward") - - -def inf_norm(A): - return abs(A).sum(axis=1).max() #max abs row sum - -def fit_candidate(I,x): - """ - For each aggregate in I (i.e. each column of I) compute vector R and - sparse matrix Q (having the sparsity of I) such that the following holds: - - Q*R = x and Q^T*Q = I - - In otherwords, find a prolongator Q with orthonormal columns so that - x is represented exactly on the coarser level by R. - """ - Q = csr_matrix((x.copy(),I.indices,I.indptr),dims=I.shape,check=False) - R = sqrt(numpy.ravel(csr_matrix((x*x,I.indices,I.indptr),dims=I.shape,check=False).sum(axis=0))) #column 2-norms - Q.data *= (1.0/R)[Q.indices] - print "norm(Q*R - x)",linalg.norm(Q*R - x) - return Q,R - - -def scaled_columns_csr(A,scales): - scales = numpy.ravel(scales) - A = A.copy() - A.data *= scales[A.indices] - return A - -def orthonormalize_candidate(I,x,basis): - Px = csr_matrix((x,I.indices,I.indptr),dims=I.shape,check=False) - Rs = [] - #othogonalize columns of Px against other candidates - for b in basis: - Pb = csr_matrix((b,I.indices,I.indptr),dims=I.shape,check=False) - R = ravel(csr_matrix((Pb.data*Px.data,I.indices,I.indptr),dims=I.shape,check=False).sum(axis=0)) # columnwise projection of Px on Pb - Px.data -= R[I.indices] * Pb.data #subtract component in b direction - Rs.append(R) - - #filter columns here, set unused cols to 0, add to mask + def presmoother(self,A,x,b): + gauss_seidel(A,x,b,iterations=1,sweep="forward") - #normalize columns of Px - R = ravel(csr_matrix((x**x,I.indices,I.indptr),dims=I.shape,check=False).sum(axis=0)) - Px.data *= (1.0/R)[I.indices] - Rs.append(R.reshape(-1,1)) - return Rs + def postsmoother(self,A,x,b): + gauss_seidel(A,x,b,iterations=1,sweep="backward") -def hstack_csr(A,B): - #OPTIMIZE THIS - assert(A.shape[0] == B.shape[0]) - A = A.tocoo() - B = B.tocoo() - I = concatenate((A.row,B.row)) - J = concatenate((A.col,B.col+A.shape[1])) - V = concatenate((A.data,B.data)) - return coo_matrix((V,(I,J)),dims=(A.shape[0],A.shape[1]+B.shape[1])).tocsr() -def vstack_csr(A,B): - #OPTIMIZE THIS - assert(A.shape[1] == B.shape[1]) - A = A.tocoo() - B = B.tocoo() - I = concatenate((A.row,B.row+A.shape[0])) - J = concatenate((A.col,B.col)) - V = concatenate((A.data,B.data)) - return coo_matrix((V,(I,J)),dims=(A.shape[0]+B.shape[0],A.shape[1])).tocsr() +if __name__ == '__main__': + from scipy import * + A = poisson_problem2D(100).T + asa = smoothed_aggregation_solver(A) + #asa = ruge_stuben_solver(A) + x = rand(A.shape[0]) + b = zeros_like(x) - - -def orthonormalize_prolongator(P_l,x_l,W_l,W_m): - """ - - """ - X = csr_matrix((x_l,W_l.indices,W_l.indptr),dims=W_l.shape,check=False) #candidate prolongator (assumes every value from x is used) + resid = [] - R = (P_l.T.tocsr() * X) # R has at most 1 nz per row - X = X - P_l*R # othogonalize X against P_l - - #DROP REDUNDANT COLUMNS FROM P (AND R?) HERE (NULL OUT R ACCORDINGLY?) 
- #REMOVE CORRESPONDING COLUMNS FROM W_l AND ROWS FROM A_m ALSO - W_l_new = W_l - W_m_new = W_m + for n in range(10): + x = asa.solve(b,x,maxiter=1) + resid.append(linalg.norm(A*x)) - #normalize surviving columns of X - col_norms = ravel(sqrt(csr_matrix((X.data*X.data,X.indices,X.indptr),dims=X.shape,check=False).sum(axis=0))) - print "zero cols",sum(col_norms == 0) - print "small cols",sum(col_norms < 1e-8) - Xcopy = X.copy() - X.data *= (1.0/col_norms)[X.indices] - P_l_new = hstack_csr(P_l,X) - #check orthonormality - print "norm(P.T*P - I) ",scipy.linalg.norm((P_l_new.T * P_l_new - scipy.sparse.spidentity(P_l_new.shape[1])).data) - #assert(scipy.linalg.norm((P_l_new.T * P_l_new - scipy.sparse.spidentity(P_l_new.shape[1])).data)<1e-8) - - x_m = zeros(P_l_new.shape[1],dtype=x_l.dtype) - x_m[:P_l.shape[1]][diff(R.indptr).astype('bool')] = R.data - x_m[P_l.shape[1]:] = col_norms - - print "||x_l - P_l*x_m||",scipy.linalg.norm(P_l_new* x_m - x_l) #see if x_l is represented exactly - - return P_l_new,x_m,W_l,W_m - - - -def prolongation_smoother(A): - omega = (4.0/3.0)/inf_norm(A) - S = (spidentity(A.shape[0]).T - omega*A) - return S - - -def smoothed_prolongator(P,A): - #just use Richardson for now - omega = 4.0/(3.0*inf_norm(A)) - return P - omega*(A*P) - - - - -def sa_hierarchy(A,Ws,x): - """ - Construct multilevel hierarchy using Smoothed Aggregation - Inputs: - A - matrix - Is - list of constant prolongators - x - "candidate" basis function to be approximated - Ouputs: - (As,Is,Ps) - tuple of lists - - As - [A, Ps[0].T*A*Ps[0], Ps[1].T*A*Ps[1], ... ] - - Is - smoothed prolongators - - Ps - tentative prolongators - """ - Ps = [] - Is = [] - As = [A] - - for W in Ws: - P,x = fit_candidate(W,x) - I = smoothed_prolongator(P,A) - A = I.T.tocsr() * A * I - As.append(A) - Ps.append(P) - Is.append(I) - return As,Is,Ps - -def make_bridge(I,N): - tail = I.indptr[-1].repeat(N - I.shape[0]) - ptr = concatenate((I.indptr,tail)) - return csr_matrix((I.data,I.indices,ptr),dims=(N,I.shape[1]),check=False) - -class adaptive_sa_solver: - def __init__(self,A,options=None): - self.A = A - - self.Rs = [] - self.__construct_hierarchy(A) - - def __construct_hierarchy(self,A): - #if self.A.shape[0] <= self.opts['coarse: max size']: - # raise ValueError,'small matrices not handled yet' - - x,AggOps = self.__initialization_stage(A) #first candidate - Ws = AggOps - - #x[:] = 1 #TEST - - self.candidates = [x] - - #create SA using x here - As,Is,Ps = sa_hierarchy(A,Ws,x) - - for i in range(0): - x = self.__develop_candidate(A,As,Is,Ps,Ws,AggOps) - #x[:] = arange(x.shape[0]) - #x[x.shape[0]/2:] = 2*x[x.shape[0]/2] - x[x.shape[0]/2:] - As,Is,Ps,Ws = self.__augment_cycle(A,As,Ps,Ws,AggOps,x) - - self.candidates.append(x) - - #As,Is,Ps = sa_hierarchy(A,AggOps,x) #TESTING - self.Ps = Ps - self.solver = multilevel_solver2(As,Is) - - - - def __develop_candidate(self,A,As,Is,Ps,Ws,AggOps): - x = rand(A.shape[0]) - b = zeros_like(x) - - #x[:] = 1 #TEST - - mu = 5 - - solver = multilevel_solver2(As,Is) - - for n in range(mu): - x = solver.solve(b, x0=x, tol=1e-8, maxiter=1) - #TEST FOR CONVERGENCE HERE - - A_l,P_l,W_l,x_l = As[0],Ps[0],Ws[0],x - - temp_Is = [] - for i in range(len(As) - 2): - P_l_new, x_m, W_l_new, W_m_new = orthonormalize_prolongator(P_l, x_l, W_l, AggOps[i+1]) - - I_l_new = smoothed_prolongator(P_l_new,A_l) - A_m_new = I_l_new.T.tocsr() * A_l * I_l_new - bridge = make_bridge(Is[i+1],A_m_new.shape[0]) - - temp_solver = multilevel_solver2( [A_m_new] + As[i+2:], [bridge] + Is[i+2:] ) - - for n in range(mu): - 
x_m = temp_solver.solve(zeros_like(x_m), x0=x_m, tol=1e-8, maxiter=1) - - temp_Is.append(I_l_new) - - W_l = vstack_csr(Ws[i+1],W_m_new) #prepare for next iteration - A_l = A_m_new - x_l = x_m - P_l = make_bridge(Ps[i+1],A_m_new.shape[0]) - - x = x_l - for I in reversed(temp_Is): - x = I*x - - return x - - - def __augment_cycle(self,A,As,Ps,Ws,AggOps,x): - #As,Is,Ps,Ws = self.__augment_cycle(A,Ps,Ws,AggOps,x) - - #make a new cycle using the new candidate - A_l,P_l,W_l,x_l = As[0],Ps[0],AggOps[0],x - - new_As,new_Is,new_Ps,new_Ws = [A],[],[],[AggOps[0]] - - for i in range(len(As) - 2): - P_l_new, x_m, W_l_new, W_m_new = orthonormalize_prolongator(P_l, x_l, W_l, AggOps[i+1]) - - I_l_new = smoothed_prolongator(P_l_new,A_l) - A_m_new = I_l_new.T.tocsr() * A_l * I_l_new - W_m_new = vstack_csr(Ws[i+1],W_m_new) - - new_As.append(A_m_new) - new_Ws.append(W_m_new) - new_Is.append(I_l_new) - new_Ps.append(P_l_new) - - #prepare for next iteration - W_l = W_m_new - A_l = A_m_new - x_l = x_m - P_l = make_bridge(Ps[i+1],A_m_new.shape[0]) - - P_l_new, x_m, W_l_new, W_m_new = orthonormalize_prolongator(P_l, x_l, W_l, csr_matrix((P_l.shape[1],1))) - I_l_new = smoothed_prolongator(P_l_new,A_l) - A_m_new = I_l_new.T.tocsr() * A_l * I_l_new - - new_As.append(A_m_new) - new_Is.append(I_l_new) - new_Ps.append(P_l_new) - - return new_As,new_Is,new_Ps,new_Ws - - - def __initialization_stage(self,A): - max_levels = 10 - max_coarse = 50 - - AggOps = [] - Is = [] - - # aSA parameters - mu = 5 # number of test relaxation iterations - epsilon = 0.1 # minimum acceptable relaxation convergence factor - - #step 1 - A_l = A - x = scipy.rand(A_l.shape[0]) - skip_f_to_i = False - - #step 2 - b = zeros_like(x) - gauss_seidel(A_l,x,b,iterations=mu) - #step 3 - #test convergence rate here - - while len(AggOps) < max_levels and A_l.shape[0] > max_coarse: - W_l = sa_constant_interpolation(A_l) #step 4b - #W_l = sa_no_threshold(A_l) #step 4b TEST - P_l,x = fit_candidate(W_l,x) #step 4c - I_l = smoothed_prolongator(P_l,A_l) #step 4d - A_l = I_l.T.tocsr() * A_l * I_l #step 4e - - AggOps.append(W_l) - Is.append(I_l) - - if A_l.shape <= max_coarse: break - - if not skip_f_to_i: - print "." 
- x_hat = x.copy() #step 4g - gauss_seidel(A_l,x,zeros_like(x),iterations=mu) #step 4h - x_A_x = inner(x,A_l*x) - if (x_A_x/inner(x_hat,A_l*x_hat))**(1.0/mu) < epsilon: #step 4i - print "sufficient convergence, skipping" - skip_f_to_i = True - if x_A_x == 0: - x = x_hat #need to restore x - - #update fine-level candidate - for I in reversed(Is): - x = I * x - - #gauss_seidel(A,x,zeros_like(x),iterations=mu) #TEST - - #x[:] = 1 #TEST - - return x,AggOps #first candidate,aggregation - - - -from scipy import * -from pydec import diag_sparse -from multigrid import poisson_problem,poisson_problem1D -#A = poisson_problem(100).T -A = poisson_problem1D(100).T -D = diag_sparse(1.0/sqrt(10**(12*rand(A.shape[0])-6))).tocsr() -A = D * A * D -#A = A*A -#A = io.mmread("nos2.mtx").tocsr() -asa = adaptive_sa_solver(A) -x = rand(A.shape[0]) -b = zeros_like(x) - -resid = [] - -for n in range(50): - x = asa.solver.solve(b,x) - resid.append(linalg.norm(A*x)) - - - - From scipy-svn at scipy.org Wed Aug 22 14:35:13 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 22 Aug 2007 13:35:13 -0500 (CDT) Subject: [Scipy-svn] r3255 - trunk/scipy/sandbox/multigrid Message-ID: <20070822183513.63B4139C2BD@new.scipy.org> Author: wnbell Date: 2007-08-22 13:35:07 -0500 (Wed, 22 Aug 2007) New Revision: 3255 Added: trunk/scipy/sandbox/multigrid/coarsen.py Removed: trunk/scipy/sandbox/multigrid/multigrid.py Modified: trunk/scipy/sandbox/multigrid/multilevel.py Log: moved multigrid.py to coarsen.py Copied: trunk/scipy/sandbox/multigrid/coarsen.py (from rev 3254, trunk/scipy/sandbox/multigrid/multigrid.py) Deleted: trunk/scipy/sandbox/multigrid/multigrid.py =================================================================== --- trunk/scipy/sandbox/multigrid/multigrid.py 2007-08-22 18:26:02 UTC (rev 3254) +++ trunk/scipy/sandbox/multigrid/multigrid.py 2007-08-22 18:35:07 UTC (rev 3255) @@ -1,66 +0,0 @@ -from scipy import * - -import multigridtools -import scipy -import numpy - -from pydec import diag_sparse,inf_norm - - -def rs_strong_connections(A,theta): - if not scipy.sparse.isspmatrix_csr(A): raise TypeError('expected sparse.csr_matrix') - - Sp,Sj,Sx = multigridtools.rs_strong_connections(A.shape[0],theta,A.indptr,A.indices,A.data) - return scipy.sparse.csr_matrix((Sx,Sj,Sp),A.shape) - - -def rs_interpolation(A,theta=0.25): - if not scipy.sparse.isspmatrix_csr(A): raise TypeError('expected sparse.csr_matrix') - - S = rs_strong_connections(A,theta) - - T = S.T.tocsr() - - Ip,Ij,Ix = multigridtools.rs_interpolation(A.shape[0],\ - A.indptr,A.indices,A.data,\ - S.indptr,S.indices,S.data,\ - T.indptr,T.indices,T.data) - - return scipy.sparse.csr_matrix((Ix,Ij,Ip)) - - -def sa_strong_connections(A,epsilon): - if not scipy.sparse.isspmatrix_csr(A): raise TypeError('expected sparse.csr_matrix') - - Sp,Sj,Sx = multigridtools.sa_strong_connections(A.shape[0],epsilon,A.indptr,A.indices,A.data) - return scipy.sparse.csr_matrix((Sx,Sj,Sp),A.shape) - -def sa_constant_interpolation(A,epsilon=0.08): - if not scipy.sparse.isspmatrix_csr(A): raise TypeError('expected sparse.csr_matrix') - - S = sa_strong_connections(A,epsilon) - - #tentative (non-smooth) interpolation operator I - Ij = multigridtools.sa_get_aggregates(A.shape[0],S.indptr,S.indices) - Ip = numpy.arange(len(Ij)+1) - Ix = numpy.ones(len(Ij)) - - return scipy.sparse.csr_matrix((Ix,Ij,Ip)) - - -def sa_interpolation(A,epsilon=0.08,omega=4.0/3.0): - if not scipy.sparse.isspmatrix_csr(A): raise TypeError('expected sparse.csr_matrix') - - I = 
sa_constant_interpolation(A,epsilon) - - D_inv = diag_sparse(1.0/diag_sparse(A)) - - D_inv_A = D_inv * A - D_inv_A *= -omega/inf_norm(D_inv_A) - - P = I + (D_inv_A*I) #same as P=S*I, (faster?) - - return P - - - Modified: trunk/scipy/sandbox/multigrid/multilevel.py =================================================================== --- trunk/scipy/sandbox/multigrid/multilevel.py 2007-08-22 18:26:02 UTC (rev 3254) +++ trunk/scipy/sandbox/multigrid/multilevel.py 2007-08-22 18:35:07 UTC (rev 3255) @@ -3,7 +3,7 @@ import scipy import numpy -from multigrid import sa_interpolation,rs_interpolation +from coarsen import sa_interpolation,rs_interpolation from relaxation import gauss_seidel,jacobi @@ -149,7 +149,7 @@ if __name__ == '__main__': from scipy import * - A = poisson_problem2D(100).T + A = poisson_problem2D(200).T asa = smoothed_aggregation_solver(A) #asa = ruge_stuben_solver(A) x = rand(A.shape[0]) From scipy-svn at scipy.org Wed Aug 22 15:10:32 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 22 Aug 2007 14:10:32 -0500 (CDT) Subject: [Scipy-svn] r3256 - in trunk/scipy/sandbox: . multigrid Message-ID: <20070822191032.70C2C39C0F9@new.scipy.org> Author: wnbell Date: 2007-08-22 14:10:26 -0500 (Wed, 22 Aug 2007) New Revision: 3256 Added: trunk/scipy/sandbox/multigrid/info.py trunk/scipy/sandbox/multigrid/setup.py trunk/scipy/sandbox/multigrid/utils.py Modified: trunk/scipy/sandbox/multigrid/coarsen.py trunk/scipy/sandbox/setup.py Log: added config for multigrid subpackage Modified: trunk/scipy/sandbox/multigrid/coarsen.py =================================================================== --- trunk/scipy/sandbox/multigrid/coarsen.py 2007-08-22 18:35:07 UTC (rev 3255) +++ trunk/scipy/sandbox/multigrid/coarsen.py 2007-08-22 19:10:26 UTC (rev 3256) @@ -4,7 +4,7 @@ import scipy import numpy -from pydec import diag_sparse,inf_norm +from utils import diag_sparse,inf_norm def rs_strong_connections(A,theta): Added: trunk/scipy/sandbox/multigrid/info.py =================================================================== --- trunk/scipy/sandbox/multigrid/info.py 2007-08-22 18:35:07 UTC (rev 3255) +++ trunk/scipy/sandbox/multigrid/info.py 2007-08-22 19:10:26 UTC (rev 3256) @@ -0,0 +1,5 @@ +""" +TODO Describe AMG solvers +""" + +postpone_import = 1 Added: trunk/scipy/sandbox/multigrid/setup.py =================================================================== --- trunk/scipy/sandbox/multigrid/setup.py 2007-08-22 18:35:07 UTC (rev 3255) +++ trunk/scipy/sandbox/multigrid/setup.py 2007-08-22 19:10:26 UTC (rev 3256) @@ -0,0 +1,24 @@ +#!/usr/bin/env python + +from os.path import join +import sys + +def configuration(parent_package='',top_path=None): + import numpy + from numpy.distutils.misc_util import Configuration + + config = Configuration('multigrid',parent_package,top_path) + + config.add_data_dir('tests') + + # Adding a Python file as a "source" file for an extension is something of + # a hack, but it works to put it in the right place. 
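+    # (The pure-Python SWIG proxy, multigridtools.py, is listed among the
+    # extension sources so that the build copies it alongside the compiled
+    # _multigridtools module.)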
+ sources = [join('multigridtools', x) for x in ['multigridtools.py', 'multigridtools_wrap.cxx']] + config.add_extension('_multigridtools', + sources=sources, + include_dirs=['multigridtools']) + return config + +if __name__ == '__main__': + from numpy.distutils.core import setup + setup(**configuration(top_path='').todict()) Property changes on: trunk/scipy/sandbox/multigrid/setup.py ___________________________________________________________________ Name: svn:executable + * Added: trunk/scipy/sandbox/multigrid/utils.py =================================================================== --- trunk/scipy/sandbox/multigrid/utils.py 2007-08-22 18:35:07 UTC (rev 3255) +++ trunk/scipy/sandbox/multigrid/utils.py 2007-08-22 19:10:26 UTC (rev 3256) @@ -0,0 +1,57 @@ +__all__ = ['inf_norm','diag_sparse'] + +import numpy,scipy,scipy.sparse,scipy.weave +from numpy import ravel,arange,array +from scipy.sparse import isspmatrix,isspmatrix_csr,isspmatrix_csc, \ + csr_matrix,csc_matrix + +def inf_norm(A): + """ + Infinity norm of a sparse matrix (maximum absolute row sum). This serves + as an upper bound on spectral radius. + """ + + if not isspmatrix_csr(A): + raise ValueError, 'expected csr_matrix' + + abs_A = csr_matrix((abs(A.data),A.indices,A.indptr),dims=A.shape,check=False) + return (abs_A * numpy.ones(A.shape[1],dtype=A.dtype)).max() + +def diag_sparse(A): + """ + If A is a sparse matrix (e.g. csr_matrix or csc_matrix) + - return the diagonal of A as an array + + Otherwise + - return a csr_matrix with A on the diagonal + """ + + if isspmatrix_csr(A) or isspmatrix_csc(A): + n_row = len(A.indptr) - 1 + data,indices,indptr = A.data,A.indices,A.indptr + + diag = numpy.zeros(n_row,dtype=A.dtype) + + code = """ + #line 33 "sparse.py" + + for(int i = 0; i < n_row; i++){ + for(int jj = indptr(i); jj < indptr(i+1); jj++){ + if(indices(jj) == i){ + diag(i) = data(jj); + } + } + } + """ + + err = scipy.weave.inline(code, + ['data', 'indices', 'indptr', 'n_row', 'diag'], + type_converters = scipy.weave.converters.blitz, + compiler = 'gcc') + return diag + elif isspmatrix(A): + return ravel(array([float(A[i,i]) for i in range(min(A.shape))])) + else: + return csr_matrix((A,arange(len(A)),arange(len(A)+1)),(len(A),len(A))) + + Modified: trunk/scipy/sandbox/setup.py =================================================================== --- trunk/scipy/sandbox/setup.py 2007-08-22 18:35:07 UTC (rev 3255) +++ trunk/scipy/sandbox/setup.py 2007-08-22 19:10:26 UTC (rev 3256) @@ -84,6 +84,10 @@ # Radial basis functions package #config.add_subpackage('rbf') + # Multigrid Solvers + #config.add_subpackage('multigrid') + + return config if __name__ == '__main__': From scipy-svn at scipy.org Wed Aug 22 15:42:42 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 22 Aug 2007 14:42:42 -0500 (CDT) Subject: [Scipy-svn] r3257 - in trunk/scipy/sandbox/multigrid: . 
multigridtools Message-ID: <20070822194242.572B3C7C043@new.scipy.org> Author: wnbell Date: 2007-08-22 14:42:37 -0500 (Wed, 22 Aug 2007) New Revision: 3257 Added: trunk/scipy/sandbox/multigrid/multigridtools/multigridtools.py Removed: trunk/scipy/sandbox/multigrid/multigridtools.py Log: small change to multigridtools Copied: trunk/scipy/sandbox/multigrid/multigridtools/multigridtools.py (from rev 3256, trunk/scipy/sandbox/multigrid/multigridtools.py) Deleted: trunk/scipy/sandbox/multigrid/multigridtools.py =================================================================== --- trunk/scipy/sandbox/multigrid/multigridtools.py 2007-08-22 19:10:26 UTC (rev 3256) +++ trunk/scipy/sandbox/multigrid/multigridtools.py 2007-08-22 19:42:37 UTC (rev 3257) @@ -1,123 +0,0 @@ -# This file was automatically generated by SWIG (http://www.swig.org). -# Version 1.3.32 -# -# Don't modify this file, modify the SWIG interface instead. -# This file is compatible with both classic and new-style classes. - -import _multigridtools -import new -new_instancemethod = new.instancemethod -try: - _swig_property = property -except NameError: - pass # Python < 2.2 doesn't have 'property'. -def _swig_setattr_nondynamic(self,class_type,name,value,static=1): - if (name == "thisown"): return self.this.own(value) - if (name == "this"): - if type(value).__name__ == 'PySwigObject': - self.__dict__[name] = value - return - method = class_type.__swig_setmethods__.get(name,None) - if method: return method(self,value) - if (not static) or hasattr(self,name): - self.__dict__[name] = value - else: - raise AttributeError("You cannot add attributes to %s" % self) - -def _swig_setattr(self,class_type,name,value): - return _swig_setattr_nondynamic(self,class_type,name,value,0) - -def _swig_getattr(self,class_type,name): - if (name == "thisown"): return self.this.own() - method = class_type.__swig_getmethods__.get(name,None) - if method: return method(self) - raise AttributeError,name - -def _swig_repr(self): - try: strthis = "proxy of " + self.this.__repr__() - except: strthis = "" - return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,) - -import types -try: - _object = types.ObjectType - _newclass = 1 -except AttributeError: - class _object : pass - _newclass = 0 -del types - - -U_NODE = _multigridtools.U_NODE -C_NODE = _multigridtools.C_NODE -F_NODE = _multigridtools.F_NODE - -def sa_get_aggregates(*args): - """sa_get_aggregates(int n_row, int Ap, int Aj, std::vector<(int)> Bj)""" - return _multigridtools.sa_get_aggregates(*args) - - -def rs_strong_connections(*args): - """ - rs_strong_connections(int n_row, float theta, int Ap, int Aj, float Ax, std::vector<(int)> Sp, - std::vector<(int)> Sj, - std::vector<(float)> Sx) - rs_strong_connections(int n_row, double theta, int Ap, int Aj, double Ax, - std::vector<(int)> Sp, std::vector<(int)> Sj, - std::vector<(double)> Sx) - """ - return _multigridtools.rs_strong_connections(*args) - -def rs_interpolation(*args): - """ - rs_interpolation(int n_nodes, int Ap, int Aj, float Ax, int Sp, int Sj, - float Sx, int Tp, int Tj, float Tx, std::vector<(int)> Bp, - std::vector<(int)> Bj, std::vector<(float)> Bx) - rs_interpolation(int n_nodes, int Ap, int Aj, double Ax, int Sp, int Sj, - double Sx, int Tp, int Tj, double Tx, std::vector<(int)> Bp, - std::vector<(int)> Bj, std::vector<(double)> Bx) - """ - return _multigridtools.rs_interpolation(*args) - -def sa_strong_connections(*args): - """ - sa_strong_connections(int n_row, float epsilon, int Ap, int Aj, float Ax, - 
std::vector<(int)> Sp, std::vector<(int)> Sj, - std::vector<(float)> Sx) - sa_strong_connections(int n_row, double epsilon, int Ap, int Aj, double Ax, - std::vector<(int)> Sp, std::vector<(int)> Sj, - std::vector<(double)> Sx) - """ - return _multigridtools.sa_strong_connections(*args) - -def sa_smoother(*args): - """ - sa_smoother(int n_row, float omega, int Ap, int Aj, float Ax, int Sp, - int Sj, float Sx, std::vector<(int)> Bp, - std::vector<(int)> Bj, std::vector<(float)> Bx) - sa_smoother(int n_row, double omega, int Ap, int Aj, double Ax, - int Sp, int Sj, double Sx, std::vector<(int)> Bp, - std::vector<(int)> Bj, std::vector<(double)> Bx) - """ - return _multigridtools.sa_smoother(*args) - -def gauss_seidel(*args): - """ - gauss_seidel(int n_row, int Ap, int Aj, float Ax, float x, float b, - int row_start, int row_stop, int row_step) - gauss_seidel(int n_row, int Ap, int Aj, double Ax, double x, double b, - int row_start, int row_stop, int row_step) - """ - return _multigridtools.gauss_seidel(*args) - -def jacobi(*args): - """ - jacobi(int n_row, int Ap, int Aj, float Ax, float x, float b, - float temp, int row_start, int row_stop, - int row_step, float omega) - jacobi(int n_row, int Ap, int Aj, double Ax, double x, double b, - double temp, int row_start, int row_stop, - int row_step, double omega) - """ - return _multigridtools.jacobi(*args) - From scipy-svn at scipy.org Wed Aug 22 15:56:46 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 22 Aug 2007 14:56:46 -0500 (CDT) Subject: [Scipy-svn] r3258 - trunk/scipy/sandbox/multigrid Message-ID: <20070822195646.C828039C0F9@new.scipy.org> Author: wnbell Date: 2007-08-22 14:56:43 -0500 (Wed, 22 Aug 2007) New Revision: 3258 Modified: trunk/scipy/sandbox/multigrid/multilevel.py trunk/scipy/sandbox/multigrid/simple_test.py Log: minor changes Modified: trunk/scipy/sandbox/multigrid/multilevel.py =================================================================== --- trunk/scipy/sandbox/multigrid/multilevel.py 2007-08-22 19:42:37 UTC (rev 3257) +++ trunk/scipy/sandbox/multigrid/multilevel.py 2007-08-22 19:56:43 UTC (rev 3258) @@ -1,5 +1,10 @@ +__all__ = ['poisson_problem1D','poisson_problem2D', + 'ruge_stuben_solver','smoothed_aggregation_solver', + 'multilevel_solver'] + + from numpy.linalg import norm -from numpy import zeros_like +from numpy import zeros,zeros_like,array import scipy import numpy @@ -10,17 +15,17 @@ def poisson_problem1D(N): """ - Return a sparse CSC matrix for the 1d poisson problem + Return a sparse CSR matrix for the 1d poisson problem with standard 3-point finite difference stencil on a grid with N points. """ D = 2*numpy.ones(N) O = -numpy.ones(N) - return scipy.sparse.spdiags([D,O,O],[0,-1,1],N,N) + return scipy.sparse.spdiags([D,O,O],[0,-1,1],N,N).tocsr() def poisson_problem2D(N): """ - Return a sparse CSC matrix for the 2d poisson problem + Return a sparse CSR matrix for the 2d poisson problem with standard 5-point finite difference stencil on a square N-by-N grid. 
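    (Note on the layout: grid unknowns are numbered row by row, so node (i,j)
    maps to index i*N + j, and the assignment T[N-1::N] = 0 in the body below
    zeroes the entries that would otherwise couple the last node of one grid
    row to the first node of the next.)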
""" @@ -28,7 +33,7 @@ T = -numpy.ones(N*N) O = -numpy.ones(N*N) T[N-1::N] = 0 - return scipy.sparse.spdiags([D,O,T,T,O],[0,-N,-1,1,N],N*N,N*N) + return scipy.sparse.spdiags([D,O,T,T,O],[0,-N,-1,1,N],N*N,N*N).tocsr() def ruge_stuben_solver(A,max_levels=10,max_coarse=500): As = [A] Modified: trunk/scipy/sandbox/multigrid/simple_test.py =================================================================== --- trunk/scipy/sandbox/multigrid/simple_test.py 2007-08-22 19:42:37 UTC (rev 3257) +++ trunk/scipy/sandbox/multigrid/simple_test.py 2007-08-22 19:56:43 UTC (rev 3258) @@ -2,13 +2,11 @@ from multigrid import * from scipy import * -A = poisson_problem(100).T -s = scalar_solver(A) +A = poisson_problem2D(200) +rs_solver = ruge_stuben_solver(A) b = rand(A.shape[0]) -x,res = s.solve(b,return_residuals=True) -r = (b - A*x) -print abs(r).max() +x,res = rs_solver.solve(b,return_residuals=True) +print res - From scipy-svn at scipy.org Thu Aug 23 14:08:55 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 23 Aug 2007 13:08:55 -0500 (CDT) Subject: [Scipy-svn] r3259 - trunk/scipy/sandbox/multigrid Message-ID: <20070823180855.BA33D39C0F3@new.scipy.org> Author: wnbell Date: 2007-08-23 13:08:52 -0500 (Thu, 23 Aug 2007) New Revision: 3259 Added: trunk/scipy/sandbox/multigrid/__init__.py Log: added missing init file Added: trunk/scipy/sandbox/multigrid/__init__.py =================================================================== --- trunk/scipy/sandbox/multigrid/__init__.py 2007-08-22 19:56:43 UTC (rev 3258) +++ trunk/scipy/sandbox/multigrid/__init__.py 2007-08-23 18:08:52 UTC (rev 3259) @@ -0,0 +1,10 @@ +"Multigrid Solvers" + +from info import __doc__ + +from multilevel import * + +__all__ = filter(lambda s:not s.startswith('_'),dir()) +from numpy.testing import NumpyTest +test = NumpyTest().test + From scipy-svn at scipy.org Thu Aug 23 16:52:41 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 23 Aug 2007 15:52:41 -0500 (CDT) Subject: [Scipy-svn] r3260 - in trunk/scipy/sparse: . sparsetools tests Message-ID: <20070823205241.CB58F39C0EB@new.scipy.org> Author: wnbell Date: 2007-08-23 15:52:36 -0500 (Thu, 23 Aug 2007) New Revision: 3260 Modified: trunk/scipy/sparse/sparse.py trunk/scipy/sparse/sparsetools/sparsetools.h trunk/scipy/sparse/sparsetools/sparsetools.i trunk/scipy/sparse/sparsetools/sparsetools.py trunk/scipy/sparse/sparsetools/sparsetools_wrap.cxx trunk/scipy/sparse/tests/test_sparse.py Log: added diagonal extraction to sparse Modified: trunk/scipy/sparse/sparse.py =================================================================== --- trunk/scipy/sparse/sparse.py 2007-08-23 18:08:52 UTC (rev 3259) +++ trunk/scipy/sparse/sparse.py 2007-08-23 20:52:36 UTC (rev 3260) @@ -7,7 +7,7 @@ __all__ = ['spmatrix','csc_matrix','csr_matrix','coo_matrix', 'lil_matrix','dok_matrix', - 'spdiags','speye','spidentity', + 'spdiags','speye','spidentity','extract_diagonal', 'isspmatrix','issparse','isspmatrix_csc','isspmatrix_csr', 'isspmatrix_lil','isspmatrix_dok', 'lil_eye', 'lil_diags' ] @@ -2636,7 +2636,25 @@ assert(len(offsets) == diags.shape[0]) indptr, rowind, data = sparsetools.spdiags(M, N, len(offsets), offsets, diags) return csc_matrix((data, rowind, indptr), (M, N)) - + +def extract_diagonal(A): + """ + extract_diagonal(A) returns the main diagonal of A. 
+ """ + if isspmatrix_csr(A): + return sparsetools.extract_csr_diagonal(A.shape[0],A.shape[1], + A.indptr,A.indices,A.data) + elif isspmatrix_csc(A): + return sparsetools.extract_csc_diagonal(A.shape[0],A.shape[1], + A.indptr,A.indices,A.data) + elif isspmatrix(A): + return extract_diagonal(A.tocsr()) + else: + raise ValueError,'expected sparse matrix' + + + + def spidentity(n, dtype='d'): """ spidentity( n ) returns the identity matrix of shape (n, n) stored Modified: trunk/scipy/sparse/sparsetools/sparsetools.h =================================================================== --- trunk/scipy/sparse/sparsetools/sparsetools.h 2007-08-23 18:08:52 UTC (rev 3259) +++ trunk/scipy/sparse/sparsetools/sparsetools.h 2007-08-23 20:52:36 UTC (rev 3260) @@ -24,7 +24,53 @@ #include +/* + * Extract main diagonal of CSR matrix A + * + * Input Arguments: + * I n_row - number of rows in A + * I n_col - number of columns in A + * I Ap[n_row+1] - row pointer + * I Aj[nnz(A)] - column indices + * T Ax[n_col] - nonzeros + * + * Output Arguments: + * vec Yx - diagonal entries + * + * Note: + * Output array Yx will be allocated within in the method + * Duplicate entries will be summed. + * + * Complexity: Linear. Specifically O(nnz(A) + min(n_row,n_col)) + * + */ +template +void extract_csr_diagonal(const I n_row, + const I n_col, + const I Ap[], + const I Aj[], + const T Ax[], + std::vector* Yx) +{ + const I N = std::min(n_row, n_col); + + Yx->resize(N); + for(I i = 0; i < N; i++){ + I row_start = Ap[i]; + I row_end = Ap[i+1]; + + T diag = 0; + for(I jj = row_start; jj < row_end; jj++){ + if (Aj[jj] == i) + diag += Ax[jj]; + } + + (*Yx)[i] = diag; + } +} + + /* * Compute B = A for CSR matrix A, CSC matrix B * @@ -132,7 +178,7 @@ template void csrtocoo(const I n_row, const I n_col, - const I Ap [], + const I Ap[], const I Aj[], const T Ax[], std::vector* Bi, @@ -551,7 +597,7 @@ * T Xx[n_col] - nonzeros * * Output Arguments: - * vec Yx - nonzeros (real part) + * vec Yx - nonzeros * * Note: * Output array Xx will be allocated within in the method @@ -562,7 +608,7 @@ template void csrmux(const I n_row, const I n_col, - const I Ap [], + const I Ap[], const I Aj[], const T Ax[], const T Xx[], @@ -593,12 +639,11 @@ * I n_col - number of columns in A * I Ap[n_row+1] - column pointer * I Ai[nnz(A)] - row indices - * T Ax[n_col] - nonzeros (real part) - * T Xx[n_col] - nonzeros (real part) - * bool do_complex - switch scalar/complex modes + * T Ax[n_col] - nonzeros + * T Xx[n_col] - nonzeros * * Output Arguments: - * vec Yx - nonzeros (real part) + * vec Yx - nonzeros * * Note: * Output arrays Xx will be allocated within in the method @@ -817,7 +862,19 @@ /* * Derived methods */ + template +void extract_csc_diagonal(const I n_row, + const I n_col, + const I Ap[], + const I Aj[], + const T Ax[], + std::vector* Yx){ + extract_csr_diagonal(n_col, n_row, Ap, Aj, Ax, Yx); +} + + +template void csctocsr(const I n_row, const I n_col, const I Ap[], Modified: trunk/scipy/sparse/sparsetools/sparsetools.i =================================================================== --- trunk/scipy/sparse/sparsetools/sparsetools.i 2007-08-23 18:08:52 UTC (rev 3259) +++ trunk/scipy/sparse/sparsetools/sparsetools.i 2007-08-23 20:52:36 UTC (rev 3260) @@ -163,6 +163,8 @@ %include "sparsetools.h" /* * Order may be important here, list float before double, scalar before complex + * + * Should we permit unsigned types as array indices? Do any functions require signedness? 
-- Nathan (Aug 2007) */ %define INSTANTIATE_ALL( f_name ) @@ -176,7 +178,13 @@ %enddef +/* + * diag(CSR) and diag(CSC) + */ +INSTANTIATE_ALL(extract_csr_diagonal) +INSTANTIATE_ALL(extract_csc_diagonal) + /* * CSR->CSC or CSC->CSR or CSR = CSR^T or CSC = CSC^T */ Modified: trunk/scipy/sparse/sparsetools/sparsetools.py =================================================================== --- trunk/scipy/sparse/sparsetools/sparsetools.py 2007-08-23 18:08:52 UTC (rev 3259) +++ trunk/scipy/sparse/sparsetools/sparsetools.py 2007-08-23 20:52:36 UTC (rev 3260) @@ -50,6 +50,32 @@ +def extract_csr_diagonal(*args): + """ + extract_csr_diagonal(int n_row, int n_col, int Ap, int Aj, int Ax, std::vector<(int)> Yx) + extract_csr_diagonal(int n_row, int n_col, int Ap, int Aj, long Ax, std::vector<(long)> Yx) + extract_csr_diagonal(int n_row, int n_col, int Ap, int Aj, float Ax, std::vector<(float)> Yx) + extract_csr_diagonal(int n_row, int n_col, int Ap, int Aj, double Ax, std::vector<(double)> Yx) + extract_csr_diagonal(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax, + std::vector<(npy_cfloat_wrapper)> Yx) + extract_csr_diagonal(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax, + std::vector<(npy_cdouble_wrapper)> Yx) + """ + return _sparsetools.extract_csr_diagonal(*args) + +def extract_csc_diagonal(*args): + """ + extract_csc_diagonal(int n_row, int n_col, int Ap, int Aj, int Ax, std::vector<(int)> Yx) + extract_csc_diagonal(int n_row, int n_col, int Ap, int Aj, long Ax, std::vector<(long)> Yx) + extract_csc_diagonal(int n_row, int n_col, int Ap, int Aj, float Ax, std::vector<(float)> Yx) + extract_csc_diagonal(int n_row, int n_col, int Ap, int Aj, double Ax, std::vector<(double)> Yx) + extract_csc_diagonal(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax, + std::vector<(npy_cfloat_wrapper)> Yx) + extract_csc_diagonal(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax, + std::vector<(npy_cdouble_wrapper)> Yx) + """ + return _sparsetools.extract_csc_diagonal(*args) + def csrtocsc(*args): """ csrtocsc(int n_row, int n_col, int Ap, int Aj, int Ax, std::vector<(int)> Bp, Modified: trunk/scipy/sparse/sparsetools/sparsetools_wrap.cxx =================================================================== --- trunk/scipy/sparse/sparsetools/sparsetools_wrap.cxx 2007-08-23 18:08:52 UTC (rev 3259) +++ trunk/scipy/sparse/sparsetools/sparsetools_wrap.cxx 2007-08-23 20:52:36 UTC (rev 3260) @@ -3020,6 +3020,1576 @@ #ifdef __cplusplus extern "C" { #endif +SWIGINTERN PyObject *_wrap_extract_csr_diagonal__SWIG_1(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { + PyObject *resultobj = 0; + int arg1 ; + int arg2 ; + int *arg3 ; + int *arg4 ; + int *arg5 ; + std::vector *arg6 = (std::vector *) 0 ; + int val1 ; + int ecode1 = 0 ; + int val2 ; + int ecode2 = 0 ; + PyArrayObject *array3 = NULL ; + int is_new_object3 ; + PyArrayObject *array4 = NULL ; + int is_new_object4 ; + PyArrayObject *array5 = NULL ; + int is_new_object5 ; + std::vector *tmp6 ; + PyObject * obj0 = 0 ; + PyObject * obj1 = 0 ; + PyObject * obj2 = 0 ; + PyObject * obj3 = 0 ; + PyObject * obj4 = 0 ; + + { + tmp6 = new std::vector(); + arg6 = tmp6; + } + if (!PyArg_ParseTuple(args,(char *)"OOOOO:extract_csr_diagonal",&obj0,&obj1,&obj2,&obj3,&obj4)) SWIG_fail; + ecode1 = SWIG_AsVal_int(obj0, &val1); + if (!SWIG_IsOK(ecode1)) { + SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "extract_csr_diagonal" "', argument " "1"" of type '" "int""'"); + } + arg1 = static_cast< int >(val1); + ecode2 = SWIG_AsVal_int(obj1, 
&val2); + if (!SWIG_IsOK(ecode2)) { + SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "extract_csr_diagonal" "', argument " "2"" of type '" "int""'"); + } + arg2 = static_cast< int >(val2); + { + npy_intp size[1] = { + -1 + }; + array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); + if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1)) SWIG_fail; + arg3 = (int*) array3->data; + } + { + npy_intp size[1] = { + -1 + }; + array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); + if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1)) SWIG_fail; + arg4 = (int*) array4->data; + } + { + npy_intp size[1] = { + -1 + }; + array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); + if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1)) SWIG_fail; + arg5 = (int*) array5->data; + } + extract_csr_diagonal(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(int const (*))arg5,arg6); + resultobj = SWIG_Py_Void(); + { + int length = (arg6)->size(); + PyObject *obj = PyArray_FromDims(1, &length,PyArray_INT); + memcpy(PyArray_DATA(obj),&((*(arg6))[0]),sizeof(int)*length); + delete arg6; + resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); + } + { + if (is_new_object3 && array3) Py_DECREF(array3); + } + { + if (is_new_object4 && array4) Py_DECREF(array4); + } + { + if (is_new_object5 && array5) Py_DECREF(array5); + } + return resultobj; +fail: + { + if (is_new_object3 && array3) Py_DECREF(array3); + } + { + if (is_new_object4 && array4) Py_DECREF(array4); + } + { + if (is_new_object5 && array5) Py_DECREF(array5); + } + return NULL; +} + + +SWIGINTERN PyObject *_wrap_extract_csr_diagonal__SWIG_2(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { + PyObject *resultobj = 0; + int arg1 ; + int arg2 ; + int *arg3 ; + int *arg4 ; + long *arg5 ; + std::vector *arg6 = (std::vector *) 0 ; + int val1 ; + int ecode1 = 0 ; + int val2 ; + int ecode2 = 0 ; + PyArrayObject *array3 = NULL ; + int is_new_object3 ; + PyArrayObject *array4 = NULL ; + int is_new_object4 ; + PyArrayObject *array5 = NULL ; + int is_new_object5 ; + std::vector *tmp6 ; + PyObject * obj0 = 0 ; + PyObject * obj1 = 0 ; + PyObject * obj2 = 0 ; + PyObject * obj3 = 0 ; + PyObject * obj4 = 0 ; + + { + tmp6 = new std::vector(); + arg6 = tmp6; + } + if (!PyArg_ParseTuple(args,(char *)"OOOOO:extract_csr_diagonal",&obj0,&obj1,&obj2,&obj3,&obj4)) SWIG_fail; + ecode1 = SWIG_AsVal_int(obj0, &val1); + if (!SWIG_IsOK(ecode1)) { + SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "extract_csr_diagonal" "', argument " "1"" of type '" "int""'"); + } + arg1 = static_cast< int >(val1); + ecode2 = SWIG_AsVal_int(obj1, &val2); + if (!SWIG_IsOK(ecode2)) { + SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "extract_csr_diagonal" "', argument " "2"" of type '" "int""'"); + } + arg2 = static_cast< int >(val2); + { + npy_intp size[1] = { + -1 + }; + array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); + if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1)) SWIG_fail; + arg3 = (int*) array3->data; + } + { + npy_intp size[1] = { + -1 + }; + array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); + if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1)) SWIG_fail; + arg4 = (int*) array4->data; + } + { + npy_intp size[1] = { + -1 + }; + array5 = 
obj_to_array_contiguous_allow_conversion(obj4, PyArray_LONG, &is_new_object5); + if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1)) SWIG_fail; + arg5 = (long*) array5->data; + } + extract_csr_diagonal(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(long const (*))arg5,arg6); + resultobj = SWIG_Py_Void(); + { + int length = (arg6)->size(); + PyObject *obj = PyArray_FromDims(1, &length,PyArray_LONG); + memcpy(PyArray_DATA(obj),&((*(arg6))[0]),sizeof(long)*length); + delete arg6; + resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); + } + { + if (is_new_object3 && array3) Py_DECREF(array3); + } + { + if (is_new_object4 && array4) Py_DECREF(array4); + } + { + if (is_new_object5 && array5) Py_DECREF(array5); + } + return resultobj; +fail: + { + if (is_new_object3 && array3) Py_DECREF(array3); + } + { + if (is_new_object4 && array4) Py_DECREF(array4); + } + { + if (is_new_object5 && array5) Py_DECREF(array5); + } + return NULL; +} + + +SWIGINTERN PyObject *_wrap_extract_csr_diagonal__SWIG_3(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { + PyObject *resultobj = 0; + int arg1 ; + int arg2 ; + int *arg3 ; + int *arg4 ; + float *arg5 ; + std::vector *arg6 = (std::vector *) 0 ; + int val1 ; + int ecode1 = 0 ; + int val2 ; + int ecode2 = 0 ; + PyArrayObject *array3 = NULL ; + int is_new_object3 ; + PyArrayObject *array4 = NULL ; + int is_new_object4 ; + PyArrayObject *array5 = NULL ; + int is_new_object5 ; + std::vector *tmp6 ; + PyObject * obj0 = 0 ; + PyObject * obj1 = 0 ; + PyObject * obj2 = 0 ; + PyObject * obj3 = 0 ; + PyObject * obj4 = 0 ; + + { + tmp6 = new std::vector(); + arg6 = tmp6; + } + if (!PyArg_ParseTuple(args,(char *)"OOOOO:extract_csr_diagonal",&obj0,&obj1,&obj2,&obj3,&obj4)) SWIG_fail; + ecode1 = SWIG_AsVal_int(obj0, &val1); + if (!SWIG_IsOK(ecode1)) { + SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "extract_csr_diagonal" "', argument " "1"" of type '" "int""'"); + } + arg1 = static_cast< int >(val1); + ecode2 = SWIG_AsVal_int(obj1, &val2); + if (!SWIG_IsOK(ecode2)) { + SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "extract_csr_diagonal" "', argument " "2"" of type '" "int""'"); + } + arg2 = static_cast< int >(val2); + { + npy_intp size[1] = { + -1 + }; + array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); + if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1)) SWIG_fail; + arg3 = (int*) array3->data; + } + { + npy_intp size[1] = { + -1 + }; + array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); + if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1)) SWIG_fail; + arg4 = (int*) array4->data; + } + { + npy_intp size[1] = { + -1 + }; + array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_FLOAT, &is_new_object5); + if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1)) SWIG_fail; + arg5 = (float*) array5->data; + } + extract_csr_diagonal(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(float const (*))arg5,arg6); + resultobj = SWIG_Py_Void(); + { + int length = (arg6)->size(); + PyObject *obj = PyArray_FromDims(1, &length,PyArray_FLOAT); + memcpy(PyArray_DATA(obj),&((*(arg6))[0]),sizeof(float)*length); + delete arg6; + resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); + } + { + if (is_new_object3 && array3) Py_DECREF(array3); + } + { + if (is_new_object4 && array4) Py_DECREF(array4); + } + { + if (is_new_object5 && array5) Py_DECREF(array5); + } + return 
resultobj; +fail: + { + if (is_new_object3 && array3) Py_DECREF(array3); + } + { + if (is_new_object4 && array4) Py_DECREF(array4); + } + { + if (is_new_object5 && array5) Py_DECREF(array5); + } + return NULL; +} + + +SWIGINTERN PyObject *_wrap_extract_csr_diagonal__SWIG_4(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { + PyObject *resultobj = 0; + int arg1 ; + int arg2 ; + int *arg3 ; + int *arg4 ; + double *arg5 ; + std::vector *arg6 = (std::vector *) 0 ; + int val1 ; + int ecode1 = 0 ; + int val2 ; + int ecode2 = 0 ; + PyArrayObject *array3 = NULL ; + int is_new_object3 ; + PyArrayObject *array4 = NULL ; + int is_new_object4 ; + PyArrayObject *array5 = NULL ; + int is_new_object5 ; + std::vector *tmp6 ; + PyObject * obj0 = 0 ; + PyObject * obj1 = 0 ; + PyObject * obj2 = 0 ; + PyObject * obj3 = 0 ; + PyObject * obj4 = 0 ; + + { + tmp6 = new std::vector(); + arg6 = tmp6; + } + if (!PyArg_ParseTuple(args,(char *)"OOOOO:extract_csr_diagonal",&obj0,&obj1,&obj2,&obj3,&obj4)) SWIG_fail; + ecode1 = SWIG_AsVal_int(obj0, &val1); + if (!SWIG_IsOK(ecode1)) { + SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "extract_csr_diagonal" "', argument " "1"" of type '" "int""'"); + } + arg1 = static_cast< int >(val1); + ecode2 = SWIG_AsVal_int(obj1, &val2); + if (!SWIG_IsOK(ecode2)) { + SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "extract_csr_diagonal" "', argument " "2"" of type '" "int""'"); + } + arg2 = static_cast< int >(val2); + { + npy_intp size[1] = { + -1 + }; + array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); + if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1)) SWIG_fail; + arg3 = (int*) array3->data; + } + { + npy_intp size[1] = { + -1 + }; + array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); + if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1)) SWIG_fail; + arg4 = (int*) array4->data; + } + { + npy_intp size[1] = { + -1 + }; + array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_DOUBLE, &is_new_object5); + if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1)) SWIG_fail; + arg5 = (double*) array5->data; + } + extract_csr_diagonal(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(double const (*))arg5,arg6); + resultobj = SWIG_Py_Void(); + { + int length = (arg6)->size(); + PyObject *obj = PyArray_FromDims(1, &length,PyArray_DOUBLE); + memcpy(PyArray_DATA(obj),&((*(arg6))[0]),sizeof(double)*length); + delete arg6; + resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); + } + { + if (is_new_object3 && array3) Py_DECREF(array3); + } + { + if (is_new_object4 && array4) Py_DECREF(array4); + } + { + if (is_new_object5 && array5) Py_DECREF(array5); + } + return resultobj; +fail: + { + if (is_new_object3 && array3) Py_DECREF(array3); + } + { + if (is_new_object4 && array4) Py_DECREF(array4); + } + { + if (is_new_object5 && array5) Py_DECREF(array5); + } + return NULL; +} + + +SWIGINTERN PyObject *_wrap_extract_csr_diagonal__SWIG_5(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { + PyObject *resultobj = 0; + int arg1 ; + int arg2 ; + int *arg3 ; + int *arg4 ; + npy_cfloat_wrapper *arg5 ; + std::vector *arg6 = (std::vector *) 0 ; + int val1 ; + int ecode1 = 0 ; + int val2 ; + int ecode2 = 0 ; + PyArrayObject *array3 = NULL ; + int is_new_object3 ; + PyArrayObject *array4 = NULL ; + int is_new_object4 ; + PyArrayObject *array5 = NULL ; + int is_new_object5 ; + std::vector *tmp6 ; + PyObject * obj0 = 0 ; + PyObject 
* obj1 = 0 ; + PyObject * obj2 = 0 ; + PyObject * obj3 = 0 ; + PyObject * obj4 = 0 ; + + { + tmp6 = new std::vector(); + arg6 = tmp6; + } + if (!PyArg_ParseTuple(args,(char *)"OOOOO:extract_csr_diagonal",&obj0,&obj1,&obj2,&obj3,&obj4)) SWIG_fail; + ecode1 = SWIG_AsVal_int(obj0, &val1); + if (!SWIG_IsOK(ecode1)) { + SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "extract_csr_diagonal" "', argument " "1"" of type '" "int""'"); + } + arg1 = static_cast< int >(val1); + ecode2 = SWIG_AsVal_int(obj1, &val2); + if (!SWIG_IsOK(ecode2)) { + SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "extract_csr_diagonal" "', argument " "2"" of type '" "int""'"); + } + arg2 = static_cast< int >(val2); + { + npy_intp size[1] = { + -1 + }; + array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); + if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1)) SWIG_fail; + arg3 = (int*) array3->data; + } + { + npy_intp size[1] = { + -1 + }; + array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); + if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1)) SWIG_fail; + arg4 = (int*) array4->data; + } + { + npy_intp size[1] = { + -1 + }; + array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_CFLOAT, &is_new_object5); + if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1)) SWIG_fail; + arg5 = (npy_cfloat_wrapper*) array5->data; + } + extract_csr_diagonal(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(npy_cfloat_wrapper const (*))arg5,arg6); + resultobj = SWIG_Py_Void(); + { + int length = (arg6)->size(); + PyObject *obj = PyArray_FromDims(1, &length,PyArray_CFLOAT); + memcpy(PyArray_DATA(obj),&((*(arg6))[0]),sizeof(npy_cfloat_wrapper)*length); + delete arg6; + resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); + } + { + if (is_new_object3 && array3) Py_DECREF(array3); + } + { + if (is_new_object4 && array4) Py_DECREF(array4); + } + { + if (is_new_object5 && array5) Py_DECREF(array5); + } + return resultobj; +fail: + { + if (is_new_object3 && array3) Py_DECREF(array3); + } + { + if (is_new_object4 && array4) Py_DECREF(array4); + } + { + if (is_new_object5 && array5) Py_DECREF(array5); + } + return NULL; +} + + +SWIGINTERN PyObject *_wrap_extract_csr_diagonal__SWIG_6(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { + PyObject *resultobj = 0; + int arg1 ; + int arg2 ; + int *arg3 ; + int *arg4 ; + npy_cdouble_wrapper *arg5 ; + std::vector *arg6 = (std::vector *) 0 ; + int val1 ; + int ecode1 = 0 ; + int val2 ; + int ecode2 = 0 ; + PyArrayObject *array3 = NULL ; + int is_new_object3 ; + PyArrayObject *array4 = NULL ; + int is_new_object4 ; + PyArrayObject *array5 = NULL ; + int is_new_object5 ; + std::vector *tmp6 ; + PyObject * obj0 = 0 ; + PyObject * obj1 = 0 ; + PyObject * obj2 = 0 ; + PyObject * obj3 = 0 ; + PyObject * obj4 = 0 ; + + { + tmp6 = new std::vector(); + arg6 = tmp6; + } + if (!PyArg_ParseTuple(args,(char *)"OOOOO:extract_csr_diagonal",&obj0,&obj1,&obj2,&obj3,&obj4)) SWIG_fail; + ecode1 = SWIG_AsVal_int(obj0, &val1); + if (!SWIG_IsOK(ecode1)) { + SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "extract_csr_diagonal" "', argument " "1"" of type '" "int""'"); + } + arg1 = static_cast< int >(val1); + ecode2 = SWIG_AsVal_int(obj1, &val2); + if (!SWIG_IsOK(ecode2)) { + SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "extract_csr_diagonal" "', argument " "2"" of type '" "int""'"); + } + arg2 = static_cast< int >(val2); + { + 
npy_intp size[1] = { + -1 + }; + array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); + if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1)) SWIG_fail; + arg3 = (int*) array3->data; + } + { + npy_intp size[1] = { + -1 + }; + array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); + if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1)) SWIG_fail; + arg4 = (int*) array4->data; + } + { + npy_intp size[1] = { + -1 + }; + array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_CDOUBLE, &is_new_object5); + if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1)) SWIG_fail; + arg5 = (npy_cdouble_wrapper*) array5->data; + } + extract_csr_diagonal(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(npy_cdouble_wrapper const (*))arg5,arg6); + resultobj = SWIG_Py_Void(); + { + int length = (arg6)->size(); + PyObject *obj = PyArray_FromDims(1, &length,PyArray_CDOUBLE); + memcpy(PyArray_DATA(obj),&((*(arg6))[0]),sizeof(npy_cdouble_wrapper)*length); + delete arg6; + resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); + } + { + if (is_new_object3 && array3) Py_DECREF(array3); + } + { + if (is_new_object4 && array4) Py_DECREF(array4); + } + { + if (is_new_object5 && array5) Py_DECREF(array5); + } + return resultobj; +fail: + { + if (is_new_object3 && array3) Py_DECREF(array3); + } + { + if (is_new_object4 && array4) Py_DECREF(array4); + } + { + if (is_new_object5 && array5) Py_DECREF(array5); + } + return NULL; +} + + +SWIGINTERN PyObject *_wrap_extract_csr_diagonal(PyObject *self, PyObject *args) { + int argc; + PyObject *argv[6]; + int ii; + + if (!PyTuple_Check(args)) SWIG_fail; + argc = PyObject_Length(args); + for (ii = 0; (ii < argc) && (ii < 5); ii++) { + argv[ii] = PyTuple_GET_ITEM(args,ii); + } + if (argc == 5) { + int _v; + { + int res = SWIG_AsVal_int(argv[0], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + { + int res = SWIG_AsVal_int(argv[1], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + { + _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; + } + if (_v) { + { + _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; + } + if (_v) { + { + _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; + } + if (_v) { + return _wrap_extract_csr_diagonal__SWIG_1(self, args); + } + } + } + } + } + } + if (argc == 5) { + int _v; + { + int res = SWIG_AsVal_int(argv[0], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + { + int res = SWIG_AsVal_int(argv[1], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + { + _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; + } + if (_v) { + { + _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; + } + if (_v) { + { + _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_LONG)) ? 1 : 0; + } + if (_v) { + return _wrap_extract_csr_diagonal__SWIG_2(self, args); + } + } + } + } + } + } + if (argc == 5) { + int _v; + { + int res = SWIG_AsVal_int(argv[0], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + { + int res = SWIG_AsVal_int(argv[1], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + { + _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 
1 : 0; + } + if (_v) { + { + _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; + } + if (_v) { + { + _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_FLOAT)) ? 1 : 0; + } + if (_v) { + return _wrap_extract_csr_diagonal__SWIG_3(self, args); + } + } + } + } + } + } + if (argc == 5) { + int _v; + { + int res = SWIG_AsVal_int(argv[0], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + { + int res = SWIG_AsVal_int(argv[1], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + { + _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; + } + if (_v) { + { + _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; + } + if (_v) { + { + _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_DOUBLE)) ? 1 : 0; + } + if (_v) { + return _wrap_extract_csr_diagonal__SWIG_4(self, args); + } + } + } + } + } + } + if (argc == 5) { + int _v; + { + int res = SWIG_AsVal_int(argv[0], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + { + int res = SWIG_AsVal_int(argv[1], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + { + _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; + } + if (_v) { + { + _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; + } + if (_v) { + { + _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_CFLOAT)) ? 1 : 0; + } + if (_v) { + return _wrap_extract_csr_diagonal__SWIG_5(self, args); + } + } + } + } + } + } + if (argc == 5) { + int _v; + { + int res = SWIG_AsVal_int(argv[0], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + { + int res = SWIG_AsVal_int(argv[1], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + { + _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; + } + if (_v) { + { + _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; + } + if (_v) { + { + _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_CDOUBLE)) ? 
1 : 0; + } + if (_v) { + return _wrap_extract_csr_diagonal__SWIG_6(self, args); + } + } + } + } + } + } + +fail: + SWIG_SetErrorMsg(PyExc_NotImplementedError,"Wrong number of arguments for overloaded function 'extract_csr_diagonal'.\n Possible C/C++ prototypes are:\n extract_csr_diagonal<(int,int)>(int const,int const,int const [],int const [],int const [],std::vector *)\n extract_csr_diagonal<(int,long)>(int const,int const,int const [],int const [],long const [],std::vector *)\n extract_csr_diagonal<(int,float)>(int const,int const,int const [],int const [],float const [],std::vector *)\n extract_csr_diagonal<(int,double)>(int const,int const,int const [],int const [],double const [],std::vector *)\n extract_csr_diagonal<(int,npy_cfloat_wrapper)>(int const,int const,int const [],int const [],npy_cfloat_wrapper const [],std::vector *)\n extract_csr_diagonal<(int,npy_cdouble_wrapper)>(int const,int const,int const [],int const [],npy_cdouble_wrapper const [],std::vector *)\n"); + return NULL; +} + + +SWIGINTERN PyObject *_wrap_extract_csc_diagonal__SWIG_1(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { + PyObject *resultobj = 0; + int arg1 ; + int arg2 ; + int *arg3 ; + int *arg4 ; + int *arg5 ; + std::vector *arg6 = (std::vector *) 0 ; + int val1 ; + int ecode1 = 0 ; + int val2 ; + int ecode2 = 0 ; + PyArrayObject *array3 = NULL ; + int is_new_object3 ; + PyArrayObject *array4 = NULL ; + int is_new_object4 ; + PyArrayObject *array5 = NULL ; + int is_new_object5 ; + std::vector *tmp6 ; + PyObject * obj0 = 0 ; + PyObject * obj1 = 0 ; + PyObject * obj2 = 0 ; + PyObject * obj3 = 0 ; + PyObject * obj4 = 0 ; + + { + tmp6 = new std::vector(); + arg6 = tmp6; + } + if (!PyArg_ParseTuple(args,(char *)"OOOOO:extract_csc_diagonal",&obj0,&obj1,&obj2,&obj3,&obj4)) SWIG_fail; + ecode1 = SWIG_AsVal_int(obj0, &val1); + if (!SWIG_IsOK(ecode1)) { + SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "extract_csc_diagonal" "', argument " "1"" of type '" "int""'"); + } + arg1 = static_cast< int >(val1); + ecode2 = SWIG_AsVal_int(obj1, &val2); + if (!SWIG_IsOK(ecode2)) { + SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "extract_csc_diagonal" "', argument " "2"" of type '" "int""'"); + } + arg2 = static_cast< int >(val2); + { + npy_intp size[1] = { + -1 + }; + array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); + if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1)) SWIG_fail; + arg3 = (int*) array3->data; + } + { + npy_intp size[1] = { + -1 + }; + array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); + if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1)) SWIG_fail; + arg4 = (int*) array4->data; + } + { + npy_intp size[1] = { + -1 + }; + array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); + if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1)) SWIG_fail; + arg5 = (int*) array5->data; + } + extract_csc_diagonal(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(int const (*))arg5,arg6); + resultobj = SWIG_Py_Void(); + { + int length = (arg6)->size(); + PyObject *obj = PyArray_FromDims(1, &length,PyArray_INT); + memcpy(PyArray_DATA(obj),&((*(arg6))[0]),sizeof(int)*length); + delete arg6; + resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); + } + { + if (is_new_object3 && array3) Py_DECREF(array3); + } + { + if (is_new_object4 && array4) Py_DECREF(array4); + } + { + if (is_new_object5 && array5) 
Py_DECREF(array5); + } + return resultobj; +fail: + { + if (is_new_object3 && array3) Py_DECREF(array3); + } + { + if (is_new_object4 && array4) Py_DECREF(array4); + } + { + if (is_new_object5 && array5) Py_DECREF(array5); + } + return NULL; +} + + +SWIGINTERN PyObject *_wrap_extract_csc_diagonal__SWIG_2(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { + PyObject *resultobj = 0; + int arg1 ; + int arg2 ; + int *arg3 ; + int *arg4 ; + long *arg5 ; + std::vector *arg6 = (std::vector *) 0 ; + int val1 ; + int ecode1 = 0 ; + int val2 ; + int ecode2 = 0 ; + PyArrayObject *array3 = NULL ; + int is_new_object3 ; + PyArrayObject *array4 = NULL ; + int is_new_object4 ; + PyArrayObject *array5 = NULL ; + int is_new_object5 ; + std::vector *tmp6 ; + PyObject * obj0 = 0 ; + PyObject * obj1 = 0 ; + PyObject * obj2 = 0 ; + PyObject * obj3 = 0 ; + PyObject * obj4 = 0 ; + + { + tmp6 = new std::vector(); + arg6 = tmp6; + } + if (!PyArg_ParseTuple(args,(char *)"OOOOO:extract_csc_diagonal",&obj0,&obj1,&obj2,&obj3,&obj4)) SWIG_fail; + ecode1 = SWIG_AsVal_int(obj0, &val1); + if (!SWIG_IsOK(ecode1)) { + SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "extract_csc_diagonal" "', argument " "1"" of type '" "int""'"); + } + arg1 = static_cast< int >(val1); + ecode2 = SWIG_AsVal_int(obj1, &val2); + if (!SWIG_IsOK(ecode2)) { + SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "extract_csc_diagonal" "', argument " "2"" of type '" "int""'"); + } + arg2 = static_cast< int >(val2); + { + npy_intp size[1] = { + -1 + }; + array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); + if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1)) SWIG_fail; + arg3 = (int*) array3->data; + } + { + npy_intp size[1] = { + -1 + }; + array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); + if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1)) SWIG_fail; + arg4 = (int*) array4->data; + } + { + npy_intp size[1] = { + -1 + }; + array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_LONG, &is_new_object5); + if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1)) SWIG_fail; + arg5 = (long*) array5->data; + } + extract_csc_diagonal(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(long const (*))arg5,arg6); + resultobj = SWIG_Py_Void(); + { + int length = (arg6)->size(); + PyObject *obj = PyArray_FromDims(1, &length,PyArray_LONG); + memcpy(PyArray_DATA(obj),&((*(arg6))[0]),sizeof(long)*length); + delete arg6; + resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); + } + { + if (is_new_object3 && array3) Py_DECREF(array3); + } + { + if (is_new_object4 && array4) Py_DECREF(array4); + } + { + if (is_new_object5 && array5) Py_DECREF(array5); + } + return resultobj; +fail: + { + if (is_new_object3 && array3) Py_DECREF(array3); + } + { + if (is_new_object4 && array4) Py_DECREF(array4); + } + { + if (is_new_object5 && array5) Py_DECREF(array5); + } + return NULL; +} + + +SWIGINTERN PyObject *_wrap_extract_csc_diagonal__SWIG_3(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { + PyObject *resultobj = 0; + int arg1 ; + int arg2 ; + int *arg3 ; + int *arg4 ; + float *arg5 ; + std::vector *arg6 = (std::vector *) 0 ; + int val1 ; + int ecode1 = 0 ; + int val2 ; + int ecode2 = 0 ; + PyArrayObject *array3 = NULL ; + int is_new_object3 ; + PyArrayObject *array4 = NULL ; + int is_new_object4 ; + PyArrayObject *array5 = NULL ; + int is_new_object5 ; + std::vector *tmp6 ; + PyObject * obj0 = 0 ; + 
PyObject * obj1 = 0 ; + PyObject * obj2 = 0 ; + PyObject * obj3 = 0 ; + PyObject * obj4 = 0 ; + + { + tmp6 = new std::vector(); + arg6 = tmp6; + } + if (!PyArg_ParseTuple(args,(char *)"OOOOO:extract_csc_diagonal",&obj0,&obj1,&obj2,&obj3,&obj4)) SWIG_fail; + ecode1 = SWIG_AsVal_int(obj0, &val1); + if (!SWIG_IsOK(ecode1)) { + SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "extract_csc_diagonal" "', argument " "1"" of type '" "int""'"); + } + arg1 = static_cast< int >(val1); + ecode2 = SWIG_AsVal_int(obj1, &val2); + if (!SWIG_IsOK(ecode2)) { + SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "extract_csc_diagonal" "', argument " "2"" of type '" "int""'"); + } + arg2 = static_cast< int >(val2); + { + npy_intp size[1] = { + -1 + }; + array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); + if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1)) SWIG_fail; + arg3 = (int*) array3->data; + } + { + npy_intp size[1] = { + -1 + }; + array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); + if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1)) SWIG_fail; + arg4 = (int*) array4->data; + } + { + npy_intp size[1] = { + -1 + }; + array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_FLOAT, &is_new_object5); + if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1)) SWIG_fail; + arg5 = (float*) array5->data; + } + extract_csc_diagonal(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(float const (*))arg5,arg6); + resultobj = SWIG_Py_Void(); + { + int length = (arg6)->size(); + PyObject *obj = PyArray_FromDims(1, &length,PyArray_FLOAT); + memcpy(PyArray_DATA(obj),&((*(arg6))[0]),sizeof(float)*length); + delete arg6; + resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); + } + { + if (is_new_object3 && array3) Py_DECREF(array3); + } + { + if (is_new_object4 && array4) Py_DECREF(array4); + } + { + if (is_new_object5 && array5) Py_DECREF(array5); + } + return resultobj; +fail: + { + if (is_new_object3 && array3) Py_DECREF(array3); + } + { + if (is_new_object4 && array4) Py_DECREF(array4); + } + { + if (is_new_object5 && array5) Py_DECREF(array5); + } + return NULL; +} + + +SWIGINTERN PyObject *_wrap_extract_csc_diagonal__SWIG_4(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { + PyObject *resultobj = 0; + int arg1 ; + int arg2 ; + int *arg3 ; + int *arg4 ; + double *arg5 ; + std::vector *arg6 = (std::vector *) 0 ; + int val1 ; + int ecode1 = 0 ; + int val2 ; + int ecode2 = 0 ; + PyArrayObject *array3 = NULL ; + int is_new_object3 ; + PyArrayObject *array4 = NULL ; + int is_new_object4 ; + PyArrayObject *array5 = NULL ; + int is_new_object5 ; + std::vector *tmp6 ; + PyObject * obj0 = 0 ; + PyObject * obj1 = 0 ; + PyObject * obj2 = 0 ; + PyObject * obj3 = 0 ; + PyObject * obj4 = 0 ; + + { + tmp6 = new std::vector(); + arg6 = tmp6; + } + if (!PyArg_ParseTuple(args,(char *)"OOOOO:extract_csc_diagonal",&obj0,&obj1,&obj2,&obj3,&obj4)) SWIG_fail; + ecode1 = SWIG_AsVal_int(obj0, &val1); + if (!SWIG_IsOK(ecode1)) { + SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "extract_csc_diagonal" "', argument " "1"" of type '" "int""'"); + } + arg1 = static_cast< int >(val1); + ecode2 = SWIG_AsVal_int(obj1, &val2); + if (!SWIG_IsOK(ecode2)) { + SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "extract_csc_diagonal" "', argument " "2"" of type '" "int""'"); + } + arg2 = static_cast< int >(val2); + { + npy_intp size[1] = { + -1 + }; + array3 = 
obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); + if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1)) SWIG_fail; + arg3 = (int*) array3->data; + } + { + npy_intp size[1] = { + -1 + }; + array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); + if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1)) SWIG_fail; + arg4 = (int*) array4->data; + } + { + npy_intp size[1] = { + -1 + }; + array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_DOUBLE, &is_new_object5); + if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1)) SWIG_fail; + arg5 = (double*) array5->data; + } + extract_csc_diagonal(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(double const (*))arg5,arg6); + resultobj = SWIG_Py_Void(); + { + int length = (arg6)->size(); + PyObject *obj = PyArray_FromDims(1, &length,PyArray_DOUBLE); + memcpy(PyArray_DATA(obj),&((*(arg6))[0]),sizeof(double)*length); + delete arg6; + resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); + } + { + if (is_new_object3 && array3) Py_DECREF(array3); + } + { + if (is_new_object4 && array4) Py_DECREF(array4); + } + { + if (is_new_object5 && array5) Py_DECREF(array5); + } + return resultobj; +fail: + { + if (is_new_object3 && array3) Py_DECREF(array3); + } + { + if (is_new_object4 && array4) Py_DECREF(array4); + } + { + if (is_new_object5 && array5) Py_DECREF(array5); + } + return NULL; +} + + +SWIGINTERN PyObject *_wrap_extract_csc_diagonal__SWIG_5(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { + PyObject *resultobj = 0; + int arg1 ; + int arg2 ; + int *arg3 ; + int *arg4 ; + npy_cfloat_wrapper *arg5 ; + std::vector *arg6 = (std::vector *) 0 ; + int val1 ; + int ecode1 = 0 ; + int val2 ; + int ecode2 = 0 ; + PyArrayObject *array3 = NULL ; + int is_new_object3 ; + PyArrayObject *array4 = NULL ; + int is_new_object4 ; + PyArrayObject *array5 = NULL ; + int is_new_object5 ; + std::vector *tmp6 ; + PyObject * obj0 = 0 ; + PyObject * obj1 = 0 ; + PyObject * obj2 = 0 ; + PyObject * obj3 = 0 ; + PyObject * obj4 = 0 ; + + { + tmp6 = new std::vector(); + arg6 = tmp6; + } + if (!PyArg_ParseTuple(args,(char *)"OOOOO:extract_csc_diagonal",&obj0,&obj1,&obj2,&obj3,&obj4)) SWIG_fail; + ecode1 = SWIG_AsVal_int(obj0, &val1); + if (!SWIG_IsOK(ecode1)) { + SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "extract_csc_diagonal" "', argument " "1"" of type '" "int""'"); + } + arg1 = static_cast< int >(val1); + ecode2 = SWIG_AsVal_int(obj1, &val2); + if (!SWIG_IsOK(ecode2)) { + SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "extract_csc_diagonal" "', argument " "2"" of type '" "int""'"); + } + arg2 = static_cast< int >(val2); + { + npy_intp size[1] = { + -1 + }; + array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); + if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1)) SWIG_fail; + arg3 = (int*) array3->data; + } + { + npy_intp size[1] = { + -1 + }; + array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); + if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1)) SWIG_fail; + arg4 = (int*) array4->data; + } + { + npy_intp size[1] = { + -1 + }; + array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_CFLOAT, &is_new_object5); + if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1)) SWIG_fail; + arg5 = (npy_cfloat_wrapper*) array5->data; + } + 
extract_csc_diagonal(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(npy_cfloat_wrapper const (*))arg5,arg6); + resultobj = SWIG_Py_Void(); + { + int length = (arg6)->size(); + PyObject *obj = PyArray_FromDims(1, &length,PyArray_CFLOAT); + memcpy(PyArray_DATA(obj),&((*(arg6))[0]),sizeof(npy_cfloat_wrapper)*length); + delete arg6; + resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); + } + { + if (is_new_object3 && array3) Py_DECREF(array3); + } + { + if (is_new_object4 && array4) Py_DECREF(array4); + } + { + if (is_new_object5 && array5) Py_DECREF(array5); + } + return resultobj; +fail: + { + if (is_new_object3 && array3) Py_DECREF(array3); + } + { + if (is_new_object4 && array4) Py_DECREF(array4); + } + { + if (is_new_object5 && array5) Py_DECREF(array5); + } + return NULL; +} + + +SWIGINTERN PyObject *_wrap_extract_csc_diagonal__SWIG_6(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { + PyObject *resultobj = 0; + int arg1 ; + int arg2 ; + int *arg3 ; + int *arg4 ; + npy_cdouble_wrapper *arg5 ; + std::vector *arg6 = (std::vector *) 0 ; + int val1 ; + int ecode1 = 0 ; + int val2 ; + int ecode2 = 0 ; + PyArrayObject *array3 = NULL ; + int is_new_object3 ; + PyArrayObject *array4 = NULL ; + int is_new_object4 ; + PyArrayObject *array5 = NULL ; + int is_new_object5 ; + std::vector *tmp6 ; + PyObject * obj0 = 0 ; + PyObject * obj1 = 0 ; + PyObject * obj2 = 0 ; + PyObject * obj3 = 0 ; + PyObject * obj4 = 0 ; + + { + tmp6 = new std::vector(); + arg6 = tmp6; + } + if (!PyArg_ParseTuple(args,(char *)"OOOOO:extract_csc_diagonal",&obj0,&obj1,&obj2,&obj3,&obj4)) SWIG_fail; + ecode1 = SWIG_AsVal_int(obj0, &val1); + if (!SWIG_IsOK(ecode1)) { + SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "extract_csc_diagonal" "', argument " "1"" of type '" "int""'"); + } + arg1 = static_cast< int >(val1); + ecode2 = SWIG_AsVal_int(obj1, &val2); + if (!SWIG_IsOK(ecode2)) { + SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "extract_csc_diagonal" "', argument " "2"" of type '" "int""'"); + } + arg2 = static_cast< int >(val2); + { + npy_intp size[1] = { + -1 + }; + array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); + if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1)) SWIG_fail; + arg3 = (int*) array3->data; + } + { + npy_intp size[1] = { + -1 + }; + array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); + if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1)) SWIG_fail; + arg4 = (int*) array4->data; + } + { + npy_intp size[1] = { + -1 + }; + array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_CDOUBLE, &is_new_object5); + if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1)) SWIG_fail; + arg5 = (npy_cdouble_wrapper*) array5->data; + } + extract_csc_diagonal(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(npy_cdouble_wrapper const (*))arg5,arg6); + resultobj = SWIG_Py_Void(); + { + int length = (arg6)->size(); + PyObject *obj = PyArray_FromDims(1, &length,PyArray_CDOUBLE); + memcpy(PyArray_DATA(obj),&((*(arg6))[0]),sizeof(npy_cdouble_wrapper)*length); + delete arg6; + resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); + } + { + if (is_new_object3 && array3) Py_DECREF(array3); + } + { + if (is_new_object4 && array4) Py_DECREF(array4); + } + { + if (is_new_object5 && array5) Py_DECREF(array5); + } + return resultobj; +fail: + { + if (is_new_object3 && array3) Py_DECREF(array3); + } + { + if (is_new_object4 && array4) 
Py_DECREF(array4); + } + { + if (is_new_object5 && array5) Py_DECREF(array5); + } + return NULL; +} + + +SWIGINTERN PyObject *_wrap_extract_csc_diagonal(PyObject *self, PyObject *args) { + int argc; + PyObject *argv[6]; + int ii; + + if (!PyTuple_Check(args)) SWIG_fail; + argc = PyObject_Length(args); + for (ii = 0; (ii < argc) && (ii < 5); ii++) { + argv[ii] = PyTuple_GET_ITEM(args,ii); + } + if (argc == 5) { + int _v; + { + int res = SWIG_AsVal_int(argv[0], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + { + int res = SWIG_AsVal_int(argv[1], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + { + _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; + } + if (_v) { + { + _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; + } + if (_v) { + { + _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; + } + if (_v) { + return _wrap_extract_csc_diagonal__SWIG_1(self, args); + } + } + } + } + } + } + if (argc == 5) { + int _v; + { + int res = SWIG_AsVal_int(argv[0], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + { + int res = SWIG_AsVal_int(argv[1], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + { + _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; + } + if (_v) { + { + _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; + } + if (_v) { + { + _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_LONG)) ? 1 : 0; + } + if (_v) { + return _wrap_extract_csc_diagonal__SWIG_2(self, args); + } + } + } + } + } + } + if (argc == 5) { + int _v; + { + int res = SWIG_AsVal_int(argv[0], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + { + int res = SWIG_AsVal_int(argv[1], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + { + _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; + } + if (_v) { + { + _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; + } + if (_v) { + { + _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_FLOAT)) ? 1 : 0; + } + if (_v) { + return _wrap_extract_csc_diagonal__SWIG_3(self, args); + } + } + } + } + } + } + if (argc == 5) { + int _v; + { + int res = SWIG_AsVal_int(argv[0], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + { + int res = SWIG_AsVal_int(argv[1], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + { + _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; + } + if (_v) { + { + _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; + } + if (_v) { + { + _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_DOUBLE)) ? 1 : 0; + } + if (_v) { + return _wrap_extract_csc_diagonal__SWIG_4(self, args); + } + } + } + } + } + } + if (argc == 5) { + int _v; + { + int res = SWIG_AsVal_int(argv[0], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + { + int res = SWIG_AsVal_int(argv[1], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + { + _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; + } + if (_v) { + { + _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; + } + if (_v) { + { + _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_CFLOAT)) ? 
1 : 0; + } + if (_v) { + return _wrap_extract_csc_diagonal__SWIG_5(self, args); + } + } + } + } + } + } + if (argc == 5) { + int _v; + { + int res = SWIG_AsVal_int(argv[0], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + { + int res = SWIG_AsVal_int(argv[1], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + { + _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; + } + if (_v) { + { + _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; + } + if (_v) { + { + _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_CDOUBLE)) ? 1 : 0; + } + if (_v) { + return _wrap_extract_csc_diagonal__SWIG_6(self, args); + } + } + } + } + } + } + +fail: + SWIG_SetErrorMsg(PyExc_NotImplementedError,"Wrong number of arguments for overloaded function 'extract_csc_diagonal'.\n Possible C/C++ prototypes are:\n extract_csc_diagonal<(int,int)>(int const,int const,int const [],int const [],int const [],std::vector *)\n extract_csc_diagonal<(int,long)>(int const,int const,int const [],int const [],long const [],std::vector *)\n extract_csc_diagonal<(int,float)>(int const,int const,int const [],int const [],float const [],std::vector *)\n extract_csc_diagonal<(int,double)>(int const,int const,int const [],int const [],double const [],std::vector *)\n extract_csc_diagonal<(int,npy_cfloat_wrapper)>(int const,int const,int const [],int const [],npy_cfloat_wrapper const [],std::vector *)\n extract_csc_diagonal<(int,npy_cdouble_wrapper)>(int const,int const,int const [],int const [],npy_cdouble_wrapper const [],std::vector *)\n"); + return NULL; +} + + SWIGINTERN PyObject *_wrap_csrtocsc__SWIG_1(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { PyObject *resultobj = 0; int arg1 ; @@ -28830,6 +30400,26 @@ static PyMethodDef SwigMethods[] = { + { (char *)"extract_csr_diagonal", _wrap_extract_csr_diagonal, METH_VARARGS, (char *)"\n" + "extract_csr_diagonal(int n_row, int n_col, int Ap, int Aj, int Ax, std::vector<(int)> Yx)\n" + "extract_csr_diagonal(int n_row, int n_col, int Ap, int Aj, long Ax, std::vector<(long)> Yx)\n" + "extract_csr_diagonal(int n_row, int n_col, int Ap, int Aj, float Ax, std::vector<(float)> Yx)\n" + "extract_csr_diagonal(int n_row, int n_col, int Ap, int Aj, double Ax, std::vector<(double)> Yx)\n" + "extract_csr_diagonal(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax, \n" + " std::vector<(npy_cfloat_wrapper)> Yx)\n" + "extract_csr_diagonal(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax, \n" + " std::vector<(npy_cdouble_wrapper)> Yx)\n" + ""}, + { (char *)"extract_csc_diagonal", _wrap_extract_csc_diagonal, METH_VARARGS, (char *)"\n" + "extract_csc_diagonal(int n_row, int n_col, int Ap, int Aj, int Ax, std::vector<(int)> Yx)\n" + "extract_csc_diagonal(int n_row, int n_col, int Ap, int Aj, long Ax, std::vector<(long)> Yx)\n" + "extract_csc_diagonal(int n_row, int n_col, int Ap, int Aj, float Ax, std::vector<(float)> Yx)\n" + "extract_csc_diagonal(int n_row, int n_col, int Ap, int Aj, double Ax, std::vector<(double)> Yx)\n" + "extract_csc_diagonal(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax, \n" + " std::vector<(npy_cfloat_wrapper)> Yx)\n" + "extract_csc_diagonal(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax, \n" + " std::vector<(npy_cdouble_wrapper)> Yx)\n" + ""}, { (char *)"csrtocsc", _wrap_csrtocsc, METH_VARARGS, (char *)"\n" "csrtocsc(int n_row, int n_col, int Ap, int Aj, int Ax, std::vector<(int)> Bp, \n" " std::vector<(int)> Bi, 
std::vector<(int)> Bx)\n" Modified: trunk/scipy/sparse/tests/test_sparse.py =================================================================== --- trunk/scipy/sparse/tests/test_sparse.py 2007-08-23 18:08:52 UTC (rev 3259) +++ trunk/scipy/sparse/tests/test_sparse.py 2007-08-23 20:52:36 UTC (rev 3260) @@ -22,7 +22,7 @@ from numpy.testing import * set_package_path() from scipy.sparse import csc_matrix, csr_matrix, dok_matrix, coo_matrix, \ - spidentity, speye, lil_matrix, lil_eye, lil_diags + spidentity, speye, extract_diagonal, lil_matrix, lil_eye, lil_diags from scipy.linsolve import splu restore_path() @@ -329,7 +329,20 @@ xx = splu(B).solve(r) # Don't actually test the output until we know what it should be ... + def check_extract_diagonal(self): + """ + Test extraction of main diagonal from sparse matrices + """ + L = [] + L.append(array([[0,0,3],[1,6,4],[5,2,0]])) + L.append(array([[1,2,3]])) + L.append(array([[7],[6],[5]])) + L.append(array([[2]])) + for A in L: + assert_array_equal(numpy.diag(A),extract_diagonal(self.spmatrix(A))) + + class _test_horiz_slicing: """Tests vertical slicing (e.g. [:, 0]). Tests for individual sparse matrix types that implement this should derive from this class. @@ -925,6 +938,8 @@ b = array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype='d') assert_array_equal(a.toarray(), b) + + class test_coo(NumpyTestCase): def check_constructor1(self): row = numpy.array([2, 3, 1, 3, 0, 1, 3, 0, 2, 1, 2]) From scipy-svn at scipy.org Fri Aug 24 11:55:07 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Fri, 24 Aug 2007 10:55:07 -0500 (CDT) Subject: [Scipy-svn] r3261 - trunk/scipy/sandbox/multigrid Message-ID: <20070824155507.6575C39C0EE@new.scipy.org> Author: wnbell Date: 2007-08-24 10:55:04 -0500 (Fri, 24 Aug 2007) New Revision: 3261 Modified: trunk/scipy/sandbox/multigrid/multilevel.py trunk/scipy/sandbox/multigrid/utils.py Log: switched inline code to sparse.extract_diagonal() Modified: trunk/scipy/sandbox/multigrid/multilevel.py =================================================================== --- trunk/scipy/sandbox/multigrid/multilevel.py 2007-08-23 20:52:36 UTC (rev 3260) +++ trunk/scipy/sandbox/multigrid/multilevel.py 2007-08-24 15:55:04 UTC (rev 3261) @@ -154,7 +154,7 @@ if __name__ == '__main__': from scipy import * - A = poisson_problem2D(200).T + A = poisson_problem2D(200) asa = smoothed_aggregation_solver(A) #asa = ruge_stuben_solver(A) x = rand(A.shape[0]) Modified: trunk/scipy/sandbox/multigrid/utils.py =================================================================== --- trunk/scipy/sandbox/multigrid/utils.py 2007-08-23 20:52:36 UTC (rev 3260) +++ trunk/scipy/sandbox/multigrid/utils.py 2007-08-24 15:55:04 UTC (rev 3261) @@ -3,8 +3,9 @@ import numpy,scipy,scipy.sparse,scipy.weave from numpy import ravel,arange from scipy.sparse import isspmatrix,isspmatrix_csr,isspmatrix_csc, \ - csr_matrix,csc_matrix + csr_matrix,csc_matrix,extract_diagonal + def inf_norm(A): """ Infinity norm of a sparse matrix (maximum absolute row sum). 
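A tiny worked example (hypothetical session):

    >>> A = csr_matrix(numpy.array([[1., -2.], [3., 4.]]))
    >>> inf_norm(A)   # absolute row sums are 3 and 7
    7.0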
This serves @@ -26,31 +27,8 @@ - return a csr_matrix with A on the diagonal """ - if isspmatrix_csr(A) or isspmatrix_csc(A): - n_row = len(A.indptr) - 1 - data,indices,indptr = A.data,A.indices,A.indptr - - diag = numpy.zeros(n_row,dtype=A.dtype) - - code = """ - #line 33 "sparse.py" - - for(int i = 0; i < n_row; i++){ - for(int jj = indptr(i); jj < indptr(i+1); jj++){ - if(indices(jj) == i){ - diag(i) = data(jj); - } - } - } - """ - - err = scipy.weave.inline(code, - ['data', 'indices', 'indptr', 'n_row', 'diag'], - type_converters = scipy.weave.converters.blitz, - compiler = 'gcc') - return diag - elif isspmatrix(A): - return ravel(array([float(A[i,i]) for i in range(min(A.shape))])) + if isspmatrix(A): + return extract_diagonal(A) else: return csr_matrix((A,arange(len(A)),arange(len(A)+1)),(len(A),len(A))) From scipy-svn at scipy.org Sat Aug 25 19:15:36 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 25 Aug 2007 18:15:36 -0500 (CDT) Subject: [Scipy-svn] r3262 - trunk/scipy/sandbox/multigrid Message-ID: <20070825231536.BAE3739C04A@new.scipy.org> Author: wnbell Date: 2007-08-25 18:15:34 -0500 (Sat, 25 Aug 2007) New Revision: 3262 Modified: trunk/scipy/sandbox/multigrid/coarsen.py trunk/scipy/sandbox/multigrid/multilevel.py Log: change epsilon per level in SA minor changes Modified: trunk/scipy/sandbox/multigrid/coarsen.py =================================================================== --- trunk/scipy/sandbox/multigrid/coarsen.py 2007-08-24 15:55:04 UTC (rev 3261) +++ trunk/scipy/sandbox/multigrid/coarsen.py 2007-08-25 23:15:34 UTC (rev 3262) @@ -35,10 +35,13 @@ Sp,Sj,Sx = multigridtools.sa_strong_connections(A.shape[0],epsilon,A.indptr,A.indices,A.data) return scipy.sparse.csr_matrix((Sx,Sj,Sp),A.shape) -def sa_constant_interpolation(A,epsilon=0.08): +def sa_constant_interpolation(A,epsilon=None): if not scipy.sparse.isspmatrix_csr(A): raise TypeError('expected sparse.csr_matrix') - S = sa_strong_connections(A,epsilon) + if epsilon is not None: + S = sa_strong_connections(A,epsilon) + else: + S = A #tentative (non-smooth) interpolation operator I Ij = multigridtools.sa_get_aggregates(A.shape[0],S.indptr,S.indices) @@ -48,7 +51,7 @@ return scipy.sparse.csr_matrix((Ix,Ij,Ip)) -def sa_interpolation(A,epsilon=0.08,omega=4.0/3.0): +def sa_interpolation(A,epsilon,omega=4.0/3.0): if not scipy.sparse.isspmatrix_csr(A): raise TypeError('expected sparse.csr_matrix') I = sa_constant_interpolation(A,epsilon) Modified: trunk/scipy/sandbox/multigrid/multilevel.py =================================================================== --- trunk/scipy/sandbox/multigrid/multilevel.py 2007-08-24 15:55:04 UTC (rev 3261) +++ trunk/scipy/sandbox/multigrid/multilevel.py 2007-08-25 23:15:34 UTC (rev 3262) @@ -36,6 +36,15 @@ return scipy.sparse.spdiags([D,O,T,T,O],[0,-N,-1,1,N],N*N,N*N).tocsr() def ruge_stuben_solver(A,max_levels=10,max_coarse=500): + """ + Create a multilevel solver using Ruge-Stuben coarsening (Classical AMG) + + References: + "Multigrid" + Trottenberg, U., C. W. Oosterlee, and Anton Schuller. San Diego: Academic Press, 2001. 
+ See Appendix A + + """ As = [A] Ps = [] @@ -50,11 +59,20 @@ return multilevel_solver(As,Ps) def smoothed_aggregation_solver(A,max_levels=10,max_coarse=500): + """ + Create a multilevel solver using Smoothed Aggregation (SA) + + References: + "Algebraic Multigrid by Smoothed Aggregation for Second and Fourth Order Elliptic Problems", + Petr Vanek and Jan Mandel and Marian Brezina + http://citeseer.ist.psu.edu/vanek96algebraic.html + + """ As = [A] Ps = [] while len(As) < max_levels and A.shape[0] > max_coarse: - P = sa_interpolation(A) + P = sa_interpolation(A,epsilon=0.08*0.5**(len(As)-1)) A = (P.T.tocsr() * A) * P #galerkin operator @@ -135,6 +153,7 @@ coarse_b = self.Ps[lvl].T * residual if lvl == len(self.As) - 2: + #direct solver on coarsest level coarse_x[:] = scipy.linalg.solve(self.As[-1].todense(),coarse_b) else: self.__solve(lvl+1,coarse_x,coarse_b) @@ -155,15 +174,15 @@ if __name__ == '__main__': from scipy import * A = poisson_problem2D(200) - asa = smoothed_aggregation_solver(A) - #asa = ruge_stuben_solver(A) + ml = smoothed_aggregation_solver(A) + #ml = ruge_stuben_solver(A) x = rand(A.shape[0]) b = zeros_like(x) resid = [] for n in range(10): - x = asa.solve(b,x,maxiter=1) + x = ml.solve(b,x,maxiter=1) resid.append(linalg.norm(A*x)) From scipy-svn at scipy.org Mon Aug 27 09:30:23 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Mon, 27 Aug 2007 08:30:23 -0500 (CDT) Subject: [Scipy-svn] r3263 - in trunk/scipy/stats: . tests Message-ID: <20070827133023.5CF0F39C153@new.scipy.org> Author: stefan Date: 2007-08-27 08:29:59 -0500 (Mon, 27 Aug 2007) New Revision: 3263 Modified: trunk/scipy/stats/distributions.py trunk/scipy/stats/tests/test_distributions.py Log: Change distribution boundaries to be inclusive. Closes #488. Modified: trunk/scipy/stats/distributions.py =================================================================== --- trunk/scipy/stats/distributions.py 2007-08-25 23:15:34 UTC (rev 3262) +++ trunk/scipy/stats/distributions.py 2007-08-27 13:29:59 UTC (rev 3263) @@ -13,7 +13,7 @@ from numpy import alltrue, where, arange, put, putmask, \ ravel, take, ones, sum, shape, product, repeat, reshape, \ zeros, floor, logical_and, log, sqrt, exp, arctanh, tan, sin, arcsin, \ - arctan, tanh, ndarray, cos, cosh, sinh, newaxis + arctan, tanh, ndarray, cos, cosh, sinh, newaxis, array from numpy import atleast_1d, polyval, angle, ceil, place, extract, \ any, argsort, argmax, vectorize, r_, asarray, nan, inf, pi, isnan, isinf import numpy @@ -467,10 +467,10 @@ args = tuple(map(arr,args)) x = arr((x-loc)*1.0/scale) cond0 = self._argcheck(*args) & (scale > 0) - cond1 = (scale > 0) & (x > self.a) & (x < self.b) + cond1 = (scale > 0) & (x >= self.a) & (x <= self.b) cond = cond0 & cond1 output = zeros(shape(cond),'d') - place(output,(1-cond0)*(cond1==cond1),self.badvalue) + putmask(output,(1-cond0)*array(cond1,bool),self.badvalue) goodargs = argsreduce(cond, *((x,)+args+(scale,))) scale, goodargs = goodargs[-1], goodargs[:-1] place(output,cond,self._pdf(*goodargs) / scale) Modified: trunk/scipy/stats/tests/test_distributions.py =================================================================== --- trunk/scipy/stats/tests/test_distributions.py 2007-08-25 23:15:34 UTC (rev 3262) +++ trunk/scipy/stats/tests/test_distributions.py 2007-08-27 13:29:59 UTC (rev 3263) @@ -211,5 +211,9 @@ for s,p in zip(states,probability): assert abs(sum(x == s)/float(samples) - p) < 0.05 +class test_expon(NumpyTestCase): + def check_zero(self): + assert_equal(stats.expon.pdf(0),1) + if 
__name__ == "__main__": NumpyTest('stats.distributions').run() From scipy-svn at scipy.org Mon Aug 27 09:39:22 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Mon, 27 Aug 2007 08:39:22 -0500 (CDT) Subject: [Scipy-svn] r3264 - trunk/scipy/weave Message-ID: <20070827133922.7E44639C06D@new.scipy.org> Author: stefan Date: 2007-08-27 08:39:11 -0500 (Mon, 27 Aug 2007) New Revision: 3264 Modified: trunk/scipy/weave/catalog.py Log: Ensure that weave output directory exists. Closes #482. Modified: trunk/scipy/weave/catalog.py =================================================================== --- trunk/scipy/weave/catalog.py 2007-08-27 13:29:59 UTC (rev 3263) +++ trunk/scipy/weave/catalog.py 2007-08-27 13:39:11 UTC (rev 3264) @@ -177,9 +177,12 @@ # Use a cached value for fast return if possible try: + assert os.path.exists(default_dir.cached_path) return default_dir.cached_path except AttributeError: pass + except AssertionError: + pass python_name = "python%d%d_compiled" % tuple(sys.version_info[:2]) if sys.platform != 'win32': From scipy-svn at scipy.org Mon Aug 27 09:45:44 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Mon, 27 Aug 2007 08:45:44 -0500 (CDT) Subject: [Scipy-svn] r3265 - trunk/scipy/weave Message-ID: <20070827134544.43DFD39C108@new.scipy.org> Author: stefan Date: 2007-08-27 08:45:32 -0500 (Mon, 27 Aug 2007) New Revision: 3265 Modified: trunk/scipy/weave/catalog.py Log: Fix #482 a different way. Modified: trunk/scipy/weave/catalog.py =================================================================== --- trunk/scipy/weave/catalog.py 2007-08-27 13:39:11 UTC (rev 3264) +++ trunk/scipy/weave/catalog.py 2007-08-27 13:45:32 UTC (rev 3265) @@ -176,13 +176,9 @@ """ # Use a cached value for fast return if possible - try: - assert os.path.exists(default_dir.cached_path) + if hasattr(default_dir,"cached_path") and \ + os.path.exists(default_dir.cached_path): return default_dir.cached_path - except AttributeError: - pass - except AssertionError: - pass python_name = "python%d%d_compiled" % tuple(sys.version_info[:2]) if sys.platform != 'win32': From scipy-svn at scipy.org Mon Aug 27 12:17:12 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Mon, 27 Aug 2007 11:17:12 -0500 (CDT) Subject: [Scipy-svn] r3266 - in trunk/scipy/weave: . tests Message-ID: <20070827161712.9B07A39C18E@new.scipy.org> Author: stefan Date: 2007-08-27 11:16:49 -0500 (Mon, 27 Aug 2007) New Revision: 3266 Modified: trunk/scipy/weave/size_check.py trunk/scipy/weave/tests/test_size_check.py trunk/scipy/weave/tests/test_wx_spec.py trunk/scipy/weave/wx_spec.py Log: The weave test suite is a mess. Fix some of the tests. Modified: trunk/scipy/weave/size_check.py =================================================================== --- trunk/scipy/weave/size_check.py 2007-08-27 13:45:32 UTC (rev 3265) +++ trunk/scipy/weave/size_check.py 2007-08-27 16:16:49 UTC (rev 3266) @@ -162,11 +162,7 @@ def __len__(self): return self.shape[0] def __getslice__(self,i,j): - # enabling the following would make class compatible with - # lists. Its current incarnation is compatible with arrays. - # Both this and Numeric should have this FIXED to correspond - # to lists. 
- #i = max(i, 0); j = max(j, 0) + i = max(i, 0); j = max(j, 0) return self.__getitem__((slice(i,j),)) def __getitem__(self,indices): # ayeyaya this is a mess Modified: trunk/scipy/weave/tests/test_size_check.py =================================================================== --- trunk/scipy/weave/tests/test_size_check.py 2007-08-27 13:45:32 UTC (rev 3265) +++ trunk/scipy/weave/tests/test_size_check.py 2007-08-27 16:16:49 UTC (rev 3266) @@ -93,7 +93,7 @@ def check_error1(self): x,y = (5,),(4,) self.generic_error_test(x,y) - + def check_error2(self): x,y = (5,5),(4,5) self.generic_error_test(x,y) @@ -163,8 +163,6 @@ def check_1d_2(self): self.generic_1d('a[-1:]') def check_1d_3(self): - # dummy_array is "bug for bug" equiv to numpy.numerix.array - # on wrapping of indices. self.generic_1d('a[-11:]') def check_1d_4(self): self.generic_1d('a[:1]') @@ -246,7 +244,7 @@ end2 = random.choice(choices) step2 = random.choice(choices) if step in ['0',0]: step = 'None' - if step2 in ['0',0]: step2 = 'None' + if step2 in ['0',0]: step2 = 'None' expr = 'a[%s:%s:%s,%s:%s:%s]' %(beg,end,step,beg2,end2,step2) self.generic_2d(expr) except IndexError: @@ -263,7 +261,7 @@ val = random.choice(choices) if (i+1) % 3 == 0 and val in ['0',0]: val = 'None' - idx.append(val) + idx.append(val) expr = 'a[%s:%s:%s,%s:%s:%s,%s:%s:%s]' % tuple(idx) self.generic_3d(expr) except IndexError: Modified: trunk/scipy/weave/tests/test_wx_spec.py =================================================================== --- trunk/scipy/weave/tests/test_wx_spec.py 2007-08-27 13:45:32 UTC (rev 3265) +++ trunk/scipy/weave/tests/test_wx_spec.py 2007-08-27 16:16:49 UTC (rev 3266) @@ -13,35 +13,43 @@ from weave import ext_tools, wx_spec restore_path() -import wxPython -import wxPython.wx +import wx class test_wx_converter(NumpyTestCase): + def setUp(self): + self.app = wx.App() + self.s = wx_spec.wx_converter() + def check_type_match_string(self,level=5): - s = wx_spec.wx_converter() - assert(not s.type_match('string') ) + assert(not self.s.type_match('string') ) + def check_type_match_int(self,level=5): - s = wx_spec.wx_converter() - assert(not s.type_match(5)) + assert(not self.s.type_match(5)) + def check_type_match_float(self,level=5): - s = wx_spec.wx_converter() - assert(not s.type_match(5.)) + assert(not self.s.type_match(5.)) + def check_type_match_complex(self,level=5): - s = wx_spec.wx_converter() - assert(not s.type_match(5.+1j)) + assert(not self.s.type_match(5.+1j)) + def check_type_match_complex(self,level=5): - s = wx_spec.wx_converter() - assert(not s.type_match(5.+1j)) + assert(not self.s.type_match(5.+1j)) + def check_type_match_wxframe(self,level=5): - s = wx_spec.wx_converter() - f=wxPython.wx.wxFrame(wxPython.wx.NULL,-1,'bob') - assert(s.type_match(f)) + f=wx.Frame(None,-1,'bob') + assert(self.s.type_match(f)) def check_var_in(self,level=5): mod = ext_tools.ext_module('wx_var_in',compiler='msvc') - a = wxPython.wx.wxFrame(wxPython.wx.NULL,-1,'bob') + mod.customize.add_header('') + mod.customize.add_extra_compile_arg(' '.join(self.s.extra_compile_args)) + mod.customize.add_extra_link_arg(' '.join(self.s.extra_link_args)) + + a = wx.Frame(None,-1,'bob') code = """ - a->SetTitle(wxString("jim")); + py::tuple args(1); + args[0] = py::object("jim"); + a.mcall("SetTitle",args); """ test = ext_tools.ext_function('test',code,['a'],locals(),globals()) mod.add_function(test) @@ -64,31 +72,34 @@ def no_check_var_local(self,level=5): mod = ext_tools.ext_module('wx_var_local') a = 'string' + code = 'a="hello";' var_specs = 
ext_tools.assign_variable_types(['a'],locals()) - code = 'a=Py::String("hello");' - test = ext_tools.ext_function('test',var_specs,code) + test = ext_tools.ext_function_from_specs('test',code,var_specs) mod.add_function(test) mod.compile() import wx_var_local b='bub' q={} wx_var_local.test(b,q) - assert(q['a'] == 'hello') - def no_check_return(self,level=5): + assert('a' == 'string') + + def no_test_no_check_return(self,level=5): mod = ext_tools.ext_module('wx_return') a = 'string' - var_specs = ext_tools.assign_variable_types(['a'],locals()) code = """ a= Py::wx("hello"); return_val = Py::new_reference_to(a); """ - test = ext_tools.ext_function('test',var_specs,code) + test = ext_tools.ext_function('test',code,['a'],locals()) mod.add_function(test) mod.compile() import wx_return b='bub' c = wx_return.test(b) - assert( c == 'hello') + assert(c == 'hello') if __name__ == "__main__": + import sys + if len(sys.argv) == 1: + sys.argv.extend(["--level=5"]) NumpyTest().run() Modified: trunk/scipy/weave/wx_spec.py =================================================================== --- trunk/scipy/weave/wx_spec.py 2007-08-27 13:45:32 UTC (rev 3265) +++ trunk/scipy/weave/wx_spec.py 2007-08-27 16:16:49 UTC (rev 3266) @@ -1,16 +1,30 @@ import common_info from c_spec import common_base_converter import sys,os +import glob -# these may need user configuration. -if sys.platform == "win32": - wx_base = r'c:\third\wxpython-2.4.0.7' -else: - # probably should do some more discovery here. - wx_base = '/usr/lib/wxPython' +def find_base_dir(): + searched_locations = ['c:\third\wxpython*', + '/usr/lib/wx*'] + candidate_locations = [] + for pattern in searched_locations: + candidate_locations.extend(glob.glob(pattern)) + candidate_locations.sort() + + if len(candidate_locations) == 0: + raise RuntimeError("Could not locate wxPython base directory.") + else: + return candidate_locations[-1] + +wx_base = find_base_dir() + def get_wxconfig(flag): wxconfig = os.path.join(wx_base,'bin','wx-config') + if not os.path.exists(wxconfig): + # Could not locate wx-config, assume it is on the path. + wxconfig = 'wx-config' + import commands res,settings = commands.getstatusoutput(wxconfig + ' --' + flag) if res: From scipy-svn at scipy.org Mon Aug 27 13:07:17 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Mon, 27 Aug 2007 12:07:17 -0500 (CDT) Subject: [Scipy-svn] r3267 - trunk/scipy/weave Message-ID: <20070827170717.16A3839C0B8@new.scipy.org> Author: stefan Date: 2007-08-27 12:07:05 -0500 (Mon, 27 Aug 2007) New Revision: 3267 Modified: trunk/scipy/weave/catalog.py Log: Handle non-existent catalog file gracefully. Modified: trunk/scipy/weave/catalog.py =================================================================== --- trunk/scipy/weave/catalog.py 2007-08-27 16:16:49 UTC (rev 3266) +++ trunk/scipy/weave/catalog.py 2007-08-27 17:07:05 UTC (rev 3267) @@ -289,8 +289,9 @@ msg = " mode must be 'c', 'n', 'r', or 'w'. See anydbm for more info" raise ValueError, msg catalog_file = catalog_path(module_path) - if (dumb and os.path.exists(catalog_file+'.dat')) \ - or os.path.exists(catalog_file): + if (catalog_file is not None) \ + and ((dumb and os.path.exists(catalog_file+'.dat')) \ + or os.path.exists(catalog_file)): sh = shelve.open(catalog_file,mode) else: if mode=='r': @@ -552,7 +553,7 @@ function exists with a warning. 
""" writable_cat = None - if not os.path.exists(catalog_path): + if (catalog_path is not None) and (not os.path.exists(catalog_path)): return try: writable_cat = get_catalog(catalog_path,'w') From scipy-svn at scipy.org Mon Aug 27 16:12:58 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Mon, 27 Aug 2007 15:12:58 -0500 (CDT) Subject: [Scipy-svn] r3268 - trunk/scipy/weave/tests Message-ID: <20070827201258.551ED39C29A@new.scipy.org> Author: eric Date: 2007-08-27 15:12:57 -0500 (Mon, 27 Aug 2007) New Revision: 3268 Modified: trunk/scipy/weave/tests/test_c_spec.py Log: Fixed a 'base class' tests so that it could run on its own. The test suite is finding it and running it that way causing failures. This is related to #490, but doesn't fix all the failures there. Modified: trunk/scipy/weave/tests/test_c_spec.py =================================================================== --- trunk/scipy/weave/tests/test_c_spec.py 2007-08-27 17:07:05 UTC (rev 3267) +++ trunk/scipy/weave/tests/test_c_spec.py 2007-08-27 20:12:57 UTC (rev 3268) @@ -512,6 +512,15 @@ class test_dict_converter(NumpyTestCase): + """ Base Class for dictionary conversion tests. + """ + + # Default string specifying the compiler to use. While this is set + # in all sub-classes, this base test class is found by the test + # infrastructure and run. Therefore, we give it a default value + # so that it can run on its own. + compiler='' + def check_type_match_bad(self,level=5): s = c_spec.dict_converter() objs = [[],(),'',1,1.,1+1j] From scipy-svn at scipy.org Mon Aug 27 18:18:03 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Mon, 27 Aug 2007 17:18:03 -0500 (CDT) Subject: [Scipy-svn] r3269 - trunk/scipy/sandbox/ga Message-ID: <20070827221803.9BE3939C0DF@new.scipy.org> Author: rkern Date: 2007-08-27 17:18:00 -0500 (Mon, 27 Aug 2007) New Revision: 3269 Added: trunk/scipy/sandbox/ga/prng.py Modified: trunk/scipy/sandbox/ga/algorithm.py trunk/scipy/sandbox/ga/examples.py trunk/scipy/sandbox/ga/ga_gnm.py trunk/scipy/sandbox/ga/ga_list.py trunk/scipy/sandbox/ga/ga_util.py trunk/scipy/sandbox/ga/gene.py trunk/scipy/sandbox/ga/genome.py trunk/scipy/sandbox/ga/info_ga.py trunk/scipy/sandbox/ga/language.py trunk/scipy/sandbox/ga/parallel_pop.py trunk/scipy/sandbox/ga/population.py trunk/scipy/sandbox/ga/scaling.py trunk/scipy/sandbox/ga/selection.py trunk/scipy/sandbox/ga/tree.py trunk/scipy/sandbox/ga/tree_opt.py Log: Updated most of the things in the GA package that looked like they needed to be updated. It has not been tested, so it probably doesn't all work, yet. If anyone wants to bang on it, please do so. Modified: trunk/scipy/sandbox/ga/algorithm.py =================================================================== --- trunk/scipy/sandbox/ga/algorithm.py 2007-08-27 20:12:57 UTC (rev 3268) +++ trunk/scipy/sandbox/ga/algorithm.py 2007-08-27 22:18:00 UTC (rev 3269) @@ -1,74 +1,84 @@ -from ga_util import * -import scipy.stats as stats -rv = stats -#import scipy.io.dumb_shelve -import string -import os, sys -import time, pprint, types,copy +import copy import dumbdbm -#import thread, sync +import pprint +import sys +import time + +from ga_util import flip_coin, my_mean, my_std +from prng import prng + + if sys.platform != 'win32': - import fcntl timer = time.clock #clock behaves differently work on linux else: timer = time.time dberror = dumbdbm.error -def max_score(pop): return max(map(lambda x: x.score(),pop)) +def max_score(pop): + """ Find the maximum score in a population. 
+ """ + return max([x.score() for x in pop]) -class galg: - """A basic genetic algorithm. The genetic algorithm is responsible - for evolving a population of genomes. While the population and - the genomes are in charge of defining most of the genetic operators - such as selection, scaling, mutation, and crossover, it is the - genetic algorithm class that orchestrates the evolution and calls - the operators in the correct order. Most of the work is done - in the **step()** method. +class galg(object): + """ A basic genetic algorithm. + + The genetic algorithm is responsible for evolving a population of genomes. + While the population and the genomes are in charge of defining most of the + genetic operators such as selection, scaling, mutation, and crossover, it is + the genetic algorithm class that orchestrates the evolution and calls the + operators in the correct order. Most of the work is done in the **step()** + method. """ - valid_settings = ['pop_size','p_replace', - 'p_cross', 'p_mutate','p_deviation', - 'gens','rand_seed','rand_alg','dbase','update_rate'] - output_settings = ['crossover','selector', 'scaler','genome_type'] - default_settings = {'pop_size':150,'p_replace':.8, - 'p_cross': .8, 'p_mutate':'gene', - 'p_deviation': 0.,'gens':35, - 'rand_seed':0,'rand_alg':'CMRG', - 'update_rate': 10000,'dbase':''} + valid_settings = ['pop_size', 'p_replace', 'p_cross', 'p_mutate', + 'p_deviation', 'gens', 'rand_seed', 'dbase', 'update_rate'] + output_settings = ['crossover', 'selector', 'scaler', 'genome_type'] + default_settings = dict( + pop_size = 150, + p_replace = .8, + p_cross = .8, + p_mutate = 'gene', + p_deviation = 0., + gens = 35, + rand_seed = 0, + update_rate = 10000, + dbase = '', + ) default_verbose = 1 - def __init__(self,pop): + def __init__(self, pop): self.verbose = self.default_verbose self.settings = copy.copy(galg.default_settings) self.pop = pop - def test_settings(self,settings): + + def test_settings(self, settings): + """ Check that a settings dictionary is consistent with the settings + that we can accept. + """ for key in settings.keys(): - try: - self.output_settings.index(key) + if key in self.output_settings: print 'Warning: The key "%s" in settings is readonly.' % key - except ValueError: - try: self.valid_settings.index(key) - except ValueError: - print 'Warning: The key "%s" in not a valid setting.' % key - print 'The valid settings are %s' % self.valid_settings + elif key not in self.valid_settings: + print 'Warning: The key "%s" in not a valid setting.' % key + print 'The valid settings are %s' % self.valid_settings - def initialize(self,reseed = 1): + def initialize(self, reseed=True): b = timer() self.test_settings(self.settings) self.gen = 0 - sd = self.settings['rand_seed']; alg = self.settings['rand_alg'] - if reseed: rv.initialize(seed = sd, algorithm = alg) - self.settings['seed_used'] = rv.initial_seed() + sd = self.settings['rand_seed'] + if reseed: + prng.seed(sd) + self.settings['seed_used'] = sd self._print('initializing... seed = %d' % self.settings['seed_used']) self.crossover = self.pop.model_genome.crossover # get the crossover op from the first genome self.pop.settings = self.settings #should these be shared? 
self.size_pop(self.settings['pop_size']) - self.settings['crossover'] = string.split(str(self.crossover))[0][1:] - self.settings['selector'] = string.split(str(self.pop.selector))[0][1:] - self.settings['scaler'] = string.split(str(self.pop.scaler))[0][1:] - self.settings['genome_type'] = string.split(str(self.pop.model_genome))[0][1:] -# self._print(self.settings) + self.settings['crossover'] = str(self.crossover).split()[0][1:] + self.settings['selector'] = str(self.pop.selector).split()[0][1:] + self.settings['scaler'] = str(self.pop.scaler).split()[0][1:] + self.settings['genome_type'] = str(self.pop.model_genome).split()[0][1:] self.pop.initialize(self.settings); self.stats = {'selections':0,'crossovers':0,'mutations':0, @@ -76,11 +86,16 @@ self.stats.update(self.pop.stats) self.step_time = timer() - b self.init_dbase() - def size_pop(self,s): + + def size_pop(self, s): + """ Set the size of the population. + """ self.settings['pop_size'] = s self.pop._size(s) - def step(self,steps=1): + def step(self, steps=1): + """ Perform a number of steps. + """ sz = len(self.pop) replace = int(self.settings['p_replace'] * len(self.pop)) p_crossover = self.settings['p_cross'] @@ -139,6 +154,7 @@ self.post_evolve() self.db_entry['run_time'] = timer() - b self.write_dbase() + def iteration_output(self): output = ( 'gen: ' + `self.gen` + ' ' + 'max: ' + `self.stats['current']['max']` + ' ' @@ -164,7 +180,7 @@ self.db_entry['best_scores'] = [self.stats['current']['max']] self.db_entry['stats'] = [copy.deepcopy(self.stats)] self.db_entry['step_time'] = [self.step_time] - self.db_entry['optimization_type'] = string.split(str(self.__class__))[0][1:] + self.db_entry['optimization_type'] = str(self.__class__).split()[0][1:] def update_dbase(self): # self.db_entry['best_scores'].append(self.pop.best().score()) @@ -178,50 +194,56 @@ On NT, hopefully we're using the gdbm module which does automatic file locking. """ - if(self.settings['dbase'] != ''): - fname= self.settings['dbase'] - try: - if sys.platform == 'win32': pass - else: - f = open(fname +'.lock','a') - fcntl.flock(f.fileno(),fcntl.LOCK_EX) - try: - try: db = my_shelve.open(fname,'w') - except dberror: db = my_shelve.open(fname,'c') - keys = db.keys() - if(len(keys) == 0): self.dbkey = `1` - else: - gkeys=[] - for k in keys: - try: gkeys.append(string.atoi(k)) - except ValueError: pass - self.dbkey = `max(gkeys)+1` - print 'DB NAME: ', self.settings['dbase'], 'KEY: ', self.dbkey - db[self.dbkey] = self.db_entry - db.close() - except: pass #if an error occured, we still need to unlock the db - if sys.platform == 'win32': pass - else: - fcntl.flock(f.fileno(),fcntl.LOCK_UN) - f.close() - except: - if sys.platform == 'win32': pass - else: - f = open('error.lock','a') - f.write(os.environ['HOST']) - f.close() + # XXX: broken. No my_shelve. Rewrite. 
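# A minimal sketch of a portable rewrite (hypothetical; plain stdlib
# shelve with no file locking, and fname = self.settings['dbase']):
#     db = shelve.open(fname, 'c')
#     gkeys = [int(k) for k in db.keys() if k.isdigit()]
#     if gkeys: self.dbkey = str(max(gkeys) + 1)
#     else: self.dbkey = '1'
#     db[self.dbkey] = self.db_entry
#     db.close()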
+ raise NotImplementedError +# if(self.settings['dbase'] != ''): +# fname= self.settings['dbase'] +# try: +# if sys.platform == 'win32': pass +# else: +# f = open(fname +'.lock','a') +# fcntl.flock(f.fileno(),fcntl.LOCK_EX) +# try: +# try: db = my_shelve.open(fname,'w') +# except dberror: db = my_shelve.open(fname,'c') +# keys = db.keys() +# if(len(keys) == 0): self.dbkey = `1` +# else: +# gkeys=[] +# for k in keys: +# try: gkeys.append(string.atoi(k)) +# except ValueError: pass +# self.dbkey = `max(gkeys)+1` +# print 'DB NAME: ', self.settings['dbase'], 'KEY: ', self.dbkey +# db[self.dbkey] = self.db_entry +# db.close() +# except: pass #if an error occured, we still need to unlock the db +# if sys.platform == 'win32': pass +# else: +# fcntl.flock(f.fileno(),fcntl.LOCK_UN) +# f.close() +# except: +# if sys.platform == 'win32': pass +# else: +# f = open('error.lock','a') +# f.write(os.environ['HOST']) +# f.close() +# +# else: "no dbase specified" - else: "no dbase specified" - - def _print(self,val, level = 1): + def _print(self, val, level=1): if(self.verbose >= level): - if type(val) == types.StringType: print val + if isinstance(val, basestring): + print val else: pp = pprint.PrettyPrinter(indent=4) pp.pprint(val) ALL = -1 + + + class m_galg(galg): valid_settings = galg.valid_settings + ['num_pops', 'migrants'] default_settings = galg.default_settings @@ -229,19 +251,21 @@ default_settings['migrants'] = 2 verbose = 1 + def __init__(self,pop): galg.__init__(self,pop) # self.GAs = self.GAs + [galg(pop.clone())] self.settings = copy.copy(self.default_settings) - def initialize(self, mode = 'serial'): + def initialize(self, mode='serial', reseed=True): b = timer() #same as galg self.test_settings(self.settings) self.gen = 0 - sd = self.settings['rand_seed']; alg = self.settings['rand_alg'] - rv.initialize(seed = sd, algorithm = alg) - self.settings['seed_used'] = rv.initial_seed() + sd = self.settings['rand_seed'] + if reseed: + prng.seed(sd) + self.settings['seed_used'] = sd self._print('initializing... seed = %d' % self.settings['seed_used']) self.crossover = self.pop[0].crossover # get the crossover op from the first genome self.pop.settings = self.settings @@ -262,13 +286,14 @@ self.GAs.append(galg(self.pop.clone())) self.GAs[i].settings = sub_ga_settings.copy() - self.settings['crossover'] = string.split(str(self.crossover))[0][1:] - self.settings['selector'] = string.split(str(self.pop.selector))[0][1:] - self.settings['scaler'] = string.split(str(self.pop.scaler))[0][1:] - self.settings['genome_type'] = string.split(str(self.pop.model_genome))[0][1:] + self.settings['crossover'] = str(self.crossover).split()[0][1:] + self.settings['selector'] = str(self.pop.selector).split()[0][1:] + self.settings['scaler'] = str(self.pop.scaler).split()[0][1:] + self.settings['genome_type'] = str(self.pop.model_genome).split()[0][1:] self._print(self.settings) if mode[0] == 'p' or mode[0] == 'P': + # XXX: what? """ sys.setcheckinterval(1000) finished = sync.event() @@ -279,7 +304,7 @@ sys.setcheckinterval(10) """ else: - for ga in self.GAs: ga.initialize(reseed = 0) + for ga in self.GAs: ga.initialize(reseed = False) cnt = 0 for ga in self.GAs: self.pop[cnt] = ga.pop.best() @@ -318,7 +343,7 @@ try: self.pop.stats['overall']['min'] = min(self.pop.stats['overall']['min'], self.pop.stats['current']['min']) except KeyError: self.pop.stats['overall']['min'] = self.pop.stats['current']['min'] - self.pop.stats + self.pop.stats # XXX: Is this a no-op? 
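# (It is a no-op: a bare attribute access on a plain dict attribute has
# no side effects, so the line can simply be deleted.)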
self.pop.stats['pop_evals'] = self.GAs[0].stats['pop_evals'] self.stats.update(self.pop.stats) @@ -417,7 +442,7 @@ def GA_initializer(bar,finished,GA): t1 = timer() - GA.initialize(reseed = 0) + GA.initialize(reseed = False) t2 = timer() print 'thread ' + `thread.get_ident()` + 'time ' + `t2-t1` + ' sec.' bar.enter() Modified: trunk/scipy/sandbox/ga/examples.py =================================================================== --- trunk/scipy/sandbox/ga/examples.py 2007-08-27 20:12:57 UTC (rev 3268) +++ trunk/scipy/sandbox/ga/examples.py 2007-08-27 22:18:00 UTC (rev 3269) @@ -54,8 +54,15 @@ # galg = ga.algorithm.galg(pop) # change a few settings - settings = {'pop_size':250,'p_replace':.8,'p_cross': .8, 'p_mutate':'gene', - 'p_deviation': 0.,'gens':35,'rand_seed':0,'rand_alg':'CMRG'} + settings = dict( + pop_size = 250, + p_replace = .8, + p_cross = .8, + p_mutate = 'gene', + p_deviation = 0., + gens = 35, + rand_seed = 0, + ) galg.settings.update(settings) galg.evolve() print galg.pop.best() Modified: trunk/scipy/sandbox/ga/ga_gnm.py =================================================================== --- trunk/scipy/sandbox/ga/ga_gnm.py 2007-08-27 20:12:57 UTC (rev 3268) +++ trunk/scipy/sandbox/ga/ga_gnm.py 2007-08-27 22:18:00 UTC (rev 3269) @@ -1,115 +1,62 @@ """ - This module adds gradient optimization capabilities to the - standard genomes. Basically this means that any problem set up - for a GA is automatically able to be gradient optimized... - - For purity sake, the grad and genome - modules have been left totally separate. It might have - been just as easy to derive genomes directly from - grad.grad - and maybe that will happen in the future. - - Caveats: + This has only be set up for list_genomes made up of floating point genes. + The tree_genomes just need to be recoded here translating the pick_numbers + functions from tree_opt. - This has only be set up for list_genomes made up of - - floating point genes. The tree_genomes just need to - - be recoded here translating the pick_numbers functions - - from tree_opt. - - - - genomes of discrete variable genes should be able to work also. - + Genomes of discrete variable genes should be able to work also. """ - - import grad - import genome - - class list_genome(genome.list_genome,grad.grad): + """ So far, grad_min, and grad_max only work for float_genes. - """ So far, grad_min, and grad_max only - - work for float_genes. - Test: - #Test gradient optimization - >>> import ga_gnm, gene - >>> g = gene.float_gene((-1,1)) - >>> class simple_genome(ga_gnm.list_genome): - ... def performance(self): - ... s = 0 - ... for i in self: s = s+ i - ... return s - >>> a = simple_genome(g.replicate(10)) - >>> a.initialize() - >>> a.grad_opt(5) - 33 - >>> a - [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] - """ def grad_params(self): - return self.get_values() # calls list__genome get_values() def set_grad_params(self,x): - self.set_values(x) # calls list__genome set_values() #do we really need this? 
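# For example (hypothetical: a genome of three float_genes bounded by
# (-1, 1)): grad_params() returns the current gene values, while
# grad_min() returns [-1, -1, -1] and grad_max() returns [1, 1, 1],
# one entry per gene.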
- def grad_len(self): - return len(self) def grad_min(self): - gmin = [] - for flt_gene in self: - gmin.append(flt_gene.bounds[0]) - return gmin def grad_max(self): - gmax = [] - for flt_gene in self: - gmax.append(flt_gene.bounds[1]) - return gmax Modified: trunk/scipy/sandbox/ga/ga_list.py =================================================================== --- trunk/scipy/sandbox/ga/ga_list.py 2007-08-27 20:12:57 UTC (rev 3268) +++ trunk/scipy/sandbox/ga/ga_list.py 2007-08-27 22:18:00 UTC (rev 3269) @@ -1,6 +1,5 @@ -from ga_util import * +from ga_util import shallow_clone import UserList -import copy class ga_list(UserList.UserList): def data_clone(self): Modified: trunk/scipy/sandbox/ga/ga_util.py =================================================================== --- trunk/scipy/sandbox/ga/ga_util.py 2007-08-27 20:12:57 UTC (rev 3268) +++ trunk/scipy/sandbox/ga/ga_util.py 2007-08-27 22:18:00 UTC (rev 3269) @@ -1,61 +1,61 @@ -#base definitions for genetic algorithms -import scipy.stats as rv -stats = rv +""" Basic utilities for the GA package. +""" -GAError = 'GA Error' +from numpy import mean, std -def nop(x): return x -def flip_coin(p): return (rv.random() < p) +from prng import prng -import random -def flip_coin2(p): return (random.random() < p) -class empty_class: pass +class GAError(Exception): + """ Error from the GA code. + """ +def nop(x): + """ Basic 'no-op' stub useful for interfaces which require a function. + """ + return x + +def flip_coin(p): + """ Return True with probability p. + """ + return (prng.random() < p) + +class empty_class: + """ Dummy class for cloning objects. + """ + pass + def shallow_clone(item): + """ Make a simple clone of an object. + + The attributes are not copied, just referenced. + """ new = empty_class() new.__class__ = item.__class__ new.__dict__.update(item.__dict__) return new -#these are exacly correct, but htey prevent problems with -Inf and Inf + +def remove_NaN(z): + """ Return an array with only finite (non-NaN, non-inf) values. + """ + from numpy import isfinite + return z[isfinite(z)] + def my_std(s): -# try: + """ Standard deviation robust to NaNs and infs. + """ a = remove_NaN(s) - if len(a) > 1: return stats.std(a) - else: return 0. -# except: -# import pdb -# pdb.set_trace() + if len(a) > 1: + return std(a) + else: + return 0. + def my_mean(s): + """ Mean robust to NaNs and infs. + """ a = remove_NaN(s) - if len(a) > 1: return stats.mean(a) - else: return 0. + if len(a) > 0: + return mean(a) + else: + return 0. 
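A quick sanity check of the robust helpers (hypothetical interactive
session; only the finite entries contribute):

    >>> from numpy import array, nan, inf
    >>> my_mean(array([1.0, 2.0, nan, inf]))
    1.5
    >>> my_std(array([1.0, 1.0, nan]))
    0.0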
-def testflip(): - - import time - b = time.clock() - for i in range(10000): a = flip_coin(.5) - e = time.clock() - print 'rv_flip',e-b - b = time.clock() - for i in range(10000): a = flip_coin2(.5) - e = time.clock() - print 'wh_flip',e-b - from rv import random - b = time.clock() - for i in range(10000): - a = random() < .5 - e = time.clock() - print 'rv',e-b - from random import random - b = time.clock() - for i in range(10000): - a = random() < .5 - e = time.clock() - print 'wh',e-b - - -def remove_NaN(z): - from numpy import isnan, isinf, compress, logical_not - return compress(logical_not( isnan(z)+isinf(z)),z,axis=-1) Modified: trunk/scipy/sandbox/ga/gene.py =================================================================== --- trunk/scipy/sandbox/ga/gene.py 2007-08-27 20:12:57 UTC (rev 3268) +++ trunk/scipy/sandbox/ga/gene.py 2007-08-27 22:18:00 UTC (rev 3269) @@ -12,14 +12,15 @@ 12/31/98 documentation added ej """ -from ga_util import * -import scipy.stats as rv -from random import random -from numpy import * -import copy -from scipy.ga.tree import tree_node +from numpy import array, log10 -class gene: +from tree import tree_node +from ga_util import GAError, nop, shallow_clone +from prng import prng + + + +class gene(object): """ Genes are the most basic building block in this genetic algorithm library. A gene represents a particular trait of an individual solution. The gene class @@ -61,20 +62,27 @@ mutator = None initializer = None is_gene = 1 + def clone(self): - """Makes a shallow copy of the object. override if you need more specialized behavior + """ Makes a shallow copy of the object. + + Override if you need more specialized behavior. """ return shallow_clone(self) - def replicate(self,cnt): - """Returns a list with cnt copies of this object in it + + def replicate(self, count): + """ Returns a list with count copies of this object in it. """ - return map(lambda x: x.clone(),[self]*cnt) + return map(lambda x: x.clone(),[self]*count) + def initialize(self): - """Calls the initializer objects evaluate() function to initialize the gene + """ Calls the initializer objects evaluate() function to initialize the + gene. """ self._value = self.initializer.evaluate(self) return self.value() - def set_mutation(self,mrate): + + def set_mutation(self, mrate): """ Set the mutation rate of the gene. @@ -90,7 +98,7 @@ try: del self.mutation_rate #remove local mrates and use gene classes mrate except AttributeError: pass elif(mrate=='adapt'): - self.mutation_rate = rv.uniform(self.mr_bounds[0],self.mr_bounds[1])[0] + self.mutation_rate = prng.uniform(self.mr_bounds[0], self.mr_bounds[1]) else: self.__class__.mutation_rate = mrate @@ -102,7 +110,7 @@ mutation_rate of the time. Otherwise, it does nothing. """ #inlined 'flip_coin' for speed - if random() < self.mutation_rate: + if prng.random() < self.mutation_rate: self._value = self.mutator.evaluate(self) return 1 return 0 @@ -152,15 +160,16 @@ except AttributeError: v2 = other return cmp(v1,v2) -class list_gene_uniform_mutator: +class list_gene_uniform_mutator(object): """ This class randomly chooses a new gene value from the allele set in a list_gene. It is also useful as an initializer for list_gene. """ def evaluate(self,gene): """ return a randomly chosen value from the genes allele set """ - return rv.choice(gene.allele_set) -class list_gene_gaussian_mutator: + return prng.choice(gene.allele_set) + +class list_gene_gaussian_mutator(object): """ This class chooses a new gene value from the allele set in a list_gene. 
The new value is chosen from a gaussian @@ -195,12 +204,12 @@ old = gene.index() new = -1; f = -1 while not (0 <= new < size): - f = rv.norm.rvs(old,w)[0] + f = prng.normal(old,w) new = round(f) if(old == new and f > new): new = new + 1 if(old == new and f < new): new = new - 1 return gene.allele_set[int(new)] -class list_gene_walk_mutator: +class list_gene_walk_mutator(object): """ This class chooses a new gene value from the allele set in a list_gene. The newly chosen value is +/-1 element @@ -209,7 +218,7 @@ """ def evaluate(self,gene): old = gene.index() - move = rv.choice((-1,1)) + move = prng.choice((-1,1)) return gene.allele_set[old + move] class list_gene(gene): @@ -243,17 +252,19 @@ and resistor values during evaluation """ func = nop - def value(self): return func(self._value) - def __repr__(self): return `self._value` #??? + def value(self): + return self.func(self._value) + def __repr__(self): + return repr(self._value) #??? -class float_gene_uniform_mutator: +class float_gene_uniform_mutator(object): """ randomly choose a value within the float_gene's bounds""" def evaluate(self,gene): bounds=gene.bounds - new =rv.uniform(bounds[0], bounds[1]-bounds[0] ).rvs()[0] + new = prng.uniform(bounds[0], bounds[1]-bounds[0]) return new -class float_gene_gaussian_mutator: +class float_gene_gaussian_mutator(object): """ chooses a new value for a float_gene with gaussian shaped distribution around the current value. @@ -271,10 +282,10 @@ dev = (gene.bounds[1]-gene.bounds[0]) * self.dev_width new = gene.bounds[1] # while not (gene.bounds[0] <= new < gene.bounds[1]): -# new = rv.norm.rvs(gene.value(),dev)[0] -# new = rv.norm(gene.value(),dev)[0] +# new = prng.normal(gene.value(),dev) +# new = prng.normal(gene.value(),dev) #get the _value explicitly so mutator will work for log_float also - new = rv.norm.rvs(gene._value,dev)[0] + new = prng.normal(gene._value,dev) if new > gene.bounds[1]: new = gene.bounds[1] if new < gene.bounds[0]: new = gene.bounds[0] return new @@ -313,7 +324,7 @@ try: return 10.**(self._value) except AttributeError: raise GAError, 'gene not initialized' -class frozen: +class frozen(object): """frozen is a gene that always maintains the same value. """ def __init__(self,val): self._value = val @@ -370,7 +381,7 @@ try: del self.mutation_rate #remove local mrates and use gene classes mrate except AttributeError: pass elif(mrate=='adapt'): - self.mutation_rate = rv.uniform(self.mr_bounds[0],self.mr_bounds[1])[0] + self.mutation_rate = prng.uniform(self.mr_bounds[0],self.mr_bounds[1]) else: self.__class__.mutation_rate = mrate for child in self._children: child.set_mutation(mrate) Modified: trunk/scipy/sandbox/ga/genome.py =================================================================== --- trunk/scipy/sandbox/ga/genome.py 2007-08-27 20:12:57 UTC (rev 3268) +++ trunk/scipy/sandbox/ga/genome.py 2007-08-27 22:18:00 UTC (rev 3269) @@ -14,18 +14,20 @@ it an evaluator class and you are in business. """ -from ga_util import * -import scipy.stats as rv -import Numeric, copy -import tree +from numpy import array -class default_evaluator: +from ga_util import GAError, shallow_clone +import language +from prng import prng + + +class default_evaluator(object): """ This default evaluator class just reminds you to define your own. """ def evaluate(self,genome): return genome.performance() #if a performance() method is available, use it! 
#raise GAError, 'objective must be specified' -class genome: +class genome(object): """ The class genome is used as the base class for genome classes that you use in your program. It should not be used directly. In particular, the **clone()** @@ -140,7 +142,7 @@ """ return 1 -class list_genome_default_initializer: +class list_genome_default_initializer(object): """ The evaluate() function for this class simply calls the **initialize()** function for each gene in the **list_genome**. """ @@ -148,7 +150,7 @@ for gene in genome: gene.initialize() def __call__(self,genome): return self.evaluate(genome) -class list_genome_default_mutator: +class list_genome_default_mutator(object): """ The evaluate() function for this class simply calls the **mutate()** function for each gene in the **list_genome**. It returns 1 if any of the genes were mutated @@ -159,14 +161,14 @@ return mutated def __call__(self,genome): return self.evaluate(genome) -class list_genome_singlepoint_crossover: +class list_genome_singlepoint_crossover(object): def evaluate(self,parents): #assume mom and dad are the same length mom = parents[0]; dad = parents[1] if(len(mom) > 1): - crosspoint = rv.randint(1,len(mom)-1).rvs()[0] + crosspoint = prng.randint(1,len(mom)-1).rvs() else: - crosspoint = rv.randint(0,len(mom)).rvs()[0] + crosspoint = prng.randint(0,len(mom)).rvs() brother = (mom[:crosspoint] + dad[crosspoint:]).clone() sister = (dad[:crosspoint] + mom[crosspoint:]).clone() return brother, sister @@ -224,7 +226,7 @@ """Most of the time, the genes in this genome specify numeric parameters. This method returns the values of the genes in an array (NumPy) """ - return Numeric.array(self.get_values()) + return array(self.get_values()) def set_values(self,x): """ Set the values of the genes """ @@ -248,7 +250,7 @@ def dict_choice(dict): tot = 0 for key in dict.keys(): tot = tot + len(dict[key]) - index = rv.choice(xrange(0,tot)) + index = prng.choice(xrange(0,tot)) for key in dict.keys(): if index >= len(dict[key]): index = index - len(dict[key]) @@ -265,7 +267,7 @@ SymbolError = 'SymbolError' NoneError = 'NoneError' -class tree_crossover: +class tree_crossover(object): cross_rejects = ['ST'] def __init__(self): self.cross_point = {} @@ -320,7 +322,7 @@ msg = "chosen symbol not found in dad (%s tries)" % `tries` raise SymbolError, msg else: tried_sym.append(sym) - node_b = rv.choice(bro.symbol_table[sym]) + node_b = prng.choice(bro.symbol_table[sym]) idx = 0 try: for child in node_a.get_parent().children(): @@ -346,12 +348,11 @@ return sib1,sib2 def __call__(self,genome): return self.evaluate(genome) -import language -class tree_genome_default_initializer: +class tree_genome_default_initializer(object): def evaluate(self,genome): genome.generate() def __call__(self,genome): return self.evaluate(genome) -class tree_genome_default_mutator: +class tree_genome_default_mutator(object): def evaluate(self,genome): return genome.root.mutate() def __call__(self,genome): return self.evaluate(genome) @@ -372,7 +373,9 @@ def initialize(self,settings = None): genome.initialize(self,settings) if settings and settings.has_key('p_mutate'): - g.root.set_mutation(settings['p_mutate']) + raise NotImplementedError + # XXX: what is g? 
+ #g.root.set_mutation(settings['p_mutate']) def defaultize(self): """ set the nodes to their default values""" if self.root is None: Modified: trunk/scipy/sandbox/ga/info_ga.py =================================================================== --- trunk/scipy/sandbox/ga/info_ga.py 2007-08-27 20:12:57 UTC (rev 3268) +++ trunk/scipy/sandbox/ga/info_ga.py 2007-08-27 22:18:00 UTC (rev 3269) @@ -4,4 +4,3 @@ """ -postpone_import = 1 Modified: trunk/scipy/sandbox/ga/language.py =================================================================== --- trunk/scipy/sandbox/ga/language.py 2007-08-27 20:12:57 UTC (rev 3268) +++ trunk/scipy/sandbox/ga/language.py 2007-08-27 22:18:00 UTC (rev 3269) @@ -1,13 +1,21 @@ import types -import time -from random import * -SymbolError = 'Symbol Error' -DepthError = 'Depth Error' +from prng import prng -class language: + +class SymbolError(Exception): + """ Error finding a symbol. + """ + +class DepthError(Exception): + """ Tree tried to grow beyond the configured maximum depth. + """ + + +class language(object): max_depth = 20 dont_cross = ['ST'] # dont perform crossovers at the start symbol node + def __init__(self,lang): self.lang = lang self.dsc = 0 @@ -30,7 +38,7 @@ new_active_node = active_node if type(cur_sym) == types.StringType: if self.lang.has_key(cur_sym): - rule = choice(self.lang[cur_sym]) + rule = prng.choice(self.lang[cur_sym]) for sym in rule: new_active_node = self._gen(sym,new_active_node,depth) else: Modified: trunk/scipy/sandbox/ga/parallel_pop.py =================================================================== --- trunk/scipy/sandbox/ga/parallel_pop.py 2007-08-27 20:12:57 UTC (rev 3268) +++ trunk/scipy/sandbox/ga/parallel_pop.py 2007-08-27 22:18:00 UTC (rev 3269) @@ -1,641 +1,334 @@ -from Numeric import * -import sys, thread, sync +import sync +import sys +import thread +import time +from numpy import arange, shape, zeros - import remote_exec - import population - -""" - ###### - -#I've got to lean up evaluate and initial in population so that - +#I've got to clean up evaluate and initial in population so that #the incorporation of the parallel stuff is smoother. - ###### -""" -import sys, thread, sync - - - def array_round(x): - y = zeros(shape(x)) - for i in range(len(x.flat)): - y[i] = int(round(x[i])) return y - - def divide_list(l,sections): - Ntot = len(l) - Nsec = float(sections) - Neach = Ntot/Nsec - div_points = array_round(arange(0,Ntot,Neach)).tolist() - if div_points[-1] != Ntot: div_points.append(Ntot) - sub_pops = [] - st = div_points[0] - for end in div_points[1:]: - sub_pops.append(l[st:end]) - st = end - return sub_pops - - class parallel_pop_initializer: - def evaluate(self,pop,settings = None): - #only send the individuals out that need evaluation - if len(pop): - Nserv = len(pop.server_list) - groups = divide_list(pop,Nserv) - sys.setcheckinterval(10) - finished = sync.event() - bar = sync.barrier(Nserv) - print '************',len(groups), len(pop.server_list), len(pop) - for i in range(len(groups)): - inputs = {'sub_pop':groups[i],'settings':settings, 'initializer':pop.initializer} - returns = ('sub_pop',) - code = 'initializer.evaluate(sub_pop,settings)' - data_pack = (inputs,returns,code) - server = pop.server_list[i] - thread.start_new_thread(remote_thread_init,(bar,finished,server,data_pack)) - finished.wait() - sys.setcheckinterval(10) #what is this? 
for ind in pop: ind.evaluate(force) - import cPickle def plen(obj): return len(cPickle.dumps(obj,1)) - class parallel_pop_evaluator: - def evaluate(self,pop,force = 0): - - import tree - + #import tree #print '1',tree.ref() - #only send the individuals out that need evaluation - if force: - _eval_list = pop.data - else: - _eval_list = filter(lambda x: not x.evaluated,pop) - #print '2',tree.ref() - eval_list = pop.clone() - #print '3',tree.ref() - eval_list.data = _eval_list - if len(eval_list): - Nserv = len(pop.server_list) - groups = divide_list(eval_list,Nserv) - #print '4',tree.ref() - sys.setcheckinterval(10) - finished = sync.event() - bar = sync.barrier(Nserv) - #print "EVAL LENGTH!!!", plen(pop.evaluator) - gr = groups[0] - print "GROUP LENGTH!!!", plen(groups[0]), len(gr), - #print "IND!!!", plen(gr[0]),plen(gr[0].root) - #print '4.5',tree.ref() - for i in range(len(groups)): - inputs = {'sub_pop':groups[i], 'evaluator':pop.evaluator, 'force':force} - returns = ('sub_pop',) - code = 'evaluator.evaluate(sub_pop,force)' - data_pack = (inputs,returns,code) - server = pop.server_list[i] - thread.start_new_thread(remote_thread_eval,(bar,finished,server,data_pack)) - #print '7',tree.ref() - finished.wait() - sys.setcheckinterval(10) - #what is this? for ind in pop: ind.evaluate(force) - """ - def evaluate(self,pop,force = 0): - #only send the individuals out that need evaluation - _eval_list = filter(lambda x: not x.evaluated,pop) - eval_list = pop.clone() - eval_list.data = _eval_list - if len(eval_list): - #finest grain possible - groups = divide_list(eval_list,len(eval_list)) - finished = sync.event() - bar = sync.barrier(groups) - - sys.setcheckinterval(10) - Nserv = len(pop.server_list) - idx = 0 - while idx < len(groups): - inputs = {'sub_pop':groups[idx], 'evaluator':pop.evaluator} - returns = ('sub_pop',) - code = 'evaluator.evaluate(sub_pop)' - data_pack = (inputs,returns,code) - server = pop.server_list[i] - thread.start_new_thread(remote_thread_eval,(bar,finished,server,data_pack)) - #for i in range(len(groups)): - # inputs = {'sub_pop':groups[i], 'evaluator':pop.evaluator} - # returns = ('sub_pop',) - # code = 'evaluator.evaluate(sub_pop)' - # data_pack = (inputs,returns,code) - # server = pop.server_list[i] - # thread.start_new_thread(remote_thread,(bar,finished,server,data_pack)) - finished.wait() - sys.setcheckinterval(10) - #what is this? 
for ind in pop: ind.evaluate(force) - """ - - def remote_thread_init(bar,finished,server,data_pack): - try: - remote = remote_exec.remote_exec(server[0],server[1],0,1) - results = remote.run(data_pack) - #assign the results from the returned data to the local individuals - inputs = data_pack[0] - old = inputs['sub_pop'] - new = results['sub_pop'] - for i in range(len(old)): - old[i].__dict__.update(new[i].__dict__) - except IndexError: - print 'error in %s,%d' % server - bar.enter() - finished.post() - - def remote_thread_eval(bar,finished,server,data_pack): - - import tree - + #import tree try: - #print '5',tree.ref() - remote = remote_exec.remote_exec(server[0],server[1],0,1) - results = remote.run(data_pack) - #print '6',tree.ref() - #assign the results from the returned data to the local individuals - inputs = data_pack[0] - old = inputs['sub_pop'] - new = results['sub_pop'] - for gnm in new: - gnm.root.delete_circulars() - del gnm.root - #print '6.25',tree.ref() - for i in range(len(old)): - old[i].__dict__.update(new[i].__dict__) - - #print '6.5',tree.ref() - except IndexError: - print 'error in %s,%d' % server - """ - import sys - #r = new[0].root - #print 'ref count',sys.getrefcount(r) - #print '6.75',tree.ref() - #Huh??? Why do I need to delete the new genomes - #individually here? Why aren't they garbage collected? - indices = range(len(new)) - indices.reverse() - for i in indices: - del new[i] - #print 'ref count',sys.getrefcount(r) - #print '6.8',tree.ref() - #r.delete_circulars() - #print 'ref count',sys.getrefcount(r) - #print '6.9',tree.ref() - #del r - #print '6.95',tree.ref() - """ - bar.enter() - finished.post() - - class ga_parallel_pop(population.population): - parallel_evaluator = parallel_pop_evaluator() - parallel_initializer = parallel_pop_initializer() - def __init__(self,genome,size=1,server_list=None): - """Arguments: - - genome -- a genome object. - size -- number. The population size. The genome will be - replicated size times to fill the population. - server_list -- a list of tuple pairs with machine names and - ports listed for the available servers - ex: [(ee.duke.edu,8000),('elsie.ee.duke.edu',8000)] - """ - population.population.__init__(self,genome,size) - assert(server_list) - self.server_list = server_list - def initialize(self,settings = None): - """This method **must** be called before a genetic algorithm - begins evolving the population. It takes care of initializing - the individual genomes, evaluating them, and scaling the population. - It also clears and intializes the statistics for the population. - - Arguments: - - settings -- dictionary of genetic algorithm parameters. These - are passed on to the genomes for initialization. 
- """ - self.stats = {'current':{},'initial':{},'overall':{}} - self.stats['ind_evals'] = 0 - - print "beigninning genome generation" - b = time.clock() - self.parallel_initializer.evaluate(self,settings) - e = time.clock() - print "finished generation: ", e-b - self.touch(); - b = time.clock() - self.evaluate() - e = time.clock() - print "evaluation time: ", e-b - self.scale() - self.update_stats() - self.stats['initial']['avg'] = self.stats['current']['avg'] - self.stats['initial']['max'] = self.stats['current']['max'] - self.stats['initial']['min'] = self.stats['current']['min'] - self.stats['initial']['dev'] = self.stats['current']['dev'] - - def evaluate(self, force = 0): - """ call the parallel_evaluator instead of the evaluator directly - """ - self.selector.clear() - self.parallel_evaluator.evaluate(self,force) - #self.post_evaluate() - #all of the remaining should be put in post eval... - self.sort() - #this is a cluge to get eval count to work correctly - preval = self.stats['ind_evals'] - for ind in self: - self.stats['ind_evals'] = self.stats['ind_evals'] + ind.evals - ind.evals = 0 - print 'evals: ', self.stats['ind_evals'] - preval - self.touch() - self.evaluated = 1 - - ########################## test stuff ############################ - -import genome - -import gene - -import time - - - -import socket - - - -class objective: - - def __init__(self,wait=.01): - - self.wait = wait - - def evaluate(self,genome): - - time.sleep(self.wait) - - return sum(genome.array(),axis=0) - - - -def test_pop(server_list,size=100,wait=.01): - - obj = objective(wait) - - the_gene = gene.float_gene((0,2.5)) - - genome = genome.list_genome(the_gene.replicate(5)) - - genome.evaluator = obj - - pop = ga_parallel_pop(genome,size,server_list) - - print '########### awaiting evaluation#############' - - pop.initialize() - - print ' evaluation done!' - - print 'best:', pop.best() - - print 'worst',pop.worst() - - - -def gen_pop(): - - genome.list_genome.evaluator = objective() - - gene = gene.float_gene((0,2.5)) - - genome = genome.list_genome(gene.replicate(5)) - - pop = ga_parallel_pop(genome,100,[(host,port),]) - - return pop - - - - import parallel_pop,beowulf,os - - - -def test_pop2(server_list,size=100,wait=.01): - - import hmm_gnm,os - - genome = hmm_gnm.make_genome() - - #pop = ga_parallel_pop(genome,4,server_list) - - global galg - - #genome.target = targets[0] - - pop = ga_parallel_pop(genome,1,server_list) - - galg = hmm_gnm.class_ga(pop) - - galg.settings.update({ 'pop_size':6,'gens':2,'p_mutate':.03, - - 'dbase':os.environ['HOME'] + '/all_lift3', 'p_cross':0.9, 'p_replace':.6, - - 'p_deviation': -.001}) - - galg.evolve() - - - - print '########### awaiting evaluation#############' - - pop.initialize() - - print ' evaluation done!' - - print 'best:', pop.best() - - print 'worst',pop.worst() - - - -import thread - -def test(): - - host = socket.gethostname() - - port = 8000 - - server_list = [(host,port),(host,port+1)] - - for server in server_list: - - host,port = server - - thread.start_new_thread(remote_exec.server,(host,port)) - - thread.start_new_thread(test_pop2,(server_list,)) - - - -def test2(machines=32,size=100,wait=.01): - - import time - - t1 = time.time() - - #requires that servers are started on beowulf 1 and 2. 
- - import beowulf - - server_list = beowulf.beowulf.servers[:machines] - - thread.start_new_thread(test_pop,(server_list,size,wait)) - - print 'total time:', time.time()-t1 +#import genome +#import gene +#import time +# +#import socket +# +#class objective: +# def __init__(self,wait=.01): +# self.wait = wait +# def evaluate(self,genome): +# time.sleep(self.wait) +# return sum(genome.array(),axis=0) +# +#def test_pop(server_list,size=100,wait=.01): +# obj = objective(wait) +# the_gene = gene.float_gene((0,2.5)) +# genome_ = genome.list_genome(the_gene.replicate(5)) +# genome_.evaluator = obj +# pop = ga_parallel_pop(genome_,size,server_list) +# print '########### awaiting evaluation#############' +# pop.initialize() +# print ' evaluation done!' +# print 'best:', pop.best() +# print 'worst',pop.worst() +# +# +#def gen_pop(): +# genome.list_genome.evaluator = objective() +# gene = gene.float_gene((0,2.5)) +# genome_ = genome.list_genome(gene.replicate(5)) +# pop = ga_parallel_pop(genome_,100,[(host,port),]) +# return pop +# +#import os +# +#import parallel_pop +# +# +#def test_pop2(server_list,size=100,wait=.01): +# import hmm_gnm,os +# genome = hmm_gnm.make_genome() +# #pop = ga_parallel_pop(genome,4,server_list) +# global galg +# #genome.target = targets[0] +# pop = ga_parallel_pop(genome,1,server_list) +# galg = hmm_gnm.class_ga(pop) +# galg.settings.update({ 'pop_size':6,'gens':2,'p_mutate':.03, +# 'dbase':os.environ['HOME'] + '/all_lift3', 'p_cross':0.9, 'p_replace':.6, +# 'p_deviation': -.001}) +# galg.evolve() +# +# print '########### awaiting evaluation#############' +# pop.initialize() +# print ' evaluation done!' +# print 'best:', pop.best() +# print 'worst',pop.worst() +# +#import thread +#def test(): +# host = socket.gethostname() +# port = 8000 +# server_list = [(host,port),(host,port+1)] +# for server in server_list: +# host,port = server +# thread.start_new_thread(remote_exec.server,(host,port)) +# thread.start_new_thread(test_pop2,(server_list,)) +# +#def test2(machines=32,size=100,wait=.01): +# import time +# t1 = time.time() +# #requires that servers are started on beowulf 1 and 2. +# import beowulf +# server_list = beowulf.beowulf.servers[:machines] +# thread.start_new_thread(test_pop,(server_list,size,wait)) +# print 'total time:', time.time()-t1 Modified: trunk/scipy/sandbox/ga/population.py =================================================================== --- trunk/scipy/sandbox/ga/population.py 2007-08-27 20:12:57 UTC (rev 3268) +++ trunk/scipy/sandbox/ga/population.py 2007-08-27 22:18:00 UTC (rev 3269) @@ -1,30 +1,35 @@ #genetic algorithm population #based on galib. 
+import re import time + +from numpy import array + import ga_list -import re, copy import scaling import selection -import Numeric -import scipy.stats as stats -from ga_util import * -import pdb +from ga_util import GAError, my_mean, my_std + def ftn_minimize(x,y): """Minimization comparator for fitness (scaled score).""" return cmp(x.fitness(),y.fitness()) + def ftn_maximize(x,y): """Maximization comparator for fitness (scaled score).""" return cmp(y.fitness(),x.fitness()) + def sc_minimize(x,y): """Minimization comparator for raw score.""" # return cmp(x.score(),y.score()) #removed one function call return cmp(x.evaluate(),y.evaluate()) + def sc_maximize(x,y): """Maximization comparator for raw score.""" # return cmp(y.score(),x.score()) return cmp(y.evaluate(),x.evaluate()) + class default_pop_evaluator: """The **evaluate()** method simply calls the **evaluate()** method for all the genomes in the population @@ -33,18 +38,21 @@ try: evals = 0 if not pop.evaluated or force: - for ind in pop: ind.evaluate(force) + for ind in pop: + ind.evaluate(force) except: #this makes where a pop evaluator can simply evaluate a list #of genomes - might be useful to simplify remote evaluation - for ind in pop: ind.evaluate(force) + for ind in pop: + ind.evaluate(force) class default_pop_initializer: - """The **evaluate()** method simply calls the **evaluate()** - method for all the genomes in the population + """ The **evaluate()** method simply calls the **evaluate()** method for all + the genomes in the population """ def evaluate(self,pop,settings): - for i in pop: i.initialize(settings) + for i in pop: + i.initialize(settings) class population(ga_list.ga_list): """A population of genomes. The population is constructed by cloning a @@ -106,6 +114,7 @@ self._size(size) self.selector = population.default_selector() #why'd I do this? self.stats={} + def initialize(self,settings = None): """This method **must** be called before a genetic algorithm begins evolving the population. It takes care of initializing @@ -136,6 +145,7 @@ self.stats['initial']['max'] = self.stats['current']['max'] self.stats['initial']['min'] = self.stats['current']['min'] self.stats['initial']['dev'] = self.stats['current']['dev'] + def clone(self): """Returns a population that has a shallow copy the all the attributes and clone of all the genomes in the original @@ -145,15 +155,22 @@ new.stats = {} new.stats.update(self.stats) return new + def touch(self): """Reset all the flags for the population.""" - self.evaluated = 0; self.scaled = 0; self.sorted = 0; self.select_ready = 0 + self.evaluated = 0 + self.scaled = 0 + self.sorted = 0 + self.select_ready = 0 self.stated = 0 + def _size(self, l): """Resize the population.""" del self[l:len(self)] - for i in range(len(self),l): self.append(self.model_genome.clone()) + for i in range(len(self),l): + self.append(self.model_genome.clone()) return len(self) + def evaluate(self, force = 0): """Call the **evaluator.evaluate()** method to evaluate the population. 
The population is also sorted so that @@ -180,9 +197,11 @@ self.evaluated = 1 e4 = time.clock() #print 'eval:',e1-b, 'sort:',e2-e1, 'stats:',e3-e2, 'touch:',e4-e3 + def mutate(self): mutations = 0 - for ind in self: mutations = mutations + ind.mutate() + for ind in self: + mutations = mutations + ind.mutate() return mutations def sort(self,type = 'raw', force = 0): @@ -199,10 +218,14 @@ force -- forces the sort even if sorted = 1 """ # if not self.sorted or force: - if(type == 'scaled'): self.data.sort(self.ftn_comparator) - elif(type == 'raw'): self.data.sort(self.sc_comparator) - else: raise GAError, 'sort type must be "scaled" or "raw"' + if type == 'scaled': + self.data.sort(self.ftn_comparator) + elif type == 'raw': + self.data.sort(self.sc_comparator) + else: + raise GAError('sort type must be "scaled" or "raw"') self.sorted = 1 + def select(self, cnt = 1): """Calls the selector and returns *cnt* individuals. @@ -214,6 +237,7 @@ self.selector.update(self) self.select_ready = 1 return self.selector.select(self,cnt) + def scale(self, force = 0): """Calls the **scaler.scale()** method and updates the fitness of each individual. @@ -225,16 +249,19 @@ if not (self.scaled or force): self.scaler.scale(self) self.scaled = 1 + def fitnesses(self): - """Returns the fitness (scaled score) of all the - individuals in a population as a Numeric array. + """ Returns the fitness (scaled score) of all the individuals in + a population as an array. """ - return Numeric.array(map(lambda x: x.fitness(),self)) + return array([x.fitness() for x in self]) + def scores(self): - """Returns the scores (raw) of all the - individuals in a population as a Numeric array. + """ Returns the scores (raw) of all the individuals in a population as + an array. """ - return Numeric.array(map(lambda x: x.score(),self)) + return array([x.score() for x in self]) + def best(self, ith_best = 1): """Returns the best individual in the population. *It assumes the population has been sorted.* @@ -245,6 +272,7 @@ best individual in the population. """ return self[ith_best - 1] + def worst(self,ith_worst = 1): """Returns the worst individual in the population. *It assumes the population has been sorted.* @@ -255,6 +283,7 @@ worst individual in the population. """ return self[-ith_worst] + def min_or_max(self,*which_one): """Returns or set 'min' or 'max' indicating whether the population is to be minimized or maximized. 
@@ -274,20 +303,30 @@ elif (re.match('max.*',which_one[0],re.I)): self.ftn_comparator = ftn_maximize self.sc_comparator = sc_maximize - else: raise GaError, "min_or_max expects 'min' or 'max'" - if self.ftn_comparator == ftn_minimize: return 'min' - elif self.ftn_comparator == ftn_maximize: return 'max' + else: + raise GAError("min_or_max expects 'min' or 'max'") + if self.ftn_comparator == ftn_minimize: + return 'min' + elif self.ftn_comparator == ftn_maximize: + return 'max' + def update_stats(self): """Update the statistics for the population.""" s = self.scores() self.stats['current']['max'] = max(s) self.stats['current']['avg'] = my_mean(s) self.stats['current']['min'] = min(s) - if len(s) > 1: self.stats['current']['dev'] = my_std(s) - else: self.stats['current']['dev'] = 0 - try: self.stats['overall']['max'] = max(self.stats['overall']['max'], - self.stats['current']['max']) - except KeyError: self.stats['overall']['max'] = self.stats['current']['max'] - try: self.stats['overall']['min'] = min(self.stats['overall']['min'], - self.stats['current']['min']) - except KeyError: self.stats['overall']['min'] = self.stats['current']['min'] + if len(s) > 1: + self.stats['current']['dev'] = my_std(s) + else: + self.stats['current']['dev'] = 0 + try: + self.stats['overall']['max'] = max(self.stats['overall']['max'], + self.stats['current']['max']) + except KeyError: + self.stats['overall']['max'] = self.stats['current']['max'] + try: + self.stats['overall']['min'] = min(self.stats['overall']['min'], + self.stats['current']['min']) + except KeyError: + self.stats['overall']['min'] = self.stats['current']['min'] Added: trunk/scipy/sandbox/ga/prng.py =================================================================== --- trunk/scipy/sandbox/ga/prng.py 2007-08-27 20:12:57 UTC (rev 3268) +++ trunk/scipy/sandbox/ga/prng.py 2007-08-27 22:18:00 UTC (rev 3269) @@ -0,0 +1,36 @@ +""" Package-global pseudo-random number generator. + +This global is a transitional hack from the old code. Ideally, each run of a GA +should control its own in order to allow multiple concurrent runs. However, we +are transitioning from an older implementation that used a really global PRNG. +""" + +from numpy.random import RandomState + + +class GAPRNG(RandomState): + """ PRNG for the GA package. + + In addition to all of the functionality derived from RandomState, we also + store the seed values that were used. + """ + + def seed(self, seed=None): + """ Seed the generator. + + seed can be an integer, an array (or other sequence) of integers of any + length, or None. If seed is None, then RandomState will try to read data + from /dev/urandom (or the Windows analogue) if available or seed from + the clock otherwise. + """ + RandomState.seed(self, seed) + self.initial_seed = seed + + def choice(self, seq): + """ Randomly and uniformly select an item from a sequence. + """ + i = self.randint(len(seq)) + return seq[i] + + +prng = GAPRNG() Property changes on: trunk/scipy/sandbox/ga/prng.py ___________________________________________________________________ Name: svn:eol-style + native Modified: trunk/scipy/sandbox/ga/scaling.py =================================================================== --- trunk/scipy/sandbox/ga/scaling.py 2007-08-27 20:12:57 UTC (rev 3268) +++ trunk/scipy/sandbox/ga/scaling.py 2007-08-27 22:18:00 UTC (rev 3269) @@ -1,39 +1,46 @@ #genetic algorithm scaling routines #based on galib. 
-# -from ga_util import * -import scipy.stats as stats -from numpy import * + +from numpy import clip, inf +from ga_util import GAError, my_mean, my_std + + # if a score is less the 2 standard deviations below, the average, its score # is arbitrarily set to zero -class sigma_truncation_scaling: - def __init__(self,scaling = 2): +class sigma_truncation_scaling(object): + def __init__(self, scaling = 2): self.scaling = scaling - def scale(self,pop): + + def scale(self, pop): sc = pop.scores() avg = my_mean(sc) - if len(sc) > 1: dev = my_std(sc) - else: dev = 0 - f = sc - avg + self.scaling * dev - f=choose(less_equal(f,0.),(f,0.)) - for i in range(len(pop)): pop[i].fitness(f[i]) + if len(sc) > 1: + dev = my_std(sc) + else: + dev = 0 + f = clip(sc - avg + self.scaling * dev, 0, inf) + for i in range(len(pop)): + pop[i].fitness(f[i]) return pop -class no_scaling: +class no_scaling(object): def scale(self,pop): - for ind in pop: ind.fitness(ind.score()) + for ind in pop: + ind.fitness(ind.score()) return pop -class linear_scaling: +class linear_scaling(object): def __init__(self,mult = 1.2): self.mult = mult + def scale(self,pop): sc = pop.scores() pmin = min(sc) - if pmin < 0: raise GAError, 'linear scaling does not work with objective scores < 0' + if pmin < 0: + raise GAError('linear scaling does not work with objective scores < 0') pmax = max(sc) pavg = my_mean(sc) - if(pavg == pmax): + if pavg == pmax: a = 1. b = 0. elif pmin > (self.mult * pavg - pmax)/(self.mult - 1.): @@ -44,6 +51,6 @@ delta = pavg - pmin a = pavg / delta b = -pmin * pavg / delta - f = sc * a + b - f=choose(less_equal(f,0.),(f,0.)) - for i in range(len(pop)): pop[i].fitness(f[i]) + f = clip(sc * a + b, 0, inf) + for i in range(len(pop)): + pop[i].fitness(f[i]) Modified: trunk/scipy/sandbox/ga/selection.py =================================================================== --- trunk/scipy/sandbox/ga/selection.py 2007-08-27 20:12:57 UTC (rev 3268) +++ trunk/scipy/sandbox/ga/selection.py 2007-08-27 22:18:00 UTC (rev 3269) @@ -1,93 +1,120 @@ #genetic algorithm selection routines #based on galib. #exception - these classes only work on the scaled fitness -from ga_util import * -import scipy.stats as rv -stats = rv -import pdb -from numpy import * -class selector: - def update(self,pop): pass - def select(self,pop): raise GAError, 'selector.select() must be overridden' - def clear(self): pass +import numpy as np + +from ga_util import GAError +from prng import prng + +class selector(object): + def update(self,pop): + pass + def select(self,pop): + raise GAError('selector.select() must be overridden') + def clear(self): + pass + class uniform_selector(selector): def select(self,pop,cnt = 1): - if cnt == 1: return rv.choice(pop) + if cnt == 1: + return prng.choice(pop) res = [] - for i in range(cnt): res.append(rv.choice(pop)) + for i in range(cnt): + res.append(prng.choice(pop)) return res -class rank_selector(selector): - def select(self,pop,cnt = 1): - pop.sort() - studliest = pop[0].fitness() - tied_for_first = filter(lambda x,y=studliest: x.fitness()==y,pop) - if cnt == 1: return rv.choice(tied_for_first) - res = [] - for i in range(cnt): res.append(rv.choice(tied_for_first)) - return res +#class rank_selector(selector): +# def select(self,pop,cnt = 1): +# pop.sort() +# studliest = pop[0].fitness() +# # XXX: y? 
+# tied_for_first = [x for x in pop if x.fitness() == y] +# if cnt == 1: +# return prng.choice(tied_for_first) +# res = [] +# for i in range(cnt): +# res.append(prng.choice(tied_for_first)) +# return res #scores must all be positive class roulette_selector(selector): def update(self,pop): self.pop = pop[:] sz = len(pop) - if not sz: raise GAError, 'srs_selector - the pop size is 0!' + if not sz: + raise GAError('srs_selector - the pop size is 0!') f =self.pop.fitnesses() f_max = max(f); f_min = min(f) if not ( (f_max >= 0 and f_min >= 0) or (f_max <= 0 and f_min <= 0)): - raise GAError, 'srs_selector requires all fitnesses values to be either strictly positive or strictly negative' - if f_max == f_min: f = ones(shape(f),typecode = Float32) - self.dart_board = add.accumulate(f / sum(f,axis=0)) + raise GAError('srs_selector requires all fitnesses values to be either strictly positive or strictly negative') + if f_max == f_min: + f = np.ones_like(f) + self.dart_board = np.add.accumulate(f / sum(f,axis=0)) + def select(self,pop,cnt = 1): returns = [] for i in range(cnt): - dart = rv.random() + dart = prng.random() idx = 0 #binary search would be faster - while dart > self.dart_board[idx]: idx = idx + 1 + while dart > self.dart_board[idx]: + idx = idx + 1 returns.append(self.pop[idx]) - if cnt == 1: return returns[0] - else: return returns + if cnt == 1: + return returns[0] + else: + return returns + def clear(self): del self.pop + #scores must all be positive class srs_selector(selector): def update(self,pop): sz = len(pop) - if not sz: raise GAError, 'srs_selector - the pop size is 0!' + if not sz: + raise GAError('srs_selector - the pop size is 0!') f =pop.fitnesses() f_max = max(f); f_min = min(f) if not ( (f_max >= 0. and f_min >= 0.) or (f_max <= 0. and f_min <= 0.)): - raise GAError, 'srs_selector requires all fitnesses values to be either strictly positive or strictly negative - min %f, max %f' %(f_min,f_max) + raise GAError('srs_selector requires all fitnesses values to be either strictly positive or strictly negative - min %f, max %f' %(f_min,f_max)) f_avg = sum(f,axis=0)/sz - if f_avg == 0.: e = ones(shape(f),typecode = Float32) + if f_avg == 0.: + e = np.ones_like(f) else: - if pop.min_or_max() == 'max': e = f/f_avg - else: e = (-f+f_max+f_min)/f_avg + if pop.min_or_max() == 'max': + e = f/f_avg + else: + e = (-f+f_max+f_min)/f_avg self.expected_value = e garauntee,chance = divmod(e,1.) 
# garauntee = floor(e) # chance = remainder(e,1) choices = [] - for i in xrange(sz): choices = choices + [pop[i]] * int(garauntee[i]) + for i in xrange(sz): + choices = choices + [pop[i]] * int(garauntee[i]) #now deal with the remainder - dart_board = add.accumulate(chance / sum(chance,axis=0)) + dart_board = np.add.accumulate(chance / sum(chance,axis=0)) for i in range(len(choices),sz): - dart = rv.random() + dart = prng.random() idx = 0 - while dart > dart_board[idx]: idx = idx + 1 + while dart > dart_board[idx]: + idx = idx + 1 choices.append(pop[idx]) self.choices = choices + def select(self,pop,cnt = 1): #ignore the past in pop res = [] - for i in range(cnt): res.append(rv.choice(self.choices)) + for i in range(cnt): + res.append(prng.choice(self.choices)) # for chosen in res: self.choices.remove(chosen) - if cnt == 1: return res[0] + if cnt == 1: + return res[0] return res + def clear(self): if hasattr(self,'choices'): del self.choices Modified: trunk/scipy/sandbox/ga/tree.py =================================================================== --- trunk/scipy/sandbox/ga/tree.py 2007-08-27 20:12:57 UTC (rev 3268) +++ trunk/scipy/sandbox/ga/tree.py 2007-08-27 22:18:00 UTC (rev 3269) @@ -1,21 +1,17 @@ -from ga_util import * -#import regress -AddError = 'AddError' -import pprint import sys -#if sys.platform == 'win32': -# import pywin.debugger -# pdb = pywin.debugger -#else: -# import pdb +from ga_util import shallow_clone -#pp = ejpprint.PrettyPrinter(indent = 4) -pp = pprint.PrettyPrinter(indent = 4) -ParentError = 'ParentError' +class AddError(Exception): + """ Cannot add to the tree. + """ -class base_tree_node: +class ParentError(Exception): + """ Parent is incorrect. + """ + +class base_tree_node(object): objects_ever = 0 objects = 0 circular = 0 @@ -23,19 +19,22 @@ def inc(self): base_tree_node.objects = base_tree_node.objects + 1 base_tree_node.objects_ever = base_tree_node.objects_ever + 1 + def dec(self): base_tree_node.objects = base_tree_node.objects - 1 def __init__(self,child_count,node_type='',derive_type='', parent=None): # print 'trenode init',type(parent) self.inc() - if parent is None: self.symbol_table = {} + if parent is None: + self.symbol_table = {} self.node_type = node_type self.derive_type = derive_type self.child_count = child_count self._children = [] self.set_parent(parent) self.label = node_type + #so that we don't overwrite the general creation function in a derived class #and screw up clone def _create(self,parent = None): @@ -49,29 +48,34 @@ #new._children = [] #if parent is None: new.symbol_table = {} return new + def create(self,parent = None): new = self._create(parent) new._children = [] - if parent is None: new.symbol_table = {} + if parent is None: + new.symbol_table = {} return new + def clone(self,parent = None): """ make lean and mean cause its called a ton """ new = self._create(parent) - new._children = map(lambda x,par=new: - x.clone(par), - self._children) + new._children = [x.clone(parent) for x in self._children] + if parent is None: + new.generate_symbol_table() + return new - if parent is None: new.generate_symbol_table() - return new def set_parent(self,parent): self.parent = parent + def get_parent(self): return self.parent + def generate_symbol_table(self): self.symbol_table = {} self._generate_symbol_table(self.symbol_table) + def _generate_symbol_table(self,symbol_table): """ Return: @@ -86,6 +90,7 @@ if symbol_table.has_key(self.derive_type): symbol_table[self.derive_type].append(self) else: symbol_table[self.derive_type] = [self] + 
def parent_test(self): for child in self._children: child.parent_test() @@ -93,6 +98,7 @@ if not child.get_parent() is self: #pdb.set_trace() raise ParentError + def node_count(self,type=None): cnt = 0 for child in self._children: @@ -110,9 +116,12 @@ if len(self._children) < self.child_count: node.set_parent(self) self._children.append(node) - else: raise AddError, 'to many children' + else: + raise AddError('too many children') + def filled(self): return len(self._children) >= self.child_count + def children(self): return self._children @@ -124,20 +133,26 @@ for child in self._children: leaves = leaves + child.leaves() return leaves + def depth(self): - if self.child_count == 0: return 1 + if self.child_count == 0: + return 1 else: return max(map(tree_node.depth,self._children)) + 1 + def ancestors(self): if self.get_parent(): return self.get_parent().ancestors() + 1 return 1 + def root(self): if self.get_parent(): return self.get_parent().root() return self + # I needed this to output string chromosomes for antennas # rethink this later def file_output(self,file): for child in self._children: child.file_output(file) + def __repr__(self): res = '%s %s' % (self.label, self.derive_type) if len(self._children): @@ -145,11 +160,11 @@ res = res + '\n' + '\t'*self.ancestors() res = res + '%s' % self._children return res + def delete_circulars(self): #if hasattr(self,'parent'): # if self.parent is None: print 'deleting root ciculars' base_tree_node.circular = base_tree_node.circular + 1 - import sys self.symbol_table = None for child in self._children: if len(child._children): @@ -166,60 +181,53 @@ # print 'tree_node killed:',tree_node.objects self.dec() #base_tree_node.objects = base_tree_node.objects - 1 + def __cmp__(self,other): #like ga_list compare... 
- try: return cmp(self.__dict__,other.__dict__) - except AttributeError: return 1 - """ - equal = 0 try: - equal = (self.node_type == other.node_type and - self.derive_type == other.derive_type and - self.child_count == other.child_count and - self._children == other._children) - except AttributeError: pass - return not equal - """ + return cmp(self.__dict__,other.__dict__) + except AttributeError: + return 1 + def __setstate__(self,state): for key in state.keys(): setattr(self, key, state[key]) self.inc() -""" -#core dumps on linux -import weakdict -class weak_tree_node(base_tree_node,weakdict.WeakValue): - def __init__(self,child_count,node_type='',derive_type='', parent=None): - weakdict.WeakValue.__init__(self) - base_tree_node.__init__(self,child_count,node_type,derive_type, parent) - def set_parent(self,parent): - print 'in set' - if not hasattr(self,'parent'): - self.parent = weakdict.WeakDict() - if parent: self.parent[0] = parent - elif self.parent.has_key(0): del self.parent[0] - print 'out set' - def get_parent(self): - print 'in get' - if self.parent.has_key(0): p = self.parent[0] - else: p = None - print 'out get' - return p - def delete_circulars(self): - pass -""" -""" -import mxProxy -class proxy_tree_node(base_tree_node): - passobj = 2 #could be anything - def set_parent(self,parent): - self.parent = mxProxy.WeakProxy(parent,None,self.passobj) - def get_parent(self): - if self.parent: return self.parent.proxy_object(self.passobj) - return None - def delete_circulars(self): - pass -""" + +##core dumps on linux +#import weakdict +#class weak_tree_node(base_tree_node,weakdict.WeakValue): +# def __init__(self,child_count,node_type='',derive_type='', parent=None): +# weakdict.WeakValue.__init__(self) +# base_tree_node.__init__(self,child_count,node_type,derive_type, parent) +# def set_parent(self,parent): +# print 'in set' +# if not hasattr(self,'parent'): +# self.parent = weakdict.WeakDict() +# if parent: self.parent[0] = parent +# elif self.parent.has_key(0): del self.parent[0] +# print 'out set' +# def get_parent(self): +# print 'in get' +# if self.parent.has_key(0): p = self.parent[0] +# else: p = None +# print 'out get' +# return p +# def delete_circulars(self): +# pass + +#import mxProxy +#class proxy_tree_node(base_tree_node): +# passobj = 2 #could be anything +# def set_parent(self,parent): +# self.parent = mxProxy.WeakProxy(parent,None,self.passobj) +# def get_parent(self): +# if self.parent: return self.parent.proxy_object(self.passobj) +# return None +# def delete_circulars(self): +# pass + tree_node = base_tree_node #tree_node = weak_tree_node #tree_node = proxy_tree_node @@ -228,6 +236,7 @@ print 'current', base_tree_node.objects print 'ever', base_tree_node.objects_ever print 'circular deletes', base_tree_node.circular + def test_treenode(): a = tree_node(2,'root') a.add_child(tree_node(0,'kid1')) Modified: trunk/scipy/sandbox/ga/tree_opt.py =================================================================== --- trunk/scipy/sandbox/ga/tree_opt.py 2007-08-27 20:12:57 UTC (rev 3268) +++ trunk/scipy/sandbox/ga/tree_opt.py 2007-08-27 22:18:00 UTC (rev 3269) @@ -5,29 +5,29 @@ list_range nodes to exchange places. 
""" +from numpy import log10 + import tree import gene -""" -import weakdict -class opt_object(tree.tree_node,weakdict.WeakValue): - opt_dict = weakdict.WeakDict() - def __init__(self,node_type, sub_nodes): - tree.tree_node.__init__(self,sub_nodes,node_type=node_type) - weakdict.WeakValue.__init__(self) - opt_object.opt_dict[id(self)] = self - def _create(self,parent = None): - new = tree.tree_node._create(self,parent) - weakdict.WeakValue.__init__(new) - self._WeakValue__cid = None #force a reset of the __cid value - weakdict.WeakValue.__init__(new) #now get a new value for it - opt_object.opt_dict[id(new)] = new - return new - def __del__(self): - tree.tree_node.__del__(self) - weakdict.WeakValue.__del__(self) -""" +#import weakdict +#class opt_object(tree.tree_node,weakdict.WeakValue): +# opt_dict = weakdict.WeakDict() +# def __init__(self,node_type, sub_nodes): +# tree.tree_node.__init__(self,sub_nodes,node_type=node_type) +# weakdict.WeakValue.__init__(self) +# opt_object.opt_dict[id(self)] = self +# def _create(self,parent = None): +# new = tree.tree_node._create(self,parent) +# weakdict.WeakValue.__init__(new) +# self._WeakValue__cid = None #force a reset of the __cid value +# weakdict.WeakValue.__init__(new) #now get a new value for it +# opt_object.opt_dict[id(new)] = new +# return new +# def __del__(self): +# tree.tree_node.__del__(self) +# weakdict.WeakValue.__del__(self) class opt_object(tree.tree_node): def __init__(self,node_type, sub_nodes): @@ -43,139 +43,176 @@ else: self.default = (bounds[0] + bounds[1])/2. self._value = self.default print self._value - def clone(self,parent = None):return tree.tree_node.clone(self,parent) - """ - def mutate(self): - m = gene.float_gene.mutate(self) - if(m and self.parent): self.parent.recalc(force_parent=1) - return m - """ + + def clone(self, parent=None): + return tree.tree_node.clone(self,parent) + +# def mutate(self): +# m = gene.float_gene.mutate(self) +# if(m and self.parent): self.parent.recalc(force_parent=1) +# return m + def scale(self,sc): self.bounds = (self.bounds[0]*sc,self.bounds[1]*sc) self.default = self.default*sc self._value = self._value*sc + def defaultize(self): self._value = self.default - for child in self._children: child.defaultize() - def create(self,parent): + for child in self._children: + child.defaultize() + + def create(self, parent): new = tree.tree_node.create(self,parent) new.initialize() gene.float_gene.initialize(new) return new + def __del__(self): # gene.float_gene.__del__(self) opt_object.__del__(self) + def __repr__(self): try: val = self.value() - if( val < .01 or val > 1000): v = "%4.3e" % self.value() - else: v = "%4.3f" % self.value() - except gene.GAError: v = 'not initialized' + if val < .01 or val > 1000: + v = "%4.3e" % self.value() + else: + v = "%4.3f" % self.value() + except gene.GAError: + v = 'not initialized' self.label = '%s = %s' % (self.node_type, v) return tree.tree_node.__repr__(self) -import math + class log_float_range(gene.log_float_gene,opt_object): optimize = 1 + def __init__(self,bounds,node_type='log_float_range', sub_nodes = 0): gene.log_float_gene.__init__(self,bounds[:2]) opt_object.__init__(self,node_type,sub_nodes) - if(len(bounds) == 3): self.default = bounds[2] - else: self.default = (bounds[0] + bounds[1])/2. - self._value = math.log10(self.default) - def clone(self,parent = None):return tree.tree_node.clone(self,parent) - """ + if len(bounds) == 3: + self.default = bounds[2] + else: + self.default = (bounds[0] + bounds[1])/2. 
+ self._value = log10(self.default) + + def clone(self, parent=None): + return tree.tree_node.clone(self,parent) + def mutate(self): m=gene.log_float_gene.mutate(self) - if(m and self.parent): self.parent.recalc(force_parent=1) + if m and self.parent: + self.parent.recalc(force_parent=1) return m - """ + def scale(self,sc): self.default = self.default*sc sc = log10(sc) self.bounds = (self.bounds[0]*sc,self.bounds[1]*sc) self._value = self._value*sc + def defaultize(self): self._value = self.default - for child in self._children: child.defaultize() + for child in self._children: + child.defaultize() + def create(self,parent): new = tree.tree_node.create(self,parent) new.initialize() gene.log_float_gene.initialize(new) return new + def __del__(self): # gene.log_float_gene.__del__(self) opt_object.__del__(self) + def __repr__(self): try: val = self.value() - if( val < .01 or val > 1000): v = "%4.3e" % self.value() - else: v = "%4.3f" % self.value() - except gene.GAError: v = 'not initialized' + if val < .01 or val > 1000: + v = "%4.3e" % self.value() + else: + v = "%4.3f" % self.value() + except gene.GAError: + v = 'not initialized' self.label = '%s = %s' % (self.node_type, v) return tree.tree_node.__repr__(self) class list_range(gene.list_gene,opt_object): optimize = 1 - def __init__(self,allele_set,node_type='list_range', default=None, sub_nodes = 0): + + def __init__(self, allele_set, node_type='list_range', default=None, sub_nodes = 0): gene.list_gene.__init__(self,allele_set) opt_object.__init__(self,node_type,sub_nodes) gene.list_gene.initialize(self) # prevents trouble in tree generation - if(default): self.default = default - else: self.default = allele_set[int(len(allele_set)/2.)] #the center item + if default: + self.default = default + else: + self.default = allele_set[int(len(allele_set)/2.)] #the center item self._value = self.default - """ - def mutate(self): - m=gene.list_gene.mutate(self) - if(m and self.parent): self.parent.recalc(force_parent=1) - return m - """ - def clone(self,parent = None):return tree.tree_node.clone(self,parent) + + def clone(self, parent=None): + return tree.tree_node.clone(self,parent) + def scale(self,sc): for i in range(len(self.allele_set)): self.allele_set[i] = self.allele_set[i] *sc self.default = self.default*sc self._value = self._value*sc + def defaultize(self): self._value = self.default - for child in self._children: child.defaultize() + for child in self._children: + child.defaultize() + def create(self,parent): new = tree.tree_node.create(self,parent) new.initialize() gene.list_gene.initialize(new) return new + def __del__(self): # gene.list_gene.__del__(self) opt_object.__del__(self) + def __repr__(self): self.label = '%s = %s' % (self.node_type, self.value()) return tree.tree_node.__repr__(self) class val(gene.frozen,opt_object): optimize = 0 - def __init__(self,val,node_type='val',sub_nodes=0): + + def __init__(self, val, node_type='val', sub_nodes=0): gene.frozen.__init__(self,val) opt_object.__init__(self,node_type,sub_nodes) - def clone(self,parent = None):return tree.tree_node.clone(self,parent) - def scale(self,sc): self._value = self._value*sc - def defaultize(self): pass + + def clone(self,parent = None): + return tree.tree_node.clone(self,parent) + + def scale(self,sc): + self._value = self._value*sc + + def defaultize(self): + pass + def create(self,parent): new = tree.tree_node.create(self,parent) new.initialize() return new + def __del__(self): # gene.frozen.__del__(self) opt_object.__del__(self) + def __repr__(self): 
self.label = '%s = %s' % (self.node_type, self.value()) return tree.tree_node.__repr__(self) -""" -These two routines are useful for picking off or replacing the nodes in a -tree that should be that should be numerically optimized. They are helpful if -your interested in using a gradient method to optimize some of the paramters -of the array -""" +# These two routines are useful for picking off or replacing the nodes in a tree +# that should be that should be numerically optimized. They are helpful if your +# interested in using a gradient method to optimize some of the paramters of the +# array def pick_numbers(node): start = []; lower = []; upper =[]; @@ -203,10 +240,8 @@ index = index + 1 return index -""" -Grab the numerical nodes that need to be optimized so that you can directly -manipulate them -""" +# Grab the numerical nodes that need to be optimized so that you can directly +# manipulate them def pick_optimize_nodes(node): nodes = []; for child in node.children(): From scipy-svn at scipy.org Mon Aug 27 18:56:09 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Mon, 27 Aug 2007 17:56:09 -0500 (CDT) Subject: [Scipy-svn] r3270 - trunk/scipy/weave Message-ID: <20070827225609.1D8EC39C250@new.scipy.org> Author: stefan Date: 2007-08-27 17:55:52 -0500 (Mon, 27 Aug 2007) New Revision: 3270 Modified: trunk/scipy/weave/c_spec.py trunk/scipy/weave/ext_tools.py Log: Remove two more warnings. An #ifdef is not ideal; better would be an API to expose compiler features. Modified: trunk/scipy/weave/c_spec.py =================================================================== --- trunk/scipy/weave/c_spec.py 2007-08-27 22:18:00 UTC (rev 3269) +++ trunk/scipy/weave/c_spec.py 2007-08-27 22:55:52 UTC (rev 3270) @@ -268,8 +268,6 @@ code = """ PyObject* file_to_py(FILE* file, char* name, char* mode) { - PyObject* py_obj = NULL; - //extern int fclose(FILE *); return (PyObject*) PyFile_FromFile(file, name, mode, fclose); } """ Modified: trunk/scipy/weave/ext_tools.py =================================================================== --- trunk/scipy/weave/ext_tools.py 2007-08-27 22:18:00 UTC (rev 3269) +++ trunk/scipy/weave/ext_tools.py 2007-08-27 22:55:52 UTC (rev 3270) @@ -252,7 +252,7 @@ def warning_code(self): all_warnings = self.build_information().warnings() w=map(lambda x: "#pragma warning(%s)\n" % x,all_warnings) - return ''.join(w) + return '#ifndef __GNUC__\n' + ''.join(w) + '\n#endif' def header_code(self): h = self.get_headers() From scipy-svn at scipy.org Mon Aug 27 19:14:36 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Mon, 27 Aug 2007 18:14:36 -0500 (CDT) Subject: [Scipy-svn] r3271 - trunk/scipy/weave/tests Message-ID: <20070827231436.D485839C08B@new.scipy.org> Author: stefan Date: 2007-08-27 18:14:25 -0500 (Mon, 27 Aug 2007) New Revision: 3271 Modified: trunk/scipy/weave/tests/test_scxx_object.py Log: Use assert_equal instead of assert and == so we get better debugging output on failure. When run from the terminal, use level 5 by default. 
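What the switch buys on failure, in a minimal sketch (the values are made up, and the exact failure text below is approximate; assert_equal lives in numpy.testing, which these test modules already pull in):

    from numpy.testing import assert_equal

    res = 1001
    assert_equal(res, 1001)   # passes silently, like a bare assert
    # On a mismatch, assert_equal(res, 1002) raises an AssertionError
    # that reports both operands, roughly:
    #     Items are not equal:
    #      ACTUAL: 1001
    #      DESIRED: 1002
    # whereas a bare "assert res == 1002" raises with no values at all.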
Modified: trunk/scipy/weave/tests/test_scxx_object.py =================================================================== --- trunk/scipy/weave/tests/test_scxx_object.py 2007-08-27 22:55:52 UTC (rev 3270) +++ trunk/scipy/weave/tests/test_scxx_object.py 2007-08-27 23:14:25 UTC (rev 3271) @@ -20,24 +20,24 @@ return_val = val; """ res = inline_tools.inline(code) - assert sys.getrefcount(res) == 2 - assert res == 1001 + assert_equal(sys.getrefcount(res),2) + assert_equal(res,1001) def check_float(self,level=5): code = """ py::object val = (float)1.0; return_val = val; """ res = inline_tools.inline(code) - assert sys.getrefcount(res) == 2 - assert res == 1.0 + assert_equal(sys.getrefcount(res),2) + assert_equal(res,1.0) def check_double(self,level=5): code = """ py::object val = 1.0; return_val = val; """ res = inline_tools.inline(code) - assert sys.getrefcount(res) == 2 - assert res == 1.0 + assert_equal(sys.getrefcount(res),2) + assert_equal(res,1.0) def check_complex(self,level=5): code = """ std::complex num = std::complex(1.0,1.0); @@ -45,16 +45,16 @@ return_val = val; """ res = inline_tools.inline(code) - assert sys.getrefcount(res) == 2 - assert res == 1.0+1.0j + assert_equal(sys.getrefcount(res),2) + assert_equal(res,1.0+1.0j) def check_string(self,level=5): code = """ py::object val = "hello"; return_val = val; """ res = inline_tools.inline(code) - assert sys.getrefcount(res) == 2 - assert res == "hello" + assert_equal(sys.getrefcount(res),2) + assert_equal(res,"hello") def check_std_string(self,level=5): code = """ @@ -63,8 +63,8 @@ return_val = val; """ res = inline_tools.inline(code) - assert sys.getrefcount(res) == 2 - assert res == "hello" + assert_equal(sys.getrefcount(res),2) + assert_equal(res,"hello") class test_object_print(NumpyTestCase): #------------------------------------------------------------------------ @@ -86,7 +86,7 @@ """ res = inline_tools.inline(code,['file_imposter']) print file_imposter.getvalue() - assert file_imposter.getvalue() == "'how now brown cow'" + assert_equal(file_imposter.getvalue(),"'how now brown cow'") ## def check_failure(self,level=5): ## code = """ @@ -211,10 +211,10 @@ before = sys.getrefcount(a.b) res = inline_tools.inline(code,args) - assert res == a.b + assert_equal(res,a.b) del res after = sys.getrefcount(a.b) - assert after == before + assert_equal(after,before) def check_char(self,level=5): self.generic_attr('return_val = a.attr("b");') @@ -258,8 +258,8 @@ del res res = inline_tools.inline('return_val = a.attr("bar").call();',['a']) second = sys.getrefcount(res) - assert res == "bar results" - assert first == second + assert_equal(res,"bar results") + assert_equal(first,second) class test_object_set_attr(NumpyTestCase): @@ -268,13 +268,13 @@ a = foo() a.b = 12345 res = inline_tools.inline(code,args) - assert a.b == desired + assert_equal(a.b,desired) def generic_new(self, code, desired): args = ['a'] a = foo() res = inline_tools.inline(code,args) - assert a.b == desired + assert_equal(a.b,desired) def check_existing_char(self,level=5): self.generic_existing('a.set_attr("b","hello");',"hello") @@ -352,7 +352,7 @@ def check_equal(self,level=5): a,b = 1,1 res = inline_tools.inline('return_val = (a == b);',['a','b']) - assert res == (a == b) + assert_equal(res,(a == b)) def check_equal_objects(self,level=5): class foo: def __init__(self,x): @@ -361,47 +361,47 @@ return cmp(self.x,other.x) a,b = foo(1),foo(2) res = inline_tools.inline('return_val = (a == b);',['a','b']) - assert res == (a == b) + assert_equal(res,(a == b)) def 
check_lt(self,level=5): a,b = 1,2 res = inline_tools.inline('return_val = (a < b);',['a','b']) - assert res == (a < b) + assert_equal(res,(a < b)) def check_gt(self,level=5): a,b = 1,2 res = inline_tools.inline('return_val = (a > b);',['a','b']) - assert res == (a > b) + assert_equal(res,(a > b)) def check_gte(self,level=5): a,b = 1,2 res = inline_tools.inline('return_val = (a >= b);',['a','b']) - assert res == (a >= b) + assert_equal(res,(a >= b)) def check_lte(self,level=5): a,b = 1,2 res = inline_tools.inline('return_val = (a <= b);',['a','b']) - assert res == (a <= b) + assert_equal(res,(a <= b)) def check_not_equal(self,level=5): a,b = 1,2 res = inline_tools.inline('return_val = (a != b);',['a','b']) - assert res == (a != b) + assert_equal(res,(a != b)) def check_int(self,level=5): a = 1 res = inline_tools.inline('return_val = (a == 1);',['a']) - assert res == (a == 1) + assert_equal(res,(a == 1)) def check_int2(self,level=5): a = 1 res = inline_tools.inline('return_val = (1 == a);',['a']) - assert res == (a == 1) + assert_equal(res,(a == 1)) def check_unsigned_long(self,level=5): a = 1 res = inline_tools.inline('return_val = (a == (unsigned long)1);',['a']) - assert res == (a == 1) + assert_equal(res,(a == 1)) def check_double(self,level=5): a = 1 res = inline_tools.inline('return_val = (a == 1.0);',['a']) - assert res == (a == 1.0) + assert_equal(res,(a == 1.0)) def check_char(self,level=5): a = "hello" res = inline_tools.inline('return_val = (a == "hello");',['a']) - assert res == (a == "hello") + assert_equal(res,(a == "hello")) def check_std_string(self,level=5): a = "hello" code = """ @@ -409,7 +409,7 @@ return_val = (a == hello); """ res = inline_tools.inline(code,['a']) - assert res == (a == "hello") + assert_equal(res,(a == "hello")) class test_object_repr(NumpyTestCase): def check_repr(self,level=5): @@ -424,8 +424,8 @@ del res res = inline_tools.inline('return_val = a.repr();',['a']) second = sys.getrefcount(res) - assert first == second - assert res == "repr return" + assert_equal(first,second) + assert_equal(res,"repr return") class test_object_str(NumpyTestCase): def check_str(self,level=5): @@ -440,9 +440,9 @@ del res res = inline_tools.inline('return_val = a.str();',['a']) second = sys.getrefcount(res) - assert first == second + assert_equal(first,second) print res - assert res == "str return" + assert_equal(res,"str return") class test_object_unicode(NumpyTestCase): # This ain't going to win awards for test of the year... 
@@ -458,8 +458,8 @@ del res res = inline_tools.inline('return_val = a.unicode();',['a']) second = sys.getrefcount(res) - assert first == second - assert res == "unicode" + assert_equal(first,second) + assert_equal(res,"unicode") class test_object_is_callable(NumpyTestCase): def check_true(self,level=5): @@ -481,8 +481,8 @@ def foo(): return (1,2,3) res = inline_tools.inline('return_val = foo.call();',['foo']) - assert res == (1,2,3) - assert sys.getrefcount(res) == 2 + assert_equal(res,(1,2,3)) + assert_equal(sys.getrefcount(res),2) def check_args(self,level=5): def foo(val1,val2): return (val1,val2) @@ -493,8 +493,8 @@ return_val = foo.call(args); """ res = inline_tools.inline(code,['foo']) - assert res == (1,"hello") - assert sys.getrefcount(res) == 2 + assert_equal(res,(1,"hello")) + assert_equal(sys.getrefcount(res),2) def check_args_kw(self,level=5): def foo(val1,val2,val3=1): return (val1,val2,val3) @@ -507,8 +507,8 @@ return_val = foo.call(args,kw); """ res = inline_tools.inline(code,['foo']) - assert res == (1,"hello",3) - assert sys.getrefcount(res) == 2 + assert_equal(res,(1,"hello",3)) + assert_equal(sys.getrefcount(res),2) def check_noargs_with_args(self,level=5): # calling a function that does take args with args # should fail. @@ -530,19 +530,19 @@ except TypeError: third = sys.getrefcount(foo) # first should == second, but the weird refcount error - assert second == third + assert_equal(second,third) class test_object_mcall(NumpyTestCase): def check_noargs(self,level=5): a = foo() res = inline_tools.inline('return_val = a.mcall("bar");',['a']) - assert res == "bar results" + assert_equal(res,"bar results") first = sys.getrefcount(res) del res res = inline_tools.inline('return_val = a.mcall("bar");',['a']) - assert res == "bar results" + assert_equal(res,"bar results") second = sys.getrefcount(res) - assert first == second + assert_equal(first,second) def check_args(self,level=5): a = foo() code = """ @@ -552,8 +552,8 @@ return_val = a.mcall("bar2",args); """ res = inline_tools.inline(code,['a']) - assert res == (1,"hello") - assert sys.getrefcount(res) == 2 + assert_equal(res,(1,"hello")) + assert_equal(sys.getrefcount(res),2) def check_args_kw(self,level=5): a = foo() code = """ @@ -565,19 +565,19 @@ return_val = a.mcall("bar3",args,kw); """ res = inline_tools.inline(code,['a']) - assert res == (1,"hello",3) - assert sys.getrefcount(res) == 2 + assert_equal(res,(1,"hello",3)) + assert_equal(sys.getrefcount(res),2) def check_std_noargs(self,level=5): a = foo() method = "bar" res = inline_tools.inline('return_val = a.mcall(method);',['a','method']) - assert res == "bar results" + assert_equal(res,"bar results") first = sys.getrefcount(res) del res res = inline_tools.inline('return_val = a.mcall(method);',['a','method']) - assert res == "bar results" + assert_equal(res,"bar results") second = sys.getrefcount(res) - assert first == second + assert_equal(first,second) def check_std_args(self,level=5): a = foo() method = "bar2" @@ -588,8 +588,8 @@ return_val = a.mcall(method,args); """ res = inline_tools.inline(code,['a','method']) - assert res == (1,"hello") - assert sys.getrefcount(res) == 2 + assert_equal(res,(1,"hello")) + assert_equal(sys.getrefcount(res),2) def check_std_args_kw(self,level=5): a = foo() method = "bar3" @@ -602,8 +602,8 @@ return_val = a.mcall(method,args,kw); """ res = inline_tools.inline(code,['a','method']) - assert res == (1,"hello",3) - assert sys.getrefcount(res) == 2 + assert_equal(res,(1,"hello",3)) + assert_equal(sys.getrefcount(res),2) def 
check_noargs_with_args(self,level=5): # calling a function that does take args with args # should fail. @@ -624,7 +624,7 @@ except TypeError: third = sys.getrefcount(a) # first should == second, but the weird refcount error - assert second == third + assert_equal(second,third) class test_object_hash(NumpyTestCase): def check_hash(self,level=5): @@ -634,7 +634,7 @@ a= foo() res = inline_tools.inline('return_val = a.hash(); ',['a']) print 'hash:', res - assert res == 123 + assert_equal(res,123) class test_object_is_true(NumpyTestCase): def check_true(self,level=5): @@ -642,23 +642,23 @@ pass a= foo() res = inline_tools.inline('return_val = a.is_true();',['a']) - assert res == 1 + assert_equal(res,1) def check_false(self,level=5): a= None res = inline_tools.inline('return_val = a.is_true();',['a']) - assert res == 0 + assert_equal(res,0) class test_object_is_true(NumpyTestCase): def check_false(self,level=5): class foo: pass a= foo() - res = inline_tools.inline('return_val = a.not();',['a']) - assert res == 0 + res = inline_tools.inline('return_val = a.mcall("not");',['a']) + assert_equal(res,0) def check_true(self,level=5): a= None - res = inline_tools.inline('return_val = a.not();',['a']) - assert res == 1 + res = inline_tools.inline('return_val = a.mcall("not");',['a']) + assert_equal(res,1) class test_object_type(NumpyTestCase): def check_type(self,level=5): @@ -666,7 +666,7 @@ pass a= foo() res = inline_tools.inline('return_val = a.type();',['a']) - assert res == type(a) + assert_equal(res,type(a)) class test_object_size(NumpyTestCase): def check_size(self,level=5): @@ -675,21 +675,21 @@ return 10 a= foo() res = inline_tools.inline('return_val = a.size();',['a']) - assert res == len(a) + assert_equal(res,len(a)) def check_len(self,level=5): class foo: def __len__(self): return 10 a= foo() res = inline_tools.inline('return_val = a.len();',['a']) - assert res == len(a) + assert_equal(res,len(a)) def check_length(self,level=5): class foo: def __len__(self): return 10 a= foo() res = inline_tools.inline('return_val = a.length();',['a']) - assert res == len(a) + assert_equal(res,len(a)) from UserList import UserList class test_object_set_item_op_index(NumpyTestCase): @@ -699,32 +699,32 @@ inline_tools.inline("a[1] = 1234;",['a']) before1 = sys.getrefcount(a) after1 = sys.getrefcount(a) - assert after1 == before1 + assert_equal(after1,before1) def check_set_int(self,level=5): a = UserList([1,2,3]) inline_tools.inline("a[1] = 1234;",['a']) - assert sys.getrefcount(a[1]) == 2 - assert a[1] == 1234 + assert_equal(sys.getrefcount(a[1]),2) + assert_equal(a[1],1234) def check_set_double(self,level=5): a = UserList([1,2,3]) inline_tools.inline("a[1] = 123.0;",['a']) - assert sys.getrefcount(a[1]) == 2 - assert a[1] == 123.0 + assert_equal(sys.getrefcount(a[1]),2) + assert_equal(a[1],123.0) def check_set_char(self,level=5): a = UserList([1,2,3]) inline_tools.inline('a[1] = "bubba";',['a']) - assert sys.getrefcount(a[1]) == 2 - assert a[1] == 'bubba' + assert_equal(sys.getrefcount(a[1]),2) + assert_equal(a[1],'bubba') def check_set_string(self,level=5): a = UserList([1,2,3]) inline_tools.inline('a[1] = std::string("sissy");',['a']) - assert sys.getrefcount(a[1]) == 2 - assert a[1] == 'sissy' + assert_equal(sys.getrefcount(a[1]),2) + assert_equal(a[1],'sissy') def check_set_string(self,level=5): a = UserList([1,2,3]) inline_tools.inline('a[1] = std::complex(1,1);',['a']) - assert sys.getrefcount(a[1]) == 2 - assert a[1] == 1+1j + assert_equal(sys.getrefcount(a[1]),2) + assert_equal(a[1],1+1j) from 
UserDict import UserDict class test_object_set_item_op_key(NumpyTestCase): @@ -755,9 +755,12 @@ return_val = ref_counts; """ obj,key,val = inline_tools.inline(code,['a']) - assert obj[0] == obj[1] and obj[1] == obj[2] - assert key[0] + 1 == key[1] and key[1] == key[2] - assert val[0] + 1 == val[1] and val[1] == val[2] + assert_equal(obj[0],obj[1]) + assert_equal(obj[1],obj[2]) + assert_equal(key[0] + 1, key[1]) + assert_equal(key[1], key[2]) + assert_equal(val[0] + 1, val[1]) + assert_equal(val[1], val[2]) def check_set_double_exists(self,level=5): a = UserDict() @@ -767,30 +770,30 @@ first = sys.getrefcount(key) inline_tools.inline('a[key] = 123.0;',['a','key']) second = sys.getrefcount(key) - assert first == second + assert_equal(first,second) # !! I think the following should be 3 - assert sys.getrefcount(key) == 5 - assert sys.getrefcount(a[key]) == 2 - assert a[key] == 123.0 + assert_equal(sys.getrefcount(key),5) + assert_equal(sys.getrefcount(a[key]),2) + assert_equal(a[key],123.0) def check_set_double_new(self,level=5): a = UserDict() key = 1.0 inline_tools.inline('a[key] = 123.0;',['a','key']) - assert sys.getrefcount(key) == 4 # should be 3 - assert sys.getrefcount(a[key]) == 2 - assert a[key] == 123.0 + assert_equal(sys.getrefcount(key),4) # should be 3 + assert_equal(sys.getrefcount(a[key]),2) + assert_equal(a[key],123.0) def check_set_complex(self,level=5): a = UserDict() key = 1+1j inline_tools.inline("a[key] = 1234;",['a','key']) - assert sys.getrefcount(key) == 3 - assert sys.getrefcount(a[key]) == 2 - assert a[key] == 1234 + assert_equal(sys.getrefcount(key),3) + assert_equal(sys.getrefcount(a[key]),2) + assert_equal(a[key],1234) def check_set_char(self,level=5): a = UserDict() inline_tools.inline('a["hello"] = 123.0;',['a']) - assert sys.getrefcount(a["hello"]) == 2 - assert a["hello"] == 123.0 + assert_equal(sys.getrefcount(a["hello"]),2) + assert_equal(a["hello"],123.0) def check_set_class(self,level=5): a = UserDict() @@ -805,17 +808,20 @@ inline_tools.inline('a[key] = "bubba";',['a','key']) second = sys.getrefcount(key) # I don't think we're leaking if this is true - assert first == second + assert_equal(first,second) # !! BUT -- I think this should be 3 - assert sys.getrefcount(key) == 4 - assert sys.getrefcount(a[key]) == 2 - assert a[key] == 'bubba' + assert_equal(sys.getrefcount(key),4) + assert_equal(sys.getrefcount(a[key]),2) + assert_equal(a[key],'bubba') def check_set_from_member(self,level=5): a = UserDict() a['first'] = 1 a['second'] = 2 inline_tools.inline('a["first"] = a["second"];',['a']) - assert a['first'] == a['second'] + assert_equal(a['first'],a['second']) if __name__ == "__main__": + import sys + if len(sys.argv) == 1: + sys.argv.extend(["--level=5"]) NumpyTest().run() From scipy-svn at scipy.org Tue Aug 28 12:30:01 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 28 Aug 2007 11:30:01 -0500 (CDT) Subject: [Scipy-svn] r3272 - in trunk/scipy/sparse: . tests Message-ID: <20070828163001.1E11C39C315@new.scipy.org> Author: stefan Date: 2007-08-28 11:29:41 -0500 (Tue, 28 Aug 2007) New Revision: 3272 Modified: trunk/scipy/sparse/sparse.py trunk/scipy/sparse/tests/test_sparse.py Log: Reject invalid shape assignments. Support reshaping. Fix whitespace. Closes ticket #335. 
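For illustration, a minimal sketch (not part of the commit) of the behaviour this log describes, assuming the scipy.sparse API at this revision; it mirrors the check_reshape test added in the diff below:

    from scipy.sparse import lil_matrix

    x = lil_matrix((4, 3))
    x[0, 0] = 1
    x[3, 2] = 5

    # lil_matrix.reshape now agrees with the dense equivalent.
    assert (x.reshape((12, 1)).todense() == x.todense().reshape((12, 1))).all()

    # Assigning a shape that is not two-dimensional is rejected.
    try:
        x.shape = (1, 2, 3)
    except ValueError:
        print "only two-dimensional sparse arrays are supported"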
Modified: trunk/scipy/sparse/sparse.py =================================================================== --- trunk/scipy/sparse/sparse.py 2007-08-27 23:14:25 UTC (rev 3271) +++ trunk/scipy/sparse/sparse.py 2007-08-28 16:29:41 UTC (rev 3272) @@ -6,8 +6,8 @@ __all__ = ['spmatrix','csc_matrix','csr_matrix','coo_matrix', - 'lil_matrix','dok_matrix', - 'spdiags','speye','spidentity','extract_diagonal', + 'lil_matrix','dok_matrix', + 'spdiags','speye','spidentity','extract_diagonal', 'isspmatrix','issparse','isspmatrix_csc','isspmatrix_csr', 'isspmatrix_lil','isspmatrix_dok', 'lil_eye', 'lil_diags' ] @@ -16,14 +16,14 @@ from numpy import zeros, isscalar, real, imag, asarray, asmatrix, matrix, \ ndarray, amax, amin, rank, conj, searchsorted, ndarray, \ less, where, greater, array, transpose, empty, ones, \ - arange, shape, intc, clip + arange, shape, intc, clip, prod, unravel_index import numpy from scipy.sparse.sparsetools import cscmux, csrmux, \ cootocsr, csrtocoo, cootocsc, csctocoo, csctocsr, csrtocsc, \ densetocsr, csrtodense, \ csrmucsr, cscmucsc, \ csr_plus_csr, csc_plus_csc, csr_minus_csr, csc_minus_csc, \ - csr_elmul_csr, csc_elmul_csc, csr_eldiv_csr, csc_eldiv_csc + csr_elmul_csr, csc_elmul_csc, csr_eldiv_csr, csc_eldiv_csc import sparsetools import itertools, operator, copy @@ -92,12 +92,34 @@ ndim = 2 def __init__(self, maxprint=MAXPRINT, allocsize=ALLOCSIZE): self.format = self.__class__.__name__[:3] + self._shape = None if self.format == 'spm': raise ValueError, "This class is not intended" \ " to be instantiated directly." self.maxprint = maxprint self.allocsize = allocsize + def set_shape(self,shape): + s = tuple(shape) + if len(s) != 2: + raise ValueError("Only two-dimensional sparse arrays " + "are supported.") + if (self._shape != shape) and (self._shape is not None): + try: + self = self.reshape(shape) + except NotImplementedError: + raise NotImplementedError("Reshaping not implemented for %s." % + self.__class__.__name__) + self._shape = shape + + def get_shape(self): + return self._shape + + shape = property(fget=get_shape, fset=set_shape) + + def reshape(self,shape): + raise NotImplementedError + def astype(self, t): csc = self.tocsc() return csc.astype(t) @@ -159,7 +181,7 @@ return "<%dx%d sparse matrix of type '%s'\n" \ "\twith %d stored elements in %s format>" % \ (self.shape + (self.dtype.type, nnz, _formats[format][1])) - + def __str__(self): nnz = self.getnnz() maxprint = self.getmaxprint() @@ -192,7 +214,7 @@ # and operations return in csc format # thus, a new sparse matrix format just needs to define # a tocsc method - + def __abs__(self): csc = self.tocsc() return abs(csc) @@ -200,7 +222,7 @@ def __add__(self, other): # self + other csc = self.tocsc() return csc.__add__(other) - + def __radd__(self, other): # other + self return self.__add__(other) @@ -232,7 +254,7 @@ def __pow__(self, other): csc = self.tocsc() return csc ** other - + def __neg__(self): csc = self.tocsc() return -csc @@ -292,7 +314,7 @@ return self * a def getrow(self, i): - """Returns a copy of row i of the matrix, as a (1 x n) sparse + """Returns a copy of row i of the matrix, as a (1 x n) sparse matrix (row vector). """ # Spmatrix subclasses should override this method for efficiency. @@ -431,7 +453,7 @@ return self.sum(None) * 1.0 / (self.shape[0]*self.shape[1]) else: raise ValueError, "axis out of bounds" - + def setdiag(self, values, k=0): """Fills the diagonal elements {a_ii} with the values from the given sequence. 
If k != 0, fills the off-diagonal elements @@ -453,7 +475,7 @@ for i,v in enumerate(values[:max_index]): self[i, i + k] = v - + def save(self, file_name, format = '%d %d %f\n'): try: fd = open(file_name, 'w') @@ -478,7 +500,7 @@ "elements (space for %d)\n\tin %s format>" % \ (self.shape + (self.dtype.type, self.getnnz(), self.nzmax, \ _formats[format][1])) - + def _with_data(self,data,copy=True): """ Return a matrix with the same sparsity structure as self, @@ -491,17 +513,17 @@ else: return self.__class__((data,self.indices,self.indptr), \ dims=self.shape,dtype=data.dtype,check=False) - + def __abs__(self): return self._with_data(abs(self.data)) - + def _real(self): return self._with_data(numpy.real(self.data)) - + def _imag(self): return self._with_data(numpy.imag(self.data)) - - + + def _binopt(self, other, fn, in_shape=None, out_shape=None): """apply the binary operation fn to two sparse matrices""" other = self._tothis(other) @@ -510,13 +532,13 @@ in_shape = self.shape if out_shape is None: out_shape = self.shape - + indptr, ind, data = fn(in_shape[0], in_shape[1], \ - self.indptr, self.indices, self.data, + self.indptr, self.indices, self.data, other.indptr, other.indices, other.data) return self.__class__((data, ind, indptr), dims=out_shape, check=False) - - + + def __add__(self,other,fn): # First check if argument is a scalar if isscalarlike(other): @@ -532,7 +554,7 @@ return self.todense() + other else: raise NotImplemented - + def __sub__(self,other,fn): # First check if argument is a scalar if isscalarlike(other): @@ -548,9 +570,9 @@ return self.todense() - other else: raise NotImplemented - - - def __mul__(self, other): # self * other + + + def __mul__(self, other): # self * other """ Scalar, vector, or matrix multiplication """ if isscalarlike(other): @@ -559,7 +581,7 @@ return self.dot(other) - def __rmul__(self, other): # other * self + def __rmul__(self, other): # other * self if isscalarlike(other): return self.__mul__(other) else: @@ -619,10 +641,10 @@ # being created on-the-fly like dense matrix objects can. #if len(other) != self.shape[1]: # raise ValueError, "dimension mismatch" - oth = numpy.ravel(other) + oth = numpy.ravel(other) y = fn(self.shape[0], self.shape[1], \ self.indptr, self.indices, self.data, oth) - if isinstance(other, matrix): + if isinstance(other, matrix): y = asmatrix(y) if other.ndim == 2 and other.shape[1] == 1: # If 'other' was an (nx1) column vector, transpose the result @@ -649,7 +671,7 @@ self.indptr, self.indices, self.data) return coo_matrix((data, (rows, cols)), self.shape) - + def sum(self, axis=None): """Sum the matrix over the given axis. If the axis is None, sum over both rows and columns, returning a scalar. @@ -665,7 +687,7 @@ def copy(self): return self._with_data(self.data.copy(),copy=True) - + def _get_slice(self, i, start, stop, stride, dims): """Returns a view of the elements [i, myslice.start:myslice.stop]. 
""" @@ -675,7 +697,7 @@ raise ValueError, "slice width must be >= 1" indices = [] - + for ind in xrange(self.indptr[i], self.indptr[i+1]): if self.indices[ind] >= start and self.indices[ind] < stop: indices.append(ind) @@ -690,8 +712,8 @@ def _transpose(self, cls, copy=False): M, N = self.shape return cls((self.data,self.indices,self.indptr),(N,M),copy=copy,check=False) - + def conj(self, copy=False): return self._with_data(self.data.conj(),copy=copy) @@ -843,7 +865,7 @@ else: # Matrix is completely empty M = max(oldM, M) - + self.shape = (M, N) self._check(check) @@ -885,9 +907,9 @@ "the size of data list" if (self.indices.dtype != numpy.intc): self.indices = self.indices.astype(numpy.intc) - if (self.indptr.dtype != numpy.intc): + if (self.indptr.dtype != numpy.intc): self.indptr = self.indptr.astype(numpy.intc) - + self.nnz = nnz self.nzmax = nzmax self.dtype = self.data.dtype @@ -903,16 +925,16 @@ else: return _cs_matrix.__getattr__(self, attr) - + def __add__(self, other): return _cs_matrix.__add__(self, other, csc_plus_csc) - + def __sub__(self, other): return _cs_matrix.__sub__(self, other, csc_minus_csc) def __truediv__(self,other): return _cs_matrix.__truediv__(self,other, csc_eldiv_csc) - + def __pow__(self, other): return _cs_matrix.__pow__(self, other, csc_elmul_csc) @@ -986,21 +1008,21 @@ alloc = max(1, self.allocsize) self.data = resize1d(self.data, nzmax + alloc) self.indices = resize1d(self.indices, nzmax + alloc) - + newindex = self.indptr[col] self.data[newindex+1:] = self.data[newindex:-1] self.indices[newindex+1:] = self.indices[newindex:-1] - + self.data[newindex] = val self.indices[newindex] = row self.indptr[col+1:] += 1 - + elif len(indxs[0]) == 1: #value already present self.data[self.indptr[col]:self.indptr[col+1]][indxs[0]] = val else: raise IndexError, "row index occurs more than once" - + self._check() else: # We should allow slices here! @@ -1014,7 +1036,7 @@ """ start, stop, stride = myslice.indices(self.shape[0]) return _cs_matrix._get_slice(self, j, start, stop, stride, (stop - start, 1)) - + def rowcol(self, ind): row = self.indices[ind] col = searchsorted(self.indptr, ind+1)-1 @@ -1036,7 +1058,7 @@ def _tothis(self, other): return other.tocsc() - + def toarray(self): return self.tocsr().toarray() @@ -1165,7 +1187,7 @@ else: raise ValueError, "unrecognized form for csr_matrix constructor" - + # Read matrix dimensions given, if any if dims is not None: try: @@ -1188,7 +1210,7 @@ N = max(oldN, N) self.shape = (M, N) - + self._check(check) def _check(self,full_check=True): @@ -1226,7 +1248,7 @@ "the size of data list" if (self.indices.dtype != numpy.intc): self.indices = self.indices.astype(numpy.intc) - if (self.indptr.dtype != numpy.intc): + if (self.indptr.dtype != numpy.intc): self.indptr = self.indptr.astype(numpy.intc) self.nnz = nnz @@ -1243,13 +1265,13 @@ return self.indices else: return _cs_matrix.__getattr__(self, attr) - + def __add__(self, other): return _cs_matrix.__add__(self, other, csr_plus_csr) - + def __sub__(self, other): return _cs_matrix.__sub__(self, other, csr_minus_csr) - + def __truediv__(self,other): return _cs_matrix.__truediv__(self,other, csr_eldiv_csr) @@ -1297,13 +1319,13 @@ def _getslice(self, i, myslice): return self._getrowslice(i, myslice) - + def _getrowslice(self, i, myslice): """Returns a view of the elements [i, myslice.start:myslice.stop]. 
""" start, stop, stride = myslice.indices(self.shape[1]) return _cs_matrix._get_slice(self, i, start, stop, stride, (1, stop-start)) - + def __setitem__(self, key, val): if isinstance(key, tuple): row = key[0] @@ -1331,21 +1353,21 @@ alloc = max(1, self.allocsize) self.data = resize1d(self.data, nzmax + alloc) self.indices = resize1d(self.indices, nzmax + alloc) - + newindex = self.indptr[row] self.data[newindex+1:] = self.data[newindex:-1] self.indices[newindex+1:] = self.indices[newindex:-1] - + self.data[newindex] = val self.indices[newindex] = col self.indptr[row+1:] += 1 - + elif len(indxs[0]) == 1: #value already present self.data[self.indptr[row]:self.indptr[row+1]][indxs[0]] = val else: raise IndexError, "row index occurs more than once" - + self._check() else: # We should allow slices here! @@ -1372,7 +1394,7 @@ def _tothis(self, other): return other.tocsr() - + def toarray(self): data = numpy.zeros(self.shape, self.data.dtype) csrtodense(self.shape[0], self.shape[1], self.indptr, self.indices, @@ -1423,16 +1445,7 @@ to copy. """ dict.__init__(self) - spmatrix.__init__(self) - if shape is None: - self.shape = (0, 0) - else: - try: - m, n = shape - except: - raise "shape not understood" - else: - self.shape = shape + spmatrix.__init__(self,shape) self.dtype = getdtype(dtype, A, default=float) if A is not None: if isinstance(A, tuple): @@ -1670,7 +1683,7 @@ # Not a sequence raise TypeError, "unsupported type for" \ " dok_matrix.__setitem__" - + # Value is a sequence for element, val in itertools.izip(seq, value): self[element, j] = val # don't use dict.__setitem__ @@ -2009,12 +2022,12 @@ where the dimensions are optional. If supplied, we set (M, N) = dims. If not supplied, we infer these from the index arrays ij[0][:] and ij[1][:] - + The arguments 'obj' and 'ij' represent three arrays: 1. obj[:] the entries of the matrix, in any order 2. ij[0][:] the row indices of the matrix entries 3. ij[1][:] the column indices of the matrix entries - + So the following holds: A[ij[0][k], ij[1][k] = obj[k] """ @@ -2039,7 +2052,7 @@ return else: raise TypeError, "invalid input format" - + self.dtype = getdtype(dtype, obj, default=float) try: @@ -2047,7 +2060,7 @@ raise TypeError except TypeError: raise TypeError, "invalid input format" - + if dims is None: if len(ij[0]) == 0 or len(ij[1]) == 0: raise ValueError, "cannot infer dimensions from zero sized index arrays" @@ -2058,7 +2071,7 @@ # Use 2 steps to ensure dims has length 2. 
M, N = dims self.shape = (M, N) - + self.row = asarray(ij[0], dtype=numpy.intc) self.col = asarray(ij[1], dtype=numpy.intc) self.data = asarray(obj, dtype=self.dtype) @@ -2096,16 +2109,16 @@ #sort by increasing rows first, columns second if getattr(self, '_is_normalized', None): #columns already sorted, use stable sort for rows - P = numpy.argsort(self.row, kind='mergesort') + P = numpy.argsort(self.row, kind='mergesort') return self.data[P], self.row[P], self.col[P] else: #nothing already sorted - P = numpy.lexsort(keys=(self.col, self.row)) + P = numpy.lexsort(keys=(self.col, self.row)) return self.data[P], self.row[P], self.col[P] if getattr(self, '_is_normalized', None): return self.data, self.row, self.col #sort by increasing rows first, columns second - P = numpy.lexsort(keys=(self.row, self.col)) + P = numpy.lexsort(keys=(self.row, self.col)) self.data, self.row, self.col = self.data[P], self.row[P], self.col[P] setattr(self, '_is_normalized', 1) return self.data, self.row, self.col @@ -2125,23 +2138,23 @@ self.data) return csc_matrix((data, rowind, indptr), self.shape, check=False) - + def tocsr(self): if self.nnz == 0: return csr_matrix(self.shape, dtype=self.dtype) else: indptr, colind, data = cootocsr(self.shape[0], self.shape[1], \ self.size, self.row, self.col, \ - self.data) + self.data) return csr_matrix((data, colind, indptr), self.shape, check=False) - + def tocoo(self, copy=False): return self.toself(copy) class lil_matrix(spmatrix): """Row-based linked list matrix, by Ed Schofield. - + This contains a list (self.rows) of rows, each of which is a sorted list of column indices of non-zero elements. It also contains a list (self.data) of lists of these elements. @@ -2176,7 +2189,7 @@ if not isinstance(A, lil_matrix) and \ not isinstance(A, csr_matrix): raise TypeError, "unsupported matrix type" - + # Otherwise, try converting to a matrix. So if it's # a list (rank 1), it will become a row vector else: @@ -2251,7 +2264,7 @@ if j < 0: j += self.shape[1] - + if j < 0 or j > self.shape[1]: raise IndexError,'column index out of bounds' @@ -2267,7 +2280,7 @@ elif j.start is None: start = 0 else: - start = j.start + start = j.start if j.stop is not None and j.stop < 0: stop = shape + j.stop elif j.stop is None: @@ -2277,7 +2290,7 @@ j = range(start, stop, j.step or 1) return j - + def __getitem__(self, index): """Return the element(s) index=(i, j), where j may be a slice. This always returns a copy for consistency, since slices into @@ -2309,7 +2322,7 @@ else: raise IndexError - + def _insertat(self, i, j, x): """ helper for __setitem__: insert a value at (i,j) where i, j and x are all scalars """ @@ -2320,13 +2333,13 @@ def _insertat2(self, row, data, j, x): """ helper for __setitem__: insert a value in the given row/data at column j. 
""" - + if j < 0: #handle negative column indices j += self.shape[1] if j < 0 or j >= self.shape[1]: raise IndexError,'column index out of bounds' - + pos = bisect_left(row, j) if x != 0: if pos == len(row): @@ -2379,7 +2392,7 @@ row = self.rows[i] data = self.data[i] self._insertat3(row, data, j, x) - elif issequence(i) and issequence(j): + elif issequence(i) and issequence(j): if isscalar(x): for ii, jj in zip(i, j): self._insertat(ii, jj, x) @@ -2450,6 +2463,15 @@ new.rows = copy.deepcopy(self.rows) return new + def reshape(self,shape): + new = lil_matrix(shape,dtype=self.dtype) + j_max = self.shape[1] + for i,row in enumerate(self.rows): + for col,j in enumerate(row): + new_r,new_c = unravel_index(i*j_max + j,shape) + new[new_r,new_c] = self[i,j] + return new + def __add__(self, other): if isscalar(other): new = self.copy() @@ -2465,8 +2487,8 @@ return self.__mul__(other) else: return spmatrix.__rmul__(self, other) - - + + def toarray(self): d = zeros(self.shape, dtype=self.dtype) for i, row in enumerate(self.rows): @@ -2480,7 +2502,7 @@ # Overriding the spmatrix.transpose method here prevents an unnecessary # csr -> csc conversion return self.tocsr().transpose() - + def tocsr(self, nzmax=None): """ Return Compressed Sparse Row format arrays for this matrix. """ @@ -2636,7 +2658,7 @@ assert(len(offsets) == diags.shape[0]) indptr, rowind, data = sparsetools.spdiags(M, N, len(offsets), offsets, diags) return csc_matrix((data, rowind, indptr), (M, N)) - + def extract_diagonal(A): """ extract_diagonal(A) returns the main diagonal of A. @@ -2739,5 +2761,3 @@ def issequence(t): return isinstance(t, (list, tuple)) - - Modified: trunk/scipy/sparse/tests/test_sparse.py =================================================================== --- trunk/scipy/sparse/tests/test_sparse.py 2007-08-27 23:14:25 UTC (rev 3271) +++ trunk/scipy/sparse/tests/test_sparse.py 2007-08-28 16:29:41 UTC (rev 3272) @@ -813,6 +813,17 @@ x = x*0 assert_equal(x[0,0],0) + def check_reshape(self): + x = lil_matrix((4,3)) + x[0,0] = 1 + x[2,1] = 3 + x[3,2] = 5 + x[0,2] = 7 + + for s in [(12,1),(1,12)]: + assert_array_equal(x.reshape(s).todense(), + x.todense().reshape(s)) + def check_lil_lil_assignment(self): """ Tests whether a row of one lil_matrix can be assigned to another. From scipy-svn at scipy.org Tue Aug 28 12:55:00 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 28 Aug 2007 11:55:00 -0500 (CDT) Subject: [Scipy-svn] r3273 - in trunk/scipy/misc: . tests tests/data Message-ID: <20070828165500.EA50A39C053@new.scipy.org> Author: stefan Date: 2007-08-28 11:54:29 -0500 (Tue, 28 Aug 2007) New Revision: 3273 Added: trunk/scipy/misc/tests/data/ trunk/scipy/misc/tests/data/icon.png trunk/scipy/misc/tests/data/icon_mono.png trunk/scipy/misc/tests/data/icon_mono_flat.png Modified: trunk/scipy/misc/pilutil.py trunk/scipy/misc/tests/test_pilutil.py Log: Fix fromimage for monochrome images (closes ticket #259). Add tests. Modified: trunk/scipy/misc/pilutil.py =================================================================== --- trunk/scipy/misc/pilutil.py 2007-08-28 16:29:41 UTC (rev 3272) +++ trunk/scipy/misc/pilutil.py 2007-08-28 16:54:29 UTC (rev 3273) @@ -46,53 +46,26 @@ return def fromimage(im, flatten=0): - """Takes a PIL image and returns a copy of the image in a numpy container. - If the image is RGB returns a 3-dimensional array: arr[:,:,n] is each channel + """Return a copy of a PIL image as a numpy array. - Optional arguments: + :Parameters: + im : PIL image + Input image. 
+ flatten : bool + If true, convert the output to grey-scale. - - flatten (0): if true, the image is flattened by calling convert('F') on - the image object before extracting the numerical data. This flattens the - color layers into a single grayscale layer. Note that the supplied image - object is NOT modified. + :Returns: + img_array : ndarray + The different colour bands/channels are stored in the + third dimension, such that a grey-image is MxN, an + RGB-image MxNx3 and an RGBA-image MxNx4. + """ - assert Image.isImageType(im), "Not a PIL image." + if not Image.isImageType(im): + raise TypeError("Input is not a PIL image.") if flatten: im = im.convert('F') - mode = im.mode - adjust = 0 - if mode == '1': - im = im.convert(mode='L') - mode = 'L' - adjust = 1 - str = im.tostring() - type = uint8 - if mode == 'F': - type = numpy.float32 - elif mode == 'I': - type = numpy.uint32 - elif mode == 'I;16': - type = numpy.uint16 - arr = numpy.fromstring(str,type) - shape = list(im.size) - shape.reverse() - if mode == 'P': - arr.shape = shape - if im.palette.rawmode != 'RGB': - print "Warning: Image has invalid palette." - return arr - pal = numpy.fromstring(im.palette.data,type) - N = len(pal) - pal.shape = (int(N/3.0),3) - return arr, pal - if mode in ['RGB','YCbCr']: - shape += [3] - elif mode in ['CMYK','RGBA']: - shape += [4] - arr.shape = shape - if adjust: - arr = (arr != 0) - return arr + return array(im) _errstr = "Mode is unknown or incompatible with input array shape." def toimage(arr,high=255,low=0,cmin=None,cmax=None,pal=None, Added: trunk/scipy/misc/tests/data/icon.png =================================================================== (Binary files differ) Property changes on: trunk/scipy/misc/tests/data/icon.png ___________________________________________________________________ Name: svn:mime-type + application/octet-stream Added: trunk/scipy/misc/tests/data/icon_mono.png =================================================================== (Binary files differ) Property changes on: trunk/scipy/misc/tests/data/icon_mono.png ___________________________________________________________________ Name: svn:mime-type + application/octet-stream Added: trunk/scipy/misc/tests/data/icon_mono_flat.png =================================================================== (Binary files differ) Property changes on: trunk/scipy/misc/tests/data/icon_mono_flat.png ___________________________________________________________________ Name: svn:mime-type + application/octet-stream Modified: trunk/scipy/misc/tests/test_pilutil.py =================================================================== --- trunk/scipy/misc/tests/test_pilutil.py 2007-08-28 16:29:41 UTC (rev 3272) +++ trunk/scipy/misc/tests/test_pilutil.py 2007-08-28 16:54:29 UTC (rev 3273) @@ -1,22 +1,41 @@ from numpy.testing import * set_package_path() +import PIL.Image import scipy.misc.pilutil as pilutil restore_path() +import glob +import os.path import numpy as N -class test_pilutil(NumpyTestCase): - def check_imresize(self): +datapath = os.path.dirname(__file__) + +class test_pilutil(ParametricTestCase): + def test_imresize(self): im = N.random.random((10,20)) for T in N.sctypes['float'] + [float]: im1 = pilutil.imresize(im,T(1.1)) assert_equal(im1.shape,(11,22)) - def check_bytescale(self): + def test_bytescale(self): x = N.array([0,1,2],N.uint8) y = N.array([0,1,2]) assert_equal(pilutil.bytescale(x),x) assert_equal(pilutil.bytescale(y),[0,127,255]) + def tst_fromimage(self,filename,irange): + img = 
pilutil.fromimage(PIL.Image.open(filename)) + imin,imax = irange + assert img.min() >= imin + assert img.max() <= imax + + def testip_fromimage(self): + data = {'icon.png':(0,255), + 'icon_mono.png':(0,2), + 'icon_mono_flat.png':(0,1)} + + return ((self.tst_fromimage,os.path.join(datapath,'data',fn),irange) + for fn,irange in data.iteritems()) + if __name__ == "__main__": NumpyTest().run() From scipy-svn at scipy.org Tue Aug 28 15:24:06 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 28 Aug 2007 14:24:06 -0500 (CDT) Subject: [Scipy-svn] r3274 - in trunk/scipy/sandbox/models: . tests Message-ID: <20070828192406.4911839C034@new.scipy.org> Author: jonathan.taylor Date: 2007-08-28 14:23:59 -0500 (Tue, 28 Aug 2007) New Revision: 3274 Added: trunk/scipy/sandbox/models/tests/test_bspline.py Modified: trunk/scipy/sandbox/models/bspline.py trunk/scipy/sandbox/models/bspline_module.py trunk/scipy/sandbox/models/gam.py trunk/scipy/sandbox/models/glm.py trunk/scipy/sandbox/models/info.py trunk/scipy/sandbox/models/setup.py trunk/scipy/sandbox/models/smoothers.py Log: added proper docstrings to BSpline and SmoothingSpline classes Modified: trunk/scipy/sandbox/models/bspline.py =================================================================== --- trunk/scipy/sandbox/models/bspline.py 2007-08-28 16:54:29 UTC (rev 3273) +++ trunk/scipy/sandbox/models/bspline.py 2007-08-28 19:23:59 UTC (rev 3274) @@ -5,9 +5,51 @@ from scipy.optimize import golden from scipy.sandbox.models import _bspline +def _band2array(a, lower=0, symmetric=False, hermitian=False): + """ + Take an upper or lower triangular banded matrix and return a + numpy array. + + INPUTS: + a -- a matrix in upper or lower triangular banded matrix + lower -- is the matrix upper or lower triangular? + symmetric -- if True, return the original result plus its transpose + hermitian -- if True (and symmetric False), return the original + result plus its conjugate transposed + + """ + + n = a.shape[1] + r = a.shape[0] + _a = 0 + + if not lower: + for j in range(r): + _b = N.diag(a[r-1-j],k=j)[j:(n+j),j:(n+j)] + _a += _b + if symmetric and j > 0: _a += _b.T + elif hermitian and j > 0: _a += _b.conjugate().T + else: + for j in range(r): + _b = N.diag(a[j],k=j)[0:n,0:n] + _a += _b + if symmetric and j > 0: _a += _b.T + elif hermitian and j > 0: _a += _b.conjugate().T + _a = _a.T + + return _a + + def _upper2lower(ub): """ Convert upper triangular banded matrix to lower banded form. + + INPUTS: + ub -- an upper triangular banded matrix + + OUTPUTS: lb + lb -- a lower triangular banded matrix with same entries + as ub """ lb = N.zeros(ub.shape, ub.dtype) @@ -19,7 +61,14 @@ def _lower2upper(lb): """ - Convert upper triangular banded matrix to lower banded form. + Convert lower triangular banded matrix to upper banded form. + + INPUTS: + lb -- a lower triangular banded matrix + + OUTPUTS: ub + ub -- an upper triangular banded matrix with same entries + as lb """ ub = N.zeros(lb.shape, lb.dtype) @@ -31,8 +80,22 @@ def _triangle2unit(tb, lower=0): """ - Take a banded triangular matrix and return its diagonal and the unit matrix: - the banded triangular matrix with 1's on the diagonal. + Take a banded triangular matrix and return its diagonal and the + unit matrix: the banded triangular matrix with 1's on the diagonal, + i.e. each row is divided by the corresponding entry on the diagonal. 
+ + INPUTS: + tb -- a lower triangular banded matrix + lower -- if True, then tb is assumed to be lower triangular banded, + in which case return value is also lower triangular banded. + + OUTPUTS: d, b + d -- diagonal entries of tb + b -- unit matrix: if lower is False, b is upper triangular + banded and its rows have been divided by d, + else if lower is True, b is lower triangular banded + and its columns have been divided by d. + """ if lower: d = tb[0].copy() @@ -43,9 +106,19 @@ l = _upper2lower(tb) return d, _lower2upper(l / d) -def _trace_symbanded(a,b, lower=0): +def _trace_symbanded(a, b, lower=0): """ - Compute the trace(a*b) for two upper or lower banded real symmetric matrices. + Compute the trace(ab) for two banded real symmetric matrices + stored in either upper or lower form. + + INPUTS: + a, b -- two banded real symmetric matrices (either lower or upper) + lower -- if True, a and b are assumed to be the lower half + + + OUTPUTS: trace + trace -- trace(ab) + """ if lower: @@ -58,7 +131,12 @@ def _zero_triband(a, lower=0): """ - Zero out unnecessary elements of a real symmetric banded matrix. + Explicitly zero out unused elements of a real symmetric banded matrix. + + INPUTS: + a -- a real symmetric banded matrix (either upper or lower half) + lower -- if True, a is assumed to be the lower half + """ nrow, ncol = a.shape @@ -68,18 +146,36 @@ for i in range(nrow): a[i,0:i] = 0. return a -def _zerofunc(x): - return N.zeros(x.shape, N.float) +class BSpline(object): -class BSpline: + ''' - """ - knots should be sorted, knots[0] is lower boundary, knots[1] is upper boundary - knots[1:-1] are internal knots - """ + Bsplines of a given order and specified knots. - def __init__(self, knots, order=4, coef=None, M=None, eps=0.0): + Implementation is based on description in Chapter 5 of + + Hastie, Tibshirani and Friedman (2001). "The Elements of Statistical + Learning." Springer-Verlag. 536 pages. + + + INPUTS: + knots -- a sorted array of knots with knots[0] the lower boundary, + knots[1] the upper boundary and knots[1:-1] the internal + knots. + order -- order of the Bspline, default is 4 which yields cubic + splines + M -- number of additional boundary knots, if None it defaults + to order + coef -- an optional array of real-valued coefficients for the Bspline + of shape (knots.shape + 2 * (M - 1) - order,).
+ x -- an optional set of x values at which to evaluate the + Bspline to avoid extra evaluation in the __call__ method + + ''' + + def __init__(self, knots, order=4, M=None, coef=None, x=None): + knots = N.squeeze(N.unique(N.asarray(knots))) if knots.ndim != 1: @@ -89,10 +185,9 @@ if M is None: M = self.m self.M = M -# if self.M < self.m: -# raise 'multiplicity of knots, M, must be at least equal to order, m' - self.tau = N.hstack([[knots[0]-eps]*(self.M-1), knots, [knots[-1]+eps]*(self.M-1)]) + self.tau = N.hstack([[knots[0]]*(self.M-1), knots, [knots[-1]]*(self.M-1)]) + self.K = knots.shape[0] - 2 if coef is None: self.coef = N.zeros((self.K + 2 * self.M - self.m), N.float64) @@ -100,12 +195,64 @@ self.coef = N.squeeze(coef) if self.coef.shape != (self.K + 2 * self.M - self.m): raise ValueError, 'coefficients of Bspline have incorrect shape' + if x is not None: + self.x = x - def __call__(self, x): - b = N.asarray(self.basis(x)).T + def _setx(self, x): + self._x = x + self._basisx = self.basis(self._x) + + def _getx(self): + return self._x + + x = property(_getx, _setx) + + def __call__(self, *args): + """ + Evaluate the BSpline at a given point, yielding + a matrix B and return + + B * self.coef + + + INPUTS: + args -- optional arguments. If None, it returns self._basisx, + the BSpline evaluated at the x values passed in __init__. + Otherwise, return the BSpline evaluated at the + first argument args[0]. + + OUTPUTS: y + y -- value of Bspline at specified x values + + BUGS: + If self has no attribute x, an exception will be raised + because self has no attribute _basisx. + + """ + + if not args: + b = self._basisx.T + else: + x = args[0] + b = N.asarray(self.basis(x)).T return N.squeeze(N.dot(b, self.coef)) - + def basis_element(self, x, i, d=0): + """ + Evaluate a particular basis element of the BSpline, + or its derivative. + + INPUTS: + x -- x values at which to evaluate the basis element + i -- which element of the BSpline to return + d -- the order of derivative + + OUTPUTS: y + y -- value of d-th derivative of the i-th basis element + of the BSpline at specified x values + + """ + x = N.asarray(x, N.float64) _shape = x.shape if _shape == (): @@ -122,7 +269,26 @@ v.shape = _shape return v - def basis(self, x, d=0, upper=None, lower=None): + def basis(self, x, d=0, lower=None, upper=None): + """ + Evaluate the basis of the BSpline or its derivative. + If lower or upper is specified, then only + the [lower:upper] elements of the basis are returned. + + INPUTS: + x -- x values at which to evaluate the basis element + i -- which element of the BSpline to return + d -- the order of derivative + lower -- optional lower limit of the set of basis + elements + upper -- optional upper limit of the set of basis + elements + + OUTPUTS: y + y -- value of d-th derivative of the basis elements + of the BSpline at specified x values + + """ x = N.asarray(x) _shape = x.shape if _shape == (): @@ -154,9 +320,38 @@ v[-1] = N.where(N.equal(x, self.tau[-1]), 1, v[-1]) return v - def gram(self, d=0, full=False): + def gram(self, d=0): """ - Compute Gram inner product matrix. + Compute Gram inner product matrix, storing it in lower + triangular banded form. + + The (i,j) entry is + + G_ij = integral b_i^(d) b_j^(d) + + where b_i are the basis elements of the BSpline and (d) is the + d-th derivative. + + If d is a matrix then, it is assumed to specify a differential + operator as follows: the first row represents the order of derivative + with the second row the coefficient corresponding to that order. 
+ + For instance: + + [[2, 3], + [3, 1]] + + represents 3 * f^(2) + 1 * f^(3). + + INPUTS: + d -- which derivative to apply to each basis element, + if d is a matrix, it is assumed to specify + a differential operator as above + + OUTPUTS: gram + gram -- the matrix of inner products of (derivatives) + of the BSpline elements + """ d = N.squeeze(d) @@ -184,18 +379,54 @@ method = "target_df" target_df = 5 default_pen = 1.0e-03 + optimize = True - def smooth(self, y, x=None, weights=None): - if self.method == "target_df": - self.fit_target_df(y, x=x, weights=weights, df=self.target_df) - elif self.method == "optimize_gcv": - self.fit_optimize_gcv(y, x=x, weights=weights) + ''' + A smoothing spline, which can be used to smooth scatterplots, i.e. + a list of (x,y) tuples. + See fit method for more information. + + ''' + def fit(self, y, x=None, weights=None, pen=0.): + """ + Fit the smoothing spline to a set of (x,y) pairs. + + INPUTS: + y -- response variable + x -- if None, uses self.x + weights -- optional array of weights + pen -- constant in front of Gram matrix + + OUTPUTS: None + The smoothing spline is determined by self.coef, + subsequent calls of __call__ will be the smoothing spline. + + ALGORITHM: + Formally, this solves a minimization: + + fhat = ARGMIN_f SUM_i=1^n (y_i-f(x_i))^2 + pen * int f^(2)^2 + + See Chapter 5 of + + Hastie, Tibshirani and Friedman (2001). "The Elements of Statistical + Learning." Springer-Verlag. 536 pages. + + for more details. + + TODO: + Should add arbitrary derivative penalty instead of just + second derivative. + """ + banded = True if x is None: - x = self.tau[(self.M-1):-(self.M-1)] # internal knots + x = self._x + bt = self._basisx.copy() + else: + bt = self.basis(x) if pen == 0.: # can't use cholesky for singular matrices banded = False @@ -204,7 +435,6 @@ raise ValueError, 'x and y shape do not agree, by default x are \ the Bspline\'s internal knots' - bt = self.basis(x) if pen >= self.penmax: pen = self.penmax @@ -225,13 +455,16 @@ y = y[mask] self.df_total = y.shape[0] - bty = N.dot(bt, _w * y) + + bty = N.squeeze(N.dot(bt, _w * y)) self.N = y.shape[0] + if not banded: self.btb = N.dot(bt, bt.T) - _g = band2array(self.g, lower=1, symmetric=True) + _g = _band2array(self.g, lower=1, symmetric=True) self.coef, _, self.rank = L.lstsq(self.btb + pen*_g, bty)[0:3] self.rank = min(self.rank, self.btb.shape[0]) + del(_g) else: self.btb = N.zeros(self.g.shape, N.float64) nband, nbasis = self.g.shape @@ -239,7 +472,8 @@ for k in range(min(nband, nbasis-i)): self.btb[k,i] = (bt[i] * bt[i+k]).sum() - bty.shape = (1,bty.shape[0]) + bty.shape = (1,bty.shape[0]) + self.pen = pen self.chol, self.coef = solveh_banded(self.btb + pen*self.g, bty, lower=1) @@ -248,9 +482,24 @@ self.resid = y * self.weights - N.dot(self.coef, bt) self.pen = pen + del(bty); del(mask); del(bt) + + def smooth(self, y, x=None, weights=None): + + if self.method == "target_df": + if hasattr(self, 'pen'): + self.fit(y, x=x, weights=weights, pen=self.pen) + else: + self.fit_target_df(y, x=x, weights=weights, df=self.target_df) + elif self.method == "optimize_gcv": + self.fit_optimize_gcv(y, x=x, weights=weights) + + def gcv(self): """ Generalized cross-validation score of current fit. + + TODO: addin a reference to Wahba, and whoever else I used. """ norm_resid = (self.resid**2).sum() @@ -258,6 +507,8 @@ def df_resid(self): """ + Residual degrees of freedom in the fit. + self.N - self.trace() where self.N is the number of observations of last fit. 
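As a usage sketch for the interface documented above (hypothetical, based only on the methods shown in this diff, and mirroring the calls made in gam.py later in this commit), fitting a smoothing spline with a target number of degrees of freedom looks like:

    import numpy as N
    from scipy.sandbox.models.bspline import SmoothingSpline

    x = N.linspace(0, 10, 101)
    y = N.sin(x) + 0.1 * N.random.standard_normal(x.shape)

    s = SmoothingSpline(N.linspace(0, 10, 11), x=x)
    s.gram(d=2)        # penalize the second derivative
    s.target_df = 5
    s.smooth(y)        # binary search on the penalty until trace(S) is about 5
    yhat = s(x)        # evaluate the fitted spline at x
    print s.df_fit(), s.gcv()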
@@ -267,15 +518,18 @@ def df_fit(self): """ - = self.trace() + How many degrees of freedom used in the fit? - How many degrees of freedom used in the fit? + self.trace() + """ return self.trace() def trace(self): """ Trace of the smoothing matrix S(pen) + + TODO: addin a reference to Wahba, and whoever else I used. """ if self.pen > 0: @@ -285,20 +539,36 @@ else: return self.rank - def fit_target_df(self, y, x=None, df=None, weights=None, tol=1.0e-03): + def fit_target_df(self, y, x=None, df=None, weights=None, tol=1.0e-03, + apen=0, bpen=1.0e-03): + """ Fit smoothing spline with approximately df degrees of freedom used in the fit, i.e. so that self.trace() is approximately df. + Uses binary search strategy. + In general, df must be greater than the dimension of the null space of the Gram inner product. For cubic smoothing splines, this means that df > 2. + INPUTS: + y -- response variable + x -- if None, uses self.x + df -- target degrees of freedom + weights -- optional array of weights + tol -- (relative) tolerance for convergence + apen -- lower bound of penalty for binary search + bpen -- upper bound of penalty for binary search + + OUTPUTS: None + The smoothing spline is determined by self.coef, + subsequent calls of __call__ will be the smoothing spline. + """ df = df or self.target_df - apen, bpen = 0, 1.0e-03 olddf = y.shape[0] - self.m if hasattr(self, "pen"): @@ -312,6 +582,7 @@ apen, bpen = 0., self.pen while True: + curpen = 0.5 * (apen + bpen) self.fit(y, x=x, weights=weights, pen=curpen) curdf = self.trace() @@ -326,7 +597,7 @@ break def fit_optimize_gcv(self, y, x=None, weights=None, tol=1.0e-03, - bracket=(0,1.0e-03)): + brack=(-100,20)): """ Fit smoothing spline trying to optimize GCV. @@ -336,6 +607,18 @@ It is probably best to use target_df instead, as it is sometimes difficult to find a bracketing interval. + INPUTS: + y -- response variable + x -- if None, uses self.x + df -- target degrees of freedom + weights -- optional array of weights + tol -- (relative) tolerance for convergence + brack -- an initial guess at the bracketing interval + + OUTPUTS: None + The smoothing spline is determined by self.coef, + subsequent calls of __call__ will be the smoothing spline. + """ def _gcv(pen, y, x): @@ -343,32 +626,7 @@ a = self.gcv() return a - a = golden(_gcv, args=(y,x), brack=(-100,20), tol=tol) + a = golden(_gcv, args=(y,x), brack=bracket, tol=tol) -def band2array(a, lower=0, symmetric=False, hermitian=False): - """ - Take an upper or lower triangular banded matrix and return a matrix using - LAPACK storage convention. For testing banded Cholesky decomposition, etc. 
- """ - n = a.shape[1] - r = a.shape[0] - _a = 0 - - if not lower: - for j in range(r): - _b = N.diag(a[r-1-j],k=j)[j:(n+j),j:(n+j)] - _a += _b - if symmetric and j > 0: _a += _b.T - elif hermitian and j > 0: _a += _b.conjugate().T - else: - for j in range(r): - _b = N.diag(a[j],k=j)[0:n,0:n] - _a += _b - if symmetric and j > 0: _a += _b.T - elif hermitian and j > 0: _a += _b.conjugate().T - _a = _a.T - - return _a - Modified: trunk/scipy/sandbox/models/bspline_module.py =================================================================== --- trunk/scipy/sandbox/models/bspline_module.py 2007-08-28 16:54:29 UTC (rev 3273) +++ trunk/scipy/sandbox/models/bspline_module.py 2007-08-28 19:23:59 UTC (rev 3274) @@ -132,11 +132,11 @@ PyArrayObject *basis; double *data; - basis = (PyArrayObject *) PyArray_SimpleNew(2, dim, PyArray_DOUBLE); data = (double *) basis->data; bspline(&data, x, Nx[0], knots, Nknots[0], m, d, lower, upper); return_val = (PyObject *) basis; + Py_DECREF((PyObject *) basis); ''' @@ -184,7 +184,9 @@ double bspline_quad(double *knots, int nknots, int m, int l, int r, int dl, int dr) + /* This is based on scipy.integrate.fixed_quad */ + { double *y; double qx[%(nq)d]={%(qx)s}; @@ -202,8 +204,8 @@ lower = l - m - 1; if (lower < 0) { lower = 0;} upper = lower + 2 * m + 4; - if (upper > nknots - 1) {upper = nknots-1;} -/* upper = nknots - m; */ + if (upper > nknots - 1) { upper = nknots-1; } + for (k=lower; kdata; bspline_gram(&data, knots, Nknots[0], m, dl, dr); return_val = (PyObject *) gram; + Py_DECREF((PyObject *) gram); ''' @@ -324,7 +330,8 @@ data = (double *) invband->data; invband_compute(&data, L, NL[1], NL[0]-1); - return_val = (PyObject *) invband; + return_val = (PyObject *) invband; + Py_DECREF((PyObject *) invband); ''' Modified: trunk/scipy/sandbox/models/gam.py =================================================================== --- trunk/scipy/sandbox/models/gam.py 2007-08-28 16:54:29 UTC (rev 3273) +++ trunk/scipy/sandbox/models/gam.py 2007-08-28 19:23:59 UTC (rev 3274) @@ -1,6 +1,8 @@ """ Generalized additive models + """ + import numpy as N from scipy.sandbox.models import family @@ -12,7 +14,7 @@ _x.sort() n = x.shape[0] # taken form smooth.spline in R - print "herenow" + if n < 50: nknots = n else: @@ -29,7 +31,8 @@ else: nknots = 200 + (n - 3200.)**0.2 knots = _x[N.linspace(0, n-1, nknots).astype(N.int32)] - s = SmoothingSpline(knots) + + s = SmoothingSpline(knots, x=x.copy()) s.gram(d=2) s.target_df = 5 return s @@ -41,7 +44,7 @@ self.offset = offset def __call__(self, *args, **kw): - return self.fn(*args, **kw) + offset + return self.fn(*args, **kw) + self.offset class results: @@ -62,7 +65,7 @@ return N.sum(self.smoothed(design), axis=0) + self.alpha def smoothed(self, design): - return N.array([self.smoothers[i](design[:,i]) + self.offset[i] for i in range(design.shape[1])]) + return N.array([self.smoothers[i]() + self.offset[i] for i in range(design.shape[1])]) class additive_model: @@ -89,16 +92,16 @@ offset = N.zeros(self.design.shape[1], N.float64) alpha = (Y * self.weights).sum() / self.weights.sum() for i in range(self.design.shape[1]): - tmp = self.smoothers[i](self.design[:,i]) - self.smoothers[i].smooth(Y - alpha - mu + tmp, x=self.design[:,i], + tmp = self.smoothers[i]() + self.smoothers[i].smooth(Y - alpha - mu + tmp, weights=self.weights) - tmp2 = self.smoothers[i](self.design[:,i]) + tmp2 = self.smoothers[i]() offset[i] = -(tmp2*self.weights).sum() / self.weights.sum() mu += tmp2 - tmp return results(Y, alpha, self.design, self.smoothers, 
self.family, offset) - def cont(self, tol=1.0e-02): + def cont(self, tol=1.0e-04): curdev = (((self.results.Y - self.results.predict(self.design))**2) * self.weights).sum() @@ -124,9 +127,9 @@ offset = N.zeros(self.design.shape[1], N.float64) for i in range(self.design.shape[1]): - self.smoothers[i].smooth(Y - alpha - mu, x=self.design[:,i], + self.smoothers[i].smooth(Y - alpha - mu, weights=self.weights) - tmp = self.smoothers[i](self.design[:,i]) + tmp = self.smoothers[i]() offset[i] = (tmp * self.weights).sum() / self.weights.sum() tmp -= tmp.sum() mu += tmp @@ -190,8 +193,7 @@ return self.results -if __name__ == "__main__": - +def _run(): import numpy.random as R n = lambda x: (x - x.mean()) / x.std() n_ = lambda x: (x - x.mean()) @@ -220,11 +222,11 @@ toc = time.time() m.fit(b) tic = time.time() - import pylab - pylab.figure(num=1) - pylab.plot(x1, n(m.smoothers[0](x1))); pylab.plot(x1, n(f1(x1)), linewidth=2) - pylab.figure(num=2) - pylab.plot(x2, n(m.smoothers[1](x2))); pylab.plot(x2, n(f2(x2)), linewidth=2); +## import pylab +## pylab.figure(num=1) +## pylab.plot(x1, n(m.smoothers[0](x1)), 'r'); pylab.plot(x1, n(f1(x1)), linewidth=2) +## pylab.figure(num=2) +## pylab.plot(x2, n(m.smoothers[1](x2)), 'r'); pylab.plot(x2, n(f2(x2)), linewidth=2); print tic-toc f = family.Poisson() @@ -235,9 +237,12 @@ m.fit(p) tic = time.time() print tic-toc - pylab.figure(num=1) - pylab.plot(x1, n(m.smoothers[0](x1))); pylab.plot(x1, n(f1(x1)), linewidth=2) - pylab.figure(num=2) - pylab.plot(x2, n(m.smoothers[1](x2))); pylab.plot(x2, n(f2(x2)), linewidth=2) - pylab.show() +## pylab.figure(num=1) +## pylab.plot(x1, n(m.smoothers[0](x1)), 'b'); pylab.plot(x1, n(f1(x1)), linewidth=2) +## pylab.figure(num=2) +## pylab.plot(x2, n(m.smoothers[1](x2)), 'b'); pylab.plot(x2, n(f2(x2)), linewidth=2) +## pylab.show() + +if __name__ == "__main__": + _run() Modified: trunk/scipy/sandbox/models/glm.py =================================================================== --- trunk/scipy/sandbox/models/glm.py 2007-08-28 16:54:29 UTC (rev 3273) +++ trunk/scipy/sandbox/models/glm.py 2007-08-28 19:23:59 UTC (rev 3274) @@ -1,5 +1,6 @@ """ -General linear model +General linear models +-------------------- """ import numpy as N from scipy.sandbox.models import family @@ -14,7 +15,7 @@ self.weights = 1 self.initialize(design) - def __iter__(self): + def __iter__(self): self.iter = 0 self.dev = N.inf return self Modified: trunk/scipy/sandbox/models/info.py =================================================================== --- trunk/scipy/sandbox/models/info.py 2007-08-28 16:54:29 UTC (rev 3273) +++ trunk/scipy/sandbox/models/info.py 2007-08-28 19:23:59 UTC (rev 3274) @@ -6,7 +6,7 @@ - `ols_model` (ordinary least square regression) - `wls_model` (weighted least square regression) - - `ar_model` (autoregression) + - `ar_model` (autoregressive model) - `glm.model` (generalized linear models) - robust statistical models Modified: trunk/scipy/sandbox/models/setup.py =================================================================== --- trunk/scipy/sandbox/models/setup.py 2007-08-28 16:54:29 UTC (rev 3273) +++ trunk/scipy/sandbox/models/setup.py 2007-08-28 19:23:59 UTC (rev 3274) @@ -8,6 +8,8 @@ config.add_data_dir('tests') try: + import sys + print sys.path from scipy.sandbox.models.bspline_module import mod n, s, d = weave_ext(mod) config.add_extension(n, s, **d) Modified: trunk/scipy/sandbox/models/smoothers.py =================================================================== --- trunk/scipy/sandbox/models/smoothers.py 
2007-08-28 16:54:29 UTC (rev 3273) +++ trunk/scipy/sandbox/models/smoothers.py 2007-08-28 19:23:59 UTC (rev 3274) @@ -10,7 +10,7 @@ from scipy.optimize import golden from scipy.sandbox.models import _bspline -from scipy.sandbox.models.bspline import bspline, band2array +from scipy.sandbox.models.bspline import bspline, _band2array class poly_smoother: @@ -103,7 +103,7 @@ self.N = y.shape[0] if not banded: self.btb = N.dot(bt, bt.T) - _g = band2array(self.g, lower=1, symmetric=True) + _g = _band2array(self.g, lower=1, symmetric=True) self.coef, _, self.rank = L.lstsq(self.btb + pen*_g, bty)[0:3] self.rank = min(self.rank, self.btb.shape[0]) else: Added: trunk/scipy/sandbox/models/tests/test_bspline.py =================================================================== --- trunk/scipy/sandbox/models/tests/test_bspline.py 2007-08-28 16:54:29 UTC (rev 3273) +++ trunk/scipy/sandbox/models/tests/test_bspline.py 2007-08-28 19:23:59 UTC (rev 3274) @@ -0,0 +1,23 @@ +""" +Test functions for models.bspline +""" + +import numpy as N +from numpy.testing import NumpyTest, NumpyTestCase + +import scipy.sandbox.models as S +import scipy.sandbox.models.bspline as B + + +class test_BSpline(NumpyTestCase): + + def test1(self): + b = B.BSpline(N.linspace(0,10,11), x=N.linspace(0,10,101)) + old = b._basisx.shape + b.x = N.linspace(0,10,51) + new = b._basisx.shape + self.assertEqual((old[0], 51), new) + + +if __name__ == "__main__": + NumpyTest().run() From scipy-svn at scipy.org Wed Aug 29 00:05:44 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 28 Aug 2007 23:05:44 -0500 (CDT) Subject: [Scipy-svn] r3275 - trunk/scipy/sandbox/models/family Message-ID: <20070829040544.D49FB39C033@new.scipy.org> Author: jonathan.taylor Date: 2007-08-28 23:05:33 -0500 (Tue, 28 Aug 2007) New Revision: 3275 Modified: trunk/scipy/sandbox/models/family/__init__.py trunk/scipy/sandbox/models/family/family.py trunk/scipy/sandbox/models/family/links.py trunk/scipy/sandbox/models/family/varfuncs.py Log: docstrings for scipy.sandbox.models.family Modified: trunk/scipy/sandbox/models/family/__init__.py =================================================================== --- trunk/scipy/sandbox/models/family/__init__.py 2007-08-28 19:23:59 UTC (rev 3274) +++ trunk/scipy/sandbox/models/family/__init__.py 2007-08-29 04:05:33 UTC (rev 3275) @@ -1,3 +1,15 @@ +''' +This module contains the one-parameter exponential families used +for fitting GLMs and GAMs. + +These families are described in + + P. McCullagh and J. A. Nelder. "Generalized linear models." + Monographs on Statistics and Applied Probability. + Chapman & Hall, London, 1983. + +''' + from scipy.sandbox.models.family.family import Gaussian, Family, \ Poisson, Gamma, InverseGaussian, Binomial Modified: trunk/scipy/sandbox/models/family/family.py =================================================================== --- trunk/scipy/sandbox/models/family/family.py 2007-08-28 19:23:59 UTC (rev 3274) +++ trunk/scipy/sandbox/models/family/family.py 2007-08-29 04:05:33 UTC (rev 3275) @@ -2,13 +2,36 @@ from scipy.sandbox.models.family import links as L from scipy.sandbox.models.family import varfuncs as V -class Family: +class Family(object): + """ + A class to model one-parameter exponential + families.
+ + INPUTS: + link -- a Link instance + variance -- a variance function (models the variance as a function + of the mean) + + """ + valid = [-N.inf, N.inf] tol = 1.0e-05 + def _setlink(self, link): + self._link = link + if hasattr(self, "links"): + if link not in self.links: + raise ValueError, 'invalid link for family, should be in %s' % `self.links` + + def _getlink(self): + return self._link + + link = property(_getlink, _setlink) + def __init__(self, link, variance): + self.link = link self.variance = variance @@ -16,32 +39,93 @@ """ Weights for IRLS step. + + w = 1 / (link'(mu)**2 * variance(mu)) + + INPUTS: + mu -- mean parameter in exponential family + + OUTPUTS: + w -- weights used in WLS step of GLM/GAM fit + """ return 1. / (self.link.deriv(mu)**2 * self.variance(mu)) def deviance(self, Y, mu, scale=1.): + """ + Deviance of (Y,mu) pair. Deviance is usually defined + as the difference + + DEV = (SUM_i -2 log Likelihood(Y_i,mu_i) + 2 log Likelihood(mu_i,mu_i)) / scale + + INPUTS: + Y -- response variable + mu -- mean parameter + scale -- optional scale in denominator of deviance + + OUTPUTS: dev + dev -- DEV, as described above + + """ + return N.power(self.devresid(Y, mu), 2).sum() / scale def devresid(self, Y, mu): + """ + The deviance residuals, defined as the residuals + in the deviance. + + Without knowing the link, they default to Pearson residuals + + resid_P = (Y - mu) * sqrt(weight(mu)) + + INPUTS: + Y -- response variable + mu -- mean parameter + + OUTPUTS: resid + resid -- deviance residuals + """ + return (Y - mu) * N.sqrt(self.weights(mu)) def fitted(self, eta): """ Fitted values based on linear predictors eta. + + INPUTS: + eta -- values of linear predictors, say, + X beta in a generalized linear model. + + OUTPUTS: mu + mu -- link.inverse(eta), mean parameter based on eta + """ return self.link.inverse(eta) def predict(self, mu): """ Linear predictors based on given mu values. + + INPUTS: + mu -- mean parameter of one-parameter exponential family + + OUTPUTS: eta + eta -- link(mu), linear predictors, based on + mean parameters mu + """ return self.link(mu) class Poisson(Family): """ - Poisson exponential family in glm context. + Poisson exponential family. + + INPUTS: + link -- a Link instance + """ links = [L.log, L.identity, L.sqrt] @@ -49,81 +133,125 @@ valid = [0, N.inf] def __init__(self, link=L.log): - if link not in Poisson.links: - raise ValueError, 'invalid link for Poisson family' self.variance = Poisson.variance self.link = link def devresid(self, Y, mu): + """ + Poisson deviance residual + + INPUTS: + Y -- response variable + mu -- mean parameter + + OUTPUTS: resid + resid -- deviance residuals + + """ return N.sign(Y - mu) * N.sqrt(2 * Y * N.log(Y / mu) - 2 * (Y - mu)) class Gaussian(Family): """ - Gaussian exponential family in glm context. + Gaussian exponential family. + + INPUTS: + link -- a Link instance + """ links = [L.log, L.identity, L.inverse] variance = V.constant def __init__(self, link=L.identity): - if link not in Gaussian.links: - raise ValueError, 'invalid link for Gaussian family' self.variance = Gaussian.variance self.link = link def devresid(self, Y, mu, scale=1.): + """ + Gaussian deviance residual + + INPUTS: + Y -- response variable + mu -- mean parameter + scale -- optional scale in denominator (after taking sqrt) + + OUTPUTS: resid + resid -- deviance residuals + """ + return (Y - mu) / N.sqrt(self.variance(mu) * scale) class Gamma(Family): """ - Gaussian exponential family in glm context. + Gamma exponential family.
+ + INPUTS: + link -- a Link instance + + BUGS: + no deviance residuals? + """ links = [L.log, L.identity, L.inverse] variance = V.mu_squared def __init__(self, link=L.identity): - if link not in Gamma.links: - raise ValueError, 'invalid link for Gamma family' self.variance = Gamma.variance self.link = link - class Binomial(Family): """ - Binomial exponential family in glm context. + Binomial exponential family. + + INPUTS: + link -- a Link instance + n -- number of trials for Binomial """ links = [L.logit, L.probit, L.cauchy, L.log, L.cloglog] variance = V.binary def __init__(self, link=L.logit, n=1): - if link not in Binomial.links: - raise ValueError, 'invalid link for Binomial family' self.n = n self.variance = V.Binomial(n=self.n) self.link = link - def devresid(self, Y, mu, scale=1.): + def devresid(self, Y, mu): + """ + Binomial deviance residual + + INPUTS: + Y -- response variable + mu -- mean parameter + + OUTPUTS: resid + resid -- deviance residuals + + """ + mu = self.link.clean(mu) return N.sign(Y - mu) * N.sqrt(-2 * (Y * N.log(mu / self.n) + (self.n - Y) * N.log(1 - mu / self.n))) class InverseGaussian(Family): """ - Gaussian exponential family in glm context. + InverseGaussian exponential family. + + INPUTS: + link -- a Link instance + n -- number of trials for Binomial + """ links = [L.inverse_squared, L.inverse, L.identity, L.log] variance = V.mu_cubed - def __init__(self, link=L.identity, n=1): - if link not in InverseGaussian.links: - raise ValueError, 'invalid link for InverseGaussian family' + def __init__(self, link=L.identity): self.n = n self.variance = InverseGaussian.variance self.link = link Modified: trunk/scipy/sandbox/models/family/links.py =================================================================== --- trunk/scipy/sandbox/models/family/links.py 2007-08-28 19:23:59 UTC (rev 3274) +++ trunk/scipy/sandbox/models/family/links.py 2007-08-29 04:05:33 UTC (rev 3275) @@ -3,31 +3,97 @@ class Link: + """ + A generic link function for one-parameter exponential + family, with call, inverse and deriv methods. + + """ + def initialize(self, Y): return N.asarray(Y).mean() * N.ones(Y.shape) + def __call__(self, p): + return NotImplementedError + + def inverse(self, z): + return NotImplementedError + + def deriv(self, p): + return NotImplementedError + + class Logit(Link): """ The logit transform as a link function: - g(x) = log(x / (1 - x)) + g'(x) = 1 / (x * (1 - x)) + g^(-1)(x) = exp(x)/(1 + exp(x)) + """ tol = 1.0e-10 def clean(self, p): + """ + Clip logistic values to range (tol, 1-tol) + + INPUTS: + p -- probabilities + + OUTPUTS: pclip + pclip -- clipped probabilities + """ + return N.clip(p, Logit.tol, 1. - Logit.tol) def __call__(self, p): + """ + Logit transform + + g(p) = log(p / (1 - p)) + + INPUTS: + p -- probabilities + + OUTPUTS: z + z -- logit transform of p + + """ + p = self.clean(p) return N.log(p / (1. - p)) def inverse(self, z): + """ + Inverse logit transform + + h(z) = exp(z)/(1+exp(z)) + + INPUTS: + z -- logit transform of p + + OUTPUTS: p + p -- probabilities + + """ t = N.exp(z) return t / (1. + t) def deriv(self, p): + + """ + Derivative of logit transform + + g(p) = 1 / (p * (1 - p)) + + INPUTS: + p -- probabilities + + OUTPUTS: y + y -- derivative of logit transform of p + + """ p = self.clean(p) return 1. 
/ (p * (1 - p)) @@ -46,12 +112,50 @@ self.power = power def __call__(self, x): + """ + Power transform + + g(x) = x**self.power + + INPUTS: + x -- mean parameters + + OUTPUTS: z + z -- power transform of x + + """ + return N.power(x, self.power) - def inverse(self, x): - return N.power(x, 1. / self.power) + def inverse(self, z): + """ + Inverse of power transform + + g^(-1)(z) = z**(1/self.power) + INPUTS: + z -- linear predictors in glm + + OUTPUTS: x + x -- mean parameters + + """ + return N.power(z, 1. / self.power) + def deriv(self, x): + """ + Derivative of power transform + + g'(x) = self.power * x**(self.power - 1) + + INPUTS: + x -- mean parameters + + OUTPUTS: z + z -- derivative of power transform of x + + """ + return self.power * N.power(x, self.power - 1) inverse = Power(power=-1.) @@ -101,13 +205,50 @@ return N.clip(x, Logit.tol, N.inf) def __call__(self, x, **extra): + """ + Log transform + + g(x) = log(x) + + INPUTS: + x -- mean parameters + + OUTPUTS: z + z -- log(x) + + """ x = self.clean(x) return N.log(x) def inverse(self, z): + """ + Inverse of log transform + + g^(-1)(z) = exp(z) + + INPUTS: + z -- linear predictors in glm + + OUTPUTS: x + x -- exp(z) + + """ return N.exp(z) def deriv(self, x): + """ + Derivative of log transform + + g'(x) = 1/x + + INPUTS: + x -- mean parameters + + OUTPUTS: z + z -- derivative of log transform of x + + """ + x = self.clean(x) return 1. / x @@ -126,13 +267,49 @@ self.dbn = dbn def __call__(self, p): + """ + CDF link + + g(p) = self.dbn.ppf(p) + + INPUTS: + p -- mean parameters + + OUTPUTS: z + z -- ppf (inverse CDF) transform of p + + """ p = self.clean(p) return self.dbn.ppf(p) def inverse(self, z): + """ + Inverse of CDF link + + g^(-1)(z) = self.dbn.cdf(z) + + INPUTS: + z -- linear predictors in glm + + OUTPUTS: p + p -- inverse of CDF link of z + + """ return self.dbn.cdf(z) def deriv(self, p): + """ + Derivative of CDF link + + g'(p) = 1/self.dbn.pdf(self.dbn.ppf(p)) + + INPUTS: + p -- mean parameters + + OUTPUTS: z + z -- derivative of CDF transform of p + + """ p = self.clean(p) return 1. / self.dbn.pdf(self(p)) @@ -164,13 +341,49 @@ """ def __call__(self, p): + """ + C-Log-Log transform + + g(p) = log(-log(p)) + + INPUTS: + p -- mean parameters + + OUTPUTS: z + z -- log(-log(p)) + + """ p = self.clean(p) return N.log(-N.log(p)) def inverse(self, z): + """ + Inverse of C-Log-Log transform + + g^(-1)(z) = exp(-exp(z)) + + INPUTS: + z -- linear predictor scale + + OUTPUTS: p + p -- mean parameters + + """ return N.exp(-N.exp(z)) def deriv(self, p): + """ + Derivative of C-Log-Log transform + + g'(p) = - 1 / (log(p) * p) + + INPUTS: + p -- mean parameters + + OUTPUTS: z + z -- - 1 / (log(p) * p) + + """ p = self.clean(p) return -1. 
/ (N.log(p) * p) Modified: trunk/scipy/sandbox/models/family/varfuncs.py =================================================================== --- trunk/scipy/sandbox/models/family/varfuncs.py 2007-08-28 19:23:59 UTC (rev 3274) +++ trunk/scipy/sandbox/models/family/varfuncs.py 2007-08-29 04:05:33 UTC (rev 3275) @@ -9,21 +9,45 @@ """ def __call__(self, mu): + """ + Default variance function + + INPUTS: + mu -- mean parameters + + OUTPUTS: v + v -- ones(mu.shape) + """ + return N.ones(mu.shape, N.float64) constant = VarianceFunction() class Power: """ - Variance function: + Power variance function: V(mu) = fabs(mu)**power + + INPUTS: + power -- exponent used in power variance function + """ def __init__(self, power=1.): self.power = power def __call__(self, mu): + + """ + Power variance function + + INPUTS: + mu -- mean parameters + + OUTPUTS: v + v -- fabs(mu)**self.power + """ return N.power(N.fabs(mu), self.power) class Binomial: @@ -31,6 +55,9 @@ Binomial variance function p = mu / n; V(mu) = p * (1 - p) * n + + INPUTS: + n -- number of trials in Binomial """ tol = 1.0e-10 @@ -42,6 +69,15 @@ return N.clip(p, Binomial.tol, 1 - Binomial.tol) def __call__(self, mu): + """ + Binomial variance function + + INPUTS: + mu -- mean parameters + + OUTPUTS: v + v -- mu / self.n * (1 - mu / self.n) * self.n + """ p = self.clean(mu / self.n) return p * (1 - p) * self.n From scipy-svn at scipy.org Wed Aug 29 00:19:15 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 28 Aug 2007 23:19:15 -0500 (CDT) Subject: [Scipy-svn] r3276 - trunk/scipy/sandbox/models Message-ID: <20070829041915.73E7439C03A@new.scipy.org> Author: jonathan.taylor Date: 2007-08-28 23:19:12 -0500 (Tue, 28 Aug 2007) New Revision: 3276 Modified: trunk/scipy/sandbox/models/bspline.py Log: references for bsplines Modified: trunk/scipy/sandbox/models/bspline.py =================================================================== --- trunk/scipy/sandbox/models/bspline.py 2007-08-29 04:05:33 UTC (rev 3275) +++ trunk/scipy/sandbox/models/bspline.py 2007-08-29 04:19:12 UTC (rev 3276) @@ -1,3 +1,20 @@ +''' +Bsplines and smoothing splines. + +General references: + + Craven, P. and Wahba, G. (1978) "Smoothing noisy data with spline functions. + Estimating the correct degree of smoothing by + the method of generalized cross-validation." + Numerische Mathematik, 31(4), 377-403. + + Hastie, Tibshirani and Friedman (2001). "The Elements of Statistical + Learning." Springer-Verlag. 536 pages. + + Hutchinson, M.F. and de Hoog, F.R. (1985) "Smoothing noisy data with spline functions." + Numerische Mathematik, 47(1), 99-106. +''' + import numpy as N import numpy.linalg as L @@ -259,7 +276,7 @@ x.shape = (1,) x.shape = (N.product(_shape,axis=0),) if i < self.tau.shape[0] - 1: - ## TODO: OWNDATA flags... + ## TODO: OWNDATA flags... v = _bspline.evaluate(x, self.tau, self.m, d, i, i+1) else: return N.zeros(x.shape, N.float64) @@ -499,7 +516,10 @@ """ Generalized cross-validation score of current fit. - TODO: addin a reference to Wahba, and whoever else I used. + Craven, P. and Wahba, G. "Smoothing noisy data with spline functions. + Estimating the correct degree of smoothing by + the method of generalized cross-validation." + Numerische Mathematik, 31(4), 377-403. 
""" norm_resid = (self.resid**2).sum() From scipy-svn at scipy.org Wed Aug 29 03:21:03 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 29 Aug 2007 02:21:03 -0500 (CDT) Subject: [Scipy-svn] r3277 - trunk/scipy/optimize Message-ID: <20070829072103.120D839C069@new.scipy.org> Author: dmitrey.kroshko Date: 2007-08-29 02:20:42 -0500 (Wed, 29 Aug 2007) New Revision: 3277 Modified: trunk/scipy/optimize/optimize.py Log: some changes in docstrings Modified: trunk/scipy/optimize/optimize.py =================================================================== --- trunk/scipy/optimize/optimize.py 2007-08-29 04:19:12 UTC (rev 3276) +++ trunk/scipy/optimize/optimize.py 2007-08-29 07:20:42 UTC (rev 3277) @@ -1,4 +1,4 @@ - +#__docformat__ = "restructuredtext en" # ******NOTICE*************** # optimize.py module by Travis E. Oliphant # @@ -98,44 +98,51 @@ def fmin(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None, full_output=0, disp=1, retall=0, callback=None): """Minimize a function using the downhill simplex algorithm. + + :Parameters: - Description: - - Uses a Nelder-Mead simplex algorithm to find the minimum of function - of one or more variables. - - Inputs: - - func -- the Python function or method to be minimized. - x0 -- the initial guess. - args -- extra arguments for func. - callback -- an optional user-supplied function to call after each + func : the Python function or method to be minimized. + x0 : ndarray - the initial guess. + args : extra arguments for func. + callback : an optional user-supplied function to call after each iteration. It is called as callback(xk), where xk is the current parameter vector. - Outputs: (xopt, {fopt, iter, funcalls, warnflag}) + :Returns: (xopt, {fopt, iter, funcalls, warnflag}) - xopt -- minimizer of function - - fopt -- value of function at minimum: fopt = func(xopt) - iter -- number of iterations - funcalls -- number of function calls - warnflag -- Integer warning flag: + xopt : ndarray + minimizer of function + fopt : number + value of function at minimum: fopt = func(xopt) + iter : number + number of iterations + funcalls : number + number of function calls + warnflag : number + Integer warning flag: 1 : 'Maximum number of function evaluations.' 2 : 'Maximum number of iterations.' - allvecs -- a list of solutions at each iteration + allvecs : Python list + a list of solutions at each iteration - Additional Inputs: + :OtherParameters: - xtol -- acceptable relative error in xopt for convergence. - ftol -- acceptable relative error in func(xopt) for convergence. - maxiter -- the maximum number of iterations to perform. - maxfun -- the maximum number of function evaluations. - full_output -- non-zero if fval and warnflag outputs are desired. - disp -- non-zero to print convergence messages. - retall -- non-zero to return list of solutions at each iteration + xtol : number + acceptable relative error in xopt for convergence. + ftol : number + acceptable relative error in func(xopt) for convergence. + maxiter : number + the maximum number of iterations to perform. + maxfun : number + the maximum number of function evaluations. + full_output : number + non-zero if fval and warnflag outputs are desired. + disp : number + non-zero to print convergence messages. 
+ retall : number + non-zero to return list of solutions at each iteration - See also: + :SeeAlso: fmin, fmin_powell, fmin_cg, fmin_bfgs, fmin_ncg -- multivariate local optimizers @@ -153,7 +160,13 @@ brentq, brenth, ridder, bisect, newton -- one-dimensional root-finding fixed_point -- scalar fixed-point finder - + + Notes + + ----------- + + Uses a Nelder-Mead simplex algorithm to find the minimum of function + of one or more variables. """ fcalls, func = wrap_function(func, args) x0 = asfarray(x0).flatten() @@ -408,12 +421,34 @@ def line_search(f, myfprime, xk, pk, gfk, old_fval, old_old_fval, args=(), c1=1e-4, c2=0.9, amax=50): """Find alpha that satisfies strong Wolfe conditions. - + + :Parameters: + + f : objective function + myfprime : objective function gradient (can be None) + xk : ndarray -- start point + pk : ndarray -- search direction + gfk : ndarray -- gradient value for x=xk + args : additional arguments for user functions + c1 : number -- parameter for Armijo condition rule + c2 : number - parameter for curvature condition rule + + :Returns: + + alpha0 : number -- required alpha (x_new = x0 + alpha * pk) + fc : number of function evaluations + gc : number of gradient evaluations + + + Notes + + -------------------------------- + + Uses the line search algorithm to enforce strong Wolfe conditions Wright and Nocedal, 'Numerical Optimization', 1999, pg. 59-60 For the zoom phase it uses an algorithm by - Outputs: (alpha0, gc, fc) + """ global _ls_fc, _ls_gc, _ls_ingfk @@ -521,7 +556,7 @@ Uses the interpolation algorithm (Armijo backtracking) as suggested by Wright and Nocedal in 'Numerical Optimization', 1999, pg. 56-57 - Outputs: (alpha, fc, gc) + :Returns: (alpha, fc, gc) """ xk = atleast_1d(xk) @@ -596,51 +631,59 @@ epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0, callback=None): """Minimize a function using the BFGS algorithm. + + :Parameters: - Description: + f : the Python function or method to be minimized. + x0 : ndarray + the initial guess for the minimizer. - Optimize the function, f, whose gradient is given by fprime using the - quasi-Newton method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS) - See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 198. - - Inputs: - - f -- the Python function or method to be minimized. - x0 -- the initial guess for the minimizer. - - fprime -- a function to compute the gradient of f. - args -- extra arguments to f and fprime. - gtol -- gradient norm must be less than gtol before succesful termination - norm -- order of norm (Inf is max, -Inf is min) - epsilon -- if fprime is approximated use this value for + fprime : a function to compute the gradient of f. + args : extra arguments to f and fprime. + gtol : number + gradient norm must be less than gtol before successful termination + norm : number + order of norm (Inf is max, -Inf is min) + epsilon : number + if fprime is approximated use this value for the step size (can be scalar or vector) - callback -- an optional user-supplied function to call after each + callback : an optional user-supplied function to call after each iteration. It is called as callback(xk), where xk is the current parameter vector. - Outputs: (xopt, {fopt, gopt, Hopt, func_calls, grad_calls, warnflag}, ) + :Returns: (xopt, {fopt, gopt, Hopt, func_calls, grad_calls, warnflag}, ) - xopt -- the minimizer of f. + xopt : ndarray + the minimizer of f. - fopt -- the value of f(xopt). - gopt -- the value of f'(xopt). (Should be near 0) - Bopt -- the value of 1/f''(xopt). 
(inverse hessian matrix) - func_calls -- the number of function_calls. - grad_calls -- the number of gradient calls. - warnflag -- an integer warning flag: + fopt : number + the value of f(xopt). + gopt : ndarray + the value of f'(xopt). (Should be near 0) + Bopt : ndarray + the value of 1/f''(xopt). (inverse hessian matrix) + func_calls : number + the number of function_calls. + grad_calls : number + the number of gradient calls. + warnflag : integer 1 : 'Maximum number of iterations exceeded.' 2 : 'Gradient and/or function calls not changing' - allvecs -- a list of all iterates (only returned if retall==1) + allvecs : a list of all iterates (only returned if retall==1) - Additional Inputs: + :OtherParameters: - maxiter -- the maximum number of iterations. - full_output -- if non-zero then return fopt, func_calls, grad_calls, + maxiter : number + the maximum number of iterations. + full_output : number + if non-zero then return fopt, func_calls, grad_calls, and warnflag in addition to xopt. - disp -- print convergence message if non-zero. - retall -- return a list of results at each iteration if non-zero + disp : number + print convergence message if non-zero. + retall : number + return a list of results at each iteration if non-zero - See also: + :SeeAlso: fmin, fmin_powell, fmin_cg, fmin_bfgs, fmin_ncg -- multivariate local optimizers @@ -658,7 +701,14 @@ brentq, brenth, ridder, bisect, newton -- one-dimensional root-finding fixed_point -- scalar fixed-point finder + + Notes + + ---------------------------------- + Optimize the function, f, whose gradient is given by fprime using the + quasi-Newton method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS) + See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 198. """ x0 = asarray(x0).squeeze() if x0.ndim == 0: @@ -768,48 +818,54 @@ maxiter=None, full_output=0, disp=1, retall=0, callback=None): """Minimize a function with nonlinear conjugate gradient algorithm. - Description: + :Parameters: - Optimize the function, f, whose gradient is given by fprime using the - nonlinear conjugate gradient algorithm of Polak and Ribiere - See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 120-122. - - Inputs: - f -- the Python function or method to be minimized. - x0 -- the initial guess for the minimizer. + x0 : ndarray -- the initial guess for the minimizer. fprime -- a function to compute the gradient of f. args -- extra arguments to f and fprime. - gtol -- stop when norm of gradient is less than gtol - norm -- order of vector norm to use - epsilon -- if fprime is approximated use this value for + gtol : number + stop when norm of gradient is less than gtol + norm : number + order of vector norm to use + epsilon :number + if fprime is approximated use this value for the step size (can be scalar or vector) callback -- an optional user-supplied function to call after each iteration. It is called as callback(xk), where xk is the current parameter vector. - Outputs: (xopt, {fopt, func_calls, grad_calls, warnflag}, {allvecs}) + :Returns: (xopt, {fopt, func_calls, grad_calls, warnflag}, {allvecs}) - xopt -- the minimizer of f. - - fopt -- the value of f(xopt). - func_calls -- the number of function_calls. - grad_calls -- the number of gradient calls. - warnflag -- an integer warning flag: + xopt : ndarray + the minimizer of f. + fopt :number + the value of f(xopt). + func_calls : number + the number of function_calls. + grad_calls : number + the number of gradient calls. 
+ warnflag :number + an integer warning flag: 1 : 'Maximum number of iterations exceeded.' 2 : 'Gradient and/or function calls not changing' - allvecs -- if retall then this vector of the iterates is returned + allvecs : ndarray + if retall then this vector of the iterates is returned - Additional Inputs: + :OtherParameters: - maxiter -- the maximum number of iterations. - full_output -- if non-zero then return fopt, func_calls, grad_calls, + maxiter :number + the maximum number of iterations. + full_output : number + if non-zero then return fopt, func_calls, grad_calls, and warnflag in addition to xopt. - disp -- print convergence message if non-zero. - retall -- return a list of results at each iteration if True + disp : number + print convergence message if non-zero. + retall : number + return a list of results at each iteration if True - See also: + :SeeAlso: fmin, fmin_powell, fmin_cg, fmin_bfgs, fmin_ncg -- multivariate local optimizers @@ -827,7 +883,13 @@ brentq, brenth, ridder, bisect, newton -- one-dimensional root-finding fixed_point -- scalar fixed-point finder + + Notes + --------------------------------------------- + Optimize the function, f, whose gradient is given by fprime using the + nonlinear conjugate gradient algorithm of Polak and Ribiere + See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 120-122. """ x0 = asarray(x0).flatten() if maxiter is None: @@ -924,18 +986,12 @@ def fmin_ncg(f, x0, fprime, fhess_p=None, fhess=None, args=(), avextol=1e-5, epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0, callback=None): - """Description: + """ Minimize the function f using the Newton-CG method. - Minimize the function, f, whose gradient is given by fprime using the - Newton-CG method. fhess_p must compute the hessian times an arbitrary - vector. If it is not given, finite-differences on fprime are used to - compute it. See Wright, and Nocedal 'Numerical Optimization', 1999, - pg. 140. + :Parameters: - Inputs: - f -- the Python function or method to be minimized. - x0 -- the initial guess for the minimizer. + x0 : ndarray -- the initial guess for the minimizer. fprime -- a function to compute the gradient of f: fprime(x, *args) fhess_p -- a function to compute the Hessian of f times an arbitrary vector: fhess_p (x, p, *args) @@ -943,42 +999,47 @@ args -- extra arguments for f, fprime, fhess_p, and fhess (the same set of extra arguments is supplied to all of these functions). - epsilon -- if fhess is approximated use this value for + epsilon : number + if fhess is approximated use this value for the step size (can be scalar or vector) callback -- an optional user-supplied function to call after each iteration. It is called as callback(xk), where xk is the current parameter vector. - Outputs: (xopt, {fopt, fcalls, gcalls, hcalls, warnflag},{allvecs}) + :Returns: (xopt, {fopt, fcalls, gcalls, hcalls, warnflag},{allvecs}) - xopt -- the minimizer of f - - fopt -- the value of the function at xopt: fopt = f(xopt) - fcalls -- the number of function calls. - gcalls -- the number of gradient calls. - hcalls -- the number of hessian calls. - warnflag -- algorithm warnings: + xopt : ndarray + the minimizer of f + fopt : number + the value of the function at xopt: fopt = f(xopt) + fcalls : number + the number of function calls + gcalls : number + the number of gradient calls + hcalls : number + the number of hessian calls. + warnflag : number + algorithm warnings: 1 : 'Maximum number of iterations exceeded.' 
- allvecs -- a list of all tried iterates + allvecs : Python list + a list of all tried iterates - Additional Inputs: + :OtherParameters: - avextol -- Convergence is assumed when the average relative error in + avextol : number + Convergence is assumed when the average relative error in the minimizer falls below this amount. - maxiter -- Maximum number of iterations to allow. - full_output -- If non-zero return the optional outputs. - disp -- If non-zero print convergence message. - retall -- return a list of results at each iteration if True + maxiter : number + Maximum number of iterations to allow. + full_output : number + If non-zero return the optional outputs. + disp : number + If non-zero print convergence message. + retall : bool + return a list of results at each iteration if True - Remarks: + :SeeAlso: - Only one of fhess_p or fhess need be given. If fhess is provided, - then fhess_p will be ignored. If neither fhess nor fhess_p is - provided, then the hessian product will be approximated using finite - differences on fprime. - - See also: - fmin, fmin_powell, fmin_cg, fmin_bfgs, fmin_ncg -- multivariate local optimizers leastsq -- nonlinear least squares minimizer @@ -996,6 +1057,18 @@ fixed_point -- scalar fixed-point finder + Notes + + --------------------------------------------- + + Only one of fhess_p or fhess need be given. If fhess is provided, + then fhess_p will be ignored. If neither fhess nor fhess_p is + provided, then the hessian product will be approximated using finite + differences on fprime. fhess_p must compute the hessian times an arbitrary + vector. If it is not given, finite-differences on fprime are used to + compute it. See Wright, and Nocedal 'Numerical Optimization', 1999, + pg. 140. + """ x0 = asarray(x0).flatten() fcalls, f = wrap_function(f, args) @@ -1106,36 +1179,40 @@ full_output=0, disp=1): """Bounded minimization for scalar functions. - Description: + :Parameters: - Finds a local minimizer of the scalar function func in the interval - x1 < xopt < x2 using Brent's method. (See brent for auto-bracketing). - - Inputs: - func -- the function to be minimized (must accept scalar input and return scalar output). - x1, x2 -- the optimization bounds. + x1, x2 : ndarray + the optimization bounds. args -- extra arguments to pass to function. - xtol -- the convergence tolerance. - maxfun -- maximum function evaluations. - full_output -- Non-zero to return optional outputs. - disp -- Non-zero to print messages. + xtol : number + the convergence tolerance. + maxfun : number + maximum function evaluations. + full_output : number + Non-zero to return optional outputs. + disp : number + Non-zero to print messages. 0 : no message printing. 1 : non-convergence notification messages only. 2 : print a message on convergence too. 3 : print iteration results. - Outputs: (xopt, {fval, ierr, numfunc}) + :Returns: (xopt, {fval, ierr, numfunc}) - xopt -- The minimizer of the function over the interval. - fval -- The function value at the minimum point. - ierr -- An error flag (0 if converged, 1 if maximum number of + xopt : ndarray + The minimizer of the function over the interval. + fval : number + The function value at the minimum point. + ierr : number + An error flag (0 if converged, 1 if maximum number of function calls reached). - numfunc -- The number of function calls. + numfunc : number + The number of function calls. 
- See also: + :SeeAlso: fmin, fmin_powell, fmin_cg, fmin_bfgs, fmin_ncg -- multivariate local optimizers @@ -1154,6 +1231,14 @@ fixed_point -- scalar fixed-point finder + Notes + + ------------------------------------------------------- + + Finds a local minimizer of the scalar function func in the interval + x1 < xopt < x2 using Brent's method. (See brent for auto-bracketing). + + """ if x1 > x2: @@ -1405,15 +1490,32 @@ def brent(func, args=(), brack=None, tol=1.48e-8, full_output=0, maxiter=500): """ Given a function of one-variable and a possible bracketing interval, return the minimum of the function isolated to a fractional precision of - tol. A bracketing interval is a triple (a,b,c) where (a < b < c) and f(xa) > f(xb) < f(xc). It doesn't always mean that the obtained solution will satisfy xa<=x<=xb + + :Parameters: + + func -- objective function + xa, xb : number + bracketing interval + args -- additional arguments (if present) + grow_limit : number + max grow limit + maxiter : number + maximum number of iterations + + :Returns: xa, xb, xc, fa, fb, fc, funcalls + + xa, xb, xc : number + bracket + fa, fb, fc : number + objective function values in bracket + funcalls : number + number of function evaluations """ _gold = 1.618034 _verysmall_num = 1e-21 
+ retall : number + non-zero to return a list of the solution at each iteration - See also: + :SeeAlso: fmin, fmin_powell, fmin_cg, fmin_bfgs, fmin_ncg -- multivariate local optimizers @@ -1657,7 +1809,13 @@ brentq, brenth, ridder, bisect, newton -- one-dimensional root-finding fixed_point -- scalar fixed-point finder + + Notes + + ----------------------- + + Uses a modification of Powell's method to find the minimum of a function + of N variables """ # we need to use a mutable object here that we can update in the # wrapper function @@ -1767,28 +1925,30 @@ def brute(func, ranges, args=(), Ns=20, full_output=0, finish=fmin): """Minimize a function over a given range by brute force. - That is find the minimum of a function evaluated on a grid - given by the tuple ranges. + :Parameters: - Inputs: - - func -- Function to be optimized - ranges -- Tuple where each element is a tuple of parameters + func -- Function to be optimized + ranges : tuple + Tuple where each element is a tuple of parameters or a slice object to be handed to numpy.mgrid - args -- Extra arguments to function. - Ns -- Default number of samples if not given - full_output -- Nonzero to return evaluation grid. + args -- Extra arguments to function. + Ns : number + Default number of samples if not given + full_output : number + Nonzero to return evaluation grid. - Outputs: (x0, fval, {grid, Jout}) + :Returns: (x0, fval, {grid, Jout}) - x0 -- Value of arguments giving minimum over the grird - fval -- Function value at minimum - grid -- tuple with same length as x0 representing the - evaluation grid - Jout -- Function values over grid: Jout = func(*grid) + x0 : ndarray + Value of arguments giving minimum over the grid + fval : number + Function value at minimum + grid : tuple + tuple with same length as x0 representing the evaluation grid + Jout : ndarray -- Function values over grid: Jout = func(*grid) - See also: + :SeeAlso: fmin, fmin_powell, fmin_cg, fmin_bfgs, fmin_ncg -- multivariate local optimizers @@ -1807,6 +1967,11 @@ fixed_point -- scalar fixed-point finder + Notes + + ------------------ + + Find the minimum of a function evaluated on a grid given by the tuple ranges. 
""" N = len(ranges) if N > 40: From scipy-svn at scipy.org Wed Aug 29 05:56:10 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 29 Aug 2007 04:56:10 -0500 (CDT) Subject: [Scipy-svn] r3278 - trunk/scipy/sandbox/models Message-ID: <20070829095610.ACB4939C075@new.scipy.org> Author: matthew.brett at gmail.com Date: 2007-08-29 04:55:33 -0500 (Wed, 29 Aug 2007) New Revision: 3278 Modified: trunk/scipy/sandbox/models/glm.py trunk/scipy/sandbox/models/model.py trunk/scipy/sandbox/models/regression.py Log: Allow wls weights to be 1 or 2D, use super() for called methods Modified: trunk/scipy/sandbox/models/glm.py =================================================================== --- trunk/scipy/sandbox/models/glm.py 2007-08-29 07:20:42 UTC (rev 3277) +++ trunk/scipy/sandbox/models/glm.py 2007-08-29 09:55:33 UTC (rev 3278) @@ -12,8 +12,7 @@ def __init__(self, design, family=family.Gaussian()): self.family = family - self.weights = 1 - self.initialize(design) + super(model, self).__init__(design, weights=1) def __iter__(self): self.iter = 0 @@ -39,7 +38,7 @@ self.weights = self.family.weights(results.mu) self.initialize(self.design) Z = results.predict + self.family.link.deriv(results.mu) * (Y - results.mu) - newresults = wls_model.fit(self, Z) + newresults = super(model, self).fit(self, Z) newresults.Y = Y newresults.mu = self.family.link.inverse(newresults.predict) self.iter += 1 @@ -70,12 +69,14 @@ if Y is None: Y = self.Y resid = Y - results.mu - return (N.power(resid, 2) / self.family.variance(results.mu)).sum() / results.df_resid - + return ((N.power(resid, 2) / self.family.variance(results.mu)).sum() + / results.df_resid) + def fit(self, Y): self.Y = N.asarray(Y, N.float64) iter(self) - self.results = wls_model.fit(self, self.family.link.initialize(Y)) + self.results = super(model, self).fit( + self.family.link.initialize(Y)) self.results.mu = self.family.link.inverse(self.results.predict) self.scale = self.results.scale = self.estimate_scale() Modified: trunk/scipy/sandbox/models/model.py =================================================================== --- trunk/scipy/sandbox/models/model.py 2007-08-29 07:20:42 UTC (rev 3277) +++ trunk/scipy/sandbox/models/model.py 2007-08-29 09:55:33 UTC (rev 3278) @@ -5,7 +5,7 @@ from scipy.sandbox.models.contrast import ContrastResults from scipy.sandbox.models.utils import recipr -class Model: +class Model(object): """ A (predictive) statistical model. The class Model itself does nothing but lays out the methods expected of any subclass. Modified: trunk/scipy/sandbox/models/regression.py =================================================================== --- trunk/scipy/sandbox/models/regression.py 2007-08-29 07:20:42 UTC (rev 3277) +++ trunk/scipy/sandbox/models/regression.py 2007-08-29 09:55:33 UTC (rev 3278) @@ -22,7 +22,8 @@ import numpy.linalg as L from scipy.linalg import norm, toeplitz -from scipy.sandbox.models.model import likelihood_model, likelihood_model_results +from scipy.sandbox.models.model import likelihood_model, \ + likelihood_model_results from scipy.sandbox.models import utils class ols_model(likelihood_model): @@ -65,7 +66,7 @@ design : TODO TODO """ - likelihood_model.__init__(self) + super(ols_model, self).__init__() self.initialize(design) def initialize(self, design): @@ -89,7 +90,6 @@ """ OLS model whitener does nothing: returns Y. """ - return Y def est_coef(self, Y): @@ -98,7 +98,6 @@ and coefficients, but initialize is not called so no psuedo-inverse is calculated. 
""" - Z = self.whiten(Y) lfit = regression_results(L.lstsq(self.wdesign, Z)[0], Y) @@ -111,7 +110,6 @@ (whitened) residuals and scale. """ - Z = self.whiten(Y) lfit = regression_results(N.dot(self.calc_beta, Z), Y, @@ -173,7 +171,6 @@ >>> print model.rho [-0.61887622 -0.88137957] """ - def __init__(self, design, rho): if type(rho) is type(1): self.order = rho @@ -185,7 +182,7 @@ if self.rho.shape == (): self.rho.shape = (1,) self.order = self.rho.shape[0] - ols_model.__init__(self, design) + super(ar_model, self).__init__(design) def iterative_fit(self, Y, niter=3): """ @@ -203,7 +200,6 @@ results = self.fit(Y) self.rho, _ = self.yule_walker(Y - results.predict) - def whiten(self, X): """ Whiten a series of columns according to an AR(p) @@ -297,16 +293,23 @@ >>> print results.Fcontrast(N.identity(2)) """ - def __init__(self, design, weights=1): - self.weights = weights - ols_model.__init__(self, design) + weights = N.array(weights) + if weights.shape == (): # scalar + self.weights = weights + else: + design_rows = design.shape[0] + if not(weights.shape[0] == design_rows and + weights.size == design_rows) : + raise ValueError( + 'Weights must be scalar or same length as design') + self.weights = weights.reshape(design_rows) + super(wls_model, self).__init__(design) def whiten(self, X): """ Whitener for WLS model, multiplies by sqrt(self.weights) """ - X = N.asarray(X, N.float64) if X.ndim == 1: @@ -326,7 +329,9 @@ """ def __init__(self, beta, Y, normalized_cov_beta=None, scale=1.): - likelihood_model_results.__init__(self, beta, normalized_cov_beta, scale) + super(regression_results, self).__init__(beta, + normalized_cov_beta, + scale) self.Y = Y def norm_resid(self): @@ -335,7 +340,6 @@ Note: residuals are whitened residuals. """ - if not hasattr(self, 'resid'): raise ValueError, 'need normalized residuals to estimate standard deviation' @@ -364,7 +368,6 @@ if the contrast C is estimable by looking at the rank of vstack([C,D]) and verifying it is the same as the rank of D. 
""" - if C.ndim == 1: C.shape = (C.shape[0], 1) new = N.vstack([C, D]) From scipy-svn at scipy.org Wed Aug 29 11:08:36 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 29 Aug 2007 10:08:36 -0500 (CDT) Subject: [Scipy-svn] r3279 - trunk/scipy/sandbox/models Message-ID: <20070829150836.6563039C069@new.scipy.org> Author: matthew.brett at gmail.com Date: 2007-08-29 10:08:33 -0500 (Wed, 29 Aug 2007) New Revision: 3279 Modified: trunk/scipy/sandbox/models/model.py Log: Some docstring updates Modified: trunk/scipy/sandbox/models/model.py =================================================================== --- trunk/scipy/sandbox/models/model.py 2007-08-29 09:55:33 UTC (rev 3278) +++ trunk/scipy/sandbox/models/model.py 2007-08-29 15:08:33 UTC (rev 3279) @@ -69,12 +69,27 @@ # return -self.logL(theta) # self.results = optimize.fmin(f, theta) -class likelihood_model_results: +class likelihood_model_results(object): + ''' Class to contain results from likelihood models ''' + def __init__(self, beta, normalized_cov_beta=None, scale=1.): + ''' Set up results structure + beta - parameter estimates from estimated model + normalized_cov_beta - + Normalized (before scaling) covariance of betas + scale - scalar - def __init__(self, beta, normalized_cov_beta=None, scale=1.): + normalized_cov_betas is also known as the hat matrix or H + (Semiparametric regression, Ruppert, Wand, Carroll; CUP 2003) + + The covariance of betas is given by scale times + normalized_cov_beta + + For (some subset of models) scale will typically be the + mean square error from the estimated model (sigma^2) + ''' self.beta = beta self.normalized_cov_beta = normalized_cov_beta - self.scale = 1. + self.scale = scale def t(self, column=None): """ From scipy-svn at scipy.org Wed Aug 29 11:34:50 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 29 Aug 2007 10:34:50 -0500 (CDT) Subject: [Scipy-svn] r3280 - in trunk/scipy/sparse: . tests Message-ID: <20070829153450.0B51A39C069@new.scipy.org> Author: stefan Date: 2007-08-29 10:34:30 -0500 (Wed, 29 Aug 2007) New Revision: 3280 Modified: trunk/scipy/sparse/sparse.py trunk/scipy/sparse/tests/test_sparse.py Log: Fix broken functionality in sparse matrices. lil_matrix: Fix inplace operations. Fix fancy-indexed assignments. Add row iteration. Remove scalar addition. Warn on attempt. sp_matrix: Add generic row iteration. Add generic inplace operators. csc_matrix and csr_matrix: More helpful error messages for fancy indexed assignments. dok_matrix: Allow iteration over rows. Partially addresses ticket #226. 
Modified: trunk/scipy/sparse/sparse.py =================================================================== --- trunk/scipy/sparse/sparse.py 2007-08-29 15:08:33 UTC (rev 3279) +++ trunk/scipy/sparse/sparse.py 2007-08-29 15:34:30 UTC (rev 3280) @@ -124,6 +124,10 @@ csc = self.tocsc() return csc.astype(t) + def __iter__(self): + for r in xrange(self.shape[0]): + yield self[r,:] + def getmaxprint(self): try: maxprint = self.maxprint @@ -259,6 +263,18 @@ csc = self.tocsc() return -csc + def __iadd__(self, other): + raise NotImplementedError + + def __isub__(self, other): + raise NotImplementedError + + def __imul__(self, other): + raise NotImplementedError + + def __idiv__(self, other): + raise TypeError("No support for matrix division.") + def __getattr__(self, attr): if attr == 'A': return self.toarray() @@ -925,6 +941,10 @@ else: return _cs_matrix.__getattr__(self, attr) + def __iter__(self): + csr = self.tocsr() + for r in xrange(self.shape[0]): + yield csr[r,:] def __add__(self, other): return _cs_matrix.__add__(self, other, csc_plus_csc) @@ -985,6 +1005,9 @@ if isinstance(key, tuple): row = key[0] col = key[1] + if not (isscalarlike(row) and isscalarlike(col)): + raise NotImplementedError("Fancy indexing in assignments not " + "supported for csc matrices.") M, N = self.shape if (row < 0): row = M + row @@ -1330,6 +1353,9 @@ if isinstance(key, tuple): row = key[0] col = key[1] + if not (isscalarlike(row) and isscalarlike(col)): + raise NotImplementedError("Fancy indexing in assignment not " + "supported for csr matrices.") M, N = self.shape if (row < 0): row = M + row @@ -1573,7 +1599,7 @@ # [self.get((element, j), 0) for element in seq] # ** Instead just add the non-zero elements. This uses # ** linear time in the number of non-zeros: - for (ii, jj) in self: + for (ii, jj) in self.keys(): if jj == j and ii >= first and ii <= last: dict.__setitem__(new, (ii-first, 0), \ dict.__getitem__(self, (ii,jj))) @@ -1608,7 +1634,7 @@ # [self.get((i, element), 0) for element in seq] # ** Instead loop over the non-zero elements. This is slower # ** if there are many non-zeros - for (ii, jj) in self: + for (ii, jj) in self.keys(): if ii == i and jj >= first and jj <= last: dict.__setitem__(new, (0, jj-first), \ dict.__getitem__(self, (ii,jj))) @@ -1634,7 +1660,7 @@ if i < 0 or i >= self.shape[0] or j < 0 or j >= self.shape[1]: raise IndexError, "index out of bounds" if isintlike(value) and value == 0: - if key in self: # get rid of it something already there + if key in self.keys(): # get rid of it if something is already there del self[key] else: # Ensure value is a single element, not a sequence @@ -1745,7 +1771,7 @@ # the two matrices to be summed. Would this be a good idea? 
new = dok_matrix(self.shape, dtype=self.dtype) new.update(self) - for key in other: + for key in other.keys(): new[key] += other[key] elif isspmatrix(other): csc = self.tocsc() @@ -1785,7 +1811,7 @@ def __neg__(self): new = dok_matrix(self.shape, dtype=self.dtype) - for key in self: + for key in self.keys(): new[key] = -self[key] return new @@ -1851,13 +1877,13 @@ indx = int((columns == 1)) N = len(cols_or_rows) if indx: # columns - for key in self: + for key in self.keys(): num = searchsorted(cols_or_rows, key[1]) if num < N: newkey = (key[0], num) new[newkey] = self[key] else: - for key in self: + for key in self.keys(): num = searchsorted(cols_or_rows, key[0]) if num < N: newkey = (num, key[1]) @@ -1871,7 +1897,7 @@ ext = dok_matrix() indx = int((columns == 1)) if indx: - for key in self: + for key in self.keys(): num = searchsorted(cols_or_rows, key[1]) if cols_or_rows[num] == key[1]: newkey = (key[0], num) @@ -1880,7 +1906,7 @@ newkey = (key[0], key[1]-num) base[newkey] = self[key] else: - for key in self: + for key in self.keys(): num = searchsorted(cols_or_rows, key[0]) if cols_or_rows[num] == key[0]: newkey = (num, key[1]) @@ -1896,7 +1922,7 @@ if other.shape[0] != self.shape[1]: raise ValueError, "dimensions do not match" new = [0] * self.shape[0] - for key in self: + for key in self.keys(): new[int(key[0])] += self[key] * other[int(key[1]), ...] new = array(new) if isinstance(other, matrix): @@ -1913,7 +1939,7 @@ if other.shape[-1] != self.shape[0]: raise ValueError, "dimensions do not match" new = [0] * self.shape[1] - for key in self: + for key in self.keys(): new[int(key[1])] += other[..., int(key[0])] * conj(self[key]) new = array(new) if isinstance(other, matrix): @@ -1959,7 +1985,7 @@ """ Return Compressed Sparse Column format arrays for this matrix """ # Fast sort on columns using the Schwartzian transform - keys = [(k[1], k[0]) for k in self] + keys = [(k[1], k[0]) for k in self.keys()] keys.sort() keys = [(k[1], k[0]) for k in keys] @@ -1986,7 +2012,7 @@ def toarray(self): new = zeros(self.shape, dtype=self.dtype) - for key in self: + for key in self.keys(): ikey0 = int(key[0]) ikey1 = int(key[1]) new[ikey0, ikey1] = self[key] @@ -2220,7 +2246,21 @@ for i in xrange(A.shape[0]): self[i, :] = A[i, :] + def __iadd__(self,other): + self[:,:] = self + other + return self + def __isub__(self,other): + self[:,:] = self - other + return self + + def __imul__(self,other): + if isscalarlike(other): + self[:,:] = self * other + return self + else: + raise TypeError("In-place matrix multiplication not supported.") + # Whenever the dimensions change, empty lists should be created for each # row @@ -2256,8 +2296,6 @@ new.data[0] = self.data[i][:] return new - - def _get1(self, i, j): row = self.rows[i] data = self.data[i] @@ -2473,11 +2511,9 @@ return new def __add__(self, other): - if isscalar(other): - new = self.copy() - new.data = numpy.array([[val+other for val in rowvals] for - rowvals in new.data], dtype=object) - return new + if isscalar(other) and other != 0: + raise ValueError("Refusing to destroy sparsity. 
" + "Use x.todense() + c instead.") else: return spmatrix.__add__(self, other) Modified: trunk/scipy/sparse/tests/test_sparse.py =================================================================== --- trunk/scipy/sparse/tests/test_sparse.py 2007-08-29 15:08:33 UTC (rev 3279) +++ trunk/scipy/sparse/tests/test_sparse.py 2007-08-29 15:34:30 UTC (rev 3280) @@ -27,7 +27,6 @@ restore_path() class _test_cs: - def setUp(self): self.dat = matrix([[1,0,0,2],[3,0,1,0],[0,2,0,0]],'d') self.datsp = self.spmatrix(self.dat) @@ -623,7 +622,7 @@ for ir in range( asp.shape[0] ): for ic in range( asp.shape[1] ): assert_equal( asp[ir, ic], bsp[ir, ic] ) - + class test_csc(_test_cs, _test_vert_slicing, _test_arith, NumpyTestCase): spmatrix = csc_matrix @@ -698,7 +697,7 @@ A = dok_matrix((3,2)) A[0,1] = -10 A[2,0] = 20 - A += 10 + A = A + 10 B = matrix([[10, 0], [10, 10], [30, 10]]) assert_array_equal(A.todense(), B) @@ -790,8 +789,16 @@ assert_equal(caught,5) -class test_lil(_test_cs, _test_horiz_slicing, NumpyTestCase): +class test_lil(_test_cs, _test_horiz_slicing, NumpyTestCase, + ParametricTestCase): spmatrix = lil_matrix + + B = lil_matrix((4,3)) + B[0,0] = 2 + B[1,2] = 7 + B[2,1] = 3 + B[3,0] = 10 + def check_dot(self): A = matrix(zeros((10,10))) A[0,3] = 10 @@ -828,17 +835,54 @@ """ Tests whether a row of one lil_matrix can be assigned to another. """ - B = lil_matrix((10,10)) - B[0,3] = 10 - B[5,6] = 20 - B[8,3] = 30 - B[3,8] = 40 - B[8,9] = 50 + B = self.B.copy() A = B / 10 - B[0, :] = A[0, :] - assert_array_equal(A[0, :].A, B[0, :].A) - assert_array_equal(A[0, :].A, array([[0, 0, 0, 1, 0, 0, 0, 0, 0, 0.]])) + B[0,:] = A[0,:] + assert_array_equal(A[0,:].A, B[0,:].A) + def tst_inplace_op(self,op,arr,other,result): + cpy = arr + getattr(arr,"__i%s__" % op)(other) + + assert_array_equal(cpy.todense(),arr.todense()) + assert_array_equal(arr.todense(),result) + + def testip_inplace_ops(self): + B = self.B[:3,:3].copy() + B[:,:] = B-B + C = B.todense() + + data = {'add':(B,C+C), + 'sub':(B,zeros(B.shape)), + 'mul':(3,C*3)} + + return [(self.tst_inplace_op,op,B,other,result) + for op,(other,result) in data.iteritems()] + + def check_lil_slice_assignment(self): + B = lil_matrix((4,3)) + B[0,0] = 5 + B[1,2] = 3 + B[2,1] = 7 + + expected = array([[10,0,0], + [0,0,6], + [0,14,0], + [0,0,0]]) + + B[:,:] = B+B + assert_array_equal(B.todense(),expected) + + block = [[1,0],[0,4]] + B[:2,:2] = csc_matrix(array(block)) + assert_array_equal(B.todense()[:2,:2],block) + + def check_lil_iteration(self): + row_data = [[1,2,3],[4,5,6]] + B = lil_matrix(array(row_data)) + for r,row in enumerate(B): + assert_array_equal(row.todense(),array(row_data[r],ndmin=2)) + def check_lil_from_csr(self): """ Tests whether a lil_matrix can be constructed from a csr_matrix. 
@@ -853,19 +897,6 @@ D = lil_matrix(C) assert_array_equal(C.A, D.A) - def check_scalar_add(self): - a = lil_matrix((3,3)) - a[0,0] = 1 - a[0,1] = 2 - a[1,1] = 3 - a[2,1] = 4 - a[2,2] = 5 - - assert_array_equal((a-5).todense(), - [[-4,-3,0], - [ 0,-2,0], - [ 0,-1,0]]) - def check_point_wise_multiply(self): l = lil_matrix((4,3)) l[0,0] = 1 From scipy-svn at scipy.org Wed Aug 29 11:42:24 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 29 Aug 2007 10:42:24 -0500 (CDT) Subject: [Scipy-svn] r3281 - trunk/scipy/optimize Message-ID: <20070829154224.3DA5739C054@new.scipy.org> Author: dmitrey.kroshko Date: 2007-08-29 10:42:18 -0500 (Wed, 29 Aug 2007) New Revision: 3281 Modified: trunk/scipy/optimize/lbfgsb.py trunk/scipy/optimize/tnc.py Log: updates in docstrings Modified: trunk/scipy/optimize/lbfgsb.py =================================================================== --- trunk/scipy/optimize/lbfgsb.py 2007-08-29 15:34:30 UTC (rev 3280) +++ trunk/scipy/optimize/lbfgsb.py 2007-08-29 15:42:18 UTC (rev 3281) @@ -119,6 +119,7 @@ ACM Transactions on Mathematical Software, Vol 23, Num. 4, pp. 550 - 560. See also: + scikits.openopt, which offers a unified syntax to call this and other solvers fmin, fmin_powell, fmin_cg, fmin_bfgs, fmin_ncg -- multivariate local optimizers Modified: trunk/scipy/optimize/tnc.py =================================================================== --- trunk/scipy/optimize/tnc.py 2007-08-29 15:34:30 UTC (rev 3280) +++ trunk/scipy/optimize/tnc.py 2007-08-29 15:42:18 UTC (rev 3281) @@ -164,6 +164,8 @@ Return code as defined in the RCSTRINGS dict. :SeeAlso: + - scikits.openopt, which offers a unified syntax to call this and other solvers + - fmin, fmin_powell, fmin_cg, fmin_bfgs, fmin_ncg : multivariate local optimizers From scipy-svn at scipy.org Wed Aug 29 12:47:09 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 29 Aug 2007 11:47:09 -0500 (CDT) Subject: [Scipy-svn] r3282 - trunk/scipy/sparse Message-ID: <20070829164709.20EE839C054@new.scipy.org> Author: stefan Date: 2007-08-29 11:46:52 -0500 (Wed, 29 Aug 2007) New Revision: 3282 Modified: trunk/scipy/sparse/sparse.py Log: Fix typo. 
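Small as it looks, the one-word change below is a real fix, not cosmetics: NotImplemented is the sentinel value that binary operators return to ask Python to try the reflected operation, while NotImplementedError is the exception class. Raising the sentinel fails with a confusing TypeError instead of the intended error. A quick illustration (plain Python 2 session, no scipy needed):

    >>> try:
    ...     raise NotImplemented
    ... except TypeError:
    ...     print "raising the sentinel is itself a TypeError"
    ...
    raising the sentinel is itself a TypeError
    >>> raise NotImplementedError("sparse: operation not supported")
    Traceback (most recent call last):
      ...
    NotImplementedError: sparse: operation not supported
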
Modified: trunk/scipy/sparse/sparse.py =================================================================== --- trunk/scipy/sparse/sparse.py 2007-08-29 15:42:18 UTC (rev 3281) +++ trunk/scipy/sparse/sparse.py 2007-08-29 16:46:52 UTC (rev 3282) @@ -569,7 +569,7 @@ # Convert this matrix to a dense matrix and add them return self.todense() + other else: - raise NotImplemented + raise NotImplementedError def __sub__(self,other,fn): # First check if argument is a scalar From scipy-svn at scipy.org Wed Aug 29 14:24:15 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 29 Aug 2007 13:24:15 -0500 (CDT) Subject: [Scipy-svn] r3283 - in trunk/scipy: sandbox stats stats/models stats/models/family stats/models/robust stats/models/tests Message-ID: <20070829182415.9181239C122@new.scipy.org> Author: jarrod.millman Date: 2007-08-29 13:24:04 -0500 (Wed, 29 Aug 2007) New Revision: 3283 Added: trunk/scipy/stats/models/ Removed: trunk/scipy/sandbox/models/ Modified: trunk/scipy/stats/models/__init__.py trunk/scipy/stats/models/bspline.py trunk/scipy/stats/models/contrast.py trunk/scipy/stats/models/cox.py trunk/scipy/stats/models/family/__init__.py trunk/scipy/stats/models/family/family.py trunk/scipy/stats/models/formula.py trunk/scipy/stats/models/gam.py trunk/scipy/stats/models/glm.py trunk/scipy/stats/models/mixed.py trunk/scipy/stats/models/model.py trunk/scipy/stats/models/regression.py trunk/scipy/stats/models/rlm.py trunk/scipy/stats/models/robust/__init__.py trunk/scipy/stats/models/setup.py trunk/scipy/stats/models/smoothers.py trunk/scipy/stats/models/tests/test_bspline.py trunk/scipy/stats/models/tests/test_formula.py trunk/scipy/stats/models/tests/test_glm.py trunk/scipy/stats/models/tests/test_regression.py trunk/scipy/stats/models/tests/test_rlm.py trunk/scipy/stats/models/tests/test_utils.py trunk/scipy/stats/models/utils.py Log: moved scipy/sandbox/models to scipy/stats/models Copied: trunk/scipy/stats/models (from rev 3282, trunk/scipy/sandbox/models) Modified: trunk/scipy/stats/models/__init__.py =================================================================== --- trunk/scipy/sandbox/models/__init__.py 2007-08-29 16:46:52 UTC (rev 3282) +++ trunk/scipy/stats/models/__init__.py 2007-08-29 18:24:04 UTC (rev 3283) @@ -4,15 +4,15 @@ __docformat__ = 'restructuredtext' -from scipy.sandbox.models.info import __doc__ +from scipy.stats.models.info import __doc__ -import scipy.sandbox.models.model -import scipy.sandbox.models.formula -import scipy.sandbox.models.regression -import scipy.sandbox.models.robust -import scipy.sandbox.models.family -from scipy.sandbox.models.glm import model as glm -from scipy.sandbox.models.rlm import model as rlm +import scipy.stats.models.model +import scipy.stats.models.formula +import scipy.stats.models.regression +import scipy.stats.models.robust +import scipy.stats.models.family +from scipy.stats.models.glm import model as glm +from scipy.stats.models.rlm import model as rlm __all__ = filter(lambda s:not s.startswith('_'),dir()) Modified: trunk/scipy/stats/models/bspline.py =================================================================== --- trunk/scipy/sandbox/models/bspline.py 2007-08-29 16:46:52 UTC (rev 3282) +++ trunk/scipy/stats/models/bspline.py 2007-08-29 18:24:04 UTC (rev 3283) @@ -20,7 +20,7 @@ from scipy.linalg import solveh_banded from scipy.optimize import golden -from scipy.sandbox.models import _bspline +from scipy.stats.models import _bspline def _band2array(a, lower=0, symmetric=False, hermitian=False): """ Modified: 
trunk/scipy/stats/models/contrast.py =================================================================== --- trunk/scipy/sandbox/models/contrast.py 2007-08-29 16:46:52 UTC (rev 3282) +++ trunk/scipy/stats/models/contrast.py 2007-08-29 18:24:04 UTC (rev 3283) @@ -1,6 +1,6 @@ import numpy as N from numpy.linalg import pinv -from scipy.sandbox.models import utils +from scipy.stats.models import utils class ContrastResults: """ Modified: trunk/scipy/stats/models/cox.py =================================================================== --- trunk/scipy/sandbox/models/cox.py 2007-08-29 16:46:52 UTC (rev 3282) +++ trunk/scipy/stats/models/cox.py 2007-08-29 18:24:04 UTC (rev 3283) @@ -3,7 +3,7 @@ import numpy as N -from scipy.sandbox.models import survival, model +from scipy.stats.models import survival, model class discrete: @@ -199,7 +199,7 @@ for i in range(2*n): subjects[i].X = X[i] - import scipy.sandbox.models.formula as F + import scipy.stats.models.formula as F x = F.quantitative('X') f = F.formula(x) Modified: trunk/scipy/stats/models/family/__init__.py =================================================================== --- trunk/scipy/sandbox/models/family/__init__.py 2007-08-29 16:46:52 UTC (rev 3282) +++ trunk/scipy/stats/models/family/__init__.py 2007-08-29 18:24:04 UTC (rev 3283) @@ -10,7 +10,7 @@ ''' -from scipy.sandbox.models.family.family import Gaussian, Family, \ +from scipy.stats.models.family.family import Gaussian, Family, \ Poisson, Gamma, InverseGaussian, Binomial Modified: trunk/scipy/stats/models/family/family.py =================================================================== --- trunk/scipy/sandbox/models/family/family.py 2007-08-29 16:46:52 UTC (rev 3282) +++ trunk/scipy/stats/models/family/family.py 2007-08-29 18:24:04 UTC (rev 3283) @@ -1,6 +1,6 @@ import numpy as N -from scipy.sandbox.models.family import links as L -from scipy.sandbox.models.family import varfuncs as V +from scipy.stats.models.family import links as L +from scipy.stats.models.family import varfuncs as V class Family(object): Modified: trunk/scipy/stats/models/formula.py =================================================================== --- trunk/scipy/sandbox/models/formula.py 2007-08-29 16:46:52 UTC (rev 3282) +++ trunk/scipy/stats/models/formula.py 2007-08-29 18:24:04 UTC (rev 3283) @@ -247,7 +247,7 @@ of another term, i.e. to take powers: >>> import numpy as N - >>> from scipy.sandbox.models import formula + >>> from scipy.stats.models import formula >>> X = N.linspace(0,10,101) >>> x = formula.term('X') >>> x.namespace={'X':X} @@ -600,7 +600,7 @@ only term in the formula, then a keywords argument \'nrow\' is needed. 
->>> from scipy.sandbox.models.formula import formula, I +>>> from scipy.stats.models.formula import formula, I >>> I() array(1.0) >>> I(nrow=5) Modified: trunk/scipy/stats/models/gam.py =================================================================== --- trunk/scipy/sandbox/models/gam.py 2007-08-29 16:46:52 UTC (rev 3282) +++ trunk/scipy/stats/models/gam.py 2007-08-29 18:24:04 UTC (rev 3283) @@ -5,9 +5,9 @@ import numpy as N -from scipy.sandbox.models import family -from scipy.sandbox.models.bspline import SmoothingSpline -from scipy.sandbox.models.glm import model as glm +from scipy.stats.models import family +from scipy.stats.models.bspline import SmoothingSpline +from scipy.stats.models.glm import model as glm def default_smoother(x): _x = x.copy() Modified: trunk/scipy/stats/models/glm.py =================================================================== --- trunk/scipy/sandbox/models/glm.py 2007-08-29 16:46:52 UTC (rev 3282) +++ trunk/scipy/stats/models/glm.py 2007-08-29 18:24:04 UTC (rev 3283) @@ -3,8 +3,8 @@ -------------------- """ import numpy as N -from scipy.sandbox.models import family -from scipy.sandbox.models.regression import wls_model +from scipy.stats.models import family +from scipy.stats.models.regression import wls_model class model(wls_model): Modified: trunk/scipy/stats/models/mixed.py =================================================================== --- trunk/scipy/sandbox/models/mixed.py 2007-08-29 16:46:52 UTC (rev 3282) +++ trunk/scipy/stats/models/mixed.py 2007-08-29 18:24:04 UTC (rev 3283) @@ -4,7 +4,7 @@ import numpy as N import numpy.linalg as L -from scipy.sandbox.models.formula import formula, I +from scipy.stats.models.formula import formula, I class Unit: """ @@ -311,7 +311,7 @@ n = 3 - from scipy.sandbox.models.formula import term + from scipy.stats.models.formula import term fixed = term('f') random = term('r') response = term('y') Modified: trunk/scipy/stats/models/model.py =================================================================== --- trunk/scipy/sandbox/models/model.py 2007-08-29 16:46:52 UTC (rev 3282) +++ trunk/scipy/stats/models/model.py 2007-08-29 18:24:04 UTC (rev 3283) @@ -2,8 +2,8 @@ from numpy.linalg import inv #from scipy import optimize -from scipy.sandbox.models.contrast import ContrastResults -from scipy.sandbox.models.utils import recipr +from scipy.stats.models.contrast import ContrastResults +from scipy.stats.models.utils import recipr class Model(object): """ Modified: trunk/scipy/stats/models/regression.py =================================================================== --- trunk/scipy/sandbox/models/regression.py 2007-08-29 16:46:52 UTC (rev 3282) +++ trunk/scipy/stats/models/regression.py 2007-08-29 18:24:04 UTC (rev 3283) @@ -22,9 +22,9 @@ import numpy.linalg as L from scipy.linalg import norm, toeplitz -from scipy.sandbox.models.model import likelihood_model, \ +from scipy.stats.models.model import likelihood_model, \ likelihood_model_results -from scipy.sandbox.models import utils +from scipy.stats.models import utils class ols_model(likelihood_model): """ @@ -34,8 +34,8 @@ -------- >>> import numpy as N >>> - >>> from scipy.sandbox.models.formula import term, I - >>> from scipy.sandbox.models.regression import ols_model + >>> from scipy.stats.models.formula import term, I + >>> from scipy.stats.models.regression import ols_model >>> >>> data={'Y':[1,3,4,5,2,3,4], ... 
'X':range(1,8)} @@ -136,8 +136,8 @@ >>> import numpy as N >>> import numpy.random as R >>> - >>> from scipy.sandbox.models.formula import term, I - >>> from scipy.sandbox.models.regression import ar_model + >>> from scipy.stats.models.formula import term, I + >>> from scipy.stats.models.regression import ar_model >>> >>> data={'Y':[1,3,4,5,8,10,9], ... 'X':range(1,8)} @@ -273,8 +273,8 @@ >>> import numpy as N >>> - >>> from scipy.sandbox.models.formula import term, I - >>> from scipy.sandbox.models.regression import wls_model + >>> from scipy.stats.models.formula import term, I + >>> from scipy.stats.models.regression import wls_model >>> >>> data={'Y':[1,3,4,5,2,3,4], ... 'X':range(1,8)} Modified: trunk/scipy/stats/models/rlm.py =================================================================== --- trunk/scipy/sandbox/models/rlm.py 2007-08-29 16:46:52 UTC (rev 3282) +++ trunk/scipy/stats/models/rlm.py 2007-08-29 18:24:04 UTC (rev 3283) @@ -3,8 +3,8 @@ """ import numpy as N -from scipy.sandbox.models.regression import wls_model -from scipy.sandbox.models.robust import norms, scale +from scipy.stats.models.regression import wls_model +from scipy.stats.models.robust import norms, scale class model(wls_model): Modified: trunk/scipy/stats/models/robust/__init__.py =================================================================== --- trunk/scipy/sandbox/models/robust/__init__.py 2007-08-29 16:46:52 UTC (rev 3282) +++ trunk/scipy/stats/models/robust/__init__.py 2007-08-29 18:24:04 UTC (rev 3283) @@ -4,5 +4,5 @@ import numpy as N import numpy.linalg as L -from scipy.sandbox.models.robust import norms -from scipy.sandbox.models.robust.scale import MAD +from scipy.stats.models.robust import norms +from scipy.stats.models.robust.scale import MAD Modified: trunk/scipy/stats/models/setup.py =================================================================== --- trunk/scipy/sandbox/models/setup.py 2007-08-29 16:46:52 UTC (rev 3282) +++ trunk/scipy/stats/models/setup.py 2007-08-29 18:24:04 UTC (rev 3283) @@ -10,7 +10,7 @@ try: import sys print sys.path - from scipy.sandbox.models.bspline_module import mod + from scipy.stats.models.bspline_module import mod n, s, d = weave_ext(mod) config.add_extension(n, s, **d) except ImportError: pass @@ -26,4 +26,4 @@ if __name__ == '__main__': from numpy.distutils.core import setup - setup(**configuration(top_path='', package_name='scipy.sandbox.models').todict()) + setup(**configuration(top_path='', package_name='scipy.stats.models').todict()) Modified: trunk/scipy/stats/models/smoothers.py =================================================================== --- trunk/scipy/sandbox/models/smoothers.py 2007-08-29 16:46:52 UTC (rev 3282) +++ trunk/scipy/stats/models/smoothers.py 2007-08-29 18:24:04 UTC (rev 3283) @@ -9,8 +9,8 @@ from scipy.linalg import solveh_banded from scipy.optimize import golden -from scipy.sandbox.models import _bspline -from scipy.sandbox.models.bspline import bspline, _band2array +from scipy.stats.models import _bspline +from scipy.stats.models.bspline import bspline, _band2array class poly_smoother: Modified: trunk/scipy/stats/models/tests/test_bspline.py =================================================================== --- trunk/scipy/sandbox/models/tests/test_bspline.py 2007-08-29 16:46:52 UTC (rev 3282) +++ trunk/scipy/stats/models/tests/test_bspline.py 2007-08-29 18:24:04 UTC (rev 3283) @@ -5,8 +5,8 @@ import numpy as N from numpy.testing import NumpyTest, NumpyTestCase -import scipy.sandbox.models as S -import 
scipy.sandbox.models.bspline as B +import scipy.stats.models as S +import scipy.stats.models.bspline as B class test_BSpline(NumpyTestCase): Modified: trunk/scipy/stats/models/tests/test_formula.py =================================================================== --- trunk/scipy/sandbox/models/tests/test_formula.py 2007-08-29 16:46:52 UTC (rev 3282) +++ trunk/scipy/stats/models/tests/test_formula.py 2007-08-29 18:24:04 UTC (rev 3283) @@ -9,7 +9,7 @@ import numpy.linalg as L from numpy.testing import assert_almost_equal, NumpyTest, NumpyTestCase -from scipy.sandbox.models import utils, formula, contrast +from scipy.stats.models import utils, formula, contrast class test_term(NumpyTestCase): Modified: trunk/scipy/stats/models/tests/test_glm.py =================================================================== --- trunk/scipy/sandbox/models/tests/test_glm.py 2007-08-29 16:46:52 UTC (rev 3282) +++ trunk/scipy/stats/models/tests/test_glm.py 2007-08-29 18:24:04 UTC (rev 3283) @@ -6,8 +6,8 @@ import numpy.random as R from numpy.testing import NumpyTest, NumpyTestCase -import scipy.sandbox.models as S -import scipy.sandbox.models.glm as models +import scipy.stats.models as S +import scipy.stats.models.glm as models W = R.standard_normal Modified: trunk/scipy/stats/models/tests/test_regression.py =================================================================== --- trunk/scipy/sandbox/models/tests/test_regression.py 2007-08-29 16:46:52 UTC (rev 3282) +++ trunk/scipy/stats/models/tests/test_regression.py 2007-08-29 18:24:04 UTC (rev 3283) @@ -5,7 +5,7 @@ from numpy.random import standard_normal from numpy.testing import NumpyTest, NumpyTestCase -from scipy.sandbox.models.regression import ols_model, ar_model +from scipy.stats.models.regression import ols_model, ar_model W = standard_normal Modified: trunk/scipy/stats/models/tests/test_rlm.py =================================================================== --- trunk/scipy/sandbox/models/tests/test_rlm.py 2007-08-29 16:46:52 UTC (rev 3282) +++ trunk/scipy/stats/models/tests/test_rlm.py 2007-08-29 18:24:04 UTC (rev 3283) @@ -5,7 +5,7 @@ import numpy.random as R from numpy.testing import NumpyTest, NumpyTestCase -import scipy.sandbox.models.rlm as models +import scipy.stats.models.rlm as models W = R.standard_normal Modified: trunk/scipy/stats/models/tests/test_utils.py =================================================================== --- trunk/scipy/sandbox/models/tests/test_utils.py 2007-08-29 16:46:52 UTC (rev 3282) +++ trunk/scipy/stats/models/tests/test_utils.py 2007-08-29 18:24:04 UTC (rev 3283) @@ -6,7 +6,7 @@ import numpy.random as R from numpy.testing import assert_almost_equal, NumpyTest, NumpyTestCase -from scipy.sandbox.models import utils +from scipy.stats.models import utils class test_Utils(NumpyTestCase): Modified: trunk/scipy/stats/models/utils.py =================================================================== --- trunk/scipy/sandbox/models/utils.py 2007-08-29 16:46:52 UTC (rev 3282) +++ trunk/scipy/stats/models/utils.py 2007-08-29 18:24:04 UTC (rev 3283) @@ -82,7 +82,7 @@ Examples -------- >>> from numpy import arange - >>> from scipy.sandbox.models.utils import StepFunction + >>> from scipy.stats.models.utils import StepFunction >>> >>> x = arange(20) >>> y = arange(20) From scipy-svn at scipy.org Wed Aug 29 15:43:32 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 29 Aug 2007 14:43:32 -0500 (CDT) Subject: [Scipy-svn] r3284 - trunk/scipy/stats Message-ID: <20070829194332.68FC839C0FE@new.scipy.org> 
Author: jarrod.millman Date: 2007-08-29 14:43:28 -0500 (Wed, 29 Aug 2007) New Revision: 3284 Modified: trunk/scipy/stats/setup.py Log: add models subpackages to scipy.stats Modified: trunk/scipy/stats/setup.py =================================================================== --- trunk/scipy/stats/setup.py 2007-08-29 18:24:04 UTC (rev 3283) +++ trunk/scipy/stats/setup.py 2007-08-29 19:43:28 UTC (rev 3284) @@ -6,6 +6,7 @@ from numpy.distutils.misc_util import Configuration config = Configuration('stats', parent_package, top_path) + config.add_subpackage('models') config.add_data_dir('tests') config.add_library('statlib', From scipy-svn at scipy.org Wed Aug 29 15:43:49 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 29 Aug 2007 14:43:49 -0500 (CDT) Subject: [Scipy-svn] r3285 - trunk/scipy/stats/models/family Message-ID: <20070829194349.D853039C0FE@new.scipy.org> Author: jarrod.millman Date: 2007-08-29 14:43:47 -0500 (Wed, 29 Aug 2007) New Revision: 3285 Modified: trunk/scipy/stats/models/family/family.py Log: fix typo Modified: trunk/scipy/stats/models/family/family.py =================================================================== --- trunk/scipy/stats/models/family/family.py 2007-08-29 19:43:28 UTC (rev 3284) +++ trunk/scipy/stats/models/family/family.py 2007-08-29 19:43:47 UTC (rev 3285) @@ -18,11 +18,12 @@ valid = [-N.inf, N.inf] tol = 1.0e-05 + links = [] def _setlink(self, link): self._link = link if hasattr(self, "links"): - if link not in links: + if link not in self.links: raise ValueError, 'invalid link for family, should be in %s' % `self.links` def _getlink(self): From scipy-svn at scipy.org Wed Aug 29 18:35:51 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 29 Aug 2007 17:35:51 -0500 (CDT) Subject: [Scipy-svn] r3286 - trunk Message-ID: <20070829223551.BBCB439C039@new.scipy.org> Author: jarrod.millman Date: 2007-08-29 17:35:47 -0500 (Wed, 29 Aug 2007) New Revision: 3286 Modified: trunk/DEVELOPERS.txt trunk/MANIFEST.in trunk/new_manifest.sh Log: few fixes leftover from switch to scipy/ from Lib/ Modified: trunk/DEVELOPERS.txt =================================================================== --- trunk/DEVELOPERS.txt 2007-08-29 19:43:47 UTC (rev 3285) +++ trunk/DEVELOPERS.txt 2007-08-29 22:35:47 UTC (rev 3286) @@ -44,7 +44,7 @@ Additions to distutils-generated SciPy tar-balls. Its usage is deprecated. - Lib/ + scipy/ Contains SciPy __init__.py and the directories of SciPy modules. @@ -54,7 +54,7 @@ ------------- In the following, a *SciPy module* is defined as a Python package, say -xxx, that is located in the Lib/ directory. All SciPy modules should +xxx, that is located in the scipy/ directory. All SciPy modules should follow the following conventions: * Ideally, each SciPy module should be as self-contained as possible. 
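A note on the one-line r3285 "fix typo" above: inside _setlink the bare name links was looked up as a module-level global, so any family that actually defined a links attribute sailed past the hasattr guard and then hit a NameError as soon as validation ran. r3285 adds an empty class-level default and qualifies the lookup as self.links. The corrected shape, condensed from that diff (illustrative, not the full family.py):

class Family(object):
    links = []                          # class-level default added in r3285

    def _setlink(self, link):
        self._link = link
        if hasattr(self, "links"):
            # 'self.links' resolves to the class attribute (or a subclass
            # override); the old bare 'links' was an undefined global and
            # raised NameError whenever this branch was taken.
            if link not in self.links:
                raise ValueError, 'invalid link for family, should be in %s' % `self.links`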
Modified: trunk/MANIFEST.in =================================================================== --- trunk/MANIFEST.in 2007-08-29 19:43:47 UTC (rev 3285) +++ trunk/MANIFEST.in 2007-08-29 22:35:47 UTC (rev 3286) @@ -5,101 +5,101 @@ # include setup.py include *.txt -include Lib/*.py +include scipy/*.py # # The following files are in the sandbox and wouldn't normally get included in sdist: # -include Lib/sandbox/ann/* -include Lib/sandbox/ann/data/* -include Lib/sandbox/ann/doc/* -include Lib/sandbox/arpack/* -include Lib/sandbox/arpack/ARPACK/* -include Lib/sandbox/arpack/ARPACK/LAPACK/* -include Lib/sandbox/arpack/ARPACK/SRC/* -include Lib/sandbox/arpack/ARPACK/UTIL/* -include Lib/sandbox/arpack/build/* -include Lib/sandbox/arpack/tests/* -include Lib/sandbox/arraysetops/* -include Lib/sandbox/arraysetops/tests/* -include Lib/sandbox/buildgrid/* -include Lib/sandbox/cdavid/* -include Lib/sandbox/cdavid/src/* -include Lib/sandbox/cdavid/tests/* -include Lib/sandbox/constants/* -include Lib/sandbox/cow/* -include Lib/sandbox/delaunay/* -include Lib/sandbox/delaunay/tests/* -include Lib/sandbox/exmplpackage/* -include Lib/sandbox/exmplpackage/tests/* -include Lib/sandbox/exmplpackage/yyy/* -include Lib/sandbox/exmplpackage/yyy/tests/* -include Lib/sandbox/fdfpack/* -include Lib/sandbox/fdfpack/src/* -include Lib/sandbox/fdfpack/tests/* -include Lib/sandbox/fdfpack/utils/* -include Lib/sandbox/ga/* -include Lib/sandbox/gplt/* -include Lib/sandbox/image/* -include Lib/sandbox/maskedarray/* -include Lib/sandbox/maskedarray/tests/* -include Lib/sandbox/models/* -include Lib/sandbox/models/family/* -include Lib/sandbox/models/robust/* -include Lib/sandbox/models/tests/* -include Lib/sandbox/montecarlo/* -include Lib/sandbox/montecarlo/src/* -include Lib/sandbox/montecarlo/tests/* -include Lib/sandbox/netcdf/* -include Lib/sandbox/newoptimize/* -include Lib/sandbox/numexpr/* -include Lib/sandbox/numexpr/tests/* -include Lib/sandbox/oliphant/* -include Lib/sandbox/plt/* -include Lib/sandbox/pyem/* -include Lib/sandbox/pyem/profile_data/* -include Lib/sandbox/pyem/src/* -include Lib/sandbox/pyem/tests/* -include Lib/sandbox/pysparse/* -include Lib/sandbox/pysparse/Tools/* -include Lib/sandbox/pysparse/amd/* -include Lib/sandbox/pysparse/docs/* -include Lib/sandbox/pysparse/examples/* -include Lib/sandbox/pysparse/examples/poisson_test/* -include Lib/sandbox/pysparse/include/* -include Lib/sandbox/pysparse/include/pysparse/* -include Lib/sandbox/pysparse/lib/* -include Lib/sandbox/pysparse/src/* -include Lib/sandbox/pysparse/superlu/* -include Lib/sandbox/pysparse/tests/* -include Lib/sandbox/pysparse/umfpack/* -include Lib/sandbox/rkern/* -include Lib/sandbox/spline/* -include Lib/sandbox/spline/fitpack/* -include Lib/sandbox/spline/tests/* -include Lib/sandbox/stats/* -include Lib/sandbox/svm/* -include Lib/sandbox/svm/libsvm-2.82/* -include Lib/sandbox/svm/tests/* -include Lib/sandbox/timeseries/* -include Lib/sandbox/timeseries/doc/* -include Lib/sandbox/timeseries/examples/* -include Lib/sandbox/timeseries/mtimeseries/* -include Lib/sandbox/timeseries/mtimeseries/tests/* -include Lib/sandbox/timeseries/old/* -include Lib/sandbox/timeseries/plotlib/* -include Lib/sandbox/timeseries/src/* -include Lib/sandbox/umfpack/* -include Lib/sandbox/umfpack/umfpack/* -include Lib/sandbox/wavelet/* -include Lib/sandbox/xplt/* -include Lib/sandbox/xplt/gistdata/* -include Lib/sandbox/xplt/pygist/* -include Lib/sandbox/xplt/src/* -include Lib/sandbox/xplt/src/g/* -include Lib/sandbox/xplt/src/gist/* -include 
Lib/sandbox/xplt/src/play/* -include Lib/sandbox/xplt/src/play/all/* -include Lib/sandbox/xplt/src/play/mac/* -include Lib/sandbox/xplt/src/play/unix/* -include Lib/sandbox/xplt/src/play/win/* -include Lib/sandbox/xplt/src/play/x11/* +include scipy/sandbox/ann/* +include scipy/sandbox/ann/data/* +include scipy/sandbox/ann/doc/* +include scipy/sandbox/arpack/* +include scipy/sandbox/arpack/ARPACK/* +include scipy/sandbox/arpack/ARPACK/LAPACK/* +include scipy/sandbox/arpack/ARPACK/SRC/* +include scipy/sandbox/arpack/ARPACK/UTIL/* +include scipy/sandbox/arpack/build/* +include scipy/sandbox/arpack/tests/* +include scipy/sandbox/arraysetops/* +include scipy/sandbox/arraysetops/tests/* +include scipy/sandbox/buildgrid/* +include scipy/sandbox/cdavid/* +include scipy/sandbox/cdavid/src/* +include scipy/sandbox/cdavid/tests/* +include scipy/sandbox/constants/* +include scipy/sandbox/cow/* +include scipy/sandbox/delaunay/* +include scipy/sandbox/delaunay/tests/* +include scipy/sandbox/exmplpackage/* +include scipy/sandbox/exmplpackage/tests/* +include scipy/sandbox/exmplpackage/yyy/* +include scipy/sandbox/exmplpackage/yyy/tests/* +include scipy/sandbox/fdfpack/* +include scipy/sandbox/fdfpack/src/* +include scipy/sandbox/fdfpack/tests/* +include scipy/sandbox/fdfpack/utils/* +include scipy/sandbox/ga/* +include scipy/sandbox/gplt/* +include scipy/sandbox/image/* +include scipy/sandbox/maskedarray/* +include scipy/sandbox/maskedarray/tests/* +include scipy/sandbox/models/* +include scipy/sandbox/models/family/* +include scipy/sandbox/models/robust/* +include scipy/sandbox/models/tests/* +include scipy/sandbox/montecarlo/* +include scipy/sandbox/montecarlo/src/* +include scipy/sandbox/montecarlo/tests/* +include scipy/sandbox/netcdf/* +include scipy/sandbox/newoptimize/* +include scipy/sandbox/numexpr/* +include scipy/sandbox/numexpr/tests/* +include scipy/sandbox/oliphant/* +include scipy/sandbox/plt/* +include scipy/sandbox/pyem/* +include scipy/sandbox/pyem/profile_data/* +include scipy/sandbox/pyem/src/* +include scipy/sandbox/pyem/tests/* +include scipy/sandbox/pysparse/* +include scipy/sandbox/pysparse/Tools/* +include scipy/sandbox/pysparse/amd/* +include scipy/sandbox/pysparse/docs/* +include scipy/sandbox/pysparse/examples/* +include scipy/sandbox/pysparse/examples/poisson_test/* +include scipy/sandbox/pysparse/include/* +include scipy/sandbox/pysparse/include/pysparse/* +include scipy/sandbox/pysparse/lib/* +include scipy/sandbox/pysparse/src/* +include scipy/sandbox/pysparse/superlu/* +include scipy/sandbox/pysparse/tests/* +include scipy/sandbox/pysparse/umfpack/* +include scipy/sandbox/rkern/* +include scipy/sandbox/spline/* +include scipy/sandbox/spline/fitpack/* +include scipy/sandbox/spline/tests/* +include scipy/sandbox/stats/* +include scipy/sandbox/svm/* +include scipy/sandbox/svm/libsvm-2.82/* +include scipy/sandbox/svm/tests/* +include scipy/sandbox/timeseries/* +include scipy/sandbox/timeseries/doc/* +include scipy/sandbox/timeseries/examples/* +include scipy/sandbox/timeseries/mtimeseries/* +include scipy/sandbox/timeseries/mtimeseries/tests/* +include scipy/sandbox/timeseries/old/* +include scipy/sandbox/timeseries/plotlib/* +include scipy/sandbox/timeseries/src/* +include scipy/sandbox/umfpack/* +include scipy/sandbox/umfpack/umfpack/* +include scipy/sandbox/wavelet/* +include scipy/sandbox/xplt/* +include scipy/sandbox/xplt/gistdata/* +include scipy/sandbox/xplt/pygist/* +include scipy/sandbox/xplt/src/* +include scipy/sandbox/xplt/src/g/* +include 
scipy/sandbox/xplt/src/gist/* +include scipy/sandbox/xplt/src/play/* +include scipy/sandbox/xplt/src/play/all/* +include scipy/sandbox/xplt/src/play/mac/* +include scipy/sandbox/xplt/src/play/unix/* +include scipy/sandbox/xplt/src/play/win/* +include scipy/sandbox/xplt/src/play/x11/* Modified: trunk/new_manifest.sh =================================================================== --- trunk/new_manifest.sh 2007-08-29 19:43:47 UTC (rev 3285) +++ trunk/new_manifest.sh 2007-08-29 22:35:47 UTC (rev 3286) @@ -6,7 +6,7 @@ # would otherwise leave these files out. MANIFEST_IN=MANIFEST.in -SANDBOX_DIR="Lib/sandbox" +SANDBOX_DIR="scipy/sandbox" cat < $MANIFEST_IN # This automatically generated by new_manifest.sh @@ -16,7 +16,7 @@ # include setup.py include *.txt -include Lib/*.py +include scipy/*.py # # The following files are in the sandbox and wouldn't normally get included in sdist: # From scipy-svn at scipy.org Wed Aug 29 20:24:04 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 29 Aug 2007 19:24:04 -0500 (CDT) Subject: [Scipy-svn] r3287 - trunk Message-ID: <20070830002404.6B4B839C0D6@new.scipy.org> Author: jarrod.millman Date: 2007-08-29 19:24:02 -0500 (Wed, 29 Aug 2007) New Revision: 3287 Modified: trunk/MANIFEST.in Log: remove old references to scipy.sandbox.models Modified: trunk/MANIFEST.in =================================================================== --- trunk/MANIFEST.in 2007-08-29 22:35:47 UTC (rev 3286) +++ trunk/MANIFEST.in 2007-08-30 00:24:02 UTC (rev 3287) @@ -42,10 +42,6 @@ include scipy/sandbox/image/* include scipy/sandbox/maskedarray/* include scipy/sandbox/maskedarray/tests/* -include scipy/sandbox/models/* -include scipy/sandbox/models/family/* -include scipy/sandbox/models/robust/* -include scipy/sandbox/models/tests/* include scipy/sandbox/montecarlo/* include scipy/sandbox/montecarlo/src/* include scipy/sandbox/montecarlo/tests/* From scipy-svn at scipy.org Wed Aug 29 20:37:38 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 29 Aug 2007 19:37:38 -0500 (CDT) Subject: [Scipy-svn] r3288 - trunk Message-ID: <20070830003738.53E7A39C069@new.scipy.org> Author: jarrod.millman Date: 2007-08-29 19:37:32 -0500 (Wed, 29 Aug 2007) New Revision: 3288 Modified: trunk/TOCHANGE.txt Log: removed some old text Modified: trunk/TOCHANGE.txt =================================================================== --- trunk/TOCHANGE.txt 2007-08-30 00:24:02 UTC (rev 3287) +++ trunk/TOCHANGE.txt 2007-08-30 00:37:32 UTC (rev 3288) @@ -1,14 +1,3 @@ -Changes needed to convert SciPy to work with the new NumPy. 
- -done * scipy_distutils and scipy_base to scipy.disutils and scipy.base -done * Numeric/arrayobject.h to scipy/arrayobject.h -done * Numeric/ufuncobject.h to scipy/ufuncobject.h -done * scipy_test to scipy.test -done * Look for use of descr->zero and descr->ones --- need to be replaced -done * Change use of default_config_dict to Configuration - - - Changes that should be made someday: * io rewritten to use internal writing capabilities of arrays From scipy-svn at scipy.org Wed Aug 29 21:55:33 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 29 Aug 2007 20:55:33 -0500 (CDT) Subject: [Scipy-svn] r3289 - trunk/scipy Message-ID: <20070830015533.110F539C103@new.scipy.org> Author: jarrod.millman Date: 2007-08-29 20:55:30 -0500 (Wed, 29 Aug 2007) New Revision: 3289 Modified: trunk/scipy/version.py Log: next release is 0.6.0 not 0.6 Modified: trunk/scipy/version.py =================================================================== --- trunk/scipy/version.py 2007-08-30 00:37:32 UTC (rev 3288) +++ trunk/scipy/version.py 2007-08-30 01:55:30 UTC (rev 3289) @@ -1,4 +1,4 @@ -version = '0.6' +version = '0.6.0' release=False if not release: From scipy-svn at scipy.org Wed Aug 29 21:56:12 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 29 Aug 2007 20:56:12 -0500 (CDT) Subject: [Scipy-svn] r3290 - branches Message-ID: <20070830015612.249CE39C103@new.scipy.org> Author: jarrod.millman Date: 2007-08-29 20:56:07 -0500 (Wed, 29 Aug 2007) New Revision: 3290 Added: branches/0.6.x/ Log: Create a release branch. Copied: branches/0.6.x (from rev 3289, trunk) From scipy-svn at scipy.org Wed Aug 29 21:57:03 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 29 Aug 2007 20:57:03 -0500 (CDT) Subject: [Scipy-svn] r3291 - trunk/scipy Message-ID: <20070830015703.6EF3C39C103@new.scipy.org> Author: jarrod.millman Date: 2007-08-29 20:57:01 -0500 (Wed, 29 Aug 2007) New Revision: 3291 Modified: trunk/scipy/version.py Log: update trunk for next release (0.7.0) Modified: trunk/scipy/version.py =================================================================== --- trunk/scipy/version.py 2007-08-30 01:56:07 UTC (rev 3290) +++ trunk/scipy/version.py 2007-08-30 01:57:01 UTC (rev 3291) @@ -1,4 +1,4 @@ -version = '0.6.0' +version = '0.7.0' release=False if not release: From scipy-svn at scipy.org Thu Aug 30 04:14:43 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 30 Aug 2007 03:14:43 -0500 (CDT) Subject: [Scipy-svn] r3292 - in branches/0.6.x/scipy: misc/tests sparse/tests Message-ID: <20070830081443.CA57539C146@new.scipy.org> Author: stefan Date: 2007-08-30 03:13:39 -0500 (Thu, 30 Aug 2007) New Revision: 3292 Modified: branches/0.6.x/scipy/misc/tests/test_pilutil.py branches/0.6.x/scipy/sparse/tests/test_sparse.py Log: Change parametric tests to normal tests. 
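The conversion in the diff below retires NumpyTest's generator-style parametric tests: a testip_* method used to return an iterable of (callable, argument, ...) tuples that ParametricTestCase executed as independent sub-tests, and that protocol is replaced by ordinary test methods that loop over their cases themselves. Schematically, the two styles compare as follows (a self-contained sketch, not the scipy test code; the file names are borrowed from the pilutil test data):

cases = {'icon.png': (0, 255), 'icon_mono.png': (0, 2)}

class Style:
    def tst_case(self, name, irange):
        imin, imax = irange
        assert imin <= imax, name

    # old: parametric generator, needs ParametricTestCase runner support
    def testip_cases(self):
        return ((self.tst_case, name, irange)
                for name, irange in cases.iteritems())

    # new: plain method, runs under any unittest-style runner
    def test_cases(self):
        for name, irange in cases.iteritems():
            self.tst_case(name, irange)

s = Style()
s.test_cases()      # the plain style executes directly, no special runner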
Modified: branches/0.6.x/scipy/misc/tests/test_pilutil.py =================================================================== --- branches/0.6.x/scipy/misc/tests/test_pilutil.py 2007-08-30 01:57:01 UTC (rev 3291) +++ branches/0.6.x/scipy/misc/tests/test_pilutil.py 2007-08-30 08:13:39 UTC (rev 3292) @@ -8,9 +8,9 @@ import os.path import numpy as N -datapath = os.path.dirname(__file__) +datapath = os.path.join(os.path.dirname(__file__),'data') -class test_pilutil(ParametricTestCase): +class test_pilutil(NumpyTestCase): def test_imresize(self): im = N.random.random((10,20)) for T in N.sctypes['float'] + [float]: @@ -23,19 +23,16 @@ assert_equal(pilutil.bytescale(x),x) assert_equal(pilutil.bytescale(y),[0,127,255]) - def tst_fromimage(self,filename,irange): - img = pilutil.fromimage(PIL.Image.open(filename)) - imin,imax = irange - assert img.min() >= imin - assert img.max() <= imax - - def testip_fromimage(self): + def test_fromimage(self): data = {'icon.png':(0,255), 'icon_mono.png':(0,2), 'icon_mono_flat.png':(0,1)} - return ((self.tst_fromimage,os.path.join(datapath,'data',fn),irange) - for fn,irange in data.iteritems()) + for fn,irange in data.iteritems(): + img = pilutil.fromimage(PIL.Image.open(os.path.join(datapath,fn))) + imin,imax = irange + assert img.min() >= imin + assert img.max() <= imax if __name__ == "__main__": NumpyTest().run() Modified: branches/0.6.x/scipy/sparse/tests/test_sparse.py =================================================================== --- branches/0.6.x/scipy/sparse/tests/test_sparse.py 2007-08-30 01:57:01 UTC (rev 3291) +++ branches/0.6.x/scipy/sparse/tests/test_sparse.py 2007-08-30 08:13:39 UTC (rev 3292) @@ -789,8 +789,7 @@ assert_equal(caught,5) -class test_lil(_test_cs, _test_horiz_slicing, NumpyTestCase, - ParametricTestCase): +class test_lil(_test_cs, _test_horiz_slicing, NumpyTestCase): spmatrix = lil_matrix B = lil_matrix((4,3)) @@ -840,14 +839,7 @@ B[0,:] = A[0,:] assert_array_equal(A[0,:].A, B[0,:].A) - def tst_inplace_op(self,op,arr,other,result): - cpy = arr - getattr(arr,"__i%s__" % op)(other) - - assert_array_equal(cpy.todense(),arr.todense()) - assert_array_equal(arr.todense(),result) - - def testip_inplace_ops(self): + def test_inplace_ops(self): B = self.B[:3,:3].copy() B[:,:] = B-B C = B.todense() @@ -856,9 +848,14 @@ 'sub':(B,zeros(B.shape)), 'mul':(3,C*3)} - return [(self.tst_inplace_op,op,B,other,result) - for op,(other,result) in data.iteritems()] + for op,(other,result) in data.iteritems(): + arr = B.copy() + cpy = arr + getattr(arr,"__i%s__" % op)(other) + assert_array_equal(cpy.todense(),arr.todense()) + assert_array_equal(arr.todense(),result) + def check_lil_slice_assignment(self): B = lil_matrix((4,3)) B[0,0] = 5 From scipy-svn at scipy.org Thu Aug 30 04:18:12 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 30 Aug 2007 03:18:12 -0500 (CDT) Subject: [Scipy-svn] r3293 - trunk/scipy/stats/models Message-ID: <20070830081812.264DA39C14D@new.scipy.org> Author: stefan Date: 2007-08-30 03:17:46 -0500 (Thu, 30 Aug 2007) New Revision: 3293 Modified: trunk/scipy/stats/models/setup.py Log: Don't print sys.path on setup. 
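For context, the print statement removed below sat inside the guard that makes the weave-generated bspline extension optional: if the import fails, the extension is skipped rather than aborting the build. The skeleton of that pattern (hypothetical module name and helper; the real file imports bspline_module and uses a weave_ext helper defined in setup.py):

def configuration(parent_package='', top_path=None):
    from numpy.distutils.misc_util import Configuration
    config = Configuration('models', parent_package, top_path)
    try:
        # Succeeds only where the optional tool chain (weave) is usable;
        # otherwise the extension is skipped and the build carries on.
        import optional_ext_module                  # hypothetical module
        config.add_extension(*optional_ext_module.ext_args())
    except ImportError:
        pass
    return config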
Modified: trunk/scipy/stats/models/setup.py =================================================================== --- trunk/scipy/stats/models/setup.py 2007-08-30 08:13:39 UTC (rev 3292) +++ trunk/scipy/stats/models/setup.py 2007-08-30 08:17:46 UTC (rev 3293) @@ -9,7 +9,6 @@ try: import sys - print sys.path from scipy.stats.models.bspline_module import mod n, s, d = weave_ext(mod) config.add_extension(n, s, **d) From scipy-svn at scipy.org Thu Aug 30 04:18:43 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 30 Aug 2007 03:18:43 -0500 (CDT) Subject: [Scipy-svn] r3294 - branches/0.6.x/scipy/stats/models Message-ID: <20070830081843.DFDF039C15C@new.scipy.org> Author: stefan Date: 2007-08-30 03:18:22 -0500 (Thu, 30 Aug 2007) New Revision: 3294 Modified: branches/0.6.x/scipy/stats/models/setup.py Log: Don't print system path on setup. Modified: branches/0.6.x/scipy/stats/models/setup.py =================================================================== --- branches/0.6.x/scipy/stats/models/setup.py 2007-08-30 08:17:46 UTC (rev 3293) +++ branches/0.6.x/scipy/stats/models/setup.py 2007-08-30 08:18:22 UTC (rev 3294) @@ -9,7 +9,6 @@ try: import sys - print sys.path from scipy.stats.models.bspline_module import mod n, s, d = weave_ext(mod) config.add_extension(n, s, **d) From scipy-svn at scipy.org Thu Aug 30 15:54:04 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 30 Aug 2007 14:54:04 -0500 (CDT) Subject: [Scipy-svn] r3295 - in trunk/scipy/linalg: . src tests Message-ID: <20070830195404.6BCAC39C167@new.scipy.org> Author: chris.burns Date: 2007-08-30 14:53:17 -0500 (Thu, 30 Aug 2007) New Revision: 3295 Added: trunk/scipy/linalg/src/fblaswrap_veclib_c.c Modified: trunk/scipy/linalg/setup.py trunk/scipy/linalg/tests/test_blas.py Log: Applied veclib.patch from cdavid regarding ticket #238. This fixes the check_dot fail in linalg version of test_fblas1_simple but not the lib version. 
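Some background on the patch below (ticket #238): the f2py-generated fblas module calls the BLAS dot products through Fortran calling conventions, and on OS X the complex-valued routines in the Accelerate/vecLib framework (cdotc, cdotu, zdotc, zdotu) do not hand their results back the way those callers expect, hence the check_dot failures noted in the log. The fix adds a thin C shim that routes the calls through the cblas_*_sub variants, which write the result through a pointer, and selects the shim at build time by inspecting the link flags. Here is the detection helper from the diff, lightly condensed, with a usage sketch (the info dictionaries are illustrative, not real system_info output):

import re

def needs_cblas_wrapper(info):
    """True when fblas must reach cblas through a C shim (OS X frameworks)."""
    r_accel = re.compile("Accelerate")
    r_vec = re.compile("vecLib")
    try:
        for arg in info['extra_link_args']:
            if r_accel.search(arg) or r_vec.search(arg):
                return True
    except KeyError:
        pass
    return False

# An Accelerate link line trips the shim; a plain LAPACK/BLAS
# configuration keeps the original Fortran wrapper (fblaswrap.f).
assert needs_cblas_wrapper({'extra_link_args': ['-framework Accelerate']})
assert not needs_cblas_wrapper({'libraries': ['lapack', 'blas']})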
Modified: trunk/scipy/linalg/setup.py =================================================================== --- trunk/scipy/linalg/setup.py 2007-08-30 08:18:22 UTC (rev 3294) +++ trunk/scipy/linalg/setup.py 2007-08-30 19:53:17 UTC (rev 3295) @@ -23,6 +23,23 @@ #-------------------- +def needs_cblas_wrapper(info): + """Returns true if needs c wrapper around cblas for calling from + fortran.""" + import re + r_accel = re.compile("Accelerate") + r_vec = re.compile("vecLib") + res = False + try: + tmpstr = info['extra_link_args'] + for i in tmpstr: + if r_accel.search(i) or r_vec.search(i): + res = True + except KeyError: + pass + + return res + def configuration(parent_package='',top_path=None): from numpy.distutils.system_info import get_info, NotFoundError @@ -98,16 +115,28 @@ return target # fblas: - config.add_extension('fblas', - sources = [generate_pyf, - join('src','fblaswrap.f')], - depends = ['generic_fblas.pyf', - 'generic_fblas1.pyf', - 'generic_fblas2.pyf', - 'generic_fblas3.pyf', - 'interface_gen.py'], - extra_info = lapack_opt - ) + if needs_cblas_wrapper(lapack_opt): + config.add_extension('fblas', + sources = [generate_pyf, + join('src','fblaswrap_veclib_c.c')], + depends = ['generic_fblas.pyf', + 'generic_fblas1.pyf', + 'generic_fblas2.pyf', + 'generic_fblas3.pyf', + 'interface_gen.py'], + extra_info = lapack_opt + ) + else: + config.add_extension('fblas', + sources = [generate_pyf, + join('src','fblaswrap.f')], + depends = ['generic_fblas.pyf', + 'generic_fblas1.pyf', + 'generic_fblas2.pyf', + 'generic_fblas3.pyf', + 'interface_gen.py'], + extra_info = lapack_opt + ) # cblas: config.add_extension('cblas', Added: trunk/scipy/linalg/src/fblaswrap_veclib_c.c =================================================================== --- trunk/scipy/linalg/src/fblaswrap_veclib_c.c 2007-08-30 08:18:22 UTC (rev 3294) +++ trunk/scipy/linalg/src/fblaswrap_veclib_c.c 2007-08-30 19:53:17 UTC (rev 3295) @@ -0,0 +1,22 @@ +#include <vecLib/vecLib.h> + +//#define WRAP_F77(a) wcblas_##a##_ +#define WRAP_F77(a) w##a##_ +void WRAP_F77(cdotc)(complex *dotc, const int *N, const complex *X, const int *incX, const complex *Y, const int *incY) +{ + cblas_cdotc_sub(*N, X, *incX, Y, *incY, dotc); +} + +void WRAP_F77(cdotu)(complex* dotu, const int *N, const complex *X, const int *incX, const complex *Y, const int *incY) +{ + cblas_cdotu_sub(*N, X, *incX, Y, *incY, dotu); +} + +void WRAP_F77(zdotc)(double complex *dotu, const int *N, const double complex *X, const int *incX, const double complex *Y, const int *incY) +{ + cblas_zdotc_sub(*N, X, *incX, Y, *incY, dotu); +} +void WRAP_F77(zdotu)(double complex *dotu, const int *N, const double complex *X, const int *incX, const double complex *Y, const int *incY) +{ + cblas_zdotu_sub(*N, X, *incX, Y, *incY, dotu); +} Property changes on: trunk/scipy/linalg/src/fblaswrap_veclib_c.c ___________________________________________________________________ Name: svn:keywords + Id Name: svn:eol-style + native Modified: trunk/scipy/linalg/tests/test_blas.py =================================================================== --- trunk/scipy/linalg/tests/test_blas.py 2007-08-30 08:18:22 UTC (rev 3294) +++ trunk/scipy/linalg/tests/test_blas.py 2007-08-30 19:53:17 UTC (rev 3295) @@ -20,6 +20,7 @@ from numpy.testing import * set_package_path() from linalg import fblas +print fblas from linalg import cblas restore_path() @@ -69,12 +70,18 @@ f = getattr(fblas,p+'dot',None) if f is None: continue assert_almost_equal(f([3,-4,5],[2,5,1]),-9) + def check_complex_dotu(self): for p in 'cz': f = 
getattr(fblas,p+'dotu',None) if f is None: continue assert_almost_equal(f([3j,-4,3-4j],[2,3,1]),-9+2j) - f = getattr(fblas,p+'dotc') + + def check_complex_dotc(self): + for p in 'cz': + f = getattr(fblas,p+'dotc',None) + if f is None: continue assert_almost_equal(f([3j,-4,3-4j],[2,3j,1]),3-14j) + def check_nrm2(self): for p in 'sd': f = getattr(fblas,p+'nrm2',None) From scipy-svn at scipy.org Thu Aug 30 17:16:00 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 30 Aug 2007 16:16:00 -0500 (CDT) Subject: [Scipy-svn] r3296 - trunk/scipy/interpolate Message-ID: <20070830211600.1653F39C167@new.scipy.org> Author: oliphant Date: 2007-08-30 16:15:48 -0500 (Thu, 30 Aug 2007) New Revision: 3296 Modified: trunk/scipy/interpolate/interpolate.py Log: Allow zero to be called 'nearest' Modified: trunk/scipy/interpolate/interpolate.py =================================================================== --- trunk/scipy/interpolate/interpolate.py 2007-08-30 19:53:17 UTC (rev 3295) +++ trunk/scipy/interpolate/interpolate.py 2007-08-30 21:15:48 UTC (rev 3296) @@ -171,9 +171,10 @@ y : array An N-D array of real values. y's length along the interpolation axis must be equal to the length of x. - kind : str - Specifies the kind of interpolation. At the moment, - only 'linear' and 'cubic' are implemented for now. + kind : str or int + Specifies the kind of interpolation as a string ('linear', + 'nearest', 'zero', 'slinear', 'quadratic, 'cubic') or as an integer + specifying the order of the spline interpolator to use. axis : int Specifies the axis of y along which to interpolate. Interpolation defaults to the last axis of y. @@ -197,8 +198,9 @@ self.bounds_error = bounds_error self.fill_value = fill_value - if kind in ['zero', 'slinear', 'quadratic', 'cubic']: - order = {'zero':0,'slinear':1,'quadratic':2, 'cubic':3}[kind] + if kind in ['zero', 'slinear', 'quadratic', 'cubic', 'nearest']: + order = {'nearest':0, 'zero':0,'slinear':1, + 'quadratic':2, 'cubic':3}[kind] kind = 'spline' elif isinstance(kind, int): order = kind From scipy-svn at scipy.org Thu Aug 30 19:19:31 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 30 Aug 2007 18:19:31 -0500 (CDT) Subject: [Scipy-svn] r3297 - in branches/0.6.x/scipy/linalg: . 
src tests Message-ID: <20070830231931.8F02439C107@new.scipy.org> Author: chris.burns Date: 2007-08-30 18:19:19 -0500 (Thu, 30 Aug 2007) New Revision: 3297 Added: branches/0.6.x/scipy/linalg/src/fblaswrap_veclib_c.c Modified: branches/0.6.x/scipy/linalg/setup.py branches/0.6.x/scipy/linalg/tests/test_blas.py Log: Applied veclib.patch from cdavid re ticket238 Modified: branches/0.6.x/scipy/linalg/setup.py =================================================================== --- branches/0.6.x/scipy/linalg/setup.py 2007-08-30 21:15:48 UTC (rev 3296) +++ branches/0.6.x/scipy/linalg/setup.py 2007-08-30 23:19:19 UTC (rev 3297) @@ -23,6 +23,23 @@ #-------------------- +def needs_cblas_wrapper(info): + """Returns true if needs c wrapper around cblas for calling from + fortran.""" + import re + r_accel = re.compile("Accelerate") + r_vec = re.compile("vecLib") + res = False + try: + tmpstr = info['extra_link_args'] + for i in tmpstr: + if r_accel.search(i) or r_vec.search(i): + res = True + except KeyError: + pass + + return res + def configuration(parent_package='',top_path=None): from numpy.distutils.system_info import get_info, NotFoundError @@ -98,16 +115,28 @@ return target # fblas: - config.add_extension('fblas', - sources = [generate_pyf, - join('src','fblaswrap.f')], - depends = ['generic_fblas.pyf', - 'generic_fblas1.pyf', - 'generic_fblas2.pyf', - 'generic_fblas3.pyf', - 'interface_gen.py'], - extra_info = lapack_opt - ) + if needs_cblas_wrapper(lapack_opt): + config.add_extension('fblas', + sources = [generate_pyf, + join('src','fblaswrap_veclib_c.c')], + depends = ['generic_fblas.pyf', + 'generic_fblas1.pyf', + 'generic_fblas2.pyf', + 'generic_fblas3.pyf', + 'interface_gen.py'], + extra_info = lapack_opt + ) + else: + config.add_extension('fblas', + sources = [generate_pyf, + join('src','fblaswrap.f')], + depends = ['generic_fblas.pyf', + 'generic_fblas1.pyf', + 'generic_fblas2.pyf', + 'generic_fblas3.pyf', + 'interface_gen.py'], + extra_info = lapack_opt + ) # cblas: config.add_extension('cblas', Added: branches/0.6.x/scipy/linalg/src/fblaswrap_veclib_c.c =================================================================== --- branches/0.6.x/scipy/linalg/src/fblaswrap_veclib_c.c 2007-08-30 21:15:48 UTC (rev 3296) +++ branches/0.6.x/scipy/linalg/src/fblaswrap_veclib_c.c 2007-08-30 23:19:19 UTC (rev 3297) @@ -0,0 +1,22 @@ +#include <vecLib/vecLib.h> + +//#define WRAP_F77(a) wcblas_##a##_ +#define WRAP_F77(a) w##a##_ +void WRAP_F77(cdotc)(complex *dotc, const int *N, const complex *X, const int *incX, const complex *Y, const int *incY) +{ + cblas_cdotc_sub(*N, X, *incX, Y, *incY, dotc); +} + +void WRAP_F77(cdotu)(complex* dotu, const int *N, const complex *X, const int *incX, const complex *Y, const int *incY) +{ + cblas_cdotu_sub(*N, X, *incX, Y, *incY, dotu); +} + +void WRAP_F77(zdotc)(double complex *dotu, const int *N, const double complex *X, const int *incX, const double complex *Y, const int *incY) +{ + cblas_zdotc_sub(*N, X, *incX, Y, *incY, dotu); +} +void WRAP_F77(zdotu)(double complex *dotu, const int *N, const double complex *X, const int *incX, const double complex *Y, const int *incY) +{ + cblas_zdotu_sub(*N, X, *incX, Y, *incY, dotu); +} Property changes on: branches/0.6.x/scipy/linalg/src/fblaswrap_veclib_c.c ___________________________________________________________________ Name: svn:keywords + Id Name: svn:eol-style + native Modified: branches/0.6.x/scipy/linalg/tests/test_blas.py =================================================================== --- 
branches/0.6.x/scipy/linalg/tests/test_blas.py 2007-08-30 21:15:48 UTC (rev 3296) +++ branches/0.6.x/scipy/linalg/tests/test_blas.py 2007-08-30 23:19:19 UTC (rev 3297) @@ -20,6 +20,7 @@ from numpy.testing import * set_package_path() from linalg import fblas +print fblas from linalg import cblas restore_path() @@ -69,12 +70,18 @@ f = getattr(fblas,p+'dot',None) if f is None: continue assert_almost_equal(f([3,-4,5],[2,5,1]),-9) + def check_complex_dotu(self): for p in 'cz': f = getattr(fblas,p+'dotu',None) if f is None: continue assert_almost_equal(f([3j,-4,3-4j],[2,3,1]),-9+2j) - f = getattr(fblas,p+'dotc') + + def check_complex_dotc(self): + for p in 'cz': + f = getattr(fblas,p+'dotc',None) + if f is None: continue assert_almost_equal(f([3j,-4,3-4j],[2,3j,1]),3-14j) + def check_nrm2(self): for p in 'sd': f = getattr(fblas,p+'nrm2',None)
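One last note on the test_blas.py changes in r3295/r3297 above: the suite iterates over the BLAS type-prefix characters (s and d for real, c and z for complex) and fetches each routine with getattr and a None default, so that routines a particular build does not expose are skipped rather than failing. The idiom in isolation (a standalone sketch with a stand-in module object, not the scipy test code):

class _FakeBlas:
    # pretend this build only exposes the double-precision routine
    def ddot(self, x, y):
        return sum(a * b for a, b in zip(x, y))

fblas = _FakeBlas()
for p in 'sd':
    f = getattr(fblas, p + 'dot', None)
    if f is None:       # routine not built: skip it, as the scipy tests do
        continue
    assert f([3, -4, 5], [2, 5, 1]) == -9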