[Scipy-svn] r4511 - in branches/refactor_fft: . scipy scipy/cluster scipy/cluster/src scipy/cluster/tests scipy/fftpack scipy/integrate scipy/interpolate scipy/interpolate/tests scipy/io scipy/io/matlab scipy/io/matlab/tests scipy/io/tests scipy/lib/blas scipy/lib/lapack scipy/linalg scipy/ndimage scipy/ndimage/src/register scipy/ndimage/tests scipy/odr scipy/optimize scipy/sandbox scipy/sandbox/mkufunc scipy/sandbox/mkufunc/examples scipy/sandbox/mkufunc/mkufunc scipy/signal scipy/sparse scipy/sparse/linalg/dsolve scipy/sparse/linalg/dsolve/umfpack scipy/sparse/linalg/eigen/arpack scipy/sparse/linalg/eigen/lobpcg scipy/sparse/linalg/eigen/lobpcg/tests scipy/sparse/linalg/isolve scipy/sparse/sparsetools scipy/sparse/tests scipy/special scipy/special/tests scipy/stats scipy/stats/models scipy/stats/models/tests scipy/stats/tests scipy/stsci/convolve scipy/stsci/image scipy/testing scipy/weave/tests

scipy-svn at scipy.org
Tue Jul 1 00:55:37 EDT 2008


Author: cdavid
Date: 2008-06-30 23:52:00 -0500 (Mon, 30 Jun 2008)
New Revision: 4511

Added:
   branches/refactor_fft/scipy/cluster/SConscript
   branches/refactor_fft/scipy/cluster/SConstruct
   branches/refactor_fft/scipy/cluster/distance.py
   branches/refactor_fft/scipy/cluster/src/common.h
   branches/refactor_fft/scipy/cluster/src/distance.c
   branches/refactor_fft/scipy/cluster/src/distance.h
   branches/refactor_fft/scipy/cluster/src/distance_wrap.c
   branches/refactor_fft/scipy/cluster/tests/inconsistent-complete-tdist-depth-1.txt
   branches/refactor_fft/scipy/cluster/tests/inconsistent-complete-tdist-depth-2.txt
   branches/refactor_fft/scipy/cluster/tests/inconsistent-complete-tdist-depth-3.txt
   branches/refactor_fft/scipy/cluster/tests/inconsistent-complete-tdist-depth-4.txt
   branches/refactor_fft/scipy/cluster/tests/inconsistent-single-tdist-depth-0.txt
   branches/refactor_fft/scipy/cluster/tests/inconsistent-single-tdist-depth-1.txt
   branches/refactor_fft/scipy/cluster/tests/inconsistent-single-tdist-depth-2.txt
   branches/refactor_fft/scipy/cluster/tests/inconsistent-single-tdist-depth-3.txt
   branches/refactor_fft/scipy/cluster/tests/inconsistent-single-tdist-depth-4.txt
   branches/refactor_fft/scipy/cluster/tests/inconsistent-single-tdist-depth-5.txt
   branches/refactor_fft/scipy/cluster/tests/inconsistent-single-tdist.txt
   branches/refactor_fft/scipy/cluster/tests/inconsistent-weighted-tdist-depth-1.txt
   branches/refactor_fft/scipy/cluster/tests/inconsistent-weighted-tdist-depth-2.txt
   branches/refactor_fft/scipy/cluster/tests/inconsistent-weighted-tdist-depth-3.txt
   branches/refactor_fft/scipy/cluster/tests/inconsistent-weighted-tdist-depth-4.txt
   branches/refactor_fft/scipy/cluster/tests/linkage-X.txt
   branches/refactor_fft/scipy/cluster/tests/linkage-average-tdist.txt
   branches/refactor_fft/scipy/cluster/tests/linkage-complete-tdist.txt
   branches/refactor_fft/scipy/cluster/tests/linkage-single-tdist.txt
   branches/refactor_fft/scipy/cluster/tests/linkage-weighted-tdist.txt
   branches/refactor_fft/scipy/cluster/tests/random-bool-data.txt
   branches/refactor_fft/scipy/fftpack/SConscript
   branches/refactor_fft/scipy/fftpack/SConstruct
   branches/refactor_fft/scipy/integrate/SConscript
   branches/refactor_fft/scipy/integrate/SConstruct
   branches/refactor_fft/scipy/interpolate/SConscript
   branches/refactor_fft/scipy/interpolate/SConstruct
   branches/refactor_fft/scipy/io/SConscript
   branches/refactor_fft/scipy/io/SConstruct
   branches/refactor_fft/scipy/lib/blas/SConscript
   branches/refactor_fft/scipy/lib/blas/SConstruct
   branches/refactor_fft/scipy/lib/lapack/SConscript
   branches/refactor_fft/scipy/lib/lapack/SConstruct
   branches/refactor_fft/scipy/linalg/SConscript
   branches/refactor_fft/scipy/linalg/SConstruct
   branches/refactor_fft/scipy/ndimage/SConscript
   branches/refactor_fft/scipy/ndimage/SConstruct
   branches/refactor_fft/scipy/ndimage/tests/test_registration.py
   branches/refactor_fft/scipy/ndimage/tests/test_regression.py
   branches/refactor_fft/scipy/odr/SConscript
   branches/refactor_fft/scipy/odr/SConstruct
   branches/refactor_fft/scipy/optimize/SConscript
   branches/refactor_fft/scipy/optimize/SConstruct
   branches/refactor_fft/scipy/sandbox/mkufunc/
   branches/refactor_fft/scipy/sandbox/mkufunc/README.txt
   branches/refactor_fft/scipy/sandbox/mkufunc/TODO.txt
   branches/refactor_fft/scipy/sandbox/mkufunc/docs/
   branches/refactor_fft/scipy/sandbox/mkufunc/examples/
   branches/refactor_fft/scipy/sandbox/mkufunc/examples/benchmark.py
   branches/refactor_fft/scipy/sandbox/mkufunc/examples/primes.py
   branches/refactor_fft/scipy/sandbox/mkufunc/mkufunc/
   branches/refactor_fft/scipy/sandbox/mkufunc/mkufunc/__init__.py
   branches/refactor_fft/scipy/sandbox/mkufunc/mkufunc/api.py
   branches/refactor_fft/scipy/sandbox/mkufunc/mkufunc/driver.py
   branches/refactor_fft/scipy/sandbox/mkufunc/mkufunc/interactive.py
   branches/refactor_fft/scipy/sandbox/mkufunc/mkufunc/pypy.h
   branches/refactor_fft/scipy/sandbox/mkufunc/mkufunc/test_func_hash.py
   branches/refactor_fft/scipy/sandbox/mkufunc/mkufunc/test_mkufunc.py
   branches/refactor_fft/scipy/sandbox/mkufunc/setup.py
   branches/refactor_fft/scipy/signal/SConscript
   branches/refactor_fft/scipy/signal/SConstruct
   branches/refactor_fft/scipy/sparse/.svnignore
   branches/refactor_fft/scipy/sparse/linalg/dsolve/SConscript
   branches/refactor_fft/scipy/sparse/linalg/dsolve/SConstruct
   branches/refactor_fft/scipy/sparse/linalg/dsolve/umfpack/SConscript
   branches/refactor_fft/scipy/sparse/linalg/dsolve/umfpack/SConstruct
   branches/refactor_fft/scipy/sparse/linalg/eigen/arpack/SConscript
   branches/refactor_fft/scipy/sparse/linalg/eigen/arpack/SConstruct
   branches/refactor_fft/scipy/sparse/linalg/isolve/SConscript
   branches/refactor_fft/scipy/sparse/linalg/isolve/SConstruct
   branches/refactor_fft/scipy/sparse/sparsetools/SConscript
   branches/refactor_fft/scipy/sparse/sparsetools/SConstruct
   branches/refactor_fft/scipy/special/SConscript
   branches/refactor_fft/scipy/special/SConstruct
   branches/refactor_fft/scipy/stats/SConscript
   branches/refactor_fft/scipy/stats/SConstruct
   branches/refactor_fft/scipy/stsci/convolve/SConscript
   branches/refactor_fft/scipy/stsci/convolve/SConstruct
   branches/refactor_fft/scipy/stsci/image/SConscript
   branches/refactor_fft/scipy/stsci/image/SConstruct
Removed:
   branches/refactor_fft/scipy/cluster/SConstruct
   branches/refactor_fft/scipy/integrate/SConstruct
   branches/refactor_fft/scipy/interpolate/SConstruct
   branches/refactor_fft/scipy/io/SConstruct
   branches/refactor_fft/scipy/lib/blas/SConstruct
   branches/refactor_fft/scipy/lib/lapack/SConstruct
   branches/refactor_fft/scipy/linalg/SConstruct
   branches/refactor_fft/scipy/ndimage/SConstruct
   branches/refactor_fft/scipy/odr/SConstruct
   branches/refactor_fft/scipy/optimize/SConstruct
   branches/refactor_fft/scipy/sandbox/mkufunc/README.txt
   branches/refactor_fft/scipy/sandbox/mkufunc/TODO.txt
   branches/refactor_fft/scipy/sandbox/mkufunc/docs/
   branches/refactor_fft/scipy/sandbox/mkufunc/examples/
   branches/refactor_fft/scipy/sandbox/mkufunc/examples/benchmark.py
   branches/refactor_fft/scipy/sandbox/mkufunc/examples/primes.py
   branches/refactor_fft/scipy/sandbox/mkufunc/mkufunc/
   branches/refactor_fft/scipy/sandbox/mkufunc/mkufunc/__init__.py
   branches/refactor_fft/scipy/sandbox/mkufunc/mkufunc/api.py
   branches/refactor_fft/scipy/sandbox/mkufunc/mkufunc/driver.py
   branches/refactor_fft/scipy/sandbox/mkufunc/mkufunc/interactive.py
   branches/refactor_fft/scipy/sandbox/mkufunc/mkufunc/pypy.h
   branches/refactor_fft/scipy/sandbox/mkufunc/mkufunc/test_func_hash.py
   branches/refactor_fft/scipy/sandbox/mkufunc/mkufunc/test_mkufunc.py
   branches/refactor_fft/scipy/sandbox/mkufunc/setup.py
   branches/refactor_fft/scipy/signal/SConstruct
   branches/refactor_fft/scipy/sparse/linalg/dsolve/SConstruct
   branches/refactor_fft/scipy/sparse/linalg/dsolve/umfpack/SConstruct
   branches/refactor_fft/scipy/sparse/linalg/eigen/arpack/SConstruct
   branches/refactor_fft/scipy/sparse/linalg/isolve/SConstruct
   branches/refactor_fft/scipy/sparse/sparsetools/SConstruct
   branches/refactor_fft/scipy/special/SConstruct
   branches/refactor_fft/scipy/splinalg/
   branches/refactor_fft/scipy/stats/SConstruct
   branches/refactor_fft/scipy/stsci/convolve/SConstruct
   branches/refactor_fft/scipy/stsci/image/SConstruct
   branches/refactor_fft/scipy/testing/nulltester.py
Modified:
   branches/refactor_fft/
   branches/refactor_fft/INSTALL.txt
   branches/refactor_fft/THANKS.txt
   branches/refactor_fft/scipy/__init__.py
   branches/refactor_fft/scipy/cluster/__init__.py
   branches/refactor_fft/scipy/cluster/hierarchy.py
   branches/refactor_fft/scipy/cluster/info.py
   branches/refactor_fft/scipy/cluster/setup.py
   branches/refactor_fft/scipy/cluster/src/hierarchy.c
   branches/refactor_fft/scipy/cluster/src/hierarchy.h
   branches/refactor_fft/scipy/cluster/src/hierarchy_wrap.c
   branches/refactor_fft/scipy/cluster/tests/test_hierarchy.py
   branches/refactor_fft/scipy/cluster/tests/test_vq.py
   branches/refactor_fft/scipy/cluster/vq.py
   branches/refactor_fft/scipy/integrate/__odepack.h
   branches/refactor_fft/scipy/integrate/odepack.py
   branches/refactor_fft/scipy/interpolate/fitpack.py
   branches/refactor_fft/scipy/interpolate/fitpack.pyf
   branches/refactor_fft/scipy/interpolate/fitpack2.py
   branches/refactor_fft/scipy/interpolate/interpolate.py
   branches/refactor_fft/scipy/interpolate/rbf.py
   branches/refactor_fft/scipy/interpolate/tests/test_fitpack.py
   branches/refactor_fft/scipy/interpolate/tests/test_interpolate.py
   branches/refactor_fft/scipy/io/array_import.py
   branches/refactor_fft/scipy/io/matlab/miobase.py
   branches/refactor_fft/scipy/io/matlab/tests/test_mio.py
   branches/refactor_fft/scipy/io/mmio.py
   branches/refactor_fft/scipy/io/tests/test_mmio.py
   branches/refactor_fft/scipy/io/wavfile.py
   branches/refactor_fft/scipy/lib/blas/scons_support.py
   branches/refactor_fft/scipy/lib/lapack/scons_support.py
   branches/refactor_fft/scipy/linalg/scons_support.py
   branches/refactor_fft/scipy/ndimage/_registration.py
   branches/refactor_fft/scipy/ndimage/src/register/Register_EXT.c
   branches/refactor_fft/scipy/ndimage/src/register/Register_IMPL.c
   branches/refactor_fft/scipy/ndimage/tests/test_segment.py
   branches/refactor_fft/scipy/optimize/anneal.py
   branches/refactor_fft/scipy/optimize/minpack.py
   branches/refactor_fft/scipy/setup.py
   branches/refactor_fft/scipy/setupscons.py
   branches/refactor_fft/scipy/signal/signaltools.py
   branches/refactor_fft/scipy/sparse/base.py
   branches/refactor_fft/scipy/sparse/compressed.py
   branches/refactor_fft/scipy/sparse/construct.py
   branches/refactor_fft/scipy/sparse/linalg/eigen/arpack/arpack.py
   branches/refactor_fft/scipy/sparse/linalg/eigen/arpack/speigs.py
   branches/refactor_fft/scipy/sparse/linalg/eigen/lobpcg/lobpcg.py
   branches/refactor_fft/scipy/sparse/linalg/eigen/lobpcg/tests/test_lobpcg.py
   branches/refactor_fft/scipy/sparse/linalg/isolve/utils.py
   branches/refactor_fft/scipy/sparse/sparsetools/coo.h
   branches/refactor_fft/scipy/sparse/tests/test_base.py
   branches/refactor_fft/scipy/special/_cephesmodule.c
   branches/refactor_fft/scipy/special/specfun.pyf
   branches/refactor_fft/scipy/special/specfun_wrappers.c
   branches/refactor_fft/scipy/special/specfun_wrappers.h
   branches/refactor_fft/scipy/special/tests/test_basic.py
   branches/refactor_fft/scipy/stats/models/formula.py
   branches/refactor_fft/scipy/stats/models/tests/test_bspline.py
   branches/refactor_fft/scipy/stats/mstats.py
   branches/refactor_fft/scipy/stats/stats.py
   branches/refactor_fft/scipy/stats/tests/test_stats.py
   branches/refactor_fft/scipy/testing/__init__.py
   branches/refactor_fft/scipy/testing/decorators.py
   branches/refactor_fft/scipy/testing/nosetester.py
   branches/refactor_fft/scipy/testing/pkgtester.py
   branches/refactor_fft/scipy/weave/tests/test_wx_spec.py
Log:
Merged revisions 4379-4510 via svnmerge from 
http://svn.scipy.org/svn/scipy/trunk

........
  r4389 | chris.burns | 2008-05-29 05:17:44 +0900 (Thu, 29 May 2008) | 1 line
  
  Fix nose call to run test.
........
  r4390 | chris.burns | 2008-05-29 05:34:43 +0900 (Thu, 29 May 2008) | 1 line
  
  Modify imports to match scipy standards.
........
  r4391 | oliphant | 2008-05-29 08:46:58 +0900 (Thu, 29 May 2008) | 1 line
  
  Add filtfilt function to scipy.  Make fixed_point into a vectorized function.
........
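
  A minimal sketch of the new zero-phase filter, assuming the usual
  scipy.signal names (butter for the design, filtfilt for the
  forward-backward pass):

      import numpy as np
      from scipy.signal import butter, filtfilt

      t = np.linspace(0.0, 1.0, 500)
      x = np.sin(2 * np.pi * 5 * t) + 0.3 * np.random.randn(500)

      b, a = butter(4, 0.1)    # 4th-order low-pass, cutoff 0.1 x Nyquist
      y = filtfilt(b, a, x)    # filter forward then backward: zero phase
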
  r4392 | tom.waite | 2008-05-29 08:58:51 +0900 (Thu, 29 May 2008) | 1 line
  
  Added lower triangular tensor product for NIPY normalize
........
  r4393 | tom.waite | 2008-05-29 08:59:15 +0900 (Thu, 29 May 2008) | 1 line
  
  Added lower triangular tensor product for NIPY normalize
........
  r4394 | tom.waite | 2008-05-30 10:21:37 +0900 (Fri, 30 May 2008) | 1 line
  
  removed the integer truncation of image resample gradients. 
........
  r4395 | damian.eads | 2008-05-30 11:23:09 +0900 (Fri, 30 May 2008) | 1 line
  
  Added some more test data.
........
  r4396 | damian.eads | 2008-05-31 04:36:47 +0900 (Sat, 31 May 2008) | 1 line
  
  Added some initial tests for hierarchy.linkage and hierarchy.squareform
........
  r4397 | damian.eads | 2008-05-31 05:30:15 +0900 (Sat, 31 May 2008) | 1 line
  
  Fixed bug in hierarchy.is_valid_linkage. Wrote tests for hierarchy.numobs.
........
  r4398 | damian.eads | 2008-05-31 05:57:27 +0900 (Sat, 31 May 2008) | 1 line
  
  Fixed a boundary condition bug in hierarchy.squareform with 0-length pdist arrays.
........
  r4399 | damian.eads | 2008-05-31 06:38:29 +0900 (Sat, 31 May 2008) | 1 line
  
  Added tests for hierarchy.{yule,sokalsneath,matching,jaccard}
........
  r4400 | damian.eads | 2008-05-31 06:57:10 +0900 (Sat, 31 May 2008) | 1 line
  
  Polishing some tests.
........
  r4401 | oliphant | 2008-05-31 08:51:35 +0900 (Sat, 31 May 2008) | 1 line
  
  Re-factor scipy.optimize.fixed_point to handle vector and scalar portions separately.
........
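
  A minimal sketch of the vectorized behavior, assuming the
  scipy.optimize.fixed_point signature (func, x0, args=()):

      import numpy as np
      from scipy import optimize

      # fixed_point finds x such that func(x) == x.
      func = lambda x, c1, c2: np.sqrt(c1 / (x + c2))
      c1 = np.array([10.0, 12.0])
      c2 = np.array([3.0, 5.0])

      # A vector of starting guesses yields a vector of fixed points.
      x_star = optimize.fixed_point(func, [1.2, 1.3], args=(c1, c2))
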
  r4402 | oliphant | 2008-05-31 09:15:46 +0900 (Sat, 31 May 2008) | 1 line
  
  Fix wavfile reading for 64-bit platforms.
........
  r4403 | wnbell | 2008-06-03 11:00:08 +0900 (Tue, 03 Jun 2008) | 3 lines
  
  fixed missing import of numpy.multiply
  resolves ticket #680
........
  r4404 | damian.eads | 2008-06-03 13:41:10 +0900 (Tue, 03 Jun 2008) | 1 line
  
  Added small boolean observation vector data set for testing boolean distance metrics.
........
  r4405 | damian.eads | 2008-06-03 15:50:49 +0900 (Tue, 03 Jun 2008) | 1 line
  
  Removed unnecessary imports in hierarchy and refactored its code. Wrote more tests.
........
  r4406 | damian.eads | 2008-06-03 16:35:10 +0900 (Tue, 03 Jun 2008) | 1 line
  
  Removed unnecessary imports in hierarchy and refactored its code. Wrote more tests.
........
  r4407 | damian.eads | 2008-06-03 16:48:54 +0900 (Tue, 03 Jun 2008) | 1 line
  
  Convert non-double arrays to double so that hierarchy functions can be called with more input types.
........
  r4408 | damian.eads | 2008-06-03 17:51:31 +0900 (Tue, 03 Jun 2008) | 1 line
  
  Fixed minor dtype conversion bug in hierarchy.linkage.
........
  r4409 | jarrod.millman | 2008-06-04 00:55:06 +0900 (Wed, 04 Jun 2008) | 2 lines
  
  updated 1.0.5 to 1.1.0
........
  r4410 | damian.eads | 2008-06-04 02:50:47 +0900 (Wed, 04 Jun 2008) | 1 line
  
  Removed more unnecessary imports in hierarchy, namely sys and math. Now uses np.ndarray instead of _array_type for type checking.
........
  r4411 | damian.eads | 2008-06-04 02:53:23 +0900 (Wed, 04 Jun 2008) | 1 line
  
  Changed allocation of result arrays in hierarchy so np.int is used instead of np.int32. Will assume np.int corresponds to the int data type in C on the host machine.
........
  r4412 | damian.eads | 2008-06-04 17:08:30 +0900 (Wed, 04 Jun 2008) | 1 line
  
  Fixed issue with cosine.
........
  r4413 | pierregm | 2008-06-05 01:21:10 +0900 (Thu, 05 Jun 2008) | 2 lines
  
  * Fixed some documentation
  * Fixed theilslopes
........
  r4414 | wnbell | 2008-06-06 15:29:48 +0900 (Fri, 06 Jun 2008) | 2 lines
  
  minor cleanup of lobpcg
........
  r4415 | wnbell | 2008-06-06 15:54:44 +0900 (Fri, 06 Jun 2008) | 2 lines
  
  renamed lobpcg parameters to better conform to other iterative methods
........
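
  After the rename a call reads like the other iterative solvers
  (operator first, then the block of initial guesses). A sketch,
  assuming the package path at this revision exposes lobpcg and that
  the renamed keywords include tol and maxiter:

      import numpy as np
      from scipy.sparse.linalg.eigen.lobpcg import lobpcg

      n = 100
      A = np.diag(np.arange(1.0, n + 1))   # simple diagonal test matrix
      X = np.random.rand(n, 3)             # block of 3 initial guesses

      w, v = lobpcg(A, X, tol=1e-8, maxiter=40)   # 3 extreme eigenpairs
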
  r4416 | wnbell | 2008-06-07 17:13:02 +0900 (Sat, 07 Jun 2008) | 2 lines
  
  added coo_matvec
........
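
  A small sketch, assuming the new routine backs the ordinary product
  of a COO matrix with a dense vector (no CSR conversion needed):

      import numpy as np
      from scipy.sparse import coo_matrix

      row = np.array([0, 1, 2])
      col = np.array([1, 2, 0])
      val = np.array([1.0, 2.0, 3.0])
      A = coo_matrix((val, (row, col)), shape=(3, 3))

      y = A * np.ones(3)    # matrix-vector product directly in COO form
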
  r4417 | damian.eads | 2008-06-09 14:55:44 +0900 (Mon, 09 Jun 2008) | 1 line
  
  Moved distance functions to new module.
........
  r4418 | damian.eads | 2008-06-09 14:59:39 +0900 (Mon, 09 Jun 2008) | 1 line
  
  Added information on distance module in cluster/info.py
........
  r4419 | damian.eads | 2008-06-09 15:01:51 +0900 (Mon, 09 Jun 2008) | 1 line
  
  Added extension building code for distance_wrap to cluster/SConstruct
........
  r4420 | damian.eads | 2008-06-09 15:05:10 +0900 (Mon, 09 Jun 2008) | 1 line
  
  Added import to cluster/__init__.py. Removed pdist import from hierarchy.
........
  r4421 | tom.waite | 2008-06-10 07:42:02 +0900 (Tue, 10 Jun 2008) | 2 lines
  
  bug fixes
........
  r4422 | wnbell | 2008-06-10 12:40:56 +0900 (Tue, 10 Jun 2008) | 3 lines
  
  Added patch by Andrew Straw to fix MATLAB support on gzip files
  resolves ticket #682
........
  r4423 | rkern | 2008-06-10 15:35:31 +0900 (Tue, 10 Jun 2008) | 1 line
  
  Correct syntax typo. Fix a few undefined references while I'm at it, too. pyflakes rules.
........
  r4424 | rkern | 2008-06-11 19:47:02 +0900 (Wed, 11 Jun 2008) | 1 line
  
  Use local imports to avoid importing nose until tests are actually requested. This speeds up the load-time of scipy.
........
  r4425 | cdavid | 2008-06-12 00:36:30 +0900 (Thu, 12 Jun 2008) | 1 line
  
  Remove executable bit for test_stats.py
........
  r4426 | cdavid | 2008-06-12 00:38:20 +0900 (Thu, 12 Jun 2008) | 1 line
  
  Put nanmean, nanstd and nanmedian into scipy.stats namespace + adapt tests.
........
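
  Usage after the move, assuming the three functions keep their
  NaN-ignoring semantics:

      import numpy as np
      from scipy import stats

      x = np.array([1.0, 2.0, np.nan, 4.0])
      stats.nanmean(x)      # 2.333..., the NaN is ignored
      stats.nanstd(x)
      stats.nanmedian(x)    # 2.0
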
  r4427 | cdavid | 2008-06-12 19:07:55 +0900 (Thu, 12 Jun 2008) | 1 line
  
  Move all scons SConstruct to SConscript, for upcoming adaptation to the new numscons build_dir architecture.
........
  r4428 | cdavid | 2008-06-12 19:14:16 +0900 (Thu, 12 Jun 2008) | 1 line
  
  Add SConstruct file for scipy.cluster.
........
  r4429 | cdavid | 2008-06-12 19:16:21 +0900 (Thu, 12 Jun 2008) | 1 line
  
  Add SConstruct files for all packages built with the scons script.
........
  r4430 | cdavid | 2008-06-12 19:18:28 +0900 (Thu, 12 Jun 2008) | 1 line
  
  Rename NumpyPythonExtension to DistutilsPythonExtension.
........
  r4431 | cdavid | 2008-06-12 19:20:52 +0900 (Thu, 12 Jun 2008) | 1 line
  
  Rename the numpyf2py scons tool to f2py; numpyf2py is deprecated.
........
  r4432 | cdavid | 2008-06-12 19:23:33 +0900 (Thu, 12 Jun 2008) | 1 line
  
  Rename NumpyStaticExtLibrary to DistutilsStaticExtLibrary.
........
  r4433 | cdavid | 2008-06-12 19:25:25 +0900 (Thu, 12 Jun 2008) | 1 line
  
  Fix libpath issue with new scons build_dir.
........
  r4434 | cdavid | 2008-06-12 19:38:29 +0900 (Thu, 12 Jun 2008) | 1 line
  
  Replace build_dir with the current directory when used as a LIBPATH.
........
  r4435 | cdavid | 2008-06-12 19:45:57 +0900 (Thu, 12 Jun 2008) | 1 line
  
  Do not play with build_dir anymore in emitters.
........
  r4436 | cdavid | 2008-06-12 19:47:48 +0900 (Thu, 12 Jun 2008) | 1 line
  
  Remove last build_dir uses.
........
  r4437 | cdavid | 2008-06-12 19:58:13 +0900 (Thu, 12 Jun 2008) | 1 line
  
  Replace deprecated NumpyFrom*Template builders with their new names.
........
  r4438 | cdavid | 2008-06-12 19:59:41 +0900 (Thu, 12 Jun 2008) | 1 line
  
  Replace deprecated NumpyF2py builder with its new name.
........
  r4439 | cdavid | 2008-06-12 20:23:17 +0900 (Thu, 12 Jun 2008) | 1 line
  
  Fix some include paths related to scons build_dir arch changes.
........
  r4440 | cdavid | 2008-06-12 20:26:55 +0900 (Thu, 12 Jun 2008) | 1 line
  
  Use Glob instead of NumpyGlob, which is not needed anymore.
........
  r4441 | wnbell | 2008-06-13 06:39:52 +0900 (Fri, 13 Jun 2008) | 3 lines
  
  fixed bug reported by James Philbin
  http://thread.gmane.org/gmane.comp.python.scientific.user/16720
........
  r4442 | wnbell | 2008-06-13 07:09:16 +0900 (Fri, 13 Jun 2008) | 2 lines
  
  removed splinalg
........
  r4443 | wnbell | 2008-06-13 07:14:49 +0900 (Fri, 13 Jun 2008) | 2 lines
  
  silenced ARPACK debugging output
........
  r4444 | wnbell | 2008-06-13 12:08:49 +0900 (Fri, 13 Jun 2008) | 2 lines
  
  fix problem with writing CSR/CSC matrices in MatrixMarket format
........
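
  A round-trip sketch of the fixed path, using the scipy.io
  mmwrite/mmread pair:

      from scipy.io import mmwrite, mmread
      from scipy.sparse import csr_matrix

      A = csr_matrix([[1.0, 0.0], [0.0, 2.0]])
      mmwrite('A.mtx', A)    # CSR/CSC input now writes correctly
      B = mmread('A.mtx')    # read back as a sparse (COO) matrix
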
  r4445 | wnbell | 2008-06-13 12:16:04 +0900 (Fri, 13 Jun 2008) | 2 lines
  
  add sparse ignore filter
........
  r4446 | rkern | 2008-06-17 10:19:04 +0900 (Tue, 17 Jun 2008) | 1 line
  
  BUG: Ensure that no subpackage name is added to scipy.__all__.
........
  r4447 | tom.waite | 2008-06-19 04:04:15 +0900 (Thu, 19 Jun 2008) | 1 line
  
  Parameter checking. Replace c-extension integrated histogram thresholding with pure numpy version.
........
  r4448 | tom.waite | 2008-06-19 07:15:03 +0900 (Thu, 19 Jun 2008) | 1 line
  
  added for testing registration
........
  r4449 | tom.waite | 2008-06-19 07:36:32 +0900 (Thu, 19 Jun 2008) | 1 line
  
  remove demo methods which go to nipy registration
........
  r4450 | tom.waite | 2008-06-19 08:13:03 +0900 (Thu, 19 Jun 2008) | 1 line
  
  fixed axis bug in build_fwhm method
........
  r4455 | wnbell | 2008-06-21 18:04:55 +0900 (Sat, 21 Jun 2008) | 3 lines
  
  should fix ticket #611
  the code seemed to be capturing the wrong Exception type
........
  r4456 | cdavid | 2008-06-21 20:28:01 +0900 (Sat, 21 Jun 2008) | 1 line
  
  No splinalg anymore, remove it from scons build.
........
  r4457 | cdavid | 2008-06-22 01:48:48 +0900 (Sun, 22 Jun 2008) | 1 line
  
  Add regression test for #413.
........
  r4458 | cdavid | 2008-06-23 00:14:43 +0900 (Mon, 23 Jun 2008) | 1 line
  
  Do not run bspline tests if _bspline is not available.
........
  r4459 | cdavid | 2008-06-23 00:45:05 +0900 (Mon, 23 Jun 2008) | 1 line
  
  Fix weave test: conditionally import wx to avoid ImportError on platforms without wx.
........
  r4460 | cdavid | 2008-06-23 01:25:32 +0900 (Mon, 23 Jun 2008) | 1 line
  
  Add a note on CFLAGS and co.
........
  r4461 | cdavid | 2008-06-23 01:27:28 +0900 (Mon, 23 Jun 2008) | 1 line
  
  Revert accidentally committed debug print statement.
........
  r4462 | cdavid | 2008-06-23 22:57:33 +0900 (Mon, 23 Jun 2008) | 1 line
  
  Flag some memory hungry tests as slow.
........
  r4463 | cdavid | 2008-06-23 22:58:24 +0900 (Mon, 23 Jun 2008) | 1 line
  
  Do not run wx tests if wx not found.
........
  r4464 | cdavid | 2008-06-23 23:30:58 +0900 (Mon, 23 Jun 2008) | 1 line
  
  Maybe I will get this right at some point: set DONOTRUN to False by default to decide whether or not to run some weave tests.
........
  r4465 | cdavid | 2008-06-23 23:37:03 +0900 (Mon, 23 Jun 2008) | 1 line
  
  Fix #505 in scipy.cluster: unhelpful message when size 0 arrays are input.
........
  r4466 | ptvirtan | 2008-06-24 05:53:22 +0900 (Tue, 24 Jun 2008) | 1 line
  
  Reformat integrate.odeint docstring
........
  r4467 | ptvirtan | 2008-06-24 06:14:11 +0900 (Tue, 24 Jun 2008) | 1 line
  
  Wrap and expose dblint from dfitpack. (Implements #206). Add corresponding tests.
........
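
  A hedged sketch of the newly exposed wrapper; the dfitpack.dblint
  argument order (knots, coefficients, degrees, then integration
  bounds) is assumed from the underlying FITPACK routine:

      import numpy as np
      from scipy import interpolate
      from scipy.interpolate import dfitpack

      # Fit z = x*y on the unit square, then integrate the spline.
      x, y = np.mgrid[0:1:20j, 0:1:20j]
      tck = interpolate.bisplrep(x.ravel(), y.ravel(),
                                 (x * y).ravel(), s=0)

      tx, ty, c, kx, ky = tck
      val = dfitpack.dblint(tx, ty, c, kx, ky, 0.0, 1.0, 0.0, 1.0)
      # val is ~0.25, the exact integral of x*y over [0,1]x[0,1]
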
  r4468 | wnbell | 2008-06-24 09:54:19 +0900 (Tue, 24 Jun 2008) | 2 lines
  
  addresses ticket #659
........
  r4469 | cdavid | 2008-06-24 16:46:46 +0900 (Tue, 24 Jun 2008) | 1 line
  
  Fix #535 with tests.
........
  r4470 | cdavid | 2008-06-24 16:59:43 +0900 (Tue, 24 Jun 2008) | 1 line
  
  Handle bogus number of clusters better + test.
........
  r4471 | cdavid | 2008-06-24 17:01:24 +0900 (Tue, 24 Jun 2008) | 1 line
  
  Fix the problem from the first comment of #535.
........
  r4472 | cdavid | 2008-06-24 17:02:32 +0900 (Tue, 24 Jun 2008) | 1 line
  
  Set regression test to correct number.
........
  r4474 | cdavid | 2008-06-24 19:52:35 +0900 (Tue, 24 Jun 2008) | 1 line
  
  Add test for #8.
........
  r4475 | cdavid | 2008-06-24 20:41:19 +0900 (Tue, 24 Jun 2008) | 1 line
  
  Do not set imxer to an uninitialized value if it is not set by Fortran.
........
  r4476 | wnbell | 2008-06-24 22:53:46 +0900 (Tue, 24 Jun 2008) | 2 lines
  
  edited a few docstrings
........
  r4477 | cdavid | 2008-06-24 23:23:09 +0900 (Tue, 24 Jun 2008) | 1 line
  
  Add Damian Eads and me to THANKS.txt.
........
  r4478 | rkern | 2008-06-25 02:48:31 +0900 (Wed, 25 Jun 2008) | 1 line
  
  BUG: Allow __del__ to work even when self.file never got constructed. Thanks to Yosef Meller for finding this bug and suggesting the fix. #681
........
  r4479 | ilan | 2008-06-26 00:43:39 +0900 (Thu, 26 Jun 2008) | 1 line
  
  Adding project mkufunc (make-ufunc decorator) to sandbox
........
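
  The idea: compile a scalar Python function into a real numpy ufunc
  (via PyPy's translator) rather than looping in the interpreter.
  numpy's frompyfunc is the interpreted counterpart of the same
  transformation:

      import numpy as np

      def f(x):
          return 4.0 * x * (1.0 - x)

      # One scalar function becomes an elementwise ufunc (1 in, 1 out);
      # mkufunc aims to produce a compiled version of this.
      uf = np.frompyfunc(f, 1, 1)
      uf(np.linspace(0.0, 1.0, 5))
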
  r4480 | ilan | 2008-06-26 00:59:50 +0900 (Thu, 26 Jun 2008) | 2 lines
  
  Testing output files
........
  r4481 | ilan | 2008-06-26 01:12:15 +0900 (Thu, 26 Jun 2008) | 1 line
  
  Added readme file
........
  r4482 | tom.waite | 2008-06-27 03:49:38 +0900 (Fri, 27 Jun 2008) | 1 line
  
  replace parameter vector with inverse affine matrix as input to remap_image method
........
  r4483 | ilan | 2008-06-27 11:14:16 +0900 (Fri, 27 Jun 2008) | 1 line
  
  Added test for speed comparison
........
  r4484 | ilan | 2008-06-27 12:04:21 +0900 (Fri, 27 Jun 2008) | 1 line
  
  Adding documentation
........
  r4485 | ilan | 2008-06-27 22:24:23 +0900 (Fri, 27 Jun 2008) | 1 line
  
  Started unittest suite and other small changes
........
  r4486 | rkern | 2008-06-28 04:37:49 +0900 (Sat, 28 Jun 2008) | 1 line
  
  BUG: fix missing import and 'gausian' typo. Thanks to Lorenzo Bolla for the patch.
........
  r4487 | ilan | 2008-06-28 07:29:20 +0900 (Sat, 28 Jun 2008) | 1 line
  
  Added blitz
........
  r4488 | ilan | 2008-06-28 10:34:29 +0900 (Sat, 28 Jun 2008) | 1 line
  
  started work on typecasting
........
  r4489 | ptvirtan | 2008-06-28 22:54:11 +0900 (Sat, 28 Jun 2008) | 1 line
  
  interpolate: Fix #289. Make interp1d order axes in the result correctly when y.ndim > 2. Fix a bug in splmake that was triggered when y.ndim > 2. Add corresponding tests.
........
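
  The fixed case in brief: interpolating along one axis of a y with
  ndim > 2 now leaves the remaining axes in their original order:

      import numpy as np
      from scipy.interpolate import interp1d

      x = np.arange(5)
      y = np.random.randn(5, 3, 4)    # y.ndim > 2
      f = interp1d(x, y, axis=0)
      f(2.5).shape                    # (3, 4): axes keep their order
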
  r4490 | ilan | 2008-06-28 23:36:29 +0900 (Sat, 28 Jun 2008) | 1 line
  
  Improved dispatch on type in mkufunc and added more tests
........
  r4491 | ilan | 2008-06-29 00:40:16 +0900 (Sun, 29 Jun 2008) | 1 line
  
  Moved some tests
........
  r4492 | ilan | 2008-06-29 04:05:29 +0900 (Sun, 29 Jun 2008) | 1 line
  
  Implemented multiple input arguments
........
  r4493 | ilan | 2008-06-29 05:37:04 +0900 (Sun, 29 Jun 2008) | 1 line
  
  Added more test cases
........
  r4494 | ilan | 2008-06-29 06:17:58 +0900 (Sun, 29 Jun 2008) | 1 line
  
  Improved type checking and added tests to see if TypeError was raised
........
  r4495 | ilan | 2008-06-29 09:45:36 +0900 (Sun, 29 Jun 2008) | 1 line
  
  Refactoring
........
  r4496 | ilan | 2008-06-29 09:47:14 +0900 (Sun, 29 Jun 2008) | 2 lines
  
  Now all in test_mkufunc.py
........
  r4497 | ilan | 2008-06-29 17:47:38 +0900 (Sun, 29 Jun 2008) | 1 line
  
  Implements a function for getting an MD5 from the bytecode of a function
........
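
  A minimal sketch of the idea (not the sandbox implementation): key a
  cache on the MD5 of a function's bytecode, so textually identical
  definitions hash alike:

      import hashlib

      def func_hash(f):
          # Hash the bytecode plus the constants it references.
          code = getattr(f, 'func_code', None) or f.__code__
          return hashlib.md5(code.co_code +
                             repr(code.co_consts).encode('ascii')).hexdigest()

      def f(x): return x + 1
      def g(x): return x + 1
      func_hash(f) == func_hash(g)    # True: same bytecode and constants
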
  r4498 | ilan | 2008-06-29 17:53:48 +0900 (Sun, 29 Jun 2008) | 1 line
  
  Numerous changes, mostly in preparation for caching
........
  r4499 | ilan | 2008-06-29 19:30:22 +0900 (Sun, 29 Jun 2008) | 1 line
  
  Free variables still need work
........
  r4500 | ilan | 2008-06-30 07:21:36 +0900 (Mon, 30 Jun 2008) | 1 line
  
  Implemented caching for pypy-translated functions; temp files are now stored in weave's temp directory.
........
  r4501 | ilan | 2008-06-30 09:07:36 +0900 (Mon, 30 Jun 2008) | 1 line
  
  Moved tests into separate file
........
  r4502 | ilan | 2008-06-30 10:32:41 +0900 (Mon, 30 Jun 2008) | 1 line
  
  Refactoring
........
  r4503 | ilan | 2008-06-30 12:19:29 +0900 (Mon, 30 Jun 2008) | 1 line
  
  PyPy output source now also in support code; no more pypy.c
........
  r4504 | ilan | 2008-06-30 23:08:53 +0900 (Mon, 30 Jun 2008) | 1 line
  
  Added test for function with no args
........
  r4505 | rkern | 2008-07-01 01:21:16 +0900 (Tue, 01 Jul 2008) | 1 line
  
  BUG: handle a broader range of function outputs. Thanks to Yosef Meller for the fix.
........
  r4506 | ilan | 2008-07-01 04:19:55 +0900 (Tue, 01 Jul 2008) | 1 line
  
  Simplified func_hash and put it into the main file
........
  r4507 | ilan | 2008-07-01 09:32:12 +0900 (Tue, 01 Jul 2008) | 1 line
  
  Made a package which uses setuptools
........
  r4508 | ilan | 2008-07-01 09:35:15 +0900 (Tue, 01 Jul 2008) | 1 line
  
  Moved into mkufunc/
........
  r4509 | ilan | 2008-07-01 09:36:02 +0900 (Tue, 01 Jul 2008) | 1 line
  
  Moved into mkufunc/
........
  r4510 | ilan | 2008-07-01 09:46:27 +0900 (Tue, 01 Jul 2008) | 1 line
  
  Test removed
........



Property changes on: branches/refactor_fft
___________________________________________________________________
Name: svnmerge-integrated
   - /branches/build_with_scons:1-3868 /branches/scipy.scons:1-3533 /branches/sparse_build_reduce_mem:1-4005 /branches/testing_cleanup:1-3662 /trunk:1-4378
   + /branches/build_with_scons:1-3868 /branches/scipy.scons:1-3533 /branches/sparse_build_reduce_mem:1-4005 /branches/testing_cleanup:1-3662 /trunk:1-4510

Modified: branches/refactor_fft/INSTALL.txt
===================================================================
--- branches/refactor_fft/INSTALL.txt	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/INSTALL.txt	2008-07-01 04:52:00 UTC (rev 4511)
@@ -32,7 +32,7 @@
 
 __ http://www.python.org
 
-2) NumPy__ 1.0.5 or newer
+2) NumPy__ 1.1.0 or newer
 
    Debian package: python-numpy
 
@@ -178,6 +178,11 @@
 
    It is not necessary to install blas or lapack libraries in addition.
 
+ 4) Compiler flags customization (FFLAGS, CFLAGS, etc...). If you customize
+ CFLAGS and other related flags from the command line or the shell environment,
+ beware that it does not have the standard behavior of appending options.
+ Instead, it overrides the options. As such, you have to give all options in the
+ flag for the build to be successful.
 
 GETTING SCIPY
 =============

Modified: branches/refactor_fft/THANKS.txt
===================================================================
--- branches/refactor_fft/THANKS.txt	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/THANKS.txt	2008-07-01 04:52:00 UTC (rev 4511)
@@ -29,6 +29,8 @@
                        sparse matrix module
 Travis Vaught       -- initial work on stats module clean up
 Jeff Whitaker       -- Mac OS X support
+David Cournapeau    -- bug-fixes, refactor of fftpack and cluster, numscons build.
+Damian Eads         -- hierarchical clustering
 
 
 Testing:

Modified: branches/refactor_fft/scipy/__init__.py
===================================================================
--- branches/refactor_fft/scipy/__init__.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/__init__.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -44,6 +44,7 @@
 # Remove the linalg imported from numpy so that the scipy.linalg package can be
 # imported.
 del linalg
+__all__.remove('linalg')
 
 try:
     from __config__ import show as show_config
@@ -61,6 +62,20 @@
 del _os
 pkgload = PackageLoader()
 pkgload(verbose=SCIPY_IMPORT_VERBOSE,postpone=True)
+
+# Remove subpackage names from __all__ such that they are not imported via 
+# "from scipy import *". This works around a numpy bug present in < 1.2.
+subpackages = """cluster constants fftpack integrate interpolate io lib linalg
+linsolve maxentropy misc ndimage odr optimize sandbox signal sparse special
+splinalg stats stsci testing weave""".split()
+for name in subpackages:
+    try:
+        __all__.remove(name)
+    except ValueError:
+        pass
+
+del name, subpackages
+
 __doc__ += """
 
 Available subpackages
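
The mechanism in the hunk above, in isolation: names pruned from __all__
are simply skipped by "from scipy import *", which is what works around
the pre-1.2 numpy bug. A miniature with illustrative names:

    __all__ = ['mean', 'std', 'linalg', 'fftpack']

    for name in ['linalg', 'fftpack', 'signal']:
        try:
            __all__.remove(name)
        except ValueError:
            pass              # subpackage was never exported

    # __all__ is now ['mean', 'std']; star-imports ignore the rest.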

Copied: branches/refactor_fft/scipy/cluster/SConscript (from rev 4510, trunk/scipy/cluster/SConscript)

Deleted: branches/refactor_fft/scipy/cluster/SConstruct
===================================================================
--- branches/refactor_fft/scipy/cluster/SConstruct	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/cluster/SConstruct	2008-07-01 04:52:00 UTC (rev 4511)
@@ -1,15 +0,0 @@
-# Last Change: Thu Oct 18 09:00 PM 2007 J
-# vim:syntax=python
-from os.path import join
-
-from numpy.distutils.misc_util import get_numpy_include_dirs
-from numscons import GetNumpyEnvironment
-
-env = GetNumpyEnvironment(ARGUMENTS)
-
-env.AppendUnique(CPPPATH = get_numpy_include_dirs())
-env.NumpyPythonExtension('_vq', source = [join('src', 'vq_module.c'),
-                                          join('src', 'vq.c')])
-
-env.NumpyPythonExtension('_hierarchy_wrap', source = [join('src', 'hierarchy_wrap.c'),
-                                          join('src', 'hierarchy.c')])

Copied: branches/refactor_fft/scipy/cluster/SConstruct (from rev 4510, trunk/scipy/cluster/SConstruct)

Modified: branches/refactor_fft/scipy/cluster/__init__.py
===================================================================
--- branches/refactor_fft/scipy/cluster/__init__.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/cluster/__init__.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -4,8 +4,8 @@
 
 from info import __doc__
 
-__all__ = ['vq', 'hierarchy']
+__all__ = ['vq', 'hierarchy', 'distance']
 
-import vq, hierarchy
+import vq, hierarchy, distance
 from scipy.testing.pkgtester import Tester
 test = Tester().test

Copied: branches/refactor_fft/scipy/cluster/distance.py (from rev 4510, trunk/scipy/cluster/distance.py)

Modified: branches/refactor_fft/scipy/cluster/hierarchy.py
===================================================================
--- branches/refactor_fft/scipy/cluster/hierarchy.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/cluster/hierarchy.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -22,9 +22,6 @@
  median             the median/WPGMC algorithm. (alias)
  ward               the Ward/incremental algorithm. (alias)
 
-Distance matrix computation from a collection of raw observation vectors
-
- pdist              computes distances between each observation pair.
  squareform         converts a sq. D.M. to a condensed one and vice versa.
 
 Statistic computations on hierarchies
@@ -47,30 +44,6 @@
  lvlist             a left-to-right traversal of the leaves.
  totree             represents a linkage matrix as a tree object.
 
-Distance functions between two vectors u and v
-
- braycurtis         the Bray-Curtis distance.
- canberra           the Canberra distance.
- chebyshev          the Chebyshev distance.
- cityblock          the Manhattan distance.
- correlation        the Correlation distance.
- cosine             the Cosine distance.
- dice               the Dice dissimilarity (boolean).
- euclidean          the Euclidean distance.
- hamming            the Hamming distance (boolean).
- jaccard            the Jaccard distance (boolean).
- kulsinski          the Kulsinski distance (boolean).
- mahalanobis        the Mahalanobis distance.
- matching           the matching dissimilarity (boolean).
- minkowski          the Minkowski distance.
- rogerstanimoto     the Rogers-Tanimoto dissimilarity (boolean).
- russellrao         the Russell-Rao dissimilarity (boolean).
- seuclidean         the normalized Euclidean distance.
- sokalmichener      the Sokal-Michener dissimilarity (boolean).
- sokalsneath        the Sokal-Sneath dissimilarity (boolean).
- sqeuclidean        the squared Euclidean distance.
- yule               the Yule dissimilarity (boolean).
-
 Predicates
 
  is_valid_dm        checks for a valid distance matrix.
@@ -175,32 +148,23 @@
 """
 
 import numpy as np
-import _hierarchy_wrap, scipy, types, math, sys, scipy.stats
+import _hierarchy_wrap, types
+import distance
 
 _cpy_non_euclid_methods = {'single': 0, 'complete': 1, 'average': 2,
                            'weighted': 6}
 _cpy_euclid_methods = {'centroid': 3, 'median': 4, 'ward': 5}
 _cpy_linkage_methods = set(_cpy_non_euclid_methods.keys()).union(
     set(_cpy_euclid_methods.keys()))
-_array_type = np.ndarray
 
 try:
     import warnings
     def _warning(s):
-        warnings.warn('scipy-cluster: %s' % s, stacklevel=3)
+        warnings.warn('scipy.cluster: %s' % s, stacklevel=3)
 except:
     def _warning(s):
-        print ('[WARNING] scipy-cluster: %s' % s)
+        print ('[WARNING] scipy.cluster: %s' % s)
 
-def _unbiased_variance(X):
-    """
-    Computes the unbiased variance of each dimension of a collection of
-    observation vectors, represented by a matrix where the rows are the
-    observations.
-    """
-    #n = np.double(X.shape[1])
-    return scipy.stats.var(X, axis=0) # * n / (n - 1.0)
-
 def _copy_array_if_base_present(a):
     """
     Copies the array if its base points to a parent array.
@@ -452,7 +416,7 @@
     if not isinstance(method, str):
         raise TypeError("Argument 'method' must be a string.")
 
-    y = np.asarray(y)
+    y = _convert_to_double(np.asarray(y))
 
     s = y.shape
     if len(s) == 1:
@@ -473,14 +437,14 @@
         if method not in _cpy_linkage_methods:
             raise ValueError('Invalid method: %s' % method)
         if method in _cpy_non_euclid_methods.keys():
-            dm = pdist(X, metric)
+            dm = distance.pdist(X, metric)
             Z = np.zeros((n - 1, 4))
             _hierarchy_wrap.linkage_wrap(dm, Z, n, \
                                        int(_cpy_non_euclid_methods[method]))
         elif method in _cpy_euclid_methods.keys():
             if metric != 'euclidean':
                 raise ValueError('Method %s requires the distance metric to be euclidean' % s)
-            dm = pdist(X, metric)
+            dm = distance.pdist(X, metric)
             Z = np.zeros((n - 1, 4))
             _hierarchy_wrap.linkage_euclid_wrap(dm, Z, X, m, n,
                                               int(_cpy_euclid_methods[method]))
@@ -638,6 +602,8 @@
     functions in this library.
     """
 
+    Z = np.asarray(Z)
+
     is_valid_linkage(Z, throw=True, name='Z')
 
     # The number of original objects is equal to the number of rows minus
@@ -721,7 +687,7 @@
     transformation.
     """
 
-    X = np.asarray(X)
+    X = _convert_to_double(np.asarray(X))
 
     if not np.issubsctype(X, np.double):
         raise TypeError('A double array must be passed.')
@@ -730,12 +696,14 @@
 
     # X = squareform(v)
     if len(s) == 1 and force != 'tomatrix':
+        if X.shape[0] == 0:
+            return np.zeros((1,1), dtype=np.double)
+
         # Grab the closest value to the square root of the number
         # of elements times 2 to see if the number of elements
         # is indeed a binomial coefficient.
         d = int(np.ceil(np.sqrt(X.shape[0] * 2)))
 
-        print d, s[0]
         # Check that v is of valid dimensions.
         if d * (d - 1) / 2 != int(s[0]):
             raise ValueError('Incompatible vector size. It must be a binomial coefficient n choose 2 for some integer n >= 2.')
@@ -760,13 +728,16 @@
             raise ValueError('The matrix argument must be square.')
         if checks:
             if np.sum(np.sum(X == X.transpose())) != np.product(X.shape):
-                raise ValueError('The distance matrix must be symmetrical.')
+                raise ValueError('The distance matrix array must be symmetrical.')
             if (X.diagonal() != 0).any():
-                raise ValueError('The distance matrix must have zeros along the diagonal.')
+                raise ValueError('The distance matrix array must have zeros along the diagonal.')
 
         # One-side of the dimensions is set here.
         d = s[0]
 
+        if d <= 1:
+            return np.array([], dtype=np.double)
+
         # Create a vector.
         v = np.zeros(((d * (d - 1) / 2),), dtype=np.double)
 
@@ -780,717 +751,22 @@
     elif len(s) != 2 and force.lower() == 'tomatrix':
         raise ValueError("Forcing 'tomatrix' but input X is not a distance vector.")
     else:
-        raise ValueError('The first argument must be a vector or matrix. A %d-dimensional array is not permitted' % len(s))
+        raise ValueError('The first argument must be one or two dimensional array. A %d-dimensional array is not permitted' % len(s))
 
-def minkowski(u, v, p):
-    """
-    d = minkowski(u, v, p)
+def _convert_to_bool(X):
+    if X.dtype != np.bool:
+        X = np.bool_(X)
+    if not X.flags.contiguous:
+        X = X.copy()
+    return X
 
-      Returns the Minkowski distance between two vectors u and v,
+def _convert_to_double(X):
+    if X.dtype != np.double:
+        X = np.double(X)
+    if not X.flags.contiguous:
+        X = X.copy()
+    return X
 
-        ||u-v||_p = (\sum {|u_i - v_i|^p})^(1/p).
-    """
-    if p < 1:
-        raise ValueError("p must be at least 1")
-    return math.pow((abs(u-v)**p).sum(), 1.0/p)
-
-def euclidean(u, v):
-    """
-    d = euclidean(u, v)
-
-      Computes the Euclidean distance between two n-vectors u and v, ||u-v||_2
-    """
-    q=np.matrix(u-v)
-    return np.sqrt((q*q.T).sum())
-
-def sqeuclidean(u, v):
-    """
-    d = sqeuclidean(u, v)
-
-      Computes the squared Euclidean distance between two n-vectors u and v,
-        (||u-v||_2)^2.
-    """
-    return ((u-v)*(u-v).T).sum()
-
-def cosine(u, v):
-    """
-    d = cosine(u, v)
-
-      Computes the Cosine distance between two n-vectors u and v,
-        (1-uv^T)/(||u||_2 * ||v||_2).
-    """
-    return (1.0 - (scipy.dot(u, v.T) / \
-                   (np.sqrt(scipy.dot(u, u.T)) * np.sqrt(scipy.dot(v, v.T)))))
-
-def correlation(u, v):
-    """
-    d = correlation(u, v)
-
-      Computes the correlation distance between two n-vectors u and v,
-
-            1 - (u - n|u|_1)(v - n|v|_1)^T
-            --------------------------------- ,
-            |(u - n|u|_1)|_2 |(v - n|v|_1)|^T
-
-      where |*|_1 is the Manhattan norm and n is the common dimensionality
-      of the vectors.
-    """
-    umu = u.mean()
-    vmu = v.mean()
-    um = u - umu
-    vm = v - vmu
-    return 1.0 - (scipy.dot(um, vm) /
-                  (np.sqrt(scipy.dot(um, um)) \
-                   * np.sqrt(scipy.dot(vm, vm))))
-
-def hamming(u, v):
-    """
-    d = hamming(u, v)
-
-      Computes the Hamming distance between two n-vectors u and v,
-      which is simply the proportion of disagreeing components in u
-      and v. If u and v are boolean vectors, the hamming distance is
-
-         (c_{01} + c_{10}) / n
-
-      where c_{ij} is the number of occurrences of
-
-         u[k] == i and v[k] == j
-
-      for k < n.
-    """
-    return (u != v).mean()
-
-def jaccard(u, v):
-    """
-    d = jaccard(u, v)
-
-      Computes the Jaccard-Needham dissimilarity between two boolean
-      n-vectors u and v, which is
-
-              c_{TF} + c_{FT}
-         ------------------------
-         c_{TT} + c_{FT} + c_{TF}
-
-      where c_{ij} is the number of occurrences of
-
-         u[k] == i and v[k] == j
-
-      for k < n.
-    """
-    return (np.double(np.bitwise_and((u != v),
-                     np.bitwise_or(u != 0, v != 0)).sum()) 
-            /  np.double(np.bitwise_or(u != 0, v != 0).sum()))
-
-def kulsinski(u, v):
-    """
-    d = kulsinski(u, v)
-
-      Computes the Kulsinski dissimilarity between two boolean n-vectors
-      u and v, which is
-
-         c_{TF} + c_{FT} - c_{TT} + n
-         ----------------------------
-              c_{FT} + c_{TF} + n
-
-      where c_{ij} is the number of occurrences of
-
-         u[k] == i and v[k] == j
-
-      for k < n.
-    """
-    (nff, nft, ntf, ntt) = _nbool_correspond_all(u, v)
-
-    return (ntf + nft - ntt + n) / (ntf + nft + n)
-
-def seuclidean(u, v, V):
-    """
-    d = seuclidean(u, v, V)
-
-      Returns the standardized Euclidean distance between two
-      n-vectors u and v. V is a m-dimensional vector of component
-      variances. It is usually computed among a larger collection vectors.
-    """
-    V = np.asarray(V)
-    if len(V.shape) != 1 or V.shape[0] != u.shape[0] or u.shape[0] != v.shape[0]:
-        raise TypeError('V must be a 1-D array of the same dimension as u and v.')
-    return np.sqrt(((u-v)**2 / V).sum())
-
-def cityblock(u, v):
-    """
-    d = cityblock(u, v)
-
-      Computes the Manhattan distance between two n-vectors u and v,
-         \sum {u_i-v_i}.
-    """
-    return abs(u-v).sum()
-
-def mahalanobis(u, v, VI):
-    """
-    d = mahalanobis(u, v, VI)
-
-      Computes the Mahalanobis distance between two n-vectors u and v,
-        (u-v)VI(u-v)^T
-      where VI is the inverse covariance matrix.
-    """
-    V = np.asarray(V)
-    return np.sqrt(np.dot(np.dot((u-v),VI),(u-v).T).sum())
-
-def chebyshev(u, v):
-    """
-    d = chebyshev(u, v)
-
-      Computes the Chebyshev distance between two n-vectors u and v,
-        \max {|u_i-v_i|}.
-    """
-    return max(abs(u-v))
-
-def braycurtis(u, v):
-    """
-    d = braycurtis(u, v)
-
-      Computes the Bray-Curtis distance between two n-vectors u and v,
-        \sum{|u_i-v_i|} / \sum{|u_i+v_i|}.
-    """
-    return abs(u-v).sum() / abs(u+v).sum()
-
-def canberra(u, v):
-    """
-    d = canberra(u, v)
-
-      Computes the Canberra distance between two n-vectors u and v,
-        \sum{|u_i-v_i|} / \sum{|u_i|+|v_i}.
-    """
-    return abs(u-v).sum() / (abs(u).sum() + abs(v).sum())
-
-def _nbool_correspond_all(u, v):
-    not_u = scipy.bitwise_not(u)
-    not_v = scipy.bitwise_not(v)
-    nff = scipy.bitwise_and(not_u, not_v).sum()
-    nft = scipy.bitwise_and(not_u, v).sum()
-    ntf = scipy.bitwise_and(u, not_v).sum()
-    ntt = scipy.bitwise_and(u, v).sum()
-    return (nff, nft, ntf, ntt)
-
-def _nbool_correspond_ft_tf(u, v):
-    not_u = scipy.bitwise_not(u)
-    not_v = scipy.bitwise_not(v)
-    nft = scipy.bitwise_and(not_u, v).sum()
-    ntf = scipy.bitwise_and(u, not_v).sum()
-    return (nft, ntf)
-
-def yule(u, v):
-    """
-    d = yule(u, v)
-      Computes the Yule dissimilarity between two boolean n-vectors u and v,
-
-                  R
-         ---------------------
-         c_{TT} + c_{FF} + R/2
-
-      where c_{ij} is the number of occurrences of
-
-         u[k] == i and v[k] == j
-
-      for k < n, and
-
-         R = 2.0 * (c_{TF} + c_{FT}).
-    """
-    (nff, nft, ntf, ntt) = _nbool_correspond_all(u, v)
-    return float(2.0 * ntf * nft) / float(ntt * nff + ntf * nft)
-
-def matching(u, v):
-    """
-    d = matching(u, v)
-
-      Computes the Matching dissimilarity between two boolean n-vectors
-      u and v, which is
-
-         (c_{TF} + c_{FT}) / n
-
-      where c_{ij} is the number of occurrences of
-
-         u[k] == i and v[k] == j
-
-      for k < n.
-    """
-    (nft, ntf) = _nbool_correspond_ft_tf(u, v)
-    return float(nft + ntf) / float(len(u))
-
-def dice(u, v):
-    """
-    d = dice(u, v)
-
-      Computes the Dice dissimilarity between two boolean n-vectors
-      u and v, which is
-
-                c_{TF} + c_{FT}
-         ----------------------------
-         2 * c_{TT} + c_{FT} + c_{TF}
-
-      where c_{ij} is the number of occurrences of
-
-         u[k] == i and v[k] == j
-
-      for k < n.
-    """
-    ntt = scipy.bitwise_and(u, v).sum()
-    (nft, ntf) = _nbool_correspond_ft_tf(u, v)
-    return float(ntf + nft)/float(2.0 * ntt + ntf + nft)
-
-def rogerstanimoto(u, v):
-    """
-    d = rogerstanimoto(u, v)
-
-      Computes the Rogers-Tanimoto dissimilarity between two boolean
-      n-vectors u and v,
-
-                  R
-         -------------------
-         c_{TT} + c_{FF} + R
-
-      where c_{ij} is the number of occurrences of
-
-         u[k] == i and v[k] == j
-
-      for k < n, and
-
-         R = 2.0 * (c_{TF} + c_{FT}).
-
-    """
-    (nff, nft, ntf, ntt) = _nbool_correspond_all(u, v)
-    return float(2.0 * (ntf + nft)) / float(ntt + nff + (2.0 * (ntf + nft)))
-
-def russellrao(u, v):
-    """
-    d = russellrao(u, v)
-
-      Computes the Russell-Rao dissimilarity between two boolean n-vectors
-      u and v, (n - c_{TT}) / n where c_{ij} is the number of occurrences
-      of u[k] == i and v[k] == j for k < n.
-    """
-    ntt = scipy.bitwise_and(u, v).sum()
-    return float(len(u) - ntt) / float(len(u))
-
-def sokalmichener(u, v):
-    """
-    d = sokalmichener(u, v)
-
-      Computes the Sokal-Michener dissimilarity between two boolean vectors
-      u and v, 2R / (S + 2R) where c_{ij} is the number of occurrences of
-      u[k] == i and v[k] == j for k < n and R = 2 * (c_{TF} + c{FT}) and
-      S = c_{FF} + c_{TT}.
-    """
-    ntt = scipy.bitwise_and(u, v).sum()
-    nff = scipy.bitwise_and(scipy.bitwise_not(u), scipy.bitwise_not(v)).sum()
-    (nft, ntf) = _nbool_correspond_ft_tf(u, v)
-    return float(2.0 * (ntf + nft))/float(ntt + nff + 2.0 * (ntf + nft))
-
-def sokalsneath(u, v):
-    """
-    d = sokalsneath(u, v)
-
-      Computes the Sokal-Sneath dissimilarity between two boolean vectors
-      u and v, 2R / (c_{TT} + 2R) where c_{ij} is the number of occurrences
-      of u[k] == i and v[k] == j for k < n and R = 2 * (c_{TF} + c{FT}).
-    """
-    ntt = scipy.bitwise_and(u, v).sum()
-    (nft, ntf) = _nbool_correspond_ft_tf(u, v)
-    return float(2.0 * (ntf + nft))/float(ntt + 2.0 * (ntf + nft))
-
-# V means pass covariance
-_pdist_metric_info = {'euclidean': ['double'],
-                      'seuclidean': ['double'],
-                      'sqeuclidean': ['double'],
-                      'minkowski': ['double'],
-                      'cityblock': ['double'],
-                      'cosine': ['double'],
-                      'correlation': ['double'],
-                      'hamming': ['double','bool'],
-                      'jaccard': ['double', 'bool'],
-                      'chebyshev': ['double'],
-                      'canberra': ['double'],
-                      'braycurtis': ['double'],
-                      'mahalanobis': ['bool'],
-                      'yule': ['bool'],
-                      'matching': ['bool'],
-                      'dice': ['bool'],
-                      'kulsinski': ['bool'],
-                      'rogerstanimoto': ['bool'],
-                      'russellrao': ['bool'],
-                      'sokalmichener': ['bool'],
-                      'sokalsneath': ['bool']}
-
-def pdist(X, metric='euclidean', p=2, V=None, VI=None):
-    """ Y = pdist(X, method='euclidean', p=2)
-
-           Computes the distance between m original observations in
-           n-dimensional space. Returns a condensed distance matrix Y.
-           For each i and j (i<j), the metric dist(u=X[i], v=X[j]) is
-           computed and stored in the ij'th entry. See squareform
-           to learn how to retrieve this entry.
-
-        1. Y = pdist(X)
-
-          Computes the distance between m points using Euclidean distance
-          (2-norm) as the distance metric between the points. The points
-          are arranged as m n-dimensional row vectors in the matrix X.
-
-        2. Y = pdist(X, 'minkowski', p)
-
-          Computes the distances using the Minkowski distance ||u-v||_p
-          (p-norm) where p>=1.
-
-        3. Y = pdist(X, 'cityblock')
-
-          Computes the city block or Manhattan distance between the
-          points.
-
-        4. Y = pdist(X, 'seuclidean', V=None)
-
-          Computes the standardized Euclidean distance. The standardized
-          Euclidean distance between two n-vectors u and v is
-
-            sqrt(\sum {(u_i-v_i)^2 / V[x_i]}).
-
-          V is the variance vector; V[i] is the variance computed over all
-          the i'th components of the points. If not passed, it is
-          automatically computed.
-
-        5. Y = pdist(X, 'sqeuclidean')
-
-          Computes the squared Euclidean distance ||u-v||_2^2 between
-          the vectors.
-
-        6. Y = pdist(X, 'cosine')
-
-          Computes the cosine distance between vectors u and v,
-
-               1 - uv^T
-             -----------
-             |u|_2 |v|_2
-
-          where |*|_2 is the 2 norm of its argument *.
-
-        7. Y = pdist(X, 'correlation')
-
-          Computes the correlation distance between vectors u and v. This is
-
-            1 - (u - n|u|_1)(v - n|v|_1)^T
-            --------------------------------- ,
-            |(u - n|u|_1)|_2 |(v - n|v|_1)|^T
-
-          where |*|_1 is the Manhattan (or 1-norm) of its argument *,
-          and n is the common dimensionality of the vectors.
-
-        8. Y = pdist(X, 'hamming')
-
-          Computes the normalized Hamming distance, or the proportion
-          of those vector elements between two n-vectors u and v which
-          disagree. To save memory, the matrix X can be of type boolean.
-
-        9. Y = pdist(X, 'jaccard')
-
-          Computes the Jaccard distance between the points. Given two
-          vectors, u and v, the Jaccard distance is the proportion of
-          those elements u_i and v_i that disagree where at least one
-          of them is non-zero.
-
-        10. Y = pdist(X, 'chebyshev')
-
-          Computes the Chebyshev distance between the points. The
-          Chebyshev distance between two n-vectors u and v is the maximum
-          norm-1 distance between their respective elements. More
-          precisely, the distance is given by
-
-            d(u,v) = max {|u_i-v_i|}.
-
-        11. Y = pdist(X, 'canberra')
-
-          Computes the Canberra distance between the points. The
-          Canberra distance between two points u and v is
-
-                      |u_1-v_1|     |u_2-v_2|           |u_n-v_n|
-            d(u,v) = ----------- + ----------- + ... + -----------
-                     |u_1|+|v_1|   |u_2|+|v_2|         |u_n|+|v_n|
-
-        12. Y = pdist(X, 'braycurtis')
-
-          Computes the Bray-Curtis distance between the points. The
-          Bray-Curtis distance between two points u and v is
-
-                     |u_1-v_1| + |u_2-v_2| + ... + |u_n-v_n|
-            d(u,v) = ---------------------------------------
-                     |u_1+v_1| + |u_2+v_2| + ... + |u_n+v_n|
-
-        13. Y = pdist(X, 'mahalanobis', VI=None)
-
-          Computes the Mahalanobis distance between the points. The
-          Mahalanobis distance between two points u and v is
-                (u-v)(1/V)(u-v)^T
-          where (1/V) is the inverse covariance. If VI is not None,
-          VI will be used as the inverse covariance matrix.
-
-        14. Y = pdist(X, 'yule')
-
-          Computes the Yule distance between each pair of boolean
-          vectors. (see yule function documentation)
-
-        15. Y = pdist(X, 'matching')
-
-          Computes the matching distance between each pair of boolean
-          vectors. (see matching function documentation)
-
-        16. Y = pdist(X, 'dice')
-
-          Computes the Dice distance between each pair of boolean
-          vectors. (see dice function documentation)
-
-        17. Y = pdist(X, 'kulsinski')
-
-          Computes the Kulsinski distance between each pair of
-          boolean vectors. (see kulsinski function documentation)
-
-        17. Y = pdist(X, 'rogerstanimoto')
-
-          Computes the Rogers-Tanimoto distance between each pair of
-          boolean vectors. (see rogerstanimoto function documentation)
-
-        18. Y = pdist(X, 'russellrao')
-
-          Computes the Russell-Rao distance between each pair of
-          boolean vectors. (see russellrao function documentation)
-
-        19. Y = pdist(X, 'sokalmichener')
-
-          Computes the Sokal-Michener distance between each pair of
-          boolean vectors. (see sokalmichener function documentation)
-
-        20. Y = pdist(X, 'sokalsneath')
-
-          Computes the Sokal-Sneath distance between each pair of
-          boolean vectors. (see sokalsneath function documentation)
-
-        22. Y = pdist(X, f)
-
-          Computes the distance between all pairs of vectors in X
-          using the user-supplied 2-arity function f. For example,
-          the Euclidean distance between the vectors could be
-          computed as follows,
-
-            dm = pdist(X, (lambda u, v: np.sqrt(((u-v)*(u-v).T).sum())))
-
-          Note that you should avoid passing a reference to one of
-          the distance functions defined in this library. For example,
-
-            dm = pdist(X, sokalsneath)
-
-          would calculate the pairwise distances between the vectors
-          in X using the Python function sokalsneath. This would result
-          in sokalsneath being called {n \choose 2} times, which is
-          inefficient. The optimized C version is more efficient; invoke
-          it by passing the metric name as a string,
-
-            dm = pdist(X, 'sokalsneath')
-       """
-#         23. Y = pdist(X, 'test_Y')
-#
-#           Computes the distance between all pairs of vectors in X
-#           using the distance metric Y, with a more succinct and
-#           verifiable, but less efficient, implementation.
-
-
-    X = np.asarray(X)
-
-    if np.issubsctype(X, np.floating) and not np.issubsctype(X, np.double):
-        raise TypeError('Floating point arrays must be 64-bit (got %r).' %
-                        (X.dtype.type,))
-
-    # The C code doesn't do striding.
-    [X] = _copy_arrays_if_base_present([X])
-
-    s = X.shape
-
-    if len(s) != 2:
-        raise ValueError('A 2-dimensional array must be passed.')
-
-    m = s[0]
-    n = s[1]
-    dm = np.zeros((m * (m - 1) / 2,), dtype=np.double)
-
-    mtype = type(metric)
-    if mtype is types.FunctionType:
-        k = 0
-        if metric == minkowski:
-            for i in xrange(0, m - 1):
-                for j in xrange(i+1, m):
-                    dm[k] = minkowski(X[i, :], X[j, :], p)
-                    k = k + 1
-        elif metric == seuclidean:
-            for i in xrange(0, m - 1):
-                for j in xrange(i+1, m):
-                    dm[k] = seuclidean(X[i, :], X[j, :], V)
-                    k = k + 1
-        elif metric == mahalanobis:
-            for i in xrange(0, m - 1):
-                for j in xrange(i+1, m):
-                    dm[k] = mahalanobis(X[i, :], X[j, :], V)
-                    k = k + 1
-        else:
-            for i in xrange(0, m - 1):
-                for j in xrange(i+1, m):
-                    dm[k] = metric(X[i, :], X[j, :])
-                    k = k + 1
-
-    elif mtype is types.StringType:
-        mstr = metric.lower()
-
-        if X.dtype != np.double and \
-               (mstr != 'hamming' and mstr != 'jaccard'):
-            raise TypeError('A double array must be passed.')
-        if mstr in set(['euclidean', 'euclid', 'eu', 'e']):
-            _hierarchy_wrap.pdist_euclidean_wrap(X, dm)
-        elif mstr in set(['sqeuclidean']):
-            _hierarchy_wrap.pdist_euclidean_wrap(X, dm)
-            dm = dm ** 2.0
-        elif mstr in set(['cityblock', 'cblock', 'cb', 'c']):
-            _hierarchy_wrap.pdist_city_block_wrap(X, dm)
-        elif mstr in set(['hamming', 'hamm', 'ha', 'h']):
-            if X.dtype == np.double:
-                _hierarchy_wrap.pdist_hamming_wrap(X, dm)
-            elif X.dtype == bool:
-                _hierarchy_wrap.pdist_hamming_bool_wrap(X, dm)
-            else:
-                raise TypeError('Invalid input array value type %s '
-                                'for hamming.' % str(X.dtype))
-        elif mstr in set(['jaccard', 'jacc', 'ja', 'j']):
-            if X.dtype == np.double:
-                _hierarchy_wrap.pdist_jaccard_wrap(X, dm)
-            elif X.dtype == np.bool:
-                _hierarchy_wrap.pdist_jaccard_bool_wrap(X, dm)
-            else:
-                raise TypeError('Invalid input array value type %s for '
-                                'jaccard.' % str(X.dtype))
-        elif mstr in set(['chebychev', 'chebyshev', 'cheby', 'cheb', 'ch']):
-            _hierarchy_wrap.pdist_chebyshev_wrap(X, dm)
-        elif mstr in set(['minkowski', 'mi', 'm']):
-            _hierarchy_wrap.pdist_minkowski_wrap(X, dm, p)
-        elif mstr in set(['seuclidean', 'se', 's']):
-            if V is not None:
-                if type(V) is not _array_type:
-                    raise TypeError('Variance vector V must be a numpy array')
-                if V.dtype != np.double:
-                    raise TypeError('Variance vector V must contain doubles.')
-                if len(V.shape) != 1:
-                    raise ValueError('Variance vector V must be one-dimensional.')
-                if V.shape[0] != n:
-                    raise ValueError('Variance vector V must be of the same dimension as the vectors on which the distances are computed.')
-                # The C code doesn't do striding.
-                [VV] = _copy_arrays_if_base_present([V])
-            else:
-                VV = _unbiased_variance(X)
-            _hierarchy_wrap.pdist_seuclidean_wrap(X, VV, dm)
-        # Need to test whether vectorized cosine works better.
-        # Find out: Is there a dot subtraction operator so I can
-        # subtract matrices in a similar way to multiplying them?
-        # Need to get rid of as much unnecessary C code as possible.
-        elif mstr in set(['cosine_old', 'cos_old']):
-            norms = np.sqrt(np.sum(X * X, axis=1))
-            _hierarchy_wrap.pdist_cosine_wrap(X, dm, norms)
-        elif mstr in set(['cosine', 'cos']):
-            norms = np.sqrt(np.sum(X * X, axis=1))
-            nV = norms.reshape(m, 1)
-            # The numerator u * v
-            nm = np.dot(X, X.T)
-            # The denom. ||u||*||v||
-            de = np.dot(nV, nV.T)
-            dm = 1 - (nm / de)
-            dm[xrange(0,m),xrange(0,m)] = 0
-            dm = squareform(dm)
-        elif mstr in set(['correlation', 'co']):
-            X2 = X - X.mean(1)[:,np.newaxis]
-            #X2 = X - np.matlib.repmat(np.mean(X, axis=1).reshape(m, 1), 1, n)
-            norms = np.sqrt(np.sum(X2 * X2, axis=1))
-            _hierarchy_wrap.pdist_cosine_wrap(X2, dm, norms)
-        elif mstr in set(['mahalanobis', 'mahal', 'mah']):
-            if VI is not None:
-                if type(VI) != _array_type:
-                    raise TypeError('VI must be a numpy array.')
-                if VI.dtype != np.double:
-                    raise TypeError('The array must contain 64-bit floats.')
-                [VI] = _copy_arrays_if_base_present([VI])
-            else:
-                V = np.cov(X.T)
-                VI = np.linalg.inv(V).T.copy()
-            # (u-v)V^(-1)(u-v)^T
-            _hierarchy_wrap.pdist_mahalanobis_wrap(X, VI, dm)
-        elif mstr == 'canberra':
-            _hierarchy_wrap.pdist_canberra_wrap(X, dm)
-        elif mstr == 'braycurtis':
-            _hierarchy_wrap.pdist_bray_curtis_wrap(X, dm)
-        elif mstr == 'yule':
-            _hierarchy_wrap.pdist_yule_bool_wrap(X, dm)
-        elif mstr == 'matching':
-            _hierarchy_wrap.pdist_matching_bool_wrap(X, dm)
-        elif mstr == 'kulsinski':
-            _hierarchy_wrap.pdist_kulsinski_bool_wrap(X, dm)
-        elif mstr == 'dice':
-            _hierarchy_wrap.pdist_dice_bool_wrap(X, dm)
-        elif mstr == 'rogerstanimoto':
-            _hierarchy_wrap.pdist_rogerstanimoto_bool_wrap(X, dm)
-        elif mstr == 'russellrao':
-            _hierarchy_wrap.pdist_russellrao_bool_wrap(X, dm)
-        elif mstr == 'sokalmichener':
-            _hierarchy_wrap.pdist_sokalmichener_bool_wrap(X, dm)
-        elif mstr == 'sokalsneath':
-            _hierarchy_wrap.pdist_sokalsneath_bool_wrap(X, dm)
-        elif metric == 'test_euclidean':
-            dm = pdist(X, euclidean)
-        elif metric == 'test_sqeuclidean':
-            if V is None:
-                V = _unbiased_variance(X)
-            dm = pdist(X, lambda u, v: seuclidean(u, v, V))
-        elif metric == 'test_braycurtis':
-            dm = pdist(X, braycurtis)
-        elif metric == 'test_mahalanobis':
-            if VI is None:
-                V = np.cov(X.T)
-                VI = np.linalg.inv(V)
-            [VI] = _copy_arrays_if_base_present([VI])
-            # (u-v)V^(-1)(u-v)^T
-            dm = pdist(X, (lambda u, v: mahalanobis(u, v, VI)))
-        elif metric == 'test_cityblock':
-            dm = pdist(X, cityblock)
-        elif metric == 'test_minkowski':
-            dm = pdist(X, minkowski, p)
-        elif metric == 'test_cosine':
-            dm = pdist(X, cosine)
-        elif metric == 'test_correlation':
-            dm = pdist(X, correlation)
-        elif metric == 'test_hamming':
-            dm = pdist(X, hamming)
-        elif metric == 'test_jaccard':
-            dm = pdist(X, jaccard)
-        elif metric == 'test_chebyshev' or metric == 'test_chebychev':
-            dm = pdist(X, chebyshev)
-        elif metric == 'test_yule':
-            dm = pdist(X, yule)
-        elif metric == 'test_matching':
-            dm = pdist(X, matching)
-        elif metric == 'test_dice':
-            dm = pdist(X, dice)
-        elif metric == 'test_rogerstanimoto':
-            dm = pdist(X, rogerstanimoto)
-        elif metric == 'test_russellrao':
-            dm = pdist(X, russellrao)
-        elif metric == 'test_sokalsneath':
-            dm = pdist(X, sokalsneath)
-        else:
-            raise ValueError('Unknown Distance Metric: %s' % mstr)
-    else:
-        raise TypeError('2nd argument metric must be a string identifier or a function.')
-    return dm
-
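For reference, the metric formulas documented in the removed pdist above are
easy to sanity-check directly in NumPy; a minimal sketch (the helper names
here are illustrative, not part of the module):

    import numpy as np

    X = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 7.0]])

    def canberra(u, v):
        # sum_i |u_i - v_i| / (|u_i| + |v_i|), per item 11 above
        return np.sum(np.abs(u - v) / (np.abs(u) + np.abs(v)))

    def braycurtis(u, v):
        # sum_i |u_i - v_i| / sum_i |u_i + v_i|, per item 12 above
        return np.sum(np.abs(u - v)) / np.sum(np.abs(u + v))

    m = X.shape[0]
    dm = [canberra(X[i], X[j]) for i in range(m - 1) for j in range(i + 1, m)]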
 def cophenet(*args, **kwargs):
     """
     d = cophenet(Z)
@@ -1518,6 +794,8 @@
       Also returns the cophenetic distance matrix in condensed form.
 
     """
+    Z = np.asarray(Z)
+
     nargs = len(args)
 
     if nargs < 1:
@@ -1531,7 +809,7 @@
     zz = np.zeros((n*(n-1)/2,), dtype=np.double)
     # Since the C code does not support striding,
     # the dimensions are used instead.
-    [Z] = _copy_arrays_if_base_present([Z])
+    Z = _convert_to_double(Z)
 
     _hierarchy_wrap.cophenetic_distances_wrap(Z, zz, int(n))
     if nargs == 1:
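The zz allocation above uses the condensed-matrix convention: for n
observations there are n*(n-1)/2 unordered pairs. A quick check of the
arithmetic:

    # 5 observations -> 10 pairwise distances in condensed form
    n = 5
    assert n * (n - 1) // 2 == 10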
@@ -1575,6 +853,7 @@
       This function behaves similarly to the MATLAB(TM) inconsistent
       function.
     """
+    Z = np.asarray(Z)
 
     Zs = Z.shape
     is_valid_linkage(Z, throw=True, name='Z')
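For orientation, the statistics gathered per link follow the usual
inconsistency definition: mean, standard deviation, and count of the link
heights within the given depth, plus the inconsistency coefficient. A rough
sketch, not the C implementation (taking d[-1] as the current link's height
is an assumption):

    import numpy as np

    d = np.array([0.4, 0.6, 0.9])   # link heights within the search depth
    row = [d.mean(), d.std(), len(d),
           (d[-1] - d.mean()) / d.std()]   # inconsistency coefficient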
@@ -1607,7 +886,7 @@
        the number of original observations (leaves) in the non-singleton
        cluster i.
     """
-    is_valid_linkage(Z, throw=True, name='Z')
+    Z = np.asarray(Z)
     Zs = Z.shape
     Zpart = Z[:,0:2]
     Zd = Z[:,2].reshape(Zs[0], 1)
@@ -1629,6 +908,7 @@
     last column removed and the cluster indices converted to use
     1..N indexing.
     """
+    Z = np.asarray(Z)
     is_valid_linkage(Z, throw=True, name='Z')
 
     return np.hstack([Z[:,0:2] + 1, Z[:,2]])
@@ -1641,6 +921,7 @@
       if for every cluster s and t joined, the distance between them is
       no less than the distance between any previously joined clusters.
     """
+    Z = np.asarray(Z)
     is_valid_linkage(Z, throw=True, name='Z')
 
     # Each merge distance should be no less than its predecessor.
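In NumPy terms the docstring's condition reduces to one line; a sketch
assuming column 2 of Z holds the merge distances:

    import numpy as np

    def is_monotonic_sketch(Z):
        d = np.asarray(Z)[:, 2]                 # merge distances
        return bool((np.diff(d) >= 0).all())    # non-decreasing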
@@ -1655,18 +936,19 @@
       must be nonnegative. The link counts R[:,2] must be positive and
       no greater than n-1.
     """
+    R = np.asarray(R)
     valid = True
     try:
-        if type(R) is not _array_type:
+        if type(R) != np.ndarray:
             if name:
                 raise TypeError('Variable \'%s\' passed as inconsistency matrix is not a numpy array.' % name)
             else:
                 raise TypeError('Variable passed as inconsistency matrix is not a numpy array.')
         if R.dtype != np.double:
             if name:
-                raise TypeError('Inconsistency matrix \'%s\' must contain doubles (float64).' % name)
+                raise TypeError('Inconsistency matrix \'%s\' must contain doubles (double).' % name)
             else:
-                raise TypeError('Inconsistency matrix must contain doubles (float64).')
+                raise TypeError('Inconsistency matrix must contain doubles (double).')
         if len(R.shape) != 2:
             if name:
                 raise ValueError('Inconsistency matrix \'%s\' must have shape=2 (i.e. be two-dimensional).' % name)
@@ -1714,18 +996,19 @@
       variable.
 
     """
+    Z = np.asarray(Z)
     valid = True
     try:
-        if type(Z) is not _array_type:
+        if type(Z) != np.ndarray:
             if name:
                 raise TypeError('\'%s\' passed as a linkage is not a valid array.' % name)
             else:
                 raise TypeError('Variable is not a valid array.')
         if Z.dtype != np.double:
             if name:
-                raise TypeError('Linkage matrix \'%s\' must contain doubles (float64).' % name)
+                raise TypeError('Linkage matrix \'%s\' must contain doubles (double).' % name)
             else:
-                raise TypeError('Linkage matrix must contain doubles (float64).')
+                raise TypeError('Linkage matrix must contain doubles (double).')
         if len(Z.shape) != 2:
             if name:
                 raise ValueError('Linkage matrix \'%s\' must have shape=2 (i.e. be two-dimensional).' % name)
@@ -1737,12 +1020,13 @@
             else:
                 raise ValueError('Linkage matrix must have 4 columns.')
         n = Z.shape[0]
-        if not ((Z[:,0]-xrange(n-1, n*2-1) < 0).any()) or \
-           not (Z[:,1]-xrange(n-1, n*2-1) < 0).any():
-            if name:
-                raise ValueError('Linkage \'%s\' contains negative indices.' % name)
-            else:
-                raise ValueError('Linkage contains negative indices.')
+        if n > 1:
+            if ((Z[:,0] < 0).any() or
+                (Z[:,1] < 0).any()):
+                if name:
+                    raise ValueError('Linkage \'%s\' contains negative indices.' % name)
+                else:
+                    raise ValueError('Linkage contains negative indices.')
     except Exception, e:
         if throw:
             raise
@@ -1774,18 +1058,19 @@
       referencing the offending variable.
 
     """
+    y = np.asarray(y)
     valid = True
     try:
-        if type(y) is not _array_type:
+        if type(y) != np.ndarray:
             if name:
                 raise TypeError('\'%s\' passed as a condensed distance matrix is not a numpy array.' % name)
             else:
                 raise TypeError('Variable is not a numpy array.')
         if y.dtype != np.double:
             if name:
-                raise TypeError('Condensed distance matrix \'%s\' must contain doubles (float64).' % name)
+                raise TypeError('Condensed distance matrix \'%s\' must contain doubles (double).' % name)
             else:
-                raise TypeError('Condensed distance matrix must contain doubles (float64).')
+                raise TypeError('Condensed distance matrix must contain doubles (double).')
         if len(y.shape) != 1:
             if name:
                 raise ValueError('Condensed distance matrix \'%s\' must have shape=1 (i.e. be one-dimensional).' % name)
@@ -1807,7 +1092,7 @@
     return valid
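The size constraint on a condensed distance matrix is binomial; a small
sketch of the check implied above (helper name illustrative):

    import numpy as np

    def _is_condensed_size(y):
        # n observations give n*(n-1)/2 condensed entries; recover n and verify
        k = np.asarray(y).shape[0]
        n = int(np.ceil(np.sqrt(2 * k)))
        return n * (n - 1) // 2 == k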
 
 
-def is_valid_dm(D, t=0.0):
+def is_valid_dm(D, tol=0.0, throw=False, name="D"):
     """
     is_valid_dm(D)
 
@@ -1815,12 +1100,12 @@
       Distance matrices must be 2-dimensional numpy arrays containing
       doubles. They must have a zero-diagonal, and they must be symmetric.
 
-    is_valid_dm(D, t)
+    is_valid_dm(D, tol)
 
       Returns True if the variable D passed is a valid distance matrix.
       Small numerical differences in D and D.T and non-zeroness of the
       diagonal are ignored if they are within the tolerance specified
-      by t.
+      by tol.
 
     is_valid_dm(..., warning=True, name='V')
 
@@ -1835,25 +1120,26 @@
       the offending variable.
 
     """
-
+    D = np.asarray(D)
     valid = True
     try:
-        if type(D) is not _array_type:
+        if type(D) != np.ndarray:
             if name:
                 raise TypeError('\'%s\' passed as a distance matrix is not a numpy array.' % name)
             else:
                 raise TypeError('Variable is not a numpy array.')
+        s = D.shape
         if D.dtype != np.double:
             if name:
-                raise TypeError('Distance matrix \'%s\' must contain doubles (float64).' % name)
+                raise TypeError('Distance matrix \'%s\' must contain doubles (double).' % name)
             else:
-                raise TypeError('Distance matrix must contain doubles (float64).')
+                raise TypeError('Distance matrix must contain doubles (double).')
         if len(D.shape) != 2:
             if name:
                 raise ValueError('Distance matrix \'%s\' must have shape=2 (i.e. be two-dimensional).' % name)
             else:
                 raise ValueError('Distance matrix must have shape=2 (i.e. be two-dimensional).')
-        if t == 0.0:
+        if tol == 0.0:
             if not (D == D.T).all():
                 if name:
                     raise ValueError('Distance matrix \'%s\' must be symmetric.' % name)
@@ -1865,16 +1151,16 @@
                 else:
                     raise ValueError('Distance matrix diagonal must be zero.')
         else:
-            if not (D - D.T <= t).all():
+            if not (D - D.T <= tol).all():
                 if name:
-                    raise ValueError('Distance matrix \'%s\' must be symmetric within tolerance %d.' % (name, t))
+                    raise ValueError('Distance matrix \'%s\' must be symmetric within tolerance %5.5f.' % (name, tol))
                 else:
-                    raise ValueError('Distance matrix must be symmetric within tolerance %d.' % t)
-            if not (D[xrange(0, s[0]), xrange(0, s[0])] <= t).all():
+                    raise ValueError('Distance matrix must be symmetric within tolerance %5.5f.' % tol)
+            if not (D[xrange(0, s[0]), xrange(0, s[0])] <= tol).all():
                 if name:
-                    raise ValueError('Distance matrix \'%s\' diagonal must be close to zero within tolerance %d.' % (name, t))
+                    raise ValueError('Distance matrix \'%s\' diagonal must be close to zero within tolerance %5.5f.' % (name, tol))
                 else:
-                    raise ValueError('Distance matrix \'%s\' diagonal must be close to zero within tolerance %d.' % t)
+                    raise ValueError('Distance matrix diagonal must be close to zero within tolerance %5.5f.' % tol)
     except Exception, e:
         if throw:
             raise
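The tolerance branch compares D - D.T one-sided; a compact equivalent using
absolute differences (an assumption about the intent, not the shipped code):

    import numpy as np

    def _sym_within_tol(D, tol):
        D = np.asarray(D, dtype=np.double)
        return bool((np.abs(D - D.T) <= tol).all() and
                    (np.abs(np.diag(D)) <= tol).all())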
@@ -1888,8 +1174,9 @@
     Returns the number of original observations that correspond to a
     linkage matrix Z.
     """
+    Z = np.asarray(Z)
     is_valid_linkage(Z, throw=True, name='Z')
-    return (Z.shape[0] - 1)
+    return (Z.shape[0] + 1)
 
 def numobs_dm(D):
     """
@@ -1898,7 +1185,8 @@
       Returns the number of original observations that correspond to a
       square, non-condensed distance matrix D.
     """
-    is_valid_dm(D, tol=Inf, throw=True, name='D')
+    D = np.asarray(D)
+    is_valid_dm(D, tol=np.inf, throw=True, name='D')
     return D.shape[0]
 
 def numobs_y(Y):
@@ -1908,8 +1196,9 @@
       Returns the number of original observations that correspond to a
       condensed distance matrix Y.
     """
-    is_valid_y(y, throw=True, name='Y')
-    d = int(np.ceil(np.sqrt(y.shape[0] * 2)))
+    Y = np.asarray(Y)
+    is_valid_y(Y, throw=True, name='Y')
+    d = int(np.ceil(np.sqrt(Y.shape[0] * 2)))
     return d
 
 def Z_y_correspond(Z, Y):
@@ -1922,6 +1211,8 @@
       check in algorithms that make extensive use of linkage and distance
       matrices that must correspond to the same set of original observations.
     """
+    Z = np.asarray(Z)
+    Y = np.asarray(Y)
     return numobs_y(Y) == numobs_Z(Z)
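The correspondence is pure counting: n observations produce a linkage with
n-1 rows and a condensed matrix with n*(n-1)/2 entries. For example:

    import numpy as np

    n = 6
    y_len = n * (n - 1) // 2          # 15 condensed entries
    z_rows = n - 1                    # 5 linkage rows
    assert int(np.ceil(np.sqrt(2 * y_len))) == z_rows + 1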
 
 def fcluster(Z, t, criterion='inconsistent', depth=2, R=None, monocrit=None):
@@ -1981,10 +1272,11 @@
           cluster(Z, t=3, criterion='maxclust_monocrit', monocrit=MI)
 
     """
+    Z = np.asarray(Z)
     is_valid_linkage(Z, throw=True, name='Z')
 
     n = Z.shape[0] + 1
-    T = np.zeros((n,), dtype=np.int32)
+    T = np.zeros((n,), dtype=np.int)
 
     # Since the C code does not support striding,
     # the dimensions are used instead.
@@ -1994,6 +1286,7 @@
         if R is None:
             R = inconsistent(Z, depth)
         else:
+            R = np.asarray(R)
             is_valid_im(R, throw=True, name='R')
             # Since the C code does not support striding,
             # the dimensions are used instead.
@@ -2048,7 +1341,7 @@
                     descriptions.
 
         distance:   the distance metric for calculating pairwise
-                    distances. See pdist for descriptions and
+                    distances. See distance.pdist for descriptions and
                     linkage to verify compatibility with the linkage
                     method.
 
@@ -2063,14 +1356,17 @@
 
     This function is similar to MATLAB(TM) clusterdata function.
     """
+    X = np.asarray(X)
 
-    if type(X) is not _array_type or len(X.shape) != 2:
-        raise TypeError('X must be an n by m numpy array.')
+    if type(X) != np.ndarray or len(X.shape) != 2:
+        raise TypeError('The observation matrix X must be an n by m numpy array.')
 
-    Y = pdist(X, metric=distance)
+    Y = distance.pdist(X, metric=distance)
     Z = linkage(Y, method=method)
     if R is None:
         R = inconsistent(Z, d=depth)
+    else:
+        R = np.asarray(R)
     T = fcluster(Z, criterion=criterion, depth=depth, R=R, t=t)
     return T
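The body above is essentially a three-stage pipeline. Sketched with the
module's own functions, with the calls shown inert since the exact public
names are settling in this refactor (argument values are illustrative):

    # X -> condensed distances -> linkage -> flat clusters
    # Y = distance.pdist(X, metric='euclidean')
    # Z = linkage(Y, method='single')
    # T = fcluster(Z, t=1.15, criterion='inconsistent', depth=2)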
 
@@ -2081,9 +1377,10 @@
       Returns a list of leaf node ids as they appear in the tree from
       left to right. Z is a linkage matrix.
     """
+    Z = np.asarray(Z)
     is_valid_linkage(Z, throw=True, name='Z')
     n = Z.shape[0] + 1
-    ML = np.zeros((n,), dtype=np.int32)
+    ML = np.zeros((n,), dtype=np.int)
     [Z] = _copy_arrays_if_base_present([Z])
     _hierarchy_wrap.prelist_wrap(Z, ML, int(n))
     return ML
@@ -2102,10 +1399,10 @@
     # p <= 20, size="12"
     # 20 < p <= 30, size="10"
     # 30 < p <= 50, size="8"
-    # 50 < p <= scipy.inf, size="6"
+    # 50 < p <= np.inf, size="6"
 
-    _dtextsizes = {20: 12, 30: 10, 50: 8, 85: 6, scipy.inf: 5}
-    _drotation =  {20: 0,          40: 45,       scipy.inf: 90}
+    _dtextsizes = {20: 12, 30: 10, 50: 8, 85: 6, np.inf: 5}
+    _drotation =  {20: 0,          40: 45,       np.inf: 90}
     _dtextsortedkeys = list(_dtextsizes.keys())
     _dtextsortedkeys.sort()
     _drotationsortedkeys = list(_drotation.keys())
@@ -2141,7 +1438,7 @@
         ivw = len(ivl) * 10
         # Dependent variable plot height
         dvw = mh + mh * 0.05
-        ivticks = scipy.arange(5, len(ivl)*10+5, 10)
+        ivticks = np.arange(5, len(ivl)*10+5, 10)
         if orientation == 'top':
             axis.set_ylim([0, dvw])
             axis.set_xlim([0, ivw])
@@ -2519,6 +1816,7 @@
     #         or results in a crossing, an exception will be thrown. Passing
     #         None orders leaf nodes based on the order they appear in the
     #         pre-order traversal.
+    Z = np.asarray(Z)
 
     is_valid_linkage(Z, throw=True, name='Z')
     Zs = Z.shape
@@ -2537,7 +1835,7 @@
 
     if truncate_mode == 'mtica' or truncate_mode == 'level':
         if p <= 0:
-            p = scipy.inf
+            p = np.inf
     if get_leaves:
         lvs = []
     else:
@@ -2637,7 +1935,7 @@
 
 
 def _dendrogram_calculate_info(Z, p, truncate_mode, \
-                               colorthreshold=scipy.inf, get_leaves=True, \
+                               colorthreshold=np.inf, get_leaves=True, \
                                orientation='top', labels=None, \
                                count_sort=False, distance_sort=False, \
                                show_leaf_counts=False, i=-1, iv=0.0, \
@@ -2882,9 +2180,12 @@
       Returns True iff two different cluster assignments T1 and T2 are
       equivalent. T1 and T2 must be arrays of the same size.
     """
-    if type(T1) is not _array_type:
+    T1 = np.asarray(T1)
+    T2 = np.asarray(T2)
+
+    if type(T1) != np.ndarray:
         raise TypeError('T1 must be a numpy array.')
-    if type(T2) is not _array_type:
+    if type(T2) != np.ndarray:
         raise TypeError('T2 must be a numpy array.')
 
     T1S = T1.shape
@@ -2919,6 +2220,7 @@
       Note that when Z[:,2] is monotonic, Z[:,2] and MD should not differ.
       See linkage for more information on this issue.
     """
+    Z = np.asarray(Z)
     is_valid_linkage(Z, throw=True, name='Z')
 
     n = Z.shape[0] + 1
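A pure-Python stand-in for the computation described above: the maximum merge
distance over each subtree, taken bottom-up (clusters are assumed to use the
usual n..2n-2 row encoding):

    import numpy as np

    def maxdists_sketch(Z):
        Z = np.asarray(Z)
        n = Z.shape[0] + 1
        MD = np.zeros(n - 1)
        for i in range(n - 1):
            left, right = int(Z[i, 0]), int(Z[i, 1])
            md = Z[i, 2]
            if left >= n:                   # left child is a cluster row
                md = max(md, MD[left - n])
            if right >= n:                  # right child likewise
                md = max(md, MD[right - n])
            MD[i] = md
        return MD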
@@ -2936,6 +2238,8 @@
       inconsistency matrix. MI is a monotonic (n-1)-sized numpy array of
       doubles.
     """
+    Z = np.asarray(Z)
+    R = np.asarray(R)
     is_valid_linkage(Z, throw=True, name='Z')
     is_valid_im(R, throw=True, name='R')
 
@@ -2954,6 +2258,8 @@
     is the maximum over R[Q(j)-n, i], where Q(j) is the set of all node ids
     corresponding to nodes below and including j.
     """
+    Z = np.asarray(Z)
+    R = np.asarray(R)
     is_valid_linkage(Z, throw=True, name='Z')
     is_valid_im(R, throw=True, name='R')
     if type(i) is not types.IntType:
@@ -2989,7 +2295,9 @@
     i < n, i corresponds to an original observation, otherwise it
     corresponds to a non-singleton cluster.
     """
-    if type(T) != _array_type or T.dtype != np.int:
+    Z = np.asarray(Z)
+    T = np.asarray(T)
+    if type(T) != np.ndarray or T.dtype != np.int:
         raise TypeError('T must be a one-dimensional numpy array of integers.')
     is_valid_linkage(Z, throw=True, name='Z')
     if len(T) != Z.shape[0] + 1:
@@ -2997,8 +2305,8 @@
 
     Cl = np.unique(T)
     kk = len(Cl)
-    L = np.zeros((kk,), dtype=np.int32)
-    M = np.zeros((kk,), dtype=np.int32)
+    L = np.zeros((kk,), dtype=np.int)
+    M = np.zeros((kk,), dtype=np.int)
     n = Z.shape[0] + 1
     [Z, T] = _copy_arrays_if_base_present([Z, T])
     s = _hierarchy_wrap.leaders_wrap(Z, T, L, M, int(kk), int(n))

Modified: branches/refactor_fft/scipy/cluster/info.py
===================================================================
--- branches/refactor_fft/scipy/cluster/info.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/cluster/info.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -14,6 +14,12 @@
     clustering. Its features include generating hierarchical clusters from 
     distance matrices, computing distance matrices from observation vectors, 
     calculating statistics on clusters, cutting linkages to generate flat 
-    clusters, and visualizing clusters with dendrograms. 
+    clusters, and visualizing clusters with dendrograms.
 
+Distance Computation
+====================
+
+    The distance module provides functions for computing distances between
+    pairs of vectors from a set of observation vectors.
+
 """

Modified: branches/refactor_fft/scipy/cluster/setup.py
===================================================================
--- branches/refactor_fft/scipy/cluster/setup.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/cluster/setup.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -12,6 +12,10 @@
         sources=[join('src', 'vq_module.c'), join('src', 'vq.c')],
         include_dirs = [get_numpy_include_dirs()])
 
+    config.add_extension('_distance_wrap',
+        sources=[join('src', 'distance_wrap.c'), join('src', 'distance.c')],
+        include_dirs = [get_numpy_include_dirs()])
+
     config.add_extension('_hierarchy_wrap',
         sources=[join('src', 'hierarchy_wrap.c'), join('src', 'hierarchy.c')],
         include_dirs = [get_numpy_include_dirs()])

Copied: branches/refactor_fft/scipy/cluster/src/common.h (from rev 4510, trunk/scipy/cluster/src/common.h)

Copied: branches/refactor_fft/scipy/cluster/src/distance.c (from rev 4510, trunk/scipy/cluster/src/distance.c)

Copied: branches/refactor_fft/scipy/cluster/src/distance.h (from rev 4510, trunk/scipy/cluster/src/distance.h)

Copied: branches/refactor_fft/scipy/cluster/src/distance_wrap.c (from rev 4510, trunk/scipy/cluster/src/distance_wrap.c)

Modified: branches/refactor_fft/scipy/cluster/src/hierarchy.c
===================================================================
--- branches/refactor_fft/scipy/cluster/src/hierarchy.c	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/cluster/src/hierarchy.c	2008-07-01 04:52:00 UTC (rev 4511)
@@ -34,12 +34,11 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#define NCHOOSE2(_n) ((_n)*(_n-1)/2)
+#include "common.h"
+
 #define ISCLUSTER(_nd) ((_nd)->id >= n)
 #define GETCLUSTER(_id) ((lists + _id - n))
 
-#define CPY_MAX(_x, _y) ((_x > _y) ? (_x) : (_y))
-#define CPY_MIN(_x, _y) ((_x < _y) ? (_x) : (_y))
 /** The number of link stats (for the inconsistency computation) for each
     cluster. */
 
@@ -61,39 +60,15 @@
 #define CPY_LIN_DIST 2
 #define CPY_LIN_CNT 3
 
-#define CPY_BITS_PER_CHAR (sizeof(unsigned char) * 8)
-#define CPY_FLAG_ARRAY_SIZE_BYTES(num_bits) (CPY_CEIL_DIV((num_bits), \
-                                                          CPY_BITS_PER_CHAR))
-#define CPY_GET_BIT(_xx, i) (((_xx)[(i) / CPY_BITS_PER_CHAR] >> \
-                             ((CPY_BITS_PER_CHAR-1) - \
-                              ((i) % CPY_BITS_PER_CHAR))) & 0x1)
-#define CPY_SET_BIT(_xx, i) ((_xx)[(i) / CPY_BITS_PER_CHAR] |= \
-                              ((0x1) << ((CPY_BITS_PER_CHAR-1) \
-                                         -((i) % CPY_BITS_PER_CHAR))))
-#define CPY_CLEAR_BIT(_xx, i) ((_xx)[(i) / CPY_BITS_PER_CHAR] &= \
-                              ~((0x1) << ((CPY_BITS_PER_CHAR-1) \
-                                         -((i) % CPY_BITS_PER_CHAR))))
-
-#ifndef CPY_CEIL_DIV
-#define CPY_CEIL_DIV(x, y) ((((double)x)/(double)y) == \
-                            ((double)((x)/(y))) ? ((x)/(y)) : ((x)/(y) + 1))
-#endif
-
-
-#ifdef CPY_DEBUG
-#define CPY_DEBUG_MSG(...) fprintf(stderr, __VA_ARGS__)
-#else
-#define CPY_DEBUG_MSG(...)
-#endif
-
 #include <stdlib.h>
 #include <string.h>
 #include <stdio.h>
 #include <math.h>
 
 #include "hierarchy.h"
+#include "distance.h"
 
-double euclidean_distance(const double *u, const double *v, int n) {
+static inline double euclidean_distance(const double *u, const double *v, int n) {
   int i = 0;
   double s = 0.0, d;
   for (i = 0; i < n; i++) {
@@ -103,548 +78,6 @@
   return sqrt(s);
 }
 
-double ess_distance(const double *u, const double *v, int n) {
-  int i = 0;
-  double s = 0.0, d;
-  for (i = 0; i < n; i++) {
-    d = fabs(u[i] - v[i]);
-    s = s + d * d;
-  }
-  return s;
-}
-
-double chebyshev_distance(const double *u, const double *v, int n) {
-  int i = 0;
-  double d, maxv = 0.0;
-  for (i = 0; i < n; i++) {
-    d = fabs(u[i] - v[i]);
-    if (d > maxv) {
-      maxv = d;
-    }
-  }
-  return maxv;
-}
-
-double canberra_distance(const double *u, const double *v, int n) {
-  int i;
-  double s = 0.0;
-  for (i = 0; i < n; i++) {
-    s += (fabs(u[i] - v[i]) / (fabs(u[i]) + fabs(v[i])));
-  }
-  return s;
-}
-
-double bray_curtis_distance(const double *u, const double *v, int n) {
-  int i;
-  double s1 = 0.0, s2 = 0.0;
-  for (i = 0; i < n; i++) {
-    s1 += fabs(u[i] - v[i]);
-    s2 += fabs(u[i] + v[i]);
-  }
-  return s1 / s2;
-}
-
-double mahalanobis_distance(const double *u, const double *v,
-			    const double *covinv, double *dimbuf1,
-			    double *dimbuf2, int n) {
-  int i, j;
-  double s;
-  const double *covrow = covinv;
-  for (i = 0; i < n; i++) {
-    dimbuf1[i] = u[i] - v[i];
-  }
-  for (i = 0; i < n; i++) {
-    covrow = covinv + (i * n);
-    s = 0.0;
-    for (j = 0; j < n; j++) {
-      s += dimbuf1[j] * covrow[j];
-    }
-    dimbuf2[i] = s;
-  }
-  s = 0.0;
-  for (i = 0; i < n; i++) {
-    s += dimbuf1[i] * dimbuf2[i];
-  }
-  return sqrt(s);
-}
-
-double hamming_distance(const double *u, const double *v, int n) {
-  int i = 0;
-  double s = 0.0;
-  for (i = 0; i < n; i++) {
-    s = s + (u[i] != v[i]);
-  }
-  return s / (double)n;
-}
-
-double hamming_distance_bool(const char *u, const char *v, int n) {
-  int i = 0;
-  double s = 0.0;
-  for (i = 0; i < n; i++) {
-    s = s + (u[i] != v[i]);
-  }
-  return s / (double)n;
-}
-
-double yule_distance_bool(const char *u, const char *v, int n) {
-  int i = 0;
-  int ntt = 0, nff = 0, nft = 0, ntf = 0;
-  for (i = 0; i < n; i++) {
-    ntt += (u[i] && v[i]);
-    ntf += (u[i] && !v[i]);
-    nft += (!u[i] && v[i]);
-    nff += (!u[i] && !v[i]);
-  }
-  return (2.0 * ntf * nft) / (double)(ntt * nff + ntf * nft);  
-}
-
-double matching_distance_bool(const char *u, const char *v, int n) {
-  int i = 0;
-  int nft = 0, ntf = 0;
-  for (i = 0; i < n; i++) {
-    ntf += (u[i] && !v[i]);
-    nft += (!u[i] && v[i]);
-  }
-  return (double)(ntf + nft) / (double)(n);
-}
-
-double dice_distance_bool(const char *u, const char *v, int n) {
-  int i = 0;
-  int ntt = 0, nft = 0, ntf = 0;
-  for (i = 0; i < n; i++) {
-    ntt += (u[i] && v[i]);
-    ntf += (u[i] && !v[i]);
-    nft += (!u[i] && v[i]);
-  }
-  return (double)(nft + ntf) / (double)(2.0 * ntt + ntf + nft);
-}
-
-
-double rogerstanimoto_distance_bool(const char *u, const char *v, int n) {
-  int i = 0;
-  int ntt = 0, nff = 0, nft = 0, ntf = 0;
-  for (i = 0; i < n; i++) {
-    ntt += (u[i] && v[i]);
-    ntf += (u[i] && !v[i]);
-    nft += (!u[i] && v[i]);
-    nff += (!u[i] && !v[i]);
-  }
-  return (2.0 * (ntf + nft)) / ((double)ntt + nff + (2.0 * (ntf + nft)));
-}
-
-double russellrao_distance_bool(const char *u, const char *v, int n) {
-  int i = 0;
-  /**  int nff = 0, nft = 0, ntf = 0;**/
-  int ntt = 0;
-  for (i = 0; i < n; i++) {
-    /**    nff += (!u[i] && !v[i]);
-    ntf += (u[i] && !v[i]);
-    nft += (!u[i] && v[i]);**/
-    ntt += (u[i] && v[i]);
-  }
-  /**  return (double)(ntf + nft + nff) / (double)n;**/
-  return (double) (n - ntt) / (double) n;
-}
-
-static inline double kulsinski_distance_bool(const char *u, const char *v, int n) {
-  int _i = 0;
-  int ntt = 0, nft = 0, ntf = 0, nff = 0;
-  for (_i = 0; _i < n; _i++) {
-    ntt += (u[_i] && v[_i]);
-    ntf += (u[_i] && !v[_i]);
-    nft += (!u[_i] && v[_i]);
-    nff += (!u[_i] && !v[_i]);
-  }
-  return ((double)(ntf + nft - ntt + n)) / ((double)(ntf + nft + n));
-}
-
-static inline double sokalsneath_distance_bool(const char *u, const char *v, int n) {
-  int _i = 0;
-  int ntt = 0, nft = 0, ntf = 0;
-  for (_i = 0; _i < n; _i++) {
-    ntt += (u[_i] && v[_i]);
-    ntf += (u[_i] && !v[_i]);
-    nft += (!u[_i] && v[_i]);
-  }
-  return (2.0 * (ntf + nft))/(2.0 * (ntf + nft) + ntt);
-}
-
-static inline double sokalmichener_distance_bool(const char *u, const char *v, int n) {
-  int _i = 0;
-  int ntt = 0, nft = 0, ntf = 0, nff = 0;
-  for (_i = 0; _i < n; _i++) {
-    ntt += (u[_i] && v[_i]);
-    nff += (!u[_i] && !v[_i]);
-    ntf += (u[_i] && !v[_i]);
-    nft += (!u[_i] && v[_i]);
-  }
-  return (2.0 * (ntf + nft))/(2.0 * (ntf + nft) + ntt + nff);
-}
-
-double jaccard_distance(const double *u, const double *v, int n) {
-  int i = 0;
-  double denom = 0.0, num = 0.0;
-  for (i = 0; i < n; i++) {
-    num += (u[i] != v[i]) && ((u[i] != 0.0) || (v[i] != 0.0));
-    denom += (u[i] != 0.0) || (v[i] != 0.0);
-  }
-  return num / denom;
-}
-
-double jaccard_distance_bool(const char *u, const char *v, int n) {
-  int i = 0;
-  double num = 0.0, denom = 0.0;
-  for (i = 0; i < n; i++) {
-    num += (u[i] != v[i]) && ((u[i] != 0) || (v[i] != 0));
-    denom += (u[i] != 0) || (v[i] != 0);
-  }
-  return num / denom;
-}
-
-double dot_product(const double *u, const double *v, int n) {
-  int i;
-  double s = 0.0;
-  for (i = 0; i < n; i++) {
-    s += u[i] * v[i];
-  }
-  return s;
-}
-
-double cosine_distance(const double *u, const double *v, int n,
-		       const double nu, const double nv) {
-  return 1.0 - (dot_product(u, v, n) / (nu * nv));
-}
-
-double seuclidean_distance(const double *var,
-			   const double *u, const double *v, int n) {
-  int i = 0;
-  double s = 0.0, d;
-  for (i = 0; i < n; i++) {
-    d = u[i] - v[i];
-    s = s + (d * d) / var[i];
-  }
-  return sqrt(s);
-}
-
-double city_block_distance(const double *u, const double *v, int n) {
-  int i = 0;
-  double s = 0.0, d;
-  for (i = 0; i < n; i++) {
-    d = fabs(u[i] - v[i]);
-    s = s + d;
-  }
-  return s;
-}
-
-double minkowski_distance(const double *u, const double *v, int n, double p) {
-  int i = 0;
-  double s = 0.0, d;
-  for (i = 0; i < n; i++) {
-    d = fabs(u[i] - v[i]);
-    s = s + pow(d, p);
-  }
-  return pow(s, 1.0 / p);
-}
-
-void compute_mean_vector(double *res, const double *X, int m, int n) {
-  int i, j;
-  const double *v;
-  for (i = 0; i < n; i++) {
-    res[i] = 0.0;
-  }
-  for (j = 0; j < m; j++) {
-
-    v = X + (j * n);
-    for (i = 0; i < n; i++) {
-      res[i] += v[i];
-    }
-  }
-  for (i = 0; i < n; i++) {
-    res[i] /= (double)m;
-  }
-}
-
-void pdist_euclidean(const double *X, double *dm, int m, int n) {
-  int i, j;
-  const double *u, *v;
-  double *it = dm;
-  for (i = 0; i < m; i++) {
-    for (j = i + 1; j < m; j++, it++) {
-      u = X + (n * i);
-      v = X + (n * j);
-      *it = euclidean_distance(u, v, n);
-    }
-  }
-}
-
-void pdist_mahalanobis(const double *X, const double *covinv,
-		       double *dm, int m, int n) {
-  int i, j;
-  const double *u, *v;
-  double *it = dm;
-  double *dimbuf1, *dimbuf2;
-  dimbuf1 = (double*)malloc(sizeof(double) * 2 * n);
-  dimbuf2 = dimbuf1 + n;
-  for (i = 0; i < m; i++) {
-    for (j = i + 1; j < m; j++, it++) {
-      u = X + (n * i);
-      v = X + (n * j);
-      *it = mahalanobis_distance(u, v, covinv, dimbuf1, dimbuf2, n);
-    }
-  }
-  dimbuf2 = 0;
-  free(dimbuf1);
-}
-
-void pdist_bray_curtis(const double *X, double *dm, int m, int n) {
-  int i, j;
-  const double *u, *v;
-  double *it = dm;
-  for (i = 0; i < m; i++) {
-    for (j = i + 1; j < m; j++, it++) {
-      u = X + (n * i);
-      v = X + (n * j);
-      *it = bray_curtis_distance(u, v, n);
-    }
-  }
-}
-
-void pdist_canberra(const double *X, double *dm, int m, int n) {
-  int i, j;
-  const double *u, *v;
-  double *it = dm;
-  for (i = 0; i < m; i++) {
-    for (j = i + 1; j < m; j++, it++) {
-      u = X + (n * i);
-      v = X + (n * j);
-      *it = canberra_distance(u, v, n);
-    }
-  }
-}
-
-void pdist_hamming(const double *X, double *dm, int m, int n) {
-  int i, j;
-  const double *u, *v;
-  double *it = dm;
-  for (i = 0; i < m; i++) {
-    for (j = i + 1; j < m; j++, it++) {
-      u = X + (n * i);
-      v = X + (n * j);
-      *it = hamming_distance(u, v, n);
-    }
-  }
-}
-
-void pdist_hamming_bool(const char *X, double *dm, int m, int n) {
-  int i, j;
-  const char *u, *v;
-  double *it = dm;
-  for (i = 0; i < m; i++) {
-    for (j = i + 1; j < m; j++, it++) {
-      u = X + (n * i);
-      v = X + (n * j);
-      *it = hamming_distance_bool(u, v, n);
-    }
-  }
-}
-
-void pdist_jaccard(const double *X, double *dm, int m, int n) {
-  int i, j;
-  const double *u, *v;
-  double *it = dm;
-  for (i = 0; i < m; i++) {
-    for (j = i + 1; j < m; j++, it++) {
-      u = X + (n * i);
-      v = X + (n * j);
-      *it = jaccard_distance(u, v, n);
-    }
-  }
-}
-
-void pdist_jaccard_bool(const char *X, double *dm, int m, int n) {
-  int i, j;
-  const char *u, *v;
-  double *it = dm;
-  for (i = 0; i < m; i++) {
-    for (j = i + 1; j < m; j++, it++) {
-      u = X + (n * i);
-      v = X + (n * j);
-      *it = jaccard_distance_bool(u, v, n);
-    }
-  }
-}
-
-
-void pdist_chebyshev(const double *X, double *dm, int m, int n) {
-  int i, j;
-  const double *u, *v;
-  double *it = dm;
-  for (i = 0; i < m; i++) {
-    for (j = i + 1; j < m; j++, it++) {
-      u = X + (n * i);
-      v = X + (n * j);
-      *it = chebyshev_distance(u, v, n);
-    }
-  }
-}
-
-void pdist_cosine(const double *X, double *dm, int m, int n, const double *norms) {
-  int i, j;
-  const double *u, *v;
-  double *it = dm;
-  for (i = 0; i < m; i++) {
-    for (j = i + 1; j < m; j++, it++) {
-      u = X + (n * i);
-      v = X + (n * j);
-      *it = cosine_distance(u, v, n, norms[i], norms[j]);
-    }
-  }
-}
-
-void pdist_seuclidean(const double *X, const double *var,
-		     double *dm, int m, int n) {
-  int i, j;
-  const double *u, *v;
-  double *it = dm;
-  for (i = 0; i < m; i++) {
-    for (j = i + 1; j < m; j++, it++) {
-      u = X + (n * i);
-      v = X + (n * j);
-      *it = seuclidean_distance(var, u, v, n);
-    }
-  }
-}
-
-void pdist_city_block(const double *X, double *dm, int m, int n) {
-  int i, j;
-  const double *u, *v;
-  double *it = dm;
-  for (i = 0; i < m; i++) {
-    for (j = i + 1; j < m; j++, it++) {
-      u = X + (n * i);
-      v = X + (n * j);
-      *it = city_block_distance(u, v, n);
-    }
-  }
-}
-
-void pdist_minkowski(const double *X, double *dm, int m, int n, double p) {
-  int i, j;
-  const double *u, *v;
-  double *it = dm;
-  for (i = 0; i < m; i++) {
-    for (j = i + 1; j < m; j++, it++) {
-      u = X + (n * i);
-      v = X + (n * j);
-      *it = minkowski_distance(u, v, n, p);
-    }
-  }
-}
-
-void pdist_yule_bool(const char *X, double *dm, int m, int n) {
-  int i, j;
-  const char *u, *v;
-  double *it = dm;
-  for (i = 0; i < m; i++) {
-    for (j = i + 1; j < m; j++, it++) {
-      u = X + (n * i);
-      v = X + (n * j);
-      *it = yule_distance_bool(u, v, n);
-    }
-  }
-}
-
-void pdist_matching_bool(const char *X, double *dm, int m, int n) {
-  int i, j;
-  const char *u, *v;
-  double *it = dm;
-  for (i = 0; i < m; i++) {
-    for (j = i + 1; j < m; j++, it++) {
-      u = X + (n * i);
-      v = X + (n * j);
-      *it = matching_distance_bool(u, v, n);
-    }
-  }
-}
-
-void pdist_dice_bool(const char *X, double *dm, int m, int n) {
-  int i, j;
-  const char *u, *v;
-  double *it = dm;
-  for (i = 0; i < m; i++) {
-    for (j = i + 1; j < m; j++, it++) {
-      u = X + (n * i);
-      v = X + (n * j);
-      *it = dice_distance_bool(u, v, n);
-    }
-  }
-}
-
-void pdist_rogerstanimoto_bool(const char *X, double *dm, int m, int n) {
-  int i, j;
-  const char *u, *v;
-  double *it = dm;
-  for (i = 0; i < m; i++) {
-    for (j = i + 1; j < m; j++, it++) {
-      u = X + (n * i);
-      v = X + (n * j);
-      *it = rogerstanimoto_distance_bool(u, v, n);
-    }
-  }
-}
-
-void pdist_russellrao_bool(const char *X, double *dm, int m, int n) {
-  int i, j;
-  const char *u, *v;
-  double *it = dm;
-  for (i = 0; i < m; i++) {
-    for (j = i + 1; j < m; j++, it++) {
-      u = X + (n * i);
-      v = X + (n * j);
-      *it = russellrao_distance_bool(u, v, n);
-    }
-  }
-}
-
-void pdist_kulsinski_bool(const char *X, double *dm, int m, int n) {
-  int i, j;
-  const char *u, *v;
-  double *it = dm;
-  for (i = 0; i < m; i++) {
-    for (j = i + 1; j < m; j++, it++) {
-      u = X + (n * i);
-      v = X + (n * j);
-      *it = kulsinski_distance_bool(u, v, n);
-    }
-  }
-}
-
-void pdist_sokalsneath_bool(const char *X, double *dm, int m, int n) {
-  int i, j;
-  const char *u, *v;
-  double *it = dm;
-  for (i = 0; i < m; i++) {
-    for (j = i + 1; j < m; j++, it++) {
-      u = X + (n * i);
-      v = X + (n * j);
-      *it = sokalsneath_distance_bool(u, v, n);
-    }
-  }
-}
-
-void pdist_sokalmichener_bool(const char *X, double *dm, int m, int n) {
-  int i, j;
-  const char *u, *v;
-  double *it = dm;
-  for (i = 0; i < m; i++) {
-    for (j = i + 1; j < m; j++, it++) {
-      u = X + (n * i);
-      v = X + (n * j);
-      *it = sokalmichener_distance_bool(u, v, n);
-    }
-  }
-}
-
 void chopmins(int *ind, int mini, int minj, int np) {
   int i;
   for (i = mini; i < minj - 1; i++) {
@@ -734,7 +167,7 @@
     xi = inds[i];
     cnode *xnd = info->nodes + xi;
     xn = xnd->n;
-    mply = 1.0 / (((double)xn) * rscnt);
+    mply = (double)1.0 / (((double)xn) * rscnt);
     *bit = mply * ((drx * (rc * xn)) + (dsx * (sc * xn)));
   }
   for (i = mini + 1; i < minj; i++, bit++) {
@@ -743,7 +176,7 @@
     xi = inds[i];
     cnode *xnd = info->nodes + xi;
     xn = xnd->n;
-    mply = 1.0 / (((double)xn) * rscnt);
+    mply = (double)1.0 / (((double)xn) * rscnt);
     *bit = mply * ((drx * (rc * xn)) + (dsx * (sc * xn)));
   }
   for (i = minj + 1; i < np; i++, bit++) {
@@ -752,7 +185,7 @@
     xi = inds[i];
     cnode *xnd = info->nodes + xi;
     xn = xnd->n;
-    mply = 1.0 / (((double)xn) * rscnt);
+    mply = (double)1.0 / (((double)xn) * rscnt);
     *bit = mply * ((drx * (rc * xn)) + (dsx * (sc * xn)));
   }
 }
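All of the boolean dissimilarities removed here (relocated to distance.c) are
built from the four pair counts ntt, ntf, nft, nff. A NumPy restatement of
the Yule formula above, as a cross-check:

    import numpy as np

    def yule_check(u, v):
        u = np.asarray(u, dtype=bool)
        v = np.asarray(v, dtype=bool)
        ntt = np.sum(u & v);  ntf = np.sum(u & ~v)
        nft = np.sum(~u & v); nff = np.sum(~u & ~v)
        # 2*ntf*nft / (ntt*nff + ntf*nft), per the removed C routine
        return 2.0 * ntf * nft / float(ntt * nff + ntf * nft)

    yule_check([1, 0, 1, 1], [0, 0, 1, 0])   # -> 0.0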

Modified: branches/refactor_fft/scipy/cluster/src/hierarchy.h
===================================================================
--- branches/refactor_fft/scipy/cluster/src/hierarchy.h	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/cluster/src/hierarchy.h	2008-07-01 04:52:00 UTC (rev 4511)
@@ -34,8 +34,8 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#ifndef _CPY_CLUSTER_H
-#define _CPY_CLUSTER_H
+#ifndef _CPY_HIERARCHY_H
+#define _CPY_HIERARCHY_H
 
 #define CPY_LINKAGE_SINGLE 0
 #define CPY_LINKAGE_COMPLETE 1
@@ -89,35 +89,9 @@
 void dist_to_squareform_from_vector(double *M, const double *v, int n);
 void dist_to_vector_from_squareform(const double *M, double *v, int n);
 
-void pdist_euclidean(const double *X, double *dm, int m, int n);
-void pdist_seuclidean(const double *X,
-		      const double *var, double *dm, int m, int n);
-void pdist_mahalanobis(const double *X, const double *covinv,
-		       double *dm, int m, int n);
-void pdist_bray_curtis(const double *X, double *dm, int m, int n);
-void pdist_canberra(const double *X, double *dm, int m, int n);
-void pdist_hamming(const double *X, double *dm, int m, int n);
-void pdist_hamming_bool(const char *X, double *dm, int m, int n);
-void pdist_city_block(const double *X, double *dm, int m, int n);
-void pdist_cosine(const double *X, double *dm, int m, int n, const double *norms);
-void pdist_chebyshev(const double *X, double *dm, int m, int n);
-void pdist_jaccard(const double *X, double *dm, int m, int n);
-void pdist_jaccard_bool(const char *X, double *dm, int m, int n);
-void pdist_kulsinski_bool(const char *X, double *dm, int m, int n);
-void pdist_minkowski(const double *X, double *dm, int m, int n, double p);
-void pdist_yule_bool(const char *X, double *dm, int m, int n);
-void pdist_matching_bool(const char *X, double *dm, int m, int n);
-void pdist_dice_bool(const char *X, double *dm, int m, int n);
-void pdist_rogerstanimoto_bool(const char *X, double *dm, int m, int n);
-void pdist_russellrao_bool(const char *X, double *dm, int m, int n);
-void pdist_sokalmichener_bool(const char *X, double *dm, int m, int n);
-void pdist_sokalsneath_bool(const char *X, double *dm, int m, int n);
-
 void inconsistency_calculation(const double *Z, double *R, int n, int d);
 void inconsistency_calculation_alt(const double *Z, double *R, int n, int d);
 
-double dot_product(const double *u, const double *v, int n);
-
 void chopmins(int *ind, int mini, int minj, int np);
 void chopmins_ns_i(double *ind, int mini, int np);
 void chopmins_ns_ij(double *ind, int mini, int minj, int np);

Modified: branches/refactor_fft/scipy/cluster/src/hierarchy_wrap.c
===================================================================
--- branches/refactor_fft/scipy/cluster/src/hierarchy_wrap.c	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/cluster/src/hierarchy_wrap.c	2008-07-01 04:52:00 UTC (rev 4511)
@@ -332,18 +332,6 @@
   return Py_BuildValue("d", 0.0);
 }
 
-extern PyObject *dot_product_wrap(PyObject *self, PyObject *args) {
-  PyArrayObject *d1_, *d2_;
-  if (!PyArg_ParseTuple(args, "O!O!",
-			&PyArray_Type, &d1_,
-			&PyArray_Type, &d2_)) {
-    return 0;
-  }
-  return Py_BuildValue("d", dot_product((const double*)d1_->data,
-					(const double*)d2_->data,
-					d1_->dimensions[0]));
-}
-
 extern PyObject *to_squareform_from_vector_wrap(PyObject *self, PyObject *args) {
   PyArrayObject *M_, *v_;
   int n;
@@ -382,459 +370,6 @@
   return Py_BuildValue("d", 0.0);
 }
 
-extern PyObject *pdist_euclidean_wrap(PyObject *self, PyObject *args) {
-  PyArrayObject *X_, *dm_;
-  int m, n;
-  double *dm;
-  const double *X;
-  if (!PyArg_ParseTuple(args, "O!O!",
-			&PyArray_Type, &X_,
-			&PyArray_Type, &dm_)) {
-    return 0;
-  }
-  else {
-    X = (const double*)X_->data;
-    dm = (double*)dm_->data;
-    m = X_->dimensions[0];
-    n = X_->dimensions[1];
-
-    pdist_euclidean(X, dm, m, n);
-  }
-  return Py_BuildValue("d", 0.0);
-}
-
-extern PyObject *pdist_canberra_wrap(PyObject *self, PyObject *args) {
-  PyArrayObject *X_, *dm_;
-  int m, n;
-  double *dm;
-  const double *X;
-  if (!PyArg_ParseTuple(args, "O!O!",
-			&PyArray_Type, &X_,
-			&PyArray_Type, &dm_)) {
-    return 0;
-  }
-  else {
-    X = (const double*)X_->data;
-    dm = (double*)dm_->data;
-    m = X_->dimensions[0];
-    n = X_->dimensions[1];
-
-    pdist_canberra(X, dm, m, n);
-  }
-  return Py_BuildValue("d", 0.0);
-}
-
-extern PyObject *pdist_bray_curtis_wrap(PyObject *self, PyObject *args) {
-  PyArrayObject *X_, *dm_;
-  int m, n;
-  double *dm;
-  const double *X;
-  if (!PyArg_ParseTuple(args, "O!O!",
-			&PyArray_Type, &X_,
-			&PyArray_Type, &dm_)) {
-    return 0;
-  }
-  else {
-    X = (const double*)X_->data;
-    dm = (double*)dm_->data;
-    m = X_->dimensions[0];
-    n = X_->dimensions[1];
-
-    pdist_bray_curtis(X, dm, m, n);
-  }
-  return Py_BuildValue("d", 0.0);
-}
-
-
-extern PyObject *pdist_mahalanobis_wrap(PyObject *self, PyObject *args) {
-  PyArrayObject *X_, *covinv_, *dm_;
-  int m, n;
-  double *dm;
-  const double *X;
-  const double *covinv;
-  if (!PyArg_ParseTuple(args, "O!O!O!",
-			&PyArray_Type, &X_,
-			&PyArray_Type, &covinv_,
-			&PyArray_Type, &dm_)) {
-    return 0;
-  }
-  else {
-    X = (const double*)X_->data;
-    covinv = (const double*)covinv_->data;
-    dm = (double*)dm_->data;
-    m = X_->dimensions[0];
-    n = X_->dimensions[1];
-
-    pdist_mahalanobis(X, covinv, dm, m, n);
-  }
-  return Py_BuildValue("d", 0.0);
-}
-
-
-extern PyObject *pdist_chebyshev_wrap(PyObject *self, PyObject *args) {
-  PyArrayObject *X_, *dm_;
-  int m, n;
-  double *dm;
-  const double *X;
-  if (!PyArg_ParseTuple(args, "O!O!",
-			&PyArray_Type, &X_,
-			&PyArray_Type, &dm_)) {
-    return 0;
-  }
-  else {
-    X = (const double*)X_->data;
-    dm = (double*)dm_->data;
-    m = X_->dimensions[0];
-    n = X_->dimensions[1];
-
-    pdist_chebyshev(X, dm, m, n);
-  }
-  return Py_BuildValue("d", 0.0);
-}
-
-
-extern PyObject *pdist_cosine_wrap(PyObject *self, PyObject *args) {
-  PyArrayObject *X_, *dm_, *norms_;
-  int m, n;
-  double *dm;
-  const double *X, *norms;
-  if (!PyArg_ParseTuple(args, "O!O!O!",
-			&PyArray_Type, &X_,
-			&PyArray_Type, &dm_,
-			&PyArray_Type, &norms_)) {
-    return 0;
-  }
-  else {
-    X = (const double*)X_->data;
-    dm = (double*)dm_->data;
-    norms = (const double*)norms_->data;
-    m = X_->dimensions[0];
-    n = X_->dimensions[1];
-
-    pdist_cosine(X, dm, m, n, norms);
-  }
-  return Py_BuildValue("d", 0.0);
-}
-
-extern PyObject *pdist_seuclidean_wrap(PyObject *self, PyObject *args) {
-  PyArrayObject *X_, *dm_, *var_;
-  int m, n;
-  double *dm;
-  const double *X, *var;
-  if (!PyArg_ParseTuple(args, "O!O!O!",
-			&PyArray_Type, &X_,
-			&PyArray_Type, &var_,
-			&PyArray_Type, &dm_)) {
-    return 0;
-  }
-  else {
-    X = (double*)X_->data;
-    dm = (double*)dm_->data;
-    var = (double*)var_->data;
-    m = X_->dimensions[0];
-    n = X_->dimensions[1];
-
-    pdist_seuclidean(X, var, dm, m, n);
-  }
-  return Py_BuildValue("d", 0.0);
-}
-
-extern PyObject *pdist_city_block_wrap(PyObject *self, PyObject *args) {
-  PyArrayObject *X_, *dm_;
-  int m, n;
-  double *dm;
-  const double *X;
-  if (!PyArg_ParseTuple(args, "O!O!",
-			&PyArray_Type, &X_,
-			&PyArray_Type, &dm_)) {
-    return 0;
-  }
-  else {
-    X = (const double*)X_->data;
-    dm = (double*)dm_->data;
-    m = X_->dimensions[0];
-    n = X_->dimensions[1];
-
-    pdist_city_block(X, dm, m, n);
-  }
-  return Py_BuildValue("d", 0.0);
-}
-
-extern PyObject *pdist_hamming_wrap(PyObject *self, PyObject *args) {
-  PyArrayObject *X_, *dm_;
-  int m, n;
-  double *dm;
-  const double *X;
-  if (!PyArg_ParseTuple(args, "O!O!",
-			&PyArray_Type, &X_,
-			&PyArray_Type, &dm_)) {
-    return 0;
-  }
-  else {
-    X = (const double*)X_->data;
-    dm = (double*)dm_->data;
-    m = X_->dimensions[0];
-    n = X_->dimensions[1];
-
-    pdist_hamming(X, dm, m, n);
-  }
-  return Py_BuildValue("d", 0.0);
-}
-
-extern PyObject *pdist_hamming_bool_wrap(PyObject *self, PyObject *args) {
-  PyArrayObject *X_, *dm_;
-  int m, n;
-  double *dm;
-  const char *X;
-  if (!PyArg_ParseTuple(args, "O!O!",
-			&PyArray_Type, &X_,
-			&PyArray_Type, &dm_)) {
-    return 0;
-  }
-  else {
-    X = (const char*)X_->data;
-    dm = (double*)dm_->data;
-    m = X_->dimensions[0];
-    n = X_->dimensions[1];
-
-    pdist_hamming_bool(X, dm, m, n);
-  }
-  return Py_BuildValue("d", 0.0);
-}
-
-extern PyObject *pdist_jaccard_wrap(PyObject *self, PyObject *args) {
-  PyArrayObject *X_, *dm_;
-  int m, n;
-  double *dm;
-  const double *X;
-  if (!PyArg_ParseTuple(args, "O!O!",
-			&PyArray_Type, &X_,
-			&PyArray_Type, &dm_)) {
-    return 0;
-  }
-  else {
-    X = (const double*)X_->data;
-    dm = (double*)dm_->data;
-    m = X_->dimensions[0];
-    n = X_->dimensions[1];
-
-    pdist_jaccard(X, dm, m, n);
-  }
-  return Py_BuildValue("d", 0.0);
-}
-
-extern PyObject *pdist_jaccard_bool_wrap(PyObject *self, PyObject *args) {
-  PyArrayObject *X_, *dm_;
-  int m, n;
-  double *dm;
-  const char *X;
-  if (!PyArg_ParseTuple(args, "O!O!",
-			&PyArray_Type, &X_,
-			&PyArray_Type, &dm_)) {
-    return 0;
-  }
-  else {
-    X = (const char*)X_->data;
-    dm = (double*)dm_->data;
-    m = X_->dimensions[0];
-    n = X_->dimensions[1];
-
-    pdist_jaccard_bool(X, dm, m, n);
-  }
-  return Py_BuildValue("d", 0.0);
-}
-
-extern PyObject *pdist_minkowski_wrap(PyObject *self, PyObject *args) {
-  PyArrayObject *X_, *dm_;
-  int m, n;
-  double *dm, *X;
-  double p;
-  if (!PyArg_ParseTuple(args, "O!O!d",
-			&PyArray_Type, &X_,
-			&PyArray_Type, &dm_,
-			&p)) {
-    return 0;
-  }
-  else {
-    X = (double*)X_->data;
-    dm = (double*)dm_->data;
-    m = X_->dimensions[0];
-    n = X_->dimensions[1];
-
-    pdist_minkowski(X, dm, m, n, p);
-  }
-  return Py_BuildValue("d", 0.0);
-}
-
-
-extern PyObject *pdist_yule_bool_wrap(PyObject *self, PyObject *args) {
-  PyArrayObject *X_, *dm_;
-  int m, n;
-  double *dm;
-  const char *X;
-  if (!PyArg_ParseTuple(args, "O!O!",
-			&PyArray_Type, &X_,
-			&PyArray_Type, &dm_)) {
-    return 0;
-  }
-  else {
-    X = (const char*)X_->data;
-    dm = (double*)dm_->data;
-    m = X_->dimensions[0];
-    n = X_->dimensions[1];
-
-    pdist_yule_bool(X, dm, m, n);
-  }
-  return Py_BuildValue("");
-}
-
-extern PyObject *pdist_matching_bool_wrap(PyObject *self, PyObject *args) {
-  PyArrayObject *X_, *dm_;
-  int m, n;
-  double *dm;
-  const char *X;
-  if (!PyArg_ParseTuple(args, "O!O!",
-			&PyArray_Type, &X_,
-			&PyArray_Type, &dm_)) {
-    return 0;
-  }
-  else {
-    X = (const char*)X_->data;
-    dm = (double*)dm_->data;
-    m = X_->dimensions[0];
-    n = X_->dimensions[1];
-
-    pdist_matching_bool(X, dm, m, n);
-  }
-  return Py_BuildValue("");
-}
-
-extern PyObject *pdist_dice_bool_wrap(PyObject *self, PyObject *args) {
-  PyArrayObject *X_, *dm_;
-  int m, n;
-  double *dm;
-  const char *X;
-  if (!PyArg_ParseTuple(args, "O!O!",
-			&PyArray_Type, &X_,
-			&PyArray_Type, &dm_)) {
-    return 0;
-  }
-  else {
-    X = (const char*)X_->data;
-    dm = (double*)dm_->data;
-    m = X_->dimensions[0];
-    n = X_->dimensions[1];
-
-    pdist_dice_bool(X, dm, m, n);
-  }
-  return Py_BuildValue("");
-}
-
-extern PyObject *pdist_rogerstanimoto_bool_wrap(PyObject *self, PyObject *args) {
-  PyArrayObject *X_, *dm_;
-  int m, n;
-  double *dm;
-  const char *X;
-  if (!PyArg_ParseTuple(args, "O!O!",
-			&PyArray_Type, &X_,
-			&PyArray_Type, &dm_)) {
-    return 0;
-  }
-  else {
-    X = (const char*)X_->data;
-    dm = (double*)dm_->data;
-    m = X_->dimensions[0];
-    n = X_->dimensions[1];
-
-    pdist_rogerstanimoto_bool(X, dm, m, n);
-  }
-  return Py_BuildValue("");
-}
-
-extern PyObject *pdist_russellrao_bool_wrap(PyObject *self, PyObject *args) {
-  PyArrayObject *X_, *dm_;
-  int m, n;
-  double *dm;
-  const char *X;
-  if (!PyArg_ParseTuple(args, "O!O!",
-			&PyArray_Type, &X_,
-			&PyArray_Type, &dm_)) {
-    return 0;
-  }
-  else {
-    X = (const char*)X_->data;
-    dm = (double*)dm_->data;
-    m = X_->dimensions[0];
-    n = X_->dimensions[1];
-
-    pdist_russellrao_bool(X, dm, m, n);
-  }
-  return Py_BuildValue("");
-}
-
-extern PyObject *pdist_kulsinski_bool_wrap(PyObject *self, PyObject *args) {
-  PyArrayObject *X_, *dm_;
-  int m, n;
-  double *dm;
-  const char *X;
-  if (!PyArg_ParseTuple(args, "O!O!",
-			&PyArray_Type, &X_,
-			&PyArray_Type, &dm_)) {
-    return 0;
-  }
-  else {
-    X = (const char*)X_->data;
-    dm = (double*)dm_->data;
-    m = X_->dimensions[0];
-    n = X_->dimensions[1];
-
-    pdist_kulsinski_bool(X, dm, m, n);
-  }
-  return Py_BuildValue("");
-}
-
-extern PyObject *pdist_sokalmichener_bool_wrap(PyObject *self, PyObject *args) {
-  PyArrayObject *X_, *dm_;
-  int m, n;
-  double *dm;
-  const char *X;
-  if (!PyArg_ParseTuple(args, "O!O!",
-			&PyArray_Type, &X_,
-			&PyArray_Type, &dm_)) {
-    return 0;
-  }
-  else {
-    X = (const char*)X_->data;
-    dm = (double*)dm_->data;
-    m = X_->dimensions[0];
-    n = X_->dimensions[1];
-
-    pdist_sokalmichener_bool(X, dm, m, n);
-  }
-  return Py_BuildValue("");
-}
-
-extern PyObject *pdist_sokalsneath_bool_wrap(PyObject *self, PyObject *args) {
-  PyArrayObject *X_, *dm_;
-  int m, n;
-  double *dm;
-  const char *X;
-  if (!PyArg_ParseTuple(args, "O!O!",
-			&PyArray_Type, &X_,
-			&PyArray_Type, &dm_)) {
-    return 0;
-  }
-  else {
-    X = (const char*)X_->data;
-    dm = (double*)dm_->data;
-    m = X_->dimensions[0];
-    n = X_->dimensions[1];
-
-    pdist_sokalsneath_bool(X, dm, m, n);
-  }
-  return Py_BuildValue("");
-}
-
 extern PyObject *leaders_wrap(PyObject *self, PyObject *args) {
   PyArrayObject *Z_, *T_, *L_, *M_;
   int kk, n, res;
@@ -864,7 +399,6 @@
   {"cluster_maxclust_monocrit_wrap", cluster_maxclust_monocrit_wrap, METH_VARARGS},
   {"cluster_monocrit_wrap", cluster_monocrit_wrap, METH_VARARGS},
   {"cophenetic_distances_wrap", cophenetic_distances_wrap, METH_VARARGS},
-  {"dot_product_wrap", dot_product_wrap, METH_VARARGS},
   {"get_max_dist_for_each_cluster_wrap",
    get_max_dist_for_each_cluster_wrap, METH_VARARGS},
   {"get_max_Rfield_for_each_cluster_wrap",
@@ -873,27 +407,6 @@
   {"leaders_wrap", leaders_wrap, METH_VARARGS},
   {"linkage_euclid_wrap", linkage_euclid_wrap, METH_VARARGS},
   {"linkage_wrap", linkage_wrap, METH_VARARGS},
-  {"pdist_bray_curtis_wrap", pdist_bray_curtis_wrap, METH_VARARGS},
-  {"pdist_canberra_wrap", pdist_canberra_wrap, METH_VARARGS},
-  {"pdist_chebyshev_wrap", pdist_chebyshev_wrap, METH_VARARGS},
-  {"pdist_city_block_wrap", pdist_city_block_wrap, METH_VARARGS},
-  {"pdist_cosine_wrap", pdist_cosine_wrap, METH_VARARGS},
-  {"pdist_dice_bool_wrap", pdist_dice_bool_wrap, METH_VARARGS},
-  {"pdist_euclidean_wrap", pdist_euclidean_wrap, METH_VARARGS},
-  {"pdist_hamming_wrap", pdist_hamming_wrap, METH_VARARGS},
-  {"pdist_hamming_bool_wrap", pdist_hamming_bool_wrap, METH_VARARGS},
-  {"pdist_jaccard_wrap", pdist_jaccard_wrap, METH_VARARGS},
-  {"pdist_jaccard_bool_wrap", pdist_jaccard_bool_wrap, METH_VARARGS},
-  {"pdist_kulsinski_bool_wrap", pdist_kulsinski_bool_wrap, METH_VARARGS},
-  {"pdist_mahalanobis_wrap", pdist_mahalanobis_wrap, METH_VARARGS},
-  {"pdist_matching_bool_wrap", pdist_matching_bool_wrap, METH_VARARGS},
-  {"pdist_minkowski_wrap", pdist_minkowski_wrap, METH_VARARGS},
-  {"pdist_rogerstanimoto_bool_wrap", pdist_rogerstanimoto_bool_wrap, METH_VARARGS},
-  {"pdist_russellrao_bool_wrap", pdist_russellrao_bool_wrap, METH_VARARGS},
-  {"pdist_seuclidean_wrap", pdist_seuclidean_wrap, METH_VARARGS},
-  {"pdist_sokalmichener_bool_wrap", pdist_sokalmichener_bool_wrap, METH_VARARGS},
-  {"pdist_sokalsneath_bool_wrap", pdist_sokalsneath_bool_wrap, METH_VARARGS},
-  {"pdist_yule_bool_wrap", pdist_yule_bool_wrap, METH_VARARGS},
   {"prelist_wrap", prelist_wrap, METH_VARARGS},
   {"to_squareform_from_vector_wrap",
    to_squareform_from_vector_wrap, METH_VARARGS},

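The wrappers above and their method-table entries leave the hierarchy extension because the distance computations now live in the new scipy.cluster.distance module, as the updated imports in test_hierarchy.py below show. A minimal sketch of the relocated Python-level entry point (array shape and metric name are illustrative):

    import numpy
    from scipy.cluster.distance import pdist

    X = numpy.random.rand(5, 3)    # 5 observations, 3 features each
    Y = pdist(X, 'euclidean')      # condensed distances, length 5*4/2 = 10
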
Copied: branches/refactor_fft/scipy/cluster/tests/inconsistent-complete-tdist-depth-1.txt (from rev 4510, trunk/scipy/cluster/tests/inconsistent-complete-tdist-depth-1.txt)

Copied: branches/refactor_fft/scipy/cluster/tests/inconsistent-complete-tdist-depth-2.txt (from rev 4510, trunk/scipy/cluster/tests/inconsistent-complete-tdist-depth-2.txt)

Copied: branches/refactor_fft/scipy/cluster/tests/inconsistent-complete-tdist-depth-3.txt (from rev 4510, trunk/scipy/cluster/tests/inconsistent-complete-tdist-depth-3.txt)

Copied: branches/refactor_fft/scipy/cluster/tests/inconsistent-complete-tdist-depth-4.txt (from rev 4510, trunk/scipy/cluster/tests/inconsistent-complete-tdist-depth-4.txt)

Copied: branches/refactor_fft/scipy/cluster/tests/inconsistent-single-tdist-depth-0.txt (from rev 4510, trunk/scipy/cluster/tests/inconsistent-single-tdist-depth-0.txt)

Copied: branches/refactor_fft/scipy/cluster/tests/inconsistent-single-tdist-depth-1.txt (from rev 4510, trunk/scipy/cluster/tests/inconsistent-single-tdist-depth-1.txt)

Copied: branches/refactor_fft/scipy/cluster/tests/inconsistent-single-tdist-depth-2.txt (from rev 4510, trunk/scipy/cluster/tests/inconsistent-single-tdist-depth-2.txt)

Copied: branches/refactor_fft/scipy/cluster/tests/inconsistent-single-tdist-depth-3.txt (from rev 4510, trunk/scipy/cluster/tests/inconsistent-single-tdist-depth-3.txt)

Copied: branches/refactor_fft/scipy/cluster/tests/inconsistent-single-tdist-depth-4.txt (from rev 4510, trunk/scipy/cluster/tests/inconsistent-single-tdist-depth-4.txt)

Copied: branches/refactor_fft/scipy/cluster/tests/inconsistent-single-tdist-depth-5.txt (from rev 4510, trunk/scipy/cluster/tests/inconsistent-single-tdist-depth-5.txt)

Copied: branches/refactor_fft/scipy/cluster/tests/inconsistent-single-tdist.txt (from rev 4510, trunk/scipy/cluster/tests/inconsistent-single-tdist.txt)

Copied: branches/refactor_fft/scipy/cluster/tests/inconsistent-weighted-tdist-depth-1.txt (from rev 4510, trunk/scipy/cluster/tests/inconsistent-weighted-tdist-depth-1.txt)

Copied: branches/refactor_fft/scipy/cluster/tests/inconsistent-weighted-tdist-depth-2.txt (from rev 4510, trunk/scipy/cluster/tests/inconsistent-weighted-tdist-depth-2.txt)

Copied: branches/refactor_fft/scipy/cluster/tests/inconsistent-weighted-tdist-depth-3.txt (from rev 4510, trunk/scipy/cluster/tests/inconsistent-weighted-tdist-depth-3.txt)

Copied: branches/refactor_fft/scipy/cluster/tests/inconsistent-weighted-tdist-depth-4.txt (from rev 4510, trunk/scipy/cluster/tests/inconsistent-weighted-tdist-depth-4.txt)

Copied: branches/refactor_fft/scipy/cluster/tests/linkage-X.txt (from rev 4510, trunk/scipy/cluster/tests/linkage-X.txt)

Copied: branches/refactor_fft/scipy/cluster/tests/linkage-average-tdist.txt (from rev 4510, trunk/scipy/cluster/tests/linkage-average-tdist.txt)

Copied: branches/refactor_fft/scipy/cluster/tests/linkage-complete-tdist.txt (from rev 4510, trunk/scipy/cluster/tests/linkage-complete-tdist.txt)

Copied: branches/refactor_fft/scipy/cluster/tests/linkage-single-tdist.txt (from rev 4510, trunk/scipy/cluster/tests/linkage-single-tdist.txt)

Copied: branches/refactor_fft/scipy/cluster/tests/linkage-weighted-tdist.txt (from rev 4510, trunk/scipy/cluster/tests/linkage-weighted-tdist.txt)

Copied: branches/refactor_fft/scipy/cluster/tests/random-bool-data.txt (from rev 4510, trunk/scipy/cluster/tests/random-bool-data.txt)

Modified: branches/refactor_fft/scipy/cluster/tests/test_hierarchy.py
===================================================================
--- branches/refactor_fft/scipy/cluster/tests/test_hierarchy.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/cluster/tests/test_hierarchy.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -33,524 +33,178 @@
 # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-
 import sys
 import os.path
-from scipy.testing import *
-from scipy.cluster.hierarchy import pdist
-
 import numpy
-#import math
 
-#from scipy.cluster.hierarchy import pdist, euclidean
+from scipy.testing import *
+from scipy.cluster.hierarchy import squareform, linkage, from_mlab_linkage, numobs_dm, numobs_y, numobs_linkage
+from scipy.cluster.distance import pdist, matching, jaccard, dice, sokalsneath, rogerstanimoto, russellrao, yule
 
-_filenames = ["iris.txt",
-              "pdist-hamming-ml.txt",
-              "pdist-boolean-inp.txt",
-              "pdist-jaccard-ml.txt",
-              "pdist-cityblock-ml-iris.txt",
-              "pdist-minkowski-3.2-ml-iris.txt",
-              "pdist-cityblock-ml.txt",
-              "pdist-correlation-ml-iris.txt",
-              "pdist-minkowski-5.8-ml-iris.txt",
-              "pdist-correlation-ml.txt",
-              "pdist-minkowski-3.2-ml.txt",
-              "pdist-cosine-ml-iris.txt",
-              "pdist-seuclidean-ml-iris.txt",
-              "pdist-cosine-ml.txt",
-              "pdist-seuclidean-ml.txt",
-              "pdist-double-inp.txt",
-              "pdist-spearman-ml.txt",
-              "pdist-euclidean-ml.txt",
-              "pdist-euclidean-ml-iris.txt",
-              "pdist-chebychev-ml.txt",
-              "pdist-chebychev-ml-iris.txt"]
+_tdist = numpy.array([[0,    662,  877,  255,  412,  996],
+                      [662,  0,    295,  468,  268,  400],
+                      [877,  295,  0,    754,  564,  138],
+                      [255,  468,  754,  0,    219,  869],
+                      [412,  268,  564,  219,  0,    669],
+                      [996,  400,  138,  869,  669,  0  ]], dtype='double')
 
-# A hashmap of expected output arrays for the tests. These arrays
-# come from a list of text files, which are read prior to testing.
+_ytdist = squareform(_tdist)
 
+
 eo = {}
 
+_filenames = ["iris.txt",
+              "linkage-single-tdist.txt",
+              "linkage-complete-tdist.txt",
+              "linkage-average-tdist.txt",
+              "linkage-weighted-tdist.txt",
+              "random-bool-data.txt"]
+
+
 def load_testing_files():
     for fn in _filenames:
         name = fn.replace(".txt", "").replace("-ml", "")
         fqfn = os.path.join(os.path.dirname(__file__), fn)
         eo[name] = numpy.loadtxt(open(fqfn))
         #print "%s: %s   %s" % (name, str(eo[name].shape), str(eo[name].dtype))
-    eo['pdist-boolean-inp'] = numpy.bool_(eo['pdist-boolean-inp'])
+    #eo['pdist-boolean-inp'] = numpy.bool_(eo['pdist-boolean-inp'])
 
 load_testing_files()
 
-#print eo.keys()
+class TestSquareForm(TestCase):
 
+    ################### squareform
+    def test_squareform_empty_matrix(self):
+        "Tests squareform on an empty matrix."
+        A = numpy.zeros((0,0))
+        rA = squareform(numpy.array(A, dtype='double'))
+        self.failUnless(rA.shape == (0,))
 
-#print numpy.abs(Y_test2 - Y_right).max()
-#print numpy.abs(Y_test1 - Y_right).max()
+    def test_squareform_empty_vector(self):
+        "Tests squareform on an empty vector."
+        v = numpy.zeros((0,))
+        rv = squareform(numpy.array(v, dtype='double'))
+        self.failUnless(rv.shape == (1,1))
+        self.failUnless(rv[0, 0] == 0)
 
-class TestPdist(TestCase):
+    def test_squareform_1by1_matrix(self):
+        "Tests squareform on a 1x1 matrix."
+        A = numpy.zeros((1,1))
+        rA = squareform(numpy.array(A, dtype='double'))
+        self.failUnless(rA.shape == (0,))
 
-    def test_pdist_raises_type_error_float32(self):
-        "Testing whether passing a float32 observation array generates an exception."
-        X = numpy.zeros((10, 10), dtype=numpy.float32)
-        try:
-            pdist(X, 'euclidean')
-        except TypeError:
-            pass
-        except:
-            self.fail("float32 observation matrices should generate an error in pdist.")
+    def test_squareform_one_vector(self):
+        "Tests squareform on a 1-D array, length=1."
+        v = numpy.ones((1,)) * 8.3
+        rv = squareform(numpy.array(v, dtype='double'))
+        self.failUnless(rv.shape == (2,2))
+        self.failUnless(rv[0,1] == 8.3)
+        self.failUnless(rv[1,0] == 8.3)
 
-    def test_pdist_raises_type_error_longdouble(self):
-        "Testing whether passing a longdouble observation array generates an exception."
-        X = numpy.zeros((10, 10), dtype=numpy.longdouble)
-        try:
-            pdist(X, 'euclidean')
-        except TypeError:
-            pass
-        except:
-            self.fail("longdouble observation matrices should generate an error in pdist.")
+    def test_squareform_2by2_matrix(self):
+        "Tests squareform on a 2x2 matrix."
+        A = numpy.zeros((2,2))
+        A[0,1]=0.8
+        A[1,0]=0.8
+        rA = squareform(numpy.array(A, dtype='double'))
+        self.failUnless(rA.shape == (1,))
+        self.failUnless(rA[0] == 0.8)
 
-    def test_pdist_var_raises_type_error_float32(self):
-        "Testing whether passing a float32 variance matrix generates an exception."
-        X = numpy.zeros((10, 10))
-        V = numpy.zeros((10, 10), dtype=numpy.float32)
-        try:
-            pdist(X, 'seuclidean', V=V)
-        except TypeError:
-            pass
-        except:
-            self.fail("float32 V matrices should generate an error in pdist('seuclidean').")
+    def test_squareform_multi_matrix(self):
+        "Tests squareform on a square matrices of multiple sizes."
+        for n in xrange(2, 5):
+            yield self.check_squareform_multi_matrix(n)
 
-    def test_pdist_var_raises_type_error_longdouble(self):
-        "Testing whether passing a longdouble variance matrix generates an exception."
-        X = numpy.zeros((10, 10))
-        V = numpy.zeros((10, 10), dtype=numpy.longdouble)
+    def check_squareform_multi_matrix(self, n):
+        X = numpy.random.rand(n, 4)
+        Y = pdist(X)
+        self.failUnless(len(Y.shape) == 1)
+        A = squareform(Y)
+        Yr = squareform(A)
+        s = A.shape
+        k = 0
+        #print A.shape, Y.shape, Yr.shape
+        self.failUnless(len(s) == 2)
+        self.failUnless(len(Yr.shape) == 1)
+        self.failUnless(s[0] == s[1])
+        for i in xrange(0, s[0]):
+            for j in xrange(i+1, s[1]):
+                if i != j:
+                    #print i, j, k, A[i, j], Y[k]
+                    self.failUnless(A[i, j] == Y[k])
+                    k += 1
+                else:
+                    self.failUnless(A[i, j] == 0)
 
-        try:
-            pdist(X, 'seuclidean', V=V)
-        except TypeError:
-            pass
-        except:
-            self.fail("longdouble matrices should generate an error in pdist('seuclidean').")
+class TestNumObs(TestCase):
 
-    def test_pdist_ivar_raises_type_error_float32(self):
-        "Testing whether passing a float32 variance matrix generates an exception."
-        X = numpy.zeros((10, 10))
-        VI = numpy.zeros((10, 10), dtype=numpy.float32)
-        try:
-            pdist(X, 'mahalanobis', VI=VI)
-        except TypeError:
-            pass
-        except:
-            self.fail("float32 matrices should generate an error in pdist('mahalanobis').")
+    ############## numobs_dm
+    def test_numobs_dm_multi_matrix(self):
+        "Tests numobs_dm with observation matrices of multiple sizes."
+        for n in xrange(1, 10):
+            X = numpy.random.rand(n, 4)
+            Y = pdist(X)
+            A = squareform(Y)
+            #print A.shape, Y.shape
+            self.failUnless(numobs_dm(A) == n)
 
-    def test_pdist_ivar_raises_type_error_longdouble(self):
-        "Testing whether passing a longdouble variance matrix generates an exception."
-        X = numpy.zeros((10, 10))
-        VI = numpy.zeros((10, 10), dtype=numpy.longdouble)
+    def test_numobs_y_multi_matrix(self):
+        "Tests numobs_y with observation matrices of multiple sizes."
+        for n in xrange(2, 10):
+            X = numpy.random.rand(n, 4)
+            Y = pdist(X)
+            #print A.shape, Y.shape, Yr.shape
+            self.failUnless(numobs_y(Y) == n)
 
-        try:
-            pdist(X, 'mahalanobis', VI=VI)
-        except TypeError:
-            pass
-        except:
-            self.fail("longdouble matrices should generate an error in pdist('mahalanobis').")
+    def test_numobs_linkage_multi_matrix(self):
+        "Tests numobs_linkage with observation matrices of multiple sizes."
+        for n in xrange(2, 10):
+            X = numpy.random.rand(n, 4)
+            Y = pdist(X)
+            Z = linkage(Y)
+            #print Z
+            #print A.shape, Y.shape, Yr.shape
+            self.failUnless(numobs_linkage(Z) == n)
 
-    ################### pdist: euclidean
-    def test_pdist_euclidean_random(self):
-        "Tests pdist(X, 'euclidean') on random data."
-        eps = 1e-07
-        # Get the data: the input matrix and the right output.
-        X = eo['pdist-double-inp']
-        Y_right = eo['pdist-euclidean']
+class TestLinkage(TestCase):
 
-        Y_test1 = pdist(X, 'euclidean')
-        self.failUnless(within_tol(Y_test1, Y_right, eps))
+    ################### linkage
+    def test_linkage_single_tdist(self):
+        "Tests linkage(Y, 'single') on the tdist data set."
+        Z = linkage(_ytdist, 'single')
+        Zmlab = eo['linkage-single-tdist']
+        eps = 1e-10
+        expectedZ = from_mlab_linkage(Zmlab)
+        self.failUnless(within_tol(Z, expectedZ, eps))
 
-    def test_pdist_euclidean_random_nonC(self):
-        "Tests pdist(X, 'test_euclidean') [the non-C implementation] on random data."
-        eps = 1e-07
-        # Get the data: the input matrix and the right output.
-        X = eo['pdist-double-inp']
-        Y_right = eo['pdist-euclidean']
-        Y_test2 = pdist(X, 'test_euclidean')
-        self.failUnless(within_tol(Y_test2, Y_right, eps))
+    def test_linkage_complete_tdist(self):
+        "Tests linkage(Y, 'complete') on the tdist data set."
+        Z = linkage(_ytdist, 'complete')
+        Zmlab = eo['linkage-complete-tdist']
+        eps = 1e-10
+        expectedZ = from_mlab_linkage(Zmlab)
+        self.failUnless(within_tol(Z, expectedZ, eps))
 
-    def test_pdist_euclidean_iris(self):
-        "Tests pdist(X, 'euclidean') on the Iris data set."
-        eps = 1e-07
-        # Get the data: the input matrix and the right output.
-        X = eo['iris']
-        Y_right = eo['pdist-euclidean-iris']
-
-        Y_test1 = pdist(X, 'euclidean')
-        self.failUnless(within_tol(Y_test1, Y_right, eps))
-
-    def test_pdist_euclidean_iris_nonC(self):
-        "Tests pdist(X, 'test_euclidean') [the non-C implementation] on the Iris data set."
-        eps = 1e-07
-        # Get the data: the input matrix and the right output.
-        X = eo['iris']
-        Y_right = eo['pdist-euclidean-iris']
-        Y_test2 = pdist(X, 'test_euclidean')
-        self.failUnless(within_tol(Y_test2, Y_right, eps))
-
-    ################### pdist: seuclidean
-    def test_pdist_seuclidean_random(self):
-        "Tests pdist(X, 'seuclidean') on random data."
+    def test_linkage_average_tdist(self):
+        "Tests linkage(Y, 'average') on the tdist data set."
+        Z = linkage(_ytdist, 'average')
+        Zmlab = eo['linkage-average-tdist']
         eps = 1e-05
-        # Get the data: the input matrix and the right output.
-        X = eo['pdist-double-inp']
-        Y_right = eo['pdist-seuclidean']
+        expectedZ = from_mlab_linkage(Zmlab)
+        #print Z, expectedZ, numpy.abs(Z - expectedZ).max()
+        self.failUnless(within_tol(Z, expectedZ, eps))
 
-        Y_test1 = pdist(X, 'seuclidean')
-        self.failUnless(within_tol(Y_test1, Y_right, eps))
+    def test_linkage_weighted_tdist(self):
+        "Tests linkage(Y, 'weighted') on the tdist data set."
+        Z = linkage(_ytdist, 'weighted')
+        Zmlab = eo['linkage-weighted-tdist']
+        eps = 1e-10
+        expectedZ = from_mlab_linkage(Zmlab)
+        #print Z, expectedZ, numpy.abs(Z - expectedZ).max()
+        self.failUnless(within_tol(Z, expectedZ, eps))
 
-    def test_pdist_seuclidean_random_nonC(self):
-        "Tests pdist(X, 'test_sqeuclidean') [the non-C implementation] on random data."
-        eps = 1e-05
-        # Get the data: the input matrix and the right output.
-        X = eo['pdist-double-inp']
-        Y_right = eo['pdist-seuclidean']
-        Y_test2 = pdist(X, 'test_sqeuclidean')
-        self.failUnless(within_tol(Y_test2, Y_right, eps))
-
-    def test_pdist_seuclidean_iris(self):
-        "Tests pdist(X, 'seuclidean') on the Iris data set."
-        eps = 1e-05
-        # Get the data: the input matrix and the right output.
-        X = eo['iris']
-        Y_right = eo['pdist-seuclidean-iris']
-
-        Y_test1 = pdist(X, 'seuclidean')
-        self.failUnless(within_tol(Y_test1, Y_right, eps))
-
-    def test_pdist_seuclidean_iris_nonC(self):
-        "Tests pdist(X, 'test_seuclidean') [the non-C implementation] on the Iris data set."
-        eps = 1e-05
-        # Get the data: the input matrix and the right output.
-        X = eo['iris']
-        Y_right = eo['pdist-seuclidean-iris']
-        Y_test2 = pdist(X, 'test_sqeuclidean')
-        self.failUnless(within_tol(Y_test2, Y_right, eps))
-
-    ################### pdist: cosine
-    def test_pdist_cosine_random(self):
-        "Tests pdist(X, 'cosine') on random data."
-        eps = 1e-08
-        # Get the data: the input matrix and the right output.
-        X = eo['pdist-double-inp']
-        Y_right = eo['pdist-cosine']
-
-        Y_test1 = pdist(X, 'cosine')
-        self.failUnless(within_tol(Y_test1, Y_right, eps))
-
-    def test_pdist_cosine_random_nonC(self):
-        "Tests pdist(X, 'test_cosine') [the non-C implementation] on random data."
-        eps = 1e-08
-        # Get the data: the input matrix and the right output.
-        X = eo['pdist-double-inp']
-        Y_right = eo['pdist-cosine']
-        Y_test2 = pdist(X, 'test_cosine')
-        self.failUnless(within_tol(Y_test2, Y_right, eps))
-
-    def test_pdist_cosine_iris(self):
-        "Tests pdist(X, 'cosine') on the Iris data set."
-        eps = 1e-08
-        # Get the data: the input matrix and the right output.
-        X = eo['iris']
-        Y_right = eo['pdist-cosine-iris']
-
-        Y_test1 = pdist(X, 'cosine')
-        self.failUnless(within_tol(Y_test1, Y_right, eps))
-        #print "cosine-iris", numpy.abs(Y_test1 - Y_right).max()
-
-    def test_pdist_cosine_iris_nonC(self):
-        "Tests pdist(X, 'test_cosine') [the non-C implementation] on the Iris data set."
-        eps = 1e-08
-        # Get the data: the input matrix and the right output.
-        X = eo['iris']
-        Y_right = eo['pdist-cosine-iris']
-        Y_test2 = pdist(X, 'test_cosine')
-        self.failUnless(within_tol(Y_test2, Y_right, eps))
-
-    ################### pdist: cityblock
-    def test_pdist_cityblock_random(self):
-        "Tests pdist(X, 'cityblock') on random data."
-        eps = 1e-06
-        # Get the data: the input matrix and the right output.
-        X = eo['pdist-double-inp']
-        Y_right = eo['pdist-cityblock']
-
-        Y_test1 = pdist(X, 'cityblock')
-        #print "cityblock", numpy.abs(Y_test1 - Y_right).max()
-        self.failUnless(within_tol(Y_test1, Y_right, eps))
-
-    def test_pdist_cityblock_random_nonC(self):
-        "Tests pdist(X, 'test_cityblock') [the non-C implementation] on random data."
-        eps = 1e-06
-        # Get the data: the input matrix and the right output.
-        X = eo['pdist-double-inp']
-        Y_right = eo['pdist-cityblock']
-        Y_test2 = pdist(X, 'test_cityblock')
-        self.failUnless(within_tol(Y_test2, Y_right, eps))
-
-    def test_pdist_cityblock_iris(self):
-        "Tests pdist(X, 'cityblock') on the Iris data set."
-        eps = 1e-14
-        # Get the data: the input matrix and the right output.
-        X = eo['iris']
-        Y_right = eo['pdist-cityblock-iris']
-
-        Y_test1 = pdist(X, 'cityblock')
-        self.failUnless(within_tol(Y_test1, Y_right, eps))
-        #print "cityblock-iris", numpy.abs(Y_test1 - Y_right).max()
-
-    def test_pdist_cityblock_iris_nonC(self):
-        "Tests pdist(X, 'test_cityblock') [the non-C implementation] on the Iris data set."
-        eps = 1e-14
-        # Get the data: the input matrix and the right output.
-        X = eo['iris']
-        Y_right = eo['pdist-cityblock-iris']
-        Y_test2 = pdist(X, 'test_cityblock')
-        self.failUnless(within_tol(Y_test2, Y_right, eps))
-
-    ################### pdist: correlation
-    def test_pdist_correlation_random(self):
-        "Tests pdist(X, 'correlation') on random data."
-        eps = 1e-07
-        # Get the data: the input matrix and the right output.
-        X = eo['pdist-double-inp']
-        Y_right = eo['pdist-correlation']
-
-        Y_test1 = pdist(X, 'correlation')
-        #print "correlation", numpy.abs(Y_test1 - Y_right).max()
-        self.failUnless(within_tol(Y_test1, Y_right, eps))
-
-    def test_pdist_correlation_random_nonC(self):
-        "Tests pdist(X, 'test_correlation') [the non-C implementation] on random data."
-        eps = 1e-07
-        # Get the data: the input matrix and the right output.
-        X = eo['pdist-double-inp']
-        Y_right = eo['pdist-correlation']
-        Y_test2 = pdist(X, 'test_correlation')
-        self.failUnless(within_tol(Y_test2, Y_right, eps))
-
-    def test_pdist_correlation_iris(self):
-        "Tests pdist(X, 'correlation') on the Iris data set."
-        eps = 1e-08
-        # Get the data: the input matrix and the right output.
-        X = eo['iris']
-        Y_right = eo['pdist-correlation-iris']
-
-        Y_test1 = pdist(X, 'correlation')
-        #print "correlation-iris", numpy.abs(Y_test1 - Y_right).max()
-        self.failUnless(within_tol(Y_test1, Y_right, eps))
-
-    def test_pdist_correlation_iris_nonC(self):
-        "Tests pdist(X, 'test_correlation') [the non-C implementation] on the Iris data set."
-        eps = 1e-08
-        # Get the data: the input matrix and the right output.
-        X = eo['iris']
-        Y_right = eo['pdist-correlation-iris']
-        Y_test2 = pdist(X, 'test_correlation')
-        #print "test-correlation-iris", numpy.abs(Y_test2 - Y_right).max()
-        self.failUnless(within_tol(Y_test2, Y_right, eps))
-
-    ################# minkowski
-
-    def test_pdist_minkowski_random(self):
-        "Tests pdist(X, 'minkowski') on random data."
-        eps = 1e-05
-        # Get the data: the input matrix and the right output.
-        X = eo['pdist-double-inp']
-        Y_right = eo['pdist-minkowski-3.2']
-
-        Y_test1 = pdist(X, 'minkowski', 3.2)
-        #print "minkowski", numpy.abs(Y_test1 - Y_right).max()
-        self.failUnless(within_tol(Y_test1, Y_right, eps))
-
-    def test_pdist_minkowski_random_nonC(self):
-        "Tests pdist(X, 'test_minkowski') [the non-C implementation] on random data."
-        eps = 1e-05
-        # Get the data: the input matrix and the right output.
-        X = eo['pdist-double-inp']
-        Y_right = eo['pdist-minkowski-3.2']
-        Y_test2 = pdist(X, 'test_minkowski', 3.2)
-        self.failUnless(within_tol(Y_test2, Y_right, eps))
-
-    def test_pdist_minkowski_iris(self):
-        "Tests pdist(X, 'minkowski') on iris data."
-        eps = 1e-07
-        # Get the data: the input matrix and the right output.
-        X = eo['iris']
-        Y_right = eo['pdist-minkowski-3.2-iris']
-
-        Y_test1 = pdist(X, 'minkowski', 3.2)
-        #print "minkowski-iris-3.2", numpy.abs(Y_test1 - Y_right).max()
-        self.failUnless(within_tol(Y_test1, Y_right, eps))
-
-    def test_pdist_minkowski_iris_nonC(self):
-        "Tests pdist(X, 'test_minkowski') [the non-C implementation] on iris data."
-        eps = 1e-07
-        # Get the data: the input matrix and the right output.
-        X = eo['iris']
-        Y_right = eo['pdist-minkowski-3.2-iris']
-        Y_test2 = pdist(X, 'test_minkowski', 3.2)
-        self.failUnless(within_tol(Y_test2, Y_right, eps))
-
-    def test_pdist_minkowski_iris(self):
-        "Tests pdist(X, 'minkowski') on iris data."
-        eps = 1e-07
-        # Get the data: the input matrix and the right output.
-        X = eo['iris']
-        Y_right = eo['pdist-minkowski-5.8-iris']
-
-        Y_test1 = pdist(X, 'minkowski', 5.8)
-        #print "minkowski-iris-5.8", numpy.abs(Y_test1 - Y_right).max()
-        self.failUnless(within_tol(Y_test1, Y_right, eps))
-
-    def test_pdist_minkowski_iris_nonC(self):
-        "Tests pdist(X, 'test_minkowski') [the non-C implementation] on iris data."
-        eps = 1e-07
-        # Get the data: the input matrix and the right output.
-        X = eo['iris']
-        Y_right = eo['pdist-minkowski-5.8-iris']
-        Y_test2 = pdist(X, 'test_minkowski', 5.8)
-        self.failUnless(within_tol(Y_test2, Y_right, eps))
-
-    ################### pdist: hamming
-    def test_pdist_hamming_random(self):
-        "Tests pdist(X, 'hamming') on random data."
-        eps = 1e-07
-        # Get the data: the input matrix and the right output.
-        X = eo['pdist-boolean-inp']
-        Y_right = eo['pdist-hamming']
-
-        Y_test1 = pdist(X, 'hamming')
-        #print "hamming", numpy.abs(Y_test1 - Y_right).max()
-        self.failUnless(within_tol(Y_test1, Y_right, eps))
-
-    def test_pdist_hamming_random_nonC(self):
-        "Tests pdist(X, 'test_hamming') [the non-C implementation] on random data."
-        eps = 1e-07
-        # Get the data: the input matrix and the right output.
-        X = eo['pdist-boolean-inp']
-        Y_right = eo['pdist-hamming']
-        Y_test2 = pdist(X, 'test_hamming')
-        #print "test-hamming", numpy.abs(Y_test2 - Y_right).max()
-        self.failUnless(within_tol(Y_test2, Y_right, eps))
-
-    ################### pdist: hamming (double)
-    def test_pdist_dhamming_random(self):
-        "Tests pdist(X, 'hamming') on random data."
-        eps = 1e-07
-        # Get the data: the input matrix and the right output.
-        X = numpy.float64(eo['pdist-boolean-inp'])
-        Y_right = eo['pdist-hamming']
-
-        Y_test1 = pdist(X, 'hamming')
-        #print "hamming", numpy.abs(Y_test1 - Y_right).max()
-        self.failUnless(within_tol(Y_test1, Y_right, eps))
-
-    def test_pdist_dhamming_random_nonC(self):
-        "Tests pdist(X, 'test_hamming') [the non-C implementation] on random data."
-        eps = 1e-07
-        # Get the data: the input matrix and the right output.
-        X = numpy.float64(eo['pdist-boolean-inp'])
-        Y_right = eo['pdist-hamming']
-        Y_test2 = pdist(X, 'test_hamming')
-        #print "test-hamming", numpy.abs(Y_test2 - Y_right).max()
-        self.failUnless(within_tol(Y_test2, Y_right, eps))
-
-    ################### pdist: jaccard
-    def test_pdist_jaccard_random(self):
-        "Tests pdist(X, 'jaccard') on random data."
-        eps = 1e-08
-        # Get the data: the input matrix and the right output.
-        X = eo['pdist-boolean-inp']
-        Y_right = eo['pdist-jaccard']
-
-        Y_test1 = pdist(X, 'jaccard')
-        #print "jaccard", numpy.abs(Y_test1 - Y_right).max()
-        self.failUnless(within_tol(Y_test1, Y_right, eps))
-
-    def test_pdist_jaccard_random_nonC(self):
-        "Tests pdist(X, 'test_jaccard') [the non-C implementation] on random data."
-        eps = 1e-08
-        # Get the data: the input matrix and the right output.
-        X = eo['pdist-boolean-inp']
-        Y_right = eo['pdist-jaccard']
-        Y_test2 = pdist(X, 'test_jaccard')
-        #print "test-jaccard", numpy.abs(Y_test2 - Y_right).max()
-        self.failUnless(within_tol(Y_test2, Y_right, eps))
-
-    ################### pdist: jaccard (double)
-    def test_pdist_djaccard_random(self):
-        "Tests pdist(X, 'jaccard') on random data."
-        eps = 1e-08
-        # Get the data: the input matrix and the right output.
-        X = numpy.float64(eo['pdist-boolean-inp'])
-        Y_right = eo['pdist-jaccard']
-
-        Y_test1 = pdist(X, 'jaccard')
-        #print "jaccard", numpy.abs(Y_test1 - Y_right).max()
-        self.failUnless(within_tol(Y_test1, Y_right, eps))
-
-    def test_pdist_djaccard_random_nonC(self):
-        "Tests pdist(X, 'test_jaccard') [the non-C implementation] on random data."
-        eps = 1e-08
-        # Get the data: the input matrix and the right output.
-        X = numpy.float64(eo['pdist-boolean-inp'])
-        Y_right = eo['pdist-jaccard']
-        Y_test2 = pdist(X, 'test_jaccard')
-        #print "test-jaccard", numpy.abs(Y_test2 - Y_right).max()
-        self.failUnless(within_tol(Y_test2, Y_right, eps))
-
-    ################### pdist: chebychev
-    def test_pdist_chebychev_random(self):
-        "Tests pdist(X, 'chebychev') on random data."
-        eps = 1e-08
-        # Get the data: the input matrix and the right output.
-        X = eo['pdist-double-inp']
-        Y_right = eo['pdist-chebychev']
-
-        Y_test1 = pdist(X, 'chebychev')
-        #print "chebychev", numpy.abs(Y_test1 - Y_right).max()
-        self.failUnless(within_tol(Y_test1, Y_right, eps))
-
-    def test_pdist_chebychev_random_nonC(self):
-        "Tests pdist(X, 'test_chebychev') [the non-C implementation] on random data."
-        eps = 1e-08
-        # Get the data: the input matrix and the right output.
-        X = eo['pdist-double-inp']
-        Y_right = eo['pdist-chebychev']
-        Y_test2 = pdist(X, 'test_chebychev')
-        #print "test-chebychev", numpy.abs(Y_test2 - Y_right).max()
-        self.failUnless(within_tol(Y_test2, Y_right, eps))
-
-    def test_pdist_chebychev_iris(self):
-        "Tests pdist(X, 'chebychev') on the Iris data set."
-        eps = 1e-15
-        # Get the data: the input matrix and the right output.
-        X = eo['iris']
-        Y_right = eo['pdist-chebychev-iris']
-
-        Y_test1 = pdist(X, 'chebychev')
-        #print "chebychev-iris", numpy.abs(Y_test1 - Y_right).max()
-        self.failUnless(within_tol(Y_test1, Y_right, eps))
-
-    def test_pdist_chebychev_iris_nonC(self):
-        "Tests pdist(X, 'test_chebychev') [the non-C implementation] on the Iris data set."
-        eps = 1e-15
-        # Get the data: the input matrix and the right output.
-        X = eo['iris']
-        Y_right = eo['pdist-chebychev-iris']
-        Y_test2 = pdist(X, 'test_chebychev')
-        #print "test-chebychev-iris", numpy.abs(Y_test2 - Y_right).max()
-        self.failUnless(within_tol(Y_test2, Y_right, eps))
-
 def within_tol(a, b, tol):
     return numpy.abs(a - b).max() < tol
 
 if __name__ == "__main__":
     nose.run(argv=['', __file__])
+

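The rewritten hierarchy tests drive squareform in both directions: a square, symmetric, zero-diagonal matrix condenses to the vector of its upper-triangle entries, and that vector expands back. A small worked round trip of the kind check_squareform_multi_matrix verifies (values illustrative):

    import numpy
    from scipy.cluster.hierarchy import squareform

    D = numpy.array([[0., 1., 2.],
                     [1., 0., 3.],
                     [2., 3., 0.]])
    y = squareform(D)                  # array([ 1.,  2.,  3.])
    assert (squareform(y) == D).all()  # round trip restores the matrix
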
Modified: branches/refactor_fft/scipy/cluster/tests/test_vq.py
===================================================================
--- branches/refactor_fft/scipy/cluster/tests/test_vq.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/cluster/tests/test_vq.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -1,7 +1,7 @@
 #! /usr/bin/env python
 
 # David Cournapeau
-# Last Change: Tue Jul 03 08:00 PM 2007 J
+# Last Change: Tue Jun 24 04:00 PM 2008 J
 
 # For now, just copy the tests from sandbox.pyem, so we can check that
 # kmeans works OK for trivial examples.
@@ -152,5 +152,34 @@
         kmeans2(data, 3, minit = 'random')
         kmeans2(data, 3, minit = 'points')
 
+    def test_kmeans2_empty(self):
+        """Ticket #505."""
+        try:
+            kmeans2([], 2)
+            raise AssertionError("This should not succeed.")
+        except ValueError, e:
+            # OK, that's what we expect
+            pass
+
+    def test_kmeans_0k(self):
+        """Regression test for #546: fail when k arg is 0."""
+        try:
+            kmeans(X, 0)
+            raise AssertionError("kmeans with 0 clusters should fail.")
+        except ValueError:
+            pass
+
+        try:
+            kmeans2(X, 0)
+            raise AssertionError("kmeans2 with 0 clusters should fail.")
+        except ValueError:
+            pass
+
+        try:
+            kmeans2(X, N.array([]))
+            raise AssertionError("kmeans2 with 0 clusters should fail.")
+        except ValueError:
+            pass
+
 if __name__ == "__main__":
     nose.run(argv=['', __file__])

Modified: branches/refactor_fft/scipy/cluster/vq.py
===================================================================
--- branches/refactor_fft/scipy/cluster/vq.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/cluster/vq.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -370,10 +370,10 @@
     """
 
     code_book = array(guess, copy = True)
-    nc = code_book.shape[0]
     avg_dist = []
     diff = thresh+1.
     while diff > thresh:
+        nc = code_book.shape[0]
         #compute membership and distances between obs and code_book
         obs_code, distort = vq(obs, code_book)
         avg_dist.append(mean(distort, axis=-1))
@@ -479,13 +479,17 @@
         raise ValueError, 'iter must be >= to 1.'
     if type(k_or_guess) == type(array([])):
         guess = k_or_guess
+        if guess.size < 1:
+            raise ValueError("Asked for 0 cluster ? initial book was %s" % \
+                             guess)
         result = _kmeans(obs, guess, thresh = thresh)
     else:
         #initialize best distance value to a large value
         best_dist = 100000
         No = obs.shape[0]
         k = k_or_guess
-        #print 'kmeans iter: ',
+        if k < 1:
+            raise ValueError("Asked for 0 cluster ? ")
         for i in range(iter):
             #the intial code book is randomly selected from observations
             guess = take(obs, randint(0, No, k), 0)
@@ -633,6 +637,9 @@
     else:
         raise ValueError("Input of rank > 2 not supported")
 
+    if N.size(data) < 1:
+        raise ValueError("Input has 0 items.")
+
     # If k is not a single value, then it should be compatible with data's
     # shape
     if N.size(k) > 1 or minit == 'matrix':
@@ -647,7 +654,14 @@
                         data")
         clusters = k.copy()
     else:
-        nc = int(k)
+        try:
+            nc = int(k)
+        except TypeError:
+            raise ValueError("k (%s) could not be converted to an integer " % str(k))
+
+        if nc < 1:
+            raise ValueError("kmeans2 for 0 clusters ? (k was %s)" % str(k))
+
         if not nc == k:
             warnings.warn("k was not an integer, was converted.")
         try:

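Two separate vq.py fixes are visible here: nc is re-read on each pass of the while loop so the cluster count tracks the current size of the code book, and both kmeans entry points now reject k < 1 up front, matching the new regression tests above. A usage sketch under those assumptions (data is illustrative):

    import numpy
    from scipy.cluster.vq import kmeans, whiten

    obs = whiten(numpy.random.rand(30, 2))
    codebook, distortion = kmeans(obs, 3)  # k >= 1 is accepted as before
    # kmeans(obs, 0) now raises ValueError instead of failing obscurely
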
Copied: branches/refactor_fft/scipy/fftpack/SConscript (from rev 4510, trunk/scipy/fftpack/SConscript)

Copied: branches/refactor_fft/scipy/fftpack/SConstruct (from rev 4510, trunk/scipy/fftpack/SConstruct)

Copied: branches/refactor_fft/scipy/integrate/SConscript (from rev 4510, trunk/scipy/integrate/SConscript)

Deleted: branches/refactor_fft/scipy/integrate/SConstruct
===================================================================
--- branches/refactor_fft/scipy/integrate/SConstruct	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/integrate/SConstruct	2008-07-01 04:52:00 UTC (rev 4511)
@@ -1,61 +0,0 @@
-# Last Change: Sat May 03 02:00 PM 2008 J
-# vim:syntax=python
-from os.path import join as pjoin
-import warnings
-
-from numpy.distutils.misc_util import get_numpy_include_dirs
-from numscons import GetNumpyEnvironment, CheckF77Clib, CheckF77BLAS
-
-env = GetNumpyEnvironment(ARGUMENTS)
-env.Tool('numpyf2py')
-
-# Configuration
-config = env.NumpyConfigure(custom_tests = {'CheckF77Clib' : CheckF77Clib,
-                                            'CheckF77BLAS' : CheckF77BLAS})
-
-if not config.CheckF77Clib():
-    raise Exception("Could not check F77 runtime, needed for interpolate")
-if not config.CheckF77BLAS():
-    raise Exception("Could not find F77 BLAS, needed for integrate package")
-
-config.Finish()
-
-env.AppendUnique(CPPPATH = get_numpy_include_dirs())
-env.AppendUnique(CPPPATH = env['F2PYINCLUDEDIR'])
-
-# XXX: lapack integration
-
-# Build linpack_lite
-src = [str(s) for s in env.NumpyGlob(pjoin('linpack_lite', '*.f'))]
-linpack_lite = env.NumpyStaticExtLibrary('linpack_lite', source = src)
-
-# Build mach
-# XXX: do not use optimization flags for mach
-src = [str(s) for s in env.NumpyGlob(pjoin('mach', '*.f'))]
-mach = env.NumpyStaticExtLibrary('mach', source = src)
-
-# Build quadpack
-src = [str(s) for s in env.NumpyGlob(pjoin('quadpack', '*.f'))]
-quadpack = env.NumpyStaticExtLibrary('quadpack', source = src)
-
-# Build odepack
-src = [str(s) for s in env.NumpyGlob(pjoin('odepack', '*.f'))]
-odepack = env.NumpyStaticExtLibrary('odepack', source = src)
-
-env.AppendUnique(LIBPATH = env['build_dir'])
-env.AppendUnique(LINKFLAGSEND = env['F77_LDFLAGS'])
-
-quadenv = env.Clone()
-quadenv.Prepend(LIBS = ['quadpack', 'linpack_lite', 'mach'])
-
-odenv = env.Clone()
-odenv.Prepend(LIBS = ['odepack', 'linpack_lite', 'mach'])
-
-# Build _quadpack
-quadenv.NumpyPythonExtension('_quadpack', source = '_quadpackmodule.c')
-
-# Build _odepack
-odenv.NumpyPythonExtension('_odepack', source = '_odepackmodule.c')
-
-# Build vode
-odenv.NumpyPythonExtension('vode', source = 'vode.pyf')

Copied: branches/refactor_fft/scipy/integrate/SConstruct (from rev 4510, trunk/scipy/integrate/SConstruct)

Modified: branches/refactor_fft/scipy/integrate/__odepack.h
===================================================================
--- branches/refactor_fft/scipy/integrate/__odepack.h	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/integrate/__odepack.h	2008-07-01 04:52:00 UTC (rev 4511)
@@ -326,7 +326,11 @@
       *((int *)ap_nfe->data + (k-1)) = iwork[11];
       *((int *)ap_nje->data + (k-1)) = iwork[12];
       *((int *)ap_nqu->data + (k-1)) = iwork[13];
-      imxer = iwork[15];
+      if (istate == -5 || istate == -4) {
+        imxer = iwork[15];
+      } else {
+        imxer = -1;
+      }
       lenrw = iwork[16];
       leniw = iwork[17];
       *((int *)ap_mused->data + (k-1)) = iwork[18];
@@ -348,7 +352,20 @@
 
   /* Do Full output */
     if (full_output) {
-      return Py_BuildValue("N{s:N,s:N,s:N,s:N,s:N,s:N,s:N,s:N,s:i,s:i,s:i,s:N}i",PyArray_Return(ap_yout),"hu",PyArray_Return(ap_hu),"tcur",PyArray_Return(ap_tcur),"tolsf",PyArray_Return(ap_tolsf),"tsw",PyArray_Return(ap_tsw),"nst",PyArray_Return(ap_nst),"nfe",PyArray_Return(ap_nfe),"nje",PyArray_Return(ap_nje),"nqu",PyArray_Return(ap_nqu),"imxer",imxer,"lenrw",lenrw,"leniw",leniw,"mused",PyArray_Return(ap_mused),istate);
+      return Py_BuildValue("N{s:N,s:N,s:N,s:N,s:N,s:N,s:N,s:N,s:i,s:i,s:i,s:N}i",PyArray_Return(ap_yout),
+                      "hu",PyArray_Return(ap_hu),
+                      "tcur",PyArray_Return(ap_tcur),
+                      "tolsf",PyArray_Return(ap_tolsf),
+                      "tsw",PyArray_Return(ap_tsw),
+                      "nst",PyArray_Return(ap_nst),
+                      "nfe",PyArray_Return(ap_nfe),
+                      "nje",PyArray_Return(ap_nje),
+                      "nqu",PyArray_Return(ap_nqu),
+                      "imxer",imxer,
+                      "lenrw",lenrw,
+                      "leniw",leniw,
+                      "mused",PyArray_Return(ap_mused),
+                      istate);
     }
     else {
       return Py_BuildValue("Ni",PyArray_Return(ap_yout),istate);

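lsoda only defines iwork[15] (IMXER) when it stops with istate -4 or -5; on successful returns the old code exposed whatever the work array happened to contain. Reporting -1 otherwise matches the 'imxer' entry in the rewritten odeint docstring below. A sketch of the observable effect, assuming this guarded behaviour:

    import numpy
    from scipy.integrate import odeint

    y, info = odeint(lambda y, t: -y, 1.0, numpy.linspace(0., 1., 5),
                     full_output=True)
    assert info['imxer'] == -1  # successful run: no max-error component
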
Modified: branches/refactor_fft/scipy/integrate/odepack.py
===================================================================
--- branches/refactor_fft/scipy/integrate/odepack.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/integrate/odepack.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -21,96 +21,116 @@
            ml=None, mu=None, rtol=None, atol=None, tcrit=None, h0=0.0,
            hmax=0.0, hmin=0.0, ixpr=0, mxstep=0, mxhnil=0, mxordn=12,
            mxords=5, printmessg=0):
-
     """Integrate a system of ordinary differential equations.
 
-    Description:
+    Solve a system of ordinary differential equations using lsoda from the
+    FORTRAN library odepack.
 
-      Solve a system of ordinary differential equations Using lsoda from the
-      FORTRAN library odepack.
+    Solves the initial value problem for stiff or non-stiff systems
+    of first order ode-s::
+    
+        dy/dt = func(y,t0,...)
 
-      Solves the initial value problem for stiff or non-stiff systems
-      of first order ode-s:
-           dy/dt = func(y,t0,...) where y can be a vector.
+    where y can be a vector.
 
-    Inputs:
+    Parameters
+    ----------
+    func : callable(y, t0, ...)
+        Computes the derivative of y at t0.
+    y0 : array
+        Initial condition on y (can be a vector).
+    t : array
+        A sequence of time points for which to solve for y.  The initial
+        value point should be the first element of this sequence.
+    args : tuple
+        Extra arguments to pass to function.
+    Dfun : callable(y, t0, ...)
+        Gradient (Jacobian) of func.
+    col_deriv : boolean
+        True if Dfun defines derivatives down columns (faster),
+        otherwise Dfun should define derivatives across rows.
+    full_output : boolean
+        Whether to return a dictionary of optional outputs as the second output.
+    printmessg : boolean
+        Whether to print the convergence message.
 
-      func -- func(y,t0,...) computes the derivative of y at t0.
-      y0   -- initial condition on y (can be a vector).
-      t    -- a sequence of time points for which to solve for y.  The intial
-              value point should be the first element of this sequence.
-      args -- extra arguments to pass to function.
-      Dfun -- the gradient (Jacobian) of func (same input signature as func).
-      col_deriv -- non-zero implies that Dfun defines derivatives down
-                   columns (faster), otherwise Dfun should define derivatives
-                   across rows.
-      full_output -- non-zero to return a dictionary of optional outputs as
-                     the second output.
-      printmessg -- print the convergence message.
+    Returns
+    -------
+    y : array, shape (len(t), len(y0))
+        Array containing the value of y for each desired time in t,
+        with the initial value y0 in the first row.
+    
+    infodict : dict, only returned if full_output == True
+        Dictionary containing additional output information
+        
+        =======  ============================================================
+        key      meaning
+        =======  ============================================================
+        'hu'     vector of step sizes successfully used for each time step.
+        'tcur'   vector with the value of t reached for each time step.
+                 (will always be at least as large as the input times).
+        'tolsf'  vector of tolerance scale factors, greater than 1.0,
+                 computed when a request for too much accuracy was detected.
+        'tsw'    value of t at the time of the last method switch
+                 (given for each time step)
+        'nst'    cumulative number of time steps
+        'nfe'    cumulative number of function evaluations for each time step
+        'nje'    cumulative number of jacobian evaluations for each time step
+        'nqu'    a vector of method orders for each successful step.
+        'imxer'  index of the component of largest magnitude in the
+                 weighted local error vector (e / ewt) on an error return, -1
+                 otherwise.
+        'lenrw'  the length of the double work array required.
+        'leniw'  the length of integer work array required.
+        'mused'  a vector of method indicators for each successful time step:
+                 1: adams (nonstiff), 2: bdf (stiff)
+        =======  ============================================================
+    
+    Other Parameters
+    ----------------
+    ml, mu : integer
+        If either of these are not-None or non-negative, then the
+        Jacobian is assumed to be banded.  These give the number of
+        lower and upper non-zero diagonals in this banded matrix.
+        For the banded case, Dfun should return a matrix whose
+        columns contain the non-zero bands (starting with the
+        lowest diagonal).  Thus, the return matrix from Dfun should
+        have shape len(y0) * (ml + mu + 1) when ml >= 0 or mu >= 0.
+    rtol, atol : float
+        The input parameters rtol and atol determine the error
+        control performed by the solver.  The solver will control the
+        vector, e, of estimated local errors in y, according to an
+        inequality of the form::
+
+            max-norm of (e / ewt) <= 1
+
+        where ewt is a vector of positive error weights computed as::
+
+            ewt = rtol * abs(y) + atol
+
+        rtol and atol can be either vectors the same length as y or scalars.
+    tcrit : array
+        Vector of critical points (e.g. singularities) where integration
+        care should be taken.
+    h0 : float, (0: solver-determined)
+        The step size to be attempted on the first step.
+    hmax : float, (0: solver-determined)
+        The maximum absolute step size allowed.
+    hmin : float, (0: solver-determined)
+        The minimum absolute step size allowed.
+    ixpr : boolean
+        Whether to generate extra printing at method switches.
+    mxstep : integer, (0: solver-determined)
+        Maximum number of (internally defined) steps allowed for each
+        integration point in t.
+    mxhnil : integer, (0: solver-determined)
+        Maximum number of messages printed.
+    mxordn : integer, (0: solver-determined)
+        Maximum order to be allowed for the nonstiff (Adams) method.
+    mxords : integer, (0: solver-determined)
+        Maximum order to be allowed for the stiff (BDF) method.
 
-    Outputs: (y, {infodict,})
-
-      y -- a rank-2 array containing the value of y in each row for each
-           desired time in t (with the initial value y0 in the first row).
-
-      infodict -- a dictionary of optional outputs:
-        'hu'    : a vector of step sizes successfully used for each time step.
-        'tcur'  : a vector with the value of t reached for each time step.
-                  (will always be at least as large as the input times).
-        'tolsf' : a vector of tolerance scale factors, greater than 1.0,
-                  computed when a request for too much accuracy was detected.
-        'tsw'   : the value of t at the time of the last method switch
-                  (given for each time step).
-        'nst'   : the cumulative number of time steps.
-        'nfe'   : the cumulative number of function evaluations for eadh
-                  time step.
-        'nje'   : the cumulative number of jacobian evaluations for each
-                  time step.
-        'nqu'   : a vector of method orders for each successful step.
-        'imxer' : index of the component of largest magnitude in the
-                   weighted local error vector (e / ewt) on an error return.
-        'lenrw' : the length of the double work array required.
-        'leniw' : the length of integer work array required.
-        'mused' : a vector of method indicators for each successful time step:
-                  1 -- adams (nonstiff)
-                  2 -- bdf (stiff)
-
-    Additional Inputs:
-
-      ml, mu -- If either of these are not-None or non-negative, then the
-                Jacobian is assumed to be banded.  These give the number of
-                lower and upper non-zero diagonals in this banded matrix.
-                For the banded case, Dfun should return a matrix whose
-                columns contain the non-zero bands (starting with the
-                lowest diagonal).  Thus, the return matrix from Dfun should
-                have shape len(y0) x (ml + mu + 1) when ml >=0 or mu >=0
-      rtol -- The input parameters rtol and atol determine the error
-      atol    control performed by the solver.  The solver will control the
-              vector, e, of estimated local errors in y, according to an
-              inequality of the form
-                   max-norm of (e / ewt) <= 1
-              where ewt is a vector of positive error weights computed as
-                   ewt = rtol * abs(y) + atol
-              rtol and atol can be either vectors the same length as y or
-              scalars.
-      tcrit -- a vector of critical points (e.g. singularities) where
-               integration care should be taken.
-
-       (For the next inputs a zero default means the solver determines it).
-
-      h0 -- the step size to be attempted on the first step.
-      hmax -- the maximum absolute step size allowed.
-      hmin -- the minimum absolute step size allowed.
-      ixpr -- non-zero to generate extra printing at method switches.
-      mxstep -- maximum number of (internally defined) steps allowed
-                for each integration point in t.
-      mxhnil -- maximum number of messages printed.
-      mxordn -- maximum order to be allowed for the nonstiff (Adams) method.
-      mxords -- maximum order to be allowed for the stiff (BDF) method.
-
-    See also:
-      ode - a more object-oriented integrator based on VODE
-      quad - for finding the area under a curve
+    See Also
+    --------
+    ode : a more object-oriented integrator based on VODE
+    quad : for finding the area under a curve
+    
     """
 
     if ml is None:

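For reference against the converted docstring, a minimal call sketch (the right-hand side and tolerances are illustrative, not part of this change):

    import numpy
    from scipy.integrate import odeint

    def func(y, t):
        return -0.5 * y                  # dy/dt = -y/2

    t = numpy.linspace(0., 4., 9)
    y = odeint(func, 2.0, t, rtol=1e-8, atol=1e-8)
    # one row of y per requested time in t; y[0] is the initial condition
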
Copied: branches/refactor_fft/scipy/interpolate/SConscript (from rev 4510, trunk/scipy/interpolate/SConscript)

Deleted: branches/refactor_fft/scipy/interpolate/SConstruct
===================================================================
--- branches/refactor_fft/scipy/interpolate/SConstruct	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/interpolate/SConstruct	2008-07-01 04:52:00 UTC (rev 4511)
@@ -1,31 +0,0 @@
-# Last Change: Sat May 03 02:00 PM 2008 J
-# vim:syntax=python
-from os.path import join as pjoin
-
-from numpy.distutils.misc_util import get_numpy_include_dirs
-from numscons import GetNumpyEnvironment, CheckF77Clib
-
-env = GetNumpyEnvironment(ARGUMENTS)
-env.Tool('numpyf2py')
-
-config = env.NumpyConfigure(custom_tests = {'CheckF77Clib' : CheckF77Clib})
-if not config.CheckF77Clib():
-    raise Exception("Could not check F77 runtime, needed for interpolate")
-config.Finish()
-
-env.PrependUnique(CPPPATH = get_numpy_include_dirs())
-env.PrependUnique(CPPPATH = env['F2PYINCLUDEDIR'])
-env.AppendUnique(LINKFLAGSEND = env['F77_LDFLAGS'])
-
-# Build fitpack
-src = [str(s) for s in env.NumpyGlob(pjoin('fitpack', '*.f'))]
-fitpack = env.NumpyStaticExtLibrary('fitpack', source = src)
-
-env.PrependUnique(LIBS = ['fitpack'])
-env.PrependUnique(LIBPATH = [env['build_dir']])
-
-# Build _fitpack
-env.NumpyPythonExtension('_fitpack', source = '_fitpackmodule.c')
-
-# Build dfitpack
-env.NumpyPythonExtension('dfitpack', source = 'fitpack.pyf')

Copied: branches/refactor_fft/scipy/interpolate/SConstruct (from rev 4510, trunk/scipy/interpolate/SConstruct)

Modified: branches/refactor_fft/scipy/interpolate/fitpack.py
===================================================================
--- branches/refactor_fft/scipy/interpolate/fitpack.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/interpolate/fitpack.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -842,6 +842,28 @@
     if len(z[0])>1: return z[0]
     return z[0][0]
 
+def dblint(xa,xb,ya,yb,tck):
+    """Evaluate the integral of a spline over area [xa,xb] x [ya,yb].
+    
+    Parameters
+    ----------
+    xa, xb : float
+        The end-points of the x integration interval.
+    ya, yb : float
+        The end-points of the y integration interval.
+    tck : list [tx, ty, c, kx, ky]
+        A sequence of length 5 returned by bisplrep containing the knot
+        locations tx, ty, the coefficients c, and the degrees kx, ky
+        of the spline.
+
+    Returns
+    -------
+    integ : float
+        The value of the resulting integral.
+    """
+    tx,ty,c,kx,ky=tck
+    return dfitpack.dblint(tx,ty,c,kx,ky,xa,xb,ya,yb)
+
 def insert(x,tck,m=1,per=0):
     """Insert knots into a B-spline.
 

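With the integration limits forwarded positionally as xa, xb, ya, yb, dblint can be driven directly from a bisplrep fit. A sketch, with an illustrative grid and surface:

    import numpy
    from scipy.interpolate import bisplrep
    from scipy.interpolate.fitpack import dblint

    x, y = numpy.mgrid[0:1:10j, 0:1:10j]
    z = x * y
    tck = bisplrep(x.ravel(), y.ravel(), z.ravel(), s=0)
    print dblint(0., 1., 0., 1., tck)    # ~0.25 for z = x*y on the unit square
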
Modified: branches/refactor_fft/scipy/interpolate/fitpack.pyf
===================================================================
--- branches/refactor_fft/scipy/interpolate/fitpack.pyf	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/interpolate/fitpack.pyf	2008-07-01 04:52:00 UTC (rev 4511)
@@ -456,7 +456,24 @@
             :: kwrk=3+mx+my+nxest+nyest
        integer intent(out) :: ier
      end subroutine regrid_smth
-
+     
+     function dblint(tx,nx,ty,ny,c,kx,ky,xb,xe,yb,ye,wrk)
+       ! iy = dblint(tx,ty,c,kx,ky,xb,xe,yb,ye)
+       real*8 dimension(nx),intent(in) :: tx
+       integer intent(hide),depend(tx) :: nx=len(tx)
+       real*8 dimension(ny),intent(in) :: ty
+       integer intent(hide),depend(ty) :: ny=len(ty)
+       real*8 intent(in),dimension((nx-kx-1)*(ny-ky-1)),depend(nx,ny,kx,ky),&
+            check(len(c)==(nx-kx-1)*(ny-ky-1)):: c
+       integer :: kx
+       integer :: ky
+       real*8 intent(in) :: xb
+       real*8 intent(in) :: xe
+       real*8 intent(in) :: yb
+       real*8 intent(in) :: ye
+       real*8 dimension(nx+ny-kx-ky-2),depend(nx,ny,kx,ky),intent(cache,hide) :: wrk
+       real*8 :: dblint
+     end function dblint
   end interface
 end python module dfitpack
 

Modified: branches/refactor_fft/scipy/interpolate/fitpack2.py
===================================================================
--- branches/refactor_fft/scipy/interpolate/fitpack2.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/interpolate/fitpack2.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -352,6 +352,27 @@
             assert ier==0,'Invalid input: ier='+`ier`
             return z
         raise NotImplementedError
+    
+    def integral(self, xa, xb, ya, yb):
+        """
+        Evaluate the integral of the spline over area [xa,xb] x [ya,yb].
+        
+        Parameters
+        ----------
+        xa, xb : float
+            The end-points of the x integration interval.
+        ya, yb : float
+            The end-points of the y integration interval.
+        
+        Returns
+        -------
+        integ : float
+            The value of the resulting integral.
+        
+        """
+        tx,ty,c = self.tck[:3]
+        kx,ky = self.degrees
+        return dfitpack.dblint(tx,ty,c,kx,ky,xa,xb,ya,yb)
 
 class SmoothBivariateSpline(BivariateSpline):
     """ Smooth bivariate spline approximation.

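A usage sketch for the new integral method, mirroring the data used in the
tests further below (assumes a bilinear fit with s=0 reproduces the data):

    from numpy import array
    from scipy.interpolate.fitpack2 import SmoothBivariateSpline

    x = [1,1,1,2,2,2,3,3,3]
    y = [1,2,3,1,2,3,1,2,3]
    z = array([0,7,8,3,4,7,1,3,4])

    lut = SmoothBivariateSpline(x, y, z, kx=1, ky=1, s=0)
    # integral of the fitted surface over [1,3] x [1,3]
    print lut.integral(1, 3, 1, 3)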
Modified: branches/refactor_fft/scipy/interpolate/interpolate.py
===================================================================
--- branches/refactor_fft/scipy/interpolate/interpolate.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/interpolate/interpolate.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -152,9 +152,6 @@
     UnivariateSpline - a more recent wrapper of the FITPACK routines
     """
 
-    _interp_axis = -1 # used to set which is default interpolation
-                      # axis.  DO NOT CHANGE OR CODE WILL BREAK.
-
     def __init__(self, x, y, kind='linear', axis=-1,
                  copy=True, bounds_error=True, fill_value=np.nan):
         """ Initialize a 1D linear interpolation class.
@@ -226,12 +223,18 @@
         if kind == 'linear':
             # Make a "view" of the y array that is rotated to the interpolation
             # axis.
-            oriented_y = y.swapaxes(self._interp_axis, axis)
+            axes = range(y.ndim)
+            del axes[self.axis]
+            axes.append(self.axis)
+            oriented_y = y.transpose(axes)
             minval = 2
-            len_y = oriented_y.shape[self._interp_axis]
+            len_y = oriented_y.shape[-1]
             self._call = self._call_linear
         else:
-            oriented_y = y.swapaxes(0, axis)
+            axes = range(y.ndim)
+            del axes[self.axis]
+            axes.insert(0, self.axis)
+            oriented_y = y.transpose(axes)
             minval = order + 1
             len_y = oriented_y.shape[0]
             self._call = self._call_spline
@@ -322,10 +325,10 @@
             return y_new.transpose(axes)
         else:
             y_new[out_of_bounds] = self.fill_value
-            axes = range(ny - nx, ny)
-            axes[self.axis:self.axis] = range(ny - nx)
+            axes = range(nx, ny)
+            axes[self.axis:self.axis] = range(nx)
             return y_new.transpose(axes)
-
+    
     def _check_bounds(self, x_new):
         """ Check the inputs for being in the bounds of the interpolated data.
 
@@ -407,6 +410,16 @@
     fromspline = classmethod(fromspline)
 
 
+def _dot0(a, b):
+    """Similar to numpy.dot, but sum over last axis of a and 1st axis of b"""
+    if b.ndim <= 2:
+        return dot(a, b)
+    else:
+        axes = range(b.ndim)
+        axes.insert(-1, 0)
+        axes.pop(0)
+        return dot(a, b.transpose(axes))
+
 def _find_smoothest(xk, yk, order, conds=None, B=None):
     # construct Bmatrix, and Jmatrix
     # e = J*c
@@ -431,9 +444,8 @@
     tmp = dot(tmp,V1)
     tmp = dot(tmp,np.diag(1.0/s))
     tmp = dot(tmp,u.T)
-    return dot(tmp, yk)
+    return _dot0(tmp, yk)
 
-
 def _setdiag(a, k, v):
     assert (a.ndim==2)
     M,N = a.shape
@@ -471,7 +483,7 @@
     V2[1::2] = -1
     V2 /= math.sqrt(Np1)
     dk = np.diff(xk)
-    b = 2*np.diff(yk)/dk
+    b = 2*np.diff(yk, axis=0)/dk
     J = np.zeros((N-1,N+1))
     idk = 1.0/dk
     _setdiag(J,0,idk[:-1])
@@ -480,7 +492,7 @@
     A = dot(J.T,J)
     val = dot(V2,dot(A,V2))
     res1 = dot(np.outer(V2,V2)/val,A)
-    mk = dot(np.eye(Np1)-res1,dot(Bd,b))
+    mk = dot(np.eye(Np1)-res1, _dot0(Bd,b))
     return mk
 
 def _get_spline2_Bb(xk, yk, kind, conds):

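The interp1d changes above replace swapaxes with a transpose that rotates the
interpolation axis to the end (or front) while preserving the order of the
remaining axes, which is what makes >2-D y arrays work. A small sketch of the
difference (the shapes in the comments follow from numpy's transpose
semantics):

    import numpy as np

    y = np.empty((4, 5, 6, 7))
    axis = 1

    # swapaxes exchanges axes 1 and -1, scrambling the others:
    print y.swapaxes(axis, -1).shape     # (4, 7, 6, 5)

    # the rotation keeps the remaining axes in their original order:
    axes = range(y.ndim)
    del axes[axis]
    axes.append(axis)
    print y.transpose(axes).shape        # (4, 6, 7, 5)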
Modified: branches/refactor_fft/scipy/interpolate/rbf.py
===================================================================
--- branches/refactor_fft/scipy/interpolate/rbf.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/interpolate/rbf.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -42,7 +42,7 @@
 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 """
 
-from numpy import sqrt, log, asarray, newaxis, all, dot, float64, eye
+from numpy import sqrt, log, asarray, newaxis, all, dot, float64, exp, eye
 from scipy import linalg
 
 class Rbf(object):
@@ -58,7 +58,7 @@
             return sqrt((1.0/self.epsilon*r)**2 + 1)
         elif self.function.lower() == 'inverse multiquadric':
             return 1.0/sqrt((1.0/self.epsilon*r)**2 + 1)
-        elif self.function.lower() == 'gausian':
+        elif self.function.lower() == 'gaussian':
             return exp(-(self.epsilon*r)**2)
         elif self.function.lower() == 'cubic':
             return r**3
@@ -84,7 +84,7 @@
             ::
                 'multiquadric': sqrt((self.epsilon*r)**2 + 1)
                 'inverse multiquadric': 1.0/sqrt((self.epsilon*r)**2 + 1)
-                'gausian': exp(-(self.epsilon*r)**2)
+                'gaussian': exp(-(self.epsilon*r)**2)
                 'cubic': r**3
                 'quintic': r**5
                 'thin-plate': r**2 * log(r)

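With the spelling fixed, the Gaussian basis is now reachable by its correct
name; a short sketch (random sample data, hypothetical):

    import numpy as np
    from scipy.interpolate.rbf import Rbf

    x = np.random.rand(50)
    y = np.random.rand(50)
    z = np.sin(10 * x) * np.cos(10 * y)

    rbf = Rbf(x, y, z, function='gaussian')
    print rbf(np.array([0.5]), np.array([0.5]))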
Modified: branches/refactor_fft/scipy/interpolate/tests/test_fitpack.py
===================================================================
--- branches/refactor_fft/scipy/interpolate/tests/test_fitpack.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/interpolate/tests/test_fitpack.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -14,7 +14,7 @@
 
 import sys
 from scipy.testing import *
-from numpy import array
+from numpy import array, diff
 from scipy.interpolate.fitpack2 import UnivariateSpline,LSQUnivariateSpline,\
      InterpolatedUnivariateSpline
 from scipy.interpolate.fitpack2 import LSQBivariateSpline, \
@@ -48,10 +48,49 @@
         tx = [1+s,3-s]
         ty = [1+s,3-s]
         lut = LSQBivariateSpline(x,y,z,tx,ty,kx=1,ky=1)
-        #print lut.get_knots()
-        #print lut.get_coeffs()
-        #print lut.get_residual()
 
+        assert_almost_equal(lut(2,2), 3.)
+
+    def test_bilinearity(self):
+        x = [1,1,1,2,2,2,3,3,3]
+        y = [1,2,3,1,2,3,1,2,3]
+        z = [0,7,8,3,4,7,1,3,4]
+        s = 0.1
+        tx = [1+s,3-s]
+        ty = [1+s,3-s]
+        lut = LSQBivariateSpline(x,y,z,tx,ty,kx=1,ky=1)
+
+        tx, ty = lut.get_knots()
+
+        for xa, xb in zip(tx[:-1], tx[1:]):
+            for ya, yb in zip(ty[:-1], ty[1:]):
+                for t in [0.1, 0.5, 0.9]:
+                    for s in [0.3, 0.4, 0.7]:
+                        xp = xa*(1-t) + xb*t
+                        yp = ya*(1-s) + yb*s
+                        zp = (+ lut(xa, ya)*(1-t)*(1-s)
+                              + lut(xb, ya)*t*(1-s)
+                              + lut(xa, yb)*(1-t)*s
+                              + lut(xb, yb)*t*s)
+                        assert_almost_equal(lut(xp,yp), zp)
+
+    def test_integral(self):
+        x = [1,1,1,2,2,2,8,8,8]
+        y = [1,2,3,1,2,3,1,2,3]
+        z = array([0,7,8,3,4,7,1,3,4])
+
+        s = 0.1
+        tx = [1+s,3-s]
+        ty = [1+s,3-s]
+        lut = LSQBivariateSpline(x,y,z,tx,ty,kx=1,ky=1)
+        tx, ty = lut.get_knots()
+
+        tz = lut(tx, ty)
+        trpz = .25*(diff(tx)[:,None]*diff(ty)[None,:]
+                    *(tz[:-1,:-1]+tz[1:,:-1]+tz[:-1,1:]+tz[1:,1:])).sum()
+
+        assert_almost_equal(lut.integral(tx[0], tx[-1], ty[0], ty[-1]), trpz)
+
 class TestSmoothBivariateSpline(TestCase):
     def test_linear_constant(self):
         x = [1,1,1,2,2,2,3,3,3]
@@ -73,6 +112,29 @@
         assert_almost_equal(lut.get_residual(),0.0)
         assert_array_almost_equal(lut([1,1.5,2],[1,1.5]),[[0,0],[1,1],[2,2]])
 
+    def test_integral(self):
+        x = [1,1,1,2,2,2,4,4,4]
+        y = [1,2,3,1,2,3,1,2,3]
+        z = array([0,7,8,3,4,7,1,3,4])
+ 
+        lut = SmoothBivariateSpline(x,y,z,kx=1,ky=1,s=0)
+        tx = [1,2,4]
+        ty = [1,2,3]
+ 
+        tz = lut(tx, ty)
+        trpz = .25*(diff(tx)[:,None]*diff(ty)[None,:]
+                    *(tz[:-1,:-1]+tz[1:,:-1]+tz[:-1,1:]+tz[1:,1:])).sum()
+        assert_almost_equal(lut.integral(tx[0], tx[-1], ty[0], ty[-1]), trpz)
+ 
+        lut2 = SmoothBivariateSpline(x,y,z,kx=2,ky=2,s=0)
+        assert_almost_equal(lut2.integral(tx[0], tx[-1], ty[0], ty[-1]), trpz,
+                            decimal=0) # the quadratures give 23.75 and 23.85
+        
+        tz = lut(tx[:-1], ty[:-1])
+        trpz = .25*(diff(tx[:-1])[:,None]*diff(ty[:-1])[None,:]
+                    *(tz[:-1,:-1]+tz[1:,:-1]+tz[:-1,1:]+tz[1:,1:])).sum()
+        assert_almost_equal(lut.integral(tx[0], tx[-2], ty[0], ty[-2]), trpz)
+
 class TestRectBivariateSpline(TestCase):
     def test_defaults(self):
         x = array([1,2,3,4,5])

Modified: branches/refactor_fft/scipy/interpolate/tests/test_interpolate.py
===================================================================
--- branches/refactor_fft/scipy/interpolate/tests/test_interpolate.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/interpolate/tests/test_interpolate.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -29,7 +29,7 @@
 
         self.y210 = np.arange(20.).reshape((2, 10))
         self.y102 = np.arange(20.).reshape((10, 2))
-
+        
         self.fill_value = -100.0
 
     def test_validation(self):
@@ -125,13 +125,30 @@
             np.array([2.4, 5.6, 6.0]),
         )
 
+    def test_cubic(self):
+        """ Check the actual implementation of spline interpolation.
+        """
 
-    def test_bounds(self):
+        interp10 = interp1d(self.x10, self.y10, kind='cubic')
+        assert_array_almost_equal(
+            interp10(self.x10),
+            self.y10,
+        )
+        assert_array_almost_equal(
+            interp10(1.2),
+            np.array([1.2]),
+        )
+        assert_array_almost_equal(
+            interp10([2.4, 5.6, 6.0]),
+            np.array([2.4, 5.6, 6.0]),
+        )
+
+    def _bounds_check(self, kind='linear'):
         """ Test that our handling of out-of-bounds input is correct.
         """
 
         extrap10 = interp1d(self.x10, self.y10, fill_value=self.fill_value,
-            bounds_error=False)
+            bounds_error=False, kind=kind)
         assert_array_equal(
             extrap10(11.2),
             np.array([self.fill_value]),
@@ -145,25 +162,28 @@
             np.array([True, False, False, False, True]),
         )
 
-        raises_bounds_error = interp1d(self.x10, self.y10, bounds_error=True)
+        raises_bounds_error = interp1d(self.x10, self.y10, bounds_error=True,
+                                       kind=kind)
         self.assertRaises(ValueError, raises_bounds_error, -1.0)
         self.assertRaises(ValueError, raises_bounds_error, 11.0)
         raises_bounds_error([0.0, 5.0, 9.0])
 
+    def test_bounds(self):
+        for kind in ('linear', 'cubic'):
+            self._bounds_check(kind=kind)
 
-    def test_nd(self):
+    def _nd_check(self, kind='linear'):
         """ Check the behavior when the inputs and outputs are multidimensional.
         """
-
         # Multidimensional input.
-        interp10 = interp1d(self.x10, self.y10)
+        interp10 = interp1d(self.x10, self.y10, kind=kind)
         assert_array_almost_equal(
             interp10(np.array([[3.4, 5.6], [2.4, 7.8]])),
             np.array([[3.4, 5.6], [2.4, 7.8]]),
         )
-
+        
         # Multidimensional outputs.
-        interp210 = interp1d(self.x10, self.y210)
+        interp210 = interp1d(self.x10, self.y210, kind=kind)
         assert_array_almost_equal(
             interp210(1.5),
             np.array([[1.5], [11.5]]),
@@ -174,7 +194,7 @@
                       [11.5, 12.4]]),
         )
 
-        interp102 = interp1d(self.x10, self.y102, axis=0)
+        interp102 = interp1d(self.x10, self.y102, axis=0, kind=kind)
         assert_array_almost_equal(
             interp102(1.5),
             np.array([[3.0, 4.0]]),
@@ -197,7 +217,24 @@
             np.array([[[6.8, 7.8], [11.2, 12.2]],
                       [[4.8, 5.8], [15.6, 16.6]]]),
         )
+        
+        # Check large ndim output
+        a = [4, 5, 6, 7]
+        y = np.arange(np.prod(a)).reshape(*a)
+        for n, s in enumerate(a):
+            x = np.arange(s)
+            z = interp1d(x, y, axis=n, kind=kind)
+            assert_array_almost_equal(z(x), y)
+            
+            x2 = np.arange(2*3*1).reshape((2,3,1)) / 12.
+            b = list(a)
+            b[n:n+1] = [2,3,1]
+            assert_array_almost_equal(z(x2).shape, b)
 
+    def test_nd(self):
+        for kind in ('linear', 'cubic'):
+            self._nd_check(kind=kind)
+
 class TestLagrange(TestCase):
 
     def test_lagrange(self):

Copied: branches/refactor_fft/scipy/io/SConscript (from rev 4510, trunk/scipy/io/SConscript)

Deleted: branches/refactor_fft/scipy/io/SConstruct
===================================================================
--- branches/refactor_fft/scipy/io/SConstruct	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/io/SConstruct	2008-07-01 04:52:00 UTC (rev 4511)
@@ -1,11 +0,0 @@
-# Last Change: Wed Mar 05 03:00 PM 2008 J
-# vim:syntax=python
-from os.path import join
-
-from numpy.distutils.misc_util import get_numpy_include_dirs
-from numscons import GetNumpyEnvironment
-
-env = GetNumpyEnvironment(ARGUMENTS)
-
-env.AppendUnique(CPPPATH = get_numpy_include_dirs())
-env.NumpyPythonExtension('numpyio', source = 'numpyiomodule.c')

Copied: branches/refactor_fft/scipy/io/SConstruct (from rev 4510, trunk/scipy/io/SConstruct)

Modified: branches/refactor_fft/scipy/io/array_import.py
===================================================================
--- branches/refactor_fft/scipy/io/array_import.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/io/array_import.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -169,7 +169,7 @@
             return lines[:-1]
 
     def __del__(self):
-        if hasattr(self.file,'close') and self.should_close_file:
+        if hasattr(getattr(self, 'file', None),'close') and self.should_close_file:
             self.file.close()
 
     def __getitem__(self, item):

Modified: branches/refactor_fft/scipy/io/matlab/miobase.py
===================================================================
--- branches/refactor_fft/scipy/io/matlab/miobase.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/io/matlab/miobase.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -284,7 +284,8 @@
 
     def end_of_stream(self):
         b = self.mat_stream.read(1)
-        self.mat_stream.seek(-1,1)
+        curpos = self.mat_stream.tell()
+        self.mat_stream.seek(curpos-1)
         return len(b) == 0
 
 
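The switch to an absolute seek appears to be for file-like objects such as
gzip.GzipFile (exercised by the new gzip test below), whose seek historically
does not accept relative whence arguments. A runnable sketch of the resulting
peek-one-byte pattern:

    from cStringIO import StringIO

    stream = StringIO('abc')
    b = stream.read(1)          # try to read one byte
    curpos = stream.tell()
    stream.seek(curpos - 1)     # absolute seek back, instead of seek(-1, 1)
    print len(b) == 0           # False: not yet at end of stream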

Modified: branches/refactor_fft/scipy/io/matlab/tests/test_mio.py
===================================================================
--- branches/refactor_fft/scipy/io/matlab/tests/test_mio.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/io/matlab/tests/test_mio.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -3,7 +3,7 @@
 import os
 from glob import glob
 from cStringIO import StringIO
-from tempfile import mkstemp
+from tempfile import mkstemp, mkdtemp
 from scipy.testing import *
 from numpy import arange, array, eye, pi, cos, exp, sin, sqrt, ndarray,  \
      zeros, reshape, transpose, empty
@@ -12,6 +12,9 @@
 from scipy.io.matlab.mio import loadmat, savemat
 from scipy.io.matlab.mio5 import mat_obj, mat_struct
 
+import shutil
+import gzip
+
 try:  # Python 2.3 support
     from sets import Set as set
 except:
@@ -238,3 +241,29 @@
         expected = case['expected']
         format = case in case_table4 and '4' or '5'
         yield _make_rt_check_case, name, expected, format
+
+def test_gzip_simple():
+    xdense = zeros((20,20))
+    xdense[2,3]=2.3
+    xdense[4,5]=4.5
+    x = SP.csc_matrix(xdense)
+
+    name = 'gzip_test'
+    expected = {'x':x}
+    format='4'
+
+    tmpdir = mkdtemp()
+    try:
+        fname = os.path.join(tmpdir,name)
+        mat_stream = gzip.open( fname,mode='wb')
+        savemat(mat_stream, expected, format=format)
+        mat_stream.close()
+
+        mat_stream = gzip.open( fname,mode='rb')
+        actual = loadmat(mat_stream)
+        mat_stream.close()
+    finally:
+        shutil.rmtree(tmpdir)
+
+    assert_array_almost_equal(actual['x'].todense(),
+                              expected['x'].todense())

Modified: branches/refactor_fft/scipy/io/mmio.py
===================================================================
--- branches/refactor_fft/scipy/io/mmio.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/io/mmio.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -554,16 +554,16 @@
             coo = a.tocoo() # convert to COOrdinate format
 
             # write shape spec
-            stream.write('%i %i %i\n' % (rows,cols,coo.nnz))
+            stream.write('%i %i %i\n' % (rows, cols, coo.nnz))
 
             fmt = '%%.%dg' % precision
 
             if field == self.FIELD_PATTERN:
-                IJV = vstack((a.row, a.col)).T
+                IJV = vstack((coo.row, coo.col)).T
             elif field in [ self.FIELD_INTEGER, self.FIELD_REAL ]:
-                IJV = vstack((a.row, a.col, a.data)).T
+                IJV = vstack((coo.row, coo.col, coo.data)).T
             elif field == self.FIELD_COMPLEX:
-                IJV = vstack((a.row, a.col, a.data.real, a.data.imag)).T
+                IJV = vstack((coo.row, coo.col, coo.data.real, coo.data.imag)).T
             else:
                 raise TypeError('Unknown field type %s' % `field`)
 

Modified: branches/refactor_fft/scipy/io/tests/test_mmio.py
===================================================================
--- branches/refactor_fft/scipy/io/tests/test_mmio.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/io/tests/test_mmio.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -291,5 +291,28 @@
         b = mmread(fn).todense()
         assert_array_almost_equal(a,b)
 
+    def test_sparse_formats(self):
+        mats = []
+        
+        I = array([0, 0, 1, 2, 3, 3, 3, 4])
+        J = array([0, 3, 1, 2, 1, 3, 4, 4])
+
+        V = array([  1.0,   6.0,   10.5, 0.015,   250.5,  -280.0, 33.32, 12.0 ])
+        mats.append( scipy.sparse.coo_matrix((V,(I,J)),shape=(5,5)) )
+        
+        V = array([  1.0 + 3j,    6.0 + 2j,  10.50 + 0.9j, 0.015 + -4.4j,
+                   250.5 + 0j, -280.0 + 5j,  33.32 + 6.4j, 12.00 + 0.8j])
+        mats.append( scipy.sparse.coo_matrix((V,(I,J)),shape=(5,5)) )
+
+        for mat in mats:
+            expected = mat.todense()
+            for fmt in ['csr','csc','coo']:
+                fn = mktemp()
+                mmwrite(fn, mat.asformat(fmt))
+        
+                result = mmread(fn).todense()
+                assert_array_almost_equal(result, expected)
+
+
 if __name__ == "__main__":
     nose.run(argv=['', __file__])

Modified: branches/refactor_fft/scipy/io/wavfile.py
===================================================================
--- branches/refactor_fft/scipy/io/wavfile.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/io/wavfile.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -15,7 +15,7 @@
 # assumes file pointer is immediately
 #   after the 'data' id
 def _read_data_chunk(fid, noc, bits):
-    size = struct.unpack('l',fid.read(4))[0]
+    size = struct.unpack('i',fid.read(4))[0]
     if bits == 8:
         data = numpy.fromfile(fid, dtype=numpy.ubyte, count=size)
         if noc > 1:
@@ -30,7 +30,7 @@
 
 def _read_riff_chunk(fid):
     str1 = fid.read(4)
-    fsize = struct.unpack('L', fid.read(4))[0] + 8
+    fsize = struct.unpack('I', fid.read(4))[0] + 8
     str2 = fid.read(4)
     if (str1 != 'RIFF' or str2 != 'WAVE'):
         raise ValueError, "Not a WAV file."
@@ -64,7 +64,7 @@
             data = _read_data_chunk(fid, noc, bits)
         else:
             print "Warning:  %s chunk not understood"
-            size = struct.unpack('L',fid.read(4))[0]
+            size = struct.unpack('I',fid.read(4))[0]
             bytes = fid.read(size)
     fid.close()
     return rate, data
@@ -99,11 +99,11 @@
     fid.write(struct.pack('lhHLLHH', 16, 1, noc, rate, sbytes, ba, bits))
     # data chunk
     fid.write('data')
-    fid.write(struct.pack('l', data.nbytes))
+    fid.write(struct.pack('i', data.nbytes))
     data.tofile(fid)
     # Determine file size and place it in correct
     #  position at start of the file.
     size = fid.tell()
     fid.seek(4)
-    fid.write(struct.pack('l', size-8))
+    fid.write(struct.pack('i', size-8))
     fid.close()

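The 'l'/'L' to 'i'/'I' changes fix WAV reading and writing on LP64 platforms,
where a native long is 8 bytes but the RIFF size fields are only 4; a quick
check:

    import struct

    print struct.calcsize('l')   # 8 on LP64 (e.g. 64-bit Linux), 4 on 32-bit
    print struct.calcsize('i')   # 4 on common platforms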
Copied: branches/refactor_fft/scipy/lib/blas/SConscript (from rev 4510, trunk/scipy/lib/blas/SConscript)

Deleted: branches/refactor_fft/scipy/lib/blas/SConstruct
===================================================================
--- branches/refactor_fft/scipy/lib/blas/SConstruct	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/lib/blas/SConstruct	2008-07-01 04:52:00 UTC (rev 4511)
@@ -1,85 +0,0 @@
-# Last Change: Sat May 03 02:00 PM 2008 J
-# vim:syntax=python
-
-import os
-from os.path import join as pjoin, splitext
-
-from numpy.distutils.misc_util import get_numpy_include_dirs
-from numscons import get_python_inc#, get_pythonlib_dir
-from numscons import GetNumpyEnvironment
-from numscons import CheckCBLAS, CheckF77BLAS,\
-                     IsVeclib, IsAccelerate, \
-                     IsATLAS, GetATLASVersion
-from numscons import write_info
-
-from scons_support import do_generate_fake_interface, generate_interface_emitter
-
-env = GetNumpyEnvironment(ARGUMENTS)
-env.Tool('numpyf2py')
-
-env.Append(CPPPATH = [get_python_inc(), get_numpy_include_dirs()])
-#if os.name == 'nt':
-#    # NT needs the pythonlib to run any code importing Python.h, including
-#    # simple code using only typedef and so on, so we need it for configuration
-#    # checks
-#    env.AppendUnique(LIBPATH = [get_pythonlib_dir()])
-
-env['BUILDERS']['GenerateFakePyf'] = Builder(action = do_generate_fake_interface,
-                                             emitter = generate_interface_emitter)
-
-#=======================
-# Starting Configuration
-#=======================
-config = env.NumpyConfigure(custom_tests = {'CheckCBLAS' : CheckCBLAS,
-                                            'CheckBLAS' : CheckF77BLAS})
-
-#--------------
-# Checking Blas
-#--------------
-st = config.CheckBLAS(check_version = 1)
-if not st:
-    raise RuntimeError("no blas found, necessary for linalg module")
-if IsATLAS(env, 'blas'):
-    version = GetATLASVersion(env)
-    env.Append(CPPDEFINES = [('ATLAS_INFO', '"\\"%s"\\"' % version)])
-else:
-    env.Append(CPPDEFINES = [('NO_ATLAS_INFO', 1)])
-
-if config.CheckCBLAS():
-    has_cblas = 1
-else:
-    has_cblas = 0
-
-config.Finish()
-write_info(env)
-
-#==========
-#  Build
-#==========
-
-# XXX: handle cblas wrapper for complex (check in numpy.scons or here ?)
-env.AppendUnique(CPPPATH = env['F2PYINCLUDEDIR'])
-env.AppendUnique(F2PYOPTIONS = '--quiet')
-
-#------------
-#   fblas
-#------------
-env.NumpyFromFTemplate('fblas.pyf', 'fblas.pyf.src')
-source = ['fblas.pyf']
-if IsVeclib(env, 'blas') or IsAccelerate(env, 'blas'):
-    env.NumpyFromCTemplate('fblaswrap_veclib_c.c', 'fblaswrap_veclib_c.c.src')
-    source.append('fblaswrap_veclib_c.c')
-else:
-    env.NumpyFromFTemplate('fblaswrap.f', 'fblaswrap.f.src')
-    source.append('fblaswrap.f')
-env.NumpyPythonExtension('fblas', source)
-
-#------------
-#   cblas
-#------------
-source = ['cblas.pyf']
-if has_cblas:
-    env.NumpyFromFTemplate('cblas.pyf', 'cblas.pyf.src')
-else:
-    print env.GenerateFakePyf('cblas', 'cblas.pyf.src')
-env.NumpyPythonExtension('cblas', source)

Copied: branches/refactor_fft/scipy/lib/blas/SConstruct (from rev 4510, trunk/scipy/lib/blas/SConstruct)

Modified: branches/refactor_fft/scipy/lib/blas/scons_support.py
===================================================================
--- branches/refactor_fft/scipy/lib/blas/scons_support.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/lib/blas/scons_support.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -1,8 +1,6 @@
 from os.path import join as pjoin, splitext, basename as pbasename
 
 def generate_interface_emitter(target, source, env):
-    source = [pjoin(env['build_dir'], str(i)) for i in source]
-    target = [pjoin(env['build_dir'], str(i)) for i in target]
     base = str(target[0])
     return (['%s.pyf' % base], source)
 

Copied: branches/refactor_fft/scipy/lib/lapack/SConscript (from rev 4510, trunk/scipy/lib/lapack/SConscript)

Deleted: branches/refactor_fft/scipy/lib/lapack/SConstruct
===================================================================
--- branches/refactor_fft/scipy/lib/lapack/SConstruct	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/lib/lapack/SConstruct	2008-07-01 04:52:00 UTC (rev 4511)
@@ -1,95 +0,0 @@
-# Last Change: Sat May 03 02:00 PM 2008 J
-# vim:syntax=python
-
-import os
-from os.path import join as pjoin, splitext
-
-from numpy.distutils.misc_util import get_numpy_include_dirs
-from numscons import get_python_inc
-from numscons import GetNumpyEnvironment
-from numscons import CheckF77LAPACK,\
-                                  CheckCLAPACK, \
-                                  IsATLAS, GetATLASVersion, \
-                                  CheckF77Clib
-from numscons import write_info
-
-from scons_support import do_generate_fake_interface, \
-                          generate_interface_emitter
-
-env = GetNumpyEnvironment(ARGUMENTS)
-env.Tool('numpyf2py')
-env.Append(CPPPATH = [get_python_inc(), get_numpy_include_dirs()])
-#if os.name == 'nt':
-#    # NT needs the pythonlib to run any code importing Python.h, including
-#    # simple code using only typedef and so on, so we need it for configuration
-#    # checks
-#    env.AppendUnique(LIBPATH = [get_pythonlib_dir()])
-
-#=======================
-# Starting Configuration
-#=======================
-config = env.NumpyConfigure(custom_tests = {'CheckCLAPACK' : CheckCLAPACK,
-                                            'CheckLAPACK' : CheckF77LAPACK,
-                                            'CheckF77Clib' : CheckF77Clib})
-
-#--------------
-# Checking Blas
-#--------------
-if not config.CheckF77Clib():
-    raise RuntimeError("Could not check F/C runtime library for %s/%s, " \
-                       "contact the maintainer" % (env['CC'], env['F77']))
-
-st = config.CheckLAPACK(check_version = 1)
-if not st:
-    raise RuntimeError("no lapack found, necessary for lapack module")
-
-if IsATLAS(env, 'lapack'):
-    version = GetATLASVersion(env)
-    env.Append(CPPDEFINES = [('ATLAS_INFO', '"\\"%s"\\"' % version)])
-else:
-    env.Append(CPPDEFINES = [('NO_ATLAS_INFO', 1)])
-
-if config.CheckCLAPACK():
-    has_clapack = 1
-else:
-    has_clapack = 0
-
-config.Finish()
-write_info(env)
-
-#==========
-#  Build
-#==========
-env.AppendUnique(CPPPATH = env['F2PYINCLUDEDIR'])
-env.AppendUnique(F2PYOPTIONS = '--quiet')
-
-env['BUILDERS']['GenerateFakePyf'] = Builder(action = do_generate_fake_interface,
-                                  emitter = generate_interface_emitter)
-
-#------------
-#   flapack
-#------------
-yop = env.NumpyFromFTemplate('flapack.pyf', 'flapack.pyf.src')
-env.NumpyPythonExtension('flapack', source = ['flapack.pyf'])
-
-#------------
-#   clapack
-#------------
-if has_clapack:
-    env.NumpyFromFTemplate('clapack.pyf', 'clapack.pyf.src')
-else:
-    env.GenerateFakePyf('clapack', 'clapack.pyf.src')
-env.NumpyPythonExtension('clapack', source = 'clapack.pyf')
-
-#----------------
-# calc_lwork:
-#----------------
-calc_src = env.NumpyF2py(pjoin('calc_lworkmodule.c'), 
-                         source = pjoin('calc_lwork.f'))
-env.NumpyPythonExtension('calc_lwork', source = calc_src + ['calc_lwork.f'],
-                         LINKFLAGSEND = env['F77_LDFLAGS'])
-
-#--------------
-# Atlas version
-#--------------
-env.NumpyPythonExtension('atlas_version', 'atlas_version.c')

Copied: branches/refactor_fft/scipy/lib/lapack/SConstruct (from rev 4510, trunk/scipy/lib/lapack/SConstruct)

Modified: branches/refactor_fft/scipy/lib/lapack/scons_support.py
===================================================================
--- branches/refactor_fft/scipy/lib/lapack/scons_support.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/lib/lapack/scons_support.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -1,8 +1,6 @@
 from os.path import join as pjoin, splitext, basename as pbasename
 
 def generate_interface_emitter(target, source, env):
-    source = [pjoin(env['build_dir'], str(i)) for i in source]
-    target = [pjoin(env['build_dir'], str(i)) for i in target]
     base = str(target[0])
     return (['%s.pyf' % base], source)
 

Copied: branches/refactor_fft/scipy/linalg/SConscript (from rev 4510, trunk/scipy/linalg/SConscript)

Deleted: branches/refactor_fft/scipy/linalg/SConstruct
===================================================================
--- branches/refactor_fft/scipy/linalg/SConstruct	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/linalg/SConstruct	2008-07-01 04:52:00 UTC (rev 4511)
@@ -1,159 +0,0 @@
-# Last Change: Sat May 03 02:00 PM 2008 J
-# vim:syntax=python
-
-import os
-from os.path import join as pjoin, splitext
-
-from numpy.distutils.misc_util import get_numpy_include_dirs
-from numscons import get_python_inc#, get_pythonlib_dir
-from numscons import GetNumpyEnvironment
-from numscons import CheckCBLAS, CheckF77BLAS, CheckF77LAPACK,\
-                                  CheckCLAPACK, IsVeclib, IsAccelerate, \
-                                  IsATLAS, GetATLASVersion, CheckF77Clib
-from numscons import write_info
-
-from scons_support import do_generate_interface, do_generate_fake_interface, \
-                          generate_interface_emitter
-
-#from scons_support import CheckBrokenMathlib, define_no_smp, \
-#    generate_config_header, generate_config_header_emitter
-
-env = GetNumpyEnvironment(ARGUMENTS)
-env.Tool('numpyf2py')
-env.Append(CPPPATH = [get_python_inc(), get_numpy_include_dirs()])
-
-# XXX: handle cblas wrapper for complex (check in numpy.scons or here ?)
-env.AppendUnique(CPPPATH = env['F2PYINCLUDEDIR'])
-env.AppendUnique(F2PYOPTIONS = '--quiet')
-
-env['BUILDERS']['haha'] = Builder(action = do_generate_interface,
-                                  emitter = generate_interface_emitter)
-
-env['BUILDERS']['hihi'] = Builder(action = do_generate_fake_interface,
-                                  emitter = generate_interface_emitter)
-
-#if os.name == 'nt':
-#    # NT needs the pythonlib to run any code importing Python.h, including
-#    # simple code using only typedef and so on, so we need it for configuration
-#    # checks
-#    env.AppendUnique(LIBPATH = [get_pythonlib_dir()])
-
-fenv = env.Clone()
-
-#=======================
-# Starting Configuration
-#=======================
-config = env.Configure(custom_tests = {'CheckCBLAS' : CheckCBLAS,
-                                       'CheckCLAPACK' : CheckCLAPACK})
-
-#-------------------------
-# Checking cblas/clapack
-#-------------------------
-if config.CheckCBLAS():
-    has_cblas = 1
-else:
-    has_cblas = 0
-if has_cblas:
-    if IsATLAS(env, 'cblas'):
-        version = GetATLASVersion(env)
-        env.Append(CPPDEFINES = [('ATLAS_INFO', '"\\"%s"\\"' % version)])
-    else:
-        env.Append(CPPDEFINES = [('NO_ATLAS_INFO', 1)])
-
-if config.CheckCLAPACK():
-    has_clapack = 1
-else:
-    has_clapack = 0
-
-config.Finish()
-write_info(env)
-
-#---------------------------
-# Checking F77 blas/lapack
-#---------------------------
-fconfig = fenv.Configure(custom_tests = {'CheckBLAS' : CheckF77BLAS,
-                         'CheckLAPACK' : CheckF77LAPACK,
-                         'CheckF77Clib' : CheckF77Clib})
-
-if not fconfig.CheckF77Clib():
-    raise RuntimeError("Could not check F/C runtime library for %s/%s, " \
-                       "contact the maintainer" % (fenv['CC'], fenv['F77']))
-
-st = fconfig.CheckBLAS(check_version = 1)
-if not st:
-    raise RuntimeError("no blas found, necessary for linalg module")
-if IsATLAS(fenv, 'blas'):
-    version = GetATLASVersion(fenv)
-    env.Append(CPPDEFINES = [('ATLAS_INFO', '"\\"%s"\\"' % version)])
-else:
-    env.Append(CPPDEFINES = [('NO_ATLAS_INFO', 1)])
-
-st = fconfig.CheckLAPACK()
-if not st:
-    raise RuntimeError("no lapack found, necessary for linalg module")
-fconfig.Finish()
-write_info(fenv)
-
-
-#==========
-#  Build
-#==========
-#------------
-#   fblas
-#------------
-fenv.haha('fblas', 'generic_fblas.pyf')
-source = ['fblas.pyf']
-if IsVeclib(fenv, 'blas') or IsAccelerate(fenv, 'blas'):
-    source.append(pjoin('src', 'fblaswrap_veclib_c.c'))
-else:
-    source.append(pjoin('src', 'fblaswrap.f'))
-fenv.NumpyPythonExtension('fblas', source)
-
-#------------
-#   cblas
-#------------
-if has_cblas:
-    env.haha('cblas', 'generic_cblas.pyf')
-else:
-    env.hihi('cblas', 'generic_cblas.pyf')
-env.NumpyPythonExtension('cblas', source = 'cblas.pyf')
-
-#------------
-#   flapack
-#------------
-yop = fenv.haha('flapack', 'generic_flapack.pyf')
-# XXX: automatically scan dependency on flapack_user_routines.pyf ?
-fenv.Depends(yop, pjoin(env['build_dir'], 'flapack_user_routines.pyf'))
-fenv.NumpyPythonExtension('flapack', 'flapack.pyf')
-
-#------------
-#   clapack
-#------------
-if has_clapack:
-    env.haha('clapack', 'generic_clapack.pyf')
-else:
-    env.hihi('clapack', 'generic_clapack.pyf')
-env.NumpyPythonExtension('clapack', source = 'clapack.pyf')
-
-#----------------
-#   _flinalg
-#----------------
-flinalg_fsrc = [pjoin('src', i) for i in ['det.f', 'lu.f']]
-flinalg_src = fenv.NumpyF2py(pjoin('src', '_flinalgmodule.c'), flinalg_fsrc)
-
-fenv.NumpyPythonExtension('_flinalg', source = flinalg_src + flinalg_fsrc)
-
-#----------------
-# calc_lwork:
-#----------------
-calc_fsrc = [pjoin('src', 'calc_lwork.f')]
-calc_src = env.NumpyF2py(pjoin('src', 'calc_lworkmodule.c'), calc_fsrc)
-fenv.NumpyPythonExtension('calc_lwork', calc_src + calc_fsrc)
-
-#--------------
-# Atlas version
-#--------------
-atlas_env = env.Clone()
-if not IsATLAS(env, 'cblas'):
-    atlas_env.AppendUnique(CPPDEFINES = "NO_ATLAS_INFO")
-atlas_env.NumpyPythonExtension('atlas_version', 'atlas_version.c')

Copied: branches/refactor_fft/scipy/linalg/SConstruct (from rev 4510, trunk/scipy/linalg/SConstruct)

Modified: branches/refactor_fft/scipy/linalg/scons_support.py
===================================================================
--- branches/refactor_fft/scipy/linalg/scons_support.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/linalg/scons_support.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -14,8 +14,6 @@
     return 0
 
 def generate_interface_emitter(target, source, env):
-    source = [pjoin(env['build_dir'], str(i)) for i in source]
-    target = [pjoin(env['build_dir'], str(i)) for i in target]
     base = str(target[0])
     return (['%s.pyf' % base], source)
 

Copied: branches/refactor_fft/scipy/ndimage/SConscript (from rev 4510, trunk/scipy/ndimage/SConscript)

Deleted: branches/refactor_fft/scipy/ndimage/SConstruct
===================================================================
--- branches/refactor_fft/scipy/ndimage/SConstruct	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/ndimage/SConstruct	2008-07-01 04:52:00 UTC (rev 4511)
@@ -1,22 +0,0 @@
-# Last Change: Wed Mar 05 09:00 PM 2008 J
-from os.path import join
-
-from numpy.distutils.misc_util import get_numpy_include_dirs
-from numscons import GetNumpyEnvironment
-
-env = GetNumpyEnvironment(ARGUMENTS)
-
-env.AppendUnique(CPPPATH = get_numpy_include_dirs())
-env.AppendUnique(CPPPATH = 'src')
-
-ndimage_src = ["nd_image.c", "ni_filters.c", "ni_fourier.c", "ni_interpolation.c",
-               "ni_measure.c", "ni_morphology.c", "ni_support.c"]
-env.NumpyPythonExtension('_nd_image', source = [join('src', i) for i in ndimage_src])
-
-segment_src = ['Segmenter_EXT.c', 'Segmenter_IMPL.c']
-env.NumpyPythonExtension('_segment', source = [join('src', 'segment', i) 
-                                               for i in segment_src])
-
-register_src = ['Register_EXT.c', 'Register_IMPL.c']
-env.NumpyPythonExtension('_register', source = [join('src', 'register', i) 
-                                                for i in register_src])

Copied: branches/refactor_fft/scipy/ndimage/SConstruct (from rev 4510, trunk/scipy/ndimage/SConstruct)

Modified: branches/refactor_fft/scipy/ndimage/_registration.py
===================================================================
--- branches/refactor_fft/scipy/ndimage/_registration.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/ndimage/_registration.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -1,10 +1,17 @@
+#
+# written by Tom Waite
+# rigid body 3D registration
+#
+
+
 import math
-import os
-import numpy as NP
-import scipy.ndimage._register as R
-import scipy.special  as SP
-import scipy.ndimage  as NDI
-import scipy.optimize as OPT
+import numpy as np
+from scipy.special import erf
+from scipy.ndimage import correlate1d
+from scipy.optimize import fmin_powell, fmin_cg
+
+import scipy.ndimage._register as reg
+
 import time
 import glob
 
@@ -29,71 +36,60 @@
 #  ---- co-registration and IO  ---- 
 #
 
-def resize_image(imageG, imageF_mat):
+def resize_image(imageS, imageS_mat, imageR_mat):
     """
-    zoom_image = resize_image(source_image, reference_image[mat])
+    zoom_image, M = resize_image(imageS, imageS_mat, imageR_mat)
 
-    Fractional resample source_image to reference_imagesize. The
-    resample is implemented with 3D cubic spline. The reference
-    image [mat] is the 4x4 voxel-to-physical conversion matrix.
+    Fractional resample source_image to reference_image size. The
+    resample is implemented with 3D cubic spline. The source
+    imageS_mat is the 4x4 voxel-to-physical conversion matrix.
     
     Parameters 
     ----------
 
-    imageG : {dictionary} 
-        imageG is the source image to be resized. it is a dictionary with
-        the data as an ndarray in the ['data'] component.
+    imageS: {ndarray} 
+        imageS is the source image to be resized.
 
-    reference_image[mat] : {ndarray}
-        refernce_image is the image whose sampling dimensions the source
-        image is to be remapped to. [mat] refers to the component
-        of the image dictionary, reference_image['mat'] that is the
-        sampling dimensions.
+    imageS_mat : {ndarray} 
+        the 4x4 transform of the source image that maps voxel to physical.
 
+    imageR_mat : {ndarray}
+        the 4x4 transform of the destination image that maps voxel to physical.
+
     Returns 
     -------
-    zoom_image : {dictionary}
+    zoom_image : {ndarray}
 
     Examples
     --------
 
     >>> import _registration as reg
-    >>> measures, imageF_anat, fmri_series = reg.demo_MRI_coregistration()
+    >>> measures, image_anat, image_anat_mat, image_fmri_mat, fmri_series = reg.demo_MRI_coregistration()
 
-    >>> resampled_fmri = reg.resize_image(fmri_series[10], imageF_anat['mat'])
+    >>> resampled_fmri, M = reg.resize_image(fmri_series[10], image_fmri_mat, image_anat_mat)
 
-    image 10 in the fmri_series is resampled to imageF_anat coordinates
+    image 10 in the fmri_series is resampled from image_fmri_mat to image_anat coordinates
 
     """
 
-    Z = NP.zeros(3, dtype=NP.float64);
     # get the zoom
-    Z[0] = imageG['mat'][0][0] / imageF_mat[0][0]
-    Z[1] = imageG['mat'][1][1] / imageF_mat[1][1]
-    Z[2] = imageG['mat'][2][2] / imageF_mat[2][2]
+    Z = imageS_mat.diagonal() / imageR_mat.diagonal()
 
-    # new volume dimensions (rounded)
-    D = NP.zeros(3, dtype=NP.int32);
-    D[0] = int(float(imageG['dim'][0])*Z[0]+0.5)
-    D[1] = int(float(imageG['dim'][1])*Z[1]+0.5)
-    D[2] = int(float(imageG['dim'][2])*Z[2]+0.5)
+    # new volume dimensions (rounded). imageS.shape and Z are length-3, so this is an elementwise product
+    D = (imageS.shape * Z + 0.5).astype(np.int16)
 
-    M = NP.eye(4, dtype=NP.float64);
-    # for the test data, set the xyz voxel sizes for fMRI volume
-    M[0][0] = imageG['mat'][0][0]/Z[0]
-    M[1][1] = imageG['mat'][1][1]/Z[1]
-    M[2][2] = imageG['mat'][2][2]/Z[2]
+    # for the test data, set the xyz voxel sizes for fMRI volume. M is a 4x4 matrix.
+    M = np.diag(imageS_mat.diagonal() / Z)    
 
-    image = NP.zeros(D[2]*D[1]*D[0], dtype=NP.uint8).reshape(D[2], D[0], D[1])
+    image = np.zeros((D[2],D[1],D[0]),np.uint8)
+    
     mode  = 2
     scale = 0
-    R.register_volume_resample(imageG['data'], image, Z, scale, mode)
-    F = NP.zeros(3, dtype=NP.float64);
-    zoom_image = {'data' : image, 'mat' : M, 'dim' : D, 'fwhm' : F}
+    reg.register_volume_resample(imageS, image, Z, scale, mode)
 
-    return zoom_image
+    return image, M
 
-def remap_image(image, parm_vector, resample='linear'):
+def remap_image(image, M_inverse, resample='linear'):
     """
     remaped_image = remap_image(image, parm_vector, resample='linear')
 
@@ -103,51 +99,44 @@
 
     Parameters 
     ----------
-    image : {dictionary} 
-        image is the source image to be remapped. it is a dictionary with
-        the data as an ndarray in the ['data'] component.
+    image : {ndarray} 
+        image is the source image to be remapped. 
 
-    parm_vector : {ndarray}
-        parm_vector is the 6-dimensional vector (3 angles, 3 translations)
-        generated from the registration.
+    M_inverse : {ndarray}
+        M_inverse is the 4x4 inverse affine matrix 
 
     resample : {'linear', 'cubic'}, optional
 
 
     Returns 
     -------
-    remaped_image : {dictionary}
+    remaped_image : {ndarray}
 
     Examples
     --------
         image = fmri_series[i]
         x[0:6] = measures[i]['align_rotate'][0:6]
+        M = get_inverse_mappings(x)
         # overwrite the fMRI volume with the aligned volume
-        fmri_series[i] = remap_image(image, x, resample='cubic')
+        fmri_series[i] = remap_image(image, M, resample='cubic')
 
     """
 
-    #
-    # remap imageG to coordinates of imageF (creates imageG')
-    # use the 6 dim parm_vector (3 angles, 3 translations) to remap
-    #
-    M_inverse = get_inverse_mappings(parm_vector)
-    (layers, rows, cols) = image['data'].shape
     # allocate the zero image
-    remaped_image = NP.zeros(layers*rows*cols, dtype=NP.uint8).reshape(layers, rows, cols)
-    remaped_image = {'data' : remaped_image, 'mat' : image['mat'], 
-                     'dim' : image['dim'], 'fwhm' : image['fwhm']}
-    imdata = build_structs()
+    remaped_image = np.zeros(image.shape, dtype=np.uint8)
 
+    step = np.array([1, 1, 1], dtype=np.int32)
+
     if resample == 'linear':
         # trilinear interpolation mapping.
-        R.register_linear_resample(image['data'], remaped_image['data'], M_inverse, imdata['step'])
+        reg.register_linear_resample(image, remaped_image, M_inverse, step)
     elif resample == 'cubic':
         # tricubic convolve interpolation mapping. 
-        R.register_cubic_resample(image['data'], remaped_image['data'], M_inverse, imdata['step'])
+        reg.register_cubic_resample(image, remaped_image, M_inverse, step)
 
     return remaped_image
 
+
 def get_inverse_mappings(parm_vector):
     """
     M_inverse = get_inverse_mappings(parm_vector)
@@ -168,7 +157,7 @@
 
     >>> import numpy as NP
     >>> import _registration as reg
-    >>> array = NP.zeros(6, dtype=float)
+    >>> array = np.zeros(6, dtype=float)
     >>> M = reg.get_inverse_mappings(array)
     >>> M 
 
@@ -180,55 +169,53 @@
 
     """
     # get the inverse mapping to rotate the G matrix to F space following registration
-    imdata = build_structs()
-    # inverse angles and translations
-    imdata['parms'][0] = -parm_vector[0]
-    imdata['parms'][1] = -parm_vector[1]
-    imdata['parms'][2] = -parm_vector[2]
-    imdata['parms'][3] = -parm_vector[3]
-    imdata['parms'][4] = -parm_vector[4]
-    imdata['parms'][5] = -parm_vector[5]
-    M_inverse = build_rotate_matrix(imdata['parms'])
+    # -parm_vector is the inverse angles and translations
+    M_inverse = build_rotate_matrix(-parm_vector)
     return M_inverse
 
-def python_coreg(image1, image2, imdata, ftype=1, smimage=0, lite=0, smhist=0,
-                 method='nmi', opt_method='powell'):
+def register(image1, image1_mat, image2, image2_mat, multires=[4, 2], histo_fwhm=3, 
+             ftype=1, lite=0, smhist=0, method='nmi', opt_method='hybrid',
+             optimize_function=None):
+
     """
-    parm_vector = python_coreg(image1, image2, imdata, ftype=1, smimage=0, lite=0,
-                               smhist=0, method='nmi', opt_method='powell'):
+    parm_vector = register(image1, image1_mat, image2, image2_mat, multires=[4, 2], histo_fwhm=3,
+                             ftype=1, lite=0, smhist=0, method='nmi', opt_method='hybrid'):
 
-    takes two images and the image data descriptor (imdata) and determines the optimal 
     finds the optimal alignment of the two images (measured by mutual information or cross correlation)
     using optimization search of 3 angle and 3 translation parameters. The optimization 
     uses either the Powell or Conjugate Gradient methods in the scipy optimization 
-    package. The optimal parameter is returned.
+    package. The optimal rigid body parameter is returned.
 
     Parameters 
     ----------
-    image1 : {dictionary} 
+    image1 : {nd_array} 
         image1 is the source image to be remapped during the registration. 
-        it is a dictionary with the data as an ndarray in the ['data'] component.
-    image2 : {dictionary} 
+    image1_mat : {nd_array} 
+        image1_mat is the source image MAT 
+    image2 : {nd_array} 
         image2 is the reference image that image1 gets mapped to. 
-    imdata : {dictionary} 
-        image sampling and optimization information.
+    image2_mat : {nd_array} 
+        image2_mat is the reference image MAT
+    multires: {list}, optional
+        the volume subsample values for each pass of the registration.
+        the default is 2 passes with subsample 4 in pass 1 and subsample 2 in pass 2
+    histo_fwhm : {int}, optional
+        used for the filter kernel in the low pass filter of the joint histogram 
     ftype : {0, 1}, optional
         flag for type of low pass filter. 0 is Gauss-Spline
         1 is pure Gauss. Sigma determined from volume sampling info.
-    smimage : {0, 1}, optional
-        flag for volume 3D low pass filtering of image 2.
-        0 for no filter, 1 for do filter.
     lite : {0, 1}, optional
         lite of 1 is to jitter both images during resampling. 0
         is to not jitter. jittering is for non-aliased volumes.
     smhist: {0, 1}, optional
         flag for joint histogram low pass filtering. 0 for no filter,
         1 for do filter.
-    method: {'nmi', 'mi', 'ncc', 'ecc'}, optional
+    method: {'nmi', 'mi', 'ncc', 'ecc', 'mse'}, optional
         flag for type of registration metric. nmi is normalized mutual
         information; mi is mutual information; ecc is entropy cross
-        correlation; ncc is normalized cross correlation.
-    opt_method: {'powell', 'hybrid'}, optional
+        correlation; ncc is normalized cross correlation. mse is mean
+        squared error.
+    opt_method: {'powell', 'cg', 'hybrid'}, optional
         registration is two pass. Pass 1 is low res to get close to alignment
         and pass 2 starts at the pass 1 optimal alignment. In powell pass 1 and
         2 are powell, in hybrid pass 2 is conjugate gradient.
@@ -246,107 +233,169 @@
     >>> import numpy as NP
     >>> import _registration as reg
 
-    >>> image1, image2, imdata = reg.demo_MRI_volume_align()
-    >>> parm_vector = python_coreg(image1, image2, imdata)
+    >>> image1, image1_mat, image2, image2_mat = reg.demo_build_dual_volumes()
+    >>> parm_vector = register(image1, image1_mat, image2, image2_mat)
 
     """
-    start = time.time()
-    # smooth of the images
-    if smimage: 
-        image_F_xyz2 = filter_image_3D(image2['data'], image2['fwhm'], ftype)
-        image2['data'] = image_F_xyz2
-    parm_vector = multires_registration(image1, image2, imdata, lite, smhist, method, opt_method)
-    stop = time.time()
-    print 'Total Optimizer Time is ', (stop-start)
+
+    # do the parameter validity checking. this is specific to this 3D registration.
+    # make sure the image is 3D and the mats are 4x4 with nonzero diagonal
+
+    if image1.ndim != 3:
+        raise ValueError, "Image 1 is not 3 dimensional"
+
+    if image2.ndim != 3:
+        raise ValueError, "Image 2 is not 3 dimensional"
+
+    if image1.dtype != np.uint8:
+        raise ValueError, "Image 1 is not 8 bit (required for joint histogram)"
+
+    if image2.dtype != np.uint8:
+        raise ValueError, "Image 2 is not 8 bit (required for joint histogram)"
+
+    if image1_mat.shape != (4,4):
+        raise ValueError, "Image1 MAT is not 4x4"
+
+    if image2_mat.shape != (4,4):
+        raise ValueError, "Image2 MAT is not 4x4"
+
+    if (np.diag(image1_mat)).prod() == 0:
+        raise ValueError, "Image1 MAT has a 0 on the diagonal"
+
+    if (np.diag(image2_mat)).prod() == 0:
+        raise ValueError, "Image2 MAT has a 0 on the diagonal"
+
+    if opt_method=='hybrid' and np.size(multires) != 2:
+        raise ValueError, "hybrid method must be 2 pass registration"
+
+    if ftype != 0 and ftype != 1: 
+        raise ValueError, "choose filter type 0 or 1 only"
+
+    if lite != 0 and lite != 1: 
+        raise ValueError, "choose histogram generation type 0 or 1 only"
+
+    if smhist != 0 and smhist != 1: 
+        raise ValueError, "choose histogram smoothing type 0 or 1 only"
+
+    if method != 'nmi' and method != 'mi'  and method != 'ncc'\
+                       and method != 'ecc' and method != 'mse':
+        raise ValueError, "choose cost method nmi, mi, ecc, mse, ncc"
+
+    if opt_method != 'powell' and opt_method != 'cg'  and opt_method != 'hybrid':
+        raise ValueError, "only optimize methods powell, cg or hybrid are supported"
+
+    # default is to use the cost_function defined in this module.
+    # a different optimize_function may be passed in, but optfunc_args
+    # built below must then match its expected parameters.
+
+    if optimize_function is None:
+        optimize_function = cost_function
+
+    parm_vector = multires_registration(optimize_function, image1, image1_mat, image2, image2_mat,
+                                        multires, histo_fwhm, lite, smhist, method, opt_method)
+
     return parm_vector
 
-def multires_registration(image1, image2, imdata, lite, smhist, method, opt_method):
+def multires_registration(optimize_function, image1, image1_mat, image2, image2_mat,
+                          multires, histo_fwhm, lite, smhist, method, opt_method):
+
     """
-    x = multires_registration(image1, image2, imdata, lite, smhist, method, opt_method)
 
-    to be called by python_coreg() which optionally does 3D image filtering and 
-    provies timing for registration.
+    to be called by register(), which does the parameter validation.
 
     Parameters 
     ----------
-
-    image1 : {dictionary} 
+    image1 : {nd_array} 
         image1 is the source image to be remapped during the registration. 
-        it is a dictionary with the data as an ndarray in the ['data'] component.
-    image2 : {dictionary} 
+    image1_mat : {nd_array} 
+        image1_mat is the source image MAT 
+    image2 : {nd_array} 
         image2 is the reference image that image1 gets mapped to. 
-    imdata : {dictionary} 
-        image sampling and optimization information.
-    lite : {integer}
+    image2_mat : {nd_array} 
+        image2_mat is the reference image MAT
+    multires: {list}, optional
+        the volume subsample values for each pass of the registration.
+        the default is 2 passes with subsample 4 in pass 1 and subsample 2 in pass 2
+    histo_fwhm : {int}, optional
+        used for the filter kernel in the low pass filter of the joint histogram 
+    ftype : {0, 1}, optional
+        flag for type of low pass filter. 0 is Gauss-Spline
+        1 is pure Gauss. Sigma determined from volume sampling info.
+    lite : {0, 1}, optional
         lite of 1 is to jitter both images during resampling. 0
         is to not jitter. jittering is for non-aliased volumes.
-    smhist: {integer}
+    smhist: {0, 1}, optional
         flag for joint histogram low pass filtering. 0 for no filter,
         1 for do filter.
-    method: {'nmi', 'mi', 'ncc', 'ecc'}
+    method: {'nmi', 'mi', 'ncc', 'ecc', 'mse'}, optional
         flag for type of registration metric. nmi is normalized mutual
         information; mi is mutual information; ecc is entropy cross
-        correlation; ncc is normalized cross correlation.
-    opt_method: {'powell', 'hybrid'}
+        correlation; ncc is normalized cross correlation. mse is mean
+        squared error.
+    opt_method: {'powell', 'cg', 'hybrid'}, optional
         registration is two pass. Pass 1 is low res to get close to alignment
         and pass 2 starts at the pass 1 optimal alignment. In powell pass 1 and
         2 are powell, in hybrid pass 2 is conjugate gradient.
 
+
     Returns 
     -------
-    x : {nd_array}
+    parm_vector : {nd_array}
         this is the optimal alignment (6-dim) array with 3 angles and
         3 translations.
 
     Examples
     --------
 
-    (calling this from python_coreg which optionally filters image2)
+    (calling this from register which optionally filters image2)
     >>> import numpy as NP
     >>> import _registration as reg
-    >>> image1, image2, imdata = reg.demo_MRI_volume_align()
-    >>> parm_vector = python_coreg(image1, image2, imdata)
+    >>> image1, mat1, image2, mat2 = reg.demo_build_dual_volumes()
+    >>> parm_vector = register(image1, mat1, image2, mat2)
 
     """
     ret_histo=0
-    # zero out the start parameter; but this may be set to large values 
-    # if the head is out of range and well off the optimal alignment skirt
-    imdata['parms'][0:5] = 0.0
+    step = np.array([1, 1, 1], dtype=np.int32)
+    fwhm = np.zeros(2, dtype=np.int32)
     # make the step a scalar so it can be used in the multi-res loop
-    loop = range(imdata['sample'].size)
-    x = imdata['parms']
+    loop = range(np.size(multires))
+    # 6-D zero vector
+    x = np.zeros(6, dtype=np.float64)
+    # the kernel fwhm value for the x and y joint histogram filter
+    fwhm[:] = histo_fwhm
     for i in loop:
-        step = imdata['sample'][i]
-        imdata['step'][:] = step
-        optfunc_args = (image1, image2, imdata['step'], imdata['fwhm'], lite, smhist,
-                        method, ret_histo)
+        # this is the volume subsample
+        step[:] = multires[i]
+        # optfunc_args is specific to the cost_function in this file
+        # this will need to change if you use another optimize_function.
+        optfunc_args = (image1, image1_mat, image2, image2_mat, step, histo_fwhm,
+                        lite, smhist, method, ret_histo)
         p_args = (optfunc_args,)
         if opt_method=='powell':
             print 'POWELL multi-res registration step size ', step
             print 'vector ', x
-            x = OPT.fmin_powell(optimize_function, x, args=p_args,
-                                callback=callback_powell) 
+            x = fmin_powell(optimize_function, x, args=p_args, callback=callback_powell)
         elif opt_method=='cg':
             print 'CG multi-res registration step size ', step
             print 'vector ', x
-            x = OPT.fmin_cg(optimize_function, x, args=p_args, callback=callback_cg) 
+            x = fmin_cg(optimize_function, x, args=p_args, callback=callback_cg) 
         elif opt_method=='hybrid':
             if i==0:
                 print 'Hybrid POWELL multi-res registration step size ', step
                 print 'vector ', x
                 lite = 0
-                optfunc_args = (image1, image2, imdata['step'], imdata['fwhm'], lite, smhist,
-                                method, ret_histo)
+                optfunc_args = (image1, image1_mat, image2, image2_mat, step, histo_fwhm,
+                                lite, smhist, method, ret_histo)
                 p_args = (optfunc_args,)
-                x = OPT.fmin_powell(optimize_function, x, args=p_args, callback=callback_powell) 
+                x = fmin_powell(optimize_function, x, args=p_args, callback=callback_powell) 
             elif i==1:
                 print 'Hybrid CG multi-res registration step size ', step
                 print 'vector ', x
                 lite = 1
-                optfunc_args = (image1, image2, imdata['step'], imdata['fwhm'], lite, 
-                                smhist, method, ret_histo)
+                optfunc_args = (image1, image1_mat, image2, image2_mat, step, histo_fwhm,
+                                lite, smhist, method, ret_histo)
                 p_args = (optfunc_args,)
-                x = OPT.fmin_cg(optimize_function, x, args=p_args, callback=callback_cg) 
+                x = fmin_cg(optimize_function, x, args=p_args, callback=callback_cg) 
 
     return x
 
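For orientation, a minimal sketch of the coarse-to-fine pattern used above,
with a toy quadratic standing in for cost_function (toy_cost and its scale
argument are invented for illustration, not part of this module):

    import numpy as np
    from scipy.optimize import fmin_powell

    def toy_cost(x, scale):
        # stand-in for cost_function; scale plays the role of the subsample step
        return np.sum((x - 1.0) ** 2) * scale

    x = np.zeros(6, dtype=np.float64)
    for step in [4, 2]:
        # each pass starts from the previous pass's optimum
        x = fmin_powell(toy_cost, x, args=(float(step),), disp=0)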
@@ -367,7 +416,7 @@
     print x
     return
 
-def smooth_kernel(fwhm, x, ktype=1):
+def smooth_kernel(fwhm, x, pixel_scale=8.0, ktype=1):
     """
     kernel = smooth_kernel(fwhm, x, ktype=1)
 
@@ -395,8 +444,8 @@
     >>> import _registration as reg
     >>> fwhm = 3
     >>> ftype = 2
-    >>> p = NP.ceil(2*fwhm).astype(int)
-    >>> x = NP.array(range(-p, p+1))
+    >>> p = np.ceil(2*fwhm).astype(int)
+    >>> x = np.array(range(-p, p+1))
     >>> kernel = reg.smooth_kernel(fwhm, x, ktype=ftype)
     >>> kernel
 
@@ -409,24 +458,26 @@
 
     """
     eps = 0.00001
-    s   = NP.square((fwhm/math.sqrt(8.0*math.log(2.0)))) + eps
+    s   = np.square((fwhm/math.sqrt(pixel_scale*math.log(2.0)))) + eps
     if ktype==1:
         # from SPM: Gauss kernel convolved with 1st degree B spline
         w1 = 0.5 * math.sqrt(2.0/s)
         w2 = -0.5 / s
         w3 = math.sqrt((s*math.pi) /2.0)
-        kernel = 0.5*(SP.erf(w1*(x+1))*(x+1)       + SP.erf(w1*(x-1))*(x-1)    - 2.0*SP.erf(w1*(x))*(x) + 
-                      w3*(NP.exp(w2*NP.square(x+1))) + NP.exp(w2*(NP.square(x-1))) - 2.0*NP.exp(w2*NP.square(x)))
+        kernel = 0.5*(erf(w1*(x+1))*(x+1) + erf(w1*(x-1))*(x-1)
+                      - 2.0*erf(w1*(x))*(x) + w3*(np.exp(w2*np.square(x+1))) 
+                      + np.exp(w2*(np.square(x-1)))
+                      - 2.0*np.exp(w2*np.square(x)))
         kernel[kernel<0] = 0
         kernel = kernel / kernel.sum()  
     else:
         # Gauss kernel 
-        kernel = (1.0/math.sqrt(2.0*math.pi*s)) * NP.exp(-NP.square(x)/(2.0*s)) 
+        kernel = (1.0/math.sqrt(2.0*math.pi*s)) * np.exp(-np.square(x)/(2.0*s)) 
         kernel = kernel / kernel.sum()  
 
     return kernel
 
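For reference, the ktype=2 branch above is the textbook FWHM-to-variance
conversion s = (fwhm / sqrt(8 ln 2))^2 followed by a normalized Gaussian; a
self-contained check (the fwhm value here is invented):

    import math
    import numpy as np

    fwhm = 3.0
    s = (fwhm / math.sqrt(8.0 * math.log(2.0))) ** 2 + 0.00001
    p = int(np.ceil(2 * fwhm))
    x = np.arange(-p, p + 1)
    kernel = np.exp(-np.square(x) / (2.0 * s)) / math.sqrt(2.0 * math.pi * s)
    kernel = kernel / kernel.sum()   # normalized so it sums to 1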
-def filter_image_3D(imageRaw, fwhm, ftype=2):
+def filter_image_3D(imageRaw, fwhm, ftype=2, give_2D=0):
     """
     image_F_xyz = filter_image_3D(imageRaw, fwhm, ftype=2):
     does 3D separable digital filtering using scipy.ndimage.correlate1d
@@ -436,9 +487,9 @@
     imageRaw : {nd_array}
         the unfiltered 3D volume image
     fwhm : {nd_array}
-        used for kernel width
+        used for kernel width; it has 3 elements (one for each dimension)
     ktype: {1, 2}, optional
-        kernel type. 1 is Gauss convoled with spline, 2 is Gauss
+        kernel type. 1 is Gauss convolved with spline (SPM), 2 is Gauss
 
     Returns 
     -------
@@ -449,31 +500,39 @@
     --------
 
     >>> import _registration as reg
-    >>> image1, image2, imdata = reg.demo_MRI_volume_align()
+    >>> image1, mat1, image2, mat2 = reg.demo_build_dual_volumes()
     >>> ftype = 1
-    >>> image_Filter_xyz = filter_image_3D(image1['data'], image1['fwhm'], ftype)
+    >>> image_Filter_xyz = filter_image_3D(image1, fwhm, ftype)
     >>> image1['data'] = image_Filter_xyz
     """
 
-    p = NP.ceil(2*fwhm[0]).astype(int)
-    x = NP.array(range(-p, p+1))
+    p = np.ceil(2*fwhm).astype(int)
+    x = np.array(range(-p[0], p[0]+1))
     kernel_x = smooth_kernel(fwhm[0], x, ktype=ftype)
-    p = NP.ceil(2*fwhm[1]).astype(int)
-    x = NP.array(range(-p, p+1))
+
+    x = np.array(range(-p[1], p[1]+1))
     kernel_y = smooth_kernel(fwhm[1], x, ktype=ftype)
-    p = NP.ceil(2*fwhm[2]).astype(int)
-    x = NP.array(range(-p, p+1))
+
+    x = np.array(range(-p[2], p[2]+1))
     kernel_z = smooth_kernel(fwhm[2], x, ktype=ftype)
+
     output=None
-    # 3D filter in 3 1D separable stages
+    # 3D filter in 3 1D separable stages. keep the image
+    # names at each stage separate in case you need them;
+    # for example, you may need an image that is only 2D slice filtered
     axis = 0
-    image_F_x   = NDI.correlate1d(imageRaw,   kernel_x, axis, output)
+    image_F_x   = correlate1d(imageRaw,   kernel_x, axis, output)
     axis = 1
-    image_F_xy  = NDI.correlate1d(image_F_x,  kernel_y, axis, output)
+    image_F_xy  = correlate1d(image_F_x,  kernel_y, axis, output)
     axis = 2
-    image_F_xyz = NDI.correlate1d(image_F_xy, kernel_z, axis, output)
-    return image_F_xyz  
+    image_F_xyz = correlate1d(image_F_xy, kernel_z, axis, output)
 
+    if give_2D==0:
+        return image_F_xyz  
+    else:
+        return image_F_xyz, image_F_xy
+
+
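The three correlate1d passes above exploit separability: filtering with a 3D
outer-product kernel equals filtering once per axis with a 1D kernel. A
minimal sketch (the volume and kernel values are invented):

    import numpy as np
    from scipy.ndimage import correlate1d

    vol = np.random.rand(8, 8, 8)
    k = np.array([0.25, 0.5, 0.25])   # any normalized 1D kernel
    out = vol
    for axis in (0, 1, 2):
        # one separable 1D pass per axis
        out = correlate1d(out, k, axis)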
 def build_fwhm(M, S):
     """
     fwhm = build_fwhm(M, S)
@@ -500,28 +559,28 @@
     >>> import _registration as reg
     >>> anat_desc = reg.load_anatMRI_desc()
     >>> image1 = reg.load_volume(anat_desc, imagename='ANAT1_V0001.img')
-    >>> imdata = reg.build_structs()
     >>> image1['fwhm'] = reg.build_fwhm(image1['mat'], imdata['step'])
 
     """
-    view_3x3 = NP.square(M[0:3, 0:3])
-    # sum the elements inn the first row
-    vxg = NP.sqrt(view_3x3.sum(axis=0))
-    # assumes that sampling is the same for xyz
-    size = NP.array([1,1,1])*S[0]
-    x = NP.square(size) - NP.square(vxg)
+    # M contains the voxel to physical mapping
+    view_3x3 = np.square(M[0:3, 0:3])
+    # sum the squared elements along each row
+    vxg = np.sqrt(view_3x3.sum(axis=1))
+    # assumes that voxel sampling is the same for xyz as S is the step
+    size = np.array([1,1,1])*S[0]
+    x = np.square(size) - np.square(vxg)
     # clip
     x[x<0] = 0
-    fwhm = NP.sqrt(x) / vxg
+    fwhm = np.sqrt(x) / vxg
     # pathology when stepsize = 1 for MAT equal to the identity matrix
     fwhm[fwhm==0] = 1
     # return the 3D Gaussian kernel width (xyz)
     return fwhm 
 
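A small worked example of the computation above, assuming 1 mm isotropic
voxels (identity mat) and a step of 2 voxels (values invented):

    import numpy as np

    M = np.eye(4)                 # voxel-to-physical map, 1 mm voxels
    S = np.array([2, 2, 2])       # step in voxels
    vxg = np.sqrt(np.square(M[0:3, 0:3]).sum(axis=1))   # voxel sizes [1, 1, 1]
    x = np.square(np.array([1, 1, 1]) * S[0]) - np.square(vxg)
    x[x < 0] = 0
    fwhm = np.sqrt(x) / vxg       # sqrt(3) ~ 1.73 in each axis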
-def optimize_function(x, optfunc_args):
+def cost_function(x, optfunc_args):
     """
-    cost = optimize_function(x, optfunc_args)    --- OR ---
-    cost, joint_histogram = optimize_function(x, optfunc_args)   
+    cost = cost_function(x, optfunc_args)    --- OR ---
+    cost, joint_histogram = cost_function(x, optfunc_args)   
 
     computes the alignment between 2 volumes using cross correlation or mutual
     information metrics. In both cases the 8 bit joint histogram of the 2 images is
@@ -588,7 +647,6 @@
     >>> anat_desc = reg.load_anatMRI_desc()
     >>> image1 = reg.load_volume(anat_desc, imagename='ANAT1_V0001.img')
     >>> image2 = reg.load_volume(anat_desc, imagename='ANAT1_V0001.img')
-    >>> imdata = reg.build_structs()
     >>> image1['fwhm'] = reg.build_fwhm(image1['mat'], imdata['step'])
     >>> image2['fwhm'] = reg.build_fwhm(image2['mat'], imdata['step'])
     >>> method = 'ncc'
@@ -596,46 +654,47 @@
     >>> smhist = 0
     >>> ret_histo = 1
     >>> optfunc_args = (image1, image2, imdata['step'], imdata['fwhm'], lite, smhist, method, ret_histo)
-    >>> x = NP.zeros(6, dtype=NP.float64)
-    >>> return cost, joint_histogram = reg.optimize_function(x, optfunc_args)
+    >>> x = np.zeros(6, dtype=np.float64)
+    >>> cost, joint_histogram = reg.cost_function(x, optfunc_args)
 
 
     """
 
     image_F       = optfunc_args[0]
-    image_G       = optfunc_args[1]
-    sample_vector = optfunc_args[2]
-    fwhm          = optfunc_args[3]
-    do_lite       = optfunc_args[4]
-    smooth        = optfunc_args[5]
-    method        = optfunc_args[6]
-    ret_histo     = optfunc_args[7]
+    image_F_mat   = optfunc_args[1]
+    image_G       = optfunc_args[2]
+    image_G_mat   = optfunc_args[3]
+    sample_vector = optfunc_args[4]
+    fwhm          = optfunc_args[5]
+    do_lite       = optfunc_args[6]
+    smooth        = optfunc_args[7]
+    method        = optfunc_args[8]
+    ret_histo     = optfunc_args[9]
 
     rot_matrix = build_rotate_matrix(x)
     cost = 0.0
     epsilon = 2.2e-16 
     # image_G is base image
     # image_F is the to-be-rotated image
-    # rot_matrix is the 4x4 constructed (current angles and translates) transform matrix
+    # rot_matrix is the 4x4 constructed (rigid body) transform matrix
     # sample_vector is the subsample vector for x-y-z
 
-    F_inv = NP.linalg.inv(image_F['mat'])
-    composite = NP.dot(F_inv, image_G['mat'])
-    composite = NP.dot(composite, rot_matrix)
+    F_inv = np.linalg.inv(image_F_mat)
+    composite = np.dot(F_inv, image_G_mat)
+    composite = np.dot(composite, rot_matrix)
 
     if method == 'mse':
         #
         # mean squared error method
         #
 
-        (layers, rows, cols) = image_F['data'].shape
         # allocate the zero image
-        remap_image_F = NP.zeros(layers*rows*cols, dtype=NP.uint8).reshape(layers, rows, cols)
-        imdata = build_structs()
+        #(layers, rows, cols) = image_F.shape
+        remap_image_F = np.zeros(image_F.shape, dtype=np.uint8)
         # trilinear interpolation mapping.
-        R.register_linear_resample(image_F['data'], remap_image_F, composite,
-                                   imdata['step'])
-        cost = (NP.square(image_G['data']-remap_image_F)).mean()
+        reg.register_linear_resample(image_F, remap_image_F, composite, sample_vector)
+        cost = (np.square(image_G-remap_image_F)).mean()
+        # cost is minimized when G and F are aligned, so keep cost positive
 
         return cost
 
@@ -645,29 +704,25 @@
         #
 
         # allocate memory for 2D histogram
-        joint_histogram = NP.zeros([256, 256], dtype=NP.float64);
+        joint_histogram = np.zeros([256, 256], dtype=np.float64)
 
         if do_lite: 
-            R.register_histogram_lite(image_F['data'], image_G['data'], composite,
-                                      sample_vector, joint_histogram)
+            reg.register_histogram_lite(image_F, image_G, composite, sample_vector, joint_histogram)
         else:
-            R.register_histogram(image_F['data'], image_G['data'], composite,
-                                 sample_vector, joint_histogram)
+            reg.register_histogram(image_F, image_G, composite, sample_vector, joint_histogram)
 
         # smooth the histogram
         if smooth: 
-            p = NP.ceil(2*fwhm[0]).astype(int)
-            x = NP.array(range(-p, p+1))
-            kernel1 = smooth_kernel(fwhm[0], x)
-            p = NP.ceil(2*fwhm[1]).astype(int)
-            x = NP.array(range(-p, p+1))
-            kernel2 = smooth_kernel(fwhm[1], x)
+            p = np.ceil(2*fwhm).astype(int)
+            x = np.array(range(-p, p+1))
+            hkernel = smooth_kernel(fwhm, x)
             output=None
-            # 2D filter in 1D separable stages
+            # 2D filter in 1D separable stages using the same kernel. SPM
+            # has options for a 2D fwhm kernel yet only uses 1 element
             axis = 0
-            result = NDI.correlate1d(joint_histogram, kernel1, axis, output)
+            joint_histogram = correlate1d(joint_histogram, hkernel, axis, output)
             axis = 1
-            joint_histogram = NDI.correlate1d(result, kernel1, axis, output)
+            joint_histogram = correlate1d(joint_histogram, hkernel, axis, output)
 
         joint_histogram += epsilon # prevent log(0) 
         # normalize the joint histogram
@@ -678,44 +733,44 @@
 
         if method == 'mi':
             # mutual information
-            marginal_outer = NP.outer(marginal_col, marginal_row)
-            H = joint_histogram * NP.log(joint_histogram / marginal_outer)  
+            marginal_outer = np.outer(marginal_col, marginal_row)
+            H = joint_histogram * np.log(joint_histogram / marginal_outer)  
             mutual_information = H.sum()
             cost = -mutual_information
 
         elif method == 'ecc':
             # entropy correlation coefficient 
-            marginal_outer = NP.outer(marginal_col, marginal_row)
-            H = joint_histogram * NP.log(joint_histogram / marginal_outer)  
+            marginal_outer = np.outer(marginal_col, marginal_row)
+            H = joint_histogram * np.log(joint_histogram / marginal_outer)  
             mutual_information = H.sum()
-            row_entropy = marginal_row * NP.log(marginal_row)
-            col_entropy = marginal_col * NP.log(marginal_col)
+            row_entropy = marginal_row * np.log(marginal_row)
+            col_entropy = marginal_col * np.log(marginal_col)
             ecc  = -2.0*mutual_information/(row_entropy.sum() + col_entropy.sum())
             cost = -ecc
 
         elif method == 'nmi':
             # normalized mutual information
-            row_entropy = marginal_row * NP.log(marginal_row)
-            col_entropy = marginal_col * NP.log(marginal_col)
-            H = joint_histogram * NP.log(joint_histogram)  
-            nmi = (row_entropy.sum() + col_entropy.sum()) / (H.sum())
+            row_entropy = marginal_row * np.log(marginal_row)
+            col_entropy = marginal_col * np.log(marginal_col)
+            H = joint_histogram * np.log(joint_histogram)  
+            nmi  = (row_entropy.sum() + col_entropy.sum()) / (H.sum())
             cost = -nmi
 
         elif method == 'ncc':
             # cross correlation from the joint histogram 
             r, c = joint_histogram.shape
-            i = NP.array(range(1,c+1))
-            j = NP.array(range(1,r+1))
+            i = np.array(range(1,c+1))
+            j = np.array(range(1,r+1))
             m1 = (marginal_row * i).sum()
             m2 = (marginal_col * j).sum()
-            sig1 = NP.sqrt((marginal_row*(NP.square(i-m1))).sum())
-            sig2 = NP.sqrt((marginal_col*(NP.square(j-m2))).sum())
-            [a, b] = NP.mgrid[1:c+1, 1:r+1]
+            sig1 = np.sqrt((marginal_row*(np.square(i-m1))).sum())
+            sig2 = np.sqrt((marginal_col*(np.square(j-m2))).sum())
+            [a, b] = np.mgrid[1:c+1, 1:r+1]
             a = a - m1
             b = b - m2
             # element multiplies in the joint histogram and grids
             H = ((joint_histogram * a) * b).sum()
-            ncc = H / (NP.dot(sig1, sig2)) 
+            ncc  = H / (np.dot(sig1, sig2)) 
             cost = -ncc
 
         if ret_histo:
@@ -724,64 +779,6 @@
             return cost
 
 
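The 'mi' branch above is the standard mutual information of the normalized
joint histogram, H = sum p(f,g) log(p(f,g) / (p(f) p(g))); a self-contained
numpy check on a made-up histogram:

    import numpy as np

    jh = np.random.rand(256, 256) + 2.2e-16   # stand-in joint histogram
    jh = jh / jh.sum()                        # normalize
    marginal_row = jh.sum(axis=0)
    marginal_col = jh.sum(axis=1)
    outer = np.outer(marginal_col, marginal_row)
    mi = (jh * np.log(jh / outer)).sum()
    cost = -mi    # negated so the optimizer minimizes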
-def build_structs(step=1):
-    """
-    img_data = build_structs(step=1)
-
-    builds the image data (imdata) dictionary for later use as parameter
-    storage in the co-registration.
-
-    Parameters 
-    ----------
-    step : {int} : optional
-    default is 1 and is the sample increment in voxels. This sets the sample
-    for x,y,z and is the same value in all 3 axes. only change the default for debug.
-
-    Returns 
-    -------
-    img_data : {dictionary}
-
-    Examples
-    --------
-
-    >>> import numpy as NP
-    >>> import _registration as reg
-    >>> imdata = reg.build_structs()
-
-    """
-
-    # build image data structures here
-    P = NP.zeros(6, dtype=NP.float64);
-    T = NP.zeros(6, dtype=NP.float64);
-    F = NP.zeros(2, dtype=NP.int32);
-    S = NP.ones(3,  dtype=NP.int32);
-    sample = NP.zeros(2, dtype=NP.int32);
-    S[0] = step
-    S[1] = step
-    S[2] = step
-    # image/histogram smoothing
-    F[0] = 3
-    F[1] = 3
-    # subsample for multiresolution registration
-    sample[0] = 4
-    sample[1] = 2
-    # tolerances for angle (0-2) and translation (3-5)
-    T[0] = 0.02 
-    T[1] = 0.02 
-    T[2] = 0.02 
-    T[3] = 0.001 
-    T[4] = 0.001 
-    T[5] = 0.001 
-    # P[0] = alpha <=> pitch. + alpha is moving back in the sagittal plane
-    # P[1] = beta  <=> roll.  + beta  is moving right in the coronal plane
-    # P[2] = gamma <=> yaw.   + gamma is right turn in the transverse plane
-    # P[3] = Tx
-    # P[4] = Ty
-    # P[5] = Tz
-    img_data = {'parms' : P, 'step' : S, 'fwhm' : F, 'tol' : T, 'sample' : sample}
-    return img_data
-
-
 def build_rotate_matrix(img_data_parms):
     """
     rot_matrix = reg.build_rotate_matrix(img_data_parms)
@@ -803,8 +800,7 @@
 
     >>> import numpy as NP
     >>> import _registration as reg
-    >>> imdata = reg.build_structs()
-    >>> x = NP.zeros(6, dtype=NP.float64)
+    >>> x = np.zeros(6, dtype=np.float64)
     >>> M = reg.build_rotate_matrix(x)
     >>> M 
     array([[ 1.,  0.,  0.,  0.],
@@ -815,10 +811,10 @@
 
     """
 
-    R1 = NP.zeros([4,4], dtype=NP.float64);
-    R2 = NP.zeros([4,4], dtype=NP.float64);
-    R3 = NP.zeros([4,4], dtype=NP.float64);
-    T  = NP.eye(4, dtype=NP.float64);
+    R1 = np.zeros([4,4], dtype=np.float64);
+    R2 = np.zeros([4,4], dtype=np.float64);
+    R3 = np.zeros([4,4], dtype=np.float64);
+    T  = np.eye(4, dtype=np.float64);
 
     alpha = math.radians(img_data_parms[0])
     beta  = math.radians(img_data_parms[1])
@@ -853,404 +849,196 @@
     T[1][3] = img_data_parms[4]
     T[2][3] = img_data_parms[5]
 
-    rot_matrix = NP.dot(T, R1);
-    rot_matrix = NP.dot(rot_matrix, R2);
-    rot_matrix = NP.dot(rot_matrix, R3);
+    rot_matrix = np.dot(T, R1);
+    rot_matrix = np.dot(rot_matrix, R2);
+    rot_matrix = np.dot(rot_matrix, R3);
 
     return rot_matrix
 
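Hypothetical usage of build_rotate_matrix above, applying the 4x4 rigid
transform to one homogeneous point (the angle and translation values are
invented):

    import numpy as np

    x = np.zeros(6, dtype=np.float64)
    x[0] = 5.0     # alpha, pitch in degrees
    x[3] = 2.0     # Tx
    M = build_rotate_matrix(x)
    point = np.array([10.0, 0.0, 0.0, 1.0])   # homogeneous coordinates
    moved = np.dot(M, point)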
 
-def load_volume(imagedesc, imagename=None, threshold=0.999, debug=0):
+def build_gauss_volume(imagedesc, S=[1500.0, 2500.0, 1000.0]):
 
     """
-    image = load_volume(imagedesc, imagename=None, threshold=0.999, debug=0)  --- OR ---
-    image, h, ih, index = load_volume(imagedesc, imagename=None, threshold=0.999, debug=0)
+    build a 3D Gaussian volume. the user passes the image dims in imagedesc.
+    the sigma for each axis comes from the 3-element S, where 0=z, 1=y, 2=x
 
-    gets an image descriptor and optional filename and returns a scaled 8 bit volume. The
-    scaling is designed to make full use of the 8 bits (ignoring high amplitude outliers).
-    The current method uses numpy fromfile and will be replaced by neuroimage nifti load.
+    volume3D = build_gauss_volume(imagedesc, S)
 
     Parameters 
     ----------
-    imagedesc : {dictionary} 
-        imagedesc is the descriptor of the image to be read. 
+    imagedesc : {dictionary}
+        volume dimensions and sampling
 
-    imagename : {string} : optional
-        name of image file. No name creates a blank image that is used for creating
-        a rotated test image or image rescaling.
+    S : {tuple}
+        the Gaussian sigma for Z, Y and X
 
-    threshold : {float} : optional
-        this is the threshold for upper cutoff in the 8 bit scaling. The volume histogram
-        and integrated histogram is computed and the upper amplitude cutoff is where the 
-        integrated histogram crosses the value set in the threshold. setting threshold to
-        1.0 means the scaling is done over the min to max amplitude range.
-
-    debug : {0, 1} : optional
-        when debug=1 the method returns the volume histogram, integrated histogram and the 
-        amplitude index where the provided threshold occured.
-
     Returns 
     -------
-    image : {dictionary}
-        the volume data assoicated with the filename or a blank volume of the same
-        dimensions as specified in imagedesc.
 
-    --- OR --- (if debug = 1)
+    volume3D : {nd_array}
+        the 3D volume for testing
 
-    image : {dictionary}
-        the volume data assoicated with the filename or a blank volume of the same
-        dimensions as specified in imagedesc.
+    """
+    layers = imagedesc['layers']
+    rows   = imagedesc['rows']
+    cols   = imagedesc['cols']
 
-    h : {nd_array}
-        the volume 1D amplitude histogram
+    L = layers/2
+    R = rows/2
+    C = cols/2
 
-    ih : {nd_array}
-        the volume 1D amplitude integrated histogram
+    # build coordinates for 3D Gaussian volume
+    # coordinates are centered at (0, 0, 0)
+    [a, b, c] = np.mgrid[-L:L, -R:R, -C:C]
 
-    index : {int}
-        the amplitude (histogram index) where the integrated histogram
-        crosses the 'threshold' provided.
+    sigma    = np.array([S[0], S[1], S[2]])
+    aa       = (np.square(a))/sigma[0]
+    bb       = (np.square(b))/sigma[1]
+    cc       = (np.square(c))/sigma[2]
+    volume3D = (255.0*np.exp(-(aa + bb + cc))).astype(np.uint8)
 
-    Examples
-    --------
+    return volume3D
 
-    >>> import numpy as NP
-    >>> import _registration as reg
-    >>> anat_desc = reg.load_anatMRI_desc()
-    >>> image_anat, h, ih, index = reg.load_volume(anat_desc, imagename='ANAT1_V0001.img', debug=1)
-    >>> index
-    210
 
+def scale_image(image, max_amp=255, image_type=np.uint8, threshold=0.999, fetch_ih=0):
 
     """
+    scale and threshold clip the volume using the integrated histogram
+    to set the high threshold
 
-    # load MRI or fMRI volume and return an autoscaled 8 bit image.
-    # autoscale is using integrated histogram to deal with outlier high amplitude voxels
-    if imagename == None:
-        # imagename of none means to create a blank image
-        ImageVolume = NP.zeros(imagedesc['layers']*imagedesc['rows']*imagedesc['cols'],
-                        dtype=NP.uint16).reshape(imagedesc['layers'], imagedesc['rows'], imagedesc['cols'])
-    else:
-        ImageVolume = NP.fromfile(imagename,
-                        dtype=NP.uint16).reshape(imagedesc['layers'], imagedesc['rows'], imagedesc['cols']);
+    Parameters 
+    ----------
+    image : {nd_array}
+        raw unscaled volume
 
-    # the mat (voxel to physical) matrix
-    M = NP.eye(4, dtype=NP.float64);
-    # for now just the sample size (mm units) in x, y and z
-    M[0][0] = imagedesc['sample_x']
-    M[1][1] = imagedesc['sample_y']
-    M[2][2] = imagedesc['sample_z']
-    # dimensions 
-    D = NP.zeros(3, dtype=NP.int32);
-    # Gaussian kernel - fill in with build_fwhm() 
-    F = NP.zeros(3, dtype=NP.float64);
-    D[0] = imagedesc['rows']
-    D[1] = imagedesc['cols']
-    D[2] = imagedesc['layers']
+    max_amp : int (default 255)
+        the maximum value of the scaled image
 
-    if imagename == None:
-        # no voxels to scale to 8 bits
-        ImageVolume = ImageVolume.astype(NP.uint8)
-        image = {'data' : ImageVolume, 'mat' : M, 'dim' : D, 'fwhm' : F}
-        return image
+    image_type : nd_array dtype (default uint8)
+        the type of the volume to return.
 
-    # 8 bit scale with threshold clip of the volume integrated histogram
-    max = ImageVolume.max()
-    min = ImageVolume.min()
-    ih  = NP.zeros(max-min+1, dtype=NP.float64);
-    h   = NP.zeros(max-min+1, dtype=NP.float64);
-    if threshold <= 0:
-        threshold = 0.999
-    elif threshold > 1.0:
-        threshold = 1.0
-    # get the integrated histogram of the volume and get max from 
-    # the threshold crossing in the integrated histogram 
-    index  = R.register_image_threshold(ImageVolume, h, ih, threshold)
-    scale  = 255.0 / (index-min)
-    # generate the scaled 8 bit image
-    images = (scale*(ImageVolume.astype(NP.float)-min))
-    images[images>255] = 255 
-    image = {'data' : images.astype(NP.uint8), 'mat' : M, 'dim' : D, 'fwhm' : F}
-    if debug == 1:
-        return image, h, ih, index
-    else:
-        return image
+    threshold : float (default 0.999)
+        the value of the normalized integrated histogram
+        that, when reached, sets the high threshold index
 
+    Returns 
+    -------
+    image : {nd_array}
+        the scaled volume
+    ih : {nd_array}
+        the integrated histogram. can be used for image display
+        purposes (histogram equalization)
 
+    """
 
-#
-#  ---- demo/debug routines  ---- 
-#
+    max = image.max()
+    min = image.min()
+    if max == 0 and min == 0:
+        raise ValueError, "Zero image. cannot be scaled"
 
-def load_anatMRI_desc():
-    # this is for demo on the test MRI and fMRI volumes
-    rows   = 256
-    cols   = 256
-    layers = 90
-    xsamp  = 0.9375
-    ysamp  = 0.9375
-    zsamp  = 1.5
-    desc = {'rows' : rows, 'cols' : cols, 'layers' : layers, 
-            'sample_x' : xsamp, 'sample_y' : ysamp, 'sample_z' : zsamp}
-    return desc
+    # need range of pixels for the number of bins
+    h, edges = np.histogram(image, bins=(max-min))
+    ih = (np.cumsum(h)).astype(np.float64)
+    # normalize the integrated histogram
+    ih = ih / ih.max()
+    indices = np.where(ih >= threshold)
+    # np.where returns a tuple holding one nd_array of all the indices
+    # where ih >= threshold; only the first index is needed, hence
+    # the [0][0] below
+    index   = indices[0][0]
+    scale   = float(max_amp) / (index-min)
+    image   = (scale*(image.astype(np.float)-min))
+    image[image>max_amp] = max_amp
+    # down type. usually will go from float to 8 bit (needed for the 8 bit joint histogram)
+    image = image.astype(image_type)
 
-def load_fMRI_desc():
-    # this is for demo on the test MRI and fMRI volumes
-    rows   = 64
-    cols   = 64
-    layers = 28
-    xsamp  = 3.75
-    ysamp  = 3.75
-    zsamp  = 5.0
-    desc = {'rows' : rows, 'cols' : cols, 'layers' : layers, 
-            'sample_x' : xsamp, 'sample_y' : ysamp, 'sample_z' : zsamp}
-    return desc
-
-def read_fMRI_directory(path):
-    files_fMRI = glob.glob(path)
-    return files_fMRI
-
-
-def check_alignment(image1, image2, imdata, method='ncc', lite=0, smhist=0, 
-                    alpha=0.0, beta=0.0, gamma=0.0, Tx=0, Ty=0, Tz=0, ret_histo=0):
-                    
-    #
-    # to test the cost function and view the joint histogram
-    # for 2 images. used for debug
-    #
-    imdata['parms'][0] = alpha
-    imdata['parms'][1] = beta
-    imdata['parms'][2] = gamma
-    imdata['parms'][3] = Tx
-    imdata['parms'][4] = Ty
-    imdata['parms'][5] = Tz
-    M = build_rotate_matrix(imdata['parms'])
-    optfunc_args = (image1, image2, imdata['step'], imdata['fwhm'], lite, smhist, method, ret_histo)
-
-    if ret_histo:
-        cost, joint_histogram = optimize_function(imdata['parms'], optfunc_args)
-        return cost, joint_histogram 
+    if fetch_ih == 1:
+        return image, ih
     else:
-        cost = optimize_function(imdata['parms'], optfunc_args)
-        return cost
+        return image
 
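The integrated-histogram clip above can be exercised in isolation; a sketch
on synthetic data (the distribution parameters are invented):

    import numpy as np

    img = np.random.normal(1000.0, 100.0, size=4096)
    h, edges = np.histogram(img, bins=256)
    ih = np.cumsum(h).astype(np.float64)
    ih = ih / ih.max()                    # normalized integrated histogram
    index = np.where(ih >= 0.999)[0][0]   # first bin crossing the threshold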
-def build_scale_image(image, scale):
-    #
-    # rescale the 'mat' (voxel to physical mapping matrix) 
-    #
-    (layers, rows, cols) = image['data'].shape
-    M = image['mat'] * scale
-    # dimensions 
-    D = NP.zeros(3, dtype=NP.int32);
-    # Gaussian kernel - fill in with build_fwhm() 
-    F = NP.zeros(3, dtype=NP.float64);
-    Z = NP.zeros(3, dtype=NP.float64);
-    D[0] = rows/scale
-    D[1] = cols/scale
-    D[2] = layers/scale
-    image2 = NP.zeros(D[2]*D[1]*D[0], dtype=NP.uint8).reshape(D[2], D[0], D[1]);
-    mode = 1;
-    R.register_volume_resample(image['data'], image2, Z, scale, mode)
-    scaled_image = {'data' : image2, 'mat' : M, 'dim' : D, 'fwhm' : F}
-    return scaled_image
 
-
-def demo_MRI_volume_align(scale=2, alpha=3.0, beta=4.0, gamma=5.0, Tx = 0.0, Ty = 0.0, Tz = 0.0):
+def check_alignment(image1, image1_mat, image2, image2_mat, histo_fwhm=3, method='ncc', lite=0,
+                    smhist=0, alpha=0.0, beta=0.0, gamma=0.0, Tx=0, Ty=0, Tz=0, ret_histo=0):
+                    
     """
-    demo with (must have file ANAT1_V0001.img)
+    test the cost function and (optionally) view the joint histogram. can be
+    used during intra-modal registration to measure the current alignment
+    (returns the cross correlation). typically measured before and after
+    registration.
 
-    image1, image2, imdata = reg.demo_MRI_volume_align()
-    x = reg.python_coreg(image1, image2, imdata, method='ncc', lite=1) 
-    image2r = reg.remap_image(image2, x, resample='cubic')
-    image2rz = reg.resize_image(image2r, image1['mat'])
 
 
-    slice1 = image1['data'][45, :, :]
-    slice2 = image2['data'][45/2, :, :]
-    slice2r = image2r['data'][45/2, :, :]
-    slice2rz = image2rz['data'][45, :, :]
-
-    pylab.figure(1)
-    pylab.bone()
-    pylab.imshow(slice1)
-    pylab.imshow(slice1)
-    pylab.figure(2)
-    pylab.imshow(slice2)
-    pylab.figure(3)
-    pylab.imshow(slice2r)
-    pylab.figure(4)
-    pylab.imshow(slice2rz)
-    pylab.show()
-
     """
-    #
-    # this is for coreg MRI / fMRI scale test. The volume is anatomical MRI.
-    # the image is rotated in 3D. after rotation the image is scaled.  
-    #
 
-    anat_desc = load_anatMRI_desc()
-    image1 = load_volume(anat_desc, imagename='ANAT1_V0001.img')
-    image2 = load_volume(anat_desc, imagename=None)
-    imdata = build_structs()
-    image1['fwhm'] = build_fwhm(image1['mat'], imdata['step'])
-    image2['fwhm'] = build_fwhm(image2['mat'], imdata['step'])
-    imdata['parms'][0] = alpha
-    imdata['parms'][1] = beta
-    imdata['parms'][2] = gamma
-    imdata['parms'][3] = Tx
-    imdata['parms'][4] = Ty
-    imdata['parms'][5] = Tz
-    M = build_rotate_matrix(imdata['parms'])
-    # rotate volume. linear interpolation means the volume is low pass filtered
-    R.register_linear_resample(image1['data'], image2['data'], M, imdata['step'])
-    # subsample volume
-    image3 = build_scale_image(image2, scale)
-    return image1, image3, imdata
+    # do the parameter validity checking. this is specific to this 3D registration.
+    # make sure the image is 3D and the mats are 4x4 with nonzero diagonal
 
-def demo_rotate_fMRI_volume(fMRIVol, x): 
-    #
-    # return rotated fMRIVol. the fMRIVol is already loaded, and gets rotated
-    #
+    if image1.ndim != 3:
+        raise ValueError, "Image 1 is not 3 dimensional"
 
-    desc = load_fMRI_desc()
-    image = load_volume(desc, imagename=None)
-    imdata = build_structs()
-    image['fwhm'] = build_fwhm(image['mat'], imdata['step'])
-    imdata['parms'][0] = x[0]  # alpha
-    imdata['parms'][1] = x[1]  # beta
-    imdata['parms'][2] = x[2]  # gamma
-    imdata['parms'][3] = x[3]  # Tx
-    imdata['parms'][4] = x[4]  # Ty
-    imdata['parms'][5] = x[5]  # Tz
-    M = build_rotate_matrix(imdata['parms'])
-    # rotate volume. cubic spline interpolation means the volume is NOT low pass filtered
-    R.register_cubic_resample(fMRIVol['data'], image['data'], M, imdata['step'])
-    return image
+    if image2.ndim != 3:
+        raise ValueError, "Image 2 is not 3 dimensional"
 
-def demo_MRI_coregistration(optimizer_method='powell', histo_method=1, smooth_histo=0, smooth_image=0, ftype=1):
-    """
-    demo with (must have file ANAT1_V0001.img and fMRI directory fMRIData)
+    if image1.dtype != np.uint8:
+        raise ValueError, "Image 1 is not 8 bit (required for joint histogram)"
 
-    measures, imageF_anat, fmri_series = reg.demo_MRI_coregistration()
+    if image2.dtype != np.uint8:
+        raise ValueError, "Image 2 is not 8 bit (required for joint histogram)"
 
-    show results with
+    if image1_mat.shape != (4,4):
+        raise ValueError, "Image1 MAT is not 4x4"
 
-    In [59]: measures[25]['cost']
-    Out[59]: -0.48607185
+    if image2_mat.shape != (4,4):
+        raise ValueError, "Image2 MAT is not 4x4"
 
-    In [60]: measures[25]['align_cost']
-    Out[60]: -0.99514639
+    if (np.diag(image1_mat)).prod() == 0:
+        raise ValueError, "Image1 MAT has a 0 on the diagonal"
 
-    In [61]: measures[25]['align_rotate']
-    Out[61]:
-    array([ 1.94480181,  5.64703989,  5.35002136, -5.00544405, -2.2712214, -1.42249691], dtype=float32)
+    if (np.diag(image2_mat)).prod() == 0:
+        raise ValueError, "Image2 MAT has a 0 on the diagonal"
 
-    In [62]: measures[25]['rotate']
-    Out[62]:
-    array([ 1.36566341,  4.70644331,  4.68198586, -4.32256889, -2.47607017, -2.39173937], dtype=float32)
+    if method != 'nmi' and method != 'mi'  and method != 'ncc'\
+                       and method != 'ecc' and method != 'mse':
+        raise ValueError, "choose cost method nmi, mi, ecc, mse, ncc"
 
+    P    = np.zeros(6, dtype=np.float64);
+    P[0] = alpha
+    P[1] = beta
+    P[2] = gamma
+    P[3] = Tx
+    P[4] = Ty
+    P[5] = Tz
 
-    """
+    step = np.array([1, 1, 1], dtype=np.int32)
+    optfunc_args = (image1, image1_mat, image2, image2_mat, step, histo_fwhm, lite,
+                    smhist, method, ret_histo)
+
+    if ret_histo:
+        cost, joint_histogram = cost_function(P, optfunc_args)
+        return cost, joint_histogram 
+    else:
+        cost = cost_function(P, optfunc_args)
+        return cost
 
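Hypothetical usage of check_alignment above, assuming the _registration C
extension is built; identical uint8 volumes with identity mats should give
the best (most negative) ncc cost:

    import numpy as np

    vol = (255 * np.random.rand(16, 16, 16)).astype(np.uint8)
    mat = np.eye(4, dtype=np.float64)
    cost = check_alignment(vol, mat, vol, mat, method='ncc')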
-    # demo of alignment of fMRI series with anatomical MRI
-    # in this demo, each fMRI volume is first perturbed (rotated, translated) 
-    # by a random value. The initial registration is measured, then the optimal
-    # alignment is computed and the registration measure made following the volume remap.
-    # The fMRI registration is done with the first fMRI volume using normalized cross-correlation.
-    # Each fMRI volume is rotated to the fMRI-0 volume and the series is ensemble averaged.
-    # The ensemble averaged is then registered with the anatomical MRI volume using normalized mutual information.
-    # The fMRI series is then rotated with this parameter. The alignments are done with 3D cubic splines.
 
-    # read the anatomical MRI volume
-    anat_desc = load_anatMRI_desc()
-    imageF_anat = load_volume(anat_desc, imagename='ANAT1_V0001.img')
-    # the sampling structure
-    imdata = build_structs()
-    # the volume filter
-    imageF_anat['fwhm'] = build_fwhm(imageF_anat['mat'], imdata['step'])
 
-    # read in the file list of the fMRI data
-    metric_test = NP.dtype([('cost', 'f'),
-                           ('align_cost', 'f'),
-                           ('rotate', 'f', 6),
-                           ('align_rotate', 'f', 6)])
+def build_scale_volume(image, mat, scale):
+    #
+    # rescale the 'mat' (voxel to physical mapping matrix) 
+    #
+    M = mat * scale
+    (layers, rows, cols) = image.shape
+    # dimensions 
+    D = np.zeros(3, dtype=np.int32);
+    Z = np.zeros(3, dtype=np.float64);
+    D[0] = rows/scale
+    D[1] = cols/scale
+    D[2] = layers/scale
+    image2 = np.zeros([D[2], D[0], D[1]], dtype=np.uint8)
+    mode = 1;
+    reg.register_volume_resample(image, image2, Z, scale, mode)
+    return image2, M
 
-    fMRIdata = read_fMRI_directory('fMRIData\*.img')
-    fmri_desc = load_fMRI_desc()
-    fmri_series = {}
-    ave_fMRI_volume = NP.zeros(fmri_desc['layers']*fmri_desc['rows']*fmri_desc['cols'],
-                      dtype=NP.float64).reshape(fmri_desc['layers'], fmri_desc['rows'], fmri_desc['cols'])
-    count = 0
-    number_volumes = len(fMRIdata)
-    measures = NP.zeros(number_volumes, dtype=metric_test)
-    # load and perturb (rotation, translation) the fMRI volumes
-    for i in fMRIdata:
-        image = load_volume(fmri_desc, i)
-        # random perturbation of angle, translation for each volume beyond the first
-        if count == 0:
-            image['fwhm'] = build_fwhm(image['mat'], imdata['step'])
-            fmri_series[count] = image
-            count = count + 1
-        else:
-            x = NP.random.random(6) - 0.5
-            x = 10.0 * x
-            fmri_series[count] = demo_rotate_fMRI_volume(image, x)
-            measures[count]['rotate'][0:6] = x[0:6]
-            count = count + 1
 
 
-    # load and register the fMRI volumes with volume_0 using normalized cross correlation metric
-    imageF = fmri_series[0]
-    if smooth_image:
-        image_F_xyz = filter_image_3D(imageF['data'], imageF['fwhm'], ftype)
-        imageF['data'] = image_F_xyz
-    for i in range(1, number_volumes):
-        imageG = fmri_series[i]
-        # the measure prior to alignment 
-        measures[i]['cost'] = check_alignment(imageF, imageG, imdata, method='ncc',
-                                              lite=histo_method, smhist=smooth_histo)
-        x = python_coreg(imageF, imageG, imdata, lite=histo_method, method='ncc',
-                         opt_method=optimizer_method, smhist=smooth_histo, smimage=smooth_image)
-        measures[i]['align_rotate'][0:6] = x[0:6]
-        measures[i]['align_cost'] = check_alignment(imageF, imageG, imdata, method='ncc', 
-                                         lite=histo_method, smhist=smooth_histo,
-                                         alpha=x[0], beta=x[1], gamma=x[2], Tx=x[3], Ty=x[4], Tz=x[5])
 
-
-    # align the volumes and average them for co-registration with the anatomical MRI 
-    ave_fMRI_volume = fmri_series[0]['data'].astype(NP.float64)
-    for i in range(1, number_volumes):
-        image = fmri_series[i]
-        x[0:6] = measures[i]['align_rotate'][0:6]
-        # overwrite the fMRI volume with the aligned volume
-        fmri_series[i] = remap_image(image, x, resample='cubic')
-        ave_fMRI_volume = ave_fMRI_volume + fmri_series[i]['data'].astype(NP.float64)
-
-    ave_fMRI_volume = (ave_fMRI_volume / float(number_volumes)).astype(NP.uint8)
-    ave_fMRI_volume = {'data' : ave_fMRI_volume, 'mat' : imageF['mat'], 
-                       'dim' : imageF['dim'], 'fwhm' : imageF['fwhm']}
-    # register (using normalized mutual information) with the anatomical MRI
-    if smooth_image:
-        image_F_anat_xyz = filter_image_3D(imageF_anat['data'], imageF_anat['fwhm'], ftype)
-        imageF_anat['data'] = image_F_anat_xyz
-    x = python_coreg(imageF_anat, ave_fMRI_volume, imdata, lite=histo_method,
-                     method='nmi', opt_method=optimizer_method, smhist=smooth_histo, smimage=smooth_image)
-    print 'functional-anatomical align parameters '
-    print x
-    for i in range(number_volumes):
-        image = fmri_series[i]
-        # overwrite the fMRI volume with the anatomical-aligned volume
-        fmri_series[i] = remap_image(image, x, resample='cubic')
-
-    return measures, imageF_anat, fmri_series
-
-
-def demo_fMRI_resample(imageF_anat, fmri_series):
-    resampled_fmri_series = {}
-    number_volumes = len(fmri_series)
-    for i in range(number_volumes):
-        resampled_fmri_series[i] = resize_image(fmri_series[i], imageF_anat['mat'])
-
-    return resampled_fmri_series
-
-

Modified: branches/refactor_fft/scipy/ndimage/src/register/Register_EXT.c
===================================================================
--- branches/refactor_fft/scipy/ndimage/src/register/Register_EXT.c	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/ndimage/src/register/Register_EXT.c	2008-07-01 04:52:00 UTC (rev 4511)
@@ -629,9 +629,108 @@
 }
 
 
+static PyObject *Register_Complete_Symmetry(PyObject *self, PyObject *args)
+{
 
+    int nx;
+    int ny;
+    int nz;
+    int ni;
+    double   *A;
+    PyObject *AlphaArray = NULL;
+
+    if(!PyArg_ParseTuple(args, "Oiiii", &AlphaArray, &nx, &ny, &nz, &ni))
+	goto exit;
+
+    A = (double *)PyArray_DATA(AlphaArray);
+
+    if(!NI_Complete_Symmetry(A, nx, ny, nz, ni)) 
+	    goto exit;
+
+exit:
+
+    return PyErr_Occurred() ? NULL : (PyObject*)Py_BuildValue(""); 
+
+}
+
+
+
+static PyObject *Register_LT_Tensor_Product(PyObject *self, PyObject *args)
+{
+    int M1;
+    int M2;
+    int rows;
+    int cur_row;
+    int coeff_1;
+    int coeff_2;
+    double   *A1;
+    double   *A2;
+    double   *B1;
+    double   *B2;
+    double   *Basis;
+    PyObject *AlphaArray1 = NULL;
+    PyObject *AlphaArray2 = NULL;
+    PyObject *BetaArray1  = NULL;
+    PyObject *BetaArray2  = NULL;
+    PyObject *BasisArray  = NULL;
+
+    if(!PyArg_ParseTuple(args, "OOOOOiiiiii", &AlphaArray1, &AlphaArray2, &BetaArray1, &BetaArray2, 
+		         &BasisArray, &M1, &M2, &rows, &cur_row, &coeff_1, &coeff_2))
+	goto exit;
+
+    A1    = (double *)PyArray_DATA(AlphaArray1);
+    A2    = (double *)PyArray_DATA(AlphaArray2);
+    B1    = (double *)PyArray_DATA(BetaArray1);
+    B2    = (double *)PyArray_DATA(BetaArray2);
+    Basis = (double *)PyArray_DATA(BasisArray);
+
+    if(!NI_LT_Tensor_Product(A1, A2, B1, B2, Basis, M1, M2, rows, cur_row, coeff_1, coeff_2)) 
+	    goto exit;
+
+
+exit:
+
+    return PyErr_Occurred() ? NULL : (PyObject*)Py_BuildValue(""); 
+
+}
+
+
+
+static PyObject *Register_LT_Mrqcof(PyObject *self, PyObject *args)
+{
+
+    int M1;
+    double   wt;
+    double   value;
+    double   *A;
+    double   *B;
+    double   *V;
+    PyObject *AlphaArray = NULL;
+    PyObject *BetaArray  = NULL;
+    PyObject *VArray     = NULL;
+
+    if(!PyArg_ParseTuple(args, "OOOddi", &AlphaArray, &BetaArray, &VArray, &wt, &value, &M1))
+	goto exit;
+
+    A = (double *)PyArray_DATA(AlphaArray);
+    B = (double *)PyArray_DATA(BetaArray);
+    V = (double *)PyArray_DATA(VArray);
+
+    if(!NI_LT_Mrqcof(A, B, V, wt, value, M1)) 
+	    goto exit;
+
+exit:
+
+    return PyErr_Occurred() ? NULL : (PyObject*)Py_BuildValue(""); 
+
+}
+
+
 static PyMethodDef RegisterMethods[] =
 {
+    { "register_complete_symmetry",             Register_Complete_Symmetry,        METH_VARARGS, NULL },
+    { "register_lt_mrqcof",                     Register_LT_Mrqcof,                METH_VARARGS, NULL },
+    { "register_lt_tensor_product",             Register_LT_Tensor_Product,        METH_VARARGS, NULL },
     { "register_find_mask",                     Register_Find_Mask,                METH_VARARGS, NULL },
     { "register_resample_coords",               Register_Resample_Coords,          METH_VARARGS, NULL },
     { "register_resample_gradient_coords",      Register_Resample_Gradient_Coords, METH_VARARGS, NULL },

Modified: branches/refactor_fft/scipy/ndimage/src/register/Register_IMPL.c
===================================================================
--- branches/refactor_fft/scipy/ndimage/src/register/Register_IMPL.c	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/ndimage/src/register/Register_IMPL.c	2008-07-01 04:52:00 UTC (rev 4511)
@@ -936,9 +936,9 @@
 			        V110 * (dx1) * (dy1) * (-1.0) +
 			        V111 * (dx1) * (dy1) * (1.0);
 
-			gradientX[sliceD+rowD+(int)x] = (int)(gradX*scale[(int)zp]);
-			gradientY[sliceD+rowD+(int)x] = (int)(gradY*scale[(int)zp]);
-			gradientZ[sliceD+rowD+(int)x] = (int)(gradZ*scale[(int)zp]);
+			gradientX[sliceD+rowD+(int)x] = (gradX*scale[(int)zp]);
+			gradientY[sliceD+rowD+(int)x] = (gradY*scale[(int)zp]);
+			gradientZ[sliceD+rowD+(int)x] = (gradZ*scale[(int)zp]);
 
 		    }
 	        }
@@ -1083,9 +1083,9 @@
 	                V111 * (dx1) * (dy1) * (1.0);
 
 	        /* gradients saved in the unrolled clipped gradient volume */
-	        gradientX[i] = (int)(gradX*scale[(int)zp]);
-	        gradientY[i] = (int)(gradY*scale[(int)zp]);
-	        gradientZ[i] = (int)(gradZ*scale[(int)zp]);
+	        gradientX[i] = (gradX*scale[(int)zp]);
+	        gradientY[i] = (gradY*scale[(int)zp]);
+	        gradientZ[i] = (gradZ*scale[(int)zp]);
 
 	    }
 
@@ -1203,3 +1203,167 @@
 
 
 
+int NI_LT_Mrqcof(double *alpha, double *beta, double *V, double wt, double value, int M1){
+
+	int i, j;
+	double v1;
+	int status;
+
+	for(i = 0; i < M1; ++i){
+	    v1 = V[i];
+	    beta[i] = v1 * value * wt;
+	    for(j = 0; j <= i; ++j){
+		alpha[M1*i+j] = v1 * V[j];
+	    }
+	}
+
+	status = 1;
+
+	return status;
+
+}
+
+
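In NumPy terms, NI_LT_Mrqcof fills beta with wt * value * V and the lower
triangle of alpha with the outer product V V^T; an equivalent sketch, useful
for checking the C routine (function name invented):

    import numpy as np

    def lt_mrqcof(V, wt, value):
        # beta[i] = V[i] * value * wt; alpha lower triangle holds V[i] * V[j]
        beta = V * value * wt
        alpha = np.tril(np.outer(V, V))
        return alpha, beta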
+int NI_LT_Tensor_Product(double *alpha_1, double *alpha_2, double *beta_1, double *beta_2, double *basis,
+	                 int M1, int M2, int rows, int row_number, int coeff_1, int coeff_2){
+
+
+	//
+	// lower triangular tensor product
+	//
+
+	int i, j, k, m;
+	int loop3_outer, loop3_inner;
+	int status;
+	double wt1;
+	double wt2;
+	double *ptr1;
+	double *ptr2;
+
+	for(i = 0; i < coeff_1; ++i){
+	    wt1 = basis[rows*i + row_number];
+	    for(loop3_outer = 0; loop3_outer < 3; ++loop3_outer){
+		//
+		// spatial-spatial covariances
+		//
+		for(loop3_inner = 0; loop3_inner <= loop3_outer; ++loop3_inner){
+		    for(j = 0; j <= i; ++j){
+			//
+		        // outer product of basis array
+			//
+	    		wt2  = wt1 * basis[rows*j + row_number];
+			ptr1 = &alpha_1[coeff_2*(M1*(coeff_1*loop3_outer+i)+(coeff_1*loop3_inner)+j)];
+			ptr2 = &alpha_2[coeff_2*(M2*loop3_outer+loop3_inner)];
+			for(k = 0; k < coeff_2; ++k){
+			    for(m = 0; m <= k; ++m){
+				ptr1[M1*k+m] += (wt2 * ptr2[M2*k+m]);
+			    }
+			}
+		    }
+		    //
+		    // spatial-intensity covariances (single G volume assumed)
+		    //
+		    ptr1 = &alpha_1[coeff_2*(M1*coeff_1*3+(coeff_1*loop3_inner)+i)];
+		    ptr2 = &alpha_2[coeff_2*(M2*3+loop3_outer)];
+		    for(k = 0; k < coeff_2; ++k){
+			ptr1[M1+k] += (wt1 * ptr2[M2+k]);
+		    }
+		    //
+		    // spatial component of beta
+		    //
+		    for(k = 0; k < coeff_2; ++k){
+			beta_1[k+coeff_2*(coeff_1*loop3_outer+i)] += (wt1 * beta_2[coeff_2*loop3_outer+k]);
+		    }
+		}
+	    }
+	}
+
+	//
+	// intensity-intensity covariances
+	//
+	ptr1 = &alpha_1[coeff_2*(M1*coeff_1*3+(coeff_1*3))];
+	ptr2 = &alpha_2[coeff_2*(M2*3+3)];
+	for(k = 0; k < coeff_2; ++k){
+	    ptr1[k] += ptr2[k];
+	}
+
+	//
+	// intensity component of beta
+	//
+
+	beta_1[coeff_2*coeff_1*3] += beta_2[coeff_2*3];
+
+	status = 1;
+
+	return status;
+
+}
+
+
+
+int NI_Complete_Symmetry(double *Alpha, int nx, int ny, int nz, int ni4){
+
+	//
+	// complete symmetry of Alpha matrix over the 3D brain volume
+	//
+
+	int z1, z2;
+	int y1, y2;
+	int x1, x2;
+	int loop3_outer, loop3_inner;
+	int M1;
+	int status;
+	double *ptrx;
+	double *ptry;
+	double *ptrz;
+
+	M1 = 3*nx*ny*nz + ni4;
+
+	for(loop3_outer = 0; loop3_outer < 3; ++loop3_outer){
+	    for(loop3_inner = 0; loop3_inner <= loop3_outer; ++loop3_inner){
+		ptrz = &Alpha[nx*ny*nz*(M1*loop3_outer+loop3_inner)];
+		for(z1 = 0; z1 < nz; ++z1){
+		    for(z2 = 0; z2 <= z1; ++z2){
+			ptry = ptrz + nx*ny*(M1*z1 + z2);
+			for(y1 = 0; y1 < ny; ++y1){
+		            for(y2 = 0; y2 <= y1; ++y2){
+			        ptrx = ptry + nx*(M1*y1 + y2);
+		                for(x1 = 0; x1 < nx; ++x1){
+		                    for(x2 = 0; x2 <= x1; ++x2){
+					ptrx[M1*x2+x1] = ptrx[M1*x1+x2];
+			            }
+			        }
+			    }
+			}
+			for(x1 = 0; x1 < nx*ny; ++x1){
+			    for(x2 = 0; x2 < x1; ++x2){
+				ptry[M1*x2+x1] = ptry[M1*x1+x2];
+			    }
+			}
+		    }
+		    for(x1 = 0; x1 < nx*ny*nz; ++x1){
+		        for(x2 = 0; x2 < x1; ++x2){
+			    ptrz[M1*x2+x1] = ptrz[M1*x1+x2];
+		        }
+		    }
+
+		}
+	    }
+	}
+
+	for(x1 = 0; x1 < nx*ny*nz*3+ni4; ++x1){
+	    for(x2 = 0; x2 < x1; ++x2){
+		Alpha[M1*x2+x1] = Alpha[M1*x1+x2];
+	    }
+	}
+
+
+	status = 1;
+
+	return status;
+
+}
+
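The routine ultimately mirrors the lower triangle of Alpha into the upper
triangle; the whole-matrix step at the end is, in NumPy terms (function name
invented):

    import numpy as np

    def complete_symmetry(A):
        # reflect the lower triangle onto the upper triangle
        return np.tril(A) + np.tril(A, -1).T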
+
+
+

Copied: branches/refactor_fft/scipy/ndimage/tests/test_registration.py (from rev 4510, trunk/scipy/ndimage/tests/test_registration.py)

Copied: branches/refactor_fft/scipy/ndimage/tests/test_regression.py (from rev 4510, trunk/scipy/ndimage/tests/test_regression.py)

Modified: branches/refactor_fft/scipy/ndimage/tests/test_segment.py
===================================================================
--- branches/refactor_fft/scipy/ndimage/tests/test_segment.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/ndimage/tests/test_segment.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -155,5 +155,5 @@
         return
 
 if __name__ == "__main__":
-    inittest.main()
+    nose.runmodule()
 

Copied: branches/refactor_fft/scipy/odr/SConscript (from rev 4510, trunk/scipy/odr/SConscript)

Deleted: branches/refactor_fft/scipy/odr/SConstruct
===================================================================
--- branches/refactor_fft/scipy/odr/SConstruct	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/odr/SConstruct	2008-07-01 04:52:00 UTC (rev 4511)
@@ -1,62 +0,0 @@
-# Last Change: Wed Mar 05 04:00 PM 2008 J
-# vim:syntax=python
-
-import os
-from os.path import join as pjoin, splitext
-
-from numpy.distutils.misc_util import get_numpy_include_dirs
-from numscons import get_python_inc#, get_pythonlib_dir
-from numscons import GetNumpyEnvironment
-from numscons import CheckF77BLAS, CheckF77Clib
-
-from numscons import write_info
-
-env = GetNumpyEnvironment(ARGUMENTS)
-env.Append(CPPPATH = [get_python_inc(), get_numpy_include_dirs()])
-#if os.name == 'nt':
-#    # NT needs the pythonlib to run any code importing Python.h, including
-#    # simple code using only typedef and so on, so we need it for configuration
-#    # checks
-#    env.AppendUnique(LIBPATH = [get_pythonlib_dir()])
-
-#=======================
-# Starting Configuration
-#=======================
-config = env.NumpyConfigure(custom_tests = {'CheckBLAS' : CheckF77BLAS, 
-                                            'CheckF77Clib' : CheckF77Clib})
-
-if not config.CheckF77Clib():
-    raise RuntimeError("Could not check F/C runtime library for %s/%s, " \
-                       "contact the maintainer" % (env['CC'], env['F77']))
-
-#--------------
-# Checking Blas
-#--------------
-st = config.CheckBLAS()
-if not st:
-    has_blas = 0
-else:
-    has_blas = 1
-
-config.Finish()
-write_info(env)
-
-#==========
-#  Build
-#==========
-
-# odr lib
-libodr_src = [pjoin('odrpack', i) for i in ['d_odr.f', 'd_mprec.f', 'dlunoc.f']]
-if has_blas:
-    libodr_src.append(pjoin('odrpack', 'd_lpk.f'))
-else:
-    libodr_src.append(pjoin('odrpack', 'd_lpkbls.f'))
-
-env.NumpyStaticExtLibrary('odrpack', source = libodr_src)
-
-env.PrependUnique(LIBS = 'odrpack')
-env.PrependUnique(LIBPATH = env['build_dir'])
-
-# odr pyextension
-env.NumpyPythonExtension('__odrpack', '__odrpack.c',
-                         LINKFLAGSEND = env['F77_LDFLAGS'])

Copied: branches/refactor_fft/scipy/odr/SConstruct (from rev 4510, trunk/scipy/odr/SConstruct)

Copied: branches/refactor_fft/scipy/optimize/SConscript (from rev 4510, trunk/scipy/optimize/SConscript)

Deleted: branches/refactor_fft/scipy/optimize/SConstruct
===================================================================
--- branches/refactor_fft/scipy/optimize/SConstruct	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/optimize/SConstruct	2008-07-01 04:52:00 UTC (rev 4511)
@@ -1,91 +0,0 @@
-# Last Change: Sat May 03 02:00 PM 2008 J
-# vim:syntax=python
-
-import os
-from os.path import join as pjoin, splitext
-
-from numpy.distutils.misc_util import get_numpy_include_dirs
-from numscons import get_python_inc#, get_pythonlib_dir
-from numscons import GetNumpyEnvironment
-from numscons import CheckF77LAPACK, CheckF77Clib
-
-from numscons import write_info
-
-env = GetNumpyEnvironment(ARGUMENTS)
-env.Tool('numpyf2py')
-env.Append(CPPPATH = get_numpy_include_dirs())
-env.Append(CPPPATH = env['F2PYINCLUDEDIR'])
-#if os.name == 'nt':
-#    # NT needs the pythonlib to run any code importing Python.h, including
-#    # simple code using only typedef and so on, so we need it for configuration
-#    # checks
-#    env.AppendUnique(LIBPATH = [get_pythonlib_dir()])
-
-#=======================
-# Starting Configuration
-#=======================
-config = env.NumpyConfigure(custom_tests = {'CheckLAPACK' : CheckF77LAPACK,
-                                            'CheckF77Clib' : CheckF77Clib})
-
-#-----------------
-# Checking Lapack
-#-----------------
-if not config.CheckF77Clib():
-    raise RuntimeLibrary("Could not check C/F runtime library for %s/%s"\
-                         " , contact the maintainer" % (env['CC'], env['F77']))
-
-st = config.CheckLAPACK()
-if not st:
-    has_lapack = 0
-else:
-    has_lapack = 1
-
-config.Finish()
-write_info(env)
-
-#==========
-#  Build
-#==========
-
-# minpack lib
-minpack_src = [str(s) for s in env.NumpyGlob(pjoin('minpack', '*.f'))]
-env.NumpyStaticExtLibrary('minpack', source = minpack_src)
-
-# rootfind lib
-rootfind_src = [str(s) for s in env.NumpyGlob(pjoin('Zeros', '*.c'))]
-env.NumpyStaticExtLibrary('rootfind', source = rootfind_src)
-
-env.AppendUnique(LIBS = ['minpack', 'rootfind'])
-env.AppendUnique(LIBPATH = env['build_dir'])
-
-# _minpack pyextension
-env.NumpyPythonExtension('_minpack', '_minpackmodule.c',
-                         LINKFLAGSEND = env['F77_LDFLAGS'])
-
-# _zeros pyextension
-env.NumpyPythonExtension('_zeros', 'zeros.c')
-
-# _lbfgsb pyextension
-src = [pjoin('lbfgsb', i) for i in ['lbfgsb.pyf', 'routines.f']]
-env.NumpyPythonExtension('_lbfgsb', source = src,
-                         LINKFLAGSEND = env['F77_LDFLAGS'])
-
-# _cobyla pyextension
-src = [pjoin('cobyla', i) for i in ['cobyla2.f', 'trstlp.f', 'cobyla.pyf']]
-env.NumpyPythonExtension('_cobyla', source = src,
-                         LINKFLAGSEND = env['F77_LDFLAGS'])
-
-# _minpack2 pyextension
-src = [pjoin('minpack2', i) for i in ['dcsrch.f', 'dcstep.f', 'minpack2.pyf']]
-env.NumpyPythonExtension('minpack2', source = src,
-                         LINKFLAGSEND = env['F77_LDFLAGS'])
-
-# moduleTNC pyextension
-env.NumpyPythonExtension('moduleTNC', 
-                         source = [pjoin('tnc', i) for i in \
-                                                   ['moduleTNC.c', 'tnc.c']])
-
-# _slsqp pyextension
-src = [pjoin('slsqp', i) for i in ['slsqp_optmz.f', 'slsqp.pyf']]
-env.NumpyPythonExtension('_slsqp', source = src,
-                         LINKFLAGSEND = env['F77_LDFLAGS'])

Copied: branches/refactor_fft/scipy/optimize/SConstruct (from rev 4510, trunk/scipy/optimize/SConstruct)

Modified: branches/refactor_fft/scipy/optimize/anneal.py
===================================================================
--- branches/refactor_fft/scipy/optimize/anneal.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/optimize/anneal.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -9,6 +9,7 @@
 
 __all__ = ['anneal']
 
+_double_min = numpy.finfo(float).min
 _double_max = numpy.finfo(float).max
 class base_schedule(object):
     def __init__(self):
@@ -35,11 +36,25 @@
         self.tests = 0
 
     def getstart_temp(self, best_state):
+        """ Find a matching starting temperature and starting parameters vector
+        i.e. find x0 such that func(x0) = T0.
+
+        Parameters
+        ----------
+        best_state : _state
+            A _state object to store the function value and x0 found.
+
+        Returns
+        -------
+        x0 : array
+            The starting parameters vector.
+        """
+
         assert(not self.dims is None)
         lrange = self.lower
         urange = self.upper
-        fmax = -300e8
-        fmin = 300e8
+        fmax = _double_min
+        fmin = _double_max
         for _ in range(self.Ninit):
             x0 = random.uniform(size=self.dims)*(urange-lrange) + lrange
             fval = self.func(x0, *self.args)
@@ -50,6 +65,7 @@
                 fmin = fval
                 best_state.cost = fval
                 best_state.x = array(x0)
+
         self.T0 = (fmax-fmin)*1.5
         return best_state.x
 
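A standalone sketch of the T0 heuristic above, with an invented objective and
bounds:

    import numpy as np

    def objective(x):
        return np.sum(x ** 2)

    lower, upper = -5.0, 5.0
    fvals = [objective(np.random.uniform(lower, upper, size=3))
             for _ in range(50)]
    T0 = (max(fvals) - min(fvals)) * 1.5   # span the sampled range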

Modified: branches/refactor_fft/scipy/optimize/minpack.py
===================================================================
--- branches/refactor_fft/scipy/optimize/minpack.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/optimize/minpack.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -1,7 +1,8 @@
 import _minpack
 
 from numpy import atleast_1d, dot, take, triu, shape, eye, \
-                  transpose, zeros, product, greater, array
+                  transpose, zeros, product, greater, array, \
+                  any, all, where, isscalar, asarray, ndarray
 
 error = _minpack.error
 
@@ -103,7 +104,7 @@
 
       brentq, brenth, ridder, bisect, newton -- one-dimensional root-finding
 
-      fixed_point -- scalar fixed-point finder
+      fixed_point -- scalar and vector fixed-point finder
 
     """
     x0 = array(x0,ndmin=1)
@@ -256,7 +257,7 @@
 
       brentq, brenth, ridder, bisect, newton -- one-dimensional root-finding
 
-      fixed_point -- scalar fixed-point finder
+      fixed_point -- scalar and vector fixed-point finder      
 
     """
     x0 = array(x0,ndmin=1)
@@ -372,8 +373,8 @@
 
       brentq, brenth, ridder, bisect, newton -- one-dimensional root-finding
 
-      fixed_point -- scalar fixed-point finder
-
+      fixed_point -- scalar and vector fixed-point finder
+            
     """
 
     if fprime is not None:
@@ -411,10 +412,26 @@
 
 
 # Steffensen's Method using Aitken's Del^2 convergence acceleration.
-def fixed_point(func, x0, args=(), xtol=1e-10, maxiter=500):
-    """Given a function of one variable and a starting point, find a
+def fixed_point(func, x0, args=(), xtol=1e-8, maxiter=500):
+    """Find the point where func(x) == x
+    
+    Given a function of one or more variables and a starting point, find a
     fixed-point of the function: i.e. where func(x)=x.
 
+    Uses Steffensen's Method with Aitken's Del^2 convergence acceleration.
+    See Burden, Faires, "Numerical Analysis", 5th edition, pg. 80
+
+    Example
+    -------
+    >>> from numpy import sqrt, array
+    >>> from scipy.optimize import fixed_point
+    >>> def func(x, c1, c2):
+            return sqrt(c1/(x+c2))
+    >>> c1 = array([10,12.])
+    >>> c2 = array([3, 5.])
+    >>> fixed_point(func, [1.2, 1.3], args=(c1,c2))
+    array([ 1.4920333 ,  1.37228132])
+
     See also:
 
       fmin, fmin_powell, fmin_cg,
@@ -432,26 +449,39 @@
 
       brentq, brenth, ridder, bisect, newton -- one-dimensional root-finding
 
-      fixed_point -- scalar fixed-point finder
-
     """
+    if not isscalar(x0):
+        x0 = asarray(x0)
+        p0 = x0                  
+        for iter in range(maxiter):
+            p1 = func(p0, *args)
+            p2 = func(p1, *args)
+            d = p2 - 2.0 * p1 + p0
+            p = where(d == 0, p2, p0 - (p1 - p0)*(p1-p0) / d)
+            relerr = where(p0 == 0, p, (p-p0)/p0)
+            if all(abs(relerr) < xtol):
+                return p
+            p0 = p
+    else:
+        p0 = x0
+        for iter in range(maxiter):
+            p1 = func(p0, *args)
+            p2 = func(p1, *args)
+            d = p2 - 2.0 * p1 + p0
+            if d == 0.0:            
+                return p2
+            else:
+                p = p0 - (p1 - p0)*(p1-p0) / d
+            if p0 == 0:
+                relerr = p
+            else:
+                relerr = (p-p0)/p0
+            if abs(relerr) < xtol:
+                return p
+            p0 = p
+    raise RuntimeError, "Failed to converge after %d iterations, value is %s" % (maxiter,p)
 
-    p0 = x0
-    for iter in range(maxiter):
-        p1 = func(*((p0,)+args))
-        p2 = func(*((p1,)+args))
-        d = p2 - 2.0 * p1 + p0
-        if d == 0.0:
-            print "Warning: Difference in estimates is %g" % (abs(p2-p1))
-            return p2
-        else:
-            p = p0 - (p1 - p0)*(p1-p0) / d
-        if abs(p-p0) < xtol:
-            return p
-        p0 = p
-    raise RuntimeError, "Failed to converge after %d iterations, value is %f" % (maxiter,p)
 
-
 def bisection(func, a, b, args=(), xtol=1e-10, maxiter=400):
     """Bisection root-finding method.  Given a function and an interval with
     func(a) * func(b) < 0, find the root between a and b.
@@ -473,7 +503,7 @@
 
       brentq, brenth, ridder, bisect, newton -- one-dimensional root-finding
 
-      fixed_point -- scalar fixed-point finder
+      fixed_point -- scalar and vector fixed-point finder
 
     """
     i = 1

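For reference, a minimal usage sketch of the vectorized fixed_point added above,
assuming a scipy build that includes this revision (the vector parameters follow
the docstring example; the scalar call is illustrative):

    from numpy import array, sqrt
    from scipy.optimize import fixed_point

    def func(x, c1, c2):
        # x is a fixed point when x == sqrt(c1 / (x + c2)).
        return sqrt(c1 / (x + c2))

    # Scalar case: Steffensen iteration with Aitken's Del^2 acceleration,
    # p = p0 - (p1 - p0)**2 / (p2 - 2*p1 + p0).
    print fixed_point(func, 1.2, args=(10.0, 3.0))

    # Vector case: two parameter sets solved simultaneously.
    c1 = array([10, 12.0])
    c2 = array([3, 5.0])
    print fixed_point(func, [1.2, 1.3], args=(c1, c2))
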
Copied: branches/refactor_fft/scipy/sandbox/mkufunc (from rev 4510, trunk/scipy/sandbox/mkufunc)

Deleted: branches/refactor_fft/scipy/sandbox/mkufunc/README.txt
===================================================================
--- trunk/scipy/sandbox/mkufunc/README.txt	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/sandbox/mkufunc/README.txt	2008-07-01 04:52:00 UTC (rev 4511)
@@ -1,19 +0,0 @@
-
-mkufunc (make universal function) is a tool which lets you create
-a C compiled version of a universal function (UFunc).
-
-It works by translating the python function into C and then uses
-scipy.weave to create a UFunc which calls the appropriate C function
-in the inner 1-d loop.  This means that there are no Python calls
-when the calculation is performed, making the calculation
-fast (in particular when the arrays involved in the calculation
-are very large).
-
-Requirements:
-
-  pypy
-
-You need the pypy path in your PYTHONPATH environment variable:
-
-$ export PYTHONPATH=/giant/src/pypy-dist
-

Copied: branches/refactor_fft/scipy/sandbox/mkufunc/README.txt (from rev 4510, trunk/scipy/sandbox/mkufunc/README.txt)

Deleted: branches/refactor_fft/scipy/sandbox/mkufunc/TODO.txt
===================================================================

Copied: branches/refactor_fft/scipy/sandbox/mkufunc/TODO.txt (from rev 4510, trunk/scipy/sandbox/mkufunc/TODO.txt)

Copied: branches/refactor_fft/scipy/sandbox/mkufunc/docs (from rev 4510, trunk/scipy/sandbox/mkufunc/docs)

Copied: branches/refactor_fft/scipy/sandbox/mkufunc/examples (from rev 4510, trunk/scipy/sandbox/mkufunc/examples)

Deleted: branches/refactor_fft/scipy/sandbox/mkufunc/examples/benchmark.py
===================================================================
--- trunk/scipy/sandbox/mkufunc/examples/benchmark.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/sandbox/mkufunc/examples/benchmark.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -1,49 +0,0 @@
-#!/usr/bin/env python
-from math import sin, cos
-import time
-
-from numpy import arange, vectorize, allclose
-from scipy import weave
-
-from mkufunc.api import mkufunc
-
-
-def f(x):
-    return 4.2 * x * x + 3.7 * x + 1.5
-
-
-vfunc = vectorize(f)
-
-ufunc = mkufunc([(float, float)])(f)
-
-
-x = arange(0, 1000, 0.001)    #print "x =", x, x.dtype
-
-start_time = time.time()
-b_y = x.copy()
-weave.blitz("b_y[:] = 4.2 * x[:] * x[:] + 3.7 * x[:] + 1.5")
-b_time = time.time() - start_time
-print 'blitz: %.6f sec' % b_time
-
-start_time = time.time()
-n_y = f(x)
-n_time = time.time() - start_time
-print 'numpy: %.6f sec' % n_time
-
-start_time = time.time()
-v_y = vfunc(x)
-v_time = time.time() - start_time
-print 'vectorize: %.6f sec' % v_time
-
-start_time = time.time()
-u_y = ufunc(x)
-u_time = time.time() - start_time
-print 'mkufunc: %.6f sec' % u_time
-
-print "speedup over blitz:",     b_time/u_time
-print "speedup over numpy:",     n_time/u_time
-print "speedup over vectorize:", v_time/u_time
-
-assert allclose(b_y, n_y)
-assert allclose(v_y, n_y)
-assert allclose(u_y, n_y)

Copied: branches/refactor_fft/scipy/sandbox/mkufunc/examples/benchmark.py (from rev 4510, trunk/scipy/sandbox/mkufunc/examples/benchmark.py)

Deleted: branches/refactor_fft/scipy/sandbox/mkufunc/examples/primes.py
===================================================================
--- trunk/scipy/sandbox/mkufunc/examples/primes.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/sandbox/mkufunc/examples/primes.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -1,29 +0,0 @@
-#!/usr/bin/env python
-from math import sqrt
-import time
-
-from numpy import arange
-
-from mkufunc.api import mkufunc
-
-
-def is_prime(n):
-    if n < 2:
-        return 0
-    for i in xrange(2, min(n, int(sqrt(n)+2.0))):
-        if n %i == 0:
-            return 0
-    return 1
-
-
-start_time = time.time()
-assert sum(is_prime(n) for n in xrange(1000000)) == 78498
-print 'Python: %.6f sec' % (time.time() - start_time)
-
-
-is_prime = mkufunc(int)(is_prime)
-
-
-start_time = time.time()
-assert is_prime(arange(1000000)).sum() == 78498
-print 'Compiled: %.6f sec' % (time.time() - start_time)

Copied: branches/refactor_fft/scipy/sandbox/mkufunc/examples/primes.py (from rev 4510, trunk/scipy/sandbox/mkufunc/examples/primes.py)

Copied: branches/refactor_fft/scipy/sandbox/mkufunc/mkufunc (from rev 4510, trunk/scipy/sandbox/mkufunc/mkufunc)

Deleted: branches/refactor_fft/scipy/sandbox/mkufunc/mkufunc/__init__.py
===================================================================

Copied: branches/refactor_fft/scipy/sandbox/mkufunc/mkufunc/__init__.py (from rev 4510, trunk/scipy/sandbox/mkufunc/mkufunc/__init__.py)

Deleted: branches/refactor_fft/scipy/sandbox/mkufunc/mkufunc/api.py
===================================================================
--- trunk/scipy/sandbox/mkufunc/mkufunc/api.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/sandbox/mkufunc/mkufunc/api.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -1,346 +0,0 @@
-""" mkufunc (make U function)
-
-
-Author: Ilan Schnell (with help from Travis Oliphant and Eric Jones)
-"""
-import sys
-import re
-import os, os.path
-import cStringIO
-import hashlib
-from types import FunctionType
-
-import numpy
-from scipy import weave
-
-
-verbose = 0
-
-def func_hash(f, salt=None):
-    """ Return a MD5 hash for a function object as string.
-    """
-    co = f.func_code
-    return hashlib.md5(co.co_code + repr(co.co_names) + repr(salt)
-                       ).hexdigest()
-
-
-def translate(f, argtypes):
-    """ Return pypy's C output for a given function and argument types.
-        The cache files are in weave's directory.
-    """
-    cache_file_name = os.path.join(weave.catalog.default_dir(),
-                                   'pypy_%s.c' % func_hash(f, salt=argtypes))
-    try:
-        return open(cache_file_name).read()
-    
-    except IOError:
-        from interactive import Translation
-        
-        t = Translation(f, backend='c')
-        t.annotate(argtypes)
-        t.source()
-        
-        os.rename(t.driver.c_source_filename, cache_file_name)
-        
-        return translate(f, argtypes)
-
-
-class Ctype:
-    def __init__(self, npy, c):
-        self.npy = npy
-        self.c = c
-
-typedict = {
-    int:    Ctype('NPY_LONG',   'long'  ),
-    float:  Ctype('NPY_DOUBLE', 'double'),
-}
-
-
-class Cfunc(object):
-    """ C compiled python functions
-
-    >>> def sqr(x):
-    ...     return x * x
-
-    >>> signature = [int, int] # only the input arguments are used here
-    
-    compilation is done upon initialization
-    >>> x = Cfunc(sqr, signature, 123)
-    ...
-    >>> x.nin # number of input arguments
-    1
-    >>> x.nout # number of output arguments (must be 1 for now)
-    1
-    >>> x.sig
-    [<type 'int'>, <type 'int'>]
-    
-    Attributes:
-        f           -- the Python function object
-        n           -- id number
-        sig         -- signature
-        nin         -- number of input arguments
-        nout        -- number of output arguments
-        cname       -- name of the C function
-
-    Methods:
-        decl()      -- returns the C declaration for the function
-        cfunc()     -- returns the C function (as string)
-        ufunc_support_code()
-                    -- generate the C support code to make this
-                       function part work with PyUFuncGenericFunction
-    """
-    def __init__(self, f, signature, n):
-        self.f = f
-        self.n = n
-        self.sig = signature
-        self.nin = f.func_code.co_argcount
-        self.nout = len(self.sig) - self.nin
-        assert self.nout == 1                  # for now
-        
-        src = translate(f, signature[:self.nin])
-        
-        self._prefix = 'f%i_' % self.n
-        self._allCsrc = src.replace('pypy_', self._prefix + 'pypy_')
-        self.cname = self._prefix + 'pypy_g_' + f.__name__
-
-    def cfunc(self):
-        p = re.compile(r'^\w+[*\s\w]+' + self.cname +
-                       r'\s*\([^)]*\)\s*\{.*?[\n\r]\}[\n\r]',
-                       re.DOTALL | re.MULTILINE | re.VERBOSE)
-        
-        found = p.findall(self._allCsrc)
-        assert len(found) == 1
-        res = found[0]
-        res = res.replace(self._prefix + 'pypy_g_ll_math_ll_math_', '')
-        return 'inline ' + res + '\n'
-    
-    def ufunc_support_code(self):
-        # Unfortunately the code in here is very hard to read.
-        # In order to make the code clearer, one would need a real template
-        # engine like Cheetah (http://cheetahtemplate.org/).
-        # However, something like that would be too much overhead for scipy.
-        n = self.n
-        nin = self.nin
-        cname = self.cname
-
-        def varname(i):
-            return chr(i + ord('a'))
-        
-        declargs = ', '.join('%s %s' % (typedict[self.sig[i]].c, varname(i))
-                             for i in xrange(self.nin))
-        
-        args = ', '.join(varname(i) for i in xrange(self.nin))
-        
-        isn_steps = '\n\t'.join('npy_intp is%i = steps[%i];' % (i, i)
-                                for i in xrange(self.nin))
-        
-        ipn_args = '\n\t'.join('char *ip%i = args[%i];' % (i, i)
-                               for i in xrange(self.nin))
-        
-        body1d_in = '\n\t\t'.join('%s *in%i = (%s *)ip%i;' %
-                                  (2*(typedict[self.sig[i]].c, i))
-                                  for i in xrange(self.nin))
-        
-        body1d_add = '\n\t\t'.join('ip%i += is%i;' % (i, i)
-                                   for i in xrange(self.nin))
-        
-        ptrargs = ', '.join('*in%i' % i for i in xrange(self.nin))
-        
-        rettype = typedict[self.sig[-1]].c
-        
-        return '''
-static %(rettype)s wrap_%(cname)s(%(declargs)s)
-{
-	return %(cname)s(%(args)s);
-}
-
-typedef %(rettype)s Func_%(n)i(%(declargs)s);
-
-static void
-PyUFunc_%(n)i(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
-{
-	npy_intp i, n;
-        %(isn_steps)s
-	npy_intp os = steps[%(nin)s];
-        %(ipn_args)s
-	char *op = args[%(nin)s];
-	Func_%(n)i *f = (Func_%(n)i *) func;
-	n = dimensions[0];
-        
-	for(i = 0; i < n; i++) {
-		%(body1d_in)s
-		%(rettype)s *out = (%(rettype)s *)op;
-		
-		*out = (%(rettype)s) f(%(ptrargs)s);
-
-                %(body1d_add)s
-                op += os;
-	}
-}
-''' % locals()
-
-
-def support_code(cfuncs):
-    """ Given a list of Cfunc instances, return the support code for weave.
-    """
-    acc = cStringIO.StringIO()
-    
-    acc.write('/********************* start pypy.h  **************/\n\n')
-    acc.write(open(os.path.join(os.path.dirname(__file__),
-                                'pypy.h')).read())
-    acc.write('/********************** end pypy.h ****************/\n\n')
-    
-    for cf in cfuncs:
-        acc.write(cf.cfunc())
-        acc.write(cf.ufunc_support_code())
-        
-    fname = cfuncs[0].f.__name__
-    
-    pyufuncs = ''.join('\tPyUFunc_%i,\n' % cf.n for cf in cfuncs)
-    
-    data = ''.join('\t(void *) wrap_%s,\n' % cf.cname for cf in cfuncs)
-    
-    types = ''.join('\t%s  /* %i */\n' %
-                    (''.join(typedict[t].npy + ', ' for t in cf.sig), cf.n)
-                    for cf in cfuncs)
-    
-    acc.write('''
-static PyUFuncGenericFunction %(fname)s_functions[] = {
-%(pyufuncs)s};
-
-static void *%(fname)s_data[] = {
-%(data)s};
-
-static char %(fname)s_types[] = {
-%(types)s};
-''' % locals())
-
-    if verbose:
-        print '------------------ start support_code -----------------'
-        print acc.getvalue()
-        print '------------------- end support_code ------------------'
-        
-    return acc.getvalue()
-
-
-def code(f, signatures):
-    """ Return the code for weave.
-    """
-    nin = f.func_code.co_argcount
-    ntypes = len(signatures)
-    fname = f.__name__
-    fhash = func_hash(f)
-    
-    res = '''
-import_ufunc();
-
-/****************************************************************************
-**  function name: %(fname)s
-**  signatures: %(signatures)r
-**  fhash: %(fhash)s
-*****************************************************************************/
-
-return_val = PyUFunc_FromFuncAndData(
-    %(fname)s_functions,
-    %(fname)s_data,
-    %(fname)s_types,
-    %(ntypes)i,      /* ntypes */
-    %(nin)i,         /* nin */
-    1,               /* nout */
-    PyUFunc_None,    /* identity */
-    "%(fname)s",     /* name */
-    "UFunc created by mkufunc", /* doc */
-    0);
-''' % locals()
-
-    if verbose:
-        print '---------------------- start code ---------------------'
-        print res
-        print '----------------------- end code ----------------------'
-
-    return res
-
-
-def genufunc(f, signatures):
-    """ Return the Ufunc Python object for given function and signatures.
-    """
-    if len(signatures) == 0:
-        raise ValueError("At least one signature needed")
-    
-    signatures.sort(key=lambda sig: [numpy.dtype(typ).num for typ in sig])
-    
-    cfuncs = [Cfunc(f, sig, n) for n, sig in enumerate(signatures)]
-    
-    ufunc_info = weave.base_info.custom_info()
-    ufunc_info.add_header('"numpy/ufuncobject.h"')
-    
-    return weave.inline(code(f, signatures),
-                        verbose=verbose,
-                        support_code=support_code(cfuncs),
-                        customize=ufunc_info)
-
-
-def mkufunc(arg0=[float]):
-    """ Python decorator which returns compiled UFunc of the function given.
-    
-    >>> from numpy import arange
-    >>> from mkufunc.api import mkufunc
-    >>> @mkufunc
-    ... def foo(x):
-    ...     return 4.2 * x * x - x + 6.3
-    ...
-    >>> a = arange(5)
-    >>> a
-    array([0, 1, 2, 3, 4])
-    >>> foo(a)
-    array([  6.3,   9.5,  21.1,  41.1,  69.5])
-    """
-    class UFunc(object):
-        
-        def __init__(self, f):
-            nin = f.func_code.co_argcount
-            nout = 1
-            for i, sig in enumerate(signatures):
-                if isinstance(sig, tuple):
-                    pass
-                elif sig in typedict.keys():
-                    signatures[i] = (nin + nout) * (sig,)
-                else:
-                    raise TypeError("no match for %r" % sig)
-                
-            for sig in signatures:
-                assert isinstance(sig, tuple)
-                if len(sig) != nin + nout:
-                    raise TypeError("signature %r does not match the "
-                                    "number of args of function %s" %
-                                    (sig, f.__name__))
-                for t in sig:
-                    if t not in typedict.keys():
-                        raise TypeError("no match for %r" % t)
-            
-            self.ufunc = genufunc(f, signatures)
-            
-        def __call__(self, *args):
-            return self.ufunc(*args)
-        
-    if isinstance(arg0, FunctionType):
-        f = arg0
-        signatures = [float]
-        return UFunc(f)
-    
-    elif isinstance(arg0, list):
-        signatures = arg0
-        return UFunc
-    
-    elif arg0 in typedict.keys():
-        signatures = [arg0]
-        return UFunc
-    
-    else:
-        raise TypeError("first argument has to be a function, a type, or "
-                        "a list of signatures")
-
-
-if __name__ == '__main__':
-    import doctest
-    doctest.testmod()

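The mkufunc decorator deleted above also accepts an explicit list of signatures;
a short sketch of that form, assuming the sandbox package and pypy are on
PYTHONPATH as described in the README (the function sqr is hypothetical):

    from numpy import arange
    from mkufunc.api import mkufunc

    # One C inner loop is generated per signature; a bare type such as
    # float would be expanded to (nin + nout) copies of that type.
    @mkufunc([(int, int), (float, float)])
    def sqr(x):
        return x * x

    print sqr(arange(5))          # dispatches to the long loop
    print sqr(arange(5) * 0.5)    # dispatches to the double loop
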
Copied: branches/refactor_fft/scipy/sandbox/mkufunc/mkufunc/api.py (from rev 4510, trunk/scipy/sandbox/mkufunc/mkufunc/api.py)

Deleted: branches/refactor_fft/scipy/sandbox/mkufunc/mkufunc/driver.py
===================================================================
--- trunk/scipy/sandbox/mkufunc/mkufunc/driver.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/sandbox/mkufunc/mkufunc/driver.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -1,728 +0,0 @@
-import sys, os
-
-from pypy.translator.translator import TranslationContext, graphof
-from pypy.translator.tool.taskengine import SimpleTaskEngine
-from pypy.translator.goal import query
-from pypy.translator.goal.timing import Timer
-from pypy.annotation import model as annmodel
-from pypy.annotation.listdef import s_list_of_strings
-from pypy.annotation import policy as annpolicy
-from py.compat import optparse
-from pypy.tool.udir import udir
-
-import py
-from pypy.tool.ansi_print import ansi_log
-log = py.log.Producer("translation")
-py.log.setconsumer("translation", ansi_log)
-
-DEFAULTS = {
-  'translation.gc': 'ref',
-  'translation.cc': None,
-  'translation.profopt': None,
-
-  'translation.thread': False, # influences GC policy
-
-  'translation.stackless': False,
-  'translation.debug': True,
-  'translation.insist': False,
-  'translation.backend': 'c',
-  'translation.fork_before': None,
-  'translation.backendopt.raisingop2direct_call' : False,
-  'translation.backendopt.merge_if_blocks': True,
-}
-
-
-def taskdef(taskfunc, deps, title, new_state=None, expected_states=[],
-            idemp=False, earlycheck=None):
-    taskfunc.task_deps = deps
-    taskfunc.task_title = title
-    taskfunc.task_newstate = None
-    taskfunc.task_expected_states = expected_states
-    taskfunc.task_idempotent = idemp
-    taskfunc.task_earlycheck = earlycheck
-    return taskfunc
-
-# TODO:
-# sanity-checks using states
-
-_BACKEND_TO_TYPESYSTEM = {
-    'c': 'lltype',
-    'llvm': 'lltype'
-}
-
-def backend_to_typesystem(backend):
-    return _BACKEND_TO_TYPESYSTEM.get(backend, 'ootype')
-
-# set of translation steps to profile
-PROFILE = set([])
-
-class Instrument(Exception):
-    pass
-
-
-class ProfInstrument(object):
-    name = "profinstrument"
-    def __init__(self, datafile, compiler):
-        self.datafile = datafile
-        self.compiler = compiler
-
-    def first(self):
-        self.compiler._build()
-
-    def probe(self, exe, args):
-        from py.compat import subprocess
-        env = os.environ.copy()
-        env['_INSTRUMENT_COUNTERS'] = str(self.datafile)
-        subprocess.call("'%s' %s" % (exe, args), env=env, shell=True)
-        
-    def after(self):
-        # xxx
-        os._exit(0)
-
-
-class TranslationDriver(SimpleTaskEngine):
-
-    def __init__(self, setopts=None, default_goal=None,
-                 disable=[],
-                 exe_name=None, extmod_name=None,
-                 config=None, overrides=None):
-        self.timer = Timer()
-        SimpleTaskEngine.__init__(self)
-
-        self.log = log
-
-        if config is None:
-            from pypy.config.pypyoption import get_pypy_config
-            config = get_pypy_config(DEFAULTS, translating=True)
-        self.config = config
-        if overrides is not None:
-            self.config.override(overrides)
-
-        if setopts is not None:
-            self.config.set(**setopts)
-        
-        self.exe_name = exe_name
-        self.extmod_name = extmod_name
-
-        self.done = {}
-
-        self.disable(disable)
-
-        if default_goal:
-            default_goal, = self.backend_select_goals([default_goal])
-            if default_goal in self._maybe_skip():
-                default_goal = None
-        
-        self.default_goal = default_goal
-        self.extra_goals = []
-        self.exposed = []
-
-        # expose tasks
-        def expose_task(task, backend_goal=None):
-            if backend_goal is None:
-                backend_goal = task
-            def proc():
-                return self.proceed(backend_goal)
-            self.exposed.append(task)
-            setattr(self, task, proc)
-
-        backend, ts = self.get_backend_and_type_system()
-        for task in self.tasks:
-            explicit_task = task
-            parts = task.split('_')
-            if len(parts) == 1:
-                if task in ('annotate',):
-                    expose_task(task)
-            else:
-                task, postfix = parts
-                if task in ('rtype', 'backendopt', 'llinterpret',
-                            'prehannotatebackendopt', 'hintannotate',
-                            'timeshift'):
-                    if ts:
-                        if ts == postfix:
-                            expose_task(task, explicit_task)
-                    else:
-                        expose_task(explicit_task)
-                elif task in ('source', 'compile', 'run'):
-                    if backend:
-                        if backend == postfix:
-                            expose_task(task, explicit_task)
-                    elif ts:
-                        if ts == backend_to_typesystem(postfix):
-                            expose_task(explicit_task)
-                    else:
-                        expose_task(explicit_task)
-
-    def set_extra_goals(self, goals):
-        self.extra_goals = goals
-
-    def get_info(self): # XXX more?
-        d = {'backend': self.config.translation.backend}
-        return d
-
-    def get_backend_and_type_system(self):
-        type_system = self.config.translation.type_system
-        backend = self.config.translation.backend
-        return backend, type_system
-
-    def backend_select_goals(self, goals):
-        backend, ts = self.get_backend_and_type_system()
-        postfixes = [''] + ['_'+p for p in (backend, ts) if p]
-        l = []
-        for goal in goals:
-            for postfix in postfixes:
-                cand = "%s%s" % (goal, postfix)
-                if cand in self.tasks:
-                    new_goal = cand
-                    break
-            else:
-                raise Exception, "cannot infer complete goal from: %r" % goal 
-            l.append(new_goal)
-        return l
-
-    def disable(self, to_disable):
-        self._disabled = to_disable
-
-    def _maybe_skip(self):
-        maybe_skip = []
-        if self._disabled:
-            for goal in self.backend_select_goals(self._disabled):
-                maybe_skip.extend(self._depending_on_closure(goal))
-        return dict.fromkeys(maybe_skip).keys()
-
-
-    def setup(self, entry_point, inputtypes, policy=None, extra={}, empty_translator=None):
-        standalone = inputtypes is None
-        self.standalone = standalone
-
-        if standalone:
-            inputtypes = [s_list_of_strings]
-        self.inputtypes = inputtypes
-
-        if policy is None:
-            policy = annpolicy.AnnotatorPolicy()
-        if standalone:
-            policy.allow_someobjects = False
-        self.policy = policy
-
-        self.extra = extra
-
-        if empty_translator:
-            translator = empty_translator
-        else:
-            translator = TranslationContext(config=self.config)
-
-        self.entry_point = entry_point
-        self.translator = translator
-        self.libdef = None
-
-        self.translator.driver_instrument_result = self.instrument_result
-
-    def setup_library(self, libdef, policy=None, extra={}, empty_translator=None):
-        self.setup(None, None, policy, extra, empty_translator)
-        self.libdef = libdef
-
-    def instrument_result(self, args):
-        backend, ts = self.get_backend_and_type_system()
-        if backend != 'c' or sys.platform == 'win32':
-            raise Exception("instrumentation requires the c backend"
-                            " and unix for now")
-        from pypy.tool.udir import udir
-        
-        datafile = udir.join('_instrument_counters')
-        makeProfInstrument = lambda compiler: ProfInstrument(datafile, compiler)
-
-        pid = os.fork()
-        if pid == 0:
-            # child compiling and running with instrumentation
-            self.config.translation.instrument = True
-            self.config.translation.instrumentctl = (makeProfInstrument,
-                                                     args)
-            raise Instrument
-        else:
-            pid, status = os.waitpid(pid, 0)
-            if os.WIFEXITED(status):
-                status = os.WEXITSTATUS(status)
-                if status != 0:
-                    raise Exception, "instrumentation child failed: %d" % status
-            else:
-                raise Exception, "instrumentation child aborted"
-            import array, struct
-            n = datafile.size()//struct.calcsize('L')
-            datafile = datafile.open('rb')
-            counters = array.array('L')
-            counters.fromfile(datafile, n)
-            datafile.close()
-            return counters
-
-    def info(self, msg):
-        log.info(msg)
-
-    def _profile(self, goal, func):
-        from cProfile import Profile
-        from pypy.tool.lsprofcalltree import KCacheGrind
-        d = {'func':func}
-        prof = Profile()
-        prof.runctx("res = func()", globals(), d)
-        KCacheGrind(prof).output(open(goal + ".out", "w"))
-        return d['res']
-
-    def _do(self, goal, func, *args, **kwds):
-        title = func.task_title
-        if goal in self.done:
-            self.log.info("already done: %s" % title)
-            return
-        else:
-            self.log.info("%s..." % title)
-        self.timer.start_event(goal)
-        try:
-            instrument = False
-            try:
-                if goal in PROFILE:
-                    res = self._profile(goal, func)
-                else:
-                    res = func()
-            except Instrument:
-                instrument = True
-            if not func.task_idempotent:
-                self.done[goal] = True
-            if instrument:
-                self.proceed('compile')
-                assert False, 'we should not get here'
-        finally:
-            self.timer.end_event(goal)
-        return res
-
-    def task_annotate(self):
-        # includes annotation and annotation simplifications
-        translator = self.translator
-        policy = self.policy
-        self.log.info('with policy: %s.%s' %
-                      (policy.__class__.__module__, policy.__class__.__name__))
-        
-        annmodel.DEBUG = self.config.translation.debug
-        annotator = translator.buildannotator(policy=policy)
-
-        if self.entry_point:
-            s = annotator.build_types(self.entry_point, self.inputtypes)
-
-            self.sanity_check_annotation()
-            if self.standalone and s.knowntype != int:
-                raise Exception("stand-alone program entry point must return an "
-                                "int (and not, e.g., None or always raise an "
-                                "exception).")
-            annotator.simplify()
-            return s
-        else:
-            assert self.libdef is not None
-            for func, inputtypes in self.libdef.functions:
-                annotator.build_types(func, inputtypes)
-            self.sanity_check_annotation()
-            annotator.simplify()
-    #
-    task_annotate = taskdef(task_annotate, [], "Annotating&simplifying")
-
-
-    def sanity_check_annotation(self):
-        translator = self.translator
-        irreg = query.qoutput(query.check_exceptblocks_qgen(translator))
-        if irreg:
-            self.log.info("Some exceptblocks seem insane")
-
-        lost = query.qoutput(query.check_methods_qgen(translator))
-        assert not lost, "lost methods, something gone wrong with the annotation of method defs"
-
-        so = query.qoutput(query.polluted_qgen(translator))
-        tot = len(translator.graphs)
-        percent = int(tot and (100.0*so / tot) or 0)
-        # if there are a few SomeObjects even if the policy doesn't allow
-        # them, it means that they were put there in a controlled way
-        # and then it's not a warning.
-        if not translator.annotator.policy.allow_someobjects:
-            pr = self.log.info
-        elif percent == 0:
-            pr = self.log.info
-        else:
-            pr = log.WARNING
-        pr("-- someobjectness %2d%% (%d of %d functions polluted by SomeObjects)" % (percent, so, tot))
-
-
-
-    def task_rtype_lltype(self):
-        rtyper = self.translator.buildrtyper(type_system='lltype')
-        insist = not self.config.translation.insist
-        rtyper.specialize(dont_simplify_again=True,
-                          crash_on_first_typeerror=insist)
-    #
-    task_rtype_lltype = taskdef(task_rtype_lltype, ['annotate'], "RTyping")
-    RTYPE = 'rtype_lltype'
-
-    def task_rtype_ootype(self):
-        # Maybe type_system should simply be an option used in task_rtype
-        insist = not self.config.translation.insist
-        rtyper = self.translator.buildrtyper(type_system="ootype")
-        rtyper.specialize(dont_simplify_again=True,
-                          crash_on_first_typeerror=insist)
-    #
-    task_rtype_ootype = taskdef(task_rtype_ootype, ['annotate'], "ootyping")
-    OOTYPE = 'rtype_ootype'
-
-    def task_prehannotatebackendopt_lltype(self):
-        from pypy.translator.backendopt.all import backend_optimizations
-        backend_optimizations(self.translator,
-                              inline_threshold=0,
-                              merge_if_blocks=True,
-                              constfold=True,
-                              raisingop2direct_call=False,
-                              remove_asserts=True)
-    #
-    task_prehannotatebackendopt_lltype = taskdef(
-        task_prehannotatebackendopt_lltype,
-        [RTYPE],
-        "Backendopt before Hint-annotate")
-
-    def task_hintannotate_lltype(self):
-        from pypy.jit.hintannotator.annotator import HintAnnotator
-        from pypy.jit.hintannotator.model import OriginFlags
-        from pypy.jit.hintannotator.model import SomeLLAbstractConstant
-
-        get_portal = self.extra['portal']
-        PORTAL, POLICY = get_portal(self)
-        t = self.translator
-        self.portal_graph = graphof(t, PORTAL)
-
-        hannotator = HintAnnotator(base_translator=t, policy=POLICY)
-        self.hint_translator = hannotator.translator
-        hs = hannotator.build_types(self.portal_graph,
-                                    [SomeLLAbstractConstant(v.concretetype,
-                                                            {OriginFlags(): True})
-                                     for v in self.portal_graph.getargs()])
-        count = hannotator.bookkeeper.nonstuboriggraphcount
-        stubcount = hannotator.bookkeeper.stuboriggraphcount
-        self.log.info("The hint-annotator saw %d graphs"
-                      " (and made stubs for %d graphs)." % (count, stubcount))
-        n = len(list(hannotator.translator.graphs[0].iterblocks()))
-        self.log.info("portal has %d blocks" % n)
-        self.hannotator = hannotator
-    #
-    task_hintannotate_lltype = taskdef(task_hintannotate_lltype,
-                                       ['prehannotatebackendopt_lltype'],
-                                       "Hint-annotate")
-
-    def task_timeshift_lltype(self):
-        from pypy.jit.timeshifter.hrtyper import HintRTyper
-        from pypy.jit.codegen import detect_cpu
-        cpu = detect_cpu.autodetect()
-        if cpu == 'i386':
-            from pypy.jit.codegen.i386.rgenop import RI386GenOp as RGenOp
-            RGenOp.MC_SIZE = 32 * 1024 * 1024
-        elif cpu == 'ppc':
-            from pypy.jit.codegen.ppc.rgenop import RPPCGenOp as RGenOp
-            RGenOp.MC_SIZE = 32 * 1024 * 1024
-        else:
-            raise Exception('Unsupported cpu %r' % cpu)
-
-        del self.hint_translator
-        ha = self.hannotator
-        t = self.translator
-        # make the timeshifted graphs
-        hrtyper = HintRTyper(ha, t.rtyper, RGenOp)
-        hrtyper.specialize(origportalgraph=self.portal_graph, view=False)
-    #
-    task_timeshift_lltype = taskdef(task_timeshift_lltype,
-                             ["hintannotate_lltype"],
-                             "Timeshift")
-
-    def task_backendopt_lltype(self):
-        from pypy.translator.backendopt.all import backend_optimizations
-        backend_optimizations(self.translator)
-    #
-    task_backendopt_lltype = taskdef(task_backendopt_lltype,
-                                     [RTYPE,
-                                      '??timeshift_lltype'],
-                                     "lltype back-end optimisations")
-    BACKENDOPT = 'backendopt_lltype'
-
-    def task_backendopt_ootype(self):
-        from pypy.translator.backendopt.all import backend_optimizations
-        backend_optimizations(self.translator)
-    #
-    task_backendopt_ootype = taskdef(task_backendopt_ootype, 
-                                        [OOTYPE], "ootype back-end optimisations")
-    OOBACKENDOPT = 'backendopt_ootype'
-
-
-    def task_stackcheckinsertion_lltype(self):
-        from pypy.translator.transform import insert_ll_stackcheck
-        count = insert_ll_stackcheck(self.translator)
-        self.log.info("inserted %d stack checks." % (count,))
-        
-    task_stackcheckinsertion_lltype = taskdef(
-        task_stackcheckinsertion_lltype,
-        ['?'+BACKENDOPT, RTYPE, 'annotate'],
-        "inserting stack checks")
-    STACKCHECKINSERTION = 'stackcheckinsertion_lltype'
-
-    def possibly_check_for_boehm(self):
-        if self.config.translation.gc == "boehm":
-            from pypy.translator.tool.cbuild import check_boehm_presence
-            from pypy.translator.tool.cbuild import CompilationError
-            try:
-                check_boehm_presence(noerr=False)
-            except CompilationError, e:
-                i = 'Boehm GC not installed.  Try e.g. "translate.py --gc=hybrid"'
-                raise CompilationError('%s\n--------------------\n%s' % (e, i))
-
-    def task_database_c(self):
-        translator = self.translator
-        if translator.annotator is not None:
-            translator.frozen = True
-
-        standalone = self.standalone
-
-        if standalone:
-            from pypy.translator.c.genc import CStandaloneBuilder as CBuilder
-        else:
-            from pypy.translator.c.genc import CExtModuleBuilder as CBuilder
-        cbuilder = CBuilder(self.translator, self.entry_point,
-                            config=self.config)
-        cbuilder.stackless = self.config.translation.stackless
-        if not standalone:     # xxx more messy
-            cbuilder.modulename = self.extmod_name
-        database = cbuilder.build_database()
-        self.log.info("database for generating C source was created")
-        self.cbuilder = cbuilder
-        self.database = database
-    #
-    task_database_c = taskdef(task_database_c,
-                            [STACKCHECKINSERTION, '?'+BACKENDOPT, RTYPE, '?annotate'], 
-                            "Creating database for generating c source",
-                            earlycheck = possibly_check_for_boehm)
-    
-    def task_source_c(self):  # xxx messy
-        translator = self.translator
-        cbuilder = self.cbuilder
-        database = self.database
-        c_source_filename = cbuilder.generate_source(database)
-        self.log.info("written: %s" % (c_source_filename,))
-        self.c_source_filename = str(c_source_filename)
-    #
-    task_source_c = taskdef(task_source_c, ['database_c'], "Generating c source")
-
-    def task_compile_c(self): # xxx messy
-        cbuilder = self.cbuilder
-        cbuilder.compile()
-        
-        if self.standalone:
-            self.c_entryp = cbuilder.executable_name
-            self.create_exe()
-        else:
-            self.c_entryp = cbuilder.get_entry_point()
-    #
-    task_compile_c = taskdef(task_compile_c, ['source_c'], "Compiling c source")
-    
-    
-    def task_run_c(self):
-        self.backend_run('c')
-    #
-    task_run_c = taskdef(task_run_c, ['compile_c'], 
-                         "Running compiled c source",
-                         idemp=True)
-
-    def task_llinterpret_lltype(self):
-        from pypy.rpython.llinterp import LLInterpreter
-        py.log.setconsumer("llinterp operation", None)
-        
-        translator = self.translator
-        interp = LLInterpreter(translator.rtyper)
-        bk = translator.annotator.bookkeeper
-        graph = bk.getdesc(self.entry_point).getuniquegraph()
-        v = interp.eval_graph(graph,
-                              self.extra.get('get_llinterp_args',
-                                             lambda: [])())
-
-        log.llinterpret.event("result -> %s" % v)
-    #
-    task_llinterpret_lltype = taskdef(task_llinterpret_lltype, 
-                                      [STACKCHECKINSERTION, '?'+BACKENDOPT, RTYPE], 
-                                      "LLInterpreting")
-
-    def task_source_llvm(self):
-        translator = self.translator
-        if translator.annotator is None:
-            raise ValueError, "llvm requires annotation."
-
-        from pypy.translator.llvm import genllvm
-
-        self.llvmgen = genllvm.GenLLVM(translator, self.standalone)
-
-        llvm_filename = self.llvmgen.gen_source(self.entry_point)
-        self.log.info("written: %s" % (llvm_filename,))
-    #
-    task_source_llvm = taskdef(task_source_llvm, 
-                               [STACKCHECKINSERTION, BACKENDOPT, RTYPE], 
-                               "Generating llvm source")
-
-    def task_compile_llvm(self):
-        gen = self.llvmgen
-        if self.standalone:
-            exe_name = (self.exe_name or 'testing') % self.get_info()
-            self.c_entryp = gen.compile_standalone(exe_name)
-            self.create_exe()
-        else:
-            self.c_module, self.c_entryp = gen.compile_module()
-    #
-    task_compile_llvm = taskdef(task_compile_llvm, 
-                                ['source_llvm'], 
-                                "Compiling llvm source")
-
-    def task_run_llvm(self):
-        self.backend_run('llvm')
-    #
-    task_run_llvm = taskdef(task_run_llvm, ['compile_llvm'], 
-                            "Running compiled llvm source",
-                            idemp=True)
-
-    def task_source_js(self):
-        from pypy.translator.js.js import JS
-        self.gen = JS(self.translator, functions=[self.entry_point],
-                      stackless=self.config.translation.stackless)
-        filename = self.gen.write_source()
-        self.log.info("Wrote %s" % (filename,))
-    task_source_js = taskdef(task_source_js, 
-                        [OOTYPE],
-                        'Generating Javascript source')
-
-    def task_compile_js(self):
-        pass
-    task_compile_js = taskdef(task_compile_js, ['source_js'],
-                              'Skipping Javascript compilation')
-
-    def task_run_js(self):
-        pass
-    task_run_js = taskdef(task_run_js, ['compile_js'],
-                              'Please manually run the generated code')
-
-    def task_source_cli(self):
-        from pypy.translator.cli.gencli import GenCli
-        from pypy.translator.cli.entrypoint import get_entrypoint
-
-        if self.entry_point is not None: # executable mode
-            entry_point_graph = self.translator.graphs[0]
-            entry_point = get_entrypoint(entry_point_graph)
-        else:
-            # library mode
-            assert self.libdef is not None
-            bk = self.translator.annotator.bookkeeper
-            entry_point = self.libdef.get_entrypoint(bk)
-
-        self.gen = GenCli(udir, self.translator, entry_point, config=self.config)
-        filename = self.gen.generate_source()
-        self.log.info("Wrote %s" % (filename,))
-    task_source_cli = taskdef(task_source_cli, ["?" + OOBACKENDOPT, OOTYPE],
-                             'Generating CLI source')
-
-    def task_compile_cli(self):
-        from pypy.translator.oosupport.support import unpatch_os
-        from pypy.translator.cli.test.runtest import CliFunctionWrapper
-        filename = self.gen.build_exe()
-        self.c_entryp = CliFunctionWrapper(filename)
-        # restore original os values
-        if hasattr(self, 'old_cli_defs'):
-            unpatch_os(self.old_cli_defs)
-        
-        self.log.info("Compiled %s" % filename)
-        if self.standalone and self.exe_name:
-            self.copy_cli_exe()
-    task_compile_cli = taskdef(task_compile_cli, ['source_cli'],
-                              'Compiling CLI source')
-
-    def task_run_cli(self):
-        pass
-    task_run_cli = taskdef(task_run_cli, ['compile_cli'],
-                              'XXX')
-    
-    def task_source_jvm(self):
-        from pypy.translator.jvm.genjvm import GenJvm
-        from pypy.translator.jvm.node import EntryPoint
-
-        entry_point_graph = self.translator.graphs[0]
-        is_func = not self.standalone
-        entry_point = EntryPoint(entry_point_graph, is_func, is_func)
-        self.gen = GenJvm(udir, self.translator, entry_point)
-        self.jvmsource = self.gen.generate_source()
-        self.log.info("Wrote JVM code")
-    task_source_jvm = taskdef(task_source_jvm, ["?" + OOBACKENDOPT, OOTYPE],
-                             'Generating JVM source')
-
-    def task_compile_jvm(self):
-        from pypy.translator.oosupport.support import unpatch_os
-        from pypy.translator.jvm.test.runtest import JvmGeneratedSourceWrapper
-        self.jvmsource.compile()
-        self.c_entryp = JvmGeneratedSourceWrapper(self.jvmsource)
-        # restore original os values
-        if hasattr(self, 'old_cli_defs'):
-            unpatch_os(self.old_cli_defs)
-        self.log.info("Compiled JVM source")
-        if self.standalone and self.exe_name:
-            self.copy_jvm_jar()
-    task_compile_jvm = taskdef(task_compile_jvm, ['source_jvm'],
-                              'Compiling JVM source')
-
-    def task_run_jvm(self):
-        pass
-    task_run_jvm = taskdef(task_run_jvm, ['compile_jvm'],
-                           'XXX')
-
-    def proceed(self, goals):
-        if not goals:
-            if self.default_goal:
-                goals = [self.default_goal]
-            else:
-                self.log.info("nothing to do")
-                return
-        elif isinstance(goals, str):
-            goals = [goals]
-        goals.extend(self.extra_goals)
-        goals = self.backend_select_goals(goals)
-        return self._execute(goals, task_skip = self._maybe_skip())
-
-    def from_targetspec(targetspec_dic, config=None, args=None,
-                        empty_translator=None,
-                        disable=[],
-                        default_goal=None):
-        if args is None:
-            args = []
-
-        driver = TranslationDriver(config=config, default_goal=default_goal,
-                                   disable=disable)
-        # patch some attributes of the os module to make sure they
-        # have the same value on every platform.
-        backend, ts = driver.get_backend_and_type_system()
-        if backend in ('cli', 'jvm'):
-            from pypy.translator.oosupport.support import patch_os
-            driver.old_cli_defs = patch_os()
-        
-        target = targetspec_dic['target']
-        spec = target(driver, args)
-
-        try:
-            entry_point, inputtypes, policy = spec
-        except ValueError:
-            entry_point, inputtypes = spec
-            policy = None
-
-        driver.setup(entry_point, inputtypes, 
-                     policy=policy, 
-                     extra=targetspec_dic,
-                     empty_translator=empty_translator)
-
-        return driver
-
-    from_targetspec = staticmethod(from_targetspec)
-
-    def prereq_checkpt_rtype(self):
-        assert 'pypy.rpython.rmodel' not in sys.modules, (
-            "cannot fork because the rtyper has already been imported")
-    prereq_checkpt_rtype_lltype = prereq_checkpt_rtype
-    prereq_checkpt_rtype_ootype = prereq_checkpt_rtype    

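driver.py above registers every build step by attaching metadata to a plain
method via taskdef and letting SimpleTaskEngine walk the dependencies. A
condensed, self-contained sketch of that pattern (the toy tasks and the proceed
helper are illustrative, not pypy's actual engine):

    def taskdef(taskfunc, deps, title):
        # Attach scheduling metadata to a plain function, as driver.py does.
        taskfunc.task_deps = deps
        taskfunc.task_title = title
        return taskfunc

    def annotate():
        pass
    annotate = taskdef(annotate, [], "Annotating")

    def source_c():
        pass
    source_c = taskdef(source_c, ["annotate"], "Generating c source")

    def proceed(goal, tasks, done=None):
        # Depth-first walk of task_deps, running each task at most once.
        if done is None:
            done = set()
        for dep in tasks[goal].task_deps:
            proceed(dep, tasks, done)
        if goal not in done:
            print tasks[goal].task_title
            tasks[goal]()
            done.add(goal)

    proceed("source_c", {"annotate": annotate, "source_c": source_c})
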
Copied: branches/refactor_fft/scipy/sandbox/mkufunc/mkufunc/driver.py (from rev 4510, trunk/scipy/sandbox/mkufunc/mkufunc/driver.py)

Deleted: branches/refactor_fft/scipy/sandbox/mkufunc/mkufunc/interactive.py
===================================================================
--- trunk/scipy/sandbox/mkufunc/mkufunc/interactive.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/sandbox/mkufunc/mkufunc/interactive.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -1,115 +0,0 @@
-import driver
-
-from pypy.translator.translator import TranslationContext
-
-
-DEFAULTS = {
-  'translation.backend': None,
-  'translation.type_system': None,
-  'translation.verbose': False,
-}
-
-class Translation(object):
-
-    def __init__(self, entry_point, argtypes=None, **kwds):
-        self.driver = driver.TranslationDriver(overrides=DEFAULTS)
-        self.config = self.driver.config
-
-        self.entry_point = entry_point
-        self.context = TranslationContext(config=self.config)
-        
-        # hook into driver events
-        driver_own_event = self.driver._event
-        def _event(kind, goal, func):
-            self.driver_event(kind, goal, func)
-            driver_own_event(kind, goal, func)
-        self.driver._event = _event
-        self.driver_setup = False
-        
-        self.update_options(argtypes, kwds)
-        # for t.view() to work just after construction
-        graph = self.context.buildflowgraph(entry_point)
-        self.context._prebuilt_graphs[entry_point] = graph
-
-    def driver_event(self, kind, goal, func):
-        if kind == 'pre':
-            self.ensure_setup()
-            
-    def ensure_setup(self, argtypes=None, policy=None, standalone=False):
-        if not self.driver_setup:
-            if standalone:
-                assert argtypes is None
-            else:
-                if argtypes is None:
-                    argtypes = []
-            self.driver.setup(self.entry_point, argtypes, policy,
-                              empty_translator=self.context)
-            self.ann_argtypes = argtypes
-            self.ann_policy = policy
-            self.driver_setup = True
-        else:
-            # check consistency
-            if standalone:
-                assert argtypes is None
-                assert self.ann_argtypes is None
-            elif argtypes is not None and argtypes != self.ann_argtypes:
-                raise Exception("inconsistent argtype supplied")
-            if policy is not None and policy != self.ann_policy:
-                raise Exception("inconsistent annotation polish supplied")
-
-    def update_options(self, argtypes, kwds):
-        if argtypes or kwds.get('policy') or kwds.get('standalone'):
-            self.ensure_setup(argtypes, kwds.get('policy'),
-                                        kwds.get('standalone'))
-        kwds.pop('policy', None)
-        kwds.pop('standalone', None)
-        self.config.translation.set(**kwds)
-
-    def ensure_opt(self, name, value=None, fallback=None):
-        if value is not None:
-            self.update_options(None, {name: value})
-            return value
-        val = getattr(self.config.translation, name, None)
-        if fallback is not None and val is None:
-            self.update_options(None, {name: fallback})
-            return fallback
-        if val is not None:
-            return val
-        raise Exception(
-              "the %r option should have been specified at this point" %name)
-
-    def ensure_type_system(self, type_system=None):
-        if self.config.translation.backend is not None:
-            return self.ensure_opt('type_system')
-        return self.ensure_opt('type_system', type_system, 'lltype')
-        
-    def ensure_backend(self, backend=None):
-        backend = self.ensure_opt('backend', backend)
-        self.ensure_type_system()
-        return backend
-
-    # backend independent
-
-    def annotate(self, argtypes=None, **kwds):
-        self.update_options(argtypes, kwds)
-        return self.driver.annotate()
-
-    # type system dependent
-
-    def rtype(self, argtypes=None, **kwds):
-        self.update_options(argtypes, kwds)
-        ts = self.ensure_type_system()
-        return getattr(self.driver, 'rtype_'+ts)()        
-
-    # backend dependent
-
-    def source(self, argtypes=None, **kwds):
-        self.update_options(argtypes, kwds)
-        backend = self.ensure_backend()
-        self.driver.source_c()
-       
-    def compile(self, argtypes=None, **kwds):
-        self.update_options(argtypes, kwds)
-        backend = self.ensure_backend()
-        self.driver.compile_c()
-        return self.driver.c_entryp

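interactive.py's Translation class is driven exactly as in translate() from
api.py earlier in this commit; a minimal sketch, assuming pypy is importable
(the function f and the float argtype are illustrative):

    from interactive import Translation

    def f(x):
        return x * x

    t = Translation(f, backend='c')
    t.annotate([float])    # annotate for a single double argument
    t.source()             # emit C source via the translation driver
    print t.driver.c_source_filename
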
Copied: branches/refactor_fft/scipy/sandbox/mkufunc/mkufunc/interactive.py (from rev 4510, trunk/scipy/sandbox/mkufunc/mkufunc/interactive.py)

Deleted: branches/refactor_fft/scipy/sandbox/mkufunc/mkufunc/pypy.h
===================================================================
--- trunk/scipy/sandbox/mkufunc/mkufunc/pypy.h	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/sandbox/mkufunc/mkufunc/pypy.h	2008-07-01 04:52:00 UTC (rev 4511)
@@ -1,381 +0,0 @@
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <math.h>
-
-/* ================================================== g_prerequisite.h === */
-
-typedef unsigned char bool_t;
-
-/* ================================================== exception.h ======== */
-
-#define RPY_DEBUG_RETURN()        /* nothing */
-
-
-/* ================================================== int.h ============== */
-
-/*** unary operations ***/
-
-#define OP_INT_IS_TRUE(x,r)   OP_INT_NE(x,0,r)
-
-#define OP_INT_INVERT(x,r)    r = ~((x))
-
-#define OP_INT_NEG(x,r)    r = -(x)
-
-#define OP_INT_NEG_OVF(x,r) \
-    if ((x) == LONG_MIN) FAIL_OVF("integer negate"); \
-	OP_INT_NEG(x,r)
-#define OP_LLONG_NEG_OVF(x,r) \
-    if ((x) == LLONG_MIN) FAIL_OVF("integer negate"); \
-	OP_LLONG_NEG(x,r)
-
-#define OP_INT_ABS(x,r)    r = (x) >= 0 ? x : -(x)
-
-#define OP_INT_ABS_OVF(x,r) \
-    if ((x) == LONG_MIN) FAIL_OVF("integer absolute"); \
-	OP_INT_ABS(x,r)
-#define OP_LLONG_ABS_OVF(x,r) \
-    if ((x) == LLONG_MIN) FAIL_OVF("integer absolute"); \
-	OP_LLONG_ABS(x,r)
-
-/***  binary operations ***/
-
-#define OP_INT_EQ(x,y,r)	  r = ((x) == (y))
-#define OP_INT_NE(x,y,r)	  r = ((x) != (y))
-#define OP_INT_LE(x,y,r)	  r = ((x) <= (y))
-#define OP_INT_GT(x,y,r)	  r = ((x) >  (y))
-#define OP_INT_LT(x,y,r)	  r = ((x) <  (y))
-#define OP_INT_GE(x,y,r)	  r = ((x) >= (y))
-
-/* addition, subtraction */
-
-#define OP_INT_ADD(x,y,r)     r = (x) + (y)
-
-#define OP_INT_ADD_OVF(x,y,r) \
-	OP_INT_ADD(x,y,r); \
-	if ((r^(x)) >= 0 || (r^(y)) >= 0); \
-	else FAIL_OVF("integer addition")
-
-#define OP_INT_ADD_NONNEG_OVF(x,y,r)  /* y can be assumed >= 0 */ \
-    OP_INT_ADD(x,y,r); \
-    if (r >= (x)); \
-    else FAIL_OVF("integer addition")
-/* XXX can a C compiler be too clever and think it can "prove" that
- * r >= x always hold above? */
-
-#define OP_INT_SUB(x,y,r)     r = (x) - (y)
-
-#define OP_INT_SUB_OVF(x,y,r) \
-	OP_INT_SUB(x,y,r); \
-	if ((r^(x)) >= 0 || (r^~(y)) >= 0); \
-	else FAIL_OVF("integer subtraction")
-
-#define OP_INT_MUL(x,y,r)     r = (x) * (y)
-
-#if defined(HAVE_LONG_LONG) && SIZE_OF_LONG_LONG < SIZE_OF_LONG
-#  define OP_INT_MUL_OVF_LL      1
-#else
-#  define OP_INT_MUL_OVF_LL      0
-#endif
-
-#if !OP_INT_MUL_OVF_LL
-
-#define OP_INT_MUL_OVF(x,y,r) \
-	if (op_int_mul_ovf(x,y,&r)); \
-	else FAIL_OVF("integer multiplication")
-
-#else
-
-#define OP_INT_MUL_OVF(x,y,r) \
-	{ \
-		PY_LONG_LONG lr = (PY_LONG_LONG)(x) * (PY_LONG_LONG)(y); \
-		r = (long)lr; \
-		if ((PY_LONG_LONG)r == lr); \
-		else FAIL_OVF("integer multiplication"); \
-	}
-#endif
-
-/* shifting */
-
-/* NB. shifting has same limitations as C: the shift count must be
-       >= 0 and < LONG_BITS. */
-#define OP_INT_RSHIFT(x,y,r)    r = Py_ARITHMETIC_RIGHT_SHIFT(long, x, y)
-#define OP_UINT_RSHIFT(x,y,r)   r = (x) >> (y)
-#define OP_LLONG_RSHIFT(x,y,r)  r = Py_ARITHMETIC_RIGHT_SHIFT(PY_LONG_LONG,x,y)
-#define OP_ULLONG_RSHIFT(x,y,r) r = (x) >> (y)
-
-#define OP_INT_LSHIFT(x,y,r)    r = (x) << (y)
-#define OP_UINT_LSHIFT(x,y,r)   r = (x) << (y)
-#define OP_LLONG_LSHIFT(x,y,r)  r = (x) << (y)
-#define OP_ULLONG_LSHIFT(x,y,r) r = (x) << (y)
-
-#define OP_INT_LSHIFT_OVF(x,y,r) \
-	OP_INT_LSHIFT(x,y,r); \
-	if ((x) != Py_ARITHMETIC_RIGHT_SHIFT(long, r, (y))) \
-		FAIL_OVF("x<<y losing bits or changing sign")
-
-/* the safe value-checking version of the above macros */
-
-#define OP_INT_RSHIFT_VAL(x,y,r) \
-	if ((y) >= 0) { OP_INT_RSHIFT(x,y,r); } \
-	else FAIL_VAL("negative shift count")
-#define OP_LLONG_RSHIFT_VAL(x,y,r) \
-	if ((y) >= 0) { OP_LLONG_RSHIFT(x,y,r); } \
-	else FAIL_VAL("negative shift count")
-
-#define OP_INT_LSHIFT_VAL(x,y,r) \
-	if ((y) >= 0) { OP_INT_LSHIFT(x,y,r); } \
-	else FAIL_VAL("negative shift count")
-#define OP_LLONG_LSHIFT_VAL(x,y,r) \
-	if ((y) >= 0) { OP_LLONG_LSHIFT(x,y,r); } \
-	else FAIL_VAL("negative shift count")
-
-#define OP_INT_LSHIFT_OVF_VAL(x,y,r) \
-	if ((y) >= 0) { OP_INT_LSHIFT_OVF(x,y,r); } \
-	else FAIL_VAL("negative shift count")
-
-/* pff */
-#define OP_UINT_LSHIFT_VAL(x,y,r) \
-	if ((y) >= 0) { OP_UINT_LSHIFT(x,y,r); } \
-	else FAIL_VAL("negative shift count")
-#define OP_ULLONG_LSHIFT_VAL(x,y,r) \
-	if ((y) >= 0) { OP_ULLONG_LSHIFT(x,y,r); } \
-	else FAIL_VAL("negative shift count")
-
-#define OP_UINT_RSHIFT_VAL(x,y,r) \
-	if ((y) >= 0) { OP_UINT_RSHIFT(x,y,r); } \
-	else FAIL_VAL("negative shift count")
-#define OP_ULLONG_RSHIFT_VAL(x,y,r) \
-	if ((y) >= 0) { OP_ULLONG_RSHIFT(x,y,r); } \
-	else FAIL_VAL("negative shift count")
-
-
-/* floor division */
-
-#define OP_INT_FLOORDIV(x,y,r)    r = (x) / (y)
-#define OP_UINT_FLOORDIV(x,y,r)   r = (x) / (y)
-#define OP_LLONG_FLOORDIV(x,y,r)  r = (x) / (y)
-#define OP_ULLONG_FLOORDIV(x,y,r) r = (x) / (y)
-
-#define OP_INT_FLOORDIV_OVF(x,y,r) \
-	if ((y) == -1 && (x) == LONG_MIN) \
-            { FAIL_OVF("integer division"); } \
-        else OP_INT_FLOORDIV(x,y,r)
-
-#define OP_INT_FLOORDIV_ZER(x,y,r) \
-	if ((y)) { OP_INT_FLOORDIV(x,y,r); } \
-	else FAIL_ZER("integer division")
-#define OP_UINT_FLOORDIV_ZER(x,y,r) \
-	if ((y)) { OP_UINT_FLOORDIV(x,y,r); } \
-	else FAIL_ZER("unsigned integer division")
-#define OP_LLONG_FLOORDIV_ZER(x,y,r) \
-	if ((y)) { OP_LLONG_FLOORDIV(x,y,r); } \
-	else FAIL_ZER("integer division")
-#define OP_ULLONG_FLOORDIV_ZER(x,y,r) \
-	if ((y)) { OP_ULLONG_FLOORDIV(x,y,r); } \
-	else FAIL_ZER("unsigned integer division")
-
-#define OP_INT_FLOORDIV_OVF_ZER(x,y,r) \
-	if ((y)) { OP_INT_FLOORDIV_OVF(x,y,r); } \
-	else FAIL_ZER("integer division")
-
-/* modulus */
-
-#define OP_INT_MOD(x,y,r)     r = (x) % (y)
-#define OP_UINT_MOD(x,y,r)    r = (x) % (y)
-#define OP_LLONG_MOD(x,y,r)   r = (x) % (y)
-#define OP_ULLONG_MOD(x,y,r)  r = (x) % (y)
-
-#define OP_INT_MOD_OVF(x,y,r) \
-	if ((y) == -1 && (x) == LONG_MIN) \
-            { FAIL_OVF("integer modulo"); }\
-        else OP_INT_MOD(x,y,r)
-
-#define OP_INT_MOD_ZER(x,y,r) \
-	if ((y)) { OP_INT_MOD(x,y,r); } \
-	else FAIL_ZER("integer modulo")
-#define OP_UINT_MOD_ZER(x,y,r) \
-	if ((y)) { OP_UINT_MOD(x,y,r); } \
-	else FAIL_ZER("unsigned integer modulo")
-#define OP_LLONG_MOD_ZER(x,y,r) \
-	if ((y)) { OP_LLONG_MOD(x,y,r); } \
-	else FAIL_ZER("integer modulo")
-#define OP_ULLONG_MOD_ZER(x,y,r) \
-	if ((y)) { OP_ULLONG_MOD(x,y,r); } \
-	else FAIL_ZER("integer modulo")
-
-#define OP_INT_MOD_OVF_ZER(x,y,r) \
-	if ((y)) { OP_INT_MOD_OVF(x,y,r); } \
-	else FAIL_ZER("integer modulo")
-
-/* bit operations */
-
-#define OP_INT_AND(x,y,r)     r = (x) & (y)
-#define OP_INT_OR( x,y,r)     r = (x) | (y)
-#define OP_INT_XOR(x,y,r)     r = (x) ^ (y)
-
-/*** conversions ***/
-
-#define OP_CAST_BOOL_TO_INT(x,r)    r = (long)(x)
-#define OP_CAST_BOOL_TO_UINT(x,r)   r = (unsigned long)(x)
-#define OP_CAST_UINT_TO_INT(x,r)    r = (long)(x)
-#define OP_CAST_INT_TO_UINT(x,r)    r = (unsigned long)(x)
-#define OP_CAST_INT_TO_LONGLONG(x,r) r = (long long)(x)
-#define OP_CAST_CHAR_TO_INT(x,r)    r = (long)((unsigned char)(x))
-#define OP_CAST_INT_TO_CHAR(x,r)    r = (char)(x)
-#define OP_CAST_PTR_TO_INT(x,r)     r = (long)(x)    /* XXX */
-
-#define OP_TRUNCATE_LONGLONG_TO_INT(x,r) r = (long)(x)
-
-#define OP_CAST_UNICHAR_TO_INT(x,r)    r = (long)((unsigned long)(x)) /*?*/
-#define OP_CAST_INT_TO_UNICHAR(x,r)    r = (unsigned int)(x)
-
-/* bool operations */
-
-#define OP_BOOL_NOT(x, r) r = !(x)
-
-/* _________________ certain implementations __________________ */
-
-#if !OP_INT_MUL_OVF_LL
-/* adjusted from intobject.c, Python 2.3.3 */
-
-/* prototypes */
-
-int op_int_mul_ovf(long a, long b, long *longprod);
-
-/* implementations */
-
-#ifndef PYPY_NOT_MAIN_FILE
-
-int
-op_int_mul_ovf(long a, long b, long *longprod)
-{
-	double doubled_longprod;	/* (double)longprod */
-	double doubleprod;		/* (double)a * (double)b */
-
-	*longprod = a * b;
-	doubleprod = (double)a * (double)b;
-	doubled_longprod = (double)*longprod;
-
-	/* Fast path for normal case:  small multiplicands, and no info
-	   is lost in either method. */
-	if (doubled_longprod == doubleprod)
-		return 1;
-
-	/* Somebody somewhere lost info.  Close enough, or way off?  Note
-	   that a != 0 and b != 0 (else doubled_longprod == doubleprod == 0).
-	   The difference either is or isn't significant compared to the
-	   true value (of which doubleprod is a good approximation).
-	*/
-	{
-		const double diff = doubled_longprod - doubleprod;
-		const double absdiff = diff >= 0.0 ? diff : -diff;
-		const double absprod = doubleprod >= 0.0 ? doubleprod :
-							  -doubleprod;
-		/* absdiff/absprod <= 1/32 iff
-		   32 * absdiff <= absprod -- 5 good bits is "close enough" */
-		if (32.0 * absdiff <= absprod)
-			return 1;
-		return 0;
-	}
-}
-
-#endif /* PYPY_NOT_MAIN_FILE */
-
-#endif /* !OP_INT_MUL_OVF_LL */
-
-/* implementations */
-
-#define OP_UINT_IS_TRUE OP_INT_IS_TRUE
-#define OP_UINT_INVERT OP_INT_INVERT
-#define OP_UINT_ADD OP_INT_ADD
-#define OP_UINT_SUB OP_INT_SUB
-#define OP_UINT_MUL OP_INT_MUL
-#define OP_UINT_LT OP_INT_LT
-#define OP_UINT_LE OP_INT_LE
-#define OP_UINT_EQ OP_INT_EQ
-#define OP_UINT_NE OP_INT_NE
-#define OP_UINT_GT OP_INT_GT
-#define OP_UINT_GE OP_INT_GE
-#define OP_UINT_AND OP_INT_AND
-#define OP_UINT_OR OP_INT_OR
-#define OP_UINT_XOR OP_INT_XOR
-
-#define OP_LLONG_IS_TRUE OP_INT_IS_TRUE
-#define OP_LLONG_NEG     OP_INT_NEG
-#define OP_LLONG_ABS     OP_INT_ABS
-#define OP_LLONG_INVERT  OP_INT_INVERT
-
-#define OP_LLONG_ADD OP_INT_ADD
-#define OP_LLONG_SUB OP_INT_SUB
-#define OP_LLONG_MUL OP_INT_MUL
-#define OP_LLONG_LT  OP_INT_LT
-#define OP_LLONG_LE  OP_INT_LE
-#define OP_LLONG_EQ  OP_INT_EQ
-#define OP_LLONG_NE  OP_INT_NE
-#define OP_LLONG_GT  OP_INT_GT
-#define OP_LLONG_GE  OP_INT_GE
-#define OP_LLONG_AND    OP_INT_AND
-#define OP_LLONG_OR     OP_INT_OR
-#define OP_LLONG_XOR    OP_INT_XOR
-
-#define OP_ULLONG_IS_TRUE OP_LLONG_IS_TRUE
-#define OP_ULLONG_INVERT  OP_LLONG_INVERT
-#define OP_ULLONG_ADD OP_LLONG_ADD
-#define OP_ULLONG_SUB OP_LLONG_SUB
-#define OP_ULLONG_MUL OP_LLONG_MUL
-#define OP_ULLONG_LT OP_LLONG_LT
-#define OP_ULLONG_LE OP_LLONG_LE
-#define OP_ULLONG_EQ OP_LLONG_EQ
-#define OP_ULLONG_NE OP_LLONG_NE
-#define OP_ULLONG_GT OP_LLONG_GT
-#define OP_ULLONG_GE OP_LLONG_GE
-#define OP_ULLONG_AND OP_LLONG_AND
-#define OP_ULLONG_OR OP_LLONG_OR
-#define OP_ULLONG_XOR OP_LLONG_XOR
-
-/* ================================================== float.h ============ */
-
-/*** unary operations ***/
-
-#define OP_FLOAT_IS_TRUE(x,r)   OP_FLOAT_NE(x,0.0,r)
-#define OP_FLOAT_NEG(x,r)       r = -x
-#define OP_FLOAT_ABS(x,r)       r = fabs(x)
-
-/***  binary operations ***/
-
-#define OP_FLOAT_EQ(x,y,r)	  r = (x == y)
-#define OP_FLOAT_NE(x,y,r)	  r = (x != y)
-#define OP_FLOAT_LE(x,y,r)	  r = (x <= y)
-#define OP_FLOAT_GT(x,y,r)	  r = (x >  y)
-#define OP_FLOAT_LT(x,y,r)	  r = (x <  y)
-#define OP_FLOAT_GE(x,y,r)	  r = (x >= y)
-
-#define OP_FLOAT_CMP(x,y,r) \
-	r = ((x > y) - (x < y))
-
-/* addition, subtraction */
-
-#define OP_FLOAT_ADD(x,y,r)     r = x + y
-#define OP_FLOAT_SUB(x,y,r)     r = x - y
-#define OP_FLOAT_MUL(x,y,r)     r = x * y
-#define OP_FLOAT_TRUEDIV(x,y,r) r = x / y
-#define OP_FLOAT_POW(x,y,r)     r = pow(x, y) 
-
-/*** conversions ***/
-
-#define OP_CAST_FLOAT_TO_INT(x,r)       r = (long)(x)
-#define OP_CAST_FLOAT_TO_UINT(x,r)      r = (unsigned long)(x)
-#define OP_CAST_INT_TO_FLOAT(x,r)       r = (double)(x)
-#define OP_CAST_UINT_TO_FLOAT(x,r)      r = (double)(x)
-#define OP_CAST_LONGLONG_TO_FLOAT(x,r)  r = (double)(x)
-#define OP_CAST_BOOL_TO_FLOAT(x,r)      r = (double)(x)
-
-#ifdef HAVE_LONG_LONG
-#define OP_CAST_FLOAT_TO_LONGLONG(x,r)  r = (long long)(x)
-#endif
-
-/* ================================================== support.h ========== */
-
-#define RPyField(ptr, name)             NULL
-

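For context on the deleted header above: op_int_mul_ovf detects overflow in long multiplication by recomputing the product in double precision and accepting the truncated integer result only when the two values agree to roughly five leading bits. A minimal Python sketch of that check (mul_ovf is a hypothetical name, not part of this patch):

    def mul_ovf(a, b, bits=64):
        # Sketch of op_int_mul_ovf: multiply as a fixed-width signed long,
        # then compare against the double-precision product.
        mask = (1 << bits) - 1
        longprod = (a * b) & mask
        if longprod >= 1 << (bits - 1):        # reinterpret as signed
            longprod -= 1 << bits
        doubleprod = float(a) * float(b)
        if float(longprod) == doubleprod:      # fast path: no info lost
            return longprod
        absdiff = abs(float(longprod) - doubleprod)
        if 32.0 * absdiff <= abs(doubleprod):  # 5 good bits is "close enough"
            return longprod
        raise OverflowError("integer multiplication")
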
Copied: branches/refactor_fft/scipy/sandbox/mkufunc/mkufunc/pypy.h (from rev 4510, trunk/scipy/sandbox/mkufunc/mkufunc/pypy.h)

Deleted: branches/refactor_fft/scipy/sandbox/mkufunc/mkufunc/test_func_hash.py
===================================================================
--- trunk/scipy/sandbox/mkufunc/mkufunc/test_func_hash.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/sandbox/mkufunc/mkufunc/test_func_hash.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -1,48 +0,0 @@
-import unittest
-
-from api import func_hash
-
-
-class Tests(unittest.TestCase):
-    
-    # These tests are very (Python) version specific.
-    
-    def test_simple(self):
-        
-        def f(x):
-            return 2.5 * x * x + 4.7 * x
-        
-        self.assertEqual(func_hash(f),
-                         '5f12e97debf1d2cb9e0a2f92e045b1fb')
-        
-        
-    def test_extra(self):
-        
-        def f(x):
-            return 2.5 * x * x + 4.7 * x
-        
-        self.assertEqual(func_hash(f, salt=[(int, int), (float, float)]),
-                         'e637d9825ef20cb56d364041118ca72e')
-        
-    def test_const(self):
-        
-        def add_a(b):
-            return a + b   # a in globals
-        
-        self.assertEqual(func_hash(add_a),
-                         '9ff237f372bf233470ce940edd58f60d')
-        
-    def test_inner(self):
-        
-        def foo(x):
-            inner1 = lambda t: t/3.0
-            def inner2(n):
-                return n + 3
-            return inner1(x) + inner2(int(x))
-        
-        self.assertEqual(func_hash(foo),
-                         '814c113dfc77e7ebb52915dd3ce9c37a')
-
-
-if __name__ == '__main__':
-    unittest.main()

Copied: branches/refactor_fft/scipy/sandbox/mkufunc/mkufunc/test_func_hash.py (from rev 4510, trunk/scipy/sandbox/mkufunc/mkufunc/test_func_hash.py)

Deleted: branches/refactor_fft/scipy/sandbox/mkufunc/mkufunc/test_mkufunc.py
===================================================================
--- trunk/scipy/sandbox/mkufunc/mkufunc/test_mkufunc.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/sandbox/mkufunc/mkufunc/test_mkufunc.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -1,255 +0,0 @@
-import math
-import unittest
-
-from numpy import array, arange, allclose
-
-from api import Cfunc, genufunc, mkufunc
-
-
-class Util:
-
-    def assertClose(self, x, y):
-        self.assert_(allclose(x, y), '%s != %s' % (x, y))
-            
-
-class Internal_Tests(unittest.TestCase, Util):
-    
-    def test_Cfunc(self):
-        def sqr(x):
-            return x * x
-        cf = Cfunc(sqr, [int, int], 42)
-        self.assertEqual(cf.nin, 1)
-        self.assertEqual(cf.nout, 1)
-        self.assertEqual(cf.cname, 'f42_pypy_g_sqr')
-
-    def test_genufunc(self):
-        def foo(x):
-            return x + 17
-        uf = genufunc(foo, [
-                (float, float),
-                (int, int),
-                ])
-        self.assertEqual(uf(4), 21)
-        x = array([1.1, 2.3])
-        y = uf(x)
-        self.assertClose(y, [18.1, 19.3])
-        self.assert_(str(y.dtype).startswith('float'))
-        
-        x = array([1, 4])
-        y = uf(x)
-        self.assertEqual(list(y), [18, 21])
-        self.assert_(str(y.dtype).startswith('int'))
-
-
-class Arg_Tests(unittest.TestCase, Util):
-    
-    def check_ufunc(self, f):
-        for arg in (array([0.0, 1.0, 2.5]),
-                    [0.0, 1.0, 2.5],
-                    (0.0, 1.0, 2.5)):
-            self.assertClose(f(arg), [0.0, 1.0, 6.25])
-            
-        self.assertEqual(f(3), 9)
-        self.assert_(f(-2.5) - 6.25 < 1E-10)
-
-    def test_direct(self):
-        @mkufunc
-        def f(x):
-            return x * x
-        self.check_ufunc(f)
-        
-    def test_noargs(self):
-        @mkufunc()
-        def f(x):
-            return x * x
-        self.check_ufunc(f)
-        
-    def test_varargs(self):
-        for arg in (float,
-                    [float],
-                    [(float, float)]):
-            @mkufunc(arg)
-            def f(x):
-                return x * x
-            self.check_ufunc(f)
-
-    def test_int(self):
-        @mkufunc(int)
-        def f(x):
-            return x * x
-        self.assertEqual(f(3), 9)
-        self.assert_(isinstance(f(42), int))
-        
-    def test_mixed(self):
-        @mkufunc([(int, float, int), float])
-        def f(n, x):
-            return n + x * x
-        
-        y = f(2, 3.9)            # Note that int(2 + 3.9 * 3.9) = 17
-        self.assertEqual(y, 17)
-        self.assert_(isinstance(y, int))
-        
-        y = f(2.0, 3.9)
-        self.assertClose(y, 17.21)
-        self.assert_(isinstance(y, float))
-        
-    def test_exceptions(self):
-        def f(x):
-            return x
-
-        self.assertRaises(TypeError, mkufunc, {})
-        self.assertRaises(TypeError, mkufunc([(float,)]), f)
-        self.assertRaises(TypeError, mkufunc([3*(float,)]), f)
-        self.assertRaises(TypeError, mkufunc([{}]), f)
-        self.assertRaises(TypeError, mkufunc([(int, {})]), f)
-        self.assertRaises(ValueError, mkufunc([]), f)
-        
-
-class Math_Tests(unittest.TestCase, Util):
-    
-    def assertFuncsEqual(self, uf, f):
-        x = 0.4376
-        a = uf(x)
-        b = f(x)
-        self.assertClose(a, b)
-        xx = arange(0.1, 0.9, 0.01)
-        a = uf(xx)
-        b = [f(x) for x in xx]
-        self.assertClose(a, b)
-        
-    def test_exp(self):
-        @mkufunc
-        def f(x): return math.exp(x)
-        self.assertFuncsEqual(f, math.exp)
-
-    def test_log(self):
-        @mkufunc
-        def f(x): return math.log(x)
-        self.assertFuncsEqual(f, math.log)
-
-    def test_sqrt(self):
-        @mkufunc
-        def f(x): return math.sqrt(x)
-        self.assertFuncsEqual(f, math.sqrt)
-        
-    def test_cos(self):
-        @mkufunc
-        def f(x): return math.cos(x)
-        self.assertFuncsEqual(f, math.cos)
-        
-    def test_sin(self):
-        @mkufunc
-        def f(x): return math.sin(x)
-        self.assertFuncsEqual(f, math.sin)
-        
-    def test_tan(self):
-        @mkufunc
-        def f(x): return math.tan(x)
-        self.assertFuncsEqual(f, math.tan)
-        
-    def test_acos(self):
-        @mkufunc
-        def f(x): return math.acos(x)
-        self.assertFuncsEqual(f, math.acos)
-
-    def test_asin(self):
-        @mkufunc
-        def f(x): return math.asin(x)
-        self.assertFuncsEqual(f, math.asin)
-        
-    def test_atan(self):
-        @mkufunc
-        def f(x): return math.atan(x)
-        self.assertFuncsEqual(f, math.atan)
-
-    def test_atan2(self):
-        @mkufunc
-        def f(x, y):
-            return math.atan2(x, y)
-
-        self.assertClose(f(4, 5), math.atan2(4, 5))
-        
-        xx = array([1.0, 3.0, -2.4,  3.1, -2.3])
-        yy = array([1.0, 2.0,  7.5, -8.7,  0.0])
-        a = f(xx, yy)
-        b = [math.atan2(x, y) for x, y in zip(xx, yy)]
-        self.assertClose(a, b)
-        
-    def test_arithmetic(self):
-        def f(x):
-            return (4 * x + 2) / (x * x - 7 * x + 1)
-        uf = mkufunc(f)
-        x = arange(0, 2, 0.1)
-        self.assertClose(uf(x), f(x))
-
-
-class Control_Flow_Tests(unittest.TestCase):
-
-    def test_if(self):
-        @mkufunc(int)
-        def f(n):
-            if n < 4:
-                return n
-            else:
-                return n * n
-
-        self.assertEqual(f(3), 3)
-        self.assertEqual(f(4), 16)
-
-    def test_switch(self):
-        @mkufunc(int)
-        def f(n):
-            if n < 4:
-                return n
-            elif n == 4:
-                return 42
-            elif n == 5:
-                return 73
-            else:
-                return n * n
-
-        self.assertEqual(f(3), 3)
-        self.assertEqual(f(4), 42)
-        self.assertEqual(f(5), 73)
-        self.assertEqual(f(6), 36)
-
-    def test_loop(self):
-        @mkufunc(int)
-        def f(n):
-            res = 0
-            for i in xrange(n):
-                res += i*i
-            return res
-
-        self.assertEqual(f(3), 5)
-        self.assertEqual(f(95), 281295)
-
-
-class FreeVariable_Tests(unittest.TestCase, Util):
-
-    def test_const(self):
-        a = 13.6
-        @mkufunc
-        def f(x):
-            return a * x
-        
-        x = arange(0, 1, 0.1)
-        self.assertClose(f(x), a * x)
-
-    def test_const2(self):
-        from math import sin, pi, sqrt
-        @mkufunc
-        def sin_deg(angle):
-            return sin(angle / 180.0 * pi)
-        
-        self.assertClose(sin_deg([0, 30, 45, 60, 90, 180, 270, 360]),
-                         [0, 0.5, 1/sqrt(2), sqrt(3)/2, 1, 0, -1, 0])
-        
-
-class Misc_Tests(unittest.TestCase, Util):
-
-    pass
-
-
-if __name__ == '__main__':
-    unittest.main()

Copied: branches/refactor_fft/scipy/sandbox/mkufunc/mkufunc/test_mkufunc.py (from rev 4510, trunk/scipy/sandbox/mkufunc/mkufunc/test_mkufunc.py)

Deleted: branches/refactor_fft/scipy/sandbox/mkufunc/setup.py
===================================================================
--- trunk/scipy/sandbox/mkufunc/setup.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/sandbox/mkufunc/setup.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -1,15 +0,0 @@
-from setuptools import setup, find_packages
-
-setup(
-    author       = 'Ilan Schnell',
-    author_email = 'ischnell at enthought.com',
-    description  = 'C compiled UFuncs from python source',
-
-    name         = "mkufunc",
-    version      = "0.1",
-    
-    zip_safe = False,
-    package_data = {'': ['*.h']},
-    packages = find_packages(),
-    install_requires = ['scipy >= 0.6.0']
-    )

Copied: branches/refactor_fft/scipy/sandbox/mkufunc/setup.py (from rev 4510, trunk/scipy/sandbox/mkufunc/setup.py)

Modified: branches/refactor_fft/scipy/setup.py
===================================================================
--- branches/refactor_fft/scipy/setup.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/setup.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -19,7 +19,6 @@
     config.add_subpackage('signal')
     config.add_subpackage('sparse')
     config.add_subpackage('special')
-    config.add_subpackage('splinalg')
     config.add_subpackage('stats')
     config.add_subpackage('ndimage')
     config.add_subpackage('stsci')

Modified: branches/refactor_fft/scipy/setupscons.py
===================================================================
--- branches/refactor_fft/scipy/setupscons.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/setupscons.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -18,7 +18,6 @@
     config.add_subpackage('signal')
     config.add_subpackage('sparse')
     config.add_subpackage('special')
-    config.add_subpackage('splinalg')
     config.add_subpackage('stats')
     config.add_subpackage('ndimage')
     config.add_subpackage('stsci')

Copied: branches/refactor_fft/scipy/signal/SConscript (from rev 4510, trunk/scipy/signal/SConscript)

Deleted: branches/refactor_fft/scipy/signal/SConstruct
===================================================================
--- branches/refactor_fft/scipy/signal/SConstruct	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/signal/SConstruct	2008-07-01 04:52:00 UTC (rev 4511)
@@ -1,19 +0,0 @@
-# Last Change: Wed Mar 05 05:00 PM 2008 J
-# vim:syntax=python
-from os.path import join
-
-from numpy.distutils.misc_util import get_numpy_include_dirs
-from numscons import GetNumpyEnvironment
-
-env = GetNumpyEnvironment(ARGUMENTS)
-
-env.AppendUnique(CPPPATH = get_numpy_include_dirs())
-env.NumpyPythonExtension('sigtools', 
-                         source = ['sigtoolsmodule.c',\
-                                   'firfilter.c', \
-                                   'medianfilter.c'])
-
-env.NumpyPythonExtension('spline', 
-                         source = ['splinemodule.c', 'S_bspline_util.c', 
-                                   'D_bspline_util.c', 'C_bspline_util.c', 
-                                   'Z_bspline_util.c','bspline_util.c'])

Copied: branches/refactor_fft/scipy/signal/SConstruct (from rev 4510, trunk/scipy/signal/SConstruct)

Modified: branches/refactor_fft/scipy/signal/signaltools.py
===================================================================
--- branches/refactor_fft/scipy/signal/signaltools.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/signal/signaltools.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -1446,3 +1446,35 @@
         olddims = vals[:axis] + [0] + vals[axis:]
         ret = transpose(ret,tuple(olddims))
         return ret
+
+def filtfilt(b,a,x):
+    # FIXME:  For now only accepting 1d arrays
+    ntaps=max(len(a),len(b))
+    edge=ntaps*3
+
+    if x.ndim != 1:
+        raise ValueError, "filtfilt only accepts 1-dimensional arrays."
+
+    # x must be bigger than edge
+    if x.size < edge:
+        raise ValueError, "Input vector needs to be bigger than 3 * max(len(a), len(b))."
+
+    if len(a) < ntaps:
+        a=r_[a,zeros(len(b)-len(a))]
+
+    if len(b) < ntaps:
+        b=r_[b,zeros(len(a)-len(b))]
+
+    zi=lfilter_zi(b,a)
+
+    # Grow the signal to have edges for stabilizing
+    # the filter with inverted replicas of the signal
+    s=r_[2*x[0]-x[edge:1:-1],x,2*x[-1]-x[-1:-edge:-1]]
+    # for a single pass only one of the extremes is needed;
+    # both are needed for filtfilt
+
+    (y,zf)=lfilter(b,a,s,-1,zi*s[0])
+
+    (y,zf)=lfilter(b,a,flipud(y),-1,zi*y[-1])
+
+    return flipud(y[edge-1:-edge+1])

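The filtfilt added above runs lfilter forward and then backward over an edge-padded copy of the signal, cancelling the filter's phase delay. A quick smoke test of the new function (assuming this scipy.signal build exposes it along with the lfilter_zi helper it relies on):

    import numpy as np
    from scipy.signal import butter, filtfilt

    # noisy sine wave; forward-backward filtering leaves no phase lag
    t = np.arange(0, 1, 1e-3)
    x = np.sin(2 * np.pi * 5 * t) + 0.1 * np.random.randn(t.size)
    b, a = butter(3, 0.05)     # 3rd-order low-pass Butterworth
    y = filtfilt(b, a, x)
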
Copied: branches/refactor_fft/scipy/sparse/.svnignore (from rev 4510, trunk/scipy/sparse/.svnignore)

Modified: branches/refactor_fft/scipy/sparse/base.py
===================================================================
--- branches/refactor_fft/scipy/sparse/base.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/sparse/base.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -195,13 +195,14 @@
         """Return this matrix in a given sparse format
 
         Parameters
-        ==========
-            - format : desired sparse matrix format
-              - If format is None then no conversion is performed
-              - Other possible values include:
-                -  "csr" for csr_matrix format
-                -  "csc" for csc_matrix format
-                -  "dok" for dok_matrix format and so on
+        ----------
+        format : {string, None}
+            desired sparse matrix format
+                - None for no format conversion
+                - "csr" for csr_matrix format
+                - "csc" for csc_matrix format
+                - "lil" for lil_matrix format
+                - "dok" for dok_matrix format and so on
 
         """
 

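The asformat docstring rewrite above documents behaviour along these lines (a sketch against the scipy.sparse API of this branch):

    from scipy.sparse import lil_matrix

    A = lil_matrix((3, 3))
    A[0, 1] = 2.0
    B = A.asformat('csr')   # converted to csr_matrix
    A is A.asformat(None)   # True: None means no conversion
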
Modified: branches/refactor_fft/scipy/sparse/compressed.py
===================================================================
--- branches/refactor_fft/scipy/sparse/compressed.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/sparse/compressed.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -251,11 +251,13 @@
     def __truediv__(self,other):
         if isscalarlike(other):
             return self * (1./other)
+
         elif isspmatrix(other):
-            if (other.shape != self.shape):
-                raise ValueError, "inconsistent shapes"
+            if other.shape != self.shape:
+                raise ValueError('inconsistent shapes')
 
             return self._binopt(other,'_eldiv_')
+
         else:
             raise NotImplementedError
 
@@ -263,11 +265,11 @@
     def multiply(self, other):
         """Point-wise multiplication by another matrix
         """
-        if (other.shape != self.shape):
-            raise ValueError, "inconsistent shapes"
+        if other.shape != self.shape:
+            raise ValueError('inconsistent shapes')
 
         if isdense(other):
-            return multiply(self.todense(),other)
+            return numpy.multiply(self.todense(),other)
         else:
             other = self.__class__(other)
             return self._binopt(other,'_elmul_')

Modified: branches/refactor_fft/scipy/sparse/construct.py
===================================================================
--- branches/refactor_fft/scipy/sparse/construct.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/sparse/construct.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -27,30 +27,32 @@
 
 
 def spdiags(data, diags, m, n, format=None):
-    """Return a sparse matrix given its diagonals.
+    """Return a sparse matrix from diagonals.
 
     Parameters
     ----------
-        - data   : matrix whose rows contain the diagonal values
-        - diags  : diagonals to set
-            - k = 0 - the main diagonal
-            - k > 0 - the k-th upper diagonal
-            - k < 0 - the k-th lower diagonal
-        - m, n   : dimensions of the result
-        - format : format of the result (e.g. "csr")
-            -  By default (format=None) an appropriate sparse matrix
-               format is returned.  This choice is subject to change.
+    data   : array_like
+        matrix diagonals stored row-wise
+    diags  : diagonals to set
+        - k = 0  the main diagonal
+        - k > 0  the k-th upper diagonal
+        - k < 0  the k-th lower diagonal
+    m, n : int
+        shape of the result
+    format : string
+        format of the result (e.g. "csr")
+        By default (format=None) an appropriate sparse matrix
+        format is returned.  This choice is subject to change.
 
     See Also
     --------
-        The dia_matrix class which implements the DIAgonal format.
+    The dia_matrix class which implements the DIAgonal format.
 
     Example
     -------
 
-    >>> data = array([[1,2,3,4]]).repeat(3,axis=0)
+    >>> data = array([[1,2,3,4],[1,2,3,4],[1,2,3,4]])
     >>> diags = array([0,-1,2])
-    >>> spdiags(data,diags,4,4).todense()
+    >>> spdiags(data, diags, 4, 4).todense()
     matrix([[1, 0, 3, 0],
             [1, 2, 0, 4],
             [0, 2, 3, 0],
@@ -87,8 +89,12 @@
 
     Parameters
     ----------
-    A,B    : dense or sparse matrices
-    format : format of the result (e.g. "csr")
+    A
+        matrix
+    B
+        matrix
+    format : string
+        format of the result (e.g. "csr")
 
     Returns
     -------
@@ -169,15 +175,19 @@
 
     Parameters
     ----------
-    A,B    : square dense or sparse matrices
-    format : format of the result (e.g. "csr")
+    A
+        square matrix
+    B
+        square matrix
+    format : string
+        format of the result (e.g. "csr")
 
     Returns
-    =======
-        kronecker sum in a sparse matrix format
+    -------
+    kronecker sum in a sparse matrix format
 
     Examples
-    ========
+    --------
 
 
     """
@@ -206,7 +216,8 @@
 
     blocks
         sequence of sparse matrices with compatible shapes
-    format : sparse format of the result (e.g. "csr")
+    format : string
+        sparse format of the result (e.g. "csr")
         by default an appropriate sparse matrix format is returned.
         This choice is subject to change.
 
@@ -232,7 +243,8 @@
 
     blocks
         sequence of sparse matrices with compatible shapes
-    format : sparse format of the result (e.g. "csr")
+    format : string
+        sparse format of the result (e.g. "csr")
         by default an appropriate sparse matrix format is returned.
         This choice is subject to change.
 

Copied: branches/refactor_fft/scipy/sparse/linalg/dsolve/SConscript (from rev 4510, trunk/scipy/sparse/linalg/dsolve/SConscript)

Deleted: branches/refactor_fft/scipy/sparse/linalg/dsolve/SConstruct
===================================================================
--- branches/refactor_fft/scipy/sparse/linalg/dsolve/SConstruct	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/sparse/linalg/dsolve/SConstruct	2008-07-01 04:52:00 UTC (rev 4511)
@@ -1,47 +0,0 @@
-from os.path import join as pjoin
-import sys
-
-from numpy.distutils.misc_util import get_numpy_include_dirs
-
-from numscons import GetNumpyEnvironment
-from numscons import CheckF77LAPACK
-from numscons import write_info
-
-env = GetNumpyEnvironment(ARGUMENTS)
-
-#=======================
-# Starting Configuration
-#=======================
-config = env.NumpyConfigure(custom_tests = {'CheckLapack' : CheckF77LAPACK})
-
-#-----------------
-# Checking Lapack
-#-----------------
-st = config.CheckLapack()
-if not st:
-    raise RuntimeError("no lapack found, necessary for dsolve module")
-
-config.Finish()
-write_info(env)
-
-# Build superlu lib
-superlu_env = env.Clone()
-superlu_def = {}
-if sys.platform == 'win32':
-    superlu_def['NO_TIMER'] = 1
-superlu_def['USE_VENDOR_BLAS'] = 2
-superlu_env.Append(CPPDEFINES = superlu_def)
-
-superlu_src = superlu_env.NumpyGlob(pjoin('SuperLU', 'SRC', '*.c'))
-superlu = superlu_env.NumpyStaticExtLibrary('superlu_src', source = superlu_src)
-
-# Build python extensions
-pyenv = env.Clone()
-pyenv.Append(CPPPATH = [get_numpy_include_dirs(), env['src_dir']])
-pyenv.Prepend(LIBS = superlu)
-common_src = ['_superlu_utils.c', '_superluobject.c']
-
-for prec in ['z', 'd', 'c', 's']:
-    pyenv.NumpyPythonExtension('_%ssuperlu' % prec, 
-                               source = common_src + \
-                                        ['_%ssuperlumodule.c' % prec]) 

Copied: branches/refactor_fft/scipy/sparse/linalg/dsolve/SConstruct (from rev 4510, trunk/scipy/sparse/linalg/dsolve/SConstruct)

Copied: branches/refactor_fft/scipy/sparse/linalg/dsolve/umfpack/SConscript (from rev 4510, trunk/scipy/sparse/linalg/dsolve/umfpack/SConscript)

Deleted: branches/refactor_fft/scipy/sparse/linalg/dsolve/umfpack/SConstruct
===================================================================
--- branches/refactor_fft/scipy/sparse/linalg/dsolve/umfpack/SConstruct	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/sparse/linalg/dsolve/umfpack/SConstruct	2008-07-01 04:52:00 UTC (rev 4511)
@@ -1,35 +0,0 @@
-from os.path import join as pjoin
-
-from numpy.distutils.misc_util import get_numpy_include_dirs
-
-from numscons import GetNumpyEnvironment
-from numscons import CheckF77BLAS, CheckF77Clib, NumpyCheckLibAndHeader
-from numscons import write_info
-
-env = GetNumpyEnvironment(ARGUMENTS)
-
-#=======================
-# Starting Configuration
-#=======================
-config = env.NumpyConfigure(custom_tests = 
-    {'CheckBLAS' : CheckF77BLAS,
-     'CheckF77Clib' : CheckF77Clib,
-     'NumpyCheckLibAndHeader' : NumpyCheckLibAndHeader})
-
-#-----------------
-# Checking Lapack
-#-----------------
-st = config.CheckBLAS()
-if not st:
-    raise RuntimeError("no blas found, necessary for umfpack module")
-
-has_umfpack = config.NumpyCheckLibAndHeader(
-    'umfpack', None, 'umfpack.h', section = 'umfpack', autoadd = 1)
-config.Finish()
-write_info(env)
-
-if has_umfpack:
-    env.Append(SWIGFLAGS = '-python')
-    env.Append(SWIGFLAGS = '$_CPPINCFLAGS')
-    env.Append(CPPPATH = get_numpy_include_dirs())
-    env.NumpyPythonExtension('__umfpack', source = 'umfpack.i') 

Copied: branches/refactor_fft/scipy/sparse/linalg/dsolve/umfpack/SConstruct (from rev 4510, trunk/scipy/sparse/linalg/dsolve/umfpack/SConstruct)

Copied: branches/refactor_fft/scipy/sparse/linalg/eigen/arpack/SConscript (from rev 4510, trunk/scipy/sparse/linalg/eigen/arpack/SConscript)

Deleted: branches/refactor_fft/scipy/sparse/linalg/eigen/arpack/SConstruct
===================================================================
--- branches/refactor_fft/scipy/sparse/linalg/eigen/arpack/SConstruct	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/sparse/linalg/eigen/arpack/SConstruct	2008-07-01 04:52:00 UTC (rev 4511)
@@ -1,47 +0,0 @@
-from os.path import join as pjoin
-
-from numpy.distutils.misc_util import get_numpy_include_dirs
-
-from numscons import GetNumpyEnvironment
-from numscons import CheckF77LAPACK, CheckF77Clib
-from numscons import write_info
-
-env = GetNumpyEnvironment(ARGUMENTS)
-
-#=======================
-# Starting Configuration
-#=======================
-config = env.NumpyConfigure(custom_tests = {'CheckLAPACK' : CheckF77LAPACK,
-                                            'CheckF77Clib' : CheckF77Clib})
-
-env.Tool('numpyf2py')
-#-----------------
-# Checking Lapack
-#-----------------
-st = config.CheckF77Clib()
-st = config.CheckLAPACK(autoadd = 1)
-if not st:
-    raise RuntimeError("no lapack found, necessary for arpack module")
-
-config.Finish()
-write_info(env)
-
-# Build arpack
-arpack_src = env.NumpyGlob(pjoin('ARPACK', 'SRC', '*.f'))
-arpack_src += env.NumpyGlob(pjoin('ARPACK', 'UTIL', '*.f'))
-arpack_src += env.NumpyGlob(pjoin('ARPACK', 'LAPACK', '*.f'))
-
-src = [str(s) for s in arpack_src]
-
-env.AppendUnique(CPPPATH = pjoin('ARPACK', 'SRC'))
-env.AppendUnique(F77PATH = pjoin(env['src_dir'], 'ARPACK', 'SRC'))
-env.AppendUnique(LIBPATH = env['build_dir'])
-arpack_lib = env.NumpyStaticExtLibrary('arpack', source = src)
-
-# Build _arpack extension
-env.AppendUnique(CPPPATH = env['F2PYINCLUDEDIR'])
-env.AppendUnique(CPPPATH = get_numpy_include_dirs())
-
-env.NumpyFromFTemplate('arpack.pyf', 'arpack.pyf.src')
-env.Prepend(LIBS = 'arpack')
-env.NumpyPythonExtension('_arpack', 'arpack.pyf')

Copied: branches/refactor_fft/scipy/sparse/linalg/eigen/arpack/SConstruct (from rev 4510, trunk/scipy/sparse/linalg/eigen/arpack/SConstruct)

Modified: branches/refactor_fft/scipy/sparse/linalg/eigen/arpack/arpack.py
===================================================================
--- branches/refactor_fft/scipy/sparse/linalg/eigen/arpack/arpack.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/sparse/linalg/eigen/arpack/arpack.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -62,7 +62,7 @@
 
     Parameters
     ----------
-    A : A : matrix, array, or object with matvec(x) method
+    A : matrix, array, or object with matvec(x) method
         An N x N matrix, array, or an object with matvec(x) method to perform
         the matrix vector product A * x.  The sparse matrix formats
         in scipy.sparse are appropriate for A.
@@ -76,8 +76,8 @@
         Array of k eigenvalues
 
     v : array
-       An array of k eigenvectors
-       The v[i] is the eigenvector corresponding to the eigenvector w[i]
+        An array of k eigenvectors
+        v[i] is the eigenvector corresponding to the eigenvalue w[i]
 
     Other Parameters
     ----------------
@@ -460,12 +460,16 @@
         else:
             break
 
-    if  info < -1 :
-        raise RuntimeError("Error info=%d in arpack"%info)
+    if info < -1 :
+        raise RuntimeError("Error info=%d in arpack" % info)
         return None
-    if info == -1:
-        warnings.warn("Maximum number of iterations taken: %s"%iparam[2])
 
+    if info == 1:
+        warnings.warn("Maximum number of iterations taken: %s" % iparam[2])
+
+    if iparam[4] < k:
+        warnings.warn("Only %d/%d eigenvectors converged" % (iparam[4], k))
+
     # now extract eigenvalues and (optionally) eigenvectors
     rvec = return_eigenvectors
     ierr = 0

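The reworked error handling above now warns, rather than proceeding silently, when ARPACK hits the iteration limit or converges fewer than k eigenpairs. Callers who prefer hard failures can promote those warnings to errors, e.g. (a sketch, assuming the eigen() entry point this module exposes):

    import warnings
    import numpy as np
    from scipy.sparse.linalg.eigen.arpack import eigen

    # turn the new ARPACK convergence warnings into exceptions
    warnings.filterwarnings('error', message='Maximum number of iterations.*')
    warnings.filterwarnings('error', message='Only .*eigenvectors converged')

    A = np.diag(np.arange(1.0, 101.0))   # simple SPD test matrix
    w, v = eigen(A, k=6)                 # raises if ARPACK failed to converge
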
Modified: branches/refactor_fft/scipy/sparse/linalg/eigen/arpack/speigs.py
===================================================================
--- branches/refactor_fft/scipy/sparse/linalg/eigen/arpack/speigs.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/sparse/linalg/eigen/arpack/speigs.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -191,7 +191,7 @@
 def ARPACK_iteration(matvec, sigma_solve, n, bmat, which, nev, tol, ncv, mode):
     ncv, maxitr = check_init(n, nev, ncv)
     ipntr, d, resid, workd, workl, v = init_workspaces(n,nev,ncv)
-    init_debug()
+    #init_debug()
     ishfts = 1         # Some random arpack parameter
     # Some random arpack parameter (I think it tells ARPACK to solve the
     # general eigenproblem using shift-invert

Modified: branches/refactor_fft/scipy/sparse/linalg/eigen/lobpcg/lobpcg.py
===================================================================
--- branches/refactor_fft/scipy/sparse/linalg/eigen/lobpcg/lobpcg.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/sparse/linalg/eigen/lobpcg/lobpcg.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -5,20 +5,20 @@
 
 License: BSD
 
-(c) Robert Cimrman, Andrew Knyazev
+Authors: Robert Cimrman, Andrew Knyazev
 
 Examples in tests directory contributed by Nils Wagner.
 """
 
-import types
 from warnings import warn
 
-import numpy as nm
-import scipy as sc
-import scipy.sparse as sp
-import scipy.io as io
+import numpy as np
+import scipy as sp
+
 from scipy.sparse.linalg import aslinearoperator, LinearOperator
 
+__all__ = ['lobpcg']
+
 ## try:
 ##     from symeig import symeig
 ## except:
@@ -28,7 +28,7 @@
     import scipy.linalg as sla
     import scipy.lib.lapack as ll
     if select is None:
-        if nm.iscomplexobj( mtxA ):
+        if np.iscomplexobj( mtxA ):
             if mtxB is None:
                 fun = ll.get_lapack_funcs( ['heev'], arrays = (mtxA,) )[0]
             else:
@@ -51,7 +51,7 @@
     else:
         out = sla.eig( mtxA, mtxB, right = eigenvectors )
         w = out[0]
-        ii = nm.argsort( w )
+        ii = np.argsort( w )
         w = w[slice( *select )]
         if eigenvectors:
             v = out[1][:,ii]
@@ -66,7 +66,8 @@
     raw_input()
 
 def save( ar, fileName ):
-    io.write_array( fileName, ar, precision = 8 )
+    from scipy.io import write_array
+    write_array( fileName, ar, precision = 8 )
 
 ##
 # 21.05.2007, c
@@ -78,7 +79,7 @@
     if ar.ndim == 2:
         return ar
     else: # Assume 1!
-        aux = nm.array( ar, copy = False )
+        aux = np.array( ar, copy = False )
         aux.shape = (ar.shape[0], 1)
         return aux
 
@@ -90,8 +91,9 @@
     Example
     -------
 
-    A = makeOperator( arrayA, (n, n) )
-    vectorB = A( vectorX )
+    >>> A = makeOperator( arrayA, (n, n) )
+    >>> vectorB = A( vectorX )
+
     """
     if operatorInput is None:
         def ident(x):
@@ -111,10 +113,10 @@
 
 def applyConstraints( blockVectorV, factYBY, blockVectorBY, blockVectorY ):
     """Internal. Changes blockVectorV in place."""
-    gramYBV = sc.dot( blockVectorBY.T, blockVectorV )
+    gramYBV = sp.dot( blockVectorBY.T, blockVectorV )
     import scipy.linalg as sla
     tmp = sla.cho_solve( factYBY, gramYBV )
-    blockVectorV -= sc.dot( blockVectorY, tmp )
+    blockVectorV -= sp.dot( blockVectorY, tmp )
 
 
 def b_orthonormalize( B, blockVectorV,
@@ -126,22 +128,22 @@
             blockVectorBV = B( blockVectorV )
         else:
             blockVectorBV = blockVectorV # Shared data!!!
-    gramVBV = sc.dot( blockVectorV.T, blockVectorBV )
+    gramVBV = sp.dot( blockVectorV.T, blockVectorBV )
     gramVBV = sla.cholesky( gramVBV )
     sla.inv( gramVBV, overwrite_a = True )
     # gramVBV is now R^{-1}.
-    blockVectorV = sc.dot( blockVectorV, gramVBV )
+    blockVectorV = sp.dot( blockVectorV, gramVBV )
     if B is not None:
-        blockVectorBV = sc.dot( blockVectorBV, gramVBV )
+        blockVectorBV = sp.dot( blockVectorBV, gramVBV )
 
     if retInvR:
         return blockVectorV, blockVectorBV, gramVBV
     else:
         return blockVectorV, blockVectorBV
 
-def lobpcg( blockVectorX, A,
-            B = None, M = None, blockVectorY = None,
-            residualTolerance = None, maxIterations = 20,
+def lobpcg( A, X,  
+            B=None, M=None, Y=None,
+            tol= None, maxiter=20,
             largest = True, verbosityLevel = 0,
             retLambdaHistory = False, retResidualNormsHistory = False ):
     """Solve symmetric partial eigenproblems with optional preconditioning
@@ -149,23 +151,24 @@
     This function implements the Locally Optimal Block Preconditioned
     Conjugate Gradient Method (LOBPCG).
 
-    TODO write in terms of Ax=lambda B x
-
+    
     Parameters
     ----------
-    blockVectorX : array_like
-        initial approximation to eigenvectors shape=(n,blockSize)
-    A : {dense matrix, sparse matrix, LinearOperator}
-        the linear operator of the problem, usually a sparse matrix
-        often called the "stiffness matrix"
+    A : {sparse matrix, dense matrix, LinearOperator}
+        The symmetric linear operator of the problem, usually a 
+        sparse matrix.  Often called the "stiffness matrix".
+    X : array_like
+        Initial approximation to the k eigenvectors. If A has 
+        shape=(n,n) then X should have shape shape=(n,k).
 
     Returns
     -------
-    (lambda,blockVectorV) : tuple of arrays
-        blockVectorX and lambda are computed blockSize eigenpairs, where
-        blockSize=size(blockVectorX,2) for the initial guess blockVectorX
-        if it is full rank.
+    w : array
+        Array of k eigenvalues
+    v : array
+        An array of k eigenvectors.  V has the same shape as X.
 
+
     Optional Parameters
     -------------------
     B : {dense matrix, sparse matrix, LinearOperator}
@@ -175,18 +178,19 @@
     M : {dense matrix, sparse matrix, LinearOperator}
         preconditioner to A; by default M = Identity
         M should approximate the inverse of A
-    blockVectorY : array_like
+    Y : array_like
         n-by-sizeY matrix of constraints, sizeY < n
         The iterations will be performed in the B-orthogonal complement
-        of the column-space of blockVectorY. blockVectorY must be full rank.
+        of the column-space of Y. Y must be full rank.
 
     Other Parameters
     ----------------
-    residualTolerance : scalar
-        solver tolerance. default: residualTolerance=n*sqrt(eps)
-    maxIterations: integer
+    tol : scalar
+        Solver tolerance (stopping criterion)
+        by default: tol=n*sqrt(eps)
+    maxiter: integer
         maximum number of iterations
-        by default: maxIterations=min(n,20)
+        by default: maxiter=min(n,20)
     largest : boolean
         when True, solve for the largest eigenvalues, otherwise the smallest
     verbosityLevel : integer
@@ -200,13 +204,18 @@
     Notes
     -----
     If both retLambdaHistory and retResidualNormsHistory are True, the
-    return tuple has the following format:
-        (lambda, blockVectorV, lambda history, residual norms history)
+    return tuple has the following format
+    (lambda, V, lambda history, residual norms history)
 
     """
     failureFlag = True
     import scipy.linalg as sla
 
+    blockVectorX = X
+    blockVectorY = Y
+    residualTolerance = tol
+    maxIterations = maxiter
+
     if blockVectorY is not None:
         sizeY = blockVectorY.shape[1]
     else:
@@ -214,11 +223,11 @@
 
     # Block size.
     if len(blockVectorX.shape) != 2:
-        raise ValueError('expected rank-2 array for argument blockVectorX')
+        raise ValueError('expected rank-2 array for argument X')
 
     n, sizeX = blockVectorX.shape
     if sizeX > n:
-        raise ValueError('blockVectorX column dimension exceeds the row dimension')
+        raise ValueError('X column dimension exceeds the row dimension')
 
     A = makeOperator(A, (n,n))
     B = makeOperator(B, (n,n))
@@ -236,10 +245,10 @@
         else:
             lohi = (1, sizeX)
 
-        A_dense = A(nm.eye(n))
+        A_dense = A(np.eye(n))
 
         if B is not None:
-            B_dense = B(nm.eye(n))
+            B_dense = B(np.eye(n))
             _lambda, eigBlockVector = symeig(A_dense, B_dense, select=lohi )
         else:
             _lambda, eigBlockVector = symeig(A_dense, select=lohi )
@@ -248,7 +257,7 @@
 
 
     if residualTolerance is None:
-        residualTolerance = nm.sqrt( 1e-15 ) * n
+        residualTolerance = np.sqrt( 1e-15 ) * n
 
     maxIterations = min( n, maxIterations )
 
@@ -283,7 +292,7 @@
             blockVectorBY = blockVectorY
 
         # gramYBY is a dense array.
-        gramYBY = sc.dot( blockVectorY.T, blockVectorBY )
+        gramYBY = sp.dot( blockVectorY.T, blockVectorBY )
         try:
             # gramYBY is a Cholesky factor from now on...
             gramYBY = sla.cho_factor( gramYBY )
@@ -299,32 +308,32 @@
     ##
     # Compute the initial Ritz vectors: solve the eigenproblem.
     blockVectorAX = A( blockVectorX )
-    gramXAX = sc.dot( blockVectorX.T, blockVectorAX )
+    gramXAX = sp.dot( blockVectorX.T, blockVectorAX )
     # gramXBX is X^T * X.
-    gramXBX = sc.dot( blockVectorX.T, blockVectorX )
+    gramXBX = sp.dot( blockVectorX.T, blockVectorX )
 
     _lambda, eigBlockVector = symeig( gramXAX )
-    ii = nm.argsort( _lambda )[:sizeX]
+    ii = np.argsort( _lambda )[:sizeX]
     if largest:
         ii = ii[::-1]
     _lambda = _lambda[ii]
 
-    eigBlockVector = nm.asarray( eigBlockVector[:,ii] )
-    blockVectorX  = sc.dot( blockVectorX,  eigBlockVector )
-    blockVectorAX = sc.dot( blockVectorAX, eigBlockVector )
+    eigBlockVector = np.asarray( eigBlockVector[:,ii] )
+    blockVectorX  = sp.dot( blockVectorX,  eigBlockVector )
+    blockVectorAX = sp.dot( blockVectorAX, eigBlockVector )
     if B is not None:
-        blockVectorBX = sc.dot( blockVectorBX, eigBlockVector )
+        blockVectorBX = sp.dot( blockVectorBX, eigBlockVector )
 
     ##
     # Active index set.
-    activeMask = nm.ones( (sizeX,), dtype = nm.bool )
+    activeMask = np.ones( (sizeX,), dtype = np.bool )
 
     lambdaHistory = [_lambda]
     residualNormsHistory = []
 
     previousBlockSize = sizeX
-    ident  = nm.eye( sizeX, dtype = A.dtype )
-    ident0 = nm.eye( sizeX, dtype = A.dtype )
+    ident  = np.eye( sizeX, dtype = A.dtype )
+    ident0 = np.eye( sizeX, dtype = A.dtype )
 
     ##
     # Main iteration loop.
@@ -332,15 +341,15 @@
         if verbosityLevel > 0:
             print 'iteration %d' %  iterationNumber
 
-        aux = blockVectorBX * _lambda[nm.newaxis,:]
+        aux = blockVectorBX * _lambda[np.newaxis,:]
         blockVectorR = blockVectorAX - aux
 
-        aux = nm.sum( blockVectorR.conjugate() * blockVectorR, 0 )
-        residualNorms = nm.sqrt( aux )
+        aux = np.sum( blockVectorR.conjugate() * blockVectorR, 0 )
+        residualNorms = np.sqrt( aux )
 
         residualNormsHistory.append( residualNorms )
 
-        ii = nm.where( residualNorms > residualTolerance, True, False )
+        ii = np.where( residualNorms > residualTolerance, True, False )
         activeMask = activeMask & ii
         if verbosityLevel > 2:
             print activeMask
@@ -348,7 +357,7 @@
         currentBlockSize = activeMask.sum()
         if currentBlockSize != previousBlockSize:
             previousBlockSize = currentBlockSize
-            ident = nm.eye( currentBlockSize, dtype = A.dtype )
+            ident = np.eye( currentBlockSize, dtype = A.dtype )
 
         if currentBlockSize == 0:
             failureFlag = False # All eigenpairs converged.
@@ -390,44 +399,44 @@
             aux = b_orthonormalize( B, activeBlockVectorP,
                                     activeBlockVectorBP, retInvR = True )
             activeBlockVectorP, activeBlockVectorBP, invR = aux
-            activeBlockVectorAP = sc.dot( activeBlockVectorAP, invR )
+            activeBlockVectorAP = sp.dot( activeBlockVectorAP, invR )
 
         ##
         # Perform the Rayleigh Ritz Procedure:
         # Compute symmetric Gram matrices:
 
-        xaw = sc.dot( blockVectorX.T,       activeBlockVectorAR )
-        waw = sc.dot( activeBlockVectorR.T, activeBlockVectorAR )
-        xbw = sc.dot( blockVectorX.T,       activeBlockVectorBR )
+        xaw = sp.dot( blockVectorX.T,       activeBlockVectorAR )
+        waw = sp.dot( activeBlockVectorR.T, activeBlockVectorAR )
+        xbw = sp.dot( blockVectorX.T,       activeBlockVectorBR )
 
         if iterationNumber > 0:
-            xap = sc.dot( blockVectorX.T,       activeBlockVectorAP )
-            wap = sc.dot( activeBlockVectorR.T, activeBlockVectorAP )
-            pap = sc.dot( activeBlockVectorP.T, activeBlockVectorAP )
-            xbp = sc.dot( blockVectorX.T,       activeBlockVectorBP )
-            wbp = sc.dot( activeBlockVectorR.T, activeBlockVectorBP )
+            xap = sp.dot( blockVectorX.T,       activeBlockVectorAP )
+            wap = sp.dot( activeBlockVectorR.T, activeBlockVectorAP )
+            pap = sp.dot( activeBlockVectorP.T, activeBlockVectorAP )
+            xbp = sp.dot( blockVectorX.T,       activeBlockVectorBP )
+            wbp = sp.dot( activeBlockVectorR.T, activeBlockVectorBP )
 
-            gramA = nm.bmat( [[nm.diag( _lambda ),   xaw,  xap],
+            gramA = np.bmat( [[np.diag( _lambda ),   xaw,  xap],
                               [             xaw.T,   waw,  wap],
                               [             xap.T, wap.T,  pap]] )
 
-            gramB = nm.bmat( [[ident0,    xbw,    xbp],
+            gramB = np.bmat( [[ident0,    xbw,    xbp],
                               [ xbw.T,  ident,    wbp],
                               [ xbp.T,  wbp.T,  ident]] )
         else:
-            gramA = nm.bmat( [[nm.diag( _lambda ),  xaw],
+            gramA = np.bmat( [[np.diag( _lambda ),  xaw],
                               [             xaw.T,  waw]] )
-            gramB = nm.bmat( [[ident0,    xbw],
+            gramB = np.bmat( [[ident0,    xbw],
                               [ xbw.T,  ident]] )
 
         try:
-            assert nm.allclose( gramA.T, gramA )
+            assert np.allclose( gramA.T, gramA )
         except:
             print gramA.T - gramA
             raise
 
         try:
-            assert nm.allclose( gramB.T, gramB )
+            assert np.allclose( gramB.T, gramB )
         except:
             print gramB.T - gramB
             raise
@@ -440,23 +449,23 @@
         # Solve the generalized eigenvalue problem.
 #        _lambda, eigBlockVector = la.eig( gramA, gramB )
         _lambda, eigBlockVector = symeig( gramA, gramB )
-        ii = nm.argsort( _lambda )[:sizeX]
+        ii = np.argsort( _lambda )[:sizeX]
         if largest:
             ii = ii[::-1]
         if verbosityLevel > 10:
             print ii
 
-        _lambda = _lambda[ii].astype( nm.float64 )
-        eigBlockVector = nm.asarray( eigBlockVector[:,ii].astype( nm.float64 ) )
+        _lambda = _lambda[ii].astype( np.float64 )
+        eigBlockVector = np.asarray( eigBlockVector[:,ii].astype( np.float64 ) )
 
         lambdaHistory.append( _lambda )
 
         if verbosityLevel > 10:
             print 'lambda:', _lambda
 ##         # Normalize eigenvectors!
-##         aux = nm.sum( eigBlockVector.conjugate() * eigBlockVector, 0 )
-##         eigVecNorms = nm.sqrt( aux )
-##         eigBlockVector = eigBlockVector / eigVecNorms[nm.newaxis,:]
+##         aux = np.sum( eigBlockVector.conjugate() * eigBlockVector, 0 )
+##         eigVecNorms = np.sqrt( aux )
+##         eigBlockVector = eigBlockVector / eigVecNorms[np.newaxis,:]
 #        eigBlockVector, aux = b_orthonormalize( B, eigBlockVector )
 
         if verbosityLevel > 10:
@@ -470,21 +479,21 @@
             eigBlockVectorR = eigBlockVector[sizeX:sizeX+currentBlockSize]
             eigBlockVectorP = eigBlockVector[sizeX+currentBlockSize:]
 
-            pp  = sc.dot( activeBlockVectorR, eigBlockVectorR )
-            pp += sc.dot( activeBlockVectorP, eigBlockVectorP )
+            pp  = sp.dot( activeBlockVectorR, eigBlockVectorR )
+            pp += sp.dot( activeBlockVectorP, eigBlockVectorP )
 
-            app  = sc.dot( activeBlockVectorAR, eigBlockVectorR )
-            app += sc.dot( activeBlockVectorAP, eigBlockVectorP )
+            app  = sp.dot( activeBlockVectorAR, eigBlockVectorR )
+            app += sp.dot( activeBlockVectorAP, eigBlockVectorP )
 
-            bpp  = sc.dot( activeBlockVectorBR, eigBlockVectorR )
-            bpp += sc.dot( activeBlockVectorBP, eigBlockVectorP )
+            bpp  = sp.dot( activeBlockVectorBR, eigBlockVectorR )
+            bpp += sp.dot( activeBlockVectorBP, eigBlockVectorP )
         else:
             eigBlockVectorX = eigBlockVector[:sizeX]
             eigBlockVectorR = eigBlockVector[sizeX:]
 
-            pp  = sc.dot( activeBlockVectorR,  eigBlockVectorR )
-            app = sc.dot( activeBlockVectorAR, eigBlockVectorR )
-            bpp = sc.dot( activeBlockVectorBR, eigBlockVectorR )
+            pp  = sp.dot( activeBlockVectorR,  eigBlockVectorR )
+            app = sp.dot( activeBlockVectorAR, eigBlockVectorR )
+            bpp = sp.dot( activeBlockVectorBR, eigBlockVectorR )
 
         if verbosityLevel > 10:
             print pp
@@ -492,17 +501,17 @@
             print bpp
             pause()
 
-        blockVectorX  = sc.dot( blockVectorX, eigBlockVectorX )  + pp
-        blockVectorAX = sc.dot( blockVectorAX, eigBlockVectorX ) + app
-        blockVectorBX = sc.dot( blockVectorBX, eigBlockVectorX ) + bpp
+        blockVectorX  = sp.dot( blockVectorX, eigBlockVectorX )  + pp
+        blockVectorAX = sp.dot( blockVectorAX, eigBlockVectorX ) + app
+        blockVectorBX = sp.dot( blockVectorBX, eigBlockVectorX ) + bpp
 
         blockVectorP, blockVectorAP, blockVectorBP = pp, app, bpp
 
-    aux = blockVectorBX * _lambda[nm.newaxis,:]
+    aux = blockVectorBX * _lambda[np.newaxis,:]
     blockVectorR = blockVectorAX - aux
 
-    aux = nm.sum( blockVectorR.conjugate() * blockVectorR, 0 )
-    residualNorms = nm.sqrt( aux )
+    aux = np.sum( blockVectorR.conjugate() * blockVectorR, 0 )
+    residualNorms = np.sqrt( aux )
 
 
     if verbosityLevel > 0:
@@ -522,31 +531,31 @@
 
 ###########################################################################
 if __name__ == '__main__':
-    from scipy.sparse import spdiags, speye
+    from scipy.sparse import spdiags, speye, issparse
     import time
 
 ##     def B( vec ):
 ##         return vec
 
     n = 100
-    vals = [nm.arange( n, dtype = nm.float64 ) + 1]
+    vals = [np.arange( n, dtype = np.float64 ) + 1]
     A = spdiags( vals, 0, n, n )
     B = speye( n, n )
 #    B[0,0] = 0
-    B = nm.eye( n, n )
-    Y = nm.eye( n, 3 )
+    B = np.eye( n, n )
+    Y = np.eye( n, 3 )
 
 
-#    X = sc.rand( n, 3 )
+#    X = sp.rand( n, 3 )
     xfile = {100 : 'X.txt', 1000 : 'X2.txt', 10000 : 'X3.txt'}
-    X = nm.fromfile( xfile[n], dtype = nm.float64, sep = ' ' )
+    X = np.fromfile( xfile[n], dtype = np.float64, sep = ' ' )
     X.shape = (n, 3)
 
     ivals = [1./vals[0]]
     def precond( x ):
         invA = spdiags( ivals, 0, n, n )
         y = invA  * x
-        if sp.issparse( y ):
+        if issparse( y ):
             y = y.toarray()
 
         return as2d( y )

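The signature change above renames lobpcg's arguments to the arpack-style order lobpcg(A, X, B=None, M=None, Y=None, tol=None, maxiter=20, ...). A minimal call under the new interface, mirroring the test update below:

    import numpy as np
    from scipy.sparse import spdiags
    from scipy.sparse.linalg.eigen.lobpcg import lobpcg

    n = 100
    A = spdiags([np.arange(n, dtype=np.float64) + 1], 0, n, n)
    X = np.random.rand(n, 3)                    # initial block of 3 vectors
    w, v = lobpcg(A, X, tol=1e-5, maxiter=30)   # new argument order
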
Modified: branches/refactor_fft/scipy/sparse/linalg/eigen/lobpcg/tests/test_lobpcg.py
===================================================================
--- branches/refactor_fft/scipy/sparse/linalg/eigen/lobpcg/tests/test_lobpcg.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/sparse/linalg/eigen/lobpcg/tests/test_lobpcg.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python
 """ Test functions for the sparse.linalg.eigen.lobpcg module
 """
 
@@ -8,7 +8,7 @@
 from scipy import array, arange, ones, sort, cos, pi, rand, \
      set_printoptions, r_, diag, linalg
 from scipy.linalg import eig
-from scipy.sparse.linalg.eigen import lobpcg
+from scipy.sparse.linalg.eigen.lobpcg import lobpcg
 
 
 set_printoptions(precision=3,linewidth=90)
@@ -47,7 +47,7 @@
     V = rand(n,m)
     X = linalg.orth(V)
 
-    eigs,vecs = lobpcg.lobpcg(X,A,B,residualTolerance=1e-5, maxIterations=30)
+    eigs,vecs = lobpcg(A, X, B=B, tol=1e-5, maxiter=30)
     eigs.sort()
 
     #w,v = symeig(A,B)

Copied: branches/refactor_fft/scipy/sparse/linalg/isolve/SConscript (from rev 4510, trunk/scipy/sparse/linalg/isolve/SConscript)

Deleted: branches/refactor_fft/scipy/sparse/linalg/isolve/SConstruct
===================================================================
--- branches/refactor_fft/scipy/sparse/linalg/isolve/SConstruct	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/sparse/linalg/isolve/SConstruct	2008-07-01 04:52:00 UTC (rev 4511)
@@ -1,58 +0,0 @@
-# Last Change: Sat May 03 02:00 PM 2008 J
-# vim:syntax=python
-
-from os.path import join as pjoin, splitext
-
-from numpy.distutils.misc_util import get_numpy_include_dirs
-from numscons import GetNumpyEnvironment
-from numscons import CheckF77LAPACK
-
-from numscons import write_info
-
-env = GetNumpyEnvironment(ARGUMENTS)
-env.Tool('numpyf2py')
-env.Append(CPPPATH = [get_numpy_include_dirs(), env['F2PYINCLUDEDIR']])
-#if os.name == 'nt':
-#    # NT needs the pythonlib to run any code importing Python.h, including
-#    # simple code using only typedef and so on, so we need it for configuration
-#    # checks
-#    env.AppendUnique(LIBPATH = [get_pythonlib_dir()])
-
-#=======================
-# Starting Configuration
-#=======================
-config = env.NumpyConfigure(custom_tests = {'CheckLAPACK' : CheckF77LAPACK})
-
-#-----------------
-# Checking Lapack
-#-----------------
-st = config.CheckLAPACK()
-if not st:
-    raise RuntimeError("no lapack found, necessary for isolve module")
-
-config.Finish()
-write_info(env)
-
-#--------------------
-# iterative methods
-#--------------------
-methods = ['BiCGREVCOM.f.src',
-           'BiCGSTABREVCOM.f.src',
-           'CGREVCOM.f.src',
-           'CGSREVCOM.f.src',
-#               'ChebyREVCOM.f.src',
-           'GMRESREVCOM.f.src',
-#               'JacobiREVCOM.f.src',
-           'QMRREVCOM.f.src',
-#               'SORREVCOM.f.src'
-           ]
-Util = ['STOPTEST2.f.src','getbreak.f.src']
-raw_sources = methods + Util + ['_iterative.pyf.src']
-
-sources = []
-for method in raw_sources:
-    target = splitext(method)[0]
-    res = env.NumpyFromFTemplate(target, pjoin('iterative', method))
-    sources.append(res[0])
-
-env.NumpyPythonExtension('_iterative', source = sources)

Copied: branches/refactor_fft/scipy/sparse/linalg/isolve/SConstruct (from rev 4510, trunk/scipy/sparse/linalg/isolve/SConstruct)

Modified: branches/refactor_fft/scipy/sparse/linalg/isolve/utils.py
===================================================================
--- branches/refactor_fft/scipy/sparse/linalg/isolve/utils.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/sparse/linalg/isolve/utils.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -1,3 +1,7 @@
+__docformat__ = "restructuredtext en"
+
+__all__ = [] 
+
 from warnings import warn
 
 from numpy import asanyarray, asarray, asmatrix, array, matrix, zeros
@@ -24,27 +28,34 @@
 def make_system(A, M, x0, b, xtype=None):
     """Make a linear system Ax=b
 
-    Parameters:
-        A - LinearOperator
-            - sparse or dense matrix (or any valid input to aslinearoperator)
-        M - LinearOperator or None
-            - preconditioner
-            - sparse or dense matrix (or any valid input to aslinearoperator)
-        x0 - array_like or None
-            - initial guess to iterative method
-        b  - array_like
-            - right hand side
-        xtype - None or one of 'fdFD'
-            - dtype of the x vector
+    Parameters
+    ----------
+    A : LinearOperator
+        sparse or dense matrix (or any valid input to aslinearoperator)
+    M : {LinearOperator, None}
+        preconditioner
+        sparse or dense matrix (or any valid input to aslinearoperator)
+    x0 : {array_like, None}
+        initial guess to iterative method
+    b : array_like
+        right hand side
+    xtype : {'f', 'd', 'F', 'D', None}
+        dtype of the x vector
 
-    Returns:
-        (A, M, x, b, postprocess) where:
-            - A is a LinearOperator
-            - M is a LinearOperator
-            - x is the initial guess (rank 1 array)
-            - b is the rhs (rank 1 array)
-            - postprocess is a function that converts the solution vector
-              to the appropriate type and dimensions (e.g. (N,1) matrix)
+    Returns
+    -------
+    (A, M, x, b, postprocess)
+        A : LinearOperator
+            matrix of the linear system
+        M : LinearOperator
+            preconditioner
+        x : rank 1 ndarray
+            initial guess
+        b : rank 1 ndarray
+            right hand side
+        postprocess : function
+            converts the solution vector to the appropriate 
+            type and dimensions (e.g. (N,1) matrix)
 
     """
     A_ = A

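A note for readers of the new docstring: make_system is the normalisation step shared by the
iterative solvers. A minimal sketch of how a solver consumes it (the solver body and its
parameters are illustrative, not part of this commit):

    from scipy.sparse.linalg.isolve.utils import make_system

    def toy_solver(A, b, x0=None, M=None):
        # A and M come back as LinearOperators; x and b as rank-1 arrays
        A, M, x, b, postprocess = make_system(A, M, x0, b)
        # ... iterate here, calling A.matvec(x) and M.matvec(r) ...
        return postprocess(x)  # restore the caller's shape/type, e.g. (N,1) matrix
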
Copied: branches/refactor_fft/scipy/sparse/sparsetools/SConscript (from rev 4510, trunk/scipy/sparse/sparsetools/SConscript)

Deleted: branches/refactor_fft/scipy/sparse/sparsetools/SConstruct
===================================================================
--- branches/refactor_fft/scipy/sparse/sparsetools/SConstruct	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/sparse/sparsetools/SConstruct	2008-07-01 04:52:00 UTC (rev 4511)
@@ -1,12 +0,0 @@
-# Last Change: Wed Mar 05 09:00 PM 2008 J
-# vim:syntax=python
-from numpy.distutils.misc_util import get_numpy_include_dirs
-from numscons import GetNumpyEnvironment
-
-env = GetNumpyEnvironment(ARGUMENTS)
-
-env.AppendUnique(CPPPATH = get_numpy_include_dirs())
-
-for fmt in ['csr','csc','coo','bsr','dia']:
-    sources = [ fmt + '_wrap.cxx' ]
-    env.NumpyPythonExtension('_%s' % fmt, source = sources)

Copied: branches/refactor_fft/scipy/sparse/sparsetools/SConstruct (from rev 4510, trunk/scipy/sparse/sparsetools/SConstruct)

Modified: branches/refactor_fft/scipy/sparse/sparsetools/coo.h
===================================================================
--- branches/refactor_fft/scipy/sparse/sparsetools/coo.h	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/sparse/sparsetools/coo.h	2008-07-01 04:52:00 UTC (rev 4511)
@@ -116,5 +116,37 @@
 }
 
 
+/*
+ * Compute Y += A*X for COO matrix A and dense vectors X,Y
+ *
+ *
+ * Input Arguments:
+ *   I  nnz           - number of nonzeros in A
+ *   I  Ai[nnz]       - row indices
+ *   I  Aj[nnz]       - column indices
+ *   T  Ax[nnz]       - nonzero values
+ *   T  Xx[n_col]     - input vector
+ *
+ * Output Arguments:
+ *   T  Yx[n_row]     - output vector
+ *
+ * Notes:
+ *   Output array Yx must be preallocated
+ *
+ *   Complexity: Linear.  Specifically O(nnz(A))
+ * 
+ */
+template <class I, class T>
+void coo_matvec(const I nnz,
+	            const I Ai[], 
+	            const I Aj[], 
+	            const T Ax[],
+	            const T Xx[],
+	                  T Yx[])
+{
+    for(I n = 0; n < nnz; n++){
+        Yx[Ai[n]] += Ax[n] * Xx[Aj[n]];
+    }
+}
 
 #endif

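The new template is a plain scatter loop over the nonzeros. An equivalent NumPy rendering
(a sketch for illustration, not scipy's Python API) shows the O(nnz) behaviour and why
duplicate entries accumulate:

    import numpy as np

    def coo_matvec(Ai, Aj, Ax, Xx, Yx):
        # Y += A*X; Yx must be preallocated, as in the C version.
        # np.add.at accumulates even when the same row index repeats,
        # which a plain fancy-indexed += would silently drop.
        np.add.at(Yx, Ai, Ax * Xx[Aj])

    Ai, Aj = np.array([0, 1, 1]), np.array([1, 0, 1])
    Ax = np.array([2.0, 3.0, 4.0])
    Xx, Yx = np.array([1.0, 5.0]), np.zeros(2)
    coo_matvec(Ai, Aj, Ax, Xx, Yx)  # Yx is now [10., 23.]
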
Modified: branches/refactor_fft/scipy/sparse/tests/test_base.py
===================================================================
--- branches/refactor_fft/scipy/sparse/tests/test_base.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/sparse/tests/test_base.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -215,19 +215,27 @@
         assert_array_equal(self.datsp - A.todense(),self.dat - A.todense())
 
     def test_elmul(self):
-        temp = self.dat.copy()
-        temp[0,2] = 2.0
-        temp = self.spmatrix(temp)
-        c = temp.multiply(self.datsp)
-        assert_array_equal(c.todense(),[[1,0,0,4],[9,0,1,0],[0,4,0,0]])
-
-        # complex
-        A = array([[1-2j,0+5j,-1+0j],[4-3j,-3+6j,5]])
-        B = array([[5+2j,7-3j,-2+1j],[0-1j,-4+2j,9]])
+        # real/real
+        A = array([[4,0,9],[2,-3,5]])
+        B = array([[0,7,0],[0,-4,0]])
         Asp = self.spmatrix(A)
         Bsp = self.spmatrix(B)
-        assert_almost_equal( Asp.multiply(Bsp).todense(), A*B)
+        assert_almost_equal( Asp.multiply(Bsp).todense(), A*B) #sparse/sparse
+        assert_almost_equal( Asp.multiply(B),             A*B) #sparse/dense
 
+        # complex/complex
+        C = array([[1-2j,0+5j,-1+0j],[4-3j,-3+6j,5]])
+        D = array([[5+2j,7-3j,-2+1j],[0-1j,-4+2j,9]])
+        Csp = self.spmatrix(C)
+        Dsp = self.spmatrix(D)
+        assert_almost_equal( Csp.multiply(Dsp).todense(), C*D) #sparse/sparse
+        assert_almost_equal( Csp.multiply(D),             C*D) #sparse/dense
+
+        # real/complex
+        assert_almost_equal( Asp.multiply(Dsp).todense(), A*D) #sparse/sparse
+        assert_almost_equal( Asp.multiply(D),             A*D) #sparse/dense
+        
+
     def test_eldiv(self):
         expected = [[1,0,0,1],[1,0,1,0],[0,1,0,0]]
         assert_array_equal((self.datsp / self.datsp).todense(),expected)

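The rewritten test pins down elementwise (Hadamard) multiplication for sparse/sparse,
sparse/dense and mixed real/complex operands. The sparse/sparse case with the public API,
for reference (a small sketch, not taken from the commit):

    import numpy as np
    from scipy.sparse import csr_matrix

    A = np.array([[4, 0, 9], [2, -3, 5]])
    B = np.array([[0, 7, 0], [0, -4, 0]])

    C = csr_matrix(A).multiply(csr_matrix(B))  # elementwise, result stays sparse
    assert np.allclose(C.toarray(), A * B)
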
Copied: branches/refactor_fft/scipy/special/SConscript (from rev 4510, trunk/scipy/special/SConscript)

Deleted: branches/refactor_fft/scipy/special/SConstruct
===================================================================
--- branches/refactor_fft/scipy/special/SConstruct	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/special/SConstruct	2008-07-01 04:52:00 UTC (rev 4511)
@@ -1,65 +0,0 @@
-# Last Change: Sat May 03 02:00 PM 2008 J
-# vim:syntax=python
-from os.path import join as pjoin, basename as pbasename
-import sys
-
-from distutils.sysconfig import get_python_inc
-
-from numpy.distutils.misc_util import get_numpy_include_dirs
-from numscons import GetNumpyEnvironment
-from numscons import CheckF77Clib
-
-env = GetNumpyEnvironment(ARGUMENTS)
-
-env.Tool('numpyf2py')
-
-env.AppendUnique(CPPPATH = [get_python_inc(), get_numpy_include_dirs()])
-env.AppendUnique(CPPPATH = env['F2PYINCLUDEDIR'])
-
-if sys.platform=='win32':
-#        define_macros.append(('NOINFINITIES',None))
-#        define_macros.append(('NONANS',None))
-    env.AppendUnique(CPPDEFINES = '_USE_MATH_DEFINES')
-
-config = env.NumpyConfigure(custom_tests = {'CheckF77Clib' : CheckF77Clib})
-if not config.CheckF77Clib():
-    raise RuntimeError("Could not get C/F77 runtime information")
-config.Finish()
-
-def build_lib(name, ext, libname = None):
-    """ext should be .f or .c"""
-    if not libname:
-        libname = name
-    src = env.NumpyGlob(pjoin(name, '*%s' % ext))
-    assert len(src) > 0
-    env.NumpyStaticExtLibrary(libname, source = src)
-
-# C libraries
-build_lib('c_misc', '.c')
-build_lib('cephes', '.c')
-
-# F libraries
-# XXX: handle no opt flags for mach
-build_lib('mach', '.f')
-build_lib('toms', '.f')
-build_lib('amos', '.f')
-build_lib('cdflib', '.f', 'cdf')
-build_lib('specfun', '.f', 'specfunlib')
-
-env.AppendUnique(LIBPATH = [env['build_dir']])
-
-# Cephes extension
-src = ['_cephesmodule.c', 'amos_wrappers.c', 'specfun_wrappers.c', \
-       'toms_wrappers.c','cdf_wrappers.c','ufunc_extras.c']
-
-env.NumpyPythonExtension('_cephes', 
-                         source = src, 
-                         LIBS = ['amos', 'toms', 'c_misc', 'cephes', 'mach',\
-                                 'cdf', 'specfunlib'], 
-                         LINKFLAGSEND = env['F77_LDFLAGS'])
-
-# Specfun extension
-env.Prepend(LIBS = ['specfunlib'])
-env.NumpyPythonExtension('specfun', source = 'specfun.pyf',
-                         F2PYOPTIONS = ["--no-wrap-functions"],
-                         LINKFLAGSEND = env['F77_LDFLAGS'])

Copied: branches/refactor_fft/scipy/special/SConstruct (from rev 4510, trunk/scipy/special/SConstruct)

Modified: branches/refactor_fft/scipy/special/_cephesmodule.c
===================================================================
--- branches/refactor_fft/scipy/special/_cephesmodule.c	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/special/_cephesmodule.c	2008-07-01 04:52:00 UTC (rev 4511)
@@ -108,7 +108,7 @@
 static void * gdtri_data[] = { (void *)gdtri, (void *)gdtri, };
 */
 static void * hyp2f1_data[] = { (void *)hyp2f1, (void *)hyp2f1, (void *)chyp2f1_wrap, (void *)chyp2f1_wrap};
-static void * hyperg_data[] = { (void *)hyperg, (void *)hyperg, (void *)chyp1f1_wrap, (void *)chyp1f1_wrap};
+static void * hyp1f1_data[] = { (void *)hyp1f1_wrap, (void *)hyp1f1_wrap, (void *)chyp1f1_wrap, (void *)chyp1f1_wrap};
 static void * hypU_data[] = { (void *)hypU_wrap, (void *)hypU_wrap, };
 static void * hyp2f0_data[] = { (void *)hyp2f0, (void *)hyp2f0, };
 static void * threef0_data[] = { (void *)threef0, (void *)threef0, };
@@ -441,7 +441,7 @@
 	f = PyUFunc_FromFuncAndData(cephes4_functions, hyp2f1_data, cephes_5c2_types, 4, 4, 1, PyUFunc_None, "hyp2f1", hyp2f1_doc, 0);
 	PyDict_SetItemString(dictionary, "hyp2f1", f);
 	Py_DECREF(f);
-	f = PyUFunc_FromFuncAndData(cephes3_functions, hyperg_data, cephes_4c_types, 4, 3, 1, PyUFunc_None, "hyp1f1", hyp1f1_doc, 0);
+	f = PyUFunc_FromFuncAndData(cephes3_functions, hyp1f1_data, cephes_4c_types, 4, 3, 1, PyUFunc_None, "hyp1f1", hyp1f1_doc, 0);
 	PyDict_SetItemString(dictionary, "hyp1f1", f);
 	Py_DECREF(f);
 

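This swaps the real-argument branch of hyp1f1 from Cephes' hyperg onto the new CHGM-based
wrapper (hyp1f1_wrap, added below). A quick sanity check via the identity
1F1(a; a; x) = exp(x), which is also the value asserted in test_basic.py later in this commit:

    import numpy as np
    from scipy.special import hyp1f1

    # 1F1(a; a; x) reduces to exp(x) for any a
    assert np.allclose(hyp1f1(0.1, 0.1, 0.3), np.exp(0.3))  # 1.3498588075760032
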
Modified: branches/refactor_fft/scipy/special/specfun.pyf
===================================================================
--- branches/refactor_fft/scipy/special/specfun.pyf	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/special/specfun.pyf	2008-07-01 04:52:00 UTC (rev 4511)
@@ -242,7 +242,12 @@
         ! eix
         ! e1xb
 
-        ! chgm
+        subroutine chgm(a,b,x,hg) ! in :specfun:specfun.f
+             double precision intent(in) :: a
+             double precision intent(in) :: b
+             double precision intent(in) :: x
+             double precision intent(out) :: hg
+        end subroutine chgm
 
         ! stvh0
 

Modified: branches/refactor_fft/scipy/special/specfun_wrappers.c
===================================================================
--- branches/refactor_fft/scipy/special/specfun_wrappers.c	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/special/specfun_wrappers.c	2008-07-01 04:52:00 UTC (rev 4511)
@@ -29,6 +29,7 @@
 extern void F_FUNC(cpsi,CPSI)(double*,double*,double*,double*);
 extern void F_FUNC(hygfz,HYGFZ)(double*,double*,double*,Py_complex*,Py_complex*);
 extern void F_FUNC(cchg,CCHG)(double*,double*,Py_complex*,Py_complex*);
+extern void F_FUNC(chgm,CHGM)(double*,double*,double*,double*);
 extern void F_FUNC(chgu,CHGU)(double*,double*,double*,double*,int*);
 extern void F_FUNC(itairy,ITAIRY)(double*,double*,double*,double*,double*);
 extern void F_FUNC(e1xb,E1XB)(double*,double*);
@@ -147,6 +148,15 @@
   
 }
 
+double hyp1f1_wrap(double a, double b, double x) {
+   double outy;
+ 
+   F_FUNC(chgm,CHGM)(&a, &b, &x, &outy);
+   if (outy == 1e300) {
+     outy = INFINITY;
+   }
+   return outy;
+}
 
 int itairy_wrap(double x, double *apt, double *bpt, double *ant, double *bnt) {
   double tmp; 

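CHGM reports overflow by saturating its result at 1e300; the wrapper converts that sentinel
into a proper infinity. The same post-processing in Python (specfun.chgm is the f2py binding
declared in the .pyf stanza above; treating it as callable from Python is an assumption of
this sketch):

    from scipy.special import specfun  # private f2py module (assumed importable)

    def hyp1f1_real(a, b, x):
        hg = specfun.chgm(a, b, x)
        # CHGM saturates overflowing results at 1e300; report inf instead
        return float('inf') if hg == 1e300 else hg
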
Modified: branches/refactor_fft/scipy/special/specfun_wrappers.h
===================================================================
--- branches/refactor_fft/scipy/special/specfun_wrappers.h	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/special/specfun_wrappers.h	2008-07-01 04:52:00 UTC (rev 4511)
@@ -31,6 +31,7 @@
 Py_complex crgamma_wrap( Py_complex z);
 Py_complex chyp2f1_wrap( double a, double b, double c, Py_complex z);
 Py_complex chyp1f1_wrap( double a, double b, Py_complex z);
+double hyp1f1_wrap( double a, double b, double x);
 double hypU_wrap(double a, double b, double x);
 double exp1_wrap(double x);
 double expi_wrap(double x);

Modified: branches/refactor_fft/scipy/special/tests/test_basic.py
===================================================================
--- branches/refactor_fft/scipy/special/tests/test_basic.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/special/tests/test_basic.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -32,7 +32,7 @@
 #8   test_sh_jacobi
 #8   test_sh_legendre
 
-from numpy import dot
+from numpy import dot, array
 
 from scipy.testing import *
 
@@ -1177,6 +1177,116 @@
         hyp1 = hyp1f1(.1,.1,.3)
         assert_almost_equal(hyp1, 1.3498588075760032,7)
 
+        # test contributed by Moritz Deger (2008-05-29)
+        # http://projects.scipy.org/scipy/scipy/ticket/659
+        
+        # reference data obtained from mathematica [ a, b, x, m(a,b,x)]:
+        # produced with test_hyp1f1.nb
+        ref_data = array([[ -8.38132975e+00,  -1.28436461e+01,  -2.91081397e+01,          1.04178330e+04],
+                          [  2.91076882e+00,  -6.35234333e+00,  -1.27083993e+01,          6.68132725e+00],
+                          [ -1.42938258e+01,   1.80869131e-01,   1.90038728e+01,          1.01385897e+05],
+                          [  5.84069088e+00,   1.33187908e+01,   2.91290106e+01,          1.59469411e+08],
+                          [ -2.70433202e+01,  -1.16274873e+01,  -2.89582384e+01,          1.39900152e+24],
+                          [  4.26344966e+00,  -2.32701773e+01,   1.91635759e+01,          6.13816915e+21],
+                          [  1.20514340e+01,  -3.40260240e+00,   7.26832235e+00,          1.17696112e+13],
+                          [  2.77372955e+01,  -1.99424687e+00,   3.61332246e+00,          3.07419615e+13],
+                          [  1.50310939e+01,  -2.91198675e+01,  -1.53581080e+01,         -3.79166033e+02],
+                          [  1.43995827e+01,   9.84311196e+00,   1.93204553e+01,          2.55836264e+10],
+                          [ -4.08759686e+00,   1.34437025e+01,  -1.42072843e+01,          1.70778449e+01],
+                          [  8.05595738e+00,  -1.31019838e+01,   1.52180721e+01,          3.06233294e+21],
+                          [  1.81815804e+01,  -1.42908793e+01,   9.57868793e+00,         -2.84771348e+20],
+                          [ -2.49671396e+01,   1.25082843e+01,  -1.71562286e+01,          2.36290426e+07],
+                          [  2.67277673e+01,   1.70315414e+01,   6.12701450e+00,          7.77917232e+03],
+                          [  2.49565476e+01,   2.91694684e+01,   6.29622660e+00,          2.35300027e+02],
+                          [  6.11924542e+00,  -1.59943768e+00,   9.57009289e+00,          1.32906326e+11],
+                          [ -1.47863653e+01,   2.41691301e+01,  -1.89981821e+01,          2.73064953e+03],
+                          [  2.24070483e+01,  -2.93647433e+00,   8.19281432e+00,         -6.42000372e+17],
+                          [  8.04042600e-01,   1.82710085e+01,  -1.97814534e+01,          5.48372441e-01],
+                          [  1.39590390e+01,   1.97318686e+01,   2.37606635e+00,          5.51923681e+00],
+                          [ -4.66640483e+00,  -2.00237930e+01,   7.40365095e+00,          4.50310752e+00],
+                          [  2.76821999e+01,  -6.36563968e+00,   1.11533984e+01,         -9.28725179e+23],
+                          [ -2.56764457e+01,   1.24544906e+00,   1.06407572e+01,          1.25922076e+01],
+                          [  3.20447808e+00,   1.30874383e+01,   2.26098014e+01,          2.03202059e+04],
+                          [ -1.24809647e+01,   4.15137113e+00,  -2.92265700e+01,          2.39621411e+08],
+                          [  2.14778108e+01,  -2.35162960e+00,  -1.13758664e+01,          4.46882152e-01],
+                          [ -9.85469168e+00,  -3.28157680e+00,   1.67447548e+01,         -1.07342390e+07],
+                          [  1.08122310e+01,  -2.47353236e+01,  -1.15622349e+01,         -2.91733796e+03],
+                          [ -2.67933347e+01,  -3.39100709e+00,   2.56006986e+01,         -5.29275382e+09],
+                          [ -8.60066776e+00,  -8.02200924e+00,   1.07231926e+01,          1.33548320e+06],
+                          [ -1.01724238e-01,  -1.18479709e+01,  -2.55407104e+01,          1.55436570e+00],
+                          [ -3.93356771e+00,   2.11106818e+01,  -2.57598485e+01,          2.13467840e+01],
+                          [  3.74750503e+00,   1.55687633e+01,  -2.92841720e+01,          1.43873509e-02],
+                          [  6.99726781e+00,   2.69855571e+01,  -1.63707771e+01,          3.08098673e-02],
+                          [ -2.31996011e+01,   3.47631054e+00,   9.75119815e-01,          1.79971073e-02],
+                          [  2.38951044e+01,  -2.91460190e+01,  -2.50774708e+00,          9.56934814e+00],
+                          [  1.52730825e+01,   5.77062507e+00,   1.21922003e+01,          1.32345307e+09],
+                          [  1.74673917e+01,   1.89723426e+01,   4.94903250e+00,          9.90859484e+01],
+                          [  1.88971241e+01,   2.86255413e+01,   5.52360109e-01,          1.44165360e+00],
+                          [  1.02002319e+01,  -1.66855152e+01,  -2.55426235e+01,          6.56481554e+02],
+                          [ -1.79474153e+01,   1.22210200e+01,  -1.84058212e+01,          8.24041812e+05],
+                          [ -1.36147103e+01,   1.32365492e+00,  -7.22375200e+00,          9.92446491e+05],
+                          [  7.57407832e+00,   2.59738234e+01,  -1.34139168e+01,          3.64037761e-02],
+                          [  2.21110169e+00,   1.28012666e+01,   1.62529102e+01,          1.33433085e+02],
+                          [ -2.64297569e+01,  -1.63176658e+01,  -1.11642006e+01,         -2.44797251e+13],
+                          [ -2.46622944e+01,  -3.02147372e+00,   8.29159315e+00,         -3.21799070e+05],
+                          [ -1.37215095e+01,  -1.96680183e+01,   2.91940118e+01,          3.21457520e+12],
+                          [ -5.45566105e+00,   2.81292086e+01,   1.72548215e-01,          9.66973000e-01],
+                          [ -1.55751298e+00,  -8.65703373e+00,   2.68622026e+01,         -3.17190834e+16],
+                          [  2.45393609e+01,  -2.70571903e+01,   1.96815505e+01,          1.80708004e+37],
+                          [  5.77482829e+00,   1.53203143e+01,   2.50534322e+01,          1.14304242e+06],
+                          [ -1.02626819e+01,   2.36887658e+01,  -2.32152102e+01,          7.28965646e+02],
+                          [ -1.30833446e+00,  -1.28310210e+01,   1.87275544e+01,         -9.33487904e+12],
+                          [  5.83024676e+00,  -1.49279672e+01,   2.44957538e+01,         -7.61083070e+27],
+                          [ -2.03130747e+01,   2.59641715e+01,  -2.06174328e+01,          4.54744859e+04],
+                          [  1.97684551e+01,  -2.21410519e+01,  -2.26728740e+01,          3.53113026e+06],
+                          [  2.73673444e+01,   2.64491725e+01,   1.57599882e+01,          1.07385118e+07],
+                          [  5.73287971e+00,   1.21111904e+01,   1.33080171e+01,          2.63220467e+03],
+                          [ -2.82751072e+01,   2.08605881e+01,   9.09838900e+00,         -6.60957033e-07],
+                          [  1.87270691e+01,  -1.74437016e+01,   1.52413599e+01,          6.59572851e+27],
+                          [  6.60681457e+00,  -2.69449855e+00,   9.78972047e+00,         -2.38587870e+12],
+                          [  1.20895561e+01,  -2.51355765e+01,   2.30096101e+01,          7.58739886e+32],
+                          [ -2.44682278e+01,   2.10673441e+01,  -1.36705538e+01,          4.54213550e+04],
+                          [ -4.50665152e+00,   3.72292059e+00,  -4.83403707e+00,          2.68938214e+01],
+                          [ -7.46540049e+00,  -1.08422222e+01,  -1.72203805e+01,         -2.09402162e+02],
+                          [ -2.00307551e+01,  -7.50604431e+00,  -2.78640020e+01,          4.15985444e+19],
+                          [  1.99890876e+01,   2.20677419e+01,  -2.51301778e+01,          1.23840297e-09],
+                          [  2.03183823e+01,  -7.66942559e+00,   2.10340070e+01,          1.46285095e+31],
+                          [ -2.90315825e+00,  -2.55785967e+01,  -9.58779316e+00,          2.65714264e-01],
+                          [  2.73960829e+01,  -1.80097203e+01,  -2.03070131e+00,          2.52908999e+02],
+                          [ -2.11708058e+01,  -2.70304032e+01,   2.48257944e+01,          3.09027527e+08],
+                          [  2.21959758e+01,   4.00258675e+00,  -1.62853977e+01,         -9.16280090e-09],
+                          [  1.61661840e+01,  -2.26845150e+01,   2.17226940e+01,         -8.24774394e+33],
+                          [ -3.35030306e+00,   1.32670581e+00,   9.39711214e+00,         -1.47303163e+01],
+                          [  7.23720726e+00,  -2.29763909e+01,   2.34709682e+01,         -9.20711735e+29],
+                          [  2.71013568e+01,   1.61951087e+01,  -7.11388906e-01,          2.98750911e-01],
+                          [  8.40057933e+00,  -7.49665220e+00,   2.95587388e+01,          6.59465635e+29],
+                          [ -1.51603423e+01,   1.94032322e+01,  -7.60044357e+00,          1.05186941e+02],
+                          [ -8.83788031e+00,  -2.72018313e+01,   1.88269907e+00,          1.81687019e+00],
+                          [ -1.87283712e+01,   5.87479570e+00,  -1.91210203e+01,          2.52235612e+08],
+                          [ -5.61338513e-01,   2.69490237e+01,   1.16660111e-01,          9.97567783e-01],
+                          [ -5.44354025e+00,  -1.26721408e+01,  -4.66831036e+00,          1.06660735e-01],
+                          [ -2.18846497e+00,   2.33299566e+01,   9.62564397e+00,          3.03842061e-01],
+                          [  6.65661299e+00,  -2.39048713e+01,   1.04191807e+01,          4.73700451e+13],
+                          [ -2.57298921e+01,  -2.60811296e+01,   2.74398110e+01,         -5.32566307e+11],
+                          [ -1.11431826e+01,  -1.59420160e+01,  -1.84880553e+01,         -1.01514747e+02],
+                          [  6.50301931e+00,   2.59859051e+01,  -2.33270137e+01,          1.22760500e-02],
+                          [ -1.94987891e+01,  -2.62123262e+01,   3.90323225e+00,          1.71658894e+01],
+                          [  7.26164601e+00,  -1.41469402e+01,   2.81499763e+01,         -2.50068329e+31],
+                          [ -1.52424040e+01,   2.99719005e+01,  -2.85753678e+01,          1.31906693e+04],
+                          [  5.24149291e+00,  -1.72807223e+01,   2.22129493e+01,          2.50748475e+25],
+                          [  3.63207230e-01,  -9.54120862e-02,  -2.83874044e+01,          9.43854939e-01],
+                          [ -2.11326457e+00,  -1.25707023e+01,   1.17172130e+00,          1.20812698e+00],
+                          [  2.48513582e+00,   1.03652647e+01,  -1.84625148e+01,          6.47910997e-02],
+                          [  2.65395942e+01,   2.74794672e+01,   1.29413428e+01,          2.89306132e+05],
+                          [ -9.49445460e+00,   1.59930921e+01,  -1.49596331e+01,          3.27574841e+02],
+                          [ -5.89173945e+00,   9.96742426e+00,   2.60318889e+01,         -3.15842908e-01],
+                          [ -1.15387239e+01,  -2.21433107e+01,  -2.17686413e+01,          1.56724718e-01],
+                          [ -5.30592244e+00,  -2.42752190e+01,   1.29734035e+00,          1.31985534e+00]])
+
+        for a,b,c,expected in ref_data:
+            result = hyp1f1(a,b,c)
+            assert(abs(expected - result)/expected < 1e-4)
+
     def test_hyp1f2(self):
         pass
 

Copied: branches/refactor_fft/scipy/stats/SConscript (from rev 4510, trunk/scipy/stats/SConscript)

Deleted: branches/refactor_fft/scipy/stats/SConstruct
===================================================================
--- branches/refactor_fft/scipy/stats/SConstruct	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/stats/SConstruct	2008-07-01 04:52:00 UTC (rev 4511)
@@ -1,39 +0,0 @@
-# Last Change: Sat May 03 02:00 PM 2008 J
-# vim:syntax=python
-from os.path import join as pjoin
-
-from numpy.distutils.misc_util import get_numpy_include_dirs
-from numscons import GetNumpyEnvironment, CheckF77Clib
-
-env = GetNumpyEnvironment(ARGUMENTS)
-
-t = env.Tool('numpyf2py')
-
-env.AppendUnique(CPPPATH = [get_numpy_include_dirs()])
-env.AppendUnique(CPPPATH = env['F2PYINCLUDEDIR'])
-
-config = env.NumpyConfigure(custom_tests = {'CheckF77Clib' : CheckF77Clib})
-if not config.CheckF77Clib():
-    raise RuntimeError("Could not get C/F77 runtime information")
-config.Finish()
-
-# Statlib library
-src = env.NumpyGlob(pjoin('statlib', '*.f' ))
-env.NumpyStaticExtLibrary('statlibimp', source = src)
-
-env.AppendUnique(LIBPATH = [env['build_dir']])
-
-# Statlib extension
-env.NumpyPythonExtension('statlib', source = 'statlib.pyf', 
-                         F2PYOPTIONS = ["--no-wrap-functions"],
-                         LIBS = 'statlibimp',
-                         LINKFLAGSEND = env['F77_LDFLAGS'])
-
-# futil extension
-futil_src = env.NumpyF2py(pjoin('futilmodule.c'), pjoin('futil.f'))
-env.NumpyPythonExtension('futil', source = futil_src + ['futil.f'],
-                         LINKFLAGSEND = env['F77_LDFLAGS'])
-
-# mvn extension
-env.NumpyPythonExtension('mvn', source = ['mvn.pyf', 'mvndst.f'],
-                         LINKFLAGSEND = env['F77_LDFLAGS'])

Copied: branches/refactor_fft/scipy/stats/SConstruct (from rev 4510, trunk/scipy/stats/SConstruct)

Modified: branches/refactor_fft/scipy/stats/models/formula.py
===================================================================
--- branches/refactor_fft/scipy/stats/models/formula.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/stats/models/formula.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -269,9 +269,9 @@
         if reference is None:
             reference = 0
         else:
-            try:
+            if reference in names:
                 reference = names.index(reference)
-            except IndexError:
+            else:
                 reference = int(reference)
 
         def maineffect_func(value, reference=reference):

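The replaced try/except was dead code: list.index raises ValueError, not IndexError, so the
int() fallback was never reached. The membership test restores the intended behaviour, as
this standalone sketch of the same logic shows:

    names = ['x1', 'x2', 'x3']

    def resolve(reference):
        # fixed logic: position of a known name, otherwise a raw integer index
        if reference in names:
            return names.index(reference)
        return int(reference)

    assert resolve('x2') == 1
    assert resolve(2) == 2
    # names.index('x9') would raise ValueError, which `except IndexError`
    # never caught -- hence the rewrite above.
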
Modified: branches/refactor_fft/scipy/stats/models/tests/test_bspline.py
===================================================================
--- branches/refactor_fft/scipy/stats/models/tests/test_bspline.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/stats/models/tests/test_bspline.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -6,17 +6,21 @@
 from scipy.testing import *
 
 import scipy.stats.models as S
-import scipy.stats.models.bspline as B
+try:
+    import scipy.stats.models.bspline as B
+except ImportError:
+    B = None
 
 
 class TestBSpline(TestCase):
 
     def test1(self):
-        b = B.BSpline(N.linspace(0,10,11), x=N.linspace(0,10,101))
-        old = b._basisx.shape
-        b.x = N.linspace(0,10,51)
-        new = b._basisx.shape
-        self.assertEqual((old[0], 51), new)
+        if B:
+            b = B.BSpline(N.linspace(0,10,11), x=N.linspace(0,10,101))
+            old = b._basisx.shape
+            b.x = N.linspace(0,10,51)
+            new = b._basisx.shape
+            self.assertEqual((old[0], 51), new)
 
 
 if __name__ == "__main__":

Modified: branches/refactor_fft/scipy/stats/mstats.py
===================================================================
--- branches/refactor_fft/scipy/stats/mstats.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/stats/mstats.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -518,8 +518,8 @@
 def kendalltau(x, y, use_ties=True, use_missing=False):
     """Computes Kendall's rank correlation tau on two variables *x* and *y*.
 
-Parameters
-----------
+    Parameters
+    ----------
     xdata: sequence
         First data list (for example, time).
     ydata: sequence
@@ -529,6 +529,13 @@
     use_missing: {False, True} optional
         Whether missing data should be allocated a rank of 0 (False) or the
         average rank (True)
+        
+    Returns
+    -------
+        tau : float
+            Kendall tau
+        prob : float
+            Approximate 2-sided p-value.
     """
     (x, y, n) = _chk_size(x, y)
     (x, y) = (x.flatten(), y.flatten())
@@ -724,6 +731,17 @@
             Independent variable. If None, use arange(len(y)) instead.
         alpha : float
             Confidence degree.
+            
+    Returns
+    -------
+        medslope : float
+            Theil slope
+        medintercept : float
+            Intercept of the Theil line, as median(y)-medslope*median(x)
+        lo_slope : float
+            Lower bound of the confidence interval on medslope
+        up_slope : float
+            Upper bound of the confidence interval on medslope
 
     """
     y = ma.asarray(y).flatten()
@@ -755,8 +773,8 @@
     sigsq -= np.sum(v*k*(k-1)*(2*k+5) for (k,v) in yties.iteritems())
     sigma = np.sqrt(sigsq)
 
-    Ru = np.round((nt - z*sigma)/2. + 1)
-    Rl = np.round((nt + z*sigma)/2.)
+    Ru = min(np.round((nt - z*sigma)/2. + 1), len(slopes)-1)
+    Rl = max(np.round((nt + z*sigma)/2.), 0)
     delta = slopes[[Rl,Ru]]
     return medslope, medinter, delta[0], delta[1]
 

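Besides the docstrings, theilslopes now clamps the confidence-interval ranks into the valid
range before indexing the sorted slopes, which previously could fail for small samples or
wide intervals. The guard in isolation (values are illustrative; z is negative here, as in
the surrounding code):

    import numpy as np

    slopes = np.sort(np.random.randn(10))
    nt, z, sigma = len(slopes), -1.96, 6.0  # illustrative magnitudes

    # unclamped, these ranks would be 12 and -1 -- outside [0, nt - 1]
    Ru = int(min(np.round((nt - z * sigma) / 2. + 1), nt - 1))
    Rl = int(max(np.round((nt + z * sigma) / 2.), 0))
    delta = slopes[[Rl, Ru]]  # always a valid fancy index after the fix
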
Modified: branches/refactor_fft/scipy/stats/stats.py
===================================================================
--- branches/refactor_fft/scipy/stats/stats.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/stats/stats.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -50,6 +50,10 @@
           kurtosis
           normaltest (for arrays only)
 
+MOMENTS HANDLING NAN: nanmean
+                      nanmedian
+                      nanstd
+
 ALTERED VERSIONS:  tmean
                    tvar
                    tstd
@@ -214,6 +218,7 @@
            'f_value', 'f_value_multivariate',
            'ss', 'square_of_sums',
            'fastsort', 'rankdata',
+           'nanmean', 'nanstd', 'nanmedian',
           ]
 
 

Modified: branches/refactor_fft/scipy/stats/tests/test_stats.py
===================================================================
--- branches/refactor_fft/scipy/stats/tests/test_stats.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/stats/tests/test_stats.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -197,47 +197,47 @@
 
     def test_nanmean_none(self):
         """Check nanmean when no values are nan."""
-        m = stats.stats.nanmean(X)
+        m = stats.nanmean(X)
         assert_approx_equal(m, X[4])
 
     def test_nanmean_some(self):
         """Check nanmean when some values only are nan."""
-        m = stats.stats.nanmean(self.Xsome)
+        m = stats.nanmean(self.Xsome)
         assert_approx_equal(m, 5.5)
 
     def test_nanmean_all(self):
         """Check nanmean when all values are nan."""
-        m = stats.stats.nanmean(self.Xall)
+        m = stats.nanmean(self.Xall)
         assert numpy.isnan(m)
 
     def test_nanstd_none(self):
         """Check nanstd when no values are nan."""
-        s = stats.stats.nanstd(self.X)
-        assert_approx_equal(s, stats.stats.std(self.X))
+        s = stats.nanstd(self.X)
+        assert_approx_equal(s, stats.std(self.X))
 
     def test_nanstd_some(self):
         """Check nanstd when some values only are nan."""
-        s = stats.stats.nanstd(self.Xsome)
-        assert_approx_equal(s, stats.stats.std(self.Xsomet))
+        s = stats.nanstd(self.Xsome)
+        assert_approx_equal(s, stats.std(self.Xsomet))
 
     def test_nanstd_all(self):
         """Check nanstd when all values are nan."""
-        s = stats.stats.nanstd(self.Xall)
+        s = stats.nanstd(self.Xall)
         assert numpy.isnan(s)
 
     def test_nanmedian_none(self):
         """Check nanmedian when no values are nan."""
-        m = stats.stats.nanmedian(self.X)
-        assert_approx_equal(m, stats.stats.median(self.X))
+        m = stats.nanmedian(self.X)
+        assert_approx_equal(m, stats.median(self.X))
 
     def test_nanmedian_some(self):
         """Check nanmedian when some values only are nan."""
-        m = stats.stats.nanmedian(self.Xsome)
-        assert_approx_equal(m, stats.stats.median(self.Xsomet))
+        m = stats.nanmedian(self.Xsome)
+        assert_approx_equal(m, stats.median(self.Xsomet))
 
     def test_nanmedian_all(self):
         """Check nanmedian when all values are nan."""
-        m = stats.stats.nanmedian(self.Xall)
+        m = stats.nanmedian(self.Xall)
         assert numpy.isnan(m)
 
 class TestCorr(TestCase):
@@ -482,6 +482,15 @@
         assert_almost_equal(intercept,0.0)
         assert_almost_equal(r,0.0)
 
+    def test_regress_simple(self):
+        """Regress a line with sinusoidal noise."""
+        x = numpy.linspace(0, 100, 100)
+        y = 0.2 * numpy.linspace(0, 100, 100) + 10
+        y += numpy.sin(numpy.linspace(0, 20, 100))
+
+        res = stats.linregress(x, y)
+        assert_almost_equal(res[4], 4.3609875083149268e-3)
+
 # Utility
 
 def compare_results(res,desired):


Property changes on: branches/refactor_fft/scipy/stats/tests/test_stats.py
___________________________________________________________________
Name: svn:executable
   - *

Copied: branches/refactor_fft/scipy/stsci/convolve/SConscript (from rev 4510, trunk/scipy/stsci/convolve/SConscript)

Deleted: branches/refactor_fft/scipy/stsci/convolve/SConstruct
===================================================================
--- branches/refactor_fft/scipy/stsci/convolve/SConstruct	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/stsci/convolve/SConstruct	2008-07-01 04:52:00 UTC (rev 4511)
@@ -1,13 +0,0 @@
-# Last Change: Wed Mar 05 09:00 PM 2008 J
-from numpy.distutils.misc_util import get_numpy_include_dirs
-from numpy import get_numarray_include
-from numscons import GetNumpyEnvironment
-
-env = GetNumpyEnvironment(ARGUMENTS)
-
-env.AppendUnique(CPPPATH = [get_numpy_include_dirs(), get_numarray_include()])
-env.AppendUnique(CPPDEFINES = {'NUMPY': '1'})
-
-# _correlate extension
-env.NumpyPythonExtension('_correlate', source = 'src/_correlatemodule.c') 
-env.NumpyPythonExtension('_lineshape', source = 'src/_lineshapemodule.c') 

Copied: branches/refactor_fft/scipy/stsci/convolve/SConstruct (from rev 4510, trunk/scipy/stsci/convolve/SConstruct)

Copied: branches/refactor_fft/scipy/stsci/image/SConscript (from rev 4510, trunk/scipy/stsci/image/SConscript)

Deleted: branches/refactor_fft/scipy/stsci/image/SConstruct
===================================================================
--- branches/refactor_fft/scipy/stsci/image/SConstruct	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/stsci/image/SConstruct	2008-07-01 04:52:00 UTC (rev 4511)
@@ -1,11 +0,0 @@
-# Last Change: Wed Mar 05 09:00 PM 2008 J
-from numpy.distutils.misc_util import get_numpy_include_dirs
-from numpy import get_numarray_include
-from numscons import GetNumpyEnvironment
-
-env = GetNumpyEnvironment(ARGUMENTS)
-
-env.AppendUnique(CPPPATH = [get_numpy_include_dirs(), get_numarray_include()])
-env.AppendUnique(CPPDEFINES = {'NUMPY': '1'})
-
-env.NumpyPythonExtension('_combine', source = 'src/_combinemodule.c') 

Copied: branches/refactor_fft/scipy/stsci/image/SConstruct (from rev 4510, trunk/scipy/stsci/image/SConstruct)

Modified: branches/refactor_fft/scipy/testing/__init__.py
===================================================================
--- branches/refactor_fft/scipy/testing/__init__.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/testing/__init__.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -8,11 +8,6 @@
 import unittest
 from unittest import TestCase
 
-try:
-    import nose
-except ImportError:
-    pass
-
 import decorators as dec
 from numpy.testing.utils import *
 from utils import *

Modified: branches/refactor_fft/scipy/testing/decorators.py
===================================================================
--- branches/refactor_fft/scipy/testing/decorators.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/testing/decorators.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -10,11 +10,6 @@
 
 """
 
-try:
-    import nose
-except ImportError:
-    pass
-
 def slow(t):
     """Labels a test as 'slow'.
 
@@ -76,6 +71,9 @@
     if msg is None:
         msg = 'Test skipped due to test condition'
     def skip_decorator(f):
+        # Local import to avoid a hard nose dependency and only incur the import
+        # time overhead at actual test-time.
+        import nose
         def skipper(*args, **kwargs):
             if skip_condition:
                 raise nose.SkipTest, msg
@@ -87,6 +85,9 @@
 def skipknownfailure(f):
     ''' Decorator to raise SkipTest for test known to fail
     '''
+    # Local import to avoid a hard nose dependency and only incur the import
+    # time overhead at actual test-time.
+    import nose
     def skipper(*args, **kwargs):
         raise nose.SkipTest, 'This test is known to fail'
     return nose.tools.make_decorator(f)(skipper)

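With the module-level try/import gone, importing scipy.testing no longer requires nose at
all; the dependency is resolved only when a decorator actually runs. The pattern in
miniature (a sketch of the idiom, mirroring skipknownfailure above):

    def skipknownfailure(f):
        import nose  # deferred: resolved when the decorator is applied, not
                     # when scipy.testing.decorators is imported

        def skipper(*args, **kwargs):
            raise nose.SkipTest('This test is known to fail')
        return nose.tools.make_decorator(f)(skipper)
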
Modified: branches/refactor_fft/scipy/testing/nosetester.py
===================================================================
--- branches/refactor_fft/scipy/testing/nosetester.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/testing/nosetester.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -7,8 +7,26 @@
 import sys
 import re
 
-import nose
 
+def import_nose():
+    """ Import nose only when needed.
+    """
+    fine_nose = True
+    try:
+        import nose
+    except ImportError:
+        fine_nose = False
+    else:
+        nose_version = nose.__versioninfo__
+        if nose_version[0] < 1 and nose_version[1] < 10:
+            fine_nose = False
+
+    if not fine_nose:
+        raise ImportError('Need nose >=0.10 for tests - see '
+            'http://somethingaboutorange.com/mrl/projects/nose')
+
+    return nose
+
 class NoseTester(object):
     """ Nose test runner.
 
@@ -112,6 +130,7 @@
         doctests : boolean
             If True, run doctests in module, default False
         '''
+        nose = import_nose()
         argv = self._test_argv(label, verbose, extra_argv)
         if doctests:
             argv+=['--with-doctest']
@@ -122,6 +141,7 @@
         ''' Run benchmarks for module using nose
 
         %(test_header)s'''
+        nose = import_nose()
         argv = self._test_argv(label, verbose, extra_argv)
         argv += ['--match', r'(?:^|[\\b_\\.%s-])[Bb]ench' % os.sep]
         nose.run(argv=argv)

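import_nose folds the version gate formerly in pkgtester.py (deleted below) into a lazy
check. The acceptance logic rejects only releases below 0.10:

    def too_old(versioninfo):
        # mirrors import_nose: fail only when the major version is below 1
        # AND the minor version is below 10, i.e. anything older than 0.10
        return versioninfo[0] < 1 and versioninfo[1] < 10

    assert too_old((0, 9, 2))
    assert not too_old((0, 10, 4))
    assert not too_old((1, 0, 0))
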
Deleted: branches/refactor_fft/scipy/testing/nulltester.py
===================================================================
--- branches/refactor_fft/scipy/testing/nulltester.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/testing/nulltester.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -1,15 +0,0 @@
-''' Null tester to signal nose tests disabled
-
-Merely returns error reporting lack of nose package or version number
-below requirements.
-
-See pkgtester, nosetester modules
-
-'''
-
-class NullTester(object):
-    def test(self, labels=None, *args, **kwargs):
-        raise ImportError, \
-              'Need nose >=0.10 for tests - see %s' % \
-              'http://somethingaboutorange.com/mrl/projects/nose'
-    bench = test

Modified: branches/refactor_fft/scipy/testing/pkgtester.py
===================================================================
--- branches/refactor_fft/scipy/testing/pkgtester.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/testing/pkgtester.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -11,17 +11,4 @@
 See nosetester module for test implementation
 
 '''
-fine_nose = True
-try:
-    import nose
-except ImportError:
-    fine_nose = False
-else:
-    nose_version = nose.__versioninfo__
-    if nose_version[0] < 1 and nose_version[1] < 10:
-        fine_nose = False
-
-if fine_nose:
-    from scipy.testing.nosetester import NoseTester as Tester
-else:
-    from scipy.testing.nulltester import NullTester as Tester
+from scipy.testing.nosetester import NoseTester as Tester

Modified: branches/refactor_fft/scipy/weave/tests/test_wx_spec.py
===================================================================
--- branches/refactor_fft/scipy/weave/tests/test_wx_spec.py	2008-07-01 00:46:27 UTC (rev 4510)
+++ branches/refactor_fft/scipy/weave/tests/test_wx_spec.py	2008-07-01 04:52:00 UTC (rev 4511)
@@ -10,15 +10,25 @@
 
 from scipy.testing import *
 
-from scipy.weave import ext_tools, wx_spec
+e = None
+DONOTRUN = False
+try:
+    from scipy.weave import ext_tools, wx_spec
+    import wx
+except ImportError, e:
+    wx = None
+    DONOTRUN = True
+except RuntimeError, e:
+    wx = None
+    DONOTRUN = True
 
+skip = dec.skipif(DONOTRUN, "(error was %s)" % str(e))
 
-import wx
-
 class TestWxConverter(TestCase):
     def setUp(self):
-        self.app = wx.App()
-        self.s = wx_spec.wx_converter()
+        if not DONOTRUN:
+            self.app = wx.App()
+            self.s = wx_spec.wx_converter()
 
     @dec.slow
     def test_type_match_string(self):
@@ -107,5 +117,7 @@
         c = wx_return.test(b)
         assert(c == 'hello')
 
+decorate_methods(TestWxConverter, skip)
+
 if __name__ == "__main__":
     nose.run(argv=['', __file__])

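decorate_methods applies the skip decorator to every test method of the class in one pass,
so the whole wx suite is skipped when wx or weave fails to import. A minimal sketch of what
such a helper does (the real helper comes from the testing utilities; its exact signature is
assumed here):

    import re

    def decorate_methods(cls, decorator, testmatch=re.compile(r'^test')):
        # wrap each matching method in place so every test picks up the skip
        for name, attr in list(vars(cls).items()):
            if callable(attr) and testmatch.search(name):
                setattr(cls, name, decorator(attr))
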


