From scipy-svn at scipy.org Wed Apr 4 04:37:00 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 4 Apr 2007 03:37:00 -0500 (CDT) Subject: [Scipy-svn] r2896 - trunk/Lib/linsolve/umfpack Message-ID: <20070404083700.5F9BD39C013@new.scipy.org> Author: rc Date: 2007-04-04 03:36:55 -0500 (Wed, 04 Apr 2007) New Revision: 2896 Modified: trunk/Lib/linsolve/umfpack/umfpack.i Log: fixed swig/python detected a memory leak of type 'void *'... Modified: trunk/Lib/linsolve/umfpack/umfpack.i =================================================================== --- trunk/Lib/linsolve/umfpack/umfpack.i 2007-04-01 03:44:55 UTC (rev 2895) +++ trunk/Lib/linsolve/umfpack/umfpack.i 2007-04-04 08:36:55 UTC (rev 2896) @@ -117,6 +117,7 @@ return NULL; \ } \ $1 = (double *) obj->data; \ + Py_DECREF( obj ); \ }; /*! @@ -130,7 +131,7 @@ }; \ %typemap( argout ) ttype* opaque_argout { \ PyObject *obj; \ - obj = SWIG_NewPointerObj( (ttype) (*$1), $*1_descriptor, 1 ); \ + obj = SWIG_NewPointerObj( (ttype) (*$1), $*1_descriptor, 0 ); \ $result = helper_appendToTuple( $result, obj ); \ }; @@ -146,7 +147,7 @@ }; \ %typemap( argout ) ttype* opaque_arginout { \ PyObject *obj; \ - obj = SWIG_NewPointerObj( (ttype) (*$1), $*1_descriptor, 1 ); \ + obj = SWIG_NewPointerObj( (ttype) (*$1), $*1_descriptor, 0 ); \ $result = helper_appendToTuple( $result, obj ); \ }; From scipy-svn at scipy.org Wed Apr 4 13:27:43 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 4 Apr 2007 12:27:43 -0500 (CDT) Subject: [Scipy-svn] r2897 - trunk/Lib Message-ID: <20070404172743.64572C7C042@new.scipy.org> Author: rkern Date: 2007-04-04 12:27:42 -0500 (Wed, 04 Apr 2007) New Revision: 2897 Modified: trunk/Lib/version.py Log: Allow for change in numpy r3660 to not generate __svn_version__.py when outside of an SVN checkout. Modified: trunk/Lib/version.py =================================================================== --- trunk/Lib/version.py 2007-04-04 08:36:55 UTC (rev 2896) +++ trunk/Lib/version.py 2007-04-04 17:27:42 UTC (rev 2897) @@ -3,6 +3,7 @@ if not release: import os + version += '.dev' svn_version_file = os.path.join(os.path.dirname(__file__), '__svn_version__.py') @@ -12,4 +13,4 @@ open(svn_version_file), svn_version_file, ('.py','U',1)) - version += '.dev'+svn.version + version += svn.version From scipy-svn at scipy.org Thu Apr 5 14:41:58 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 5 Apr 2007 13:41:58 -0500 (CDT) Subject: [Scipy-svn] r2898 - trunk/Lib/sparse Message-ID: <20070405184158.894C839C0F6@new.scipy.org> Author: wnbell Date: 2007-04-05 13:41:56 -0500 (Thu, 05 Apr 2007) New Revision: 2898 Modified: trunk/Lib/sparse/sparse.py Log: faster implementation of spidentity() Modified: trunk/Lib/sparse/sparse.py =================================================================== --- trunk/Lib/sparse/sparse.py 2007-04-04 17:27:42 UTC (rev 2897) +++ trunk/Lib/sparse/sparse.py 2007-04-05 18:41:56 UTC (rev 2898) @@ -2620,8 +2620,7 @@ spidentity( n ) returns the identity matrix of shape (n, n) stored in CSC sparse matrix format.
""" - diags = ones( (1, n), dtype = dtype ) - return spdiags( diags, 0, n, n ) + return csc_matrix((ones(n,dtype=dtype),arange(n),arange(n+1)),(n,n)) def speye(n, m, k = 0, dtype = 'd'): From scipy-svn at scipy.org Thu Apr 5 15:53:31 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 5 Apr 2007 14:53:31 -0500 (CDT) Subject: [Scipy-svn] r2899 - trunk/Lib/sandbox/timeseries/plotlib Message-ID: <20070405195331.4E48539C061@new.scipy.org> Author: mattknox_ca Date: 2007-04-05 14:53:28 -0500 (Thu, 05 Apr 2007) New Revision: 2899 Modified: trunk/Lib/sandbox/timeseries/plotlib/mpl_timeseries.py Log: series arg for tsfigure function defaults to None Modified: trunk/Lib/sandbox/timeseries/plotlib/mpl_timeseries.py =================================================================== --- trunk/Lib/sandbox/timeseries/plotlib/mpl_timeseries.py 2007-04-05 18:41:56 UTC (rev 2898) +++ trunk/Lib/sandbox/timeseries/plotlib/mpl_timeseries.py 2007-04-05 19:53:28 UTC (rev 2899) @@ -839,7 +839,7 @@ add_plot = add_tsplot TSFigure = TimeSeriesFigure #................................................ -def tsfigure(series, **figargs): +def tsfigure(series=None, **figargs): """Creates a new `TimeSeriesFigure` object. :Parameters: From scipy-svn at scipy.org Thu Apr 5 18:10:05 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 5 Apr 2007 17:10:05 -0500 (CDT) Subject: [Scipy-svn] r2900 - in trunk/Lib/sandbox/maskedarray: . tests Message-ID: <20070405221005.9159D39C123@new.scipy.org> Author: pierregm Date: 2007-04-05 17:10:01 -0500 (Thu, 05 Apr 2007) New Revision: 2900 Modified: trunk/Lib/sandbox/maskedarray/core.py trunk/Lib/sandbox/maskedarray/tests/test_core.py Log: core: enable to set mask=False on a scalar array Modified: trunk/Lib/sandbox/maskedarray/core.py =================================================================== --- trunk/Lib/sandbox/maskedarray/core.py 2007-04-05 19:53:28 UTC (rev 2899) +++ trunk/Lib/sandbox/maskedarray/core.py 2007-04-05 22:10:01 UTC (rev 2900) @@ -1307,8 +1307,11 @@ if m is nomask: res = self._data else: - if m.shape == () and m: - return str(f) + if m.shape == (): + if m: + return str(f) + else: + return str(self._data) # convert to object array to make filled work #CHECK: the two lines below seem more robust than the self._data.astype # res = numeric.empty(self._data.shape, object_) @@ -2640,33 +2643,3 @@ y = x + masked assert_equal(y.shape, x.shape) assert_equal(y._mask, [True, True]) -# if 0: -# a = arange(10) -# a[::3] = masked -# a.fill_value = 999 -# a_pickled = cPickle.loads(a.dumps()) -# assert_equal(a_pickled._mask, a._mask) -# assert_equal(a_pickled._data, a._data) -# assert_equal(a_pickled.fill_value, 999) -# # -# a = array(numpy.matrix(range(10)), mask=[1,0,1,0,0]*2) -# a_pickled = cPickle.loads(a.dumps()) -# assert_equal(a_pickled._mask, a._mask) -# assert_equal(a_pickled, a) -# assert(isinstance(a_pickled._data,numpy.matrix)) -# # -# -# # -# if 1: -# x = marray(numpy.linspace(-1.,1.,31),) -# x[:10] = x[-10:] = masked -# z = marray(numpy.empty((len(x),3), dtype=numpy.float_)) -# z[:,0] = x[:] -# for i in range(1,3): -# idx = numpy.arange(len(x)) -# numpy.random.shuffle(idx) -# z[:,i] = x[idx] -# # -# z.sort(0) -# - Modified: trunk/Lib/sandbox/maskedarray/tests/test_core.py =================================================================== --- trunk/Lib/sandbox/maskedarray/tests/test_core.py 2007-04-05 19:53:28 UTC (rev 2899) +++ trunk/Lib/sandbox/maskedarray/tests/test_core.py 2007-04-05 22:10:01 UTC (rev 2900) @@ -612,6 +612,15 @@ 
#self.failUnlessRaises(Exception, lambda x,y: x+y, masked, xx) #self.failUnlessRaises(Exception, lambda x,y: x+y, xx, masked) #........................ + def check_scalar(self): + "Checks masking a scalar" + x = masked_array(0) + assert_equal(str(x), '0') + x = masked_array(0,mask=True) + assert_equal(str(x), str(masked_print_option)) + x = masked_array(0, mask=False) + assert_equal(str(x), '0') + #........................ def check_usingmasked(self): "Checks that there's no collapsing to masked" x = masked_array([1,2]) From scipy-svn at scipy.org Sun Apr 8 11:27:47 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sun, 8 Apr 2007 10:27:47 -0500 (CDT) Subject: [Scipy-svn] r2901 - trunk/Lib/optimize Message-ID: <20070408152747.6DE0639C041@new.scipy.org> Author: cookedm Date: 2007-04-08 10:27:44 -0500 (Sun, 08 Apr 2007) New Revision: 2901 Modified: trunk/Lib/optimize/__minpack.h Log: fix double DECREF in Lib/optimize/__minpack.h. AJennings, #392 Modified: trunk/Lib/optimize/__minpack.h =================================================================== --- trunk/Lib/optimize/__minpack.h 2007-04-05 22:10:01 UTC (rev 2900) +++ trunk/Lib/optimize/__minpack.h 2007-04-08 15:27:44 UTC (rev 2901) @@ -585,7 +585,6 @@ else { Py_DECREF(ap_fvec); Py_DECREF(ap_fjac); - Py_DECREF(ap_diag); Py_DECREF(ap_ipvt); Py_DECREF(ap_qtf); return Py_BuildValue("Ni",PyArray_Return(ap_x),info); From scipy-svn at scipy.org Tue Apr 10 18:06:18 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 10 Apr 2007 17:06:18 -0500 (CDT) Subject: [Scipy-svn] r2902 - in trunk/Lib/sandbox: rbf rbf/tests spline spline/tests Message-ID: <20070410220618.F1DCB39C044@new.scipy.org> Author: jtravs Date: 2007-04-10 17:04:33 -0500 (Tue, 10 Apr 2007) New Revision: 2902 Added: trunk/Lib/sandbox/rbf/tests/example1.py trunk/Lib/sandbox/rbf/tests/example2.py Removed: trunk/Lib/sandbox/rbf/tests/example.py Modified: trunk/Lib/sandbox/rbf/rbf.py trunk/Lib/sandbox/rbf/tests/test_rbf.py trunk/Lib/sandbox/spline/fitpack.py trunk/Lib/sandbox/spline/fitpack.pyf trunk/Lib/sandbox/spline/tests/dierckx_test_data.py trunk/Lib/sandbox/spline/tests/test_fitpack.py Log: Changes to spline/rbf sandbox packages. New unit tests. Modified: trunk/Lib/sandbox/rbf/rbf.py =================================================================== --- trunk/Lib/sandbox/rbf/rbf.py 2007-04-08 15:27:44 UTC (rev 2901) +++ trunk/Lib/sandbox/rbf/rbf.py 2007-04-10 22:04:33 UTC (rev 2902) @@ -1,9 +1,9 @@ #!/usr/bin/env python -""" -rbf - Radial basis functions for interpolation/smoothing scattered Nd data. +"""rbf - Radial basis functions for interpolation/smoothing scattered Nd data. Written by John Travers , February 2007 Based closely on Matlab code by Alex Chirokov +Additional, large, improvements by Robert Hetland Permission to use, modify, and distribute this software is given under the terms of the SciPy (BSD style) license. See LICENSE.txt that came with @@ -11,116 +11,141 @@ NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+Copyright (c) 2006-2007, Robert Hetland +Copyright (c) 2007, John Travers + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of Robert Hetland nor the names of any + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ -import scipy as s +from numpy import sqrt, log, asarray, newaxis, all, dot, float64, eye import scipy.linalg class Rbf(object): """ A class for radial basis function approximation/interpolation of n-dimensional scattered data. """ - def __init__(self,x,y, function='multiquadrics', constant=None, smooth=0): + + def _euclidean_norm(self, x1, x2): + return sqrt( ((x1 - x2)**2).sum(axis=0) ) + + def _function(self, r): + if self.function.lower() == 'multiquadric': + return sqrt((self.epsilon*r)**2 + 1) + elif self.function.lower() == 'inverse multiquadric': + return 1.0/sqrt((self.epsilon*r)**2 + 1) + elif self.function.lower() == 'gausian': + return exp(-(self.epsilon*r)**2) + elif self.function.lower() == 'cubic': + return r**3 + elif self.function.lower() == 'quintic': + return r**5 + elif self.function.lower() == 'thin-plate': + return r**2 * log(r) + else: + raise ValueError, 'Invalid basis function name' + + def __init__(self, *args, **kwargs): """ Constructor for Rbf class. - + Inputs: - x (dim, n) array of coordinates for the nodes - y (n,) array of values at the nodes - function the radial basis function - 'linear', 'cubic' 'thinplate', 'multiquadrics' - or 'gaussian', default is 'multiquadrics' - constant adjustable constant for gaussian or multiquadrics + x, y, z, ..., d + Where x, y, z, ... are the coordinates of the nodes + and d is the array of values at the nodes + + function the radial basis function, based on the radius, r, given + by the norm (defult is Euclidean distance); the default + is 'multiquadratic'. + + 'multiquadric': sqrt((self.epsilon*r)**2 + 1) + 'inverse multiquadric': 1.0/sqrt((self.epsilon*r)**2 + 1) + 'gausian': exp(-(self.epsilon*r)**2) + 'cubic': r**3 + 'quintic': r**5 + 'thin-plate': r**2 * log(r) + + epsilon adjustable constant for gaussian or multiquadrics functions - defaults to approximate average distance between nodes (which is a good start) + smooth values greater than zero increase the smoothness of the approximation. 
0 is for interpolation (default), the function will always go through the nodal points in this case. + + norm A function that returns the 'distance' between two points, + with inputs as arrays of positions (x, y, z, ...), and an + output as an array of distance. E.g, the default is + + def euclidean_norm(self, x1, x2): + return sqrt( ((x1 - x2)**2).sum(axis=0) ) + + which is called with x1 = x1[ndims, newaxis, :] and + x2 = x2[ndims, :, newaxis] such that the result is a + symetric, square matrix of the distances between each point + to each other point. + + Outputs: + Interpolator object rbfi that returns interpolated values at new positions: + >>> rbfi = Rbf(x, y, z, d) # radial basis function interpolator instance + >>> di = rbfi(xi, yi, zi) # interpolated values + """ + self.xi = asarray([asarray(a, dtype=float64).flatten() for a in args[:-1]]) + self.N = self.xi.shape[-1] + self.di = asarray(args[-1], dtype=float64).flatten() + + assert [x.size==self.di.size for x in self.xi], \ + 'All arrays must be equal length' + + self.norm = kwargs.pop('norm', self._euclidean_norm) + r = self._call_norm(self.xi, self.xi) + self.epsilon = kwargs.pop('epsilon', r.mean()) + self.function = kwargs.pop('function', 'multiquadric') + self.smooth = kwargs.pop('smooth', 0.0) + + self.A = self._function(r) - eye(self.N)*self.smooth + self.nodes = scipy.linalg.solve(self.A, self.di) + + def _call_norm(self, x1, x2): + if len(x1.shape) == 1: + x1 = x1[newaxis, :] + if len(x2.shape) == 1: + x2 = x2[newaxis, :] + x1 = x1[..., :, newaxis] + x2 = x2[..., newaxis, :] + return self.norm(x1, x2) + + def __call__(self, *args): + assert all([x.shape == y.shape \ + for x in args \ + for y in args]), 'Array lengths must be equal' + shp = args[0].shape + self.xa = asarray([a.flatten() for a in args], dtype=float64) + r = self._call_norm(self.xa, self.xi) + return dot(self._function(r), self.nodes).reshape(shp) - Outputs: None - """ - if len(x.shape) == 1: - nxdim = 1 - nx = x.shape[0] - else: - (nxdim, nx)=x.shape - if len(y.shape) == 1: - nydim = 1 - ny = y.shape[0] - else: - (nydim, ny)=y.shape - x.shape = (nxdim, nx) - y.shape = (nydim, ny) - if nx != ny: - raise ValueError, 'x and y should have the same number of points' - if nydim != 1: - raise ValueError, 'y should be a length n vector' - self.x = x - self.y = y - self.function = function - if (constant==None - and ((function == 'multiquadrics') or (function == 'gaussian'))): - # approx. average distance between the nodes - constant = (s.product(x.T.max(0)-x.T.min(0),axis=0)/nx)**(1/nxdim) - self.constant = constant - self.smooth = smooth - if self.function == 'linear': - self.phi = lambda r: r - elif self.function == 'cubic': - self.phi = lambda r: r*r*r - elif self.function == 'multiquadrics': - self.phi = lambda r: s.sqrt(1.0+r*r/(self.constant*self.constant)) - elif self.function == 'thinplate': - self.phi = lambda r: r*r*s.log(r+1) - elif self.function == 'gaussian': - self.phi = lambda r: s.exp(-0.5*r*r/(self.rbfconst*self.constant)) - else: - raise ValueError, 'unkown function' - A = self._rbf_assemble() - b=s.r_[y.T, s.zeros((nxdim+1, 1), float)] - self.coeff = s.linalg.solve(A,b) - - def __call__(self, xi): - """ Evaluate the radial basis function approximation at points xi. 
- - Inputs: - xi (dim, n) array of coordinates for the points to evaluate at - - Outputs: - y (n,) array of values at the points xi - """ - if len(xi.shape) == 1: - nxidim = 1 - nxi = xi.shape[0] - else: - (nxidim, nxi)=xi.shape - xi.shape = (nxidim, nxi) - (nxdim, nx) = self.x.shape - if nxdim != nxidim: - raise ValueError, 'xi should have the same number of rows as an' \ - ' array used to create RBF interpolation' - f = s.zeros(nxi, float) - r = s.zeros(nx, float) - for i in range(nxi): - st=0.0 - r = s.dot(xi[:,i,s.newaxis],s.ones((1,nx))) - self.x - r = s.sqrt(sum(r*r)) - st = self.coeff[nx,:] + s.sum(self.coeff[0:nx,:].flatten()*self.phi(r)) - for k in range(nxdim): - st=st+self.coeff[k+nx+1,:]*xi[k,i] - f[i] = st - return f - - def _rbf_assemble(self): - (nxdim, nx)=self.x.shape - A=s.zeros((nx,nx), float) - for i in range(nx): - for j in range(i+1): - r=s.linalg.norm(self.x[:,i]-self.x[:,j]) - temp=self.phi(r) - A[i,j]=temp - A[j,i]=temp - A[i,i] = A[i,i] - self.smooth - P = s.c_[s.ones((nx,1), float), self.x.T] - A = s.r_[s.c_[A, P], s.c_[P.T, s.zeros((nxdim+1,nxdim+1), float)]] - return A Deleted: trunk/Lib/sandbox/rbf/tests/example.py =================================================================== --- trunk/Lib/sandbox/rbf/tests/example.py 2007-04-08 15:27:44 UTC (rev 2901) +++ trunk/Lib/sandbox/rbf/tests/example.py 2007-04-10 22:04:33 UTC (rev 2902) @@ -1,52 +0,0 @@ -import scipy as s -import scipy.interpolate - -from scipy.sandbox.rbf import Rbf - -import matplotlib -matplotlib.use('Agg') -import pylab as p - -# 1d tests - setup data -x = s.linspace(0,10,9) -y = s.sin(x) -xi = s.linspace(0,10,101) - -# use interpolate methods -ius = s.interpolate.InterpolatedUnivariateSpline(x,y) -yi = ius(xi) -p.subplot(2,1,1) -p.plot(x,y,'o',xi,yi, xi, s.sin(xi),'r') -p.title('Interpolation using current scipy fitpack2') - -# use RBF method -rbf = Rbf(x, y) -fi = rbf(xi) -p.subplot(2,1,2) -p.plot(x,y,'bo',xi.flatten(),fi.flatten(),'g',xi.flatten(), - s.sin(xi.flatten()),'r') -p.title('RBF interpolation - multiquadrics') -p.savefig('rbf1dtest.png') -p.close() - -# 2-d tests - setup scattered data -x = s.rand(50,1)*4-2 -y = s.rand(50,1)*4-2 -z = x*s.exp(-x**2-y**2) -ti = s.linspace(-2.0,2.0,81) -(XI,YI) = s.meshgrid(ti,ti) - -# use RBF -rbf = Rbf(s.c_[x.flatten(),y.flatten()].T,z.T,constant=2) -ZI = rbf(s.c_[XI.flatten(), YI.flatten()].T) -ZI.shape = XI.shape - -# plot the result -from enthought.tvtk.tools import mlab -f=mlab.figure(browser=False) -su=mlab.Surf(XI,YI,ZI,ZI,scalar_visibility=True) -f.add(su) -su.lut_type='blue-red' -f.objects[0].axis.z_label='value' -pp = mlab.Spheres(s.c_[x.flatten(), y.flatten(), z.flatten()],radius=0.03) -f.add(pp) \ No newline at end of file Copied: trunk/Lib/sandbox/rbf/tests/example1.py (from rev 2897, trunk/Lib/sandbox/rbf/tests/example.py) =================================================================== --- trunk/Lib/sandbox/rbf/tests/example.py 2007-04-04 17:27:42 UTC (rev 2897) +++ trunk/Lib/sandbox/rbf/tests/example1.py 2007-04-10 22:04:33 UTC (rev 2902) @@ -0,0 +1,50 @@ +import scipy as s +import scipy.interpolate + +from scipy.sandbox.rbf import Rbf + +import matplotlib +matplotlib.use('Agg') +import pylab as p + +# 1d tests - setup data +x = s.linspace(0,10,9) +y = s.sin(x) +xi = s.linspace(0,10,101) + +# use interpolate methods +ius = s.interpolate.InterpolatedUnivariateSpline(x,y) +yi = ius(xi) +p.subplot(2,1,1) +p.plot(x,y,'o',xi,yi, xi, s.sin(xi),'r') +p.title('Interpolation using current scipy fitpack2') + +# use RBF method +rbf = 
Rbf(x, y) +fi = rbf(xi) +p.subplot(2,1,2) +p.plot(x,y,'bo',xi,fi,'g',xi, s.sin(xi),'r') +p.title('RBF interpolation - multiquadrics') +p.show() + +# 2-d tests - setup scattered data +x = s.rand(50,1)*4-2 +y = s.rand(50,1)*4-2 +z = x*s.exp(-x**2-y**2) +ti = s.linspace(-2.0,2.0,81) +(XI,YI) = s.meshgrid(ti,ti) + +# use RBF +rbf = Rbf(x.flatten(),y.flatten(),z.flatten(),eps=2) +ZI = rbf(XI.flatten(), YI.flatten()) +ZI.shape = XI.shape + +# plot the result +from enthought.tvtk.tools import mlab +f=mlab.figure(browser=False) +su=mlab.Surf(XI,YI,ZI,ZI,scalar_visibility=True) +f.add(su) +su.lut_type='blue-red' +f.objects[0].axis.z_label='value' +pp = mlab.Spheres(s.c_[x.flatten(), y.flatten(), z.flatten()],radius=0.03) +f.add(pp) Added: trunk/Lib/sandbox/rbf/tests/example2.py =================================================================== --- trunk/Lib/sandbox/rbf/tests/example2.py 2007-04-08 15:27:44 UTC (rev 2901) +++ trunk/Lib/sandbox/rbf/tests/example2.py 2007-04-10 22:04:33 UTC (rev 2902) @@ -0,0 +1,47 @@ +from numpy import sin, asarray, exp, random, mgrid, pi, cos, sqrt, ones +from scipy.sandbox.rbf import Rbf +import pylab as pl + +def truth_2d(x,y,w=2*pi): + "moguls" + return sin(w*x)*cos(w*y) + +def truth_nd(*args): + "a gausian sphere" + x = asarray(list(args), 'float64') + return exp( -sqrt((x**2).sum(axis=0)) ) + +# 2D example +N = 300 +xi = random.rand(N) +yi = random.rand(N) +di = truth_2d(xi, yi) +xa, ya = mgrid[0:1:50j, 0:1:50j] +s = Rbf(xi, yi, di) +da = s(xa, ya) +pl.figure() +n = pl.normalize(-1., 1.) +pl.pcolor(xa, ya, da, norm=n, cmap=pl.cm.jet) +pl.scatter(xi, yi, 100, di, norm=n, cmap=pl.cm.jet) +pl.axis([0., 1., 0., 1.]) +pl.colorbar() +pl.draw() +# 3d example +N = 300 +xi = 2.*random.randn(N) +yi = 2.*random.randn(N) +zi = 2.*random.randn(N) +di = truth_nd(xi, yi, zi) +zas = [-0.25, 0.0, 0.25, 0.75] +xa, ya = mgrid[-1:1:50j, -1:1:50j] +s = Rbf(xi, yi, zi, di) +fig = pl.figure(figsize=(12, 3)) +for idx, za in enumerate(zas): + da = s(xa, ya, za*ones(xa.shape, 'f')) + ax = fig.add_subplot(1,4,idx+1) + ax.pcolor(xa, ya, da, norm=pl.normalize(0, 1), \ + shading='flat', cmap=pl.cm.jet) + ax.set_aspect('equal') + +pl.show() + Modified: trunk/Lib/sandbox/rbf/tests/test_rbf.py =================================================================== --- trunk/Lib/sandbox/rbf/tests/test_rbf.py 2007-04-08 15:27:44 UTC (rev 2901) +++ trunk/Lib/sandbox/rbf/tests/test_rbf.py 2007-04-10 22:04:33 UTC (rev 2902) @@ -1,9 +1,9 @@ #!/usr/bin/env python -# Created by John Travers, February 2007 +# Created by John Travers, Robert Hetland, 2007 """ Test functions for rbf module """ from numpy.testing import * -import numpy as n +from numpy import linspace, sin, random, exp set_package_path() from rbf.rbf import Rbf @@ -11,21 +11,32 @@ class test_Rbf1D(NumpyTestCase): def check_multiquadrics(self): - x = n.linspace(0,10,9) - y = n.sin(x) + x = linspace(0,10,9) + y = sin(x) rbf = Rbf(x, y) yi = rbf(x) - assert_array_almost_equal(y.flatten(), yi) + assert_array_almost_equal(y, yi) class test_Rbf2D(NumpyTestCase): def check_multiquadrics(self): - x = n.random.rand(50,1)*4-2 - y = n.random.rand(50,1)*4-2 - z = x*n.exp(-x**2-y**2) - rbf = Rbf(n.c_[x.flatten(),y.flatten()].T,z.T,constant=2) - zi = rbf(n.c_[x.flatten(), y.flatten()].T) + x = random.rand(50,1)*4-2 + y = random.rand(50,1)*4-2 + z = x*exp(-x**2-y**2) + rbf = Rbf(x, y, z ,epsilon=2) + zi = rbf(x, y) zi.shape = x.shape assert_array_almost_equal(z, zi) +class test_Rbf3D(NumpyTestCase): + def check_multiquadrics(self): + x = 
random.rand(50,1)*4-2 + y = random.rand(50,1)*4-2 + z = random.rand(50,1)*4-2 + d = x*exp(-x**2-y**2) + rbf = Rbf(x, y, z, d ,epsilon=2) + di = rbf(x, y, z) + di.shape = x.shape + assert_array_almost_equal(di, d) + if __name__ == "__main__": - NumpyTest().run() \ No newline at end of file + NumpyTest().run() Modified: trunk/Lib/sandbox/spline/fitpack.py =================================================================== --- trunk/Lib/sandbox/spline/fitpack.py 2007-04-08 15:27:44 UTC (rev 2901) +++ trunk/Lib/sandbox/spline/fitpack.py 2007-04-10 22:04:33 UTC (rev 2902) @@ -25,8 +25,8 @@ $ python fitpack.py # run all available test programs TODO: Make interfaces to the following fitpack functions: - For univariate splines: cocosp, concon, fourco, insert - For bivariate splines: profil, regrid, parsur, surev + For univariate splines: cocosp, concon, fourco + For bivariate splines: profil, parsur, surev """ __all__ = ['splrep', 'splprep', 'splev', 'splint', 'sproot', 'spalde', @@ -219,27 +219,36 @@ if task==1: try: u=_parcur_cache['u'] - ub=_parcur_cache['ub'] - ue=_parcur_cache['ue'] t=_parcur_cache['t'] wrk=_parcur_cache['wrk'] iwrk=_parcur_cache['iwrk'] n=_parcur_cache['n'] + if not per: + ub=_parcur_cache['ub'] + ue=_parcur_cache['ue'] except KeyError: raise ValueError, 'task=1 can only be called after task=0' - u,ub,ue,n,t,c,fp,wrk,iwrk,ier=dfitpack.parcur_smth1(ipar,idim,u,x,w, - ub,ue,nest,n,t,wrk,iwrk,k=k,s=s) + if per: + u,n,t,c,fp,wrk,iwrk,ier=dfitpack.clocur_smth1(ipar,idim,u,x, + w,n,t,wrk,iwrk,k=k,s=s) + else: + u,ub,ue,n,t,c,fp,wrk,iwrk,ier=dfitpack.parcur_smth1(ipar,idim,u,x, + w,ub,ue,n,t,wrk,iwrk,k=k,s=s) if task==-1: - u,ub,ue,n,t,c,fp,ier=dfitpack.parcur_lsq(ipar,idim,u,x,w,ub,ue, - nest,n,t,k=k) + if per: + u,n,t,c,fp,ier=dfitpack.clocur_lsq(ipar,idim,u,x,w,n,t,k=k) + else: + u,ub,ue,n,t,c,fp,ier=dfitpack.parcur_lsq(ipar,idim,u,x,w,ub,ue, + n,t,k=k) if task>=0: _parcur_cache['n']=n _parcur_cache['u']=u - _parcur_cache['ub']=ub - _parcur_cache['ue']=ue _parcur_cache['t']=t _parcur_cache['wrk']=wrk _parcur_cache['iwrk']=iwrk + if not per: + _parcur_cache['ub']=ub + _parcur_cache['ue']=ue c = c[:n*idim] c.shape=idim,n c = c[:,:n-k-1] @@ -379,7 +388,7 @@ n,t,c,fp,wrk,iwrk,ier = dfitpack.percur_smth1(x,y,w,n,t,wrk,iwrk, k=k,s=s) elif task==-1: - n,t,c,fp,ier = dfitpack.percur_lsq(x,y,w,n,t,k=k) + n,t,c,fp,ier = dfitpack.percur_lsq(x,y,w,t,k=k) if task>=0: _percur_cache['t']=t _percur_cache['wrk']=wrk Modified: trunk/Lib/sandbox/spline/fitpack.pyf =================================================================== --- trunk/Lib/sandbox/spline/fitpack.pyf 2007-04-08 15:27:44 UTC (rev 2901) +++ trunk/Lib/sandbox/spline/fitpack.pyf 2007-04-10 22:04:33 UTC (rev 2902) @@ -147,7 +147,7 @@ integer optional,check(1<=k && k <=5),intent(in) :: k=3 real*8 intent(hide),check(s>=0.0) :: s = 0.0 integer intent(hide),depend(m,k) :: nest=m+2*k - integer intent(in,out) :: n + integer intent(out),depend(t) :: n=len(t) real*8 dimension(n),intent(in,out) :: t real*8 dimension(n),intent(out) :: c real*8 intent(out) :: fp @@ -167,10 +167,10 @@ real*8 dimension(m),depend(m),check(len(w)==m) :: w integer optional,check(1<=k && k <=5),intent(in) :: k=3 real*8 optional,check(s>=0.0) :: s = 0.0 - integer intent(hide),depend(m,k) :: nest=m+2*k - integer intent(in,out),depend(nest) :: n=nest + integer intent(hide),depend(t) :: nest=len(t) + integer intent(in,out) :: n real*8 dimension(nest),intent(in,out) :: t - real*8 dimension(n),intent(out) :: c + real*8 dimension(nest),intent(out) :: c real*8 intent(out) 
:: fp real*8 dimension(lwrk),intent(in,out) :: wrk integer intent(hide),depend(m,k,nest) :: lwrk=m*(k+1)+nest*(8+5*k) @@ -189,9 +189,9 @@ integer optional,check(1<=k && k <=5),intent(in) :: k=3 real*8 optional,check(s>=0.0) :: s = 0.0 integer intent(hide),depend(m,k) :: nest=m+2*k - integer intent(out),depend(nest) :: n=nest + integer intent(out) :: n real*8 dimension(nest),intent(out) :: t - real*8 dimension(n),intent(out) :: c + real*8 dimension(nest),intent(out) :: c real*8 intent(out) :: fp real*8 dimension(lwrk),intent(out) :: wrk integer intent(hide),depend(m,k,nest) :: lwrk=m*(k+1)+nest*(8+5*k) @@ -215,15 +215,15 @@ real*8 intent(in,out) :: ue integer check(1<=k && k<=5) :: k=3.0 real*8 intent(hide),check(s>=0.0) :: s = 0.0 - integer intent(in) :: nest + integer intent(hide) :: nest=n integer intent(in,out) :: n real*8 dimension(nest), intent(in,out) :: t integer intent(hide), depend(nest,idim) :: nc=idim*nest real*8 dimension(nc), intent(out) :: c real*8 intent(out) :: fp - real*8 dimension(lwrk), intent(cache) :: wrk + real*8 dimension(lwrk), intent(hide,cache) :: wrk integer intent(hide),depend(m,k,nest,idim) :: lwrk=m*(k+1)+nest*(6+idim+3*k) - integer dimension(nest), intent(cache) :: iwrk + integer dimension(nest), intent(hide,cache) :: iwrk integer intent(out) :: ier end subroutine parcur_lsq @@ -256,6 +256,59 @@ integer intent(out) :: ier end subroutine parcur_smth1 + subroutine parcur_smth0(iopt,ipar,idim,m,u,mx,x,w,ub,ue,k,s,nest,n,t,nc,& + c,fp,wrk,lwrk,iwrk,ier) + fortranname parcur + integer intent(hide) :: iopt = 0 + integer check(ipar == 1 || ipar == 0) :: ipar + integer check(idim > 0 && idim < 11) :: idim + integer intent(hide),depend(u,k),check(m>k) :: m=len(u) + real*8 dimension(m), intent(in,out) :: u + integer intent(hide),depend(x,idim,m),check(mx>=idim*m) :: mx=len(x) + real*8 dimension(mx) :: x + real*8 dimension(m) :: w + real*8 intent(in,out) :: ub + real*8 intent(in,out) :: ue + integer check(1<=k && k<=5) :: k=3.0 + real*8 check(s>=0.0) :: s = 0.0 + integer intent(in) :: nest + integer intent(out) :: n + real*8 dimension(nest), intent(out) :: t + integer intent(hide), depend(nest,idim) :: nc=idim*nest + real*8 dimension(nc), intent(out) :: c + real*8 intent(out) :: fp + real*8 dimension(lwrk), intent(out) :: wrk + integer intent(hide),depend(m,k,nest,idim) :: lwrk=m*(k+1)+nest*(6+idim+3*k) + integer dimension(nest), intent(out) :: iwrk + integer intent(out) :: ier + end subroutine parcur_smth0 + + subroutine clocur_lsq(iopt,ipar,idim,m,u,mx,x,w,k,s,nest,n,t,nc,c,fp,& + wrk,lwrk,iwrk,ier) + !u,n,t,c,fp,wrk,iwrk,ier=clocur_lsq(ipar,idim,u,x,w,nest,n,t,[k,s]) + fortranname clocur + integer intent(hide) :: iopt = -1 + integer check(ipar == 1 || ipar == 0) :: ipar + integer check(idim > 0 && idim < 11) :: idim + integer intent(hide),depend(u,k),check(m>k) :: m=len(u) + real*8 dimension(m), intent(in,out) :: u + integer intent(hide),depend(x,idim,m),check(mx>=idim*m) :: mx=len(x) + real*8 dimension(mx) :: x + real*8 dimension(m) :: w + integer check(1<=k && k<=5) :: k=3.0 + real*8 intent(hide) :: s = 0.0 + integer intent(hide) :: nest=n + integer intent(in,out) :: n + real*8 dimension(nest), intent(in,out) :: t + integer intent(hide), depend(nest,idim) :: nc=idim*nest + real*8 dimension(nc), intent(out) :: c + real*8 intent(out) :: fp + real*8 dimension(lwrk), intent(cache,hide) :: wrk + integer intent(hide),depend(m,k,nest,idim) :: lwrk=m*(k+1)+nest*(7+idim+5*k) + integer dimension(nest), intent(cache,hide) :: iwrk + integer intent(out) :: ier + end subroutine 
clocur_lsq + subroutine clocur_smth0(iopt,ipar,idim,m,u,mx,x,w,k,s,nest,n,t,nc,c,fp,& wrk,lwrk,iwrk,ier) !u,n,t,c,fp,wrk,iwrk,ier=clocur_smth0(ipar,idim,u,x,w,nest,[k,s]) @@ -282,10 +335,12 @@ integer intent(out) :: ier end subroutine clocur_smth0 - subroutine parcur_smth0(iopt,ipar,idim,m,u,mx,x,w,ub,ue,k,s,nest,n,t,nc,& - c,fp,wrk,lwrk,iwrk,ier) - fortranname parcur - integer intent(hide) :: iopt = 0 + subroutine clocur_smth1(iopt,ipar,idim,m,u,mx,x,w,k,s,nest,n,t,nc,c,fp,& + wrk,lwrk,iwrk,ier) + !u,n,t,c,fp,wrk,iwrk,ier=clocur_smth1(ipar,idim,u,x,w,nest, + ! n,t,wrk,iwrk,[k,s]) + fortranname clocur + integer intent(hide) :: iopt = 1 integer check(ipar == 1 || ipar == 0) :: ipar integer check(idim > 0 && idim < 11) :: idim integer intent(hide),depend(u,k),check(m>k) :: m=len(u) @@ -293,21 +348,19 @@ integer intent(hide),depend(x,idim,m),check(mx>=idim*m) :: mx=len(x) real*8 dimension(mx) :: x real*8 dimension(m) :: w - real*8 intent(in,out) :: ub - real*8 intent(in,out) :: ue integer check(1<=k && k<=5) :: k=3.0 real*8 check(s>=0.0) :: s = 0.0 integer intent(in) :: nest - integer intent(out) :: n - real*8 dimension(nest), intent(out) :: t + integer intent(in,out) :: n + real*8 dimension(nest), intent(in,out) :: t integer intent(hide), depend(nest,idim) :: nc=idim*nest real*8 dimension(nc), intent(out) :: c real*8 intent(out) :: fp - real*8 dimension(lwrk), intent(out) :: wrk - integer intent(hide),depend(m,k,nest,idim) :: lwrk=m*(k+1)+nest*(6+idim+3*k) - integer dimension(nest), intent(out) :: iwrk + real*8 dimension(lwrk), intent(in,out) :: wrk + integer intent(hide),depend(m,k,nest,idim) :: lwrk=m*(k+1)+nest*(7+idim+5*k) + integer dimension(nest), intent(in,out) :: iwrk integer intent(out) :: ier - end subroutine parcur_smth0 + end subroutine clocur_smth1 subroutine insert(iopt,t,n,c,k,x,tt,nn,cc,nest,ier) ! 
tt, nn, cc, ier = insert(per, t, c, k, x, nest) Modified: trunk/Lib/sandbox/spline/tests/dierckx_test_data.py =================================================================== --- trunk/Lib/sandbox/spline/tests/dierckx_test_data.py 2007-04-08 15:27:44 UTC (rev 2901) +++ trunk/Lib/sandbox/spline/tests/dierckx_test_data.py 2007-04-10 22:04:33 UTC (rev 2902) @@ -92,6 +92,460 @@ 3.0, 3.0, 2.4, 4.2, 3.5])] } +percur_test = { +'x' : array([0.0,3.922,7.843,11.765,15.686, + 19.608,23.509,27.451,31.373,35.294,39.216,43.137,47.059,50.980, + 54.902,58.824,62.745,66.667,70.588,74.510,78.431,82.353,86.275, + 90.196,94.118,98.039, 100.0]), +'y' : array([10.099,14.835,21.453,25.022,22.427, + 22.315,22.070,19.673,16.754,13.983,11.973,12.286,16.129,21.560, + 28.041,39.205,59.489,72.559,75.960,79.137,75.925,68.809,55.758, + 39.915,22.006,12.076, 10.099]), +'k' : [3, 3, 3, 3, 3, 3, 3, 5, 5, 5, 5, 5, 5], +'iopt' : [0, 1, 1, 1, 0, 0, -1, 0, 1, 1, 1, 0, 0, -1], +'s' : [65000.0, 500.0, 5.0, 20.0, 20.0, 0.0, -1.0, 65000.0, 500.0, 5.0, + 20.0, 20.0, 0.0, -1], +'res' : [0.140221E+05,0.500033E+03,0.499936E+01,0.200010E+02,0.199981E+02, + 0.000000E+00,0.638916E+02,0.140221E+05,0.499538E+03,0.500125E+01, + 0.200032E+02, 0.199998E+02, 0.000000E+00, 0.627153E+02], +'err' : [-2, 0, 0, 0, 0, -1, 0, -2, 0, 0, 0, 0, -1, 0], +'knots' : [[-300.000, -200.000, -100.000, 0.000, 100.000, 200.000, + 300.000, 400.000], + [-49.020, -33.333, -21.569, 0.000, 15.686, 27.451, + 50.980, 66.667, 78.431, 100.000, 115.686, 127.451, 150.980], + [-21.569, -9.804, -1.961, 0.000, 7.843, 15.686, 27.451, + 39.216, 50.980, 58.824, 62.745, 66.667, 70.588, 74.510, + 78.431, 90.196, 98.039, 100.000, 107.843, 115.686, 127.451], + [-21.569, -9.804, -1.961, 0.000, 7.843, 15.686, 27.451, + 39.216, 50.980, 58.824, 62.745, 66.667, 70.588, 74.510, + 78.431, 90.196, 98.039, 100.000, 107.843, 115.686, 127.451], + [-33.333, -21.569, -9.804, 0.000, 7.843, 15.686, 27.451, + 39.216, 50.980, 58.824, 62.745, 66.667, 78.431, 90.196, + 100.000, 107.843, 115.686, 127.451], + [-9.804, -5.882, -1.961, 0.000, 3.922, 7.843, 11.765, + 15.686, 19.608, 23.509, 27.451, 31.373, 35.294, 39.216, + 43.137, 47.059, 50.980, 54.902, 58.824, 62.745, 66.667, + 70.588, 74.510, 78.431, 82.353, 86.275, 90.196, 94.118, + 98.039, 100.000, 103.922, 107.843, 111.765], + [-30.000, -20.000, -10.000, 0.000, 10.000, 20.000, 30.000, + 40.000, 50.000, 60.000, 70.000, 80.000, 90.000, 100.000, + 110.000, 120.000, 130.000], + [-500.000, -400.000, -300.000, -200.000, -100.000, 0.000, 100.000, + 200.000, 300.000, 400.000, 500.000, 600.000], + [-100.000, -72.549, -49.020, -33.333, -21.569, 0.000, 27.451, + 50.980, 66.667, 78.431, 100.000, 127.451, 150.980, 166.667, + 178.431, 200.000], + [-33.333, -29.412, -25.490, -21.569, -9.804, 0.000, 7.843, + 15.686, 27.451, 39.216, 50.980, 54.902, 58.824, 62.745, + 66.667, 70.588, 74.510, 78.431, 90.196, 100.000, 107.843, + 115.686, 127.451, 139.216, 150.980], + [-33.333, -29.412, -25.490, -21.569, -9.804, 0.000, 7.843, + 15.686, 27.451, 39.216, 50.980, 54.902, 58.824, 62.745, + 66.667, 70.588, 74.510, 78.431, 90.196, 100.000, 107.843, + 115.686, 127.451, 139.216, 150.980], + [-41.176, -37.255, -33.333, -21.569, -9.804, 0.000, 7.843, + 15.686, 27.451, 39.216, 50.980, 58.824, 62.745, 66.667, + 78.431, 90.196, 100.000, 107.843, 115.686, 127.451, 139.216, + 150.980], + [-17.647, -13.725, -9.804, -5.882, -1.961, 0.000, 3.922, + 7.843, 11.765, 15.686, 19.608, 23.509, 27.451, 31.373, + 35.294, 39.216, 43.137, 47.059, 50.980, 54.902, 58.824, + 62.745, 66.667, 70.588, 
74.510, 78.431, 82.353, 86.275, + 90.196, 94.118, 98.039, 100.000, 103.922, 107.843, 111.765, + 115.686, 119.608], + [-50.000, -40.000, -30.000, -20.000, -10.000, 0.000, 10.000, + 20.000, 30.000, 40.000, 50.000, 60.000, 70.000, 80.000, + 90.000, 100.000, 110.000, 120.000, 130.000, 140.000, 150.000]], +'coef' : [[33.8253, 33.8253, 33.8253, 33.8253], + [82.4859, 1.2603, 22.4815, 18.9314, 2.8969, 71.0974, 82.4859, + 1.2603, 22.4815], + [12.0792, 8.4592, 26.7900, 21.5705, 23.0754, 6.6585, 19.0962, + 30.3922, 60.6512, 74.6763, 75.6664, 79.8148, 75.9320, 47.5211, + 12.0792, 8.4592, 26.7900], + [13.3494, 7.8883, 25.8400, 23.0248, 21.3100, 8.9493, 15.7822, + 34.2918, 58.3060, 72.5457, 78.4441, 79.6678, 75.9053, 46.4422, + 13.3494, 7.8883, 25.8400], + [45.0851, -2.1518, 27.3155, 21.7801, 22.3697, 7.9200, 16.9739, + 33.1615, 59.0061, 80.3683, 83.5295, 45.0851, -2.1518, 27.3155], + [12.6861, 8.5744, 14.9941, 21.7397, 26.7676, 21.3221, 22.5031, + 22.5483, 19.6945, 16.7365, 13.8898, 11.6012, 11.5439, 15.9402, + 21.4673, 27.5536, 36.5639, 61.4126, 74.7277, 75.0275, 80.9237, + 76.0996, 70.2252, 55.8543, 40.9132, 19.9752, 12.6861, 8.5744, + 14.9941], + [41.9234, -3.2368, 32.0187, 19.5668, 20.2811, 9.2545, 14.9009, + 44.2629, 85.8093, 78.0205, 41.9234, -3.2368, 32.0187], + [33.8253, 33.8253, 33.8253, 33.8253, 33.8253, 33.8253], + [93.0061, 86.0013, -54.6819, 92.8215, -49.7800, 93.0061, 86.0013, + -54.6819, 92.8215, -49.7800], + [71.6700, 46.2404, -16.3198, 38.4125, 13.7980, 30.8425, 1.5483, + 15.9317, 23.2525, 35.9459, 61.8592, 75.7543, 75.6340, 81.9501, + 71.6700, 46.2404, -16.3198, 38.4125, 13.7980], + [72.7220, 44.8956, -14.3723, 36.0327, 16.7261, 27.3015, 4.5277, + 14.8756, 21.4604, 40.3602, 58.6143, 73.1526, 79.2995, 79.9220, + 72.7220, 44.8956, -14.3723, 36.0327, 16.7261], + [76.7467, 60.6977, -17.3393, 38.5959, 14.2538, 29.7441, 0.6054, + 18.3639, 23.376, 64.2515, 86.7496, 76.7467, 60.6977, -17.3393, + 38.5959, 14.2538], + [19.6912, 13.4825, 6.2608, 17.1249, 20.6067, 29.0827, 19.5270, + 23.2109, 22.6667, 19.6866, 16.7474, 13.8240, 11.5230, 10.9217, + 16.1154, 21.0846, 28.1517, 33.8236, 63.1731, 76.4907, 73.0356, + 83.3897, 74.8104, 72.2190, 54.5815, 43.0444, 19.6912, 13.4825, + 6.2608, 17.1249, 20.6067], + [76.0273, 49.0816, -19.3982, 45.0213, 11.9440, 25.8971, 4.7660, + 15.1631, 39.6956, 94.5539, 76.0273, 49.0816, -19.3982, 45.0213, + 11.9440]], +'sp' : [[ 33.825, 33.825, 33.825, 33.825, 33.825, 33.825, 33.825, +33.825, 33.825, 33.825, 33.825, 33.825, 33.825, 33.825, 33.825, +33.825, 33.825, 33.825, 33.825, 33.825, 33.825, 33.825, 33.825, +33.825, 33.825, 33.825, 33.825], +[ 17.610, 14.653, 15.155, 17.402, 19.674, 20.603, 20.196, 18.790, +16.816, 14.957, 13.966, 14.600, 17.612, 23.756, 33.347, 44.926, +56.587, 66.436, 72.800, 74.944, 72.364, 64.952, 54.201, 42.007, +30.257, 20.848, 17.610], +[ 10.154, 14.944, 21.971, 24.044, 23.063, 22.333, 21.684, 20.057, +16.896, 13.521, 11.727, 12.846, 16.349, 21.241, 27.607, 39.853, +59.205, 72.504, 76.193, 78.735, 76.496, 68.443, 55.748, 39.710, +22.700, 11.437, 10.154], +[ 10.229, 14.365, 21.449, 24.103, 23.577, 22.632, 21.458, 19.571, +16.798, 13.995, 12.290, 12.650, 15.388, 20.651, 28.832, 41.270, +57.676, 71.156, 77.665, 79.088, 76.371, 68.148, 55.324, 39.540, +23.223, 12.010, 10.229], +[ 9.743, 14.254, 21.991, 24.364, 23.262, 22.295, 21.465, 19.851, +16.958, 13.860, 12.026, 12.636, 15.698, 20.924, 28.550, 40.879, +57.911, 70.869, 77.996, 79.646, 76.068, 67.642, 55.253, 39.918, +23.566, 11.833, 9.743], +[ 10.099, 14.835, 21.453, 25.022, 22.427, 22.315, 
22.070, 19.673, +16.754, 13.983, 11.973, 12.286, 16.129, 21.560, 28.041, 39.205, +59.489, 72.559, 75.960, 79.137, 75.925, 68.809, 55.758, 39.915, +22.006, 12.076, 10.099], +[ 10.166, 13.120, 20.713, 25.393, 24.704, 22.001, 20.333, 19.326, +17.518, 14.652, 12.293, 12.046, 14.701, 20.681, 30.057, 42.212, +56.273, 69.600, 78.495, 80.503, 76.383, 67.397, 54.752, 39.615, +23.955, 12.571, 10.166], +[ 33.825, 33.825, 33.825, 33.825, 33.825, 33.825, 33.825, 33.825, +33.825, 33.825, 33.825, 33.825, 33.825, 33.825, 33.825, 33.825, +33.825, 33.825, 33.825, 33.825, 33.825, 33.825, 33.825, 33.825, +33.825, 33.825, 33.825], +[ 17.190, 13.224, 12.783, 14.954, 18.402, 21.679, 23.510, 23.130, +20.528, 16.633, 13.010, 11.437, 13.452, 19.920, 30.599, 43.944, +57.555, 68.874, 75.847, 77.450, 73.684, 65.403, 54.103, 41.641, +29.918, 20.589, 17.189], +[ 10.339, 14.586, 21.670, 24.495, 23.365, 21.980, 21.373, 20.137, +17.207, 13.625, 11.619, 12.650, 16.287, 21.320, 27.802, 39.806, +59.078, 72.423, 76.539, 78.499, 76.312, 68.654, 55.998, 39.403, +22.514, 11.778, 10.339], +[ 10.522, 14.367, 21.239, 24.418, 23.743, 22.313, 21.259, 19.717, +16.995, 13.900, 12.112, 12.761, 15.686, 20.527, 28.450, 41.495, +57.716, 71.117, 77.873, 78.930, 75.929, 68.453, 55.899, 39.343, +22.654, 12.043, 10.522], +[ 10.165, 14.367, 21.641, 24.642, 23.519, 21.989, 21.225, 19.973, +17.211, 13.835, 11.858, 12.622, 15.837, 20.810, 28.467, 41.222, +57.621, 71.020, 78.150, 79.291, 75.669, 68.002, 55.951, 39.793, +22.835, 11.746, 10.165], +[ 10.099, 14.835, 21.453, 25.022, 22.427, 22.315, 22.070, 19.673, +16.754, 13.983, 11.973, 12.286, 16.129, 21.560, 28.041, 39.205, +59.489, 72.559, 75.960, 79.137, 75.925, 68.809, 55.758, 39.915, +22.006, 12.076, 10.099], +[ 10.453, 13.154, 20.451, 25.336, 25.008, 22.093, 20.038, 19.155, +17.686, 14.895, 12.260, 11.866, 14.719, 20.778, 29.917, 42.121, +56.387, 69.779, 78.508, 80.427, 76.147, 67.406, 55.065, 39.607, +23.626, 12.578, 10.453]] +} + +parcur_test = { +'u' : array([120.,128.,133.,136.,138.,141.,144.,146.,149.,151.,154., + 161.,170.,180.,190.,200.,210.,220.,230.,240.,250.,262.,269., + 273.,278.,282.,287.,291.,295.,299.,305.,315.]), +'xa' : [-1.5141,-2.0906,-1.9253,-0.8724,-0.3074,-0.5534,0.0192, + 1.2298,2.5479,2.4710,1.7063,1.1183,0.5534,0.4727,0.3574,0.1998, + 0.2882,0.2613,0.2652,0.2805,0.4112,0.9377,1.3527,1.5564,1.6141, + 1.6333,1.1567,0.8109,0.2498,-0.2306,-0.7571,-1.1222], +'xo' : [0.5150,1.3412,2.6094,3.2358,2.7401,2.7823,3.5932, + 3.8353,2.5863,1.3105,0.6841,0.2575,0.2460,0.3689,0.2460,0.2998, + 0.3651,0.3343,0.3881,0.4573,0.5918,0.7110,0.4035,0.0769,-0.3920, + -0.8570,-1.3412,-1.5641,-1.7409,-1.7178,-1.2989,-0.5572], +'ub':120, +'ue':320, +'k' : [3, 3, 3, 3, 3, 3, 5, 5, 5], +'s' : [100.0, 1.0, 0.05, 0.25, 0.25, 0.25, 0.25, 0.0, -1], +'ipar' : [1, 1, 1, 1, 1, 0, 0, 0, 0], +'iopt' : [0, 1, 1, 1, 0, 0, 0, 0, -1], +'res' : [0.559278E+02, 0.100021E+01, 0.499966E-01, 0.250216E+00, 0.249842E+00, + 0.250003E+00, 0.249993E+00, 0.000000E+00, 0.943099E+00], +'err' : [-2, 0, 0, 0, 0, 0, 0, -1, 0], +'knots' : [[120., 120., 120., 120., 320., 320., 320., 320.], + [120., 120., 120., 120., 133., 138., 141., 144., 149., 154., + 170., 210., 250., 278., 295., 320., 320., 320., 320.], + [120., 120., 120., 120., 133., 136., 138., 141., 144., 149., + 151., 154., 161., 170., 180., 190., 210., 250., 269., 278., + 287., 295., 320., 320., 320., 320.], + [120., 120., 120., 120., 133., 136., 138., 141., 144., 149., + 151., 154., 161., 170., 180., 190., 210., 250., 269., 278., + 287., 295., 320., 320., 320., 320.], + 
[120., 120., 120., 120., 133., 136., 138., 141., 144., 149., + 151., 154., 170., 210., 250., 278., 287., 295., 320., 320., + 320., 320.], + [0.0000, 0.0000, 0.0000, 0.0000, 0.1197, 0.1839, 0.2232, 0.2883, + 0.4480, 0.6343, 0.6653, 0.6838, 0.7840, 1.0000, 1.0000, 1.0000, + 1.0000], + [0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.1197, 0.1839, + 0.2232, 0.4480, 0.6343, 0.6653, 0.6838, 0.7840, 1.0000, 1.0000, + 1.0000, 1.0000, 1.0000, 1.0000], + [0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.1839, 0.2232, + 0.2363, 0.2883, 0.3529, 0.4480, 0.5149, 0.5667, 0.6047, 0.6343, + 0.6420, 0.6508, 0.6596, 0.6653, 0.6674, 0.6703, 0.6740, 0.6838, + 0.7121, 0.7391, 0.7593, 0.7840, 0.8084, 0.8440, 0.8655, 0.8963, + 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000], + [0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.1250, 0.2500, + 0.3750, 0.5000, 0.6250, 0.7500, 0.8750, 1.0000, 1.0000, 1.0000, + 1.0000, 1.0000, 1.0000]], +'sx' : [[-1.1548, 2.7543, 0.7908, -0.1259], + [-1.5121, -1.2336, -3.5336, -0.1351, -0.6165, 0.0950, 3.1911, + 0.9120, 0.2207, 0.5287, -0.2985, 2.1710, 0.4299, -2.2076, -0.3992], + [-1.5141, -0.8415, -3.5384, -0.9552, -0.0336, -0.8011, -0.0719, + 2.8821, 2.4805, 1.1626, 1.2566, 0.3134, 0.6040, 0.1359, 0.4505, + -0.0664, 1.1284, 1.8397, 1.3619, -0.5533, -1.4796, -0.9382], + [-1.5126, -0.8460, -3.6051, -0.8411, -0.2731, -0.6593, 0.0111, + 2.6652, 2.6017, 1.1042, 1.2620, 0.3178, 0.6014, 0.1370, 0.4496, + -0.0659, 1.1283, 1.8398, 1.3618, -0.5531, -1.4799, -0.9379], + [-1.5137, -0.6761, -3.7166, -0.8442, -0.1873, -0.7240, -0.0382, + 2.8148, 2.3950, 0.9819, 0.2063, 0.4696, -0.1694, 1.7368, 1.5210, + -0.7724, -1.2116, -1.1237], + [-1.5126, -2.1295, -2.5681, -0.8519, -0.3894, 0.0129, 4.2840, + 1.2306, 0.2041, 0.6633, 3.2860, -1.1886, -1.0872], + [-1.5140, -0.2538, -3.9579, -2.0558, -0.1376, -0.7093, 3.9083, + 3.4195, -0.6167, 1.7279, 3.0307, -0.5555, -0.7361, -1.1278], + [-1.5141, -4.8893, 3.0277, -6.9243, 2.4051, -1.9430, 1.8101, 0.4452, + 3.0970, 2.7686, 2.0549, 1.1258, 1.0620, 0.2999, 0.5931, -0.1113, + 0.4195, 0.2149, 0.3078, 0.1882, 0.9922, 0.7559, 1.4062, 1.6328, + 1.5426, 1.8382, 1.1504, 0.6591, -0.3452, -0.7910, -1.0000, -1.1222], + [-1.5140, -1.1705, -3.7619, -0.4161, -0.5726, 0.7574, 5.4849, + -2.0698, 2.1028, 2.6719, -1.5004, -0.4675, -1.1283]], +'sy' : [[2.5710, 0.4640, 0.3999, -1.6208], + [0.5161, -1.0848, 3.3776, 2.8139, 2.7532, 4.3508, 2.1979, -0.3601, + 0.7465, -0.0769, 0.7883, 0.5575, -3.0389, -0.1368, -0.6145], + [0.5151, 0.5826, 1.3907, 3.5299, 2.5462, 2.5185, 4.2164, 3.5608, + 0.8278, 0.6202, 0.0979, 0.2647, 0.3987, 0.2091, 0.4205, 0.3339, + 0.9355, -0.4440, -1.3297, -2.2927, -0.4057, -0.4095], + [0.5160, -0.4483, 2.3159, 3.1579, 2.7441, 2.6293, 4.2337, 3.2004, + 1.2412, 0.0951, 0.3634, 0.1744, 0.4209, 0.2015, 0.4258, 0.3310, + 0.9364, -0.4443, -1.3297, -2.2925, -0.4061, -0.4092], + [0.5157, -0.0216, 1.9374, 3.3003, 2.7152, 2.4783, 4.3210, 3.3174, + 1.0242, 0.0591, 0.4637, 0.1370, 0.6160, 0.7280, -1.5065, -2.0501, + -0.7021, -0.2044], + [0.5173, 0.8949, 2.5415, 3.4316, 2.4271, 4.5986, 2.6667, -0.1322, + 0.3411, 0.9389, -1.1971, -2.5963, -0.5047], + [0.5160, 11.3627, -10.2865, 7.5268, -0.4078, 6.4015, 2.8602, 0.5783, + -0.1220, 1.5742, -2.4089, -1.9223, -1.6493, -0.5481], + [0.5150, 3.0202, -2.1849, 6.2691, 2.1376, 2.9157, 4.2898, 3.9404, + 2.7047, 2.0088, 0.0609, 1.2120, -0.6203, 0.6126, 0.2469, 0.0576, + 0.5412, 0.2445, 0.5039, 0.3663, 1.2118, 0.4929, 0.5866, 0.0256, + -0.4126, -1.0048, -1.2798, -1.8261, -1.8191, -1.7347, -0.6137, + -0.5572], + [0.5157, 0.1690, 
2.5142, 3.5678, 1.7000, 5.7700, 0.8859, -0.8397, + 2.4599, -3.0704, -1.5557, -1.2391, -0.5543]], +'sp' : [[ -1.1548, 2.5710, -0.7135, 2.3277, -0.4651, 2.1850, -0.3258, +2.1025, -0.2370, 2.0489, -0.1097, 1.9703, 0.0107, 1.8938, 0.0872, +1.8441, 0.1963, 1.7711, 0.2654, 1.7236, 0.3637, 1.6539, 0.5685, +1.4983, 0.7840, 1.3113, 0.9646, 1.1183, 1.0882, 0.9380, 1.1601, +0.7672, 1.1854, 0.6032, 1.1693, 0.4427, 1.1171, 0.2830, 1.0338, +0.1208, 0.9247, -0.0466, 0.7670, -0.2589, 0.6649, -0.3903, 0.6040, +-0.4685, 0.5261, -0.5696, 0.4626, -0.6535, 0.3823, -0.7624, 0.3178, +-0.8530, 0.2534, -0.9470, 0.1895, -1.0444, 0.0951, -1.1975, -0.0553, +-1.4729], + [ -1.5121, 0.5161, -2.1158, 1.3155, -1.8367, 2.7813, -0.8954, 2.9112, + -0.4539, 2.8268, -0.4538, 2.9794, 0.1608, 3.7106, 1.1126, 3.4893, + 2.3766, 2.3660, 2.4950, 1.6528, 1.9671, 0.8609, 0.9699, 0.0639, + 0.5592, 0.1890, 0.4118, 0.3782, 0.3610, 0.3838, 0.3472, 0.3084, + 0.3113, 0.2549, 0.2207, 0.2994, 0.1499, 0.4137, 0.2004, 0.5428, + 0.4734, 0.6320, 1.1011, 0.5682, 1.4267, 0.3319, 1.5365, 0.0978, + 1.5511, -0.3202, 1.4407, -0.7531, 1.1510, -1.3044, 0.8062, -1.6447, + 0.3679, -1.7989, -0.1420, -1.7090, -0.8573, -1.2727, -1.1104, -0.5605], + [ -1.5141, 0.5151, -2.0904, 1.3399, -1.9278, 2.6176, -0.8627, 3.2135, + -0.3177, 2.7648, -0.5578, 2.7552, 0.0533, 3.6603, 1.1877, 3.7583, + 2.5699, 2.6222, 2.4649, 1.3059, 1.7052, 0.6802, 1.1204, 0.2567, + 0.5470, 0.2506, 0.4954, 0.3519, 0.3069, 0.2824, 0.2436, 0.2790, + 0.2870, 0.3327, 0.2711, 0.3674, 0.2337, 0.3967, 0.2638, 0.4558, + 0.4501, 0.5805, 0.9479, 0.6914, 1.3254, 0.4292, 1.5373, 0.0937, + 1.6705, -0.4295, 1.5788, -0.8456, 1.2139, -1.3107, 0.7606, -1.6031, + 0.2559, -1.7475, -0.2110, -1.6852, -0.7669, -1.3134, -1.1211, -0.5557], + [ -1.5126, 0.5160, -2.1059, 1.3246, -1.8862, 2.7128, -0.8361, 3.0274, + -0.4395, 2.8257, -0.4955, 2.8696, 0.1192, 3.6617, 1.1486, 3.6310, + 2.4908, 2.5597, 2.5133, 1.5174, 1.7232, 0.5758, 1.1087, 0.2696, + 0.5506, 0.2489, 0.4945, 0.3509, 0.3067, 0.2853, 0.2440, 0.2764, + 0.2870, 0.3323, 0.2709, 0.3685, 0.2336, 0.3974, 0.2638, 0.4555, + 0.4503, 0.5798, 0.9479, 0.6915, 1.3254, 0.4295, 1.5373, 0.0938, + 1.6705, -0.4296, 1.5788, -0.8458, 1.2138, -1.3107, 0.7606, -1.6031, + 0.2560, -1.7475, -0.2110, -1.6852, -0.7669, -1.3134, -1.1211, -0.5556], + [ -1.5137, 0.5157, -2.0941, 1.3313, -1.9242, 2.6683, -0.8241, 3.1080, + -0.3888, 2.8231, -0.5299, 2.7740, 0.0879, 3.6846, 1.1787, 3.7093, + 2.5018, 2.5558, 2.4167, 1.4002, 1.8823, 0.6816, 1.0126, 0.2367, + 0.5944, 0.2522, 0.4116, 0.3221, 0.3401, 0.3241, 0.3214, 0.3006, + 0.2964, 0.2937, 0.2303, 0.3351, 0.1832, 0.4139, 0.2389, 0.5090, + 0.4813, 0.5991, 1.0356, 0.5942, 1.3548, 0.3768, 1.4908, 0.1334, + 1.5810, -0.3273, 1.5562, -0.8151, 1.2789, -1.3812, 0.7977, -1.6444, + 0.2360, -1.7257, -0.2406, -1.6526, -0.7496, -1.3325, -1.1231, -0.5535], + [ -1.5126, 0.5173, -2.0962, 1.3284, -1.9154, 2.6519, -0.8579, 3.1350, + -0.4780, 2.7906, -0.3927, 2.8295, 0.0356, 3.5770, 1.1661, 3.8095, + 2.6447, 2.5908, 2.4131, 1.3715, 1.6818, 0.5972, 1.0403, 0.2693, + 0.5711, 0.2212, 0.4632, 0.2442, 0.3622, 0.2891, 0.3027, 0.3526, + 0.2937, 0.4048, 0.2976, 0.4261, 0.3085, 0.4553, 0.3314, 0.4940, + 0.4264, 0.5827, 0.8211, 0.5959, 1.2421, 0.3329, 1.5049, 0.0288, + 1.6804, -0.3960, 1.6292, -0.8022, 1.2238, -1.3065, 0.8512, -1.5331, + 0.2420, -1.7153, -0.2534, -1.7101, -0.8192, -1.4173, -1.0872, -0.5047], + [ -1.5140, 0.5160, -2.0913, 1.3343, -1.9224, 2.6454, -0.8413, 3.1098, + -0.5097, 2.8015, -0.4106, 2.8886, 0.1128, 3.5175, 1.1376, 3.8232, + 2.5443, 2.6009, 2.5286, 
1.3516, 1.7687, 0.6085, 0.9935, 0.3006, + 0.4845, 0.2434, 0.3955, 0.2589, 0.3267, 0.2931, 0.2992, 0.3436, + 0.3045, 0.3845, 0.3113, 0.4009, 0.3240, 0.4231, 0.3471, 0.4529, + 0.4388, 0.5286, 0.8568, 0.6175, 1.2911, 0.4212, 1.5361, 0.1131, + 1.6758, -0.3655, 1.6119, -0.8419, 1.1950, -1.3926, 0.8147, -1.6001, + 0.2143, -1.7155, -0.2427, -1.6562, -0.7323, -1.3483, -1.1278, -0.5481], + [ -1.5141, 0.5150, -2.0906, 1.3412, -1.9253, 2.6094, -0.8724, 3.2358, + -0.3074, 2.7401, -0.5534, 2.7823, 0.0192, 3.5932, 1.2298, 3.8353, + 2.5479, 2.5863, 2.4710, 1.3105, 1.7063, 0.6841, 1.1183, 0.2575, + 0.5534, 0.2460, 0.4727, 0.3689, 0.3574, 0.2460, 0.1998, 0.2998, + 0.2882, 0.3651, 0.2613, 0.3343, 0.2652, 0.3881, 0.2805, 0.4573, + 0.4112, 0.5918, 0.9377, 0.7110, 1.3527, 0.4035, 1.5564, 0.0769, + 1.6141, -0.3920, 1.6333, -0.8570, 1.1567, -1.3412, 0.8109, -1.5641, + 0.2498, -1.7409, -0.2306, -1.7178, -0.7571, -1.2989, -1.1222, -0.5572], + [ -1.5140, 0.5157, -2.0927, 1.3305, -1.9113, 2.6869, -0.8937, 2.9204, + -0.4755, 2.9632, -0.3647, 3.0067, 0.1005, 3.3658, 1.0401, 3.7905, + 2.6920, 2.7554, 2.5788, 1.2773, 1.5805, 0.4841, 0.8142, 0.2697, + 0.4514, 0.2967, 0.4047, 0.3224, 0.3764, 0.3570, 0.3746, 0.3937, + 0.3870, 0.4178, 0.3943, 0.4265, 0.4060, 0.4376, 0.4249, 0.4515, + 0.4932, 0.4825, 0.8026, 0.4927, 1.1681, 0.3386, 1.4146, 0.1027, + 1.6128, -0.3052, 1.6351, -0.7716, 1.2950, -1.3882, 0.8918, -1.6327, + 0.1869, -1.7454, -0.3242, -1.6509, -0.7005, -1.3275, -1.1283, -0.5543]] +} + +clocur_test = { +'xa' : [-4.7,-7.048,-6.894,-3.75,-1.042,0.938,2.5,3.524,4.511,5.0,4.886, + 3.524,3.2,1.302,-1.424,-3.0,-3.064,-3.665, -4.7], +'xo' : [0.0,2.565,5.785,6.495,5.909,5.318,4.33,2.957,1.642,0.0,-1.779, + -2.957,-5.543,-7.386,-8.075,-5.196,-2.571,-1.334, 0.0], +'u' : arange(19.0)*20.0, +'k' : [3, 3, 3, 3, 3, 3, 5, 5, 5], +'ipar' : [1, 1, 1, 1, 1, 0, 0, 0, 0], +'iopt' : [0, 1, 1, 1, 0, 0, 0, 0, -1], +'s' : [900., 10., 0.1, 0.5, 0.5, 0.5, 0.5, 0.0, -1], +'res' : [0.653304E+03, 0.100003E+02, 0.999982E-01, 0.499976E+00, 0.499666E+00, + 0.499997E+00, 0.500067E+00, 0.000000E+00, 0.207778E+01], +'err' : [-2, 0, 0, 0, 0, 0, 0, -1, 0], +'knots' : [[ -1080., -720., -360., 0., 360., 720., 1080., 1440. 
], + [-180., -80., -40., 0., 40., 60., 100., 180., 280., 320., + 360., 400., 420., 460.], + [-60., -40., -20., 0., 20., 40., 60., 100., 180., 200., + 220., 240., 260., 280., 300., 320., 340., 360., 380., 400., + 420.], + [-60., -40., -20., 0., 20., 40., 60., 100., 180., 200., + 220., 240., 260., 280., 300., 320., 340., 360., 380., 400., + 420.], + [-120., -80., -40., 0., 40., 60., 100., 180., 200., 220., + 240., 280., 320., 360., 400., 420., 460.], + [-0.3411, -0.2121, -0.0724, 0.0000, 0.0822, 0.1584, 0.2346, + 0.3490, 0.5125, 0.5547, 0.5973, 0.6589, 0.7879, 0.9276, 1.0000, + 1.0822, 1.1584, 1.2346], + [-0.4453, -0.4027, -0.3411, -0.2121, -0.0724, 0.0000, 0.1584, + 0.2346, 0.3490, 0.5125, 0.5547, 0.5973, 0.6589, 0.7879, + 0.9276, 1.0000, 1.1584, 1.2346, 1.3490, 1.5125, 1.5547], + [-0.2786, -0.2121, -0.1345, -0.0724, -0.0399, 0.0000, 0.0822, + 0.1584, 0.2346, 0.3001, 0.3490, 0.3927, 0.4332, 0.4720, + 0.5125, 0.5547, 0.5973, 0.6589, 0.7214, 0.7879, 0.8655, + 0.9276, 0.9601, 1.0000, 1.0822, 1.1584, 1.2346, 1.3001, + 1.3490], + [-0.6250, -0.5000, -0.3750, -0.2500, -0.1250, 0.0000, 0.1250, + 0.2500, 0.3750, 0.5000, 0.6250, 0.7500, 0.8750, 1.0000, + 1.1250, 1.2500, 1.3750, 1.5000, 1.6250]], +'sx' : [[ -0.2890, -0.2890, -0.2890, -0.2890], + [-3.6414, -5.1410, -7.1471, -3.6077, 2.9569, 7.0497, 0.8157, -3.6414, + -5.1410, -7.1471], + [-3.6393, -4.2795, -7.6104, -7.5242, -2.2356, 2.5034, 4.8626, 5.2229, + 3.1970, 3.5381, 1.5313, -1.7123, -3.3029, -2.9197, -3.6393, -4.2795, + -7.6104], + [-3.3998, -4.4768, -7.5699, -7.4089, -2.3288, 2.4959, 5.0209, 4.9720, + 3.6189, 3.2870, 1.5156, -1.6335, -3.2388, -3.1205, -3.3998, -4.4768, + -7.5699], + [-3.1406, -3.8616, -9.3043, -2.0703, 2.3649, 4.9458, 5.1866, 3.1343, + 3.9923, -2.7579, -3.1406, -3.8616, -9.3043], + [-2.7754, -4.3421, -7.6213, -7.4935, -2.9934, 1.5161, 5.3675, 4.7346, + 3.5203, 3.6277, -2.8775, -2.7754, -4.3421, -7.6213], + [-5.1336, -1.1870, -4.9707, -10.2635, -1.8832, 1.1682, 4.6332, + 5.9054, 2.4087, 4.9227, -5.1336, -1.1870, -4.9707, -10.2635, -1.8832], + [-2.3401, -3.8401, -5.2545, -7.7951, -8.2051, -3.8222, -1.6092, 0.6087, + 2.7131, 3.2464, 4.9215, 4.8089, 5.8715, 1.6678, 4.7786, 0.6271, + -1.5750, -3.8446, -2.3401, -3.8401, -5.2545, -7.7951, -8.2051], + [1.0044, -5.4019, -0.9630, -12.7083, -1.1542, 1.1652, 6.9462, + 3.4423, 1.0044, -5.4019, -0.9630, -12.7083, -1.1542]], +'sy' : [[ 0.0089, 0.0089, 0.0089, 0.0089], + [-2.8481, -0.2362, 5.1680, 7.0915, 4.1090, 1.8202, -10.7190, + -2.8481, -0.2362, 5.1680], + [-1.3506, -0.3010, 2.3785, 6.4303, 6.5434, 4.9464, 1.8911, -1.9032, + -2.6544, -5.6451, -7.5794, -8.8289, -5.1023, -2.2260, -1.3506, -0.3010, + 2.3785], + [-1.2300, -0.5075, 2.6980, 6.1354, 6.7093, 4.8744, 1.8243, -1.7559, + -2.9185, -5.3807, -7.8932, -8.4817, -5.3102, -2.2288, -1.2300, -0.5075, + 2.6980], + [-0.6661, -1.9633, 5.9976, 6.6889, 4.8408, 1.9295, -1.8934, -2.7464, + -6.3406, -10.2291, -0.6661, -1.9633, 5.9976], + [-2.8663, 0.0279, 2.2083, 6.4113, 6.5499, 5.6375, 1.6714, -1.6993, + -3.2769, -6.4064, -10.0517, -2.8663, 0.0279, 2.2083], + [-11.4885, -0.4907, -1.3396, 6.4019, 7.1875, 5.2912, 3.0296, + -1.2442,-3.8350, -7.0871, -11.4885, -0.4907, -1.3396, 6.4019, + 7.1875], + [-3.3543, -0.9872, 0.9217, 1.8210, 7.0937, 6.6891, 6.0142, 5.5460, + 4.6549, 2.8892, 1.7125, 0.0930, -2.5059, -2.4784, -6.6540, -7.0488, + -9.7840, -5.4089, -3.3543, -0.9872, 0.9217, 1.8210, 7.0937], + [-12.0940, -3.6528, -0.7300, 5.5358, 7.2803, 5.8203, -0.2992, + -2.7102, -12.0940, -3.6528, -0.7300, 5.5358, 7.2803]] , +'sp' : [[ -0.2890, 0.0089, -0.2890, 
0.0089, -0.2890, 0.0089, -0.2890, + 0.0089, -0.2890, 0.0089, -0.2890, 0.0089, -0.2890, 0.0089, + -0.2890, 0.0089, -0.2890, 0.0089, -0.2890, 0.0089, -0.2890, + 0.0089, -0.2890, 0.0089, -0.2890, 0.0089, -0.2890, 0.0089, + -0.2890, 0.0089, -0.2890, 0.0089, -0.2890, 0.0089, -0.2890, 0.0089], + [ -5.2923, 0.4093, -6.1787, 2.9710, -6.0695, 5.3207, -4.2389, + 6.4366, -1.7409, 6.1224, 0.7042, 5.1065, 2.6665, 4.1203, 4.0480, + 3.1423, 4.8338, 1.9627, 5.0087, 0.3716, 4.5831, -1.7274, 3.6691, + -3.9781, 2.4042, -5.9109, 0.9256, -7.0561, -0.6291, -6.9443, + -2.1199, -5.3828, -3.3961, -3.2874, -4.3795, -1.5164], + [ -4.7279, -0.0293, -7.0409, 2.6072, -6.8775, 5.7691, -3.7728, + 6.4296, -1.0175, 6.0270, 0.9227, 5.2638, 2.4441, 4.2658, 3.6274, + 3.0527, 4.4745, 1.6179, 4.9874, -0.0452, 4.8553, -1.7122, 3.5915, + -3.0276, 3.1468, -5.4690, 1.3252, -7.4653, -1.4368, -7.9995, -2.9739, + -5.2440, -3.1035, -2.5595, -3.6261, -1.3216], + [ -4.8128, -0.0937, -7.0276, 2.7367, -6.8007, 5.6343, -3.7924, + 6.4306, -1.0745, 6.1030, 0.8984, 5.2829, 2.4686, 4.2353, 3.6817, + 3.0092, 4.5092, 1.5998, 4.9227, 0.0020, 4.7506, -1.6513, 3.7891, + -3.1351, 3.0471, -5.3891, 1.2860, -7.5725, -1.3762, -7.8550, -2.9516, + -5.3252, -3.1868, -2.5759, -3.5327, -1.2761], + [ -4.8300, -0.1549, -6.8257, 2.7970, -7.0124, 5.6512, -3.7882, + 6.4165, -0.9607, 6.0812, 0.9211, 5.2675, 2.4192, 4.2429, 3.6200, + 3.0380, 4.4912, 1.6245, 5.0005, -0.0262, 4.8245, -1.7170, 3.5836, + -3.0535, 3.2563, -5.4018, 1.1922, -7.5794, -1.4717, -7.8576, -2.7955, + -5.3774, -3.1970, -2.4761, -3.6292, -1.3149], + [ -4.5913, -0.0815, -7.0773, 2.6262, -6.8714, 5.7487, -3.7630, + 6.4357, -1.0052, 6.0385, 0.8565, 5.3025, 2.4386, 4.2481, 3.6968, + 2.9886, 4.5577, 1.5926, 4.9321, 0.0128, 4.6151, -1.6349, 3.8307, + -3.1175, 3.1953, -5.4544, 1.1602, -7.4783, -1.4290, -7.9455, -2.7722, + -5.3452, -3.2724, -2.4829, -3.7031, -1.2936], + [ -4.6787, -0.2073, -7.0474, 2.8389, -6.8782, 5.5855, -3.8179, + 6.5057, -0.9262, 6.0491, 0.8978, 5.2475, 2.3835, 4.2394, 3.6211, + 3.0351, 4.5499, 1.6308, 5.0208, -0.0120, 4.6762, -1.6795, 3.7408, + -3.1379, 3.0946, -5.3511, 1.3669, -7.5345, -1.5010, -8.0181, -2.8910, + -5.2226, -3.1657, -2.4427, -3.6475, -1.3664], + [ -4.7000, 0.0000, -7.0480, 2.5650, -6.8940, 5.7850, -3.7500, + 6.4950, -1.0420, 5.9090, 0.9380, 5.3180, 2.5000, 4.3300, 3.5240, + 2.9570, 4.5110, 1.6420, 5.0000, 0.0000, 4.8860, -1.7790, 3.5240, + -2.9570, 3.2000, -5.5430, 1.3020, -7.3860, -1.4240, -8.0750, -3.0000, + -5.1960, -3.0640, -2.5710, -3.6650, -1.3340], + [ -4.4547, -0.0336, -6.9471, 3.0553, -7.0308, 5.3645, -3.8468, + 6.4116, -0.8360, 6.2133, 0.9573, 5.3963, 2.3828, 4.2137, 3.5767, + 2.8525, 4.4535, 1.4584, 4.8942, 0.0339, 4.7651, -1.4260, 4.1475, + -3.0152, 2.7862, -5.6052, 1.0706, -7.6954, -1.0414, -7.7248, -2.9141, + -5.1502, -3.4430, -2.6947, -3.7218, -1.4943]] +} + + def f1(x,d=0): if d is None: return "sin" if x is None: return "sin(x)" Modified: trunk/Lib/sandbox/spline/tests/test_fitpack.py =================================================================== --- trunk/Lib/sandbox/spline/tests/test_fitpack.py 2007-04-08 15:27:44 UTC (rev 2901) +++ trunk/Lib/sandbox/spline/tests/test_fitpack.py 2007-04-10 22:04:33 UTC (rev 2902) @@ -13,10 +13,10 @@ import sys from numpy.testing import * -from numpy import array, arange, around, pi, sin, ravel +from numpy import array, arange, around, pi, sin, ravel, zeros, asarray set_package_path() -from spline.fitpack import splrep, splev, sproot, splint, spalde +from spline.fitpack import splprep, splrep, splev, sproot, 
splint, spalde from spline.fitpack import bisplev, bisplrep, splprep restore_path() @@ -24,7 +24,7 @@ from dierckx_test_data import * restore_path() -class test_splrep_slev(NumpyTestCase): +class test_splrep_splev(NumpyTestCase): def check_curfit_against_dierckx_smth(self): x,y = curfit_test['x'],curfit_test['y'] k,s = curfit_test_smth['k'],curfit_test_smth['s'] @@ -60,6 +60,101 @@ assert_array_almost_equal(around(sp,1), curfit_test_lsq['sp'][i]) + def check_percur_against_dierckx(self): + x,y = percur_test['x'], percur_test['y'] + k,s = percur_test['k'], percur_test['s'] + iopt, res = percur_test['iopt'], percur_test['res'] + err = percur_test['err'] + coef, knots = percur_test['coef'], percur_test['knots'] + sp = percur_test['sp'] + for i in range(len(k)): + if iopt[i] != -1: + tck,fp,ier,msg = splrep(x,y,k=k[i],task=iopt[i],s=s[i], + per=True,full_output=True) + else: + tck,fp,ier,msg = splrep(x,y,t=knots[i],k=k[i],task=iopt[i], + per=True,full_output=True) + tt,cc,kk = tck + tt,cc = asarray(tt), asarray(cc) + assert_almost_equal(ier,err[i]) + assert_almost_equal(fp,res[i],decimal=1) + assert_array_almost_equal(tt,knots[i], decimal=3) + assert_array_almost_equal(cc,coef[i], decimal=3) + yy = asarray(splev(x,tck)) + assert_array_almost_equal(yy,sp[i], decimal=3) + +class test_splprep_splev(NumpyTestCase): + def check_parcur_against_dierckx(self): + xa,xo = parcur_test['xa'], parcur_test['xo'] + k,s = parcur_test['k'], parcur_test['s'] + u = parcur_test['u'] + ub,ue = parcur_test['ub'], parcur_test['ue'] + iopt, res = parcur_test['iopt'], parcur_test['res'] + err, ipar = parcur_test['err'], parcur_test['ipar'] + knots = parcur_test['knots'] + sx, sy = parcur_test['sx'], parcur_test['sy'] + sp = parcur_test['sp'] + x = array([xa, xo]) + for i in range(len(k)): + if iopt[i] != -1: + if ipar[i] == 1: + tcku,fp,ier,msg = splprep(x,u=u,ub=ub,ue=ue,k=k[i], + task=iopt[i],s=s[i],full_output=True) + else: + tcku,fp,ier,msg = splprep(x,ub=ub,ue=ue,k=k[i], + task=iopt[i],s=s[i],full_output=True) + else: + tcku,fp,ier,msg = splprep(x,ub=ub,ue=ue,t=knots[i], + k=k[i],task=iopt[i],full_output=True) + tck,u = tcku + tt,cc,kk = tck + tt,cc = asarray(tt), asarray(cc) + assert_almost_equal(ier,err[i]) + assert_almost_equal(fp,res[i],decimal=3) + assert_array_almost_equal(tt,knots[i], decimal=3) + assert_array_almost_equal(cc[0],sx[i], decimal=3) + assert_array_almost_equal(cc[1],sy[i], decimal=3) + y = asarray(splev(u,tck)) + yy = zeros(64, 'float') + yy[0:-1:2] = y[0] + yy[1::2] = y[1] + assert_array_almost_equal(yy,sp[i], decimal=3) + + def check_clocur_against_dierckx(self): + xa,xo = clocur_test['xa'], clocur_test['xo'] + k,s = clocur_test['k'], clocur_test['s'] + u = clocur_test['u'] + iopt, res = clocur_test['iopt'], clocur_test['res'] + err, ipar = clocur_test['err'], clocur_test['ipar'] + knots = clocur_test['knots'] + sx, sy = clocur_test['sx'], clocur_test['sy'] + sp = clocur_test['sp'] + x = array([xa, xo]) + for i in range(len(k)): + if iopt[i] != -1: + if ipar[i] == 1: + tcku,fp,ier,msg = splprep(x,u=u,k=k[i],task=iopt[i], + s=s[i],per=True,full_output=True) + else: + tcku,fp,ier,msg = splprep(x,k=k[i],task=iopt[i], + s=s[i],per=True,full_output=True) + else: + tcku,fp,ier,msg = splprep(x,t=knots[i],k=k[i],task=iopt[i], + per=True,full_output=True) + tck,u = tcku + tt,cc,kk = tck + tt,cc = asarray(tt), asarray(cc) + assert_almost_equal(ier,err[i]) + assert_almost_equal(fp,res[i],decimal=3) + assert_array_almost_equal(tt,knots[i], decimal=3) + assert_array_almost_equal(cc[0],sx[i], decimal=3) 
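
(Illustrative aside: the splrep/splev round trip these Dierckx regression tests automate, reduced to a minimal periodic fit on made-up data. The import and the full_output unpacking follow the sandbox spline.fitpack usage shown in the test code; the sample values are hypothetical and not part of the test suite.)

    from numpy import linspace, sin, pi
    from spline.fitpack import splrep, splev

    x = linspace(0.0, 2*pi, 20)    # hypothetical sample: one period of a sine
    y = sin(x)

    # periodic smoothing fit, unpacked the same way the tests above do
    tck, fp, ier, msg = splrep(x, y, k=3, s=0.0, per=True, full_output=True)
    yy = splev(x, tck)             # evaluating the fit back on x should reproduce y
                                   # to within the reported residual fp
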
+ assert_array_almost_equal(cc[1],sy[i], decimal=3) + y = asarray(splev(u,tck)) + yy = zeros(36, 'float') + yy[0:-1:2] = y[0,:-1] + yy[1::2] = y[1,:-1] + assert_array_almost_equal(yy,sp[i], decimal=3) + class test_splint_spalde(NumpyTestCase): def check_splint_spalde(self): per = [0, 1, 0] @@ -139,9 +234,9 @@ tck=splrep(x,v,s=s,per=per,k=k) uv=splev(dx,tckp) assert_almost_equal(0.0, around(abs(uv[1]-f(uv[0])),2), - decimal=1) + decimal=1) assert_almost_equal(0.0, - around(abs(splev(uv[0],tck)-f(uv[0])),2),decimal=1) + around(abs(splev(uv[0],tck)-f(uv[0])),2),decimal=1) if __name__ == "__main__": - NumpyTest().run() \ No newline at end of file + NumpyTest().run() From scipy-svn at scipy.org Wed Apr 11 17:50:58 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 11 Apr 2007 16:50:58 -0500 (CDT) Subject: [Scipy-svn] r2903 - trunk/Lib/ndimage Message-ID: <20070411215058.CA5C039C087@new.scipy.org> Author: stefan Date: 2007-04-11 16:50:46 -0500 (Wed, 11 Apr 2007) New Revision: 2903 Modified: trunk/Lib/ndimage/interpolation.py Log: Warn against using 'reflect' mode for interpolation. Modified: trunk/Lib/ndimage/interpolation.py =================================================================== --- trunk/Lib/ndimage/interpolation.py 2007-04-10 22:04:33 UTC (rev 2902) +++ trunk/Lib/ndimage/interpolation.py 2007-04-11 21:50:46 UTC (rev 2903) @@ -30,10 +30,17 @@ import types import math +import warnings import numpy import _ni_support import _nd_image +def _extend_mode_to_code(mode): + mode = _ni_support._extend_mode_to_code(mode) + if mode == 2: + warnings.warn('Mode "reflect" may yield incorrect results on ' + 'boundaries. Please use "mirror" instead.') + return mode def spline_filter1d(input, order = 3, axis = -1, output = numpy.float64, output_type = None): @@ -126,7 +133,7 @@ output_shape = input.shape if input.ndim < 1 or len(output_shape) < 1: raise RuntimeError, 'input and output rank must be > 0' - mode = _ni_support._extend_mode_to_code(mode) + mode = _extend_mode_to_code(mode) if prefilter and order > 1: filtered = spline_filter(input, order, output = numpy.float64) else: @@ -188,7 +195,7 @@ raise RuntimeError, 'input and output rank must be > 0' if coordinates.shape[0] != input.ndim: raise RuntimeError, 'invalid shape for coordinate array' - mode = _ni_support._extend_mode_to_code(mode) + mode = _extend_mode_to_code(mode) if prefilter and order > 1: filtered = spline_filter(input, order, output = numpy.float64) else: @@ -229,7 +236,7 @@ output_shape = input.shape if input.ndim < 1 or len(output_shape) < 1: raise RuntimeError, 'input and output rank must be > 0' - mode = _ni_support._extend_mode_to_code(mode) + mode = _extend_mode_to_code(mode) if prefilter and order > 1: filtered = spline_filter(input, order, output = numpy.float64) else: @@ -277,7 +284,7 @@ raise TypeError, 'Complex type not supported' if input.ndim < 1: raise RuntimeError, 'input and output rank must be > 0' - mode = _ni_support._extend_mode_to_code(mode) + mode = _extend_mode_to_code(mode) if prefilter and order > 1: filtered = spline_filter(input, order, output = numpy.float64) else: @@ -310,7 +317,7 @@ raise TypeError, 'Complex type not supported' if input.ndim < 1: raise RuntimeError, 'input and output rank must be > 0' - mode = _ni_support._extend_mode_to_code(mode) + mode = _extend_mode_to_code(mode) if prefilter and order > 1: filtered = spline_filter(input, order, output = numpy.float64) else: From scipy-svn at scipy.org Thu Apr 12 10:11:59 2007 From: scipy-svn at scipy.org (scipy-svn at 
scipy.org) Date: Thu, 12 Apr 2007 09:11:59 -0500 (CDT) Subject: [Scipy-svn] r2904 - trunk/Lib/sandbox/timeseries/src Message-ID: <20070412141159.5555939C0D1@new.scipy.org> Author: mattknox_ca Date: 2007-04-12 09:11:45 -0500 (Thu, 12 Apr 2007) New Revision: 2904 Modified: trunk/Lib/sandbox/timeseries/src/cseries.c Log: adding some "moving" functions Modified: trunk/Lib/sandbox/timeseries/src/cseries.c =================================================================== --- trunk/Lib/sandbox/timeseries/src/cseries.c 2007-04-11 21:50:46 UTC (rev 2903) +++ trunk/Lib/sandbox/timeseries/src/cseries.c 2007-04-12 14:11:45 UTC (rev 2904) @@ -90,6 +90,9 @@ #define CHECK_ASFREQ(result) if ((result) == INT_ERR_CODE) return NULL +#define MEM_CHECK(item) if (item == NULL) { return PyErr_NoMemory(); } +#define ERR_CHECK(item) if (item == NULL) { return NULL; } + static int get_freq_group(int freq) { return (freq/1000)*1000; } @@ -2611,7 +2614,6 @@ cseries_check_freq_str(PyObject *self, PyObject *args) { PyObject *alias_tuple, *result, *freq_key; - int freq_val; if ((freq_key = cseries_check_freq(self, args)) == NULL) return NULL; @@ -2885,6 +2887,320 @@ return returnVal; } + +/* This function is directly copied from direct copy of function in */ +/* Return typenumber from dtype2 unless it is NULL, then return + NPY_DOUBLE if dtype1->type_num is integer or bool + and dtype1->type_num otherwise. +*/ +static int +_get_type_num_double(PyArray_Descr *dtype1, PyArray_Descr *dtype2) +{ + if (dtype2 != NULL) + return dtype2->type_num; + + /* For integer or bool data-types */ + if (dtype1->type_num < NPY_FLOAT) { + return NPY_DOUBLE; + } + else { + return dtype1->type_num; + } +} + +#define _CHKTYPENUM(typ) ((typ) ? (typ)->type_num : PyArray_NOTYPE) + +/* validates the standard arguments to moving functions and set the original + mask, original ndarray, and mask for the result */ +static PyObject * +check_mov_args(PyObject *orig_arrayobj, int window_size, int min_win_size, + PyArrayObject **orig_ndarray, PyArrayObject **orig_mask, + PyArrayObject **result_mask) { + + int *raw_result_mask; + + if (!PyArray_Check(orig_arrayobj)) { + PyErr_SetString(PyExc_ValueError, "array must be a valid subtype of ndarray"); + return NULL; + } + + // check if array has a mask, and if that mask is an array + if (PyObject_HasAttrString(orig_arrayobj, "_mask")) { + PyObject *tempMask = PyObject_GetAttrString(orig_arrayobj, "_mask"); + if (PyArray_Check(tempMask)) { + *orig_mask = (PyArrayObject*)PyArray_EnsureArray(tempMask); + } else { + Py_DECREF(tempMask); + } + } + + *orig_ndarray = (PyArrayObject*)PyArray_EnsureArray(orig_arrayobj); + + if ((*orig_ndarray)->nd != 1) { + PyErr_SetString(PyExc_ValueError, "array must be 1 dimensional"); + return NULL; + } + + if (window_size < min_win_size) { + char *error_str; + error_str = malloc(60 * sizeof(char)); + MEM_CHECK(error_str) + sprintf(error_str, + "window_size must be greater than or equal to %i", + min_win_size); + PyErr_SetString(PyExc_ValueError, error_str); + free(error_str); + return NULL; + } + + raw_result_mask = malloc((*orig_ndarray)->dimensions[0] * sizeof(int)); + MEM_CHECK(raw_result_mask) + + { + int i, valid_points=0, is_masked; + + for (i=0; i<((*orig_ndarray)->dimensions[0]); i++) { + + is_masked=0; + + if (*orig_mask != NULL) { + PyObject *valMask; + valMask = PyArray_GETITEM(*orig_mask, PyArray_GetPtr(*orig_mask, &i)); + is_masked = (int)PyInt_AsLong(valMask); + Py_DECREF(valMask); + } + + if (is_masked) { + valid_points=0; + } else { + if (valid_points < 
window_size) { valid_points += 1; } + if (valid_points < window_size) { is_masked = 1; } + } + + raw_result_mask[i] = is_masked; + } + } + + *result_mask = (PyArrayObject*)PyArray_SimpleNewFromData( + 1, (*orig_ndarray)->dimensions, + PyArray_INT32, raw_result_mask); + MEM_CHECK(*result_mask) + (*result_mask)->flags = ((*result_mask)->flags) | NPY_OWNDATA; +} + + +static PyObject *NP_ADD, *NP_MULTIPLY, *NP_SUBTRACT, *NP_SQRT; + +/* computation portion of moving sum. Appropriate mask is overlayed on top + afterwards */ +static PyArrayObject* +calc_mov_sum(PyArrayObject *orig_ndarray, int window_size, int rtype) +{ + PyArrayObject *result_ndarray=NULL; + int i, valid_points=0; + + result_ndarray = (PyArrayObject*)PyArray_ZEROS( + orig_ndarray->nd, + orig_ndarray->dimensions, + rtype, 0); + ERR_CHECK(result_ndarray) + + for (i=0; idimensions[0]; i++) { + + PyObject *val=NULL, *mov_sum_val=NULL; + + val = PyArray_GETITEM(orig_ndarray, PyArray_GetPtr(orig_ndarray, &i)); + + if (valid_points == 0) { + mov_sum_val = val; + valid_points += 1; + } else { + int prev_idx = i-1; + PyObject *mov_sum_prevval; + mov_sum_prevval= PyArray_GETITEM(result_ndarray, + PyArray_GetPtr(result_ndarray, &prev_idx)); + mov_sum_val = PyObject_CallFunction(NP_ADD, "OO", (PyArrayObject*)val, + mov_sum_prevval); + Py_DECREF(mov_sum_prevval); + ERR_CHECK(mov_sum_val) + + if (valid_points == window_size) { + PyObject *temp_val, *rem_val; + int rem_idx = i-window_size; + temp_val = mov_sum_val; + rem_val = PyArray_GETITEM(orig_ndarray, + PyArray_GetPtr(orig_ndarray, &rem_idx)); + + mov_sum_val = PyObject_CallFunction( + NP_SUBTRACT, + "OO", (PyArrayObject*)temp_val, + rem_val); + ERR_CHECK(mov_sum_val) + + Py_DECREF(temp_val); + Py_DECREF(rem_val); + + } else { + valid_points += 1; + } + } + + PyArray_SETITEM(result_ndarray, PyArray_GetPtr(result_ndarray, &i), mov_sum_val); + + if (mov_sum_val != val) { Py_DECREF(val); } + + Py_DECREF(mov_sum_val); + } + + return result_ndarray; + +} + +static char MaskedArray_mov_sum_doc[] = ""; +static PyObject * +MaskedArray_mov_sum(PyObject *self, PyObject *args, PyObject *kwds) +{ + PyObject *orig_arrayobj=NULL, *result_dict=NULL; + PyArrayObject *orig_ndarray=NULL, *orig_mask=NULL, + *result_ndarray=NULL, *result_mask=NULL; + + PyArray_Descr *dtype=NULL; + + int rtype, window_size; + + static char *kwlist[] = {"array", "window_size", "dtype", NULL}; + + if (!PyArg_ParseTupleAndKeywords(args, kwds, + "Oi|O&:mov_sum(array, window_size, dtype)", kwlist, + &orig_arrayobj, &window_size, + PyArray_DescrConverter2, &dtype)) return NULL; + + + check_mov_args(orig_arrayobj, window_size, 1, + &orig_ndarray, &orig_mask, &result_mask); + + rtype = _CHKTYPENUM(dtype); + + result_ndarray = calc_mov_sum(orig_ndarray, window_size, rtype); + ERR_CHECK(result_ndarray) + + result_dict = PyDict_New(); + MEM_CHECK(result_dict) + PyDict_SetItemString(result_dict, "array", (PyObject*)result_ndarray); + PyDict_SetItemString(result_dict, "mask", (PyObject*)result_mask); + + Py_DECREF(result_ndarray); + Py_DECREF(result_mask); + return result_dict; +} + + +static char MaskedArray_mov_stddev_doc[] = ""; +static PyObject * +MaskedArray_mov_stddev(PyObject *self, PyObject *args, PyObject *kwds) +{ + PyObject *orig_arrayobj=NULL, *result_dict=NULL; + PyArrayObject *orig_ndarray=NULL, *orig_mask=NULL, + *result_ndarray=NULL, *result_mask=NULL, + *result_temp1=NULL, *result_temp2=NULL, *result_temp3=NULL; + PyArrayObject *mov_sum=NULL, *mov_sum_sq=NULL; + PyObject *denom1=NULL, *denom2=NULL; + + PyArray_Descr 
*dtype=NULL; + + int *raw_result_mask; + + int rtype, window_size, is_variance, is_sample; + + static char *kwlist[] = {"array", "window_size", "is_variance", "is_sample", "dtype", NULL}; + + if (!PyArg_ParseTupleAndKeywords(args, kwds, + "Oiii|O&:mov_stddev(array, window_size, is_variance, is_sample, dtype)", kwlist, + &orig_arrayobj, &window_size, &is_variance, &is_sample, + PyArray_DescrConverter2, &dtype)) return NULL; + + + check_mov_args(orig_arrayobj, window_size, 2, + &orig_ndarray, &orig_mask, &result_mask); + + rtype = _get_type_num_double(orig_ndarray->descr, dtype); + + mov_sum = calc_mov_sum(orig_ndarray, window_size, rtype); + ERR_CHECK(mov_sum) + + result_temp1 = (PyArrayObject*)PyObject_CallFunction( + NP_MULTIPLY, "OO", + orig_ndarray, (PyObject*)orig_ndarray); + ERR_CHECK(result_temp1) + + mov_sum_sq = calc_mov_sum(result_temp1, window_size, rtype); + + Py_DECREF(result_temp1); + ERR_CHECK(mov_sum_sq) + + + /* + formulas from: + http://en.wikipedia.org/wiki/Standard_deviation#Rapid_calculation_methods + */ + if (is_sample) { + denom1 = PyFloat_FromDouble(1.0/(double)(window_size-1)); + denom2 = PyFloat_FromDouble(1.0/(double)(window_size*(window_size-1))); + } else { + denom1 = PyFloat_FromDouble(1.0/(double)window_size); + denom2 = PyFloat_FromDouble(1.0/(double)(window_size*window_size)); + } + + result_temp1 = (PyArrayObject*)PyObject_CallFunction( + NP_MULTIPLY, + "OO", mov_sum_sq, + denom1); + ERR_CHECK(result_temp1) + Py_DECREF(mov_sum_sq); + Py_DECREF(denom1); + + result_temp3 = (PyArrayObject*)PyObject_CallFunction( + NP_MULTIPLY, + "OO", mov_sum, + (PyObject*)mov_sum); + ERR_CHECK(result_temp3) + Py_DECREF(mov_sum); + result_temp2 = (PyArrayObject*)PyObject_CallFunction( + NP_MULTIPLY, + "OO", result_temp3, + denom2); + ERR_CHECK(result_temp2) + Py_DECREF(result_temp3); + Py_DECREF(denom2); + + result_temp3 = (PyArrayObject*)PyObject_CallFunction( + NP_SUBTRACT, + "OO", result_temp1, + (PyObject*)result_temp2); + ERR_CHECK(result_temp3) + Py_DECREF(result_temp1); + Py_DECREF(result_temp2); + + if (is_variance) { + result_ndarray = result_temp3; + } else { + result_temp1 = (PyArrayObject*)PyObject_CallFunction( + NP_SQRT, "(O)", result_temp3); + ERR_CHECK(result_temp1) + Py_DECREF(result_temp3); + result_ndarray = result_temp1; + } + + result_dict = PyDict_New(); + MEM_CHECK(result_dict) + PyDict_SetItemString(result_dict, "array", (PyObject*)result_ndarray); + PyDict_SetItemString(result_dict, "mask", (PyObject*)result_mask); + + Py_DECREF(result_ndarray); + Py_DECREF(result_mask); + return result_dict; +} + static char DateArray_asfreq_doc[] = ""; static PyObject * DateArray_asfreq(PyObject *self, PyObject *args) @@ -3022,28 +3338,40 @@ static PyMethodDef cseries_methods[] = { - {"TS_convert", TimeSeries_convert, METH_VARARGS, TimeSeries_convert_doc}, + {"MA_mov_sum", (PyCFunction)MaskedArray_mov_sum, + METH_VARARGS | METH_KEYWORDS, MaskedArray_mov_sum_doc}, + {"MA_mov_stddev", (PyCFunction)MaskedArray_mov_stddev, + METH_VARARGS | METH_KEYWORDS, MaskedArray_mov_stddev_doc}, - {"DA_asfreq", DateArray_asfreq, METH_VARARGS, DateArray_asfreq_doc}, - {"DA_getDateInfo", DateArray_getDateInfo, METH_VARARGS, DateArray_getDateInfo_doc}, + {"TS_convert", (PyCFunction)TimeSeries_convert, + METH_VARARGS, TimeSeries_convert_doc}, - {"thisday", cseries_thisday, METH_VARARGS, cseries_thisday_doc}, - {"check_freq", cseries_check_freq, METH_VARARGS, cseries_check_freq_doc}, - {"check_freq_str", cseries_check_freq_str, METH_VARARGS, cseries_check_freq_str_doc}, - 
{"get_freq_group", cseries_get_freq_group, METH_VARARGS, cseries_get_freq_group_doc}, + {"DA_asfreq", (PyCFunction)DateArray_asfreq, + METH_VARARGS, DateArray_asfreq_doc}, + {"DA_getDateInfo", (PyCFunction)DateArray_getDateInfo, + METH_VARARGS, DateArray_getDateInfo_doc}, - {"set_callback_DateFromString", set_callback_DateFromString, METH_VARARGS, - set_callback_DateFromString_doc}, - {"set_callback_DateTimeFromString", set_callback_DateTimeFromString, METH_VARARGS, - set_callback_DateTimeFromString_doc}, + {"thisday", (PyCFunction)cseries_thisday, + METH_VARARGS, cseries_thisday_doc}, + {"check_freq", (PyCFunction)cseries_check_freq, + METH_VARARGS, cseries_check_freq_doc}, + {"check_freq_str", (PyCFunction)cseries_check_freq_str, + METH_VARARGS, cseries_check_freq_str_doc}, + {"get_freq_group", (PyCFunction)cseries_get_freq_group, + METH_VARARGS, cseries_get_freq_group_doc}, + {"set_callback_DateFromString", (PyCFunction)set_callback_DateFromString, + METH_VARARGS, set_callback_DateFromString_doc}, + {"set_callback_DateTimeFromString", (PyCFunction)set_callback_DateTimeFromString, + METH_VARARGS, set_callback_DateTimeFromString_doc}, + {NULL, NULL} }; PyMODINIT_FUNC initcseries(void) { - PyObject *m; + PyObject *m, *ops_dict; if (PyType_Ready(&DateType) < 0) return; @@ -3058,7 +3386,18 @@ import_array(); PyDateTime_IMPORT; + ops_dict = PyArray_GetNumericOps(); + NP_ADD = PyDict_GetItemString(ops_dict, "add"); + NP_MULTIPLY = PyDict_GetItemString(ops_dict, "multiply"); + NP_SUBTRACT = PyDict_GetItemString(ops_dict, "subtract"); + NP_SQRT = PyDict_GetItemString(ops_dict, "sqrt"); + Py_INCREF(NP_ADD); + Py_INCREF(NP_MULTIPLY); + Py_INCREF(NP_SUBTRACT); + Py_INCREF(NP_SQRT); + Py_DECREF(ops_dict); + Py_INCREF(&DateType); PyModule_AddObject(m, "Date", (PyObject *)&DateType); From scipy-svn at scipy.org Thu Apr 12 10:13:03 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 12 Apr 2007 09:13:03 -0500 (CDT) Subject: [Scipy-svn] r2905 - trunk/Lib/sandbox/timeseries/addons Message-ID: <20070412141303.1D451C7C039@new.scipy.org> Author: mattknox_ca Date: 2007-04-12 09:12:43 -0500 (Thu, 12 Apr 2007) New Revision: 2905 Added: trunk/Lib/sandbox/timeseries/addons/moving_funcs.py Log: new moving_funcs module Added: trunk/Lib/sandbox/timeseries/addons/moving_funcs.py =================================================================== --- trunk/Lib/sandbox/timeseries/addons/moving_funcs.py 2007-04-12 14:11:45 UTC (rev 2904) +++ trunk/Lib/sandbox/timeseries/addons/moving_funcs.py 2007-04-12 14:12:43 UTC (rev 2905) @@ -0,0 +1,285 @@ +""" + +A collection of moving functions for masked arrays and time series + +:author: Pierre GF Gerard-Marchant & Matt Knox +:contact: pierregm_at_uga_dot_edu - mattknox_ca_at_hotmail_dot_com +:version: $Id: filters.py 2819 2007-03-03 23:00:20Z pierregm $ +""" +__author__ = "Pierre GF Gerard-Marchant & Matt Knox ($Author: pierregm $)" +__version__ = '1.0' +__revision__ = "$Revision: 2819 $" +__date__ = '$Date: 2007-03-03 18:00:20 -0500 (Sat, 03 Mar 2007) $' + +import numpy as N +from numpy import bool_, float_ +narray = N.array + +from scipy.signal import convolve, get_window + +import maskedarray as MA +from maskedarray import MaskedArray, nomask, getmask, getmaskarray, masked +marray = MA.array + +from timeseries.cseries import MA_mov_stddev, MA_mov_sum + +__all__ = ['mov_sum', + 'mov_average_expw', + 'mov_stddev', 'mov_var', 'mov_sample_stddev', 'mov_sample_var', + 'cmov_average', 'cmov_mean', 'cmov_window' + ] + +def _process_result_dict(orig_data, 
result_dict): + "process the results from the c function" + + rtype = result_dict['array'].dtype + rmask = result_dict['mask'] + + # makes a copy of the appropriate type + data = orig_data.astype(rtype) + data[:] = result_dict['array'] + + return marray(data, mask=rmask, copy=True, subok=True) + + +def mov_sum(data, window_size, dtype=None): + kwargs = {'array':data, + 'window_size':window_size} + + if dtype is not None: + kwargs['dtype'] = dtype + + result_dict = MA_mov_sum(**kwargs) + return _process_result_dict(data, result_dict) + + +def _mov_var_stddev(data, window_size, is_variance, is_sample, dtype): + "helper function for mov_var and mov_stddev functions" + + kwargs = {'array':data, + 'window_size':window_size, + 'is_variance':is_variance, + 'is_sample':is_sample} + + if dtype is not None: + kwargs['dtype'] = dtype + + result_dict = MA_mov_stddev(**kwargs) + return _process_result_dict(data, result_dict) + + +def mov_var(data, window_size, dtype=None): + """Calculates the moving variance of a 1-D array. This is the population +variance. See "mov_sample_var" for moving sample variance. + +:Parameters: + data : ndarray + Data as a valid (subclass of) ndarray or MaskedArray. In particular, + TimeSeries objects are valid here. + window_size : int + Time periods to use for each calculation. + dtype : numpy data type specification (*None*) + Behaves the same as the dtype parameter for the numpy.var function. + +:Return value: + The result is always a masked array (preserves subclass attributes). The + result at index i uses values from [i-window_size:i+1], and will be masked + for the first `window_size` values. The result will also be masked at i + if any of the input values in the slice [i-window_size:i+1] are masked.""" + + + return _mov_var_stddev(data=data, window_size=window_size, + is_variance=1, is_sample=0, dtype=dtype) + +def mov_stddev(data, window_size, dtype=None): + """Calculates the moving standard deviation of a 1-D array. This is the +population standard deviation. See "mov_sample_stddev" for moving sample standard +deviation. + +:Parameters: + data : ndarray + Data as a valid (subclass of) ndarray or MaskedArray. In particular, + TimeSeries objects are valid here. + window_size : int + Time periods to use for each calculation. + dtype : numpy data type specification (*None*) + Behaves the same as the dtype parameter for the numpy.std function. + +:Return value: + The result is always a masked array (preserves subclass attributes). The + result at index i uses values from [i-window_size:i+1], and will be masked + for the first `window_size` values. The result will also be masked at i + if any of the input values in the slice [i-window_size:i+1] are masked.""" + + return _mov_var_stddev(data=data, window_size=window_size, + is_variance=0, is_sample=0, dtype=dtype) + + +def mov_sample_var(data, window_size, dtype=None): + """Calculates the moving sample variance of a 1-D array. + +:Parameters: + data : ndarray + Data as a valid (subclass of) ndarray or MaskedArray. In particular, + TimeSeries objects are valid here. + window_size : int + Time periods to use for each calculation. + dtype : numpy data type specification (*None*) + Behaves the same as the dtype parameter for the numpy.var function. + +:Return value: + The result is always a masked array (preserves subclass attributes). The + result at index i uses values from [i-window_size:i+1], and will be masked + for the first `window_size` values. 
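
(Illustrative aside, not part of the committed code: a pure-Python sketch of the running-sum idea behind calc_mov_sum and of the variance identity the moving std/var routines rely on. The values are made up; the C version additionally propagates masks and masks each result until a full window of unmasked values has accumulated.)

    def mov_sum(x, w):
        # running sum: add the value entering the window, drop the one leaving it
        out, s = [], 0.0
        for i, v in enumerate(x):
            s += v
            if i >= w:
                s -= x[i - w]
            out.append(s)
        return out                 # out[i] covers x[i-w+1:i+1] once i >= w - 1

    x = [2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0]
    w = 4
    s1 = mov_sum(x, w)
    s2 = mov_sum([v * v for v in x], w)

    i = 5                          # a fully populated window, x[2:6] == [4., 4., 5., 5.]
    var_pop = s2[i] / w - (s1[i] / w) ** 2                     # 0.25
    var_smp = s2[i] / (w - 1) - s1[i] ** 2 / (w * (w - 1))     # 1/3

The population and sample denominators above are the same ones applied in the C code (see the rapid-calculation reference cited there).
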
The result will also be masked at i + if any of the input values in the slice [i-window_size:i+1] are masked.""" + + + return _mov_var_stddev(data=data, window_size=window_size, + is_variance=1, is_sample=1, dtype=dtype) + +def mov_sample_stddev(data, window_size, dtype=None): + """Calculates the moving sample standard deviation of a 1-D array. + +:Parameters: + data : ndarray + Data as a valid (subclass of) ndarray or MaskedArray. In particular, + TimeSeries objects are valid here. + window_size : int + Time periods to use for each calculation. + dtype : numpy data type specification (*None*) + Behaves the same as the dtype parameter for the numpy.std function. + +:Return value: + The result is always a masked array (preserves subclass attributes). The + result at index i uses values from [i-window_size:i+1], and will be masked + for the first `window_size` values. The result will also be masked at i + if any of the input values in the slice [i-window_size:i+1] are masked.""" + + return _mov_var_stddev(data=data, window_size=window_size, + is_variance=0, is_sample=1, dtype=dtype) + +def mov_average_expw(data, span, tol=1e-6): + """Calculates the exponentially weighted moving average of a series. + +:Parameters: + data : ndarray + Data as a valid (subclass of) ndarray or MaskedArray. In particular, + TimeSeries objects are valid here. + span : int + Time periods. The smoothing factor is 2/(span + 1) + tol : float, *[1e-6]* + Tolerance for the definition of the mask. When data contains masked + values, this parameter determinea what points in the result should be masked. + Values in the result that would not be "significantly" impacted (as + determined by this parameter) by the masked values are left unmasked. +""" + data = marray(data, copy=True, subok=True) + ismasked = (data._mask is not nomask) + data._mask = N.zeros(data.shape, bool_) + _data = data._data + # + k = 2./float(span + 1) + def expmave_sub(a, b): + return b + k * (a - b) + # + data._data.flat = N.frompyfunc(expmave_sub, 2, 1).accumulate(_data) + if ismasked: + _unmasked = N.logical_not(data._mask).astype(float_) + marker = 1. - N.frompyfunc(expmave_sub, 2, 1).accumulate(_unmasked) + data._mask[marker > tol] = True + data._mask[0] = True + # + return data + +""" +def weightmave(data, span): + data = marray(data, subok=True, copy=True) + data._mask = N.zeros(data.shape, bool_) + # Set the data + _data = data._data + tmp = N.empty_like(_data) + tmp[:span] = _data[:span] + s = 0 + for i in range(span, len(data)): + s += _data[i] - _data[i-span] + tmp[i] = span*_data[i] + tmp[i-1] - s + tmp *= 2./(span*(n+1)) + data._data.flat = tmp + # Set the mask + if data._mask is not nomask: + msk = data._mask.nonzero()[0].repeat(span).reshape(-1,span) + msk += range(span) + data._mask[msk.ravel()] = True + data._mask[:span] = True + return data +""" + +#............................................................................... +def cmov_window(data, span, window_type): + """Applies a centered moving window of type window_type and size span on the + data. + + Returns a (subclass of) MaskedArray. The k first and k last data are always + masked (with k=span//2). When data has a missing value at position i, + the result has missing values in the interval [i-k:i+k+1]. + + +:Parameters: + data : ndarray + Data to process. The array should be at most 2D. On 2D arrays, the window + is applied recursively on each column. + span : integer + The width of the window. 
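
(Illustrative aside: the exponentially weighted recursion described in the mov_average_expw docstring above, with smoothing factor k = 2/(span + 1), worked on a few made-up numbers. Each output is the previous output pulled a fraction k toward the new observation.)

    span = 9
    k = 2.0 / (span + 1)                 # 0.2
    data = [10.0, 11.0, 12.0, 13.0]
    ewma = [data[0]]
    for v in data[1:]:
        ewma.append(ewma[-1] + k * (v - ewma[-1]))
    # ewma == [10.0, 10.2, 10.56, 11.048]
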
+ window_type : string/tuple/float + Window type (see Notes) + +Notes +----- + +The recognized window types are: boxcar, triang, blackman, hamming, hanning, +bartlett, parzen, bohman, blackmanharris, nuttall, barthann, kaiser (needs beta), +gaussian (needs std), general_gaussian (needs power, width), slepian (needs width). +If the window requires parameters, the window_type argument should be a tuple +with the first argument the string name of the window, and the next arguments +the needed parameters. If window_type is a floating point number, it is interpreted +as the beta parameter of the kaiser window. + +Note also that only boxcar has been thoroughly tested. + """ + # + data = marray(data, copy=True, subok=True) + if data._mask is nomask: + data._mask = N.zeros(data.shape, bool_) + window = get_window(window_type, span, fftbins=False) + (n, k) = (len(data), span//2) + # + if data.ndim == 1: + data._data.flat = convolve(data._data, window)[k:n+k] / float(span) + data._mask[:] = ((convolve(getmaskarray(data), window) > 0)[k:n+k]) + elif data.ndim == 2: + for i in range(data.shape[-1]): + _data = data._data[:,i] + _data.flat = convolve(_data, window)[k:n+k] / float(span) + data._mask[:,i] = (convolve(data._mask[:,i], window) > 0)[k:n+k] + else: + raise ValueError, "Data should be at most 2D" + data._mask[:k] = data._mask[-k:] = True + return data + +def cmov_average(data, span): + """Computes the centered moving average of size span on the data. + + Returns a (subclass of) MaskedArray. The k first and k last data are always + masked (with k=span//2). When data has a missing value at position i, + the result has missing values in the interval [i-k:i+k+1]. + +:Parameters: + data : ndarray + Data to process. The array should be at most 2D. On 2D arrays, the window + is applied recursively on each column. + span : integer + The width of the window. + """ + return cmov_window(data, span, 'boxcar') + +cmov_mean = cmov_average From scipy-svn at scipy.org Thu Apr 12 10:13:54 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 12 Apr 2007 09:13:54 -0500 (CDT) Subject: [Scipy-svn] r2906 - trunk/Lib/sandbox/timeseries/addons Message-ID: <20070412141354.3024EC7C039@new.scipy.org> Author: mattknox_ca Date: 2007-04-12 09:13:39 -0500 (Thu, 12 Apr 2007) New Revision: 2906 Modified: trunk/Lib/sandbox/timeseries/addons/filters.py Log: moved functions into moving_funcs Modified: trunk/Lib/sandbox/timeseries/addons/filters.py =================================================================== --- trunk/Lib/sandbox/timeseries/addons/filters.py 2007-04-12 14:12:43 UTC (rev 2905) +++ trunk/Lib/sandbox/timeseries/addons/filters.py 2007-04-12 14:13:39 UTC (rev 2906) @@ -21,140 +21,8 @@ from maskedarray import MaskedArray, nomask, getmask, getmaskarray, masked marray = MA.array +from moving_funcs import mov_average_expw, cmov_average, cmov_mean, \ + cmov_window -__all__ = ['expmave' - 'running_window', 'running_mean' - ] - -#####--------------------------------------------------------------------------- -#---- --- Moving average functions --- -#####--------------------------------------------------------------------------- -def expmave(data, n, tol=1e-6): - """Calculates the exponential moving average of a series. - -:Parameters: - data : ndarray - Data as a valid (subclass of) ndarray or MaskedArray. In particular, - TimeSeries objects are valid here. - n : int - Time periods. The smoothing factor is 2/(n + 1) - tol : float, *[1e-6]* - Tolerance for the definition of the mask. 
When data contains masked - values, this parameter determinea what points in the result should be masked. - Values in the result that would not be "significantly" impacted (as - determined by this parameter) by the masked values are left unmasked. -""" - data = marray(data, copy=True, subok=True) - ismasked = (data._mask is not nomask) - data._mask = N.zeros(data.shape, bool_) - _data = data._data - # - k = 2./float(n + 1) - def expmave_sub(a, b): - return b + k * (a - b) - # - data._data.flat = N.frompyfunc(expmave_sub, 2, 1).accumulate(_data) - if ismasked: - _unmasked = N.logical_not(data._mask).astype(float_) - marker = 1. - N.frompyfunc(expmave_sub, 2, 1).accumulate(_unmasked) - data._mask[marker > tol] = True - data._mask[0] = True - # - return data - -def weightmave(data, n): - data = marray(data, subok=True, copy=True) - data._mask = N.zeros(data.shape, bool_) - # Set the data - _data = data._data - tmp = N.empty_like(_data) - tmp[:n] = _data[:n] - s = 0 - for i in range(n, len(data)): - s += _data[i] - _data[i-n] - tmp[i] = n*_data[i] + tmp[i-1] - s - tmp *= 2./(n*(n+1)) - data._data.flat = tmp - # Set the mask - if data._mask is not nomask: - msk = data._mask.nonzero()[0].repeat(n).reshape(-1,n) - msk += range(n) - data._mask[msk.ravel()] = True - data._mask[:n] = True - return data - - -#............................................................................... -def running_window(data, window_type, window_size): - """Applies a running window of type window_type and size window_size on the - data. - - Returns a (subclass of) MaskedArray. The k first and k last data are always - masked (with k=window_size//2). When data has a missing value at position i, - the result has missing values in the interval [i-k:i+k+1]. - - -:Parameters: - data : ndarray - Data to process. The array should be at most 2D. On 2D arrays, the window - is applied recursively on each column. - window_type : string/tuple/float - Window type (see Notes) - window_size : integer - The width of the window. - -Notes ------ - -The recognized window types are: boxcar, triang, blackman, hamming, hanning, -bartlett, parzen, bohman, blackmanharris, nuttall, barthann, kaiser (needs beta), -gaussian (needs std), general_gaussian (needs power, width), slepian (needs width). -If the window requires parameters, the window_type argument should be a tuple -with the first argument the string name of the window, and the next arguments -the needed parameters. If window_type is a floating point number, it is interpreted -as the beta parameter of the kaiser window. - -Note also that only boxcar has been thoroughly tested. - """ - # - data = marray(data, copy=True, subok=True) - if data._mask is nomask: - data._mask = N.zeros(data.shape, bool_) - window = get_window(window_type, window_size, fftbins=False) - (n, k) = (len(data), window_size//2) - # - if data.ndim == 1: - data._data.flat = convolve(data._data, window)[k:n+k] / float(window_size) - data._mask[:] = ((convolve(getmaskarray(data), window) > 0)[k:n+k]) - elif data.ndim == 2: - for i in range(data.shape[-1]): - _data = data._data[:,i] - _data.flat = convolve(_data, window)[k:n+k] / float(window_size) - data._mask[:,i] = (convolve(data._mask[:,i], window) > 0)[k:n+k] - else: - raise ValueError, "Data should be at most 2D" - data._mask[:k] = data._mask[-k:] = True - return data - -def running_mean(data, width): - """Computes the running mean of size width on the data. - - Returns a (subclass of) MaskedArray. 
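
(Illustrative aside on why the first and last k points of these centred windows end up masked: the window is applied by convolution, so the k outputs nearest each end mix in zero padding. A small NumPy sketch with made-up numbers; the committed code uses scipy.signal.convolve and get_window rather than the plain numpy.convolve used here.)

    from numpy import convolve, ones

    x = [1.0, 2.0, 3.0, 4.0, 5.0]
    span = 3
    k = span // 2
    full = convolve(x, ones(span)) / float(span)   # 'full' convolution, length len(x) + span - 1
    centred = full[k:len(x) + k]                   # [1.0, 2.0, 3.0, 4.0, 3.0]
    # the interior entries are true centred means; the first and last k entries
    # (1.0 and 3.0 here) are edge artifacts, which is why the result is masked there
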
The k first and k last data are always - masked (with k=window_size//2). When data has a missing value at position i, - the result has missing values in the interval [i-k:i+k+1]. - -:Parameters: - data : ndarray - Data to process. The array should be at most 2D. On 2D arrays, the window - is applied recursively on each column. - window_size : integer - The width of the window. - """ - return running_window(data, 'boxcar', width) - -################################################################################ -if __name__ == '__main__': - from maskedarray.testutils import assert_equal, assert_almost_equal - from timeseries import time_series, thisday - # - data = MA.arange(100) +__all__ = ['mov_average_expw' + 'cmov_average', 'cmov_mean', 'cmov_window'] From scipy-svn at scipy.org Thu Apr 12 10:14:44 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 12 Apr 2007 09:14:44 -0500 (CDT) Subject: [Scipy-svn] r2907 - trunk/Lib/sandbox/timeseries Message-ID: <20070412141444.003A5C7C039@new.scipy.org> Author: mattknox_ca Date: 2007-04-12 09:14:34 -0500 (Thu, 12 Apr 2007) New Revision: 2907 Modified: trunk/Lib/sandbox/timeseries/__init__.py Log: added moving_funcs to script Modified: trunk/Lib/sandbox/timeseries/__init__.py =================================================================== --- trunk/Lib/sandbox/timeseries/__init__.py 2007-04-12 14:13:39 UTC (rev 2906) +++ trunk/Lib/sandbox/timeseries/__init__.py 2007-04-12 14:14:34 UTC (rev 2907) @@ -25,11 +25,12 @@ from tmulti import * import reportlib from reportlib import * -from addons import filters, interpolate +import addons +from addons import filters, interpolate, moving_funcs __all__ = ['const', 'tdates','tseries','tmulti','reportlib','filters', - 'interpolate'] + 'interpolate', 'moving_funcs'] __all__ += tdates.__all__ __all__ += tseries.__all__ __all__ += tmulti.__all__ From scipy-svn at scipy.org Thu Apr 12 16:01:13 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 12 Apr 2007 15:01:13 -0500 (CDT) Subject: [Scipy-svn] r2908 - trunk/Lib/sandbox/timeseries Message-ID: <20070412200113.6BF9839C031@new.scipy.org> Author: mattknox_ca Date: 2007-04-12 15:01:06 -0500 (Thu, 12 Apr 2007) New Revision: 2908 Added: trunk/Lib/sandbox/timeseries/lib/ Removed: trunk/Lib/sandbox/timeseries/addons/ Log: Renamed remotely Copied: trunk/Lib/sandbox/timeseries/lib (from rev 2907, trunk/Lib/sandbox/timeseries/addons) From scipy-svn at scipy.org Thu Apr 12 16:05:38 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 12 Apr 2007 15:05:38 -0500 (CDT) Subject: [Scipy-svn] r2909 - trunk/Lib/sandbox/timeseries/lib/tests Message-ID: <20070412200538.004EC39C031@new.scipy.org> Author: mattknox_ca Date: 2007-04-12 15:05:35 -0500 (Thu, 12 Apr 2007) New Revision: 2909 Added: trunk/Lib/sandbox/timeseries/lib/tests/test_moving_funcs.py Log: moving_funcs tests Added: trunk/Lib/sandbox/timeseries/lib/tests/test_moving_funcs.py =================================================================== --- trunk/Lib/sandbox/timeseries/lib/tests/test_moving_funcs.py 2007-04-12 20:01:06 UTC (rev 2908) +++ trunk/Lib/sandbox/timeseries/lib/tests/test_moving_funcs.py 2007-04-12 20:05:35 UTC (rev 2909) @@ -0,0 +1,84 @@ +# pylint: disable-msg=W0611, W0612, W0511,R0201 +"""Tests suite for MaskedArray & subclassing. 
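
(Illustrative aside: the behaviour the new test file checks, written as plain usage on hypothetical data. The import path assumes the timeseries.lib layout that this batch of commits settles on, and that the sandbox maskedarray package is installed.)

    import numpy as N
    from maskedarray import MaskedArray, masked
    from timeseries.lib.moving_funcs import cmov_average

    data = MaskedArray(N.arange(25.))
    data[10] = masked
    ravg = cmov_average(data, 5)        # width 5 -> k = 2
    # the k points at each end are masked, and so is every output whose window
    # touches the missing input: indices 10-k through 10+k, i.e. 8 through 12
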
+ +:author: Pierre Gerard-Marchant & Matt Knox +:contact: pierregm_at_uga_dot_edu & mattknox_ca_at_hotmail_dot_com +:version: $Id: test_filters.py 2819 2007-03-03 23:00:20Z pierregm $ +""" +__author__ = "Pierre GF Gerard-Marchant & Matt Knox ($Author: pierregm $)" +__version__ = '1.0' +__revision__ = "$Revision: 2819 $" +__date__ = '$Date: 2007-03-03 18:00:20 -0500 (Sat, 03 Mar 2007) $' + +import numpy as N +import numpy.core.numeric as numeric + +from numpy.testing import NumpyTest, NumpyTestCase + +import maskedarray.testutils +from maskedarray.testutils import * + +import maskedarray.core as coremodule +from maskedarray.core import MaskedArray, masked + +from timeseries import time_series, thisday + +from timeseries.addons.moving_funcs import cmov_average + +class test_cmov_average(NumpyTestCase): + + def __init__(self, *args, **kwds): + NumpyTestCase.__init__(self, *args, **kwds) + self.data = numeric.arange(25) + self.maskeddata = MaskedArray(self.data) + self.maskeddata[10] = masked + # + def test_onregulararray(self): + data = self.data + for width in [3,5,7]: + k = (width-1)/2 + ravg = cmov_average(data,width) + assert(isinstance(ravg, MaskedArray)) + assert_equal(ravg, data) + assert_equal(ravg._mask, [1]*k+[0]*(len(data)-2*k)+[1]*k) + # + def test_onmaskedarray(self): + data = self.maskeddata + for width in [3,5,7]: + k = (width-1)/2 + ravg = cmov_average(data,width) + assert(isinstance(ravg, MaskedArray)) + assert_equal(ravg, data) + m = N.zeros(len(data), N.bool_) + m[:k] = m[-k:] = m[10-k:10+k+1] = True + assert_equal(ravg._mask, m) + # + def test_ontimeseries(self): + data = time_series(self.maskeddata, start_date=thisday('D')) + for width in [3,5,7]: + k = (width-1)/2 + ravg = cmov_average(data,width) + assert(isinstance(ravg, MaskedArray)) + assert_equal(ravg, data) + m = N.zeros(len(data), N.bool_) + m[:k] = m[-k:] = m[10-k:10+k+1] = True + assert_equal(ravg._mask, m) + assert_equal(ravg._dates, data._dates) + # + def tests_onmultitimeseries(self): + maskeddata = MaskedArray(N.random.random(75).reshape(25,3)) + maskeddata[10] = masked + data = time_series(maskeddata, start_date=thisday('D')) + for width in [3,5,7]: + k = (width-1)/2 + ravg = cmov_average(data,width) + assert(isinstance(ravg, MaskedArray)) + assert_almost_equal(ravg[18].squeeze(), data[18-k:18+k+1].mean(0)) + m = N.zeros(data.shape, N.bool_) + m[:k] = m[-k:] = m[10-k:10+k+1] = True + assert_equal(ravg._mask, m) + assert_equal(ravg._dates, data._dates) + +#------------------------------------------------------------------------------ +if __name__ == "__main__": + NumpyTest().run() \ No newline at end of file From scipy-svn at scipy.org Thu Apr 12 16:06:24 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 12 Apr 2007 15:06:24 -0500 (CDT) Subject: [Scipy-svn] r2910 - trunk/Lib/sandbox/timeseries/lib/tests Message-ID: <20070412200624.B198839C031@new.scipy.org> Author: mattknox_ca Date: 2007-04-12 15:06:21 -0500 (Thu, 12 Apr 2007) New Revision: 2910 Removed: trunk/Lib/sandbox/timeseries/lib/tests/test_filters.py Log: replaced by test_moving_funcs.py Deleted: trunk/Lib/sandbox/timeseries/lib/tests/test_filters.py =================================================================== --- trunk/Lib/sandbox/timeseries/lib/tests/test_filters.py 2007-04-12 20:05:35 UTC (rev 2909) +++ trunk/Lib/sandbox/timeseries/lib/tests/test_filters.py 2007-04-12 20:06:21 UTC (rev 2910) @@ -1,87 +0,0 @@ -# pylint: disable-msg=W0611, W0612, W0511,R0201 -"""Tests suite for MaskedArray & subclassing. 
- -:author: Pierre Gerard-Marchant & Matt Knox -:contact: pierregm_at_uga_dot_edu & mattknox_ca_at_hotmail_dot_com -:version: $Id$ -""" -__author__ = "Pierre GF Gerard-Marchant & Matt Knox ($Author$)" -__version__ = '1.0' -__revision__ = "$Revision$" -__date__ = '$Date$' - -import numpy as N -import numpy.core.numeric as numeric - -from numpy.testing import NumpyTest, NumpyTestCase - -import maskedarray.testutils -from maskedarray.testutils import * - -import maskedarray.core as coremodule -from maskedarray.core import MaskedArray, masked - -import tseries -from tseries import time_series, thisday - -import addons.filters -from addons.filters import running_mean - - -class test_runningmean(NumpyTestCase): - - def __init__(self, *args, **kwds): - NumpyTestCase.__init__(self, *args, **kwds) - self.data = numeric.arange(25) - self.maskeddata = MaskedArray(self.data) - self.maskeddata[10] = masked - # - def test_onregulararray(self): - data = self.data - for width in [3,5,7]: - k = (width-1)/2 - ravg = running_mean(data,width) - assert(isinstance(ravg, MaskedArray)) - assert_equal(ravg, data) - assert_equal(ravg._mask, [1]*k+[0]*(len(data)-2*k)+[1]*k) - # - def test_onmaskedarray(self): - data = self.maskeddata - for width in [3,5,7]: - k = (width-1)/2 - ravg = running_mean(data,width) - assert(isinstance(ravg, MaskedArray)) - assert_equal(ravg, data) - m = N.zeros(len(data), N.bool_) - m[:k] = m[-k:] = m[10-k:10+k+1] = True - assert_equal(ravg._mask, m) - # - def test_ontimeseries(self): - data = time_series(self.maskeddata, start_date=thisday('D')) - for width in [3,5,7]: - k = (width-1)/2 - ravg = running_mean(data,width) - assert(isinstance(ravg, MaskedArray)) - assert_equal(ravg, data) - m = N.zeros(len(data), N.bool_) - m[:k] = m[-k:] = m[10-k:10+k+1] = True - assert_equal(ravg._mask, m) - assert_equal(ravg._dates, data._dates) - # - def tests_onmultitimeseries(self): - maskeddata = MaskedArray(N.random.random(75).reshape(25,3)) - maskeddata[10] = masked - data = time_series(maskeddata, start_date=thisday('D')) - for width in [3,5,7]: - k = (width-1)/2 - ravg = running_mean(data,width) - assert(isinstance(ravg, MaskedArray)) - assert_almost_equal(ravg[18].squeeze(), data[18-k:18+k+1].mean(0)) - m = N.zeros(data.shape, N.bool_) - m[:k] = m[-k:] = m[10-k:10+k+1] = True - assert_equal(ravg._mask, m) - assert_equal(ravg._dates, data._dates) - -#------------------------------------------------------------------------------ -if __name__ == "__main__": - NumpyTest().run() \ No newline at end of file From scipy-svn at scipy.org Thu Apr 12 16:08:35 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 12 Apr 2007 15:08:35 -0500 (CDT) Subject: [Scipy-svn] r2911 - trunk/Lib/sandbox/timeseries Message-ID: <20070412200835.5479CC7C016@new.scipy.org> Author: mattknox_ca Date: 2007-04-12 15:08:30 -0500 (Thu, 12 Apr 2007) New Revision: 2911 Modified: trunk/Lib/sandbox/timeseries/__init__.py Log: updated to reflect renaming of addons to lib Modified: trunk/Lib/sandbox/timeseries/__init__.py =================================================================== --- trunk/Lib/sandbox/timeseries/__init__.py 2007-04-12 20:06:21 UTC (rev 2910) +++ trunk/Lib/sandbox/timeseries/__init__.py 2007-04-12 20:08:30 UTC (rev 2911) @@ -25,8 +25,8 @@ from tmulti import * import reportlib from reportlib import * -import addons -from addons import filters, interpolate, moving_funcs +import lib +from lib import filters, interpolate, moving_funcs __all__ = ['const', 
'tdates','tseries','tmulti','reportlib','filters', From scipy-svn at scipy.org Thu Apr 12 16:11:57 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 12 Apr 2007 15:11:57 -0500 (CDT) Subject: [Scipy-svn] r2912 - trunk/Lib/sandbox/timeseries/lib/tests Message-ID: <20070412201157.EAA0839C031@new.scipy.org> Author: mattknox_ca Date: 2007-04-12 15:11:53 -0500 (Thu, 12 Apr 2007) New Revision: 2912 Modified: trunk/Lib/sandbox/timeseries/lib/tests/test_interpolate.py trunk/Lib/sandbox/timeseries/lib/tests/test_moving_funcs.py Log: updated for new directory structure Modified: trunk/Lib/sandbox/timeseries/lib/tests/test_interpolate.py =================================================================== --- trunk/Lib/sandbox/timeseries/lib/tests/test_interpolate.py 2007-04-12 20:08:30 UTC (rev 2911) +++ trunk/Lib/sandbox/timeseries/lib/tests/test_interpolate.py 2007-04-12 20:11:53 UTC (rev 2912) @@ -19,7 +19,7 @@ import maskedarray.core as coremodule from maskedarray.core import MaskedArray, masked -from addons.interpolate import backward_fill, forward_fill, interp_masked1d +from timeseries.lib.interpolate import backward_fill, forward_fill, interp_masked1d class test_funcs(NumpyTestCase): Modified: trunk/Lib/sandbox/timeseries/lib/tests/test_moving_funcs.py =================================================================== --- trunk/Lib/sandbox/timeseries/lib/tests/test_moving_funcs.py 2007-04-12 20:08:30 UTC (rev 2911) +++ trunk/Lib/sandbox/timeseries/lib/tests/test_moving_funcs.py 2007-04-12 20:11:53 UTC (rev 2912) @@ -23,7 +23,7 @@ from timeseries import time_series, thisday -from timeseries.addons.moving_funcs import cmov_average +from timeseries.lib.moving_funcs import cmov_average class test_cmov_average(NumpyTestCase): From scipy-svn at scipy.org Thu Apr 12 16:15:12 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 12 Apr 2007 15:15:12 -0500 (CDT) Subject: [Scipy-svn] r2913 - trunk/Lib/sandbox/timeseries Message-ID: <20070412201512.03F5639C031@new.scipy.org> Author: mattknox_ca Date: 2007-04-12 15:15:09 -0500 (Thu, 12 Apr 2007) New Revision: 2913 Removed: trunk/Lib/sandbox/timeseries/doc/ Log: not needed. Can put this info on the wiki From scipy-svn at scipy.org Thu Apr 12 16:17:52 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 12 Apr 2007 15:17:52 -0500 (CDT) Subject: [Scipy-svn] r2914 - trunk/Lib/sandbox/timeseries Message-ID: <20070412201752.EE49239C031@new.scipy.org> Author: mattknox_ca Date: 2007-04-12 15:17:48 -0500 (Thu, 12 Apr 2007) New Revision: 2914 Modified: trunk/Lib/sandbox/timeseries/readme.txt Log: Modified: trunk/Lib/sandbox/timeseries/readme.txt =================================================================== --- trunk/Lib/sandbox/timeseries/readme.txt 2007-04-12 20:15:09 UTC (rev 2913) +++ trunk/Lib/sandbox/timeseries/readme.txt 2007-04-12 20:17:48 UTC (rev 2914) @@ -1,12 +1,4 @@ -Requirements and warnings: +Please see the wiki for installation and requirements info, as well as module +documentation. -1. Only tested with numpy 1.0.1 -2. Only tested with Python 2.4.x -3. Only tested on Windows and x86_64 Platform -4. Requires maskedarray from the sandbox (not the standard masked array module with numpy) - - -Instructions: - -1. Check out the wiki. That is the main source of documentation for now. 
- http://www.scipy.org/TimeSeriesPackage +http://www.scipy.org/TimeSeriesPackage From scipy-svn at scipy.org Thu Apr 12 16:19:37 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 12 Apr 2007 15:19:37 -0500 (CDT) Subject: [Scipy-svn] r2915 - trunk/Lib/sandbox/timeseries Message-ID: <20070412201937.965D739C031@new.scipy.org> Author: mattknox_ca Date: 2007-04-12 15:19:35 -0500 (Thu, 12 Apr 2007) New Revision: 2915 Removed: trunk/Lib/sandbox/timeseries/.project Log: eclipse project file that shouldn't be in svn Deleted: trunk/Lib/sandbox/timeseries/.project =================================================================== --- trunk/Lib/sandbox/timeseries/.project 2007-04-12 20:17:48 UTC (rev 2914) +++ trunk/Lib/sandbox/timeseries/.project 2007-04-12 20:19:35 UTC (rev 2915) @@ -1,17 +0,0 @@ - - - scipy_svn_timeseries - - - - - - org.python.pydev.PyDevBuilder - - - - - - org.python.pydev.pythonNature - - From scipy-svn at scipy.org Fri Apr 13 15:30:43 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Fri, 13 Apr 2007 14:30:43 -0500 (CDT) Subject: [Scipy-svn] r2916 - trunk/Lib/sandbox/timeseries Message-ID: <20070413193043.AF6AC39C1EE@new.scipy.org> Author: mattknox_ca Date: 2007-04-13 14:30:39 -0500 (Fri, 13 Apr 2007) New Revision: 2916 Modified: trunk/Lib/sandbox/timeseries/setup.py Log: updated to reflect new directory structure Modified: trunk/Lib/sandbox/timeseries/setup.py =================================================================== --- trunk/Lib/sandbox/timeseries/setup.py 2007-04-12 20:19:35 UTC (rev 2915) +++ trunk/Lib/sandbox/timeseries/setup.py 2007-04-13 19:30:39 UTC (rev 2916) @@ -15,8 +15,8 @@ sources=[sources,], include_dirs=[nxheader], ) - confgr.add_data_dir('doc') - confgr.add_subpackage('addons') + + confgr.add_subpackage('lib') confgr.add_subpackage('io') confgr.add_subpackage('plotlib') confgr.add_subpackage('tests') From scipy-svn at scipy.org Fri Apr 13 15:40:40 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Fri, 13 Apr 2007 14:40:40 -0500 (CDT) Subject: [Scipy-svn] r2917 - in trunk/Lib/sandbox/timeseries: lib src Message-ID: <20070413194040.8A77539C037@new.scipy.org> Author: mattknox_ca Date: 2007-04-13 14:40:35 -0500 (Fri, 13 Apr 2007) New Revision: 2917 Modified: trunk/Lib/sandbox/timeseries/lib/moving_funcs.py trunk/Lib/sandbox/timeseries/src/cseries.c Log: added mov_average function Modified: trunk/Lib/sandbox/timeseries/lib/moving_funcs.py =================================================================== --- trunk/Lib/sandbox/timeseries/lib/moving_funcs.py 2007-04-13 19:30:39 UTC (rev 2916) +++ trunk/Lib/sandbox/timeseries/lib/moving_funcs.py 2007-04-13 19:40:35 UTC (rev 2917) @@ -21,10 +21,10 @@ from maskedarray import MaskedArray, nomask, getmask, getmaskarray, masked marray = MA.array -from timeseries.cseries import MA_mov_stddev, MA_mov_sum +from timeseries.cseries import MA_mov_stddev, MA_mov_sum, MA_mov_average __all__ = ['mov_sum', - 'mov_average_expw', + 'mov_average', 'mov_mean', 'mov_average_expw', 'mov_stddev', 'mov_var', 'mov_sample_stddev', 'mov_sample_var', 'cmov_average', 'cmov_mean', 'cmov_window' ] @@ -52,7 +52,17 @@ result_dict = MA_mov_sum(**kwargs) return _process_result_dict(data, result_dict) +def mov_average(data, window_size, dtype=None): + kwargs = {'array':data, + 'window_size':window_size} + if dtype is not None: + kwargs['dtype'] = dtype + + result_dict = MA_mov_average(**kwargs) + return _process_result_dict(data, result_dict) +mov_mean = mov_average + def 
_mov_var_stddev(data, window_size, is_variance, is_sample, dtype): "helper function for mov_var and mov_stddev functions" Modified: trunk/Lib/sandbox/timeseries/src/cseries.c =================================================================== --- trunk/Lib/sandbox/timeseries/src/cseries.c 2007-04-13 19:30:39 UTC (rev 2916) +++ trunk/Lib/sandbox/timeseries/src/cseries.c 2007-04-13 19:40:35 UTC (rev 2917) @@ -2914,9 +2914,9 @@ mask, original ndarray, and mask for the result */ static PyObject * check_mov_args(PyObject *orig_arrayobj, int window_size, int min_win_size, - PyArrayObject **orig_ndarray, PyArrayObject **orig_mask, - PyArrayObject **result_mask) { + PyArrayObject **orig_ndarray, PyArrayObject **result_mask) { + PyArrayObject *orig_mask=NULL; int *raw_result_mask; if (!PyArray_Check(orig_arrayobj)) { @@ -2928,7 +2928,7 @@ if (PyObject_HasAttrString(orig_arrayobj, "_mask")) { PyObject *tempMask = PyObject_GetAttrString(orig_arrayobj, "_mask"); if (PyArray_Check(tempMask)) { - *orig_mask = (PyArrayObject*)PyArray_EnsureArray(tempMask); + orig_mask = (PyArrayObject*)PyArray_EnsureArray(tempMask); } else { Py_DECREF(tempMask); } @@ -2963,9 +2963,9 @@ is_masked=0; - if (*orig_mask != NULL) { + if (orig_mask != NULL) { PyObject *valMask; - valMask = PyArray_GETITEM(*orig_mask, PyArray_GetPtr(*orig_mask, &i)); + valMask = PyArray_GETITEM(orig_mask, PyArray_GetPtr(orig_mask, &i)); is_masked = (int)PyInt_AsLong(valMask); Py_DECREF(valMask); } @@ -3061,9 +3061,7 @@ MaskedArray_mov_sum(PyObject *self, PyObject *args, PyObject *kwds) { PyObject *orig_arrayobj=NULL, *result_dict=NULL; - PyArrayObject *orig_ndarray=NULL, *orig_mask=NULL, - *result_ndarray=NULL, *result_mask=NULL; - + PyArrayObject *orig_ndarray=NULL, *result_ndarray=NULL, *result_mask=NULL; PyArray_Descr *dtype=NULL; int rtype, window_size; @@ -3075,9 +3073,8 @@ &orig_arrayobj, &window_size, PyArray_DescrConverter2, &dtype)) return NULL; - check_mov_args(orig_arrayobj, window_size, 1, - &orig_ndarray, &orig_mask, &result_mask); + &orig_ndarray, &result_mask); rtype = _CHKTYPENUM(dtype); @@ -3094,14 +3091,63 @@ return result_dict; } +static char MaskedArray_mov_average_doc[] = ""; +static PyObject * +MaskedArray_mov_average(PyObject *self, PyObject *args, PyObject *kwds) +{ + PyObject *orig_arrayobj=NULL, *result_dict=NULL; + PyArrayObject *orig_ndarray=NULL, *result_ndarray=NULL, *result_mask=NULL, + *mov_sum=NULL; + PyObject *denom=NULL; + PyArray_Descr *dtype=NULL; + + int *raw_result_mask; + + int rtype, window_size; + + static char *kwlist[] = {"array", "window_size", "dtype", NULL}; + + if (!PyArg_ParseTupleAndKeywords(args, kwds, + "Oi|O&:mov_average(array, window_size, dtype)", kwlist, + &orig_arrayobj, &window_size, + PyArray_DescrConverter2, &dtype)) return NULL; + + + check_mov_args(orig_arrayobj, window_size, 2, + &orig_ndarray, &result_mask); + + rtype = _get_type_num_double(orig_ndarray->descr, dtype); + + mov_sum = calc_mov_sum(orig_ndarray, window_size, rtype); + ERR_CHECK(mov_sum) + + denom = PyFloat_FromDouble(1.0/(double)(window_size)); + + result_ndarray = (PyArrayObject*)PyObject_CallFunction( + NP_MULTIPLY, + "OO", mov_sum, + denom); + ERR_CHECK(result_ndarray) + Py_DECREF(mov_sum); + Py_DECREF(denom); + + result_dict = PyDict_New(); + MEM_CHECK(result_dict) + PyDict_SetItemString(result_dict, "array", (PyObject*)result_ndarray); + PyDict_SetItemString(result_dict, "mask", (PyObject*)result_mask); + + Py_DECREF(result_ndarray); + Py_DECREF(result_mask); + return result_dict; +} + static char 
MaskedArray_mov_stddev_doc[] = ""; static PyObject * MaskedArray_mov_stddev(PyObject *self, PyObject *args, PyObject *kwds) { PyObject *orig_arrayobj=NULL, *result_dict=NULL; - PyArrayObject *orig_ndarray=NULL, *orig_mask=NULL, - *result_ndarray=NULL, *result_mask=NULL, + PyArrayObject *orig_ndarray=NULL, *result_ndarray=NULL, *result_mask=NULL, *result_temp1=NULL, *result_temp2=NULL, *result_temp3=NULL; PyArrayObject *mov_sum=NULL, *mov_sum_sq=NULL; PyObject *denom1=NULL, *denom2=NULL; @@ -3121,7 +3167,7 @@ check_mov_args(orig_arrayobj, window_size, 2, - &orig_ndarray, &orig_mask, &result_mask); + &orig_ndarray, &result_mask); rtype = _get_type_num_double(orig_ndarray->descr, dtype); @@ -3340,6 +3386,8 @@ {"MA_mov_sum", (PyCFunction)MaskedArray_mov_sum, METH_VARARGS | METH_KEYWORDS, MaskedArray_mov_sum_doc}, + {"MA_mov_average", (PyCFunction)MaskedArray_mov_average, + METH_VARARGS | METH_KEYWORDS, MaskedArray_mov_average_doc}, {"MA_mov_stddev", (PyCFunction)MaskedArray_mov_stddev, METH_VARARGS | METH_KEYWORDS, MaskedArray_mov_stddev_doc}, From scipy-svn at scipy.org Fri Apr 13 17:46:08 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Fri, 13 Apr 2007 16:46:08 -0500 (CDT) Subject: [Scipy-svn] r2918 - trunk/Lib/signal Message-ID: <20070413214608.108CB39C27C@new.scipy.org> Author: oliphant Date: 2007-04-13 16:46:05 -0500 (Fri, 13 Apr 2007) New Revision: 2918 Modified: trunk/Lib/signal/signaltools.py Log: Fix doc error in lfilter. Modified: trunk/Lib/signal/signaltools.py =================================================================== --- trunk/Lib/signal/signaltools.py 2007-04-13 19:40:35 UTC (rev 2917) +++ trunk/Lib/signal/signaltools.py 2007-04-13 21:46:05 UTC (rev 2918) @@ -464,8 +464,8 @@ The filter function is implemented as a direct II transposed structure. This means that the filter implements - y[n] = b[0]*x[n] + b[1]*x[n-1] + ... + b[nb]*x[n-nb] - - a[1]*y[n-1] + ... + a[na]*y[n-na] + a[0]*y[n] = b[0]*x[n] + b[1]*x[n-1] + ... + b[nb]*x[n-nb] + - a[1]*y[n-1] - ... 
- a[na]*y[n-na] using the following difference equations: From scipy-svn at scipy.org Sun Apr 15 00:59:07 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 14 Apr 2007 23:59:07 -0500 (CDT) Subject: [Scipy-svn] r2919 - trunk/Lib/cluster Message-ID: <20070415045907.C976239C04C@new.scipy.org> Author: rkern Date: 2007-04-14 23:59:04 -0500 (Sat, 14 Apr 2007) New Revision: 2919 Modified: trunk/Lib/cluster/vq.py Log: Remove dependency on scipy.stats Modified: trunk/Lib/cluster/vq.py =================================================================== --- trunk/Lib/cluster/vq.py 2007-04-13 21:46:05 UTC (rev 2918) +++ trunk/Lib/cluster/vq.py 2007-04-15 04:59:04 UTC (rev 2919) @@ -19,8 +19,8 @@ from numpy.random import randint from numpy import shape, zeros, subtract, sqrt, argmin, minimum, array, \ - newaxis, arange, compress, equal, common_type, single, double, take -from scipy.stats import std, mean + newaxis, arange, compress, equal, common_type, single, double, take, \ + std, mean def whiten(obs): """ Normalize a group of observations on a per feature basis From scipy-svn at scipy.org Sun Apr 15 01:12:47 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sun, 15 Apr 2007 00:12:47 -0500 (CDT) Subject: [Scipy-svn] r2920 - trunk/Lib/special Message-ID: <20070415051247.F133C39C086@new.scipy.org> Author: rkern Date: 2007-04-15 00:12:42 -0500 (Sun, 15 Apr 2007) New Revision: 2920 Modified: trunk/Lib/special/orthogonal.py Log: Remove dependency on scipy.linalg in favor of numpy.dual Modified: trunk/Lib/special/orthogonal.py =================================================================== --- trunk/Lib/special/orthogonal.py 2007-04-15 04:59:04 UTC (rev 2919) +++ trunk/Lib/special/orthogonal.py 2007-04-15 05:12:42 UTC (rev 2920) @@ -62,7 +62,7 @@ # Scipy imports. import numpy as np from numpy import all, any, exp, inf, pi, sqrt -from scipy.linalg import eig +from numpy.dual import eig # Local imports. import _cephes as cephes From scipy-svn at scipy.org Sun Apr 15 01:16:18 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sun, 15 Apr 2007 00:16:18 -0500 (CDT) Subject: [Scipy-svn] r2921 - trunk/Lib/odr Message-ID: <20070415051618.CF17739C0A5@new.scipy.org> Author: rkern Date: 2007-04-15 00:16:13 -0500 (Sun, 15 Apr 2007) New Revision: 2921 Modified: trunk/Lib/odr/odrpack.py Log: Remove dependency on scipy.linalg in favor of numpy.dual Modified: trunk/Lib/odr/odrpack.py =================================================================== --- trunk/Lib/odr/odrpack.py 2007-04-15 05:12:42 UTC (rev 2920) +++ trunk/Lib/odr/odrpack.py 2007-04-15 05:16:13 UTC (rev 2921) @@ -320,7 +320,7 @@ covx and covy are arrays of covariance matrices and are converted to weights by performing a matrix inversion on each observation's covariance matrix. - E.g. we[i] = scipy.linalg.inv(covy[i]) # i in range(len(covy)) + E.g. we[i] = numpy.linalg.inv(covy[i]) # i in range(len(covy)) # if covy.shape == (n,q,q) These arguments follow the same structured argument conventions as wd and we @@ -376,15 +376,15 @@ """ Convert covariance matrix(-ices) to weights. 
""" - from scipy import linalg + from numpy.dual import inv if len(cov.shape) == 2: - return linalg.inv(cov) + return inv(cov) else: weights = numpy.zeros(cov.shape, float) for i in range(cov.shape[-1]): # n - weights[:,:,i] = linalg.inv(cov[:,:,i]) + weights[:,:,i] = inv(cov[:,:,i]) return weights From scipy-svn at scipy.org Sun Apr 15 01:38:15 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sun, 15 Apr 2007 00:38:15 -0500 (CDT) Subject: [Scipy-svn] r2922 - in trunk/Lib: integrate linalg optimize signal stats Message-ID: <20070415053815.0A2DD39C04C@new.scipy.org> Author: rkern Date: 2007-04-15 00:37:57 -0500 (Sun, 15 Apr 2007) New Revision: 2922 Modified: trunk/Lib/integrate/quadrature.py trunk/Lib/linalg/basic.py trunk/Lib/optimize/minpack.py trunk/Lib/signal/wavelets.py trunk/Lib/stats/distributions.py Log: Remove some interdependencies. Modified: trunk/Lib/integrate/quadrature.py =================================================================== --- trunk/Lib/integrate/quadrature.py 2007-04-15 05:16:13 UTC (rev 2921) +++ trunk/Lib/integrate/quadrature.py 2007-04-15 05:37:57 UTC (rev 2922) @@ -6,6 +6,7 @@ 'cumtrapz','newton_cotes','composite'] from scipy.special.orthogonal import p_roots +from scipy.special import gammaln from numpy import sum, ones, add, diff, isinf, isscalar, \ asarray, real, trapz, arange, empty import scipy as sp @@ -591,7 +592,7 @@ BN = BN - np.dot(yi**power, ai) p1 = power+1 - fac = power*math.log(N) - sp.special.gammaln(p1) + fac = power*math.log(N) - gammaln(p1) fac = math.exp(fac) return ai, BN*fac Modified: trunk/Lib/linalg/basic.py =================================================================== --- trunk/Lib/linalg/basic.py 2007-04-15 05:16:13 UTC (rev 2921) +++ trunk/Lib/linalg/basic.py 2007-04-15 05:37:57 UTC (rev 2922) @@ -20,10 +20,9 @@ import numpy from numpy import asarray_chkfinite, outer, concatenate, reshape, single from numpy import matrix as Matrix +from numpy.linalg import LinAlgError from scipy.linalg import calc_lwork -class LinAlgError(Exception): - pass def lu_solve((lu, piv), b, trans=0, overwrite_b=0): """ lu_solve((lu, piv), b, trans=0, overwrite_b=0) -> x Modified: trunk/Lib/optimize/minpack.py =================================================================== --- trunk/Lib/optimize/minpack.py 2007-04-15 05:16:13 UTC (rev 2921) +++ trunk/Lib/optimize/minpack.py 2007-04-15 05:37:57 UTC (rev 2922) @@ -300,13 +300,14 @@ mesg = errors[info][0] if full_output: - import scipy.linalg as sl + from numpy.dual import inv + from numpy.linalg import LinAlgError perm = take(eye(n),retval[1]['ipvt']-1,0) r = triu(transpose(retval[1]['fjac'])[:n,:]) R = dot(r, perm) try: - cov_x = sl.inv(dot(transpose(R),R)) - except sl.basic.LinAlgError: + cov_x = inv(dot(transpose(R),R)) + except LinAlgError: cov_x = None return (retval[0], cov_x) + retval[1:-1] + (mesg,info) else: Modified: trunk/Lib/signal/wavelets.py =================================================================== --- trunk/Lib/signal/wavelets.py 2007-04-15 05:16:13 UTC (rev 2921) +++ trunk/Lib/signal/wavelets.py 2007-04-15 05:37:57 UTC (rev 2922) @@ -1,9 +1,9 @@ -## Automatically adapted for scipy Oct 21, 2005 by convertcode.py - import numpy as sb -import scipy as s +from numpy.dual import eig +from scipy.misc import comb + def daub(p): """The coefficients for the FIR low-pass filter producing Daubechies wavelets. 
@@ -30,10 +30,10 @@ elif p<35: # construct polynomial and factor it if p<35: - P = [s.comb(p-1+k,k,exact=1) for k in range(p)][::-1] + P = [comb(p-1+k,k,exact=1) for k in range(p)][::-1] yj = sb.roots(P) else: # try different polynomial --- needs work - P = [s.comb(p-1+k,k,exact=1)/4.0**k for k in range(p)][::-1] + P = [comb(p-1+k,k,exact=1)/4.0**k for k in range(p)][::-1] yj = sb.roots(P) / 4 # for each root, compute two z roots, select the one with |z|>1 # Build up final polynomial @@ -124,7 +124,7 @@ psi = 0*x # find phi0, and phi1 - lam, v = s.linalg.eig(m[0,0]) + lam, v = eig(m[0,0]) ind = sb.argmin(sb.absolute(lam-1)) # a dictionary with a binary representation of the # evaluation points x < 1 -- i.e. position is 0.xxxx Modified: trunk/Lib/stats/distributions.py =================================================================== --- trunk/Lib/stats/distributions.py 2007-04-15 05:16:13 UTC (rev 2921) +++ trunk/Lib/stats/distributions.py 2007-04-15 05:37:57 UTC (rev 2922) @@ -6,8 +6,9 @@ from __future__ import nested_scopes import scipy -import scipy.special as special -import scipy.optimize as optimize +from scipy.misc import comb, derivative +from scipy import special +from scipy import optimize import inspect from numpy import alltrue, where, arange, put, putmask, \ ravel, take, ones, sum, shape, product, repeat, reshape, \ @@ -91,7 +92,7 @@ def _tosolve(self, x, q, *args): return apply(self.cdf, (x, )+args) - q def _single_call(self, q, *args): - return scipy.optimize.brentq(self._tosolve, self.xa, self.xb, args=(q,)+args, xtol=self.xtol) + return optimize.brentq(self._tosolve, self.xa, self.xb, args=(q,)+args, xtol=self.xtol) def __call__(self, q, *args): return self.vecfunc(q, *args) @@ -327,7 +328,7 @@ return apply(self.cdf, (x, )+args)-q def _ppf_single_call(self, q, *args): - return scipy.optimize.brentq(self._ppf_to_solve, self.xa, self.xb, args=(q,)+args, xtol=self.xtol) + return optimize.brentq(self._ppf_to_solve, self.xa, self.xb, args=(q,)+args, xtol=self.xtol) # moment from definition def _mom_integ0(self, x,m,*args): @@ -352,7 +353,7 @@ return cond def _pdf(self,x,*args): - return scipy.derivative(self._cdf,x,dx=1e-5,args=args,order=5) + return derivative(self._cdf,x,dx=1e-5,args=args,order=5) ## Could also define any of these (return 1-d using self._size to get number) def _rvs(self, *args): @@ -1600,7 +1601,7 @@ return vals def _munp(self, n, c): k = arange(0,n+1) - val = (-1.0/c)**n * sum(scipy.comb(n,k)*(-1)**k / (1.0-c*k),axis=0) + val = (-1.0/c)**n * sum(comb(n,k)*(-1)**k / (1.0-c*k),axis=0) return where(c*n < 1, val, inf) def _entropy(self, c): if (c > 0): @@ -1658,7 +1659,7 @@ return 1.0/c*(1-(-log(q))**c) def _munp(self, n, c): k = arange(0,n+1) - vals = 1.0/c**n * sum(scipy.comb(n,k) * (-1)**k * special.gamma(c*k + 1),axis=0) + vals = 1.0/c**n * sum(comb(n,k) * (-1)**k * special.gamma(c*k + 1),axis=0) return where(c*n > -1, vals, inf) genextreme = genextreme_gen(name='genextreme', longname="A generalized extreme value", @@ -3947,7 +3948,6 @@ return cond def _pmf(self, k, M, n, N): tot, good = M, n - comb = scipy.comb bad = tot - good return comb(good,k) * comb(bad,N-k) / comb(tot,N) def _stats(self, M, n, N): @@ -4038,6 +4038,7 @@ def _ppf(self, q, mu): vals = ceil(special.pdtrik(q,mu)) temp = special.pdtr(vals-1,mu) + # fixme: vals1 = vals-1? 
return where((temp >= q), vals1, vals) def _stats(self, mu): var = mu From scipy-svn at scipy.org Sun Apr 15 11:34:54 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sun, 15 Apr 2007 10:34:54 -0500 (CDT) Subject: [Scipy-svn] r2923 - trunk/Lib/interpolate Message-ID: <20070415153454.BF7D639C08D@new.scipy.org> Author: stefan Date: 2007-04-15 10:34:44 -0500 (Sun, 15 Apr 2007) New Revision: 2923 Modified: trunk/Lib/interpolate/fitpack2.py Log: Add BivariateSpline to __all__. Modified: trunk/Lib/interpolate/fitpack2.py =================================================================== --- trunk/Lib/interpolate/fitpack2.py 2007-04-15 05:37:57 UTC (rev 2922) +++ trunk/Lib/interpolate/fitpack2.py 2007-04-15 15:34:44 UTC (rev 2923) @@ -12,6 +12,7 @@ 'InterpolatedUnivariateSpline', 'LSQUnivariateSpline', + 'BivariateSpline', 'LSQBivariateSpline', 'SmoothBivariateSpline', 'RectBivariateSpline'] From scipy-svn at scipy.org Mon Apr 16 11:51:30 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Mon, 16 Apr 2007 10:51:30 -0500 (CDT) Subject: [Scipy-svn] r2924 - trunk/Lib/io Message-ID: <20070416155130.46A3A39C16F@new.scipy.org> Author: wnbell Date: 2007-04-16 10:51:28 -0500 (Mon, 16 Apr 2007) New Revision: 2924 Modified: trunk/Lib/io/mio4.py Log: Fixed the following error that occurs when storing rank > 2 tensors: /usr/lib/python2.4/site-packages/scipy/io/mio4.py in arr_to_2d(self) 242 if len(dims) > 2: 243 dims = [N.product(dims[:-1]), dims[-1]] --> 244 self.arr = reshape(self.arr, dims) 245 246 def write(self): NameError: global name 'reshape' is not defined Modified: trunk/Lib/io/mio4.py =================================================================== --- trunk/Lib/io/mio4.py 2007-04-15 15:34:44 UTC (rev 2923) +++ trunk/Lib/io/mio4.py 2007-04-16 15:51:28 UTC (rev 2924) @@ -240,8 +240,7 @@ self.arr = N.atleast_2d(self.arr) dims = self.arr.shape if len(dims) > 2: - dims = [N.product(dims[:-1]), dims[-1]] - self.arr = reshape(self.arr, dims) + self.arr = self.arr.reshape(-1,dims[-1]) def write(self): assert False, 'Not implemented' From scipy-svn at scipy.org Mon Apr 16 13:03:09 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Mon, 16 Apr 2007 12:03:09 -0500 (CDT) Subject: [Scipy-svn] r2925 - trunk/Lib/stsci/convolve/lib Message-ID: <20070416170309.ACFDA39C040@new.scipy.org> Author: chanley Date: 2007-04-16 12:03:07 -0500 (Mon, 16 Apr 2007) New Revision: 2925 Modified: trunk/Lib/stsci/convolve/lib/iraf_frame.py Log: iraf_frame.py module in convolve ported to numpy by Nadia Dencheva. Modified: trunk/Lib/stsci/convolve/lib/iraf_frame.py =================================================================== --- trunk/Lib/stsci/convolve/lib/iraf_frame.py 2007-04-16 15:51:28 UTC (rev 2924) +++ trunk/Lib/stsci/convolve/lib/iraf_frame.py 2007-04-16 17:03:07 UTC (rev 2925) @@ -12,7 +12,8 @@ and the contents of 'a' in the center. The boundary pixels are copied from the nearest edge pixel in 'a'. - >>> a = num.arange(16, shape=(4,4)) + >>> a = num.arange(16) + >>> a.shape=(4,4) >>> frame_nearest(a, (8,8)) array([[ 0, 0, 0, 1, 2, 3, 3, 3], [ 0, 0, 0, 1, 2, 3, 3, 3], @@ -50,7 +51,8 @@ and the contents of 'a' in the center. The boundary pixels are reflected from the nearest edge pixels in 'a'. 
- >>> a = num.arange(16, shape=(4,4)) + >>> a = num.arange(16) + >>> a.shape = (4,4) >>> frame_reflect(a, (8,8)) array([[ 5, 4, 4, 5, 6, 7, 7, 6], [ 1, 0, 0, 1, 2, 3, 3, 2], @@ -62,7 +64,7 @@ [ 9, 8, 8, 9, 10, 11, 11, 10]]) """ - b = num.zeros(shape, typecode=a.type()) + b = num.zeros(shape, dtype=a.dtype) delta = (num.array(b.shape) - num.array(a.shape)) dy = delta[0] // 2 dx = delta[1] // 2 @@ -87,7 +89,8 @@ and the contents of 'a' in the center. The boundary pixels are wrapped around to the opposite edge pixels in 'a'. - >>> a = num.arange(16, shape=(4,4)) + >>> a = num.arange(16) + >>> a.shape=(4,4) >>> frame_wrap(a, (8,8)) array([[10, 11, 8, 9, 10, 11, 8, 9], [14, 15, 12, 13, 14, 15, 12, 13], @@ -100,7 +103,7 @@ """ - b = num.zeros(shape, typecode=a.type()) + b = num.zeros(shape, dtype=a.dtype) delta = (num.array(b.shape) - num.array(a.shape)) dy = delta[0] // 2 dx = delta[1] // 2 @@ -125,7 +128,8 @@ and the contents of 'a' in the center. The boundary pixels are copied from the nearest edge pixel in 'a'. - >>> a = num.arange(16, shape=(4,4)) + >>> a = num.arange(16) + >>> a.shape=(4,4) >>> frame_constant(a, (8,8), cval=42) array([[42, 42, 42, 42, 42, 42, 42, 42], [42, 42, 42, 42, 42, 42, 42, 42], @@ -138,7 +142,7 @@ """ - b = num.zeros(shape, typecode=a.type()) + b = num.zeros(shape, dtype=a.dtype) delta = (num.array(b.shape) - num.array(a.shape)) dy = delta[0] // 2 dx = delta[1] // 2 From scipy-svn at scipy.org Mon Apr 16 17:54:16 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Mon, 16 Apr 2007 16:54:16 -0500 (CDT) Subject: [Scipy-svn] r2926 - in trunk/Lib/misc: . tests Message-ID: <20070416215416.38E1839C0DA@new.scipy.org> Author: stefan Date: 2007-04-16 16:54:00 -0500 (Mon, 16 Apr 2007) New Revision: 2926 Added: trunk/Lib/misc/tests/ trunk/Lib/misc/tests/test_pilutil.py Modified: trunk/Lib/misc/pilutil.py trunk/Lib/misc/setup.py Log: Fix imresize, add test. 
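For reference, the behaviour this change pins down (and that the new test_pilutil.py asserts), sketched as a doctest. It assumes PIL is installed so that scipy.misc.pilutil imports, and uses a throwaway random array in place of a real image:

>>> import numpy as N
>>> import scipy.misc.pilutil as pilutil
>>> im = N.random.random((10, 20))      # dummy greyscale image
>>> pilutil.imresize(im, 1.1).shape     # a float factor scales both axes
(11, 22)

An integer factor is still interpreted as a percentage (size / 100.0), and a 2-tuple is still taken as the target shape.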
Modified: trunk/Lib/misc/pilutil.py =================================================================== --- trunk/Lib/misc/pilutil.py 2007-04-16 17:03:07 UTC (rev 2925) +++ trunk/Lib/misc/pilutil.py 2007-04-16 21:54:00 UTC (rev 2926) @@ -5,7 +5,8 @@ import tempfile from numpy import amin, amax, ravel, asarray, cast, arange, \ - ones, newaxis, transpose, mgrid, iscomplexobj, sum, zeros, uint8 + ones, newaxis, transpose, mgrid, iscomplexobj, sum, zeros, uint8, \ + issubdtype, array import Image import ImageFilter @@ -252,10 +253,10 @@ """ im = toimage(arr) ts = type(size) - if ts is types.IntType: + if issubdtype(ts,int): size = size / 100.0 - if type(size) is types.FloatType: - size = (im.size[0]*size,im.size[1]*size) + elif issubdtype(type(size),float): + size = (array(im.size)*size).astype(int) else: size = (size[1],size[0]) imnew = im.resize(size) Modified: trunk/Lib/misc/setup.py =================================================================== --- trunk/Lib/misc/setup.py 2007-04-16 17:03:07 UTC (rev 2925) +++ trunk/Lib/misc/setup.py 2007-04-16 21:54:00 UTC (rev 2926) @@ -3,6 +3,7 @@ from numpy.distutils.misc_util import Configuration config = Configuration('misc',parent_package,top_path) config.add_data_files('lena.dat') + config.add_data_dir('tests') return config if __name__ == '__main__': Added: trunk/Lib/misc/tests/test_pilutil.py =================================================================== --- trunk/Lib/misc/tests/test_pilutil.py 2007-04-16 17:03:07 UTC (rev 2925) +++ trunk/Lib/misc/tests/test_pilutil.py 2007-04-16 21:54:00 UTC (rev 2926) @@ -0,0 +1,16 @@ +from numpy.testing import * +set_package_path() +import scipy.misc.pilutil as pilutil +restore_path() + +import numpy as N + +class test_pilutil(NumpyTestCase): + def check_imresize(self): + im = N.random.random((10,20)) + for T in N.sctypes['float'] + [float]: + im1 = pilutil.imresize(im,T(1.1)) + assert_equal(im1.shape,(11,22)) + +if __name__ == "__main__": + NumpyTest().run() From scipy-svn at scipy.org Tue Apr 17 06:12:06 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 17 Apr 2007 05:12:06 -0500 (CDT) Subject: [Scipy-svn] r2927 - in trunk/Lib: interpolate sandbox/arpack Message-ID: <20070417101206.CA78039C04D@new.scipy.org> Author: fullung Date: 2007-04-17 05:11:39 -0500 (Tue, 17 Apr 2007) New Revision: 2927 Modified: trunk/Lib/interpolate/__fitpack.h trunk/Lib/sandbox/arpack/setup.py Log: Fixed two Windows compilation issues. 
Modified: trunk/Lib/interpolate/__fitpack.h =================================================================== --- trunk/Lib/interpolate/__fitpack.h 2007-04-16 21:54:00 UTC (rev 2926) +++ trunk/Lib/interpolate/__fitpack.h 2007-04-17 10:11:39 UTC (rev 2927) @@ -556,6 +556,7 @@ double *t, *c, *tt, *cc; PyArrayObject *ap_t = NULL, *ap_c = NULL, *ap_tt = NULL, *ap_cc = NULL; PyObject *t_py = NULL, *c_py = NULL; + PyObject *ret = NULL; if (!PyArg_ParseTuple(args, "iOOidi",&iopt,&t_py,&c_py,&k, &x, &m)) return NULL; ap_t = (PyArrayObject *)PyArray_ContiguousFromObject(t_py, PyArray_DOUBLE, 0, 1); ap_c = (PyArrayObject *)PyArray_ContiguousFromObject(c_py, PyArray_DOUBLE, 0, 1); @@ -577,7 +578,7 @@ } Py_DECREF(ap_c); Py_DECREF(ap_t); - PyObject* ret = Py_BuildValue("NNi",PyArray_Return(ap_tt),PyArray_Return(ap_cc),ier); + ret = Py_BuildValue("NNi",PyArray_Return(ap_tt),PyArray_Return(ap_cc),ier); return ret; fail: Modified: trunk/Lib/sandbox/arpack/setup.py =================================================================== --- trunk/Lib/sandbox/arpack/setup.py 2007-04-16 21:54:00 UTC (rev 2926) +++ trunk/Lib/sandbox/arpack/setup.py 2007-04-17 10:11:39 UTC (rev 2927) @@ -18,7 +18,8 @@ # arpack_sources.extend([os.path.join('ARPACK','BLAS', '*.f')]) arpack_sources.extend([os.path.join('ARPACK','LAPACK', '*.f')]) - config.add_library('arpack', sources=arpack_sources) + config.add_library('arpack', sources=arpack_sources, + include_dirs=[os.path.join(['ARPACK', 'SRC'])]) config.add_extension('_arpack', From scipy-svn at scipy.org Tue Apr 17 06:41:13 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 17 Apr 2007 05:41:13 -0500 (CDT) Subject: [Scipy-svn] r2928 - in trunk/Lib: sparse/sparsetools special Message-ID: <20070417104113.0EEDF39C0C1@new.scipy.org> Author: fullung Date: 2007-04-17 05:41:09 -0500 (Tue, 17 Apr 2007) New Revision: 2928 Modified: trunk/Lib/sparse/sparsetools/sparsetools.h trunk/Lib/special/setup.py Log: Fixed some more MSVC compilation issues. 
Modified: trunk/Lib/sparse/sparsetools/sparsetools.h =================================================================== --- trunk/Lib/sparse/sparsetools/sparsetools.h 2007-04-17 10:11:39 UTC (rev 2927) +++ trunk/Lib/sparse/sparsetools/sparsetools.h 2007-04-17 10:41:09 UTC (rev 2928) @@ -928,7 +928,7 @@ T Ax[]) { const T zero = ZERO(); - I isort[ n_col ]; + I* isort = new I[ n_col ]; std::vector itemp(n_col,0); std::vector atemp(n_col,zero); @@ -952,6 +952,7 @@ Ax[jj] = atemp[ii]; } } + delete[] isort; } Modified: trunk/Lib/special/setup.py =================================================================== --- trunk/Lib/special/setup.py 2007-04-17 10:11:39 UTC (rev 2927) +++ trunk/Lib/special/setup.py 2007-04-17 10:41:09 UTC (rev 2928) @@ -1,6 +1,7 @@ #!/usr/bin/env python import os +import sys from os.path import join from distutils.sysconfig import get_python_inc @@ -9,9 +10,10 @@ config = Configuration('special', parent_package, top_path) define_macros = [] -# if sys.platform=='win32': + if sys.platform=='win32': # define_macros.append(('NOINFINITIES',None)) # define_macros.append(('NONANS',None)) + define_macros.append(('_USE_MATH_DEFINES',None)) # C libraries config.add_library('c_misc',sources=[join('c_misc','*.c')]) From scipy-svn at scipy.org Tue Apr 17 09:23:09 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 17 Apr 2007 08:23:09 -0500 (CDT) Subject: [Scipy-svn] r2929 - trunk/Lib/sandbox/arpack Message-ID: <20070417132309.466D039C142@new.scipy.org> Author: fullung Date: 2007-04-17 08:23:03 -0500 (Tue, 17 Apr 2007) New Revision: 2929 Modified: trunk/Lib/sandbox/arpack/setup.py Log: Fixed ARPACK includes for Windows. Modified: trunk/Lib/sandbox/arpack/setup.py =================================================================== --- trunk/Lib/sandbox/arpack/setup.py 2007-04-17 10:41:09 UTC (rev 2928) +++ trunk/Lib/sandbox/arpack/setup.py 2007-04-17 13:23:03 UTC (rev 2929) @@ -19,7 +19,7 @@ arpack_sources.extend([os.path.join('ARPACK','LAPACK', '*.f')]) config.add_library('arpack', sources=arpack_sources, - include_dirs=[os.path.join(['ARPACK', 'SRC'])]) + include_dirs=[os.path.join('ARPACK', 'SRC')]) config.add_extension('_arpack', From scipy-svn at scipy.org Tue Apr 17 09:24:25 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 17 Apr 2007 08:24:25 -0500 (CDT) Subject: [Scipy-svn] r2930 - trunk/Lib/sandbox/arpack Message-ID: <20070417132425.867F139C240@new.scipy.org> Author: fullung Date: 2007-04-17 08:24:22 -0500 (Tue, 17 Apr 2007) New Revision: 2930 Removed: trunk/Lib/sandbox/arpack/build/ Log: Removed build directory. 
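To make the r2929 include-path fix above concrete: os.path.join takes path components as separate string arguments, so the list form introduced in r2927 is handed back unjoined and distutils then receives a list where it expects a directory string. A minimal sketch (hypothetical interactive session on a POSIX box; the separator is '\' on Windows):

>>> import os.path
>>> os.path.join('ARPACK', 'SRC')       # r2929: components as separate arguments
'ARPACK/SRC'
>>> os.path.join(['ARPACK', 'SRC'])     # r2927: nothing to join onto, the list comes back as-is
['ARPACK', 'SRC']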
From scipy-svn at scipy.org Tue Apr 17 12:09:55 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 17 Apr 2007 11:09:55 -0500 (CDT) Subject: [Scipy-svn] r2931 - in trunk/Lib/sandbox/timeseries: lib src Message-ID: <20070417160955.E409E39C258@new.scipy.org> Author: mattknox_ca Date: 2007-04-17 11:09:47 -0500 (Tue, 17 Apr 2007) New Revision: 2931 Modified: trunk/Lib/sandbox/timeseries/lib/moving_funcs.py trunk/Lib/sandbox/timeseries/src/cseries.c Log: added mov_median function Modified: trunk/Lib/sandbox/timeseries/lib/moving_funcs.py =================================================================== --- trunk/Lib/sandbox/timeseries/lib/moving_funcs.py 2007-04-17 13:24:22 UTC (rev 2930) +++ trunk/Lib/sandbox/timeseries/lib/moving_funcs.py 2007-04-17 16:09:47 UTC (rev 2931) @@ -21,9 +21,10 @@ from maskedarray import MaskedArray, nomask, getmask, getmaskarray, masked marray = MA.array -from timeseries.cseries import MA_mov_stddev, MA_mov_sum, MA_mov_average +from timeseries.cseries import MA_mov_stddev, MA_mov_sum, MA_mov_average, \ + MA_mov_median -__all__ = ['mov_sum', +__all__ = ['mov_sum', 'mov_median', 'mov_average', 'mov_mean', 'mov_average_expw', 'mov_stddev', 'mov_var', 'mov_sample_stddev', 'mov_sample_var', 'cmov_average', 'cmov_mean', 'cmov_window' @@ -52,6 +53,16 @@ result_dict = MA_mov_sum(**kwargs) return _process_result_dict(data, result_dict) +def mov_median(data, window_size, dtype=None): + kwargs = {'array':data, + 'window_size':window_size} + + if dtype is not None: + kwargs['dtype'] = dtype + + result_dict = MA_mov_median(**kwargs) + return _process_result_dict(data, result_dict) + def mov_average(data, window_size, dtype=None): kwargs = {'array':data, 'window_size':window_size} Modified: trunk/Lib/sandbox/timeseries/src/cseries.c =================================================================== --- trunk/Lib/sandbox/timeseries/src/cseries.c 2007-04-17 13:24:22 UTC (rev 2930) +++ trunk/Lib/sandbox/timeseries/src/cseries.c 2007-04-17 16:09:47 UTC (rev 2931) @@ -2989,15 +2989,16 @@ } -static PyObject *NP_ADD, *NP_MULTIPLY, *NP_SUBTRACT, *NP_SQRT; +static PyObject *NP_ADD, *NP_MULTIPLY, *NP_SUBTRACT, *NP_SQRT, + *NP_GREATER, *NP_GREATER_EQUAL; /* computation portion of moving sum. 
Appropriate mask is overlayed on top afterwards */ -static PyArrayObject* +static PyObject* calc_mov_sum(PyArrayObject *orig_ndarray, int window_size, int rtype) { PyArrayObject *result_ndarray=NULL; - int i, valid_points=0; + int i; result_ndarray = (PyArrayObject*)PyArray_ZEROS( orig_ndarray->nd, @@ -3011,9 +3012,8 @@ val = PyArray_GETITEM(orig_ndarray, PyArray_GetPtr(orig_ndarray, &i)); - if (valid_points == 0) { + if (i == 0) { mov_sum_val = val; - valid_points += 1; } else { int prev_idx = i-1; PyObject *mov_sum_prevval; @@ -3024,7 +3024,7 @@ Py_DECREF(mov_sum_prevval); ERR_CHECK(mov_sum_val) - if (valid_points == window_size) { + if (i >= (window_size - 1)) { PyObject *temp_val, *rem_val; int rem_idx = i-window_size; temp_val = mov_sum_val; @@ -3039,9 +3039,6 @@ Py_DECREF(temp_val); Py_DECREF(rem_val); - - } else { - valid_points += 1; } } @@ -3052,7 +3049,7 @@ Py_DECREF(mov_sum_val); } - return result_ndarray; + return (PyObject*)result_ndarray; } @@ -3078,7 +3075,7 @@ rtype = _CHKTYPENUM(dtype); - result_ndarray = calc_mov_sum(orig_ndarray, window_size, rtype); + result_ndarray = (PyArrayObject*)calc_mov_sum(orig_ndarray, window_size, rtype); ERR_CHECK(result_ndarray) result_dict = PyDict_New(); @@ -3102,8 +3099,6 @@ PyArray_Descr *dtype=NULL; - int *raw_result_mask; - int rtype, window_size; static char *kwlist[] = {"array", "window_size", "dtype", NULL}; @@ -3119,7 +3114,7 @@ rtype = _get_type_num_double(orig_ndarray->descr, dtype); - mov_sum = calc_mov_sum(orig_ndarray, window_size, rtype); + mov_sum = (PyArrayObject*)calc_mov_sum(orig_ndarray, window_size, rtype); ERR_CHECK(mov_sum) denom = PyFloat_FromDouble(1.0/(double)(window_size)); @@ -3142,6 +3137,281 @@ return result_dict; } +static PyObject* +np_add(PyObject *left_val, PyObject *right_val) { + + PyObject *result; + + result = PyObject_CallFunction( + NP_ADD, "OO", + (PyArrayObject*)left_val, + right_val); + return result; +} + +static PyObject* +np_subtract(PyObject *left_val, PyObject *right_val) { + + PyObject *result; + + result = PyObject_CallFunction( + NP_SUBTRACT, "OO", + (PyArrayObject*)left_val, + right_val); + return result; +} + +static PyObject* +np_multiply(PyObject *left_val, PyObject *right_val) { + + PyObject *result; + + result = PyObject_CallFunction( + NP_MULTIPLY, "OO", + (PyArrayObject*)left_val, + right_val); + return result; +} + +static int np_greater(PyObject *left_val, PyObject *right_val) { + + PyObject *temp; + int result; + + temp = PyObject_CallFunction( + NP_GREATER, "OO", + (PyArrayObject*)left_val, + right_val); + + result = (int)PyInt_AsLong(temp); + Py_DECREF(temp); + return result; +} + +static int np_greater_equal(PyObject *left_val, PyObject *right_val) { + + PyObject *temp; + int result; + + temp = PyObject_CallFunction( + NP_GREATER_EQUAL, "OO", + (PyArrayObject*)left_val, + right_val); + + result = (int)PyInt_AsLong(temp); + Py_DECREF(temp); + return result; +} + +/* computation portion of moving median. Appropriate mask is overlayed on top + afterwards. + + The algorithm used here is based on the code found at: + http://cran.r-project.org/src/contrib/Devel/runStat_1.1.tar.gz + + This code was originally released under the GPL, but the author + (David Brahm) has granted me (and scipy) permission to use it under the BSD + license. 
*/ +static PyObject* +calc_mov_median(PyArrayObject *orig_ndarray, int window_size, int rtype) +{ + PyArrayObject *result_ndarray=NULL; + PyObject **result_array, **ref_array, **even_array=NULL; + PyObject *new_val, *old_val, *temp; + PyObject *temp_add, *one_half; + int a, i, k, R, arr_size, z, is_odd; + int *r; + + arr_size = orig_ndarray->dimensions[0]; + + result_ndarray = (PyArrayObject*)PyArray_ZEROS( + orig_ndarray->nd, + orig_ndarray->dimensions, + rtype, 0); + ERR_CHECK(result_ndarray) + + if (arr_size >= window_size) { + result_array = calloc(arr_size, sizeof(PyObject*)); + MEM_CHECK(result_array) + + /* this array will be used for quick access to the data in the original + array (so PyArray_GETITEM doesn't have to be used over and over in the + main loop) */ + ref_array = malloc(arr_size * sizeof(PyObject*)); + MEM_CHECK(ref_array) + + for (i=0; i= window_size-1; i--) { + a = window_size; + z = i - window_size + 1; + old_val = ref_array[i+1]; + new_val = ref_array[i-window_size+1]; + + for (k=window_size-1; k > 0; k--) { + r[k] = r[k-1]; /* Shift previous iteration's ranks */ + if (np_greater_equal(ref_array[z+k], new_val)) {r[k]++; a--;} + if (np_greater(ref_array[z+k], old_val)) {r[k]--;} + + if (r[k]==R) { + result_array[i] = ref_array[z+k]; + } + + if (even_array != NULL) { + if (r[k]==R) { + even_array[0] = ref_array[z+k]; + } else if (r[k] == (R+1)) { + even_array[1] = ref_array[z+k]; + } + } else { + if (r[k]==R) { + result_array[i] = ref_array[z+k]; + } + } + + } + + r[0] = a; + + if (even_array != NULL) { + if (a==R) { + even_array[0] = new_val; + } else if (a == (R+1)) { + even_array[1] = new_val; + } + + temp_add = np_add(even_array[0], even_array[1]); + result_array[i] = np_multiply(temp_add, one_half);; + Py_DECREF(temp_add); + + } else { + if (a==R) { + result_array[i] = new_val; + } + } + + } + + Py_DECREF(one_half); + + for (i=window_size-1; idescr, dtype); - mov_sum = calc_mov_sum(orig_ndarray, window_size, rtype); + mov_sum = (PyArrayObject*)calc_mov_sum(orig_ndarray, window_size, rtype); ERR_CHECK(mov_sum) result_temp1 = (PyArrayObject*)PyObject_CallFunction( @@ -3179,7 +3447,7 @@ orig_ndarray, (PyObject*)orig_ndarray); ERR_CHECK(result_temp1) - mov_sum_sq = calc_mov_sum(result_temp1, window_size, rtype); + mov_sum_sq = (PyArrayObject*)calc_mov_sum(result_temp1, window_size, rtype); Py_DECREF(result_temp1); ERR_CHECK(mov_sum_sq) @@ -3386,6 +3654,8 @@ {"MA_mov_sum", (PyCFunction)MaskedArray_mov_sum, METH_VARARGS | METH_KEYWORDS, MaskedArray_mov_sum_doc}, + {"MA_mov_median", (PyCFunction)MaskedArray_mov_median, + METH_VARARGS | METH_KEYWORDS, MaskedArray_mov_median_doc}, {"MA_mov_average", (PyCFunction)MaskedArray_mov_average, METH_VARARGS | METH_KEYWORDS, MaskedArray_mov_average_doc}, {"MA_mov_stddev", (PyCFunction)MaskedArray_mov_stddev, @@ -3439,11 +3709,15 @@ NP_MULTIPLY = PyDict_GetItemString(ops_dict, "multiply"); NP_SUBTRACT = PyDict_GetItemString(ops_dict, "subtract"); NP_SQRT = PyDict_GetItemString(ops_dict, "sqrt"); + NP_GREATER = PyDict_GetItemString(ops_dict, "greater"); + NP_GREATER_EQUAL = PyDict_GetItemString(ops_dict, "greater_equal"); Py_INCREF(NP_ADD); Py_INCREF(NP_MULTIPLY); Py_INCREF(NP_SUBTRACT); Py_INCREF(NP_SQRT); + Py_INCREF(NP_GREATER); + Py_INCREF(NP_GREATER_EQUAL); Py_DECREF(ops_dict); Py_INCREF(&DateType); From scipy-svn at scipy.org Tue Apr 17 15:52:35 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 17 Apr 2007 14:52:35 -0500 (CDT) Subject: [Scipy-svn] r2932 - trunk/Lib/special/cephes Message-ID: 
<20070417195235.9975339C057@new.scipy.org> Author: fullung Date: 2007-04-17 14:52:32 -0500 (Tue, 17 Apr 2007) New Revision: 2932 Modified: trunk/Lib/special/cephes/mconf.h Log: Fixed compilation with MSVC. Modified: trunk/Lib/special/cephes/mconf.h =================================================================== --- trunk/Lib/special/cephes/mconf.h 2007-04-17 16:09:47 UTC (rev 2931) +++ trunk/Lib/special/cephes/mconf.h 2007-04-17 19:52:32 UTC (rev 2932) @@ -163,7 +163,7 @@ #define ANSIC 1 /* Get ANSI function prototypes, if you want them. */ -#ifdef __STDC__ +#if defined(__STDC__) || defined(_MSC_EXTENSIONS) #define ANSIPROT #include "protos.h" #else From scipy-svn at scipy.org Wed Apr 18 23:19:20 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 18 Apr 2007 22:19:20 -0500 (CDT) Subject: [Scipy-svn] r2933 - in trunk/Lib: misc sandbox/pysparse/src Message-ID: <20070419031920.50CDB39C097@new.scipy.org> Author: oliphant Date: 2007-04-18 22:19:14 -0500 (Wed, 18 Apr 2007) New Revision: 2933 Modified: trunk/Lib/misc/common.py trunk/Lib/sandbox/pysparse/src/ll_mat.c Log: Fix up comb, factorial, and factorial2 to use a slightly different (faster?) algorithm. Fix include problem with pysparse to that it grabs the no-prefix option for backward compatibility. Modified: trunk/Lib/misc/common.py =================================================================== --- trunk/Lib/misc/common.py 2007-04-17 19:52:32 UTC (rev 2932) +++ trunk/Lib/misc/common.py 2007-04-19 03:19:14 UTC (rev 2933) @@ -32,12 +32,9 @@ if exact: if n < 0: return 0L - n = long(n) val = 1L - k = 1L - while (k < n+1L): - val = val*k - k += 1 + for k in xrange(1,n+1): + val *= k return val else: from scipy import special @@ -64,12 +61,9 @@ return 0L if n <= 0: return 1L - n = long(n) val = 1L - k = n - while (k > 0): - val = val*k - k -= 2 + for k in xrange(n,0,-2): + val *= k return val else: from scipy import special @@ -94,12 +88,9 @@ return 0L if n<=0: return 1L - n = long(n) val = 1L - j = n - while (j > 0): + for j in xrange(n,0,-k): val = val*j - j -= k return val else: raise NotImplementedError @@ -118,16 +109,9 @@ if exact: if (k > N) or (N < 0) or (k < 0): return 0L - N,k = map(long,(N,k)) - top = N val = 1L - while (top > (N-k)): - val *= top - top -= 1 - n = 1L - while (n < k+1L): - val /= n - n += 1 + for j in xrange(min(k, N-k)): + val = (val*(N-j))//(j+1) return val else: from scipy import special Modified: trunk/Lib/sandbox/pysparse/src/ll_mat.c =================================================================== --- trunk/Lib/sandbox/pysparse/src/ll_mat.c 2007-04-17 19:52:32 UTC (rev 2932) +++ trunk/Lib/sandbox/pysparse/src/ll_mat.c 2007-04-19 03:19:14 UTC (rev 2933) @@ -9,7 +9,7 @@ #include "pysparse/spmatrix.h" #define PY_ARRAY_UNIQUE_SYMBOL spmatrix -#include "numpy/arrayobject.h" +#include "numpy/noprefix.h" #define INCREASE_FACTOR 1.5 /* increase rate for memory reallocation of ll_mat arrays */ #define PPRINT_ROW_THRESH 500 /* row threshold for choosing between print formats */ From scipy-svn at scipy.org Thu Apr 19 15:49:54 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 19 Apr 2007 14:49:54 -0500 (CDT) Subject: [Scipy-svn] r2934 - trunk/Lib/sandbox/numexpr Message-ID: <20070419194954.37CF839C053@new.scipy.org> Author: cookedm Date: 2007-04-19 14:49:51 -0500 (Thu, 19 Apr 2007) New Revision: 2934 Modified: trunk/Lib/sandbox/numexpr/expressions.py trunk/Lib/sandbox/numexpr/interpreter.c Log: [numexpr] include arcsin, arccos, and arctan functions Modified: 
trunk/Lib/sandbox/numexpr/expressions.py =================================================================== --- trunk/Lib/sandbox/numexpr/expressions.py 2007-04-19 03:19:14 UTC (rev 2933) +++ trunk/Lib/sandbox/numexpr/expressions.py 2007-04-19 19:49:51 UTC (rev 2934) @@ -198,10 +198,13 @@ functions = { 'copy' : func(numpy.copy), 'ones_like' : func(numpy.ones_like), + 'sqrt' : func(numpy.sqrt, 'float'), 'sin' : func(numpy.sin, 'float'), 'cos' : func(numpy.cos, 'float'), 'tan' : func(numpy.tan, 'float'), - 'sqrt' : func(numpy.sqrt, 'float'), + 'arcsin' : func(numpy.arcsin, 'float'), + 'arccos' : func(numpy.arccos, 'float'), + 'arctan' : func(numpy.arctan, 'float'), 'sinh' : func(numpy.sinh, 'float'), 'cosh' : func(numpy.cosh, 'float'), Modified: trunk/Lib/sandbox/numexpr/interpreter.c =================================================================== --- trunk/Lib/sandbox/numexpr/interpreter.c 2007-04-19 03:19:14 UTC (rev 2933) +++ trunk/Lib/sandbox/numexpr/interpreter.c 2007-04-19 19:49:51 UTC (rev 2934) @@ -277,6 +277,9 @@ To add a function opcode, just copy OP_SIN or OP_ARCTAN2. + Some functions are repeated in this table that are opcodes, but there's + no problem with that as the compiler selects opcodes over functions, + and this makes it easier to compare opcode vs. function speeds. */ enum FuncFFCodes { @@ -296,6 +299,7 @@ typedef double (*FuncFFPtr)(double); +/* The order of this array must match the FuncFFCodes enum above */ FuncFFPtr functions_f[] = { sqrt, sin, @@ -339,6 +343,7 @@ typedef void (*FuncCCPtr)(cdouble*, cdouble*); +/* The order of this array must match the FuncCCCodes enum above */ FuncCCPtr functions_cc[] = { nc_sqrt, nc_sin, From scipy-svn at scipy.org Thu Apr 19 15:57:25 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 19 Apr 2007 14:57:25 -0500 (CDT) Subject: [Scipy-svn] r2935 - in trunk/Lib/sandbox/numexpr: . tests Message-ID: <20070419195725.6885639C053@new.scipy.org> Author: cookedm Date: 2007-04-19 14:57:23 -0500 (Thu, 19 Apr 2007) New Revision: 2935 Modified: trunk/Lib/sandbox/numexpr/expressions.py trunk/Lib/sandbox/numexpr/tests/test_numexpr.py Log: [numexpr] fix float/int comparision [ivilata] Modified: trunk/Lib/sandbox/numexpr/expressions.py =================================================================== --- trunk/Lib/sandbox/numexpr/expressions.py 2007-04-19 19:49:51 UTC (rev 2934) +++ trunk/Lib/sandbox/numexpr/expressions.py 2007-04-19 19:57:23 UTC (rev 2935) @@ -75,12 +75,15 @@ return type_to_kind[converter] def binop(opname, reversed=False, kind=None): + # Getting the named method from self (after reversal) does not + # always work (e.g. int constants do not have a __lt__ method). 
+ opfunc = getattr(operator, "__%s__" % opname) @ophelper def operation(self, other): if reversed: self, other = other, self if allConstantNodes([self, other]): - return ConstantNode(getattr(self.value, "__%s__" % opname)(other.value)) + return ConstantNode(opfunc(self.value, other.value)) else: return OpNode(opname, (self, other), kind=kind) return operation Modified: trunk/Lib/sandbox/numexpr/tests/test_numexpr.py =================================================================== --- trunk/Lib/sandbox/numexpr/tests/test_numexpr.py 2007-04-19 19:49:51 UTC (rev 2934) +++ trunk/Lib/sandbox/numexpr/tests/test_numexpr.py 2007-04-19 19:57:23 UTC (rev 2935) @@ -186,6 +186,7 @@ 'sinh(a)', '2*a + (cos(3)+5)*sinh(cos(b))', '2*a + arctan2(a, b)', + 'arcsin(0.5)', 'where(a, 2, b)', 'where((a-10).real, a, 2)', 'cos(1+1)', @@ -210,6 +211,7 @@ cmptests.append("a/2+5 %s b" % op) cmptests.append("a/2+5 %s 7" % op) cmptests.append("7 %s b" % op) + cmptests.append("7.0 %s 5" % op) tests.append(('COMPARISONS', cmptests)) func1tests = [] From scipy-svn at scipy.org Tue Apr 24 15:39:27 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 24 Apr 2007 14:39:27 -0500 (CDT) Subject: [Scipy-svn] r2936 - in trunk/Lib/sandbox/timeseries: lib src Message-ID: <20070424193927.19E5F39C04E@new.scipy.org> Author: mattknox_ca Date: 2007-04-24 14:39:22 -0500 (Tue, 24 Apr 2007) New Revision: 2936 Modified: trunk/Lib/sandbox/timeseries/lib/moving_funcs.py trunk/Lib/sandbox/timeseries/src/cseries.c Log: added mov_corr and mov_covar functions. Also modified mov functions to work on 2D arrays Modified: trunk/Lib/sandbox/timeseries/lib/moving_funcs.py =================================================================== --- trunk/Lib/sandbox/timeseries/lib/moving_funcs.py 2007-04-19 19:57:23 UTC (rev 2935) +++ trunk/Lib/sandbox/timeseries/lib/moving_funcs.py 2007-04-24 19:39:22 UTC (rev 2936) @@ -26,7 +26,7 @@ __all__ = ['mov_sum', 'mov_median', 'mov_average', 'mov_mean', 'mov_average_expw', - 'mov_stddev', 'mov_var', 'mov_sample_stddev', 'mov_sample_var', + 'mov_stddev', 'mov_var', 'mov_covar', 'mov_corr', 'cmov_average', 'cmov_mean', 'cmov_window' ] @@ -40,160 +40,162 @@ data = orig_data.astype(rtype) data[:] = result_dict['array'] - return marray(data, mask=rmask, copy=True, subok=True) + return marray(data, mask=rmask, copy=False, subok=True) +def _moving_func(data, cfunc, kwargs): -def mov_sum(data, window_size, dtype=None): - kwargs = {'array':data, - 'window_size':window_size} + if data.ndim == 1: + kwargs['array'] = data - if dtype is not None: - kwargs['dtype'] = dtype - - result_dict = MA_mov_sum(**kwargs) - return _process_result_dict(data, result_dict) + result_dict = cfunc(**kwargs) + return _process_result_dict(data, result_dict) -def mov_median(data, window_size, dtype=None): - kwargs = {'array':data, - 'window_size':window_size} + elif data.ndim == 2: + for i in range(data.shape[-1]): + kwargs['array'] = data[:,i] + result_dict = cfunc(**kwargs) + + if i == 0: + rtype = result_dict['array'].dtype + result = data.astype(rtype) + print data.dtype, result.dtype + + rmask = result_dict['mask'] - if dtype is not None: - kwargs['dtype'] = dtype - - result_dict = MA_mov_median(**kwargs) - return _process_result_dict(data, result_dict) + curr_col = marray(result_dict['array'], mask=rmask, copy=False) + result[:,i] = curr_col -def mov_average(data, window_size, dtype=None): - kwargs = {'array':data, - 'window_size':window_size} + return result + else: + raise ValueError, "Data should be at most 2D" + 
+#............................................................................... +def mov_sum(data, span, dtype=None): + """Calculates the moving sum of a series. + +:Parameters: + $$data$$ + $$span$$ + $$dtype$$""" + + kwargs = {'span':span} + if dtype is None: dtype = data.dtype + kwargs['dtype'] = dtype + + + return _moving_func(data, MA_mov_sum, kwargs) +#............................................................................... +def mov_median(data, span, dtype=None): + """Calculates the moving median of a series. + +:Parameters: + $$data$$ + $$span$$ + $$dtype$$""" + + kwargs = {'span':span} + if dtype is None: dtype = data.dtype + kwargs['dtype'] = dtype + + return _moving_func(data, MA_mov_median, kwargs) +#............................................................................... +def mov_average(data, span, dtype=None): + """Calculates the moving average of a series. + +:Parameters: + $$data$$ + $$span$$ + $$dtype$$""" + + kwargs = {'span':span} if dtype is not None: kwargs['dtype'] = dtype - - result_dict = MA_mov_average(**kwargs) - return _process_result_dict(data, result_dict) + + return _moving_func(data, MA_mov_average, kwargs) mov_mean = mov_average - -def _mov_var_stddev(data, window_size, is_variance, is_sample, dtype): +#............................................................................... +def _mov_var_stddev(data, span, is_variance, bias, dtype): "helper function for mov_var and mov_stddev functions" - kwargs = {'array':data, - 'window_size':window_size, + kwargs = {'span':span, 'is_variance':is_variance, - 'is_sample':is_sample} - + 'bias':bias} if dtype is not None: kwargs['dtype'] = dtype - - result_dict = MA_mov_stddev(**kwargs) - return _process_result_dict(data, result_dict) + return _moving_func(data, MA_mov_stddev, kwargs) +#............................................................................... +def mov_var(data, span, bias=False, dtype=None): + """Calculates the moving variance of a 1-D array. -def mov_var(data, window_size, dtype=None): - """Calculates the moving variance of a 1-D array. This is the population -variance. See "mov_sample_var" for moving sample variance. - :Parameters: - data : ndarray - Data as a valid (subclass of) ndarray or MaskedArray. In particular, - TimeSeries objects are valid here. - window_size : int - Time periods to use for each calculation. - dtype : numpy data type specification (*None*) - Behaves the same as the dtype parameter for the numpy.var function. - -:Return value: - The result is always a masked array (preserves subclass attributes). The - result at index i uses values from [i-window_size:i+1], and will be masked - for the first `window_size` values. The result will also be masked at i - if any of the input values in the slice [i-window_size:i+1] are masked.""" - + $$data$$ + $$span$$ + $$bias$$ + $$dtype$$""" - return _mov_var_stddev(data=data, window_size=window_size, - is_variance=1, is_sample=0, dtype=dtype) + return _mov_var_stddev(data=data, span=span, + is_variance=1, bias=int(bias), dtype=dtype) +#............................................................................... +def mov_stddev(data, span, bias=False, dtype=None): + """Calculates the moving standard deviation of a 1-D array. -def mov_stddev(data, window_size, dtype=None): - """Calculates the moving standard deviation of a 1-D array. This is the -population standard deviation. See "mov_sample_stddev" for moving sample standard -deviation. 
- :Parameters: - data : ndarray - Data as a valid (subclass of) ndarray or MaskedArray. In particular, - TimeSeries objects are valid here. - window_size : int - Time periods to use for each calculation. - dtype : numpy data type specification (*None*) - Behaves the same as the dtype parameter for the numpy.std function. - -:Return value: - The result is always a masked array (preserves subclass attributes). The - result at index i uses values from [i-window_size:i+1], and will be masked - for the first `window_size` values. The result will also be masked at i - if any of the input values in the slice [i-window_size:i+1] are masked.""" + $$data$$ + $$span$$ + $$bias$$ + $$dtype$$""" - return _mov_var_stddev(data=data, window_size=window_size, - is_variance=0, is_sample=0, dtype=dtype) + return _mov_var_stddev(data=data, span=span, + is_variance=0, bias=int(bias), dtype=dtype) +#............................................................................... +def mov_covar(x, y, span, bias=False, dtype=None): + """Calculates the moving covariance of two 1-D arrays. - -def mov_sample_var(data, window_size, dtype=None): - """Calculates the moving sample variance of a 1-D array. - :Parameters: - data : ndarray - Data as a valid (subclass of) ndarray or MaskedArray. In particular, - TimeSeries objects are valid here. - window_size : int - Time periods to use for each calculation. - dtype : numpy data type specification (*None*) - Behaves the same as the dtype parameter for the numpy.var function. - -:Return value: - The result is always a masked array (preserves subclass attributes). The - result at index i uses values from [i-window_size:i+1], and will be masked - for the first `window_size` values. The result will also be masked at i - if any of the input values in the slice [i-window_size:i+1] are masked.""" - + $$x$$ + $$y$$ + $$span$$ + $$bias$$ + $$dtype$$""" - return _mov_var_stddev(data=data, window_size=window_size, - is_variance=1, is_sample=1, dtype=dtype) + result = x - mov_average(x, span, dtype=dtype) + result = result * (y - mov_average(y, span, dtype=dtype)) + + if bias: denom = span + else: denom = span - 1 + + return result/denom +#............................................................................... +def mov_corr(x, y, span, dtype=None): + """Calculates the moving correlation of two 1-D arrays. -def mov_sample_stddev(data, window_size, dtype=None): - """Calculates the moving sample standard deviation of a 1-D array. - :Parameters: - data : ndarray - Data as a valid (subclass of) ndarray or MaskedArray. In particular, - TimeSeries objects are valid here. - window_size : int - Time periods to use for each calculation. - dtype : numpy data type specification (*None*) - Behaves the same as the dtype parameter for the numpy.std function. - -:Return value: - The result is always a masked array (preserves subclass attributes). The - result at index i uses values from [i-window_size:i+1], and will be masked - for the first `window_size` values. 
The result will also be masked at i - if any of the input values in the slice [i-window_size:i+1] are masked.""" - - return _mov_var_stddev(data=data, window_size=window_size, - is_variance=0, is_sample=1, dtype=dtype) + $$x$$ + $$y$$ + $$span$$ + $$dtype$$""" + result = mov_covar(x, y, span, bias=True, dtype=dtype) + result = result / mov_stddev(x, span, bias=True, dtype=dtype) + result = result / mov_stddev(y, span, bias=True, dtype=dtype) + + return result +#............................................................................... def mov_average_expw(data, span, tol=1e-6): """Calculates the exponentially weighted moving average of a series. :Parameters: - data : ndarray - Data as a valid (subclass of) ndarray or MaskedArray. In particular, - TimeSeries objects are valid here. + $$data$$ span : int Time periods. The smoothing factor is 2/(span + 1) tol : float, *[1e-6]* Tolerance for the definition of the mask. When data contains masked values, this parameter determinea what points in the result should be masked. Values in the result that would not be "significantly" impacted (as - determined by this parameter) by the masked values are left unmasked. -""" + determined by this parameter) by the masked values are left unmasked.""" + data = marray(data, copy=True, subok=True) ismasked = (data._mask is not nomask) data._mask = N.zeros(data.shape, bool_) @@ -211,40 +213,16 @@ data._mask[0] = True # return data - -""" -def weightmave(data, span): - data = marray(data, subok=True, copy=True) - data._mask = N.zeros(data.shape, bool_) - # Set the data - _data = data._data - tmp = N.empty_like(_data) - tmp[:span] = _data[:span] - s = 0 - for i in range(span, len(data)): - s += _data[i] - _data[i-span] - tmp[i] = span*_data[i] + tmp[i-1] - s - tmp *= 2./(span*(n+1)) - data._data.flat = tmp - # Set the mask - if data._mask is not nomask: - msk = data._mask.nonzero()[0].repeat(span).reshape(-1,span) - msk += range(span) - data._mask[msk.ravel()] = True - data._mask[:span] = True - return data -""" - #............................................................................... def cmov_window(data, span, window_type): - """Applies a centered moving window of type window_type and size span on the - data. + """Applies a centered moving window of type window_type and size span on the +data. + +Returns a (subclass of) MaskedArray. The k first and k last data are always +masked (with k=span//2). When data has a missing value at position i, the +result has missing values in the interval [i-k:i+k+1]. - Returns a (subclass of) MaskedArray. The k first and k last data are always - masked (with k=span//2). When data has a missing value at position i, - the result has missing values in the interval [i-k:i+k+1]. - :Parameters: data : ndarray Data to process. The array should be at most 2D. On 2D arrays, the window @@ -265,9 +243,8 @@ the needed parameters. If window_type is a floating point number, it is interpreted as the beta parameter of the kaiser window. -Note also that only boxcar has been thoroughly tested. - """ - # +Note also that only boxcar has been thoroughly tested.""" + data = marray(data, copy=True, subok=True) if data._mask is nomask: data._mask = N.zeros(data.shape, bool_) @@ -299,8 +276,55 @@ Data to process. The array should be at most 2D. On 2D arrays, the window is applied recursively on each column. span : integer - The width of the window. 
- """ + The width of the window.""" return cmov_window(data, span, 'boxcar') cmov_mean = cmov_average + +param_doc = {} +param_doc['data'] = \ +"""data : ndarray + Data must be an ndarray (or subclass). In particular, note that + TimeSeries objects are valid here.""" + +param_doc['x'] = \ +"""x : ndarray + First array to be included in the calculation. x must be an ndarray (or + subclass). In particular, note that TimeSeries objects are valid here.""" + +param_doc['y'] = \ +"""y : ndarray + Second array to be included in the calculation. y must be an ndarray (or + subclass). In particular, note that TimeSeries objects are valid here.""" + +param_doc['span'] = \ +"""span : int + Time periods to use for each calculation.""" + +param_doc['bias'] = \ +"""bias : boolean (*False*) + If False, Normalization is by (N-1) where N == span (unbiased + estimate). If True then normalization is by N.""" + +param_doc['dtype'] = \ +"""dtype : numpy data type specification (*None*) + dtype for the result""" + +mov_result_doc = \ +""" + +:Return value: + The result is always a masked array (preserves subclass attributes). The + result at index i uses values from [i-span:i+1], and will be masked for the + first `span` values. The result will also be masked at i if any of the + input values in the slice [i-span:i+1] are masked.""" + +_g = globals() + +# generate function doc strings +for fn in (x for x in __all__ if x[:4] == 'mov_' and x[4:] != 'mean'): + fdoc = _g[fn].func_doc + for prm, dc in param_doc.iteritems(): + fdoc = fdoc.replace('$$'+prm+'$$', dc) + fdoc += mov_result_doc + _g[fn].func_doc = fdoc Modified: trunk/Lib/sandbox/timeseries/src/cseries.c =================================================================== --- trunk/Lib/sandbox/timeseries/src/cseries.c 2007-04-19 19:57:23 UTC (rev 2935) +++ trunk/Lib/sandbox/timeseries/src/cseries.c 2007-04-24 19:39:22 UTC (rev 2936) @@ -2887,7 +2887,81 @@ return returnVal; } +static PyObject *NP_ADD, *NP_MULTIPLY, *NP_SUBTRACT, *NP_SQRT, + *NP_GREATER, *NP_GREATER_EQUAL; +static PyObject* +np_add(PyObject *left_val, PyObject *right_val) { + + PyObject *result; + + result = PyObject_CallFunction( + NP_ADD, "OO", + (PyArrayObject*)left_val, + right_val); + return result; +} + +static PyObject* +np_subtract(PyObject *left_val, PyObject *right_val) { + + PyObject *result; + + result = PyObject_CallFunction( + NP_SUBTRACT, "OO", + (PyArrayObject*)left_val, + right_val); + return result; +} + +static PyObject* +np_multiply(PyObject *left_val, PyObject *right_val) { + + PyObject *result; + + result = PyObject_CallFunction( + NP_MULTIPLY, "OO", + (PyArrayObject*)left_val, + right_val); + return result; +} + +static PyObject* +np_sqrt(PyObject *val) { + return PyObject_CallFunction(NP_SQRT, "(O)", val); +} + +static int np_greater(PyObject *left_val, PyObject *right_val) { + + PyObject *temp; + int result; + + temp = PyObject_CallFunction( + NP_GREATER, "OO", + (PyArrayObject*)left_val, + right_val); + + result = (int)PyInt_AsLong(temp); + Py_DECREF(temp); + return result; +} + +static int np_greater_equal(PyObject *left_val, PyObject *right_val) { + + PyObject *temp; + int result; + + temp = PyObject_CallFunction( + NP_GREATER_EQUAL, "OO", + (PyArrayObject*)left_val, + right_val); + + result = (int)PyInt_AsLong(temp); + Py_DECREF(temp); + return result; +} + + /* This function is directly copied from direct copy of function in */ /* Return typenumber from dtype2 unless it is NULL, then return NPY_DOUBLE if dtype1->type_num is integer or bool @@ -2913,10 +2987,11 @@ 
/* validates the standard arguments to moving functions and set the original mask, original ndarray, and mask for the result */ static PyObject * -check_mov_args(PyObject *orig_arrayobj, int window_size, int min_win_size, - PyArrayObject **orig_ndarray, PyArrayObject **result_mask) { +check_mov_args(PyObject *orig_arrayobj, int span, int min_win_size, + PyObject **orig_ndarray, PyObject **result_mask) { - PyArrayObject *orig_mask=NULL; + PyObject *orig_mask=NULL; + PyArrayObject **orig_ndarray_tmp, **result_mask_tmp; int *raw_result_mask; if (!PyArray_Check(orig_arrayobj)) { @@ -2928,44 +3003,49 @@ if (PyObject_HasAttrString(orig_arrayobj, "_mask")) { PyObject *tempMask = PyObject_GetAttrString(orig_arrayobj, "_mask"); if (PyArray_Check(tempMask)) { - orig_mask = (PyArrayObject*)PyArray_EnsureArray(tempMask); + orig_mask = PyArray_EnsureArray(tempMask); } else { Py_DECREF(tempMask); } } - *orig_ndarray = (PyArrayObject*)PyArray_EnsureArray(orig_arrayobj); + *orig_ndarray = PyArray_EnsureArray(orig_arrayobj); + orig_ndarray_tmp = (PyArrayObject**)orig_ndarray; - if ((*orig_ndarray)->nd != 1) { + if ((*orig_ndarray_tmp)->nd != 1) { PyErr_SetString(PyExc_ValueError, "array must be 1 dimensional"); return NULL; } - if (window_size < min_win_size) { + if (span < min_win_size) { char *error_str; error_str = malloc(60 * sizeof(char)); MEM_CHECK(error_str) sprintf(error_str, - "window_size must be greater than or equal to %i", + "span must be greater than or equal to %i", min_win_size); PyErr_SetString(PyExc_ValueError, error_str); free(error_str); return NULL; } - raw_result_mask = malloc((*orig_ndarray)->dimensions[0] * sizeof(int)); + raw_result_mask = malloc((*orig_ndarray_tmp)->dimensions[0] * sizeof(int)); MEM_CHECK(raw_result_mask) { + PyArrayObject *orig_mask_tmp; int i, valid_points=0, is_masked; - for (i=0; i<((*orig_ndarray)->dimensions[0]); i++) { + orig_mask_tmp = (PyArrayObject*)orig_mask; + for (i=0; i<((*orig_ndarray_tmp)->dimensions[0]); i++) { + is_masked=0; if (orig_mask != NULL) { PyObject *valMask; - valMask = PyArray_GETITEM(orig_mask, PyArray_GetPtr(orig_mask, &i)); + valMask = PyArray_GETITEM(orig_mask_tmp, + PyArray_GetPtr(orig_mask_tmp, &i)); is_masked = (int)PyInt_AsLong(valMask); Py_DECREF(valMask); } @@ -2973,29 +3053,26 @@ if (is_masked) { valid_points=0; } else { - if (valid_points < window_size) { valid_points += 1; } - if (valid_points < window_size) { is_masked = 1; } + if (valid_points < span) { valid_points += 1; } + if (valid_points < span) { is_masked = 1; } } raw_result_mask[i] = is_masked; } } - *result_mask = (PyArrayObject*)PyArray_SimpleNewFromData( - 1, (*orig_ndarray)->dimensions, - PyArray_INT32, raw_result_mask); + *result_mask = PyArray_SimpleNewFromData( + 1, (*orig_ndarray_tmp)->dimensions, + PyArray_INT32, raw_result_mask); MEM_CHECK(*result_mask) - (*result_mask)->flags = ((*result_mask)->flags) | NPY_OWNDATA; + result_mask_tmp = (PyArrayObject**)result_mask; + (*result_mask_tmp)->flags = ((*result_mask_tmp)->flags) | NPY_OWNDATA; } - -static PyObject *NP_ADD, *NP_MULTIPLY, *NP_SUBTRACT, *NP_SQRT, - *NP_GREATER, *NP_GREATER_EQUAL; - /* computation portion of moving sum. 
Appropriate mask is overlayed on top afterwards */ static PyObject* -calc_mov_sum(PyArrayObject *orig_ndarray, int window_size, int rtype) +calc_mov_sum(PyArrayObject *orig_ndarray, int span, int rtype) { PyArrayObject *result_ndarray=NULL; int i; @@ -3019,22 +3096,18 @@ PyObject *mov_sum_prevval; mov_sum_prevval= PyArray_GETITEM(result_ndarray, PyArray_GetPtr(result_ndarray, &prev_idx)); - mov_sum_val = PyObject_CallFunction(NP_ADD, "OO", (PyArrayObject*)val, - mov_sum_prevval); + mov_sum_val = np_add(val, mov_sum_prevval); Py_DECREF(mov_sum_prevval); ERR_CHECK(mov_sum_val) - if (i >= (window_size - 1)) { + if (i >= span) { PyObject *temp_val, *rem_val; - int rem_idx = i-window_size; + int rem_idx = i-span; temp_val = mov_sum_val; rem_val = PyArray_GETITEM(orig_ndarray, PyArray_GetPtr(orig_ndarray, &rem_idx)); - mov_sum_val = PyObject_CallFunction( - NP_SUBTRACT, - "OO", (PyArrayObject*)temp_val, - rem_val); + mov_sum_val = np_subtract(temp_val, rem_val); ERR_CHECK(mov_sum_val) Py_DECREF(temp_val); @@ -3042,7 +3115,9 @@ } } - PyArray_SETITEM(result_ndarray, PyArray_GetPtr(result_ndarray, &i), mov_sum_val); + PyArray_SETITEM(result_ndarray, + PyArray_GetPtr(result_ndarray, &i), + mov_sum_val); if (mov_sum_val != val) { Py_DECREF(val); } @@ -3057,31 +3132,33 @@ static PyObject * MaskedArray_mov_sum(PyObject *self, PyObject *args, PyObject *kwds) { - PyObject *orig_arrayobj=NULL, *result_dict=NULL; - PyArrayObject *orig_ndarray=NULL, *result_ndarray=NULL, *result_mask=NULL; + PyObject *orig_arrayobj=NULL, *orig_ndarray=NULL, + *result_ndarray=NULL, *result_mask=NULL, + *result_dict=NULL; PyArray_Descr *dtype=NULL; - int rtype, window_size; + int rtype, span; - static char *kwlist[] = {"array", "window_size", "dtype", NULL}; + static char *kwlist[] = {"array", "span", "dtype", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, - "Oi|O&:mov_sum(array, window_size, dtype)", kwlist, - &orig_arrayobj, &window_size, + "Oi|O&:mov_sum(array, span, dtype)", kwlist, + &orig_arrayobj, &span, PyArray_DescrConverter2, &dtype)) return NULL; - check_mov_args(orig_arrayobj, window_size, 1, + check_mov_args(orig_arrayobj, span, 1, &orig_ndarray, &result_mask); rtype = _CHKTYPENUM(dtype); - result_ndarray = (PyArrayObject*)calc_mov_sum(orig_ndarray, window_size, rtype); + result_ndarray = calc_mov_sum((PyArrayObject*)orig_ndarray, + span, rtype); ERR_CHECK(result_ndarray) result_dict = PyDict_New(); MEM_CHECK(result_dict) - PyDict_SetItemString(result_dict, "array", (PyObject*)result_ndarray); - PyDict_SetItemString(result_dict, "mask", (PyObject*)result_mask); + PyDict_SetItemString(result_dict, "array", result_ndarray); + PyDict_SetItemString(result_dict, "mask", result_mask); Py_DECREF(result_ndarray); Py_DECREF(result_mask); @@ -3092,117 +3169,49 @@ static PyObject * MaskedArray_mov_average(PyObject *self, PyObject *args, PyObject *kwds) { - PyObject *orig_arrayobj=NULL, *result_dict=NULL; - PyArrayObject *orig_ndarray=NULL, *result_ndarray=NULL, *result_mask=NULL, - *mov_sum=NULL; - PyObject *denom=NULL; - + PyObject *orig_arrayobj=NULL, *orig_ndarray=NULL, + *result_ndarray=NULL, *result_mask=NULL, + *result_dict=NULL, + *mov_sum=NULL, *denom=NULL; PyArray_Descr *dtype=NULL; - int rtype, window_size; + int rtype, span; - static char *kwlist[] = {"array", "window_size", "dtype", NULL}; + static char *kwlist[] = {"array", "span", "dtype", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, - "Oi|O&:mov_average(array, window_size, dtype)", kwlist, - &orig_arrayobj, &window_size, + 
"Oi|O&:mov_average(array, span, dtype)", kwlist, + &orig_arrayobj, &span, PyArray_DescrConverter2, &dtype)) return NULL; - check_mov_args(orig_arrayobj, window_size, 2, + check_mov_args(orig_arrayobj, span, 2, &orig_ndarray, &result_mask); - rtype = _get_type_num_double(orig_ndarray->descr, dtype); + rtype = _get_type_num_double(((PyArrayObject*)orig_ndarray)->descr, dtype); - mov_sum = (PyArrayObject*)calc_mov_sum(orig_ndarray, window_size, rtype); + mov_sum = calc_mov_sum((PyArrayObject*)orig_ndarray, span, rtype); ERR_CHECK(mov_sum) - denom = PyFloat_FromDouble(1.0/(double)(window_size)); + denom = PyFloat_FromDouble(1.0/(double)(span)); - result_ndarray = (PyArrayObject*)PyObject_CallFunction( - NP_MULTIPLY, - "OO", mov_sum, - denom); + result_ndarray = np_multiply(mov_sum, denom); ERR_CHECK(result_ndarray) + Py_DECREF(mov_sum); Py_DECREF(denom); result_dict = PyDict_New(); MEM_CHECK(result_dict) - PyDict_SetItemString(result_dict, "array", (PyObject*)result_ndarray); - PyDict_SetItemString(result_dict, "mask", (PyObject*)result_mask); + PyDict_SetItemString(result_dict, "array", result_ndarray); + PyDict_SetItemString(result_dict, "mask", result_mask); Py_DECREF(result_ndarray); Py_DECREF(result_mask); return result_dict; } -static PyObject* -np_add(PyObject *left_val, PyObject *right_val) { - PyObject *result; - - result = PyObject_CallFunction( - NP_ADD, "OO", - (PyArrayObject*)left_val, - right_val); - return result; -} - -static PyObject* -np_subtract(PyObject *left_val, PyObject *right_val) { - - PyObject *result; - - result = PyObject_CallFunction( - NP_SUBTRACT, "OO", - (PyArrayObject*)left_val, - right_val); - return result; -} - -static PyObject* -np_multiply(PyObject *left_val, PyObject *right_val) { - - PyObject *result; - - result = PyObject_CallFunction( - NP_MULTIPLY, "OO", - (PyArrayObject*)left_val, - right_val); - return result; -} - -static int np_greater(PyObject *left_val, PyObject *right_val) { - - PyObject *temp; - int result; - - temp = PyObject_CallFunction( - NP_GREATER, "OO", - (PyArrayObject*)left_val, - right_val); - - result = (int)PyInt_AsLong(temp); - Py_DECREF(temp); - return result; -} - -static int np_greater_equal(PyObject *left_val, PyObject *right_val) { - - PyObject *temp; - int result; - - temp = PyObject_CallFunction( - NP_GREATER_EQUAL, "OO", - (PyArrayObject*)left_val, - right_val); - - result = (int)PyInt_AsLong(temp); - Py_DECREF(temp); - return result; -} - /* computation portion of moving median. Appropriate mask is overlayed on top afterwards. @@ -3213,13 +3222,13 @@ (David Brahm) has granted me (and scipy) permission to use it under the BSD license. 
*/ static PyObject* -calc_mov_median(PyArrayObject *orig_ndarray, int window_size, int rtype) +calc_mov_median(PyArrayObject *orig_ndarray, int span, int rtype) { PyArrayObject *result_ndarray=NULL; PyObject **result_array, **ref_array, **even_array=NULL; - PyObject *new_val, *old_val, *temp; + PyObject *new_val, *old_val; PyObject *temp_add, *one_half; - int a, i, k, R, arr_size, z, is_odd; + int a, i, k, R, arr_size, z; int *r; arr_size = orig_ndarray->dimensions[0]; @@ -3230,7 +3239,7 @@ rtype, 0); ERR_CHECK(result_ndarray) - if (arr_size >= window_size) { + if (arr_size >= span) { result_array = calloc(arr_size, sizeof(PyObject*)); MEM_CHECK(result_array) @@ -3246,35 +3255,33 @@ /* this array wll be used for keeping track of the "ranks" of the values in the current window */ - r = malloc(window_size * sizeof(int)); + r = malloc(span * sizeof(int)); MEM_CHECK(r) - for (i=0; i < window_size; i++) { + for (i=0; i < span; i++) { r[i] = 1; } - if ((window_size % 2) == 0) { - // array to store two median values when window_size is an even # + if ((span % 2) == 0) { + // array to store two median values when span is an even # even_array = calloc(2, sizeof(PyObject*)); MEM_CHECK(even_array) } - R = (window_size + 1)/2; + R = (span + 1)/2; one_half = PyFloat_FromDouble(0.5); - z = arr_size - window_size; + z = arr_size - span; - //printf("yep 1: %f, %f\n", PyFloat_AsDouble(data_array[z+i]), PyFloat_AsDouble(data_array[z+k])); - /* Calculate initial ranks "r" */ - for (i=0; i < window_size; i++) { + for (i=0; i < span; i++) { for (k=0; k < i; k++) { if (np_greater_equal(ref_array[z+i], ref_array[z+k])) { r[i]++; } } - for (k=i+1; k < window_size; k++) { + for (k=i+1; k < span; k++) { if (np_greater(ref_array[z+i], ref_array[z+k])) { r[i]++; } @@ -3300,13 +3307,13 @@ Py_DECREF(temp_add); } - for (i=arr_size-2; i >= window_size-1; i--) { - a = window_size; - z = i - window_size + 1; + for (i=arr_size-2; i >= span-1; i--) { + a = span; + z = i - span + 1; old_val = ref_array[i+1]; - new_val = ref_array[i-window_size+1]; + new_val = ref_array[i-span+1]; - for (k=window_size-1; k > 0; k--) { + for (k=span-1; k > 0; k--) { r[k] = r[k-1]; /* Shift previous iteration's ranks */ if (np_greater_equal(ref_array[z+k], new_val)) {r[k]++; a--;} if (np_greater(ref_array[z+k], old_val)) {r[k]--;} @@ -3352,7 +3359,7 @@ Py_DECREF(one_half); - for (i=window_size-1; idescr, dtype); + rtype = _get_type_num_double(((PyArrayObject*)orig_ndarray)->descr, dtype); - mov_sum = (PyArrayObject*)calc_mov_sum(orig_ndarray, window_size, rtype); + mov_sum = calc_mov_sum((PyArrayObject*)orig_ndarray, span, rtype); ERR_CHECK(mov_sum) - result_temp1 = (PyArrayObject*)PyObject_CallFunction( - NP_MULTIPLY, "OO", - orig_ndarray, (PyObject*)orig_ndarray); + result_temp1 = np_multiply(orig_ndarray, orig_ndarray); ERR_CHECK(result_temp1) - mov_sum_sq = (PyArrayObject*)calc_mov_sum(result_temp1, window_size, rtype); - + mov_sum_sq = calc_mov_sum((PyArrayObject*)result_temp1, span, rtype); Py_DECREF(result_temp1); ERR_CHECK(mov_sum_sq) @@ -3457,40 +3464,29 @@ formulas from: http://en.wikipedia.org/wiki/Standard_deviation#Rapid_calculation_methods */ - if (is_sample) { - denom1 = PyFloat_FromDouble(1.0/(double)(window_size-1)); - denom2 = PyFloat_FromDouble(1.0/(double)(window_size*(window_size-1))); + if (bias == 0) { + denom1 = PyFloat_FromDouble(1.0/(double)(span-1)); + denom2 = PyFloat_FromDouble(1.0/(double)(span*(span-1))); } else { - denom1 = PyFloat_FromDouble(1.0/(double)window_size); - denom2 = 
PyFloat_FromDouble(1.0/(double)(window_size*window_size)); + denom1 = PyFloat_FromDouble(1.0/(double)span); + denom2 = PyFloat_FromDouble(1.0/(double)(span*span)); } - result_temp1 = (PyArrayObject*)PyObject_CallFunction( - NP_MULTIPLY, - "OO", mov_sum_sq, - denom1); + result_temp1 = np_multiply(mov_sum_sq, denom1); ERR_CHECK(result_temp1) Py_DECREF(mov_sum_sq); Py_DECREF(denom1); - result_temp3 = (PyArrayObject*)PyObject_CallFunction( - NP_MULTIPLY, - "OO", mov_sum, - (PyObject*)mov_sum); + result_temp3 = np_multiply(mov_sum, mov_sum); ERR_CHECK(result_temp3) Py_DECREF(mov_sum); - result_temp2 = (PyArrayObject*)PyObject_CallFunction( - NP_MULTIPLY, - "OO", result_temp3, - denom2); + + result_temp2 = np_multiply(result_temp3, denom2); ERR_CHECK(result_temp2) Py_DECREF(result_temp3); Py_DECREF(denom2); - result_temp3 = (PyArrayObject*)PyObject_CallFunction( - NP_SUBTRACT, - "OO", result_temp1, - (PyObject*)result_temp2); + result_temp3 = np_subtract(result_temp1, result_temp2); ERR_CHECK(result_temp3) Py_DECREF(result_temp1); Py_DECREF(result_temp2); @@ -3498,8 +3494,7 @@ if (is_variance) { result_ndarray = result_temp3; } else { - result_temp1 = (PyArrayObject*)PyObject_CallFunction( - NP_SQRT, "(O)", result_temp3); + result_temp1 = np_sqrt(result_temp3); ERR_CHECK(result_temp1) Py_DECREF(result_temp3); result_ndarray = result_temp1; @@ -3507,14 +3502,15 @@ result_dict = PyDict_New(); MEM_CHECK(result_dict) - PyDict_SetItemString(result_dict, "array", (PyObject*)result_ndarray); - PyDict_SetItemString(result_dict, "mask", (PyObject*)result_mask); + PyDict_SetItemString(result_dict, "array", result_ndarray); + PyDict_SetItemString(result_dict, "mask", result_mask); Py_DECREF(result_ndarray); Py_DECREF(result_mask); return result_dict; } + static char DateArray_asfreq_doc[] = ""; static PyObject * DateArray_asfreq(PyObject *self, PyObject *args) From scipy-svn at scipy.org Tue Apr 24 18:02:10 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 24 Apr 2007 17:02:10 -0500 (CDT) Subject: [Scipy-svn] r2937 - trunk/Lib/stats Message-ID: <20070424220210.8A3E339C053@new.scipy.org> Author: oliphant Date: 2007-04-24 17:02:08 -0500 (Tue, 24 Apr 2007) New Revision: 2937 Modified: trunk/Lib/stats/stats.py Log: Use Pearson's definition of kurtosis in kurtosis-test. Modified: trunk/Lib/stats/stats.py =================================================================== --- trunk/Lib/stats/stats.py 2007-04-24 19:39:22 UTC (rev 2936) +++ trunk/Lib/stats/stats.py 2007-04-24 22:02:08 UTC (rev 2937) @@ -828,7 +828,7 @@ warnings.warn( "kurtosistest only valid for n>=20 ... continuing anyway, n=%i" % int(n)) - b2 = kurtosis(a, axis) + b2 = kurtosis(a, axis, fisher=False) E = 3.0*(n-1) /(n+1) varb2 = 24.0*n*(n-2)*(n-3) / ((n+1)*(n+1)*(n+3)*(n+5)) x = (b2-E)/np.sqrt(varb2) @@ -848,8 +848,6 @@ # fixme: find reference """Tests whether skew and/or kurtosis of dataset differs from normal curve. - This is the omnibus test of D'Agostino and Pearson, 1973 - Parameters ---------- a : array From scipy-svn at scipy.org Tue Apr 24 18:16:14 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 24 Apr 2007 17:16:14 -0500 (CDT) Subject: [Scipy-svn] r2938 - trunk/Lib/stats Message-ID: <20070424221614.4047539C053@new.scipy.org> Author: oliphant Date: 2007-04-24 17:16:11 -0500 (Tue, 24 Apr 2007) New Revision: 2938 Modified: trunk/Lib/stats/stats.py Log: Use math.sqrt on floats which is faster. Add references to normaltest. 
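For reference, a minimal sketch of the distinction behind the r2937 change above, assuming only that scipy.stats.kurtosis exposes the fisher flag used in the patched kurtosistest (illustrative, not part of any commit):

    import numpy as np
    from scipy import stats

    np.random.seed(0)
    x = np.random.randn(100000)

    # Fisher's definition reports excess kurtosis, so a normal sample sits near 0.
    fisher_k = stats.kurtosis(x)
    # Pearson's definition, selected with fisher=False, sits near 3, which is the
    # value the kurtosistest expectation E = 3.0*(n-1)/(n+1) is built around.
    pearson_k = stats.kurtosis(x, fisher=False)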
Modified: trunk/Lib/stats/stats.py =================================================================== --- trunk/Lib/stats/stats.py 2007-04-24 22:02:08 UTC (rev 2937) +++ trunk/Lib/stats/stats.py 2007-04-24 22:16:11 UTC (rev 2938) @@ -184,6 +184,7 @@ # Standard library imports. import warnings +import math # Scipy imports. from numpy import array, asarray, dot, ma, zeros, sum @@ -795,11 +796,11 @@ warnings.warn( "skewtest only valid for n>=8 ... continuing anyway, n=%i" % int(n)) - y = b2 * np.sqrt(((n+1)*(n+3)) / (6.0*(n-2)) ) + y = b2 * math.sqrt(((n+1)*(n+3)) / (6.0*(n-2)) ) beta2 = ( 3.0*(n*n+27*n-70)*(n+1)*(n+3) ) / ( (n-2.0)*(n+5)*(n+7)*(n+9) ) - W2 = -1 + np.sqrt(2*(beta2-1)) - delta = 1/np.sqrt(np.log(np.sqrt(W2))) - alpha = np.sqrt(2.0/(W2-1)) + W2 = -1 + math.sqrt(2*(beta2-1)) + delta = 1/math.sqrt(0.5*math.log(W2)) + alpha = math.sqrt(2.0/(W2-1)) y = np.where(y==0, 1, y) Z = delta*np.log(y/alpha + np.sqrt((y/alpha)**2+1)) return Z, (1.0 - zprob(Z))*2 @@ -845,7 +846,6 @@ def normaltest(a, axis=0): - # fixme: find reference """Tests whether skew and/or kurtosis of dataset differs from normal curve. Parameters @@ -857,6 +857,16 @@ ------- (Chi^2 score, 2-tail probability) + + Based on the D'Agostino and Pearson's test that combines skew and + kurtosis to produce an omnibus test of normality. + + D'Agostino, R. B. and Pearson, E. S. (1971), "An Omnibus Test of Normality for + Moderate and Large Sample Size," Biometrika, 58, 341-348 + + D'Agostino, R. B. and Pearson, E. S. (1973), "Testing for departures from + Normality," Biometrika, 60, 613-622 + """ a, axis = _chk_asarray(a, axis) s,p = skewtest(a,axis) From scipy-svn at scipy.org Wed Apr 25 18:51:40 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 25 Apr 2007 17:51:40 -0500 (CDT) Subject: [Scipy-svn] r2939 - in trunk/Lib/misc: . tests Message-ID: <20070425225140.1C03C39C0FC@new.scipy.org> Author: stefan Date: 2007-04-25 17:51:23 -0500 (Wed, 25 Apr 2007) New Revision: 2939 Modified: trunk/Lib/misc/pilutil.py trunk/Lib/misc/tests/test_pilutil.py Log: Fix dtype comparison. 
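A minimal sketch of the pitfall behind "Fix dtype comparison", assuming nothing beyond plain numpy (illustrative, not part of the commit): an array's dtype is a dtype instance, while uint8 is the scalar type object, so identity comparison is the wrong test.

    import numpy as np

    a = np.zeros(3, dtype=np.uint8)

    # Identity is effectively never true here, so the old
    # "if data.dtype is uint8" early return in bytescale never fired.
    same_object = a.dtype is np.uint8   # False
    # dtype equality knows how to compare against the scalar type.
    same_dtype = a.dtype == np.uint8    # True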
Modified: trunk/Lib/misc/pilutil.py =================================================================== --- trunk/Lib/misc/pilutil.py 2007-04-24 22:16:11 UTC (rev 2938) +++ trunk/Lib/misc/pilutil.py 2007-04-25 22:51:23 UTC (rev 2939) @@ -16,13 +16,11 @@ # Returns a byte-scaled image def bytescale(data, cmin=None, cmax=None, high=255, low=0): - if data.dtype is uint8: + if data.dtype == uint8: return data high = high - low - if cmin is None: - cmin = amin(ravel(data)) - if cmax is None: - cmax = amax(ravel(data)) + if cmin is None: cmin = data.min() + if cmax is None: cmax = data.max() scale = high *1.0 / (cmax-cmin or 1) bytedata = ((data*1.0-cmin)*scale + 0.4999).astype(uint8) return bytedata + cast[uint8](low) Modified: trunk/Lib/misc/tests/test_pilutil.py =================================================================== --- trunk/Lib/misc/tests/test_pilutil.py 2007-04-24 22:16:11 UTC (rev 2938) +++ trunk/Lib/misc/tests/test_pilutil.py 2007-04-25 22:51:23 UTC (rev 2939) @@ -12,5 +12,11 @@ im1 = pilutil.imresize(im,T(1.1)) assert_equal(im1.shape,(11,22)) + def check_bytescale(self): + x = N.array([0,1,2],N.uint8) + y = N.array([0,1,2]) + assert_equal(pilutil.bytescale(x),x) + assert_equal(pilutil.bytescale(y),[0,127,255]) + if __name__ == "__main__": NumpyTest().run() From scipy-svn at scipy.org Thu Apr 26 03:59:31 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 26 Apr 2007 02:59:31 -0500 (CDT) Subject: [Scipy-svn] r2940 - in trunk/Lib/cluster: . tests Message-ID: <20070426075931.98C4239C03A@new.scipy.org> Author: cdavid Date: 2007-04-26 02:59:02 -0500 (Thu, 26 Apr 2007) New Revision: 2940 Added: trunk/Lib/cluster/tests/data.txt Modified: trunk/Lib/cluster/tests/test_vq.py trunk/Lib/cluster/vq.py Log: Replace (python) sum by numpy sum (solves #275), and clean up the docstrings Added: trunk/Lib/cluster/tests/data.txt =================================================================== --- trunk/Lib/cluster/tests/data.txt 2007-04-25 22:51:23 UTC (rev 2939) +++ trunk/Lib/cluster/tests/data.txt 2007-04-26 07:59:02 UTC (rev 2940) @@ -0,0 +1 @@ +-2.2, 1.17, -1.63, 1.69, -2.04, 4.38, -3.09, 0.95, -1.7, 4.79, -1.68, 0.68, -2.26, 3.34, -2.29, 2.55, -1.72, -0.72, -1.99, 2.34, -2.75, 3.43, -2.45, 2.41, -4.26, 3.65, -1.57, 1.87, -1.96, 4.03, -3.01, 3.86, -2.53, 1.28, -4.0, 3.95, -1.62, 1.25, -3.42, 3.17, -1.17, 0.12, -3.03, -0.27, -2.07, -0.55, -1.17, 1.34, -2.82, 3.08, -2.44, 0.24, -1.71, 2.48, -5.23, 4.29, -2.08, 3.69, -1.89, 3.62, -2.09, 0.26, -0.92, 1.07, -2.25, 0.88, -2.25, 2.02, -4.31, 3.86, -2.03, 3.42, -2.76, 0.3, -2.48, -0.29, -3.42, 3.21, -2.3, 1.73, -2.84, 0.69, -1.81, 2.48, -5.24, 4.52, -2.8, 1.31, -1.67, -2.34, -1.18, 2.17, -2.17, 2.82, -1.85, 2.25, -2.45, 1.86, -6.79, 3.94, -2.33, 1.89, -1.55, 2.08, -1.36, 0.93, -2.51, 2.74, -2.39, 3.92, -3.33, 2.99, -2.06, -0.9, -2.83, 3.35, -2.59, 3.05, -2.36, 1.85, -1.69, 1.8, -1.39, 0.66, -2.06, 0.38, -1.47, 0.44, -4.68, 3.77, -5.58, 3.44, -2.29, 2.24, -1.04, -0.38, -1.85, 4.23, -2.88, 0.73, -2.59, 1.39, -1.34, 1.75, -1.95, 1.3, -2.45, 3.09, -1.99, 3.41, -5.55, 5.21, -1.73, 2.52, -2.17, 0.85, -2.06, 0.49, -2.54, 2.07, -2.03, 1.3, -3.23, 3.09, -1.55, 1.44, -0.81, 1.1, -2.99, 2.92, -1.59, 2.18, -2.45, -0.73, -3.12, -1.3, -2.83, 0.2, -2.77, 3.24, -1.98, 1.6, -4.59, 3.39, -4.85, 3.75, -2.25, 1.71, -3.28, 3.38, -1.74, 0.88, -2.41, 1.92, -2.24, 1.19, -2.48, 1.06, -1.68, -0.62, -1.3, 0.39, -1.78, 2.35, -3.54, 2.44, -1.32, 0.66, -2.38, 2.76, -2.35, 3.95, -1.86, 4.32, -2.01, -1.23, -1.79, 2.76, -2.13, -0.13, -5.25, 3.84, -2.24, 1.59, 
-4.85, 2.96, -2.41, 0.01, -0.43, 0.13, -3.92, 2.91, -1.75, -0.53, -1.69, 1.69, -1.09, 0.15, -2.11, 2.17, -1.53, 1.22, -2.1, -0.86, -2.56, 2.28, -3.02, 3.33, -1.12, 3.86, -2.18, -1.19, -3.03, 0.79, -0.83, 0.97, -3.19, 1.45, -1.34, 1.28, -2.52, 4.22, -4.53, 3.22, -1.97, 1.75, -2.36, 3.19, -0.83, 1.53, -1.59, 1.86, -2.17, 2.3, -1.63, 2.71, -2.03, 3.75, -2.57, -0.6, -1.47, 1.33, -1.95, 0.7, -1.65, 1.27, -1.42, 1.09, -3.0, 3.87, -2.51, 3.06, -2.6, 0.74, -1.08, -0.03, -2.44, 1.31, -2.65, 2.99, -1.84, 1.65, -4.76, 3.75, -2.07, 3.98, -2.4, 2.67, -2.21, 1.49, -1.21, 1.22, -5.29, 2.38, -2.85, 2.28, -5.6, 3.78, -2.7, 0.8, -1.81, 3.5, -3.75, 4.17, -1.29, 2.99, -5.92, 3.43, -1.83, 1.23, -1.24, -1.04, -2.56, 2.37, -3.26, 0.39, -4.63, 2.51, -4.52, 3.04, -1.7, 0.36, -1.41, 0.04, -2.1, 1.0, -1.87, 3.78, -4.32, 3.59, -2.24, 1.38, -1.99, -0.22, -1.87, 1.95, -0.84, 2.17, -5.38, 3.56, -1.27, 2.9, -1.79, 3.31, -5.47, 3.85, -1.44, 3.69, -2.02, 0.37, -1.29, 0.33, -2.34, 2.56, -1.74, -1.27, -1.97, 1.22, -2.51, -0.16, -1.64, -0.96, -2.99, 1.4, -1.53, 3.31, -2.24, 0.45, -2.46, 1.71, -2.88, 1.56, -1.63, 1.46, -1.41, 0.68, -1.96, 2.76, -1.61, 2.11 \ No newline at end of file Modified: trunk/Lib/cluster/tests/test_vq.py =================================================================== --- trunk/Lib/cluster/tests/test_vq.py 2007-04-25 22:51:23 UTC (rev 2939) +++ trunk/Lib/cluster/tests/test_vq.py 2007-04-26 07:59:02 UTC (rev 2940) @@ -1,7 +1,7 @@ #! /usr/bin/env python # David Cournapeau -# Last Change: Mon Oct 23 04:00 PM 2006 J +# Last Change: Thu Apr 26 04:00 PM 2007 J # For now, just copy the tests from sandbox.pyem, so we can check that # kmeans works OK for trivial examples. @@ -12,7 +12,7 @@ import numpy as N set_package_path() -from cluster.vq import kmeans +from cluster.vq import kmeans, kmeans_, py_vq, py_vq2 restore_path() # #Optional: @@ -25,21 +25,55 @@ [9, 2], [5, 1], [6, 2], [9, 4], [5, 2], [5, 4], [7, 4], [6, 5]]) -codet1 = N.array([[3.0000, 3.0000], +CODET1 = N.array([[3.0000, 3.0000], [6.2000, 4.0000], [5.8000, 1.8000]]) -codet2 = N.array([[11.0/3, 8.0/3], +CODET2 = N.array([[11.0/3, 8.0/3], [6.7500, 4.2500], [6.2500, 1.7500]]) +LABEL1 = N.array([0, 1, 2, 2, 2, 2, 1, 2, 1, 1, 1]) + +class test_vq(NumpyTestCase): + def check_py_vq(self, level=1): + initc = N.concatenate(([[X[0]], [X[1]], [X[2]]])) + code = initc.copy() + label1 = py_vq(X, initc)[0] + assert_array_equal(label1, LABEL1) + + def check_py_vq2(self, level=1): + initc = N.concatenate(([[X[0]], [X[1]], [X[2]]])) + code = initc.copy() + label1 = py_vq2(X, initc)[0] + assert_array_equal(label1, LABEL1) + + def check_vq(self, level=1): + initc = N.concatenate(([[X[0]], [X[1]], [X[2]]])) + code = initc.copy() + try: + import _vq + label1 = _vq.double_vq(X, initc)[0] + assert_array_equal(label1, LABEL1) + except ImportError: + print "== Error while importing _vq, not testing C imp of vq ==" + class test_kmean(NumpyTestCase): def check_kmeans(self, level=1): initc = N.concatenate(([[X[0]], [X[1]], [X[2]]])) code = initc.copy() - code1 = kmeans(X, code)[0] + #code1 = kmeans(X, code, iter = 1)[0] - assert_array_almost_equal(code1, codet2) + #assert_array_almost_equal(code1, CODET2) + def check_kmeans_lost_cluster(self, level=1): + """This will cause kmean to have a cluster with no points.""" + data = N.fromfile(open("data.txt"), sep = ", ") + data = data.reshape((200, 2)) + initk = N.array([[-1.8127404, -0.67128041], [ 2.04621601, 0.07401111], + [-2.31149087,-0.05160469]]) + + res = kmeans(data, initk) + if __name__ == "__main__": NumpyTest().run() 
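A minimal sketch of the behaviour these tests pin down, written against the public scipy.cluster.vq interface (illustrative; X, LABEL1 and CODET2 refer to the fixtures defined in the test module above):

    import numpy as N
    from scipy.cluster.vq import vq, kmeans

    X = N.array([[3.0, 3], [4, 3], [4, 2],
                 [9, 2], [5, 1], [6, 2], [9, 4],
                 [5, 2], [5, 4], [7, 4], [6, 5]])
    initc = X[:3].copy()   # same initial code book as the N.concatenate(...) call in the tests

    # Assign each observation to its nearest code; check_py_vq expects LABEL1.
    labels, dist = vq(X, initc)

    # One kmeans refinement from the same guess; the re-enabled check_kmeans_simple
    # later in this thread expects the resulting code book to equal CODET2.
    code_book, distortion = kmeans(X, initc, iter=1)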
Modified: trunk/Lib/cluster/vq.py =================================================================== --- trunk/Lib/cluster/vq.py 2007-04-25 22:51:23 UTC (rev 2939) +++ trunk/Lib/cluster/vq.py 2007-04-26 07:59:02 UTC (rev 2940) @@ -18,9 +18,10 @@ __all__ = ['whiten', 'vq', 'kmeans'] from numpy.random import randint -from numpy import shape, zeros, subtract, sqrt, argmin, minimum, array, \ +from numpy import shape, zeros, sqrt, argmin, minimum, array, \ newaxis, arange, compress, equal, common_type, single, double, take, \ std, mean +import numpy as N def whiten(obs): """ Normalize a group of observations on a per feature basis @@ -65,10 +66,10 @@ [ 1.43684242, 0.57469577, 5.88897275]]) """ - std_dev = std(obs,axis=0) + std_dev = std(obs, axis=0) return obs / std_dev -def vq(obs,code_book): +def vq(obs, code_book): """ Vector Quantization: assign features sets to codes in a code book. Description: @@ -129,61 +130,101 @@ c_obs = obs.astype(ct) c_code_book = code_book.astype(ct) if ct is single: - results = _vq.float_vq(c_obs,c_code_book) + results = _vq.float_vq(c_obs, c_code_book) elif ct is double: - results = _vq.double_vq(c_obs,c_code_book) + results = _vq.double_vq(c_obs, c_code_book) else: - results = py_vq(obs,code_book) + results = py_vq(obs, code_book) except ImportError: - results = py_vq(obs,code_book) + results = py_vq(obs, code_book) return results -def py_vq(obs,code_book): +def py_vq(obs, code_book): """ Python version of vq algorithm. + The algorithm simply computes the euclidian distance between each + observation and every frame in the code_book/ + + :Parameters: + obs : ndarray + Expect a rank 2 array. Each row is one observation. + code_book : ndarray + Code book to use. Same format than obs. Should have same number of + features (eg columns) than obs. + + :Note: This function is slower than the C versions, but it works for all input types. If the inputs have the wrong types for the C versions of the function, this one is called as a last resort. Its about 20 times slower than the C versions. + + :Returns: + code : ndarray + code[i] gives the label of the ith obversation, that its code is + code_book[code[i]]. + mind_dist : ndarray + min_dist[i] gives the distance between the ith observation and its + corresponding code. """ - No,Nf = shape(obs) #No = observation count, Nf = feature count + # n = number of observations + # d = number of features + (n, d) = shape(obs) + # code books and observations should have same number of features - assert(Nf == code_book.shape[1]) - code = [];min_dist = [] - #create a memory block to use for the difference calculations - diff = zeros(shape(code_book), common_type(obs,code_book)) - for o in obs: - subtract(code_book,o,diff) # faster version of --> diff = code_book - o - dist = sqrt(sum(diff*diff,-1)) - code.append(argmin(dist,axis=-1)) - #something weird here dst does not work reliably because it sometime - #returns an array of goofy length. Try except fixes it, but is ugly. 
- dst = minimum.reduce(dist,0) - try: dst = dst[0] - except: pass - min_dist.append(dst) - return array(code,dtype=int), array(min_dist) + if not d == code_book.shape[1]: + raise ValueError(""" + code book(%d) and obs(%d) should have the same + number of features (eg columns)""" % (code_book.shape[1], d)) + + code = zeros(n, dtype = int) + min_dist = zeros(n) + for i in range(n): + dist = N.sum((obs[i] - code_book) ** 2, 1) + code[i] = argmin(dist) + min_dist[i] = dist[code[i]] -def py_vq2(obs,code_book): - """ This could be faster when number of codebooks is small, but it becomes - a real memory hog when codebook is large. It requires NxMxO storage - where N=number of obs, M = number of features, and O = number of - codes. + return code, sqrt(min_dist) + +def py_vq2(obs, code_book): + """2nd Python version of vq algorithm. + + The algorithm simply computes the euclidian distance between each + observation and every frame in the code_book/ + + :Parameters: + obs : ndarray + Expect a rank 2 array. Each row is one observation. + code_book : ndarray + Code book to use. Same format than obs. Should have same number of + features (eg columns) than obs. + + :Note: + This could be faster when number of codebooks is small, but it becomes + a real memory hog when codebook is large. It requires NxMxO storage + where N=number of obs, M = number of features, and O = number of codes. + + :Returns: + code : ndarray + code[i] gives the label of the ith obversation, that its code is + code_book[code[i]]. + mind_dist : ndarray + min_dist[i] gives the distance between the ith observation and its + corresponding code. """ - No,Nf = shape(obs) #No = observation count, Nf = feature count + No, Nf = shape(obs) #No = observation count, Nf = feature count # code books and observations should have same number of features assert(Nf == code_book.shape[1]) diff = obs[newaxis,:,:]-code_book[:,newaxis,:] - dist = sqrt(sum(diff*diff,-1)) - code = argmin(dist,0) - min_dist = minimum.reduce(dist,0) #the next line I think is equivalent + dist = sqrt(N.sum(diff*diff, -1)) + code = argmin(dist, 0) + min_dist = minimum.reduce(dist, 0) #the next line I think is equivalent # - and should be faster #min_dist = choose(code,dist) # but in practice, didn't seem to make # much difference. return code, min_dist -def kmeans_(obs,guess,thresh=1e-5): +def kmeans_(obs, guess, thresh=1e-5): """ See kmeans Outputs @@ -192,6 +233,9 @@ avg_dist -- the average distance a observation is from a code in the book. Lower means the code_book matches the data better. + + XXX should have an axis variable here. + Test Note: not whitened in this example. 
@@ -233,7 +277,7 @@ #print avg_dist return code_book, avg_dist[-1] -def kmeans(obs,k_or_guess,iter=20,thresh=1e-5): +def kmeans(obs, k_or_guess, iter=20, thresh=1e-5): """ Generate a code book with minimum distortion Description From scipy-svn at scipy.org Thu Apr 26 04:35:41 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 26 Apr 2007 03:35:41 -0500 (CDT) Subject: [Scipy-svn] r2941 - trunk/Lib/cluster Message-ID: <20070426083541.5FFD439C0FD@new.scipy.org> Author: cdavid Date: 2007-04-26 03:34:53 -0500 (Thu, 26 Apr 2007) New Revision: 2941 Modified: trunk/Lib/cluster/__init__.py Log: Add missing test definition in scipy.cluster Modified: trunk/Lib/cluster/__init__.py =================================================================== --- trunk/Lib/cluster/__init__.py 2007-04-26 07:59:02 UTC (rev 2940) +++ trunk/Lib/cluster/__init__.py 2007-04-26 08:34:53 UTC (rev 2941) @@ -7,3 +7,5 @@ __all__ = ['vq'] import vq +from numpy.testing import NumpyTest +test = NumpyTest().test From scipy-svn at scipy.org Thu Apr 26 04:56:21 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 26 Apr 2007 03:56:21 -0500 (CDT) Subject: [Scipy-svn] r2942 - trunk/Lib/cluster/tests Message-ID: <20070426085621.3EAA339C044@new.scipy.org> Author: cdavid Date: 2007-04-26 03:56:17 -0500 (Thu, 26 Apr 2007) New Revision: 2942 Modified: trunk/Lib/cluster/tests/test_vq.py Log: Set correctly filepath for test data in scipy.clusters Modified: trunk/Lib/cluster/tests/test_vq.py =================================================================== --- trunk/Lib/cluster/tests/test_vq.py 2007-04-26 08:34:53 UTC (rev 2941) +++ trunk/Lib/cluster/tests/test_vq.py 2007-04-26 08:56:17 UTC (rev 2942) @@ -1,7 +1,7 @@ #! /usr/bin/env python # David Cournapeau -# Last Change: Thu Apr 26 04:00 PM 2007 J +# Last Change: Thu Apr 26 05:00 PM 2007 J # For now, just copy the tests from sandbox.pyem, so we can check that # kmeans works OK for trivial examples. @@ -15,10 +15,12 @@ from cluster.vq import kmeans, kmeans_, py_vq, py_vq2 restore_path() -# #Optional: -# set_local_path() -# # import modules that are located in the same directory as this file. -# restore_path() +#Optional: +set_local_path() +# import modules that are located in the same directory as this file. 
+import os.path +DATAFILE1 = os.path.join(sys.path[0], "data.txt") +restore_path() # Global data X = N.array([[3.0, 3], [4, 3], [4, 2], @@ -68,7 +70,7 @@ def check_kmeans_lost_cluster(self, level=1): """This will cause kmean to have a cluster with no points.""" - data = N.fromfile(open("data.txt"), sep = ", ") + data = N.fromfile(open(DATAFILE1), sep = ", ") data = data.reshape((200, 2)) initk = N.array([[-1.8127404, -0.67128041], [ 2.04621601, 0.07401111], [-2.31149087,-0.05160469]]) From scipy-svn at scipy.org Thu Apr 26 06:01:41 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 26 Apr 2007 05:01:41 -0500 (CDT) Subject: [Scipy-svn] r2943 - trunk/Lib/cluster Message-ID: <20070426100141.0B88539C07A@new.scipy.org> Author: cdavid Date: 2007-04-26 05:01:36 -0500 (Thu, 26 Apr 2007) New Revision: 2943 Modified: trunk/Lib/cluster/vq.py Log: Convert docstrings to new numpy format Modified: trunk/Lib/cluster/vq.py =================================================================== --- trunk/Lib/cluster/vq.py 2007-04-26 08:56:17 UTC (rev 2942) +++ trunk/Lib/cluster/vq.py 2007-04-26 10:01:36 UTC (rev 2943) @@ -15,8 +15,11 @@ Train a codebook for mimimum distortion using the kmeans algorithm """ +__docformat__ = 'restructuredtext' + __all__ = ['whiten', 'vq', 'kmeans'] + from numpy.random import randint from numpy import shape, zeros, sqrt, argmin, minimum, array, \ newaxis, arange, compress, equal, common_type, single, double, take, \ @@ -24,47 +27,44 @@ import numpy as N def whiten(obs): - """ Normalize a group of observations on a per feature basis + """ Normalize a group of observations on a per feature basis. - Description + Before running kmeans algorithms, it is beneficial to "whiten", or + scale, the observation data on a per feature basis. This is done + by dividing each feature by its standard deviation across all + observations. - Before running kmeans algorithms, it is beneficial to "whiten", or - scale, the observation data on a per feature basis. This is done - by dividing each feature by its standard deviation across all - observations. + :Parameters: + obs : ndarray + Each row of the array is an observation. The + columns are the "features" seen during each observation + :: - Arguments + # f0 f1 f2 + obs = [[ 1., 1., 1.], #o0 + [ 2., 2., 2.], #o1 + [ 3., 3., 3.], #o2 + [ 4., 4., 4.]]) #o3 - obs -- 2D array. - Each row of the array is an observation. The - columns are the "features" seen during each observation - - # f0 f1 f2 - obs = [[ 1., 1., 1.], #o0 - [ 2., 2., 2.], #o1 - [ 3., 3., 3.], #o2 - [ 4., 4., 4.]]) #o3 - XXX perhaps should have an axis variable here. - Outputs + :Returns: + result : ndarray + Contains the values in obs scaled by the standard devation + of each column. - result -- 2D array. - Contains the values in obs scaled by the standard devation - of each column. + Examples + -------- - Test - - >>> from numpy import array - >>> from scipy.cluster.vq import whiten - >>> features = array([[ 1.9,2.3,1.7], - ... [ 1.5,2.5,2.2], - ... [ 0.8,0.6,1.7,]]) - >>> whiten(features) - array([[ 3.41250074, 2.20300046, 5.88897275], - [ 2.69407953, 2.39456571, 7.62102355], - [ 1.43684242, 0.57469577, 5.88897275]]) - + >>> from numpy import array + >>> from scipy.cluster.vq import whiten + >>> features = array([[ 1.9,2.3,1.7], + ... [ 1.5,2.5,2.2], + ... 
[ 0.8,0.6,1.7,]]) + >>> whiten(features) + array([[ 3.41250074, 2.20300046, 5.88897275], + [ 2.69407953, 2.39456571, 7.62102355], + [ 1.43684242, 0.57469577, 5.88897275]]) """ std_dev = std(obs, axis=0) return obs / std_dev @@ -72,57 +72,56 @@ def vq(obs, code_book): """ Vector Quantization: assign features sets to codes in a code book. - Description: - Vector quantization determines which code in the code book best - represents an observation of a target. The features of each - observation are compared to each code in the book, and assigned - the one closest to it. The observations are contained in the obs - array. These features should be "whitened," or nomalized by the - standard deviation of all the features before being quantized. - The code book can be created using the kmeans algorithm or - something similar. + Vector quantization determines which code in the code book best represents + an observation of a target. The features of each observation are compared + to each code in the book, and assigned the one closest to it. The + observations are contained in the obs array. These features should be + "whitened," or nomalized by the standard deviation of all the features + before being quantized. The code book can be created using the kmeans + algorithm or something similar. - Note: - This currently forces 32 bit math precision for speed. Anyone know - of a situation where this undermines the accuracy of the algorithm? + :Parameters: + obs : ndarray + Each row of the array is an observation. The columns are the + "features" seen during each observation The features must be + whitened first using the whiten function or something equivalent. + code_book : ndarray. + The code book is usually generated using the kmeans algorithm. + Each row of the array holds a different code, and the columns are + the features of the code. + :: - Arguments: - obs -- 2D array. - Each row of the array is an observation. The - columns are the "features" seen during each observation - The features must be whitened first using the - whiten function or something equivalent. - code_book -- 2D array. - The code book is usually generated using the kmeans - algorithm. Each row of the array holds a different - code, and the columns are the features of the code. - # f0 f1 f2 f3 - code_book = [[ 1., 2., 3., 4.], #c0 - [ 1., 2., 3., 4.], #c1 - [ 1., 2., 3., 4.]]) #c2 - Outputs: - code -- 1D array. - If obs is a NxM array, then a length N array - is returned that holds the selected code book index for - each observation. - dist -- 1D array. - The distortion (distance) between the observation and - its nearest code - Reference + # f0 f1 f2 f3 + code_book = [[ 1., 2., 3., 4.], #c0 + [ 1., 2., 3., 4.], #c1 + [ 1., 2., 3., 4.]]) #c2 - Test + :Returns: + code : ndarray + If obs is a NxM array, then a length N array is returned that holds + the selected code book index for each observation. + dist : ndarray + The distortion (distance) between the observation and its nearest + code - >>> from numpy import array - >>> from scipy.cluster.vq import vq - >>> code_book = array([[1.,1.,1.], - ... [2.,2.,2.]]) - >>> features = array([[ 1.9,2.3,1.7], - ... [ 1.5,2.5,2.2], - ... [ 0.8,0.6,1.7]]) - >>> vq(features,code_book) - (array([1, 1, 0],'i'), array([ 0.43588989, 0.73484692, 0.83066239])) + Notes + ----- + This currently forces 32 bit math precision for speed. Anyone know + of a situation where this undermines the accuracy of the algorithm? 
+ Examples + -------- + >>> from numpy import array + >>> from scipy.cluster.vq import vq + >>> code_book = array([[1.,1.,1.], + ... [2.,2.,2.]]) + >>> features = array([[ 1.9,2.3,1.7], + ... [ 1.5,2.5,2.2], + ... [ 0.8,0.6,1.7]]) + >>> vq(features,code_book) + (array([1, 1, 0],'i'), array([ 0.43588989, 0.73484692, 0.83066239])) + """ try: import _vq @@ -225,32 +224,36 @@ return code, min_dist def kmeans_(obs, guess, thresh=1e-5): - """ See kmeans + """ "raw" version of kmeans. - Outputs + :Returns: + code_book : + the lowest distortion codebook found. + avg_dist : + the average distance a observation is from a code in the book. + Lower means the code_book matches the data better. - code_book -- the lowest distortion codebook found. - avg_dist -- the average distance a observation is - from a code in the book. Lower means - the code_book matches the data better. + :SeeAlso: + - kmeans : wrapper around kmeans XXX should have an axis variable here. - Test + Examples + -------- - Note: not whitened in this example. + Note: not whitened in this example. - >>> from numpy import array - >>> from scipy.cluster.vq import kmeans_ - >>> features = array([[ 1.9,2.3], - ... [ 1.5,2.5], - ... [ 0.8,0.6], - ... [ 0.4,1.8], - ... [ 1.0,1.0]]) - >>> book = array((features[0],features[2])) - >>> kmeans_(features,book) - (array([[ 1.7 , 2.4 ], - [ 0.73333333, 1.13333333]]), 0.40563916697728591) + >>> from numpy import array + >>> from scipy.cluster.vq import kmeans_ + >>> features = array([[ 1.9,2.3], + ... [ 1.5,2.5], + ... [ 0.8,0.6], + ... [ 0.4,1.8], + ... [ 1.0,1.0]]) + >>> book = array((features[0],features[2])) + >>> kmeans_(features,book) + (array([[ 1.7 , 2.4 ], + [ 0.73333333, 1.13333333]]), 0.40563916697728591) """ @@ -278,67 +281,61 @@ return code_book, avg_dist[-1] def kmeans(obs, k_or_guess, iter=20, thresh=1e-5): - """ Generate a code book with minimum distortion + """ Generate a code book with minimum distortion. - Description - - Arguments - - obs -- 2D array - Each row of the array is an observation. The - columns are the "features" seen during each observation - The features must be whitened first using the - whiten function or something equivalent. - k_or_guess -- integer or 2D array. - If integer, it is the number of code book elements. - If a 2D array, the array is used as the intial guess for - the code book. The array should have k rows, and the - same number of columns (features) as the obs array. - iter -- integer. - The number of times to restart the kmeans algorithm with - a new initial guess. If k_or_guess is a 2D array (codebook), - this argument is ignored and only 1 iteration is run. - thresh -- float - Terminate each kmeans run when the distortion change from - one iteration to the next is less than this value. - Outputs - - codesbook -- 2D array. + :Parameters: + obs : ndarray + Each row of the array is an observation. The columns are the + "features" seen during each observation The features must be + whitened first using the whiten function or something equivalent. + k_or_guess : int or ndarray + If integer, it is the number of code book elements. If a 2D array, + the array is used as the intial guess for the code book. The array + should have k rows, and the same number of columns (features) as + the obs array. + iter : int + The number of times to restart the kmeans algorithm with a new + initial guess. If k_or_guess is a 2D array (codebook), this + argument is ignored and only 1 iteration is run. 
+ thresh : float + Terminate each kmeans run when the distortion change from one + iteration to the next is less than this value. + :Returns: + codesbook : ndarray The codes that best fit the observation - distortion -- float + distortion : float The distortion between the observations and the codes. - Reference + Examples + -------- - Test + ("Not checked carefully for accuracy..." he said sheepishly) - ("Not checked carefully for accuracy..." he said sheepishly) + >>> from numpy import array + >>> from scipy.cluster.vq import vq, kmeans + >>> features = array([[ 1.9,2.3], + ... [ 1.5,2.5], + ... [ 0.8,0.6], + ... [ 0.4,1.8], + ... [ 0.1,0.1], + ... [ 0.2,1.8], + ... [ 2.0,0.5], + ... [ 0.3,1.5], + ... [ 1.0,1.0]]) + >>> whitened = whiten(features) + >>> book = array((whitened[0],whitened[2])) + >>> kmeans(whitened,book) + (array([[ 2.3110306 , 2.86287398], + [ 0.93218041, 1.24398691]]), 0.85684700941625547) - >>> from numpy import array - >>> from scipy.cluster.vq import vq, kmeans - >>> features = array([[ 1.9,2.3], - ... [ 1.5,2.5], - ... [ 0.8,0.6], - ... [ 0.4,1.8], - ... [ 0.1,0.1], - ... [ 0.2,1.8], - ... [ 2.0,0.5], - ... [ 0.3,1.5], - ... [ 1.0,1.0]]) - >>> whitened = whiten(features) - >>> book = array((whitened[0],whitened[2])) - >>> kmeans(whitened,book) - (array([[ 2.3110306 , 2.86287398], - [ 0.93218041, 1.24398691]]), 0.85684700941625547) + >>> import RandomArray + >>> RandomArray.seed(1000,2000) + >>> codes = 3 + >>> kmeans(whitened,codes) + (array([[ 2.3110306 , 2.86287398], + [ 1.32544402, 0.65607529], + [ 0.40782893, 2.02786907]]), 0.5196582527686241) - >>> import RandomArray - >>> RandomArray.seed(1000,2000) - >>> codes = 3 - >>> kmeans(whitened,codes) - (array([[ 2.3110306 , 2.86287398], - [ 1.32544402, 0.65607529], - [ 0.40782893, 2.02786907]]), 0.5196582527686241) - """ if int(iter) < 1: raise ValueError, 'iter must be >= to 1.' From scipy-svn at scipy.org Thu Apr 26 06:28:30 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 26 Apr 2007 05:28:30 -0500 (CDT) Subject: [Scipy-svn] r2944 - trunk/Lib/cluster Message-ID: <20070426102830.49F6339C20E@new.scipy.org> Author: cdavid Date: 2007-04-26 05:28:26 -0500 (Thu, 26 Apr 2007) New Revision: 2944 Modified: trunk/Lib/cluster/vq.py Log: Some minor cosmetic changes Modified: trunk/Lib/cluster/vq.py =================================================================== --- trunk/Lib/cluster/vq.py 2007-04-26 10:01:36 UTC (rev 2943) +++ trunk/Lib/cluster/vq.py 2007-04-26 10:28:26 UTC (rev 2944) @@ -211,11 +211,16 @@ min_dist[i] gives the distance between the ith observation and its corresponding code. """ - No, Nf = shape(obs) #No = observation count, Nf = feature count + d = shape(obs)[1] + # code books and observations should have same number of features - assert(Nf == code_book.shape[1]) - diff = obs[newaxis,:,:]-code_book[:,newaxis,:] - dist = sqrt(N.sum(diff*diff, -1)) + if not d == code_book.shape[1]: + raise ValueError(""" + code book(%d) and obs(%d) should have the same + number of features (eg columns)""" % (code_book.shape[1], d)) + + diff = obs[newaxis, :, :] - code_book[:, newaxis, :] + dist = sqrt(N.sum(diff * diff, -1)) code = argmin(dist, 0) min_dist = minimum.reduce(dist, 0) #the next line I think is equivalent # - and should be faster @@ -223,7 +228,7 @@ # much difference. return code, min_dist -def kmeans_(obs, guess, thresh=1e-5): +def _kmeans(obs, guess, thresh=1e-5): """ "raw" version of kmeans. :Returns: @@ -244,37 +249,37 @@ Note: not whitened in this example. 
>>> from numpy import array - >>> from scipy.cluster.vq import kmeans_ + >>> from scipy.cluster.vq import _kmeans >>> features = array([[ 1.9,2.3], ... [ 1.5,2.5], ... [ 0.8,0.6], ... [ 0.4,1.8], ... [ 1.0,1.0]]) >>> book = array((features[0],features[2])) - >>> kmeans_(features,book) + >>> _kmeans(features,book) (array([[ 1.7 , 2.4 ], [ 0.73333333, 1.13333333]]), 0.40563916697728591) """ - code_book = array(guess,copy=True) + code_book = array(guess, copy = True) Nc = code_book.shape[0] - avg_dist=[] + avg_dist = [] diff = thresh+1. - while diff>thresh: + while diff > thresh: #compute membership and distances between obs and code_book - obs_code, distort = vq(obs,code_book) - avg_dist.append(mean(distort,axis=-1)) + obs_code, distort = vq(obs, code_book) + avg_dist.append(mean(distort, axis=-1)) #recalc code_book as centroids of associated obs if(diff > thresh): has_members = [] for i in arange(Nc): - cell_members = compress(equal(obs_code,i),obs,0) + cell_members = compress(equal(obs_code, i), obs, 0) if cell_members.shape[0] > 0: - code_book[i] = mean(cell_members,0) + code_book[i] = mean(cell_members, 0) has_members.append(i) #remove code_books that didn't have any members - code_book = take(code_book,has_members,0) + code_book = take(code_book, has_members, 0) if len(avg_dist) > 1: diff = avg_dist[-2] - avg_dist[-1] #print avg_dist @@ -306,6 +311,10 @@ distortion : float The distortion between the observations and the codes. + :SeeAlso: + - kmeans2: similar function, but with more options for initialization, + and returns label of each observation + Examples -------- @@ -340,21 +349,20 @@ if int(iter) < 1: raise ValueError, 'iter must be >= to 1.' if type(k_or_guess) == type(array([])): - guess = k_or_guess - result = kmeans_(obs,guess,thresh=thresh) + guess = k_or_guess + result = _kmeans(obs, guess, thresh = thresh) else: - best_dist = 100000 #initialize best distance value to a large value + #initialize best distance value to a large value + best_dist = 100000 No = obs.shape[0] k = k_or_guess #print 'kmeans iter: ', for i in range(iter): - #print i, #the intial code book is randomly selected from observations - guess = take(obs,randint(0,No,k),0) - book,dist = kmeans_(obs,guess,thresh=thresh) + guess = take(obs, randint(0, No, k), 0) + book, dist = _kmeans(obs, guess, thresh = thresh) if dist < best_dist: best_book = book best_dist = dist - #print - result = best_book,best_dist + result = best_book, best_dist return result From scipy-svn at scipy.org Thu Apr 26 08:24:26 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 26 Apr 2007 07:24:26 -0500 (CDT) Subject: [Scipy-svn] r2945 - in trunk/Lib/cluster: . tests Message-ID: <20070426122426.87CA439C227@new.scipy.org> Author: cdavid Date: 2007-04-26 07:24:16 -0500 (Thu, 26 Apr 2007) New Revision: 2945 Modified: trunk/Lib/cluster/tests/test_vq.py trunk/Lib/cluster/vq.py Log: Add kmeans2, a more sophisticated kmeans implementation (different initialization methods available) Modified: trunk/Lib/cluster/tests/test_vq.py =================================================================== --- trunk/Lib/cluster/tests/test_vq.py 2007-04-26 10:28:26 UTC (rev 2944) +++ trunk/Lib/cluster/tests/test_vq.py 2007-04-26 12:24:16 UTC (rev 2945) @@ -1,7 +1,7 @@ #! /usr/bin/env python # David Cournapeau -# Last Change: Thu Apr 26 05:00 PM 2007 J +# Last Change: Thu Apr 26 09:00 PM 2007 J # For now, just copy the tests from sandbox.pyem, so we can check that # kmeans works OK for trivial examples. 
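The r2945 log above introduces kmeans2; a minimal sketch of how the new interface (documented in the vq.py hunks further down) is meant to be called, with random placeholder data standing in for the data.txt fixture (illustrative only):

    import numpy as N
    from scipy.cluster.vq import kmeans2

    N.random.seed(0)
    data = N.random.randn(200, 2)   # placeholder for the 200x2 array loaded from data.txt

    # k passed as an int, exercising the two initialization methods covered by
    # check_kmeans2_init: 'points' picks k observations from the data, while
    # 'random' draws k samples from a Gaussian fitted to the data.
    centroids, labels = kmeans2(data, 3, minit='points')
    centroids, labels = kmeans2(data, 3, minit='random')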
@@ -12,7 +12,7 @@ import numpy as N set_package_path() -from cluster.vq import kmeans, kmeans_, py_vq, py_vq2 +from cluster.vq import kmeans, kmeans2, py_vq, py_vq2, _py_vq_1d restore_path() #Optional: @@ -60,22 +60,60 @@ except ImportError: print "== Error while importing _vq, not testing C imp of vq ==" + #def check_vq_1d(self, level=1): + # data = X[:, 0] + # initc = data[:3] + # code = initc.copy() + # print _py_vq_1d(data, initc) + class test_kmean(NumpyTestCase): - def check_kmeans(self, level=1): + def check_kmeans_simple(self, level=1): initc = N.concatenate(([[X[0]], [X[1]], [X[2]]])) code = initc.copy() - #code1 = kmeans(X, code, iter = 1)[0] + code1 = kmeans(X, code, iter = 1)[0] - #assert_array_almost_equal(code1, CODET2) + assert_array_almost_equal(code1, CODET2) def check_kmeans_lost_cluster(self, level=1): """This will cause kmean to have a cluster with no points.""" data = N.fromfile(open(DATAFILE1), sep = ", ") data = data.reshape((200, 2)) - initk = N.array([[-1.8127404, -0.67128041], [ 2.04621601, 0.07401111], + initk = N.array([[-1.8127404, -0.67128041], + [ 2.04621601, 0.07401111], [-2.31149087,-0.05160469]]) res = kmeans(data, initk) + def check_kmeans2_simple(self, level=1): + """Testing simple call to kmeans2 and its results.""" + initc = N.concatenate(([[X[0]], [X[1]], [X[2]]])) + code = initc.copy() + code1 = kmeans2(X, code, niter = 1)[0] + code2 = kmeans2(X, code, niter = 2)[0] + + assert_array_almost_equal(code1, CODET1) + assert_array_almost_equal(code2, CODET2) + + #def check_kmeans2_rank1(self, level=1): + # """Testing simple call to kmeans2 with rank 1 data.""" + # data = N.fromfile(open(DATAFILE1), sep = ", ") + # data = data.reshape((200, 2)) + # data1 = data[:, 0] + # data2 = data[:, 1] + + # initc = data1[:3] + # code = initc.copy() + # print _py_vq_1d(data1, code) + # code1 = kmeans2(data1, code, niter = 1)[0] + # code2 = kmeans2(data1, code, niter = 2)[0] + + def check_kmeans2_init(self, level = 1): + """Testing that kmeans2 init methods work.""" + data = N.fromfile(open(DATAFILE1), sep = ", ") + data = data.reshape((200, 2)) + + kmeans2(data, 3, minit = 'random') + kmeans2(data, 3, minit = 'points') + if __name__ == "__main__": NumpyTest().run() Modified: trunk/Lib/cluster/vq.py =================================================================== --- trunk/Lib/cluster/vq.py 2007-04-26 10:28:26 UTC (rev 2944) +++ trunk/Lib/cluster/vq.py 2007-04-26 12:24:16 UTC (rev 2945) @@ -19,6 +19,7 @@ __all__ = ['whiten', 'vq', 'kmeans'] +import warnings from numpy.random import randint from numpy import shape, zeros, sqrt, argmin, minimum, array, \ @@ -146,7 +147,7 @@ :Parameters: obs : ndarray - Expect a rank 2 array. Each row is one observation. + Expects a rank 2 array. Each row is one observation. code_book : ndarray Code book to use. Same format than obs. Should have same number of features (eg columns) than obs. 
@@ -168,10 +169,18 @@ """ # n = number of observations # d = number of features - (n, d) = shape(obs) + if N.ndim(obs) == 1: + if not N.ndim(obs) == N.ndim(code_book): + raise ValueError("Observation and code_book should have the same rank") + else: + return _py_vq_1d(obs, code_book) + else: + (n, d) = shape(obs) - # code books and observations should have same number of features - if not d == code_book.shape[1]: + # code books and observations should have same number of features and same shape + if not N.ndim(obs) == N.ndim(code_book): + raise ValueError("Observation and code_book should have the same rank") + elif not d == code_book.shape[1]: raise ValueError(""" code book(%d) and obs(%d) should have the same number of features (eg columns)""" % (code_book.shape[1], d)) @@ -185,6 +194,35 @@ return code, sqrt(min_dist) +def _py_vq_1d(obs, code_book): + """ Python version of vq algorithm for rank 1 only. + + :Parameters: + obs : ndarray + Expects a rank 1 array. Each item is one observation. + code_book : ndarray + Code book to use. Same format than obs. Should rank 1 too. + + :Returns: + code : ndarray + code[i] gives the label of the ith obversation, that its code is + code_book[code[i]]. + mind_dist : ndarray + min_dist[i] gives the distance between the ith observation and its + corresponding code. + """ + raise RuntimeError("_py_vq_1d buggy, do not use rank 1 arrays for now") + n = obs.size + nc = code_book.size + dist = N.zeros((n, nc)) + for i in range(nc): + dist[:, i] = N.sum(obs - code_book[i]) + print dist + code = argmin(dist) + min_dist= dist[code] + + return code, sqrt(min_dist) + def py_vq2(obs, code_book): """2nd Python version of vq algorithm. @@ -366,3 +404,147 @@ best_dist = dist result = best_book, best_dist return result + +def _kpoints(data, k): + """Pick k points at random in data (one row = one observation). + + This is done by taking the k first values of a random permutation of 1..N + where N is the number of observation. + + :Parameters: + data : ndarray + Expect a rank 1 or 2 array. Rank 1 are assumed to describe one + dimensional data, rank 2 multidimensional data, in which case one + row is one observation. + k : int + Number of samples to generate. + """ + if data.ndim > 1: + n = data.shape[0] + else: + n = data.size + + p = N.random.permutation(n) + x = data[p[:k], :].copy() + + return x + +def _krandinit(data, k): + """Returns k samples of a random variable which parameters depend on data. + + More precisely, it returns k observations sampled from a Gaussian random + variable which mean and covariances are the one estimated from data. + + :Parameters: + data : ndarray + Expect a rank 1 or 2 array. Rank 1 are assumed to describe one + dimensional data, rank 2 multidimensional data, in which case one + row is one observation. + k : int + Number of samples to generate. + """ + mu = N.mean(data, 0) + cov = N.cov(data, rowvar = 0) + + # k rows, d cols (one row = one obs) + # Generate k sample of a random variable ~ Gaussian(mu, cov) + x = N.random.randn(k, mu.size) + x = N.dot(x, N.linalg.cholesky(cov).T) + mu + + return x + +_valid_init_meth = {'random': _krandinit, 'points': _kpoints} + +def kmeans2(data, k, minit = 'random', niter = 10): + """Classify a set of points into k clusters using kmean algorithm. + + The algorithm works by minimizing the euclidian distance between data points + of cluster means. This version is more complete than kmean (has several + initialisation methods). + + :Parameters: + data : ndarray + Expect a rank 1 or 2 array. 
Rank 1 are assumed to describe one + dimensional data, rank 2 multidimensional data, in which case one + row is one observation. + k : int or ndarray + Number of clusters. If a ndarray is given instead, it is + interpreted as initial cluster to use instead. + minit : string + Method for initialization. Available methods are random, points and + uniform: + + random uses k points drawn from a Gaussian random generator which + mean and variances are estimated from the data. + + points choses k points at random from the points in data. + + uniform choses k points from the data such are they form a uniform + grid od the dataset. + + niter : int + Number of iterations to run. + + :Returns: + clusters : ndarray + the found clusters (one cluster per row). + label : ndarray + label[i] gives the label of the ith obversation, that its centroid is + cluster[label[i]]. + + """ + # If data is rank 1, then we have 1 dimension problem. + nd = N.ndim(data) + if nd == 1: + d = 1 + elif nd == 2: + d = data.shape[1] + else: + raise ValueError("Input of rank > 2 not supported") + + # If k is not a single value, then it should be compatible with data's + # shape + if N.size(k) > 1: + if not nd == N.ndim(k): + raise ValueError("k is not an int and has not same rank than data") + if d == 1: + nc = len(k) + else: + (nc, dc) = k.shape + if not dc == d: + raise ValueError("k is not an int and has not same rank than\ + data") + clusters = k.copy() + else: + nc = k + try: + init = _valid_init_meth[minit] + except KeyError: + raise ValueError("unknown init method %s" % str(minit)) + clusters = init(data, k) + + assert not niter == 0 + return _kmeans2(data, clusters, niter, nc) + +def _kmeans2(data, code, niter, nc): + """ "raw" version of kmeans2. Do not use directly. + + Run kmeans with a given initial codebook. + + :undocumented + """ + for i in range(niter): + # Compute the nearest neighbour for each obs + # using the current code book + label = vq(data, code)[0] + # Update the code by computing centroids using the new code book + for j in range(nc): + mbs = N.where(label==j) + if mbs[0].size > 0: + code[j, :] = N.mean(data[mbs], axis=0) + else: + warnings.warn("one cluster has no member anymore ! 
You should"\ + " rerun kmean with different initialization !") + + return code, label + From scipy-svn at scipy.org Thu Apr 26 08:47:55 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 26 Apr 2007 07:47:55 -0500 (CDT) Subject: [Scipy-svn] r2946 - trunk/Lib/cluster Message-ID: <20070426124755.9466439C0C2@new.scipy.org> Author: cdavid Date: 2007-04-26 07:47:51 -0500 (Thu, 26 Apr 2007) New Revision: 2946 Modified: trunk/Lib/cluster/vq.py Log: Add kmeans2 to the official functions of vq (necessary for epydoc) Modified: trunk/Lib/cluster/vq.py =================================================================== --- trunk/Lib/cluster/vq.py 2007-04-26 12:24:16 UTC (rev 2945) +++ trunk/Lib/cluster/vq.py 2007-04-26 12:47:51 UTC (rev 2946) @@ -17,7 +17,7 @@ """ __docformat__ = 'restructuredtext' -__all__ = ['whiten', 'vq', 'kmeans'] +__all__ = ['whiten', 'vq', 'kmeans', 'kmeans2'] import warnings From scipy-svn at scipy.org Mon Apr 30 09:56:02 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Mon, 30 Apr 2007 08:56:02 -0500 (CDT) Subject: [Scipy-svn] r2947 - trunk/Lib/ndimage/src Message-ID: <20070430135602.5B1BF39C0BE@new.scipy.org> Author: stefan Date: 2007-04-30 08:55:42 -0500 (Mon, 30 Apr 2007) New Revision: 2947 Modified: trunk/Lib/ndimage/src/nd_image.h Log: Rely on numpy to determine width of LONG. Modified: trunk/Lib/ndimage/src/nd_image.h =================================================================== --- trunk/Lib/ndimage/src/nd_image.h 2007-04-26 12:47:51 UTC (rev 2946) +++ trunk/Lib/ndimage/src/nd_image.h 2007-04-30 13:55:42 UTC (rev 2947) @@ -61,12 +61,8 @@ tComplex128=PyArray_COMPLEX128, tObject=PyArray_OBJECT, /* placeholder... does nothing */ tMaxType=PyArray_NTYPES, - tDefault = tFloat64, -#if NPY_BITSOF_LONG == 64 - tLong = tInt64, -#else - tLong = tInt32, -#endif + tDefault=PyArray_FLOAT64, + tLong=PyArray_LONG, } NumarrayType; #define NI_MAXDIM NPY_MAXDIMS From scipy-svn at scipy.org Mon Apr 30 10:50:51 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Mon, 30 Apr 2007 09:50:51 -0500 (CDT) Subject: [Scipy-svn] r2948 - in trunk/Lib/cluster: . tests Message-ID: <20070430145051.AA20039C2C5@new.scipy.org> Author: stefan Date: 2007-04-30 09:49:05 -0500 (Mon, 30 Apr 2007) New Revision: 2948 Modified: trunk/Lib/cluster/tests/test_vq.py trunk/Lib/cluster/tests/vq_test.py trunk/Lib/cluster/vq.py Log: Comply with the style guide (PEP08). Fix vq_test for numpy. Modified: trunk/Lib/cluster/tests/test_vq.py =================================================================== --- trunk/Lib/cluster/tests/test_vq.py 2007-04-30 13:55:42 UTC (rev 2947) +++ trunk/Lib/cluster/tests/test_vq.py 2007-04-30 14:49:05 UTC (rev 2948) @@ -19,98 +19,98 @@ set_local_path() # import modules that are located in the same directory as this file. 
import os.path -DATAFILE1 = os.path.join(sys.path[0], "data.txt") +DATAFILE1 = os.path.join(sys.path[0], "data.txt") restore_path() # Global data X = N.array([[3.0, 3], [4, 3], [4, 2], - [9, 2], [5, 1], [6, 2], [9, 4], - [5, 2], [5, 4], [7, 4], [6, 5]]) + [9, 2], [5, 1], [6, 2], [9, 4], + [5, 2], [5, 4], [7, 4], [6, 5]]) CODET1 = N.array([[3.0000, 3.0000], - [6.2000, 4.0000], - [5.8000, 1.8000]]) - -CODET2 = N.array([[11.0/3, 8.0/3], - [6.7500, 4.2500], - [6.2500, 1.7500]]) + [6.2000, 4.0000], + [5.8000, 1.8000]]) +CODET2 = N.array([[11.0/3, 8.0/3], + [6.7500, 4.2500], + [6.2500, 1.7500]]) + LABEL1 = N.array([0, 1, 2, 2, 2, 2, 1, 2, 1, 1, 1]) class test_vq(NumpyTestCase): def check_py_vq(self, level=1): - initc = N.concatenate(([[X[0]], [X[1]], [X[2]]])) - code = initc.copy() - label1 = py_vq(X, initc)[0] + initc = N.concatenate(([[X[0]], [X[1]], [X[2]]])) + code = initc.copy() + label1 = py_vq(X, initc)[0] assert_array_equal(label1, LABEL1) def check_py_vq2(self, level=1): - initc = N.concatenate(([[X[0]], [X[1]], [X[2]]])) - code = initc.copy() - label1 = py_vq2(X, initc)[0] + initc = N.concatenate(([[X[0]], [X[1]], [X[2]]])) + code = initc.copy() + label1 = py_vq2(X, initc)[0] assert_array_equal(label1, LABEL1) def check_vq(self, level=1): - initc = N.concatenate(([[X[0]], [X[1]], [X[2]]])) - code = initc.copy() + initc = N.concatenate(([[X[0]], [X[1]], [X[2]]])) + code = initc.copy() try: import _vq - label1 = _vq.double_vq(X, initc)[0] + label1 = _vq.double_vq(X, initc)[0] assert_array_equal(label1, LABEL1) except ImportError: print "== Error while importing _vq, not testing C imp of vq ==" #def check_vq_1d(self, level=1): - # data = X[:, 0] - # initc = data[:3] - # code = initc.copy() + # data = X[:, 0] + # initc = data[:3] + # code = initc.copy() # print _py_vq_1d(data, initc) class test_kmean(NumpyTestCase): def check_kmeans_simple(self, level=1): - initc = N.concatenate(([[X[0]], [X[1]], [X[2]]])) - code = initc.copy() - code1 = kmeans(X, code, iter = 1)[0] + initc = N.concatenate(([[X[0]], [X[1]], [X[2]]])) + code = initc.copy() + code1 = kmeans(X, code, iter = 1)[0] assert_array_almost_equal(code1, CODET2) def check_kmeans_lost_cluster(self, level=1): """This will cause kmean to have a cluster with no points.""" - data = N.fromfile(open(DATAFILE1), sep = ", ") - data = data.reshape((200, 2)) - initk = N.array([[-1.8127404, -0.67128041], - [ 2.04621601, 0.07401111], - [-2.31149087,-0.05160469]]) + data = N.fromfile(open(DATAFILE1), sep = ", ") + data = data.reshape((200, 2)) + initk = N.array([[-1.8127404, -0.67128041], + [ 2.04621601, 0.07401111], + [-2.31149087,-0.05160469]]) - res = kmeans(data, initk) + res = kmeans(data, initk) def check_kmeans2_simple(self, level=1): """Testing simple call to kmeans2 and its results.""" - initc = N.concatenate(([[X[0]], [X[1]], [X[2]]])) - code = initc.copy() - code1 = kmeans2(X, code, niter = 1)[0] - code2 = kmeans2(X, code, niter = 2)[0] + initc = N.concatenate(([[X[0]], [X[1]], [X[2]]])) + code = initc.copy() + code1 = kmeans2(X, code, niter = 1)[0] + code2 = kmeans2(X, code, niter = 2)[0] assert_array_almost_equal(code1, CODET1) assert_array_almost_equal(code2, CODET2) #def check_kmeans2_rank1(self, level=1): # """Testing simple call to kmeans2 with rank 1 data.""" - # data = N.fromfile(open(DATAFILE1), sep = ", ") - # data = data.reshape((200, 2)) - # data1 = data[:, 0] - # data2 = data[:, 1] + # data = N.fromfile(open(DATAFILE1), sep = ", ") + # data = data.reshape((200, 2)) + # data1 = data[:, 0] + # data2 = data[:, 1] - # initc = 
data1[:3] - # code = initc.copy() + # initc = data1[:3] + # code = initc.copy() # print _py_vq_1d(data1, code) - # code1 = kmeans2(data1, code, niter = 1)[0] - # code2 = kmeans2(data1, code, niter = 2)[0] + # code1 = kmeans2(data1, code, niter = 1)[0] + # code2 = kmeans2(data1, code, niter = 2)[0] def check_kmeans2_init(self, level = 1): """Testing that kmeans2 init methods work.""" - data = N.fromfile(open(DATAFILE1), sep = ", ") - data = data.reshape((200, 2)) + data = N.fromfile(open(DATAFILE1), sep = ", ") + data = data.reshape((200, 2)) kmeans2(data, 3, minit = 'random') kmeans2(data, 3, minit = 'points') Modified: trunk/Lib/cluster/tests/vq_test.py =================================================================== --- trunk/Lib/cluster/tests/vq_test.py 2007-04-30 13:55:42 UTC (rev 2947) +++ trunk/Lib/cluster/tests/vq_test.py 2007-04-30 14:49:05 UTC (rev 2948) @@ -1,5 +1,6 @@ -from numpy import * -import vq_c as vq +import numpy as N +from scipy.cluster import vq +#import vq_c as vq def python_vq(all_data,code_book): import time @@ -11,8 +12,8 @@ print ' first dist:', dist1[:5] print ' last codes:', codes1[-5:] print ' last dist:', dist1[-5:] - float_obs = all_data.astype(Float32) - float_code = code_book.astype(Float32) + float_obs = all_data.astype(N.float32) + float_code = code_book.astype(N.float32) t1 = time.time() codes1,dist1 = vq.vq(float_obs,float_code) t2 = time.time() @@ -33,13 +34,12 @@ return array(data) def main(): - import scipy.stats - scipy.stats.seed(1000,1000) + N.random.seed((1000,1000)) Ncodes = 40 Nfeatures = 16 Nobs = 4000 - code_book = RandomArray.normal(0,1,(Ncodes,Nfeatures)) - features = RandomArray.normal(0,1,(Nobs,Nfeatures)) + code_book = N.random.normal(0,1,(Ncodes,Nfeatures)) + features = N.random.normal(0,1,(Nobs,Nfeatures)) codes,dist = python_vq(features,code_book) if __name__ == '__main__': Modified: trunk/Lib/cluster/vq.py =================================================================== --- trunk/Lib/cluster/vq.py 2007-04-30 13:55:42 UTC (rev 2947) +++ trunk/Lib/cluster/vq.py 2007-04-30 14:49:05 UTC (rev 2948) @@ -66,6 +66,7 @@ array([[ 3.41250074, 2.20300046, 5.88897275], [ 2.69407953, 2.39456571, 7.62102355], [ 1.43684242, 0.57469577, 5.88897275]]) + """ std_dev = std(obs, axis=0) return obs / std_dev @@ -82,7 +83,7 @@ algorithm or something similar. :Parameters: - obs : ndarray + obs : ndarray Each row of the array is an observation. The columns are the "features" seen during each observation The features must be whitened first using the whiten function or something equivalent. @@ -143,7 +144,7 @@ """ Python version of vq algorithm. The algorithm simply computes the euclidian distance between each - observation and every frame in the code_book/ + observation and every frame in the code_book. :Parameters: obs : ndarray @@ -166,6 +167,7 @@ mind_dist : ndarray min_dist[i] gives the distance between the ith observation and its corresponding code. 
+ """ # n = number of observations # d = number of features @@ -175,21 +177,20 @@ else: return _py_vq_1d(obs, code_book) else: - (n, d) = shape(obs) + (n, d) = shape(obs) # code books and observations should have same number of features and same shape if not N.ndim(obs) == N.ndim(code_book): raise ValueError("Observation and code_book should have the same rank") elif not d == code_book.shape[1]: - raise ValueError(""" - code book(%d) and obs(%d) should have the same - number of features (eg columns)""" % (code_book.shape[1], d)) - - code = zeros(n, dtype = int) - min_dist = zeros(n) + raise ValueError("Code book(%d) and obs(%d) should have the same " \ + "number of features (eg columns)""" % (code_book.shape[1], d)) + + code = zeros(n, dtype=int) + min_dist = zeros(n) for i in range(n): - dist = N.sum((obs[i] - code_book) ** 2, 1) - code[i] = argmin(dist) + dist = N.sum((obs[i] - code_book) ** 2, 1) + code[i] = argmin(dist) min_dist[i] = dist[code[i]] return code, sqrt(min_dist) @@ -210,16 +211,17 @@ mind_dist : ndarray min_dist[i] gives the distance between the ith observation and its corresponding code. + """ raise RuntimeError("_py_vq_1d buggy, do not use rank 1 arrays for now") - n = obs.size - nc = code_book.size - dist = N.zeros((n, nc)) + n = obs.size + nc = code_book.size + dist = N.zeros((n, nc)) for i in range(nc): - dist[:, i] = N.sum(obs - code_book[i]) + dist[:,i] = N.sum(obs - code_book[i]) print dist - code = argmin(dist) - min_dist= dist[code] + code = argmin(dist) + min_dist = dist[code] return code, sqrt(min_dist) @@ -248,16 +250,17 @@ mind_dist : ndarray min_dist[i] gives the distance between the ith observation and its corresponding code. + """ d = shape(obs)[1] # code books and observations should have same number of features if not d == code_book.shape[1]: raise ValueError(""" - code book(%d) and obs(%d) should have the same + code book(%d) and obs(%d) should have the same number of features (eg columns)""" % (code_book.shape[1], d)) - - diff = obs[newaxis, :, :] - code_book[:, newaxis, :] + + diff = obs[newaxis,:,:] - code_book[:,newaxis,:] dist = sqrt(N.sum(diff * diff, -1)) code = argmin(dist, 0) min_dist = minimum.reduce(dist, 0) #the next line I think is equivalent @@ -324,7 +327,7 @@ return code_book, avg_dist[-1] def kmeans(obs, k_or_guess, iter=20, thresh=1e-5): - """ Generate a code book with minimum distortion. + """Generate a code book with minimum distortion. :Parameters: obs : ndarray @@ -356,10 +359,8 @@ Examples -------- - ("Not checked carefully for accuracy..." he said sheepishly) - >>> from numpy import array - >>> from scipy.cluster.vq import vq, kmeans + >>> from scipy.cluster.vq import vq, kmeans, whiten >>> features = array([[ 1.9,2.3], ... [ 1.5,2.5], ... [ 0.8,0.6], @@ -375,8 +376,8 @@ (array([[ 2.3110306 , 2.86287398], [ 0.93218041, 1.24398691]]), 0.85684700941625547) - >>> import RandomArray - >>> RandomArray.seed(1000,2000) + >>> from numpy import random + >>> random.seed((1000,2000)) >>> codes = 3 >>> kmeans(whitened,codes) (array([[ 2.3110306 , 2.86287398], @@ -387,8 +388,8 @@ if int(iter) < 1: raise ValueError, 'iter must be >= to 1.' 
if type(k_or_guess) == type(array([])): - guess = k_or_guess - result = _kmeans(obs, guess, thresh = thresh) + guess = k_or_guess + result = _kmeans(obs, guess, thresh = thresh) else: #initialize best distance value to a large value best_dist = 100000 @@ -397,8 +398,8 @@ #print 'kmeans iter: ', for i in range(iter): #the intial code book is randomly selected from observations - guess = take(obs, randint(0, No, k), 0) - book, dist = _kmeans(obs, guess, thresh = thresh) + guess = take(obs, randint(0, No, k), 0) + book, dist = _kmeans(obs, guess, thresh = thresh) if dist < best_dist: best_book = book best_dist = dist @@ -406,32 +407,33 @@ return result def _kpoints(data, k): - """Pick k points at random in data (one row = one observation). - + """Pick k points at random in data (one row = one observation). + This is done by taking the k first values of a random permutation of 1..N where N is the number of observation. - + :Parameters: data : ndarray Expect a rank 1 or 2 array. Rank 1 are assumed to describe one dimensional data, rank 2 multidimensional data, in which case one row is one observation. - k : int + k : int Number of samples to generate. + """ if data.ndim > 1: - n = data.shape[0] + n = data.shape[0] else: - n = data.size + n = data.size - p = N.random.permutation(n) - x = data[p[:k], :].copy() + p = N.random.permutation(n) + x = data[p[:k], :].copy() return x def _krandinit(data, k): """Returns k samples of a random variable which parameters depend on data. - + More precisely, it returns k observations sampled from a Gaussian random variable which mean and covariances are the one estimated from data. @@ -440,24 +442,25 @@ Expect a rank 1 or 2 array. Rank 1 are assumed to describe one dimensional data, rank 2 multidimensional data, in which case one row is one observation. - k : int + k : int Number of samples to generate. + """ - mu = N.mean(data, 0) + mu = N.mean(data, 0) cov = N.cov(data, rowvar = 0) # k rows, d cols (one row = one obs) # Generate k sample of a random variable ~ Gaussian(mu, cov) - x = N.random.randn(k, mu.size) - x = N.dot(x, N.linalg.cholesky(cov).T) + mu + x = N.random.randn(k, mu.size) + x = N.dot(x, N.linalg.cholesky(cov).T) + mu return x -_valid_init_meth = {'random': _krandinit, 'points': _kpoints} +_valid_init_meth = {'random': _krandinit, 'points': _kpoints} -def kmeans2(data, k, minit = 'random', niter = 10): +def kmeans2(data, k, minit='random', niter=10): """Classify a set of points into k clusters using kmean algorithm. - + The algorithm works by minimizing the euclidian distance between data points of cluster means. This version is more complete than kmean (has several initialisation methods). @@ -473,7 +476,7 @@ minit : string Method for initialization. Available methods are random, points and uniform: - + random uses k points drawn from a Gaussian random generator which mean and variances are estimated from the data. @@ -496,9 +499,9 @@ # If data is rank 1, then we have 1 dimension problem. 
nd = N.ndim(data) if nd == 1: - d = 1 + d = 1 elif nd == 2: - d = data.shape[1] + d = data.shape[1] else: raise ValueError("Input of rank > 2 not supported") @@ -508,43 +511,43 @@ if not nd == N.ndim(k): raise ValueError("k is not an int and has not same rank than data") if d == 1: - nc = len(k) + nc = len(k) else: - (nc, dc) = k.shape + (nc, dc) = k.shape if not dc == d: raise ValueError("k is not an int and has not same rank than\ data") - clusters = k.copy() + clusters = k.copy() else: - nc = k + nc = k try: - init = _valid_init_meth[minit] + init = _valid_init_meth[minit] except KeyError: raise ValueError("unknown init method %s" % str(minit)) - clusters = init(data, k) + clusters = init(data, k) assert not niter == 0 return _kmeans2(data, clusters, niter, nc) def _kmeans2(data, code, niter, nc): """ "raw" version of kmeans2. Do not use directly. - + Run kmeans with a given initial codebook. - + :undocumented + """ for i in range(niter): # Compute the nearest neighbour for each obs # using the current code book - label = vq(data, code)[0] + label = vq(data, code)[0] # Update the code by computing centroids using the new code book for j in range(nc): mbs = N.where(label==j) if mbs[0].size > 0: - code[j, :] = N.mean(data[mbs], axis=0) + code[j,:] = N.mean(data[mbs], axis=0) else: - warnings.warn("one cluster has no member anymore ! You should"\ - " rerun kmean with different initialization !") + warnings.warn("One of the clusters are empty. " \ + "Re-run kmean with a different initialization.") return code, label - From scipy-svn at scipy.org Mon Apr 30 12:12:54 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Mon, 30 Apr 2007 11:12:54 -0500 (CDT) Subject: [Scipy-svn] r2949 - trunk/Lib/sandbox/numexpr Message-ID: <20070430161254.9231639C042@new.scipy.org> Author: cookedm Date: 2007-04-30 11:12:50 -0500 (Mon, 30 Apr 2007) New Revision: 2949 Modified: trunk/Lib/sandbox/numexpr/interpreter.c Log: [numexpr] change op_signature to return an int instead of char Modified: trunk/Lib/sandbox/numexpr/interpreter.c =================================================================== --- trunk/Lib/sandbox/numexpr/interpreter.c 2007-04-30 14:49:05 UTC (rev 2948) +++ trunk/Lib/sandbox/numexpr/interpreter.c 2007-04-30 16:12:50 UTC (rev 2949) @@ -103,7 +103,7 @@ }; /* returns the sig of the nth op, '\0' if no more ops -1 on failure */ -static char op_signature(int op, int n) { +static int op_signature(int op, int n) { switch (op) { case OP_NOOP: break; @@ -512,8 +512,8 @@ { unsigned char *program; Py_ssize_t prog_len, n_buffers, n_inputs; - int rno, pc, arg, argloc, argno; - char sig, *fullsig, *signature; + int rno, pc, arg, argloc, argno, sig; + char *fullsig, *signature; if (PyString_AsStringAndSize(self->program, (char **)&program, &prog_len) < 0) { From scipy-svn at scipy.org Mon Apr 30 18:15:10 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Mon, 30 Apr 2007 17:15:10 -0500 (CDT) Subject: [Scipy-svn] r2950 - trunk/Lib/sandbox/timeseries Message-ID: <20070430221510.A11A839C0F0@new.scipy.org> Author: pierregm Date: 2007-04-30 17:15:08 -0500 (Mon, 30 Apr 2007) New Revision: 2950 Added: trunk/Lib/sandbox/timeseries/.project trunk/Lib/sandbox/timeseries/.pydevproject Log: Added: trunk/Lib/sandbox/timeseries/.project =================================================================== --- trunk/Lib/sandbox/timeseries/.project 2007-04-30 16:12:50 UTC (rev 2949) +++ trunk/Lib/sandbox/timeseries/.project 2007-04-30 22:15:08 UTC (rev 2950) @@ -0,0 +1,17 @@ + + + 
scipy_svn_timeseries + + + + + + org.python.pydev.PyDevBuilder + + + + + + org.python.pydev.pythonNature + + Added: trunk/Lib/sandbox/timeseries/.pydevproject =================================================================== --- trunk/Lib/sandbox/timeseries/.pydevproject 2007-04-30 16:12:50 UTC (rev 2949) +++ trunk/Lib/sandbox/timeseries/.pydevproject 2007-04-30 22:15:08 UTC (rev 2950) @@ -0,0 +1,13 @@ + + + + +python 2.4 + +/scipy_svn_timeseries +/scipy_svn_timeseries/mtimeseries + + +/home/backtopop/workspace/numpyalt/src + +
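The kmeans2 code shown above only appears through diffs (it is exported from scipy.cluster.vq in r2946 and restyled in r2948), so a short usage sketch may help. This sketch is illustrative only and not part of any commit: it assumes scipy.cluster.vq exactly as committed here (in particular the niter keyword and the 'random'/'points' values of minit), and the data, seed and variable names are made up for the example.

    import numpy as N
    from scipy.cluster.vq import whiten, kmeans2, vq

    # Two synthetic 2-D blobs, one observation per row (rank-2 input).
    N.random.seed(1234)
    data = N.vstack((N.random.randn(100, 2) + 5.0,
                     N.random.randn(100, 2) - 5.0))

    # Optionally rescale each feature to unit variance before clustering.
    wdata = whiten(data)

    # k given as an int: 2 clusters.  minit='points' starts from k rows of
    # the data (_kpoints); minit='random' draws Gaussian samples fitted to
    # the data (_krandinit) -- the two entries of _valid_init_meth.
    centroids, labels = kmeans2(wdata, 2, minit='points', niter=10)

    # labels[i] indexes the centroid nearest to wdata[i], i.e. the ith
    # observation belongs to centroids[labels[i]].
    codes, dists = vq(wdata, centroids)

Passing an ndarray instead of an int for k skips the initialization step and uses the given rows as the starting code book, which is what the kmeans2 tests in test_vq.py above do with initc.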