From scipy-svn at scipy.org Sat Sep 1 18:01:07 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 1 Sep 2007 17:01:07 -0500 (CDT) Subject: [Scipy-svn] r3298 - trunk/scipy/sandbox/rkern Message-ID: <20070901220107.F249B39C288@new.scipy.org> Author: stefan Date: 2007-09-01 17:00:50 -0500 (Sat, 01 Sep 2007) New Revision: 3298 Modified: trunk/scipy/sandbox/rkern/diffev.py Log: Permutation and rand moved. Use their new locations. Modified: trunk/scipy/sandbox/rkern/diffev.py =================================================================== --- trunk/scipy/sandbox/rkern/diffev.py 2007-08-30 23:19:19 UTC (rev 3297) +++ trunk/scipy/sandbox/rkern/diffev.py 2007-09-01 22:00:50 UTC (rev 3298) @@ -1,4 +1,4 @@ -"""Differantial Evolution Optimization +"""Differential Evolution Optimization :Author: Robert Kern @@ -143,7 +143,7 @@ scale=None, strategy=('rand', 2, 'bin'), eps=1e-6): lbound = sp.asarray(lbound) ubound = sp.asarray(ubound) - pop0 = stats.rand(npop, len(lbound))*(ubound-lbound) + lbound + pop0 = sp.rand(npop, len(lbound))*(ubound-lbound) + lbound return cls(func, pop0, crossover_rate=crossover_rate, scale=scale, strategy=strategy, eps=eps) frombounds = classmethod(frombounds) @@ -154,13 +154,13 @@ return max(0.3, 1.-rat) def bin_crossover(self, oldgene, newgene): - mask = stats.rand(self.ndim) < self.crossover_rate + mask = sp.rand(self.ndim) < self.crossover_rate return sp.where(mask, newgene, oldgene) def select_samples(self, candidate, nsamples): possibilities = range(self.npop) possibilities.remove(candidate) - return stats.permutation(possibilities)[:nsamples] + return stats.distributions.permutation(possibilities)[:nsamples] def diff1(self, candidate): i1, i2 = self.select_samples(candidate, 2) From scipy-svn at scipy.org Sun Sep 2 16:04:07 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sun, 2 Sep 2007 15:04:07 -0500 (CDT) Subject: [Scipy-svn] r3299 - trunk/scipy/sandbox/rkern Message-ID: <20070902200407.5B62D39C098@new.scipy.org> Author: rkern Date: 2007-09-02 15:04:05 -0500 (Sun, 02 Sep 2007) New Revision: 3299 Modified: trunk/scipy/sandbox/rkern/diffev.py Log: Update to modern idioms. Modified: trunk/scipy/sandbox/rkern/diffev.py =================================================================== --- trunk/scipy/sandbox/rkern/diffev.py 2007-09-01 22:00:50 UTC (rev 3298) +++ trunk/scipy/sandbox/rkern/diffev.py 2007-09-02 20:04:05 UTC (rev 3299) @@ -5,12 +5,11 @@ Copyright 2005 by Robert Kern. """ -import scipy as sp -from scipy import stats +import numpy as np # Notes: for future modifications: -# Ali, M. M., and A. Toern. Topographical differential evoltion using +# Ali, M. M., and A. Toern. Topographical differential evolution using # pre-calculated differentials. _Stochastic and Global Optimization_. 1--17. # # A good scale value: @@ -75,6 +74,8 @@ The third element is (currently) 'bin' to specify binomial crossover. eps -- if the maximum and minimum function values of a given generation are within eps of each other, convergence has been achieved. + prng -- a RandomState instance. By default, this is the global + numpy.random instance.
DiffEvolver.frombounds(func, lbound, ubound, npop, crossover_rate=0.5, scale=None, strategy=('rand', 2, 'bin'), eps=1e-6) @@ -101,9 +102,9 @@ func, args, crossover_rate, scale, strategy, eps -- from constructor """ def __init__(self, func, pop0, args=(), crossover_rate=0.5, scale=None, - strategy=('rand', 2, 'bin'), eps=1e-6): + strategy=('rand', 2, 'bin'), eps=1e-6, prng=np.random): self.func = func - self.population = sp.array(pop0) + self.population = np.array(pop0) self.npop, self.ndim = self.population.shape self.args = args self.crossover_rate = crossover_rate @@ -111,7 +112,7 @@ self.eps = eps self.pop_values = [self.func(m, *args) for m in self.population] - bestidx = sp.argmin(self.pop_values) + bestidx = np.argmin(self.pop_values) self.best_vector = self.population[bestidx] self.best_value = self.pop_values[bestidx] @@ -140,10 +141,10 @@ self.pop_values = [self.func(m, *self.args) for m in self.population] def frombounds(cls, func, lbound, ubound, npop, crossover_rate=0.5, - scale=None, strategy=('rand', 2, 'bin'), eps=1e-6): - lbound = sp.asarray(lbound) - ubound = sp.asarray(ubound) - pop0 = sp.rand(npop, len(lbound))*(ubound-lbound) + lbound + scale=None, strategy=('rand', 2, 'bin'), eps=1e-6, prng=np.random): + lbound = np.asarray(lbound) + ubound = np.asarray(ubound) + pop0 = prng.uniform(lbound, ubound, size=(npop, len(lbound))) return cls(func, pop0, crossover_rate=crossover_rate, scale=scale, strategy=strategy, eps=eps) frombounds = classmethod(frombounds) @@ -154,13 +155,13 @@ return max(0.3, 1.-rat) def bin_crossover(self, oldgene, newgene): - mask = sp.rand(self.ndim) < self.crossover_rate - return sp.where(mask, newgene, oldgene) + mask = self.prng.rand(self.ndim) < self.crossover_rate + return np.where(mask, newgene, oldgene) def select_samples(self, candidate, nsamples): possibilities = range(self.npop) possibilities.remove(candidate) - return stats.distributions.permutation(possibilities)[:nsamples] + return self.prng.permutation(possibilities)[:nsamples] def diff1(self, candidate): i1, i2 = self.select_samples(candidate, 2) @@ -175,7 +176,7 @@ return self.best_vector def choose_rand(self, candidate): - i = self.select_samples(candidate, 1) + i = self.select_samples(candidate, 1)[0] return self.population[i] def choose_rand_to_best(self, candidate): From scipy-svn at scipy.org Sun Sep 2 16:22:04 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sun, 2 Sep 2007 15:22:04 -0500 (CDT) Subject: [Scipy-svn] r3300 - trunk/scipy/sandbox/timeseries Message-ID: <20070902202204.0FBCF39C0E0@new.scipy.org> Author: mattknox_ca Date: 2007-09-02 15:21:59 -0500 (Sun, 02 Sep 2007) New Revision: 3300 Modified: trunk/scipy/sandbox/timeseries/readme.txt Log: updated documentation url in readme.txt Modified: trunk/scipy/sandbox/timeseries/readme.txt =================================================================== --- trunk/scipy/sandbox/timeseries/readme.txt 2007-09-02 20:04:05 UTC (rev 3299) +++ trunk/scipy/sandbox/timeseries/readme.txt 2007-09-02 20:21:59 UTC (rev 3300) @@ -1,4 +1,4 @@ Please see the wiki for installation and requirements info, as well as module documentation. -http://www.scipy.org/TimeSeriesPackage +http://scipy.org/SciPyPackages/TimeSeries From scipy-svn at scipy.org Tue Sep 4 18:08:25 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 4 Sep 2007 17:08:25 -0500 (CDT) Subject: [Scipy-svn] r3301 - in trunk/scipy/sparse: . 
tests Message-ID: <20070904220825.5BE9739C1BF@new.scipy.org> Author: wnbell Date: 2007-09-04 17:08:22 -0500 (Tue, 04 Sep 2007) New Revision: 3301 Modified: trunk/scipy/sparse/sparse.py trunk/scipy/sparse/tests/test_sparse.py Log: added support for inplace scalar multiplication and division Modified: trunk/scipy/sparse/sparse.py =================================================================== --- trunk/scipy/sparse/sparse.py 2007-09-02 20:21:59 UTC (rev 3300) +++ trunk/scipy/sparse/sparse.py 2007-09-04 22:08:22 UTC (rev 3301) @@ -271,10 +271,13 @@ def __imul__(self, other): raise NotImplementedError - + def __idiv__(self, other): - raise TypeError("No support for matrix division.") + return self.__itruediv__(other) + def __itruediv__(self, other): + raise NotImplementedError + def __getattr__(self, attr): if attr == 'A': return self.toarray() @@ -585,7 +588,7 @@ # Convert this matrix to a dense matrix and add them return self.todense() - other else: - raise NotImplemented + raise NotImplementedError def __mul__(self, other): # self * other @@ -608,6 +611,12 @@ tr = asarray(other).transpose() return self.transpose().dot(tr).transpose() + def __imul__(self, other): #self *= other + if isscalarlike(other): + self.data *= other + return self + else: + raise NotImplementedError def __neg__(self): return self._with_data(-self.data) @@ -621,9 +630,16 @@ raise ValueError, "inconsistent shapes" return self._binopt(other,fn) else: - raise NotImplemented + raise NotImplementedError + + def __itruediv__(self, other): #self /= other + if isscalarlike(other): + recip = 1.0 / other + self.data *= recip + return self + else: + raise NotImplementedError - def __pow__(self, other, fn): """ Element-by-element power (unless other is a scalar, in which case return the matrix power.) @@ -633,7 +649,7 @@ elif isspmatrix(other): return self._binopt(other,fn) else: - raise NotImplemented + raise NotImplementedError def _matmat(self, other, fn): @@ -1826,6 +1842,16 @@ else: return self.dot(other) + def __imul__(self, other): # self *= other + if isscalarlike(other): + # Multiply every element by this scalar. + for (key, val) in self.iteritems(): + self[key] = val * other + #new.dtype.char = self.dtype.char + return self + else: + raise NotImplementedError + def __rmul__(self, other): # other * self if isscalarlike(other): new = dok_matrix(self.shape, dtype=self.dtype) @@ -1841,7 +1867,28 @@ except AttributeError: tr = asarray(other).transpose() return self.transpose().dot(tr).transpose() + + def __truediv__(self, other): # self / other + if isscalarlike(other): + new = dok_matrix(self.shape, dtype=self.dtype) + # Divide every element by this scalar. + for (key, val) in self.iteritems(): + new[key] = val / other + #new.dtype.char = self.dtype.char + return new + else: + return self.tocsr() / other + + def __itruediv__(self, other): # self /= other + if isscalarlike(other): + # Divide every element by this scalar. + for (key, val) in self.iteritems(): + self[key] = val / other + return self + else: + raise NotImplementedError + # What should len(sparse) return? For consistency with dense matrices, # perhaps it should be the number of rows? For now it returns the number # of non-zeros.
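The new in-place operators only accept scalars; any other operand keeps raising NotImplementedError. A minimal sketch of the intended semantics (assuming the 0.6-era scipy.sparse API; compare the check_imul_scalar and check_idiv_scalar tests added further below):

    import numpy
    from scipy import sparse

    A = sparse.csr_matrix(numpy.array([[1.0, 0.0], [0.0, 2.0]]))
    A *= 2      # __imul__ scales A.data in place, without copying the matrix
    A /= 4.0    # __idiv__ delegates to __itruediv__, which multiplies by 1/other
    print(A.todense())   # matrix([[ 0.5,  0. ],
                         #         [ 0. ,  1. ]])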
@@ -2259,8 +2306,15 @@ self[:,:] = self * other return self else: - raise TypeError("In-place matrix multiplication not supported.") + raise NotImplementedError + def __itruediv__(self,other): + if isscalarlike(other): + self[:,:] = self / other + return self + else: + raise NotImplementedError + # Whenever the dimensions change, empty lists should be created for each # row @@ -2463,6 +2517,16 @@ else: return self.dot(other) + def __truediv__(self, other): # self / other + if isscalarlike(other): + new = self.copy() + # Divide every element by this scalar + new.data = numpy.array([[val/other for val in rowvals] for + rowvals in new.data], dtype=object) + return new + else: + return self.tocsr() / other + def multiply(self, other): """Point-wise multiplication by another lil_matrix. Modified: trunk/scipy/sparse/tests/test_sparse.py =================================================================== --- trunk/scipy/sparse/tests/test_sparse.py 2007-09-02 20:21:59 UTC (rev 3300) +++ trunk/scipy/sparse/tests/test_sparse.py 2007-09-04 22:08:22 UTC (rev 3301) @@ -99,6 +99,24 @@ assert_array_equal(self.dat*2,(self.datsp*2).todense()) assert_array_equal(self.dat*17.3,(self.datsp*17.3).todense()) + def check_imul_scalar(self): + a = self.datsp.copy() + a *= 2 + assert_array_equal(self.dat*2,a.todense()) + + a = self.datsp.copy() + a *= 17.3 + assert_array_equal(self.dat*17.3,a.todense()) + + def check_idiv_scalar(self): + a = self.datsp.copy() + a /= 2 + assert_array_equal(self.dat/2,a.todense()) + + a = self.datsp.copy() + a /= 17.3 + assert_array_equal(self.dat/17.3,a.todense()) + def check_rmul_scalar(self): assert_array_equal(2*self.dat,(2*self.datsp).todense()) assert_array_equal(17.3*self.dat,(17.3*self.datsp).todense()) From scipy-svn at scipy.org Wed Sep 5 11:59:39 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 5 Sep 2007 10:59:39 -0500 (CDT) Subject: [Scipy-svn] r3302 - in trunk/scipy/sparse: . sparsetools tests Message-ID: <20070905155939.EB3FE39C199@new.scipy.org> Author: rc Date: 2007-09-05 10:59:21 -0500 (Wed, 05 Sep 2007) New Revision: 3302 Modified: trunk/scipy/sparse/sparse.py trunk/scipy/sparse/sparsetools/sparsetools.h trunk/scipy/sparse/sparsetools/sparsetools.i trunk/scipy/sparse/sparsetools/sparsetools.py trunk/scipy/sparse/sparsetools/sparsetools_wrap.cxx trunk/scipy/sparse/tests/test_sparse.py Log: added csr_matrix.get_submatrix() Modified: trunk/scipy/sparse/sparse.py =================================================================== --- trunk/scipy/sparse/sparse.py 2007-09-04 22:08:22 UTC (rev 3301) +++ trunk/scipy/sparse/sparse.py 2007-09-05 15:59:21 UTC (rev 3302) @@ -1462,6 +1462,17 @@ """ return _cs_matrix._ensure_sorted_indices(self, self.shape[0], self.shape[1], inplace) + def get_submatrix( self, slice0, slice1 ): + """Return a submatrix of this matrix (new matrix is created).""" + aux = sparsetools.get_csr_submatrix( self.shape[0], self.shape[1], + self.indptr, self.indices, + self.data, + slice0.start, slice0.stop, + slice1.start, slice1.stop ) + data, indices, indptr = aux[2], aux[1], aux[0] + return self.__class__( (data, indices, indptr), + dims = (slice0.stop - slice0.start, + slice1.stop - slice1.start) ) # This function was for sorting dictionary keys by the second tuple element. # (We now use the Schwartzian transform instead for efficiency.) 
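A minimal usage sketch for the new method (hypothetical values; assumes a csr_matrix from the 0.6-era scipy.sparse API). get_submatrix takes two slice objects, extracts rows slice0.start:slice0.stop and columns slice1.start:slice1.stop through the new sparsetools.get_csr_submatrix routine, and returns the block as a new matrix:

    import numpy
    from scipy import sparse

    A = sparse.csr_matrix(numpy.arange(12.0).reshape(3, 4))
    B = A.get_submatrix(slice(0, 2), slice(1, 3))   # rows 0-1, columns 1-2
    print(B.todense())   # matrix([[ 1.,  2.],
                         #         [ 5.,  6.]])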
Modified: trunk/scipy/sparse/sparsetools/sparsetools.h =================================================================== --- trunk/scipy/sparse/sparsetools/sparsetools.h 2007-09-04 22:08:22 UTC (rev 3301) +++ trunk/scipy/sparse/sparsetools/sparsetools.h 2007-09-05 15:59:21 UTC (rev 3302) @@ -858,7 +858,60 @@ } } +template <class I, class T> +void get_csr_submatrix(const I n_row, + const I n_col, + const I Ap[], + const I Aj[], + const T Ax[], + const I ir0, + const I ir1, + const I ic0, + const I ic1, + std::vector<I>* Bp, + std::vector<I>* Bj, + std::vector<T>* Bx) +{ + I new_n_row = ir1 - ir0; + I new_n_col = ic1 - ic0; + I new_nnz = 0; + I kk = 0; + // Count nonzeros total/per row. + for(I i = 0; i < new_n_row; i++){ + I row_start = Ap[ir0+i]; + I row_end = Ap[ir0+i+1]; + + for(I jj = row_start; jj < row_end; jj++){ + if ((Aj[jj] >= ic0) && (Aj[jj] < ic1)) { + new_nnz++; + } + } + } + + // Allocate. + Bp->resize(new_n_row+1); + Bj->resize(new_nnz); + Bx->resize(new_nnz); + + // Assign. + (*Bp)[0] = 0; + for(I i = 0; i < new_n_row; i++){ + I row_start = Ap[ir0+i]; + I row_end = Ap[ir0+i+1]; + + for(I jj = row_start; jj < row_end; jj++){ + if ((Aj[jj] >= ic0) && (Aj[jj] < ic1)) { + (*Bj)[kk] = Aj[jj] - ic0; + (*Bx)[kk] = Ax[jj]; + kk++; + } + } + (*Bp)[i+1] = kk; + } +} + + /* * Derived methods */ Modified: trunk/scipy/sparse/sparsetools/sparsetools.i =================================================================== --- trunk/scipy/sparse/sparsetools/sparsetools.i 2007-09-04 22:08:22 UTC (rev 3301) +++ trunk/scipy/sparse/sparsetools/sparsetools.i 2007-09-05 15:59:21 UTC (rev 3302) @@ -244,10 +244,11 @@ INSTANTIATE_ALL(sort_csr_indices) INSTANTIATE_ALL(sort_csc_indices) + /* * Sum duplicate CSR/CSC entries. */ INSTANTIATE_ALL(sum_csr_duplicates) INSTANTIATE_ALL(sum_csc_duplicates) - +INSTANTIATE_ALL(get_csr_submatrix) Modified: trunk/scipy/sparse/sparsetools/sparsetools.py =================================================================== --- trunk/scipy/sparse/sparsetools/sparsetools.py 2007-09-04 22:08:22 UTC (rev 3301) +++ trunk/scipy/sparse/sparsetools/sparsetools.py 2007-09-05 15:59:21 UTC (rev 3302) @@ -1,5 +1,5 @@ # This file was automatically generated by SWIG (http://www.swig.org). -# Version 1.3.32 +# Version 1.3.31 # # Don't modify this file, modify the SWIG interface instead. # This file is compatible with both classic and new-style classes.
@@ -579,3 +579,28 @@ """ return _sparsetools.sum_csc_duplicates(*args) +def get_csr_submatrix(*args): + """ + get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, int Ax, int ir0, + int ir1, int ic0, int ic1, std::vector<(int)> Bp, + std::vector<(int)> Bj, std::vector<(int)> Bx) + get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, long Ax, int ir0, + int ir1, int ic0, int ic1, std::vector<(int)> Bp, + std::vector<(int)> Bj, std::vector<(long)> Bx) + get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, float Ax, int ir0, + int ir1, int ic0, int ic1, std::vector<(int)> Bp, + std::vector<(int)> Bj, std::vector<(float)> Bx) + get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, double Ax, int ir0, + int ir1, int ic0, int ic1, std::vector<(int)> Bp, + std::vector<(int)> Bj, std::vector<(double)> Bx) + get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax, + int ir0, int ir1, int ic0, int ic1, + std::vector<(int)> Bp, std::vector<(int)> Bj, + std::vector<(npy_cfloat_wrapper)> Bx) + get_csr_submatrix(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax, + int ir0, int ir1, int ic0, int ic1, + std::vector<(int)> Bp, std::vector<(int)> Bj, + std::vector<(npy_cdouble_wrapper)> Bx) + """ + return _sparsetools.get_csr_submatrix(*args) + Modified: trunk/scipy/sparse/sparsetools/sparsetools_wrap.cxx =================================================================== --- trunk/scipy/sparse/sparsetools/sparsetools_wrap.cxx 2007-09-04 22:08:22 UTC (rev 3301) +++ trunk/scipy/sparse/sparsetools/sparsetools_wrap.cxx 2007-09-05 15:59:21 UTC (rev 3302) @@ -1,6 +1,6 @@ /* ---------------------------------------------------------------------------- * This file was automatically generated by SWIG (http://www.swig.org). - * Version 1.3.32 + * Version 1.3.31 * * This file is not intended to be easily readable and contains a number of * coding conventions designed to improve portability and efficiency. 
Do not make @@ -34,14 +34,14 @@ /* template workaround for compilers that cannot correctly implement the C++ standard */ #ifndef SWIGTEMPLATEDISAMBIGUATOR -# if defined(__SUNPRO_CC) && (__SUNPRO_CC <= 0x560) -# define SWIGTEMPLATEDISAMBIGUATOR template -# elif defined(__HP_aCC) -/* Needed even with `aCC -AA' when `aCC -V' reports HP ANSI C++ B3910B A.03.55 */ -/* If we find a maximum version that requires this, the test would be __HP_aCC <= 35500 for A.03.55 */ -# define SWIGTEMPLATEDISAMBIGUATOR template +# if defined(__SUNPRO_CC) +# if (__SUNPRO_CC <= 0x560) +# define SWIGTEMPLATEDISAMBIGUATOR template +# else +# define SWIGTEMPLATEDISAMBIGUATOR +# endif # else -# define SWIGTEMPLATEDISAMBIGUATOR +# define SWIGTEMPLATEDISAMBIGUATOR # endif #endif @@ -1608,11 +1608,9 @@ (unaryfunc)0, /*nb_float*/ (unaryfunc)PySwigObject_oct, /*nb_oct*/ (unaryfunc)PySwigObject_hex, /*nb_hex*/ -#if PY_VERSION_HEX >= 0x02050000 /* 2.5.0 */ - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 /* nb_inplace_add -> nb_index */ -#elif PY_VERSION_HEX >= 0x02020000 /* 2.2.0 */ - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 /* nb_inplace_add -> nb_inplace_true_divide */ -#elif PY_VERSION_HEX >= 0x02000000 /* 2.0.0 */ +#if PY_VERSION_HEX >= 0x02020000 + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 /* nb_inplace_add -> nb_inplace_true_divide */ +#elif PY_VERSION_HEX >= 0x02000000 0,0,0,0,0,0,0,0,0,0,0 /* nb_inplace_add -> nb_inplace_or */ #endif }; @@ -2495,7 +2493,7 @@ #define SWIG_name "_sparsetools" -#define SWIGVERSION 0x010332 +#define SWIGVERSION 0x010331 #define SWIG_VERSION SWIGVERSION @@ -30399,455 +30397,1336 @@ } +SWIGINTERN PyObject *_wrap_get_csr_submatrix__SWIG_1(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { + PyObject *resultobj = 0; + int arg1 ; + int arg2 ; + int *arg3 ; + int *arg4 ; + int *arg5 ; + int arg6 ; + int arg7 ; + int arg8 ; + int arg9 ; + std::vector *arg10 = (std::vector *) 0 ; + std::vector *arg11 = (std::vector *) 0 ; + std::vector *arg12 = (std::vector *) 0 ; + int val1 ; + int ecode1 = 0 ; + int val2 ; + int ecode2 = 0 ; + PyArrayObject *array3 = NULL ; + int is_new_object3 ; + PyArrayObject *array4 = NULL ; + int is_new_object4 ; + PyArrayObject *array5 = NULL ; + int is_new_object5 ; + int val6 ; + int ecode6 = 0 ; + int val7 ; + int ecode7 = 0 ; + int val8 ; + int ecode8 = 0 ; + int val9 ; + int ecode9 = 0 ; + std::vector *tmp10 ; + std::vector *tmp11 ; + std::vector *tmp12 ; + PyObject * obj0 = 0 ; + PyObject * obj1 = 0 ; + PyObject * obj2 = 0 ; + PyObject * obj3 = 0 ; + PyObject * obj4 = 0 ; + PyObject * obj5 = 0 ; + PyObject * obj6 = 0 ; + PyObject * obj7 = 0 ; + PyObject * obj8 = 0 ; + + { + tmp10 = new std::vector(); + arg10 = tmp10; + } + { + tmp11 = new std::vector(); + arg11 = tmp11; + } + { + tmp12 = new std::vector(); + arg12 = tmp12; + } + if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOO:get_csr_submatrix",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8)) SWIG_fail; + ecode1 = SWIG_AsVal_int(obj0, &val1); + if (!SWIG_IsOK(ecode1)) { + SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "get_csr_submatrix" "', argument " "1"" of type '" "int""'"); + } + arg1 = static_cast< int >(val1); + ecode2 = SWIG_AsVal_int(obj1, &val2); + if (!SWIG_IsOK(ecode2)) { + SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "get_csr_submatrix" "', argument " "2"" of type '" "int""'"); + } + arg2 = static_cast< int >(val2); + { + npy_intp size[1] = { + -1 + }; + array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); + if (!array3 || !require_dimensions(array3,1) || 
!require_size(array3,size,1)) SWIG_fail; + arg3 = (int*) array3->data; + } + { + npy_intp size[1] = { + -1 + }; + array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); + if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1)) SWIG_fail; + arg4 = (int*) array4->data; + } + { + npy_intp size[1] = { + -1 + }; + array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_INT, &is_new_object5); + if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1)) SWIG_fail; + arg5 = (int*) array5->data; + } + ecode6 = SWIG_AsVal_int(obj5, &val6); + if (!SWIG_IsOK(ecode6)) { + SWIG_exception_fail(SWIG_ArgError(ecode6), "in method '" "get_csr_submatrix" "', argument " "6"" of type '" "int""'"); + } + arg6 = static_cast< int >(val6); + ecode7 = SWIG_AsVal_int(obj6, &val7); + if (!SWIG_IsOK(ecode7)) { + SWIG_exception_fail(SWIG_ArgError(ecode7), "in method '" "get_csr_submatrix" "', argument " "7"" of type '" "int""'"); + } + arg7 = static_cast< int >(val7); + ecode8 = SWIG_AsVal_int(obj7, &val8); + if (!SWIG_IsOK(ecode8)) { + SWIG_exception_fail(SWIG_ArgError(ecode8), "in method '" "get_csr_submatrix" "', argument " "8"" of type '" "int""'"); + } + arg8 = static_cast< int >(val8); + ecode9 = SWIG_AsVal_int(obj8, &val9); + if (!SWIG_IsOK(ecode9)) { + SWIG_exception_fail(SWIG_ArgError(ecode9), "in method '" "get_csr_submatrix" "', argument " "9"" of type '" "int""'"); + } + arg9 = static_cast< int >(val9); + get_csr_submatrix(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(int const (*))arg5,arg6,arg7,arg8,arg9,arg10,arg11,arg12); + resultobj = SWIG_Py_Void(); + { + int length = (arg10)->size(); + PyObject *obj = PyArray_FromDims(1, &length,PyArray_INT); + memcpy(PyArray_DATA(obj),&((*(arg10))[0]),sizeof(int)*length); + delete arg10; + resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); + } + { + int length = (arg11)->size(); + PyObject *obj = PyArray_FromDims(1, &length,PyArray_INT); + memcpy(PyArray_DATA(obj),&((*(arg11))[0]),sizeof(int)*length); + delete arg11; + resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); + } + { + int length = (arg12)->size(); + PyObject *obj = PyArray_FromDims(1, &length,PyArray_INT); + memcpy(PyArray_DATA(obj),&((*(arg12))[0]),sizeof(int)*length); + delete arg12; + resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); + } + { + if (is_new_object3 && array3) Py_DECREF(array3); + } + { + if (is_new_object4 && array4) Py_DECREF(array4); + } + { + if (is_new_object5 && array5) Py_DECREF(array5); + } + return resultobj; +fail: + { + if (is_new_object3 && array3) Py_DECREF(array3); + } + { + if (is_new_object4 && array4) Py_DECREF(array4); + } + { + if (is_new_object5 && array5) Py_DECREF(array5); + } + return NULL; +} + + +SWIGINTERN PyObject *_wrap_get_csr_submatrix__SWIG_2(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { + PyObject *resultobj = 0; + int arg1 ; + int arg2 ; + int *arg3 ; + int *arg4 ; + long *arg5 ; + int arg6 ; + int arg7 ; + int arg8 ; + int arg9 ; + std::vector *arg10 = (std::vector *) 0 ; + std::vector *arg11 = (std::vector *) 0 ; + std::vector *arg12 = (std::vector *) 0 ; + int val1 ; + int ecode1 = 0 ; + int val2 ; + int ecode2 = 0 ; + PyArrayObject *array3 = NULL ; + int is_new_object3 ; + PyArrayObject *array4 = NULL ; + int is_new_object4 ; + PyArrayObject *array5 = NULL ; + int is_new_object5 ; + int val6 ; + int ecode6 = 0 ; + int val7 ; + int ecode7 = 0 ; + int val8 ; + int ecode8 = 0 ; + int val9 ; + int ecode9 = 0 ; + std::vector *tmp10 
; + std::vector *tmp11 ; + std::vector *tmp12 ; + PyObject * obj0 = 0 ; + PyObject * obj1 = 0 ; + PyObject * obj2 = 0 ; + PyObject * obj3 = 0 ; + PyObject * obj4 = 0 ; + PyObject * obj5 = 0 ; + PyObject * obj6 = 0 ; + PyObject * obj7 = 0 ; + PyObject * obj8 = 0 ; + + { + tmp10 = new std::vector(); + arg10 = tmp10; + } + { + tmp11 = new std::vector(); + arg11 = tmp11; + } + { + tmp12 = new std::vector(); + arg12 = tmp12; + } + if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOO:get_csr_submatrix",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8)) SWIG_fail; + ecode1 = SWIG_AsVal_int(obj0, &val1); + if (!SWIG_IsOK(ecode1)) { + SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "get_csr_submatrix" "', argument " "1"" of type '" "int""'"); + } + arg1 = static_cast< int >(val1); + ecode2 = SWIG_AsVal_int(obj1, &val2); + if (!SWIG_IsOK(ecode2)) { + SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "get_csr_submatrix" "', argument " "2"" of type '" "int""'"); + } + arg2 = static_cast< int >(val2); + { + npy_intp size[1] = { + -1 + }; + array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); + if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1)) SWIG_fail; + arg3 = (int*) array3->data; + } + { + npy_intp size[1] = { + -1 + }; + array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); + if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1)) SWIG_fail; + arg4 = (int*) array4->data; + } + { + npy_intp size[1] = { + -1 + }; + array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_LONG, &is_new_object5); + if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1)) SWIG_fail; + arg5 = (long*) array5->data; + } + ecode6 = SWIG_AsVal_int(obj5, &val6); + if (!SWIG_IsOK(ecode6)) { + SWIG_exception_fail(SWIG_ArgError(ecode6), "in method '" "get_csr_submatrix" "', argument " "6"" of type '" "int""'"); + } + arg6 = static_cast< int >(val6); + ecode7 = SWIG_AsVal_int(obj6, &val7); + if (!SWIG_IsOK(ecode7)) { + SWIG_exception_fail(SWIG_ArgError(ecode7), "in method '" "get_csr_submatrix" "', argument " "7"" of type '" "int""'"); + } + arg7 = static_cast< int >(val7); + ecode8 = SWIG_AsVal_int(obj7, &val8); + if (!SWIG_IsOK(ecode8)) { + SWIG_exception_fail(SWIG_ArgError(ecode8), "in method '" "get_csr_submatrix" "', argument " "8"" of type '" "int""'"); + } + arg8 = static_cast< int >(val8); + ecode9 = SWIG_AsVal_int(obj8, &val9); + if (!SWIG_IsOK(ecode9)) { + SWIG_exception_fail(SWIG_ArgError(ecode9), "in method '" "get_csr_submatrix" "', argument " "9"" of type '" "int""'"); + } + arg9 = static_cast< int >(val9); + get_csr_submatrix(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(long const (*))arg5,arg6,arg7,arg8,arg9,arg10,arg11,arg12); + resultobj = SWIG_Py_Void(); + { + int length = (arg10)->size(); + PyObject *obj = PyArray_FromDims(1, &length,PyArray_INT); + memcpy(PyArray_DATA(obj),&((*(arg10))[0]),sizeof(int)*length); + delete arg10; + resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); + } + { + int length = (arg11)->size(); + PyObject *obj = PyArray_FromDims(1, &length,PyArray_INT); + memcpy(PyArray_DATA(obj),&((*(arg11))[0]),sizeof(int)*length); + delete arg11; + resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); + } + { + int length = (arg12)->size(); + PyObject *obj = PyArray_FromDims(1, &length,PyArray_LONG); + memcpy(PyArray_DATA(obj),&((*(arg12))[0]),sizeof(long)*length); + delete arg12; + resultobj = helper_appendToTuple( 
resultobj, (PyObject *)obj ); + } + { + if (is_new_object3 && array3) Py_DECREF(array3); + } + { + if (is_new_object4 && array4) Py_DECREF(array4); + } + { + if (is_new_object5 && array5) Py_DECREF(array5); + } + return resultobj; +fail: + { + if (is_new_object3 && array3) Py_DECREF(array3); + } + { + if (is_new_object4 && array4) Py_DECREF(array4); + } + { + if (is_new_object5 && array5) Py_DECREF(array5); + } + return NULL; +} + + +SWIGINTERN PyObject *_wrap_get_csr_submatrix__SWIG_3(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { + PyObject *resultobj = 0; + int arg1 ; + int arg2 ; + int *arg3 ; + int *arg4 ; + float *arg5 ; + int arg6 ; + int arg7 ; + int arg8 ; + int arg9 ; + std::vector *arg10 = (std::vector *) 0 ; + std::vector *arg11 = (std::vector *) 0 ; + std::vector *arg12 = (std::vector *) 0 ; + int val1 ; + int ecode1 = 0 ; + int val2 ; + int ecode2 = 0 ; + PyArrayObject *array3 = NULL ; + int is_new_object3 ; + PyArrayObject *array4 = NULL ; + int is_new_object4 ; + PyArrayObject *array5 = NULL ; + int is_new_object5 ; + int val6 ; + int ecode6 = 0 ; + int val7 ; + int ecode7 = 0 ; + int val8 ; + int ecode8 = 0 ; + int val9 ; + int ecode9 = 0 ; + std::vector *tmp10 ; + std::vector *tmp11 ; + std::vector *tmp12 ; + PyObject * obj0 = 0 ; + PyObject * obj1 = 0 ; + PyObject * obj2 = 0 ; + PyObject * obj3 = 0 ; + PyObject * obj4 = 0 ; + PyObject * obj5 = 0 ; + PyObject * obj6 = 0 ; + PyObject * obj7 = 0 ; + PyObject * obj8 = 0 ; + + { + tmp10 = new std::vector(); + arg10 = tmp10; + } + { + tmp11 = new std::vector(); + arg11 = tmp11; + } + { + tmp12 = new std::vector(); + arg12 = tmp12; + } + if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOO:get_csr_submatrix",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8)) SWIG_fail; + ecode1 = SWIG_AsVal_int(obj0, &val1); + if (!SWIG_IsOK(ecode1)) { + SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "get_csr_submatrix" "', argument " "1"" of type '" "int""'"); + } + arg1 = static_cast< int >(val1); + ecode2 = SWIG_AsVal_int(obj1, &val2); + if (!SWIG_IsOK(ecode2)) { + SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "get_csr_submatrix" "', argument " "2"" of type '" "int""'"); + } + arg2 = static_cast< int >(val2); + { + npy_intp size[1] = { + -1 + }; + array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); + if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1)) SWIG_fail; + arg3 = (int*) array3->data; + } + { + npy_intp size[1] = { + -1 + }; + array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); + if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1)) SWIG_fail; + arg4 = (int*) array4->data; + } + { + npy_intp size[1] = { + -1 + }; + array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_FLOAT, &is_new_object5); + if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1)) SWIG_fail; + arg5 = (float*) array5->data; + } + ecode6 = SWIG_AsVal_int(obj5, &val6); + if (!SWIG_IsOK(ecode6)) { + SWIG_exception_fail(SWIG_ArgError(ecode6), "in method '" "get_csr_submatrix" "', argument " "6"" of type '" "int""'"); + } + arg6 = static_cast< int >(val6); + ecode7 = SWIG_AsVal_int(obj6, &val7); + if (!SWIG_IsOK(ecode7)) { + SWIG_exception_fail(SWIG_ArgError(ecode7), "in method '" "get_csr_submatrix" "', argument " "7"" of type '" "int""'"); + } + arg7 = static_cast< int >(val7); + ecode8 = SWIG_AsVal_int(obj7, &val8); + if (!SWIG_IsOK(ecode8)) { + SWIG_exception_fail(SWIG_ArgError(ecode8), "in method 
'" "get_csr_submatrix" "', argument " "8"" of type '" "int""'"); + } + arg8 = static_cast< int >(val8); + ecode9 = SWIG_AsVal_int(obj8, &val9); + if (!SWIG_IsOK(ecode9)) { + SWIG_exception_fail(SWIG_ArgError(ecode9), "in method '" "get_csr_submatrix" "', argument " "9"" of type '" "int""'"); + } + arg9 = static_cast< int >(val9); + get_csr_submatrix(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(float const (*))arg5,arg6,arg7,arg8,arg9,arg10,arg11,arg12); + resultobj = SWIG_Py_Void(); + { + int length = (arg10)->size(); + PyObject *obj = PyArray_FromDims(1, &length,PyArray_INT); + memcpy(PyArray_DATA(obj),&((*(arg10))[0]),sizeof(int)*length); + delete arg10; + resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); + } + { + int length = (arg11)->size(); + PyObject *obj = PyArray_FromDims(1, &length,PyArray_INT); + memcpy(PyArray_DATA(obj),&((*(arg11))[0]),sizeof(int)*length); + delete arg11; + resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); + } + { + int length = (arg12)->size(); + PyObject *obj = PyArray_FromDims(1, &length,PyArray_FLOAT); + memcpy(PyArray_DATA(obj),&((*(arg12))[0]),sizeof(float)*length); + delete arg12; + resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); + } + { + if (is_new_object3 && array3) Py_DECREF(array3); + } + { + if (is_new_object4 && array4) Py_DECREF(array4); + } + { + if (is_new_object5 && array5) Py_DECREF(array5); + } + return resultobj; +fail: + { + if (is_new_object3 && array3) Py_DECREF(array3); + } + { + if (is_new_object4 && array4) Py_DECREF(array4); + } + { + if (is_new_object5 && array5) Py_DECREF(array5); + } + return NULL; +} + + +SWIGINTERN PyObject *_wrap_get_csr_submatrix__SWIG_4(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { + PyObject *resultobj = 0; + int arg1 ; + int arg2 ; + int *arg3 ; + int *arg4 ; + double *arg5 ; + int arg6 ; + int arg7 ; + int arg8 ; + int arg9 ; + std::vector *arg10 = (std::vector *) 0 ; + std::vector *arg11 = (std::vector *) 0 ; + std::vector *arg12 = (std::vector *) 0 ; + int val1 ; + int ecode1 = 0 ; + int val2 ; + int ecode2 = 0 ; + PyArrayObject *array3 = NULL ; + int is_new_object3 ; + PyArrayObject *array4 = NULL ; + int is_new_object4 ; + PyArrayObject *array5 = NULL ; + int is_new_object5 ; + int val6 ; + int ecode6 = 0 ; + int val7 ; + int ecode7 = 0 ; + int val8 ; + int ecode8 = 0 ; + int val9 ; + int ecode9 = 0 ; + std::vector *tmp10 ; + std::vector *tmp11 ; + std::vector *tmp12 ; + PyObject * obj0 = 0 ; + PyObject * obj1 = 0 ; + PyObject * obj2 = 0 ; + PyObject * obj3 = 0 ; + PyObject * obj4 = 0 ; + PyObject * obj5 = 0 ; + PyObject * obj6 = 0 ; + PyObject * obj7 = 0 ; + PyObject * obj8 = 0 ; + + { + tmp10 = new std::vector(); + arg10 = tmp10; + } + { + tmp11 = new std::vector(); + arg11 = tmp11; + } + { + tmp12 = new std::vector(); + arg12 = tmp12; + } + if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOO:get_csr_submatrix",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8)) SWIG_fail; + ecode1 = SWIG_AsVal_int(obj0, &val1); + if (!SWIG_IsOK(ecode1)) { + SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "get_csr_submatrix" "', argument " "1"" of type '" "int""'"); + } + arg1 = static_cast< int >(val1); + ecode2 = SWIG_AsVal_int(obj1, &val2); + if (!SWIG_IsOK(ecode2)) { + SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "get_csr_submatrix" "', argument " "2"" of type '" "int""'"); + } + arg2 = static_cast< int >(val2); + { + npy_intp size[1] = { + -1 + }; + array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); + if 
(!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1)) SWIG_fail; + arg3 = (int*) array3->data; + } + { + npy_intp size[1] = { + -1 + }; + array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); + if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1)) SWIG_fail; + arg4 = (int*) array4->data; + } + { + npy_intp size[1] = { + -1 + }; + array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_DOUBLE, &is_new_object5); + if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1)) SWIG_fail; + arg5 = (double*) array5->data; + } + ecode6 = SWIG_AsVal_int(obj5, &val6); + if (!SWIG_IsOK(ecode6)) { + SWIG_exception_fail(SWIG_ArgError(ecode6), "in method '" "get_csr_submatrix" "', argument " "6"" of type '" "int""'"); + } + arg6 = static_cast< int >(val6); + ecode7 = SWIG_AsVal_int(obj6, &val7); + if (!SWIG_IsOK(ecode7)) { + SWIG_exception_fail(SWIG_ArgError(ecode7), "in method '" "get_csr_submatrix" "', argument " "7"" of type '" "int""'"); + } + arg7 = static_cast< int >(val7); + ecode8 = SWIG_AsVal_int(obj7, &val8); + if (!SWIG_IsOK(ecode8)) { + SWIG_exception_fail(SWIG_ArgError(ecode8), "in method '" "get_csr_submatrix" "', argument " "8"" of type '" "int""'"); + } + arg8 = static_cast< int >(val8); + ecode9 = SWIG_AsVal_int(obj8, &val9); + if (!SWIG_IsOK(ecode9)) { + SWIG_exception_fail(SWIG_ArgError(ecode9), "in method '" "get_csr_submatrix" "', argument " "9"" of type '" "int""'"); + } + arg9 = static_cast< int >(val9); + get_csr_submatrix(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(double const (*))arg5,arg6,arg7,arg8,arg9,arg10,arg11,arg12); + resultobj = SWIG_Py_Void(); + { + int length = (arg10)->size(); + PyObject *obj = PyArray_FromDims(1, &length,PyArray_INT); + memcpy(PyArray_DATA(obj),&((*(arg10))[0]),sizeof(int)*length); + delete arg10; + resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); + } + { + int length = (arg11)->size(); + PyObject *obj = PyArray_FromDims(1, &length,PyArray_INT); + memcpy(PyArray_DATA(obj),&((*(arg11))[0]),sizeof(int)*length); + delete arg11; + resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); + } + { + int length = (arg12)->size(); + PyObject *obj = PyArray_FromDims(1, &length,PyArray_DOUBLE); + memcpy(PyArray_DATA(obj),&((*(arg12))[0]),sizeof(double)*length); + delete arg12; + resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); + } + { + if (is_new_object3 && array3) Py_DECREF(array3); + } + { + if (is_new_object4 && array4) Py_DECREF(array4); + } + { + if (is_new_object5 && array5) Py_DECREF(array5); + } + return resultobj; +fail: + { + if (is_new_object3 && array3) Py_DECREF(array3); + } + { + if (is_new_object4 && array4) Py_DECREF(array4); + } + { + if (is_new_object5 && array5) Py_DECREF(array5); + } + return NULL; +} + + +SWIGINTERN PyObject *_wrap_get_csr_submatrix__SWIG_5(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { + PyObject *resultobj = 0; + int arg1 ; + int arg2 ; + int *arg3 ; + int *arg4 ; + npy_cfloat_wrapper *arg5 ; + int arg6 ; + int arg7 ; + int arg8 ; + int arg9 ; + std::vector *arg10 = (std::vector *) 0 ; + std::vector *arg11 = (std::vector *) 0 ; + std::vector *arg12 = (std::vector *) 0 ; + int val1 ; + int ecode1 = 0 ; + int val2 ; + int ecode2 = 0 ; + PyArrayObject *array3 = NULL ; + int is_new_object3 ; + PyArrayObject *array4 = NULL ; + int is_new_object4 ; + PyArrayObject *array5 = NULL ; + int is_new_object5 ; + int val6 ; + int ecode6 = 0 ; + int val7 ; + int ecode7 = 0 ; + int val8 
; + int ecode8 = 0 ; + int val9 ; + int ecode9 = 0 ; + std::vector *tmp10 ; + std::vector *tmp11 ; + std::vector *tmp12 ; + PyObject * obj0 = 0 ; + PyObject * obj1 = 0 ; + PyObject * obj2 = 0 ; + PyObject * obj3 = 0 ; + PyObject * obj4 = 0 ; + PyObject * obj5 = 0 ; + PyObject * obj6 = 0 ; + PyObject * obj7 = 0 ; + PyObject * obj8 = 0 ; + + { + tmp10 = new std::vector(); + arg10 = tmp10; + } + { + tmp11 = new std::vector(); + arg11 = tmp11; + } + { + tmp12 = new std::vector(); + arg12 = tmp12; + } + if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOO:get_csr_submatrix",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8)) SWIG_fail; + ecode1 = SWIG_AsVal_int(obj0, &val1); + if (!SWIG_IsOK(ecode1)) { + SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "get_csr_submatrix" "', argument " "1"" of type '" "int""'"); + } + arg1 = static_cast< int >(val1); + ecode2 = SWIG_AsVal_int(obj1, &val2); + if (!SWIG_IsOK(ecode2)) { + SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "get_csr_submatrix" "', argument " "2"" of type '" "int""'"); + } + arg2 = static_cast< int >(val2); + { + npy_intp size[1] = { + -1 + }; + array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); + if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1)) SWIG_fail; + arg3 = (int*) array3->data; + } + { + npy_intp size[1] = { + -1 + }; + array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); + if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1)) SWIG_fail; + arg4 = (int*) array4->data; + } + { + npy_intp size[1] = { + -1 + }; + array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_CFLOAT, &is_new_object5); + if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1)) SWIG_fail; + arg5 = (npy_cfloat_wrapper*) array5->data; + } + ecode6 = SWIG_AsVal_int(obj5, &val6); + if (!SWIG_IsOK(ecode6)) { + SWIG_exception_fail(SWIG_ArgError(ecode6), "in method '" "get_csr_submatrix" "', argument " "6"" of type '" "int""'"); + } + arg6 = static_cast< int >(val6); + ecode7 = SWIG_AsVal_int(obj6, &val7); + if (!SWIG_IsOK(ecode7)) { + SWIG_exception_fail(SWIG_ArgError(ecode7), "in method '" "get_csr_submatrix" "', argument " "7"" of type '" "int""'"); + } + arg7 = static_cast< int >(val7); + ecode8 = SWIG_AsVal_int(obj7, &val8); + if (!SWIG_IsOK(ecode8)) { + SWIG_exception_fail(SWIG_ArgError(ecode8), "in method '" "get_csr_submatrix" "', argument " "8"" of type '" "int""'"); + } + arg8 = static_cast< int >(val8); + ecode9 = SWIG_AsVal_int(obj8, &val9); + if (!SWIG_IsOK(ecode9)) { + SWIG_exception_fail(SWIG_ArgError(ecode9), "in method '" "get_csr_submatrix" "', argument " "9"" of type '" "int""'"); + } + arg9 = static_cast< int >(val9); + get_csr_submatrix(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(npy_cfloat_wrapper const (*))arg5,arg6,arg7,arg8,arg9,arg10,arg11,arg12); + resultobj = SWIG_Py_Void(); + { + int length = (arg10)->size(); + PyObject *obj = PyArray_FromDims(1, &length,PyArray_INT); + memcpy(PyArray_DATA(obj),&((*(arg10))[0]),sizeof(int)*length); + delete arg10; + resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); + } + { + int length = (arg11)->size(); + PyObject *obj = PyArray_FromDims(1, &length,PyArray_INT); + memcpy(PyArray_DATA(obj),&((*(arg11))[0]),sizeof(int)*length); + delete arg11; + resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); + } + { + int length = (arg12)->size(); + PyObject *obj = PyArray_FromDims(1, &length,PyArray_CFLOAT); + 
memcpy(PyArray_DATA(obj),&((*(arg12))[0]),sizeof(npy_cfloat_wrapper)*length); + delete arg12; + resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); + } + { + if (is_new_object3 && array3) Py_DECREF(array3); + } + { + if (is_new_object4 && array4) Py_DECREF(array4); + } + { + if (is_new_object5 && array5) Py_DECREF(array5); + } + return resultobj; +fail: + { + if (is_new_object3 && array3) Py_DECREF(array3); + } + { + if (is_new_object4 && array4) Py_DECREF(array4); + } + { + if (is_new_object5 && array5) Py_DECREF(array5); + } + return NULL; +} + + +SWIGINTERN PyObject *_wrap_get_csr_submatrix__SWIG_6(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { + PyObject *resultobj = 0; + int arg1 ; + int arg2 ; + int *arg3 ; + int *arg4 ; + npy_cdouble_wrapper *arg5 ; + int arg6 ; + int arg7 ; + int arg8 ; + int arg9 ; + std::vector *arg10 = (std::vector *) 0 ; + std::vector *arg11 = (std::vector *) 0 ; + std::vector *arg12 = (std::vector *) 0 ; + int val1 ; + int ecode1 = 0 ; + int val2 ; + int ecode2 = 0 ; + PyArrayObject *array3 = NULL ; + int is_new_object3 ; + PyArrayObject *array4 = NULL ; + int is_new_object4 ; + PyArrayObject *array5 = NULL ; + int is_new_object5 ; + int val6 ; + int ecode6 = 0 ; + int val7 ; + int ecode7 = 0 ; + int val8 ; + int ecode8 = 0 ; + int val9 ; + int ecode9 = 0 ; + std::vector *tmp10 ; + std::vector *tmp11 ; + std::vector *tmp12 ; + PyObject * obj0 = 0 ; + PyObject * obj1 = 0 ; + PyObject * obj2 = 0 ; + PyObject * obj3 = 0 ; + PyObject * obj4 = 0 ; + PyObject * obj5 = 0 ; + PyObject * obj6 = 0 ; + PyObject * obj7 = 0 ; + PyObject * obj8 = 0 ; + + { + tmp10 = new std::vector(); + arg10 = tmp10; + } + { + tmp11 = new std::vector(); + arg11 = tmp11; + } + { + tmp12 = new std::vector(); + arg12 = tmp12; + } + if (!PyArg_ParseTuple(args,(char *)"OOOOOOOOO:get_csr_submatrix",&obj0,&obj1,&obj2,&obj3,&obj4,&obj5,&obj6,&obj7,&obj8)) SWIG_fail; + ecode1 = SWIG_AsVal_int(obj0, &val1); + if (!SWIG_IsOK(ecode1)) { + SWIG_exception_fail(SWIG_ArgError(ecode1), "in method '" "get_csr_submatrix" "', argument " "1"" of type '" "int""'"); + } + arg1 = static_cast< int >(val1); + ecode2 = SWIG_AsVal_int(obj1, &val2); + if (!SWIG_IsOK(ecode2)) { + SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "get_csr_submatrix" "', argument " "2"" of type '" "int""'"); + } + arg2 = static_cast< int >(val2); + { + npy_intp size[1] = { + -1 + }; + array3 = obj_to_array_contiguous_allow_conversion(obj2, PyArray_INT, &is_new_object3); + if (!array3 || !require_dimensions(array3,1) || !require_size(array3,size,1)) SWIG_fail; + arg3 = (int*) array3->data; + } + { + npy_intp size[1] = { + -1 + }; + array4 = obj_to_array_contiguous_allow_conversion(obj3, PyArray_INT, &is_new_object4); + if (!array4 || !require_dimensions(array4,1) || !require_size(array4,size,1)) SWIG_fail; + arg4 = (int*) array4->data; + } + { + npy_intp size[1] = { + -1 + }; + array5 = obj_to_array_contiguous_allow_conversion(obj4, PyArray_CDOUBLE, &is_new_object5); + if (!array5 || !require_dimensions(array5,1) || !require_size(array5,size,1)) SWIG_fail; + arg5 = (npy_cdouble_wrapper*) array5->data; + } + ecode6 = SWIG_AsVal_int(obj5, &val6); + if (!SWIG_IsOK(ecode6)) { + SWIG_exception_fail(SWIG_ArgError(ecode6), "in method '" "get_csr_submatrix" "', argument " "6"" of type '" "int""'"); + } + arg6 = static_cast< int >(val6); + ecode7 = SWIG_AsVal_int(obj6, &val7); + if (!SWIG_IsOK(ecode7)) { + SWIG_exception_fail(SWIG_ArgError(ecode7), "in method '" "get_csr_submatrix" "', argument " "7"" of type '" "int""'"); + 
} + arg7 = static_cast< int >(val7); + ecode8 = SWIG_AsVal_int(obj7, &val8); + if (!SWIG_IsOK(ecode8)) { + SWIG_exception_fail(SWIG_ArgError(ecode8), "in method '" "get_csr_submatrix" "', argument " "8"" of type '" "int""'"); + } + arg8 = static_cast< int >(val8); + ecode9 = SWIG_AsVal_int(obj8, &val9); + if (!SWIG_IsOK(ecode9)) { + SWIG_exception_fail(SWIG_ArgError(ecode9), "in method '" "get_csr_submatrix" "', argument " "9"" of type '" "int""'"); + } + arg9 = static_cast< int >(val9); + get_csr_submatrix(arg1,arg2,(int const (*))arg3,(int const (*))arg4,(npy_cdouble_wrapper const (*))arg5,arg6,arg7,arg8,arg9,arg10,arg11,arg12); + resultobj = SWIG_Py_Void(); + { + int length = (arg10)->size(); + PyObject *obj = PyArray_FromDims(1, &length,PyArray_INT); + memcpy(PyArray_DATA(obj),&((*(arg10))[0]),sizeof(int)*length); + delete arg10; + resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); + } + { + int length = (arg11)->size(); + PyObject *obj = PyArray_FromDims(1, &length,PyArray_INT); + memcpy(PyArray_DATA(obj),&((*(arg11))[0]),sizeof(int)*length); + delete arg11; + resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); + } + { + int length = (arg12)->size(); + PyObject *obj = PyArray_FromDims(1, &length,PyArray_CDOUBLE); + memcpy(PyArray_DATA(obj),&((*(arg12))[0]),sizeof(npy_cdouble_wrapper)*length); + delete arg12; + resultobj = helper_appendToTuple( resultobj, (PyObject *)obj ); + } + { + if (is_new_object3 && array3) Py_DECREF(array3); + } + { + if (is_new_object4 && array4) Py_DECREF(array4); + } + { + if (is_new_object5 && array5) Py_DECREF(array5); + } + return resultobj; +fail: + { + if (is_new_object3 && array3) Py_DECREF(array3); + } + { + if (is_new_object4 && array4) Py_DECREF(array4); + } + { + if (is_new_object5 && array5) Py_DECREF(array5); + } + return NULL; +} + + +SWIGINTERN PyObject *_wrap_get_csr_submatrix(PyObject *self, PyObject *args) { + int argc; + PyObject *argv[10]; + int ii; + + if (!PyTuple_Check(args)) SWIG_fail; + argc = PyObject_Length(args); + for (ii = 0; (ii < argc) && (ii < 9); ii++) { + argv[ii] = PyTuple_GET_ITEM(args,ii); + } + if (argc == 9) { + int _v; + { + int res = SWIG_AsVal_int(argv[0], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + { + int res = SWIG_AsVal_int(argv[1], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + { + _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; + } + if (_v) { + { + _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; + } + if (_v) { + { + _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_INT)) ? 1 : 0; + } + if (_v) { + { + int res = SWIG_AsVal_int(argv[5], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + { + int res = SWIG_AsVal_int(argv[6], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + { + int res = SWIG_AsVal_int(argv[7], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + { + int res = SWIG_AsVal_int(argv[8], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + return _wrap_get_csr_submatrix__SWIG_1(self, args); + } + } + } + } + } + } + } + } + } + } + if (argc == 9) { + int _v; + { + int res = SWIG_AsVal_int(argv[0], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + { + int res = SWIG_AsVal_int(argv[1], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + { + _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 
1 : 0; + } + if (_v) { + { + _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; + } + if (_v) { + { + _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_LONG)) ? 1 : 0; + } + if (_v) { + { + int res = SWIG_AsVal_int(argv[5], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + { + int res = SWIG_AsVal_int(argv[6], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + { + int res = SWIG_AsVal_int(argv[7], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + { + int res = SWIG_AsVal_int(argv[8], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + return _wrap_get_csr_submatrix__SWIG_2(self, args); + } + } + } + } + } + } + } + } + } + } + if (argc == 9) { + int _v; + { + int res = SWIG_AsVal_int(argv[0], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + { + int res = SWIG_AsVal_int(argv[1], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + { + _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; + } + if (_v) { + { + _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; + } + if (_v) { + { + _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_FLOAT)) ? 1 : 0; + } + if (_v) { + { + int res = SWIG_AsVal_int(argv[5], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + { + int res = SWIG_AsVal_int(argv[6], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + { + int res = SWIG_AsVal_int(argv[7], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + { + int res = SWIG_AsVal_int(argv[8], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + return _wrap_get_csr_submatrix__SWIG_3(self, args); + } + } + } + } + } + } + } + } + } + } + if (argc == 9) { + int _v; + { + int res = SWIG_AsVal_int(argv[0], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + { + int res = SWIG_AsVal_int(argv[1], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + { + _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; + } + if (_v) { + { + _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; + } + if (_v) { + { + _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_DOUBLE)) ? 1 : 0; + } + if (_v) { + { + int res = SWIG_AsVal_int(argv[5], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + { + int res = SWIG_AsVal_int(argv[6], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + { + int res = SWIG_AsVal_int(argv[7], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + { + int res = SWIG_AsVal_int(argv[8], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + return _wrap_get_csr_submatrix__SWIG_4(self, args); + } + } + } + } + } + } + } + } + } + } + if (argc == 9) { + int _v; + { + int res = SWIG_AsVal_int(argv[0], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + { + int res = SWIG_AsVal_int(argv[1], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + { + _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; + } + if (_v) { + { + _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; + } + if (_v) { + { + _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_CFLOAT)) ? 
1 : 0; + } + if (_v) { + { + int res = SWIG_AsVal_int(argv[5], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + { + int res = SWIG_AsVal_int(argv[6], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + { + int res = SWIG_AsVal_int(argv[7], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + { + int res = SWIG_AsVal_int(argv[8], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + return _wrap_get_csr_submatrix__SWIG_5(self, args); + } + } + } + } + } + } + } + } + } + } + if (argc == 9) { + int _v; + { + int res = SWIG_AsVal_int(argv[0], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + { + int res = SWIG_AsVal_int(argv[1], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + { + _v = (is_array(argv[2]) && PyArray_CanCastSafely(PyArray_TYPE(argv[2]),PyArray_INT)) ? 1 : 0; + } + if (_v) { + { + _v = (is_array(argv[3]) && PyArray_CanCastSafely(PyArray_TYPE(argv[3]),PyArray_INT)) ? 1 : 0; + } + if (_v) { + { + _v = (is_array(argv[4]) && PyArray_CanCastSafely(PyArray_TYPE(argv[4]),PyArray_CDOUBLE)) ? 1 : 0; + } + if (_v) { + { + int res = SWIG_AsVal_int(argv[5], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + { + int res = SWIG_AsVal_int(argv[6], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + { + int res = SWIG_AsVal_int(argv[7], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + { + int res = SWIG_AsVal_int(argv[8], NULL); + _v = SWIG_CheckState(res); + } + if (_v) { + return _wrap_get_csr_submatrix__SWIG_6(self, args); + } + } + } + } + } + } + } + } + } + } + +fail: + SWIG_SetErrorMsg(PyExc_NotImplementedError,"Wrong number of arguments for overloaded function 'get_csr_submatrix'.\n Possible C/C++ prototypes are:\n get_csr_submatrix<(int,int)>(int const,int const,int const [],int const [],int const [],int const,int const,int const,int const,std::vector *,std::vector *,std::vector *)\n get_csr_submatrix<(int,long)>(int const,int const,int const [],int const [],long const [],int const,int const,int const,int const,std::vector *,std::vector *,std::vector *)\n get_csr_submatrix<(int,float)>(int const,int const,int const [],int const [],float const [],int const,int const,int const,int const,std::vector *,std::vector *,std::vector *)\n get_csr_submatrix<(int,double)>(int const,int const,int const [],int const [],double const [],int const,int const,int const,int const,std::vector *,std::vector *,std::vector *)\n get_csr_submatrix<(int,npy_cfloat_wrapper)>(int const,int const,int const [],int const [],npy_cfloat_wrapper const [],int const,int const,int const,int const,std::vector *,std::vector *,std::vector *)\n get_csr_submatrix<(int,npy_cdouble_wrapper)>(int const,int const,int const [],int const [],npy_cdouble_wrapper const [],int const,int const,int const,int const,std::vector *,std::vector *,std::vector *)\n"); + return NULL; +} + + static PyMethodDef SwigMethods[] = { - { (char *)"extract_csr_diagonal", _wrap_extract_csr_diagonal, METH_VARARGS, (char *)"\n" - "extract_csr_diagonal(int n_row, int n_col, int Ap, int Aj, int Ax, std::vector<(int)> Yx)\n" - "extract_csr_diagonal(int n_row, int n_col, int Ap, int Aj, long Ax, std::vector<(long)> Yx)\n" - "extract_csr_diagonal(int n_row, int n_col, int Ap, int Aj, float Ax, std::vector<(float)> Yx)\n" - "extract_csr_diagonal(int n_row, int n_col, int Ap, int Aj, double Ax, std::vector<(double)> Yx)\n" - "extract_csr_diagonal(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax, \n" - " std::vector<(npy_cfloat_wrapper)> Yx)\n" - "extract_csr_diagonal(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax, 
\n" - " std::vector<(npy_cdouble_wrapper)> Yx)\n" - ""}, - { (char *)"extract_csc_diagonal", _wrap_extract_csc_diagonal, METH_VARARGS, (char *)"\n" - "extract_csc_diagonal(int n_row, int n_col, int Ap, int Aj, int Ax, std::vector<(int)> Yx)\n" - "extract_csc_diagonal(int n_row, int n_col, int Ap, int Aj, long Ax, std::vector<(long)> Yx)\n" - "extract_csc_diagonal(int n_row, int n_col, int Ap, int Aj, float Ax, std::vector<(float)> Yx)\n" - "extract_csc_diagonal(int n_row, int n_col, int Ap, int Aj, double Ax, std::vector<(double)> Yx)\n" - "extract_csc_diagonal(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax, \n" - " std::vector<(npy_cfloat_wrapper)> Yx)\n" - "extract_csc_diagonal(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax, \n" - " std::vector<(npy_cdouble_wrapper)> Yx)\n" - ""}, - { (char *)"csrtocsc", _wrap_csrtocsc, METH_VARARGS, (char *)"\n" - "csrtocsc(int n_row, int n_col, int Ap, int Aj, int Ax, std::vector<(int)> Bp, \n" - " std::vector<(int)> Bi, std::vector<(int)> Bx)\n" - "csrtocsc(int n_row, int n_col, int Ap, int Aj, long Ax, std::vector<(int)> Bp, \n" - " std::vector<(int)> Bi, std::vector<(long)> Bx)\n" - "csrtocsc(int n_row, int n_col, int Ap, int Aj, float Ax, std::vector<(int)> Bp, \n" - " std::vector<(int)> Bi, std::vector<(float)> Bx)\n" - "csrtocsc(int n_row, int n_col, int Ap, int Aj, double Ax, std::vector<(int)> Bp, \n" - " std::vector<(int)> Bi, std::vector<(double)> Bx)\n" - "csrtocsc(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax, \n" - " std::vector<(int)> Bp, std::vector<(int)> Bi, \n" - " std::vector<(npy_cfloat_wrapper)> Bx)\n" - "csrtocsc(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax, \n" - " std::vector<(int)> Bp, std::vector<(int)> Bi, \n" - " std::vector<(npy_cdouble_wrapper)> Bx)\n" - ""}, - { (char *)"csctocsr", _wrap_csctocsr, METH_VARARGS, (char *)"\n" - "csctocsr(int n_row, int n_col, int Ap, int Ai, int Ax, std::vector<(int)> Bp, \n" - " std::vector<(int)> Bj, std::vector<(int)> Bx)\n" - "csctocsr(int n_row, int n_col, int Ap, int Ai, long Ax, std::vector<(int)> Bp, \n" - " std::vector<(int)> Bj, std::vector<(long)> Bx)\n" - "csctocsr(int n_row, int n_col, int Ap, int Ai, float Ax, std::vector<(int)> Bp, \n" - " std::vector<(int)> Bj, std::vector<(float)> Bx)\n" - "csctocsr(int n_row, int n_col, int Ap, int Ai, double Ax, std::vector<(int)> Bp, \n" - " std::vector<(int)> Bj, std::vector<(double)> Bx)\n" - "csctocsr(int n_row, int n_col, int Ap, int Ai, npy_cfloat_wrapper Ax, \n" - " std::vector<(int)> Bp, std::vector<(int)> Bj, \n" - " std::vector<(npy_cfloat_wrapper)> Bx)\n" - "csctocsr(int n_row, int n_col, int Ap, int Ai, npy_cdouble_wrapper Ax, \n" - " std::vector<(int)> Bp, std::vector<(int)> Bj, \n" - " std::vector<(npy_cdouble_wrapper)> Bx)\n" - ""}, - { (char *)"csrtocoo", _wrap_csrtocoo, METH_VARARGS, (char *)"\n" - "csrtocoo(int n_row, int n_col, int Ap, int Aj, int Ax, std::vector<(int)> Bi, \n" - " std::vector<(int)> Bj, std::vector<(int)> Bx)\n" - "csrtocoo(int n_row, int n_col, int Ap, int Aj, long Ax, std::vector<(int)> Bi, \n" - " std::vector<(int)> Bj, std::vector<(long)> Bx)\n" - "csrtocoo(int n_row, int n_col, int Ap, int Aj, float Ax, std::vector<(int)> Bi, \n" - " std::vector<(int)> Bj, std::vector<(float)> Bx)\n" - "csrtocoo(int n_row, int n_col, int Ap, int Aj, double Ax, std::vector<(int)> Bi, \n" - " std::vector<(int)> Bj, std::vector<(double)> Bx)\n" - "csrtocoo(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax, \n" - " std::vector<(int)> Bi, 
std::vector<(int)> Bj, \n" - " std::vector<(npy_cfloat_wrapper)> Bx)\n" - "csrtocoo(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax, \n" - " std::vector<(int)> Bi, std::vector<(int)> Bj, \n" - " std::vector<(npy_cdouble_wrapper)> Bx)\n" - ""}, - { (char *)"csctocoo", _wrap_csctocoo, METH_VARARGS, (char *)"\n" - "csctocoo(int n_row, int n_col, int Ap, int Ai, int Ax, std::vector<(int)> Bi, \n" - " std::vector<(int)> Bj, std::vector<(int)> Bx)\n" - "csctocoo(int n_row, int n_col, int Ap, int Ai, long Ax, std::vector<(int)> Bi, \n" - " std::vector<(int)> Bj, std::vector<(long)> Bx)\n" - "csctocoo(int n_row, int n_col, int Ap, int Ai, float Ax, std::vector<(int)> Bi, \n" - " std::vector<(int)> Bj, std::vector<(float)> Bx)\n" - "csctocoo(int n_row, int n_col, int Ap, int Ai, double Ax, std::vector<(int)> Bi, \n" - " std::vector<(int)> Bj, std::vector<(double)> Bx)\n" - "csctocoo(int n_row, int n_col, int Ap, int Ai, npy_cfloat_wrapper Ax, \n" - " std::vector<(int)> Bi, std::vector<(int)> Bj, \n" - " std::vector<(npy_cfloat_wrapper)> Bx)\n" - "csctocoo(int n_row, int n_col, int Ap, int Ai, npy_cdouble_wrapper Ax, \n" - " std::vector<(int)> Bi, std::vector<(int)> Bj, \n" - " std::vector<(npy_cdouble_wrapper)> Bx)\n" - ""}, - { (char *)"cootocsr", _wrap_cootocsr, METH_VARARGS, (char *)"\n" - "cootocsr(int n_row, int n_col, int NNZ, int Ai, int Aj, int Ax, \n" - " std::vector<(int)> Bp, std::vector<(int)> Bj, \n" - " std::vector<(int)> Bx)\n" - "cootocsr(int n_row, int n_col, int NNZ, int Ai, int Aj, long Ax, \n" - " std::vector<(int)> Bp, std::vector<(int)> Bj, \n" - " std::vector<(long)> Bx)\n" - "cootocsr(int n_row, int n_col, int NNZ, int Ai, int Aj, float Ax, \n" - " std::vector<(int)> Bp, std::vector<(int)> Bj, \n" - " std::vector<(float)> Bx)\n" - "cootocsr(int n_row, int n_col, int NNZ, int Ai, int Aj, double Ax, \n" - " std::vector<(int)> Bp, std::vector<(int)> Bj, \n" - " std::vector<(double)> Bx)\n" - "cootocsr(int n_row, int n_col, int NNZ, int Ai, int Aj, npy_cfloat_wrapper Ax, \n" - " std::vector<(int)> Bp, std::vector<(int)> Bj, \n" - " std::vector<(npy_cfloat_wrapper)> Bx)\n" - "cootocsr(int n_row, int n_col, int NNZ, int Ai, int Aj, npy_cdouble_wrapper Ax, \n" - " std::vector<(int)> Bp, std::vector<(int)> Bj, \n" - " std::vector<(npy_cdouble_wrapper)> Bx)\n" - ""}, - { (char *)"cootocsc", _wrap_cootocsc, METH_VARARGS, (char *)"\n" - "cootocsc(int n_row, int n_col, int NNZ, int Ai, int Aj, int Ax, \n" - " std::vector<(int)> Bp, std::vector<(int)> Bi, \n" - " std::vector<(int)> Bx)\n" - "cootocsc(int n_row, int n_col, int NNZ, int Ai, int Aj, long Ax, \n" - " std::vector<(int)> Bp, std::vector<(int)> Bi, \n" - " std::vector<(long)> Bx)\n" - "cootocsc(int n_row, int n_col, int NNZ, int Ai, int Aj, float Ax, \n" - " std::vector<(int)> Bp, std::vector<(int)> Bi, \n" - " std::vector<(float)> Bx)\n" - "cootocsc(int n_row, int n_col, int NNZ, int Ai, int Aj, double Ax, \n" - " std::vector<(int)> Bp, std::vector<(int)> Bi, \n" - " std::vector<(double)> Bx)\n" - "cootocsc(int n_row, int n_col, int NNZ, int Ai, int Aj, npy_cfloat_wrapper Ax, \n" - " std::vector<(int)> Bp, std::vector<(int)> Bi, \n" - " std::vector<(npy_cfloat_wrapper)> Bx)\n" - "cootocsc(int n_row, int n_col, int NNZ, int Ai, int Aj, npy_cdouble_wrapper Ax, \n" - " std::vector<(int)> Bp, std::vector<(int)> Bi, \n" - " std::vector<(npy_cdouble_wrapper)> Bx)\n" - ""}, - { (char *)"csrmucsr", _wrap_csrmucsr, METH_VARARGS, (char *)"\n" - "csrmucsr(int n_row, int n_col, int Ap, int Aj, int Ax, int Bp, \n" - " int Bj, 
int Bx, std::vector<(int)> Cp, std::vector<(int)> Cj, \n" - " std::vector<(int)> Cx)\n" - "csrmucsr(int n_row, int n_col, int Ap, int Aj, long Ax, int Bp, \n" - " int Bj, long Bx, std::vector<(int)> Cp, std::vector<(int)> Cj, \n" - " std::vector<(long)> Cx)\n" - "csrmucsr(int n_row, int n_col, int Ap, int Aj, float Ax, int Bp, \n" - " int Bj, float Bx, std::vector<(int)> Cp, \n" - " std::vector<(int)> Cj, std::vector<(float)> Cx)\n" - "csrmucsr(int n_row, int n_col, int Ap, int Aj, double Ax, int Bp, \n" - " int Bj, double Bx, std::vector<(int)> Cp, \n" - " std::vector<(int)> Cj, std::vector<(double)> Cx)\n" - "csrmucsr(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax, \n" - " int Bp, int Bj, npy_cfloat_wrapper Bx, \n" - " std::vector<(int)> Cp, std::vector<(int)> Cj, \n" - " std::vector<(npy_cfloat_wrapper)> Cx)\n" - "csrmucsr(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax, \n" - " int Bp, int Bj, npy_cdouble_wrapper Bx, \n" - " std::vector<(int)> Cp, std::vector<(int)> Cj, \n" - " std::vector<(npy_cdouble_wrapper)> Cx)\n" - ""}, - { (char *)"cscmucsc", _wrap_cscmucsc, METH_VARARGS, (char *)"\n" - "cscmucsc(int n_row, int n_col, int Ap, int Ai, int Ax, int Bp, \n" - " int Bi, int Bx, std::vector<(int)> Cp, std::vector<(int)> Ci, \n" - " std::vector<(int)> Cx)\n" - "cscmucsc(int n_row, int n_col, int Ap, int Ai, long Ax, int Bp, \n" - " int Bi, long Bx, std::vector<(int)> Cp, std::vector<(int)> Ci, \n" - " std::vector<(long)> Cx)\n" - "cscmucsc(int n_row, int n_col, int Ap, int Ai, float Ax, int Bp, \n" - " int Bi, float Bx, std::vector<(int)> Cp, \n" - " std::vector<(int)> Ci, std::vector<(float)> Cx)\n" - "cscmucsc(int n_row, int n_col, int Ap, int Ai, double Ax, int Bp, \n" - " int Bi, double Bx, std::vector<(int)> Cp, \n" - " std::vector<(int)> Ci, std::vector<(double)> Cx)\n" - "cscmucsc(int n_row, int n_col, int Ap, int Ai, npy_cfloat_wrapper Ax, \n" - " int Bp, int Bi, npy_cfloat_wrapper Bx, \n" - " std::vector<(int)> Cp, std::vector<(int)> Ci, \n" - " std::vector<(npy_cfloat_wrapper)> Cx)\n" - "cscmucsc(int n_row, int n_col, int Ap, int Ai, npy_cdouble_wrapper Ax, \n" - " int Bp, int Bi, npy_cdouble_wrapper Bx, \n" - " std::vector<(int)> Cp, std::vector<(int)> Ci, \n" - " std::vector<(npy_cdouble_wrapper)> Cx)\n" - ""}, - { (char *)"csrmux", _wrap_csrmux, METH_VARARGS, (char *)"\n" - "csrmux(int n_row, int n_col, int Ap, int Aj, int Ax, int Xx, \n" - " std::vector<(int)> Yx)\n" - "csrmux(int n_row, int n_col, int Ap, int Aj, long Ax, long Xx, \n" - " std::vector<(long)> Yx)\n" - "csrmux(int n_row, int n_col, int Ap, int Aj, float Ax, float Xx, \n" - " std::vector<(float)> Yx)\n" - "csrmux(int n_row, int n_col, int Ap, int Aj, double Ax, double Xx, \n" - " std::vector<(double)> Yx)\n" - "csrmux(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax, \n" - " npy_cfloat_wrapper Xx, std::vector<(npy_cfloat_wrapper)> Yx)\n" - "csrmux(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax, \n" - " npy_cdouble_wrapper Xx, std::vector<(npy_cdouble_wrapper)> Yx)\n" - ""}, - { (char *)"cscmux", _wrap_cscmux, METH_VARARGS, (char *)"\n" - "cscmux(int n_row, int n_col, int Ap, int Ai, int Ax, int Xx, \n" - " std::vector<(int)> Yx)\n" - "cscmux(int n_row, int n_col, int Ap, int Ai, long Ax, long Xx, \n" - " std::vector<(long)> Yx)\n" - "cscmux(int n_row, int n_col, int Ap, int Ai, float Ax, float Xx, \n" - " std::vector<(float)> Yx)\n" - "cscmux(int n_row, int n_col, int Ap, int Ai, double Ax, double Xx, \n" - " std::vector<(double)> Yx)\n" - "cscmux(int 
n_row, int n_col, int Ap, int Ai, npy_cfloat_wrapper Ax, \n" - " npy_cfloat_wrapper Xx, std::vector<(npy_cfloat_wrapper)> Yx)\n" - "cscmux(int n_row, int n_col, int Ap, int Ai, npy_cdouble_wrapper Ax, \n" - " npy_cdouble_wrapper Xx, std::vector<(npy_cdouble_wrapper)> Yx)\n" - ""}, - { (char *)"csr_elmul_csr", _wrap_csr_elmul_csr, METH_VARARGS, (char *)"\n" - "csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, int Ax, int Bp, \n" - " int Bj, int Bx, std::vector<(int)> Cp, std::vector<(int)> Cj, \n" - " std::vector<(int)> Cx)\n" - "csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, long Ax, int Bp, \n" - " int Bj, long Bx, std::vector<(int)> Cp, std::vector<(int)> Cj, \n" - " std::vector<(long)> Cx)\n" - "csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, float Ax, int Bp, \n" - " int Bj, float Bx, std::vector<(int)> Cp, \n" - " std::vector<(int)> Cj, std::vector<(float)> Cx)\n" - "csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, double Ax, int Bp, \n" - " int Bj, double Bx, std::vector<(int)> Cp, \n" - " std::vector<(int)> Cj, std::vector<(double)> Cx)\n" - "csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax, \n" - " int Bp, int Bj, npy_cfloat_wrapper Bx, \n" - " std::vector<(int)> Cp, std::vector<(int)> Cj, \n" - " std::vector<(npy_cfloat_wrapper)> Cx)\n" - "csr_elmul_csr(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax, \n" - " int Bp, int Bj, npy_cdouble_wrapper Bx, \n" - " std::vector<(int)> Cp, std::vector<(int)> Cj, \n" - " std::vector<(npy_cdouble_wrapper)> Cx)\n" - ""}, - { (char *)"csr_eldiv_csr", _wrap_csr_eldiv_csr, METH_VARARGS, (char *)"\n" - "csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, int Ax, int Bp, \n" - " int Bj, int Bx, std::vector<(int)> Cp, std::vector<(int)> Cj, \n" - " std::vector<(int)> Cx)\n" - "csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, long Ax, int Bp, \n" - " int Bj, long Bx, std::vector<(int)> Cp, std::vector<(int)> Cj, \n" - " std::vector<(long)> Cx)\n" - "csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, float Ax, int Bp, \n" - " int Bj, float Bx, std::vector<(int)> Cp, \n" - " std::vector<(int)> Cj, std::vector<(float)> Cx)\n" - "csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, double Ax, int Bp, \n" - " int Bj, double Bx, std::vector<(int)> Cp, \n" - " std::vector<(int)> Cj, std::vector<(double)> Cx)\n" - "csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax, \n" - " int Bp, int Bj, npy_cfloat_wrapper Bx, \n" - " std::vector<(int)> Cp, std::vector<(int)> Cj, \n" - " std::vector<(npy_cfloat_wrapper)> Cx)\n" - "csr_eldiv_csr(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax, \n" - " int Bp, int Bj, npy_cdouble_wrapper Bx, \n" - " std::vector<(int)> Cp, std::vector<(int)> Cj, \n" - " std::vector<(npy_cdouble_wrapper)> Cx)\n" - ""}, - { (char *)"csr_plus_csr", _wrap_csr_plus_csr, METH_VARARGS, (char *)"\n" - "csr_plus_csr(int n_row, int n_col, int Ap, int Aj, int Ax, int Bp, \n" - " int Bj, int Bx, std::vector<(int)> Cp, std::vector<(int)> Cj, \n" - " std::vector<(int)> Cx)\n" - "csr_plus_csr(int n_row, int n_col, int Ap, int Aj, long Ax, int Bp, \n" - " int Bj, long Bx, std::vector<(int)> Cp, std::vector<(int)> Cj, \n" - " std::vector<(long)> Cx)\n" - "csr_plus_csr(int n_row, int n_col, int Ap, int Aj, float Ax, int Bp, \n" - " int Bj, float Bx, std::vector<(int)> Cp, \n" - " std::vector<(int)> Cj, std::vector<(float)> Cx)\n" - "csr_plus_csr(int n_row, int n_col, int Ap, int Aj, double Ax, int Bp, \n" - " int Bj, double Bx, std::vector<(int)> Cp, \n" - " 
std::vector<(int)> Cj, std::vector<(double)> Cx)\n" - "csr_plus_csr(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax, \n" - " int Bp, int Bj, npy_cfloat_wrapper Bx, \n" - " std::vector<(int)> Cp, std::vector<(int)> Cj, \n" - " std::vector<(npy_cfloat_wrapper)> Cx)\n" - "csr_plus_csr(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax, \n" - " int Bp, int Bj, npy_cdouble_wrapper Bx, \n" - " std::vector<(int)> Cp, std::vector<(int)> Cj, \n" - " std::vector<(npy_cdouble_wrapper)> Cx)\n" - ""}, - { (char *)"csr_minus_csr", _wrap_csr_minus_csr, METH_VARARGS, (char *)"\n" - "csr_minus_csr(int n_row, int n_col, int Ap, int Aj, int Ax, int Bp, \n" - " int Bj, int Bx, std::vector<(int)> Cp, std::vector<(int)> Cj, \n" - " std::vector<(int)> Cx)\n" - "csr_minus_csr(int n_row, int n_col, int Ap, int Aj, long Ax, int Bp, \n" - " int Bj, long Bx, std::vector<(int)> Cp, std::vector<(int)> Cj, \n" - " std::vector<(long)> Cx)\n" - "csr_minus_csr(int n_row, int n_col, int Ap, int Aj, float Ax, int Bp, \n" - " int Bj, float Bx, std::vector<(int)> Cp, \n" - " std::vector<(int)> Cj, std::vector<(float)> Cx)\n" - "csr_minus_csr(int n_row, int n_col, int Ap, int Aj, double Ax, int Bp, \n" - " int Bj, double Bx, std::vector<(int)> Cp, \n" - " std::vector<(int)> Cj, std::vector<(double)> Cx)\n" - "csr_minus_csr(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax, \n" - " int Bp, int Bj, npy_cfloat_wrapper Bx, \n" - " std::vector<(int)> Cp, std::vector<(int)> Cj, \n" - " std::vector<(npy_cfloat_wrapper)> Cx)\n" - "csr_minus_csr(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax, \n" - " int Bp, int Bj, npy_cdouble_wrapper Bx, \n" - " std::vector<(int)> Cp, std::vector<(int)> Cj, \n" - " std::vector<(npy_cdouble_wrapper)> Cx)\n" - ""}, - { (char *)"csc_elmul_csc", _wrap_csc_elmul_csc, METH_VARARGS, (char *)"\n" - "csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, int Ax, int Bp, \n" - " int Bi, int Bx, std::vector<(int)> Cp, std::vector<(int)> Ci, \n" - " std::vector<(int)> Cx)\n" - "csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, long Ax, int Bp, \n" - " int Bi, long Bx, std::vector<(int)> Cp, std::vector<(int)> Ci, \n" - " std::vector<(long)> Cx)\n" - "csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, float Ax, int Bp, \n" - " int Bi, float Bx, std::vector<(int)> Cp, \n" - " std::vector<(int)> Ci, std::vector<(float)> Cx)\n" - "csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, double Ax, int Bp, \n" - " int Bi, double Bx, std::vector<(int)> Cp, \n" - " std::vector<(int)> Ci, std::vector<(double)> Cx)\n" - "csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, npy_cfloat_wrapper Ax, \n" - " int Bp, int Bi, npy_cfloat_wrapper Bx, \n" - " std::vector<(int)> Cp, std::vector<(int)> Ci, \n" - " std::vector<(npy_cfloat_wrapper)> Cx)\n" - "csc_elmul_csc(int n_row, int n_col, int Ap, int Ai, npy_cdouble_wrapper Ax, \n" - " int Bp, int Bi, npy_cdouble_wrapper Bx, \n" - " std::vector<(int)> Cp, std::vector<(int)> Ci, \n" - " std::vector<(npy_cdouble_wrapper)> Cx)\n" - ""}, - { (char *)"csc_eldiv_csc", _wrap_csc_eldiv_csc, METH_VARARGS, (char *)"\n" - "csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, int Ax, int Bp, \n" - " int Bi, int Bx, std::vector<(int)> Cp, std::vector<(int)> Ci, \n" - " std::vector<(int)> Cx)\n" - "csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, long Ax, int Bp, \n" - " int Bi, long Bx, std::vector<(int)> Cp, std::vector<(int)> Ci, \n" - " std::vector<(long)> Cx)\n" - "csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, float Ax, int Bp, \n" - " int 
Bi, float Bx, std::vector<(int)> Cp, \n" - " std::vector<(int)> Ci, std::vector<(float)> Cx)\n" - "csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, double Ax, int Bp, \n" - " int Bi, double Bx, std::vector<(int)> Cp, \n" - " std::vector<(int)> Ci, std::vector<(double)> Cx)\n" - "csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, npy_cfloat_wrapper Ax, \n" - " int Bp, int Bi, npy_cfloat_wrapper Bx, \n" - " std::vector<(int)> Cp, std::vector<(int)> Ci, \n" - " std::vector<(npy_cfloat_wrapper)> Cx)\n" - "csc_eldiv_csc(int n_row, int n_col, int Ap, int Ai, npy_cdouble_wrapper Ax, \n" - " int Bp, int Bi, npy_cdouble_wrapper Bx, \n" - " std::vector<(int)> Cp, std::vector<(int)> Ci, \n" - " std::vector<(npy_cdouble_wrapper)> Cx)\n" - ""}, - { (char *)"csc_plus_csc", _wrap_csc_plus_csc, METH_VARARGS, (char *)"\n" - "csc_plus_csc(int n_row, int n_col, int Ap, int Ai, int Ax, int Bp, \n" - " int Bi, int Bx, std::vector<(int)> Cp, std::vector<(int)> Ci, \n" - " std::vector<(int)> Cx)\n" - "csc_plus_csc(int n_row, int n_col, int Ap, int Ai, long Ax, int Bp, \n" - " int Bi, long Bx, std::vector<(int)> Cp, std::vector<(int)> Ci, \n" - " std::vector<(long)> Cx)\n" - "csc_plus_csc(int n_row, int n_col, int Ap, int Ai, float Ax, int Bp, \n" - " int Bi, float Bx, std::vector<(int)> Cp, \n" - " std::vector<(int)> Ci, std::vector<(float)> Cx)\n" - "csc_plus_csc(int n_row, int n_col, int Ap, int Ai, double Ax, int Bp, \n" - " int Bi, double Bx, std::vector<(int)> Cp, \n" - " std::vector<(int)> Ci, std::vector<(double)> Cx)\n" - "csc_plus_csc(int n_row, int n_col, int Ap, int Ai, npy_cfloat_wrapper Ax, \n" - " int Bp, int Bi, npy_cfloat_wrapper Bx, \n" - " std::vector<(int)> Cp, std::vector<(int)> Ci, \n" - " std::vector<(npy_cfloat_wrapper)> Cx)\n" - "csc_plus_csc(int n_row, int n_col, int Ap, int Ai, npy_cdouble_wrapper Ax, \n" - " int Bp, int Bi, npy_cdouble_wrapper Bx, \n" - " std::vector<(int)> Cp, std::vector<(int)> Ci, \n" - " std::vector<(npy_cdouble_wrapper)> Cx)\n" - ""}, - { (char *)"csc_minus_csc", _wrap_csc_minus_csc, METH_VARARGS, (char *)"\n" - "csc_minus_csc(int n_row, int n_col, int Ap, int Ai, int Ax, int Bp, \n" - " int Bi, int Bx, std::vector<(int)> Cp, std::vector<(int)> Ci, \n" - " std::vector<(int)> Cx)\n" - "csc_minus_csc(int n_row, int n_col, int Ap, int Ai, long Ax, int Bp, \n" - " int Bi, long Bx, std::vector<(int)> Cp, std::vector<(int)> Ci, \n" - " std::vector<(long)> Cx)\n" - "csc_minus_csc(int n_row, int n_col, int Ap, int Ai, float Ax, int Bp, \n" - " int Bi, float Bx, std::vector<(int)> Cp, \n" - " std::vector<(int)> Ci, std::vector<(float)> Cx)\n" - "csc_minus_csc(int n_row, int n_col, int Ap, int Ai, double Ax, int Bp, \n" - " int Bi, double Bx, std::vector<(int)> Cp, \n" - " std::vector<(int)> Ci, std::vector<(double)> Cx)\n" - "csc_minus_csc(int n_row, int n_col, int Ap, int Ai, npy_cfloat_wrapper Ax, \n" - " int Bp, int Bi, npy_cfloat_wrapper Bx, \n" - " std::vector<(int)> Cp, std::vector<(int)> Ci, \n" - " std::vector<(npy_cfloat_wrapper)> Cx)\n" - "csc_minus_csc(int n_row, int n_col, int Ap, int Ai, npy_cdouble_wrapper Ax, \n" - " int Bp, int Bi, npy_cdouble_wrapper Bx, \n" - " std::vector<(int)> Cp, std::vector<(int)> Ci, \n" - " std::vector<(npy_cdouble_wrapper)> Cx)\n" - ""}, - { (char *)"spdiags", _wrap_spdiags, METH_VARARGS, (char *)"\n" - "spdiags(int n_row, int n_col, int n_diag, int offsets, int diags, \n" - " std::vector<(int)> Ap, std::vector<(int)> Ai, \n" - " std::vector<(int)> Ax)\n" - "spdiags(int n_row, int n_col, int n_diag, int offsets, long diags, 
\n" - " std::vector<(int)> Ap, std::vector<(int)> Ai, \n" - " std::vector<(long)> Ax)\n" - "spdiags(int n_row, int n_col, int n_diag, int offsets, float diags, \n" - " std::vector<(int)> Ap, std::vector<(int)> Ai, \n" - " std::vector<(float)> Ax)\n" - "spdiags(int n_row, int n_col, int n_diag, int offsets, double diags, \n" - " std::vector<(int)> Ap, std::vector<(int)> Ai, \n" - " std::vector<(double)> Ax)\n" - "spdiags(int n_row, int n_col, int n_diag, int offsets, npy_cfloat_wrapper diags, \n" - " std::vector<(int)> Ap, \n" - " std::vector<(int)> Ai, std::vector<(npy_cfloat_wrapper)> Ax)\n" - "spdiags(int n_row, int n_col, int n_diag, int offsets, npy_cdouble_wrapper diags, \n" - " std::vector<(int)> Ap, \n" - " std::vector<(int)> Ai, std::vector<(npy_cdouble_wrapper)> Ax)\n" - ""}, - { (char *)"csrtodense", _wrap_csrtodense, METH_VARARGS, (char *)"\n" - "csrtodense(int n_row, int n_col, int Ap, int Aj, int Ax, int Mx)\n" - "csrtodense(int n_row, int n_col, int Ap, int Aj, long Ax, long Mx)\n" - "csrtodense(int n_row, int n_col, int Ap, int Aj, float Ax, float Mx)\n" - "csrtodense(int n_row, int n_col, int Ap, int Aj, double Ax, double Mx)\n" - "csrtodense(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax, \n" - " npy_cfloat_wrapper Mx)\n" - "csrtodense(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax, \n" - " npy_cdouble_wrapper Mx)\n" - ""}, - { (char *)"densetocsr", _wrap_densetocsr, METH_VARARGS, (char *)"\n" - "densetocsr(int n_row, int n_col, int Mx, std::vector<(int)> Ap, \n" - " std::vector<(int)> Aj, std::vector<(int)> Ax)\n" - "densetocsr(int n_row, int n_col, long Mx, std::vector<(int)> Ap, \n" - " std::vector<(int)> Aj, std::vector<(long)> Ax)\n" - "densetocsr(int n_row, int n_col, float Mx, std::vector<(int)> Ap, \n" - " std::vector<(int)> Aj, std::vector<(float)> Ax)\n" - "densetocsr(int n_row, int n_col, double Mx, std::vector<(int)> Ap, \n" - " std::vector<(int)> Aj, std::vector<(double)> Ax)\n" - "densetocsr(int n_row, int n_col, npy_cfloat_wrapper Mx, std::vector<(int)> Ap, \n" - " std::vector<(int)> Aj, std::vector<(npy_cfloat_wrapper)> Ax)\n" - "densetocsr(int n_row, int n_col, npy_cdouble_wrapper Mx, std::vector<(int)> Ap, \n" - " std::vector<(int)> Aj, std::vector<(npy_cdouble_wrapper)> Ax)\n" - ""}, - { (char *)"sort_csr_indices", _wrap_sort_csr_indices, METH_VARARGS, (char *)"\n" - "sort_csr_indices(int n_row, int n_col, int Ap, int Aj, int Ax)\n" - "sort_csr_indices(int n_row, int n_col, int Ap, int Aj, long Ax)\n" - "sort_csr_indices(int n_row, int n_col, int Ap, int Aj, float Ax)\n" - "sort_csr_indices(int n_row, int n_col, int Ap, int Aj, double Ax)\n" - "sort_csr_indices(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax)\n" - "sort_csr_indices(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax)\n" - ""}, - { (char *)"sort_csc_indices", _wrap_sort_csc_indices, METH_VARARGS, (char *)"\n" - "sort_csc_indices(int n_row, int n_col, int Ap, int Ai, int Ax)\n" - "sort_csc_indices(int n_row, int n_col, int Ap, int Ai, long Ax)\n" - "sort_csc_indices(int n_row, int n_col, int Ap, int Ai, float Ax)\n" - "sort_csc_indices(int n_row, int n_col, int Ap, int Ai, double Ax)\n" - "sort_csc_indices(int n_row, int n_col, int Ap, int Ai, npy_cfloat_wrapper Ax)\n" - "sort_csc_indices(int n_row, int n_col, int Ap, int Ai, npy_cdouble_wrapper Ax)\n" - ""}, - { (char *)"sum_csr_duplicates", _wrap_sum_csr_duplicates, METH_VARARGS, (char *)"\n" - "sum_csr_duplicates(int n_row, int n_col, int Ap, int Aj, int Ax)\n" - "sum_csr_duplicates(int 
n_row, int n_col, int Ap, int Aj, long Ax)\n" - "sum_csr_duplicates(int n_row, int n_col, int Ap, int Aj, float Ax)\n" - "sum_csr_duplicates(int n_row, int n_col, int Ap, int Aj, double Ax)\n" - "sum_csr_duplicates(int n_row, int n_col, int Ap, int Aj, npy_cfloat_wrapper Ax)\n" - "sum_csr_duplicates(int n_row, int n_col, int Ap, int Aj, npy_cdouble_wrapper Ax)\n" - ""}, - { (char *)"sum_csc_duplicates", _wrap_sum_csc_duplicates, METH_VARARGS, (char *)"\n" - "sum_csc_duplicates(int n_row, int n_col, int Ap, int Ai, int Ax)\n" - "sum_csc_duplicates(int n_row, int n_col, int Ap, int Ai, long Ax)\n" - "sum_csc_duplicates(int n_row, int n_col, int Ap, int Ai, float Ax)\n" - "sum_csc_duplicates(int n_row, int n_col, int Ap, int Ai, double Ax)\n" - "sum_csc_duplicates(int n_row, int n_col, int Ap, int Ai, npy_cfloat_wrapper Ax)\n" - "sum_csc_duplicates(int n_row, int n_col, int Ap, int Ai, npy_cdouble_wrapper Ax)\n" - ""}, + { (char *)"extract_csr_diagonal", _wrap_extract_csr_diagonal, METH_VARARGS, NULL}, + { (char *)"extract_csc_diagonal", _wrap_extract_csc_diagonal, METH_VARARGS, NULL}, + { (char *)"csrtocsc", _wrap_csrtocsc, METH_VARARGS, NULL}, + { (char *)"csctocsr", _wrap_csctocsr, METH_VARARGS, NULL}, + { (char *)"csrtocoo", _wrap_csrtocoo, METH_VARARGS, NULL}, + { (char *)"csctocoo", _wrap_csctocoo, METH_VARARGS, NULL}, + { (char *)"cootocsr", _wrap_cootocsr, METH_VARARGS, NULL}, + { (char *)"cootocsc", _wrap_cootocsc, METH_VARARGS, NULL}, + { (char *)"csrmucsr", _wrap_csrmucsr, METH_VARARGS, NULL}, + { (char *)"cscmucsc", _wrap_cscmucsc, METH_VARARGS, NULL}, + { (char *)"csrmux", _wrap_csrmux, METH_VARARGS, NULL}, + { (char *)"cscmux", _wrap_cscmux, METH_VARARGS, NULL}, + { (char *)"csr_elmul_csr", _wrap_csr_elmul_csr, METH_VARARGS, NULL}, + { (char *)"csr_eldiv_csr", _wrap_csr_eldiv_csr, METH_VARARGS, NULL}, + { (char *)"csr_plus_csr", _wrap_csr_plus_csr, METH_VARARGS, NULL}, + { (char *)"csr_minus_csr", _wrap_csr_minus_csr, METH_VARARGS, NULL}, + { (char *)"csc_elmul_csc", _wrap_csc_elmul_csc, METH_VARARGS, NULL}, + { (char *)"csc_eldiv_csc", _wrap_csc_eldiv_csc, METH_VARARGS, NULL}, + { (char *)"csc_plus_csc", _wrap_csc_plus_csc, METH_VARARGS, NULL}, + { (char *)"csc_minus_csc", _wrap_csc_minus_csc, METH_VARARGS, NULL}, + { (char *)"spdiags", _wrap_spdiags, METH_VARARGS, NULL}, + { (char *)"csrtodense", _wrap_csrtodense, METH_VARARGS, NULL}, + { (char *)"densetocsr", _wrap_densetocsr, METH_VARARGS, NULL}, + { (char *)"sort_csr_indices", _wrap_sort_csr_indices, METH_VARARGS, NULL}, + { (char *)"sort_csc_indices", _wrap_sort_csc_indices, METH_VARARGS, NULL}, + { (char *)"sum_csr_duplicates", _wrap_sum_csr_duplicates, METH_VARARGS, NULL}, + { (char *)"sum_csc_duplicates", _wrap_sum_csc_duplicates, METH_VARARGS, NULL}, + { (char *)"get_csr_submatrix", _wrap_get_csr_submatrix, METH_VARARGS, NULL}, { NULL, NULL, 0, NULL } }; Modified: trunk/scipy/sparse/tests/test_sparse.py =================================================================== --- trunk/scipy/sparse/tests/test_sparse.py 2007-09-04 22:08:22 UTC (rev 3301) +++ trunk/scipy/sparse/tests/test_sparse.py 2007-09-05 15:59:21 UTC (rev 3302) @@ -641,6 +641,19 @@ for ic in range( asp.shape[1] ): assert_equal( asp[ir, ic], bsp[ir, ic] ) + def check_get_submatrix(self): + a = sp.csr_matrix( array([[1,2,3],[1,2,3],[0,2,0]]) ) + i0 = slice( 0, 2 ) + i1 = slice( 1, 3 ) + b = a.get_submatrix( i0, i1 ) + + aa = a.toarray() + ab = b.toarray() + + assert b.dtype == a.dtype + assert b.shape == (2,2) + assert_equal( ab, aa[i0,i1] ) + class 
test_csc(_test_cs, _test_vert_slicing, _test_arith, NumpyTestCase): spmatrix = csc_matrix From scipy-svn at scipy.org Wed Sep 5 13:37:14 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 5 Sep 2007 12:37:14 -0500 (CDT) Subject: [Scipy-svn] r3303 - in trunk/scipy/sparse: . tests Message-ID: <20070905173714.15A3639C25A@new.scipy.org> Author: wnbell Date: 2007-09-05 12:37:09 -0500 (Wed, 05 Sep 2007) New Revision: 3303 Modified: trunk/scipy/sparse/sparse.py trunk/scipy/sparse/tests/test_sparse.py Log: added tolil() to spmatrix fixed typo in test_sparse Modified: trunk/scipy/sparse/sparse.py =================================================================== --- trunk/scipy/sparse/sparse.py 2007-09-05 15:59:21 UTC (rev 3302) +++ trunk/scipy/sparse/sparse.py 2007-09-05 17:37:09 UTC (rev 3303) @@ -419,6 +419,9 @@ csc = self.tocsc() return csc.tocoo() + def tolil(self): + return lil_matrix(self.tocsr()) + def toself(self, copy=False): if copy: new = self.copy() Modified: trunk/scipy/sparse/tests/test_sparse.py =================================================================== --- trunk/scipy/sparse/tests/test_sparse.py 2007-09-05 15:59:21 UTC (rev 3302) +++ trunk/scipy/sparse/tests/test_sparse.py 2007-09-05 17:37:09 UTC (rev 3303) @@ -642,7 +642,7 @@ assert_equal( asp[ir, ic], bsp[ir, ic] ) def check_get_submatrix(self): - a = sp.csr_matrix( array([[1,2,3],[1,2,3],[0,2,0]]) ) + a = csr_matrix( array([[1,2,3],[1,2,3],[0,2,0]]) ) i0 = slice( 0, 2 ) i1 = slice( 1, 3 ) b = a.get_submatrix( i0, i1 ) From scipy-svn at scipy.org Wed Sep 5 20:46:27 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 5 Sep 2007 19:46:27 -0500 (CDT) Subject: [Scipy-svn] r3304 - in trunk/scipy/sandbox/maskedarray: . tests Message-ID: <20070906004627.A751E39C115@new.scipy.org> Author: pierregm Date: 2007-09-05 19:46:24 -0500 (Wed, 05 Sep 2007) New Revision: 3304 Modified: trunk/scipy/sandbox/maskedarray/core.py trunk/scipy/sandbox/maskedarray/tests/test_core.py Log: maskedarray core : fixed setting of ._fill_value in __new__ core : fixed the fill_value in getitem Modified: trunk/scipy/sandbox/maskedarray/core.py =================================================================== --- trunk/scipy/sandbox/maskedarray/core.py 2007-09-05 17:37:09 UTC (rev 3303) +++ trunk/scipy/sandbox/maskedarray/core.py 2007-09-06 00:46:24 UTC (rev 3304) @@ -31,7 +31,7 @@ 'bitwise_and', 'bitwise_or', 'bitwise_xor', 'ceil', 'choose', 'compressed', 'concatenate', 'conjugate', 'cos', 'cosh', 'count', - 'diagonal', 'divide', 'dump', 'dumps', + 'default_fill_value', 'diagonal', 'divide', 'dump', 'dumps', 'empty', 'empty_like', 'equal', 'exp', 'fabs', 'fmod', 'filled', 'floor', 'floor_divide', 'getmask', 'getmaskarray', 'greater', 'greater_equal', 'hypot', @@ -111,12 +111,12 @@ 'V' : '???', } max_filler = ntypes._minvals -max_filler.update([(k,-numeric.inf) for k in [numpy.float32, numpy.float64]]) +max_filler.update([(k,-numpy.inf) for k in [numpy.float32, numpy.float64]]) min_filler = ntypes._maxvals -min_filler.update([(k,numeric.inf) for k in [numpy.float32, numpy.float64]]) +min_filler.update([(k,numpy.inf) for k in [numpy.float32, numpy.float64]]) if 'float128' in ntypes.typeDict: - max_filler.update([(numpy.float128,-numeric.inf)]) - min_filler.update([(numpy.float128, numeric.inf)]) + max_filler.update([(numpy.float128,-numpy.inf)]) + min_filler.update([(numpy.float128, numpy.inf)]) def default_fill_value(obj): @@ -237,6 +237,21 @@ rcls = cls return rcls +def get_data(a, copy=False, subok=True): + 
"""Return the ._data part of a (if any), or a as a ndarray.""" + if hasattr(a,'_data'): + if copy: + if subok: + return a._data.copy() + return a._data.view(ndarray).copy() + elif subok: + return a._data + return a._data.view(ndarray) + return numpy.ndarray(a, copy=copy, subok=subok) + + + + #####-------------------------------------------------------------------------- #---- --- Ufuncs --- #####-------------------------------------------------------------------------- @@ -1033,9 +1048,11 @@ # Update fill_value....... - _data._fill_value = getattr(data, '_fill_value', fill_value) - if _data._fill_value is None: - _data._fill_value = default_fill_value(_data) + if fill_value is None: + _data._fill_value = getattr(data, '_fill_value', + default_fill_value(_data)) + else: + _data._fill_value = fill_value # Process extra options .. _data._hardmask = hard_mask _data._smallmask = small_mask @@ -1115,6 +1132,8 @@ # Not a scalar: make sure that dout is a MA dout = dout.view(type(self)) dout._smallmask = self._smallmask + dout._hardmask = self._hardmask + dout._fill_value = self._fill_value if m is not nomask: # use _set_mask to take care of the shape dout.__setmask__(m[indx]) @@ -1275,7 +1294,8 @@ else: result = self._data.copy() try: - result[m] = fill_value + numpy.putmask(result, m, fill_value) + #result[m] = fill_value except (TypeError, AttributeError): fill_value = numeric.array(fill_value, dtype=object) d = result.astype(object) @@ -2661,7 +2681,7 @@ if __name__ == '__main__': from testutils import assert_equal, assert_almost_equal - if 1: + if 0: x = arange(10) assert(x.ctypes.data == x.filled().ctypes.data) if 0: @@ -2680,7 +2700,7 @@ x = array([0,0], mask=0) (I,J) = (x.ctypes.data, x.filled().ctypes.data) print (I,J) - if 1: + if 0: x = array(numpy.arange(12)) x[[1,-2]] = masked xlist = x.tolist() @@ -2694,5 +2714,34 @@ assert_equal(xlist[1],[4,5,6,7]) assert_equal(xlist[2],[8,9,None,11]) + if 0: + xl = numpy.random.rand(100,100) + yl = numpy.random.rand(100,100) + maskx = xl > 0.8 + masky = yl < 0.2 + mxl = array(xl, mask=maskx) + myl = array(yl, mask=masky) - \ No newline at end of file + zz = mxl + myl + + if 0: + print "x is ndarray" + x = array(numpy.random.rand(50,50)) + print "set x._mask" + x[x > 0.8] = masked + print "set y" + y = array(numpy.random.rand(50,50)) + print "set y._mask" + ymask = y._data < 0.2 + print "set y._mask" + y.__setmask__(ymask) + print "add x + y" + z = x + y + + r.__setmask__() + + if 1: + "Check that we don't lose the fill_value" + data = masked_array([1,2,3],fill_value=-999) + series = data[[0,2,1]] + assert_equal(series._fill_value, data._fill_value) Modified: trunk/scipy/sandbox/maskedarray/tests/test_core.py =================================================================== --- trunk/scipy/sandbox/maskedarray/tests/test_core.py 2007-09-05 17:37:09 UTC (rev 3303) +++ trunk/scipy/sandbox/maskedarray/tests/test_core.py 2007-09-06 00:46:24 UTC (rev 3304) @@ -728,6 +728,12 @@ assert_equal(a_pickled._mask, a._mask) assert_equal(a_pickled, a) assert(isinstance(a_pickled._data,numpy.matrix)) + # + def check_fillvalue(self): + "Check that we don't lose the fill_value" + data = masked_array([1,2,3],fill_value=-999) + series = data[[0,2,1]] + assert_equal(series._fill_value, data._fill_value) #............................................................................... 
From scipy-svn at scipy.org Wed Sep 5 21:13:35 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 5 Sep 2007 20:13:35 -0500 (CDT) Subject: [Scipy-svn] r3305 - in trunk/scipy/sandbox/timeseries: . tests Message-ID: <20070906011335.00BAB39C017@new.scipy.org> Author: pierregm Date: 2007-09-05 20:13:29 -0500 (Wed, 05 Sep 2007) New Revision: 3305 Added: trunk/scipy/sandbox/timeseries/tests/test_extras.py trunk/scipy/sandbox/timeseries/textras.py Modified: trunk/scipy/sandbox/timeseries/tdates.py trunk/scipy/sandbox/timeseries/tests/test_dates.py trunk/scipy/sandbox/timeseries/tests/test_multitimeseries.py trunk/scipy/sandbox/timeseries/tests/test_timeseries.py trunk/scipy/sandbox/timeseries/tmulti.py trunk/scipy/sandbox/timeseries/tseries.py Log: timeseries: tseries : * simplified TimeSeries.__new__ to only accept DateArrays as dates. To create a new TimeSeries object, use time_series extras : introducing isleapyear, count_missing, accept_atmost_missing Modified: trunk/scipy/sandbox/timeseries/tdates.py =================================================================== --- trunk/scipy/sandbox/timeseries/tdates.py 2007-09-06 00:46:24 UTC (rev 3304) +++ trunk/scipy/sandbox/timeseries/tdates.py 2007-09-06 01:13:29 UTC (rev 3305) @@ -214,6 +214,7 @@ def __getitem__(self, indx): reset_full = True + # Determine what kind of index is used if isinstance(indx, Date): indx = self.find_dates(indx) reset_full = False @@ -222,7 +223,12 @@ indx = self.find_dates(indx) except AttributeError: pass + # Select the data r = ndarray.__getitem__(self, indx) + # Select the corresponding unsorted indices (if needed) + if self._unsorted is not None: + unsorted = self._unsorted[indx] + # Case 1. A simple integer if isinstance(r, (generic, int)): return Date(self.freq, value=r) elif hasattr(r, 'size') and r.size == 1: @@ -679,41 +685,42 @@ if __name__ == '__main__': import maskedarray.testutils from maskedarray.testutils import assert_equal - if 0: - dlist = ['2007-%02i' % i for i in range(1,5)+range(7,13)] - mdates = date_array_fromlist(dlist, 'M') - # Using an integer - assert_equal(mdates[0].value, 24073) - assert_equal(mdates[-1].value, 24084) - # Using a date - lag = mdates.find_dates(mdates[0]) - print mdates[lag] - assert_equal(mdates[lag], mdates[0]) - if 0: - hodie = today('D') - D = DateArray(today('D')) - assert_equal(D.freq, 6000) - if 0: - freqs = [x[0] for x in corelib.freq_dict.values() if x[0] != 'U'] - print freqs - for f in freqs: - print f - today = thisday(f) - assert(Date(freq=f, value=today.value) == today) - if 0: - D = date_array(freq='U', start_date=Date('U',1), length=10) - if 0: - dlist = ['2007-01-%02i' % i for i in (1,2,4,5,7,8,10,11,13)] - ords = numpy.fromiter((DateTimeFromString(s).toordinal() for s in dlist), - float_) - if 0: - "Tests the automatic sorting of dates." 
- D = date_array_fromlist(dlist=['2006-01','2005-01','2004-01'],freq='M') - assert_equal(D.view(ndarray), [24037, 24049, 24061]) +# if 0: +# dlist = ['2007-%02i' % i for i in range(1,5)+range(7,13)] +# mdates = date_array_fromlist(dlist, 'M') +# # Using an integer +# assert_equal(mdates[0].value, 24073) +# assert_equal(mdates[-1].value, 24084) +# # Using a date +# lag = mdates.find_dates(mdates[0]) +# print mdates[lag] +# assert_equal(mdates[lag], mdates[0]) +# if 0: +# hodie = today('D') +# D = DateArray(today('D')) +# assert_equal(D.freq, 6000) +# if 0: +# freqs = [x[0] for x in corelib.freq_dict.values() if x[0] != 'U'] +# print freqs +# for f in freqs: +# print f +# today = thisday(f) +# assert(Date(freq=f, value=today.value) == today) +# if 0: +# D = date_array(freq='U', start_date=Date('U',1), length=10) +# if 0: +# dlist = ['2007-01-%02i' % i for i in (1,2,4,5,7,8,10,11,13)] +# ords = numpy.fromiter((DateTimeFromString(s).toordinal() for s in dlist), +# float_) +# if 0: +# "Tests the automatic sorting of dates." +# D = date_array_fromlist(dlist=['2006-01','2005-01','2004-01'],freq='M') +# assert_equal(D.view(ndarray), [24037, 24049, 24061]) if 1: dlist = ['2007-%02i' % i for i in range(1,5)+range(7,13)] mdates = date_array_fromlist(dlist, 'M') - print mdates.tostr() - \ No newline at end of file + if 2: + dlist = ['2007-01','2007-03','2007-04','2007-02'] + mdates = date_array_fromlist(dlist, 'M') Modified: trunk/scipy/sandbox/timeseries/tests/test_dates.py =================================================================== --- trunk/scipy/sandbox/timeseries/tests/test_dates.py 2007-09-06 00:46:24 UTC (rev 3304) +++ trunk/scipy/sandbox/timeseries/tests/test_dates.py 2007-09-06 01:13:29 UTC (rev 3305) @@ -28,7 +28,8 @@ import timeseries as ts from timeseries import const as C from timeseries.parser import DateFromString, DateTimeFromString -from timeseries import * +from timeseries import Date, DateArray,\ + thisday, today, date_array, date_array_fromlist from timeseries.cseries import freq_dict Added: trunk/scipy/sandbox/timeseries/tests/test_extras.py =================================================================== --- trunk/scipy/sandbox/timeseries/tests/test_extras.py 2007-09-06 00:46:24 UTC (rev 3304) +++ trunk/scipy/sandbox/timeseries/tests/test_extras.py 2007-09-06 01:13:29 UTC (rev 3305) @@ -0,0 +1,84 @@ +# pylint: disable-msg=W0611, W0612, W0511,R0201 +"""Tests suite for MaskedArray. +Adapted from the original test_ma by Pierre Gerard-Marchant + +:author: Pierre Gerard-Marchant & Matt Knox +:contact: pierregm_at_uga_dot_edu & mattknox_ca_at_hotmail_dot_com +:version: $Id$ +""" +__author__ = "Pierre GF Gerard-Marchant & Matt Knox ($Author$)" +__version__ = '1.0' +__revision__ = "$Revision$" +__date__ = '$Date$' + + +import numpy +from numpy.testing import NumpyTest, NumpyTestCase +import maskedarray +from maskedarray import masked +from maskedarray.testutils import assert_equal, assert_almost_equal + +from timeseries import time_series, Date +from timeseries import extras +from timeseries.extras import * + +#.............................................................................. +class test_misc(NumpyTestCase): + "Base test class for MaskedArrays." + def __init__(self, *args, **kwds): + NumpyTestCase.__init__(self, *args, **kwds) + # + def test_leapyear(self): + leap = isleapyear([1900,1901,1902,1903,1904,2000,2001,2002,2003,2004]) + assert_equal(leap, [0,0,0,0,1,1,0,0,0,1]) + +#.............................................................................. 
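(A brief aside on the expected values in test_leapyear above: the rule being exercised is the plain Gregorian condition, implemented in textras.py further down in this same commit as

    numpy.logical_or(year % 400 == 0,
                     numpy.logical_and(year % 4 == 0, year % 100 > 0))

so 1900 fails the test, being divisible by 100 but not by 400, while 1904, 2000 and 2004 pass, which is exactly the [0,0,0,0,1,1,0,0,0,1] pattern asserted.)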
+class test_countmissing(NumpyTestCase): + # + def __init__(self, *args, **kwds): + NumpyTestCase.__init__(self, *args, **kwds) + data = time_series(numpy.arange(731), + start_date=Date(string='2003-01-01', freq='D'), + freq='D') + self.data = data + + def test_count_missing(self): + data = self.data + assert_equal(count_missing(data), 0) + assert_equal(count_missing(data.convert('A')), (0,0)) + assert_equal(count_missing(data.convert('M')), [0]*24) + # + series = data.copy() + series[numpy.logical_not(data.day % 10)] = masked + assert_equal(count_missing(series), 70) + assert_equal(count_missing(series.convert('A')), (35,35)) + assert_equal(count_missing(series.convert('M')), + [3,2,3,3,3,3,3,3,3,3,3,3]*2) + # + series[series.day == 31] = masked + assert_equal(count_missing(series), 84) + assert_equal(count_missing(series.convert('A')), (42,42)) + assert_equal(count_missing(series.convert('M')), + [4,2,4,3,4,3,4,4,3,4,3,4]*2) + # + def test_accept_atmost_missing(self): + series = self.data.copy() + series[numpy.logical_not(self.data.day % 10)] = masked + result = accept_atmost_missing(series.convert('M'),3,True) + assert_equal(result._mask.all(-1), [0]*24) + result = accept_atmost_missing(series.convert('M'),3,False) + assert_equal(result._mask.all(-1), [1,0,1,1,1,1,1,1,1,1,1,1]*2) + result = accept_atmost_missing(series.convert('M'),0.1,True) + assert_equal(result._mask.all(-1), [0]*24) + result = accept_atmost_missing(series.convert('A'),35,True) + assert_equal(result._mask.all(-1), [0,0]) + result = accept_atmost_missing(series.convert('A'),35,False) + assert_equal(result._mask.all(-1), [1,1]) + result = accept_atmost_missing(series.convert('A'),0.05,True) + assert_equal(result._mask.all(-1), [1,1]) + + +############################################################################### +#------------------------------------------------------------------------------ +if __name__ == "__main__": + NumpyTest().run() \ No newline at end of file Property changes on: trunk/scipy/sandbox/timeseries/tests/test_extras.py ___________________________________________________________________ Name: svn:keywords + Date Author Revision Id Modified: trunk/scipy/sandbox/timeseries/tests/test_multitimeseries.py =================================================================== --- trunk/scipy/sandbox/timeseries/tests/test_multitimeseries.py 2007-09-06 00:46:24 UTC (rev 3304) +++ trunk/scipy/sandbox/timeseries/tests/test_multitimeseries.py 2007-09-06 01:13:29 UTC (rev 3305) @@ -12,7 +12,7 @@ import types -import numpy as N +import numpy import numpy.core.fromnumeric as fromnumeric from numpy.testing import NumpyTest, NumpyTestCase from numpy.testing.utils import build_err_msg @@ -41,10 +41,10 @@ def setup(self): "Generic setup" - d = N.arange(5) + d = numpy.arange(5) m = MA.make_mask([1,0,0,1,1]) - base_d = N.r_[d,d[::-1]].reshape(2,-1).T - base_m = N.r_[[m, m[::-1]]].T + base_d = numpy.r_[d,d[::-1]].reshape(2,-1).T + base_m = numpy.r_[[m, m[::-1]]].T base = MA.array(base_d, mask=base_m) mrec = MR.fromarrays(base.T,) dlist = ['2007-%02i' % (i+1) for i in d] @@ -62,9 +62,10 @@ assert_equal(mts['f0']._mask, m) # assert(isinstance(mts[0], MultiTimeSeries)) - assert_equal(mts._data[0], mrec[0]) + assert_equal(mts._data[0], mrec._data[0]) # We can't use assert_equal here, as it tries to convert the tuple into a singleton - assert(mts[0]._data.view(N.ndarray) == mrec[0]) +# assert(mts[0]._data.view(numpyndarray) == mrec[0]) + assert_equal(numpy.asarray(mts._data[0]), mrec[0]) assert_equal(mts._dates[0], dates[0]) 
assert_equal(mts[0]._dates, dates[0]) # @@ -75,8 +76,8 @@ assert(isinstance(mts.f0, TimeSeries)) assert_equal(mts.f0, time_series(d, dates=dates, mask=m)) assert_equal(mts.f1, time_series(d[::-1], dates=dates, mask=m[::-1])) - assert((mts._fieldmask == N.core.records.fromarrays([m, m[::-1]])).all()) - assert_equal(mts._mask, N.r_[[m,m[::-1]]].all(0)) + assert((mts._fieldmask == numpy.core.records.fromarrays([m, m[::-1]])).all()) + assert_equal(mts._mask, numpy.r_[[m,m[::-1]]].all(0)) assert_equal(mts.f0[1], mts[1].f0) # assert(isinstance(mts[:2], MultiTimeSeries)) @@ -124,7 +125,7 @@ mts.harden_mask() assert(mts._hardmask) mts._mask = nomask - assert_equal(mts._mask, N.r_[[m,m[::-1]]].all(0)) + assert_equal(mts._mask, numpy.r_[[m,m[::-1]]].all(0)) mts.soften_mask() assert(not mts._hardmask) mts._mask = nomask @@ -141,7 +142,7 @@ def test_fromrecords(self): "Test from recarray." [d, m, mrec, dlist, dates, ts, mts] = self.data - nrec = N.core.records.fromarrays(N.r_[[d,d[::-1]]]) + nrec = numpy.core.records.fromarrays(numpy.r_[[d,d[::-1]]]) mrecfr = fromrecords(nrec.tolist(), dates=dates) assert_equal(mrecfr.f0, mrec.f0) assert_equal(mrecfr.dtype, mrec.dtype) Modified: trunk/scipy/sandbox/timeseries/tests/test_timeseries.py =================================================================== --- trunk/scipy/sandbox/timeseries/tests/test_timeseries.py 2007-09-06 00:46:24 UTC (rev 3304) +++ trunk/scipy/sandbox/timeseries/tests/test_timeseries.py 2007-09-06 01:13:29 UTC (rev 3305) @@ -11,7 +11,7 @@ __revision__ = "$Revision$" __date__ = '$Date$' -import numpy as N +import numpy from numpy import bool_, complex_, float_, int_, object_ import numpy.core.fromnumeric as fromnumeric import numpy.core.numeric as numeric @@ -105,14 +105,15 @@ series = time_series(data,dlist) assert_equal(series._data,[30,20,10]) # - series = TimeSeries(data, dlist) + dates = date_array_fromlist(dlist, freq='D') + series = TimeSeries(data, dates) assert_equal(series._data,[30,20,10]) # - series = TimeSeries(data, dlist, mask=[1,0,0]) + series = time_series(data, dlist, mask=[1,0,0]) assert_equal(series._mask,[0,0,1]) # data = masked_array([10,20,30],mask=[1,0,0]) - series = TimeSeries(data, dlist) + series = time_series(data, dlist) assert_equal(series._mask,[0,0,1]) #............................................................................... @@ -265,7 +266,7 @@ # With set series[:5] = 0 assert_equal(series[:5]._series, [0,0,0,0,0]) - dseries = N.log(series) + dseries = numpy.log(series) series[-5:] = dseries[-5:] assert_equal(series[-5:], dseries[-5:]) # Now, using dates ! 
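These test updates follow the API change described in the log message: TimeSeries.__new__ now only accepts a Date/DateArray for its dates argument, while the time_series factory keeps accepting plain date lists. A minimal sketch of the two idioms, using only names that appear in the tests above (sandbox-era API):

    from timeseries import TimeSeries, time_series, date_array_fromlist

    dlist = ['2007-01-%02i' % i for i in (3, 2, 1)]
    # Preferred: the factory converts the list of date strings itself.
    series = time_series([10, 20, 30], dlist, mask=[1, 0, 0])
    # Low-level: the class constructor now requires an explicit DateArray.
    dates = date_array_fromlist(dlist, freq='D')
    series = TimeSeries([10, 20, 30], dates)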
@@ -275,7 +276,7 @@ def test_on2d(self): "Tests getitem on a 2D series" (a,b,d) = ([1,2,3],[3,2,1], date_array(thisday('M'),length=3)) - ser_x = time_series(N.column_stack((a,b)), dates=d) + ser_x = time_series(numpy.column_stack((a,b)), dates=d) assert_equal(ser_x[0,0], time_series(a[0],d[0])) assert_equal(ser_x[0,:], time_series([(a[0],b[0])], d[0])) assert_equal(ser_x[:,0], time_series(a, d)) @@ -285,20 +286,20 @@ "Tests getitem on a nD series" hodie = thisday('D') # Case 1D - series = time_series(N.arange(5), mask=[1,0,0,0,0], start_date=hodie) + series = time_series(numpy.arange(5), mask=[1,0,0,0,0], start_date=hodie) assert_equal(series[0], 0) # Case 1D + mask - series = time_series(N.arange(5), mask=[1,0,0,0,0], start_date=hodie) + series = time_series(numpy.arange(5), mask=[1,0,0,0,0], start_date=hodie) assert series[0] is tsmasked # Case 2D - series = time_series(N.arange(10).reshape(5,2), start_date=hodie) + series = time_series(numpy.arange(10).reshape(5,2), start_date=hodie) assert_equal(len(series), 5) assert_equal(series[0], [[0,1]]) assert_equal(series[0]._dates[0], (hodie)) assert_equal(series[:,0], [0,2,4,6,8]) assert_equal(series[:,0]._dates, series._dates) # Case 2D + mask - series = time_series(N.arange(10).reshape(5,2), start_date=hodie, + series = time_series(numpy.arange(10).reshape(5,2), start_date=hodie, mask=[[1,1],[0,0],[0,0],[0,0],[0,0]]) assert_equal(len(series), 5) assert_equal(series[0], [[0,1]]) @@ -308,7 +309,7 @@ assert_equal(series[:,0]._mask, [1,0,0,0,0]) assert_equal(series[:,0]._dates, series._dates) # Case 3D - series = time_series(N.arange(30).reshape(5,3,2), start_date=hodie) + series = time_series(numpy.arange(30).reshape(5,3,2), start_date=hodie) x = series[0] assert_equal(len(series), 5) assert_equal(series[0], [[[0,1],[2,3],[4,5]]]) @@ -337,7 +338,7 @@ assert_equal(dseries, series[3:-2]) dseries = adjust_endpoints(series, end_date=Date('D', string='2007-01-31')) assert_equal(dseries.size, 31) - assert_equal(dseries._mask, N.r_[series._mask, [1]*16]) + assert_equal(dseries._mask, numpy.r_[series._mask, [1]*16]) dseries = adjust_endpoints(series, end_date=Date('D', string='2007-01-06')) assert_equal(dseries.size, 6) assert_equal(dseries, series[:6]) @@ -345,7 +346,7 @@ start_date=Date('D', string='2007-01-06'), end_date=Date('D', string='2007-01-31')) assert_equal(dseries.size, 26) - assert_equal(dseries._mask, N.r_[series._mask[5:], [1]*16]) + assert_equal(dseries._mask, numpy.r_[series._mask[5:], [1]*16]) # def test_alignseries(self): "Tests align_series & align_with" @@ -382,7 +383,7 @@ # def test_split(self): """Test the split function.""" - ms = time_series(N.arange(62).reshape(31,2), + ms = time_series(numpy.arange(62).reshape(31,2), start_date=Date(freq='d', year=2005, month=7, day=1)) d1,d2 = split(ms) assert_array_equal(d1.data, ms.data[:,0]) @@ -400,11 +401,11 @@ date conversion algorithms already tested by asfreq in the test_dates test suite. 
""" - lowFreqSeries = time_series(N.arange(10), + lowFreqSeries = time_series(numpy.arange(10), start_date=Date(freq='m', year=2005, month=6)) - highFreqSeries = time_series(N.arange(100), + highFreqSeries = time_series(numpy.arange(100), start_date=Date(freq='b', year=2005, month=6, day=1)) - ndseries = time_series(N.arange(124).reshape(62,2), + ndseries = time_series(numpy.arange(124).reshape(62,2), start_date=Date(freq='d', year=2005, month=7, day=1)) lowToHigh_start = lowFreqSeries.convert('B', position='START') @@ -456,7 +457,7 @@ assert(not filled_ser.has_duplicated_dates()) assert_equal(filled_ser.size, _end - _start + 1) # - data = N.arange(5*24).reshape(5,24) + data = numpy.arange(5*24).reshape(5,24) datelist = ['2007-07-01','2007-07-02','2007-07-03','2007-07-05','2007-07-06'] dates = date_array_fromlist(datelist, 'D') dseries = time_series(data, dates) @@ -482,7 +483,7 @@ (start, end) = ('2007-01-06', '2007-01-12') mask = mask_period(series, start, end, inside=True, include_edges=True, inplace=False) - assert_equal(mask._mask, N.array([0,0,0,0,0,1,1,1,1,1,1,1,0,0,0])) + assert_equal(mask._mask, numpy.array([0,0,0,0,0,1,1,1,1,1,1,1,0,0,0])) mask = mask_period(series, start, end, inside=True, include_edges=False, inplace=False) assert_equal(mask._mask, [0,0,0,0,0,0,1,1,1,1,1,0,0,0,0]) @@ -497,7 +498,7 @@ series = time_series(data, dates=dates) mask = mask_period(series, start, end, inside=True, include_edges=True, inplace=False) - result = N.array([0,0,0,0,0,1,1,1,1,1,1,1,0,0,0]) + result = numpy.array([0,0,0,0,0,1,1,1,1,1,1,1,0,0,0]) assert_equal(mask._mask, result.repeat(2).reshape(-1,2)) # def test_pickling(self): @@ -509,14 +510,14 @@ assert_equal(series_pickled._data, series._data) assert_equal(series_pickled._mask, series._mask) # - data = masked_array(N.matrix(range(10)).T, mask=[1,0,0,0,0]*2) + data = masked_array(numpy.matrix(range(10)).T, mask=[1,0,0,0,0]*2) dates = date_array(start_date=thisday('D'), length=10) series = time_series(data,dates=dates) series_pickled = cPickle.loads(series.dumps()) assert_equal(series_pickled._dates, series._dates) assert_equal(series_pickled._data, series._data) assert_equal(series_pickled._mask, series._mask) - assert(isinstance(series_pickled._data, N.matrix)) + assert(isinstance(series_pickled._data, numpy.matrix)) def test_empty_timeseries(self): @@ -529,25 +530,25 @@ def test__timeseriescompat_multiple(self): "Tests the compatibility of multiple time series." 
- seriesM_10 = time_series(N.arange(10), + seriesM_10 = time_series(numpy.arange(10), date_array( start_date=Date(freq='m', year=2005, month=1), length=10) ) - seriesD_10 = time_series(N.arange(10), + seriesD_10 = time_series(numpy.arange(10), date_array( start_date=Date(freq='d', year=2005, month=1, day=1), length=10) ) - seriesD_5 = time_series(N.arange(5), + seriesD_5 = time_series(numpy.arange(5), date_array( start_date=Date(freq='d', year=2005, month=1, day=1), length=5) ) - seriesD_5_apr = time_series(N.arange(5), + seriesD_5_apr = time_series(numpy.arange(5), date_array( start_date=Date(freq='d', year=2005, month=4, day=1), length=5) @@ -583,7 +584,7 @@ data = masked_array(numeric.arange(15), mask=[1,0,0,0,0]*3, dtype=float_) series = time_series(data, dlist) # - keeper = N.array([0,1,1,1,1]*3, dtype=bool_) + keeper = numpy.array([0,1,1,1,1]*3, dtype=bool_) c_series = series.compressed() assert_equal(c_series._data, [1,2,3,4,6,7,8,9,11,12,13,14]) assert_equal(c_series._mask, nomask) @@ -593,7 +594,7 @@ dates=dates) c_series = series_st.compressed() d = [1,2,3,6,7,8,11,12,13] - assert_equal(c_series._data, N.c_[(d,list(reversed(d)))]) + assert_equal(c_series._data, numpy.c_[(d,list(reversed(d)))]) assert_equal(c_series._mask, nomask) assert_equal(c_series._dates, dates[d]) Added: trunk/scipy/sandbox/timeseries/textras.py =================================================================== --- trunk/scipy/sandbox/timeseries/textras.py 2007-09-06 00:46:24 UTC (rev 3304) +++ trunk/scipy/sandbox/timeseries/textras.py 2007-09-06 01:13:29 UTC (rev 3305) @@ -0,0 +1,106 @@ +""" +Extras functions for time series. + +:author: Pierre GF Gerard-Marchant & Matt Knox +:contact: pierregm_at_uga_dot_edu - mattknox_ca_at_hotmail_dot_com +:version: $Id$ +""" +__author__ = "Pierre GF Gerard-Marchant & Matt Knox ($Author$)" +__version__ = '1.0' +__revision__ = "$Revision$" +__date__ = '$Date$' + + +import numpy +import maskedarray +from maskedarray import masked + +import const as _c +from tseries import TimeSeries + + + +__all__ = ['isleapyear', 'count_missing', 'accept_atmost_missing'] + +#.............................................................................. +def isleapyear(year): + """Returns true if year is a leap year. + +:Input: + year : integer / sequence + A given (list of) year(s). + """ + year = numpy.asarray(year) + return numpy.logical_or(year % 400 == 0, + numpy.logical_and(year % 4 == 0, year % 100 > 0)) + +#.............................................................................. +def count_missing(series): + """Returns the number of missing data per period. + + +Notes +----- +This function is designed to return the actual number of missing values when +a series has been converted from one frequency to a smaller frequency. + +For example, converting a 12-month-long daily series to months will yield +a (12x31) array, with missing values in February, April, June... +count_missing will discard these extra missing values. + """ + if not isinstance(series, TimeSeries): + raise TypeError, "The input data should be a valid TimeSeries object! 
"\ + "(got %s instead)" % type(series) + if series.ndim == 1: + return len(series) - series.count() + elif series.ndim != 2: + raise NotImplementedError + # + missing = series.shape[-1] - series.count(axis=-1) + period = series.shape[-1] + freq = series.freq + if (period == 366) and (freq//_c.FR_ANN == 1): + # row: years, cols: days + missing -= ~isleapyear(series.year) + elif period == 31 and (freq//_c.FR_MTH == 1): + months = series.months + # row: months, cols: days + missing[numpy.array([m in [4,6,9,11] for m in months])] -= 1 + isfeb = (months == 2) + missing[isfeb] -= 2 + missing[isfeb & ~isleapyear(series.year)] -= 1 + elif period not in (12,7): + raise NotImplementedError, "Not yet implemented for that frequency..." + return missing + +#............................................................................. +def accept_atmost_missing(series, max_missing, strict=False): + """Masks the rows of the series that contains more than max_missing missing data. + Returns a new masked series. + +:Inputs: + series : TimeSeries + Input time series. + max_missing : float + Number of maximum acceptable missing values per row (if larger than 1), + or maximum acceptable percentage of missing values (if lower than 1). + strict : boolean *[False]* + Whether the + """ + series = numpy.array(series, copy=True, subok=True) + if not isinstance(series, TimeSeries): + raise TypeError, "The input data should be a valid TimeSeries object! "\ + "(got %s instead)" % type(series) + # Find the number of missing values .... + missing = count_missing(series) + # Transform an acceptable percentage in a number + if max_missing < 1: + max_missing = numpy.round(max_missing * series.shape[-1],0) + # + series.unshare_mask() + if strict: + series[missing > max_missing] = masked + else: + series[missing >= max_missing] = masked + return series + \ No newline at end of file Property changes on: trunk/scipy/sandbox/timeseries/textras.py ___________________________________________________________________ Name: svn:keywords + Date Author Revision Id Modified: trunk/scipy/sandbox/timeseries/tmulti.py =================================================================== --- trunk/scipy/sandbox/timeseries/tmulti.py 2007-09-06 00:46:24 UTC (rev 3304) +++ trunk/scipy/sandbox/timeseries/tmulti.py 2007-09-06 01:13:29 UTC (rev 3305) @@ -516,16 +516,9 @@ self_data = [d, m, mrec, dlist, dates, ts, mts] assert(isinstance(mts.f0, TimeSeries)) - - if 0: - mts[:2] = 5 - assert_equal(mts.f0._data, [5,5,2,3,4]) - assert_equal(mts.f1._data, [5,5,2,1,0]) - assert_equal(mts.f0._mask, [0,0,0,1,1]) - assert_equal(mts.f1._mask, [0,0,0,0,1]) - mts.harden_mask() - mts[-2:] = 5 - assert_equal(mts.f0._data, [5,5,2,3,4]) - assert_equal(mts.f1._data, [5,5,2,5,0]) - assert_equal(mts.f0._mask, [0,0,0,1,1]) - assert_equal(mts.f1._mask, [0,0,0,0,1]) \ No newline at end of file + # + if 1: + recfirst = mts._data[0] + print recfirst, type(recfirst) + print mrec[0], type(mrec[0]) + Modified: trunk/scipy/sandbox/timeseries/tseries.py =================================================================== --- trunk/scipy/sandbox/timeseries/tseries.py 2007-09-06 00:46:24 UTC (rev 3304) +++ trunk/scipy/sandbox/timeseries/tseries.py 2007-09-06 01:13:29 UTC (rev 3305) @@ -18,7 +18,7 @@ import numpy from numpy import ndarray -from numpy.core import bool_, complex_, float_, int_, object_ +from numpy import bool_, complex_, float_, int_, object_ from numpy.core.multiarray import dtype import numpy.core.fromnumeric as fromnumeric import numpy.core.numeric as numeric @@ 
-222,7 +222,7 @@ common_freq = unique_freqs.item() except ValueError: raise TimeSeriesError, \ - "All series must have same frequency!" + "All series must have same frequency! (got %s instead)" % unique_freqs return common_freq ##### -------------------------------------------------------------------------- @@ -337,49 +337,63 @@ options = None _defaultobserved = None _genattributes = ['fill_value', 'observed'] - def __new__(cls, data, dates=None, mask=nomask, - freq=None, observed=None, start_date=None, length=None, + def __new__(cls, data, dates, mask=nomask, +# freq=None, + observed=None, #start_date=None, length=None, dtype=None, copy=False, fill_value=None, subok=True, keep_mask=True, small_mask=True, hard_mask=False, **options): maparms = dict(copy=copy, dtype=dtype, fill_value=fill_value,subok=subok, keep_mask=keep_mask, small_mask=small_mask, hard_mask=hard_mask,) _data = MaskedArray(data, mask=mask, **maparms) - # Get the frequency .......................... - freq = check_freq(freq) +# # Get the frequency .......................... +# freq = check_freq(freq) # Get the dates .............................. - if dates is None: - newdates = getattr(data, '_dates', None) - else: - newdates = dates - if newdates is not None: - if not hasattr(newdates, 'freq'): - newdates = date_array(dlist=dates, freq=freq) - if freq != _c.FR_UND and newdates.freq != freq: - newdates = newdates.asfreq(freq) - else: - dshape = _data.shape - if len(dshape) > 0: - if length is None: - length = dshape[0] - newdates = date_array(start_date=start_date, length=length, - freq=freq) - else: - newdates = date_array([], freq=freq) + if not isinstance(dates, (Date, DateArray)): + raise TypeError("The input dates should be a valid Date or DateArray object! "\ + "(got %s instead)" % type(dates)) +# newdates = date_array(dates) +# elif isinstance(dates, (tuple, list, ndarray)): +# newdates = date_array(dlist=dates, freq=freq) +# if newdates is not None: +# if freq != _c.FR_UND and newdates.freq != freq: +# newdates = newdates.asfreq(freq) +# else: +# dshape = _data.shape +# if len(dshape) > 0: +# if length is None: +# length = dshape[0] +# newdates = date_array(start_date=start_date, length=length, +# freq=freq) +# else: +# newdates = date_array([], freq=freq) # Get observed ............................... observed = getattr(data, 'observed', fmtObserv(observed)) # Get the data ............................... - if newdates._unsorted is not None: - _data = _data[newdates._unsorted] if not subok or not isinstance(_data,TimeSeries): _data = _data.view(cls) if _data is masked: assert(numeric.size(newdates)==1) return _data.view(cls) - assert(_datadatescompat(_data,newdates)) - _data._dates = newdates - if _data._dates.size == _data.size and _data.ndim > 1: - _data._dates.shape = _data.shape + assert(_datadatescompat(_data,dates)) +# assert(_datadatescompat(_data,newdates)) + # +# _data._dates = newdates + _data._dates = dates + if _data._dates.size == _data.size: + if _data.ndim > 1: + current_shape = data.shape +# if newdates._unsorted is not None: + if dates._unsorted is not None: + _data.shape = (-1,) +# _data = _data[newdates._unsorted] + _data = _data[dates._unsorted] + _data.shape = current_shape + _data._dates.shape = current_shape + elif dates._unsorted is not None: + _data = _data[dates._unsorted] +# elif newdates._unsorted is not None: +# _data = _data[newdates._unsorted] _data.observed = observed return _data #............................................ 
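With the rewritten __new__ above, the constructor fails fast on anything that is not a Date or DateArray instead of trying to coerce it; list-to-DateArray conversion now lives solely in the time_series factory (rewritten in a later hunk below). A hypothetical session illustrating the new contract (the error text is taken verbatim from the diff):

    from timeseries import TimeSeries, date_array_fromlist

    TimeSeries([10, 20, 30], ['2007-01-01', '2007-01-02', '2007-01-03'])
    # -> TypeError: The input dates should be a valid Date or DateArray object!
    #    (got <type 'list'> instead)
    dates = date_array_fromlist(['2007-01-01', '2007-01-02', '2007-01-03'], 'D')
    TimeSeries([10, 20, 30], dates)   # fine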
@@ -919,17 +933,6 @@ TimeSeries.tofile = tofile #............................................ -def tolist(self, fill_value=None): - """Copies the date and data portion of the time series to a hierarchical -python list and returns that list. Data items are converted to the nearest -compatible Python type. Dates are converted to standard Python datetime -objects. Masked values are filled with `fill_value`""" - return [(d.datetime, v) for (d,v) in \ - zip(self.dates, self._series.tolist())] -TimeSeries.tolist = tolist - -#............................................ - def asrecords(series): """Returns the masked time series as a recarray. Fields are `_dates`, `_data` and _`mask`. @@ -990,27 +993,45 @@ `data` : Array of data. """ - data = numeric.array(data, copy=False, subok=True) + maparms = dict(copy=copy, dtype=dtype, fill_value=fill_value, subok=True, + keep_mask=keep_mask, small_mask=small_mask, + hard_mask=hard_mask,) + data = masked_array(data, mask=mask, **maparms) + # data = data.view(MaskedArray) + freq = check_freq(freq) + # if dates is None: + _dates = getattr(data, '_dates', None) + elif isinstance(dates, (Date, DateArray)): + _dates = date_array(dates) + elif isinstance(dates, (tuple, list, ndarray)): + _dates = date_array(dlist=dates, freq=freq) + else: + _dates = date_array([], freq=freq) + # + if _dates is not None: + # Make sure _dates has the proper freqncy + if (freq != _c.FR_UND) and (_dates.freq != freq): + _dates = _dates.asfreq(freq) + else: dshape = data.shape if len(dshape) > 0: if length is None: length = dshape[0] if len(dshape) > 0: - dates = date_array(start_date=start_date, end_date=end_date, + _dates = date_array(start_date=start_date, end_date=end_date, length=length, freq=freq) else: - dates = date_array([], freq=freq) - elif not isinstance(dates, DateArray): - dates = date_array(dlist=dates, freq=freq) - if dates._unsorted is not None: - idx = dates._unsorted + _dates = date_array([], freq=freq) + # + if _dates._unsorted is not None: + idx = _dates._unsorted data = data[idx] - if mask is not nomask: - mask = mask[idx] - dates._unsorted = None - return TimeSeries(data=data, dates=dates, mask=mask, - observed=observed, copy=copy, dtype=dtype, + _dates._unsorted = None + return TimeSeries(data=data, dates=_dates, mask=data._mask, +# freq=freq, + observed=observed, + copy=copy, dtype=dtype, fill_value=fill_value, keep_mask=keep_mask, small_mask=small_mask, hard_mask=hard_mask,) @@ -1597,7 +1618,7 @@ ################################################################################ if __name__ == '__main__': from maskedarray.testutils import assert_equal, assert_array_equal - if 1: + if 0: dlist = ['2007-01-%02i' % i for i in range(1,16)] dates = date_array_fromlist(dlist) data = masked_array(numeric.arange(15), mask=[1,0,0,0,0]*3) @@ -1611,7 +1632,7 @@ assert_equal(a[-5:], series[:5]) assert_equal(b[:5], series[-5:]) # - if 1: + if 0: data = numpy.arange(5*24).reshape(5,24) datelist = ['2007-07-01','2007-07-02','2007-07-03','2007-07-05','2007-07-06'] dates = date_array_fromlist(datelist, 'D') @@ -1632,4 +1653,49 @@ assert_equal(fseries._mask, [0,0,0,1,0,]) # fseries = fill_missing_dates(data, date_array_fromlist(datelist,'D')) + # + if 0: + "Make sure we're not losing the fill_value" + dlist = ['2007-01-%02i' % i for i in range(1,16)] + dates = date_array_fromlist(dlist) + series = time_series(MA.zeros(dates.shape), dates=dates, fill_value=-9999) + assert_equal(series.fill_value, -9999) + if 0: + "Check time_series w/ an existing time series" + dlist = 
['2007-01-%02i' % i for i in range(1,16)] + dates = date_array_fromlist(dlist) + series = time_series(MA.zeros(dates.shape), dates=dates, fill_value=-9999) + newseries = time_series(series, fill_value=+9999) + assert_equal(newseries._data, series._data) + assert_equal(newseries._mask, series._mask) + assert_equal(newseries.fill_value, +9999) + if 0: + data = numpy.arange(5*24).reshape(5,24) + datelist = ['2007-07-01','2007-07-02','2007-07-03','2007-07-05','2007-07-06'] + dates = date_array_fromlist(datelist, 'D') +# dseries = time_series(data, dates) + ndates = date_array_fromrange(start_date=dates[0],end_date=dates[-2]) + # + (A,B) = (data.ravel()[:4].reshape(2,2), dates[:-1]) + series = time_series(A,B) + fseries = fill_missing_dates(series) + assert_equal(fseries.shape, (5,)) + assert_equal(fseries._mask, [0,0,0,1,0,]) + # + if 1: + dlist = ['2007-01-%02i' % i for i in (3,2,1)] + data = [10,20,30] +# series = time_series(data, dlist, mask=[1,0,0]) +# data = masked_array([10,20,30],mask=[1,0,0]) +# series = time_series(data, dlist) + series = time_series(data, dlist, mask=[1,0,0]) + assert_equal(series._mask,[0,0,1]) + if 1: + dlist = ['2007-01-%02i' % i for i in range(1,16)] + dates = date_array_fromlist(dlist) + data = masked_array(numeric.arange(15), mask=[1,0,0,0,0]*3) + series = time_series(data, dlist) + + empty_series = time_series([], freq='d') + a, b = align_series(series, empty_series) From scipy-svn at scipy.org Thu Sep 6 04:26:44 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 6 Sep 2007 03:26:44 -0500 (CDT) Subject: [Scipy-svn] r3306 - trunk/scipy/fftpack/src Message-ID: <20070906082644.A2F7E39C279@new.scipy.org> Author: cdavid Date: 2007-09-06 03:26:39 -0500 (Thu, 06 Sep 2007) New Revision: 3306 Modified: trunk/scipy/fftpack/src/zfftnd_fftw.c Log: Fix typo in fftw2 wrapper in nd case Modified: trunk/scipy/fftpack/src/zfftnd_fftw.c =================================================================== --- trunk/scipy/fftpack/src/zfftnd_fftw.c 2007-09-06 01:13:29 UTC (rev 3305) +++ trunk/scipy/fftpack/src/zfftnd_fftw.c 2007-09-06 08:26:39 UTC (rev 3306) @@ -3,7 +3,7 @@ * * Original code by Pearu Peaterson * - * Last Change: Wed Aug 08 03:00 PM 2007 J + * Last Change: Thu Sep 06 05:00 PM 2007 J */ GEN_CACHE(zfftnd_fftw, (int n, int *dims, int d, int flags) @@ -25,7 +25,7 @@ free(caches_zfftnd_fftw[id].dims);, 10) -extern void zfftnd_mkl(complex_double * inout, int rank, +extern void zfftnd_fftw(complex_double * inout, int rank, int *dims, int direction, int howmany, int normalize) { From scipy-svn at scipy.org Fri Sep 7 15:56:01 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Fri, 7 Sep 2007 14:56:01 -0500 (CDT) Subject: [Scipy-svn] r3307 - in trunk/scipy/sandbox/multigrid: . 
	multigridtools tests
Message-ID: <20070907195601.02A4739C129@new.scipy.org>

Author: wnbell
Date: 2007-09-07 14:55:57 -0500 (Fri, 07 Sep 2007)
New Revision: 3307

Added:
   trunk/scipy/sandbox/multigrid/adaptive.py
   trunk/scipy/sandbox/multigrid/tests/test_coarsen.py
   trunk/scipy/sandbox/multigrid/tests/test_utils.py
Modified:
   trunk/scipy/sandbox/multigrid/coarsen.py
   trunk/scipy/sandbox/multigrid/multigridtools/smoothed_aggregation.h
   trunk/scipy/sandbox/multigrid/multilevel.py
   trunk/scipy/sandbox/multigrid/relaxation.py
   trunk/scipy/sandbox/multigrid/tests/test_relaxation.py
   trunk/scipy/sandbox/multigrid/utils.py
Log:
added test cases for utils and coarsening
added preliminary adaptive SA code

Added: trunk/scipy/sandbox/multigrid/adaptive.py
===================================================================
--- trunk/scipy/sandbox/multigrid/adaptive.py	2007-09-06 08:26:39 UTC (rev 3306)
+++ trunk/scipy/sandbox/multigrid/adaptive.py	2007-09-07 19:55:57 UTC (rev 3307)
@@ -0,0 +1,371 @@
+import numpy,scipy,scipy.sparse
+from numpy import sqrt,ravel,diff,zeros,zeros_like,inner,concatenate
+from scipy.sparse import csr_matrix,coo_matrix
+
+from relaxation import gauss_seidel
+from multilevel import multilevel_solver
+from coarsen import sa_constant_interpolation
+from utils import infinity_norm
+
+
+def fit_candidate(I,x):
+    """
+    For each aggregate in I (i.e. each column of I) compute vector R and
+    sparse matrix Q (having the sparsity of I) such that the following holds:
+
+        Q*R = x   and   Q^T*Q = I
+
+    In other words, find a prolongator Q with orthonormal columns so that
+    x is represented exactly on the coarser level by R.
+    """
+    Q = csr_matrix((x.copy(),I.indices,I.indptr),dims=I.shape,check=False)
+    R = sqrt(ravel(csr_matrix((x*x,I.indices,I.indptr),dims=I.shape,check=False).sum(axis=0)))  #column 2-norms
+    Q.data *= (1.0/R)[Q.indices]
+
+    #print "norm(R)",scipy.linalg.norm(R)
+    #print "min(R),max(R)",min(R),max(R)
+    #print "infinity_norm(Q.T*Q - I) ",infinity_norm((Q.T.tocsr() * Q - scipy.sparse.spidentity(Q.shape[1])))
+    #print "norm(Q*R - x)",scipy.linalg.norm(Q*R - x)
+    #print "norm(x - Q*Q.Tx)",scipy.linalg.norm(x - Q*(Q.T*x))
+    return Q,R
+
+
+##def orthonormalize_candidate(I,x,basis):
+##    Px = csr_matrix((x,I.indices,I.indptr),dims=I.shape,check=False)
+##    Rs = []
+##    #orthogonalize columns of Px against other candidates
+##    for b in basis:
+##        Pb = csr_matrix((b,I.indices,I.indptr),dims=I.shape,check=False)
+##        R = ravel(csr_matrix((Pb.data*Px.data,I.indices,I.indptr),dims=I.shape,check=False).sum(axis=0))  # columnwise projection of Px on Pb
+##        Px.data -= R[I.indices] * Pb.data   #subtract component in b direction
+##        Rs.append(R)
+##
+##    #filter columns here, set unused cols to 0, add to mask
+##
+##    #normalize columns of Px
+##    R = ravel(csr_matrix((x**x,I.indices,I.indptr),dims=I.shape,check=False).sum(axis=0))
+##    Px.data *= (1.0/R)[I.indices]
+##    Rs.append(R.reshape(-1,1))
+##    return Rs
+
+def hstack_csr(A,B):
+    #OPTIMIZE THIS
+    assert(A.shape[0] == B.shape[0])
+    A = A.tocoo()
+    B = B.tocoo()
+    I = concatenate((A.row,B.row))
+    J = concatenate((A.col,B.col+A.shape[1]))
+    V = concatenate((A.data,B.data))
+    return coo_matrix((V,(I,J)),dims=(A.shape[0],A.shape[1]+B.shape[1])).tocsr()
+
+
+def vstack_csr(A,B):
+    #OPTIMIZE THIS
+    assert(A.shape[1] == B.shape[1])
+    A = A.tocoo()
+    B = B.tocoo()
+    I = concatenate((A.row,B.row+A.shape[0]))
+    J = concatenate((A.col,B.col))
+    V = concatenate((A.data,B.data))
+    return coo_matrix((V,(I,J)),dims=(A.shape[0]+B.shape[0],A.shape[1])).tocsr()
+
+
+
+def orthonormalize_prolongator(P_l,x_l,W_l,W_m):
+    """
+    Orthogonalize the candidate vector x_l (restricted to the aggregates
+    in W_l) against the current prolongator P_l, append the surviving
+    columns to P_l, and return the expanded prolongator together with
+    the coarse-level representation x_m of x_l.
+    """
+    X = csr_matrix((x_l,W_l.indices,W_l.indptr),dims=W_l.shape,check=False)  #candidate prolongator (assumes every value from x is used)
+
+    R = (P_l.T.tocsr() * X)  # R has at most 1 nz per row
+    X = X - P_l*R            # orthogonalize X against P_l
+
+    #DROP REDUNDANT COLUMNS FROM P (AND R?) HERE (NULL OUT R ACCORDINGLY?)
+    #REMOVE CORRESPONDING COLUMNS FROM W_l AND ROWS FROM A_m ALSO
+    W_l_new = W_l
+    W_m_new = W_m
+
+    #normalize surviving columns of X
+    col_norms = ravel(sqrt(csr_matrix((X.data*X.data,X.indices,X.indptr),dims=X.shape,check=False).sum(axis=0)))
+    print "zero cols",sum(col_norms == 0)
+    print "small cols",sum(col_norms < 1e-8)
+    Xcopy = X.copy()
+    X.data *= (1.0/col_norms)[X.indices]
+
+    P_l_new = hstack_csr(P_l,X)
+
+
+    #check orthonormality
+    print "norm(P.T*P - I) ",scipy.linalg.norm((P_l_new.T * P_l_new - scipy.sparse.spidentity(P_l_new.shape[1])).data)
+    #assert(scipy.linalg.norm((P_l_new.T * P_l_new - scipy.sparse.spidentity(P_l_new.shape[1])).data)<1e-8)
+
+    x_m = zeros(P_l_new.shape[1],dtype=x_l.dtype)
+    x_m[:P_l.shape[1]][diff(R.indptr).astype('bool')] = R.data
+    x_m[P_l.shape[1]:] = col_norms
+
+    print "||x_l - P_l*x_m||",scipy.linalg.norm(P_l_new* x_m - x_l)  #see if x_l is represented exactly
+
+    return P_l_new,x_m,W_l,W_m
+
+
+def smoothed_prolongator(P,A):
+    #just use Richardson for now
+    #omega = 4.0/(3.0*infinity_norm(A))
+    #return P - omega*(A*P)
+    #return P
+    D = diag_sparse(A)
+    D_inv_A = diag_sparse(1.0/D)*A
+    omega = 4.0/(3.0*infinity_norm(D_inv_A))
+    D_inv_A *= omega
+    return P - D_inv_A*P
+
+
+def sa_hierarchy(A,Ws,x):
+    """
+    Construct multilevel hierarchy using Smoothed Aggregation
+        Inputs:
+          A  - matrix
+          Ws - list of constant prolongators
+          x  - "candidate" basis function to be approximated
+        Outputs:
+          (As,Is,Ps) - tuple of lists
+                - As - [A, Ps[0].T*A*Ps[0], Ps[1].T*A*Ps[1], ...
] + - Is - smoothed prolongators + - Ps - tentative prolongators + """ + As = [A] + Is = [] + Ps = [] + + for W in Ws: + P,x = fit_candidate(W,x) + I = smoothed_prolongator(P,A) + A = I.T.tocsr() * A * I + As.append(A) + Ps.append(P) + Is.append(I) + return As,Is,Ps + +def make_bridge(I,N): + tail = I.indptr[-1].repeat(N - I.shape[0]) + ptr = concatenate((I.indptr,tail)) + return csr_matrix((I.data,I.indices,ptr),dims=(N,I.shape[1]),check=False) + +class adaptive_sa_solver: + def __init__(self,A,options=None): + self.A = A + + self.Rs = [] + self.__construct_hierarchy(A) + + def __construct_hierarchy(self,A): + #if self.A.shape[0] <= self.opts['coarse: max size']: + # raise ValueError,'small matrices not handled yet' + + x,AggOps = self.__initialization_stage(A) #first candidate + Ws = AggOps + + #x[:] = 1 #TEST + + self.candidates = [x] + #self.candidates = [1.0/D.data] + + #create SA using x here + As,Is,Ps = sa_hierarchy(A,Ws,x) + + for i in range(0): + x = self.__develop_candidate(A,As,Is,Ps,Ws,AggOps) + #if i == 0: + # x = arange(20).repeat(20).astype(float) + #elif i == 1: + # x = arange(20).repeat(20).astype(float) + # x = numpy.ravel(transpose(x.reshape((20,20)))) + + As,Is,Ps,Ws = self.__augment_cycle(A,As,Ps,Ws,AggOps,x) + + self.candidates.append(x) + + self.Ps = Ps + self.solver = multilevel_solver(As,Is) + self.AggOps = AggOps + + + + def __develop_candidate(self,A,As,Is,Ps,Ws,AggOps): + x = scipy.rand(A.shape[0]) + b = zeros_like(x) + + + #x = arange(200).repeat(200).astype(float) + #x[:] = 1 #TEST + + mu = 5 + + solver = multilevel_solver(As,Is) + + for n in range(mu): + x = solver.solve(b, x0=x, tol=1e-8, maxiter=1) + #TEST FOR CONVERGENCE HERE + + A_l,P_l,W_l,x_l = As[0],Ps[0],Ws[0],x + + temp_Is = [] + for i in range(len(As) - 2): + P_l_new, x_m, W_l_new, W_m_new = orthonormalize_prolongator(P_l, x_l, W_l, AggOps[i+1]) + + I_l_new = smoothed_prolongator(P_l_new,A_l) + A_m_new = I_l_new.T.tocsr() * A_l * I_l_new + bridge = make_bridge(Is[i+1],A_m_new.shape[0]) + + temp_solver = multilevel_solver( [A_m_new] + As[i+2:], [bridge] + Is[i+2:] ) + + for n in range(mu): + x_m = temp_solver.solve(zeros_like(x_m), x0=x_m, tol=1e-8, maxiter=1) + + temp_Is.append(I_l_new) + + W_l = vstack_csr(Ws[i+1],W_m_new) #prepare for next iteration + A_l = A_m_new + x_l = x_m + P_l = make_bridge(Ps[i+1],A_m_new.shape[0]) + + x = x_l + for I in reversed(temp_Is): + x = I*x + + return x + + + def __augment_cycle(self,A,As,Ps,Ws,AggOps,x): + #make a new cycle using the new candidate + A_l,P_l,W_l,x_l = As[0],Ps[0],AggOps[0],x + + new_As,new_Is,new_Ps,new_Ws = [A],[],[],[AggOps[0]] + + for i in range(len(As) - 2): + P_l_new, x_m, W_l_new, W_m_new = orthonormalize_prolongator(P_l, x_l, W_l, AggOps[i+1]) + + I_l_new = smoothed_prolongator(P_l_new,A_l) + A_m_new = I_l_new.T.tocsr() * A_l * I_l_new + W_m_new = vstack_csr(Ws[i+1],W_m_new) + + new_As.append(A_m_new) + new_Ws.append(W_m_new) + new_Is.append(I_l_new) + new_Ps.append(P_l_new) + + #prepare for next iteration + W_l = W_m_new + A_l = A_m_new + x_l = x_m + P_l = make_bridge(Ps[i+1],A_m_new.shape[0]) + + P_l_new, x_m, W_l_new, W_m_new = orthonormalize_prolongator(P_l, x_l, W_l, csr_matrix((P_l.shape[1],1))) + I_l_new = smoothed_prolongator(P_l_new,A_l) + A_m_new = I_l_new.T.tocsr() * A_l * I_l_new + + new_As.append(A_m_new) + new_Is.append(I_l_new) + new_Ps.append(P_l_new) + + return new_As,new_Is,new_Ps,new_Ws + + + def __initialization_stage(self,A): + max_levels = 10 + max_coarse = 50 + + AggOps = [] + Is = [] + + # aSA parameters + mu = 5 # 
number of test relaxation iterations + epsilon = 0.1 # minimum acceptable relaxation convergence factor + + scipy.random.seed(0) + + #step 1 + A_l = A + x = scipy.rand(A_l.shape[0]) + skip_f_to_i = False + + #step 2 + b = zeros_like(x) + gauss_seidel(A_l,x,b,iterations=mu) + #step 3 + #test convergence rate here + + As = [A] + + while len(AggOps) + 1 < max_levels and A_l.shape[0] > max_coarse: + W_l = sa_constant_interpolation(A_l,epsilon=0.08*0.5**(len(AggOps)-1)) #step 4b #TEST + #W_l = sa_constant_interpolation(A_l,epsilon=0) #step 4b #TEST + P_l,x = fit_candidate(W_l,x) #step 4c + I_l = smoothed_prolongator(P_l,A_l) #step 4d + A_l = I_l.T.tocsr() * A_l * I_l #step 4e + + AggOps.append(W_l) + Is.append(I_l) + As.append(A_l) + + if A_l.shape <= max_coarse: break + + if not skip_f_to_i: + print "." + x_hat = x.copy() #step 4g + gauss_seidel(A_l,x,zeros_like(x),iterations=mu) #step 4h + x_A_x = inner(x,A_l*x) + if (x_A_x/inner(x_hat,A_l*x_hat))**(1.0/mu) < epsilon: #step 4i + print "sufficient convergence, skipping" + skip_f_to_i = True + if x_A_x == 0: + x = x_hat #need to restore x + + #update fine-level candidate + for A_l,I in reversed(zip(As[1:],Is)): + gauss_seidel(A_l,x,zeros_like(x),iterations=mu) #TEST + x = I * x + + gauss_seidel(A,x,b,iterations=mu) #TEST + + return x,AggOps #first candidate,aggregation + + + + +from scipy import * +from utils import diag_sparse +from multilevel import poisson_problem1D,poisson_problem2D +#A = poisson_problem2D(100) +A = io.mmread("tests/sample_data/laplacian_40_3dcube.mtx").tocsr() + +#A = A*A +#D = diag_sparse(1.0/sqrt(10**(12*rand(A.shape[0])-6))).tocsr() +#A = D * A * D +#A = io.mmread("nos2.mtx").tocsr() +asa = adaptive_sa_solver(A) +x = rand(A.shape[0]) +b = zeros_like(x) + + +print "solving" +x_sol,residuals = asa.solver.solve(b,x,tol=1e-12,maxiter=30,return_residuals=True) +residuals = array(residuals)/residuals[0] +print "residuals ",residuals + +print asa.solver + +print "constant Rayleigh quotient",dot(ones(A.shape[0]),A*ones(A.shape[0]))/float(A.shape[0]) + +for c in asa.candidates: + print "candidate Rayleigh quotient",dot(c,A*c)/dot(c,c) + + + +##W = asa.AggOps[0]*asa.AggOps[1] +##pcolor((W * rand(W.shape[1])).reshape((200,200))) + +def plot2d(x): + from pylab import pcolor + pcolor(x.reshape(sqrt(len(x)),sqrt(len(x)))) + Modified: trunk/scipy/sandbox/multigrid/coarsen.py =================================================================== --- trunk/scipy/sandbox/multigrid/coarsen.py 2007-09-06 08:26:39 UTC (rev 3306) +++ trunk/scipy/sandbox/multigrid/coarsen.py 2007-09-07 19:55:57 UTC (rev 3307) @@ -1,10 +1,9 @@ -from scipy import * import multigridtools import scipy import numpy -from utils import diag_sparse,inf_norm +from utils import diag_sparse,infinity_norm def rs_strong_connections(A,theta): @@ -33,37 +32,45 @@ if not scipy.sparse.isspmatrix_csr(A): raise TypeError('expected sparse.csr_matrix') Sp,Sj,Sx = multigridtools.sa_strong_connections(A.shape[0],epsilon,A.indptr,A.indices,A.data) + return scipy.sparse.csr_matrix((Sx,Sj,Sp),A.shape) -def sa_constant_interpolation(A,epsilon=None): +def sa_constant_interpolation(A,epsilon): if not scipy.sparse.isspmatrix_csr(A): raise TypeError('expected sparse.csr_matrix') - if epsilon is not None: - S = sa_strong_connections(A,epsilon) - else: - S = A - + S = sa_strong_connections(A,epsilon) + + #S.ensure_sorted_indices() + #tentative (non-smooth) interpolation operator I - Ij = multigridtools.sa_get_aggregates(A.shape[0],S.indptr,S.indices) - Ip = numpy.arange(len(Ij)+1) - Ix = 
numpy.ones(len(Ij)) + Pj = multigridtools.sa_get_aggregates(S.shape[0],S.indptr,S.indices) + Pp = numpy.arange(len(Pj)+1) + Px = numpy.ones(len(Pj)) - return scipy.sparse.csr_matrix((Ix,Ij,Ip)) + return scipy.sparse.csr_matrix((Px,Pj,Pp)) - +##def sa_smoother(A,S,omega): +## Bp,Bj,Bx = multigridtools.sa_smoother(A.shape[0],omega,A.indptr,A.indices,A.data,S.indptr,S.indices,S.data) +## +## return csr_matrix((Bx,Bj,Bp),dims=A.shape) + def sa_interpolation(A,epsilon,omega=4.0/3.0): if not scipy.sparse.isspmatrix_csr(A): raise TypeError('expected sparse.csr_matrix') + + P = sa_constant_interpolation(A,epsilon) + +## As = sa_strong_connections(A,epsilon) +## S = sa_smoother(A,S,omega) - I = sa_constant_interpolation(A,epsilon) D_inv = diag_sparse(1.0/diag_sparse(A)) D_inv_A = D_inv * A - D_inv_A *= -omega/inf_norm(D_inv_A) + D_inv_A *= omega/infinity_norm(D_inv_A) - P = I + (D_inv_A*I) #same as P=S*I, (faster?) - - return P + I = P - (D_inv_A*P) #same as I=S*P, (faster?) + + return I Modified: trunk/scipy/sandbox/multigrid/multigridtools/smoothed_aggregation.h =================================================================== --- trunk/scipy/sandbox/multigrid/multigridtools/smoothed_aggregation.h 2007-09-06 08:26:39 UTC (rev 3306) +++ trunk/scipy/sandbox/multigrid/multigridtools/smoothed_aggregation.h 2007-09-07 19:55:57 UTC (rev 3307) @@ -5,8 +5,8 @@ #include #include #include +#include - //#define DEBUG @@ -20,24 +20,22 @@ Sp->push_back(0); //compute diagonal values - std::vector diags(n_row); + std::vector diags(n_row,T(0)); for(int i = 0; i < n_row; i++){ int row_start = Ap[i]; int row_end = Ap[i+1]; for(int jj = row_start; jj < row_end; jj++){ - if(Aj[jj] == i){ - diags[i] = Ax[jj]; - break; - } + if(Aj[jj] == i){ + diags[i] = Ax[jj]; + break; + } } } #ifdef DEBUG for(int i = 0; i < n_row; i++){ assert(diags[i] > 0); } #endif - - for(int i = 0; i < n_row; i++){ int row_start = Ap[i]; int row_end = Ap[i+1]; @@ -45,14 +43,15 @@ T eps_Aii = epsilon*epsilon*diags[i]; for(int jj = row_start; jj < row_end; jj++){ - const int& j = Aj[jj]; - const T& Aij = Ax[jj]; + const int j = Aj[jj]; + const T Aij = Ax[jj]; if(i == j){continue;} - if(Aij*Aij >= eps_Aii * diags[j]){ - Sj->push_back(j); - Sx->push_back(Aij); + // |A(i,j)| < epsilon * sqrt(|A(i,i)|*|A(j,j)|) + if(Aij*Aij >= std::abs(eps_Aii * diags[j])){ + Sj->push_back(j); + Sx->push_back(Aij); } } Sp->push_back(Sj->size()); @@ -61,9 +60,9 @@ void sa_get_aggregates(const int n_row, - const int Ap[], const int Aj[], - std::vector * Bj){ - + const int Ap[], const int Aj[], + std::vector * Bj) +{ std::vector aggregates(n_row,-1); int num_aggregates = 0; @@ -72,21 +71,19 @@ for(int i = 0; i < n_row; i++){ if(aggregates[i] >= 0){ continue; } //already marked - const int& row_start = Ap[i]; - const int& row_end = Ap[i+1]; + const int row_start = Ap[i]; + const int row_end = Ap[i+1]; - //Determine whether all neighbors of this node are free (not already aggregates) bool free_neighborhood = true; for(int jj = row_start; jj < row_end; jj++){ if(aggregates[Aj[jj]] >= 0){ - free_neighborhood = false; - break; + free_neighborhood = false; + break; } } if(!free_neighborhood){ continue; } //bail out - //Make an aggregate out of this node and its strong neigbors aggregates[i] = num_aggregates; for(int jj = row_start; jj < row_end; jj++){ @@ -96,52 +93,49 @@ } - //Pass #2 std::vector aggregates_copy(aggregates); for(int i = 0; i < n_row; i++){ if(aggregates[i] >= 0){ continue; } //already marked - const int& row_start = Ap[i]; - const int& row_end = Ap[i+1]; + 
const int row_start = Ap[i]; + const int row_end = Ap[i+1]; for(int jj = row_start; jj < row_end; jj++){ - const int& j = Aj[jj]; + const int j = Aj[jj]; if(aggregates_copy[j] >= 0){ - aggregates[i] = aggregates_copy[j]; - break; + aggregates[i] = aggregates_copy[j]; + break; } } } - //Pass #3 for(int i = 0; i < n_row; i++){ if(aggregates[i] >= 0){ continue; } //already marked - - const int& row_start = Ap[i]; - const int& row_end = Ap[i+1]; + + const int row_start = Ap[i]; + const int row_end = Ap[i+1]; aggregates[i] = num_aggregates; for(int jj = row_start; jj < row_end; jj++){ - const int& j = Aj[jj]; + const int j = Aj[jj]; if(aggregates[j] < 0){ //unmarked neighbors - aggregates[j] = num_aggregates; + aggregates[j] = num_aggregates; } } num_aggregates++; } - #ifdef DEBUG for(int i = 0; i < n_row; i++){ assert(aggregates[i] >= 0 && aggregates[i] < num_aggregates); } #endif - *Bj = aggregates; + aggregates.swap(*Bj); } Modified: trunk/scipy/sandbox/multigrid/multilevel.py =================================================================== --- trunk/scipy/sandbox/multigrid/multilevel.py 2007-09-06 08:26:39 UTC (rev 3306) +++ trunk/scipy/sandbox/multigrid/multilevel.py 2007-09-07 19:55:57 UTC (rev 3307) @@ -10,9 +10,9 @@ from coarsen import sa_interpolation,rs_interpolation from relaxation import gauss_seidel,jacobi +from utils import infinity_norm - def poisson_problem1D(N): """ Return a sparse CSR matrix for the 1d poisson problem @@ -21,7 +21,7 @@ """ D = 2*numpy.ones(N) O = -numpy.ones(N) - return scipy.sparse.spdiags([D,O,O],[0,-1,1],N,N).tocsr() + return scipy.sparse.spdiags([D,O,O],[0,-1,1],N,N).tocoo().tocsr() #eliminate zeros def poisson_problem2D(N): """ @@ -33,7 +33,8 @@ T = -numpy.ones(N*N) O = -numpy.ones(N*N) T[N-1::N] = 0 - return scipy.sparse.spdiags([D,O,T,T,O],[0,-N,-1,1,N],N*N,N*N).tocsr() + return scipy.sparse.spdiags([D,O,T,T,O],[0,-N,-1,1,N],N*N,N*N).tocoo().tocsr() #eliminate zeros + def ruge_stuben_solver(A,max_levels=10,max_coarse=500): """ @@ -41,8 +42,8 @@ References: "Multigrid" - Trottenberg, U., C. W. Oosterlee, and Anton Schuller. San Diego: Academic Press, 2001. - See Appendix A + Trottenberg, U., C. W. Oosterlee, and Anton Schuller. San Diego: Academic Press, 2001. 
+ See Appendix A """ As = [A] @@ -58,7 +59,7 @@ return multilevel_solver(As,Ps) -def smoothed_aggregation_solver(A,max_levels=10,max_coarse=500): +def smoothed_aggregation_solver(A,max_levels=10,max_coarse=500,epsilon=0.08): """ Create a multilevel solver using Smoothed Aggregation (SA) @@ -72,8 +73,9 @@ Ps = [] while len(As) < max_levels and A.shape[0] > max_coarse: - P = sa_interpolation(A,epsilon=0.08*0.5**(len(As)-1)) - + P = sa_interpolation(A,epsilon=epsilon*0.5**(len(As)-1)) + #P = sa_interpolation(A,epsilon=0.0) + A = (P.T.tocsr() * A) * P #galerkin operator As.append(A) @@ -108,7 +110,10 @@ """number of unknowns on all levels / number of unknowns on the finest level""" return sum([A.shape[0] for A in self.As])/float(self.As[0].shape[0]) - + + def psolve(self, b): + return self.solve(b,maxiter=1) + def solve(self, b, x0=None, tol=1e-5, maxiter=100, callback=None, return_residuals=False): """ TODO @@ -122,12 +127,12 @@ #TODO change use of tol (relative tolerance) to agree with other iterative solvers A = self.As[0] - residuals = [norm(b-A*x,2)] + residuals = [scipy.linalg.norm(b-A*x)] while len(residuals) <= maxiter and residuals[-1]/residuals[0] > tol: self.__solve(0,x,b) - residuals.append(scipy.linalg.norm(b-A*x,2)) + residuals.append(scipy.linalg.norm(b-A*x)) if callback is not None: callback(x) @@ -142,11 +147,11 @@ A = self.As[lvl] if len(self.As) == 1: - x[:] = scipy.linalg.solve(A.todense(),b) - return x + x[:] = scipy.linsolve.spsolve(A,b) + return self.presmoother(A,x,b) - + residual = b - A*x coarse_x = zeros((self.As[lvl+1].shape[0])) @@ -154,7 +159,9 @@ if lvl == len(self.As) - 2: #direct solver on coarsest level - coarse_x[:] = scipy.linalg.solve(self.As[-1].todense(),coarse_b) + coarse_x[:] = scipy.linsolve.spsolve(self.As[-1],coarse_b) + #coarse_x[:] = scipy.linalg.cg(self.As[-1],coarse_b,tol=1e-12)[0] + #print "coarse residual norm",scipy.linalg.norm(coarse_b - self.As[-1]*coarse_x) else: self.__solve(lvl+1,coarse_x,coarse_b) @@ -165,26 +172,32 @@ def presmoother(self,A,x,b): gauss_seidel(A,x,b,iterations=1,sweep="forward") + #x += 4.0/(3.0*infinity_norm(A))*(b - A*x) def postsmoother(self,A,x,b): - gauss_seidel(A,x,b,iterations=1,sweep="backward") + gauss_seidel(A,x,b,iterations=1,sweep="forward") + #gauss_seidel(A,x,b,iterations=1,sweep="backward") + #x += 4.0/(3.0*infinity_norm(A))*(b - A*x) if __name__ == '__main__': from scipy import * A = poisson_problem2D(200) + #A = io.mmread("rocker_arm_surface.mtx").tocsr() + ml = smoothed_aggregation_solver(A) #ml = ruge_stuben_solver(A) + x = rand(A.shape[0]) b = zeros_like(x) + #b = rand(A.shape[0]) - resid = [] - - for n in range(10): - x = ml.solve(b,x,maxiter=1) - resid.append(linalg.norm(A*x)) + x_sol,residuals = ml.solve(b,x0=x,maxiter=40,tol=1e-10,return_residuals=True) + residuals = array(residuals)/residuals[0] + print residuals + Modified: trunk/scipy/sandbox/multigrid/relaxation.py =================================================================== --- trunk/scipy/sandbox/multigrid/relaxation.py 2007-09-06 08:26:39 UTC (rev 3306) +++ trunk/scipy/sandbox/multigrid/relaxation.py 2007-09-07 19:55:57 UTC (rev 3307) @@ -13,6 +13,12 @@ iterations - number of iterations to perform (default: 1) sweep - slice of unknowns to relax (default: all in forward direction) """ + if A.shape[0] != A.shape[1]: + raise ValueError,'expected symmetric matrix' + + if A.shape[1] != len(x) or len(x) != len(b): + raise ValueError,'unexpected number of unknowns' + if sweep == 'forward': row_start,row_stop,row_step = 0,len(x),1 elif sweep 
== 'backward': Added: trunk/scipy/sandbox/multigrid/tests/test_coarsen.py =================================================================== --- trunk/scipy/sandbox/multigrid/tests/test_coarsen.py 2007-09-06 08:26:39 UTC (rev 3306) +++ trunk/scipy/sandbox/multigrid/tests/test_coarsen.py 2007-09-07 19:55:57 UTC (rev 3307) @@ -0,0 +1,160 @@ +from numpy.testing import * + +from numpy import sqrt,empty,ones,arange,array_split +from scipy import rand +from scipy.sparse import spdiags,csr_matrix,lil_matrix +import numpy + +set_package_path() +import scipy.multigrid +from scipy.multigrid.coarsen import sa_strong_connections,sa_constant_interpolation +from scipy.multigrid.multilevel import poisson_problem1D,poisson_problem2D +restore_path() + + +def reference_sa_strong_connections(A,epsilon): + A_coo = A.tocoo() + S = lil_matrix(A.shape) + + for (i,j,v) in zip(A_coo.row,A_coo.col,A_coo.data): + if i == j: continue #skip diagonal + + if abs(A[i,j]) >= epsilon*sqrt(abs(A[i,i])*abs(A[j,j])): + S[i,j] = v + + return S.tocsr() + + +# note that this method only tests the current implementation, not +# all possible implementations +def reference_sa_constant_interpolation(A,epsilon): + S = sa_strong_connections(A,epsilon) + S = array_split(S.indices,S.indptr[1:-1]) + + n = A.shape[0] + + R = set(range(n)) + j = 0 + + aggregates = empty(n,dtype=A.indices.dtype) + aggregates[:] = -1 + + + # Pass #1 + for i,row in enumerate(S): + Ni = set(row) | set([i]) + + if Ni.issubset(R): + R -= Ni + for x in Ni: + aggregates[x] = j + j += 1 + + # Pass #2 + Old_R = R.copy() + for i,row in enumerate(S): + if i not in R: continue + + for x in row: + if x not in Old_R: + aggregates[i] = aggregates[x] + R.remove(i) + break + + + # Pass #3 + for i,row in enumerate(S): + if i not in R: continue + Ni = set(row) | set([i]) + + for x in Ni: + if x in R: + aggregates[x] = j + j += 1 + + assert(len(R) == 0) + + Pj = aggregates + Pp = arange(n+1) + Px = ones(n) + + return csr_matrix((Px,Pj,Pp)) + +class test_sa_strong_connections(NumpyTestCase): + def check_simple(self): + N = 4 + A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N).tocsr() + S = spdiags([ -ones(N),-ones(N)],[-1,1],N,N).tocsr() + assert_array_equal(sa_strong_connections(A,0.50).todense(),S.todense()) #all connections are strong + assert_array_equal(sa_strong_connections(A,0.51).todense(),0*S.todense()) #no connections are strong + + N = 100 + A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N).tocsr() + S = spdiags([ -ones(N),-ones(N)],[-1,1],N,N).tocsr() + assert_array_equal(sa_strong_connections(A,0.50).todense(),S.todense()) #all connections are strong + assert_array_equal(sa_strong_connections(A,0.51).todense(),0*S.todense()) #no connections are strong + + def check_random(self): + numpy.random.seed(0) + + for N in [2,3,5,10]: + A = csr_matrix(rand(N,N)) + for epsilon in [0.0,0.1,0.5,0.8,1.0,10.0]: + S_result = sa_strong_connections(A,epsilon) + S_expected = reference_sa_strong_connections(A,epsilon) + assert_array_equal(S_result.todense(),S_expected.todense()) + + def check_poisson1D(self): + for N in [2,3,5,7,10,11,19]: + A = poisson_problem1D(N) + for epsilon in [0.0,0.1,0.5,0.8,1.0]: + S_result = sa_strong_connections(A,epsilon) + S_expected = reference_sa_strong_connections(A,epsilon) + assert_array_equal(S_result.todense(),S_expected.todense()) + + def check_poisson2D(self): + for N in [2,3,5,7,10,11,19]: + A = poisson_problem2D(N) + for epsilon in [0.0,0.1,0.5,0.8,1.0]: + S_result = sa_strong_connections(A,epsilon) + S_expected = 
reference_sa_strong_connections(A,epsilon) + assert_array_equal(S_result.todense(),S_expected.todense()) + +## def check_sample_data(self): +## for filename in all_matrices: +## A = open_matrix(filename) + + +S_result = None +S_expected = None +class test_sa_constant_interpolation(NumpyTestCase): + def check_random(self): + numpy.random.seed(0) + + for N in [2,3,5,10]: + A = csr_matrix(rand(N,N)) + for epsilon in [0.0,0.1,0.5,0.8,1.0]: + S_result = sa_constant_interpolation(A,epsilon) + S_expected = reference_sa_constant_interpolation(A,epsilon) + assert_array_equal(S_result.todense(),S_expected.todense()) + + def check_poisson1D(self): + for N in [2,3,5,7,10,11,20,21,29,30]: + A = poisson_problem1D(N) + for epsilon in [0.0,0.1,0.5,0.8,1.0]: + S_result = sa_constant_interpolation(A,epsilon) + S_expected = reference_sa_constant_interpolation(A,epsilon) + assert_array_equal(S_result.todense(),S_expected.todense()) + + def check_poisson2D(self): + for N in [2,3,5,7,10,11,20,21,29,30]: + A = poisson_problem2D(N) + for epsilon in [0.0,0.1,0.5,0.8,1.0]: + S_result = sa_constant_interpolation(A,epsilon) + S_expected = reference_sa_constant_interpolation(A,epsilon) + assert_array_equal(S_result.todense(),S_expected.todense()) + + +if __name__ == '__main__': + NumpyTest().run() + Modified: trunk/scipy/sandbox/multigrid/tests/test_relaxation.py =================================================================== --- trunk/scipy/sandbox/multigrid/tests/test_relaxation.py 2007-09-06 08:26:39 UTC (rev 3306) +++ trunk/scipy/sandbox/multigrid/tests/test_relaxation.py 2007-09-07 19:55:57 UTC (rev 3307) @@ -7,6 +7,7 @@ set_package_path() +import scipy.multigrid from scipy.multigrid.relaxation import polynomial_smoother,gauss_seidel,jacobi restore_path() @@ -36,7 +37,51 @@ polynomial_smoother(A,x,b,[-0.14285714, 1., -2.]) assert_almost_equal(x,x0 - 0.14285714*A*A*r + A*r - 2*r) + def check_jacobi(self): + N = 1 + A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N).T + x = arange(N).astype(numpy.float64) + b = zeros(N) + jacobi(A,x,b) + assert_almost_equal(x,array([0])) + N = 3 + A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N).T + x = zeros(N) + b = arange(N).astype(numpy.float64) + jacobi(A,x,b) + assert_almost_equal(x,array([0.0,0.5,1.0])) + + N = 3 + A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N).T + x = arange(N).astype(numpy.float64) + b = zeros(N) + jacobi(A,x,b) + assert_almost_equal(x,array([0.5,1.0,0.5])) + + N = 1 + A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N).T + x = arange(N).astype(numpy.float64) + b = array([10]) + jacobi(A,x,b) + assert_almost_equal(x,array([5])) + + N = 3 + A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N).T + x = arange(N).astype(numpy.float64) + b = array([10,20,30]) + jacobi(A,x,b) + assert_almost_equal(x,array([5.5,11.0,15.5])) + + N = 3 + A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N).T + x = arange(N).astype(numpy.float64) + x_copy = x.copy() + b = array([10,20,30]) + jacobi(A,x,b,omega=1.0/3.0) + assert_almost_equal(x,2.0/3.0*x_copy + 1.0/3.0*array([5.5,11.0,15.5])) + + def check_gauss_seidel(self): N = 1 A = spdiags([2*ones(N),-ones(N),-ones(N)],[0,-1,1],N,N).T Added: trunk/scipy/sandbox/multigrid/tests/test_utils.py =================================================================== --- trunk/scipy/sandbox/multigrid/tests/test_utils.py 2007-09-06 08:26:39 UTC (rev 3306) +++ trunk/scipy/sandbox/multigrid/tests/test_utils.py 2007-09-07 19:55:57 UTC (rev 3307) @@ -0,0 +1,60 @@ +from numpy.testing import * + +import numpy 
+import scipy +from scipy import matrix,array,diag +from scipy.sparse import csr_matrix + + +set_package_path() +from scipy.multigrid.utils import infinity_norm,diag_sparse +restore_path() + + +class test_utils(NumpyTestCase): + def check_infinity_norm(self): + A = matrix([[-4]]) + assert_equal(infinity_norm(csr_matrix(A)),4) + + A = matrix([[1,0,-5],[-2,5,0]]) + assert_equal(infinity_norm(csr_matrix(A)),7) + + A = matrix([[0,1],[0,-5]]) + assert_equal(infinity_norm(csr_matrix(A)),5) + + A = matrix([[1.3,-4.7,0],[-2.23,5.5,0],[9,0,-2]]) + assert_equal(infinity_norm(csr_matrix(A)),11) + + def check_diag_sparse(self): + #check sparse -> array + A = matrix([[-4]]) + assert_equal(diag_sparse(csr_matrix(A)),[-4]) + + A = matrix([[1,0,-5],[-2,5,0]]) + assert_equal(diag_sparse(csr_matrix(A)),[1,5]) + + A = matrix([[0,1],[0,-5]]) + assert_equal(diag_sparse(csr_matrix(A)),[0,-5]) + + A = matrix([[1.3,-4.7,0],[-2.23,5.5,0],[9,0,-2]]) + assert_equal(diag_sparse(csr_matrix(A)),[1.3,5.5,-2]) + + #check array -> sparse + A = matrix([[-4]]) + assert_equal(diag_sparse(array([-4])).todense(),csr_matrix(A).todense()) + + A = matrix([[1,0],[0,5]]) + assert_equal(diag_sparse(array([1,5])).todense(),csr_matrix(A).todense()) + + A = matrix([[0,0],[0,-5]]) + assert_equal(diag_sparse(array([0,-5])).todense(),csr_matrix(A).todense()) + + A = matrix([[1.3,0,0],[0,5.5,0],[0,0,-2]]) + assert_equal(diag_sparse(array([1.3,5.5,-2])).todense(),csr_matrix(A).todense()) + + + + +if __name__ == '__main__': + NumpyTest().run() + Modified: trunk/scipy/sandbox/multigrid/utils.py =================================================================== --- trunk/scipy/sandbox/multigrid/utils.py 2007-09-06 08:26:39 UTC (rev 3306) +++ trunk/scipy/sandbox/multigrid/utils.py 2007-09-07 19:55:57 UTC (rev 3307) @@ -6,17 +6,18 @@ csr_matrix,csc_matrix,extract_diagonal -def inf_norm(A): +def infinity_norm(A): """ Infinity norm of a sparse matrix (maximum absolute row sum). This serves as an upper bound on spectral radius. """ - if not isspmatrix_csr(A): - return ValueError,'expected csr_matrix' - - abs_A = csr_matrix((abs(A.data),A.indices,A.indptr),dims=A.shape,check=False) - return (abs_A * numpy.ones(A.shape[1],dtype=A.dtype)).max() + if isspmatrix_csr(A) or isspmatrix_csc(A): + #avoid copying index and ptr arrays + abs_A = A.__class__((abs(A.data),A.indices,A.indptr),dims=A.shape,check=False) + return (abs_A * numpy.ones(A.shape[1],dtype=A.dtype)).max() + else: + return (abs(A) * numpy.ones(A.shape[1],dtype=A.dtype)).max() def diag_sparse(A): """ From scipy-svn at scipy.org Fri Sep 7 20:37:12 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Fri, 7 Sep 2007 19:37:12 -0500 (CDT) Subject: [Scipy-svn] r3308 - in trunk/scipy/sandbox/maskedarray: . 
tests Message-ID: <20070908003712.80BD239C00B@new.scipy.org> Author: pierregm Date: 2007-09-07 19:37:09 -0500 (Fri, 07 Sep 2007) New Revision: 3308 Modified: trunk/scipy/sandbox/maskedarray/core.py trunk/scipy/sandbox/maskedarray/morestats.py trunk/scipy/sandbox/maskedarray/tests/test_core.py Log: core.arraymethods : force to return masked when the result has no dimension but the mask is (an array of) True morestats.hdquantiles : prevents using apply_along_axis on 1D data morestats : introduction of hdmedian (as a shortcut to hdquantiles) Modified: trunk/scipy/sandbox/maskedarray/core.py =================================================================== --- trunk/scipy/sandbox/maskedarray/core.py 2007-09-07 19:55:57 UTC (rev 3307) +++ trunk/scipy/sandbox/maskedarray/core.py 2007-09-08 00:37:09 UTC (rev 3308) @@ -932,6 +932,9 @@ result._mask = mask elif mask is not nomask: result.__setmask__(getattr(mask, methodname)(*args, **params)) + else: + if mask.ndim and mask.all(): + return masked return result #.......................................................... @@ -2745,3 +2748,14 @@ data = masked_array([1,2,3],fill_value=-999) series = data[[0,2,1]] assert_equal(series._fill_value, data._fill_value) + + if 1: + "Check squeeze" + data = masked_array([[1,2,3]]) + assert_equal(data.squeeze(), [1,2,3]) + data = masked_array([[1,2,3]], mask=[[1,1,1]]) + assert_equal(data.squeeze(), [1,2,3]) + assert_equal(data.squeeze()._mask, [1,1,1]) + data = masked_array([[1]], mask=True) + assert(data.squeeze() is masked) + Modified: trunk/scipy/sandbox/maskedarray/morestats.py =================================================================== --- trunk/scipy/sandbox/maskedarray/morestats.py 2007-09-07 19:55:57 UTC (rev 3307) +++ trunk/scipy/sandbox/maskedarray/morestats.py 2007-09-08 00:37:09 UTC (rev 3308) @@ -29,7 +29,7 @@ from scipy.stats.distributions import norm, beta, t, binom from scipy.stats.morestats import find_repeats -__all__ = ['hdquantiles', 'hdquantiles_sd', +__all__ = ['hdquantiles', 'hdmedian', 'hdquantiles_sd', 'trimmed_mean_ci', 'mjci', 'rank_data'] @@ -57,9 +57,10 @@ The function is restricted to 2D arrays. """ def _hd_1D(data,prob,var): - "Computes the HD quantiles for a 1D array." + "Computes the HD quantiles for a 1D array. Returns nan for invalid data." xsorted = numpy.squeeze(numpy.sort(data.compressed().view(ndarray))) - n = len(xsorted) + # Don't use length here, in case we have a numpy scalar + n = xsorted.size #......... hd = empty((2,len(prob)), float_) if n < 2: @@ -88,7 +89,7 @@ data = masked_array(data, copy=False, dtype=float_) p = numpy.array(prob, copy=False, ndmin=1) # Computes quantiles along axis (or globally) - if (axis is None): + if (axis is None) or (data.ndim == 1): result = _hd_1D(data, p, var) else: assert data.ndim <= 2, "Array should be 2D at most !" @@ -97,6 +98,22 @@ return masked_array(result, mask=numpy.isnan(result)) #.............................................................................. +def hdmedian(data, axis=-1, var=False): + """Returns the Harrell-Davis estimate of the median along the given axis. + +:Inputs: + data: ndarray + Data array. + axis : integer *[None]* + Axis along which to compute the quantiles. If None, use a flattened array. + var : boolean *[False]* + Whether to return the variance of the estimate. + """ + result = hdquantiles(data,[0.5], axis=axis, var=var) + return result.squeeze() + + +#.............................................................................. 
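For reference, a short usage sketch of the hdmedian shortcut added above. The
import paths are assumptions based on the sandbox layout in this commit, and the
data are made up for illustration; hdmedian simply wraps hdquantiles at p=0.5.

    import numpy
    from scipy.sandbox.maskedarray.core import masked_array
    from scipy.sandbox.maskedarray.morestats import hdquantiles, hdmedian

    # Ten observations, the last two masked out as invalid.
    data = masked_array(numpy.arange(10, dtype=float), mask=[0]*8 + [1]*2)

    med = hdmedian(data)                         # Harrell-Davis median estimate
    qts = hdquantiles(data, [0.25, 0.5, 0.75])   # the general routine it wraps
    med_and_var = hdmedian(data, var=True)       # estimate plus its variance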
def hdquantiles_sd(data, prob=list([.25,.5,.75]), axis=None): """Computes the standard error of the Harrell-Davis quantile estimates by jackknife. Modified: trunk/scipy/sandbox/maskedarray/tests/test_core.py =================================================================== --- trunk/scipy/sandbox/maskedarray/tests/test_core.py 2007-09-07 19:55:57 UTC (rev 3307) +++ trunk/scipy/sandbox/maskedarray/tests/test_core.py 2007-09-08 00:37:09 UTC (rev 3308) @@ -1245,6 +1245,16 @@ assert_equal(xlist[1],[4,5,6,7]) assert_equal(xlist[2],[8,9,None,11]) + def check_squeeze(self): + "Check squeeze" + data = masked_array([[1,2,3]]) + assert_equal(data.squeeze(), [1,2,3]) + data = masked_array([[1,2,3]], mask=[[1,1,1]]) + assert_equal(data.squeeze(), [1,2,3]) + assert_equal(data.squeeze()._mask, [1,1,1]) + data = masked_array([[1]], mask=True) + assert(data.squeeze() is masked) + #.............................................................................. ############################################################################### From scipy-svn at scipy.org Sat Sep 8 03:46:15 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 8 Sep 2007 02:46:15 -0500 (CDT) Subject: [Scipy-svn] r3309 - in trunk/scipy/weave: . tests Message-ID: <20070908074615.B757439C030@new.scipy.org> Author: eric Date: 2007-09-08 02:46:10 -0500 (Sat, 08 Sep 2007) New Revision: 3309 Added: trunk/scipy/weave/numpy_scalar_spec.py trunk/scipy/weave/tests/test_numpy_scalar_spec.py Modified: trunk/scipy/weave/converters.py Log: - added the start of converters to handle the numpy scalar types. - only complex types are supported at the moment, and complex128 is the only tested one. - added the numpy scalar types to the default converter set. Modified: trunk/scipy/weave/converters.py =================================================================== --- trunk/scipy/weave/converters.py 2007-09-08 00:37:09 UTC (rev 3308) +++ trunk/scipy/weave/converters.py 2007-09-08 07:46:10 UTC (rev 3309) @@ -21,7 +21,7 @@ #common_spec.module_converter()] #---------------------------------------------------------------------------- -# add numeric array converters to the default +# add numpy array converters to the default # converter list. #---------------------------------------------------------------------------- try: @@ -31,6 +31,16 @@ pass #---------------------------------------------------------------------------- +# add numpy scalar converters to the default +# converter list. +#---------------------------------------------------------------------------- +try: + import numpy_scalar_spec + default.append(numpy_scalar_spec.numpy_complex_scalar_converter()) +except ImportError: + pass + +#---------------------------------------------------------------------------- # Add wxPython support # # RuntimeError can occur if wxPython isn't installed. Added: trunk/scipy/weave/numpy_scalar_spec.py =================================================================== --- trunk/scipy/weave/numpy_scalar_spec.py 2007-09-08 00:37:09 UTC (rev 3308) +++ trunk/scipy/weave/numpy_scalar_spec.py 2007-09-08 07:46:10 UTC (rev 3309) @@ -0,0 +1,20 @@ +""" Converters for all of NumPy's scalar types such as + int32, float32, complex128, etc. +""" +import numpy +import c_spec + +class numpy_complex_scalar_converter(c_spec.complex_converter): + """ Handles conversion of all the NumPy complex types. + This uses the same machinery as the standard python + complex converter. 
+ """ + def init_info(self): + # First, set up all the same specifications the normal + # complex converter uses. + c_spec.complex_converter.init_info(self) + + # But set this converter up to match the numpy complex + # types. + self.matching_types = [numpy.complex128, numpy.complex192, + numpy.complex64] Property changes on: trunk/scipy/weave/numpy_scalar_spec.py ___________________________________________________________________ Name: svn:eol-style + native Added: trunk/scipy/weave/tests/test_numpy_scalar_spec.py =================================================================== --- trunk/scipy/weave/tests/test_numpy_scalar_spec.py 2007-09-08 00:37:09 UTC (rev 3308) +++ trunk/scipy/weave/tests/test_numpy_scalar_spec.py 2007-09-08 07:46:10 UTC (rev 3309) @@ -0,0 +1,159 @@ +import time +import os,sys + +# Note: test_dir is global to this file. +# It is made by setup_test_location() + +#globals +global test_dir +test_dir = '' + +import numpy +from numpy.testing import * +set_package_path() +from weave import inline_tools,ext_tools +from weave.build_tools import msvc_exists, gcc_exists +from weave.catalog import unique_file +from weave.numpy_scalar_spec import numpy_complex_scalar_converter + +restore_path() + + +def unique_mod(d,file_name): + f = os.path.basename(unique_file(d,file_name)) + m = os.path.splitext(f)[0] + return m + +def remove_whitespace(in_str): + import string + out = string.replace(in_str," ","") + out = string.replace(out,"\t","") + out = string.replace(out,"\n","") + return out + +def print_assert_equal(test_string,actual,desired): + """this should probably be in scipy_test.testing + """ + import pprint + try: + assert(actual == desired) + except AssertionError: + import cStringIO + msg = cStringIO.StringIO() + msg.write(test_string) + msg.write(' failed\nACTUAL: \n') + pprint.pprint(actual,msg) + msg.write('DESIRED: \n') + pprint.pprint(desired,msg) + raise AssertionError, msg.getvalue() + +#---------------------------------------------------------------------------- +# Scalar conversion test classes +# int, float, complex +#---------------------------------------------------------------------------- + +class test_numpy_complex_scalar_converter(NumpyTestCase): + compiler = '' + + def setUp(self): + self.converter = numpy_complex_scalar_converter() + + def check_type_match_string(self,level=5): + assert( not self.converter.type_match('string') ) + def check_type_match_int(self,level=5): + assert( not self.converter.type_match(5)) + def check_type_match_float(self,level=5): + assert( not self.converter.type_match(5.)) + def check_type_match_complex128(self,level=5): + assert(self.converter.type_match(numpy.complex128(5.+1j))) + + def check_complex_var_in(self,level=5): + mod_name = sys._getframe().f_code.co_name + self.compiler + mod_name = unique_mod(test_dir,mod_name) + mod = ext_tools.ext_module(mod_name) + a = numpy.complex(1.+1j) + code = "a=std::complex(2.,2.);" + test = ext_tools.ext_function('test',code,['a']) + mod.add_function(test) + mod.compile(location = test_dir, compiler = self.compiler) + exec 'from ' + mod_name + ' import test' + b=numpy.complex128(1.+1j) + test(b) + try: + b = 1. 
+ test(b) + except TypeError: + pass + try: + b = 'abc' + test(b) + except TypeError: + pass + + def check_complex_return(self,level=5): + mod_name = sys._getframe().f_code.co_name + self.compiler + mod_name = unique_mod(test_dir,mod_name) + mod = ext_tools.ext_module(mod_name) + a = 1.+1j + code = """ + a= a + std::complex(2.,2.); + return_val = PyComplex_FromDoubles(a.real(),a.imag()); + """ + test = ext_tools.ext_function('test',code,['a']) + mod.add_function(test) + mod.compile(location = test_dir, compiler = self.compiler) + exec 'from ' + mod_name + ' import test' + b=1.+1j + c = test(b) + assert( c == 3.+3j) + + def check_inline(self, level=5): + a = numpy.complex128(1+1j) + result = inline_tools.inline("return_val=1.0/a;",['a']) + assert( result==.5-.5j) + +class test_msvc_numpy_complex_scalar_converter( + test_numpy_complex_scalar_converter): + compiler = 'msvc' +class test_unix_numpy_complex_scalar_converter( + test_numpy_complex_scalar_converter): + compiler = '' +class test_gcc_numpy_complex_scalar_converter( + test_numpy_complex_scalar_converter): + compiler = 'gcc' + + +def setup_test_location(): + import tempfile + #test_dir = os.path.join(tempfile.gettempdir(),'test_files') + test_dir = tempfile.mktemp() + if not os.path.exists(test_dir): + os.mkdir(test_dir) + sys.path.insert(0,test_dir) + return test_dir + +test_dir = setup_test_location() + +def teardown_test_location(): + import tempfile + test_dir = os.path.join(tempfile.gettempdir(),'test_files') + if sys.path[0] == test_dir: + sys.path = sys.path[1:] + return test_dir + +def remove_file(name): + test_dir = os.path.abspath(name) + +if not msvc_exists(): + for _n in dir(): + if _n[:10]=='test_msvc_': exec 'del '+_n +else: + for _n in dir(): + if _n[:10]=='test_unix_': exec 'del '+_n + +if not (gcc_exists() and msvc_exists() and sys.platform == 'win32'): + for _n in dir(): + if _n[:9]=='test_gcc_': exec 'del '+_n + +if __name__ == "__main__": + NumpyTest('weave.numpy_scalar_spec').run() Property changes on: trunk/scipy/weave/tests/test_numpy_scalar_spec.py ___________________________________________________________________ Name: svn:eol-style + native From scipy-svn at scipy.org Sat Sep 8 22:13:31 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 8 Sep 2007 21:13:31 -0500 (CDT) Subject: [Scipy-svn] r3310 - trunk Message-ID: <20070909021331.62C0439C0D8@new.scipy.org> Author: oliphant Date: 2007-09-08 21:13:29 -0500 (Sat, 08 Sep 2007) New Revision: 3310 Modified: trunk/setup.py Log: Fix problem with version information being doubled. Modified: trunk/setup.py =================================================================== --- trunk/setup.py 2007-09-08 07:46:10 UTC (rev 3309) +++ trunk/setup.py 2007-09-09 02:13:29 UTC (rev 3310) @@ -35,10 +35,8 @@ sys.path.insert(0,os.path.join(local_path,'scipy')) # to retrive version try: - from version import version as version setup( name = 'scipy', - version = version, # will be overwritten by configuration version maintainer = "SciPy Developers", maintainer_email = "scipy-dev at scipy.org", description = "Scientific Algorithms Library for Python", From scipy-svn at scipy.org Fri Sep 14 14:37:39 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Fri, 14 Sep 2007 13:37:39 -0500 (CDT) Subject: [Scipy-svn] r3311 - in branches/0.6.x: . 
scipy/sandbox scipy/sandbox/models scipy/sandbox/models/family scipy/sandbox/models/robust scipy/sandbox/models/tests scipy/stats Message-ID: <20070914183739.047B839C1EF@new.scipy.org> Author: jarrod.millman Date: 2007-09-14 13:37:31 -0500 (Fri, 14 Sep 2007) New Revision: 3311 Added: branches/0.6.x/scipy/sandbox/models/ Removed: branches/0.6.x/scipy/stats/models/ Modified: branches/0.6.x/MANIFEST.in branches/0.6.x/scipy/sandbox/models/__init__.py branches/0.6.x/scipy/sandbox/models/bspline.py branches/0.6.x/scipy/sandbox/models/contrast.py branches/0.6.x/scipy/sandbox/models/cox.py branches/0.6.x/scipy/sandbox/models/family/__init__.py branches/0.6.x/scipy/sandbox/models/family/family.py branches/0.6.x/scipy/sandbox/models/formula.py branches/0.6.x/scipy/sandbox/models/gam.py branches/0.6.x/scipy/sandbox/models/glm.py branches/0.6.x/scipy/sandbox/models/mixed.py branches/0.6.x/scipy/sandbox/models/model.py branches/0.6.x/scipy/sandbox/models/regression.py branches/0.6.x/scipy/sandbox/models/rlm.py branches/0.6.x/scipy/sandbox/models/robust/__init__.py branches/0.6.x/scipy/sandbox/models/setup.py branches/0.6.x/scipy/sandbox/models/smoothers.py branches/0.6.x/scipy/sandbox/models/tests/test_bspline.py branches/0.6.x/scipy/sandbox/models/tests/test_formula.py branches/0.6.x/scipy/sandbox/models/tests/test_glm.py branches/0.6.x/scipy/sandbox/models/tests/test_regression.py branches/0.6.x/scipy/sandbox/models/tests/test_rlm.py branches/0.6.x/scipy/sandbox/models/tests/test_utils.py branches/0.6.x/scipy/sandbox/models/utils.py branches/0.6.x/scipy/stats/setup.py Log: moved models code back to sandbox for 0.6 release Modified: branches/0.6.x/MANIFEST.in =================================================================== --- branches/0.6.x/MANIFEST.in 2007-09-09 02:13:29 UTC (rev 3310) +++ branches/0.6.x/MANIFEST.in 2007-09-14 18:37:31 UTC (rev 3311) @@ -42,6 +42,10 @@ include scipy/sandbox/image/* include scipy/sandbox/maskedarray/* include scipy/sandbox/maskedarray/tests/* +include scipy/sandbox/models/* +include scipy/sandbox/models/family/* +include scipy/sandbox/models/robust/* +include scipy/sandbox/models/tests/* include scipy/sandbox/montecarlo/* include scipy/sandbox/montecarlo/src/* include scipy/sandbox/montecarlo/tests/* Copied: branches/0.6.x/scipy/sandbox/models (from rev 3310, branches/0.6.x/scipy/stats/models) Modified: branches/0.6.x/scipy/sandbox/models/__init__.py =================================================================== --- branches/0.6.x/scipy/stats/models/__init__.py 2007-09-09 02:13:29 UTC (rev 3310) +++ branches/0.6.x/scipy/sandbox/models/__init__.py 2007-09-14 18:37:31 UTC (rev 3311) @@ -4,15 +4,15 @@ __docformat__ = 'restructuredtext' -from scipy.stats.models.info import __doc__ +from scipy.sandbox.models.info import __doc__ -import scipy.stats.models.model -import scipy.stats.models.formula -import scipy.stats.models.regression -import scipy.stats.models.robust -import scipy.stats.models.family -from scipy.stats.models.glm import model as glm -from scipy.stats.models.rlm import model as rlm +import scipy.sandbox.models.model +import scipy.sandbox.models.formula +import scipy.sandbox.models.regression +import scipy.sandbox.models.robust +import scipy.sandbox.models.family +from scipy.sandbox.models.glm import model as glm +from scipy.sandbox.models.rlm import model as rlm __all__ = filter(lambda s:not s.startswith('_'),dir()) Modified: branches/0.6.x/scipy/sandbox/models/bspline.py =================================================================== --- 
branches/0.6.x/scipy/stats/models/bspline.py 2007-09-09 02:13:29 UTC (rev 3310) +++ branches/0.6.x/scipy/sandbox/models/bspline.py 2007-09-14 18:37:31 UTC (rev 3311) @@ -20,7 +20,7 @@ from scipy.linalg import solveh_banded from scipy.optimize import golden -from scipy.stats.models import _bspline +from scipy.sandbox.models import _bspline def _band2array(a, lower=0, symmetric=False, hermitian=False): """ Modified: branches/0.6.x/scipy/sandbox/models/contrast.py =================================================================== --- branches/0.6.x/scipy/stats/models/contrast.py 2007-09-09 02:13:29 UTC (rev 3310) +++ branches/0.6.x/scipy/sandbox/models/contrast.py 2007-09-14 18:37:31 UTC (rev 3311) @@ -1,6 +1,6 @@ import numpy as N from numpy.linalg import pinv -from scipy.stats.models import utils +from scipy.sandbox.models import utils class ContrastResults: """ Modified: branches/0.6.x/scipy/sandbox/models/cox.py =================================================================== --- branches/0.6.x/scipy/stats/models/cox.py 2007-09-09 02:13:29 UTC (rev 3310) +++ branches/0.6.x/scipy/sandbox/models/cox.py 2007-09-14 18:37:31 UTC (rev 3311) @@ -3,7 +3,7 @@ import numpy as N -from scipy.stats.models import survival, model +from scipy.sandbox.models import survival, model class discrete: @@ -199,7 +199,7 @@ for i in range(2*n): subjects[i].X = X[i] - import scipy.stats.models.formula as F + import scipy.sandbox.models.formula as F x = F.quantitative('X') f = F.formula(x) Modified: branches/0.6.x/scipy/sandbox/models/family/__init__.py =================================================================== --- branches/0.6.x/scipy/stats/models/family/__init__.py 2007-09-09 02:13:29 UTC (rev 3310) +++ branches/0.6.x/scipy/sandbox/models/family/__init__.py 2007-09-14 18:37:31 UTC (rev 3311) @@ -10,7 +10,7 @@ ''' -from scipy.stats.models.family.family import Gaussian, Family, \ +from scipy.sandbox.models.family.family import Gaussian, Family, \ Poisson, Gamma, InverseGaussian, Binomial Modified: branches/0.6.x/scipy/sandbox/models/family/family.py =================================================================== --- branches/0.6.x/scipy/stats/models/family/family.py 2007-09-09 02:13:29 UTC (rev 3310) +++ branches/0.6.x/scipy/sandbox/models/family/family.py 2007-09-14 18:37:31 UTC (rev 3311) @@ -1,6 +1,6 @@ import numpy as N -from scipy.stats.models.family import links as L -from scipy.stats.models.family import varfuncs as V +from scipy.sandbox.models.family import links as L +from scipy.sandbox.models.family import varfuncs as V class Family(object): Modified: branches/0.6.x/scipy/sandbox/models/formula.py =================================================================== --- branches/0.6.x/scipy/stats/models/formula.py 2007-09-09 02:13:29 UTC (rev 3310) +++ branches/0.6.x/scipy/sandbox/models/formula.py 2007-09-14 18:37:31 UTC (rev 3311) @@ -247,7 +247,7 @@ of another term, i.e. to take powers: >>> import numpy as N - >>> from scipy.stats.models import formula + >>> from scipy.sandbox.models import formula >>> X = N.linspace(0,10,101) >>> x = formula.term('X') >>> x.namespace={'X':X} @@ -600,7 +600,7 @@ only term in the formula, then a keywords argument \'nrow\' is needed. 
->>> from scipy.stats.models.formula import formula, I +>>> from scipy.sandbox.models.formula import formula, I >>> I() array(1.0) >>> I(nrow=5) Modified: branches/0.6.x/scipy/sandbox/models/gam.py =================================================================== --- branches/0.6.x/scipy/stats/models/gam.py 2007-09-09 02:13:29 UTC (rev 3310) +++ branches/0.6.x/scipy/sandbox/models/gam.py 2007-09-14 18:37:31 UTC (rev 3311) @@ -5,9 +5,9 @@ import numpy as N -from scipy.stats.models import family -from scipy.stats.models.bspline import SmoothingSpline -from scipy.stats.models.glm import model as glm +from scipy.sandbox.models import family +from scipy.sandbox.models.bspline import SmoothingSpline +from scipy.sandbox.models.glm import model as glm def default_smoother(x): _x = x.copy() Modified: branches/0.6.x/scipy/sandbox/models/glm.py =================================================================== --- branches/0.6.x/scipy/stats/models/glm.py 2007-09-09 02:13:29 UTC (rev 3310) +++ branches/0.6.x/scipy/sandbox/models/glm.py 2007-09-14 18:37:31 UTC (rev 3311) @@ -3,8 +3,8 @@ -------------------- """ import numpy as N -from scipy.stats.models import family -from scipy.stats.models.regression import wls_model +from scipy.sandbox.models import family +from scipy.sandbox.models.regression import wls_model class model(wls_model): Modified: branches/0.6.x/scipy/sandbox/models/mixed.py =================================================================== --- branches/0.6.x/scipy/stats/models/mixed.py 2007-09-09 02:13:29 UTC (rev 3310) +++ branches/0.6.x/scipy/sandbox/models/mixed.py 2007-09-14 18:37:31 UTC (rev 3311) @@ -4,7 +4,7 @@ import numpy as N import numpy.linalg as L -from scipy.stats.models.formula import formula, I +from scipy.sandbox.models.formula import formula, I class Unit: """ @@ -311,7 +311,7 @@ n = 3 - from scipy.stats.models.formula import term + from scipy.sandbox.models.formula import term fixed = term('f') random = term('r') response = term('y') Modified: branches/0.6.x/scipy/sandbox/models/model.py =================================================================== --- branches/0.6.x/scipy/stats/models/model.py 2007-09-09 02:13:29 UTC (rev 3310) +++ branches/0.6.x/scipy/sandbox/models/model.py 2007-09-14 18:37:31 UTC (rev 3311) @@ -2,8 +2,8 @@ from numpy.linalg import inv #from scipy import optimize -from scipy.stats.models.contrast import ContrastResults -from scipy.stats.models.utils import recipr +from scipy.sandbox.models.contrast import ContrastResults +from scipy.sandbox.models.utils import recipr class Model(object): """ Modified: branches/0.6.x/scipy/sandbox/models/regression.py =================================================================== --- branches/0.6.x/scipy/stats/models/regression.py 2007-09-09 02:13:29 UTC (rev 3310) +++ branches/0.6.x/scipy/sandbox/models/regression.py 2007-09-14 18:37:31 UTC (rev 3311) @@ -22,9 +22,9 @@ import numpy.linalg as L from scipy.linalg import norm, toeplitz -from scipy.stats.models.model import likelihood_model, \ +from scipy.sandbox.models.model import likelihood_model, \ likelihood_model_results -from scipy.stats.models import utils +from scipy.sandbox.models import utils class ols_model(likelihood_model): """ @@ -34,8 +34,8 @@ -------- >>> import numpy as N >>> - >>> from scipy.stats.models.formula import term, I - >>> from scipy.stats.models.regression import ols_model + >>> from scipy.sandbox.models.formula import term, I + >>> from scipy.sandbox.models.regression import ols_model >>> >>> data={'Y':[1,3,4,5,2,3,4], ... 
'X':range(1,8)} @@ -136,8 +136,8 @@ >>> import numpy as N >>> import numpy.random as R >>> - >>> from scipy.stats.models.formula import term, I - >>> from scipy.stats.models.regression import ar_model + >>> from scipy.sandbox.models.formula import term, I + >>> from scipy.sandbox.models.regression import ar_model >>> >>> data={'Y':[1,3,4,5,8,10,9], ... 'X':range(1,8)} @@ -273,8 +273,8 @@ >>> import numpy as N >>> - >>> from scipy.stats.models.formula import term, I - >>> from scipy.stats.models.regression import wls_model + >>> from scipy.sandbox.models.formula import term, I + >>> from scipy.sandbox.models.regression import wls_model >>> >>> data={'Y':[1,3,4,5,2,3,4], ... 'X':range(1,8)} Modified: branches/0.6.x/scipy/sandbox/models/rlm.py =================================================================== --- branches/0.6.x/scipy/stats/models/rlm.py 2007-09-09 02:13:29 UTC (rev 3310) +++ branches/0.6.x/scipy/sandbox/models/rlm.py 2007-09-14 18:37:31 UTC (rev 3311) @@ -3,8 +3,8 @@ """ import numpy as N -from scipy.stats.models.regression import wls_model -from scipy.stats.models.robust import norms, scale +from scipy.sandbox.models.regression import wls_model +from scipy.sandbox.models.robust import norms, scale class model(wls_model): Modified: branches/0.6.x/scipy/sandbox/models/robust/__init__.py =================================================================== --- branches/0.6.x/scipy/stats/models/robust/__init__.py 2007-09-09 02:13:29 UTC (rev 3310) +++ branches/0.6.x/scipy/sandbox/models/robust/__init__.py 2007-09-14 18:37:31 UTC (rev 3311) @@ -4,5 +4,5 @@ import numpy as N import numpy.linalg as L -from scipy.stats.models.robust import norms -from scipy.stats.models.robust.scale import MAD +from scipy.sandbox.models.robust import norms +from scipy.sandbox.models.robust.scale import MAD Modified: branches/0.6.x/scipy/sandbox/models/setup.py =================================================================== --- branches/0.6.x/scipy/stats/models/setup.py 2007-09-09 02:13:29 UTC (rev 3310) +++ branches/0.6.x/scipy/sandbox/models/setup.py 2007-09-14 18:37:31 UTC (rev 3311) @@ -9,7 +9,7 @@ try: import sys - from scipy.stats.models.bspline_module import mod + from scipy.sandbox.models.bspline_module import mod n, s, d = weave_ext(mod) config.add_extension(n, s, **d) except ImportError: pass @@ -25,4 +25,4 @@ if __name__ == '__main__': from numpy.distutils.core import setup - setup(**configuration(top_path='', package_name='scipy.stats.models').todict()) + setup(**configuration(top_path='', package_name='scipy.sandbox.models').todict()) Modified: branches/0.6.x/scipy/sandbox/models/smoothers.py =================================================================== --- branches/0.6.x/scipy/stats/models/smoothers.py 2007-09-09 02:13:29 UTC (rev 3310) +++ branches/0.6.x/scipy/sandbox/models/smoothers.py 2007-09-14 18:37:31 UTC (rev 3311) @@ -9,8 +9,8 @@ from scipy.linalg import solveh_banded from scipy.optimize import golden -from scipy.stats.models import _bspline -from scipy.stats.models.bspline import bspline, _band2array +from scipy.sandbox.models import _bspline +from scipy.sandbox.models.bspline import bspline, _band2array class poly_smoother: Modified: branches/0.6.x/scipy/sandbox/models/tests/test_bspline.py =================================================================== --- branches/0.6.x/scipy/stats/models/tests/test_bspline.py 2007-09-09 02:13:29 UTC (rev 3310) +++ branches/0.6.x/scipy/sandbox/models/tests/test_bspline.py 2007-09-14 18:37:31 UTC (rev 3311) @@ -5,8 +5,8 @@ 
import numpy as N from numpy.testing import NumpyTest, NumpyTestCase -import scipy.stats.models as S -import scipy.stats.models.bspline as B +import scipy.sandbox.models as S +import scipy.sandbox.models.bspline as B class test_BSpline(NumpyTestCase): Modified: branches/0.6.x/scipy/sandbox/models/tests/test_formula.py =================================================================== --- branches/0.6.x/scipy/stats/models/tests/test_formula.py 2007-09-09 02:13:29 UTC (rev 3310) +++ branches/0.6.x/scipy/sandbox/models/tests/test_formula.py 2007-09-14 18:37:31 UTC (rev 3311) @@ -9,7 +9,7 @@ import numpy.linalg as L from numpy.testing import assert_almost_equal, NumpyTest, NumpyTestCase -from scipy.stats.models import utils, formula, contrast +from scipy.sandbox.models import utils, formula, contrast class test_term(NumpyTestCase): Modified: branches/0.6.x/scipy/sandbox/models/tests/test_glm.py =================================================================== --- branches/0.6.x/scipy/stats/models/tests/test_glm.py 2007-09-09 02:13:29 UTC (rev 3310) +++ branches/0.6.x/scipy/sandbox/models/tests/test_glm.py 2007-09-14 18:37:31 UTC (rev 3311) @@ -6,8 +6,8 @@ import numpy.random as R from numpy.testing import NumpyTest, NumpyTestCase -import scipy.stats.models as S -import scipy.stats.models.glm as models +import scipy.sandbox.models as S +import scipy.sandbox.models.glm as models W = R.standard_normal Modified: branches/0.6.x/scipy/sandbox/models/tests/test_regression.py =================================================================== --- branches/0.6.x/scipy/stats/models/tests/test_regression.py 2007-09-09 02:13:29 UTC (rev 3310) +++ branches/0.6.x/scipy/sandbox/models/tests/test_regression.py 2007-09-14 18:37:31 UTC (rev 3311) @@ -5,7 +5,7 @@ from numpy.random import standard_normal from numpy.testing import NumpyTest, NumpyTestCase -from scipy.stats.models.regression import ols_model, ar_model +from scipy.sandbox.models.regression import ols_model, ar_model W = standard_normal Modified: branches/0.6.x/scipy/sandbox/models/tests/test_rlm.py =================================================================== --- branches/0.6.x/scipy/stats/models/tests/test_rlm.py 2007-09-09 02:13:29 UTC (rev 3310) +++ branches/0.6.x/scipy/sandbox/models/tests/test_rlm.py 2007-09-14 18:37:31 UTC (rev 3311) @@ -5,7 +5,7 @@ import numpy.random as R from numpy.testing import NumpyTest, NumpyTestCase -import scipy.stats.models.rlm as models +import scipy.sandbox.models.rlm as models W = R.standard_normal Modified: branches/0.6.x/scipy/sandbox/models/tests/test_utils.py =================================================================== --- branches/0.6.x/scipy/stats/models/tests/test_utils.py 2007-09-09 02:13:29 UTC (rev 3310) +++ branches/0.6.x/scipy/sandbox/models/tests/test_utils.py 2007-09-14 18:37:31 UTC (rev 3311) @@ -6,7 +6,7 @@ import numpy.random as R from numpy.testing import assert_almost_equal, NumpyTest, NumpyTestCase -from scipy.stats.models import utils +from scipy.sandbox.models import utils class test_Utils(NumpyTestCase): Modified: branches/0.6.x/scipy/sandbox/models/utils.py =================================================================== --- branches/0.6.x/scipy/stats/models/utils.py 2007-09-09 02:13:29 UTC (rev 3310) +++ branches/0.6.x/scipy/sandbox/models/utils.py 2007-09-14 18:37:31 UTC (rev 3311) @@ -82,7 +82,7 @@ Examples -------- >>> from numpy import arange - >>> from scipy.stats.models.utils import StepFunction + >>> from scipy.sandbox.models.utils import StepFunction >>> >>> x = 
arange(20) >>> y = arange(20) Modified: branches/0.6.x/scipy/stats/setup.py =================================================================== --- branches/0.6.x/scipy/stats/setup.py 2007-09-09 02:13:29 UTC (rev 3310) +++ branches/0.6.x/scipy/stats/setup.py 2007-09-14 18:37:31 UTC (rev 3311) @@ -6,7 +6,6 @@ from numpy.distutils.misc_util import Configuration config = Configuration('stats', parent_package, top_path) - config.add_subpackage('models') config.add_data_dir('tests') config.add_library('statlib', From scipy-svn at scipy.org Fri Sep 14 17:31:46 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Fri, 14 Sep 2007 16:31:46 -0500 (CDT) Subject: [Scipy-svn] r3312 - trunk/scipy/weave Message-ID: <20070914213146.06DB239C2D3@new.scipy.org> Author: eric Date: 2007-09-14 16:31:42 -0500 (Fri, 14 Sep 2007) New Revision: 3312 Modified: trunk/scipy/weave/converters.py Log: - comment out the latest numpy scalar converters, as they caused problems on Nils machine (complex192 not defined). We'll get numpy scalar conversion fixed up in the next release. Modified: trunk/scipy/weave/converters.py =================================================================== --- trunk/scipy/weave/converters.py 2007-09-14 18:37:31 UTC (rev 3311) +++ trunk/scipy/weave/converters.py 2007-09-14 21:31:42 UTC (rev 3312) @@ -34,11 +34,11 @@ # add numpy scalar converters to the default # converter list. #---------------------------------------------------------------------------- -try: - import numpy_scalar_spec - default.append(numpy_scalar_spec.numpy_complex_scalar_converter()) -except ImportError: - pass +#try: +# import numpy_scalar_spec +# default.append(numpy_scalar_spec.numpy_complex_scalar_converter()) +#except ImportError: +# pass #---------------------------------------------------------------------------- # Add wxPython support From scipy-svn at scipy.org Fri Sep 14 19:43:59 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Fri, 14 Sep 2007 18:43:59 -0500 (CDT) Subject: [Scipy-svn] r3313 - tags Message-ID: <20070914234359.8ACE939C235@new.scipy.org> Author: jarrod.millman Date: 2007-09-14 18:43:55 -0500 (Fri, 14 Sep 2007) New Revision: 3313 Added: tags/0.6.0/ Log: Create the new release tag. Copied: tags/0.6.0 (from rev 3312, branches/0.6.x) From scipy-svn at scipy.org Fri Sep 14 19:47:24 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Fri, 14 Sep 2007 18:47:24 -0500 (CDT) Subject: [Scipy-svn] r3314 - branches/0.6.x/scipy Message-ID: <20070914234724.7981E39C235@new.scipy.org> Author: jarrod.millman Date: 2007-09-14 18:47:20 -0500 (Fri, 14 Sep 2007) New Revision: 3314 Modified: branches/0.6.x/scipy/version.py Log: Update version number on 0.6.x branch Modified: branches/0.6.x/scipy/version.py =================================================================== --- branches/0.6.x/scipy/version.py 2007-09-14 23:43:55 UTC (rev 3313) +++ branches/0.6.x/scipy/version.py 2007-09-14 23:47:20 UTC (rev 3314) @@ -1,4 +1,4 @@ -version = '0.6.0' +version = '0.6.1' release=False if not release: From scipy-svn at scipy.org Fri Sep 14 19:48:32 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Fri, 14 Sep 2007 18:48:32 -0500 (CDT) Subject: [Scipy-svn] r3315 - tags/0.6.0/scipy Message-ID: <20070914234832.6A91439C235@new.scipy.org> Author: jarrod.millman Date: 2007-09-14 18:48:17 -0500 (Fri, 14 Sep 2007) New Revision: 3315 Modified: tags/0.6.0/scipy/version.py Log: Make 0.6.0 tag a version release. 
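The version.py diffs in r3314 and r3315 both toggle a `release` flag alongside the version string. In scipy's version.py of this era that flag decided whether the reported version carried a development suffix derived from the SVN revision; the sketch below only illustrates that pattern, and the svn_revision() helper plus the '.dev' suffix format are assumptions for illustration, not content of these commits.

    def svn_revision():
        # Hypothetical helper for this sketch only; the real file obtained
        # the revision from the svn working copy. Hard-coded so it runs.
        return 3314

    version = '0.6.1'
    release = False    # r3314 leaves this False on the 0.6.x branch;
                       # r3315 flips it to True on the 0.6.0 tag

    if not release:
        # Development builds get a distinguishing suffix, e.g. '0.6.1.dev3314'
        version = version + '.dev' + str(svn_revision())

    print version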
Modified: tags/0.6.0/scipy/version.py =================================================================== --- tags/0.6.0/scipy/version.py 2007-09-14 23:47:20 UTC (rev 3314) +++ tags/0.6.0/scipy/version.py 2007-09-14 23:48:17 UTC (rev 3315) @@ -1,5 +1,5 @@ version = '0.6.0' -release=False +release=True if not release: import os From scipy-svn at scipy.org Sun Sep 16 07:42:37 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sun, 16 Sep 2007 06:42:37 -0500 (CDT) Subject: [Scipy-svn] r3316 - trunk/scipy/sandbox/ga Message-ID: <20070916114237.0A0CF39C269@new.scipy.org> Author: jarrod.millman Date: 2007-09-16 06:42:35 -0500 (Sun, 16 Sep 2007) New Revision: 3316 Added: trunk/scipy/sandbox/ga/info.py Removed: trunk/scipy/sandbox/ga/info_ga.py Modified: trunk/scipy/sandbox/ga/__init__.py Log: cleaning up info.py to conform with standard Modified: trunk/scipy/sandbox/ga/__init__.py =================================================================== --- trunk/scipy/sandbox/ga/__init__.py 2007-09-14 23:48:17 UTC (rev 3315) +++ trunk/scipy/sandbox/ga/__init__.py 2007-09-16 11:42:35 UTC (rev 3316) @@ -2,8 +2,9 @@ # ga - Genetic Algorithms # -#from pre___init__ import __doc__ +from info import __doc__ + import tree import algorithm import ga_util Copied: trunk/scipy/sandbox/ga/info.py (from rev 3315, trunk/scipy/sandbox/ga/info_ga.py) Deleted: trunk/scipy/sandbox/ga/info_ga.py =================================================================== --- trunk/scipy/sandbox/ga/info_ga.py 2007-09-14 23:48:17 UTC (rev 3315) +++ trunk/scipy/sandbox/ga/info_ga.py 2007-09-16 11:42:35 UTC (rev 3316) @@ -1,6 +0,0 @@ -""" -Genetic Algorithms -================== - -""" - From scipy-svn at scipy.org Mon Sep 17 19:00:49 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Mon, 17 Sep 2007 18:00:49 -0500 (CDT) Subject: [Scipy-svn] r3317 - trunk/scipy/io Message-ID: <20070917230049.6893239C0C1@new.scipy.org> Author: matthew.brett at gmail.com Date: 2007-09-17 17:59:47 -0500 (Mon, 17 Sep 2007) New Revision: 3317 Added: trunk/scipy/io/datasource.py trunk/scipy/io/path.py Log: support for remote data repositories (moved from nipy) Added: trunk/scipy/io/datasource.py =================================================================== --- trunk/scipy/io/datasource.py 2007-09-16 11:42:35 UTC (rev 3316) +++ trunk/scipy/io/datasource.py 2007-09-17 22:59:47 UTC (rev 3317) @@ -0,0 +1,247 @@ +import os +import gzip +import bz2 +from urlparse import urlparse +from urllib2 import urlopen +from tempfile import mkstemp + +# TODO: replace with newer tuple-based path module +from path import path + +zipexts = (".gz",".bz2") +file_openers = {".gz":gzip.open, ".bz2":bz2.BZ2File, None:file} + +def iszip(filename): + """ Is this filename a zip file. + + :Returns: ``bool`` + """ + _, ext = path(filename).splitext() + return ext in zipexts + +def unzip(filename): + """ Unzip the given file into another file. Return the new file's name. + + :Returns: ``string`` + """ + if not iszip(filename): + raise ValueError("file %s is not zipped"%filename) + unzip_name, zipext = splitzipext(filename) + opener = file_openers[zipext] + outfile = file(unzip_name,'w') + outfile.write(opener(filename).read()) + outfile.close() + return unzip_name + +def iswritemode(mode): + """ Test if the given mode will open a file for writing. 
+ + :Parameters: + `mode` : string + The mode to be checked + + :Returns: ``bool`` + """ + return mode.find("w")>-1 or mode.find("+")>-1 + + + +def splitzipext(filename): + """ + return (base, zip_extension) from filename. + If filename does not have a zip extention then + base = filename and zip_extension = None + """ + if iszip(filename): + return path(filename).splitext() + else: + return filename, None + + + + +def isurl(pathstr): + """ + Check whether a given string can be parsed as a URL. + + :Parameters: + `pathstr` : string + The string to be checked. + + :Returns: ``bool`` + """ + scheme, netloc, _, _, _, _ = urlparse(pathstr) + return bool(scheme and netloc) + + + + +def ensuredirs(directory): + """ + Ensure that the given directory path actually exists. + If it doesn't, create it. + + :Returns: ``None`` + """ + if not isinstance(directory, path): + directory = path(directory) + if not directory.exists(): + directory.makedirs() + + + + +class Cache (object): + """ + A file cache. The path of the cache can be specified + or else use ~/.nipy/cache by default. + """ + + def __init__(self, cachepath=None): + if cachepath is not None: + self.path = path(cachepath) + elif os.name == 'posix': + self.path = path(os.environ["HOME"]).joinpath(".nipy","cache") + elif os.name == 'nt': + self.path = path(os.environ["HOMEPATH"]).joinpath(".nipy","cache") + if not self.path.exists(): + ensuredirs(self.path) + + def tempfile(self,suffix='', prefix=''): + """ Return an temporary file name in the cache""" + _, fname = mkstemp(suffix, prefix, self.path) + return fname + + def filepath(self, uri): + """ + Return the complete path + filename within the cache. + """ + (_, netloc, upath, _, _, _) = urlparse(uri) + return self.path.joinpath(netloc, upath[1:]) + + def filename(self, uri): + """ + Return the complete path + filename within the cache. + + :Returns: ``string`` + """ + return str(self.filepath(uri)) + + def cache(self, uri): + """ + Copy a file into the cache. + + :Returns: ``None`` + """ + if self.iscached(uri): + return + upath = self.filepath(uri) + ensuredirs(upath.dirname()) + try: + openedurl = urlopen(uri) + except: + raise IOError("url not found: "+str(uri)) + file(upath, 'w').write(openedurl.read()) + + def clear(self): + """ Delete all files in the cache. + + :Returns: ``None`` + """ + for f in self.path.files(): + f.rm() + + def iscached(self, uri): + """ Check if a file exists in the cache. + + :Returns: ``bool`` + """ + return self.filepath(uri).exists() + + def retrieve(self, uri): + """ + Retrieve a file from the cache. + If not already there, create the file and + add it to the cache. 
+ + :Returns: ``file`` + """ + self.cache(uri) + return file(self.filename(uri)) + + +class DataSource (object): + + def __init__(self, cachepath=os.curdir): + self._cache = Cache(cachepath) + + def tempfile(self,suffix='', prefix=''): + ''' Return an temporary file name in the cache''' + return self._cache.tempfile(suffix, prefix) + + def _possible_names(self, filename): + names = [filename] + if not iszip(filename): + for zipext in zipexts: + names.append(filename+zipext) + return tuple(names) + + def cache(self, pathstr): + if isurl(pathstr): + self._cache.cache(pathstr) + + def filename(self, pathstr): + found = None + for name in self._possible_names(pathstr): + try: + if isurl(name): + self.cache(name) + found = self._cache.filename(name) + else: + raise Exception + except: + if path(name).exists(): + found = name + if found: + break + if found is None: + raise IOError("%s not found"%pathstr) + return found + + def exists(self, pathstr): + try: + _ = self.filename(pathstr) + return True + except IOError: + return False + + def open(self, pathstr, mode='r'): + if isurl(pathstr) and iswritemode(mode): + raise ValueError("URLs are not writeable") + found = self.filename(pathstr) + _, ext = splitzipext(found) + if ext == 'bz2': + mode.replace("+", "") + return file_openers[ext](found, mode=mode) + + def _fullpath(self, pathstr): + return pathstr + + +class Repository (DataSource): + """DataSource with an implied root.""" + def __init__(self, baseurl, cachepath=None): + DataSource.__init__(self, cachepath=cachepath) + self._baseurl = baseurl + + def _fullpath(self, pathstr): + return path(self._baseurl).joinpath(pathstr) + + def filename(self, pathstr): + return DataSource.filename(self, str(self._fullpath(pathstr))) + + def exists(self, pathstr): + return DataSource.exists(self, self._fullpath(pathstr)) + + def open(self, pathstr, mode='r'): + return DataSource.open(self, self._fullpath(pathstr), mode) Added: trunk/scipy/io/path.py =================================================================== --- trunk/scipy/io/path.py 2007-09-16 11:42:35 UTC (rev 3316) +++ trunk/scipy/io/path.py 2007-09-17 22:59:47 UTC (rev 3317) @@ -0,0 +1,969 @@ +""" path.py - An object representing a path to a file or directory. + +Example: + +from neuroimaging.utils.path import path +d = path('/home/guido/bin') +for f in d.files('*.py'): + f.chmod(0755) + +This module requires Python 2.2 or later. + + +URL: http://www.jorendorff.com/articles/python/path +Author: Jason Orendorff (and others - see the url!) +Date: 7 Mar 2004 +""" + + +# TODO +# - Tree-walking functions don't avoid symlink loops. Matt Harrison sent me a patch for this. +# - Tree-walking functions can't ignore errors. Matt Harrison asked for this. +# +# - Two people asked for path.chdir(). This just seems wrong to me, +# I dunno. chdir() is moderately evil anyway. +# +# - Bug in write_text(). It doesn't support Universal newline mode. +# - Better error message in listdir() when self isn't a +# directory. (On Windows, the error message really sucks.) +# - Make sure everything has a good docstring. +# - Add methods for regex find and replace. +# - guess_content_type() method? +# - Perhaps support arguments to touch(). +# - Could add split() and join() methods that generate warnings. 
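A quick orientation before the class body of the path module added here: the usage sketch below exercises only methods defined later in this file (files(), the '/' join operator via __div__, exists(), text()). The '/tmp' directory and the file names are made up for illustration, and the import path simply mirrors where this commit adds the module.

    from scipy.io.path import path

    d = path('/tmp')                  # assumed example directory
    for f in d.files('*.txt'):        # non-recursive listing, pattern-filtered
        print f.name, f.getsize()     # basename and size in bytes
    readme = d / 'readme.txt'         # '/' joins path components
    if readme.exists():
        print readme.text()           # whole-file contents as one string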
+ +import sys, warnings, os, fnmatch, glob, shutil, codecs, md5 + +__version__ = '2.1' +__all__ = ['path'] + +# Platform-specific support for path.owner +if os.name == 'nt': + try: + import win32security + except ImportError: + win32security = None +else: + try: + import pwd + except ImportError: + pwd = None + +# Pre-2.3 support. Are unicode filenames supported? +_base = str +_getcwd = os.getcwd +try: + if os.path.supports_unicode_filenames: + _base = unicode + _getcwd = os.getcwdu +except AttributeError: + pass + +# Pre-2.3 workaround for booleans +try: + True, False +except NameError: + True, False = 1, 0 + +# Pre-2.3 workaround for basestring. +try: + basestring +except NameError: + basestring = (str, unicode) + +# Universal newline support +_textmode = 'r' +if hasattr(file, 'newlines'): + _textmode = 'U' + + +class TreeWalkWarning(Warning): + pass + +class path(_base): + """ Represents a filesystem path. + + For documentation on individual methods, consult their + counterparts in os.path. + """ + + # --- Special Python methods. + + def __repr__(self): + return 'path(%s)' % _base.__repr__(self) + + # Adding a path and a string yields a path. + def __add__(self, more): + try: + resultStr = _base.__add__(self, more) + except TypeError: #Python bug + resultStr = NotImplemented + if resultStr is NotImplemented: + return resultStr + return self.__class__(resultStr) + + def __radd__(self, other): + if isinstance(other, basestring): + return self.__class__(other.__add__(self)) + else: + return NotImplemented + + # The / operator joins paths. + def __div__(self, rel): + """ fp.__div__(rel) == fp / rel == fp.joinpath(rel) + + Join two path components, adding a separator character if + needed. + """ + return self.__class__(os.path.join(self, rel)) + + # Make the / operator work even when true division is enabled. + __truediv__ = __div__ + + def getcwd(cls): + """ Return the current working directory as a path object. """ + return cls(_getcwd()) + getcwd = classmethod(getcwd) + + + # --- Operations on path strings. + + isabs = os.path.isabs + def abspath(self): return self.__class__(os.path.abspath(self)) + def normcase(self): return self.__class__(os.path.normcase(self)) + def normpath(self): return self.__class__(os.path.normpath(self)) + def realpath(self): return self.__class__(os.path.realpath(self)) + def expanduser(self): return self.__class__(os.path.expanduser(self)) + def expandvars(self): return self.__class__(os.path.expandvars(self)) + def dirname(self): return self.__class__(os.path.dirname(self)) + basename = os.path.basename + + def expand(self): + """ Clean up a filename by calling expandvars(), + expanduser(), and normpath() on it. + + This is commonly everything needed to clean up a filename + read from a configuration file, for example. + """ + return self.expandvars().expanduser().normpath() + + def _get_namebase(self): + base, ext = os.path.splitext(self.name) + return base + + def _get_ext(self): + f, ext = os.path.splitext(_base(self)) + return ext + + def _get_drive(self): + drive, r = os.path.splitdrive(self) + return self.__class__(drive) + + parent = property( + dirname, None, None, + """ This path's parent directory, as a new path object. + + For example, path('/usr/local/lib/libpython.so').parent == path('/usr/local/lib') + """) + + name = property( + basename, None, None, + """ The name of this file or directory without the full path. 
+ + For example, path('/usr/local/lib/libpython.so').name == 'libpython.so' + """) + + namebase = property( + _get_namebase, None, None, + """ The same as path.name, but with one file extension stripped off. + + For example, path('/home/guido/python.tar.gz').name == 'python.tar.gz', + but path('/home/guido/python.tar.gz').namebase == 'python.tar' + """) + + ext = property( + _get_ext, None, None, + """ The file extension, for example '.py'. """) + + drive = property( + _get_drive, None, None, + """ The drive specifier, for example 'C:'. + This is always empty on systems that don't use drive specifiers. + """) + + def splitpath(self): + """ p.splitpath() -> Return (p.parent, p.name). """ + parent, child = os.path.split(self) + return self.__class__(parent), child + + def splitdrive(self): + """ p.splitdrive() -> Return (p.drive, ). + + Split the drive specifier from this path. If there is + no drive specifier, p.drive is empty, so the return value + is simply (path(''), p). This is always the case on Unix. + """ + drive, rel = os.path.splitdrive(self) + return self.__class__(drive), rel + + def splitext(self): + """ p.splitext() -> Return (p.stripext(), p.ext). + + Split the filename extension from this path and return + the two parts. Either part may be empty. + + The extension is everything from '.' to the end of the + last path segment. This has the property that if + (a, b) == p.splitext(), then a + b == p. + """ + filename, ext = os.path.splitext(self) + return self.__class__(filename), ext + + def stripext(self): + """ p.stripext() -> Remove one file extension from the path. + + For example, path('/home/guido/python.tar.gz').stripext() + returns path('/home/guido/python.tar'). + """ + return self.splitext()[0] + + if hasattr(os.path, 'splitunc'): + def splitunc(self): + unc, rest = os.path.splitunc(self) + return self.__class__(unc), rest + + def _get_uncshare(self): + unc, r = os.path.splitunc(self) + return self.__class__(unc) + + uncshare = property( + _get_uncshare, None, None, + """ The UNC mount point for this path. + This is empty for paths on local drives. """) + + def joinpath(self, *args): + """ Join two or more path components, adding a separator + character (os.sep) if needed. Returns a new path + object. + """ + return self.__class__(os.path.join(self, *args)) + + def splitall(self): + r""" Return a list of the path components in this path. + + The first item in the list will be a path. Its value will be + either os.curdir, os.pardir, empty, or the root directory of + this path (for example, '/' or 'C:\\'). The other items in + the list will be strings. + + path.path.joinpath(*result) will yield the original path. + """ + parts = [] + loc = self + while loc != os.curdir and loc != os.pardir: + prev = loc + loc, child = prev.splitpath() + if loc == prev: + break + parts.append(child) + parts.append(loc) + parts.reverse() + return parts + + def relpath(self): + """ Return this path as a relative path, + based from the current working directory. + """ + cwd = self.__class__(os.getcwd()) + return cwd.relpathto(self) + + def relpathto(self, dest): + """ Return a relative path from self to dest. + + If there is no relative path from self to dest, for example if + they reside on different drives in Windows, then this returns + dest.abspath(). + """ + origin = self.abspath() + dest = self.__class__(dest).abspath() + + orig_list = origin.normcase().splitall() + # Don't normcase dest! We want to preserve the case. 
+ dest_list = dest.splitall() + + if orig_list[0] != os.path.normcase(dest_list[0]): + # Can't get here from there. + return dest + + # Find the location where the two paths start to differ. + i = 0 + for start_seg, dest_seg in zip(orig_list, dest_list): + if start_seg != os.path.normcase(dest_seg): + break + i += 1 + + # Now i is the point where the two paths diverge. + # Need a certain number of "os.pardir"s to work up + # from the origin to the point of divergence. + segments = [os.pardir] * (len(orig_list) - i) + # Need to add the diverging part of dest_list. + segments += dest_list[i:] + if len(segments) == 0: + # If they happen to be identical, use os.curdir. + relpath = os.curdir + else: + relpath = os.path.join(*segments) + return self.__class__(relpath) + + # --- Listing, searching, walking, and matching + + def listdir(self, pattern=None): + """ D.listdir() -> List of items in this directory. + + Use D.files() or D.dirs() instead if you want a listing + of just files or just subdirectories. + + The elements of the list are path objects. + + With the optional 'pattern' argument, this only lists + items whose names match the given pattern. + """ + names = os.listdir(self) + if pattern is not None: + names = fnmatch.filter(names, pattern) + return [self / child for child in names] + + def dirs(self, pattern=None): + """ D.dirs() -> List of this directory's subdirectories. + + The elements of the list are path objects. + This does not walk recursively into subdirectories + (but see path.walkdirs). + + With the optional 'pattern' argument, this only lists + directories whose names match the given pattern. For + example, d.dirs('build-*'). + """ + return [p for p in self.listdir(pattern) if p.isdir()] + + def files(self, pattern=None): + """ D.files() -> List of the files in this directory. + + The elements of the list are path objects. + This does not walk into subdirectories (see path.walkfiles). + + With the optional 'pattern' argument, this only lists files + whose names match the given pattern. For example, + d.files('*.pyc'). + """ + + return [p for p in self.listdir(pattern) if p.isfile()] + + def walk(self, pattern=None, errors='strict'): + """ D.walk() -> iterator over files and subdirs, recursively. + + The iterator yields path objects naming each child item of + this directory and its descendants. This requires that + D.isdir(). + + This performs a depth-first traversal of the directory tree. + Each directory is returned just before all its children. + + The errors= keyword argument controls behavior when an + error occurs. The default is 'strict', which causes an + exception. The other allowed values are 'warn', which + reports the error via warnings.warn(), and 'ignore'. 
+ """ + if errors not in ('strict', 'warn', 'ignore'): + raise ValueError("invalid errors parameter") + + try: + childList = self.listdir() + except Exception: + if errors == 'ignore': + return + elif errors == 'warn': + warnings.warn( + "Unable to list directory '%s': %s" + % (self, sys.exc_info()[1]), + TreeWalkWarning) + else: + raise + + for child in childList: + if pattern is None or child.fnmatch(pattern): + yield child + try: + isdir = child.isdir() + except Exception: + if errors == 'ignore': + isdir = False + elif errors == 'warn': + warnings.warn( + "Unable to access '%s': %s" + % (child, sys.exc_info()[1]), + TreeWalkWarning) + isdir = False + else: + raise + + if isdir: + for item in child.walk(pattern, errors): + yield item + + def walkdirs(self, pattern=None, errors='strict'): + """ D.walkdirs() -> iterator over subdirs, recursively. + + With the optional 'pattern' argument, this yields only + directories whose names match the given pattern. For + example, mydir.walkdirs('*test') yields only directories + with names ending in 'test'. + + The errors= keyword argument controls behavior when an + error occurs. The default is 'strict', which causes an + exception. The other allowed values are 'warn', which + reports the error via warnings.warn(), and 'ignore'. + """ + if errors not in ('strict', 'warn', 'ignore'): + raise ValueError("invalid errors parameter") + + try: + dirs = self.dirs() + except Exception: + if errors == 'ignore': + return + elif errors == 'warn': + warnings.warn( + "Unable to list directory '%s': %s" + % (self, sys.exc_info()[1]), + TreeWalkWarning) + else: + raise + + for child in dirs: + if pattern is None or child.fnmatch(pattern): + yield child + for subsubdir in child.walkdirs(pattern, errors): + yield subsubdir + + def walkfiles(self, pattern=None, errors='strict'): + """ D.walkfiles() -> iterator over files in D, recursively. + + The optional argument, pattern, limits the results to files + with names that match the pattern. For example, + mydir.walkfiles('*.tmp') yields only files with the .tmp + extension. + """ + if errors not in ('strict', 'warn', 'ignore'): + raise ValueError("invalid errors parameter") + + try: + childList = self.listdir() + except Exception: + if errors == 'ignore': + return + elif errors == 'warn': + warnings.warn( + "Unable to list directory '%s': %s" + % (self, sys.exc_info()[1]), + TreeWalkWarning) + else: + raise + + for child in childList: + try: + isfile = child.isfile() + isdir = not isfile and child.isdir() + except: + if errors == 'ignore': + return + elif errors == 'warn': + warnings.warn( + "Unable to access '%s': %s" + % (self, sys.exc_info()[1]), + TreeWalkWarning) + else: + raise + + if isfile: + if pattern is None or child.fnmatch(pattern): + yield child + elif isdir: + for f in child.walkfiles(pattern, errors): + yield f + + def fnmatch(self, pattern): + """ Return True if self.name matches the given pattern. + + pattern - A filename pattern with wildcards, + for example '*.py'. + """ + return fnmatch.fnmatch(self.name, pattern) + + def glob(self, pattern): + """ Return a list of path objects that match the pattern. + + pattern - a path relative to this directory, with wildcards. + + For example, path('/users').glob('*/bin/*') returns a list + of all the files users have in their bin directories. + """ + cls = self.__class__ + return [cls(s) for s in glob.glob(_base(self / pattern))] + + + # --- Reading or writing an entire file at once. + + def open(self, mode='r'): + """ Open this file. Return a file object. 
""" + return file(self, mode) + + def bytes(self): + """ Open this file, read all bytes, return them as a string. """ + f = self.open('rb') + try: + return f.read() + finally: + f.close() + + def write_bytes(self, bytes, append=False): + """ Open this file and write the given bytes to it. + + Default behavior is to overwrite any existing file. + Call p.write_bytes(bytes, append=True) to append instead. + """ + if append: + mode = 'ab' + else: + mode = 'wb' + f = self.open(mode) + try: + f.write(bytes) + finally: + f.close() + + def text(self, encoding=None, errors='strict'): + r""" Open this file, read it in, return the content as a string. + + This uses 'U' mode in Python 2.3 and later, so '\r\n' and '\r' + are automatically translated to '\n'. + + Optional arguments: + + encoding - The Unicode encoding (or character set) of + the file. If present, the content of the file is + decoded and returned as a unicode object; otherwise + it is returned as an 8-bit str. + errors - How to handle Unicode errors; see help(str.decode) + for the options. Default is 'strict'. + """ + if encoding is None: + # 8-bit + f = self.open(_textmode) + try: + return f.read() + finally: + f.close() + else: + # Unicode + f = codecs.open(self, 'r', encoding, errors) + # (Note - Can't use 'U' mode here, since codecs.open + # doesn't support 'U' mode, even in Python 2.3.) + try: + t = f.read() + finally: + f.close() + return (t.replace(u'\r\n', u'\n') + .replace(u'\r\x85', u'\n') + .replace(u'\r', u'\n') + .replace(u'\x85', u'\n') + .replace(u'\u2028', u'\n')) + + def write_text(self, text, encoding=None, errors='strict', linesep=os.linesep, append=False): + r""" Write the given text to this file. + + The default behavior is to overwrite any existing file; + to append instead, use the 'append=True' keyword argument. + + There are two differences between path.write_text() and + path.write_bytes(): newline handling and Unicode handling. + See below. + + Parameters: + + - text - str/unicode - The text to be written. + + - encoding - str - The Unicode encoding that will be used. + This is ignored if 'text' isn't a Unicode string. + + - errors - str - How to handle Unicode encoding errors. + Default is 'strict'. See help(unicode.encode) for the + options. This is ignored if 'text' isn't a Unicode + string. + + - linesep - keyword argument - str/unicode - The sequence of + characters to be used to mark end-of-line. The default is + os.linesep. You can also specify None; this means to + leave all newlines as they are in 'text'. + + - append - keyword argument - bool - Specifies what to do if + the file already exists (True: append to the end of it; + False: overwrite it.) The default is False. + + + --- Newline handling. + + write_text() converts all standard end-of-line sequences + ('\n', '\r', and '\r\n') to your platform's default end-of-line + sequence (see os.linesep; on Windows, for example, the + end-of-line marker is '\r\n'). + + If you don't like your platform's default, you can override it + using the 'linesep=' keyword argument. If you specifically want + write_text() to preserve the newlines as-is, use 'linesep=None'. + + This applies to Unicode text the same as to 8-bit text, except + there are three additional standard Unicode end-of-line sequences: + u'\x85', u'\r\x85', and u'\u2028'. + + (This is slightly different from when you open a file for + writing with fopen(filename, "w") in C or file(filename, 'w') + in Python.) 
+ + + --- Unicode + + If 'text' isn't Unicode, then apart from newline handling, the + bytes are written verbatim to the file. The 'encoding' and + 'errors' arguments are not used and must be omitted. + + If 'text' is Unicode, it is first converted to bytes using the + specified 'encoding' (or the default encoding if 'encoding' + isn't specified). The 'errors' argument applies only to this + conversion. + + """ + if isinstance(text, unicode): + if linesep is not None: + # Convert all standard end-of-line sequences to + # ordinary newline characters. + text = (text.replace(u'\r\n', u'\n') + .replace(u'\r\x85', u'\n') + .replace(u'\r', u'\n') + .replace(u'\x85', u'\n') + .replace(u'\u2028', u'\n')) + text = text.replace(u'\n', linesep) + if encoding is None: + encoding = sys.getdefaultencoding() + bytes = text.encode(encoding, errors) + else: + # It is an error to specify an encoding if 'text' is + # an 8-bit string. + assert encoding is None + + if linesep is not None: + text = (text.replace('\r\n', '\n') + .replace('\r', '\n')) + bytes = text.replace('\n', linesep) + + self.write_bytes(bytes, append) + + def lines(self, encoding=None, errors='strict', retain=True): + r""" Open this file, read all lines, return them in a list. + + Optional arguments: + encoding - The Unicode encoding (or character set) of + the file. The default is None, meaning the content + of the file is read as 8-bit characters and returned + as a list of (non-Unicode) str objects. + errors - How to handle Unicode errors; see help(str.decode) + for the options. Default is 'strict' + retain - If true, retain newline characters; but all newline + character combinations ('\r', '\n', '\r\n') are + translated to '\n'. If false, newline characters are + stripped off. Default is True. + + This uses 'U' mode in Python 2.3 and later. + """ + if encoding is None and retain: + f = self.open(_textmode) + try: + return f.readlines() + finally: + f.close() + else: + return self.text(encoding, errors).splitlines(retain) + + def write_lines(self, lines, encoding=None, errors='strict', + linesep=os.linesep, append=False): + r""" Write the given lines of text to this file. + + By default this overwrites any existing file at this path. + + This puts a platform-specific newline sequence on every line. + See 'linesep' below. + + lines - A list of strings. + + encoding - A Unicode encoding to use. This applies only if + 'lines' contains any Unicode strings. + + errors - How to handle errors in Unicode encoding. This + also applies only to Unicode strings. + + linesep - The desired line-ending. This line-ending is + applied to every line. If a line already has any + standard line ending ('\r', '\n', '\r\n', u'\x85', + u'\r\x85', u'\u2028'), that will be stripped off and + this will be used instead. The default is os.linesep, + which is platform-dependent ('\r\n' on Windows, '\n' on + Unix, etc.) Specify None to write the lines as-is, + like file.writelines(). + + Use the keyword argument append=True to append lines to the + file. The default is to overwrite the file. Warning: + When you use this with Unicode data, if the encoding of the + existing data in the file is different from the encoding + you specify with the encoding= parameter, the result is + mixed-encoding data, which can really confuse someone trying + to read the file later. 
+ """ + if append: + mode = 'ab' + else: + mode = 'wb' + f = self.open(mode) + try: + for line in lines: + isUnicode = isinstance(line, unicode) + if linesep is not None: + # Strip off any existing line-end and add the + # specified linesep string. + if isUnicode: + if line[-2:] in (u'\r\n', u'\x0d\x85'): + line = line[:-2] + elif line[-1:] in (u'\r', u'\n', + u'\x85', u'\u2028'): + line = line[:-1] + else: + if line[-2:] == '\r\n': + line = line[:-2] + elif line[-1:] in ('\r', '\n'): + line = line[:-1] + line += linesep + if isUnicode: + if encoding is None: + encoding = sys.getdefaultencoding() + line = line.encode(encoding, errors) + f.write(line) + finally: + f.close() + + def read_md5(self): + """ Calculate the md5 hash for this file. + + This reads through the entire file. + """ + f = self.open('rb') + try: + m = md5.new() + while True: + d = f.read(8192) + if not d: + break + m.update(d) + finally: + f.close() + return m.digest() + + # --- Methods for querying the filesystem. + + exists = os.path.exists + isdir = os.path.isdir + isfile = os.path.isfile + islink = os.path.islink + ismount = os.path.ismount + + if hasattr(os.path, 'samefile'): + samefile = os.path.samefile + + getatime = os.path.getatime + atime = property( + getatime, None, None, + """ Last access time of the file. """) + + getmtime = os.path.getmtime + mtime = property( + getmtime, None, None, + """ Last-modified time of the file. """) + + if hasattr(os.path, 'getctime'): + getctime = os.path.getctime + ctime = property( + getctime, None, None, + """ Creation time of the file. """) + + getsize = os.path.getsize + size = property( + getsize, None, None, + """ Size of the file, in bytes. """) + + if hasattr(os, 'access'): + def access(self, mode): + """ Return true if current user has access to this path. + + mode - One of the constants os.F_OK, os.R_OK, os.W_OK, os.X_OK + """ + return os.access(self, mode) + + def stat(self): + """ Perform a stat() system call on this path. """ + return os.stat(self) + + def lstat(self): + """ Like path.stat(), but do not follow symbolic links. """ + return os.lstat(self) + + def get_owner(self): + r""" Return the name of the owner of this file or directory. + + This follows symbolic links. + + On Windows, this returns a name of the form ur'DOMAIN\User Name'. + On Windows, a group can own a file or directory. + """ + if os.name == 'nt': + if win32security is None: + raise Exception("path.owner requires win32all to be installed") + desc = win32security.GetFileSecurity( + self, win32security.OWNER_SECURITY_INFORMATION) + sid = desc.GetSecurityDescriptorOwner() + account, domain, typecode = win32security.LookupAccountSid(None, sid) + return domain + u'\\' + account + else: + if pwd is None: + raise NotImplementedError("path.owner is not implemented on this platform.") + st = self.stat() + return pwd.getpwuid(st.st_uid).pw_name + + owner = property( + get_owner, None, None, + """ Name of the owner of this file or directory. """) + + if hasattr(os, 'statvfs'): + def statvfs(self): + """ Perform a statvfs() system call on this path. """ + return os.statvfs(self) + + if hasattr(os, 'pathconf'): + def pathconf(self, name): + return os.pathconf(self, name) + + + # --- Modifying operations on files and directories + + def utime(self, times): + """ Set the access and modified times of this file. 
""" + os.utime(self, times) + + def chmod(self, mode): + os.chmod(self, mode) + + if hasattr(os, 'chown'): + def chown(self, uid, gid): + os.chown(self, uid, gid) + + def rename(self, new): + os.rename(self, new) + + def renames(self, new): + os.renames(self, new) + + + # --- Create/delete operations on directories + + def mkdir(self, mode=0777): + os.mkdir(self, mode) + + def makedirs(self, mode=0777): + os.makedirs(self, mode) + + def rmdir(self): + os.rmdir(self) + + def removedirs(self): + os.removedirs(self) + + + # --- Modifying operations on files + + def touch(self): + """ Set the access/modified times of this file to the current time. + Create the file if it does not exist. + """ + fd = os.open(self, os.O_WRONLY | os.O_CREAT, 0666) + os.close(fd) + os.utime(self, None) + + def remove(self): + os.remove(self) + + def unlink(self): + os.unlink(self) + + + # --- Links + + if hasattr(os, 'link'): + def link(self, newpath): + """ Create a hard link at 'newpath', pointing to this file. """ + os.link(self, newpath) + + if hasattr(os, 'symlink'): + def symlink(self, newlink): + """ Create a symbolic link at 'newlink', pointing here. """ + os.symlink(self, newlink) + + if hasattr(os, 'readlink'): + def readlink(self): + """ Return the path to which this symbolic link points. + + The result may be an absolute or a relative path. + """ + return self.__class__(os.readlink(self)) + + def readlinkabs(self): + """ Return the path to which this symbolic link points. + + The result is always an absolute path. + """ + p = self.readlink() + if p.isabs(): + return p + else: + return (self.parent / p).abspath() + + + # --- High-level functions from shutil + + copyfile = shutil.copyfile + copymode = shutil.copymode + copystat = shutil.copystat + copy = shutil.copy + copy2 = shutil.copy2 + copytree = shutil.copytree + if hasattr(shutil, 'move'): + move = shutil.move + rmtree = shutil.rmtree + + + # --- Special stuff from os + + if hasattr(os, 'chroot'): + def chroot(self): + os.chroot(self) + + if hasattr(os, 'startfile'): + def startfile(self): + os.startfile(self) + From scipy-svn at scipy.org Mon Sep 17 23:18:38 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Mon, 17 Sep 2007 22:18:38 -0500 (CDT) Subject: [Scipy-svn] r3318 - trunk/scipy/sandbox/timeseries Message-ID: <20070918031838.7FFEA39C0D8@new.scipy.org> Author: mattknox_ca Date: 2007-09-17 22:18:35 -0500 (Mon, 17 Sep 2007) New Revision: 3318 Removed: trunk/scipy/sandbox/timeseries/io/ Log: fame module is no longer maintained and of limited usefulness to numpy/scipy users anyway From scipy-svn at scipy.org Tue Sep 18 11:04:56 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 18 Sep 2007 10:04:56 -0500 (CDT) Subject: [Scipy-svn] r3319 - in trunk/scipy/sparse: . 
tests Message-ID: <20070918150456.AC7FD39C0F4@new.scipy.org> Author: rc Date: 2007-09-18 10:04:50 -0500 (Tue, 18 Sep 2007) New Revision: 3319 Modified: trunk/scipy/sparse/sparse.py trunk/scipy/sparse/tests/test_sparse.py Log: get_submatrix() for csc_matrix csr_matrix, csc_matrix.__getitem__() now uses appropriate get_submatrix() - indexing by row and column slices is possible, though it returns a new matrix, not a view Modified: trunk/scipy/sparse/sparse.py =================================================================== --- trunk/scipy/sparse/sparse.py 2007-09-18 03:18:35 UTC (rev 3318) +++ trunk/scipy/sparse/sparse.py 2007-09-18 15:04:50 UTC (rev 3319) @@ -762,8 +762,51 @@ else: return self._toother()._toother() + def _get_submatrix( self, shape0, shape1, slice0, slice1 ): + """Return a submatrix of this matrix (new matrix is created).""" + def _process_slice( sl, num ): + if isinstance( sl, slice ): + i0, i1 = sl.start, sl.stop + if i0 is None: + i0 = 0 + elif i0 < 0: + i0 = num + i0 + if i1 is None: + i1 = num + elif i1 < 0: + i1 = num + i1 + return i0, i1 + + elif isscalar( sl ): + if sl < 0: + sl += num + + return sl, sl + 1 + + else: + return sl[0], sl[1] + + def _in_bounds( i0, i1, num ): + if not (0<=i0 Author: chris.burns Date: 2007-09-18 12:59:23 -0500 (Tue, 18 Sep 2007) New Revision: 3320 Modified: trunk/scipy/weave/converters.py trunk/scipy/weave/numpy_scalar_spec.py Log: Add numpy scalar converter back in. Reference numpy.sctypes complex types instead of undefined complex192. Modified: trunk/scipy/weave/converters.py =================================================================== --- trunk/scipy/weave/converters.py 2007-09-18 15:04:50 UTC (rev 3319) +++ trunk/scipy/weave/converters.py 2007-09-18 17:59:23 UTC (rev 3320) @@ -34,11 +34,11 @@ # add numpy scalar converters to the default # converter list. #---------------------------------------------------------------------------- -#try: -# import numpy_scalar_spec -# default.append(numpy_scalar_spec.numpy_complex_scalar_converter()) -#except ImportError: -# pass +try: + import numpy_scalar_spec + default.append(numpy_scalar_spec.numpy_complex_scalar_converter()) +except ImportError: + pass #---------------------------------------------------------------------------- # Add wxPython support Modified: trunk/scipy/weave/numpy_scalar_spec.py =================================================================== --- trunk/scipy/weave/numpy_scalar_spec.py 2007-09-18 15:04:50 UTC (rev 3319) +++ trunk/scipy/weave/numpy_scalar_spec.py 2007-09-18 17:59:23 UTC (rev 3320) @@ -16,5 +16,4 @@ # But set this converter up to match the numpy complex # types. 
- self.matching_types = [numpy.complex128, numpy.complex192, - numpy.complex64] + self.matching_types = numpy.sctypes['complex'] From scipy-svn at scipy.org Tue Sep 18 16:19:08 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 18 Sep 2007 15:19:08 -0500 (CDT) Subject: [Scipy-svn] r3322 - trunk/scipy/io/nifti Message-ID: <20070918201908.3D82C39C137@new.scipy.org> Author: matthew.brett at gmail.com Date: 2007-09-18 15:19:02 -0500 (Tue, 18 Sep 2007) New Revision: 3322 Added: trunk/scipy/io/nifti/nifticlib/ Removed: trunk/scipy/io/nifti/nifticlib-0.6/ Modified: trunk/scipy/io/nifti/.setup.py.swp Log: remove version number from nifticlib name Modified: trunk/scipy/io/nifti/.setup.py.swp =================================================================== (Binary files differ) Copied: trunk/scipy/io/nifti/nifticlib (from rev 3321, trunk/scipy/io/nifti/nifticlib-0.6) From scipy-svn at scipy.org Tue Sep 18 16:35:29 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 18 Sep 2007 15:35:29 -0500 (CDT) Subject: [Scipy-svn] r3323 - trunk/scipy/io/nifti Message-ID: <20070918203529.1EF2439C166@new.scipy.org> Author: matthew.brett at gmail.com Date: 2007-09-18 15:35:23 -0500 (Tue, 18 Sep 2007) New Revision: 3323 Modified: trunk/scipy/io/nifti/.setup.py.swp trunk/scipy/io/nifti/setup.py Log: build nifticlib libs Modified: trunk/scipy/io/nifti/.setup.py.swp =================================================================== (Binary files differ) Modified: trunk/scipy/io/nifti/setup.py =================================================================== --- trunk/scipy/io/nifti/setup.py 2007-09-18 20:19:02 UTC (rev 3322) +++ trunk/scipy/io/nifti/setup.py 2007-09-18 20:35:23 UTC (rev 3323) @@ -1,7 +1,18 @@ #!/usr/bin/env python -from os.path import join +import os +from os.path import isfile, join, dirname import sys +import numpy +nifti_wrapper_file = join('nifti', 'nifticlib.py') + +# create an empty file to workaround crappy swig wrapper installation +if not isfile(nifti_wrapper_file): + open(nifti_wrapper_file, 'w') + +# find numpy headers +numpy_headers = join(dirname(numpy.__file__),'core','include') + def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration from numpy.distutils.system_info import get_info @@ -9,18 +20,26 @@ config = Configuration('nifti',parent_package,top_path) #config.add_data_dir('tests') - include_dirs = ['.'] + include_dirs = [ + '.', + './nifticlib/fsliolib', + './nifticlib/niftilib', + './nifticlib/znzlib'] # Libraries - config.add_library('fslio', sources=['fslio.c'], include_dirs=include_dirs) - config.add_library('niftiio', sources=['nifti1_io.c'], include_dirs=include_dirs) - config.add_library('znz', sources=['znzlib.c'], include_dirs=include_dirs) + config.add_library('fslio', + sources=['./nifticlib/fsliolib/fslio.c'], include_dirs=include_dirs) + config.add_library('niftiio', + sources=['./nifticlib/niftilib/nifti1_io.c'], include_dirs=include_dirs) + config.add_library('znz', + sources=['./nifticlib/znzlib/znzlib.c'], include_dirs=include_dirs) # Extension config.add_extension('_nifticlib', - sources = ['nifticlib_wrap.c'], - include_dirs=include_dirs, - libraries = ['niftiio', 'fslio', 'znz',]) + sources = ['nifticlib.i', 'nifticlib_wrap.c'], + include_dirs = include_dirs, + libraries = ['niftiio', 'fslio', 'znz',], + swig_opts = ['-I/usr/include/nifti', '-I'+numpy_headers]) return config From scipy-svn at scipy.org Tue Sep 18 16:36:32 2007 From: scipy-svn at scipy.org (scipy-svn at 
scipy.org) Date: Tue, 18 Sep 2007 15:36:32 -0500 (CDT) Subject: [Scipy-svn] r3324 - trunk/scipy/io/nifti Message-ID: <20070918203632.A501839C0A1@new.scipy.org> Author: matthew.brett at gmail.com Date: 2007-09-18 15:36:28 -0500 (Tue, 18 Sep 2007) New Revision: 3324 Removed: trunk/scipy/io/nifti/.setup.py.swp Log: remove editor crud Deleted: trunk/scipy/io/nifti/.setup.py.swp =================================================================== (Binary files differ) From scipy-svn at scipy.org Tue Sep 18 16:49:48 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 18 Sep 2007 15:49:48 -0500 (CDT) Subject: [Scipy-svn] r3325 - in trunk/scipy/sandbox/timeseries: . src Message-ID: <20070918204948.AF3F439C168@new.scipy.org> Author: mattknox_ca Date: 2007-09-18 15:49:42 -0500 (Tue, 18 Sep 2007) New Revision: 3325 Modified: trunk/scipy/sandbox/timeseries/license.txt trunk/scipy/sandbox/timeseries/parser.py trunk/scipy/sandbox/timeseries/src/c_tdates.c Log: updated parser.py to incorporate enhancements in version 3.0.0 of mx.DateTime. updated license.txt to reflect use of slightly newer version of the egenix public license. Modified: trunk/scipy/sandbox/timeseries/license.txt =================================================================== --- trunk/scipy/sandbox/timeseries/license.txt 2007-09-18 20:36:28 UTC (rev 3324) +++ trunk/scipy/sandbox/timeseries/license.txt 2007-09-18 20:49:42 UTC (rev 3325) @@ -1,14 +1,18 @@ The timeseries module contains code borrowed from the EGENIX mx.DateTime -package. As such, it is subject to the terms of the EGENIX PUBLIC -LICENSE AGREEMENT VERSION 1.0.0 (included below). +package. -Functions in cseries.c marked in the section labelled +Functions in c_tdates.c marked in the section labelled //DERIVED FROM mx.DateTime are slightly modified versions of functions found in mxDateTime.c in the -mx.DateTime source code. +mx.DateTime source code. They are based on version 2.0.6 of the mx.DateTime +package and hence the code is subect to the terms of the EGENIX PUBLIC LICENSE +AGREEMENT VERSION 1.0.0 (included below) -parser.py is a slightly modified version of Parser.py found in mx.DateTime +The code in parser.py is based on code from version 3.0.0 of the mx.DateTime +package, and hence it is subject to the terms of the EGENIX PUBLIC LICENSE +AGREEMENT VERSION 1.1.0 (included below after the 1.0.0 version of the license) + ================================================================= EGENIX.COM PUBLIC LICENSE AGREEMENT VERSION 1.0.0 @@ -97,3 +101,110 @@ By downloading, copying, installing or otherwise using the Software, Licensee agrees to be bound by the terms and conditions of this License Agreement. + +________________________________________________________________________ + +EGENIX.COM PUBLIC LICENSE AGREEMENT VERSION 1.1.0 +________________________________________________________________________ + +1. Introduction + + This "License Agreement" is between eGenix.com Software, Skills + and Services GmbH ("eGenix.com"), having an office at + Pastor-Loeh-Str. 48, D-40764 Langenfeld, Germany, and the + Individual or Organization ("Licensee") accessing and otherwise + using this software in source or binary form and its associated + documentation ("the Software"). + +2. 
License + + Subject to the terms and conditions of this eGenix.com Public + License Agreement, eGenix.com hereby grants Licensee a + non-exclusive, royalty-free, world-wide license to reproduce, + analyze, test, perform and/or display publicly, prepare derivative + works, distribute, and otherwise use the Software alone or in any + derivative version, provided, however, that the eGenix.com Public + License Agreement is retained in the Software, or in any + derivative version of the Software prepared by Licensee. + +3. NO WARRANTY + + eGenix.com is making the Software available to Licensee on an "AS + IS" basis. SUBJECT TO ANY STATUTORY WARRANTIES WHICH CAN NOT BE + EXCLUDED, EGENIX.COM MAKES NO REPRESENTATIONS OR WARRANTIES, + EXPRESS OR IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, + EGENIX.COM MAKES NO AND DISCLAIMS ANY REPRESENTATION OR WARRANTY + OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT + THE USE OF THE SOFTWARE WILL NOT INFRINGE ANY THIRD PARTY RIGHTS. + +4. LIMITATION OF LIABILITY + + EGENIX.COM SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF + THE SOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES + OR LOSS (INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF + BUSINESS PROFITS, BUSINESS INTERRUPTION, LOSS OF BUSINESS + INFORMATION, OR OTHER PECUNIARY LOSS) AS A RESULT OF USING, + MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY DERIVATIVE THEREOF, + EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + + SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR LIMITATION OF + INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THE ABOVE EXCLUSION OR + LIMITATION MAY NOT APPLY TO LICENSEE. + +5. Termination + + This License Agreement will automatically terminate upon a + material breach of its terms and conditions. + +6. Third Party Rights + + Any software or documentation in source or binary form provided + along with the Software that is associated with a separate license + agreement is licensed to Licensee under the terms of that license + agreement. This License Agreement does not apply to those portions + of the Software. Copies of the third party licenses are included + in the Software Distribution. + +7. General + + Nothing in this License Agreement affects any statutory rights of + consumers that cannot be waived or limited by contract. + + Nothing in this License Agreement shall be deemed to create any + relationship of agency, partnership, or joint venture between + eGenix.com and Licensee. + + If any provision of this License Agreement shall be unlawful, + void, or for any reason unenforceable, such provision shall be + modified to the extent necessary to render it enforceable without + losing its intent, or, if no such modification is possible, be + severed from this License Agreement and shall not affect the + validity and enforceability of the remaining provisions of this + License Agreement. + + This License Agreement shall be governed by and interpreted in all + respects by the law of Germany, excluding conflict of law + provisions. It shall not be governed by the United Nations + Convention on Contracts for International Sale of Goods. + + This License Agreement does not grant permission to use eGenix.com + trademarks or trade names in a trademark sense to endorse or + promote products or services of Licensee, or any third party. + + The controlling language of this License Agreement is English. If + Licensee has received a translation into another language, it has + been provided for Licensee's convenience only. + +8. 
Agreement + + By downloading, copying, installing or otherwise using the + Software, Licensee agrees to be bound by the terms and conditions + of this License Agreement. + + + For question regarding this License Agreement, please write to: + + eGenix.com Software, Skills and Services GmbH + Pastor-Loeh-Str. 48 + D-40764 Langenfeld + Germany Modified: trunk/scipy/sandbox/timeseries/parser.py =================================================================== --- trunk/scipy/sandbox/timeseries/parser.py 2007-09-18 20:36:28 UTC (rev 3324) +++ trunk/scipy/sandbox/timeseries/parser.py 2007-09-18 20:49:42 UTC (rev 3325) @@ -2,23 +2,24 @@ """ Date/Time string parsing module. This code is a slightly modified version of Parser.py found in mx.DateTime +version 3.0.0 -As such, it is subject to the terms of the eGenix public license. Please see -license.txt for more details. +As such, it is subject to the terms of the eGenix public license version 1.1.0. +Please see license.txt for more details. """ -import re,string -import datetime as dt -from string import atoi, atof, lower, upper __all__ = [ 'DateFromString', 'DateTimeFromString' ] +import types,re,string +import datetime as dt + +class RangeError(Exception): pass + # Enable to produce debugging output _debug = 0 -class RangeError(Exception): pass - # REs for matching date and time parts in a string; These REs # parse a superset of ARPA, ISO, American and European style dates. # Timezones are supported via the Timezone submodule. @@ -80,33 +81,34 @@ _hour = '(?P[012]?\d)' _minute = '(?P[0-6]\d)' -_second = '(?P[0-6]\d(?:\.\d+)?)' +_second = '(?P[0-6]\d(?:[.,]\d+)?)' -_days = '(?P\d*\d(?:\.\d+)?)' -_hours = '(?P\d*\d(?:\.\d+)?)' -_minutes = '(?P\d*\d(?:\.\d+)?)' -_seconds = '(?P\d*\d(?:\.\d+)?)' +_days = '(?P\d*\d(?:[.,]\d+)?)' +_hours = '(?P\d*\d(?:[.,]\d+)?)' +_minutes = '(?P\d*\d(?:[.,]\d+)?)' +_seconds = '(?P\d*\d(?:[.,]\d+)?)' -_reldays = '(?:\((?P[-+]?\d+(?:\.\d+)?)\))' -_relhours = '(?:\((?P[-+]?\d+(?:\.\d+)?)\))' -_relminutes = '(?:\((?P[-+]?\d+(?:\.\d+)?)\))' -_relseconds = '(?:\((?P[-+]?\d+(?:\.\d+)?)\))' +_reldays = '(?:\((?P[-+]?\d+(?:[.,]\d+)?)\))' +_relhours = '(?:\((?P[-+]?\d+(?:[.,]\d+)?)\))' +_relminutes = '(?:\((?P[-+]?\d+(?:[.,]\d+)?)\))' +_relseconds = '(?:\((?P[-+]?\d+(?:[.,]\d+)?)\))' _sign = '(?:(?P[-+]) *)' _week = 'W(?P\d?\d)' -_zone = ('(?P[A-Z]+|[+-]\d\d?:?(?:\d\d)?)') +_zone = '(?P[A-Z]+|[+-]\d\d?:?(?:\d\d)?)' _ampm = '(?P[ap][m.]+)' -_time = (_hour + ':' + _minute + '(?::' + _second + ')? *' +_time = (_hour + ':' + _minute + '(?::' + _second + '|[^:]|$) *' + _ampm + '? *' + _zone + '?') _isotime = _hour + ':?' + _minute + ':?' + _second + '? *' + _zone + '?' _weekdate = _year + '-?(?:' + _week + '-?' + _day + '?)?' _eurodate = _day + '\.' + _month + '\.' + _year_epoch + '?' -_usdate = _month + '/' + _day + '(?:/' + _year_epoch + ')?' +_usdate = _month + '/' + _day + '(?:/' + _year_epoch + '|[^/]|$)' _altusdate = _month + '-' + _day + '-' + _fullyear_epoch -_isodate = _year + '-' + _fullmonth + '-?' + _fullday + '?(?!:)' +_isodate = _year + '-' + _month + '-?' + _day + '?(?!:)' _altisodate = _year + _fullmonth + _fullday + '(?!:)' +_usisodate = _fullyear + '/' + _fullmonth + '/' + _fullday _litdate = ('(?:'+ _litday + ',? )? *' + _usday + ' *' + '[- ] *(?:' + _litmonth + '|'+ _month +') *[- ] *' + @@ -147,11 +149,15 @@ '(?:' + _hours + ' *h[a-z]*[,; ]*)?' + '(?:' + _minutes + ' *m[a-z]*[,; ]*)?' + '(?:' + _seconds + ' *s[a-z]*[,; ]*)?') +_litdelta2 = (_sign + '?' + + '(?:' + _days + ' *d[a-z]*[,; ]*)?' 
+ + _hours + ':' + _minutes + '(?::' + _seconds + ')?') _timeRE = re.compile(_time, re.I) _isotimeRE = re.compile(_isotime, re.I) _isodateRE = re.compile(_isodate, re.I) _altisodateRE = re.compile(_altisodate, re.I) +_usisodateRE = re.compile(_usisodate, re.I) _eurodateRE = re.compile(_eurodate, re.I) _usdateRE = re.compile(_usdate, re.I) _altusdateRE = re.compile(_altusdate, re.I) @@ -164,29 +170,32 @@ _isodelta2RE = re.compile(_isodelta2) _isodelta3RE = re.compile(_isodelta3) _litdeltaRE = re.compile(_litdelta) +_litdelta2RE = re.compile(_litdelta2) _relisotimeRE = re.compile(_relisotime, re.I) # Available date parsers _date_formats = ('euro', - 'us', 'altus', - 'iso', 'altiso', + 'usiso', 'us', 'altus', + 'iso', 'altiso', 'lit', 'altlit', 'eurlit', 'unknown') +# Available time parsers +_time_formats = ('standard', + 'iso', + 'unknown') -# time zone parsing +_zoneoffset = ('(?:' + '(?P[+-])?' + '(?P\d\d?)' + ':?' + '(?P\d\d)?' + '(?P\d+)?' + ')' + ) -_zoneoffset = '(?:(?P[+-])?(?P\d\d?):?(?P\d\d)?)' - -# Compiled RE objects _zoneoffsetRE = re.compile(_zoneoffset) - -### Time zone offset table -# -# The offset given here represent the difference between UTC and the -# given time zone. -# _zonetable = { # Timezone abbreviations # Std Summer @@ -219,6 +228,7 @@ 'CAST':9.5, 'CADT':10.5, # Central 'EAST':10, 'EADT':11, # Eastern 'WAST':8, 'WADT':9, # Western + 'SAST':9.5, 'SADT':10.5, # Southern # US military time zones 'Z': 0, @@ -248,6 +258,7 @@ 'Y':-12 } + def utc_offset(zone): """ utc_offset(zonestring) @@ -263,31 +274,33 @@ """ if not zone: return 0 - uzone = upper(zone) + uzone = zone.upper() if _zonetable.has_key(uzone): return _zonetable[uzone]*60 offset = _zoneoffsetRE.match(zone) if not offset: raise ValueError,'wrong format or unkown time zone: "%s"' % zone - zonesign,hours,minutes = offset.groups() + zonesign,hours,minutes,extra = offset.groups() + if extra: + raise ValueError,'illegal time zone offset: "%s"' % zone offset = int(hours or 0) * 60 + int(minutes or 0) if zonesign == '-': offset = -offset return offset def add_century(year): + """ Sliding window approach to the Y2K problem: adds a suitable century to the given year and returns it as integer. - The window used depends on the current year (at import time). - If adding the current century to the given year gives a year - within the range current_year-70...current_year+30 [both - inclusive], then the current century is added. Otherwise the - century (current + 1 or - 1) producing the least difference is - chosen. + The window used depends on the current year. If adding the current + century to the given year gives a year within the range + current_year-70...current_year+30 [both inclusive], then the + current century is added. Otherwise the century (current + 1 or + - 1) producing the least difference is chosen. 
""" - + current_year=dt.datetime.now().year current_century=(dt.datetime.now().year / 100) * 100 @@ -303,9 +316,9 @@ else: return year - 100 -def _parse_date(text, formats=_date_formats, defaultdate=None, - now=dt.datetime.now): +def _parse_date(text): + """ Parses the date part given in text and returns a tuple (text,day,month,year,style) with the following meanings: @@ -320,6 +333,7 @@ 'altus' - the alternative US date parser (with '-' instead of '/') 'iso' - the ISO date parser 'altiso' - the alternative ISO date parser (without '-') + 'usiso' - US style ISO date parser (yyyy/mm/dd) 'lit' - the US literal date parser 'altlit' - the alternative US literal date parser 'eurlit' - the Eurpean literal date parser @@ -339,7 +353,14 @@ """ match = None style = '' + + formats = _date_formats + us_formats=('us', 'altus') + iso_formats=('iso', 'altiso', 'usiso') + + now=dt.datetime.now + # Apply parsers in the order given in formats for format in formats: @@ -351,85 +372,88 @@ if year: if len(year) == 2: # Y2K problem: - year = add_century(atoi(year)) + year = add_century(int(year)) else: - year = atoi(year) + year = int(year) else: - if defaultdate is None: - defaultdate = dt.datetime.now() + defaultdate = now() year = defaultdate.year if epoch and 'B' in epoch: year = -year + 1 - month = atoi(month) + month = int(month) + day = int(day) # Could have mistaken euro format for us style date # which uses month, day order if month > 12 or month == 0: + match = None continue - day = atoi(day) break - elif format == 'us' or format == 'altus': - # US style date - if format == 'us': - match = _usdateRE.search(text) - else: - match = _altusdateRE.search(text) - if match is not None: - month,day,year,epoch = match.groups() - if year: - if len(year) == 2: - # Y2K problem: - year = add_century(atoi(year)) - else: - year = atoi(year) - else: - if defaultdate is None: - defaultdate = dt.datetime.now() - year = defaultdate.year - if epoch and 'B' in epoch: - year = -year + 1 - month = atoi(month) - # Could have mistaken us format for euro style date - # which uses day, month order - if month > 12 or month == 0: - continue - # Default to 1 if no day is given - if day: - day = atoi(day) - else: - day = 1 - break - - elif format == 'iso' or format == 'altiso': + elif format in iso_formats: # ISO style date if format == 'iso': match = _isodateRE.search(text) - else: + elif format == 'altiso': match = _altisodateRE.search(text) # Avoid mistaking ISO time parts ('Thhmmss') for dates if match is not None: left, right = match.span() if left > 0 and \ text[left - 1:left] == 'T': + match = None continue + else: + match = _usisodateRE.search(text) if match is not None: year,month,day = match.groups() if len(year) == 2: # Y2K problem: - year = add_century(atoi(year)) + year = add_century(int(year)) else: - year = atoi(year) + year = int(year) # Default to January 1st if not month: month = 1 else: - month = atoi(month) + month = int(month) if not day: day = 1 else: - day = atoi(day) + day = int(day) break + elif format in us_formats: + # US style date + if format == 'us': + match = _usdateRE.search(text) + else: + match = _altusdateRE.search(text) + if match is not None: + month,day,year,epoch = match.groups() + if year: + if len(year) == 2: + # Y2K problem: + year = add_century(int(year)) + else: + year = int(year) + else: + defaultdate = now() + year = defaultdate.year + if epoch and 'B' in epoch: + year = -year + 1 + # Default to 1 if no day is given + if day: + day = int(day) + else: + day = 1 + month = int(month) + # 
Could have mistaken us format for euro style date + # which uses day, month order + if month > 12 or month == 0: + match = None + continue + break + elif format == 'lit': # US style literal date match = _litdateRE.search(text) @@ -455,8 +479,7 @@ elif format == 'unknown': # No date part: use defaultdate - if defaultdate is None: - defaultdate = dt.datetime.now() + defaultdate = now() year = defaultdate.year month = defaultdate.month day = defaultdate.day @@ -482,36 +505,36 @@ if 0 and _debug: print match.groups() # Default to current year, January 1st if not year: - if defaultdate is None: - defaultdate = dt.datetime.now() + defaultdate = now() year = defaultdate.year else: if len(year) == 2: # Y2K problem: - year = add_century(atoi(year)) + year = add_century(int(year)) else: - year = atoi(year) + year = int(year) if epoch and 'B' in epoch: year = -year + 1 if litmonth: - litmonth = lower(litmonth) + litmonth = litmonth.lower() try: month = litmonthtable[litmonth] except KeyError: raise ValueError,\ 'wrong month name: "%s"' % litmonth elif month: - month = atoi(month) + month = int(month) else: month = 1 if day: - day = atoi(day) + day = int(day) else: day = 1 + #print '_parse_date:',text,day,month,year,style return text,day,month,year,style -def _parse_time(text, formats=('iso','unknown')): +def _parse_time(text): """ Parses a time part given in text and returns a tuple (text,hour,minute,second,offset,style) with the following @@ -537,6 +560,8 @@ match = None style = '' + formats=_time_formats + # Apply parsers in the order given in formats for format in formats: @@ -569,23 +594,33 @@ # Post-processing if match is not None: + if zone: # Convert to UTC offset offset = utc_offset(zone) else: offset = 0 - hour = atoi(hour) + + hour = int(hour) if ampm: if ampm[0] in ('p', 'P'): - hour = hour + 12 + # 12pm = midday + if hour < 12: + hour = hour + 12 + else: + # 12am = midnight + if hour >= 12: + hour = hour - 12 if minute: - minute = atoi(minute) + minute = int(minute) else: minute = 0 if not second: second = 0.0 else: - second = atof(second) + if ',' in second: + second = second.replace(',', '.') + second = float(second) # Remove time from text left,right = match.span() @@ -594,11 +629,12 @@ 'giving:',hour,minute,second,offset text = text[:left] + text[right:] + #print '_parse_time:',text,hour,minute,second,offset,style return text,hour,minute,second,offset,style ### -def DateTimeFromString(text, formats=_date_formats, defaultdate=None): +def DateTimeFromString(text): """ DateTimeFromString(text, [formats, defaultdate]) @@ -616,71 +652,54 @@ 'altus' - the alternative US date parser (with '-' instead of '/') 'iso' - the ISO date parser 'altiso' - the alternative ISO date parser (without '-') + 'usiso' - US style ISO date parser (yyyy/mm/dd) 'lit' - the US literal date parser 'altlit' - the alternative US literal date parser 'eurlit' - the Eurpean literal date parser 'unknown' - if no date part is found, use defaultdate defaultdate provides the defaults to use in case no date part - is found. Most other parsers default to the current year + is found. Most of the parsers default to the current year January 1 if some of these date parts are missing. - If 'unknown' is not given in formats and the date/time cannot + If 'unknown' is not given in formats and the date cannot be parsed, a ValueError is raised. + time_formats may be set to a tuple of strings specifying which + of the following parsers to use and in which order to try + them. 
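The two-digit-year handling used throughout the hunks above is add_century(), whose sliding window is documented earlier in this diff. A minimal self-contained sketch of that rule, written against Python 2 like the rest of the module (integer division; the current year is read at call time):

    import datetime as dt

    def add_century(year):
        # Map a two-digit year into the inclusive window
        # [current_year - 70, current_year + 30].
        current_year = dt.datetime.now().year
        current_century = (current_year / 100) * 100
        year = year + current_century
        if year > current_year + 30:
            return year - 100    # e.g. 94 -> 1994 when run in 2007
        elif year < current_year - 70:
            return year + 100
        else:
            return year
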
Default is to try all of them in the order given below: + + 'standard' - standard time format HH:MM:SS (with ':' delimiter) + 'iso' - ISO time format (superset of 'standard') + 'unknown' - default to 00:00:00 in case the time format + cannot be parsed + + Defaults to 00:00:00.00 for time parts that are not included + in the textual representation. + + If 'unknown' is not given in time_formats and the time cannot + be parsed, a ValueError is raised. + """ origtext = text - formats = tuple(formats) - if formats is _date_formats or \ - 'iso' in formats or \ - 'altiso' in formats: - # First try standard order (parse time, then date) - if formats[0] not in ('iso', 'altiso'): - text,hour,minute,second,offset,timestyle = _parse_time( - origtext, - ('standard', 'iso', 'unknown')) - text,day,month,year,datestyle = _parse_date( - text, - formats + ('unknown',), - defaultdate) - if 0 and _debug: - print 'tried time/date on %s, date=%s, time=%s' % (origtext, - datestyle, - timestyle) - else: - timestyle = 'iso' - - # If this fails, try the ISO order - if timestyle in ('iso', 'unknown'): - text,day,month,year,datestyle = _parse_date( - origtext, - formats, - defaultdate) - text,hour,minute,second,offset,timestyle = _parse_time( - text, - ('iso', 'unknown')) - if 0 and _debug: - print 'tried ISO on %s, date=%s, time=%s' % (origtext, - datestyle, - timestyle) - else: - # Standard order: time part, then date part - text,hour,minute,second,offset,timestyle = _parse_time( - origtext, - ('standard', 'unknown')) - text,day,month,year,datestyle = _parse_date( - text, - formats, - defaultdate) + text,hour,minute,second,offset,timestyle = _parse_time(origtext) + text,day,month,year,datestyle = _parse_date(text) - if (datestyle == 'unknown' or \ - timestyle == 'unknown') and \ - 'unknown' not in formats: - raise ValueError,\ - 'Failed to parse "%s": found "%s" date, "%s" time' % \ - (origtext, datestyle, timestyle) - + if 0 and _debug: + print 'tried time/date on %s, date=%s, time=%s' % (origtext, + datestyle, + timestyle) + + # If this fails, try the ISO order (date, then time) + if timestyle in ('iso', 'unknown'): + text,day,month,year,datestyle = _parse_date(origtext) + text,hour,minute,second,offset,timestyle = _parse_time(text) + if 0 and _debug: + print 'tried ISO on %s, date=%s, time=%s' % (origtext, + datestyle, + timestyle) + try: microsecond = int(1000000 * (second % 1)) second = int(second) @@ -690,7 +709,7 @@ raise RangeError,\ 'Failed to parse "%s": %s' % (origtext, why) -def DateFromString(text, formats=_date_formats, defaultdate=None): +def DateFromString(text): """ DateFromString(text, [formats, defaultdate]) @@ -701,21 +720,15 @@ DateTimeFromString(). """ - _text,day,month,year,datestyle = _parse_date(text, formats, defaultdate) + _text,day,month,year,datestyle = _parse_date(text) - if datestyle == 'unknown' and \ - 'unknown' not in formats: - raise ValueError,\ - 'Failed to parse "%s": found "%s" date' % \ - (origtext, datestyle) - try: return dt.datetime(year,month,day) except ValueError, why: raise RangeError,\ 'Failed to parse "%s": %s' % (text, why) -def validateDateTimeString(text, formats=_date_formats): +def validateDateTimeString(text): """ validateDateTimeString(text, [formats, defaultdate]) @@ -729,17 +742,15 @@ XXX Undocumented ! 
""" - formats = list(formats) - if 'unknown' in formats: - formats.remove('unknown') try: - DateTimeFromString(text, formats) + DateTimeFromString(text) except ValueError, why: return 0 return 1 -def validateDateString(text, formats=_date_formats): +def validateDateString(text): + """ validateDateString(text, [formats, defaultdate]) Validates the given text and returns 1/0 depending on whether @@ -752,16 +763,12 @@ XXX Undocumented ! """ - formats = list(formats) - if 'unknown' in formats: - formats.remove('unknown') try: - DateFromString(text, formats) + DateFromString(text) except ValueError, why: return 0 return 1 - ### Tests def _test(): @@ -769,6 +776,7 @@ import sys t = dt.datetime.now() + _date = t.strftime('%Y-%m-%d') print 'Testing DateTime Parser...' @@ -788,6 +796,7 @@ ('Sonntag, der 6. November 1994, 08:49:37 GMT', '1994-11-06 08:49:37.00'), ('6. November 2001, 08:49:37', '2001-11-06 08:49:37.00'), ('sep 6', '%s-09-06 00:00:00.00' % t.year), + ('sep 6 2000', '2000-09-06 00:00:00.00'), ('September 29', '%s-09-29 00:00:00.00' % t.year), ('Sep. 29', '%s-09-29 00:00:00.00' % t.year), ('6 sep', '%s-09-06 00:00:00.00' % t.year), @@ -799,6 +808,7 @@ ('sep 6 01', '2001-09-06 00:00:00.00'), ('Sep 6, 01', '2001-09-06 00:00:00.00'), ('September 6, 01', '2001-09-06 00:00:00.00'), + ('30 Apr 2006 20:19:00', '2006-04-30 20:19:00.00'), # ISO formats ('1994-11-06 08:49:37', '1994-11-06 08:49:37.00'), @@ -815,7 +825,15 @@ ('20000824T020301', '2000-08-24 02:03:01.00'), ('20000824 020301', '2000-08-24 02:03:01.00'), ('2000-08-24 02:03:01.00', '2000-08-24 02:03:01.00'), - ('T020311', '%s 02:03:11.00' % t.strftime('%Y-%m-%d')), + ('T020311', '%s 02:03:11.00' % _date), + ('2003-12-9', '2003-12-09 00:00:00.00'), + ('03-12-9', '2003-12-09 00:00:00.00'), + ('003-12-9', '0003-12-09 00:00:00.00'), + ('0003-12-9', '0003-12-09 00:00:00.00'), + ('2003-1-9', '2003-01-09 00:00:00.00'), + ('03-1-9', '2003-01-09 00:00:00.00'), + ('003-1-9', '0003-01-09 00:00:00.00'), + ('0003-1-9', '0003-01-09 00:00:00.00'), # US formats ('06/11/94 08:49:37', '1994-06-11 08:49:37.00'), @@ -834,22 +852,63 @@ ('09-6-2001', '2001-09-06 00:00:00.00'), ('9-06-2001', '2001-09-06 00:00:00.00'), ('09-06-2001', '2001-09-06 00:00:00.00'), + ('2002/05/28 13:10:56.1147 GMT+2', '2002-05-28 13:10:56.114699'), + ('1970/01/01', '1970-01-01 00:00:00.00'), + ('20021025 12:00 PM', '2002-10-25 12:00:00.00'), + ('20021025 12:30 PM', '2002-10-25 12:30:00.00'), + ('20021025 12:00 AM', '2002-10-25 00:00:00.00'), + ('20021025 12:30 AM', '2002-10-25 00:30:00.00'), + ('20021025 1:00 PM', '2002-10-25 13:00:00.00'), + ('20021025 2:00 AM', '2002-10-25 02:00:00.00'), + ('Thursday, February 06, 2003 12:40 PM', '2003-02-06 12:40:00.00'), + ('Mon, 18 Sep 2006 23:03:00', '2006-09-18 23:03:00.00'), # European formats ('6.11.2001, 08:49:37', '2001-11-06 08:49:37.00'), ('06.11.2001, 08:49:37', '2001-11-06 08:49:37.00'), ('06.11. 
08:49:37', '%s-11-06 08:49:37.00' % t.year), + #('21/12/2002', '2002-12-21 00:00:00.00'), + #('21/08/2002', '2002-08-21 00:00:00.00'), + #('21-08-2002', '2002-08-21 00:00:00.00'), + #('13/01/03', '2003-01-13 00:00:00.00'), + #('13/1/03', '2003-01-13 00:00:00.00'), + #('13/1/3', '2003-01-13 00:00:00.00'), + #('13/01/3', '2003-01-13 00:00:00.00'), # Time only formats - ('01:03', '%s 01:03:00.00' % t.strftime('%Y-%m-%d')), - ('01:03:11', '%s 01:03:11.00' % t.strftime('%Y-%m-%d')), - ('01:03:11.50', '%s 01:03:11.50' % t.strftime('%Y-%m-%d')), - ('01:03:11.50 AM', '%s 01:03:11.50' % t.strftime('%Y-%m-%d')), - ('01:03:11.50 PM', '%s 13:03:11.50' % t.strftime('%Y-%m-%d')), - ('01:03:11.50 a.m.', '%s 01:03:11.50' % t.strftime('%Y-%m-%d')), - ('01:03:11.50 p.m.', '%s 13:03:11.50' % t.strftime('%Y-%m-%d')), + ('01:03', '%s 01:03:00.00' % _date), + ('01:03:11', '%s 01:03:11.00' % _date), + ('01:03:11.50', '%s 01:03:11.500000' % _date), + ('01:03:11.50 AM', '%s 01:03:11.500000' % _date), + ('01:03:11.50 PM', '%s 13:03:11.500000' % _date), + ('01:03:11.50 a.m.', '%s 01:03:11.500000' % _date), + ('01:03:11.50 p.m.', '%s 13:03:11.500000' % _date), + + # Invalid formats + ('6..2001, 08:49:37', '%s 08:49:37.00' % _date), + ('9//2001', 'ignore'), + ('06--94 08:49:37', 'ignore'), + ('20-03 00:00:00.00', 'ignore'), + ('9/2001', 'ignore'), + ('9-6', 'ignore'), + ('09-6', 'ignore'), + ('9-06', 'ignore'), + ('09-06', 'ignore'), + ('20000824/23', 'ignore'), + ('November 1994 08:49:37', 'ignore'), ] + # Add Unicode versions + try: + unicode + except NameError: + pass + else: + k = [] + for text, result in l: + k.append((unicode(text), result)) + l.extend(k) + for text, reference in l: try: value = DateTimeFromString(text) @@ -860,10 +919,9 @@ value = str(sys.exc_info()[1]) valid_datetime = validateDateTimeString(text) valid_date = validateDateString(text) - + if reference[-3:] == '.00': reference = reference[:-3] - elif reference[-3:] == '.50': reference = reference + '0000' - + if str(value) != reference and \ not reference == 'ignore': print 'Failed to parse "%s"' % text @@ -877,6 +935,8 @@ if not valid_date: print ' "%s" failed date validation' % text + et = dt.datetime.now() + print 'done. 
(after %f seconds)' % ((et-t).seconds) if __name__ == '__main__': _test() Modified: trunk/scipy/sandbox/timeseries/src/c_tdates.c =================================================================== --- trunk/scipy/sandbox/timeseries/src/c_tdates.c 2007-09-18 20:36:28 UTC (rev 3324) +++ trunk/scipy/sandbox/timeseries/src/c_tdates.c 2007-09-18 20:49:42 UTC (rev 3325) @@ -26,11 +26,9 @@ //DERIVED FROM mx.DateTime /* -===================================================== -== Functions in the following section are borrowed == -== from mx.DateTime, and in many cases slightly == -== modified == -===================================================== + Functions in the following section are borrowed from mx.DateTime version + 2.0.6, and hence this code is subject to the terms of the egenix public + license version 1.0.0 */ #define Py_AssertWithArg(x,errortype,errorstr,a1) {if (!(x)) {PyErr_Format(errortype,errorstr,a1);goto onError;}} From scipy-svn at scipy.org Tue Sep 18 19:22:36 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 18 Sep 2007 18:22:36 -0500 (CDT) Subject: [Scipy-svn] r3326 - trunk/scipy/sandbox/timeseries Message-ID: <20070918232236.C12FF39C04A@new.scipy.org> Author: mattknox_ca Date: 2007-09-18 18:22:21 -0500 (Tue, 18 Sep 2007) New Revision: 3326 Removed: trunk/scipy/sandbox/timeseries/tcore.py Modified: trunk/scipy/sandbox/timeseries/__init__.py trunk/scipy/sandbox/timeseries/tdates.py trunk/scipy/sandbox/timeseries/tseries.py Log: removed tcore.py and moved its functions into the areas where they are actually used Modified: trunk/scipy/sandbox/timeseries/__init__.py =================================================================== --- trunk/scipy/sandbox/timeseries/__init__.py 2007-09-18 20:49:42 UTC (rev 3325) +++ trunk/scipy/sandbox/timeseries/__init__.py 2007-09-18 23:22:21 UTC (rev 3326) @@ -1,19 +1,5 @@ """TimeSeries - - -__author__ = "Pierre GF Gerard-Marchant & Matt Knox ($Author$)" -__version__ = '1.0' -__revision__ = "$Revision$" -__date__ = '$Date$' -import tcore -from tcore import * -import tdates -from tdates import * -import tseries -from tseries import * -import tmulti -from tmulti import * :author: Pierre GF Gerard-Marchant & Matt Knox :contact: pierregm_at_uga_dot_edu - mattknox_ca_at_hotmail_dot_com :version: $Id$ @@ -25,9 +11,6 @@ __revision__ = "$Revision$" __date__ = '$Date$' -# initialize python callbacks for C code -import tcore -from tcore import * import const import tdates from tdates import * Deleted: trunk/scipy/sandbox/timeseries/tcore.py =================================================================== --- trunk/scipy/sandbox/timeseries/tcore.py 2007-09-18 20:49:42 UTC (rev 3325) +++ trunk/scipy/sandbox/timeseries/tcore.py 2007-09-18 23:22:21 UTC (rev 3326) @@ -1,58 +0,0 @@ -""" -A collection of tools for timeseries - -:author: Pierre GF Gerard-Marchant & Matt Knox -:contact: pierregm_at_uga_dot_edu - mattknox_ca_at_hotmail_dot_com -:version: $Id$ -""" -__author__ = "Pierre GF Gerard-Marchant & Matt Knox ($Author$)" -__version__ = '1.0' -__revision__ = "$Revision$" -__date__ = '$Date$' - -import numpy -import numpy.core.numeric as numeric - -import maskedarray as MA - -#####--------------------------------------------------------------------------- -#---- --- Generic functions --- -#####--------------------------------------------------------------------------- -def first_unmasked_val(a): - "Returns the first unmasked value in a 1d maskedarray." 
- (i,j) = MA.extras.flatnotmasked_edges(a) - return a[i] - -def last_unmasked_val(a): - "Returns the last unmasked value in a 1d maskedarray." - (i,j) = MA.extras.flatnotmasked_edges(a) - return a[j] - -def reverse_dict(d): - "Reverses the keys and values of a dictionary." - alt = [] - tmp = [alt.extend([(w,k) for w in v]) for (k,v) in d.iteritems()] - return dict(alt) - - - -#####--------------------------------------------------------------------------- -#---- --- Misc functions --- -#####--------------------------------------------------------------------------- -#http://aspn.activestate.com/ASPN/Mail/Message/python-tutor/2302348 -def flatten_sequence(iterable): - """Flattens a compound of nested iterables.""" - itm = iter(iterable) - for elm in itm: - if hasattr(elm,'__iter__') and not isinstance(elm, basestring): - for f in flatten_sequence(elm): - yield f - else: - yield elm - -def flatargs(*args): - "Flattens the arguments." - if not hasattr(args, '__iter__'): - return args - else: - return flatten_sequence(args) Modified: trunk/scipy/sandbox/timeseries/tdates.py =================================================================== --- trunk/scipy/sandbox/timeseries/tdates.py 2007-09-18 20:49:42 UTC (rev 3325) +++ trunk/scipy/sandbox/timeseries/tdates.py 2007-09-18 23:22:21 UTC (rev 3326) @@ -29,10 +29,10 @@ from parser import DateFromString, DateTimeFromString -import tcore as corelib import const as _c import cseries +# initialize python callbacks for C code cseries.set_callback_DateFromString(DateFromString) cseries.set_callback_DateTimeFromString(DateTimeFromString) @@ -396,9 +396,28 @@ #...................................................... def find_dates(self, *dates): "Returns the indices corresponding to given dates, as an array." + + #http://aspn.activestate.com/ASPN/Mail/Message/python-tutor/2302348 + def flatten_sequence(iterable): + """Flattens a compound of nested iterables.""" + itm = iter(iterable) + for elm in itm: + if hasattr(elm,'__iter__') and not isinstance(elm, basestring): + for f in flatten_sequence(elm): + yield f + else: + yield elm + + def flatargs(*args): + "Flattens the arguments." + if not hasattr(args, '__iter__'): + return args + else: + return flatten_sequence(args) + ifreq = self.freq c = numpy.zeros(self.shape, bool_) - for d in corelib.flatargs(*dates): + for d in flatargs(*dates): if d.freq != ifreq: d = d.asfreq(ifreq) c += (self == d.value) @@ -685,37 +704,6 @@ if __name__ == '__main__': import maskedarray.testutils from maskedarray.testutils import assert_equal -# if 0: -# dlist = ['2007-%02i' % i for i in range(1,5)+range(7,13)] -# mdates = date_array_fromlist(dlist, 'M') -# # Using an integer -# assert_equal(mdates[0].value, 24073) -# assert_equal(mdates[-1].value, 24084) -# # Using a date -# lag = mdates.find_dates(mdates[0]) -# print mdates[lag] -# assert_equal(mdates[lag], mdates[0]) -# if 0: -# hodie = today('D') -# D = DateArray(today('D')) -# assert_equal(D.freq, 6000) -# if 0: -# freqs = [x[0] for x in corelib.freq_dict.values() if x[0] != 'U'] -# print freqs -# for f in freqs: -# print f -# today = thisday(f) -# assert(Date(freq=f, value=today.value) == today) -# if 0: -# D = date_array(freq='U', start_date=Date('U',1), length=10) -# if 0: -# dlist = ['2007-01-%02i' % i for i in (1,2,4,5,7,8,10,11,13)] -# ords = numpy.fromiter((DateTimeFromString(s).toordinal() for s in dlist), -# float_) -# if 0: -# "Tests the automatic sorting of dates." 
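The flatten helpers relocated in this commit (out of tcore.py and into find_dates above) are a small recursive generator pair. A quick behavioural sketch, Python 2 as in the module (strings are yielded whole: py2 str lacks __iter__, and the basestring guard also covers unicode):

    def flatten_sequence(iterable):
        "Flattens a compound of nested iterables."
        for elm in iter(iterable):
            if hasattr(elm, '__iter__') and not isinstance(elm, basestring):
                for f in flatten_sequence(elm):
                    yield f
            else:
                yield elm

    print list(flatten_sequence([1, (2, [3, 4]), 'abc']))
    # -> [1, 2, 3, 4, 'abc']
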
-# D = date_array_fromlist(dlist=['2006-01','2005-01','2004-01'],freq='M') -# assert_equal(D.view(ndarray), [24037, 24049, 24061]) if 1: dlist = ['2007-%02i' % i for i in range(1,5)+range(7,13)] Modified: trunk/scipy/sandbox/timeseries/tseries.py =================================================================== --- trunk/scipy/sandbox/timeseries/tseries.py 2007-09-18 20:49:42 UTC (rev 3325) +++ trunk/scipy/sandbox/timeseries/tseries.py 2007-09-18 23:22:21 UTC (rev 3326) @@ -31,7 +31,6 @@ filled, getmask, getmaskarray, hsplit, make_mask_none, mask_or, make_mask, \ masked_array -import tcore as corelib import const as _c import tdates @@ -54,6 +53,7 @@ 'day_of_week','day_of_year','day','month','quarter','year', 'hour','minute','second', 'tofile','asrecords','flatten', 'check_observed', +'first_unmasked_val', 'last_unmasked_val' ] @@ -68,9 +68,19 @@ 'MAXIMUM': ['MAX','MAXIMUM','HIGH'], 'MINIMUM': ['MIN','MINIMUM','LOW']} +def first_unmasked_val(a): + "Returns the first unmasked value in a 1d maskedarray." + (i,j) = MA.extras.flatnotmasked_edges(a) + return a[i] + +def last_unmasked_val(a): + "Returns the last unmasked value in a 1d maskedarray." + (i,j) = MA.extras.flatnotmasked_edges(a) + return a[j] + obs_dict = {"UNDEFINED":None, - "BEGINNING": corelib.first_unmasked_val, - "ENDING": corelib.last_unmasked_val, + "BEGINNING": first_unmasked_val, + "ENDING": last_unmasked_val, "AVERAGED": MA.average, "SUMMED": MA.sum, "MAXIMUM": MA.maximum, @@ -82,8 +92,15 @@ for al in aliases: alias_obs_dict[al] = obs_dict[ob] obs_dict.update(alias_obs_dict) -fmtobs_revdict = corelib.reverse_dict(fmtobs_dict) +def _reverse_dict(d): + "Reverses the keys and values of a dictionary." + alt = [] + tmp = [alt.extend([(w,k) for w in v]) for (k,v) in d.iteritems()] + return dict(alt) + +fmtobs_revdict = _reverse_dict(fmtobs_dict) + def fmtObserv(obStr): "Converts a possible 'Observed' string into acceptable values." if obStr is None: From scipy-svn at scipy.org Tue Sep 18 20:51:24 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 18 Sep 2007 19:51:24 -0500 (CDT) Subject: [Scipy-svn] r3327 - in trunk/scipy/sandbox/timeseries: . 
tests Message-ID: <20070919005124.0980439C04A@new.scipy.org> Author: mattknox_ca Date: 2007-09-18 19:51:14 -0500 (Tue, 18 Sep 2007) New Revision: 3327 Added: trunk/scipy/sandbox/timeseries/dates.py trunk/scipy/sandbox/timeseries/extras.py trunk/scipy/sandbox/timeseries/report.py trunk/scipy/sandbox/timeseries/tests/test_trecords.py trunk/scipy/sandbox/timeseries/trecords.py Removed: trunk/scipy/sandbox/timeseries/reportlib.py trunk/scipy/sandbox/timeseries/tdates.py trunk/scipy/sandbox/timeseries/tests/test_multitimeseries.py trunk/scipy/sandbox/timeseries/textras.py trunk/scipy/sandbox/timeseries/tmulti.py Modified: trunk/scipy/sandbox/timeseries/__init__.py trunk/scipy/sandbox/timeseries/tseries.py Log: renamed several files for organizational purposes renamed MultiTimeSeries class to TimeSeriesRecords Modified: trunk/scipy/sandbox/timeseries/__init__.py =================================================================== --- trunk/scipy/sandbox/timeseries/__init__.py 2007-09-18 23:22:21 UTC (rev 3326) +++ trunk/scipy/sandbox/timeseries/__init__.py 2007-09-19 00:51:14 UTC (rev 3327) @@ -12,27 +12,22 @@ __date__ = '$Date$' import const -import tdates -from tdates import * +import dates +from dates import * import tseries from tseries import * -import tmulti -from tmulti import * -import reportlib - -from reportlib import * +import trecords +from trecords import * + +import report +from report import * + import lib from lib import filters, interpolate, moving_funcs - -__all__ = ['tdates', 'tseries','tmulti','reportlib','filters','interpolate'] -__all__ += tdates.__all__ -__all__ += tseries.__all__ - -__all__ = ['const', 'tdates','tseries','tmulti','reportlib','filters', +__all__ = ['const', 'dates','tseries','trecords','report','filters', 'interpolate', 'moving_funcs'] -__all__ += tdates.__all__ +__all__ += dates.__all__ __all__ += tseries.__all__ -__all__ += tmulti.__all__ -__all__ += reportlib.__all__ - +__all__ += trecords.__all__ +__all__ += report.__all__ Copied: trunk/scipy/sandbox/timeseries/dates.py (from rev 3326, trunk/scipy/sandbox/timeseries/tdates.py) Copied: trunk/scipy/sandbox/timeseries/extras.py (from rev 3319, trunk/scipy/sandbox/timeseries/textras.py) Copied: trunk/scipy/sandbox/timeseries/report.py (from rev 3319, trunk/scipy/sandbox/timeseries/reportlib.py) Deleted: trunk/scipy/sandbox/timeseries/reportlib.py =================================================================== --- trunk/scipy/sandbox/timeseries/reportlib.py 2007-09-18 23:22:21 UTC (rev 3326) +++ trunk/scipy/sandbox/timeseries/reportlib.py 2007-09-19 00:51:14 UTC (rev 3327) @@ -1,538 +0,0 @@ -""" -Reporting functions - -:author: Pierre GF Gerard-Marchant & Matt Knox -:contact: pierregm_at_uga_dot_edu - mattknox_ca_at_hotmail_dot_com -:version: $Id$ - -Ideas borrowed from: - -- George Sakkis - http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/267662 - -- Mike Brown - http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/148061 - -:Examples: - - import numpy as np - import timeseries as ts - import maskedarray as ma - from timeseries import Report, wrap_onspace - - series1 = ts.time_series(np.random.uniform(-100,100,15), start_date=ts.thisday('b')-15) - series2 = ts.time_series(np.random.uniform(-100,100,13), start_date=ts.thisday('b')-10) - series3 = ts.time_series(['string1', 'another string', 'yet another string']*3, start_date=ts.thisday('b')-10) - - darray = ts.date_array(start_date=ts.thisday('b')-8, end_date=ts.thisday('b')-3) - - txt_o = open('myfile.txt', 'w') - html_o = open('myfile.html', 
'w') - - # report containing only numerical series, showing 2 decimal places - num_report = Report(series1, series2, fmtfunc=lambda x:'%.2f' % x) - - # report containing some string and numerical data - mixed_report = Report(series1, series2, series3) - - # output a csv report suitable for Excel to sys.stdout, show masked values as "N/A" - num_report(delim=', ', mask_rep='N/A') - - # format column one with 2 decimal places, and column two with 4. - # Add a sum footer. Write the output to txt_o - num_report(fmtfunc=[(lambda x:'%.2f' % x), (lambda x:'%.4f' % x)], - footer_func=ma.sum, footer_label='sum', output=txt_o) - - # create an html table of the data over a specified range. - # Wrap text in cells to width 10. Output to html_o - html_o.write("<table>") - mixed_report(series1, series2, series3, dates=darray, - delim="</td><td>", prefix="<tr><td>", postfix="</td></tr>", - wrapfunc=wrap_onspace(10, nls='<br>'), output=html_o) - html_o.write("</table>
") - -""" -__author__ = "Pierre GF Gerard-Marchant & Matt Knox ($Author$)" -__version__ = '1.0' -__revision__ = "$Revision$" -__date__ = '$Date$' - -import sys -import operator, types, copy -import timeseries as ts -import maskedarray as ma - -__all__ = [ - 'Report', 'wrap_onspace', 'wrap_onspace_strict', - 'wrap_always'] - -class fmtfunc_wrapper: - """wraps a formatting function such that it handles masked values - -:IVariables: - - `fmtfunc` : formatting function. - - `mask_rep` : string to use for masked values - """ - def __init__ (self, fmtfunc, mask_rep): - if fmtfunc is None: - self.f = str - else: - self.f = fmtfunc - self.mr = mask_rep - - def __call__ (self, item): - "Execute the call behavior." - - if hasattr(item, "_mask") and isinstance(item._mask, bool) and item._mask: - return self.mr - else: - return self.f(item) - - -_default_options = { - 'dates':None, - 'header_row':None, - 'header_char':'-', - 'header_justify':None, - 'row_char':None, - 'footer_label':None, - 'footer_char':'-', - 'footer_func':None, - 'delim':' | ', - 'justify':None, - 'prefix':'', - 'postfix':'', - 'mask_rep':'--', - 'datefmt':None, - 'fmtfunc':str, - 'wrapfunc':lambda x:x, - 'col_width':None, - 'nls':'\n', - 'output':sys.stdout, - 'fixed_width':True -} - -class Report(object): - """Create a tabular TimeSeries report with dates in the left column. -All instance variables are optional and simply serve as the defaults when calling -the report. Parameters for calling the report are the exact same as for -initialization. When calling the report, new options specified will not be saved -to the instance. - -:IVariables: - - `*tseries` : time series objects. Must all be at the same frequency, but - do not need to be aligned. - - - `dates` (DateArray, *[None]*) : dates at which values of all the series - will be output. If not specified, data will be output from the minimum - start_date to the maximum end_date of all the time series objects - - - `header_row` (list, *[None]*) : List of column headers. Specifying - the header for the date column is optional. - - - `header_char` (string, *['-']*): Character to be used for the row separator - line between the header and first row of data. None for no separator. This - is ignored if `header_row` is None. - - - `header_justify` (List of strings or single string, *[None]*) : Determines - how headers are justified. If not specified, all headers are left justified. - If a string is specified, it must be one of 'left', 'right', or 'center' - and all headers will be justified the same way. If a list is specified, each - header will be justified according to the specification for that header in - the list. Specifying the justification for the date column is header is - optional. - - - `row_char` (string, *[None]*): Character to be used for the row separator - line between each row of data. None for no separator - - - `footer_func` (List of functions or single function, *[None]*) : A function or - list of functions for summarizing each data column in the report. For example, - ma.sum to get the sum of the column. If a list of functions is provided - there must be exactly one function for each column. Do not specify a function - for the Date column. - - - `footer_char` (string, *['-']*): Character to be used for the row separator - line between the last row of data and the footer. None for no separator. This - is ignored if `footer_func` is None. - - - `footer_label` (string, *[None]*) : label for the footer row. This goes at the - end of the date column. 
This is ignored if footer_func is None. - - - `justify` (List of strings or single string, *[None]*) : Determines how data - are justified in their column. If not specified, the date column and string - columns are left justified, and everything else is right justified. If a - string is specified, it must be one of 'left', 'right', or 'center' and all - columns will be justified the same way. If a list is specified, each column - will be justified according to the specification for that column in the list - Specifying the justification for the date column is optional. - - - `prefix` (string, *['']*) : A string prepended to each printed row. - - - `postfix` (string, *['']*) : A string appended to each printed row. - - - `mask_rep` (string, *['--']*): String used to represent masked values in - output - - - `datefmt` (string, *[None]*) : Formatting string used for displaying the - dates in the date column. If None, str() is simply called on the dates - - - `fmtfunc` (List of functions or single function, *[None]*) : A function or - list of functions for formatting each data column in the report. If not - specified, str() is simply called on each item. If a list of functions is - provided, there must be exactly one function for each column. Do not specify - a function for the Date column, that is handled by the datefmt argument - - - `wrapfunc` (List of functions or single function, *[lambda x:x]*): A function - f(text) for wrapping text; each element in the column is first wrapped by this - function. Instances of wrap_onspace, wrap_onspace_strict, and wrap_always - (which are part of this module) work well for this. Eg. wrapfunc=wrap_onspace(10) - If a list is specified, each column will be wrapped according to the - specification for that column in the list. Specifying a function for the Date - column is optional - - - `col_width` (list of integers or single integer, *[None]*): use this to specify - a width for all columns (single integer), or each column individually (list - of integers). The column will be at least as wide as col_width, but may be - larger if cell contents exceed col_width. If specifying a list, you may - optionally specify the width for the Date column as the first entry - - - `output` (buffer, *[sys.stdout]*): `output` must have a write method. - - - `fixed_width` (boolean, *[True]*): If True, columns are fixed width (ie. - cells will be padded with spaces to ensure all cells in a given column are - the same width). If False, `col_width` will be ignored and cells will not - be padded.""" - - def __init__(self, *tseries, **kwargs): - - self.options = {} - self.tseries = None - if len(tseries) > 0: - self.tseries = tseries - self.options = self.__make_dict(**kwargs) - - def __make_dict(self, **kwargs): - - option_dict = copy.copy(self.options) - - option_list = list(_default_options) - - for x in [kw for kw in kwargs if kw in option_list]: - option_dict[x] = kwargs.pop(x) - - if len(kwargs) > 0: - raise KeyError("Unrecognized keyword(s): %s" % (", ".join(kwargs.keys()))) - - return option_dict - - def set_series(self, *tseries): - """set new time series for the report - -:Paramaters: - - `*tseries` : the TimeSeries objects to be used in the report""" - self.tseries = tseries - - def set_options(self, **kwargs): - """set new options or modify options in the report - -:Paramaters: - - `**kwargs` : the options to be used in the report. 
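A short usage sketch of the defaults-versus-overrides behaviour documented here, reusing series1, series2 and ma from the example block at the top of this listing:

    rep = Report(series1, series2, fmtfunc=lambda x: '%.2f' % x)
    rep()                                # render with the stored defaults
    rep(delim=', ', mask_rep='N/A')      # one-off overrides, not remembered
    rep.set_options(footer_func=ma.sum,  # persistent: becomes the new default
                    footer_label='sum')
    rep()                                # now renders with the sum footer
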
See the __doc__ - string for the Report class for valid options""" - self.options = self.__make_dict(**kwargs) - - - def __call__(self, *tseries, **kwargs): - """generate a report - -:Paramaters: - - `*tseries` : the TimeSeries objects to be used in the report. If - omitted, the previously set TimeSeries objects will be used - - `**kwargs` : the options to be used in the report. See the __doc__ - string for the Report class for valid options. If omitted, the - previously set options will be used""" - - option_dict = self.__make_dict(**kwargs) - if len(tseries) == 0: - tseries = self.tseries - - def option(kw): - return option_dict.get(kw, _default_options[kw]) - - dates = option('dates') - header_row = option('header_row') - header_char = option('header_char') - header_justify = option('header_justify') - row_char = option('row_char') - footer_label = option('footer_label') - footer_char = option('footer_char') - footer_func = option('footer_func') - delim = option('delim') - justify = option('justify') - prefix = option('prefix') - postfix = option('postfix') - mask_rep = option('mask_rep') - datefmt = option('datefmt') - fmtfunc = option('fmtfunc') - wrapfunc = option('wrapfunc') - col_width = option('col_width') - nls=option('nls') - output=option('output') - fixed_width=option('fixed_width') - - if header_row is not None: - has_header=True - if len(header_row) == len(tseries)+1: - # label for date column included - rows = [header_row] - elif len(header_row) == len(tseries): - # label for date column not included - rows = [['']+header_row] - else: - raise ValueError("mismatch with number of headers and series") - else: - has_header=False - rows=[] - - if fixed_width: - - def _standardize_justify(userspec): - if isinstance(userspec, str): - # justify all columns the the same way - return [userspec for x in range(len(tseries)+1)] - elif isinstance(userspec, list): - if len(userspec) == len(tseries): - # justification for date column not included, so set that - # to left by default - return ['left'] + userspec - else: - raise ValueError("invalid `justify` specification") - - if justify is not None: - justify = _standardize_justify(justify) - else: - # default column justification - justify = ['left'] - for ser in tseries: - if ser.dtype.char in 'SUO': justify.append('left') - else: justify.append('right') - - - if header_justify is not None: - header_justify = _standardize_justify(header_justify) - else: - # default column justification - header_justify = ['left' for x in range(len(tseries)+1)] - else: - justify = [None for x in range(len(tseries)+1)] - - if datefmt is None: - def datefmt_func(date): return str(date) - else: - def datefmt_func(date): return date.strfmt(datefmt) - - if dates is None: - tseries = ts.align_series(*tseries) - dates = ts.date_array(start_date=tseries[0].start_date, - end_date=tseries[0].end_date) - else: - tseries = ts.align_series(start_date=dates[0], end_date=dates[-1], *tseries) - - if isinstance(fmtfunc, list): - fmtfunc = [fmtfunc_wrapper(f, mask_rep) for f in fmtfunc] - else: - fmtfunc = [fmtfunc_wrapper(fmtfunc, mask_rep)]*len(tseries) - - def wrapfunc_default(func): - if func is None: return lambda x:x - else: return func - - if isinstance(wrapfunc, list): - if len(wrapfunc) == len(tseries): - wrapfunc = [lambda x: x] + wrapfunc - wrapfunc = [wrapfunc_default(func) for func in wrapfunc] - else: - wrapfunc = [wrapfunc_default(wrapfunc) for x in range(len(tseries)+1)] - - - if isinstance(col_width, list): - if len(col_width) == len(tseries): - col_width = 
[None] + col_width - else: - col_width = [col_width for x in range(len(tseries)+1)] - - def getval(series, date): - try: - val = series[date] - except IndexError: - val = ma.masked - return val - - for d in dates: - rows.append([datefmt_func(d)]+[fmtfunc[i](getval(ser, d)) for i, ser in enumerate(tseries)]) - - if footer_func is not None: - has_footer=True - if not isinstance(footer_func, list): - footer_func = [footer_func]*len(tseries) - - if footer_label is None: footer_label = [''] - else: footer_label = [footer_label] - - footer_data = [] - for i, ser in enumerate(tseries): - if footer_func[i] is None: - footer_data.append('') - else: - footer_data.append(fmtfunc[i](footer_func[i](ser[dates]))) - - rows.append(footer_label + footer_data) - else: - has_footer=False - - - def rowWrapper(row): - newRows = [wrapfunc[i](item).split('\n') for i, item in enumerate(row)] - return [[(substr or '') for substr in item] for item in map(None,*newRows)] - # break each logical row into one or more physical ones - logicalRows = [rowWrapper(row) for row in rows] - numLogicalRows = len(logicalRows) - # columns of physical rows - columns = map(None,*reduce(operator.add,logicalRows)) - numCols = len(columns) - colNums = list(range(numCols)) - - # get the maximum of each column by the string length of its items - maxWidths = [max(col_width[i], *[len(str(item)) for item in column]) - for i, column in enumerate(columns)] - - def getSeparator(char, separate): - if char is not None and separate: - return char * (len(prefix) + len(postfix) + sum(maxWidths) + \ - len(delim)*(len(maxWidths)-1)) - else: - return None - - header_separator = getSeparator(header_char, has_header) - footer_separator = getSeparator(footer_char, has_footer) - row_separator = getSeparator(row_char, True) - - # select the appropriate justify method - justify_funcs = {'center':str.center, 'right':str.rjust, 'left':str.ljust, - 'none':(lambda text, width: text)} - - if has_header and has_footer: - data_start = 1 - data_end = numLogicalRows-3 - elif has_header: - data_start = 1 - data_end = numLogicalRows-2 - elif has_footer: - data_start = 0 - data_end = numLogicalRows-3 - else: - data_start = 0 - data_end = numLogicalRows-2 - - for rowNum, physicalRows in enumerate(logicalRows): - for row in physicalRows: - if rowNum == 0 and header_separator: - _justify = header_justify - else: - _justify = justify - - output.write(prefix \ - + delim.join([justify_funcs[str(_justify[colNum]).lower()](str(item),width) for (colNum,item,width) in zip(colNums,row,maxWidths)]) \ - + postfix + nls) - - if row_separator and (data_start <= rowNum <= data_end): - output.write(row_separator + nls) - elif header_separator and rowNum < data_start: - output.write(header_separator + nls) - elif footer_separator and rowNum == data_end + 1: - output.write(footer_separator + nls) - - -class wrap_onspace(object): - """A callable word-wrap class that preserves existing line breaks -and most spaces in the text. - -:IVariables: - - `width` (int): width to wrap at. Won't split up words wider than `width` - - `nls` (str, *['\n']*): New line separator. Assumes existing line - breaks use this new line separator as well. 
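Concretely, wrapping at width 10 with the class above keeps words intact and breaks only at spaces (output traced through the reduce-based __call__ below):

    wrap = wrap_onspace(10)
    print wrap('the quick brown fox jumped')
    # the quick
    # brown fox
    # jumped
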
- -:Parameters (for __call__ method): - - `text` (str): text to wrap""" - - def __init__(self, width, nls='\n'): - self.width = width - self.nls = nls - - def __call__(self, text): - - width = self.width - nls = self.nls - - def break_or_space(line, word, width): - temp_idx = (len(line[line.rfind(nls)+1:]) + len(word.split(nls,1)[0]) >= width) - if temp_idx: - return nls - else: - return ' ' - - return reduce(lambda line, word, width=width: '%s%s%s' % - (line, - break_or_space(line, word, width), - word), - text.split(' ') - ) - - -import re -class wrap_onspace_strict(object): - """A callable word-wrap class similar to wrap_onspace, but -enforces the width constraint: words longer than width are split. - -:IVariables: - - `width` (int): width to wrap at. Will split up words wider than `width` - - `nls` (str, *['\n']*): New line separator. Assumes existing line - breaks use this new line separator as well. - -:Parameters (for __call__ method): - - `text` (str): text to wrap""" - - def __init__(self, width, nls='\n'): - self.width = width - self.nls = nls - - def __call__(self, text): - - width = self.width - nls = self.nls - - wordRegex = re.compile(r'\S{'+str(width)+r',}') - return wrap_onspace(wordRegex.sub(lambda m: wrap_always(m.group(),width, nls=nls),text),width, nls=nls) - - -import math -class wrap_always(object): - """A callable word-wrap class that wraps text on exactly width -characters. It doesn't split the text into words. - -:IVariables: - - `width` (int): width to wrap at. - - `nls` (str, *['\n']*): New line separator. - -:Parameters (for __call__ method): - - `text` (str): text to wrap""" - - def __init__(self, width, nls='\n'): - self.width = width - self.nls = nls - - def __call__(self, text): - - width = self.width - nls = self.nls - return nls.join([ text[width*i:width*(i+1)] \ - for i in xrange(int(math.ceil(1.*len(text)/width))) ]) Deleted: trunk/scipy/sandbox/timeseries/tdates.py =================================================================== --- trunk/scipy/sandbox/timeseries/tdates.py 2007-09-18 23:22:21 UTC (rev 3326) +++ trunk/scipy/sandbox/timeseries/tdates.py 2007-09-19 00:51:14 UTC (rev 3327) @@ -1,714 +0,0 @@ -""" -Classes definition for the support of individual dates and array of dates. 
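For contrast with wrap_onspace in the reportlib listing above, wrap_always slices unconditionally into width-sized pieces, splitting words if necessary:

    wrap = wrap_always(4)
    print wrap('abcdefghij')
    # abcd
    # efgh
    # ij
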
- -:author: Pierre GF Gerard-Marchant & Matt Knox -:contact: pierregm_at_uga_dot_edu - mattknox_ca_at_hotmail_dot_com -:version: $Id$ -""" -__author__ = "Pierre GF Gerard-Marchant & Matt Knox ($Author$)" -__version__ = '1.0' -__revision__ = "$Revision$" -__date__ = '$Date$' - -import datetime as dt - -import itertools -import warnings -import types - - -import numpy -from numpy import bool_, float_, int_, object_ -from numpy import ndarray -import numpy.core.numeric as numeric -import numpy.core.fromnumeric as fromnumeric -import numpy.core.numerictypes as ntypes -from numpy.core.numerictypes import generic - -import maskedarray as MA - -from parser import DateFromString, DateTimeFromString - -import const as _c -import cseries - -# initialize python callbacks for C code -cseries.set_callback_DateFromString(DateFromString) -cseries.set_callback_DateTimeFromString(DateTimeFromString) - -from cseries import Date, thisday, check_freq, check_freq_str, get_freq_group,\ - DateCalc_Error, DateCalc_RangeError -today = thisday - -__all__ = [ -'Date', 'DateArray','isDate','isDateArray', -'DateError', 'ArithmeticDateError', 'FrequencyDateError','InsufficientDateError', -'datearray','date_array', 'date_array_fromlist', 'date_array_fromrange', -'day_of_week','day_of_year','day','month','quarter','year','hour','minute', -'second','thisday','today','prevbusday','period_break', 'check_freq', -'check_freq_str','get_freq_group', 'DateCalc_Error', 'DateCalc_RangeError' - ] - - -#####--------------------------------------------------------------------------- -#---- --- Date Exceptions --- -#####--------------------------------------------------------------------------- -class DateError(Exception): - "Defines a generic DateArrayError." - def __init__ (self, value=None): - "Creates an exception." - self.value = value - def __str__(self): - "Calculates the string representation." - return str(self.value) - __repr__ = __str__ - -class InsufficientDateError(DateError): - """Defines the exception raised when there is not enough information - to create a Date object.""" - def __init__(self, msg=None): - if msg is None: - msg = "Insufficient parameters given to create a date at the given frequency" - DateError.__init__(self, msg) - -class FrequencyDateError(DateError): - """Defines the exception raised when the frequencies are incompatible.""" - def __init__(self, msg, freql=None, freqr=None): - msg += " : Incompatible frequencies!" - if not (freql is None or freqr is None): - msg += " (%s<>%s)" % (freql, freqr) - DateError.__init__(self, msg) - -class ArithmeticDateError(DateError): - """Defines the exception raised when dates are used in arithmetic expressions.""" - def __init__(self, msg=''): - msg += " Cannot use dates for arithmetics!" - DateError.__init__(self, msg) - - -#####--------------------------------------------------------------------------- -#---- --- Functions --- -#####--------------------------------------------------------------------------- - -def prevbusday(day_end_hour=18, day_end_min=0): - """Returns the previous business day (Monday-Friday) at business frequency. - -:Parameters: - - day_end_hour : (int, *[18]* ) - - day_end_min : (int, *[0]*) - -:Return values: - If it is currently Saturday or Sunday, then the preceding Friday will be - returned. If it is later than the specified day_end_hour and day_end_min, - thisday('b') will be returned. Otherwise, thisday('b')-1 will be returned. 
-""" - tempDate = dt.datetime.now() - dateNum = tempDate.hour + float(tempDate.minute)/60 - checkNum = day_end_hour + float(day_end_min)/60 - if dateNum < checkNum: - return thisday(_c.FR_BUS) - 1 - else: - return thisday(_c.FR_BUS) - - -def isDate(data): - "Returns whether `data` is an instance of Date." - return isinstance(data, Date) or \ - (hasattr(data,'freq') and hasattr(data,'value')) - - -#####--------------------------------------------------------------------------- -#---- --- DateArray --- -#####--------------------------------------------------------------------------- -ufunc_dateOK = ['add','subtract', - 'equal','not_equal','less','less_equal', 'greater','greater_equal', - 'isnan'] - -class _datearithmetics(object): - """Defines a wrapper for arithmetic methods. -Instead of directly calling a ufunc, the corresponding method of the `array._data` -object is called instead. -If `asdates` is True, a DateArray object is returned , else a regular ndarray -is returned. - """ - def __init__ (self, methodname, asdates=True): - """ -:Parameters: - - `methodname` (String) : Method name. - """ - self.methodname = methodname - self._asdates = asdates - self.__doc__ = getattr(methodname, '__doc__') - self.obj = None - # - def __get__(self, obj, objtype=None): - self.obj = obj - return self - # - def __call__ (self, other, *args, **kwargs): - "Execute the call behavior." - instance = self.obj - freq = instance.freq - if 'context' not in kwargs: - kwargs['context'] = 'DateOK' - method = getattr(super(DateArray,instance), self.methodname) - if isinstance(other, DateArray): - if other.freq != freq: - raise FrequencyDateError("Cannot operate on dates", \ - freq, other.freq) - elif isinstance(other, Date): - if other.freq != freq: - raise FrequencyDateError("Cannot operate on dates", \ - freq, other.freq) - other = other.value - elif isinstance(other, ndarray): - if other.dtype.kind not in ['i','f']: - raise ArithmeticDateError - if self._asdates: - return instance.__class__(method(other, *args), - freq=freq) - else: - return method(other, *args) - -class DateArray(ndarray): - """Defines a ndarray of dates, as ordinals. - -When viewed globally (array-wise), DateArray is an array of integers. -When viewed element-wise, DateArray is a sequence of dates. -For example, a test such as : ->>> DateArray(...) = value -will be valid only if value is an integer, not a Date -However, a loop such as : ->>> for d in DateArray(...): -accesses the array element by element. Therefore, `d` is a Date object. - """ - def __new__(cls, dates=None, freq=None, copy=False): - # Get the frequency ...... - if freq is None: - _freq = getattr(dates, 'freq', _c.FR_UND) - else: - _freq = check_freq(freq) - # Get the dates .......... 
- _dates = numeric.array(dates, copy=copy, dtype=int_, subok=1) - if _dates.ndim == 0: - _dates.shape = (1,) - _dates = _dates.view(cls) - _dates.freq = _freq - _dates._unsorted = None - return _dates - - def __array_wrap__(self, obj, context=None): - if context is None: - return self - elif context[0].__name__ not in ufunc_dateOK: - raise ArithmeticDateError, "(function %s)" % context[0].__name__ - - def __array_finalize__(self, obj): - self.freq = getattr(obj, 'freq', _c.FR_UND) - self._unsorted = getattr(obj,'_unsorted',None) - self._cachedinfo = dict(toobj=None, tostr=None, toord=None, - steps=None, full=None, hasdups=None) - if hasattr(obj,'_cachedinfo'): - self._cachedinfo.update(obj._cachedinfo) - return - - def __getitem__(self, indx): - reset_full = True - # Determine what kind of index is used - if isinstance(indx, Date): - indx = self.find_dates(indx) - reset_full = False - elif numeric.asarray(indx).dtype.kind == 'O': - try: - indx = self.find_dates(indx) - except AttributeError: - pass - # Select the data - r = ndarray.__getitem__(self, indx) - # Select the corresponding unsorted indices (if needed) - if self._unsorted is not None: - unsorted = self._unsorted[indx] - # Case 1. A simple integer - if isinstance(r, (generic, int)): - return Date(self.freq, value=r) - elif hasattr(r, 'size') and r.size == 1: - # need to check if it has a size attribute for situations - # like when the datearray is the data for a maskedarray - # or some other subclass of ndarray with wierd getitem - # behaviour - return Date(self.freq, value=r.item()) - else: - if hasattr(r, '_cachedinfo'): - _cache = r._cachedinfo - _cache.update(dict([(k,_cache[k][indx]) - for k in ('toobj', 'tostr', 'toord') - if _cache[k] is not None])) - _cache['steps'] = None - if reset_full: - _cache['full'] = None - _cache['hasdups'] = None - return r - - def __getslice__(self, i, j): - r = ndarray.__getslice__(self, i, j) - if hasattr(r, '_cachedinfo'): - _cache = r._cachedinfo - _cache.update(dict([(k,_cache[k][i:j]) - for k in ('toobj', 'tostr', 'toord') - if _cache[k] is not None])) - _cache['steps'] = None - return r - - def __repr__(self): - return ndarray.__repr__(self)[:-1] + \ - ",\n freq='%s')" % self.freqstr - #...................................................... - __add__ = _datearithmetics('__add__', asdates=True) - __radd__ = _datearithmetics('__add__', asdates=True) - __sub__ = _datearithmetics('__sub__', asdates=True) - __rsub__ = _datearithmetics('__rsub__', asdates=True) - __le__ = _datearithmetics('__le__', asdates=False) - __lt__ = _datearithmetics('__lt__', asdates=False) - __ge__ = _datearithmetics('__ge__', asdates=False) - __gt__ = _datearithmetics('__gt__', asdates=False) - __eq__ = _datearithmetics('__eq__', asdates=False) - __ne__ = _datearithmetics('__ne__', asdates=False) - #...................................................... - @property - def freqstr(self): - "Returns the frequency string code." - return check_freq_str(self.freq) - @property - def day(self): - "Returns the day of month." - return self.__getdateinfo__('D') - @property - def day_of_week(self): - "Returns the day of week." - return self.__getdateinfo__('W') - @property - def day_of_year(self): - "Returns the day of year." - return self.__getdateinfo__('R') - @property - def month(self): - "Returns the month." - return self.__getdateinfo__('M') - @property - def quarter(self): - "Returns the quarter." - return self.__getdateinfo__('Q') - @property - def year(self): - "Returns the year." 
- return self.__getdateinfo__('Y') - @property - def qyear(self): - """For quarterly frequency dates, returns the year corresponding to the -year end (start) month. When using QTR or QTR-E based quarterly -frequencies, this is the fiscal year in a financial context. - -For non-quarterly dates, this simply returns the year of the date.""" - - return self.__getdateinfo__('F') - @property - def second(self): - "Returns the seconds." - return self.__getdateinfo__('S') - @property - def minute(self): - "Returns the minutes." - return self.__getdateinfo__('T') - @property - def hour(self): - "Returns the hour." - return self.__getdateinfo__('H') - @property - def week(self): - "Returns the week." - return self.__getdateinfo__('I') - - days = day - weekdays = day_of_week - yeardays = day_of_year - months = month - quarters = quarter - years = year - seconds = second - minutes = minute - hours = hour - weeks = week - - def __getdateinfo__(self, info): - return numeric.asarray(cseries.DA_getDateInfo(numeric.asarray(self), - self.freq, info, - int(self.isfull())), - dtype=int_) - __getDateInfo = __getdateinfo__ - #.... Conversion methods .................... - # - def tovalue(self): - "Converts the dates to integer values." - return numeric.asarray(self) - # - def toordinal(self): - "Converts the dates from values to ordinals." - # Note: we better try to cache the result - if self._cachedinfo['toord'] is None: -# diter = (Date(self.freq, value=d).toordinal() for d in self) - if self.freq == _c.FR_UND: - diter = (d.value for d in self) - else: - diter = (d.toordinal() for d in self) - toord = numeric.fromiter(diter, dtype=float_) - self._cachedinfo['toord'] = toord - return self._cachedinfo['toord'] - # - def tostring(self): - "Converts the dates to strings." - # Note: we better cache the result - if self._cachedinfo['tostr'] is None: - firststr = str(self[0]) - if self.size > 0: - ncharsize = len(firststr) - tostr = numpy.fromiter((str(d) for d in self), - dtype='|S%i' % ncharsize) - else: - tostr = firststr - self._cachedinfo['tostr'] = tostr - return self._cachedinfo['tostr'] - # - def asfreq(self, freq=None, relation="AFTER"): - "Converts the dates to another frequency." - # Note: As we define a new object, we don't need caching - if freq is None or freq == _c.FR_UND: - return self - tofreq = check_freq(freq) - if tofreq == self.freq: - return self - _rel = relation.upper()[0] - fromfreq = self.freq - if fromfreq == _c.FR_UND: - fromfreq = _c.FR_DAY - new = cseries.DA_asfreq(numeric.asarray(self), fromfreq, tofreq, _rel) - return DateArray(new, freq=freq) - - #...................................................... - def find_dates(self, *dates): - "Returns the indices corresponding to given dates, as an array." - - #http://aspn.activestate.com/ASPN/Mail/Message/python-tutor/2302348 - def flatten_sequence(iterable): - """Flattens a compound of nested iterables.""" - itm = iter(iterable) - for elm in itm: - if hasattr(elm,'__iter__') and not isinstance(elm, basestring): - for f in flatten_sequence(elm): - yield f - else: - yield elm - - def flatargs(*args): - "Flattens the arguments." - if not hasattr(args, '__iter__'): - return args - else: - return flatten_sequence(args) - - ifreq = self.freq - c = numpy.zeros(self.shape, bool_) - for d in flatargs(*dates): - if d.freq != ifreq: - d = d.asfreq(ifreq) - c += (self == d.value) - c = c.nonzero() - if fromnumeric.size(c) == 0: - raise IndexError, "Date out of bounds!" 
- return c - - def date_to_index(self, date): - "Returns the index corresponding to one given date, as an integer." - if self.isvalid(): - index = date.value - self[0].value - if index < 0 or index > self.size: - raise IndexError, "Date out of bounds!" - return index - else: - index_asarray = (self == date.value).nonzero() - if fromnumeric.size(index_asarray) == 0: - raise IndexError, "Date out of bounds!" - return index_asarray[0][0] - #...................................................... - def get_steps(self): - """Returns the time steps between consecutive dates. - The timesteps have the same unit as the frequency of the series.""" - if self.freq == _c.FR_UND: - warnings.warn("Undefined frequency: assuming integers!") - if self._cachedinfo['steps'] is None: - _cached = self._cachedinfo - val = numeric.asarray(self).ravel() - if val.size > 1: - steps = val[1:] - val[:-1] - if _cached['full'] is None: - _cached['full'] = (steps.max() == 1) - if _cached['hasdups'] is None: - _cached['hasdups'] = (steps.min() == 0) - else: - _cached['full'] = True - _cached['hasdups'] = False - steps = numeric.array([], dtype=int_) - self._cachedinfo['steps'] = steps - return self._cachedinfo['steps'] - - def has_missing_dates(self): - "Returns whether the DateArray have missing dates." - if self._cachedinfo['full'] is None: - steps = self.get_steps() - return not(self._cachedinfo['full']) - - def isfull(self): - "Returns whether the DateArray has no missing dates." - if self._cachedinfo['full'] is None: - steps = self.get_steps() - return self._cachedinfo['full'] - - def has_duplicated_dates(self): - "Returns whether the DateArray has duplicated dates." - if self._cachedinfo['hasdups'] is None: - steps = self.get_steps() - return self._cachedinfo['hasdups'] - - def isvalid(self): - "Returns whether the DateArray is valid: no missing/duplicated dates." - return (self.isfull() and not self.has_duplicated_dates()) - #...................................................... - -#............................ - - -#####--------------------------------------------------------------------------- -#---- --- DateArray functions --- -#####--------------------------------------------------------------------------- -def isDateArray(a): - "Tests whether an array is a DateArray object." - return isinstance(a,DateArray) - -def guess_freq(dates): - """Tries to estimate the frequency of a list of dates, by checking the steps - between consecutive dates The steps should be in days. - Returns a frequency code (alpha character).""" - ddif = numeric.asarray(numpy.diff(dates)) - ddif.sort() - if ddif.size == 0: - fcode = _c.FR_UND - elif ddif[0] == ddif[-1] == 1.: - fcode = _c.FR_DAY - elif (ddif[0] == 1.) and (ddif[-1] == 3.): - fcode = _c.FR_BUS - elif (ddif[0] > 3.) and (ddif[-1] == 7.): - fcode = _c.FR_WK - elif (ddif[0] >= 28.) and (ddif[-1] <= 31.): - fcode = _c.FR_MTH - elif (ddif[0] >= 90.) and (ddif[-1] <= 92.): - fcode = _c.FR_QTR - elif (ddif[0] >= 365.) and (ddif[-1] <= 366.): - fcode = _c.FR_ANN - elif numpy.abs(24.*ddif[0] - 1) <= 1e-5 and \ - numpy.abs(24.*ddif[-1] - 1) <= 1e-5: - fcode = _c.FR_HR - elif numpy.abs(1440.*ddif[0] - 1) <= 1e-5 and \ - numpy.abs(1440.*ddif[-1] - 1) <= 1e-5: - fcode = _c.FR_MIN - elif numpy.abs(86400.*ddif[0] - 1) <= 1e-5 and \ - numpy.abs(86400.*ddif[-1] - 1) <= 1e-5: - fcode = _c.FR_SEC - else: - warnings.warn("Unable to estimate the frequency! 
%.3f<>%.3f" %\ - (ddif[0], ddif[-1])) - fcode = _c.FR_UND - return fcode - - -def _listparser(dlist, freq=None): - "Constructs a DateArray from a list." - dlist = numeric.asarray(dlist) - idx = dlist.argsort() - dlist = dlist[idx] - if dlist.ndim == 0: - dlist.shape = (1,) - # Case #1: dates as strings ................. - if dlist.dtype.kind in 'SU': - #...construct a list of ordinals - ords = numpy.fromiter((DateTimeFromString(s).toordinal() for s in dlist), - float_) - ords += 1 - #...try to guess the frequency - if freq is None or freq == _c.FR_UND: - freq = guess_freq(ords) - #...construct a list of dates - for s in dlist: - x = Date(freq, string=s) - dates = [Date(freq, string=s) for s in dlist] - # Case #2: dates as numbers ................. - elif dlist.dtype.kind in 'if': - #...hopefully, they are values - if freq is None or freq == _c.FR_UND: - freq = guess_freq(dlist) - dates = dlist - # Case #3: dates as objects ................. - elif dlist.dtype.kind == 'O': - template = dlist[0] - #...as Date objects - if isinstance(template, Date): - dates = numpy.fromiter((d.value for d in dlist), int_) - #...as mx.DateTime objects - elif hasattr(template,'absdays'): - # no freq given: try to guess it from absdays - if freq == _c.FR_UND: - ords = numpy.fromiter((s.absdays for s in dlist), float_) - ords += 1 - freq = guess_freq(ords) - dates = [Date(freq, datetime=m) for m in dlist] - #...as datetime objects - elif hasattr(template, 'toordinal'): - ords = numpy.fromiter((d.toordinal() for d in dlist), float_) - if freq == _c.FR_UND: - freq = guess_freq(ords) - dates = [Date(freq, datetime=dt.datetime.fromordinal(a)) for a in ords] - # - result = DateArray(dates, freq) - result._unsorted = idx - return result - - -def date_array(dlist=None, start_date=None, end_date=None, length=None, - freq=None): - """Constructs a DateArray from: - - a starting date and either an ending date or a given length. - - a list of dates. - """ - freq = check_freq(freq) - # Case #1: we have a list ................... - if dlist is not None: - # Already a DateArray.................... - if isinstance(dlist, DateArray): - if (freq != _c.FR_UND) and (dlist.freq != check_freq(freq)): - return dlist.asfreq(freq) - else: - return dlist - # Make sure it's a sequence, else that's a start_date - if hasattr(dlist,'__len__'): - return _listparser(dlist, freq) - elif start_date is not None: - if end_date is not None: - dmsg = "What starting date should be used ? '%s' or '%s' ?" - raise DateError, dmsg % (dlist, start_date) - else: - (start_date, end_date) = (dlist, start_date) - else: - start_date = dlist - # Case #2: we have a starting date .......... - if start_date is None: - if length == 0: - return DateArray([], freq=freq) - raise InsufficientDateError - if not isDate(start_date): - dmsg = "Starting date should be a valid Date instance! " - dmsg += "(got '%s' instead)" % type(start_date) - raise DateError, dmsg - # Check if we have an end_date - if end_date is None: - if length is None: -# raise ValueError,"No length precised!" - length = 1 - else: - if not isDate(end_date): - raise DateError, "Ending date should be a valid Date instance!" - length = int(end_date - start_date) + 1 -# dlist = [(start_date+i).value for i in range(length)] - dlist = numeric.arange(length, dtype=int_) - dlist += start_date.value - if freq == _c.FR_UND: - freq = start_date.freq - return DateArray(dlist, freq=freq) -datearray = date_array - -def date_array_fromlist(dlist, freq=None): - "Constructs a DateArray from a list of dates." 
- return date_array(dlist=dlist, freq=freq) - -def date_array_fromrange(start_date, end_date=None, length=None, - freq=None): - """Constructs a DateArray from a starting date and either an ending date or - a length.""" - return date_array(start_date=start_date, end_date=end_date, - length=length, freq=freq) - -#####--------------------------------------------------------------------------- -#---- --- Definition of functions from the corresponding methods --- -#####--------------------------------------------------------------------------- -class _frommethod(object): - """Defines functions from existing MaskedArray methods. -:ivar _methodname (String): Name of the method to transform. - """ - def __init__(self, methodname): - self._methodname = methodname - self.__doc__ = self.getdoc() - def getdoc(self): - "Returns the doc of the function (from the doc of the method)." - try: - return getattr(DateArray, self._methodname).__doc__ - except AttributeError: - return "???" - # - def __call__(self, caller, *args, **params): - if hasattr(caller, self._methodname): - method = getattr(caller, self._methodname) - # If method is not callable, it's a property, and don't call it - if hasattr(method, '__call__'): - return method.__call__(*args, **params) - return method - method = getattr(fromnumeric.asarray(caller), self._methodname) - try: - return method(*args, **params) - except SystemError: - return getattr(numpy,self._methodname).__call__(caller, *args, **params) -#............................ -day_of_week = _frommethod('day_of_week') -day_of_year = _frommethod('day_of_year') -year = _frommethod('year') -quarter = _frommethod('quarter') -month = _frommethod('month') -day = _frommethod('day') -hour = _frommethod('hour') -minute = _frommethod('minute') -second = _frommethod('second') - - -def period_break(dates, period): - """Returns the indices where the given period changes. - -:Parameters: - dates : DateArray - Array of dates to monitor. - period : string - Name of the period to monitor. - """ - current = getattr(dates, period) - previous = getattr(dates-1, period) - return (current - previous).nonzero()[0] - - -################################################################################ - -if __name__ == '__main__': - import maskedarray.testutils - from maskedarray.testutils import assert_equal - - if 1: - dlist = ['2007-%02i' % i for i in range(1,5)+range(7,13)] - mdates = date_array_fromlist(dlist, 'M') - - if 2: - dlist = ['2007-01','2007-03','2007-04','2007-02'] - mdates = date_array_fromlist(dlist, 'M') Deleted: trunk/scipy/sandbox/timeseries/tests/test_multitimeseries.py =================================================================== --- trunk/scipy/sandbox/timeseries/tests/test_multitimeseries.py 2007-09-18 23:22:21 UTC (rev 3326) +++ trunk/scipy/sandbox/timeseries/tests/test_multitimeseries.py 2007-09-19 00:51:14 UTC (rev 3327) @@ -1,189 +0,0 @@ -# pylint: disable-msg=W0611, W0612, W0511,R0201 -"""Tests suite for mrecarray. 
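Most of the cached bookkeeping in the DateArray class deleted above (get_steps, isfull, has_duplicated_dates, isvalid) rests on one observation: for integer date values, a consecutive difference of 1 everywhere means the array is full, and a difference of 0 anywhere means a duplicate. A minimal numpy sketch of that logic, with made-up values:

    import numpy as np

    vals = np.array([1, 2, 3, 5, 5, 6])   # integer date values
    steps = vals[1:] - vals[:-1]          # what get_steps caches
    is_full = (steps.max() == 1)          # False: 4 is missing
    has_dups = (steps.min() == 0)         # True: 5 appears twice
    is_valid = is_full and not has_dups   # the isvalid() criterion
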
- -:author: Pierre Gerard-Marchant & Matt Knox -:contact: pierregm_at_uga_dot_edu & mattknox_ca_at_hotmail_dot_com -:version: $Id$ -""" -__author__ = "Pierre GF Gerard-Marchant & Matt Knox ($Author$)" -__version__ = '1.0' -__revision__ = "$Revision$" -__date__ = '$Date$' - -import types - -import numpy -import numpy.core.fromnumeric as fromnumeric -from numpy.testing import NumpyTest, NumpyTestCase -from numpy.testing.utils import build_err_msg - -import maskedarray.testutils -from maskedarray.testutils import assert_equal, assert_array_equal - -import maskedarray.core as MA -import maskedarray.mrecords as MR -from maskedarray.mrecords import addfield - -from maskedarray.core import getmaskarray, nomask, masked_array - -from timeseries import tmulti -from timeseries.tmulti import MultiTimeSeries, TimeSeries,\ - fromarrays, fromtextfile, fromrecords, \ - date_array, time_series - - -#.............................................................................. -class test_mrecords(NumpyTestCase): - "Base test class for MaskedArrays." - def __init__(self, *args, **kwds): - NumpyTestCase.__init__(self, *args, **kwds) - self.setup() - - def setup(self): - "Generic setup" - d = numpy.arange(5) - m = MA.make_mask([1,0,0,1,1]) - base_d = numpy.r_[d,d[::-1]].reshape(2,-1).T - base_m = numpy.r_[[m, m[::-1]]].T - base = MA.array(base_d, mask=base_m) - mrec = MR.fromarrays(base.T,) - dlist = ['2007-%02i' % (i+1) for i in d] - dates = date_array(dlist) - ts = time_series(mrec,dates) - mts = MultiTimeSeries(mrec,dates) - self.data = [d, m, mrec, dlist, dates, ts, mts] - - def test_get(self): - "Tests fields retrieval" - [d, m, mrec, dlist, dates, ts, mts] = self.data - assert(isinstance(mts['f0'], TimeSeries)) - assert_equal(mts['f0']._dates, dates) - assert_equal(mts['f0']._data, d) - assert_equal(mts['f0']._mask, m) - # - assert(isinstance(mts[0], MultiTimeSeries)) - assert_equal(mts._data[0], mrec._data[0]) - # We can't use assert_equal here, as it tries to convert the tuple into a singleton -# assert(mts[0]._data.view(numpyndarray) == mrec[0]) - assert_equal(numpy.asarray(mts._data[0]), mrec[0]) - assert_equal(mts._dates[0], dates[0]) - assert_equal(mts[0]._dates, dates[0]) - # - assert(isinstance(mts['2007-01'], MultiTimeSeries)) - assert(mts['2007-01']._data == mrec[0]) - assert_equal(mts['2007-01']._dates, dates[0]) - # - assert(isinstance(mts.f0, TimeSeries)) - assert_equal(mts.f0, time_series(d, dates=dates, mask=m)) - assert_equal(mts.f1, time_series(d[::-1], dates=dates, mask=m[::-1])) - assert((mts._fieldmask == numpy.core.records.fromarrays([m, m[::-1]])).all()) - assert_equal(mts._mask, numpy.r_[[m,m[::-1]]].all(0)) - assert_equal(mts.f0[1], mts[1].f0) - # - assert(isinstance(mts[:2], MultiTimeSeries)) - assert_equal(mts[:2]._data.f0, mrec[:2].f0) - assert_equal(mts[:2]._data.f1, mrec[:2].f1) - assert_equal(mts[:2]._dates, dates[:2]) - - def test_set(self): - "Tests setting fields/attributes." 
- [d, m, mrec, dlist, dates, ts, mts] = self.data - mts.f0._data[:] = 5 - assert_equal(mts['f0']._data, [5,5,5,5,5]) - mts.f0 = 1 - assert_equal(mts['f0']._data, [1]*5) - assert_equal(getmaskarray(mts['f0']), [0]*5) - mts.f1 = MA.masked - assert_equal(mts.f1.mask, [1]*5) - assert_equal(getmaskarray(mts['f1']), [1]*5) - mts._mask = MA.masked - assert_equal(getmaskarray(mts['f1']), [1]*5) - assert_equal(mts['f0']._mask, mts['f1']._mask) - mts._mask = MA.nomask - assert_equal(getmaskarray(mts['f1']), [0]*5) - assert_equal(mts['f0']._mask, mts['f1']._mask) - - def test_setslices(self): - "Tests setting slices." - [d, m, mrec, dlist, dates, ts, mts] = self.data - # - mts[:2] = 5 - assert_equal(mts.f0._data, [5,5,2,3,4]) - assert_equal(mts.f1._data, [5,5,2,1,0]) - assert_equal(mts.f0._mask, [0,0,0,1,1]) - assert_equal(mts.f1._mask, [0,0,0,0,1]) - mts.harden_mask() - mts[-2:] = 5 - assert_equal(mts.f0._data, [5,5,2,3,4]) - assert_equal(mts.f1._data, [5,5,2,5,0]) - assert_equal(mts.f0._mask, [0,0,0,1,1]) - assert_equal(mts.f1._mask, [0,0,0,0,1]) - - def test_hardmask(self): - "Test hardmask" - [d, m, mrec, dlist, dates, ts, mts] = self.data - mts.harden_mask() - assert(mts._hardmask) - mts._mask = nomask - assert_equal(mts._mask, numpy.r_[[m,m[::-1]]].all(0)) - mts.soften_mask() - assert(not mts._hardmask) - mts._mask = nomask - assert(mts['f1']._mask is nomask) - assert_equal(mts['f0']._mask,mts['f1']._mask) - - def test_addfield(self): - "Tests addfield" - [d, m, mrec, dlist, dates, ts, mts] = self.data - mts = addfield(mts, masked_array(d+10, mask=m[::-1])) - assert_equal(mts.f2, d+10) - assert_equal(mts.f2._mask, m[::-1]) - - def test_fromrecords(self): - "Test from recarray." - [d, m, mrec, dlist, dates, ts, mts] = self.data - nrec = numpy.core.records.fromarrays(numpy.r_[[d,d[::-1]]]) - mrecfr = fromrecords(nrec.tolist(), dates=dates) - assert_equal(mrecfr.f0, mrec.f0) - assert_equal(mrecfr.dtype, mrec.dtype) - #.................... - altrec = [tuple([d,]+list(r)) for (d,r) in zip(dlist,nrec)] - mrecfr = fromrecords(altrec, names='dates,f0,f1') - assert_equal(mrecfr.f0, mrec.f0) - assert_equal(mrecfr.dtype, mrec.dtype) - #.................... - tmp = MultiTimeSeries(mts._series[::-1], dates=mts.dates) - mrecfr = fromrecords(tmp) - assert_equal(mrecfr.f0, mrec.f0[::-1]) - - def test_fromtextfile(self): - "Tests reading from a text file." 
- fcontent = """# -'Dates', 'One (S)','Two (I)','Three (F)','Four (M)','Five (-)','Six (C)' -'2007-01', 'strings',1,1.0,'mixed column',,1 -'2007-02', 'with embedded "double quotes"',2,2.0,1.0,,1 -'2007-03', 'strings',3,3.0E5,3,,1 -'2007-05','strings',4,-1e-10,,,1 -""" - import os - from datetime import datetime - fname = 'tmp%s' % datetime.now().strftime("%y%m%d%H%M%S%s") - f = open(fname, 'w') - f.write(fcontent) - f.close() - mrectxt = fromtextfile(fname,delimitor=',',varnames='ABCDEFG', - dates_column=0) - os.unlink(fname) - # - dlist = ['2007-%02i' % i for i in (1,2,3,5)] - assert(isinstance(mrectxt, MultiTimeSeries)) - assert_equal(mrectxt._dates, date_array(dlist,'M')) - assert_equal(mrectxt.dtype.names, ['B','C','D','E','F','G']) - assert_equal(mrectxt.G, [1,1,1,1]) - assert_equal(mrectxt.F._mask, [1,1,1,1]) - assert_equal(mrectxt.D, [1,2,3.e+5,-1e-10]) - -############################################################################### -#------------------------------------------------------------------------------ -if __name__ == "__main__": - NumpyTest().run() \ No newline at end of file Copied: trunk/scipy/sandbox/timeseries/tests/test_trecords.py (from rev 3319, trunk/scipy/sandbox/timeseries/tests/test_multitimeseries.py) =================================================================== --- trunk/scipy/sandbox/timeseries/tests/test_multitimeseries.py 2007-09-18 15:04:50 UTC (rev 3319) +++ trunk/scipy/sandbox/timeseries/tests/test_trecords.py 2007-09-19 00:51:14 UTC (rev 3327) @@ -0,0 +1,189 @@ +# pylint: disable-msg=W0611, W0612, W0511,R0201 +"""Tests suite for trecords. + +:author: Pierre Gerard-Marchant & Matt Knox +:contact: pierregm_at_uga_dot_edu & mattknox_ca_at_hotmail_dot_com +:version: $Id$ +""" +__author__ = "Pierre GF Gerard-Marchant & Matt Knox ($Author$)" +__version__ = '1.0' +__revision__ = "$Revision$" +__date__ = '$Date$' + +import types + +import numpy +import numpy.core.fromnumeric as fromnumeric +from numpy.testing import NumpyTest, NumpyTestCase +from numpy.testing.utils import build_err_msg + +import maskedarray.testutils +from maskedarray.testutils import assert_equal, assert_array_equal + +import maskedarray.core as MA +import maskedarray.mrecords as MR +from maskedarray.mrecords import addfield + +from maskedarray.core import getmaskarray, nomask, masked_array + +from timeseries import trecords +from timeseries.trecords import TimeSeriesRecords, TimeSeries,\ + fromarrays, fromtextfile, fromrecords, \ + date_array, time_series + + +#.............................................................................. +class test_mrecords(NumpyTestCase): + "Base test class for MaskedArrays." 
+ def __init__(self, *args, **kwds): + NumpyTestCase.__init__(self, *args, **kwds) + self.setup() + + def setup(self): + "Generic setup" + d = numpy.arange(5) + m = MA.make_mask([1,0,0,1,1]) + base_d = numpy.r_[d,d[::-1]].reshape(2,-1).T + base_m = numpy.r_[[m, m[::-1]]].T + base = MA.array(base_d, mask=base_m) + mrec = MR.fromarrays(base.T,) + dlist = ['2007-%02i' % (i+1) for i in d] + dates = date_array(dlist) + ts = time_series(mrec,dates) + mts = TimeSeriesRecords(mrec,dates) + self.data = [d, m, mrec, dlist, dates, ts, mts] + + def test_get(self): + "Tests fields retrieval" + [d, m, mrec, dlist, dates, ts, mts] = self.data + assert(isinstance(mts['f0'], TimeSeries)) + assert_equal(mts['f0']._dates, dates) + assert_equal(mts['f0']._data, d) + assert_equal(mts['f0']._mask, m) + # + assert(isinstance(mts[0], TimeSeriesRecords)) + assert_equal(mts._data[0], mrec._data[0]) + # We can't use assert_equal here, as it tries to convert the tuple into a singleton +# assert(mts[0]._data.view(numpyndarray) == mrec[0]) + assert_equal(numpy.asarray(mts._data[0]), mrec[0]) + assert_equal(mts._dates[0], dates[0]) + assert_equal(mts[0]._dates, dates[0]) + # + assert(isinstance(mts['2007-01'], TimeSeriesRecords)) + assert(mts['2007-01']._data == mrec[0]) + assert_equal(mts['2007-01']._dates, dates[0]) + # + assert(isinstance(mts.f0, TimeSeries)) + assert_equal(mts.f0, time_series(d, dates=dates, mask=m)) + assert_equal(mts.f1, time_series(d[::-1], dates=dates, mask=m[::-1])) + assert((mts._fieldmask == numpy.core.records.fromarrays([m, m[::-1]])).all()) + assert_equal(mts._mask, numpy.r_[[m,m[::-1]]].all(0)) + assert_equal(mts.f0[1], mts[1].f0) + # + assert(isinstance(mts[:2], TimeSeriesRecords)) + assert_equal(mts[:2]._data.f0, mrec[:2].f0) + assert_equal(mts[:2]._data.f1, mrec[:2].f1) + assert_equal(mts[:2]._dates, dates[:2]) + + def test_set(self): + "Tests setting fields/attributes." + [d, m, mrec, dlist, dates, ts, mts] = self.data + mts.f0._data[:] = 5 + assert_equal(mts['f0']._data, [5,5,5,5,5]) + mts.f0 = 1 + assert_equal(mts['f0']._data, [1]*5) + assert_equal(getmaskarray(mts['f0']), [0]*5) + mts.f1 = MA.masked + assert_equal(mts.f1.mask, [1]*5) + assert_equal(getmaskarray(mts['f1']), [1]*5) + mts._mask = MA.masked + assert_equal(getmaskarray(mts['f1']), [1]*5) + assert_equal(mts['f0']._mask, mts['f1']._mask) + mts._mask = MA.nomask + assert_equal(getmaskarray(mts['f1']), [0]*5) + assert_equal(mts['f0']._mask, mts['f1']._mask) + + def test_setslices(self): + "Tests setting slices." 
+ [d, m, mrec, dlist, dates, ts, mts] = self.data + # + mts[:2] = 5 + assert_equal(mts.f0._data, [5,5,2,3,4]) + assert_equal(mts.f1._data, [5,5,2,1,0]) + assert_equal(mts.f0._mask, [0,0,0,1,1]) + assert_equal(mts.f1._mask, [0,0,0,0,1]) + mts.harden_mask() + mts[-2:] = 5 + assert_equal(mts.f0._data, [5,5,2,3,4]) + assert_equal(mts.f1._data, [5,5,2,5,0]) + assert_equal(mts.f0._mask, [0,0,0,1,1]) + assert_equal(mts.f1._mask, [0,0,0,0,1]) + + def test_hardmask(self): + "Test hardmask" + [d, m, mrec, dlist, dates, ts, mts] = self.data + mts.harden_mask() + assert(mts._hardmask) + mts._mask = nomask + assert_equal(mts._mask, numpy.r_[[m,m[::-1]]].all(0)) + mts.soften_mask() + assert(not mts._hardmask) + mts._mask = nomask + assert(mts['f1']._mask is nomask) + assert_equal(mts['f0']._mask,mts['f1']._mask) + + def test_addfield(self): + "Tests addfield" + [d, m, mrec, dlist, dates, ts, mts] = self.data + mts = addfield(mts, masked_array(d+10, mask=m[::-1])) + assert_equal(mts.f2, d+10) + assert_equal(mts.f2._mask, m[::-1]) + + def test_fromrecords(self): + "Test from recarray." + [d, m, mrec, dlist, dates, ts, mts] = self.data + nrec = numpy.core.records.fromarrays(numpy.r_[[d,d[::-1]]]) + mrecfr = fromrecords(nrec.tolist(), dates=dates) + assert_equal(mrecfr.f0, mrec.f0) + assert_equal(mrecfr.dtype, mrec.dtype) + #.................... + altrec = [tuple([d,]+list(r)) for (d,r) in zip(dlist,nrec)] + mrecfr = fromrecords(altrec, names='dates,f0,f1') + assert_equal(mrecfr.f0, mrec.f0) + assert_equal(mrecfr.dtype, mrec.dtype) + #.................... + tmp = TimeSeriesRecords(mts._series[::-1], dates=mts.dates) + mrecfr = fromrecords(tmp) + assert_equal(mrecfr.f0, mrec.f0[::-1]) + + def test_fromtextfile(self): + "Tests reading from a text file." + fcontent = """# +'Dates', 'One (S)','Two (I)','Three (F)','Four (M)','Five (-)','Six (C)' +'2007-01', 'strings',1,1.0,'mixed column',,1 +'2007-02', 'with embedded "double quotes"',2,2.0,1.0,,1 +'2007-03', 'strings',3,3.0E5,3,,1 +'2007-05','strings',4,-1e-10,,,1 +""" + import os + from datetime import datetime + fname = 'tmp%s' % datetime.now().strftime("%y%m%d%H%M%S%s") + f = open(fname, 'w') + f.write(fcontent) + f.close() + mrectxt = fromtextfile(fname,delimitor=',',varnames='ABCDEFG', + dates_column=0) + os.unlink(fname) + # + dlist = ['2007-%02i' % i for i in (1,2,3,5)] + assert(isinstance(mrectxt, TimeSeriesRecords)) + assert_equal(mrectxt._dates, date_array(dlist,'M')) + assert_equal(mrectxt.dtype.names, ['B','C','D','E','F','G']) + assert_equal(mrectxt.G, [1,1,1,1]) + assert_equal(mrectxt.F._mask, [1,1,1,1]) + assert_equal(mrectxt.D, [1,2,3.e+5,-1e-10]) + +############################################################################### +#------------------------------------------------------------------------------ +if __name__ == "__main__": + NumpyTest().run() \ No newline at end of file Deleted: trunk/scipy/sandbox/timeseries/textras.py =================================================================== --- trunk/scipy/sandbox/timeseries/textras.py 2007-09-18 23:22:21 UTC (rev 3326) +++ trunk/scipy/sandbox/timeseries/textras.py 2007-09-19 00:51:14 UTC (rev 3327) @@ -1,106 +0,0 @@ -""" -Extras functions for time series. 
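The isleapyear helper deleted below vectorizes the Gregorian rule: a year is leap when divisible by 4, except century years, which are leap only when divisible by 400. A quick sanity check of the same expression on plain numpy arrays:

    import numpy as np

    year = np.array([1900, 2000, 2004, 2007])
    leap = (year % 400 == 0) | ((year % 4 == 0) & (year % 100 > 0))
    # array([False, True, True, False]): 1900 is not a leap year,
    # 2000 is, since century years are leap only when divisible by 400.
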
- -:author: Pierre GF Gerard-Marchant & Matt Knox -:contact: pierregm_at_uga_dot_edu - mattknox_ca_at_hotmail_dot_com -:version: $Id$ -""" -__author__ = "Pierre GF Gerard-Marchant & Matt Knox ($Author$)" -__version__ = '1.0' -__revision__ = "$Revision$" -__date__ = '$Date$' - - -import numpy -import maskedarray -from maskedarray import masked - -import const as _c -from tseries import TimeSeries - - - -__all__ = ['isleapyear', 'count_missing', 'accept_atmost_missing'] - -#.............................................................................. -def isleapyear(year): - """Returns true if year is a leap year. - -:Input: - year : integer / sequence - A given (list of) year(s). - """ - year = numpy.asarray(year) - return numpy.logical_or(year % 400 == 0, - numpy.logical_and(year % 4 == 0, year % 100 > 0)) - -#.............................................................................. -def count_missing(series): - """Returns the number of missing data per period. - - -Notes ------ -This function is designed to return the actual number of missing values when -a series has been converted from one frequency to a smaller frequency. - -For example, converting a 12-month-long daily series to months will yield -a (12x31) array, with missing values in February, April, June... -count_missing will discard these extra missing values. - """ - if not isinstance(series, TimeSeries): - raise TypeError, "The input data should be a valid TimeSeries object! "\ - "(got %s instead)" % type(series) - if series.ndim == 1: - return len(series) - series.count() - elif series.ndim != 2: - raise NotImplementedError - # - missing = series.shape[-1] - series.count(axis=-1) - period = series.shape[-1] - freq = series.freq - if (period == 366) and (freq//_c.FR_ANN == 1): - # row: years, cols: days - missing -= ~isleapyear(series.year) - elif period == 31 and (freq//_c.FR_MTH == 1): - months = series.months - # row: months, cols: days - missing[numpy.array([m in [4,6,9,11] for m in months])] -= 1 - isfeb = (months == 2) - missing[isfeb] -= 2 - missing[isfeb & ~isleapyear(series.year)] -= 1 - elif period not in (12,7): - raise NotImplementedError, "Not yet implemented for that frequency..." - return missing - -#............................................................................. -def accept_atmost_missing(series, max_missing, strict=False): - """Masks the rows of the series that contains more than max_missing missing data. - Returns a new masked series. - -:Inputs: - series : TimeSeries - Input time series. - max_missing : float - Number of maximum acceptable missing values per row (if larger than 1), - or maximum acceptable percentage of missing values (if lower than 1). - strict : boolean *[False]* - Whether the - """ - series = numpy.array(series, copy=True, subok=True) - if not isinstance(series, TimeSeries): - raise TypeError, "The input data should be a valid TimeSeries object! "\ - "(got %s instead)" % type(series) - # Find the number of missing values .... 
- missing = count_missing(series) - # Transform an acceptable percentage in a number - if max_missing < 1: - max_missing = numpy.round(max_missing * series.shape[-1],0) - # - series.unshare_mask() - if strict: - series[missing > max_missing] = masked - else: - series[missing >= max_missing] = masked - return series - \ No newline at end of file Deleted: trunk/scipy/sandbox/timeseries/tmulti.py =================================================================== --- trunk/scipy/sandbox/timeseries/tmulti.py 2007-09-18 23:22:21 UTC (rev 3326) +++ trunk/scipy/sandbox/timeseries/tmulti.py 2007-09-19 00:51:14 UTC (rev 3327) @@ -1,524 +0,0 @@ -# pylint: disable-msg=W0201, W0212 -""" -Support for multi-variable time series, through masked recarrays. - -:author: Pierre GF Gerard-Marchant & Matt Knox -:contact: pierregm_at_uga_dot_edu - mattknox_ca_at_hotmail_dot_com -:version: $Id$ -""" -__author__ = "Pierre GF Gerard-Marchant & Matt Knox ($Author$)" -__version__ = '1.0' -__revision__ = "$Revision$" -__date__ = '$Date$' - - -import sys - -import numpy -from numpy import bool_, complex_, float_, int_, str_, object_ -import numpy.core.fromnumeric as fromnumeric -import numpy.core.numeric as numeric -from numpy.core.numeric import ndarray -import numpy.core.numerictypes as ntypes -import numpy.core.umath as umath -from numpy.core.defchararray import chararray -from numpy.core.records import find_duplicate -from numpy.core.records import format_parser, recarray, record -from numpy.core.records import fromarrays as recfromarrays - -import maskedarray as MA -#MaskedArray = MA.MaskedArray -from maskedarray.core import MaskedArray, MAError, default_fill_value, \ - masked_print_option -from maskedarray.core import masked, nomask, getmask, getmaskarray, make_mask,\ - make_mask_none, mask_or, masked_array, filled - -import maskedarray.mrecords as MR -from maskedarray.mrecords import _checknames, _guessvartypes, openfile,\ - MaskedRecords -from maskedarray.mrecords import fromrecords as mrecfromrecords - -from tseries import TimeSeries, time_series, _getdatalength -from tdates import Date, DateArray, date_array - -#ndarray = numeric.ndarray -_byteorderconv = numpy.core.records._byteorderconv -_typestr = ntypes._typestr - -reserved_fields = MR.reserved_fields + ['_dates'] - -import warnings - -__all__ = [ -'MultiTimeSeries','fromarrays','fromrecords','fromtextfile', -] - -def _getformats(data): - """Returns the formats of each array of arraylist as a comma-separated - string.""" - if isinstance(data, record): - return ",".join([desc[1] for desc in data.dtype.descr]) - - formats = '' - for obj in data: - obj = numeric.asarray(obj) -# if not isinstance(obj, ndarray): -## if not isinstance(obj, ndarray): -# raise ValueError, "item in the array list must be an ndarray." - formats += _typestr[obj.dtype.type] - if issubclass(obj.dtype.type, ntypes.flexible): - formats += `obj.itemsize` - formats += ',' - return formats[:-1] - - - - -class MultiTimeSeries(TimeSeries, MaskedRecords, object): - """ - -:IVariables: - - `__localfdict` : Dictionary - Dictionary of local fields (`f0_data`, `f0_mask`...) - - `__globalfdict` : Dictionary - Dictionary of global fields, as the combination of a `_data` and a `_mask`. 
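accept_atmost_missing above takes its threshold either as an absolute count or, when smaller than 1, as a fraction of the row length, and then masks the rows that exceed it. A minimal sketch of that normalization and of the row selection it drives (plain numpy, hypothetical variable names):

    import numpy as np

    missing = np.array([0, 3, 9, 12])   # missing values per row
    row_length = 31
    max_missing = 0.25                  # a fraction, since < 1
    if max_missing < 1:
        # translate the fraction into a count, as the function does
        max_missing = np.round(max_missing * row_length)   # -> 8.0
    to_mask = missing >= max_missing    # strict=False: last two rows
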
- (`f0`) - """ - _defaultfieldmask = nomask - _defaulthardmask = False - def __new__(cls, data, dates=None, mask=nomask, dtype=None, - freq=None, observed=None, start_date=None, - hard_mask=False, fill_value=None, -# offset=0, strides=None, - formats=None, names=None, titles=None, - byteorder=None, aligned=False): - tsoptions = dict(fill_value=fill_value, hard_mask=hard_mask,) - mroptions = dict(fill_value=fill_value, hard_mask=hard_mask, - formats=formats, names=names, titles=titles, - byteorder=byteorder, aligned=aligned) - # - if isinstance(data, MultiTimeSeries): -# if copy: -# data = data.copy() - data._hardmask = data._hardmask | hard_mask - return data - # ....................................... - _data = MaskedRecords(data, mask=mask, dtype=dtype, **mroptions).view(cls) - if dates is None: - length = _getdatalength(data) - newdates = date_array(start_date=start_date, length=length, - freq=freq) - elif not hasattr(dates, 'freq'): - newdates = date_array(dlist=dates, freq=freq) - else: - newdates = dates - _data._dates = newdates - _data._observed = observed - cls._defaultfieldmask = _data._fieldmask - # - return _data - - def __array_finalize__(self,obj): - if isinstance(obj, (MaskedRecords)): - self.__dict__.update(_fieldmask=obj._fieldmask, - _hardmask=obj._hardmask, - _fill_value=obj._fill_value, - _names = obj.dtype.names - ) - if isinstance(obj, MultiTimeSeries): - self.__dict__.update(observed=obj.observed, - _dates=obj._dates) - else: - self.__dict__.update(observed=None, - _dates=[]) - else: - self.__dict__.update(_dates = [], - observed=None, - _fieldmask = nomask, - _hardmask = False, - fill_value = None, - _names = self.dtype.names - ) - return - - - def _getdata(self): - "Returns the data as a recarray." - return self.view(recarray) - _data = property(fget=_getdata) - - def _getseries(self): - "Returns the data as a MaskedRecord array." - return self.view(MaskedRecords) - _series = property(fget=_getseries) - - #...................................................... - def __getattribute__(self, attr): - getattribute = MaskedRecords.__getattribute__ - _dict = getattribute(self,'__dict__') - if attr in _dict.get('_names',[]): - obj = getattribute(self,attr).view(TimeSeries) - obj._dates = _dict['_dates'] - return obj - return getattribute(self,attr) - - - def __setattr__(self, attr, val): - newattr = attr not in self.__dict__ - try: - # Is attr a generic attribute ? - ret = object.__setattr__(self, attr, val) - except: - # Not a generic attribute: exit if it's not a valid field - fielddict = self.dtype.names or {} - if attr not in fielddict: - exctype, value = sys.exc_info()[:2] - raise exctype, value - else: - if attr not in list(self.dtype.names) + ['_dates','_mask']: - return ret - if newattr: # We just added this one - try: # or this setattr worked on an internal - # attribute. - object.__delattr__(self, attr) - except: - return ret - # Case #1.: Basic field ............ 
- base_fmask = self._fieldmask - _names = self.dtype.names - if attr in _names: - fval = filled(val) - mval = getmaskarray(val) - if self._hardmask: - mval = mask_or(mval, base_fmask.__getattr__(attr)) - self._data.__setattr__(attr, fval) - base_fmask.__setattr__(attr, mval) - return - elif attr == '_mask': - if self._hardmask: - val = make_mask(val) - if val is not nomask: -# mval = getmaskarray(val) - for k in _names: - m = mask_or(val, base_fmask.__getattr__(k)) - base_fmask.__setattr__(k, m) - else: - mval = getmaskarray(val) - for k in _names: - base_fmask.__setattr__(k, mval) - return - #............................................ - def __getitem__(self, indx): - """Returns all the fields sharing the same fieldname base. - The fieldname base is either `_data` or `_mask`.""" - _localdict = self.__dict__ - # We want a field ........ - if indx in self.dtype.names: - obj = self._data[indx].view(TimeSeries) - obj._dates = _localdict['_dates'] - obj._mask = make_mask(_localdict['_fieldmask'][indx]) - return obj - # We want some elements .. - (sindx, dindx) = self._TimeSeries__checkindex(indx) -# obj = numeric.array(self._data[sindx], -# copy=False, subok=True).view(type(self)) - obj = numeric.array(self._data[sindx], copy=False, subok=True) - obj = obj.view(type(self)) - obj.__dict__.update(_dates=_localdict['_dates'][dindx], - _fieldmask=_localdict['_fieldmask'][sindx], - _fill_value=_localdict['_fill_value']) - return obj - - def __getslice__(self, i, j): - """Returns the slice described by [i,j].""" - _localdict = self.__dict__ - (si, di) = super(MultiTimeSeries, self)._TimeSeries__checkindex(i) - (sj, dj) = super(MultiTimeSeries, self)._TimeSeries__checkindex(j) - newdata = self._data[si:sj].view(type(self)) - newdata.__dict__.update(_dates=_localdict['_dates'][di:dj], - _mask=_localdict['_fieldmask'][si:sj]) - return newdata - - def __setslice__(self, i, j, value): - """Sets the slice described by [i,j] to `value`.""" - self.view(MaskedRecords).__setslice__(i,j,value) - return - - #...................................................... - def __str__(self): - """x.__str__() <==> str(x) -Calculates the string representation, using masked for fill if it is enabled. -Otherwise, fills with fill value. - """ - if self.size > 1: - mstr = ["(%s)" % ",".join([str(i) for i in s]) - for s in zip(*[getattr(self,f) for f in self.dtype.names])] - return "[%s]" % ", ".join(mstr) - else: - mstr = numeric.asarray(self._data.item(), dtype=object_) - mstr[list(self._fieldmask)] = masked_print_option - return str(mstr) - - def __repr__(self): - """x.__repr__() <==> repr(x) -Calculates the repr representation, using masked for fill if it is enabled. -Otherwise fill with fill value. - """ - _names = self.dtype.names - _dates = self._dates - if numeric.size(_dates) > 2 and self._dates.isvalid(): - timestr = "[%s ... %s]" % (str(_dates[0]),str(_dates[-1])) - else: - timestr = str(_dates) - fmt = "%%%is : %%s" % (max([len(n) for n in _names])+4,) - reprstr = [fmt % (f,getattr(self,f)) for f in self.dtype.names] - reprstr.insert(0,'multitimeseries(') - reprstr.extend([fmt % ('dates', timestr), - fmt % (' fill_value', self._fill_value), - ' )']) - return str("\n".join(reprstr)) - #............................................. - def copy(self): - "Returns a copy of the argument." 
- _localdict = self.__dict__ - return MultiTimeSeries(_localdict['_data'].copy(), - dates=_localdict['_dates'].copy(), - mask=_localdict['_fieldmask'].copy(), - dtype=self.dtype) - - -#####--------------------------------------------------------------------------- -#---- --- Constructors --- -#####--------------------------------------------------------------------------- - -def fromarrays(arraylist, dates=None, - dtype=None, shape=None, formats=None, - names=None, titles=None, aligned=False, byteorder=None): - """Creates a mrecarray from a (flat) list of masked arrays. - -:Parameters: - - `arraylist` : Sequence - A list of (masked) arrays. Each element of the sequence is first converted - to a masked array if needed. If a 2D array is passed as argument, it is - processed line by line - - `dtype` : numeric.dtype - Data type descriptor. - - `shape` : Integer *[None]* - Number of records. If None, `shape` is defined from the shape of the first - array in the list. - - `formats` : - (Description to write) - - `names` : - (description to write) - - `titles`: - (Description to write) - - `aligned`: Boolen *[False]* - (Description to write, not used anyway) - - `byteorder`: Boolen *[None]* - (Description to write, not used anyway) - - - """ - arraylist = [MA.asarray(x) for x in arraylist] - # Define/check the shape..................... - if shape is None or shape == 0: - shape = arraylist[0].shape - if isinstance(shape, int): - shape = (shape,) - # Define formats from scratch ............... - if formats is None and dtype is None: - formats = _getformats(arraylist) - # Define the dtype .......................... - if dtype is not None: - descr = numeric.dtype(dtype) - _names = descr.names - else: - parsed = format_parser(formats, names, titles, aligned, byteorder) - _names = parsed._names - descr = parsed._descr - # Determine shape from data-type............. - if len(descr) != len(arraylist): - msg = "Mismatch between the number of fields (%i) and the number of "\ - "arrays (%i)" - raise ValueError, msg % (len(descr), len(arraylist)) - d0 = descr[0].shape - nn = len(d0) - if nn > 0: - shape = shape[:-nn] - # Make sure the shape is the correct one .... - for k, obj in enumerate(arraylist): - nn = len(descr[k].shape) - testshape = obj.shape[:len(obj.shape)-nn] - if testshape != shape: - raise ValueError, "Array-shape mismatch in array %d" % k - # Reconstruct the descriptor, by creating a _data and _mask version - return MultiTimeSeries(arraylist, dtype=descr) - -def __getdates(dates=None, newdates=None, length=None, freq=None, - start_date=None): - """Determines new dates (private function not meant to be used).""" - if dates is None: - if newdates is not None: - if not hasattr(newdates, 'freq'): - newdates = date_array(dlist=newdates, freq=freq) - else: - newdates = date_array(start_date=start_date, length=length, - freq=freq) - elif not hasattr(dates, 'freq'): - newdates = date_array(dlist=dates, freq=freq) - else: - newdates = dates - return newdates - -#.............................................................................. -def fromrecords(reclist, dates=None, freq=None, start_date=None, - dtype=None, shape=None, formats=None, names=None, - titles=None, aligned=False, byteorder=None): - """Creates a MaskedRecords from a list of records. - - The data in the same field can be heterogeneous, they will be promoted - to the highest data type. This method is intended for creating - smaller record arrays. If used to create large array without formats - defined, it can be slow. 
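The comma-separated format string that fromarrays builds through _getformats above can be approximated with ordinary dtype introspection; a rough sketch under that assumption (the exact type codes depend on the platform and numpy version):

    import numpy as np

    arrays = [np.arange(3), np.arange(3.0), np.array(['a', 'bc', 'def'])]
    # Strip the byte-order prefix from each dtype string and join the
    # codes, mirroring what _getformats assembles from _typestr and
    # the item size.
    formats = ','.join(a.dtype.str.lstrip('<>=|') for a in arrays)
    # e.g. 'i8,f8,U3' on a 64-bit build
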
- - If formats is None, then this will auto-detect formats. Use a list of - tuples rather than a list of lists for faster processing. - """ - # reclist is in fact a mrecarray ................. - if isinstance(reclist, MultiTimeSeries): - mdescr = reclist.dtype - shape = reclist.shape - return MultiTimeSeries(reclist, dtype=mdescr) - # No format, no dtype: create from to arrays ..... - _data = mrecfromrecords(reclist, dtype=dtype, shape=shape, formats=formats, - names=names, titles=titles, aligned=aligned, - byteorder=byteorder) - _dtype = _data.dtype - # Check the names for a '_dates' ................. - newdates = None - _names = list(_dtype.names) - reserved = [n for n in _names if n.lower() in ['dates', '_dates']] - if len(reserved) > 0: - newdates = _data[reserved[-1]] - [_names.remove(n) for n in reserved] - _dtype = numeric.dtype([t for t in _dtype.descr \ - if t[0] not in reserved ]) - _data = [_data[n] for n in _names] - # - newdates = __getdates(dates=dates, newdates=newdates, length=len(_data), - freq=freq, start_date=start_date) - # - return MultiTimeSeries(_data, dates=newdates, dtype=_dtype, - names=_names) - - -def fromtextfile(fname, delimitor=None, commentchar='#', missingchar='', - dates_column=None, varnames=None, vartypes=None, - dates=None): - """Creates a multitimeseries from data stored in the file `filename`. - -:Parameters: - - `filename` : file name/handle - Handle of an opened file. - - `delimitor` : Character *None* - Alphanumeric character used to separate columns in the file. - If None, any (group of) white spacestring(s) will be used. - - `commentchar` : String *['#']* - Alphanumeric character used to mark the start of a comment. - - `missingchar` : String *['']* - String indicating missing data, and used to create the masks. - - `datescol` : Integer *[None]* - Position of the columns storing dates. If None, a position will be - estimated from the variable names. - - `varnames` : Sequence *[None]* - Sequence of the variable names. If None, a list will be created from - the first non empty line of the file. - - `vartypes` : Sequence *[None]* - Sequence of the variables dtypes. If None, the sequence will be estimated - from the first non-commented line. - - - Ultra simple: the varnames are in the header, one line""" - # Try to open the file ...................... - f = openfile(fname) - # Get the first non-empty line as the varnames - while True: - line = f.readline() - firstline = line[:line.find(commentchar)].strip() - _varnames = firstline.split(delimitor) - if len(_varnames) > 1: - break - if varnames is None: - varnames = _varnames - # Get the data .............................. - _variables = MA.asarray([line.strip().split(delimitor) for line in f - if line[0] != commentchar and len(line) > 1]) - (nvars, nfields) = _variables.shape - # Check if we need to get the dates.......... 
- if dates_column is None: - dates_column = [i for (i,n) in enumerate(list(varnames)) - if n.lower() in ['_dates','dates']] - elif isinstance(dates_column,(int,float)): - if dates_column > nfields: - raise ValueError,\ - "Invalid column number: %i > %i" % (dates_column, nfields) - dates_column = [dates_column,] - if len(dates_column) > 0: - cols = range(nfields) - [cols.remove(i) for i in dates_column] - newdates = date_array(_variables[:,dates_column[-1]]) - _variables = _variables[:,cols] - varnames = [varnames[i] for i in cols] - if vartypes is not None: - vartypes = [vartypes[i] for i in cols] - nfields -= len(dates_column) - else: - newdates = None - # Try to guess the dtype .................... - if vartypes is None: - vartypes = _guessvartypes(_variables[0]) - else: - vartypes = [numeric.dtype(v) for v in vartypes] - if len(vartypes) != nfields: - msg = "Attempting to %i dtypes for %i fields!" - msg += " Reverting to default." - warnings.warn(msg % (len(vartypes), nfields)) - vartypes = _guessvartypes(_variables[0]) - # Construct the descriptor .................. - mdescr = [(n,f) for (n,f) in zip(varnames, vartypes)] - # Get the data and the mask ................. - # We just need a list of masked_arrays. It's easier to create it like that: - _mask = (_variables.T == missingchar) - _datalist = [masked_array(a,mask=m,dtype=t) - for (a,m,t) in zip(_variables.T, _mask, vartypes)] - # - newdates = __getdates(dates=dates, newdates=newdates, length=nvars, - freq=None, start_date=None) - return MultiTimeSeries(_datalist, dates=newdates, dtype=mdescr) - - - -################################################################################ -if __name__ == '__main__': - import numpy as N - from maskedarray.testutils import assert_equal - if 1: - d = N.arange(5) - m = MA.make_mask([1,0,0,1,1]) - base_d = N.r_[d,d[::-1]].reshape(2,-1).T - base_m = N.r_[[m, m[::-1]]].T - base = MA.array(base_d, mask=base_m) - mrec = MR.fromarrays(base.T,) - dlist = ['2007-%02i' % (i+1) for i in d] - dates = date_array(dlist) - ts = time_series(mrec,dates) - mts = MultiTimeSeries(mrec,dates) - self_data = [d, m, mrec, dlist, dates, ts, mts] - - assert(isinstance(mts.f0, TimeSeries)) - # - if 1: - recfirst = mts._data[0] - print recfirst, type(recfirst) - print mrec[0], type(mrec[0]) - Copied: trunk/scipy/sandbox/timeseries/trecords.py (from rev 3319, trunk/scipy/sandbox/timeseries/tmulti.py) =================================================================== --- trunk/scipy/sandbox/timeseries/tmulti.py 2007-09-18 15:04:50 UTC (rev 3319) +++ trunk/scipy/sandbox/timeseries/trecords.py 2007-09-19 00:51:14 UTC (rev 3327) @@ -0,0 +1,524 @@ +# pylint: disable-msg=W0201, W0212 +""" +Support for multi-variable time series, through masked recarrays. 
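Field access on the record class above relies on a general numpy pattern: view a field of a structured array as an ndarray subclass, then attach shared metadata to the view (here, the date axis), with no copy of the data. A minimal sketch with a hypothetical Tagged subclass:

    import numpy as np

    class Tagged(np.ndarray):
        """Bare ndarray subclass that can carry extra attributes."""
        pass

    rec = np.zeros(3, dtype=[('f0', int), ('f1', float)])
    f0 = rec['f0'].view(Tagged)   # no copy: a view of the field
    f0.dates = np.arange(3)       # shared metadata, as _dates is
    assert f0.base is not None    # still backed by rec's buffer
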
+ +:author: Pierre GF Gerard-Marchant & Matt Knox +:contact: pierregm_at_uga_dot_edu - mattknox_ca_at_hotmail_dot_com +:version: $Id$ +""" +__author__ = "Pierre GF Gerard-Marchant & Matt Knox ($Author$)" +__version__ = '1.0' +__revision__ = "$Revision$" +__date__ = '$Date$' + + +import sys + +import numpy +from numpy import bool_, complex_, float_, int_, str_, object_ +import numpy.core.fromnumeric as fromnumeric +import numpy.core.numeric as numeric +from numpy.core.numeric import ndarray +import numpy.core.numerictypes as ntypes +import numpy.core.umath as umath +from numpy.core.defchararray import chararray +from numpy.core.records import find_duplicate +from numpy.core.records import format_parser, recarray, record +from numpy.core.records import fromarrays as recfromarrays + +import maskedarray as MA +#MaskedArray = MA.MaskedArray +from maskedarray.core import MaskedArray, MAError, default_fill_value, \ + masked_print_option +from maskedarray.core import masked, nomask, getmask, getmaskarray, make_mask,\ + make_mask_none, mask_or, masked_array, filled + +import maskedarray.mrecords as MR +from maskedarray.mrecords import _checknames, _guessvartypes, openfile,\ + MaskedRecords +from maskedarray.mrecords import fromrecords as mrecfromrecords + +from tseries import TimeSeries, time_series, _getdatalength +from dates import Date, DateArray, date_array + +#ndarray = numeric.ndarray +_byteorderconv = numpy.core.records._byteorderconv +_typestr = ntypes._typestr + +reserved_fields = MR.reserved_fields + ['_dates'] + +import warnings + +__all__ = [ +'TimeSeriesRecords','fromarrays','fromrecords','fromtextfile', +] + +def _getformats(data): + """Returns the formats of each array of arraylist as a comma-separated + string.""" + if isinstance(data, record): + return ",".join([desc[1] for desc in data.dtype.descr]) + + formats = '' + for obj in data: + obj = numeric.asarray(obj) +# if not isinstance(obj, ndarray): +## if not isinstance(obj, ndarray): +# raise ValueError, "item in the array list must be an ndarray." + formats += _typestr[obj.dtype.type] + if issubclass(obj.dtype.type, ntypes.flexible): + formats += `obj.itemsize` + formats += ',' + return formats[:-1] + + + + +class TimeSeriesRecords(TimeSeries, MaskedRecords, object): + """ + +:IVariables: + - `__localfdict` : Dictionary + Dictionary of local fields (`f0_data`, `f0_mask`...) + - `__globalfdict` : Dictionary + Dictionary of global fields, as the combination of a `_data` and a `_mask`. + (`f0`) + """ + _defaultfieldmask = nomask + _defaulthardmask = False + def __new__(cls, data, dates=None, mask=nomask, dtype=None, + freq=None, observed=None, start_date=None, + hard_mask=False, fill_value=None, +# offset=0, strides=None, + formats=None, names=None, titles=None, + byteorder=None, aligned=False): + tsoptions = dict(fill_value=fill_value, hard_mask=hard_mask,) + mroptions = dict(fill_value=fill_value, hard_mask=hard_mask, + formats=formats, names=names, titles=titles, + byteorder=byteorder, aligned=aligned) + # + if isinstance(data, TimeSeriesRecords): +# if copy: +# data = data.copy() + data._hardmask = data._hardmask | hard_mask + return data + # ....................................... 
+ _data = MaskedRecords(data, mask=mask, dtype=dtype, **mroptions).view(cls) + if dates is None: + length = _getdatalength(data) + newdates = date_array(start_date=start_date, length=length, + freq=freq) + elif not hasattr(dates, 'freq'): + newdates = date_array(dlist=dates, freq=freq) + else: + newdates = dates + _data._dates = newdates + _data._observed = observed + cls._defaultfieldmask = _data._fieldmask + # + return _data + + def __array_finalize__(self,obj): + if isinstance(obj, (MaskedRecords)): + self.__dict__.update(_fieldmask=obj._fieldmask, + _hardmask=obj._hardmask, + _fill_value=obj._fill_value, + _names = obj.dtype.names + ) + if isinstance(obj, TimeSeriesRecords): + self.__dict__.update(observed=obj.observed, + _dates=obj._dates) + else: + self.__dict__.update(observed=None, + _dates=[]) + else: + self.__dict__.update(_dates = [], + observed=None, + _fieldmask = nomask, + _hardmask = False, + fill_value = None, + _names = self.dtype.names + ) + return + + + def _getdata(self): + "Returns the data as a recarray." + return self.view(recarray) + _data = property(fget=_getdata) + + def _getseries(self): + "Returns the data as a MaskedRecord array." + return self.view(MaskedRecords) + _series = property(fget=_getseries) + + #...................................................... + def __getattribute__(self, attr): + getattribute = MaskedRecords.__getattribute__ + _dict = getattribute(self,'__dict__') + if attr in _dict.get('_names',[]): + obj = getattribute(self,attr).view(TimeSeries) + obj._dates = _dict['_dates'] + return obj + return getattribute(self,attr) + + + def __setattr__(self, attr, val): + newattr = attr not in self.__dict__ + try: + # Is attr a generic attribute ? + ret = object.__setattr__(self, attr, val) + except: + # Not a generic attribute: exit if it's not a valid field + fielddict = self.dtype.names or {} + if attr not in fielddict: + exctype, value = sys.exc_info()[:2] + raise exctype, value + else: + if attr not in list(self.dtype.names) + ['_dates','_mask']: + return ret + if newattr: # We just added this one + try: # or this setattr worked on an internal + # attribute. + object.__delattr__(self, attr) + except: + return ret + # Case #1.: Basic field ............ + base_fmask = self._fieldmask + _names = self.dtype.names + if attr in _names: + fval = filled(val) + mval = getmaskarray(val) + if self._hardmask: + mval = mask_or(mval, base_fmask.__getattr__(attr)) + self._data.__setattr__(attr, fval) + base_fmask.__setattr__(attr, mval) + return + elif attr == '_mask': + if self._hardmask: + val = make_mask(val) + if val is not nomask: +# mval = getmaskarray(val) + for k in _names: + m = mask_or(val, base_fmask.__getattr__(k)) + base_fmask.__setattr__(k, m) + else: + mval = getmaskarray(val) + for k in _names: + base_fmask.__setattr__(k, mval) + return + #............................................ + def __getitem__(self, indx): + """Returns all the fields sharing the same fieldname base. + The fieldname base is either `_data` or `_mask`.""" + _localdict = self.__dict__ + # We want a field ........ + if indx in self.dtype.names: + obj = self._data[indx].view(TimeSeries) + obj._dates = _localdict['_dates'] + obj._mask = make_mask(_localdict['_fieldmask'][indx]) + return obj + # We want some elements .. 
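+        # __checkindex (name-mangled from TimeSeries) resolves the index
+        # into a pair: sindx indexes the records, dindx the dates, so the
+        # data and the date axis stay aligned through any slicing below.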
+ (sindx, dindx) = self._TimeSeries__checkindex(indx) +# obj = numeric.array(self._data[sindx], +# copy=False, subok=True).view(type(self)) + obj = numeric.array(self._data[sindx], copy=False, subok=True) + obj = obj.view(type(self)) + obj.__dict__.update(_dates=_localdict['_dates'][dindx], + _fieldmask=_localdict['_fieldmask'][sindx], + _fill_value=_localdict['_fill_value']) + return obj + + def __getslice__(self, i, j): + """Returns the slice described by [i,j].""" + _localdict = self.__dict__ + (si, di) = super(TimeSeriesRecords, self)._TimeSeries__checkindex(i) + (sj, dj) = super(TimeSeriesRecords, self)._TimeSeries__checkindex(j) + newdata = self._data[si:sj].view(type(self)) + newdata.__dict__.update(_dates=_localdict['_dates'][di:dj], + _mask=_localdict['_fieldmask'][si:sj]) + return newdata + + def __setslice__(self, i, j, value): + """Sets the slice described by [i,j] to `value`.""" + self.view(MaskedRecords).__setslice__(i,j,value) + return + + #...................................................... + def __str__(self): + """x.__str__() <==> str(x) +Calculates the string representation, using masked for fill if it is enabled. +Otherwise, fills with fill value. + """ + if self.size > 1: + mstr = ["(%s)" % ",".join([str(i) for i in s]) + for s in zip(*[getattr(self,f) for f in self.dtype.names])] + return "[%s]" % ", ".join(mstr) + else: + mstr = numeric.asarray(self._data.item(), dtype=object_) + mstr[list(self._fieldmask)] = masked_print_option + return str(mstr) + + def __repr__(self): + """x.__repr__() <==> repr(x) +Calculates the repr representation, using masked for fill if it is enabled. +Otherwise fill with fill value. + """ + _names = self.dtype.names + _dates = self._dates + if numeric.size(_dates) > 2 and self._dates.isvalid(): + timestr = "[%s ... %s]" % (str(_dates[0]),str(_dates[-1])) + else: + timestr = str(_dates) + fmt = "%%%is : %%s" % (max([len(n) for n in _names])+4,) + reprstr = [fmt % (f,getattr(self,f)) for f in self.dtype.names] + reprstr.insert(0,'TimeSeriesRecords(') + reprstr.extend([fmt % ('dates', timestr), + fmt % (' fill_value', self._fill_value), + ' )']) + return str("\n".join(reprstr)) + #............................................. + def copy(self): + "Returns a copy of the argument." + _localdict = self.__dict__ + return TimeSeriesRecords(_localdict['_data'].copy(), + dates=_localdict['_dates'].copy(), + mask=_localdict['_fieldmask'].copy(), + dtype=self.dtype) + + +#####--------------------------------------------------------------------------- +#---- --- Constructors --- +#####--------------------------------------------------------------------------- + +def fromarrays(arraylist, dates=None, + dtype=None, shape=None, formats=None, + names=None, titles=None, aligned=False, byteorder=None): + """Creates a mrecarray from a (flat) list of masked arrays. + +:Parameters: + - `arraylist` : Sequence + A list of (masked) arrays. Each element of the sequence is first converted + to a masked array if needed. If a 2D array is passed as argument, it is + processed line by line + - `dtype` : numeric.dtype + Data type descriptor. + - `shape` : Integer *[None]* + Number of records. If None, `shape` is defined from the shape of the first + array in the list. 
+    - `formats` :
+      (Description to write)
+    - `names` :
+      (description to write)
+    - `titles`:
+      (Description to write)
+    - `aligned`: Boolean *[False]*
+      (Description to write, not used anyway)
+    - `byteorder`: Boolean *[None]*
+      (Description to write, not used anyway)
+
+
+    """
+    arraylist = [MA.asarray(x) for x in arraylist]
+    # Define/check the shape.....................
+    if shape is None or shape == 0:
+        shape = arraylist[0].shape
+    if isinstance(shape, int):
+        shape = (shape,)
+    # Define formats from scratch ...............
+    if formats is None and dtype is None:
+        formats = _getformats(arraylist)
+    # Define the dtype ..........................
+    if dtype is not None:
+        descr = numeric.dtype(dtype)
+        _names = descr.names
+    else:
+        parsed = format_parser(formats, names, titles, aligned, byteorder)
+        _names = parsed._names
+        descr = parsed._descr
+    # Determine shape from data-type.............
+    if len(descr) != len(arraylist):
+        msg = "Mismatch between the number of fields (%i) and the number of "\
+              "arrays (%i)"
+        raise ValueError, msg % (len(descr), len(arraylist))
+    d0 = descr[0].shape
+    nn = len(d0)
+    if nn > 0:
+        shape = shape[:-nn]
+    # Make sure the shape is the correct one ....
+    for k, obj in enumerate(arraylist):
+        nn = len(descr[k].shape)
+        testshape = obj.shape[:len(obj.shape)-nn]
+        if testshape != shape:
+            raise ValueError, "Array-shape mismatch in array %d" % k
+    # Reconstruct the descriptor, by creating a _data and _mask version
+    return TimeSeriesRecords(arraylist, dtype=descr)
+
+def __getdates(dates=None, newdates=None, length=None, freq=None,
+               start_date=None):
+    """Determines new dates (private function not meant to be used)."""
+    if dates is None:
+        if newdates is not None:
+            if not hasattr(newdates, 'freq'):
+                newdates = date_array(dlist=newdates, freq=freq)
+        else:
+            newdates = date_array(start_date=start_date, length=length,
+                                  freq=freq)
+    elif not hasattr(dates, 'freq'):
+        newdates = date_array(dlist=dates, freq=freq)
+    else:
+        newdates = dates
+    return newdates
+
+#..............................................................................
+def fromrecords(reclist, dates=None, freq=None, start_date=None,
+                dtype=None, shape=None, formats=None, names=None,
+                titles=None, aligned=False, byteorder=None):
+    """Creates a TimeSeriesRecords from a list of records.
+
+    The data in the same field can be heterogeneous; they will be promoted
+    to the highest data type. This method is intended for creating
+    smaller record arrays. If used to create a large array without formats
+    defined, it can be slow.
+
+    If formats is None, then this will auto-detect formats. Use a list of
+    tuples rather than a list of lists for faster processing.
+    """
+    # reclist is in fact a mrecarray .................
+    if isinstance(reclist, TimeSeriesRecords):
+        mdescr = reclist.dtype
+        shape = reclist.shape
+        return TimeSeriesRecords(reclist, dtype=mdescr)
+    # No format, no dtype: create from the records .....
+    _data = mrecfromrecords(reclist, dtype=dtype, shape=shape, formats=formats,
+                            names=names, titles=titles, aligned=aligned,
+                            byteorder=byteorder)
+    _dtype = _data.dtype
+    # Check the names for a '_dates' .................
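+    # A field named 'dates' or '_dates' (any case) is treated as the date
+    # axis: it is extracted below, removed from the field names and from
+    # the dtype, and the remaining columns become the data fields.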
+    newdates = None
+    _names = list(_dtype.names)
+    reserved = [n for n in _names if n.lower() in ['dates', '_dates']]
+    if len(reserved) > 0:
+        newdates = _data[reserved[-1]]
+        [_names.remove(n) for n in reserved]
+        _dtype = numeric.dtype([t for t in _dtype.descr \
+                                if t[0] not in reserved ])
+    _data = [_data[n] for n in _names]
+    #
+    newdates = __getdates(dates=dates, newdates=newdates, length=len(_data),
+                          freq=freq, start_date=start_date)
+    #
+    return TimeSeriesRecords(_data, dates=newdates, dtype=_dtype,
+                             names=_names)
+
+
+def fromtextfile(fname, delimitor=None, commentchar='#', missingchar='',
+                 dates_column=None, varnames=None, vartypes=None,
+                 dates=None):
+    """Creates a TimeSeriesRecords from data stored in the file `filename`.
+
+:Parameters:
+    - `filename` : file name/handle
+      Handle of an opened file.
+    - `delimitor` : Character *None*
+      Alphanumeric character used to separate columns in the file.
+      If None, any (group of) whitespace string(s) will be used.
+    - `commentchar` : String *['#']*
+      Alphanumeric character used to mark the start of a comment.
+    - `missingchar` : String *['']*
+      String indicating missing data, and used to create the masks.
+    - `dates_column` : Integer *[None]*
+      Position of the columns storing dates. If None, a position will be
+      estimated from the variable names.
+    - `varnames` : Sequence *[None]*
+      Sequence of the variable names. If None, a list will be created from
+      the first non-empty line of the file.
+    - `vartypes` : Sequence *[None]*
+      Sequence of the variables dtypes. If None, the sequence will be
+      estimated from the first non-commented line.
+
+
+    Ultra simple: the varnames are in the header, on one line."""
+    # Try to open the file ......................
+    f = openfile(fname)
+    # Get the first non-empty line as the varnames
+    while True:
+        line = f.readline()
+        firstline = line[:line.find(commentchar)].strip()
+        _varnames = firstline.split(delimitor)
+        if len(_varnames) > 1:
+            break
+    if varnames is None:
+        varnames = _varnames
+    # Get the data ..............................
+    _variables = MA.asarray([line.strip().split(delimitor) for line in f
+                             if line[0] != commentchar and len(line) > 1])
+    (nvars, nfields) = _variables.shape
+    # Check if we need to get the dates..........
+    if dates_column is None:
+        dates_column = [i for (i,n) in enumerate(list(varnames))
+                        if n.lower() in ['_dates','dates']]
+    elif isinstance(dates_column,(int,float)):
+        if dates_column > nfields:
+            raise ValueError,\
+                  "Invalid column number: %i > %i" % (dates_column, nfields)
+        dates_column = [dates_column,]
+    if len(dates_column) > 0:
+        cols = range(nfields)
+        [cols.remove(i) for i in dates_column]
+        newdates = date_array(_variables[:,dates_column[-1]])
+        _variables = _variables[:,cols]
+        varnames = [varnames[i] for i in cols]
+        if vartypes is not None:
+            vartypes = [vartypes[i] for i in cols]
+        nfields -= len(dates_column)
+    else:
+        newdates = None
+    # Try to guess the dtype ....................
+    if vartypes is None:
+        vartypes = _guessvartypes(_variables[0])
+    else:
+        vartypes = [numeric.dtype(v) for v in vartypes]
+        if len(vartypes) != nfields:
+            msg = "Attempting to set %i dtypes for %i fields!"
+            msg += " Reverting to default."
+            warnings.warn(msg % (len(vartypes), nfields))
+            vartypes = _guessvartypes(_variables[0])
+    # Construct the descriptor ..................
+    mdescr = [(n,f) for (n,f) in zip(varnames, vartypes)]
+    # Get the data and the mask .................
+    # We just need a list of masked_arrays.
It's easier to create it like that: + _mask = (_variables.T == missingchar) + _datalist = [masked_array(a,mask=m,dtype=t) + for (a,m,t) in zip(_variables.T, _mask, vartypes)] + # + newdates = __getdates(dates=dates, newdates=newdates, length=nvars, + freq=None, start_date=None) + return TimeSeriesRecords(_datalist, dates=newdates, dtype=mdescr) + + + +################################################################################ +if __name__ == '__main__': + import numpy as N + from maskedarray.testutils import assert_equal + if 1: + d = N.arange(5) + m = MA.make_mask([1,0,0,1,1]) + base_d = N.r_[d,d[::-1]].reshape(2,-1).T + base_m = N.r_[[m, m[::-1]]].T + base = MA.array(base_d, mask=base_m) + mrec = MR.fromarrays(base.T,) + dlist = ['2007-%02i' % (i+1) for i in d] + dates = date_array(dlist) + ts = time_series(mrec,dates) + mts = TimeSeriesRecords(mrec,dates) + self_data = [d, m, mrec, dlist, dates, ts, mts] + + assert(isinstance(mts.f0, TimeSeries)) + # + if 1: + recfirst = mts._data[0] + print recfirst, type(recfirst) + print mrec[0], type(mrec[0]) + Modified: trunk/scipy/sandbox/timeseries/tseries.py =================================================================== --- trunk/scipy/sandbox/timeseries/tseries.py 2007-09-18 23:22:21 UTC (rev 3326) +++ trunk/scipy/sandbox/timeseries/tseries.py 2007-09-19 00:51:14 UTC (rev 3327) @@ -33,16 +33,14 @@ import const as _c -import tdates -from tdates import DateError, InsufficientDateError -from tdates import Date, isDate, DateArray, isDateArray, \ +import dates +from dates import DateError, InsufficientDateError +from dates import Date, isDate, DateArray, isDateArray, \ date_array, date_array_fromlist, date_array_fromrange, thisday, today, \ check_freq, check_freq_str import cseries - - __all__ = [ 'TimeSeriesError','TimeSeriesCompatibilityError','TimeSeries','isTimeSeries', 'time_series', 'tsmasked', From scipy-svn at scipy.org Tue Sep 18 21:01:21 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 18 Sep 2007 20:01:21 -0500 (CDT) Subject: [Scipy-svn] r3328 - trunk/scipy/sandbox/timeseries Message-ID: <20070919010121.B3E4939C0EE@new.scipy.org> Author: mattknox_ca Date: 2007-09-18 20:01:15 -0500 (Tue, 18 Sep 2007) New Revision: 3328 Added: trunk/scipy/sandbox/timeseries/plotlib.py Removed: trunk/scipy/sandbox/timeseries/plotlib/ Log: got rid of plotlib subfolder and moved mpl_timeseries into the main timeseries folder and renamed it to plotlib.py (importing and usage of plotlib remains unchanged) Copied: trunk/scipy/sandbox/timeseries/plotlib.py (from rev 3319, trunk/scipy/sandbox/timeseries/plotlib/mpl_timeseries.py) From scipy-svn at scipy.org Tue Sep 18 23:32:33 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 18 Sep 2007 22:32:33 -0500 (CDT) Subject: [Scipy-svn] r3329 - trunk/scipy/sandbox/maskedarray Message-ID: <20070919033233.999D439C1C8@new.scipy.org> Author: pierregm Date: 2007-09-18 22:32:25 -0500 (Tue, 18 Sep 2007) New Revision: 3329 Modified: trunk/scipy/sandbox/maskedarray/mrecords.py trunk/scipy/sandbox/maskedarray/testutils.py Log: testutils : introduced assert_equal_records mrecords : MaskedRecords.__str__ : better support of record printing Modified: trunk/scipy/sandbox/maskedarray/mrecords.py =================================================================== --- trunk/scipy/sandbox/maskedarray/mrecords.py 2007-09-19 01:01:15 UTC (rev 3328) +++ trunk/scipy/sandbox/maskedarray/mrecords.py 2007-09-19 03:32:25 UTC (rev 3329) @@ -330,9 +330,9 @@ for s in zip(*[getattr(self,f) for f in 
self.dtype.names])] return "[%s]" % ", ".join(mstr) else: - mstr = numeric.asarray(self._data.item(), dtype=object_) - mstr[list(self._fieldmask)] = masked_print_option - return str(mstr) + mstr = ["%s" % ",".join([str(i) for i in s]) + for s in zip([getattr(self,f) for f in self.dtype.names])] + return "(%s)" % ", ".join(mstr) def __repr__(self): """x.__repr__() <==> repr(x) @@ -709,14 +709,14 @@ mrecfr = fromrecords(nrec.tolist(), names=nrec.dtype.names) assert_equal(mrecfr.a, mrec.a) assert_equal(mrecfr.dtype, mrec.dtype) - if 1: + if 0: assert_equal(mrec.a, MA.array(d,mask=m)) assert_equal(mrec.b, MA.array(d[::-1],mask=m[::-1])) assert((mrec._fieldmask == N.core.records.fromarrays([m, m[::-1]])).all()) assert_equal(mrec._mask, N.r_[[m,m[::-1]]].all(0)) assert_equal(mrec.a[1], mrec[1].a) - if 1: + if 0: x = [(1.,10.,'a'),(2.,20,'b'),(3.14,30,'c'),(5.55,40,'d')] desc = [('ffloat', N.float_), ('fint', N.int_), ('fstr', 'S10')] mr = MaskedRecords(x,dtype=desc) Modified: trunk/scipy/sandbox/maskedarray/testutils.py =================================================================== --- trunk/scipy/sandbox/maskedarray/testutils.py 2007-09-19 01:01:15 UTC (rev 3328) +++ trunk/scipy/sandbox/maskedarray/testutils.py 2007-09-19 03:32:25 UTC (rev 3329) @@ -47,6 +47,14 @@ assert_equal(actual[k], desired[k], 'item=%r\n%s' % (k,err_msg)) return +def assert_equal_records(a,b): + """Asserts that two records are equal. Pretty crude for now.""" + assert_equal(a.dtype, b.dtype) + for f in a.dtype.names: + (af, bf) = (getattr(a,f), getattr(b,f)) + if not (af is masked) and not (bf is masked): + assert_equal(getattr(a,f), getattr(b,f)) + return def assert_equal(actual,desired,err_msg=''): """Asserts that two items are equal. From scipy-svn at scipy.org Tue Sep 18 23:37:57 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 18 Sep 2007 22:37:57 -0500 (CDT) Subject: [Scipy-svn] r3330 - in trunk/scipy/sandbox/timeseries: . 
include src Message-ID: <20070919033757.A14A639C1C6@new.scipy.org> Author: mattknox_ca Date: 2007-09-18 22:37:47 -0500 (Tue, 18 Sep 2007) New Revision: 3330 Added: trunk/scipy/sandbox/timeseries/include/c_dates.h trunk/scipy/sandbox/timeseries/src/c_dates.c Removed: trunk/scipy/sandbox/timeseries/include/c_tdates.h trunk/scipy/sandbox/timeseries/src/c_tdates.c Modified: trunk/scipy/sandbox/timeseries/setup.py trunk/scipy/sandbox/timeseries/src/c_tseries.c trunk/scipy/sandbox/timeseries/src/cseries.c Log: renamed c_tdates.c/.h to c_dates to match the python files naming updated setup.py to reflect the latest file names and directories Copied: trunk/scipy/sandbox/timeseries/include/c_dates.h (from rev 3328, trunk/scipy/sandbox/timeseries/include/c_tdates.h) =================================================================== --- trunk/scipy/sandbox/timeseries/include/c_tdates.h 2007-09-19 01:01:15 UTC (rev 3328) +++ trunk/scipy/sandbox/timeseries/include/c_dates.h 2007-09-19 03:37:47 UTC (rev 3330) @@ -0,0 +1,123 @@ +#ifndef C_DATES_H +#define C_DATES_H + +#include "c_lib.h" + +#define HIGHFREQ_ORIG 719163 + +/*** FREQUENCY CONSTANTS ***/ + +#define FR_ANN 1000 /* Annual */ +#define FR_ANNDEC FR_ANN /* Annual - December year end*/ +#define FR_ANNJAN 1001 /* Annual - January year end*/ +#define FR_ANNFEB 1002 /* Annual - February year end*/ +#define FR_ANNMAR 1003 /* Annual - March year end*/ +#define FR_ANNAPR 1004 /* Annual - April year end*/ +#define FR_ANNMAY 1005 /* Annual - May year end*/ +#define FR_ANNJUN 1006 /* Annual - June year end*/ +#define FR_ANNJUL 1007 /* Annual - July year end*/ +#define FR_ANNAUG 1008 /* Annual - August year end*/ +#define FR_ANNSEP 1009 /* Annual - September year end*/ +#define FR_ANNOCT 1010 /* Annual - October year end*/ +#define FR_ANNNOV 1011 /* Annual - November year end*/ + +/* The standard quarterly frequencies. Year is determined by what year the end + month lies in. */ +#define FR_QTR 2000 /* Quarterly - December year end (default quarterly) */ +#define FR_QTRDEC FR_QTR /* Quarterly - December year end */ +#define FR_QTRJAN 2001 /* Quarterly - January year end */ +#define FR_QTRFEB 2002 /* Quarterly - February year end */ +#define FR_QTRMAR 2003 /* Quarterly - March year end */ +#define FR_QTRAPR 2004 /* Quarterly - April year end */ +#define FR_QTRMAY 2005 /* Quarterly - May year end */ +#define FR_QTRJUN 2006 /* Quarterly - June year end */ +#define FR_QTRJUL 2007 /* Quarterly - July year end */ +#define FR_QTRAUG 2008 /* Quarterly - August year end */ +#define FR_QTRSEP 2009 /* Quarterly - September year end */ +#define FR_QTROCT 2010 /* Quarterly - October year end */ +#define FR_QTRNOV 2011 /* Quarterly - November year end */ + +/* End period based quarterly frequencies. Year is determined by what year the + end month lies in. 
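These integer codes decompose mechanically: the thousands component names the frequency group, and the remainder encodes the anchor month (or weekday) within that group. A small Python sketch mirroring get_freq_group() and calc_a_year_end() defined later in c_dates.c (Python 2 style, matching the rest of the package):

    FR_ANN, FR_QTR = 1000, 2000

    def freq_group(freq):
        # mirrors: int get_freq_group(int freq) { return (freq/1000)*1000; }
        return (freq // 1000) * 1000

    def year_end_month(freq):
        # mirrors calc_a_year_end(): a remainder of 0 means December
        m = (freq - freq_group(freq)) % 12
        return m or 12

    print freq_group(2003), year_end_month(2003)   # 2000 3  -> FR_QTRMAR
    print freq_group(1000), year_end_month(1000)   # 1000 12 -> FR_ANNDEC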
*/ +#define FR_QTREDEC FR_QTRDEC /* Quarterly - December year end*/ +#define FR_QTREJAN FR_QTRJAN /* Quarterly - January year end*/ +#define FR_QTREFEB FR_QTRFEB /* Quarterly - February year end*/ +#define FR_QTREMAR FR_QTRMAR /* Quarterly - March year end*/ +#define FR_QTREAPR FR_QTRAPR /* Quarterly - April year end*/ +#define FR_QTREMAY FR_QTRMAY /* Quarterly - May year end*/ +#define FR_QTREJUN FR_QTRJUN /* Quarterly - June year end*/ +#define FR_QTREJUL FR_QTRJUL /* Quarterly - July year end*/ +#define FR_QTREAUG FR_QTRAUG /* Quarterly - August year end*/ +#define FR_QTRESEP FR_QTRSEP /* Quarterly - September year end*/ +#define FR_QTREOCT FR_QTROCT /* Quarterly - October year end*/ +#define FR_QTRENOV FR_QTRNOV /* Quarterly - November year end*/ + +/* Starting period based quarterly frequencies. Year is determined by what year + the starting month lies in. */ +#define FR_QTRSDEC FR_QTRDEC+12 /* Quarterly - December year end*/ +#define FR_QTRSJAN FR_QTRJAN+12 /* Quarterly - January year end*/ +#define FR_QTRSFEB FR_QTRFEB+12 /* Quarterly - February year end*/ +#define FR_QTRSMAR FR_QTRMAR+12 /* Quarterly - March year end*/ +#define FR_QTRSAPR FR_QTRAPR+12 /* Quarterly - April year end*/ +#define FR_QTRSMAY FR_QTRMAY+12 /* Quarterly - May year end*/ +#define FR_QTRSJUN FR_QTRJUN+12 /* Quarterly - June year end*/ +#define FR_QTRSJUL FR_QTRJUL+12 /* Quarterly - July year end*/ +#define FR_QTRSAUG FR_QTRAUG+12 /* Quarterly - August year end*/ +#define FR_QTRSSEP FR_QTRSEP+12 /* Quarterly - September year end*/ +#define FR_QTRSOCT FR_QTROCT+12 /* Quarterly - October year end*/ +#define FR_QTRSNOV FR_QTRNOV+12 /* Quarterly - November year end*/ + +#define FR_MTH 3000 /* Monthly */ + +#define FR_WK 4000 /* Weekly */ +#define FR_WKSUN FR_WK /* Weekly - Sunday end of week */ +#define FR_WKMON 4001 /* Weekly - Monday end of week */ +#define FR_WKTUE 4002 /* Weekly - Tuesday end of week */ +#define FR_WKWED 4003 /* Weekly - Wednesday end of week */ +#define FR_WKTHU 4004 /* Weekly - Thursday end of week */ +#define FR_WKFRI 4005 /* Weekly - Friday end of week */ +#define FR_WKSAT 4006 /* Weekly - Saturday end of week */ + +#define FR_BUS 5000 /* Business days */ +#define FR_DAY 6000 /* Daily */ +#define FR_HR 7000 /* Hourly */ +#define FR_MIN 8000 /* Minutely */ +#define FR_SEC 9000 /* Secondly */ +#define FR_UND -10000 /* Undefined */ + +//////////////////////////////////////////////////// + +int get_freq_group(int); + +typedef struct { + int from_week_end; //day the week ends on in the "from" frequency + int to_week_end; //day the week ends on in the "to" frequency + + int from_a_year_end; //month the year ends on in the "from" frequency + int to_a_year_end; //month the year ends on in the "to" frequency + + int from_q_year_end; //month the year ends on in the "from" frequency + int to_q_year_end; //month the year ends on in the "to" frequency +} asfreq_info; + +int check_freq(PyObject *); +void get_asfreq_info(int, int, asfreq_info*); +long (*get_asfreq_func(int, int, int))(long, char, asfreq_info*); + +#define CHECK_ASFREQ(result) if ((result) == INT_ERR_CODE) return NULL + +PyObject *DateArray_asfreq(PyObject *, PyObject *); +PyObject *DateArray_getDateInfo(PyObject *, PyObject *); + + +PyObject *c_dates_thisday(PyObject *, PyObject *); +PyObject *c_dates_check_freq(PyObject *, PyObject *); +PyObject *c_dates_check_freq_str(PyObject *, PyObject *); +PyObject *c_dates_get_freq_group(PyObject *, PyObject *); + +PyObject *set_callback_DateFromString(PyObject *, PyObject *); +PyObject 
*set_callback_DateTimeFromString(PyObject *, PyObject *); + +void import_c_dates(PyObject *); + +#endif Deleted: trunk/scipy/sandbox/timeseries/include/c_tdates.h =================================================================== --- trunk/scipy/sandbox/timeseries/include/c_tdates.h 2007-09-19 03:32:25 UTC (rev 3329) +++ trunk/scipy/sandbox/timeseries/include/c_tdates.h 2007-09-19 03:37:47 UTC (rev 3330) @@ -1,123 +0,0 @@ -#ifndef C_TDATES_H -#define C_TDATES_H - -#include "c_lib.h" - -#define HIGHFREQ_ORIG 719163 - -/*** FREQUENCY CONSTANTS ***/ - -#define FR_ANN 1000 /* Annual */ -#define FR_ANNDEC FR_ANN /* Annual - December year end*/ -#define FR_ANNJAN 1001 /* Annual - January year end*/ -#define FR_ANNFEB 1002 /* Annual - February year end*/ -#define FR_ANNMAR 1003 /* Annual - March year end*/ -#define FR_ANNAPR 1004 /* Annual - April year end*/ -#define FR_ANNMAY 1005 /* Annual - May year end*/ -#define FR_ANNJUN 1006 /* Annual - June year end*/ -#define FR_ANNJUL 1007 /* Annual - July year end*/ -#define FR_ANNAUG 1008 /* Annual - August year end*/ -#define FR_ANNSEP 1009 /* Annual - September year end*/ -#define FR_ANNOCT 1010 /* Annual - October year end*/ -#define FR_ANNNOV 1011 /* Annual - November year end*/ - -/* The standard quarterly frequencies. Year is determined by what year the end - month lies in. */ -#define FR_QTR 2000 /* Quarterly - December year end (default quarterly) */ -#define FR_QTRDEC FR_QTR /* Quarterly - December year end */ -#define FR_QTRJAN 2001 /* Quarterly - January year end */ -#define FR_QTRFEB 2002 /* Quarterly - February year end */ -#define FR_QTRMAR 2003 /* Quarterly - March year end */ -#define FR_QTRAPR 2004 /* Quarterly - April year end */ -#define FR_QTRMAY 2005 /* Quarterly - May year end */ -#define FR_QTRJUN 2006 /* Quarterly - June year end */ -#define FR_QTRJUL 2007 /* Quarterly - July year end */ -#define FR_QTRAUG 2008 /* Quarterly - August year end */ -#define FR_QTRSEP 2009 /* Quarterly - September year end */ -#define FR_QTROCT 2010 /* Quarterly - October year end */ -#define FR_QTRNOV 2011 /* Quarterly - November year end */ - -/* End period based quarterly frequencies. Year is determined by what year the - end month lies in. */ -#define FR_QTREDEC FR_QTRDEC /* Quarterly - December year end*/ -#define FR_QTREJAN FR_QTRJAN /* Quarterly - January year end*/ -#define FR_QTREFEB FR_QTRFEB /* Quarterly - February year end*/ -#define FR_QTREMAR FR_QTRMAR /* Quarterly - March year end*/ -#define FR_QTREAPR FR_QTRAPR /* Quarterly - April year end*/ -#define FR_QTREMAY FR_QTRMAY /* Quarterly - May year end*/ -#define FR_QTREJUN FR_QTRJUN /* Quarterly - June year end*/ -#define FR_QTREJUL FR_QTRJUL /* Quarterly - July year end*/ -#define FR_QTREAUG FR_QTRAUG /* Quarterly - August year end*/ -#define FR_QTRESEP FR_QTRSEP /* Quarterly - September year end*/ -#define FR_QTREOCT FR_QTROCT /* Quarterly - October year end*/ -#define FR_QTRENOV FR_QTRNOV /* Quarterly - November year end*/ - -/* Starting period based quarterly frequencies. Year is determined by what year - the starting month lies in. 
*/ -#define FR_QTRSDEC FR_QTRDEC+12 /* Quarterly - December year end*/ -#define FR_QTRSJAN FR_QTRJAN+12 /* Quarterly - January year end*/ -#define FR_QTRSFEB FR_QTRFEB+12 /* Quarterly - February year end*/ -#define FR_QTRSMAR FR_QTRMAR+12 /* Quarterly - March year end*/ -#define FR_QTRSAPR FR_QTRAPR+12 /* Quarterly - April year end*/ -#define FR_QTRSMAY FR_QTRMAY+12 /* Quarterly - May year end*/ -#define FR_QTRSJUN FR_QTRJUN+12 /* Quarterly - June year end*/ -#define FR_QTRSJUL FR_QTRJUL+12 /* Quarterly - July year end*/ -#define FR_QTRSAUG FR_QTRAUG+12 /* Quarterly - August year end*/ -#define FR_QTRSSEP FR_QTRSEP+12 /* Quarterly - September year end*/ -#define FR_QTRSOCT FR_QTROCT+12 /* Quarterly - October year end*/ -#define FR_QTRSNOV FR_QTRNOV+12 /* Quarterly - November year end*/ - -#define FR_MTH 3000 /* Monthly */ - -#define FR_WK 4000 /* Weekly */ -#define FR_WKSUN FR_WK /* Weekly - Sunday end of week */ -#define FR_WKMON 4001 /* Weekly - Monday end of week */ -#define FR_WKTUE 4002 /* Weekly - Tuesday end of week */ -#define FR_WKWED 4003 /* Weekly - Wednesday end of week */ -#define FR_WKTHU 4004 /* Weekly - Thursday end of week */ -#define FR_WKFRI 4005 /* Weekly - Friday end of week */ -#define FR_WKSAT 4006 /* Weekly - Saturday end of week */ - -#define FR_BUS 5000 /* Business days */ -#define FR_DAY 6000 /* Daily */ -#define FR_HR 7000 /* Hourly */ -#define FR_MIN 8000 /* Minutely */ -#define FR_SEC 9000 /* Secondly */ -#define FR_UND -10000 /* Undefined */ - -//////////////////////////////////////////////////// - -int get_freq_group(int); - -typedef struct { - int from_week_end; //day the week ends on in the "from" frequency - int to_week_end; //day the week ends on in the "to" frequency - - int from_a_year_end; //month the year ends on in the "from" frequency - int to_a_year_end; //month the year ends on in the "to" frequency - - int from_q_year_end; //month the year ends on in the "from" frequency - int to_q_year_end; //month the year ends on in the "to" frequency -} asfreq_info; - -int check_freq(PyObject *); -void get_asfreq_info(int, int, asfreq_info*); -long (*get_asfreq_func(int, int, int))(long, char, asfreq_info*); - -#define CHECK_ASFREQ(result) if ((result) == INT_ERR_CODE) return NULL - -PyObject *DateArray_asfreq(PyObject *, PyObject *); -PyObject *DateArray_getDateInfo(PyObject *, PyObject *); - - -PyObject *c_tdates_thisday(PyObject *, PyObject *); -PyObject *c_tdates_check_freq(PyObject *, PyObject *); -PyObject *c_tdates_check_freq_str(PyObject *, PyObject *); -PyObject *c_tdates_get_freq_group(PyObject *, PyObject *); - -PyObject *set_callback_DateFromString(PyObject *, PyObject *); -PyObject *set_callback_DateTimeFromString(PyObject *, PyObject *); - -void import_c_tdates(PyObject *); - -#endif Modified: trunk/scipy/sandbox/timeseries/setup.py =================================================================== --- trunk/scipy/sandbox/timeseries/setup.py 2007-09-19 03:32:25 UTC (rev 3329) +++ trunk/scipy/sandbox/timeseries/setup.py 2007-09-19 03:37:47 UTC (rev 3330) @@ -11,7 +11,7 @@ nxheader = join(get_numpy_include_dirs()[0],'numpy',) confgr = Configuration('timeseries',parent_package,top_path) sources = [join('src', x) for x in ('c_lib.c', - 'c_tdates.c', + 'c_dates.c', 'c_tseries.c', 'cseries.c')] confgr.add_extension('cseries', @@ -19,8 +19,6 @@ include_dirs=[nxheader, 'include']) confgr.add_subpackage('lib') - confgr.add_subpackage('io') - confgr.add_subpackage('plotlib') confgr.add_subpackage('tests') return confgr Copied: 
trunk/scipy/sandbox/timeseries/src/c_dates.c (from rev 3328, trunk/scipy/sandbox/timeseries/src/c_tdates.c) =================================================================== --- trunk/scipy/sandbox/timeseries/src/c_tdates.c 2007-09-19 01:01:15 UTC (rev 3328) +++ trunk/scipy/sandbox/timeseries/src/c_dates.c 2007-09-19 03:37:47 UTC (rev 3330) @@ -0,0 +1,2741 @@ +#include "c_dates.h" +#include +#include + + +int get_freq_group(int freq) { return (freq/1000)*1000; } + +static asfreq_info NULL_AF_INFO; + +/********************************************************* +** Python callbacks. These functions must be called by ** +** the module __init__ script ** +*********************************************************/ + +static PyObject *DateFromString = NULL; +PyObject * +set_callback_DateFromString(PyObject *dummy, PyObject *args) { + return set_callback(args, &DateFromString); +} + +static PyObject *DateTimeFromString = NULL; +PyObject * +set_callback_DateTimeFromString(PyObject *dummy, PyObject *args) { + return set_callback(args, &DateTimeFromString); +} + +//DERIVED FROM mx.DateTime +/* + Functions in the following section are borrowed from mx.DateTime version + 2.0.6, and hence this code is subject to the terms of the egenix public + license version 1.0.0 +*/ + +#define Py_AssertWithArg(x,errortype,errorstr,a1) {if (!(x)) {PyErr_Format(errortype,errorstr,a1);goto onError;}} +#define Py_Error(errortype,errorstr) {PyErr_SetString(errortype,errorstr);goto onError;} + + /* Error Exception objects */ +static PyObject *DateCalc_Error; +static PyObject *DateCalc_RangeError; + +#define GREGORIAN_CALENDAR 0 +#define JULIAN_CALENDAR 1 + +#define SECONDS_PER_DAY ((double) 86400.0) + +/* Table with day offsets for each month (0-based, without and with leap) */ +static int month_offset[2][13] = { + { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 }, + { 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 } +}; + +/* Table of number of days in a month (0-based, without and with leap) */ +static int days_in_month[2][12] = { + { 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 }, + { 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 } +}; + +struct date_info { + long absdate; + double abstime; + + double second; + int minute; + int hour; + int day; + int month; + int quarter; + int year; + int day_of_week; + int day_of_year; + int calendar; +}; + + +/* Return 1/0 iff year points to a leap year in calendar. */ +static +int dInfoCalc_Leapyear(register long year, + int calendar) +{ + if (calendar == GREGORIAN_CALENDAR) { + return (year % 4 == 0) && ((year % 100 != 0) || (year % 400 == 0)); + } else { + return (year % 4 == 0); + } +} + +static +int dInfoCalc_ISOWeek(struct date_info *dinfo) +{ + int week; + + /* Estimate */ + week = (dinfo->day_of_year-1) - dinfo->day_of_week + 3; + if (week >= 0) week = week / 7 + 1; + + /* Verify */ + if (week < 0) { + /* The day lies in last week of the previous year */ + if ((week > -2) || + (week == -2 && dInfoCalc_Leapyear(dinfo->year-1, dinfo->calendar))) + week = 53; + else + week = 52; + } else if (week == 53) { + /* Check if the week belongs to year or year+1 */ + if (31-dinfo->day + dinfo->day_of_week < 3) { + week = 1; + } + } + + return week; +} + + +/* Return the day of the week for the given absolute date. 
*/ +static +int dInfoCalc_DayOfWeek(register long absdate) +{ + int day_of_week; + + if (absdate >= 1) { + day_of_week = (absdate - 1) % 7; + } else { + day_of_week = 6 - ((-absdate) % 7); + } + return day_of_week; +} + +/* Return the year offset, that is the absolute date of the day + 31.12.(year-1) in the given calendar. + + Note: + For the Julian calendar we shift the absdate (which is measured + using the Gregorian Epoch) value by two days because the Epoch + (0001-01-01) in the Julian calendar lies 2 days before the Epoch in + the Gregorian calendar. */ +static +int dInfoCalc_YearOffset(register long year, + int calendar) +{ + year--; + if (calendar == GREGORIAN_CALENDAR) { + if (year >= 0 || -1/4 == -1) + return year*365 + year/4 - year/100 + year/400; + else + return year*365 + (year-3)/4 - (year-99)/100 + (year-399)/400; + } + else if (calendar == JULIAN_CALENDAR) { + if (year >= 0 || -1/4 == -1) + return year*365 + year/4 - 2; + else + return year*365 + (year-3)/4 - 2; + } + Py_Error(DateCalc_Error, "unknown calendar"); + onError: + return -1; +} + + +/* Set the instance's value using the given date and time. calendar + may be set to the flags: GREGORIAN_CALENDAR, + JULIAN_CALENDAR to indicate the calendar to be used. */ + +static +int dInfoCalc_SetFromDateAndTime(struct date_info *dinfo, + int year, + int month, + int day, + int hour, + int minute, + double second, + int calendar) +{ + + /* Calculate the absolute date */ + { + int leap; + long yearoffset,absdate; + + /* Range check */ + Py_AssertWithArg(year > -(INT_MAX / 366) && year < (INT_MAX / 366), + DateCalc_RangeError, + "year out of range: %i", + year); + + /* Is it a leap year ? */ + leap = dInfoCalc_Leapyear(year,calendar); + + /* Negative month values indicate months relative to the years end */ + if (month < 0) month += 13; + Py_AssertWithArg(month >= 1 && month <= 12, + DateCalc_RangeError, + "month out of range (1-12): %i", + month); + + /* Negative values indicate days relative to the months end */ + if (day < 0) day += days_in_month[leap][month - 1] + 1; + Py_AssertWithArg(day >= 1 && day <= days_in_month[leap][month - 1], + DateCalc_RangeError, + "day out of range: %i", + day); + + yearoffset = dInfoCalc_YearOffset(year,calendar); + if (PyErr_Occurred()) goto onError; + + absdate = day + month_offset[leap][month - 1] + yearoffset; + + dinfo->absdate = absdate; + + dinfo->year = year; + dinfo->month = month; + dinfo->quarter = ((month-1)/3)+1; + dinfo->day = day; + + dinfo->day_of_week = dInfoCalc_DayOfWeek(absdate); + dinfo->day_of_year = (short)(absdate - yearoffset); + + dinfo->calendar = calendar; + } + + /* Calculate the absolute time */ + { + Py_AssertWithArg(hour >= 0 && hour <= 23, + DateCalc_RangeError, + "hour out of range (0-23): %i", + hour); + Py_AssertWithArg(minute >= 0 && minute <= 59, + DateCalc_RangeError, + "minute out of range (0-59): %i", + minute); + Py_AssertWithArg(second >= (double)0.0 && + (second < (double)60.0 || + (hour == 23 && minute == 59 && + second < (double)61.0)), + DateCalc_RangeError, + "second out of range (0.0 - <60.0; <61.0 for 23:59): %f", + second); + + dinfo->abstime = (double)(hour*3600 + minute*60) + second; + + dinfo->hour = hour; + dinfo->minute = minute; + dinfo->second = second; + } + return 0; + onError: + return -1; +} + +static int monthToQuarter(int month) { return ((month-1)/3)+1; } + +/* Sets the date part of the date_info struct using the indicated + calendar. 
+ + XXX This could also be done using some integer arithmetics rather + than with this iterative approach... */ +static +int dInfoCalc_SetFromAbsDate(register struct date_info *dinfo, + long absdate, + int calendar) +{ + register long year; + long yearoffset; + int leap,dayoffset; + int *monthoffset; + + /* Approximate year */ + if (calendar == GREGORIAN_CALENDAR) { + year = (long)(((double)absdate) / 365.2425); + } else if (calendar == JULIAN_CALENDAR) { + year = (long)(((double)absdate) / 365.25); + } else { + Py_Error(DateCalc_Error, "unknown calendar"); + } + if (absdate > 0) year++; + + /* Apply corrections to reach the correct year */ + while (1) { + /* Calculate the year offset */ + yearoffset = dInfoCalc_YearOffset(year,calendar); + if (PyErr_Occurred()) + goto onError; + + /* Backward correction: absdate must be greater than the + yearoffset */ + if (yearoffset >= absdate) { + year--; + continue; + } + + dayoffset = absdate - yearoffset; + leap = dInfoCalc_Leapyear(year,calendar); + + /* Forward correction: non leap years only have 365 days */ + if (dayoffset > 365 && !leap) { + year++; + continue; + } + break; + } + + dinfo->year = year; + dinfo->calendar = calendar; + + /* Now iterate to find the month */ + monthoffset = month_offset[leap]; + { + register int month; + + for (month = 1; month < 13; month++) { + if (monthoffset[month] >= dayoffset) + break; + } + + dinfo->month = month; + dinfo->quarter = monthToQuarter(month); + dinfo->day = dayoffset - month_offset[leap][month-1]; + } + + + dinfo->day_of_week = dInfoCalc_DayOfWeek(absdate); + dinfo->day_of_year = dayoffset; + dinfo->absdate = absdate; + + return 0; + + onError: + return -1; +} + +/* Sets the time part of the DateTime object. */ +static +int dInfoCalc_SetFromAbsTime(struct date_info *dinfo, + double abstime) +{ + int inttime; + int hour,minute; + double second; + + inttime = (int)abstime; + hour = inttime / 3600; + minute = (inttime % 3600) / 60; + second = abstime - (double)(hour*3600 + minute*60); + + dinfo->hour = hour; + dinfo->minute = minute; + dinfo->second = second; + + dinfo->abstime = abstime; + + return 0; +} + +/* Set the instance's value using the given date and time. calendar + may be set to the flags: GREGORIAN_CALENDAR, JULIAN_CALENDAR to + indicate the calendar to be used. 
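The time half of that combined setter is dInfoCalc_SetFromAbsTime just above; its split of seconds-since-midnight is simple enough to restate. A direct Python transliteration (hypothetical helper name, Python 2 style):

    def hms_from_abstime(abstime):
        # transliterates dInfoCalc_SetFromAbsTime: seconds since midnight
        # -> (hour, minute, second); `second` keeps the fractional part
        inttime = int(abstime)
        hour = inttime // 3600
        minute = (inttime % 3600) // 60
        second = abstime - (hour * 3600 + minute * 60)
        return hour, minute, second

    print hms_from_abstime(45296.5)   # (12, 34, 56.5)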
*/ +static +int dInfoCalc_SetFromAbsDateTime(struct date_info *dinfo, + long absdate, + double abstime, + int calendar) +{ + + /* Bounds check */ + Py_AssertWithArg(abstime >= 0.0 && abstime <= SECONDS_PER_DAY, + DateCalc_Error, + "abstime out of range (0.0 - 86400.0): %f", + abstime); + + /* Calculate the date */ + if (dInfoCalc_SetFromAbsDate(dinfo, + absdate, + calendar)) + goto onError; + + /* Calculate the time */ + if (dInfoCalc_SetFromAbsTime(dinfo, + abstime)) + goto onError; + + return 0; + onError: + return -1; +} + +/* +==================================================== +== End of section borrowed from mx.DateTime == +==================================================== +*/ + + + + + +/////////////////////////////////////////////////////////////////////// + +// helpers for frequency conversion routines // + +static long DtoB_weekday(long fromDate) { return (((fromDate) / 7) * 5) + (fromDate)%7; } + +static long DtoB_WeekendToMonday(long absdate, int day_of_week) { + + if (day_of_week > 4) { + //change to Monday after weekend + absdate += (7 - day_of_week); + } + return DtoB_weekday(absdate); +} + +static long DtoB_WeekendToFriday(long absdate, int day_of_week) { + + if (day_of_week > 4) { + //change to friday before weekend + absdate -= (day_of_week - 4); + } + return DtoB_weekday(absdate); +} + +static long absdate_from_ymd(int y, int m, int d) { + struct date_info tempDate; + if (dInfoCalc_SetFromDateAndTime(&tempDate, y, m, d, 0, 0, 0, GREGORIAN_CALENDAR)) return INT_ERR_CODE; + return tempDate.absdate; +} + + +/////////////////////////////////////////////// + +// frequency specifc conversion routines +// each function must take an integer fromDate and a char relation ('B' or 'A' for 'BEFORE' or 'AFTER') + +//************ FROM DAILY *************** + +static long asfreq_DtoA(long fromDate, char relation, asfreq_info *af_info) { + + struct date_info dinfo; + if (dInfoCalc_SetFromAbsDate(&dinfo, fromDate, + GREGORIAN_CALENDAR)) return INT_ERR_CODE; + if (dinfo.month > af_info->to_a_year_end) { return (long)(dinfo.year + 1); } + else { return (long)(dinfo.year); } +} + +static long DtoQ_yq(long fromDate, asfreq_info *af_info, + int *year, int *quarter) { + struct date_info dinfo; + if (dInfoCalc_SetFromAbsDate(&dinfo, fromDate, + GREGORIAN_CALENDAR)) return INT_ERR_CODE; + if (af_info->to_q_year_end != 12) { + dinfo.month -= af_info->to_q_year_end; + if (dinfo.month <= 0) { dinfo.month += 12; } + else { dinfo.year += 1; } + dinfo.quarter = monthToQuarter(dinfo.month); + } + + *year = dinfo.year; + *quarter = dinfo.quarter; + + return 0; +} + + +static long asfreq_DtoQ(long fromDate, char relation, asfreq_info *af_info) { + + int year, quarter; + + if (DtoQ_yq(fromDate, af_info, &year, &quarter) == INT_ERR_CODE) + { return INT_ERR_CODE; } + + return (long)((year - 1) * 4 + quarter); +} + +static long asfreq_DtoM(long fromDate, char relation, asfreq_info *af_info) { + + struct date_info dinfo; + if (dInfoCalc_SetFromAbsDate(&dinfo, fromDate, + GREGORIAN_CALENDAR)) return INT_ERR_CODE; + return (long)((dinfo.year - 1) * 12 + dinfo.month); +} + +static long asfreq_DtoW(long fromDate, char relation, asfreq_info *af_info) { + return (fromDate - (1 + af_info->to_week_end))/7 + 1; +} + +static long asfreq_DtoB(long fromDate, char relation, asfreq_info *af_info) { + + struct date_info dinfo; + if (dInfoCalc_SetFromAbsDate(&dinfo, fromDate, + GREGORIAN_CALENDAR)) return INT_ERR_CODE; + + if (relation == 'B') { + return DtoB_WeekendToFriday(dinfo.absdate, dinfo.day_of_week); + } else { + 
return DtoB_WeekendToMonday(dinfo.absdate, dinfo.day_of_week); + } +} + +static long asfreq_DtoB_forConvert(long fromDate, char relation, asfreq_info *af_info) { + + struct date_info dinfo; + if (dInfoCalc_SetFromAbsDate(&dinfo, fromDate, + GREGORIAN_CALENDAR)) return INT_ERR_CODE; + + if (dinfo.day_of_week > 4) { + return -1; + } else { + return DtoB_weekday(fromDate); + } +} + +// needed for getDateInfo function +static long asfreq_DtoD(long fromDate, char relation, asfreq_info *af_info) { return fromDate; } + +static long asfreq_DtoHIGHFREQ(long fromDate, char relation, long periodsPerDay) { + if (fromDate >= HIGHFREQ_ORIG) { + if (relation == 'B') { return (fromDate - HIGHFREQ_ORIG)*(periodsPerDay) + 1; } + else { return (fromDate - HIGHFREQ_ORIG + 1)*(periodsPerDay); } + } else { return -1; } +} + +static long asfreq_DtoH(long fromDate, char relation, asfreq_info *af_info) + { return asfreq_DtoHIGHFREQ(fromDate, relation, 24); } +static long asfreq_DtoT(long fromDate, char relation, asfreq_info *af_info) + { return asfreq_DtoHIGHFREQ(fromDate, relation, 24*60); } +static long asfreq_DtoS(long fromDate, char relation, asfreq_info *af_info) + { return asfreq_DtoHIGHFREQ(fromDate, relation, 24*60*60); } + +//************ FROM SECONDLY *************** + +static long asfreq_StoD(long fromDate, char relation, asfreq_info *af_info) + { return (fromDate - 1)/(60*60*24) + HIGHFREQ_ORIG; } + +static long asfreq_StoA(long fromDate, char relation, asfreq_info *af_info) + { return asfreq_DtoA(asfreq_StoD(fromDate, relation, &NULL_AF_INFO), relation, af_info); } +static long asfreq_StoQ(long fromDate, char relation, asfreq_info *af_info) + { return asfreq_DtoQ(asfreq_StoD(fromDate, relation, &NULL_AF_INFO), relation, af_info); } +static long asfreq_StoM(long fromDate, char relation, asfreq_info *af_info) + { return asfreq_DtoM(asfreq_StoD(fromDate, relation, &NULL_AF_INFO), relation, &NULL_AF_INFO); } +static long asfreq_StoW(long fromDate, char relation, asfreq_info *af_info) + { return asfreq_DtoW(asfreq_StoD(fromDate, relation, &NULL_AF_INFO), relation, af_info); } +static long asfreq_StoB(long fromDate, char relation, asfreq_info *af_info) + { return asfreq_DtoB(asfreq_StoD(fromDate, relation, &NULL_AF_INFO), relation, &NULL_AF_INFO); } +static long asfreq_StoB_forConvert(long fromDate, char relation, asfreq_info *af_info) + { return asfreq_DtoB_forConvert(asfreq_StoD(fromDate, relation, &NULL_AF_INFO), relation, &NULL_AF_INFO); } +static long asfreq_StoT(long fromDate, char relation, asfreq_info *af_info) + { return (fromDate - 1)/60 + 1; } +static long asfreq_StoH(long fromDate, char relation, asfreq_info *af_info) + { return (fromDate - 1)/(60*60) + 1; } + +//************ FROM MINUTELY *************** + +static long asfreq_TtoD(long fromDate, char relation, asfreq_info *af_info) + { return (fromDate - 1)/(60*24) + HIGHFREQ_ORIG; } + +static long asfreq_TtoA(long fromDate, char relation, asfreq_info *af_info) + { return asfreq_DtoA(asfreq_TtoD(fromDate, relation, &NULL_AF_INFO), relation, af_info); } +static long asfreq_TtoQ(long fromDate, char relation, asfreq_info *af_info) + { return asfreq_DtoQ(asfreq_TtoD(fromDate, relation, &NULL_AF_INFO), relation, af_info); } +static long asfreq_TtoM(long fromDate, char relation, asfreq_info *af_info) + { return asfreq_DtoM(asfreq_TtoD(fromDate, relation, &NULL_AF_INFO), relation, &NULL_AF_INFO); } +static long asfreq_TtoW(long fromDate, char relation, asfreq_info *af_info) + { return asfreq_DtoW(asfreq_TtoD(fromDate, relation, &NULL_AF_INFO), 
relation, af_info); } +static long asfreq_TtoB(long fromDate, char relation, asfreq_info *af_info) + { return asfreq_DtoB(asfreq_TtoD(fromDate, relation, &NULL_AF_INFO), relation, &NULL_AF_INFO); } + +static long asfreq_TtoB_forConvert(long fromDate, char relation, asfreq_info *af_info) + { return asfreq_DtoB_forConvert(asfreq_TtoD(fromDate, relation, &NULL_AF_INFO), relation, &NULL_AF_INFO); } + +static long asfreq_TtoH(long fromDate, char relation, asfreq_info *af_info) + { return (fromDate - 1)/60 + 1; } +static long asfreq_TtoS(long fromDate, char relation, asfreq_info *af_info) { + if (relation == 'B') { return fromDate*60 - 59; } + else { return fromDate*60; }} + +//************ FROM HOURLY *************** + +static long asfreq_HtoD(long fromDate, char relation, asfreq_info *af_info) + { return (fromDate - 1)/24 + HIGHFREQ_ORIG; } +static long asfreq_HtoA(long fromDate, char relation, asfreq_info *af_info) + { return asfreq_DtoA(asfreq_HtoD(fromDate, relation, &NULL_AF_INFO), relation, af_info); } +static long asfreq_HtoQ(long fromDate, char relation, asfreq_info *af_info) + { return asfreq_DtoQ(asfreq_HtoD(fromDate, relation, &NULL_AF_INFO), relation, af_info); } +static long asfreq_HtoM(long fromDate, char relation, asfreq_info *af_info) + { return asfreq_DtoM(asfreq_HtoD(fromDate, relation, &NULL_AF_INFO), relation, &NULL_AF_INFO); } +static long asfreq_HtoW(long fromDate, char relation, asfreq_info *af_info) + { return asfreq_DtoW(asfreq_HtoD(fromDate, relation, &NULL_AF_INFO), relation, af_info); } +static long asfreq_HtoB(long fromDate, char relation, asfreq_info *af_info) + { return asfreq_DtoB(asfreq_HtoD(fromDate, relation, &NULL_AF_INFO), relation, &NULL_AF_INFO); } + +static long asfreq_HtoB_forConvert(long fromDate, char relation, asfreq_info *af_info) + { return asfreq_DtoB_forConvert(asfreq_HtoD(fromDate, relation, &NULL_AF_INFO), relation, &NULL_AF_INFO); } + +// calculation works out the same as TtoS, so we just call that function for HtoT +static long asfreq_HtoT(long fromDate, char relation, asfreq_info *af_info) + { return asfreq_TtoS(fromDate, relation, &NULL_AF_INFO); } +static long asfreq_HtoS(long fromDate, char relation, asfreq_info *af_info) { + if (relation == 'B') { return fromDate*60*60 - 60*60 + 1; } + else { return fromDate*60*60; }} + +//************ FROM BUSINESS *************** + +static long asfreq_BtoD(long fromDate, char relation, asfreq_info *af_info) + { return ((fromDate-1)/5)*7 + (fromDate-1)%5 + 1; } + +static long asfreq_BtoA(long fromDate, char relation, asfreq_info *af_info) + { return asfreq_DtoA(asfreq_BtoD(fromDate, relation, &NULL_AF_INFO), relation, af_info); } + +static long asfreq_BtoQ(long fromDate, char relation, asfreq_info *af_info) + { return asfreq_DtoQ(asfreq_BtoD(fromDate, relation, &NULL_AF_INFO), relation, af_info); } + +static long asfreq_BtoM(long fromDate, char relation, asfreq_info *af_info) + { return asfreq_DtoM(asfreq_BtoD(fromDate, relation, &NULL_AF_INFO), relation, &NULL_AF_INFO); } + +static long asfreq_BtoW(long fromDate, char relation, asfreq_info *af_info) + { return asfreq_DtoW(asfreq_BtoD(fromDate, relation, &NULL_AF_INFO), relation, af_info); } + +static long asfreq_BtoH(long fromDate, char relation, asfreq_info *af_info) + { return asfreq_DtoH(asfreq_BtoD(fromDate, relation, &NULL_AF_INFO), relation, &NULL_AF_INFO); } + +static long asfreq_BtoT(long fromDate, char relation, asfreq_info *af_info) + { return asfreq_DtoT(asfreq_BtoD(fromDate, relation, &NULL_AF_INFO), relation, &NULL_AF_INFO); } + +static 
long asfreq_BtoS(long fromDate, char relation, asfreq_info *af_info) + { return asfreq_DtoS(asfreq_BtoD(fromDate, relation, &NULL_AF_INFO), relation, &NULL_AF_INFO); } + +//************ FROM WEEKLY *************** + +static long asfreq_WtoD(long fromDate, char relation, asfreq_info *af_info) { + if (relation == 'B') { return fromDate * 7 - 6 + af_info->from_week_end;} + else { return fromDate * 7 + af_info->from_week_end; } +} + +static long asfreq_WtoA(long fromDate, char relation, asfreq_info *af_info) { + return asfreq_DtoA(asfreq_WtoD(fromDate, 'A', af_info), relation, af_info); } +static long asfreq_WtoQ(long fromDate, char relation, asfreq_info *af_info) { + return asfreq_DtoQ(asfreq_WtoD(fromDate, 'A', af_info), relation, af_info); } +static long asfreq_WtoM(long fromDate, char relation, asfreq_info *af_info) { + return asfreq_DtoM(asfreq_WtoD(fromDate, 'A', af_info), relation, &NULL_AF_INFO); } + +static long asfreq_WtoW(long fromDate, char relation, asfreq_info *af_info) + { return asfreq_DtoW(asfreq_WtoD(fromDate, relation, af_info), relation, af_info); } + +static long asfreq_WtoB(long fromDate, char relation, asfreq_info *af_info) { + + struct date_info dinfo; + if (dInfoCalc_SetFromAbsDate(&dinfo, asfreq_WtoD(fromDate, relation, af_info), + GREGORIAN_CALENDAR)) return INT_ERR_CODE; + + if (relation == 'B') { return DtoB_WeekendToMonday(dinfo.absdate, dinfo.day_of_week); } + else { return DtoB_WeekendToFriday(dinfo.absdate, dinfo.day_of_week); } +} + +static long asfreq_WtoH(long fromDate, char relation, asfreq_info *af_info) + { return asfreq_DtoH(asfreq_WtoD(fromDate, relation, af_info), relation, &NULL_AF_INFO); } +static long asfreq_WtoT(long fromDate, char relation, asfreq_info *af_info) + { return asfreq_DtoT(asfreq_WtoD(fromDate, relation, af_info), relation, &NULL_AF_INFO); } +static long asfreq_WtoS(long fromDate, char relation, asfreq_info *af_info) + { return asfreq_DtoS(asfreq_WtoD(fromDate, relation, af_info), relation, &NULL_AF_INFO); } + +//************ FROM MONTHLY *************** + +static void MtoD_ym(long fromDate, long *y, long *m) { + *y = (fromDate - 1) / 12 + 1; + *m = fromDate - 12 * (*y) - 1; +} + +static long asfreq_MtoD(long fromDate, char relation, asfreq_info *af_info) { + + long y, m, absdate; + + if (relation == 'B') { + MtoD_ym(fromDate, &y, &m); + if ((absdate = absdate_from_ymd(y, m, 1)) == INT_ERR_CODE) return INT_ERR_CODE; + return absdate; + } else { + MtoD_ym(fromDate+1, &y, &m); + if ((absdate = absdate_from_ymd(y, m, 1)) == INT_ERR_CODE) return INT_ERR_CODE; + return absdate-1; + } +} + +static long asfreq_MtoA(long fromDate, char relation, asfreq_info *af_info) { + return asfreq_DtoA(asfreq_MtoD(fromDate, 'A', &NULL_AF_INFO), relation, af_info); } + +static long asfreq_MtoQ(long fromDate, char relation, asfreq_info *af_info) { + return asfreq_DtoQ(asfreq_MtoD(fromDate, 'A', &NULL_AF_INFO), relation, af_info); } + +static long asfreq_MtoW(long fromDate, char relation, asfreq_info *af_info) + { return asfreq_DtoW(asfreq_MtoD(fromDate, relation, &NULL_AF_INFO), relation, af_info); } + +static long asfreq_MtoB(long fromDate, char relation, asfreq_info *af_info) { + + struct date_info dinfo; + if (dInfoCalc_SetFromAbsDate(&dinfo, asfreq_MtoD(fromDate, relation, &NULL_AF_INFO), + GREGORIAN_CALENDAR)) return INT_ERR_CODE; + + if (relation == 'B') { return DtoB_WeekendToMonday(dinfo.absdate, dinfo.day_of_week); } + else { return DtoB_WeekendToFriday(dinfo.absdate, dinfo.day_of_week); } +} + +static long asfreq_MtoH(long fromDate, char relation, 
asfreq_info *af_info) + { return asfreq_DtoH(asfreq_MtoD(fromDate, relation, &NULL_AF_INFO), relation, &NULL_AF_INFO); } +static long asfreq_MtoT(long fromDate, char relation, asfreq_info *af_info) + { return asfreq_DtoT(asfreq_MtoD(fromDate, relation, &NULL_AF_INFO), relation, &NULL_AF_INFO); } +static long asfreq_MtoS(long fromDate, char relation, asfreq_info *af_info) + { return asfreq_DtoS(asfreq_MtoD(fromDate, relation, &NULL_AF_INFO), relation, &NULL_AF_INFO); } + +//************ FROM QUARTERLY *************** + +static void QtoD_ym(long fromDate, long *y, long *m, asfreq_info *af_info) { + + *y = (fromDate - 1) / 4 + 1; + *m = (fromDate + 4) * 3 - 12 * (*y) - 2; + + if (af_info->from_q_year_end != 12) { + *m += af_info->from_q_year_end; + if (*m > 12) { *m -= 12; } + else { *y -= 1; } + } +} + +static long asfreq_QtoD(long fromDate, char relation, asfreq_info *af_info) { + + long y, m, absdate; + + if (relation == 'B') { + QtoD_ym(fromDate, &y, &m, af_info); + if ((absdate = absdate_from_ymd(y, m, 1)) == INT_ERR_CODE) return INT_ERR_CODE; + return absdate; + } else { + QtoD_ym(fromDate+1, &y, &m, af_info); + if ((absdate = absdate_from_ymd(y, m, 1)) == INT_ERR_CODE) return INT_ERR_CODE; + return absdate - 1; + } +} + +static long asfreq_QtoQ(long fromDate, char relation, asfreq_info *af_info) + { return asfreq_DtoQ(asfreq_QtoD(fromDate, relation, af_info), relation, af_info); } + +static long asfreq_QtoA(long fromDate, char relation, asfreq_info *af_info) { + return asfreq_DtoA(asfreq_QtoD(fromDate, relation, af_info), relation, af_info); } + +static long asfreq_QtoM(long fromDate, char relation, asfreq_info *af_info) { + return asfreq_DtoM(asfreq_QtoD(fromDate, relation, af_info), relation, &NULL_AF_INFO); } + +static long asfreq_QtoW(long fromDate, char relation, asfreq_info *af_info) + { return asfreq_DtoW(asfreq_QtoD(fromDate, relation, af_info), relation, af_info); } + +static long asfreq_QtoB(long fromDate, char relation, asfreq_info *af_info) { + + struct date_info dinfo; + if (dInfoCalc_SetFromAbsDate(&dinfo, asfreq_QtoD(fromDate, relation, af_info), + GREGORIAN_CALENDAR)) return INT_ERR_CODE; + + if (relation == 'B') { return DtoB_WeekendToMonday(dinfo.absdate, dinfo.day_of_week); } + else { return DtoB_WeekendToFriday(dinfo.absdate, dinfo.day_of_week); } +} + + +static long asfreq_QtoH(long fromDate, char relation, asfreq_info *af_info) + { return asfreq_DtoH(asfreq_QtoD(fromDate, relation, af_info), relation, &NULL_AF_INFO); } +static long asfreq_QtoT(long fromDate, char relation, asfreq_info *af_info) + { return asfreq_DtoT(asfreq_QtoD(fromDate, relation, af_info), relation, &NULL_AF_INFO); } +static long asfreq_QtoS(long fromDate, char relation, asfreq_info *af_info) + { return asfreq_DtoS(asfreq_QtoD(fromDate, relation, af_info), relation, &NULL_AF_INFO); } + + +//************ FROM ANNUAL *************** + +static long asfreq_AtoD(long fromDate, char relation, asfreq_info *af_info) { + long absdate, year, final_adj; + int month = (af_info->from_a_year_end) % 12; + + if (month == 0) { month = 1; } + else { month += 1; } + + if (relation == 'B') { + if (af_info->from_a_year_end == 12) {year = fromDate;} + else {year = fromDate - 1;} + final_adj = 0; + } else { + if (af_info->from_a_year_end == 12) {year = fromDate+1;} + else {year = fromDate;} + final_adj = -1; + } + absdate = absdate_from_ymd(year, month, 1); + if (absdate == INT_ERR_CODE) return INT_ERR_CODE; + return absdate + final_adj; +} + +static long asfreq_AtoA(long fromDate, char relation, asfreq_info *af_info) 
+ { return asfreq_DtoA(asfreq_AtoD(fromDate, relation, af_info), relation, af_info); } + +static long asfreq_AtoQ(long fromDate, char relation, asfreq_info *af_info) + { return asfreq_DtoQ(asfreq_AtoD(fromDate, relation, af_info), relation, af_info); } + +static long asfreq_AtoM(long fromDate, char relation, asfreq_info *af_info) + { return asfreq_DtoM(asfreq_AtoD(fromDate, relation, af_info), relation, af_info); } + +static long asfreq_AtoW(long fromDate, char relation, asfreq_info *af_info) + { return asfreq_DtoW(asfreq_AtoD(fromDate, relation, af_info), relation, af_info); } + +static long asfreq_AtoB(long fromDate, char relation, asfreq_info *af_info) { + + struct date_info dinfo; + if (dInfoCalc_SetFromAbsDate(&dinfo, asfreq_AtoD(fromDate, relation, af_info), + GREGORIAN_CALENDAR)) return INT_ERR_CODE; + + if (relation == 'B') { return DtoB_WeekendToMonday(dinfo.absdate, dinfo.day_of_week); } + else { return DtoB_WeekendToFriday(dinfo.absdate, dinfo.day_of_week); } +} + +static long asfreq_AtoH(long fromDate, char relation, asfreq_info *af_info) + { return asfreq_DtoH(asfreq_AtoD(fromDate, relation, af_info), relation, &NULL_AF_INFO); } +static long asfreq_AtoT(long fromDate, char relation, asfreq_info *af_info) + { return asfreq_DtoT(asfreq_AtoD(fromDate, relation, af_info), relation, &NULL_AF_INFO); } +static long asfreq_AtoS(long fromDate, char relation, asfreq_info *af_info) + { return asfreq_DtoS(asfreq_AtoD(fromDate, relation, af_info), relation, &NULL_AF_INFO); } + +static long nofunc(long fromDate, char relation, asfreq_info *af_info) { return -1; } + +// end of frequency specific conversion routines + +// return a pointer to appropriate conversion function +long (*get_asfreq_func(int fromFreq, int toFreq, int forConvert))(long, char, asfreq_info*) { + + int fromGroup = get_freq_group(fromFreq); + int toGroup = get_freq_group(toFreq); + + if (fromGroup == FR_UND) { fromGroup = FR_DAY; } + + switch(fromGroup) + { + case FR_ANN: + switch(toGroup) + { + case FR_ANN: return &asfreq_AtoA; + case FR_QTR: return &asfreq_AtoQ; + case FR_MTH: return &asfreq_AtoM; + case FR_WK: return &asfreq_AtoW; + case FR_BUS: return &asfreq_AtoB; + case FR_DAY: return &asfreq_AtoD; + case FR_HR: return &asfreq_AtoH; + case FR_MIN: return &asfreq_AtoT; + case FR_SEC: return &asfreq_AtoS; + default: return &nofunc; + } + + case FR_QTR: + switch(toGroup) + { + case FR_ANN: return &asfreq_QtoA; + case FR_QTR: return &asfreq_QtoQ; + case FR_MTH: return &asfreq_QtoM; + case FR_WK: return &asfreq_QtoW; + case FR_BUS: return &asfreq_QtoB; + case FR_DAY: return &asfreq_QtoD; + case FR_HR: return &asfreq_QtoH; + case FR_MIN: return &asfreq_QtoT; + case FR_SEC: return &asfreq_QtoS; + default: return &nofunc; + } + + case FR_MTH: + switch(toGroup) + { + case FR_ANN: return &asfreq_MtoA; + case FR_QTR: return &asfreq_MtoQ; + case FR_WK: return &asfreq_MtoW; + case FR_BUS: return &asfreq_MtoB; + case FR_DAY: return &asfreq_MtoD; + case FR_HR: return &asfreq_MtoH; + case FR_MIN: return &asfreq_MtoT; + case FR_SEC: return &asfreq_MtoS; + default: return &nofunc; + } + + case FR_WK: + switch(toGroup) + { + case FR_ANN: return &asfreq_WtoA; + case FR_QTR: return &asfreq_WtoQ; + case FR_MTH: return &asfreq_WtoM; + case FR_WK: return &asfreq_WtoW; + case FR_BUS: return &asfreq_WtoB; + case FR_DAY: return &asfreq_WtoD; + case FR_HR: return &asfreq_WtoH; + case FR_MIN: return &asfreq_WtoT; + case FR_SEC: return &asfreq_WtoS; + default: return &nofunc; + } + + case FR_BUS: + switch(toGroup) + { + case FR_ANN: return 
&asfreq_BtoA; + case FR_QTR: return &asfreq_BtoQ; + case FR_MTH: return &asfreq_BtoM; + case FR_WK: return &asfreq_BtoW; + case FR_DAY: return &asfreq_BtoD; + case FR_HR: return &asfreq_BtoH; + case FR_MIN: return &asfreq_BtoT; + case FR_SEC: return &asfreq_BtoS; + default: return &nofunc; + } + + case FR_DAY: + switch(toGroup) + { + case FR_ANN: return &asfreq_DtoA; + case FR_QTR: return &asfreq_DtoQ; + case FR_MTH: return &asfreq_DtoM; + case FR_WK: return &asfreq_DtoW; + case FR_BUS: + if (forConvert) { return &asfreq_DtoB_forConvert; } + else { return &asfreq_DtoB; } + case FR_DAY: return &asfreq_DtoD; + case FR_HR: return &asfreq_DtoH; + case FR_MIN: return &asfreq_DtoT; + case FR_SEC: return &asfreq_DtoS; + default: return &nofunc; + } + + case FR_HR: + switch(toGroup) + { + case FR_ANN: return &asfreq_HtoA; + case FR_QTR: return &asfreq_HtoQ; + case FR_MTH: return &asfreq_HtoM; + case FR_WK: return &asfreq_HtoW; + case FR_BUS: + if (forConvert) { return &asfreq_HtoB_forConvert; } + else { return &asfreq_HtoB; } + case FR_DAY: return &asfreq_HtoD; + case FR_MIN: return &asfreq_HtoT; + case FR_SEC: return &asfreq_HtoS; + default: return &nofunc; + } + + case FR_MIN: + switch(toGroup) + { + case FR_ANN: return &asfreq_TtoA; + case FR_QTR: return &asfreq_TtoQ; + case FR_MTH: return &asfreq_TtoM; + case FR_WK: return &asfreq_TtoW; + case FR_BUS: + if (forConvert) { return &asfreq_TtoB_forConvert; } + else { return &asfreq_TtoB; } + case FR_DAY: return &asfreq_TtoD; + case FR_HR: return &asfreq_TtoH; + case FR_SEC: return &asfreq_TtoS; + default: return &nofunc; + } + + case FR_SEC: + switch(toGroup) + { + case FR_ANN: return &asfreq_StoA; + case FR_QTR: return &asfreq_StoQ; + case FR_MTH: return &asfreq_StoM; + case FR_WK: return &asfreq_StoW; + case FR_BUS: + if (forConvert) { return &asfreq_StoB_forConvert; } + else { return &asfreq_StoB; } + case FR_DAY: return &asfreq_StoD; + case FR_HR: return &asfreq_StoH; + case FR_MIN: return &asfreq_StoT; + default: return &nofunc; + } + default: return &nofunc; + } +} + +static int calc_a_year_end(int freq, int group) { + int result = (freq - group) % 12; + if (result == 0) {return 12;} + else {return result;} +} + +static int calc_week_end(int freq, int group) { + return freq - group; +} + +void get_asfreq_info(int fromFreq, int toFreq, asfreq_info *af_info) { + + int fromGroup = get_freq_group(fromFreq); + int toGroup = get_freq_group(toFreq); + + switch(fromGroup) + { + case FR_WK: { + af_info->from_week_end = calc_week_end(fromFreq, fromGroup); + } break; + case FR_ANN: { + af_info->from_a_year_end = calc_a_year_end(fromFreq, fromGroup); + } break; + case FR_QTR: { + af_info->from_q_year_end = calc_a_year_end(fromFreq, fromGroup); + } break; + + } + + switch(toGroup) + { + case FR_WK: { + af_info->to_week_end = calc_week_end(toFreq, toGroup); + } break; + case FR_ANN: { + af_info->to_a_year_end = calc_a_year_end(toFreq, toGroup); + } break; + case FR_QTR: { + af_info->to_q_year_end = calc_a_year_end(toFreq, toGroup); + } break; + } + +} + +static double getAbsTime(int freq, long dailyDate, long originalDate) { + + long startOfDay, periodsPerDay; + + switch(freq) + { + case FR_HR: + periodsPerDay = 24; + break; + case FR_MIN: + periodsPerDay = 24*60; + break; + case FR_SEC: + periodsPerDay = 24*60*60; + break; + default: + return 24*60*60 - 1; + } + + startOfDay = asfreq_DtoHIGHFREQ(dailyDate, 'B', periodsPerDay); + return (24*60*60)*((double)(originalDate - startOfDay))/((double)periodsPerDay); +} + 
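+ /* Illustrative sketch of how the dispatch machinery above fits together.
+    The helper name example_convert_value is hypothetical (it appears nowhere
+    else in this module); it mirrors the pattern DateObject_asfreq uses
+    further down: fill in an asfreq_info for the frequency pair, look up the
+    conversion function, then apply it to the raw integer date value. */
+ static long example_convert_value(long value, int fromFreq, int toFreq) {
+     long (*convert)(long, char, asfreq_info*);
+     asfreq_info af_info;
+
+     get_asfreq_info(fromFreq, toFreq, &af_info);
+     convert = get_asfreq_func(fromFreq, toFreq, 0);
+
+     /* relation 'A' ("after") anchors the result at the end of the target
+        period, 'B' ("before") at the start; INT_ERR_CODE signals failure */
+     return convert(value, 'A', &af_info);
+ }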
+/************************************************************ +** Date type definition +************************************************************/ + +typedef struct { + PyObject_HEAD + int freq; /* frequency of date */ + int value; /* integer representation of date */ + PyObject* cached_vals; +} DateObject; + +/* Forward declarations */ +static PyTypeObject DateType; +#define DateObject_Check(op) PyObject_TypeCheck(op, &DateType) + +static void +DateObject_dealloc(DateObject* self) { + Py_XDECREF(self->cached_vals); + self->ob_type->tp_free((PyObject*)self); +} + + +static PyObject *freq_dict, *freq_dict_rev, *freq_constants; + +#define DICT_SETINT_STRKEY(dict, key, val) \ + {PyObject *pyval = PyInt_FromLong(val); \ + PyDict_SetItemString(dict, key, pyval); \ + Py_DECREF(pyval); } + +#define ADD_FREQ_CONSTANT(const_name, val) \ + DICT_SETINT_STRKEY(freq_constants, const_name, val) + +#define INIT_FREQ(const_name, key, aliases) \ + {PyObject *pykey = PyInt_FromLong(key); \ + PyDict_SetItem(freq_dict, pykey, aliases); \ + PyDict_SetItemString(freq_constants, const_name, pykey); \ + Py_DECREF(pykey); \ + Py_DECREF(aliases); } + + +static int init_freq_group(int num_items, int num_roots, int base_const, + char item_abbrevs[][2][10], char group_prefixes[][15], + char item_const_names[][15]) { + + int i; + + for (i = 0; i < num_items; i++) { + + PyObject *aliases; + int j, size, k; + + if (i == 0) { k = 3; } else { k = 2; } + + size = num_roots * k; + + aliases = PyTuple_New(size); + + for (j = 0; j < num_roots; j++) { + PyObject *alias_v1, *alias_v2; + char *root, *alt; + + if ((root = malloc((30) * sizeof(char))) == NULL) return INT_ERR_CODE; + if ((alt = malloc((30) * sizeof(char))) == NULL) return INT_ERR_CODE; + + strcpy(root, group_prefixes[j]); + strcpy(alt, group_prefixes[j]); + + if (i == 0) { + PyObject *alias = PyString_FromString(root); + PyTuple_SET_ITEM(aliases, j*k + 2, alias); + } + + strcat(root, "-"); + strcat(root, item_abbrevs[i][0]); + strcat(alt, "-"); + strcat(alt, item_abbrevs[i][1]); + + alias_v1 = PyString_FromString(root); + alias_v2 = PyString_FromString(alt); + + free(root); + free(alt); + + PyTuple_SET_ITEM(aliases, j*k, alias_v1); + PyTuple_SET_ITEM(aliases, j*k + 1, alias_v2); + } + + INIT_FREQ(item_const_names[i], base_const+i, aliases); + } + + return 0; +} + +/* take a dictionary with integer keys and tuples of strings for values, + and populate a dictionary with all the strings as keys and integers + for values */ +static int reverse_dict(PyObject *source, PyObject *dest) { + + PyObject *key, *value; + + Py_ssize_t pos = 0; + + while (PyDict_Next(source, &pos, &key, &value)) { + PyObject *tuple_iter; + PyObject *item; + + if((tuple_iter = PyObject_GetIter(value)) == NULL) return INT_ERR_CODE; + + while ((item = PyIter_Next(tuple_iter)) != NULL) { + PyDict_SetItem(dest, item, key); + Py_DECREF(item); + } + Py_DECREF(tuple_iter); + } + return 0; +} + +static int build_freq_dict(void) { + + char ANN_prefixes[8][15] = { "A", "Y", "ANN", "ANNUAL", "ANNUALLY", + "YR", "YEAR", "YEARLY" }; + + char QTRE_prefixes[8][15] = { "Q", "QTR", "QUARTER", "QUARTERLY", "Q-E", + "QTR-E", "QUARTER-E", "QUARTERLY-E"}; + char QTRS_prefixes[4][15] = { "Q-S", "QTR-S", "QUARTER-S", "QUARTERLY-S" }; + + char WK_prefixes[4][15] = { "W", "WK", "WEEK", "WEEKLY" }; + + /* Note: order of this array must match up with how the Annual + frequency constants are lined up */ + char month_names[12][2][10] = { + { "DEC", "DECEMBER" }, + { "JAN", "JANUARY" }, + { "FEB", "FEBRUARY" }, + { "MAR", 
"MARCH" }, + { "APR", "APRIL" }, + { "MAY", "MAY" }, + { "JUN", "JUNE" }, + { "JUL", "JULY" }, + { "AUG", "AUGUST" }, + { "SEP", "SEPTEMBER" }, + { "OCT", "OCTOBER" }, + { "NOV", "NOVEMBER" }}; + + char day_names[7][2][10] = { + { "SUN", "SUNDAY" }, + { "MON", "MONDAY" }, + { "TUE", "TUESDAY" }, + { "WED", "WEDNESDAY" }, + { "THU", "THURSDAY" }, + { "FRI", "FRIDAY" }, + { "SAT", "SATURDAY" }}; + + char ANN_const_names[12][15] = { + "FR_ANNDEC", + "FR_ANNJAN", + "FR_ANNFEB", + "FR_ANNMAR", + "FR_ANNAPR", + "FR_ANNMAY", + "FR_ANNJUN", + "FR_ANNJUL", + "FR_ANNAUG", + "FR_ANNSEP", + "FR_ANNOCT", + "FR_ANNNOV"}; + + char QTRE_const_names[12][15] = { + "FR_QTREDEC", + "FR_QTREJAN", + "FR_QTREFEB", + "FR_QTREMAR", + "FR_QTREAPR", + "FR_QTREMAY", + "FR_QTREJUN", + "FR_QTREJUL", + "FR_QTREAUG", + "FR_QTRESEP", + "FR_QTREOCT", + "FR_QTRENOV"}; + + char QTRS_const_names[12][15] = { + "FR_QTRSDEC", + "FR_QTRSJAN", + "FR_QTRSFEB", + "FR_QTRSMAR", + "FR_QTRSAPR", + "FR_QTRSMAY", + "FR_QTRSJUN", + "FR_QTRSJUL", + "FR_QTRSAUG", + "FR_QTRSSEP", + "FR_QTRSOCT", + "FR_QTRSNOV"}; + + char WK_const_names[7][15] = { + "FR_WKSUN", + "FR_WKMON", + "FR_WKTUE", + "FR_WKWED", + "FR_WKTHU", + "FR_WKFRI", + "FR_WKSAT"}; + + PyObject *aliases; + + freq_dict = PyDict_New(); + freq_dict_rev = PyDict_New(); + freq_constants = PyDict_New(); + + aliases = Py_BuildValue("(ssss)", "M", "MTH", "MONTH", "MONTHLY"); + INIT_FREQ("FR_MTH", FR_MTH, aliases); + + aliases = Py_BuildValue("(ssss)", "B", "BUS", "BUSINESS", "BUSINESSLY"); + INIT_FREQ("FR_BUS", FR_BUS, aliases); + + aliases = Py_BuildValue("(ssss)", "D", "DAY", "DLY", "DAILY"); + INIT_FREQ("FR_DAY", FR_DAY, aliases); + + aliases = Py_BuildValue("(sssss)", "H", "HR", "HOUR", "HRLY", "HOURLY"); + INIT_FREQ("FR_HR", FR_HR, aliases); + + aliases = Py_BuildValue("(ssss)", "T", "MIN", "MINUTE", "MINUTELY"); + INIT_FREQ("FR_MIN", FR_MIN, aliases); + + aliases = Py_BuildValue("(ssss)", "S", "SEC", "SECOND", "SECONDLY"); + INIT_FREQ("FR_SEC", FR_SEC, aliases); + + aliases = Py_BuildValue("(ssss)", "U", "UND", "UNDEF", "UNDEFINED"); + INIT_FREQ("FR_UND", FR_UND, aliases); + + ADD_FREQ_CONSTANT("FR_ANN", FR_ANN); + + if(init_freq_group(12, 8, FR_ANN, + month_names, ANN_prefixes, ANN_const_names) == INT_ERR_CODE) { + return INT_ERR_CODE; + } + + ADD_FREQ_CONSTANT("FR_QTR", FR_QTR); + + if(init_freq_group(12, 8, FR_QTREDEC, + month_names, QTRE_prefixes, QTRE_const_names) == INT_ERR_CODE) { + return INT_ERR_CODE; + } + + if(init_freq_group(12, 4, FR_QTRSDEC, + month_names, QTRS_prefixes, QTRS_const_names) == INT_ERR_CODE) { + return INT_ERR_CODE; + } + + ADD_FREQ_CONSTANT("FR_WK", FR_WK); + + if(init_freq_group(7, 4, FR_WK, + day_names, WK_prefixes, WK_const_names) == INT_ERR_CODE) { + return INT_ERR_CODE; + } + + if(reverse_dict(freq_dict, freq_dict_rev) == INT_ERR_CODE) { + return INT_ERR_CODE; + } + + return 0; +} + + +/* take user specified frequency and convert to int representation + of the frequency */ +int check_freq(PyObject *freq_spec) { + + if (PyInt_Check(freq_spec)) { + return (int)PyInt_AsLong(freq_spec); + } else if (PyString_Check(freq_spec)) { + char *freq_str, *freq_str_uc; + PyObject *freq_val; + + freq_str = PyString_AsString(freq_spec); + if((freq_str_uc = str_uppercase(freq_str)) == NULL) {return INT_ERR_CODE;} + + freq_val = PyDict_GetItemString(freq_dict_rev, freq_str_uc); + + free(freq_str_uc); + + if (freq_val == NULL) { + PyErr_SetString(PyExc_ValueError, "invalid frequency specification"); + return INT_ERR_CODE; + } else { + int ret_val = 
(int)PyInt_AsLong(freq_val); + return ret_val; + } + } else if (freq_spec == Py_None) { + return FR_UND; + } else { + int retval = (int)PyInt_AsLong(freq_spec); + if (PyErr_Occurred()) { + PyErr_SetString(PyExc_ValueError, "invalid frequency specification"); + return INT_ERR_CODE; + } else { return retval; } + } + +} + +static PyObject * +DateObject_new(PyTypeObject *type, PyObject *args, PyObject *kwds) { + + DateObject *self; + + self = (DateObject*)type->tp_alloc(type, 0); + if (self != NULL) { + // initialize attributes that need initializing here + self->freq = FR_UND; + self->value = -1; + } + + return (PyObject *)self; +} + +/* for use in C code */ +static DateObject * +DateObject_New(void) { + PyObject *dummy = NULL; /* DateObject_new ignores args and kwds, so NULL is safe */ + return (DateObject*)DateObject_new(&DateType, dummy, dummy); +} + +#define INIT_ERR(errortype, errmsg) PyErr_SetString(errortype,errmsg);return -1 + +static int +DateObject_init(DateObject *self, PyObject *args, PyObject *kwds) { + + PyObject *freq=NULL, *value=NULL, *datetime=NULL, *string=NULL; + char *INSUFFICIENT_MSG = "insufficient parameters to initialize Date"; + + int def_info=INT_ERR_CODE; + + int year=def_info, month=def_info, day=def_info, quarter=def_info, + hour=def_info, minute=def_info, second=def_info; + + int free_dt=0; + + static char *kwlist[] = {"freq", "value", "string", + "year", "month", "day", "quarter", + "hour", "minute", "second", + "datetime", NULL}; + + if (! PyArg_ParseTupleAndKeywords(args, kwds, "O|OOiiiiiiiO", kwlist, + &freq, &value, &string, + &year, &month, &day, &quarter, + &hour, &minute, &second, + &datetime)) { + return -1; + } + + if (PyObject_HasAttrString(freq, "freq")) { + PyObject *freq_attr = PyObject_GetAttrString(freq, "freq"); + self->freq = PyInt_AS_LONG(freq_attr); + Py_DECREF(freq_attr); + } else { + if((self->freq = check_freq(freq)) == INT_ERR_CODE) return -1; + } + + if ((value && PyString_Check(value)) || string) { + + PyObject *string_arg = PyTuple_New(1); + int freq_group = get_freq_group(self->freq); + + free_dt = 1; + + if (!string) { + string = value; + } + + PyTuple_SET_ITEM(string_arg, 0, string); + Py_INCREF(string); + + if (freq_group == FR_HR || + freq_group == FR_MIN || + freq_group == FR_SEC) + { datetime = PyEval_CallObject(DateTimeFromString, string_arg); } + else { datetime = PyEval_CallObject(DateFromString, string_arg); } + + Py_DECREF(string_arg); + + value = NULL; + } + + if (value) { + self->value = PyInt_AsLong(value); + } else { + + int freq_group = get_freq_group(self->freq); + + if (datetime) { + year=PyDateTime_GET_YEAR(datetime); + month=PyDateTime_GET_MONTH(datetime); + day=PyDateTime_GET_DAY(datetime); + hour=PyDateTime_DATE_GET_HOUR(datetime); + minute=PyDateTime_DATE_GET_MINUTE(datetime); + second=PyDateTime_DATE_GET_SECOND(datetime); + } + + if (!datetime) { + + // First, some basic checks.....
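+ /* Summary of the validation below (a sketch derived from the checks that
+    follow): year is always required when no value, string, or datetime was
+    supplied. In addition:
+      FR_BUS, FR_DAY, FR_WK, FR_UND : month and day
+      FR_MTH                        : month
+      FR_QTR group                  : quarter
+      FR_HR                         : month, day, hour
+      FR_MIN                        : month, day, minute; if hour is omitted,
+                                      minute is taken as minutes past midnight
+      FR_SEC                        : month, day, second; if hour is omitted,
+                                      second is taken as seconds past midnight,
+                                      otherwise minute is required as well
+      FR_ANN                        : no additional fields */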
+ if (year == def_info) { + INIT_ERR(PyExc_ValueError, INSUFFICIENT_MSG); + } + if (self->freq == FR_BUS || + self->freq == FR_DAY || + self->freq == FR_WK || + self->freq == FR_UND) { + if (month == def_info || day == def_info) { + INIT_ERR(PyExc_ValueError, INSUFFICIENT_MSG); + } + + // if FR_BUS, check for week day + + } else if (self->freq == FR_MTH) { + if (month == def_info) { + INIT_ERR(PyExc_ValueError, INSUFFICIENT_MSG); + } + } else if (freq_group == FR_QTR) { + if (quarter == def_info) { + INIT_ERR(PyExc_ValueError, INSUFFICIENT_MSG); + } + } else if (self->freq == FR_SEC) { + if (month == def_info || + day == def_info || + second == def_info) { + INIT_ERR(PyExc_ValueError, INSUFFICIENT_MSG); + } + if (hour == def_info) { + hour = second/3600; + minute = (second % 3600)/60; + second = second % 60; + } else if (minute == def_info) { + INIT_ERR(PyExc_ValueError, INSUFFICIENT_MSG); + } + } else if (self->freq == FR_MIN) { + if (month == def_info || + day == def_info || + minute == def_info) { + INIT_ERR(PyExc_ValueError, INSUFFICIENT_MSG); + } + if (hour == def_info) { + hour = minute/60; + minute = minute % 60; + } + } else if (self->freq == FR_HR) { + if (month == def_info || + day == def_info || + hour == def_info) { + INIT_ERR(PyExc_ValueError, INSUFFICIENT_MSG); + } + } + + } + + if (self->freq == FR_SEC) { + long absdays, delta; + absdays = absdate_from_ymd(year, month, day); + delta = (absdays - HIGHFREQ_ORIG); + self->value = (int)(delta*86400 + hour*3600 + minute*60 + second + 1); + } else if (self->freq == FR_MIN) { + long absdays, delta; + absdays = absdate_from_ymd(year, month, day); + delta = (absdays - HIGHFREQ_ORIG); + self->value = (int)(delta*1440 + hour*60 + minute + 1); + } else if (self->freq == FR_HR) { + long absdays, delta; + if((absdays = absdate_from_ymd(year, month, day)) == INT_ERR_CODE) return -1; + delta = (absdays - HIGHFREQ_ORIG); + self->value = (int)(delta*24 + hour + 1); + } else if (self->freq == FR_DAY) { + if((self->value = (int)absdate_from_ymd(year, month, day)) == INT_ERR_CODE) return -1; + } else if (self->freq == FR_UND) { + if((self->value = (int)absdate_from_ymd(year, month, day)) == INT_ERR_CODE) return -1; + } else if (self->freq == FR_BUS) { + long weeks, days; + if((days = absdate_from_ymd(year, month, day)) == INT_ERR_CODE) return -1; + weeks = days/7; + self->value = (int)(days - weeks*2); + } else if (freq_group == FR_WK) { + int adj_ordinal, ordinal, day_adj; + if((ordinal = (int)absdate_from_ymd(year, month, day)) == INT_ERR_CODE) return -1; + day_adj = (7 - (self->freq - FR_WK)) % 7; + adj_ordinal = ordinal + ((7 - day_adj) - ordinal % 7) % 7; + self->value = adj_ordinal/7; + } else if (self->freq == FR_MTH) { + self->value = (year-1)*12 + month; + } else if (freq_group == FR_QTR) { + if ((self->freq - freq_group) > 12) { + // quarterly frequency with year determined by starting period (QTR-S) + self->value = year*4 + quarter; + } else { + /* quarterly frequency with year determined by ending period + or has December year end */ + self->value = (year-1)*4 + quarter; + } + } else if (freq_group == FR_ANN) { + self->value = year; + } + + } + + if (free_dt) { Py_DECREF(datetime); } + + return 0; +} + +static PyMemberDef DateObject_members[] = { + {"freq", T_INT, offsetof(DateObject, freq), 0, + "frequency"}, + {"value", T_INT, offsetof(DateObject, value), 0, + "integer representation of the Date"}, + {NULL} /* Sentinel */ +}; + +static char DateObject_toordinal_doc[] = +"Return the proleptic Gregorian ordinal of the date, where January 1
of\n" +"year 1 has ordinal 1"; +static PyObject * +DateObject_toordinal(DateObject* self) +{ + if (self->freq == FR_DAY) { + return PyInt_FromLong(self->value); + } else { + long (*toDaily)(long, char, asfreq_info*) = NULL; + asfreq_info af_info; + + toDaily = get_asfreq_func(self->freq, FR_DAY, 0); + get_asfreq_info(self->freq, FR_DAY, &af_info); + + return PyInt_FromLong(toDaily(self->value, 'A', &af_info)); + } +} + +static char DateObject_asfreq_doc[] = +"Returns a date converted to a specified frequency.\n\n" +":Parameters:\n" +" - freq : string/int\n" +" Frequency to convert the Date to. Accepts any valid frequency\n" +" specification (string or integer)\n" +" - relation :string *['After']*\n" +" Applies only when converting a lower frequency Date to a higher\n" +" frequency Date, or when converting a weekend Date to a business\n" +" frequency Date. Valid values are 'before', 'after', 'b', and 'a'."; +static PyObject * +DateObject_asfreq(DateObject *self, PyObject *args, PyObject *kwds) +{ + + PyObject *freq=NULL; + char *relation_raw=NULL; + char *relation_uc; + char relation; + int invalid_relation=0; + int toFreq; + int result_val; + DateObject *result = DateObject_New(); + + static char *kwlist[] = {"freq", "relation", NULL}; + + long (*asfreq_func)(long, char, asfreq_info*) = NULL; + asfreq_info af_info; + + if (! PyArg_ParseTupleAndKeywords(args, kwds, "O|s", kwlist, + &freq, &relation_raw)) return NULL; + + if(relation_raw) { + if (strlen(relation_raw) > 0) { + if((relation_uc = str_uppercase(relation_raw)) == NULL) + {return PyErr_NoMemory();} + + if (strcmp(relation_uc, "BEFORE") == 0 || + strcmp(relation_uc, "B") == 0 || + strcmp(relation_uc, "AFTER") == 0 || + strcmp(relation_uc, "A") == 0) { + if(relation_uc[0] == 'A') { relation = 'A'; } + else { relation = 'B'; } + + } else { invalid_relation=1; } + + free(relation_uc); + + } else { + invalid_relation=1; + } + + if (invalid_relation) { + PyErr_SetString(PyExc_ValueError,"Invalid relation specification"); + return NULL; + } + } else { + relation = 'A'; + } + + if ((toFreq = check_freq(freq)) == INT_ERR_CODE) return NULL; + + get_asfreq_info(self->freq, toFreq, &af_info); + asfreq_func = get_asfreq_func(self->freq, toFreq, 0); + + result_val = asfreq_func(self->value, relation, &af_info); + + if (result_val == INT_ERR_CODE) return NULL; + + result->freq = toFreq; + result->value = result_val; + + return (PyObject*)result; + +} + +static char DateObject_strfmt_doc[] = +"Returns string representation of Date object according to format specified.\n\n" +":Parameters:\n" +" - fmt : string\n" +" Formatting string. Uses the same directives as in the time.strftime\n" +" function in the standard Python time module. In addition, a few other\n" +" directives are supported:\n" +" %q - the 'quarter' of the date\n" +" %f - Year without century as a decimal number [00,99]. The\n" +" 'year' in this case is the year of the date determined by\n" +" the year for the current quarter. This is the same as %y\n" +" unless the Date is one of the 'qtr-s' frequencies\n" +" %F - Year with century as a decimal number. The 'year' in this\n" +" case is the year of the date determined by the year for\n" +" the current quarter. 
This is the same as %Y unless the\n" +" Date is one of the 'qtr-s' frequencies\n"; +static PyObject * +DateObject_strfmt(DateObject *self, PyObject *args) +{ + + char *orig_fmt_str, *fmt_str; + char *result; + + int num_extra_fmts = 3; + + char extra_fmts[3][2][10] = {{"%q", "^`AB`^"}, + {"%f", "^`CD`^"}, + {"%F", "^`EF`^"}}; + + int extra_fmts_found[3] = {0,0,0}; + int extra_fmts_found_one = 0; + struct tm c_date; + struct date_info tempDate; + long absdate; + double abstime; + int i, result_len; + PyObject *py_result; + + long (*toDaily)(long, char, asfreq_info*) = NULL; + asfreq_info af_info; + + if (!PyArg_ParseTuple(args, "s:strfmt(fmt)", &orig_fmt_str)) return NULL; + + toDaily = get_asfreq_func(self->freq, FR_DAY, 0); + get_asfreq_info(self->freq, FR_DAY, &af_info); + + absdate = toDaily(self->value, 'A', &af_info); + abstime = getAbsTime(self->freq, absdate, self->value); + + if(dInfoCalc_SetFromAbsDateTime(&tempDate, absdate, abstime, + GREGORIAN_CALENDAR)) return NULL; + + // populate standard C date struct with info from our date_info struct + c_date.tm_sec = (int)tempDate.second; + c_date.tm_min = tempDate.minute; + c_date.tm_hour = tempDate.hour; + c_date.tm_mday = tempDate.day; + c_date.tm_mon = tempDate.month - 1; + c_date.tm_year = tempDate.year - 1900; + c_date.tm_wday = tempDate.day_of_week; + c_date.tm_yday = tempDate.day_of_year; + c_date.tm_isdst = -1; + + result_len = strlen(orig_fmt_str) + 50; + if ((result = malloc(result_len * sizeof(char))) == NULL) {return PyErr_NoMemory();} + + fmt_str = orig_fmt_str; + + // replace any special format characters with their place holder + for(i=0; i < num_extra_fmts; i++) { + char *special_loc; + if ((special_loc = strstr(fmt_str,extra_fmts[i][0])) != NULL) { + char *tmp_str = fmt_str; + fmt_str = str_replace(fmt_str, extra_fmts[i][0], + extra_fmts[i][1]); + /* only free the previous loop value if this is not the first + special format string found */ + if (extra_fmts_found_one) { free(tmp_str); } + + if (fmt_str == NULL) {return NULL;} + + extra_fmts_found[i] = 1; + extra_fmts_found_one = 1; + } + } + + strftime(result, result_len, fmt_str, &c_date); + if (extra_fmts_found_one) { free(fmt_str); } + + // replace any place holders with the appropriate value + for(i=0; i < num_extra_fmts; i++) { + if (extra_fmts_found[i]) { + char *tmp_str = result; + char *extra_str; + + if (strcmp(extra_fmts[i][0], "%q") == 0 || + strcmp(extra_fmts[i][0], "%f") == 0 || + strcmp(extra_fmts[i][0], "%F") == 0) { + + asfreq_info af_info; + int qtr_freq, year, quarter, year_len; + + if (get_freq_group(self->freq) == FR_QTR) { + qtr_freq = self->freq; + } else { qtr_freq = FR_QTR; } + get_asfreq_info(FR_DAY, qtr_freq, &af_info); + + if(DtoQ_yq(absdate, &af_info, &year, &quarter) == INT_ERR_CODE) + { return NULL; } + + if(strcmp(extra_fmts[i][0], "%q") == 0) { + if ((extra_str = malloc(2 * sizeof(char))) == NULL) { + free(tmp_str); + return PyErr_NoMemory(); + } + sprintf(extra_str, "%i", quarter); + } else { + if ((qtr_freq % 1000) > 12) { year -= 1; } + + if (strcmp(extra_fmts[i][0], "%f") == 0) { + year_len = 2; + year = year % 100; + } else { year_len = 4; } + + if ((extra_str = malloc((year_len+1) * sizeof(char))) == NULL) { + free(tmp_str); + return PyErr_NoMemory(); + } + + if (year_len == 2 && year < 10) { + sprintf(extra_str, "0%i", year); + } else { sprintf(extra_str, "%i", year); } + } + + } else { + PyErr_SetString(PyExc_RuntimeError,"Unrecognized fmt string"); + return NULL; + } + + result = str_replace(result, extra_fmts[i][1], extra_str);
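+ /* str_replace returns a freshly allocated buffer, so the pre-replacement
+    result (saved in tmp_str) and the formatted value string are released
+    immediately below */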
+ free(tmp_str); + free(extra_str); + if (result == NULL) { return NULL; } + } + } + + py_result = PyString_FromString(result); + free(result); + + return py_result; +} + +static PyObject * +DateObject___str__(DateObject* self) +{ + + int freq_group = get_freq_group(self->freq); + PyObject *string_arg, *retval; + + string_arg = NULL; + if (freq_group == FR_ANN) { string_arg = Py_BuildValue("(s)", "%Y"); } + else if (freq_group == FR_QTR) { string_arg = Py_BuildValue("(s)", "%FQ%q"); } + else if (freq_group == FR_MTH) { string_arg = Py_BuildValue("(s)", "%b-%Y"); } + else if (freq_group == FR_DAY || + freq_group == FR_BUS || + freq_group == FR_WK || + freq_group == FR_UND) { string_arg = Py_BuildValue("(s)", "%d-%b-%Y"); } + else if (freq_group == FR_HR) { string_arg = Py_BuildValue("(s)", "%d-%b-%Y %H:00"); } + else if (freq_group == FR_MIN) { string_arg = Py_BuildValue("(s)", "%d-%b-%Y %H:%M"); } + else if (freq_group == FR_SEC) { string_arg = Py_BuildValue("(s)", "%d-%b-%Y %H:%M:%S"); } + + if (string_arg == NULL) { return NULL; } + + retval = DateObject_strfmt(self, string_arg); + Py_DECREF(string_arg); + + return retval; +} + +static PyObject * +DateObject_freqstr(DateObject *self, void *closure) { + PyObject *key = PyInt_FromLong(self->freq); + PyObject *freq_aliases = PyDict_GetItem(freq_dict, key); + PyObject *main_alias = PyTuple_GET_ITEM(freq_aliases, 0); + Py_DECREF(key); + Py_INCREF(main_alias); + return main_alias; +} + + +static PyObject * +DateObject___repr__(DateObject* self) +{ + PyObject *py_str_rep, *py_freqstr, *py_repr; + char *str_rep, *freqstr, *repr; + int repr_len; + + py_str_rep = DateObject___str__(self); + if (py_str_rep == NULL) { return NULL; } + + py_freqstr = DateObject_freqstr(self, NULL); + + str_rep = PyString_AsString(py_str_rep); + freqstr = PyString_AsString(py_freqstr); + + repr_len = strlen(str_rep) + strlen(freqstr) + 6; + + if((repr = malloc((repr_len + 1) * sizeof(char))) == NULL) + { return PyErr_NoMemory(); } + + strcpy(repr, "<"); + strcat(repr, freqstr); + strcat(repr, " : "); + strcat(repr, str_rep); + strcat(repr, ">"); + + py_repr = PyString_FromString(repr); + + Py_DECREF(py_str_rep); + Py_DECREF(py_freqstr); + + free(repr); + + return py_repr; +} + +/****************************** + These methods seem rather useless. May or may not implement them. 
+fromordinal(self, ordinal): + return Date(self.freq, datetime=dt.datetime.fromordinal(ordinal)) +tostring(self): + return str(self) +toobject(self): + return self +isvalid(self): + return True +*******************************/ + + +static DateObject * +DateObject_FromFreqAndValue(int freq, int value) { + + DateObject *result = DateObject_New(); + + PyObject *args = PyTuple_New(0); + PyObject *kw = PyDict_New(); + PyObject *py_freq = PyInt_FromLong(freq); + PyObject *py_value = PyInt_FromLong(value); + + PyDict_SetItemString(kw, "freq", py_freq); + PyDict_SetItemString(kw, "value", py_value); + + Py_DECREF(py_freq); + Py_DECREF(py_value); + + DateObject_init(result, args, kw); + + Py_DECREF(args); + Py_DECREF(kw); + + return result; +} + +static PyObject * +DateObject_date_plus_int(PyObject *date, PyObject *pyint) { + DateObject *dateobj = (DateObject*)date; + if (DateObject_Check(pyint)) { + PyErr_SetString(PyExc_TypeError, "Cannot add two Date objects"); + return NULL; + } + + return (PyObject*)DateObject_FromFreqAndValue(dateobj->freq, PyInt_AsLong(pyint) + dateobj->value); +} + +static PyObject * +DateObject___add__(PyObject *left, PyObject *right) +{ + if (DateObject_Check(left)) { + return DateObject_date_plus_int(left, right); + } else { + return DateObject_date_plus_int(right, left); + } +} + +static PyObject * +DateObject___subtract__(PyObject *left, PyObject *right) +{ + int result; + DateObject *dleft; + if (!DateObject_Check(left)) { + PyErr_SetString(PyExc_ValueError, "Cannot subtract Date from non-Date value"); + return NULL; + } + + dleft = (DateObject*)left; + + if (DateObject_Check(right)) { + DateObject *dright = (DateObject*)right; + if (dleft->freq != dright->freq) { + PyErr_SetString(PyExc_ValueError, "Cannot subtract Dates with different frequency"); + return NULL; + } + result = dleft->value - dright->value; + return PyInt_FromLong(result); + } else { + result = dleft->value - PyInt_AsLong(right); + return (PyObject*)DateObject_FromFreqAndValue(dleft->freq, result); + } +} + +static int +DateObject___compare__(DateObject * obj1, DateObject * obj2) +{ + if (obj1->freq != obj2->freq) { + PyErr_SetString(PyExc_ValueError, + "Cannot compare dates with different frequency"); + return -1; + } + + if (obj1->value < obj2->value) return -1; + if (obj1->value > obj2->value) return 1; + if (obj1->value == obj2->value) return 0; + return -1; +} + +static long +DateObject___hash__(DateObject *self) +{ + register int freq_group = get_freq_group(self->freq); + + /* within a given frequency, hash values are guaranteed to be unique + for different dates. 
For different frequencies, we make a reasonable + effort to ensure hash values will be unique, but it is not guaranteed */ + if (freq_group == FR_BUS) { + return self->value + 10000000; + } else if (freq_group == FR_WK) { + return self->value + 100000000; + } else { return self->value; } +} + +static PyObject * +DateObject___int__(DateObject *self) +{ + return PyInt_FromLong(self->value); +} + +static PyObject * +DateObject___float__(DateObject *self) +{ + return PyFloat_FromDouble((double)(self->value)); +} + +/*************************************************** + ====== Date Properties ====== +****************************************************/ + +// helper function for date property funcs +static int +DateObject_set_date_info(DateObject *self, struct date_info *dinfo) { + PyObject *daily_obj = DateObject_toordinal(self); + long absdate = PyInt_AsLong(daily_obj); + + Py_DECREF(daily_obj); + + if(dInfoCalc_SetFromAbsDate(dinfo, absdate, + GREGORIAN_CALENDAR)) return -1; + + return 0; +} + +// helper function for date property funcs +static int +DateObject_set_date_info_wtime(DateObject *self, struct date_info *dinfo) { + PyObject *daily_obj = DateObject_toordinal(self); + long absdate = PyInt_AsLong(daily_obj); + double abstime; + + Py_DECREF(daily_obj); + + abstime = getAbsTime(self->freq, absdate, self->value); + + if(dInfoCalc_SetFromAbsDateTime(dinfo, absdate, abstime, + GREGORIAN_CALENDAR)) return -1; + + return 0; +} + +static PyObject * +DateObject_year(DateObject *self, void *closure) { + struct date_info dinfo; + if(DateObject_set_date_info(self, &dinfo) == -1) return NULL; + return PyInt_FromLong(dinfo.year); +} + +static int _DateObject_quarter_year(DateObject *self, int *year, int *quarter) { + + PyObject *daily_obj; + long absdate; + + asfreq_info af_info; + int qtr_freq; + + daily_obj = DateObject_toordinal(self); + absdate = PyInt_AsLong(daily_obj); + Py_DECREF(daily_obj); + + if (get_freq_group(self->freq) == FR_QTR) { + qtr_freq = self->freq; + } else { qtr_freq = FR_QTR; } + get_asfreq_info(FR_DAY, qtr_freq, &af_info); + + if(DtoQ_yq(absdate, &af_info, year, quarter) == INT_ERR_CODE) + { return INT_ERR_CODE; } + + if ((qtr_freq % 1000) > 12) { *year -= 1; } + + return 0; +} + +static PyObject * +DateObject_qyear(DateObject *self, void *closure) { + int year, quarter; + if(_DateObject_quarter_year(self, + &year, &quarter) == INT_ERR_CODE) { return NULL; } + return PyInt_FromLong(year); +} + +static PyObject * +DateObject_quarter(DateObject *self, void *closure) { + int year, quarter; + if(_DateObject_quarter_year(self, + &year, &quarter) == INT_ERR_CODE) { return NULL; } + return PyInt_FromLong(quarter); +} + +static PyObject * +DateObject_month(DateObject *self, void *closure) { + struct date_info dinfo; + if(DateObject_set_date_info(self, &dinfo) == -1) return NULL; + return PyInt_FromLong(dinfo.month); +} + +static PyObject * +DateObject_day(DateObject *self, void *closure) { + struct date_info dinfo; + if(DateObject_set_date_info(self, &dinfo) == -1) return NULL; + return PyInt_FromLong(dinfo.day); +} + +static PyObject * +DateObject_day_of_week(DateObject *self, void *closure) { + struct date_info dinfo; + if(DateObject_set_date_info(self, &dinfo) == -1) return NULL; + return PyInt_FromLong(dinfo.day_of_week); +} + +static PyObject * +DateObject_day_of_year(DateObject *self, void *closure) { + struct date_info dinfo; + if(DateObject_set_date_info(self, &dinfo) == -1) return NULL; + return PyInt_FromLong(dinfo.day_of_year); +} + +static PyObject * 
+DateObject_week(DateObject *self, void *closure) { + struct date_info dinfo; + if(DateObject_set_date_info(self, &dinfo) == -1) return NULL; + return PyInt_FromLong(dInfoCalc_ISOWeek(&dinfo)); +} + +static PyObject * +DateObject_hour(DateObject *self, void *closure) { + struct date_info dinfo; + if(DateObject_set_date_info_wtime(self, &dinfo) == -1) return NULL; + return PyInt_FromLong(dinfo.hour); +} + +static PyObject * +DateObject_minute(DateObject *self, void *closure) { + struct date_info dinfo; + if(DateObject_set_date_info_wtime(self, &dinfo) == -1) return NULL; + return PyInt_FromLong(dinfo.minute); +} + +static PyObject * +DateObject_second(DateObject *self, void *closure) { + struct date_info dinfo; + if(DateObject_set_date_info_wtime(self, &dinfo) == -1) return NULL; + return PyInt_FromLong((int)dinfo.second); +} + +static PyObject * +DateObject_datetime(DateObject *self, void *closure) { + PyObject *datetime; + struct date_info dinfo; + if(DateObject_set_date_info_wtime(self, &dinfo) == -1) return NULL; + datetime = PyDateTime_FromDateAndTime(dinfo.year, dinfo.month, + dinfo.day, dinfo.hour, + dinfo.minute, (int)dinfo.second, 0); + return datetime; +} + +static int +DateObject_ReadOnlyErr(DateObject *self, PyObject *value, void *closure) { + PyErr_SetString(PyExc_AttributeError, "Cannot set read-only property"); + return -1; +} + +static PyGetSetDef DateObject_getseters[] = { + {"year", (getter)DateObject_year, (setter)DateObject_ReadOnlyErr, + "Returns the year.", NULL}, + {"qyear", (getter)DateObject_qyear, (setter)DateObject_ReadOnlyErr, + "For quarterly frequency dates, returns the year corresponding to the\n" + "year end (start) month. When using QTR or QTR-E based quarterly\n" + "frequencies, this is the fiscal year in a financial context.\n\n" + "For non-quarterly dates, this simply returns the year of the date.", + NULL}, + {"quarter", (getter)DateObject_quarter, (setter)DateObject_ReadOnlyErr, + "Returns the quarter.", NULL}, + {"month", (getter)DateObject_month, (setter)DateObject_ReadOnlyErr, + "Returns the month.", NULL}, + {"week", (getter)DateObject_week, (setter)DateObject_ReadOnlyErr, + "Returns the week.", NULL}, + {"day", (getter)DateObject_day, (setter)DateObject_ReadOnlyErr, + "Returns the day of month.", NULL}, + {"day_of_week", (getter)DateObject_day_of_week, (setter)DateObject_ReadOnlyErr, + "Returns the day of week.", NULL}, + {"day_of_year", (getter)DateObject_day_of_year, (setter)DateObject_ReadOnlyErr, + "Returns the day of year.", NULL}, + {"second", (getter)DateObject_second, (setter)DateObject_ReadOnlyErr, + "Returns the second.", NULL}, + {"minute", (getter)DateObject_minute, (setter)DateObject_ReadOnlyErr, + "Returns the minute.", NULL}, + {"hour", (getter)DateObject_hour, (setter)DateObject_ReadOnlyErr, + "Returns the hour.", NULL}, + + {"freqstr", (getter)DateObject_freqstr, (setter)DateObject_ReadOnlyErr, + "Returns the string representation of frequency.", NULL}, + {"datetime", (getter)DateObject_datetime, (setter)DateObject_ReadOnlyErr, + "Returns the Date object converted to standard python datetime object", + NULL}, + + {NULL} /* Sentinel */ +}; + + +static PyNumberMethods DateObject_as_number = { + (binaryfunc)DateObject___add__, /* nb_add */ + (binaryfunc)DateObject___subtract__, /* nb_subtract */ + 0, /* nb_multiply */ + 0, /* nb_divide */ + 0, /* nb_remainder */ + 0, /* nb_divmod */ + 0, /* nb_power */ + 0, /* nb_negative */ + 0, /* nb_positive */ + 0, /* nb_absolute */ + 0, /* nb_nonzero */ + 0, /* nb_invert */ + 0, /* nb_lshift */ + 
0, /* nb_rshift */ + 0, /* nb_and */ + 0, /* nb_xor */ + 0, /* nb_or */ + 0, /* nb_coerce */ + (unaryfunc)DateObject___int__, /* nb_int */ + (unaryfunc)0, /* nb_long */ + (unaryfunc)DateObject___float__, /* nb_float */ + (unaryfunc)0, /* nb_oct */ + (unaryfunc)0, /* nb_hex */ +}; + +static PyMethodDef DateObject_methods[] = { + {"toordinal", (PyCFunction)DateObject_toordinal, METH_NOARGS, + DateObject_toordinal_doc}, + {"strfmt", (PyCFunction)DateObject_strfmt, METH_VARARGS, + DateObject_strfmt_doc}, + {"asfreq", (PyCFunction)DateObject_asfreq, METH_VARARGS | METH_KEYWORDS, + DateObject_asfreq_doc}, + {NULL} /* Sentinel */ +}; + + +static PyTypeObject DateType = { + PyObject_HEAD_INIT(NULL) + 0, /* ob_size */ + "timeseries.Date", /* tp_name */ + sizeof(DateObject), /* tp_basicsize */ + 0, /* tp_itemsize */ + (destructor)DateObject_dealloc, /* tp_dealloc */ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ + (cmpfunc)DateObject___compare__, /* tp_compare */ + (reprfunc)DateObject___repr__, /* tp_repr */ + &DateObject_as_number, /* tp_as_number */ + 0, /* tp_as_sequence */ + 0, /* tp_as_mapping */ + (hashfunc)DateObject___hash__, /* tp_hash */ + 0, /* tp_call*/ + (reprfunc)DateObject___str__, /* tp_str */ + 0, /* tp_getattro */ + 0, /* tp_setattro */ + 0, /* tp_as_buffer */ + Py_TPFLAGS_DEFAULT | /* tp_flags */ + Py_TPFLAGS_CHECKTYPES | + Py_TPFLAGS_BASETYPE, + "Defines a Date object, as the combination of a date and a frequency.\n" + "Several options are available to construct a Date object explicitly:\n\n" + "- Give appropriate values to the `year`, `month`, `day`, `quarter`, `hours`,\n" + " `minutes`, `seconds` arguments.\n\n" + " >>> td.Date(freq='Q',year=2004,quarter=3)\n" + " >>> td.Date(freq='D',year=2001,month=1,day=1)\n\n" + "- Use the `string` keyword. This method uses a modified version of the\n" + " mx.DateTime parser submodule. 
More information is available in its\n" + " documentation.\n\n" + " >>> ts.Date('D', '2007-01-01')\n\n" + "- Use the `datetime` keyword with an existing datetime.datetime object.\n\n" + " >>> td.Date('D', datetime=datetime.datetime.now())", /* tp_doc */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + 0, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + 0, /* tp_iternext */ + DateObject_methods, /* tp_methods */ + DateObject_members, /* tp_members */ + DateObject_getseters, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + (initproc)DateObject_init, /* tp_init */ + 0, /* tp_alloc */ + DateObject_new, /* tp_new */ +}; + + +/////////////////////////////////////////////////////////////////////// + +PyObject * +c_dates_check_freq(PyObject *self, PyObject *args) { + + PyObject *freq; + int freq_val; + + if (!PyArg_ParseTuple(args, "O:check_freq(freq)", &freq)) return NULL; + if ((freq_val = check_freq(freq)) == INT_ERR_CODE) return NULL; + + return PyInt_FromLong(freq_val); +} + +PyObject * +c_dates_check_freq_str(PyObject *self, PyObject *args) { + + PyObject *alias_tuple, *result, *freq_key; + + if ((freq_key = c_dates_check_freq(self, args)) == NULL) return NULL; + + alias_tuple = PyDict_GetItem(freq_dict, freq_key); + result = PyTuple_GET_ITEM(alias_tuple, 0); + + Py_INCREF(result); + + Py_DECREF(freq_key); + + return result; +} + +PyObject * +c_dates_get_freq_group(PyObject *self, PyObject *args) { + + PyObject *freq; + int freq_val; + + if (!PyArg_ParseTuple(args, "O:get_freq_group(freq)", &freq)) return NULL; + if ((freq_val = check_freq(freq)) == INT_ERR_CODE) return NULL; + + return PyInt_FromLong(get_freq_group(freq_val)); +} + +PyObject * +c_dates_thisday(PyObject *self, PyObject *args) { + + PyObject *freq, *init_args, *init_kwargs; + time_t rawtime; + struct tm *timeinfo; + int freq_val; + + DateObject *secondly_date; + + if (!PyArg_ParseTuple(args, "O:thisday(freq)", &freq)) return NULL; + + if ((freq_val = check_freq(freq)) == INT_ERR_CODE) return NULL; + + time(&rawtime); + timeinfo = localtime(&rawtime); + + init_args = PyTuple_New(0); + init_kwargs = PyDict_New(); + + DICT_SETINT_STRKEY(init_kwargs, "freq", FR_SEC); + DICT_SETINT_STRKEY(init_kwargs, "year", timeinfo->tm_year+1900); + DICT_SETINT_STRKEY(init_kwargs, "month", timeinfo->tm_mon+1); + DICT_SETINT_STRKEY(init_kwargs, "day", timeinfo->tm_mday); + DICT_SETINT_STRKEY(init_kwargs, "hour", timeinfo->tm_hour); + DICT_SETINT_STRKEY(init_kwargs, "minute", timeinfo->tm_min); + DICT_SETINT_STRKEY(init_kwargs, "second", timeinfo->tm_sec); + + secondly_date = DateObject_New(); + DateObject_init(secondly_date, init_args, init_kwargs); + + Py_DECREF(init_args); + Py_DECREF(init_kwargs); + + if (freq_val != FR_SEC) { + DateObject *result = DateObject_New(); + + long (*asfreq_func)(long, char, asfreq_info*) = NULL; + asfreq_info af_info; + + int date_val; + + get_asfreq_info(FR_SEC, freq_val, &af_info); + asfreq_func = get_asfreq_func(FR_SEC, freq_val, 0); + + date_val = asfreq_func(secondly_date->value, 'B', &af_info); + + Py_DECREF(secondly_date); + + result->freq = freq_val; + result->value = date_val; + + return (PyObject*)result; + + } else { return (PyObject*)secondly_date; } +} + + +PyObject * +DateArray_asfreq(PyObject *self, PyObject *args) +{ + PyArrayObject *fromDates, *toDates; + PyArrayIterObject *iterFrom, *iterTo; + PyObject *fromDateObj, *toDateObj; + char *relation; + int fromFreq, toFreq; + long fromDate, toDate; + long 
(*asfreq_main)(long, char, asfreq_info*) = NULL; + asfreq_info af_info; + + if (!PyArg_ParseTuple(args, + "Oiis:asfreq(fromDates, fromfreq, tofreq, relation)", + &fromDates, &fromFreq, &toFreq, &relation)) return NULL; + + get_asfreq_info(fromFreq, toFreq, &af_info); + + asfreq_main = get_asfreq_func(fromFreq, toFreq, 0); + + toDates = (PyArrayObject *)PyArray_Copy(fromDates); + + iterFrom = (PyArrayIterObject *)PyArray_IterNew((PyObject *)fromDates); + if (iterFrom == NULL) return NULL; + + iterTo = (PyArrayIterObject *)PyArray_IterNew((PyObject *)toDates); + if (iterTo == NULL) return NULL; + + while (iterFrom->index < iterFrom->size) { + + fromDateObj = PyArray_GETITEM(fromDates, iterFrom->dataptr); + fromDate = PyInt_AsLong(fromDateObj); + CHECK_ASFREQ(toDate = asfreq_main(fromDate, relation[0], &af_info)); + toDateObj = PyInt_FromLong(toDate); + + PyArray_SETITEM(toDates, iterTo->dataptr, toDateObj); + + Py_DECREF(fromDateObj); + Py_DECREF(toDateObj); + + PyArray_ITER_NEXT(iterFrom); + PyArray_ITER_NEXT(iterTo); + } + + Py_DECREF(iterFrom); + Py_DECREF(iterTo); + + return (PyObject *)toDates; + +} + +/************************************************************** +** The following functions are used by DateArray_getDateInfo ** +** to determine how many consecutive periods will have the ** +** same result ** +**************************************************************/ + +// also used for qyear +static int __skip_periods_year(int freq) { + + int freq_group = get_freq_group(freq); + + switch(freq_group) + { + case FR_QTR: + return 4; + case FR_MTH: + return 12; + case FR_WK: + return 51; + case FR_BUS: + return 260; + case FR_DAY: + return 365; + case FR_HR: + return 365*24; + case FR_MIN: + return 365*24*60; + case FR_SEC: + return 365*24*60*60; + default: + return 1; + } +} + +static int __skip_periods_quarter(int freq) { + + int freq_group = get_freq_group(freq); + + switch(freq_group) + { + case FR_MTH: + return 3; + case FR_WK: + return 12; + case FR_BUS: + return 64; + case FR_DAY: + return 90; + case FR_HR: + return 90*24; + case FR_MIN: + return 90*24*60; + case FR_SEC: + return 90*24*60*60; + default: + return 1; + } +} + +static int __skip_periods_month(int freq) { + + int freq_group = get_freq_group(freq); + + switch(freq_group) + { + case FR_WK: + return 3; + case FR_BUS: + return 20; + case FR_DAY: + return 28; + case FR_HR: + return 28*24; + case FR_MIN: + return 28*24*60; + case FR_SEC: + return 28*24*60*60; + default: + return 1; + } +} + +// also used for day_of_year, day_of_week +static int __skip_periods_day(int freq) { + + int freq_group = get_freq_group(freq); + + switch(freq_group) + { + case FR_HR: + return 24; + case FR_MIN: + return 24*60; + case FR_SEC: + return 24*60*60; + default: + return 1; + } +} + +static int __skip_periods_week(int freq) { + + int freq_group = get_freq_group(freq); + + // skip values must not exceed the true run length: a week spans + // 7 days (7*24 hours), so the sub-daily cases use 7, not 7*28 + switch(freq_group) + { + case FR_BUS: + return 5; + case FR_DAY: + return 7; + case FR_HR: + return 7*24; + case FR_MIN: + return 7*24*60; + case FR_SEC: + return 7*24*60*60; + default: + return 1; + } +} + +static int __skip_periods_hour(int freq) { + + int freq_group = get_freq_group(freq); + + switch(freq_group) + { + case FR_MIN: + return 60; + case FR_SEC: + return 60*60; + default: + return 1; + } +} + +static int __skip_periods_minute(int freq) { + + int freq_group = get_freq_group(freq); + + switch(freq_group) + { + case FR_SEC: + return 60; + default: + return 1; + } +} + +PyObject * +DateArray_getDateInfo(PyObject *self, PyObject *args) +{ + int freq,
is_full, skip_periods, counter=1, val_changed=0; + char *info; + + PyObject *prev_val=NULL; + PyArrayObject *array, *newArray; + PyArrayIterObject *iterSource, *iterResult; + + PyObject* (*getDateInfo)(DateObject*, void*) = NULL; + + if (!PyArg_ParseTuple(args, "Oisi:getDateInfo(array, freq, info, is_full)", + &array, &freq, &info, &is_full)) return NULL; + newArray = (PyArrayObject *)PyArray_Copy(array); + + iterSource = (PyArrayIterObject *)PyArray_IterNew((PyObject *)array); + iterResult = (PyArrayIterObject *)PyArray_IterNew((PyObject *)newArray); + + + switch(*info) + { + case 'Y': //year + getDateInfo = &DateObject_year; + skip_periods = __skip_periods_year(freq); + break; + case 'F': //"fiscal" year + getDateInfo = &DateObject_qyear; + skip_periods = __skip_periods_year(freq); + break; + case 'Q': //quarter + getDateInfo = &DateObject_quarter; + skip_periods = __skip_periods_quarter(freq); + break; + case 'M': //month + getDateInfo = &DateObject_month; + skip_periods = __skip_periods_month(freq); + break; + case 'D': //day + getDateInfo = &DateObject_day; + skip_periods = __skip_periods_day(freq); + break; + case 'R': //day of year + getDateInfo = &DateObject_day_of_year; + skip_periods = __skip_periods_day(freq); + break; + case 'W': //day of week + getDateInfo = &DateObject_day_of_week; + skip_periods = __skip_periods_day(freq); + break; + case 'I': //week of year + getDateInfo = &DateObject_week; + skip_periods = __skip_periods_week(freq); + break; + case 'H': //hour + getDateInfo = &DateObject_hour; + skip_periods = __skip_periods_hour(freq); + break; + case 'T': //minute + getDateInfo = &DateObject_minute; + skip_periods = __skip_periods_minute(freq); + break; + case 'S': //second + getDateInfo = &DateObject_second; + skip_periods = 1; + break; + default: + return NULL; + } + + { + DateObject *curr_date; + PyObject *val, *dInfo; + + while (iterSource->index < iterSource->size) { + + if ((val_changed == 0) || + (is_full == 0) || + (prev_val == NULL) || + (counter >= skip_periods)) { + + val = PyArray_GETITEM(array, iterSource->dataptr); + curr_date = DateObject_FromFreqAndValue(freq, PyInt_AsLong(val)); + dInfo = getDateInfo(curr_date, NULL); + + if ((prev_val != NULL) && + (PyInt_AsLong(prev_val) != PyInt_AsLong(dInfo))) { + val_changed = 1; + counter = 0; + } + + Py_DECREF(val); + Py_DECREF(curr_date); + + if (prev_val != NULL) { + Py_DECREF(prev_val); + } + + prev_val = dInfo; + } + + PyArray_SETITEM(newArray, iterResult->dataptr, dInfo); + + PyArray_ITER_NEXT(iterSource); + PyArray_ITER_NEXT(iterResult); + + counter += 1; + } + } + + if (prev_val != NULL) { + Py_DECREF(prev_val); + } + Py_DECREF(iterSource); + Py_DECREF(iterResult); + + return (PyObject *) newArray; +} + + +void import_c_dates(PyObject *m) +{ + + if (PyType_Ready(&DateType) < 0) return; + + DateCalc_Error = + PyErr_NewException("c_dates.DateCalc_Error", NULL, NULL); + DateCalc_RangeError = + PyErr_NewException("c_dates.DateCalc_RangeError", NULL, NULL); + + import_array(); + PyDateTime_IMPORT; + + Py_INCREF(&DateType); + PyModule_AddObject(m, "Date", (PyObject *)(&DateType)); + + if(build_freq_dict() == INT_ERR_CODE) { + PyErr_SetString( \ + PyExc_ImportError, \ + "initialization of module timeseries.c_dates failed"); + return; + }; + + PyModule_AddObject(m, "freq_dict", freq_dict); + PyModule_AddObject(m, "freq_dict_rev", freq_dict_rev); + PyModule_AddObject(m, "freq_constants", freq_constants); + + PyModule_AddObject(m, "DateCalc_Error", DateCalc_Error); + PyModule_AddObject(m, "DateCalc_RangeError", 
DateCalc_RangeError); + +} Deleted: trunk/scipy/sandbox/timeseries/src/c_tdates.c =================================================================== --- trunk/scipy/sandbox/timeseries/src/c_tdates.c 2007-09-19 03:32:25 UTC (rev 3329) +++ trunk/scipy/sandbox/timeseries/src/c_tdates.c 2007-09-19 03:37:47 UTC (rev 3330) @@ -1,2741 +0,0 @@ -#include "c_tdates.h" -#include -#include - - -int get_freq_group(int freq) { return (freq/1000)*1000; } - -static asfreq_info NULL_AF_INFO; - -/********************************************************* -** Python callbacks. These functions must be called by ** -** the module __init__ script ** -*********************************************************/ - -static PyObject *DateFromString = NULL; -PyObject * -set_callback_DateFromString(PyObject *dummy, PyObject *args) { - return set_callback(args, &DateFromString); -} - -static PyObject *DateTimeFromString = NULL; -PyObject * -set_callback_DateTimeFromString(PyObject *dummy, PyObject *args) { - return set_callback(args, &DateTimeFromString); -} - -//DERIVED FROM mx.DateTime -/* - Functions in the following section are borrowed from mx.DateTime version - 2.0.6, and hence this code is subject to the terms of the egenix public - license version 1.0.0 -*/ - -#define Py_AssertWithArg(x,errortype,errorstr,a1) {if (!(x)) {PyErr_Format(errortype,errorstr,a1);goto onError;}} -#define Py_Error(errortype,errorstr) {PyErr_SetString(errortype,errorstr);goto onError;} - - /* Error Exception objects */ -static PyObject *DateCalc_Error; -static PyObject *DateCalc_RangeError; - -#define GREGORIAN_CALENDAR 0 -#define JULIAN_CALENDAR 1 - -#define SECONDS_PER_DAY ((double) 86400.0) - -/* Table with day offsets for each month (0-based, without and with leap) */ -static int month_offset[2][13] = { - { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 }, - { 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 } -}; - -/* Table of number of days in a month (0-based, without and with leap) */ -static int days_in_month[2][12] = { - { 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 }, - { 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 } -}; - -struct date_info { - long absdate; - double abstime; - - double second; - int minute; - int hour; - int day; - int month; - int quarter; - int year; - int day_of_week; - int day_of_year; - int calendar; -}; - - -/* Return 1/0 iff year points to a leap year in calendar. */ -static -int dInfoCalc_Leapyear(register long year, - int calendar) -{ - if (calendar == GREGORIAN_CALENDAR) { - return (year % 4 == 0) && ((year % 100 != 0) || (year % 400 == 0)); - } else { - return (year % 4 == 0); - } -} - -static -int dInfoCalc_ISOWeek(struct date_info *dinfo) -{ - int week; - - /* Estimate */ - week = (dinfo->day_of_year-1) - dinfo->day_of_week + 3; - if (week >= 0) week = week / 7 + 1; - - /* Verify */ - if (week < 0) { - /* The day lies in last week of the previous year */ - if ((week > -2) || - (week == -2 && dInfoCalc_Leapyear(dinfo->year-1, dinfo->calendar))) - week = 53; - else - week = 52; - } else if (week == 53) { - /* Check if the week belongs to year or year+1 */ - if (31-dinfo->day + dinfo->day_of_week < 3) { - week = 1; - } - } - - return week; -} - - -/* Return the day of the week for the given absolute date. 
*/ -static -int dInfoCalc_DayOfWeek(register long absdate) -{ - int day_of_week; - - if (absdate >= 1) { - day_of_week = (absdate - 1) % 7; - } else { - day_of_week = 6 - ((-absdate) % 7); - } - return day_of_week; -} - -/* Return the year offset, that is the absolute date of the day - 31.12.(year-1) in the given calendar. - - Note: - For the Julian calendar we shift the absdate (which is measured - using the Gregorian Epoch) value by two days because the Epoch - (0001-01-01) in the Julian calendar lies 2 days before the Epoch in - the Gregorian calendar. */ -static -int dInfoCalc_YearOffset(register long year, - int calendar) -{ - year--; - if (calendar == GREGORIAN_CALENDAR) { - if (year >= 0 || -1/4 == -1) - return year*365 + year/4 - year/100 + year/400; - else - return year*365 + (year-3)/4 - (year-99)/100 + (year-399)/400; - } - else if (calendar == JULIAN_CALENDAR) { - if (year >= 0 || -1/4 == -1) - return year*365 + year/4 - 2; - else - return year*365 + (year-3)/4 - 2; - } - Py_Error(DateCalc_Error, "unknown calendar"); - onError: - return -1; -} - - -/* Set the instance's value using the given date and time. calendar - may be set to the flags: GREGORIAN_CALENDAR, - JULIAN_CALENDAR to indicate the calendar to be used. */ - -static -int dInfoCalc_SetFromDateAndTime(struct date_info *dinfo, - int year, - int month, - int day, - int hour, - int minute, - double second, - int calendar) -{ - - /* Calculate the absolute date */ - { - int leap; - long yearoffset,absdate; - - /* Range check */ - Py_AssertWithArg(year > -(INT_MAX / 366) && year < (INT_MAX / 366), - DateCalc_RangeError, - "year out of range: %i", - year); - - /* Is it a leap year ? */ - leap = dInfoCalc_Leapyear(year,calendar); - - /* Negative month values indicate months relative to the years end */ - if (month < 0) month += 13; - Py_AssertWithArg(month >= 1 && month <= 12, - DateCalc_RangeError, - "month out of range (1-12): %i", - month); - - /* Negative values indicate days relative to the months end */ - if (day < 0) day += days_in_month[leap][month - 1] + 1; - Py_AssertWithArg(day >= 1 && day <= days_in_month[leap][month - 1], - DateCalc_RangeError, - "day out of range: %i", - day); - - yearoffset = dInfoCalc_YearOffset(year,calendar); - if (PyErr_Occurred()) goto onError; - - absdate = day + month_offset[leap][month - 1] + yearoffset; - - dinfo->absdate = absdate; - - dinfo->year = year; - dinfo->month = month; - dinfo->quarter = ((month-1)/3)+1; - dinfo->day = day; - - dinfo->day_of_week = dInfoCalc_DayOfWeek(absdate); - dinfo->day_of_year = (short)(absdate - yearoffset); - - dinfo->calendar = calendar; - } - - /* Calculate the absolute time */ - { - Py_AssertWithArg(hour >= 0 && hour <= 23, - DateCalc_RangeError, - "hour out of range (0-23): %i", - hour); - Py_AssertWithArg(minute >= 0 && minute <= 59, - DateCalc_RangeError, - "minute out of range (0-59): %i", - minute); - Py_AssertWithArg(second >= (double)0.0 && - (second < (double)60.0 || - (hour == 23 && minute == 59 && - second < (double)61.0)), - DateCalc_RangeError, - "second out of range (0.0 - <60.0; <61.0 for 23:59): %f", - second); - - dinfo->abstime = (double)(hour*3600 + minute*60) + second; - - dinfo->hour = hour; - dinfo->minute = minute; - dinfo->second = second; - } - return 0; - onError: - return -1; -} - -static int monthToQuarter(int month) { return ((month-1)/3)+1; } - -/* Sets the date part of the date_info struct using the indicated - calendar. 
- - XXX This could also be done using some integer arithmetics rather - than with this iterative approach... */ -static -int dInfoCalc_SetFromAbsDate(register struct date_info *dinfo, - long absdate, - int calendar) -{ - register long year; - long yearoffset; - int leap,dayoffset; - int *monthoffset; - - /* Approximate year */ - if (calendar == GREGORIAN_CALENDAR) { - year = (long)(((double)absdate) / 365.2425); - } else if (calendar == JULIAN_CALENDAR) { - year = (long)(((double)absdate) / 365.25); - } else { - Py_Error(DateCalc_Error, "unknown calendar"); - } - if (absdate > 0) year++; - - /* Apply corrections to reach the correct year */ - while (1) { - /* Calculate the year offset */ - yearoffset = dInfoCalc_YearOffset(year,calendar); - if (PyErr_Occurred()) - goto onError; - - /* Backward correction: absdate must be greater than the - yearoffset */ - if (yearoffset >= absdate) { - year--; - continue; - } - - dayoffset = absdate - yearoffset; - leap = dInfoCalc_Leapyear(year,calendar); - - /* Forward correction: non leap years only have 365 days */ - if (dayoffset > 365 && !leap) { - year++; - continue; - } - break; - } - - dinfo->year = year; - dinfo->calendar = calendar; - - /* Now iterate to find the month */ - monthoffset = month_offset[leap]; - { - register int month; - - for (month = 1; month < 13; month++) { - if (monthoffset[month] >= dayoffset) - break; - } - - dinfo->month = month; - dinfo->quarter = monthToQuarter(month); - dinfo->day = dayoffset - month_offset[leap][month-1]; - } - - - dinfo->day_of_week = dInfoCalc_DayOfWeek(absdate); - dinfo->day_of_year = dayoffset; - dinfo->absdate = absdate; - - return 0; - - onError: - return -1; -} - -/* Sets the time part of the DateTime object. */ -static -int dInfoCalc_SetFromAbsTime(struct date_info *dinfo, - double abstime) -{ - int inttime; - int hour,minute; - double second; - - inttime = (int)abstime; - hour = inttime / 3600; - minute = (inttime % 3600) / 60; - second = abstime - (double)(hour*3600 + minute*60); - - dinfo->hour = hour; - dinfo->minute = minute; - dinfo->second = second; - - dinfo->abstime = abstime; - - return 0; -} - -/* Set the instance's value using the given date and time. calendar - may be set to the flags: GREGORIAN_CALENDAR, JULIAN_CALENDAR to - indicate the calendar to be used. 
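abstime is range-checked against SECONDS_PER_DAY before the two helpers above fill in the date and time parts.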
*/ -static -int dInfoCalc_SetFromAbsDateTime(struct date_info *dinfo, - long absdate, - double abstime, - int calendar) -{ - - /* Bounds check */ - Py_AssertWithArg(abstime >= 0.0 && abstime <= SECONDS_PER_DAY, - DateCalc_Error, - "abstime out of range (0.0 - 86400.0): %f", - abstime); - - /* Calculate the date */ - if (dInfoCalc_SetFromAbsDate(dinfo, - absdate, - calendar)) - goto onError; - - /* Calculate the time */ - if (dInfoCalc_SetFromAbsTime(dinfo, - abstime)) - goto onError; - - return 0; - onError: - return -1; -} - -/* -==================================================== -== End of section borrowed from mx.DateTime == -==================================================== -*/ - - - - - -/////////////////////////////////////////////////////////////////////// - -// helpers for frequency conversion routines // - -static long DtoB_weekday(long fromDate) { return (((fromDate) / 7) * 5) + (fromDate)%7; } - -static long DtoB_WeekendToMonday(long absdate, int day_of_week) { - - if (day_of_week > 4) { - //change to Monday after weekend - absdate += (7 - day_of_week); - } - return DtoB_weekday(absdate); -} - -static long DtoB_WeekendToFriday(long absdate, int day_of_week) { - - if (day_of_week > 4) { - //change to friday before weekend - absdate -= (day_of_week - 4); - } - return DtoB_weekday(absdate); -} - -static long absdate_from_ymd(int y, int m, int d) { - struct date_info tempDate; - if (dInfoCalc_SetFromDateAndTime(&tempDate, y, m, d, 0, 0, 0, GREGORIAN_CALENDAR)) return INT_ERR_CODE; - return tempDate.absdate; -} - - -/////////////////////////////////////////////// - -// frequency specific conversion routines -// each function must take an integer fromDate and a char relation ('B' or 'A' for 'BEFORE' or 'AFTER') - -//************ FROM DAILY *************** - -static long asfreq_DtoA(long fromDate, char relation, asfreq_info *af_info) { - - struct date_info dinfo; - if (dInfoCalc_SetFromAbsDate(&dinfo, fromDate, - GREGORIAN_CALENDAR)) return INT_ERR_CODE; - if (dinfo.month > af_info->to_a_year_end) { return (long)(dinfo.year + 1); } - else { return (long)(dinfo.year); } -} - -static long DtoQ_yq(long fromDate, asfreq_info *af_info, - int *year, int *quarter) { - struct date_info dinfo; - if (dInfoCalc_SetFromAbsDate(&dinfo, fromDate, - GREGORIAN_CALENDAR)) return INT_ERR_CODE; - if (af_info->to_q_year_end != 12) { - dinfo.month -= af_info->to_q_year_end; - if (dinfo.month <= 0) { dinfo.month += 12; } - else { dinfo.year += 1; } - dinfo.quarter = monthToQuarter(dinfo.month); - } - - *year = dinfo.year; - *quarter = dinfo.quarter; - - return 0; -} - - -static long asfreq_DtoQ(long fromDate, char relation, asfreq_info *af_info) { - - int year, quarter; - - if (DtoQ_yq(fromDate, af_info, &year, &quarter) == INT_ERR_CODE) - { return INT_ERR_CODE; } - - return (long)((year - 1) * 4 + quarter); -} - -static long asfreq_DtoM(long fromDate, char relation, asfreq_info *af_info) { - - struct date_info dinfo; - if (dInfoCalc_SetFromAbsDate(&dinfo, fromDate, - GREGORIAN_CALENDAR)) return INT_ERR_CODE; - return (long)((dinfo.year - 1) * 12 + dinfo.month); -} - -static long asfreq_DtoW(long fromDate, char relation, asfreq_info *af_info) { - return (fromDate - (1 + af_info->to_week_end))/7 + 1; -} - -static long asfreq_DtoB(long fromDate, char relation, asfreq_info *af_info) { - - struct date_info dinfo; - if (dInfoCalc_SetFromAbsDate(&dinfo, fromDate, - GREGORIAN_CALENDAR)) return INT_ERR_CODE; - - if (relation == 'B') { - return DtoB_WeekendToFriday(dinfo.absdate, dinfo.day_of_week); - } else { -
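/* relation 'A' (after): a weekend date rolls forward to the following Monday */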
return DtoB_WeekendToMonday(dinfo.absdate, dinfo.day_of_week); - } -} - -static long asfreq_DtoB_forConvert(long fromDate, char relation, asfreq_info *af_info) { - - struct date_info dinfo; - if (dInfoCalc_SetFromAbsDate(&dinfo, fromDate, - GREGORIAN_CALENDAR)) return INT_ERR_CODE; - - if (dinfo.day_of_week > 4) { - return -1; - } else { - return DtoB_weekday(fromDate); - } -} - -// needed for getDateInfo function -static long asfreq_DtoD(long fromDate, char relation, asfreq_info *af_info) { return fromDate; } - -static long asfreq_DtoHIGHFREQ(long fromDate, char relation, long periodsPerDay) { - if (fromDate >= HIGHFREQ_ORIG) { - if (relation == 'B') { return (fromDate - HIGHFREQ_ORIG)*(periodsPerDay) + 1; } - else { return (fromDate - HIGHFREQ_ORIG + 1)*(periodsPerDay); } - } else { return -1; } -} - -static long asfreq_DtoH(long fromDate, char relation, asfreq_info *af_info) - { return asfreq_DtoHIGHFREQ(fromDate, relation, 24); } -static long asfreq_DtoT(long fromDate, char relation, asfreq_info *af_info) - { return asfreq_DtoHIGHFREQ(fromDate, relation, 24*60); } -static long asfreq_DtoS(long fromDate, char relation, asfreq_info *af_info) - { return asfreq_DtoHIGHFREQ(fromDate, relation, 24*60*60); } - -//************ FROM SECONDLY *************** - -static long asfreq_StoD(long fromDate, char relation, asfreq_info *af_info) - { return (fromDate - 1)/(60*60*24) + HIGHFREQ_ORIG; } - -static long asfreq_StoA(long fromDate, char relation, asfreq_info *af_info) - { return asfreq_DtoA(asfreq_StoD(fromDate, relation, &NULL_AF_INFO), relation, af_info); } -static long asfreq_StoQ(long fromDate, char relation, asfreq_info *af_info) - { return asfreq_DtoQ(asfreq_StoD(fromDate, relation, &NULL_AF_INFO), relation, af_info); } -static long asfreq_StoM(long fromDate, char relation, asfreq_info *af_info) - { return asfreq_DtoM(asfreq_StoD(fromDate, relation, &NULL_AF_INFO), relation, &NULL_AF_INFO); } -static long asfreq_StoW(long fromDate, char relation, asfreq_info *af_info) - { return asfreq_DtoW(asfreq_StoD(fromDate, relation, &NULL_AF_INFO), relation, af_info); } -static long asfreq_StoB(long fromDate, char relation, asfreq_info *af_info) - { return asfreq_DtoB(asfreq_StoD(fromDate, relation, &NULL_AF_INFO), relation, &NULL_AF_INFO); } -static long asfreq_StoB_forConvert(long fromDate, char relation, asfreq_info *af_info) - { return asfreq_DtoB_forConvert(asfreq_StoD(fromDate, relation, &NULL_AF_INFO), relation, &NULL_AF_INFO); } -static long asfreq_StoT(long fromDate, char relation, asfreq_info *af_info) - { return (fromDate - 1)/60 + 1; } -static long asfreq_StoH(long fromDate, char relation, asfreq_info *af_info) - { return (fromDate - 1)/(60*60) + 1; } - -//************ FROM MINUTELY *************** - -static long asfreq_TtoD(long fromDate, char relation, asfreq_info *af_info) - { return (fromDate - 1)/(60*24) + HIGHFREQ_ORIG; } - -static long asfreq_TtoA(long fromDate, char relation, asfreq_info *af_info) - { return asfreq_DtoA(asfreq_TtoD(fromDate, relation, &NULL_AF_INFO), relation, af_info); } -static long asfreq_TtoQ(long fromDate, char relation, asfreq_info *af_info) - { return asfreq_DtoQ(asfreq_TtoD(fromDate, relation, &NULL_AF_INFO), relation, af_info); } -static long asfreq_TtoM(long fromDate, char relation, asfreq_info *af_info) - { return asfreq_DtoM(asfreq_TtoD(fromDate, relation, &NULL_AF_INFO), relation, &NULL_AF_INFO); } -static long asfreq_TtoW(long fromDate, char relation, asfreq_info *af_info) - { return asfreq_DtoW(asfreq_TtoD(fromDate, relation, &NULL_AF_INFO), 
relation, af_info); } -static long asfreq_TtoB(long fromDate, char relation, asfreq_info *af_info) - { return asfreq_DtoB(asfreq_TtoD(fromDate, relation, &NULL_AF_INFO), relation, &NULL_AF_INFO); } - -static long asfreq_TtoB_forConvert(long fromDate, char relation, asfreq_info *af_info) - { return asfreq_DtoB_forConvert(asfreq_TtoD(fromDate, relation, &NULL_AF_INFO), relation, &NULL_AF_INFO); } - -static long asfreq_TtoH(long fromDate, char relation, asfreq_info *af_info) - { return (fromDate - 1)/60 + 1; } -static long asfreq_TtoS(long fromDate, char relation, asfreq_info *af_info) { - if (relation == 'B') { return fromDate*60 - 59; } - else { return fromDate*60; }} - -//************ FROM HOURLY *************** - -static long asfreq_HtoD(long fromDate, char relation, asfreq_info *af_info) - { return (fromDate - 1)/24 + HIGHFREQ_ORIG; } -static long asfreq_HtoA(long fromDate, char relation, asfreq_info *af_info) - { return asfreq_DtoA(asfreq_HtoD(fromDate, relation, &NULL_AF_INFO), relation, af_info); } -static long asfreq_HtoQ(long fromDate, char relation, asfreq_info *af_info) - { return asfreq_DtoQ(asfreq_HtoD(fromDate, relation, &NULL_AF_INFO), relation, af_info); } -static long asfreq_HtoM(long fromDate, char relation, asfreq_info *af_info) - { return asfreq_DtoM(asfreq_HtoD(fromDate, relation, &NULL_AF_INFO), relation, &NULL_AF_INFO); } -static long asfreq_HtoW(long fromDate, char relation, asfreq_info *af_info) - { return asfreq_DtoW(asfreq_HtoD(fromDate, relation, &NULL_AF_INFO), relation, af_info); } -static long asfreq_HtoB(long fromDate, char relation, asfreq_info *af_info) - { return asfreq_DtoB(asfreq_HtoD(fromDate, relation, &NULL_AF_INFO), relation, &NULL_AF_INFO); } - -static long asfreq_HtoB_forConvert(long fromDate, char relation, asfreq_info *af_info) - { return asfreq_DtoB_forConvert(asfreq_HtoD(fromDate, relation, &NULL_AF_INFO), relation, &NULL_AF_INFO); } - -// calculation works out the same as TtoS, so we just call that function for HtoT -static long asfreq_HtoT(long fromDate, char relation, asfreq_info *af_info) - { return asfreq_TtoS(fromDate, relation, &NULL_AF_INFO); } -static long asfreq_HtoS(long fromDate, char relation, asfreq_info *af_info) { - if (relation == 'B') { return fromDate*60*60 - 60*60 + 1; } - else { return fromDate*60*60; }} - -//************ FROM BUSINESS *************** - -static long asfreq_BtoD(long fromDate, char relation, asfreq_info *af_info) - { return ((fromDate-1)/5)*7 + (fromDate-1)%5 + 1; } - -static long asfreq_BtoA(long fromDate, char relation, asfreq_info *af_info) - { return asfreq_DtoA(asfreq_BtoD(fromDate, relation, &NULL_AF_INFO), relation, af_info); } - -static long asfreq_BtoQ(long fromDate, char relation, asfreq_info *af_info) - { return asfreq_DtoQ(asfreq_BtoD(fromDate, relation, &NULL_AF_INFO), relation, af_info); } - -static long asfreq_BtoM(long fromDate, char relation, asfreq_info *af_info) - { return asfreq_DtoM(asfreq_BtoD(fromDate, relation, &NULL_AF_INFO), relation, &NULL_AF_INFO); } - -static long asfreq_BtoW(long fromDate, char relation, asfreq_info *af_info) - { return asfreq_DtoW(asfreq_BtoD(fromDate, relation, &NULL_AF_INFO), relation, af_info); } - -static long asfreq_BtoH(long fromDate, char relation, asfreq_info *af_info) - { return asfreq_DtoH(asfreq_BtoD(fromDate, relation, &NULL_AF_INFO), relation, &NULL_AF_INFO); } - -static long asfreq_BtoT(long fromDate, char relation, asfreq_info *af_info) - { return asfreq_DtoT(asfreq_BtoD(fromDate, relation, &NULL_AF_INFO), relation, &NULL_AF_INFO); } - -static 
long asfreq_BtoS(long fromDate, char relation, asfreq_info *af_info) - { return asfreq_DtoS(asfreq_BtoD(fromDate, relation, &NULL_AF_INFO), relation, &NULL_AF_INFO); } - -//************ FROM WEEKLY *************** - -static long asfreq_WtoD(long fromDate, char relation, asfreq_info *af_info) { - if (relation == 'B') { return fromDate * 7 - 6 + af_info->from_week_end;} - else { return fromDate * 7 + af_info->from_week_end; } -} - -static long asfreq_WtoA(long fromDate, char relation, asfreq_info *af_info) { - return asfreq_DtoA(asfreq_WtoD(fromDate, 'A', af_info), relation, af_info); } -static long asfreq_WtoQ(long fromDate, char relation, asfreq_info *af_info) { - return asfreq_DtoQ(asfreq_WtoD(fromDate, 'A', af_info), relation, af_info); } -static long asfreq_WtoM(long fromDate, char relation, asfreq_info *af_info) { - return asfreq_DtoM(asfreq_WtoD(fromDate, 'A', af_info), relation, &NULL_AF_INFO); } - -static long asfreq_WtoW(long fromDate, char relation, asfreq_info *af_info) - { return asfreq_DtoW(asfreq_WtoD(fromDate, relation, af_info), relation, af_info); } - -static long asfreq_WtoB(long fromDate, char relation, asfreq_info *af_info) { - - struct date_info dinfo; - if (dInfoCalc_SetFromAbsDate(&dinfo, asfreq_WtoD(fromDate, relation, af_info), - GREGORIAN_CALENDAR)) return INT_ERR_CODE; - - if (relation == 'B') { return DtoB_WeekendToMonday(dinfo.absdate, dinfo.day_of_week); } - else { return DtoB_WeekendToFriday(dinfo.absdate, dinfo.day_of_week); } -} - -static long asfreq_WtoH(long fromDate, char relation, asfreq_info *af_info) - { return asfreq_DtoH(asfreq_WtoD(fromDate, relation, af_info), relation, &NULL_AF_INFO); } -static long asfreq_WtoT(long fromDate, char relation, asfreq_info *af_info) - { return asfreq_DtoT(asfreq_WtoD(fromDate, relation, af_info), relation, &NULL_AF_INFO); } -static long asfreq_WtoS(long fromDate, char relation, asfreq_info *af_info) - { return asfreq_DtoS(asfreq_WtoD(fromDate, relation, af_info), relation, &NULL_AF_INFO); } - -//************ FROM MONTHLY *************** - -static void MtoD_ym(long fromDate, long *y, long *m) { - *y = (fromDate - 1) / 12 + 1; - *m = fromDate - 12 * ((*y) - 1); -} - -static long asfreq_MtoD(long fromDate, char relation, asfreq_info *af_info) { - - long y, m, absdate; - - if (relation == 'B') { - MtoD_ym(fromDate, &y, &m); - if ((absdate = absdate_from_ymd(y, m, 1)) == INT_ERR_CODE) return INT_ERR_CODE; - return absdate; - } else { - MtoD_ym(fromDate+1, &y, &m); - if ((absdate = absdate_from_ymd(y, m, 1)) == INT_ERR_CODE) return INT_ERR_CODE; - return absdate-1; - } -} - -static long asfreq_MtoA(long fromDate, char relation, asfreq_info *af_info) { - return asfreq_DtoA(asfreq_MtoD(fromDate, 'A', &NULL_AF_INFO), relation, af_info); } - -static long asfreq_MtoQ(long fromDate, char relation, asfreq_info *af_info) { - return asfreq_DtoQ(asfreq_MtoD(fromDate, 'A', &NULL_AF_INFO), relation, af_info); } - -static long asfreq_MtoW(long fromDate, char relation, asfreq_info *af_info) - { return asfreq_DtoW(asfreq_MtoD(fromDate, relation, &NULL_AF_INFO), relation, af_info); } - -static long asfreq_MtoB(long fromDate, char relation, asfreq_info *af_info) { - - struct date_info dinfo; - if (dInfoCalc_SetFromAbsDate(&dinfo, asfreq_MtoD(fromDate, relation, &NULL_AF_INFO), - GREGORIAN_CALENDAR)) return INT_ERR_CODE; - - if (relation == 'B') { return DtoB_WeekendToMonday(dinfo.absdate, dinfo.day_of_week); } - else { return DtoB_WeekendToFriday(dinfo.absdate, dinfo.day_of_week); } -} - -static long asfreq_MtoH(long fromDate, char relation,
asfreq_info *af_info) - { return asfreq_DtoH(asfreq_MtoD(fromDate, relation, &NULL_AF_INFO), relation, &NULL_AF_INFO); } -static long asfreq_MtoT(long fromDate, char relation, asfreq_info *af_info) - { return asfreq_DtoT(asfreq_MtoD(fromDate, relation, &NULL_AF_INFO), relation, &NULL_AF_INFO); } -static long asfreq_MtoS(long fromDate, char relation, asfreq_info *af_info) - { return asfreq_DtoS(asfreq_MtoD(fromDate, relation, &NULL_AF_INFO), relation, &NULL_AF_INFO); } - -//************ FROM QUARTERLY *************** - -static void QtoD_ym(long fromDate, long *y, long *m, asfreq_info *af_info) { - - *y = (fromDate - 1) / 4 + 1; - *m = (fromDate + 4) * 3 - 12 * (*y) - 2; - - if (af_info->from_q_year_end != 12) { - *m += af_info->from_q_year_end; - if (*m > 12) { *m -= 12; } - else { *y -= 1; } - } -} - -static long asfreq_QtoD(long fromDate, char relation, asfreq_info *af_info) { - - long y, m, absdate; - - if (relation == 'B') { - QtoD_ym(fromDate, &y, &m, af_info); - if ((absdate = absdate_from_ymd(y, m, 1)) == INT_ERR_CODE) return INT_ERR_CODE; - return absdate; - } else { - QtoD_ym(fromDate+1, &y, &m, af_info); - if ((absdate = absdate_from_ymd(y, m, 1)) == INT_ERR_CODE) return INT_ERR_CODE; - return absdate - 1; - } -} - -static long asfreq_QtoQ(long fromDate, char relation, asfreq_info *af_info) - { return asfreq_DtoQ(asfreq_QtoD(fromDate, relation, af_info), relation, af_info); } - -static long asfreq_QtoA(long fromDate, char relation, asfreq_info *af_info) { - return asfreq_DtoA(asfreq_QtoD(fromDate, relation, af_info), relation, af_info); } - -static long asfreq_QtoM(long fromDate, char relation, asfreq_info *af_info) { - return asfreq_DtoM(asfreq_QtoD(fromDate, relation, af_info), relation, &NULL_AF_INFO); } - -static long asfreq_QtoW(long fromDate, char relation, asfreq_info *af_info) - { return asfreq_DtoW(asfreq_QtoD(fromDate, relation, af_info), relation, af_info); } - -static long asfreq_QtoB(long fromDate, char relation, asfreq_info *af_info) { - - struct date_info dinfo; - if (dInfoCalc_SetFromAbsDate(&dinfo, asfreq_QtoD(fromDate, relation, af_info), - GREGORIAN_CALENDAR)) return INT_ERR_CODE; - - if (relation == 'B') { return DtoB_WeekendToMonday(dinfo.absdate, dinfo.day_of_week); } - else { return DtoB_WeekendToFriday(dinfo.absdate, dinfo.day_of_week); } -} - - -static long asfreq_QtoH(long fromDate, char relation, asfreq_info *af_info) - { return asfreq_DtoH(asfreq_QtoD(fromDate, relation, af_info), relation, &NULL_AF_INFO); } -static long asfreq_QtoT(long fromDate, char relation, asfreq_info *af_info) - { return asfreq_DtoT(asfreq_QtoD(fromDate, relation, af_info), relation, &NULL_AF_INFO); } -static long asfreq_QtoS(long fromDate, char relation, asfreq_info *af_info) - { return asfreq_DtoS(asfreq_QtoD(fromDate, relation, af_info), relation, &NULL_AF_INFO); } - - -//************ FROM ANNUAL *************** - -static long asfreq_AtoD(long fromDate, char relation, asfreq_info *af_info) { - long absdate, year, final_adj; - int month = (af_info->from_a_year_end) % 12; - - if (month == 0) { month = 1; } - else { month += 1; } - - if (relation == 'B') { - if (af_info->from_a_year_end == 12) {year = fromDate;} - else {year = fromDate - 1;} - final_adj = 0; - } else { - if (af_info->from_a_year_end == 12) {year = fromDate+1;} - else {year = fromDate;} - final_adj = -1; - } - absdate = absdate_from_ymd(year, month, 1); - if (absdate == INT_ERR_CODE) return INT_ERR_CODE; - return absdate + final_adj; -} - -static long asfreq_AtoA(long fromDate, char relation, asfreq_info *af_info) 
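/* Worked example for asfreq_AtoD above: with a December year end, annual period 2001 covers 2001-01-01 through 2001-12-31; with a June year end (start month 7, year shifted down in the 'B' branch) it covers 2000-07-01 through 2001-06-30. */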
- { return asfreq_DtoA(asfreq_AtoD(fromDate, relation, af_info), relation, af_info); } - -static long asfreq_AtoQ(long fromDate, char relation, asfreq_info *af_info) - { return asfreq_DtoQ(asfreq_AtoD(fromDate, relation, af_info), relation, af_info); } - -static long asfreq_AtoM(long fromDate, char relation, asfreq_info *af_info) - { return asfreq_DtoM(asfreq_AtoD(fromDate, relation, af_info), relation, af_info); } - -static long asfreq_AtoW(long fromDate, char relation, asfreq_info *af_info) - { return asfreq_DtoW(asfreq_AtoD(fromDate, relation, af_info), relation, af_info); } - -static long asfreq_AtoB(long fromDate, char relation, asfreq_info *af_info) { - - struct date_info dinfo; - if (dInfoCalc_SetFromAbsDate(&dinfo, asfreq_AtoD(fromDate, relation, af_info), - GREGORIAN_CALENDAR)) return INT_ERR_CODE; - - if (relation == 'B') { return DtoB_WeekendToMonday(dinfo.absdate, dinfo.day_of_week); } - else { return DtoB_WeekendToFriday(dinfo.absdate, dinfo.day_of_week); } -} - -static long asfreq_AtoH(long fromDate, char relation, asfreq_info *af_info) - { return asfreq_DtoH(asfreq_AtoD(fromDate, relation, af_info), relation, &NULL_AF_INFO); } -static long asfreq_AtoT(long fromDate, char relation, asfreq_info *af_info) - { return asfreq_DtoT(asfreq_AtoD(fromDate, relation, af_info), relation, &NULL_AF_INFO); } -static long asfreq_AtoS(long fromDate, char relation, asfreq_info *af_info) - { return asfreq_DtoS(asfreq_AtoD(fromDate, relation, af_info), relation, &NULL_AF_INFO); } - -static long nofunc(long fromDate, char relation, asfreq_info *af_info) { return -1; } - -// end of frequency specific conversion routines - -// return a pointer to appropriate conversion function -long (*get_asfreq_func(int fromFreq, int toFreq, int forConvert))(long, char, asfreq_info*) { - - int fromGroup = get_freq_group(fromFreq); - int toGroup = get_freq_group(toFreq); - - if (fromGroup == FR_UND) { fromGroup = FR_DAY; } - - switch(fromGroup) - { - case FR_ANN: - switch(toGroup) - { - case FR_ANN: return &asfreq_AtoA; - case FR_QTR: return &asfreq_AtoQ; - case FR_MTH: return &asfreq_AtoM; - case FR_WK: return &asfreq_AtoW; - case FR_BUS: return &asfreq_AtoB; - case FR_DAY: return &asfreq_AtoD; - case FR_HR: return &asfreq_AtoH; - case FR_MIN: return &asfreq_AtoT; - case FR_SEC: return &asfreq_AtoS; - default: return &nofunc; - } - - case FR_QTR: - switch(toGroup) - { - case FR_ANN: return &asfreq_QtoA; - case FR_QTR: return &asfreq_QtoQ; - case FR_MTH: return &asfreq_QtoM; - case FR_WK: return &asfreq_QtoW; - case FR_BUS: return &asfreq_QtoB; - case FR_DAY: return &asfreq_QtoD; - case FR_HR: return &asfreq_QtoH; - case FR_MIN: return &asfreq_QtoT; - case FR_SEC: return &asfreq_QtoS; - default: return &nofunc; - } - - case FR_MTH: - switch(toGroup) - { - case FR_ANN: return &asfreq_MtoA; - case FR_QTR: return &asfreq_MtoQ; - case FR_WK: return &asfreq_MtoW; - case FR_BUS: return &asfreq_MtoB; - case FR_DAY: return &asfreq_MtoD; - case FR_HR: return &asfreq_MtoH; - case FR_MIN: return &asfreq_MtoT; - case FR_SEC: return &asfreq_MtoS; - default: return &nofunc; - } - - case FR_WK: - switch(toGroup) - { - case FR_ANN: return &asfreq_WtoA; - case FR_QTR: return &asfreq_WtoQ; - case FR_MTH: return &asfreq_WtoM; - case FR_WK: return &asfreq_WtoW; - case FR_BUS: return &asfreq_WtoB; - case FR_DAY: return &asfreq_WtoD; - case FR_HR: return &asfreq_WtoH; - case FR_MIN: return &asfreq_WtoT; - case FR_SEC: return &asfreq_WtoS; - default: return &nofunc; - } - - case FR_BUS: - switch(toGroup) - { - case FR_ANN: return 
&asfreq_BtoA; - case FR_QTR: return &asfreq_BtoQ; - case FR_MTH: return &asfreq_BtoM; - case FR_WK: return &asfreq_BtoW; - case FR_DAY: return &asfreq_BtoD; - case FR_HR: return &asfreq_BtoH; - case FR_MIN: return &asfreq_BtoT; - case FR_SEC: return &asfreq_BtoS; - default: return &nofunc; - } - - case FR_DAY: - switch(toGroup) - { - case FR_ANN: return &asfreq_DtoA; - case FR_QTR: return &asfreq_DtoQ; - case FR_MTH: return &asfreq_DtoM; - case FR_WK: return &asfreq_DtoW; - case FR_BUS: - if (forConvert) { return &asfreq_DtoB_forConvert; } - else { return &asfreq_DtoB; } - case FR_DAY: return &asfreq_DtoD; - case FR_HR: return &asfreq_DtoH; - case FR_MIN: return &asfreq_DtoT; - case FR_SEC: return &asfreq_DtoS; - default: return &nofunc; - } - - case FR_HR: - switch(toGroup) - { - case FR_ANN: return &asfreq_HtoA; - case FR_QTR: return &asfreq_HtoQ; - case FR_MTH: return &asfreq_HtoM; - case FR_WK: return &asfreq_HtoW; - case FR_BUS: - if (forConvert) { return &asfreq_HtoB_forConvert; } - else { return &asfreq_HtoB; } - case FR_DAY: return &asfreq_HtoD; - case FR_MIN: return &asfreq_HtoT; - case FR_SEC: return &asfreq_HtoS; - default: return &nofunc; - } - - case FR_MIN: - switch(toGroup) - { - case FR_ANN: return &asfreq_TtoA; - case FR_QTR: return &asfreq_TtoQ; - case FR_MTH: return &asfreq_TtoM; - case FR_WK: return &asfreq_TtoW; - case FR_BUS: - if (forConvert) { return &asfreq_TtoB_forConvert; } - else { return &asfreq_TtoB; } - case FR_DAY: return &asfreq_TtoD; - case FR_HR: return &asfreq_TtoH; - case FR_SEC: return &asfreq_TtoS; - default: return &nofunc; - } - - case FR_SEC: - switch(toGroup) - { - case FR_ANN: return &asfreq_StoA; - case FR_QTR: return &asfreq_StoQ; - case FR_MTH: return &asfreq_StoM; - case FR_WK: return &asfreq_StoW; - case FR_BUS: - if (forConvert) { return &asfreq_StoB_forConvert; } - else { return &asfreq_StoB; } - case FR_DAY: return &asfreq_StoD; - case FR_HR: return &asfreq_StoH; - case FR_MIN: return &asfreq_StoT; - default: return &nofunc; - } - default: return &nofunc; - } -} - -static int calc_a_year_end(int freq, int group) { - int result = (freq - group) % 12; - if (result == 0) {return 12;} - else {return result;} -} - -static int calc_week_end(int freq, int group) { - return freq - group; -} - -void get_asfreq_info(int fromFreq, int toFreq, asfreq_info *af_info) { - - int fromGroup = get_freq_group(fromFreq); - int toGroup = get_freq_group(toFreq); - - switch(fromGroup) - { - case FR_WK: { - af_info->from_week_end = calc_week_end(fromFreq, fromGroup); - } break; - case FR_ANN: { - af_info->from_a_year_end = calc_a_year_end(fromFreq, fromGroup); - } break; - case FR_QTR: { - af_info->from_q_year_end = calc_a_year_end(fromFreq, fromGroup); - } break; - - } - - switch(toGroup) - { - case FR_WK: { - af_info->to_week_end = calc_week_end(toFreq, toGroup); - } break; - case FR_ANN: { - af_info->to_a_year_end = calc_a_year_end(toFreq, toGroup); - } break; - case FR_QTR: { - af_info->to_q_year_end = calc_a_year_end(toFreq, toGroup); - } break; - } - -} - -static double getAbsTime(int freq, long dailyDate, long originalDate) { - - long startOfDay, periodsPerDay; - - switch(freq) - { - case FR_HR: - periodsPerDay = 24; - break; - case FR_MIN: - periodsPerDay = 24*60; - break; - case FR_SEC: - periodsPerDay = 24*60*60; - break; - default: - return 24*60*60 - 1; - } - - startOfDay = asfreq_DtoHIGHFREQ(dailyDate, 'B', periodsPerDay); - return (24*60*60)*((double)(originalDate - startOfDay))/((double)periodsPerDay); -} - 
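(Aside: the conversion arithmetic above is straightforward to sanity-check outside the extension. The standalone C sketch below is not part of the module; it restates a few of the rules from this file and asserts them on known dates. The one assumed constant is HIGHFREQ_ORIG, whose real value lives in c_tdates.h; 719163, the proleptic-Gregorian ordinal of 1970-01-01, is used here purely for illustration.)

#include <assert.h>
#include <stdio.h>

#define HIGHFREQ_ORIG 719163L /* assumed for illustration; see c_tdates.h */

/* Gregorian leap-year rule, as in dInfoCalc_Leapyear. */
static int leap(long y) { return y % 4 == 0 && (y % 100 != 0 || y % 400 == 0); }

/* Monday == 0, as in dInfoCalc_DayOfWeek (absolute date 1 is a Monday). */
static int dow(long absdate) { return (int)((absdate - 1) % 7); }

/* Daily <-> business mappings, as in DtoB_weekday and asfreq_BtoD. */
static long dtob(long d) { return (d / 7) * 5 + d % 7; }
static long btod(long b) { return ((b - 1) / 5) * 7 + (b - 1) % 5 + 1; }

int main(void) {
    long b, v, y, m;

    assert(leap(2000) && !leap(1900) && !leap(2007)); /* century rule */
    assert(dow(730486) == 0); /* 730486 is the ordinal of 2001-01-01, a Monday */

    /* business days 1..5 are Mon..Fri of week 0; weekends are skipped */
    assert(dtob(1) == 1 && dtob(5) == 5 && dtob(8) == 6);
    for (b = 1; b <= 50; b++) assert(dtob(btod(b)) == b); /* BtoD then DtoB */

    /* asfreq_StoD: second 1 is on the epoch day, second 86401 on the next */
    assert((1L - 1) / 86400 + HIGHFREQ_ORIG == HIGHFREQ_ORIG);
    assert((86401L - 1) / 86400 + HIGHFREQ_ORIG == HIGHFREQ_ORIG + 1);

    /* monthly encoding (year-1)*12 + month and its MtoD_ym inverse */
    v = (2001 - 1) * 12 + 9; /* Sep 2001 */
    y = (v - 1) / 12 + 1;
    m = v - 12 * (y - 1);
    assert(y == 2001 && m == 9);

    printf("c_tdates arithmetic checks OK\n");
    return 0;
}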
-/************************************************************ -** Date type definition -************************************************************/ - -typedef struct { - PyObject_HEAD - int freq; /* frequency of date */ - int value; /* integer representation of date */ - PyObject* cached_vals; -} DateObject; - -/* Forward declarations */ -static PyTypeObject DateType; -#define DateObject_Check(op) PyObject_TypeCheck(op, &DateType) - -static void -DateObject_dealloc(DateObject* self) { - Py_XDECREF(self->cached_vals); - self->ob_type->tp_free((PyObject*)self); -} - - -static PyObject *freq_dict, *freq_dict_rev, *freq_constants; - -#define DICT_SETINT_STRKEY(dict, key, val) \ - {PyObject *pyval = PyInt_FromLong(val); \ - PyDict_SetItemString(dict, key, pyval); \ - Py_DECREF(pyval); } - -#define ADD_FREQ_CONSTANT(const_name, val) \ - DICT_SETINT_STRKEY(freq_constants, const_name, val) - -#define INIT_FREQ(const_name, key, aliases) \ - {PyObject *pykey = PyInt_FromLong(key); \ - PyDict_SetItem(freq_dict, pykey, aliases); \ - PyDict_SetItemString(freq_constants, const_name, pykey); \ - Py_DECREF(pykey); \ - Py_DECREF(aliases); } - - -static int init_freq_group(int num_items, int num_roots, int base_const, - char item_abbrevs[][2][10], char group_prefixes[][15], - char item_const_names[][15]) { - - int i; - - for (i = 0; i < num_items; i++) { - - PyObject *aliases; - int j, size, k; - - if (i == 0) { k = 3; } else { k = 2; } - - size = num_roots * k; - - aliases = PyTuple_New(size); - - for (j = 0; j < num_roots; j++) { - PyObject *alias_v1, *alias_v2; - char *root, *alt; - - if ((root = malloc((30) * sizeof(char))) == NULL) return INT_ERR_CODE; - if ((alt = malloc((30) * sizeof(char))) == NULL) return INT_ERR_CODE; - - strcpy(root, group_prefixes[j]); - strcpy(alt, group_prefixes[j]); - - if (i == 0) { - PyObject *alias = PyString_FromString(root); - PyTuple_SET_ITEM(aliases, j*k + 2, alias); - } - - strcat(root, "-"); - strcat(root, item_abbrevs[i][0]); - strcat(alt, "-"); - strcat(alt, item_abbrevs[i][1]); - - alias_v1 = PyString_FromString(root); - alias_v2 = PyString_FromString(alt); - - free(root); - free(alt); - - PyTuple_SET_ITEM(aliases, j*k, alias_v1); - PyTuple_SET_ITEM(aliases, j*k + 1, alias_v2); - } - - INIT_FREQ(item_const_names[i], base_const+i, aliases); - } - - return 0; -} - -/* take a dictionary with integer keys and tuples of strings for values, - and populate a dictionary with all the strings as keys and integers - for values */ -static int reverse_dict(PyObject *source, PyObject *dest) { - - PyObject *key, *value; - - Py_ssize_t pos = 0; - - while (PyDict_Next(source, &pos, &key, &value)) { - PyObject *tuple_iter; - PyObject *item; - - if((tuple_iter = PyObject_GetIter(value)) == NULL) return INT_ERR_CODE; - - while ((item = PyIter_Next(tuple_iter)) != NULL) { - PyDict_SetItem(dest, item, key); - Py_DECREF(item); - } - Py_DECREF(tuple_iter); - } - return 0; -} - -static int build_freq_dict(void) { - - char ANN_prefixes[8][15] = { "A", "Y", "ANN", "ANNUAL", "ANNUALLY", - "YR", "YEAR", "YEARLY" }; - - char QTRE_prefixes[8][15] = { "Q", "QTR", "QUARTER", "QUARTERLY", "Q-E", - "QTR-E", "QUARTER-E", "QUARTERLY-E"}; - char QTRS_prefixes[4][15] = { "Q-S", "QTR-S", "QUARTER-S", "QUARTERLY-S" }; - - char WK_prefixes[4][15] = { "W", "WK", "WEEK", "WEEKLY" }; - - /* Note: order of this array must match up with how the Annual - frequency constants are lined up */ - char month_names[12][2][10] = { - { "DEC", "DECEMBER" }, - { "JAN", "JANUARY" }, - { "FEB", "FEBRUARY" }, - { "MAR", 
"MARCH" }, - { "APR", "APRIL" }, - { "MAY", "MAY" }, - { "JUN", "JUNE" }, - { "JUL", "JULY" }, - { "AUG", "AUGUST" }, - { "SEP", "SEPTEMBER" }, - { "OCT", "OCTOBER" }, - { "NOV", "NOVEMBER" }}; - - char day_names[7][2][10] = { - { "SUN", "SUNDAY" }, - { "MON", "MONDAY" }, - { "TUE", "TUESDAY" }, - { "WED", "WEDNESDAY" }, - { "THU", "THURSDAY" }, - { "FRI", "FRIDAY" }, - { "SAT", "SATURDAY" }}; - - char ANN_const_names[12][15] = { - "FR_ANNDEC", - "FR_ANNJAN", - "FR_ANNFEB", - "FR_ANNMAR", - "FR_ANNAPR", - "FR_ANNMAY", - "FR_ANNJUN", - "FR_ANNJUL", - "FR_ANNAUG", - "FR_ANNSEP", - "FR_ANNOCT", - "FR_ANNNOV"}; - - char QTRE_const_names[12][15] = { - "FR_QTREDEC", - "FR_QTREJAN", - "FR_QTREFEB", - "FR_QTREMAR", - "FR_QTREAPR", - "FR_QTREMAY", - "FR_QTREJUN", - "FR_QTREJUL", - "FR_QTREAUG", - "FR_QTRESEP", - "FR_QTREOCT", - "FR_QTRENOV"}; - - char QTRS_const_names[12][15] = { - "FR_QTRSDEC", - "FR_QTRSJAN", - "FR_QTRSFEB", - "FR_QTRSMAR", - "FR_QTRSAPR", - "FR_QTRSMAY", - "FR_QTRSJUN", - "FR_QTRSJUL", - "FR_QTRSAUG", - "FR_QTRSSEP", - "FR_QTRSOCT", - "FR_QTRSNOV"}; - - char WK_const_names[7][15] = { - "FR_WKSUN", - "FR_WKMON", - "FR_WKTUE", - "FR_WKWED", - "FR_WKTHU", - "FR_WKFRI", - "FR_WKSAT"}; - - PyObject *aliases; - - freq_dict = PyDict_New(); - freq_dict_rev = PyDict_New(); - freq_constants = PyDict_New(); - - aliases = Py_BuildValue("(ssss)", "M", "MTH", "MONTH", "MONTHLY"); - INIT_FREQ("FR_MTH", FR_MTH, aliases); - - aliases = Py_BuildValue("(ssss)", "B", "BUS", "BUSINESS", "BUSINESSLY"); - INIT_FREQ("FR_BUS", FR_BUS, aliases); - - aliases = Py_BuildValue("(ssss)", "D", "DAY", "DLY", "DAILY"); - INIT_FREQ("FR_DAY", FR_DAY, aliases); - - aliases = Py_BuildValue("(sssss)", "H", "HR", "HOUR", "HRLY", "HOURLY"); - INIT_FREQ("FR_HR", FR_HR, aliases); - - aliases = Py_BuildValue("(ssss)", "T", "MIN", "MINUTE", "MINUTELY"); - INIT_FREQ("FR_MIN", FR_MIN, aliases); - - aliases = Py_BuildValue("(ssss)", "S", "SEC", "SECOND", "SECONDLY"); - INIT_FREQ("FR_SEC", FR_SEC, aliases); - - aliases = Py_BuildValue("(ssss)", "U", "UND", "UNDEF", "UNDEFINED"); - INIT_FREQ("FR_UND", FR_UND, aliases); - - ADD_FREQ_CONSTANT("FR_ANN", FR_ANN); - - if(init_freq_group(12, 8, FR_ANN, - month_names, ANN_prefixes, ANN_const_names) == INT_ERR_CODE) { - return INT_ERR_CODE; - } - - ADD_FREQ_CONSTANT("FR_QTR", FR_QTR); - - if(init_freq_group(12, 8, FR_QTREDEC, - month_names, QTRE_prefixes, QTRE_const_names) == INT_ERR_CODE) { - return INT_ERR_CODE; - } - - if(init_freq_group(12, 4, FR_QTRSDEC, - month_names, QTRS_prefixes, QTRS_const_names) == INT_ERR_CODE) { - return INT_ERR_CODE; - } - - ADD_FREQ_CONSTANT("FR_WK", FR_WK); - - if(init_freq_group(7, 4, FR_WK, - day_names, WK_prefixes, WK_const_names) == INT_ERR_CODE) { - return INT_ERR_CODE; - } - - if(reverse_dict(freq_dict, freq_dict_rev) == INT_ERR_CODE) { - return INT_ERR_CODE; - } - - return 0; -} - - -/* take user specified frequency and convert to int representation - of the frequency */ -int check_freq(PyObject *freq_spec) { - - if (PyInt_Check(freq_spec)) { - return (int)PyInt_AsLong(freq_spec); - } else if (PyString_Check(freq_spec)) { - char *freq_str, *freq_str_uc; - PyObject *freq_val; - - freq_str = PyString_AsString(freq_spec); - if((freq_str_uc = str_uppercase(freq_str)) == NULL) {return INT_ERR_CODE;} - - freq_val = PyDict_GetItemString(freq_dict_rev, freq_str_uc); - - free(freq_str_uc); - - if (freq_val == NULL) { - PyErr_SetString(PyExc_ValueError, "invalid frequency specification"); - return INT_ERR_CODE; - } else { - int ret_val = 
(int)PyInt_AsLong(freq_val); - return ret_val; - } - } else if (freq_spec == Py_None) { - return FR_UND; - } else { - int retval = (int)PyInt_AsLong(freq_spec); - if (PyErr_Occurred()) { - PyErr_SetString(PyExc_ValueError, "invalid frequency specification"); - return INT_ERR_CODE; - } else { return retval; } - } - -} - -static PyObject * -DateObject_new(PyTypeObject *type, PyObject *args, PyObject *kwds) { - - DateObject *self; - - self = (DateObject*)type->tp_alloc(type, 0); - if (self != NULL) { - // initialize attributes that need initializing in here - self->freq = FR_UND; - self->value = -1; - } - - return (PyObject *)self; -} - -/* for use in C code */ -static DateObject * -DateObject_New(void) { - PyObject *dummy; - return (DateObject*)DateObject_new(&DateType, dummy, dummy); -} - -#define INIT_ERR(errortype, errmsg) PyErr_SetString(errortype,errmsg);return -1 - -static int -DateObject_init(DateObject *self, PyObject *args, PyObject *kwds) { - - PyObject *freq=NULL, *value=NULL, *datetime=NULL, *string=NULL; - char *INSUFFICIENT_MSG = "insufficient parameters to initialize Date"; - - int def_info=INT_ERR_CODE; - - int year=def_info, month=def_info, day=def_info, quarter=def_info, - hour=def_info, minute=def_info, second=def_info; - - int free_dt=0; - - static char *kwlist[] = {"freq", "value", "string", - "year", "month", "day", "quarter", - "hour", "minute", "second", - "datetime", NULL}; - - if (! PyArg_ParseTupleAndKeywords(args, kwds, "O|OOiiiiiiiO", kwlist, - &freq, &value, &string, - &year, &month, &day, &quarter, - &hour, &minute, &second, - &datetime)) { - return -1; - } - - if (PyObject_HasAttrString(freq, "freq")) { - PyObject *freq_attr = PyObject_GetAttrString(freq, "freq"); - self->freq = PyInt_AS_LONG(freq_attr); - Py_DECREF(freq_attr); - } else { - if((self->freq = check_freq(freq)) == INT_ERR_CODE) return -1; - } - - if ((value && PyString_Check(value)) || string) { - - PyObject *string_arg = PyTuple_New(1); - int freq_group = get_freq_group(self->freq); - - free_dt = 1; - - if (!string) { - string = value; - } - - PyTuple_SET_ITEM(string_arg, 0, string); - Py_INCREF(string); - - if (freq_group == FR_HR || - freq_group == FR_MIN || - freq_group == FR_SEC) - { datetime = PyEval_CallObject(DateTimeFromString, string_arg); } - else { datetime = PyEval_CallObject(DateFromString, string_arg); } - - Py_DECREF(string_arg); - - value = NULL; - } - - if (value) { - self->value = PyInt_AsLong(value); - } else { - - int freq_group = get_freq_group(self->freq); - - if (datetime) { - year=PyDateTime_GET_YEAR(datetime); - month=PyDateTime_GET_MONTH(datetime); - day=PyDateTime_GET_DAY(datetime); - hour=PyDateTime_DATE_GET_HOUR(datetime); - minute=PyDateTime_DATE_GET_MINUTE(datetime); - second=PyDateTime_DATE_GET_SECOND(datetime); - } - - if (!datetime) { - - // First, some basic checks..... 
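// (year is always required; daily, business, weekly and undefined
// frequencies also need month and day; monthly needs month; quarterly
// needs quarter; the sub-daily frequencies need month, day and their own
// sub-day field, with coarser time fields derived when omitted)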
- if (year == def_info) { - INIT_ERR(PyExc_ValueError, INSUFFICIENT_MSG); - } - if (self->freq == FR_BUS || - self->freq == FR_DAY || - self->freq == FR_WK || - self->freq == FR_UND) { - if (month == def_info || day == def_info) { - INIT_ERR(PyExc_ValueError, INSUFFICIENT_MSG); - } - - // if FR_BUS, check for week day - - } else if (self->freq == FR_MTH) { - if (month == def_info) { - INIT_ERR(PyExc_ValueError, INSUFFICIENT_MSG); - } - } else if (freq_group == FR_QTR) { - if (quarter == def_info) { - INIT_ERR(PyExc_ValueError, INSUFFICIENT_MSG); - } - } else if (self->freq == FR_SEC) { - if (month == def_info || - day == def_info || - second == def_info) { - INIT_ERR(PyExc_ValueError, INSUFFICIENT_MSG); - } - if (hour == def_info) { - hour = second/3600; - minute = (second % 3600)/60; - second = second % 60; - } else if (minute == def_info) { - INIT_ERR(PyExc_ValueError, INSUFFICIENT_MSG); - } - } else if (self->freq == FR_MIN) { - if (month == def_info || - day == def_info || - minute == def_info) { - INIT_ERR(PyExc_ValueError, INSUFFICIENT_MSG); - } - if (hour == def_info) { - hour = minute/60; - minute = minute % 60; - } - } else if (self->freq == FR_HR) { - if (month == def_info || - day == def_info || - hour == def_info) { - INIT_ERR(PyExc_ValueError, INSUFFICIENT_MSG); - } - } - - } - - if (self->freq == FR_SEC) { - long absdays, delta; - absdays = absdate_from_ymd(year, month, day); - delta = (absdays - HIGHFREQ_ORIG); - self->value = (int)(delta*86400 + hour*3600 + minute*60 + second + 1); - } else if (self->freq == FR_MIN) { - long absdays, delta; - absdays = absdate_from_ymd(year, month, day); - delta = (absdays - HIGHFREQ_ORIG); - self->value = (int)(delta*1440 + hour*60 + minute + 1); - } else if (self->freq == FR_HR) { - long absdays, delta; - if((absdays = absdate_from_ymd(year, month, day)) == INT_ERR_CODE) return -1; - delta = (absdays - HIGHFREQ_ORIG); - self->value = (int)(delta*24 + hour + 1); - } else if (self->freq == FR_DAY) { - if((self->value = (int)absdate_from_ymd(year, month, day)) == INT_ERR_CODE) return -1; - } else if (self->freq == FR_UND) { - if((self->value = (int)absdate_from_ymd(year, month, day)) == INT_ERR_CODE) return -1; - } else if (self->freq == FR_BUS) { - long weeks, days; - if((days = absdate_from_ymd(year, month, day)) == INT_ERR_CODE) return -1; - weeks = days/7; - self->value = (int)(days - weeks*2); - } else if (freq_group == FR_WK) { - int adj_ordinal, ordinal, day_adj; - if((ordinal = (int)absdate_from_ymd(year, month, day)) == INT_ERR_CODE) return -1; - day_adj = (7 - (self->freq - FR_WK)) % 7; - adj_ordinal = ordinal + ((7 - day_adj) - ordinal % 7) % 7; - self->value = adj_ordinal/7; - } else if (self->freq == FR_MTH) { - self->value = (year-1)*12 + month; - } else if (freq_group == FR_QTR) { - if ((self->freq - freq_group) > 12) { - // quarterly frequency with year determined by ending period - self->value = year*4 + quarter; - } else { - /* quarterly frequency with year determined by ending period - or has December year end*/ - self->value = (year-1)*4 + quarter; - } - } else if (freq_group == FR_ANN) { - self->value = year; - } - - } - - if (free_dt) { Py_DECREF(datetime); } - - return 0; -} - -static PyMemberDef DateObject_members[] = { - {"freq", T_INT, offsetof(DateObject, freq), 0, - "frequency"}, - {"value", T_INT, offsetof(DateObject, value), 0, - "integer representation of the Date"}, - {NULL} /* Sentinel */ -}; - -static char DateObject_toordinal_doc[] = -"Return the proleptic Gregorian ordinal of the date, where January 1 
of\n" -"year 1 has ordinal 1"; -static PyObject * -DateObject_toordinal(DateObject* self) -{ - if (self->freq == FR_DAY) { - return PyInt_FromLong(self->value); - } else { - long (*toDaily)(long, char, asfreq_info*) = NULL; - asfreq_info af_info; - - toDaily = get_asfreq_func(self->freq, FR_DAY, 0); - get_asfreq_info(self->freq, FR_DAY, &af_info); - - return PyInt_FromLong(toDaily(self->value, 'A', &af_info)); - } -} - -static char DateObject_asfreq_doc[] = -"Returns a date converted to a specified frequency.\n\n" -":Parameters:\n" -" - freq : string/int\n" -" Frequency to convert the Date to. Accepts any valid frequency\n" -" specification (string or integer)\n" -" - relation :string *['After']*\n" -" Applies only when converting a lower frequency Date to a higher\n" -" frequency Date, or when converting a weekend Date to a business\n" -" frequency Date. Valid values are 'before', 'after', 'b', and 'a'."; -static PyObject * -DateObject_asfreq(DateObject *self, PyObject *args, PyObject *kwds) -{ - - PyObject *freq=NULL; - char *relation_raw=NULL; - char *relation_uc; - char relation; - int invalid_relation=0; - int toFreq; - int result_val; - DateObject *result = DateObject_New(); - - static char *kwlist[] = {"freq", "relation", NULL}; - - long (*asfreq_func)(long, char, asfreq_info*) = NULL; - asfreq_info af_info; - - if (! PyArg_ParseTupleAndKeywords(args, kwds, "O|s", kwlist, - &freq, &relation_raw)) return NULL; - - if(relation_raw) { - if (strlen(relation_raw) > 0) { - if((relation_uc = str_uppercase(relation_raw)) == NULL) - {return PyErr_NoMemory();} - - if (strcmp(relation_uc, "BEFORE") == 0 || - strcmp(relation_uc, "B") == 0 || - strcmp(relation_uc, "AFTER") == 0 || - strcmp(relation_uc, "A") == 0) { - if(relation_uc[0] == 'A') { relation = 'A'; } - else { relation = 'B'; } - - } else { invalid_relation=1; } - - free(relation_uc); - - } else { - invalid_relation=1; - } - - if (invalid_relation) { - PyErr_SetString(PyExc_ValueError,"Invalid relation specification"); - return NULL; - } - } else { - relation = 'A'; - } - - if ((toFreq = check_freq(freq)) == INT_ERR_CODE) return NULL; - - get_asfreq_info(self->freq, toFreq, &af_info); - asfreq_func = get_asfreq_func(self->freq, toFreq, 0); - - result_val = asfreq_func(self->value, relation, &af_info); - - if (result_val == INT_ERR_CODE) return NULL; - - result->freq = toFreq; - result->value = result_val; - - return (PyObject*)result; - -} - -static char DateObject_strfmt_doc[] = -"Returns string representation of Date object according to format specified.\n\n" -":Parameters:\n" -" - fmt : string\n" -" Formatting string. Uses the same directives as in the time.strftime\n" -" function in the standard Python time module. In addition, a few other\n" -" directives are supported:\n" -" %q - the 'quarter' of the date\n" -" %f - Year without century as a decimal number [00,99]. The\n" -" 'year' in this case is the year of the date determined by\n" -" the year for the current quarter. This is the same as %y\n" -" unless the Date is one of the 'qtr-s' frequencies\n" -" %F - Year with century as a decimal number. The 'year' in this\n" -" case is the year of the date determined by the year for\n" -" the current quarter. 
This is the same as %Y unless the\n" -" Date is one of the 'qtr-s' frequencies\n"; -static PyObject * -DateObject_strfmt(DateObject *self, PyObject *args) -{ - - char *orig_fmt_str, *fmt_str; - char *result; - - int num_extra_fmts = 3; - - char extra_fmts[3][2][10] = {{"%q", "^`AB`^"}, - {"%f", "^`CD`^"}, - {"%F", "^`EF`^"}}; - - int extra_fmts_found[3] = {0,0,0}; - int extra_fmts_found_one = 0; - struct tm c_date; - struct date_info tempDate; - long absdate; - double abstime; - int i, result_len; - PyObject *py_result; - - long (*toDaily)(long, char, asfreq_info*) = NULL; - asfreq_info af_info; - - if (!PyArg_ParseTuple(args, "s:strfmt(fmt)", &orig_fmt_str)) return NULL; - - toDaily = get_asfreq_func(self->freq, FR_DAY, 0); - get_asfreq_info(self->freq, FR_DAY, &af_info); - - absdate = toDaily(self->value, 'A', &af_info); - abstime = getAbsTime(self->freq, absdate, self->value); - - if(dInfoCalc_SetFromAbsDateTime(&tempDate, absdate, abstime, - GREGORIAN_CALENDAR)) return NULL; - - // populate standard C date struct with info from our date_info struct - c_date.tm_sec = (int)tempDate.second; - c_date.tm_min = tempDate.minute; - c_date.tm_hour = tempDate.hour; - c_date.tm_mday = tempDate.day; - c_date.tm_mon = tempDate.month - 1; - c_date.tm_year = tempDate.year - 1900; - c_date.tm_wday = tempDate.day_of_week; - c_date.tm_yday = tempDate.day_of_year; - c_date.tm_isdst = -1; - - result_len = strlen(orig_fmt_str) + 50; - if ((result = malloc(result_len * sizeof(char))) == NULL) {return PyErr_NoMemory();} - - fmt_str = orig_fmt_str; - - // replace any special format characters with their place holder - for(i=0; i < num_extra_fmts; i++) { - char *special_loc; - if ((special_loc = strstr(fmt_str,extra_fmts[i][0])) != NULL) { - char *tmp_str = fmt_str; - fmt_str = str_replace(fmt_str, extra_fmts[i][0], - extra_fmts[i][1]); - /* only free the previous loop value if this is not the first - special format string found */ - if (extra_fmts_found_one) { free(tmp_str); } - - if (fmt_str == NULL) {return NULL;} - - extra_fmts_found[i] = 1; - extra_fmts_found_one = 1; - } - } - - strftime(result, result_len, fmt_str, &c_date); - if (extra_fmts_found_one) { free(fmt_str); } - - // replace any place holders with the appropriate value - for(i=0; i < num_extra_fmts; i++) { - if (extra_fmts_found[i]) { - char *tmp_str = result; - char *extra_str; - - if (strcmp(extra_fmts[i][0], "%q") == 0 || - strcmp(extra_fmts[i][0], "%f") == 0 || - strcmp(extra_fmts[i][0], "%F") == 0) { - - asfreq_info af_info; - int qtr_freq, year, quarter, year_len; - - if (get_freq_group(self->freq) == FR_QTR) { - qtr_freq = self->freq; - } else { qtr_freq = FR_QTR; } - get_asfreq_info(FR_DAY, qtr_freq, &af_info); - - if(DtoQ_yq(absdate, &af_info, &year, &quarter) == INT_ERR_CODE) - { return NULL; } - - if(strcmp(extra_fmts[i][0], "%q") == 0) { - if ((extra_str = malloc(2 * sizeof(char))) == NULL) { - free(tmp_str); - return PyErr_NoMemory(); - } - sprintf(extra_str, "%i", quarter); - } else { - if ((qtr_freq % 1000) > 12) { year -= 1; } - - if (strcmp(extra_fmts[i][0], "%f") == 0) { - year_len = 2; - year = year % 100; - } else { year_len = 4; } - - if ((extra_str = malloc((year_len+1) * sizeof(char))) == NULL) { - free(tmp_str); - return PyErr_NoMemory(); - } - - if (year_len == 2 && year < 10) { - sprintf(extra_str, "0%i", year); - } else { sprintf(extra_str, "%i", year); } - } - - } else { - PyErr_SetString(PyExc_RuntimeError,"Unrecognized fmt string"); - return NULL; - } - - result = str_replace(result, extra_fmts[i][1], extra_str);
- free(tmp_str); - free(extra_str); - if (result == NULL) { return NULL; } - } - } - - py_result = PyString_FromString(result); - free(result); - - return py_result; -} - -static PyObject * -DateObject___str__(DateObject* self) -{ - - int freq_group = get_freq_group(self->freq); - PyObject *string_arg, *retval; - - string_arg = NULL; - if (freq_group == FR_ANN) { string_arg = Py_BuildValue("(s)", "%Y"); } - else if (freq_group == FR_QTR) { string_arg = Py_BuildValue("(s)", "%FQ%q"); } - else if (freq_group == FR_MTH) { string_arg = Py_BuildValue("(s)", "%b-%Y"); } - else if (freq_group == FR_DAY || - freq_group == FR_BUS || - freq_group == FR_WK || - freq_group == FR_UND) { string_arg = Py_BuildValue("(s)", "%d-%b-%Y"); } - else if (freq_group == FR_HR) { string_arg = Py_BuildValue("(s)", "%d-%b-%Y %H:00"); } - else if (freq_group == FR_MIN) { string_arg = Py_BuildValue("(s)", "%d-%b-%Y %H:%M"); } - else if (freq_group == FR_SEC) { string_arg = Py_BuildValue("(s)", "%d-%b-%Y %H:%M:%S"); } - - if (string_arg == NULL) { return NULL; } - - retval = DateObject_strfmt(self, string_arg); - Py_DECREF(string_arg); - - return retval; -} - -static PyObject * -DateObject_freqstr(DateObject *self, void *closure) { - PyObject *key = PyInt_FromLong(self->freq); - PyObject *freq_aliases = PyDict_GetItem(freq_dict, key); - PyObject *main_alias = PyTuple_GET_ITEM(freq_aliases, 0); - Py_DECREF(key); - Py_INCREF(main_alias); - return main_alias; -} - - -static PyObject * -DateObject___repr__(DateObject* self) -{ - PyObject *py_str_rep, *py_freqstr, *py_repr; - char *str_rep, *freqstr, *repr; - int repr_len; - - py_str_rep = DateObject___str__(self); - if (py_str_rep == NULL) { return NULL; } - - py_freqstr = DateObject_freqstr(self, NULL); - - str_rep = PyString_AsString(py_str_rep); - freqstr = PyString_AsString(py_freqstr); - - repr_len = strlen(str_rep) + strlen(freqstr) + 6; - - if((repr = malloc((repr_len + 1) * sizeof(char))) == NULL) - { return PyErr_NoMemory(); } - - strcpy(repr, "<"); - strcat(repr, freqstr); - strcat(repr, " : "); - strcat(repr, str_rep); - strcat(repr, ">"); - - py_repr = PyString_FromString(repr); - - Py_DECREF(py_str_rep); - Py_DECREF(py_freqstr); - - free(repr); - - return py_repr; -} - -/****************************** - These methods seem rather useless. May or may not implement them. 
-fromordinal(self, ordinal): - return Date(self.freq, datetime=dt.datetime.fromordinal(ordinal)) -tostring(self): - return str(self) -toobject(self): - return self -isvalid(self): - return True -*******************************/ - - -static DateObject * -DateObject_FromFreqAndValue(int freq, int value) { - - DateObject *result = DateObject_New(); - - PyObject *args = PyTuple_New(0); - PyObject *kw = PyDict_New(); - PyObject *py_freq = PyInt_FromLong(freq); - PyObject *py_value = PyInt_FromLong(value); - - PyDict_SetItemString(kw, "freq", py_freq); - PyDict_SetItemString(kw, "value", py_value); - - Py_DECREF(py_freq); - Py_DECREF(py_value); - - DateObject_init(result, args, kw); - - Py_DECREF(args); - Py_DECREF(kw); - - return result; -} - -static PyObject * -DateObject_date_plus_int(PyObject *date, PyObject *pyint) { - DateObject *dateobj = (DateObject*)date; - if (DateObject_Check(pyint)) { - PyErr_SetString(PyExc_TypeError, "Cannot add two Date objects"); - return NULL; - } - - return (PyObject*)DateObject_FromFreqAndValue(dateobj->freq, PyInt_AsLong(pyint) + dateobj->value); -} - -static PyObject * -DateObject___add__(PyObject *left, PyObject *right) -{ - if (DateObject_Check(left)) { - return DateObject_date_plus_int(left, right); - } else { - return DateObject_date_plus_int(right, left); - } -} - -static PyObject * -DateObject___subtract__(PyObject *left, PyObject *right) -{ - int result; - DateObject *dleft; - if (!DateObject_Check(left)) { - PyErr_SetString(PyExc_ValueError, "Cannot subtract Date from non-Date value"); - return NULL; - } - - dleft = (DateObject*)left; - - if (DateObject_Check(right)) { - DateObject *dright = (DateObject*)right; - if (dleft->freq != dright->freq) { - PyErr_SetString(PyExc_ValueError, "Cannot subtract Dates with different frequency"); - return NULL; - } - result = dleft->value - dright->value; - return PyInt_FromLong(result); - } else { - result = dleft->value - PyInt_AsLong(right); - return (PyObject*)DateObject_FromFreqAndValue(dleft->freq, result); - } -} - -static int -DateObject___compare__(DateObject * obj1, DateObject * obj2) -{ - if (obj1->freq != obj2->freq) { - PyErr_SetString(PyExc_ValueError, - "Cannot compare dates with different frequency"); - return -1; - } - - if (obj1->value < obj2->value) return -1; - if (obj1->value > obj2->value) return 1; - if (obj1->value == obj2->value) return 0; - return -1; -} - -static long -DateObject___hash__(DateObject *self) -{ - register int freq_group = get_freq_group(self->freq); - - /* within a given frequency, hash values are guaranteed to be unique - for different dates. 
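(A business or weekly value is roughly 5/7 or 1/7 of the corresponding daily ordinal, so without the offsets below it would collide with small daily values.)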
For different frequencies, we make a reasonable - effort to ensure hash values will be unique, but it is not guaranteed */ - if (freq_group == FR_BUS) { - return self->value + 10000000; - } else if (freq_group == FR_WK) { - return self->value + 100000000; - } else { return self->value; } -} - -static PyObject * -DateObject___int__(DateObject *self) -{ - return PyInt_FromLong(self->value); -} - -static PyObject * -DateObject___float__(DateObject *self) -{ - return PyFloat_FromDouble((double)(self->value)); -} - -/*************************************************** - ====== Date Properties ====== -****************************************************/ - -// helper function for date property funcs -static int -DateObject_set_date_info(DateObject *self, struct date_info *dinfo) { - PyObject *daily_obj = DateObject_toordinal(self); - long absdate = PyInt_AsLong(daily_obj); - - Py_DECREF(daily_obj); - - if(dInfoCalc_SetFromAbsDate(dinfo, absdate, - GREGORIAN_CALENDAR)) return -1; - - return 0; -} - -// helper function for date property funcs -static int -DateObject_set_date_info_wtime(DateObject *self, struct date_info *dinfo) { - PyObject *daily_obj = DateObject_toordinal(self); - long absdate = PyInt_AsLong(daily_obj); - double abstime; - - Py_DECREF(daily_obj); - - abstime = getAbsTime(self->freq, absdate, self->value); - - if(dInfoCalc_SetFromAbsDateTime(dinfo, absdate, abstime, - GREGORIAN_CALENDAR)) return -1; - - return 0; -} - -static PyObject * -DateObject_year(DateObject *self, void *closure) { - struct date_info dinfo; - if(DateObject_set_date_info(self, &dinfo) == -1) return NULL; - return PyInt_FromLong(dinfo.year); -} - -static int _DateObject_quarter_year(DateObject *self, int *year, int *quarter) { - - PyObject *daily_obj; - long absdate; - - asfreq_info af_info; - int qtr_freq; - - daily_obj = DateObject_toordinal(self); - absdate = PyInt_AsLong(daily_obj); - Py_DECREF(daily_obj); - - if (get_freq_group(self->freq) == FR_QTR) { - qtr_freq = self->freq; - } else { qtr_freq = FR_QTR; } - get_asfreq_info(FR_DAY, qtr_freq, &af_info); - - if(DtoQ_yq(absdate, &af_info, year, quarter) == INT_ERR_CODE) - { return INT_ERR_CODE; } - - if ((qtr_freq % 1000) > 12) { *year -= 1; } - - return 0; -} - -static PyObject * -DateObject_qyear(DateObject *self, void *closure) { - int year, quarter; - if(_DateObject_quarter_year(self, - &year, &quarter) == INT_ERR_CODE) { return NULL; } - return PyInt_FromLong(year); -} - -static PyObject * -DateObject_quarter(DateObject *self, void *closure) { - int year, quarter; - if(_DateObject_quarter_year(self, - &year, &quarter) == INT_ERR_CODE) { return NULL; } - return PyInt_FromLong(quarter); -} - -static PyObject * -DateObject_month(DateObject *self, void *closure) { - struct date_info dinfo; - if(DateObject_set_date_info(self, &dinfo) == -1) return NULL; - return PyInt_FromLong(dinfo.month); -} - -static PyObject * -DateObject_day(DateObject *self, void *closure) { - struct date_info dinfo; - if(DateObject_set_date_info(self, &dinfo) == -1) return NULL; - return PyInt_FromLong(dinfo.day); -} - -static PyObject * -DateObject_day_of_week(DateObject *self, void *closure) { - struct date_info dinfo; - if(DateObject_set_date_info(self, &dinfo) == -1) return NULL; - return PyInt_FromLong(dinfo.day_of_week); -} - -static PyObject * -DateObject_day_of_year(DateObject *self, void *closure) { - struct date_info dinfo; - if(DateObject_set_date_info(self, &dinfo) == -1) return NULL; - return PyInt_FromLong(dinfo.day_of_year); -} - -static PyObject * 
-DateObject_week(DateObject *self, void *closure) { - struct date_info dinfo; - if(DateObject_set_date_info(self, &dinfo) == -1) return NULL; - return PyInt_FromLong(dInfoCalc_ISOWeek(&dinfo)); -} - -static PyObject * -DateObject_hour(DateObject *self, void *closure) { - struct date_info dinfo; - if(DateObject_set_date_info_wtime(self, &dinfo) == -1) return NULL; - return PyInt_FromLong(dinfo.hour); -} - -static PyObject * -DateObject_minute(DateObject *self, void *closure) { - struct date_info dinfo; - if(DateObject_set_date_info_wtime(self, &dinfo) == -1) return NULL; - return PyInt_FromLong(dinfo.minute); -} - -static PyObject * -DateObject_second(DateObject *self, void *closure) { - struct date_info dinfo; - if(DateObject_set_date_info_wtime(self, &dinfo) == -1) return NULL; - return PyInt_FromLong((int)dinfo.second); -} - -static PyObject * -DateObject_datetime(DateObject *self, void *closure) { - PyObject *datetime; - struct date_info dinfo; - if(DateObject_set_date_info_wtime(self, &dinfo) == -1) return NULL; - datetime = PyDateTime_FromDateAndTime(dinfo.year, dinfo.month, - dinfo.day, dinfo.hour, - dinfo.minute, (int)dinfo.second, 0); - return datetime; -} - -static int -DateObject_ReadOnlyErr(DateObject *self, PyObject *value, void *closure) { - PyErr_SetString(PyExc_AttributeError, "Cannot set read-only property"); - return -1; -} - -static PyGetSetDef DateObject_getseters[] = { - {"year", (getter)DateObject_year, (setter)DateObject_ReadOnlyErr, - "Returns the year.", NULL}, - {"qyear", (getter)DateObject_qyear, (setter)DateObject_ReadOnlyErr, - "For quarterly frequency dates, returns the year corresponding to the\n" - "year end (start) month. When using QTR or QTR-E based quarterly\n" - "frequencies, this is the fiscal year in a financial context.\n\n" - "For non-quarterly dates, this simply returns the year of the date.", - NULL}, - {"quarter", (getter)DateObject_quarter, (setter)DateObject_ReadOnlyErr, - "Returns the quarter.", NULL}, - {"month", (getter)DateObject_month, (setter)DateObject_ReadOnlyErr, - "Returns the month.", NULL}, - {"week", (getter)DateObject_week, (setter)DateObject_ReadOnlyErr, - "Returns the week.", NULL}, - {"day", (getter)DateObject_day, (setter)DateObject_ReadOnlyErr, - "Returns the day of month.", NULL}, - {"day_of_week", (getter)DateObject_day_of_week, (setter)DateObject_ReadOnlyErr, - "Returns the day of week.", NULL}, - {"day_of_year", (getter)DateObject_day_of_year, (setter)DateObject_ReadOnlyErr, - "Returns the day of year.", NULL}, - {"second", (getter)DateObject_second, (setter)DateObject_ReadOnlyErr, - "Returns the second.", NULL}, - {"minute", (getter)DateObject_minute, (setter)DateObject_ReadOnlyErr, - "Returns the minute.", NULL}, - {"hour", (getter)DateObject_hour, (setter)DateObject_ReadOnlyErr, - "Returns the hour.", NULL}, - - {"freqstr", (getter)DateObject_freqstr, (setter)DateObject_ReadOnlyErr, - "Returns the string representation of frequency.", NULL}, - {"datetime", (getter)DateObject_datetime, (setter)DateObject_ReadOnlyErr, - "Returns the Date object converted to standard python datetime object", - NULL}, - - {NULL} /* Sentinel */ -}; - - -static PyNumberMethods DateObject_as_number = { - (binaryfunc)DateObject___add__, /* nb_add */ - (binaryfunc)DateObject___subtract__, /* nb_subtract */ - 0, /* nb_multiply */ - 0, /* nb_divide */ - 0, /* nb_remainder */ - 0, /* nb_divmod */ - 0, /* nb_power */ - 0, /* nb_negative */ - 0, /* nb_positive */ - 0, /* nb_absolute */ - 0, /* nb_nonzero */ - 0, /* nb_invert */ - 0, /* nb_lshift */ - 
0, /* nb_rshift */ - 0, /* nb_and */ - 0, /* nb_xor */ - 0, /* nb_or */ - 0, /* nb_coerce */ - (unaryfunc)DateObject___int__, /* nb_int */ - (unaryfunc)0, /* nb_long */ - (unaryfunc)DateObject___float__, /* nb_float */ - (unaryfunc)0, /* nb_oct */ - (unaryfunc)0, /* nb_hex */ -}; - -static PyMethodDef DateObject_methods[] = { - {"toordinal", (PyCFunction)DateObject_toordinal, METH_NOARGS, - DateObject_toordinal_doc}, - {"strfmt", (PyCFunction)DateObject_strfmt, METH_VARARGS, - DateObject_strfmt_doc}, - {"asfreq", (PyCFunction)DateObject_asfreq, METH_VARARGS | METH_KEYWORDS, - DateObject_asfreq_doc}, - {NULL} /* Sentinel */ -}; - - -static PyTypeObject DateType = { - PyObject_HEAD_INIT(NULL) - 0, /* ob_size */ - "timeseries.Date", /* tp_name */ - sizeof(DateObject), /* tp_basicsize */ - 0, /* tp_itemsize */ - (destructor)DateObject_dealloc, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - (cmpfunc)DateObject___compare__, /* tp_compare */ - (reprfunc)DateObject___repr__, /* tp_repr */ - &DateObject_as_number, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - (hashfunc)DateObject___hash__, /* tp_hash */ - 0, /* tp_call*/ - (reprfunc)DateObject___str__, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT | /* tp_flags */ - Py_TPFLAGS_CHECKTYPES | - Py_TPFLAGS_BASETYPE, - "Defines a Date object, as the combination of a date and a frequency.\n" - "Several options are available to construct a Date object explicitly:\n\n" - "- Give appropriate values to the `year`, `month`, `day`, `quarter`, `hours`,\n" - " `minutes`, `seconds` arguments.\n\n" - " >>> td.Date(freq='Q',year=2004,quarter=3)\n" - " >>> td.Date(freq='D',year=2001,month=1,day=1)\n\n" - "- Use the `string` keyword. This method uses a modified version of the\n" - " mx.DateTime parser submodule. 
More information is available in its\n" - " documentation.\n\n" - " >>> ts.Date('D', '2007-01-01')\n\n" - "- Use the `datetime` keyword with an existing datetime.datetime object.\n\n" - " >>> td.Date('D', datetime=datetime.datetime.now())", /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - DateObject_methods, /* tp_methods */ - DateObject_members, /* tp_members */ - DateObject_getseters, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - (initproc)DateObject_init, /* tp_init */ - 0, /* tp_alloc */ - DateObject_new, /* tp_new */ -}; - - -/////////////////////////////////////////////////////////////////////// - -PyObject * -c_tdates_check_freq(PyObject *self, PyObject *args) { - - PyObject *freq; - int freq_val; - - if (!PyArg_ParseTuple(args, "O:check_freq(freq)", &freq)) return NULL; - if ((freq_val = check_freq(freq)) == INT_ERR_CODE) return NULL; - - return PyInt_FromLong(freq_val); -} - -PyObject * -c_tdates_check_freq_str(PyObject *self, PyObject *args) { - - PyObject *alias_tuple, *result, *freq_key; - - if ((freq_key = c_tdates_check_freq(self, args)) == NULL) return NULL; - - alias_tuple = PyDict_GetItem(freq_dict, freq_key); - result = PyTuple_GET_ITEM(alias_tuple, 0); - - Py_INCREF(result); - - Py_DECREF(freq_key); - - return result; -} - -PyObject * -c_tdates_get_freq_group(PyObject *self, PyObject *args) { - - PyObject *freq; - int freq_val; - - if (!PyArg_ParseTuple(args, "O:get_freq_group(freq)", &freq)) return NULL; - if ((freq_val = check_freq(freq)) == INT_ERR_CODE) return NULL; - - return PyInt_FromLong(get_freq_group(freq_val)); -} - -PyObject * -c_tdates_thisday(PyObject *self, PyObject *args) { - - PyObject *freq, *init_args, *init_kwargs; - time_t rawtime; - struct tm *timeinfo; - int freq_val; - - DateObject *secondly_date; - - if (!PyArg_ParseTuple(args, "O:thisday(freq)", &freq)) return NULL; - - if ((freq_val = check_freq(freq)) == INT_ERR_CODE) return NULL; - - time(&rawtime); - timeinfo = localtime(&rawtime); - - init_args = PyTuple_New(0); - init_kwargs = PyDict_New(); - - DICT_SETINT_STRKEY(init_kwargs, "freq", FR_SEC); - DICT_SETINT_STRKEY(init_kwargs, "year", timeinfo->tm_year+1900); - DICT_SETINT_STRKEY(init_kwargs, "month", timeinfo->tm_mon+1); - DICT_SETINT_STRKEY(init_kwargs, "day", timeinfo->tm_mday); - DICT_SETINT_STRKEY(init_kwargs, "hour", timeinfo->tm_hour); - DICT_SETINT_STRKEY(init_kwargs, "minute", timeinfo->tm_min); - DICT_SETINT_STRKEY(init_kwargs, "second", timeinfo->tm_sec); - - secondly_date = DateObject_New(); - DateObject_init(secondly_date, init_args, init_kwargs); - - Py_DECREF(init_args); - Py_DECREF(init_kwargs); - - if (freq_val != FR_SEC) { - DateObject *result = DateObject_New(); - - long (*asfreq_func)(long, char, asfreq_info*) = NULL; - asfreq_info af_info; - - int date_val; - - get_asfreq_info(FR_SEC, freq_val, &af_info); - asfreq_func = get_asfreq_func(FR_SEC, freq_val, 0); - - date_val = asfreq_func(secondly_date->value, 'B', &af_info); - - Py_DECREF(secondly_date); - - result->freq = freq_val; - result->value = date_val; - - return (PyObject*)result; - - } else { return (PyObject*)secondly_date; } -} - - -PyObject * -DateArray_asfreq(PyObject *self, PyObject *args) -{ - PyArrayObject *fromDates, *toDates; - PyArrayIterObject *iterFrom, *iterTo; - PyObject *fromDateObj, *toDateObj; - char *relation; - int fromFreq, toFreq; - long fromDate, toDate; - 
long (*asfreq_main)(long, char, asfreq_info*) = NULL; - asfreq_info af_info; - - if (!PyArg_ParseTuple(args, - "Oiis:asfreq(fromDates, fromfreq, tofreq, relation)", - &fromDates, &fromFreq, &toFreq, &relation)) return NULL; - - get_asfreq_info(fromFreq, toFreq, &af_info); - - asfreq_main = get_asfreq_func(fromFreq, toFreq, 0); - - toDates = (PyArrayObject *)PyArray_Copy(fromDates); - - iterFrom = (PyArrayIterObject *)PyArray_IterNew((PyObject *)fromDates); - if (iterFrom == NULL) return NULL; - - iterTo = (PyArrayIterObject *)PyArray_IterNew((PyObject *)toDates); - if (iterTo == NULL) return NULL; - - while (iterFrom->index < iterFrom->size) { - - fromDateObj = PyArray_GETITEM(fromDates, iterFrom->dataptr); - fromDate = PyInt_AsLong(fromDateObj); - CHECK_ASFREQ(toDate = asfreq_main(fromDate, relation[0], &af_info)); - toDateObj = PyInt_FromLong(toDate); - - PyArray_SETITEM(toDates, iterTo->dataptr, toDateObj); - - Py_DECREF(fromDateObj); - Py_DECREF(toDateObj); - - PyArray_ITER_NEXT(iterFrom); - PyArray_ITER_NEXT(iterTo); - } - - Py_DECREF(iterFrom); - Py_DECREF(iterTo); - - return (PyObject *)toDates; - -} - -/************************************************************** -** The following functions are used by DateArray_getDateInfo ** -** to determine how many consecutive periods will have the ** -** same result ** -**************************************************************/ - -// also used for qyear -static int __skip_periods_year(int freq) { - - int freq_group = get_freq_group(freq); - - switch(freq_group) - { - case FR_QTR: - return 4; - case FR_MTH: - return 12; - case FR_WK: - return 51; - case FR_BUS: - return 260; - case FR_DAY: - return 365; - case FR_HR: - return 365*24; - case FR_MIN: - return 365*24*60; - case FR_SEC: - return 365*24*60*60; - default: - return 1; - } -} - -static int __skip_periods_quarter(int freq) { - - int freq_group = get_freq_group(freq); - - switch(freq_group) - { - case FR_MTH: - return 3; - case FR_WK: - return 12; - case FR_BUS: - return 64; - case FR_DAY: - return 90; - case FR_HR: - return 90*24; - case FR_MIN: - return 90*24*60; - case FR_SEC: - return 90*24*60*60; - default: - return 1; - } -} - -static int __skip_periods_month(int freq) { - - int freq_group = get_freq_group(freq); - - switch(freq_group) - { - case FR_WK: - return 3; - case FR_BUS: - return 20; - case FR_DAY: - return 28; - case FR_HR: - return 28*24; - case FR_MIN: - return 28*24*60; - case FR_SEC: - return 28*24*60*60; - default: - return 1; - } -} - -// also used for day_of_year, day_of_week -static int __skip_periods_day(int freq) { - - int freq_group = get_freq_group(freq); - - switch(freq_group) - { - case FR_HR: - return 24; - case FR_MIN: - return 24*60; - case FR_SEC: - return 24*60*60; - default: - return 1; - } -} - -static int __skip_periods_week(int freq) { - - int freq_group = get_freq_group(freq); - - switch(freq_group) - { - case FR_BUS: - return 5; - case FR_DAY: - return 7; - case FR_HR: - return 7*28*24; - case FR_MIN: - return 7*28*24*60; - case FR_SEC: - return 7*28*24*60*60; - default: - return 1; - } -} - -static int __skip_periods_hour(int freq) { - - int freq_group = get_freq_group(freq); - - switch(freq_group) - { - case FR_MIN: - return 60; - case FR_SEC: - return 60*60; - default: - return 1; - } -} - -static int __skip_periods_minute(int freq) { - - int freq_group = get_freq_group(freq); - - switch(freq_group) - { - case FR_SEC: - return 60; - default: - return 1; - } -} - -PyObject * -DateArray_getDateInfo(PyObject *self, PyObject *args) -{ - int 
freq, is_full, skip_periods, counter=1, val_changed=0; - char *info; - - PyObject *prev_val=NULL; - PyArrayObject *array, *newArray; - PyArrayIterObject *iterSource, *iterResult; - - PyObject* (*getDateInfo)(DateObject*, void*) = NULL; - - if (!PyArg_ParseTuple(args, "Oisi:getDateInfo(array, freq, info, is_full)", - &array, &freq, &info, &is_full)) return NULL; - newArray = (PyArrayObject *)PyArray_Copy(array); - - iterSource = (PyArrayIterObject *)PyArray_IterNew((PyObject *)array); - iterResult = (PyArrayIterObject *)PyArray_IterNew((PyObject *)newArray); - - - switch(*info) - { - case 'Y': //year - getDateInfo = &DateObject_year; - skip_periods = __skip_periods_year(freq); - break; - case 'F': //"fiscal" year - getDateInfo = &DateObject_qyear; - skip_periods = __skip_periods_year(freq); - break; - case 'Q': //quarter - getDateInfo = &DateObject_quarter; - skip_periods = __skip_periods_quarter(freq); - break; - case 'M': //month - getDateInfo = &DateObject_month; - skip_periods = __skip_periods_month(freq); - break; - case 'D': //day - getDateInfo = &DateObject_day; - skip_periods = __skip_periods_day(freq); - break; - case 'R': //day of year - getDateInfo = &DateObject_day_of_year; - skip_periods = __skip_periods_day(freq); - break; - case 'W': //day of week - getDateInfo = &DateObject_day_of_week; - skip_periods = __skip_periods_day(freq); - break; - case 'I': //week of year - getDateInfo = &DateObject_week; - skip_periods = __skip_periods_week(freq); - break; - case 'H': //hour - getDateInfo = &DateObject_hour; - skip_periods = __skip_periods_hour(freq); - break; - case 'T': //minute - getDateInfo = &DateObject_minute; - skip_periods = __skip_periods_minute(freq); - break; - case 'S': //second - getDateInfo = &DateObject_second; - skip_periods = 1; - break; - default: - return NULL; - } - - { - DateObject *curr_date; - PyObject *val, *dInfo; - - while (iterSource->index < iterSource->size) { - - if ((val_changed == 0) || - (is_full == 0) || - (prev_val == NULL) || - (counter >= skip_periods)) { - - val = PyArray_GETITEM(array, iterSource->dataptr); - curr_date = DateObject_FromFreqAndValue(freq, PyInt_AsLong(val)); - dInfo = getDateInfo(curr_date, NULL); - - if ((prev_val != NULL) && - (PyInt_AsLong(prev_val) != PyInt_AsLong(dInfo))) { - val_changed = 1; - counter = 0; - } - - Py_DECREF(val); - Py_DECREF(curr_date); - - if (prev_val != NULL) { - Py_DECREF(prev_val); - } - - prev_val = dInfo; - } - - PyArray_SETITEM(newArray, iterResult->dataptr, dInfo); - - PyArray_ITER_NEXT(iterSource); - PyArray_ITER_NEXT(iterResult); - - counter += 1; - } - } - - if (prev_val != NULL) { - Py_DECREF(prev_val); - } - Py_DECREF(iterSource); - Py_DECREF(iterResult); - - return (PyObject *) newArray; -} - - -void import_c_tdates(PyObject *m) -{ - - if (PyType_Ready(&DateType) < 0) return; - - DateCalc_Error = - PyErr_NewException("c_tdates.DateCalc_Error", NULL, NULL); - DateCalc_RangeError = - PyErr_NewException("c_tdates.DateCalc_RangeError", NULL, NULL); - - import_array(); - PyDateTime_IMPORT; - - Py_INCREF(&DateType); - PyModule_AddObject(m, "Date", (PyObject *)(&DateType)); - - if(build_freq_dict() == INT_ERR_CODE) { - PyErr_SetString( \ - PyExc_ImportError, \ - "initialization of module timeseries.c_tdates failed"); - return; - }; - - PyModule_AddObject(m, "freq_dict", freq_dict); - PyModule_AddObject(m, "freq_dict_rev", freq_dict_rev); - PyModule_AddObject(m, "freq_constants", freq_constants); - - PyModule_AddObject(m, "DateCalc_Error", DateCalc_Error); - PyModule_AddObject(m, 
"DateCalc_RangeError", DateCalc_RangeError); - -} Modified: trunk/scipy/sandbox/timeseries/src/c_tseries.c =================================================================== --- trunk/scipy/sandbox/timeseries/src/c_tseries.c 2007-09-19 03:32:25 UTC (rev 3329) +++ trunk/scipy/sandbox/timeseries/src/c_tseries.c 2007-09-19 03:37:47 UTC (rev 3330) @@ -1,4 +1,4 @@ -#include "c_tdates.h" +#include "c_dates.h" #include "c_tseries.h" /* Helper function for TimeSeries_convert: Modified: trunk/scipy/sandbox/timeseries/src/cseries.c =================================================================== --- trunk/scipy/sandbox/timeseries/src/cseries.c 2007-09-19 03:32:25 UTC (rev 3329) +++ trunk/scipy/sandbox/timeseries/src/cseries.c 2007-09-19 03:37:47 UTC (rev 3330) @@ -1,5 +1,5 @@ #include "c_lib.h" -#include "c_tdates.h" +#include "c_dates.h" #include "c_tseries.h" static PyMethodDef cseries_methods[] = { @@ -22,7 +22,7 @@ METH_VARARGS, ""}, - {"thisday", (PyCFunction)c_tdates_thisday, + {"thisday", (PyCFunction)c_dates_thisday, METH_VARARGS, "Returns today's date, at the given frequency\n\n" ":Parameters:\n" @@ -30,15 +30,15 @@ " Frequency to convert the Date to. Accepts any valid frequency\n" " specification (string or integer)\n"}, - {"check_freq", (PyCFunction)c_tdates_check_freq, + {"check_freq", (PyCFunction)c_dates_check_freq, METH_VARARGS, "translate user specified frequency into frequency constant"}, - {"check_freq_str", (PyCFunction)c_tdates_check_freq_str, + {"check_freq_str", (PyCFunction)c_dates_check_freq_str, METH_VARARGS, "translate user specified frequency into standard string representation"}, - {"get_freq_group", (PyCFunction)c_tdates_get_freq_group, + {"get_freq_group", (PyCFunction)c_dates_get_freq_group, METH_VARARGS, "translate user specified frequency into frequency group constant"}, @@ -61,7 +61,7 @@ return; import_c_lib(m); - import_c_tdates(m); + import_c_dates(m); import_c_tseries(m); } From scipy-svn at scipy.org Wed Sep 19 13:27:58 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 19 Sep 2007 12:27:58 -0500 (CDT) Subject: [Scipy-svn] r3331 - trunk/scipy/sandbox/timeseries Message-ID: <20070919172758.0515C39C209@new.scipy.org> Author: mattknox_ca Date: 2007-09-19 12:27:50 -0500 (Wed, 19 Sep 2007) New Revision: 3331 Modified: trunk/scipy/sandbox/timeseries/tseries.py Log: removed "observed" attribute for the TimeSeries class Modified: trunk/scipy/sandbox/timeseries/tseries.py =================================================================== --- trunk/scipy/sandbox/timeseries/tseries.py 2007-09-19 03:37:47 UTC (rev 3330) +++ trunk/scipy/sandbox/timeseries/tseries.py 2007-09-19 17:27:50 UTC (rev 3331) @@ -50,22 +50,10 @@ 'empty_like', 'day_of_week','day_of_year','day','month','quarter','year', 'hour','minute','second', -'tofile','asrecords','flatten', 'check_observed', +'tofile','asrecords','flatten', 'first_unmasked_val', 'last_unmasked_val' ] - -#####--------------------------------------------------------------------------- -#---- --- Observed options --- -#####--------------------------------------------------------------------------- -fmtobs_dict = {'UNDEFINED': ['UNDEF','UNDEFINED',None], - 'BEGINNING': ['BEGIN','BEGINNING'], - 'ENDING': ['END','ENDING'], - 'AVERAGED': ['AVERAGE','AVERAGED','MEAN'], - 'SUMMED': ['SUM','SUMMED'], - 'MAXIMUM': ['MAX','MAXIMUM','HIGH'], - 'MINIMUM': ['MIN','MINIMUM','LOW']} - def first_unmasked_val(a): "Returns the first unmasked value in a 1d maskedarray." 
(i,j) = MA.extras.flatnotmasked_edges(a) @@ -76,39 +64,6 @@ (i,j) = MA.extras.flatnotmasked_edges(a) return a[j] -obs_dict = {"UNDEFINED":None, - "BEGINNING": first_unmasked_val, - "ENDING": last_unmasked_val, - "AVERAGED": MA.average, - "SUMMED": MA.sum, - "MAXIMUM": MA.maximum, - "MINIMUM": MA.minimum, - } - -alias_obs_dict = {} -for ob, aliases in fmtobs_dict.iteritems(): - for al in aliases: - alias_obs_dict[al] = obs_dict[ob] -obs_dict.update(alias_obs_dict) - -def _reverse_dict(d): - "Reverses the keys and values of a dictionary." - alt = [] - tmp = [alt.extend([(w,k) for w in v]) for (k,v) in d.iteritems()] - return dict(alt) - -fmtobs_revdict = _reverse_dict(fmtobs_dict) - -def fmtObserv(obStr): - "Converts a possible 'Observed' string into acceptable values." - if obStr is None: - return fmtobs_revdict[None] - elif obStr.upper() in fmtobs_revdict: - return fmtobs_revdict[obStr.upper()] - else: - raise ValueError("Invalid value for observed attribute: %s " % str(obStr)) -check_observed = fmtObserv - #### -------------------------------------------------------------------------- #--- ... TimeSeriesError class ... #### -------------------------------------------------------------------------- @@ -136,7 +91,6 @@ msg = msg % (first, second) TimeSeriesError.__init__(self, msg) -#def _compatibilitycheck(a, b): def _timeseriescompat(a, b, raise_error=True): """Checks the date compatibility of two TimeSeries objects. Returns True if everything's fine, or raises an exception.""" @@ -350,40 +304,20 @@ The combination of `series` and `dates` is the `data` part. """ options = None - _defaultobserved = None - _genattributes = ['fill_value', 'observed'] + _genattributes = ['fill_value'] def __new__(cls, data, dates, mask=nomask, -# freq=None, - observed=None, #start_date=None, length=None, dtype=None, copy=False, fill_value=None, subok=True, keep_mask=True, small_mask=True, hard_mask=False, **options): maparms = dict(copy=copy, dtype=dtype, fill_value=fill_value,subok=subok, keep_mask=keep_mask, small_mask=small_mask, hard_mask=hard_mask,) _data = MaskedArray(data, mask=mask, **maparms) -# # Get the frequency .......................... -# freq = check_freq(freq) + # Get the dates .............................. if not isinstance(dates, (Date, DateArray)): raise TypeError("The input dates should be a valid Date or DateArray object! "\ "(got %s instead)" % type(dates)) -# newdates = date_array(dates) -# elif isinstance(dates, (tuple, list, ndarray)): -# newdates = date_array(dlist=dates, freq=freq) -# if newdates is not None: -# if freq != _c.FR_UND and newdates.freq != freq: -# newdates = newdates.asfreq(freq) -# else: -# dshape = _data.shape -# if len(dshape) > 0: -# if length is None: -# length = dshape[0] -# newdates = date_array(start_date=start_date, length=length, -# freq=freq) -# else: -# newdates = date_array([], freq=freq) - # Get observed ............................... - observed = getattr(data, 'observed', fmtObserv(observed)) + # Get the data ............................... 
if not subok or not isinstance(_data,TimeSeries): _data = _data.view(cls) @@ -391,31 +325,23 @@ assert(numeric.size(newdates)==1) return _data.view(cls) assert(_datadatescompat(_data,dates)) -# assert(_datadatescompat(_data,newdates)) - # -# _data._dates = newdates _data._dates = dates if _data._dates.size == _data.size: if _data.ndim > 1: current_shape = data.shape -# if newdates._unsorted is not None: + if dates._unsorted is not None: _data.shape = (-1,) -# _data = _data[newdates._unsorted] _data = _data[dates._unsorted] _data.shape = current_shape _data._dates.shape = current_shape elif dates._unsorted is not None: _data = _data[dates._unsorted] -# elif newdates._unsorted is not None: -# _data = _data[newdates._unsorted] - _data.observed = observed return _data #............................................ def __array_finalize__(self,obj): MaskedArray.__array_finalize__(self, obj) self._dates = getattr(obj, '_dates', DateArray([])) - self.observed = getattr(obj, 'observed', None) return #.................................. def __array_wrap__(self, obj, context=None): @@ -849,8 +775,7 @@ def _attrib_dict(series, exclude=[]): """this function is used for passing through attributes of one time series to a new one being created""" - result = {'fill_value':series.fill_value, - 'observed':series.observed} + result = {'fill_value':series.fill_value} return dict(filter(lambda x: x[0] not in exclude, result.iteritems())) @@ -982,24 +907,13 @@ return time_series(newseries, newdates) TimeSeries.flatten = flatten - - -#####--------------------------------------------------------------------------- -#---- --- Archiving --- -#####--------------------------------------------------------------------------- - -#TimeSeries.__dump__ = dump -#TimeSeries.__dumps__ = dumps - - ##### ------------------------------------------------------------------------- #---- --- TimeSeries creator --- ##### ------------------------------------------------------------------------- -def time_series(data, dates=None, freq=None, observed=None, - start_date=None, end_date=None, length=None, - mask=nomask, - dtype=None, copy=False, fill_value=None, - keep_mask=True, small_mask=True, hard_mask=False): +def time_series(data, dates=None, freq=None, start_date=None, end_date=None, + length=None, mask=nomask, dtype=None, copy=False, + fill_value=None, keep_mask=True, small_mask=True, + hard_mask=False): """Creates a TimeSeries object :Parameters: @@ -1012,9 +926,9 @@ keep_mask=keep_mask, small_mask=small_mask, hard_mask=hard_mask,) data = masked_array(data, mask=mask, **maparms) - # data = data.view(MaskedArray) + freq = check_freq(freq) - # + if dates is None: _dates = getattr(data, '_dates', None) elif isinstance(dates, (Date, DateArray)): @@ -1023,7 +937,7 @@ _dates = date_array(dlist=dates, freq=freq) else: _dates = date_array([], freq=freq) - # + if _dates is not None: # Make sure _dates has the proper freqncy if (freq != _c.FR_UND) and (_dates.freq != freq): @@ -1038,14 +952,12 @@ length=length, freq=freq) else: _dates = date_array([], freq=freq) - # + if _dates._unsorted is not None: idx = _dates._unsorted data = data[idx] _dates._unsorted = None return TimeSeries(data=data, dates=_dates, mask=data._mask, -# freq=freq, - observed=observed, copy=copy, dtype=dtype, fill_value=fill_value, keep_mask=keep_mask, small_mask=small_mask, hard_mask=hard_mask,) @@ -1281,22 +1193,8 @@ #.................................................................... 
-def _convert1d(series, freq, func='auto', position='END', *args, **kwargs): - """Converts a series to a frequency. Private function called by convert - - When converting to a lower frequency, func is a function that acts - on a 1-d array and returns a scalar or 1-d array. func should handle - masked values appropriately. If func is "auto", then an - appropriate function is determined based on the observed attribute - of the series. If func is None, then a 2D array is returned, where each - column represents the values appropriately grouped into the new frequency. - interp and position will be ignored in this case. - - When converting to a higher frequency, position is 'START' or 'END' - and determines where the data point is in each period (eg. if going - from monthly to daily, and position is 'END', then each data point is - placed at the end of the month). - """ +def _convert1d(series, freq, func=None, position='END', *args, **kwargs): + "helper function for `convert` function" if not isinstance(series,TimeSeries): raise TypeError, "The argument should be a valid TimeSeries!" @@ -1324,8 +1222,6 @@ if series.size == 0: return TimeSeries(series, freq=toFreq, start_date=start_date.asfreq(toFreq)) - if func == 'auto': - func = obs_dict[series.observed] tempData = series._series.filled() tempMask = getmaskarray(series) @@ -1356,11 +1252,9 @@ When converting to a lower frequency, func is a function that acts on a 1-d array and returns a scalar or 1-d array. func should handle - masked values appropriately. If func is "auto", then an - appropriate function is determined based on the observed attribute - of the series. If func is None, then a 2D array is returned, where each - column represents the values appropriately grouped into the new frequency. - interp and position will be ignored in this case. + masked values appropriately. If func is None, then a 2D array is returned, + where each column represents the values appropriately grouped into the new + frequency. `position` will be ignored in this case. When converting to a higher frequency, position is 'START' or 'END' and determines where the data point is in each period (eg. if going @@ -1375,11 +1269,15 @@ *args, **kwargs)._series for m in series.split()]).view(type(series)) obj._dates = base._dates - if func is None or (func,series.observed) == ('auto','UNDEFINED'): + if func is None: shp = obj.shape ncols = base.shape[-1] obj.shape = (shp[0], shp[-1]//ncols, ncols) obj = numpy.swapaxes(obj,1,2) + else: + raise ValueError( + "only series with ndim == 1 or ndim == 2 may be converted") + return obj @@ -1562,17 +1460,10 @@ newdatad[new] = datad[old] newdatam[new] = datam[old] newdata = MA.masked_array(newdatad, mask=newdatam, fill_value=fill_value) -# # Get new shape .............. -# if data.ndim == 1: -# nshp = (newdates.size,) -# else: -# nshp = tuple([-1,] + list(data.shape[1:])) -# _data = newdata.reshape(nshp).view(type(data)) _data = newdata.view(datat) _data._dates = newdates return _data -# return time_series(newdata.reshape(nshp), newdates) -#............................................................................... +#.............................................................................. def stack(*series): """Performs a column_stack on the data from each series, and the resulting series has the same dates as each individual series. 
All series From scipy-svn at scipy.org Wed Sep 19 17:11:50 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 19 Sep 2007 16:11:50 -0500 (CDT) Subject: [Scipy-svn] r3332 - trunk/scipy/io/nifti Message-ID: <20070919211150.7257839C25B@new.scipy.org> Author: matthew.brett at gmail.com Date: 2007-09-19 16:11:44 -0500 (Wed, 19 Sep 2007) New Revision: 3332 Added: trunk/scipy/io/nifti/__init__.py trunk/scipy/io/nifti/nifticlib.i trunk/scipy/io/nifti/niftiimage.py trunk/scipy/io/nifti/utils.py Removed: trunk/scipy/io/nifti/nifti/ trunk/scipy/io/nifti/setup.py.orig Modified: trunk/scipy/io/nifti/setup.py Log: get swig happening, simplify directory structure Copied: trunk/scipy/io/nifti/__init__.py (from rev 3323, trunk/scipy/io/nifti/nifti/__init__.py) Copied: trunk/scipy/io/nifti/nifticlib.i (from rev 3323, trunk/scipy/io/nifti/nifti/nifticlib.i) Copied: trunk/scipy/io/nifti/niftiimage.py (from rev 3323, trunk/scipy/io/nifti/nifti/niftiimage.py) Modified: trunk/scipy/io/nifti/setup.py =================================================================== --- trunk/scipy/io/nifti/setup.py 2007-09-19 17:27:50 UTC (rev 3331) +++ trunk/scipy/io/nifti/setup.py 2007-09-19 21:11:44 UTC (rev 3332) @@ -4,7 +4,7 @@ import sys import numpy -nifti_wrapper_file = join('nifti', 'nifticlib.py') +nifti_wrapper_file = join('nifticlib.py') # create an empty file to workaround crappy swig wrapper installation if not isfile(nifti_wrapper_file): @@ -18,7 +18,7 @@ from numpy.distutils.system_info import get_info config = Configuration('nifti',parent_package,top_path) - #config.add_data_dir('tests') + config.add_data_dir('tests') include_dirs = [ '.', @@ -26,6 +26,9 @@ './nifticlib/niftilib', './nifticlib/znzlib'] + nifticlib_headers = ' -I'.join(include_dirs) + swig_opts = ['-I'+nifticlib_headers, '-I'+numpy_headers] + # Libraries config.add_library('fslio', sources=['./nifticlib/fsliolib/fslio.c'], include_dirs=include_dirs) @@ -36,10 +39,10 @@ # Extension config.add_extension('_nifticlib', - sources = ['nifticlib.i', 'nifticlib_wrap.c'], + sources = ['nifticlib.i'], include_dirs = include_dirs, libraries = ['niftiio', 'fslio', 'znz',], - swig_opts = ['-I/usr/include/nifti', '-I'+numpy_headers]) + swig_opts = swig_opts) return config Deleted: trunk/scipy/io/nifti/setup.py.orig =================================================================== --- trunk/scipy/io/nifti/setup.py.orig 2007-09-19 17:27:50 UTC (rev 3331) +++ trunk/scipy/io/nifti/setup.py.orig 2007-09-19 21:11:44 UTC (rev 3332) @@ -1,56 +0,0 @@ -#!/usr/bin/env python - -### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### -# -# Python distutils setup for PyNifti -# -# Copyright (C) 2006-2007 by -# Michael Hanke -# -# This package is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# version 2 of the License, or (at your option) any later version. -# -# This package is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. 
-# -### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### - -from distutils.core import setup, Extension -import os -import numpy -from glob import glob - -nifti_wrapper_file = os.path.join('nifti', 'nifticlib.py') - -# create an empty file to workaround crappy swig wrapper installation -if not os.path.isfile(nifti_wrapper_file): - open(nifti_wrapper_file, 'w') - -# find numpy headers -numpy_headers = os.path.join(os.path.dirname(numpy.__file__),'core','include') - - -# Notes on the setup -# Version scheme is: -# 0.<4-digit-year><2-digit-month><2-digit-day>. - -setup(name = 'pynifti', - version = '0.20070905.1', - author = 'Michael Hanke', - author_email = 'michael.hanke at gmail.com', - license = 'LGPL', - url = 'http://apsy.gse.uni-magdeburg.de/hanke', - description = 'Python interface for the NIfTI IO libraries', - long_description = """ """, - packages = [ 'nifti' ], - scripts = glob( 'bin/*' ), - ext_modules = [ Extension( 'nifti._nifticlib', [ 'nifti/nifticlib.i' ], - include_dirs = [ '/usr/include/nifti', numpy_headers ], - libraries = [ 'niftiio' ], - swig_opts = [ '-I/usr/include/nifti', - '-I' + numpy_headers ] ) ] - ) - Copied: trunk/scipy/io/nifti/utils.py (from rev 3323, trunk/scipy/io/nifti/nifti/utils.py) From scipy-svn at scipy.org Wed Sep 19 23:02:21 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 19 Sep 2007 22:02:21 -0500 (CDT) Subject: [Scipy-svn] r3333 - trunk/scipy/sandbox/timeseries/tests Message-ID: <20070920030221.3234839C076@new.scipy.org> Author: mattknox_ca Date: 2007-09-19 22:02:17 -0500 (Wed, 19 Sep 2007) New Revision: 3333 Modified: trunk/scipy/sandbox/timeseries/tests/test_trecords.py Log: updated some tests to use new functions in maskedarray testutils Modified: trunk/scipy/sandbox/timeseries/tests/test_trecords.py =================================================================== --- trunk/scipy/sandbox/timeseries/tests/test_trecords.py 2007-09-19 21:11:44 UTC (rev 3332) +++ trunk/scipy/sandbox/timeseries/tests/test_trecords.py 2007-09-20 03:02:17 UTC (rev 3333) @@ -18,7 +18,7 @@ from numpy.testing.utils import build_err_msg import maskedarray.testutils -from maskedarray.testutils import assert_equal, assert_array_equal +from maskedarray.testutils import assert_equal, assert_array_equal, assert_equal_records import maskedarray.core as MA import maskedarray.mrecords as MR @@ -65,12 +65,12 @@ assert_equal(mts._data[0], mrec._data[0]) # We can't use assert_equal here, as it tries to convert the tuple into a singleton # assert(mts[0]._data.view(numpyndarray) == mrec[0]) - assert_equal(numpy.asarray(mts._data[0]), mrec[0]) + assert_equal_records(mts._data[0], mrec[0]) assert_equal(mts._dates[0], dates[0]) assert_equal(mts[0]._dates, dates[0]) # assert(isinstance(mts['2007-01'], TimeSeriesRecords)) - assert(mts['2007-01']._data == mrec[0]) + assert_equal_records(mts['2007-01']._data, mrec[0]) assert_equal(mts['2007-01']._dates, dates[0]) # assert(isinstance(mts.f0, TimeSeries)) From scipy-svn at scipy.org Thu Sep 20 01:48:08 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 20 Sep 2007 00:48:08 -0500 (CDT) Subject: [Scipy-svn] r3334 - trunk/scipy/io Message-ID: <20070920054808.4749039C32B@new.scipy.org> Author: jarrod.millman Date: 2007-09-20 00:45:53 -0500 (Thu, 20 Sep 2007) New Revision: 3334 Modified: trunk/scipy/io/info.py Log: updating docs Modified: trunk/scipy/io/info.py =================================================================== --- trunk/scipy/io/info.py 2007-09-20 03:02:17 UTC 
(rev 3333) +++ trunk/scipy/io/info.py 2007-09-20 05:45:53 UTC (rev 3334) @@ -4,7 +4,10 @@ Classes - fopen -- a class for easily reading and writing binary data. + npfile -- a class for reading and writing numpy arrays from / to binary files + Cache + DataSource + Repository Functions From scipy-svn at scipy.org Thu Sep 20 01:48:39 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 20 Sep 2007 00:48:39 -0500 (CDT) Subject: [Scipy-svn] r3335 - trunk/scipy/io Message-ID: <20070920054839.AADAB39C32B@new.scipy.org> Author: jarrod.millman Date: 2007-09-20 00:48:37 -0500 (Thu, 20 Sep 2007) New Revision: 3335 Modified: trunk/scipy/io/path.py Log: resyncing with upstream release Modified: trunk/scipy/io/path.py =================================================================== --- trunk/scipy/io/path.py 2007-09-20 05:45:53 UTC (rev 3334) +++ trunk/scipy/io/path.py 2007-09-20 05:48:37 UTC (rev 3335) @@ -2,7 +2,7 @@ Example: -from neuroimaging.utils.path import path +from path import path d = path('/home/guido/bin') for f in d.files('*.py'): f.chmod(0755) @@ -11,18 +11,14 @@ URL: http://www.jorendorff.com/articles/python/path -Author: Jason Orendorff (and others - see the url!) -Date: 7 Mar 2004 +Author: Jason Orendorff (and others - see the url!) +Date: 9 Mar 2007 """ # TODO -# - Tree-walking functions don't avoid symlink loops. Matt Harrison sent me a patch for this. -# - Tree-walking functions can't ignore errors. Matt Harrison asked for this. -# -# - Two people asked for path.chdir(). This just seems wrong to me, -# I dunno. chdir() is moderately evil anyway. -# +# - Tree-walking functions don't avoid symlink loops. Matt Harrison +# sent me a patch for this. # - Bug in write_text(). It doesn't support Universal newline mode. # - Better error message in listdir() when self isn't a # directory. (On Windows, the error message really sucks.) @@ -30,11 +26,12 @@ # - Add methods for regex find and replace. # - guess_content_type() method? # - Perhaps support arguments to touch(). -# - Could add split() and join() methods that generate warnings. 
+from __future__ import generators + import sys, warnings, os, fnmatch, glob, shutil, codecs, md5 -__version__ = '2.1' +__version__ = '2.2' __all__ = ['path'] # Platform-specific support for path.owner @@ -387,6 +384,7 @@ "Unable to list directory '%s': %s" % (self, sys.exc_info()[1]), TreeWalkWarning) + return else: raise @@ -437,6 +435,7 @@ "Unable to list directory '%s': %s" % (self, sys.exc_info()[1]), TreeWalkWarning) + return else: raise @@ -467,6 +466,7 @@ "Unable to list directory '%s': %s" % (self, sys.exc_info()[1]), TreeWalkWarning) + return else: raise @@ -476,12 +476,13 @@ isdir = not isfile and child.isdir() except: if errors == 'ignore': - return + continue elif errors == 'warn': warnings.warn( "Unable to access '%s': %s" % (self, sys.exc_info()[1]), TreeWalkWarning) + continue else: raise From scipy-svn at scipy.org Thu Sep 20 01:51:37 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 20 Sep 2007 00:51:37 -0500 (CDT) Subject: [Scipy-svn] r3336 - trunk/scipy/io Message-ID: <20070920055137.6854539C32B@new.scipy.org> Author: jarrod.millman Date: 2007-09-20 00:51:33 -0500 (Thu, 20 Sep 2007) New Revision: 3336 Modified: trunk/scipy/io/path.py Log: better integrate code with scipy Modified: trunk/scipy/io/path.py =================================================================== --- trunk/scipy/io/path.py 2007-09-20 05:48:37 UTC (rev 3335) +++ trunk/scipy/io/path.py 2007-09-20 05:51:33 UTC (rev 3336) @@ -2,14 +2,12 @@ Example: -from path import path +from scipy.io.path import path d = path('/home/guido/bin') for f in d.files('*.py'): f.chmod(0755) -This module requires Python 2.2 or later. - URL: http://www.jorendorff.com/articles/python/path Author: Jason Orendorff (and others - see the url!) Date: 9 Mar 2007 @@ -27,8 +25,6 @@ # - guess_content_type() method? # - Perhaps support arguments to touch(). 
-from __future__ import generators - import sys, warnings, os, fnmatch, glob, shutil, codecs, md5 __version__ = '2.2' From scipy-svn at scipy.org Thu Sep 20 01:57:57 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 20 Sep 2007 00:57:57 -0500 (CDT) Subject: [Scipy-svn] r3337 - trunk/scipy/io Message-ID: <20070920055757.85B9639C32B@new.scipy.org> Author: jarrod.millman Date: 2007-09-20 00:57:54 -0500 (Thu, 20 Sep 2007) New Revision: 3337 Modified: trunk/scipy/io/info.py Log: typo Modified: trunk/scipy/io/info.py =================================================================== --- trunk/scipy/io/info.py 2007-09-20 05:51:33 UTC (rev 3336) +++ trunk/scipy/io/info.py 2007-09-20 05:57:54 UTC (rev 3337) @@ -11,7 +11,7 @@ Functions - read_array -- reading ascii streams into Numeric arrays + read_array -- reading ascii streams into NumPy arrays write_array -- write an array to an ascii stream loadmat -- read a MATLAB style mat file (version 4 and 5) savemat -- write a MATLAB (version <= 4) style mat file From scipy-svn at scipy.org Thu Sep 20 01:58:14 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 20 Sep 2007 00:58:14 -0500 (CDT) Subject: [Scipy-svn] r3338 - trunk/scipy/special/tests Message-ID: <20070920055814.25DF639C32B@new.scipy.org> Author: jarrod.millman Date: 2007-09-20 00:58:11 -0500 (Thu, 20 Sep 2007) New Revision: 3338 Removed: trunk/scipy/special/tests/Makefile Log: removed unneeded file Deleted: trunk/scipy/special/tests/Makefile =================================================================== --- trunk/scipy/special/tests/Makefile 2007-09-20 05:57:54 UTC (rev 3337) +++ trunk/scipy/special/tests/Makefile 2007-09-20 05:58:11 UTC (rev 3338) @@ -1,7 +0,0 @@ -# -# -test: - sh -c 'for t in test*.py; do PYTHONPATH=.. $$t; done' - -testref: - sh -c 'for t in test*.py; do PYTHONPATH=.. $$t -b; done' From scipy-svn at scipy.org Thu Sep 20 15:43:18 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 20 Sep 2007 14:43:18 -0500 (CDT) Subject: [Scipy-svn] r3339 - trunk/scipy/io/nifti Message-ID: <20070920194318.6DAEC39C05F@new.scipy.org> Author: brian.hawthorne Date: 2007-09-20 14:43:12 -0500 (Thu, 20 Sep 2007) New Revision: 3339 Added: trunk/scipy/io/nifti/svntest Log: test my commit access Added: trunk/scipy/io/nifti/svntest =================================================================== From scipy-svn at scipy.org Thu Sep 20 15:43:29 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 20 Sep 2007 14:43:29 -0500 (CDT) Subject: [Scipy-svn] r3340 - trunk/scipy/io/nifti Message-ID: <20070920194329.96AE539C05F@new.scipy.org> Author: brian.hawthorne Date: 2007-09-20 14:43:27 -0500 (Thu, 20 Sep 2007) New Revision: 3340 Removed: trunk/scipy/io/nifti/svntest Log: test my commit access Deleted: trunk/scipy/io/nifti/svntest =================================================================== From scipy-svn at scipy.org Thu Sep 20 16:18:35 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 20 Sep 2007 15:18:35 -0500 (CDT) Subject: [Scipy-svn] r3341 - in trunk/scipy/stats: . tests Message-ID: <20070920201835.6F8B039C0DB@new.scipy.org> Author: stefan Date: 2007-09-20 15:18:15 -0500 (Thu, 20 Sep 2007) New Revision: 3341 Modified: trunk/scipy/stats/stats.py trunk/scipy/stats/tests/test_stats.py Log: Fix stats.percentile for 2D input. 
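For illustration, a minimal numpy-only sketch of the column-wise behaviour this patch gives scoreatpercentile; score_at_percentile below is a hypothetical stand-in rather than the patched function itself, and the sample values mirror the new test added in the diff:

import numpy as np

def score_at_percentile(a, per):
    # Sorting along axis 0 (rather than flattening) is the essence of the
    # fix: a 2D input is reduced one column at a time.
    values = np.sort(a, axis=0)
    idx = per / 100.0 * (values.shape[0] - 1)
    if idx % 1 == 0:
        return values[int(idx)]
    # Otherwise, interpolate between the two bracketing order statistics.
    below, above = values[int(idx)], values[int(idx) + 1]
    return below + (above - below) * (idx % 1)

x = np.array([[1, 1, 1],
              [1, 1, 1],
              [4, 4, 3],
              [1, 1, 1],
              [1, 1, 1]])
print(score_at_percentile(x, 50))   # [1 1 1] -- one median per column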
Modified: trunk/scipy/stats/stats.py =================================================================== --- trunk/scipy/stats/stats.py 2007-09-20 19:43:27 UTC (rev 3340) +++ trunk/scipy/stats/stats.py 2007-09-20 20:18:15 UTC (rev 3341) @@ -966,24 +966,25 @@ return a + (b - a)*fraction; def scoreatpercentile(a, per, limit=()): - """Calculates the score at the given 'per' percentile of the sequence - a. For example, the score at per=50 is the median. + """Calculate the score at the given 'per' percentile of the + sequence a. For example, the score at per=50 is the median. - If the desired quantile lies between two data points, we interpolate - between them. - + If the desired quantile lies between two data points, we + interpolate between them. + If the parameter 'limit' is provided, it should be a tuple (lower, - upper) of two values. Values of 'a' outside this (closed) interval - will be ignored. + upper) of two values. Values of 'a' outside this (closed) + interval will be ignored. + """ # TODO: this should be a simple wrapper around a well-written quantile # function. GNU R provides 9 quantile algorithms (!), with differing # behaviour at, for example, discontinuities. - values = np.sort(a) + values = np.sort(a,axis=0) if limit: values = values[(limit[0] < a) & (a < limit[1])] - - idx = per /100. * (len(values) - 1) + + idx = per /100. * (values.shape[0] - 1) if (idx % 1 == 0): return values[idx] else: Modified: trunk/scipy/stats/tests/test_stats.py =================================================================== --- trunk/scipy/stats/tests/test_stats.py 2007-09-20 19:43:27 UTC (rev 3340) +++ trunk/scipy/stats/tests/test_stats.py 2007-09-20 20:18:15 UTC (rev 3341) @@ -624,18 +624,28 @@ self.a1 = [3,4,5,10,-3,-5,6] self.a2 = [3,-6,-2,8,7,4,2,1] self.a3 = [3.,4,5,10,-3,-5,-6,7.0] - + def check_median(self): assert_equal(stats.median(self.a1), 4) assert_equal(stats.median(self.a2), 2.5) assert_equal(stats.median(self.a3), 3.5) - + def check_percentile(self): x = arange(8) * 0.5 assert_equal(stats.scoreatpercentile(x, 0), 0.) assert_equal(stats.scoreatpercentile(x, 100), 3.5) assert_equal(stats.scoreatpercentile(x, 50), 1.75) - + + def test_2D(self): + x = array([[1, 1, 1], + [1, 1, 1], + [4, 4, 3], + [1, 1, 1], + [1, 1, 1]]) + assert_array_equal(stats.scoreatpercentile(x,50), + [1,1,1]) + + class test_std(NumpyTestCase): def check_basic(self): a = [3,4,5,10,-3,-5,6] From scipy-svn at scipy.org Thu Sep 20 18:33:16 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 20 Sep 2007 17:33:16 -0500 (CDT) Subject: [Scipy-svn] r3342 - branches/0.6.x Message-ID: <20070920223316.4172539C0DB@new.scipy.org> Author: jarrod.millman Date: 2007-09-20 17:33:12 -0500 (Thu, 20 Sep 2007) New Revision: 3342 Modified: branches/0.6.x/setup.py Log: Fix problem with version information being doubled (see r3310). 
Modified: branches/0.6.x/setup.py =================================================================== --- branches/0.6.x/setup.py 2007-09-20 20:18:15 UTC (rev 3341) +++ branches/0.6.x/setup.py 2007-09-20 22:33:12 UTC (rev 3342) @@ -35,10 +35,8 @@ sys.path.insert(0,os.path.join(local_path,'scipy')) # to retrive version try: - from version import version as version setup( name = 'scipy', - version = version, # will be overwritten by configuration version maintainer = "SciPy Developers", maintainer_email = "scipy-dev at scipy.org", description = "Scientific Algorithms Library for Python", From scipy-svn at scipy.org Thu Sep 20 19:19:38 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 20 Sep 2007 18:19:38 -0500 (CDT) Subject: [Scipy-svn] r3343 - trunk/scipy/special/tests Message-ID: <20070920231938.AC87839C0D1@new.scipy.org> Author: jarrod.millman Date: 2007-09-20 18:19:34 -0500 (Thu, 20 Sep 2007) New Revision: 3343 Removed: trunk/scipy/special/tests/Test.py Log: removed old file Deleted: trunk/scipy/special/tests/Test.py =================================================================== --- trunk/scipy/special/tests/Test.py 2007-09-20 22:33:12 UTC (rev 3342) +++ trunk/scipy/special/tests/Test.py 2007-09-20 23:19:34 UTC (rev 3343) @@ -1,100 +0,0 @@ -#!/usr/bin/env python -# -import pickle -import Numeric, cephes, RandomArray -import sys - -class Test: - - """ - There are two reasons why we don't rely on test.regrtest: - first, putting the expected results inside the test_ - script would lead to very small coverage, or VERY HUGE test_ - files; second, I liked the idea of trying to evenly cover the - configuration space, avoiding deterministic lattices; third, I never - pickled variables, and wanted to try! """ - - def __init__(self,fn,fnname,**args): - self.name=fnname - self.reffile='ref_'+self.name+'.pkl' - self.call=fn - self.vars_read=(0==1) - if args.has_key('ref'): - self.ref = args['ref'] - else: - self.ref = (0 == 1) - self.in_vars=args['in_vars'] - self.out_vars=args['out_vars'] - if args.has_key('tries'): - self.tries=args['tries'] - else: - self.tries=100 - - def _readref(self): - if not self.ref: - f=open(self.reffile,'r') - p=Numeric.Unpickler(f) - for t in self.in_vars.keys(): - self.in_vars[t]=p.load() - for t in self.out_vars.keys(): - self.out_vars[t]=p.load() - f.close() - self.vars_read=(0==0) - - def _genref(self): - if self.ref: - f=open(self.reffile,'w') - p=Numeric.Pickler(f) - for t in self.in_vars.keys(): - self.in_vars[t]=self._gen_array(self.in_vars[t]) - p.dump(self.in_vars[t]) - self._compute() - if type (self.result) != type (()): self.result=self.result, - for t in self.result: - p.dump(t) - f.close - - def _gen_array(self,limits): - seed=RandomArray.seed - random=RandomArray.uniform - for t in limits: - if type(t)==type(0.+0.j): _complex=(0==0) - else: _complex=(0==1) - if _complex: - seed() - minr=min(limits[0].real,limits[1].real) - maxr=max(limits[0].real,limits[1].real) - mini=min(limits[0].imag,limits[1].imag) - maxi=max(limits[0].imag,limits[1].imag) - a=random(minr,maxr,(self.tries,))+0.j - a.imag=random(mini,maxi,(self.tries,)) - else: - minr=min(limits[0],limits[1]) - maxr=max(limits[0],limits[1]) - a=random(minr,maxr,(self.tries,)) - return a - - def _compute(self): - self.result=apply(self.call,tuple(self.in_vars.values())) - - def test(self): - self.max_rel_dev=[] - if self.ref: - self._genref() - else: - if not self.vars_read: - self._readref() - self._compute() - if type (self.result) != type(()): self.result=self.result, - for 
t in range(len(self.out_vars.keys())): - dev=abs(self.result[t]-self.out_vars[self.out_vars.keys()[t]]) - ref=abs(self.result[t]+self.out_vars[self.out_vars.keys()[t]])/2 - mx_dev_idx=Numeric.argmax(dev,axis=-1) - if dev[mx_dev_idx] > 0.: - if ref[mx_dev_idx] > 0.: - self.max_rel_dev.append(dev[mx_dev_idx]/ref[mx_dev_idx]) - else: - self.max_rel_dev.append(1.e+38) - if len(self.max_rel_dev)>0: - return max(self.max_rel_dev) - return 0 From scipy-svn at scipy.org Thu Sep 20 21:18:07 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 20 Sep 2007 20:18:07 -0500 (CDT) Subject: [Scipy-svn] r3344 - trunk/scipy/io Message-ID: <20070921011807.2B6F139C24E@new.scipy.org> Author: jarrod.millman Date: 2007-09-20 20:17:41 -0500 (Thu, 20 Sep 2007) New Revision: 3344 Modified: trunk/scipy/io/datasource.py Log: docs and clean up Modified: trunk/scipy/io/datasource.py =================================================================== --- trunk/scipy/io/datasource.py 2007-09-20 23:19:34 UTC (rev 3343) +++ trunk/scipy/io/datasource.py 2007-09-21 01:17:41 UTC (rev 3344) @@ -1,3 +1,8 @@ +"""Utilities for importing (possibly compressed) data sets from an URL +(or file) and possibly caching them. + +""" + import os import gzip import bz2 @@ -6,7 +11,7 @@ from tempfile import mkstemp # TODO: replace with newer tuple-based path module -from path import path +from scipy.io.path import path zipexts = (".gz",".bz2") file_openers = {".gz":gzip.open, ".bz2":bz2.BZ2File, None:file} @@ -107,7 +112,7 @@ if not self.path.exists(): ensuredirs(self.path) - def tempfile(self,suffix='', prefix=''): + def tempfile(self, suffix='', prefix=''): """ Return an temporary file name in the cache""" _, fname = mkstemp(suffix, prefix, self.path) return fname @@ -148,8 +153,8 @@ :Returns: ``None`` """ - for f in self.path.files(): - f.rm() + for file in self.path.files(): + file.rm() def iscached(self, uri): """ Check if a file exists in the cache. @@ -175,7 +180,7 @@ def __init__(self, cachepath=os.curdir): self._cache = Cache(cachepath) - def tempfile(self,suffix='', prefix=''): + def tempfile(self, suffix='', prefix=''): ''' Return an temporary file name in the cache''' return self._cache.tempfile(suffix, prefix) From scipy-svn at scipy.org Fri Sep 21 11:33:20 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Fri, 21 Sep 2007 10:33:20 -0500 (CDT) Subject: [Scipy-svn] r3345 - trunk/scipy/sandbox/timeseries Message-ID: <20070921153320.507F439C25E@new.scipy.org> Author: mattknox_ca Date: 2007-09-21 10:33:16 -0500 (Fri, 21 Sep 2007) New Revision: 3345 Modified: trunk/scipy/sandbox/timeseries/const.py Log: added documentation describing all frequency constants and their corresponding string aliases Modified: trunk/scipy/sandbox/timeseries/const.py =================================================================== --- trunk/scipy/sandbox/timeseries/const.py 2007-09-21 01:17:41 UTC (rev 3344) +++ trunk/scipy/sandbox/timeseries/const.py 2007-09-21 15:33:16 UTC (rev 3345) @@ -1,6 +1,73 @@ """ -A collection of tools for timeseries +This module contains all the integer frequency constants. Below is a detailed +description of the constants, as well as a listing of the corresponding string +aliases. +All functions in the timeseries module that accept a frequency parameter can +accept either the integer constant, or a valid string alias. 
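As a concrete sketch of that interchangeability (the table below lists every alias; the import paths and keyword usage here follow the sandbox layout shown in these commits and are illustrative rather than guaranteed):

import timeseries as ts
from timeseries import const

# The integer constant and any of its case-insensitive string aliases
# name the same frequency, so both lines describe the same monthly Date.
d1 = ts.Date(freq=const.FR_MTH, year=2007, month=9)
d2 = ts.Date(freq='monthly', year=2007, month=9)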
+ +|----------------------------------------------------------------------------| +|CONSTANT | String aliases (case insensitive) and comments | +|----------------------------------------------------------------------------| +| Note: For annual frequencies, "Year" is determined by where the last month | +| of the year falls. | +|----------------------------------------------------------------------------| +| FR_ANN | 'A', 'Y', 'ANNUAL', 'ANNUALLY', 'YEAR', 'YEARLY' | +|----------------------------------------------------------------------------| +| FR_ANNDEC | 'A-DEC', 'A-December', 'Y-DEC', 'ANNUAL-DEC', etc... | +| | (annual frequency with December year end, equivalent to | +| | FR_ANN) | +|----------------------------------------------------------------------------| +| FR_ANNNOV | 'A-NOV', 'A-NOVEMBER', 'Y-NOVEMBER', 'ANNUAL-NOV', etc...| +| (annual frequency with November year end) | +| ...etc for the rest of the months | +|----------------------------------------------------------------------------| +| Note: For the following quarterly frequencies, "Year" is determined by | +| where the last quarter of the current group of quarters ENDS | +|----------------------------------------------------------------------------| +| FR_QTR | 'Q', 'QUARTER', 'QUARTERLY' | +|----------------------------------------------------------------------------| +| FR_QTREDEC | 'Q-DEC', 'QTR-December', 'QUARTERLY-DEC', etc... | +| | (quarterly frequency with December year end, equivalent | +| | to FR_QTR) | +|----------------------------------------------------------------------------| +| FR_QTRENOV | 'Q-NOV', 'QTR-NOVEMBER', 'QUARTERLY-NOV', etc... | +| | (quarterly frequency with November year end) | +| ...etc for the rest of the months | +|----------------------------------------------------------------------------| +| Note: For the following quarterly frequencies, "Year" is determined by | +| where the first quarter of the current group of quarters STARTS | +|----------------------------------------------------------------------------| +| FR_QTRSDEC | 'Q-S-DEC', 'QTR-S-December', etc... (quarterly frequency | +| | with December year end) | +| ...etc for the rest of the months | +|----------------------------------------------------------------------------| +| FR_MTH | 'M', 'MONTH', 'MONTHLY' | +|----------------------------------------------------------------------------| +| FR_WK | 'W', 'WEEK', 'WEEKLY' | +|----------------------------------------------------------------------------| +| FR_WKSUN | 'W-SUN', 'WEEK-SUNDAY', 'WEEKLY-SUN', etc... (weekly | +| | frequency with Sunday being the last day of the week, | +| | equivalent to FR_WK) | +|----------------------------------------------------------------------------| +| FR_WKSAT | 'W-SAT', 'WEEK-SATURDAY', 'WEEKLY-SAT', etc... 
(weekly | +| | frequency with Saturday being the last day of the week) | +| ...etc for the rest of the days of the week | +|----------------------------------------------------------------------------| +| FR_DAY | 'D', 'DAY', 'DAILY' | +|----------------------------------------------------------------------------| +| FR_BUS | 'B', 'BUSINESS', 'BUSINESSLY' (this is a daily frequency | +| | excluding Saturdays and Sundays) | +|----------------------------------------------------------------------------| +| FR_HR | 'H', 'HOUR', 'HOURLY' | +|----------------------------------------------------------------------------| +| FR_MIN | 'T', 'MINUTE', 'MINUTELY' | +|----------------------------------------------------------------------------| +| FR_SEC | 'S', 'SECOND', 'SECONDLY' | +|----------------------------------------------------------------------------| +| FR_UND | 'U', 'UNDEF', 'UNDEFINED' | +|----------------------------------------------------------------------------| + :author: Pierre GF Gerard-Marchant & Matt Knox :contact: pierregm_at_uga_dot_edu - mattknox_ca_at_hotmail_dot_com :version: $Id: tcore.py 2836 2007-03-07 16:58:14Z mattknox_ca $ From scipy-svn at scipy.org Fri Sep 21 12:02:48 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Fri, 21 Sep 2007 11:02:48 -0500 (CDT) Subject: [Scipy-svn] r3346 - trunk/scipy/sandbox/timeseries Message-ID: <20070921160248.C77B139C03B@new.scipy.org> Author: mattknox_ca Date: 2007-09-21 11:01:23 -0500 (Fri, 21 Sep 2007) New Revision: 3346 Modified: trunk/scipy/sandbox/timeseries/tseries.py Log: - fixed error with default value for func parameter in convert function - updated some documentation Modified: trunk/scipy/sandbox/timeseries/tseries.py =================================================================== --- trunk/scipy/sandbox/timeseries/tseries.py 2007-09-21 15:33:16 UTC (rev 3345) +++ trunk/scipy/sandbox/timeseries/tseries.py 2007-09-21 16:01:23 UTC (rev 3346) @@ -54,19 +54,49 @@ 'first_unmasked_val', 'last_unmasked_val' ] -def first_unmasked_val(a): - "Returns the first unmasked value in a 1d maskedarray." - (i,j) = MA.extras.flatnotmasked_edges(a) - return a[i] +def _unmasked_val(marray, x): + "helper function for first_unmasked_val and last_unmasked_val" + try: + assert(marray.ndim == 1) + except AssertionError: + raise ValueError("array must have ndim == 1") + + idx = MA.extras.flatnotmasked_edges(marray) + if idx is None: + return MA.masked + return marray[idx[x]] -def last_unmasked_val(a): - "Returns the last unmasked value in a 1d maskedarray." - (i,j) = MA.extras.flatnotmasked_edges(a) - return a[j] +def first_unmasked_val(marray): + """Retrieve the first unmasked value in a 1d maskedarray. -#### -------------------------------------------------------------------------- +*Parameters*: + marray : {MaskedArray} + marray must be 1 dimensional. + +*Returns*: + val : {marray.dtype} + first unmasked value in marray. If all values in marray are masked, + the function returns the maskedarray.masked constant +""" + return _unmasked_val(marray, 0) + +def last_unmasked_val(marray): + """Retrieve the last unmasked value in a 1d maskedarray. + +*Parameters*: + marray : {MaskedArray} + marray must be 1 dimensional. + +*Returns*: + val : {marray.dtype} + last unmasked value in marray. If all values in marray are masked, + the function returns the maskedarray.masked constant +""" + return _unmasked_val(marray, 1) + +#### ------------------------------------------------------------------------- #--- ... TimeSeriesError class ... 
-#### -------------------------------------------------------------------------- +#### ------------------------------------------------------------------------- class TimeSeriesError(Exception): "Class for TS related errors." def __init__ (self, value=None): @@ -109,16 +139,19 @@ return False else: step_diff = a._dates.get_steps() != b._dates.get_steps() - if (step_diff is True) or (hasattr(step_diff, "any") and step_diff.any()): + if (step_diff is True) or \ + (hasattr(step_diff, "any") and step_diff.any()): if raise_error: raise TimeSeriesCompatibilityError('time_steps', - a._dates.get_steps(), b._dates.get_steps()) + a._dates.get_steps(), + b._dates.get_steps()) else: return False elif a.shape != b.shape: if raise_error: - raise TimeSeriesCompatibilityError('size', "1: %s" % str(a.shape), - "2: %s" % str(b.shape)) + raise TimeSeriesCompatibilityError( + 'size', "1: %s" % str(a.shape), + "2: %s" % str(b.shape)) else: return False return True @@ -156,7 +189,7 @@ return True -def _datadatescompat(data,dates): +def _datadatescompat(data, dates): """Checks the compatibility of dates and data at the creation of a TimeSeries. Returns True if everything's fine, raises an exception otherwise.""" # If there's only 1 element, the date is a Date object, which has no size... @@ -166,7 +199,6 @@ if dsize == tsize: return True elif data.ndim > 1: - #dsize = numeric.asarray(data.shape)[:-1].prod() dsize = data.shape[0] if dsize == tsize: return True @@ -194,9 +226,9 @@ "All series must have same frequency! (got %s instead)" % unique_freqs return common_freq -##### -------------------------------------------------------------------------- +##### ------------------------------------------------------------------------ ##--- ... Time Series ... -##### -------------------------------------------------------------------------- +##### ------------------------------------------------------------------------ class _tsmathmethod(object): """Defines a wrapper for arithmetic array methods (add, mul...). When called, returns a new TimeSeries object, with the new series the result of @@ -205,12 +237,12 @@ """ def __init__ (self, methodname): self._name = methodname - # + def __get__(self, obj, objtype=None): "Gets the calling object." self.obj = obj return self - # + def __call__ (self, other, *args): "Execute the call behavior." instance = self.obj @@ -229,22 +261,22 @@ class _tsarraymethod(object): """Defines a wrapper for basic array methods. -When called, returns a new TimeSeries object, with the new series the result of -the method applied on the original series. +When called, returns a new TimeSeries object, with the new series the result +of the method applied on the original series. If `ondates` is True, the same operation is performed on the `_dates`. If `ondates` is False, the `_dates` part remains unchanged. - """ +""" def __init__ (self, methodname, ondates=False): """abfunc(fillx, filly) must be defined. abinop(x, filly) = x for all x to enable reduce. """ self._name = methodname self._ondates = ondates - # + def __get__(self, obj, objtype=None): self.obj = obj return self - # + def __call__ (self, *args): "Execute the call behavior." _name = self._name @@ -260,18 +292,19 @@ class _tsaxismethod(object): """Defines a wrapper for array methods working on an axis (mean...). -When called, returns a ndarray, as the result of the method applied on the series. - """ +When called, returns a ndarray, as the result of the method applied on the +series. 
+""" def __init__ (self, methodname): """abfunc(fillx, filly) must be defined. abinop(x, filly) = x for all x to enable reduce. """ self._name = methodname - # + def __get__(self, obj, objtype=None): self.obj = obj return self - # + def __call__ (self, *args, **params): "Execute the call behavior." (_dates, _series) = (self.obj._dates, self.obj._series) @@ -285,40 +318,51 @@ if axis in [-1, _series.ndim-1]: result = result.view(type(self.obj)) result._dates = _dates -# result = TimeSeries(result, dates=_dates) except IndexError: pass return result class TimeSeries(MaskedArray, object): """Base class for the definition of time series. -A time series is here defined as the combination of three arrays: - - `series` : *[ndarray]* +A time series is here defined as the combination of two arrays: + + series : {MaskedArray} Data part - - `mask` : *[ndarray]* - Mask part - - `dates` : *[DateArray]* + dates : {DateArray} Date part -The combination of `series` and `dates` is the `data` part. - """ - options = None +*Construction*: + data : {array_like} + data portion of the array. Any data that is valid for constructing a + MaskedArray can be used here. + dates : {DateArray} + +*Other Parameters*: + all other parameters are the same as for MaskedArray. Please see the + documentation for the MaskedArray class in the maskedarray module + for details. + +*Notes*: + it is typically recommended to use the `time_series` function for + construction as it allows greater flexibility and convenience. +""" _genattributes = ['fill_value'] - def __new__(cls, data, dates, mask=nomask, - dtype=None, copy=False, fill_value=None, subok=True, - keep_mask=True, small_mask=True, hard_mask=False, **options): - maparms = dict(copy=copy, dtype=dtype, fill_value=fill_value,subok=subok, + def __new__(cls, data, dates, mask=nomask, dtype=None, copy=False, + fill_value=None, subok=True, keep_mask=True, small_mask=True, + hard_mask=False, **options): + + maparms = dict(copy=copy, dtype=dtype, fill_value=fill_value, subok=subok, keep_mask=keep_mask, small_mask=small_mask, - hard_mask=hard_mask,) + hard_mask=hard_mask) _data = MaskedArray(data, mask=mask, **maparms) - # Get the dates .............................. + # Get the dates ...................................................... if not isinstance(dates, (Date, DateArray)): - raise TypeError("The input dates should be a valid Date or DateArray object! "\ - "(got %s instead)" % type(dates)) + raise TypeError("The input dates should be a valid Date or " + \ + "DateArray object (got %s instead)" % type(dates)) - # Get the data ............................... + # Get the data ....................................................... if not subok or not isinstance(_data,TimeSeries): _data = _data.view(cls) if _data is masked: @@ -338,24 +382,24 @@ elif dates._unsorted is not None: _data = _data[dates._unsorted] return _data - #............................................ + #......................................................................... def __array_finalize__(self,obj): MaskedArray.__array_finalize__(self, obj) self._dates = getattr(obj, '_dates', DateArray([])) return - #.................................. + #......................................................................... def __array_wrap__(self, obj, context=None): result = super(TimeSeries, self).__array_wrap__(obj, context) result._dates = self._dates return result - #............................................ + #......................................................................... 
def _get_series(self): "Returns the series as a regular masked array." if self._mask.ndim == 0 and self._mask: return masked return self.view(MaskedArray) _series = property(fget=_get_series) - #............................................ + #......................................................................... def __checkindex(self, indx): "Checks the validity of an index." if isinstance(indx, int): @@ -385,9 +429,6 @@ if self._dates.size == self.size: return (indx, indx) return (indx,indx[0]) -# elif len(indx)==2: -# return (indx,indx[0]) -# return (indx,indx[:-1]) elif isTimeSeries(indx): indx = indx._series if getmask(indx) is not nomask: @@ -397,8 +438,8 @@ def __getitem__(self, indx): """x.__getitem__(y) <==> x[y] -Returns the item described by i. Not a copy as in previous versions. - """ +Returns the item described by i. Not a copy. +""" (sindx, dindx) = self.__checkindex(indx) newdata = numeric.array(self._series[sindx], copy=False, subok=True) newdate = self._dates[dindx] @@ -416,36 +457,11 @@ newdata = newdata.view(type(self)) newdata._dates = newdate return newdata -# CHECK : The implementation below should work, but does not. Why ? -# newdata = numeric.array(self._data[sindx], copy=False) -# newdates = self._dates[dindx] -# if self._mask is not nomask: -# newmask = self._mask.copy()[sindx] -# else: -# newmask = nomask -# singlepoint = (len(numeric.shape(newdates))==0) -# if singlepoint: -# if newmask.ndim == 0 and newmask: -# output = tsmasked -# output._dates = newdates -# return output -# if self.ndim > 1: -# # CHECK: use reshape, or set shape ? -# newdata = newdata.reshape((list((1,)) + list(newdata.shape))) -# if newmask is not nomask: -# newmask.shape = newdata.shape -# newdata = newdata.view(type(self)) -# newdata._dates = newdates -# newdata._mask = newmask -# return newdata - - - #........................ def __setitem__(self, indx, value): """x.__setitem__(i, y) <==> x[i]=y Sets item described by index. If value is masked, masks those locations. - """ +""" if self is masked: raise MAError, 'Cannot alter the masked element.' (sindx, _) = self.__checkindex(indx) @@ -472,9 +488,9 @@ """Returns a string representation of self (w/o the dates...)""" return str(self._series) def __repr__(self): - """Calculates the repr representation, using masked for fill if - it is enabled. Otherwise fill with fill value. - """ + """Calculates the repr representation, using masked for fill if it is +enabled. Otherwise fill with fill value. +""" desc = """\ timeseries( %(data)s, @@ -537,23 +553,14 @@ stdu = _tsaxismethod('stdu') all = _tsaxismethod('all') any = _tsaxismethod('any') - - -# def nonzero(self): -# """Returns a tuple of ndarrays, one for each dimension of the array, -# containing the indices of the non-zero elements in that dimension.""" -# return self._series.nonzero() - -# filled = _tsarraymethod('filled', ondates=False) - - #............................................ + #......................................................................... def ids (self): """Return the ids of the data, dates and mask areas""" return (id(self._series), id(self.dates),) - #------------------------------------------------------ + #......................................................................... @property def series(self): - "Returns the series." + """Returns the series.""" return self._series @property def dates(self): @@ -569,43 +576,43 @@ return self._dates.freqstr @property def day(self): - "Returns the day of month for each date in self._dates." 
+ """Returns the day of month for each date in self._dates.""" return self._dates.day @property def day_of_week(self): - "Returns the day of week for each date in self._dates." + """Returns the day of week for each date in self._dates.""" return self._dates.day_of_week @property def day_of_year(self): - "Returns the day of year for each date in self._dates." + """Returns the day of year for each date in self._dates.""" return self._dates.day_of_year @property def month(self): - "Returns the month for each date in self._dates." + """Returns the month for each date in self._dates.""" return self._dates.month @property def quarter(self): - "Returns the quarter for each date in self._dates." + """Returns the quarter for each date in self._dates.""" return self._dates.quarter @property def year(self): - "Returns the year for each date in self._dates." + """Returns the year for each date in self._dates.""" return self._dates.year @property def second(self): - "Returns the seconds for each date in self._dates." + """Returns the second for each date in self._dates.""" return self._dates.second @property def minute(self): - "Returns the minutes for each date in self._dates." + """Returns the minute for each date in self._dates.""" return self._dates.minute @property def hour(self): - "Returns the hour for each date in self._dates." + """Returns the hour for each date in self._dates.""" return self._dates.hour @property def week(self): - "Returns the week for each date in self._dates." + """Returns the week for each date in self._dates.""" return self._dates.week days = day @@ -659,14 +666,33 @@ return self._dates.has_duplicated_dates() def date_to_index(self, date): - "Returns the index corresponding to a given date, as an integer." + """Returns the index corresponding to a given date, as an integer.""" return self._dates.date_to_index(date) #..................................................... - def asfreq(self, freq=None): - "Converts the dates to another frequency." - if freq is None: - return self - return TimeSeries(self._series, dates=self._dates.asfreq(freq)) + def asfreq(self, freq, relation="AFTER"): + """Converts the dates portion of the TimeSeries to another frequency. + +The resulting TimeSeries will have the same shape and dimensions as the +original series (unlike the `convert` method). + +*Parameters*: + freq : {freq_spec} + relation : {'AFTER', 'BEFORE'} , optional + +*Returns*: + a new TimeSeries (data copied) with the .dates DateArray at the specified + frequency (the .asfreq method of the .dates property will be called) + +*Notes*: + The parameters are the exact same as for DateArray.asfreq , please see the + __doc__ string for that method for details on the parameters and how the + actual conversion is performed. +""" + if freq is None: return self + + return TimeSeries(self._series, + dates=self._dates.asfreq(freq, relation=relation), + copy=True) #..................................................... def transpose(self, *axes): """ a.transpose(*axes) @@ -1193,7 +1219,7 @@ #.................................................................... -def _convert1d(series, freq, func=None, position='END', *args, **kwargs): +def _convert1d(series, freq, func, position, *args, **kwargs): "helper function for `convert` function" if not isinstance(series,TimeSeries): raise TypeError, "The argument should be a valid TimeSeries!" 
@@ -1247,7 +1273,7 @@ newseries.copy_attributes(series) return newseries -def convert(series, freq, func='auto', position='END', *args, **kwargs): +def convert(series, freq, func=None, position='END', *args, **kwargs): """Converts a series to a frequency. Private function called by convert When converting to a lower frequency, func is a function that acts @@ -1280,6 +1306,7 @@ return obj +TimeSeries.convert = convert def group_byperiod(series, freq, position='END'): """Converts a series to a frequency, without any processing. If the series @@ -1292,7 +1319,6 @@ series = fill_missing_dates(series) return convert(series, freq, func=None, position=position) -TimeSeries.convert = convert TimeSeries.group_byperiod = group_byperiod #............................................................................... From scipy-svn at scipy.org Sat Sep 22 03:45:58 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 22 Sep 2007 02:45:58 -0500 (CDT) Subject: [Scipy-svn] r3347 - trunk/scipy/linalg Message-ID: <20070922074558.1CD5C39C140@new.scipy.org> Author: pearu Date: 2007-09-22 02:45:49 -0500 (Sat, 22 Sep 2007) New Revision: 3347 Modified: trunk/scipy/linalg/setup.py Log: fixed fblaswrap dependencies Modified: trunk/scipy/linalg/setup.py =================================================================== --- trunk/scipy/linalg/setup.py 2007-09-21 16:01:23 UTC (rev 3346) +++ trunk/scipy/linalg/setup.py 2007-09-22 07:45:49 UTC (rev 3347) @@ -114,27 +114,28 @@ skip_names[name]) return target + depends = ['generic_fblas.pyf', + 'generic_fblas1.pyf', + 'generic_fblas2.pyf', + 'generic_fblas3.pyf', + 'interface_gen.py', + join('src','fblaswrap_veclib_c.c'), + join('src','fblaswrap.f'), + ] + # fblas: if needs_cblas_wrapper(lapack_opt): config.add_extension('fblas', sources = [generate_pyf, join('src','fblaswrap_veclib_c.c')], - depends = ['generic_fblas.pyf', - 'generic_fblas1.pyf', - 'generic_fblas2.pyf', - 'generic_fblas3.pyf', - 'interface_gen.py'], + depends = depends, extra_info = lapack_opt ) else: config.add_extension('fblas', sources = [generate_pyf, join('src','fblaswrap.f')], - depends = ['generic_fblas.pyf', - 'generic_fblas1.pyf', - 'generic_fblas2.pyf', - 'generic_fblas3.pyf', - 'interface_gen.py'], + depends = depends, extra_info = lapack_opt ) From scipy-svn at scipy.org Sat Sep 22 03:58:42 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 22 Sep 2007 02:58:42 -0500 (CDT) Subject: [Scipy-svn] r3348 - tags/0.6.0/scipy/linalg Message-ID: <20070922075842.08BAD39C03D@new.scipy.org> Author: jarrod.millman Date: 2007-09-22 02:58:36 -0500 (Sat, 22 Sep 2007) New Revision: 3348 Modified: tags/0.6.0/scipy/linalg/setup.py Log: fixed fblaswrap dependencies (see r3347) Modified: tags/0.6.0/scipy/linalg/setup.py =================================================================== --- tags/0.6.0/scipy/linalg/setup.py 2007-09-22 07:45:49 UTC (rev 3347) +++ tags/0.6.0/scipy/linalg/setup.py 2007-09-22 07:58:36 UTC (rev 3348) @@ -114,27 +114,28 @@ skip_names[name]) return target + depends = ['generic_fblas.pyf', + 'generic_fblas1.pyf', + 'generic_fblas2.pyf', + 'generic_fblas3.pyf', + 'interface_gen.py', + join('src','fblaswrap_veclib_c.c'), + join('src','fblaswrap.f'), + ] + # fblas: if needs_cblas_wrapper(lapack_opt): config.add_extension('fblas', sources = [generate_pyf, join('src','fblaswrap_veclib_c.c')], - depends = ['generic_fblas.pyf', - 'generic_fblas1.pyf', - 'generic_fblas2.pyf', - 'generic_fblas3.pyf', - 'interface_gen.py'], + depends = depends, extra_info = 
lapack_opt ) else: config.add_extension('fblas', sources = [generate_pyf, join('src','fblaswrap.f')], - depends = ['generic_fblas.pyf', - 'generic_fblas1.pyf', - 'generic_fblas2.pyf', - 'generic_fblas3.pyf', - 'interface_gen.py'], + depends = depends, extra_info = lapack_opt ) From scipy-svn at scipy.org Sat Sep 22 04:03:27 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 22 Sep 2007 03:03:27 -0500 (CDT) Subject: [Scipy-svn] r3349 - branches/0.6.x/scipy/linalg Message-ID: <20070922080327.96D8B39C03D@new.scipy.org> Author: jarrod.millman Date: 2007-09-22 03:03:21 -0500 (Sat, 22 Sep 2007) New Revision: 3349 Modified: branches/0.6.x/scipy/linalg/setup.py Log: fixed fblaswrap dependencies (see r3347) Modified: branches/0.6.x/scipy/linalg/setup.py =================================================================== --- branches/0.6.x/scipy/linalg/setup.py 2007-09-22 07:58:36 UTC (rev 3348) +++ branches/0.6.x/scipy/linalg/setup.py 2007-09-22 08:03:21 UTC (rev 3349) @@ -114,27 +114,28 @@ skip_names[name]) return target + depends = ['generic_fblas.pyf', + 'generic_fblas1.pyf', + 'generic_fblas2.pyf', + 'generic_fblas3.pyf', + 'interface_gen.py', + join('src','fblaswrap_veclib_c.c'), + join('src','fblaswrap.f'), + ] + # fblas: if needs_cblas_wrapper(lapack_opt): config.add_extension('fblas', sources = [generate_pyf, join('src','fblaswrap_veclib_c.c')], - depends = ['generic_fblas.pyf', - 'generic_fblas1.pyf', - 'generic_fblas2.pyf', - 'generic_fblas3.pyf', - 'interface_gen.py'], + depends = depends, extra_info = lapack_opt ) else: config.add_extension('fblas', sources = [generate_pyf, join('src','fblaswrap.f')], - depends = ['generic_fblas.pyf', - 'generic_fblas1.pyf', - 'generic_fblas2.pyf', - 'generic_fblas3.pyf', - 'interface_gen.py'], + depends = depends, extra_info = lapack_opt ) From scipy-svn at scipy.org Sat Sep 22 04:10:43 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 22 Sep 2007 03:10:43 -0500 (CDT) Subject: [Scipy-svn] r3350 - tags/0.6.0 Message-ID: <20070922081043.981D439C03D@new.scipy.org> Author: jarrod.millman Date: 2007-09-22 03:10:33 -0500 (Sat, 22 Sep 2007) New Revision: 3350 Modified: tags/0.6.0/MANIFEST.in Log: ran new_manifest.sh Modified: tags/0.6.0/MANIFEST.in =================================================================== --- tags/0.6.0/MANIFEST.in 2007-09-22 08:03:21 UTC (rev 3349) +++ tags/0.6.0/MANIFEST.in 2007-09-22 08:10:33 UTC (rev 3350) @@ -17,7 +17,6 @@ include scipy/sandbox/arpack/ARPACK/LAPACK/* include scipy/sandbox/arpack/ARPACK/SRC/* include scipy/sandbox/arpack/ARPACK/UTIL/* -include scipy/sandbox/arpack/build/* include scipy/sandbox/arpack/tests/* include scipy/sandbox/arraysetops/* include scipy/sandbox/arraysetops/tests/* @@ -29,6 +28,7 @@ include scipy/sandbox/cow/* include scipy/sandbox/delaunay/* include scipy/sandbox/delaunay/tests/* +include scipy/sandbox/dhuard/* include scipy/sandbox/exmplpackage/* include scipy/sandbox/exmplpackage/tests/* include scipy/sandbox/exmplpackage/yyy/* @@ -40,7 +40,10 @@ include scipy/sandbox/ga/* include scipy/sandbox/gplt/* include scipy/sandbox/image/* +include scipy/sandbox/lobpcg/* +include scipy/sandbox/lobpcg/tests/* include scipy/sandbox/maskedarray/* +include scipy/sandbox/maskedarray/alternative_versions/* include scipy/sandbox/maskedarray/tests/* include scipy/sandbox/models/* include scipy/sandbox/models/family/* @@ -49,47 +52,49 @@ include scipy/sandbox/montecarlo/* include scipy/sandbox/montecarlo/src/* include scipy/sandbox/montecarlo/tests/* +include 
scipy/sandbox/multigrid/* +include scipy/sandbox/multigrid/multigridtools/* +include scipy/sandbox/multigrid/tests/* include scipy/sandbox/netcdf/* include scipy/sandbox/newoptimize/* +include scipy/sandbox/newoptimize/tnc/* include scipy/sandbox/numexpr/* include scipy/sandbox/numexpr/tests/* include scipy/sandbox/oliphant/* include scipy/sandbox/plt/* include scipy/sandbox/pyem/* +include scipy/sandbox/pyem/doc/* +include scipy/sandbox/pyem/doc/examples/* +include scipy/sandbox/pyem/examples/* include scipy/sandbox/pyem/profile_data/* include scipy/sandbox/pyem/src/* include scipy/sandbox/pyem/tests/* -include scipy/sandbox/pysparse/* -include scipy/sandbox/pysparse/Tools/* -include scipy/sandbox/pysparse/amd/* -include scipy/sandbox/pysparse/docs/* -include scipy/sandbox/pysparse/examples/* -include scipy/sandbox/pysparse/examples/poisson_test/* -include scipy/sandbox/pysparse/include/* -include scipy/sandbox/pysparse/include/pysparse/* -include scipy/sandbox/pysparse/lib/* -include scipy/sandbox/pysparse/src/* -include scipy/sandbox/pysparse/superlu/* -include scipy/sandbox/pysparse/tests/* -include scipy/sandbox/pysparse/umfpack/* +include scipy/sandbox/pyloess/* +include scipy/sandbox/pyloess/doc/* +include scipy/sandbox/pyloess/src/* +include scipy/sandbox/pyloess/tests/* +include scipy/sandbox/rbf/* +include scipy/sandbox/rbf/tests/* include scipy/sandbox/rkern/* include scipy/sandbox/spline/* include scipy/sandbox/spline/fitpack/* include scipy/sandbox/spline/tests/* include scipy/sandbox/stats/* include scipy/sandbox/svm/* +include scipy/sandbox/svm/examples/* include scipy/sandbox/svm/libsvm-2.82/* include scipy/sandbox/svm/tests/* include scipy/sandbox/timeseries/* -include scipy/sandbox/timeseries/doc/* -include scipy/sandbox/timeseries/examples/* -include scipy/sandbox/timeseries/mtimeseries/* -include scipy/sandbox/timeseries/mtimeseries/tests/* -include scipy/sandbox/timeseries/old/* +include scipy/sandbox/timeseries/include/* +include scipy/sandbox/timeseries/io/* +include scipy/sandbox/timeseries/io/fame/* +include scipy/sandbox/timeseries/io/fame/src/* +include scipy/sandbox/timeseries/io/fame/tests/* +include scipy/sandbox/timeseries/lib/* +include scipy/sandbox/timeseries/lib/tests/* include scipy/sandbox/timeseries/plotlib/* include scipy/sandbox/timeseries/src/* -include scipy/sandbox/umfpack/* -include scipy/sandbox/umfpack/umfpack/* +include scipy/sandbox/timeseries/tests/* include scipy/sandbox/wavelet/* include scipy/sandbox/xplt/* include scipy/sandbox/xplt/gistdata/* From scipy-svn at scipy.org Sat Sep 22 04:13:31 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 22 Sep 2007 03:13:31 -0500 (CDT) Subject: [Scipy-svn] r3351 - tags/0.6.0 Message-ID: <20070922081331.17CE839C042@new.scipy.org> Author: jarrod.millman Date: 2007-09-22 03:13:20 -0500 (Sat, 22 Sep 2007) New Revision: 3351 Modified: tags/0.6.0/setup.py Log: Fix problem with version information being doubled (see r3310) Modified: tags/0.6.0/setup.py =================================================================== --- tags/0.6.0/setup.py 2007-09-22 08:10:33 UTC (rev 3350) +++ tags/0.6.0/setup.py 2007-09-22 08:13:20 UTC (rev 3351) @@ -35,10 +35,8 @@ sys.path.insert(0,os.path.join(local_path,'scipy')) # to retrive version try: - from version import version as version setup( name = 'scipy', - version = version, # will be overwritten by configuration version maintainer = "SciPy Developers", maintainer_email = "scipy-dev at scipy.org", description = "Scientific Algorithms Library for 
Python", From scipy-svn at scipy.org Sat Sep 22 21:03:56 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 22 Sep 2007 20:03:56 -0500 (CDT) Subject: [Scipy-svn] r3352 - trunk Message-ID: <20070923010356.9D7D639C061@new.scipy.org> Author: jarrod.millman Date: 2007-09-22 20:03:52 -0500 (Sat, 22 Sep 2007) New Revision: 3352 Modified: trunk/INSTALL.txt Log: point Documentation to itself Modified: trunk/INSTALL.txt =================================================================== --- trunk/INSTALL.txt 2007-09-22 08:13:20 UTC (rev 3351) +++ trunk/INSTALL.txt 2007-09-23 01:03:52 UTC (rev 3352) @@ -10,7 +10,7 @@ :Revision: $Revision$ :Discussions to: scipy-user at scipy.org -See http://new.scipy.org/Wiki/Installing_SciPy +See http://www.scipy.org/scipy/scipy/wiki/GetCode for updates of this document. .. Contents:: From scipy-svn at scipy.org Sat Sep 22 21:15:18 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 22 Sep 2007 20:15:18 -0500 (CDT) Subject: [Scipy-svn] r3353 - trunk Message-ID: <20070923011518.BE3A039C077@new.scipy.org> Author: jarrod.millman Date: 2007-09-22 20:15:11 -0500 (Sat, 22 Sep 2007) New Revision: 3353 Added: trunk/README.txt Removed: trunk/DEVELOPERS.txt Log: renamed to README Deleted: trunk/DEVELOPERS.txt =================================================================== --- trunk/DEVELOPERS.txt 2007-09-23 01:03:52 UTC (rev 3352) +++ trunk/DEVELOPERS.txt 2007-09-23 01:15:11 UTC (rev 3353) @@ -1,147 +0,0 @@ -.. -*- rest -*- -.. NB! Keep this document a valid restructured document. - - -Developing SciPy -================ - -:Author: Pearu Peterson -:Modified by: Ed Schofield -:Last changed: $Date$ -:Revision: $Revision$ -:Discussions to: scipy-dev at scipy.org - -.. Contents:: - -Introduction ------------- - -SciPy aims at being a robust and efficient "super-package" of a number -of modules, each of a non-trivial size and complexity. In order for -"SciPy integration" to work flawlessly, all SciPy modules must follow -certain rules that are described in this document. Hopefully this -document will be helpful for SciPy contributors and developers as a -basic reference about the structure of the SciPy package. - -SciPy structure ---------------- - -Currently SciPy consists of the following files and directories: - - INSTALL.txt - SciPy prerequisites, installation, testing, and troubleshooting. - - THANKS.txt - SciPy developers and contributors. Please keep it up to date!! - - DEVELOPERS.txt - SciPy structure (this document). - - setup.py - Script for building and installing SciPy. - - MANIFEST.in - Additions to distutils-generated SciPy tar-balls. Its usage is - deprecated. - - scipy/ - Contains SciPy __init__.py and the directories of SciPy modules. - - - - -SciPy modules -------------- - -In the following, a *SciPy module* is defined as a Python package, say -xxx, that is located in the scipy/ directory. All SciPy modules should -follow the following conventions: - -* Ideally, each SciPy module should be as self-contained as possible. - That is, it should have minimal dependencies on other packages or - modules. Even dependencies on other SciPy modules should be kept to a - minimum. A dependency on NumPy is of course assumed. - -* Directory ``xxx/`` must contain - - + a file ``setup.py`` that defines - ``configuration(parent_package='',top_path=None)`` function. - See below for more details. - - + a file ``info.py``. See below more details. 
- -* Directory ``xxx/`` may contain - - + a directory ``tests/`` that contains files ``test_.py`` - corresponding to modules ``xxx/{.py,.so,/}``. See below for - more details. - - + a file ``MANIFEST.in`` that may contain only ``include setup.py`` line. - DO NOT specify sources in MANIFEST.in, you must specify all sources - in setup.py file. Otherwise released SciPy tarballs will miss these sources. - - + a directory ``docs/`` for documentation. - -For details, read: - - http://svn.scipy.org/svn/numpy/trunk/numpy/doc/DISTUTILS.txt - -Open issues and discussion --------------------------- - -Documentation -+++++++++++++ - -This is an important feature where SciPy is currently lacking. A few -SciPy modules have some documentation but they use different formats -and are mostly out of date. We could use some help with this. - -Currently there are - -* A SciPy tutorial by Travis E. Oliphant. This is maintained using LyX. - The main advantage of this approach is that one can use mathematical - formulas in documentation. - -* I (Pearu) have used reStructuredText formated .txt files to document - various bits of software. This is mainly because ``docutils`` might - become a standard tool to document Python modules. The disadvantage - is that it does not support mathematical formulas (though, we might - add this feature ourself using e.g. LaTeX syntax). - -* Various text files with almost no formatting and mostly badly out - dated. - -* Documentation strings of Python functions, classes, and modules. - Some SciPy modules are well-documented in this sense, others are very - poorly documented. Another issue is that there is no consensus on how - to format documentation strings, mainly because we haven't decided - which tool to use to generate, for instance, HTML pages of - documentation strings. - -So, we need unique rules for documenting SciPy modules. Here are some -requirements that documentation tools should satsify: - -* Easy to use. This is important to lower the threshold of developers - to use the same documentation utilities. - -* In general, all functions that are visible to SciPy end-users, must - have well-maintained documentation strings. - -* Support for mathematical formulas. Since SciPy is a tool for - scientific work, it is hard to avoid formulas to describe how its - modules are good for. So, documentation tools should support LaTeX. - -* Documentation of a feature should be closely related to its - interface and implementation. This is important for keeping - documentation up to date. One option would be to maintain - documentation in source files (and have a tool that extracts - documentation from sources). The main disadvantage with that is the - lack of convenience writing documentation as the editor would be in - different mode (e.g. Python mode) from the mode suitable for - documentation. - -* Differentiation of implementation (e.g. from scanning sources) and - concept (e.g. tutorial, users guide, manual) based docs. 
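The ``configuration(parent_package='',top_path=None)`` convention described in the deleted text above (and retained in the new README) is the standard numpy.distutils pattern. A minimal sketch for a hypothetical module ``xxx``:

    def configuration(parent_package='', top_path=None):
        from numpy.distutils.misc_util import Configuration
        config = Configuration('xxx', parent_package, top_path)
        config.add_data_dir('tests')   # picks up xxx/tests/
        return config

    if __name__ == '__main__':
        from numpy.distutils.core import setup
        setup(**configuration(top_path='').todict())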
- - - Copied: trunk/README.txt (from rev 3352, trunk/DEVELOPERS.txt) From scipy-svn at scipy.org Sat Sep 22 21:37:21 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 22 Sep 2007 20:37:21 -0500 (CDT) Subject: [Scipy-svn] r3354 - trunk Message-ID: <20070923013721.EFEF139C061@new.scipy.org> Author: jarrod.millman Date: 2007-09-22 20:36:55 -0500 (Sat, 22 Sep 2007) New Revision: 3354 Modified: trunk/README.txt Log: updating README Modified: trunk/README.txt =================================================================== --- trunk/README.txt 2007-09-23 01:15:11 UTC (rev 3353) +++ trunk/README.txt 2007-09-23 01:36:55 UTC (rev 3354) @@ -1,21 +1,41 @@ .. -*- rest -*- -.. NB! Keep this document a valid restructured document. - - +================================================= Developing SciPy -================ +================================================= :Author: Pearu Peterson :Modified by: Ed Schofield +:Modified by: Jarrod Millman :Last changed: $Date$ :Revision: $Revision$ :Discussions to: scipy-dev at scipy.org .. Contents:: -Introduction ------------- +What is SciPy? +-------------- + +SciPy (pronounced "Sigh Pie") is open-source software for mathematics, +science, and engineering. It includes modules for statistics, optimization, +integration, linear algebra, Fourier transforms, signal and image processing, +genetic algorithms, ODE solvers, and more. It is also the name of a very +popular conference on scientific programming with Python. + +The SciPy library depends on NumPy, which provides convenient and fast +N-dimensional array manipulation. The SciPy library is built to work with +NumPy arrays, and provides many user-friendly and efficient numerical routines +such as routines for numerical integration and optimization. Together, they +run on all popular operating systems, are quick to install, and are free of +charge. NumPy and SciPy are easy to use, but powerful enough to be depended +upon by some of the world's leading scientists and engineers. If you need to +manipulate numbers on a computer and display or publish the results, give +SciPy a try! + + +SciPy structure +--------------- + SciPy aims at being a robust and efficient "super-package" of a number of modules, each of a non-trivial size and complexity. In order for "SciPy integration" to work flawlessly, all SciPy modules must follow @@ -23,9 +43,6 @@ document will be helpful for SciPy contributors and developers as a basic reference about the structure of the SciPy package. -SciPy structure ---------------- - Currently SciPy consists of the following files and directories: INSTALL.txt @@ -47,11 +64,8 @@ scipy/ Contains SciPy __init__.py and the directories of SciPy modules. - - - SciPy modules -------------- ++++++++++++++ In the following, a *SciPy module* is defined as a Python package, say xxx, that is located in the scipy/ directory. All SciPy modules should @@ -86,11 +100,9 @@ http://svn.scipy.org/svn/numpy/trunk/numpy/doc/DISTUTILS.txt -Open issues and discussion --------------------------- Documentation -+++++++++++++ +------------- This is an important feature where SciPy is currently lacking. A few SciPy modules have some documentation but they use different formats @@ -144,4 +156,36 @@ concept (e.g. tutorial, users guide, manual) based docs.
+Web sites +--------- +The user's site is here + http://www.scipy.org/ + +The developer's site is here + http://projects.scipy.org/scipy/scipy/wiki + + +Mailing Lists +------------- + +Please see the developer's list here + http://projects.scipy.org/mailman/listinfo/scipy-devel + + +Bug reports +----------- + +To search for bugs, please use the SciPy Bug Tracker at + http://projects.scipy.org/scipy/scipy/query + +To report a bug, please use the SciPy Bug Tracker at + http://projects.scipy.org/scipy/scipy/newticket + + +License information +------------------- + +See the file "LICENSE" for information on the history of this +software, terms & conditions for usage, and a DISCLAIMER OF ALL +WARRANTIES. From scipy-svn at scipy.org Sat Sep 22 22:17:05 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 22 Sep 2007 21:17:05 -0500 (CDT) Subject: [Scipy-svn] r3355 - trunk Message-ID: <20070923021705.A96DD39C053@new.scipy.org> Author: jarrod.millman Date: 2007-09-22 21:16:58 -0500 (Sat, 22 Sep 2007) New Revision: 3355 Modified: trunk/FORMAT_GUIDELINES.txt trunk/README.txt Log: updating documentation Modified: trunk/FORMAT_GUIDELINES.txt =================================================================== --- trunk/FORMAT_GUIDELINES.txt 2007-09-23 01:36:55 UTC (rev 3354) +++ trunk/FORMAT_GUIDELINES.txt 2007-09-23 02:16:58 UTC (rev 3355) @@ -9,7 +9,7 @@ o Only 80 characters on a line. - o use all lowercase function/class names with underscore separated words: + o use all lowercase function names with underscore separated words: def set_some_value() @@ -17,8 +17,11 @@ def setSomeValue() - There is no differentiation between classes, functions, verbs, nouns, etc. - This simple scheme aims to make it easier for non-programmers to become - familiar with the interface. For those that object, you are skilled - enough to deal with the limitation. + o use CamelCase class names: + + class BaseClass: + + instead of: + + class base_class: Modified: trunk/README.txt =================================================================== --- trunk/README.txt 2007-09-23 01:36:55 UTC (rev 3354) +++ trunk/README.txt 2007-09-23 02:16:58 UTC (rev 3355) @@ -98,7 +98,7 @@ For details, read: - http://svn.scipy.org/svn/numpy/trunk/numpy/doc/DISTUTILS.txt + http://projects.scipy.org/scipy/numpy/wiki/DistutilsDoc Documentation @@ -170,7 +170,7 @@ Please see the developer's list here - http://projects.scipy.org/mailman/listinfo/scipy-devel + http://projects.scipy.org/mailman/listinfo/scipy-dev Bug reports @@ -189,3 +189,4 @@ See the file "LICENSE" for information on the history of this software, terms & conditions for usage, and a DISCLAIMER OF ALL WARRANTIES.
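Taken together, the two naming rules now in FORMAT_GUIDELINES.txt read like this in practice (the names are hypothetical):

    class SeriesLoader:                      # class names: CamelCase
        def load_from_file(self, filename):  # function names: lowercase_with_underscores
            pass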
+ From scipy-svn at scipy.org Sat Sep 22 22:24:00 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 22 Sep 2007 21:24:00 -0500 (CDT) Subject: [Scipy-svn] r3356 - trunk Message-ID: <20070923022400.3B3A139C053@new.scipy.org> Author: jarrod.millman Date: 2007-09-22 21:23:56 -0500 (Sat, 22 Sep 2007) New Revision: 3356 Modified: trunk/TOCHANGE.txt Log: converting to reST markup Modified: trunk/TOCHANGE.txt =================================================================== --- trunk/TOCHANGE.txt 2007-09-23 02:16:58 UTC (rev 3355) +++ trunk/TOCHANGE.txt 2007-09-23 02:23:56 UTC (rev 3356) @@ -1,7 +1,27 @@ -Changes that should be made someday: +================================================= +Development Plans for SciPy 1.0 +================================================= -* io rewritten to use internal writing capabilities of arrays +See http://www.scipy.org/scipy/scipy/wiki/GetCode +for updates of this document. + +.. Contents:: + +General +-------- + * distributions heavy use of extract and insert (could use fancy indexing?) -- but we should wait until we learn how slow fancy indexing is....) * Use of old Numeric C-API. Using it means an extra C-level function call, but ... * Make use of type addition to extend certain ufuncs with cephes quad types * Use finfo(foo).bar instead of limits.foo_bar + +Handling Data IO +---------------- + +* io rewritten to use internal writing capabilities of arrays + +Image Processing +---------------- + + + From scipy-svn at scipy.org Sat Sep 22 23:02:16 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 22 Sep 2007 22:02:16 -0500 (CDT) Subject: [Scipy-svn] r3357 - trunk Message-ID: <20070923030216.DBA1239C089@new.scipy.org> Author: jarrod.millman Date: 2007-09-22 22:02:13 -0500 (Sat, 22 Sep 2007) New Revision: 3357 Modified: trunk/TOCHANGE.txt Log: typo Modified: trunk/TOCHANGE.txt =================================================================== --- trunk/TOCHANGE.txt 2007-09-23 02:23:56 UTC (rev 3356) +++ trunk/TOCHANGE.txt 2007-09-23 03:02:13 UTC (rev 3357) @@ -2,7 +2,7 @@ Development Plans for SciPy 1.0 ================================================= -See http://www.scipy.org/scipy/scipy/wiki/GetCode +See http://www.scipy.org/scipy/scipy/wiki/DevelopmentPlan for updates of this document. .. 
Contents:: From scipy-svn at scipy.org Sat Sep 22 23:02:50 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 22 Sep 2007 22:02:50 -0500 (CDT) Subject: [Scipy-svn] r3358 - trunk/scipy/tests Message-ID: <20070923030250.C2DB139C089@new.scipy.org> Author: jarrod.millman Date: 2007-09-22 22:02:44 -0500 (Sat, 22 Sep 2007) New Revision: 3358 Removed: trunk/scipy/tests/test_limits.py Modified: trunk/scipy/tests/test_basic.py trunk/scipy/tests/test_basic1a.py trunk/scipy/tests/test_common.py trunk/scipy/tests/test_handy.py Log: remove references to limits Modified: trunk/scipy/tests/test_basic.py =================================================================== --- trunk/scipy/tests/test_basic.py 2007-09-23 03:02:13 UTC (rev 3357) +++ trunk/scipy/tests/test_basic.py 2007-09-23 03:02:44 UTC (rev 3358) @@ -3,7 +3,6 @@ """ import unittest -import numpy.utils.limits as limits from numpy.test.testing import assert_array_equal, assert_equal from numpy.test.testing import assert_almost_equal, assert_array_almost_equal from scipy import * @@ -45,7 +44,7 @@ assert_almost_equal(std,1.0,1) -val = limits.double_resolution +#val = limits.double_resolution class test_eye(unittest.TestCase): def check_basic(self): Modified: trunk/scipy/tests/test_basic1a.py =================================================================== --- trunk/scipy/tests/test_basic1a.py 2007-09-23 03:02:13 UTC (rev 3357) +++ trunk/scipy/tests/test_basic1a.py 2007-09-23 03:02:44 UTC (rev 3358) @@ -3,7 +3,6 @@ """ import unittest -import numpy.limits as limits from numpy.test.testing import assert_array_equal, assert_equal from numpy.test.testing import assert_almost_equal, assert_array_almost_equal from scipy import * Modified: trunk/scipy/tests/test_common.py =================================================================== --- trunk/scipy/tests/test_common.py 2007-09-23 03:02:13 UTC (rev 3357) +++ trunk/scipy/tests/test_common.py 2007-09-23 03:02:44 UTC (rev 3358) @@ -3,7 +3,6 @@ """ import unittest -import numpy.limits as limits from numpy.test.testing import assert_array_equal, assert_equal from numpy.test.testing import assert_almost_equal, assert_array_almost_equal from numpy import sqrt, product, add, ravel, mgrid Modified: trunk/scipy/tests/test_handy.py =================================================================== --- trunk/scipy/tests/test_handy.py 2007-09-23 03:02:13 UTC (rev 3357) +++ trunk/scipy/tests/test_handy.py 2007-09-23 03:02:44 UTC (rev 3358) @@ -3,7 +3,6 @@ """ import unittest -import scipy.misc.limits as limits from numpy.testing import assert_array_equal, assert_equal from numpy.testing import assert_almost_equal, assert_array_almost_equal from scipy import * Deleted: trunk/scipy/tests/test_limits.py =================================================================== --- trunk/scipy/tests/test_limits.py 2007-09-23 03:02:13 UTC (rev 3357) +++ trunk/scipy/tests/test_limits.py 2007-09-23 03:02:44 UTC (rev 3358) @@ -1,44 +0,0 @@ -""" Test functions for limits module. - - Currently empty -- not sure how to test these values - and routines as they are machine dependent. Suggestions? 
-""" - -from numpy import * -import unittest -import numpy.limits - - - -################################################## -### Test for sum - -class test_float(unittest.TestCase): - def check_nothing(self): - pass - -class test_double(unittest.TestCase): - def check_nothing(self): - pass - -################################################## - - -def test_suite(level=1): - suites = [] - if level > 0: - suites.append( unittest.makeSuite(test_float,'check_') ) - suites.append( unittest.makeSuite(test_double,'check_') ) - - total_suite = unittest.TestSuite(suites) - return total_suite - -def test(level=10): - all_tests = test_suite(level) - runner = unittest.TextTestRunner() - runner.run(all_tests) - return runner - - -if __name__ == "__main__": - test() From scipy-svn at scipy.org Sat Sep 22 23:12:55 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 22 Sep 2007 22:12:55 -0500 (CDT) Subject: [Scipy-svn] r3359 - trunk Message-ID: <20070923031255.083E939C096@new.scipy.org> Author: jarrod.millman Date: 2007-09-22 22:12:52 -0500 (Sat, 22 Sep 2007) New Revision: 3359 Added: trunk/site.cfg.example Log: copied over the site.cfg.example from numpy Added: trunk/site.cfg.example =================================================================== --- trunk/site.cfg.example 2007-09-23 03:02:44 UTC (rev 3358) +++ trunk/site.cfg.example 2007-09-23 03:12:52 UTC (rev 3359) @@ -0,0 +1,120 @@ +# This file provides configuration information about non-Python dependencies for +# numpy.distutils-using packages. Create a file like this called "site.cfg" next +# to your package's setup.py file and fill in the appropriate sections. Not all +# packages will use all sections so you should leave out sections that your +# package does not use. + +# To assist automatic installation like easy_install, the user's home directory +# will also be checked for the file ~/.numpy-site.cfg . + +# The format of the file is that of the standard library's ConfigParser module. +# +# http://www.python.org/doc/current/lib/module-ConfigParser.html +# +# Each section defines settings that apply to one particular dependency. Some of +# the settings are general and apply to nearly any section and are defined here. +# Settings specific to a particular section will be defined near their section. +# +# libraries +# Comma-separated list of library names to add to compile the extension +# with. Note that these should be just the names, not the filenames. For +# example, the file "libfoo.so" would become simply "foo". +# libraries = lapack,f77blas,cblas,atlas +# +# library_dirs +# List of directories to add to the library search path when compiling +# extensions with this dependency. Use the character given by os.pathsep +# to separate the items in the list. On UN*X-type systems (Linux, FreeBSD, +# OS X): +# library_dirs = /usr/lib:/usr/local/lib +# On Windows: +# library_dirs = c:\mingw\lib,c:\atlas\lib +# +# include_dirs +# List of directories to add to the header file earch path. +# include_dirs = /usr/include:/usr/local/include +# +# src_dirs +# List of directories that contain extracted source code for the +# dependency. For some dependencies, numpy.distutils will be able to build +# them from source if binaries cannot be found. The FORTRAN BLAS and +# LAPACK libraries are one example. However, most dependencies are more +# complicated and require actual installation that you need to do +# yourself. 
+# src_dirs = /home/rkern/src/BLAS_SRC:/home/rkern/src/LAPACK_SRC +# +# search_static_first +# Boolean (one of (0, false, no, off) for False or (1, true, yes, on) for +# True) to tell numpy.distutils to prefer static libraries (.a) over +# shared libraries (.so). It is turned off by default. +# search_static_first = false + +# Defaults +# ======== +# The settings given here will apply to all other sections if not overridden. +# This is a good place to add general library and include directories like +# /usr/local/{lib,include} +# +#[DEFAULT] +#library_dirs = /usr/local/lib +#include_dirs = /usr/local/include + +# Optimized BLAS and LAPACK +# ------------------------- +# Use the blas_opt and lapack_opt sections to give any settings that are +# required to link against your chosen BLAS and LAPACK, including the regular +# FORTRAN reference BLAS and also ATLAS. Some other sections still exist for +# linking against certain optimized libraries (e.g. [atlas], [lapack_atlas]), +# however, they are now deprecated and should not be used. +# +# These are typical configurations for ATLAS (assuming that the library and +# include directories have already been set in [DEFAULT]; the include directory +# is important for the BLAS C interface): +# +#[blas_opt] +#libraries = f77blas, cblas, atlas +# +#[lapack_opt] +#libraries = lapack, f77blas, cblas, atlas +# +# If your ATLAS was compiled with pthreads, the names of the libraries might be +# different: +# +#[blas_opt] +#libraries = ptf77blas, ptcblas, atlas +# +#[lapack_opt] +#libraries = lapack, ptf77blas, ptcblas, atlas + +# UMFPACK +# ------- +# The UMFPACK library is used to factor large sparse matrices. It, in turn, +# depends on the AMD library for reordering the matrices for better performance. +# Note that the AMD library has nothing to do with AMD (Advanced Micro Devices), +# the CPU company. +# +# http://www.cise.ufl.edu/research/sparse/umfpack/ +# http://www.cise.ufl.edu/research/sparse/amd/ +# +#[amd] +#amd_libs = amd +# +#[umfpack] +#umfpack_libs = umfpack + +# FFT libraries +# ------------- +# There are two FFT libraries that we can configure here: FFTW (2 and 3) and djbfft. +# +# http://fftw.org/ +# http://cr.yp.to/djbfft.html +# +# Given only this section, numpy.distutils will try to figure out which version +# of FFTW you are using. +#[fftw] +#libraries = fftw3 +# +# For djbfft, numpy.distutils will look for either djbfft.a or libdjbfft.a . +#[djbfft] +#include_dirs = /usr/local/djbfft/include +#library_dirs = /usr/local/djbfft/lib From scipy-svn at scipy.org Sat Sep 22 23:44:35 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 22 Sep 2007 22:44:35 -0500 (CDT) Subject: [Scipy-svn] r3360 - trunk Message-ID: <20070923034435.A000F39C099@new.scipy.org> Author: jarrod.millman Date: 2007-09-22 22:44:32 -0500 (Sat, 22 Sep 2007) New Revision: 3360 Modified: trunk/TOCHANGE.txt Log: more documentation Modified: trunk/TOCHANGE.txt =================================================================== --- trunk/TOCHANGE.txt 2007-09-23 03:12:52 UTC (rev 3359) +++ trunk/TOCHANGE.txt 2007-09-23 03:44:32 UTC (rev 3360) @@ -7,6 +7,7 @@ .. Contents:: + General -------- @@ -14,14 +15,35 @@ * Use of old Numeric C-API. Using it means an extra C-level function call, but ... 
* Make use of type addition to extend certain ufuncs with cephes quad types * Use finfo(foo).bar instead of limits.foo_bar +* Get rid of sandbox + * move packages to + * scipy + * scikits + * or a branch -Handling Data IO ----------------- +Documentation +------------- + +* use new docstring format + + +Packages +-------- + +* consider reorganizing the namespace + * scipy.tests, scipy.misc, scipy.stsci + +IO (scipy.io) ++++++++++++++ + * io rewritten to use internal writing capabilities of arrays -Image Processing ----------------- +Image Processing (scipy.ndimage) +++++++++++++++++++++++++++++++++ +Statistical Analysis (scipy.stats) +++++++++++++++++++++++++++++++++++ +* add statistical models From scipy-svn at scipy.org Sat Sep 22 23:48:26 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 22 Sep 2007 22:48:26 -0500 (CDT) Subject: [Scipy-svn] r3361 - trunk Message-ID: <20070923034826.971D039C093@new.scipy.org> Author: jarrod.millman Date: 2007-09-22 22:48:23 -0500 (Sat, 22 Sep 2007) New Revision: 3361 Modified: trunk/TOCHANGE.txt Log: valid reST Modified: trunk/TOCHANGE.txt =================================================================== --- trunk/TOCHANGE.txt 2007-09-23 03:44:32 UTC (rev 3360) +++ trunk/TOCHANGE.txt 2007-09-23 03:48:23 UTC (rev 3361) @@ -12,13 +12,21 @@ -------- * distributions heavy use of extract and insert (could use fancy indexing?) -- but we should wait until we learn how slow fancy indexing is....) + * Use of old Numeric C-API. Using it means an extra C-level function call, but ... + * Make use of type addition to extend certain ufuncs with cephes quad types + * Use finfo(foo).bar instead of limits.foo_bar + * Get rid of sandbox + * move packages to + * scipy + * scikits + * or a branch @@ -32,6 +40,7 @@ -------- * consider reorganizing the namespace + * scipy.tests, scipy.misc, scipy.stsci IO (scipy.io) From scipy-svn at scipy.org Sun Sep 23 00:10:57 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 22 Sep 2007 23:10:57 -0500 (CDT) Subject: [Scipy-svn] r3362 - trunk/scipy/misc Message-ID: <20070923041057.E827A39C096@new.scipy.org> Author: jarrod.millman Date: 2007-09-22 23:10:45 -0500 (Sat, 22 Sep 2007) New Revision: 3362 Modified: trunk/scipy/misc/limits.py Log: Deprecating scipy.misc.limits Modified: trunk/scipy/misc/limits.py =================================================================== --- trunk/scipy/misc/limits.py 2007-09-23 03:48:23 UTC (rev 3361) +++ trunk/scipy/misc/limits.py 2007-09-23 04:10:45 UTC (rev 3362) @@ -1,6 +1,11 @@ """ Machine limits for Float32 and Float64. 
""" +import warnings +warnings.warn('limits module is deprecated, please use numpy.finfo instead', + DeprecationWarning) + + __all__ = ['float_epsilon','float_tiny','float_min', 'float_max','float_precision','float_resolution', 'single_epsilon','single_tiny','single_min','single_max', From scipy-svn at scipy.org Sun Sep 23 00:12:48 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 22 Sep 2007 23:12:48 -0500 (CDT) Subject: [Scipy-svn] r3363 - trunk Message-ID: <20070923041248.6473039C096@new.scipy.org> Author: jarrod.millman Date: 2007-09-22 23:12:44 -0500 (Sat, 22 Sep 2007) New Revision: 3363 Modified: trunk/TOCHANGE.txt Log: trying to reference trac revisions Modified: trunk/TOCHANGE.txt =================================================================== --- trunk/TOCHANGE.txt 2007-09-23 04:10:45 UTC (rev 3362) +++ trunk/TOCHANGE.txt 2007-09-23 04:12:44 UTC (rev 3363) @@ -17,7 +17,7 @@ * Make use of type addition to extend certain ufuncs with cephes quad types -* Use finfo(foo).bar instead of limits.foo_bar +* Use finfo(foo).bar instead of limits.foo_bar (see r3358 and r3362) * Get rid of sandbox From scipy-svn at scipy.org Sun Sep 23 00:17:00 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 22 Sep 2007 23:17:00 -0500 (CDT) Subject: [Scipy-svn] r3364 - trunk/scipy/misc Message-ID: <20070923041700.E633139C096@new.scipy.org> Author: jarrod.millman Date: 2007-09-22 23:16:52 -0500 (Sat, 22 Sep 2007) New Revision: 3364 Modified: trunk/scipy/misc/__init__.py Log: stop importing limits.py Modified: trunk/scipy/misc/__init__.py =================================================================== --- trunk/scipy/misc/__init__.py 2007-09-23 04:12:44 UTC (rev 3363) +++ trunk/scipy/misc/__init__.py 2007-09-23 04:16:52 UTC (rev 3364) @@ -1,9 +1,8 @@ from info import __doc__ -__all__ = ['limits', 'who', 'source', 'info'] +__all__ = ['who', 'source', 'info'] -import limits from common import * from numpy import who, source, info as _info From scipy-svn at scipy.org Sun Sep 23 11:59:18 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sun, 23 Sep 2007 10:59:18 -0500 (CDT) Subject: [Scipy-svn] r3365 - trunk Message-ID: <20070923155918.1B87339C075@new.scipy.org> Author: jarrod.millman Date: 2007-09-23 10:59:15 -0500 (Sun, 23 Sep 2007) New Revision: 3365 Modified: trunk/TOCHANGE.txt Log: added a couple of things to roadmap Modified: trunk/TOCHANGE.txt =================================================================== --- trunk/TOCHANGE.txt 2007-09-23 04:16:52 UTC (rev 3364) +++ trunk/TOCHANGE.txt 2007-09-23 15:59:15 UTC (rev 3365) @@ -19,6 +19,12 @@ * Use finfo(foo).bar instead of limits.foo_bar (see r3358 and r3362) +* Comply with Python Style Guide + + * use CamelCase for class names + +* Improve testing (e.g., increased coverage) + * Get rid of sandbox * move packages to From scipy-svn at scipy.org Sun Sep 23 15:17:54 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sun, 23 Sep 2007 14:17:54 -0500 (CDT) Subject: [Scipy-svn] r3366 - trunk/scipy/optimize Message-ID: <20070923191754.1A6C639C0FC@new.scipy.org> Author: dmitrey.kroshko Date: 2007-09-23 14:17:48 -0500 (Sun, 23 Sep 2007) New Revision: 3366 Modified: trunk/scipy/optimize/cobyla.py trunk/scipy/optimize/optimize.py Log: docstrings: info message about calling bfgs and cobyla from OpenOpt Modified: trunk/scipy/optimize/cobyla.py =================================================================== --- trunk/scipy/optimize/cobyla.py 2007-09-23 15:59:15 UTC (rev 3365) +++ 
trunk/scipy/optimize/cobyla.py 2007-09-23 19:17:48 UTC (rev 3366) @@ -47,6 +47,8 @@ See also: + scikits.openopt, which offers a unified syntax to call this and other solvers + fmin, fmin_powell, fmin_cg, fmin_bfgs, fmin_ncg -- multivariate local optimizers leastsq -- nonlinear least squares minimizer Modified: trunk/scipy/optimize/optimize.py =================================================================== --- trunk/scipy/optimize/optimize.py 2007-09-23 15:59:15 UTC (rev 3365) +++ trunk/scipy/optimize/optimize.py 2007-09-23 19:17:48 UTC (rev 3366) @@ -684,7 +684,9 @@ return a list of results at each iteration if non-zero :SeeAlso: - + + scikits.openopt, which offers a unified syntax to call this and other solvers + fmin, fmin_powell, fmin_cg, fmin_bfgs, fmin_ncg -- multivariate local optimizers leastsq -- nonlinear least squares minimizer From scipy-svn at scipy.org Mon Sep 24 10:34:07 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Mon, 24 Sep 2007 09:34:07 -0500 (CDT) Subject: [Scipy-svn] r3367 - in trunk/scipy/ndimage: . src tests Message-ID: <20070924143407.BF63F39C21E@new.scipy.org> Author: stefan Date: 2007-09-24 09:33:35 -0500 (Mon, 24 Sep 2007) New Revision: 3367 Modified: trunk/scipy/ndimage/measurements.py trunk/scipy/ndimage/src/ni_measure.c trunk/scipy/ndimage/tests/test_ndimage.py Log: Some systems have DBL_MIN defined as 0. Changed to use -DBL_MAX instead. Fixes ticket #501. Modified: trunk/scipy/ndimage/measurements.py =================================================================== --- trunk/scipy/ndimage/measurements.py 2007-09-23 19:17:48 UTC (rev 3366) +++ trunk/scipy/ndimage/measurements.py 2007-09-24 14:33:35 UTC (rev 3367) @@ -197,12 +197,13 @@ return _nd_image.statistics(input, labels, index, 3) -def maximum(input, labels = None, index = None): - """Calculate the maximum of the values of the array. +def maximum(input, labels=None, index=None): + """Return the maximum input value. The index parameter is a single label number or a sequence of label numbers of the objects to be measured. If index is None, all values are used where labels is larger than zero. 
+ """ input = numpy.asarray(input) if numpy.iscomplexobj(input): Modified: trunk/scipy/ndimage/src/ni_measure.c =================================================================== --- trunk/scipy/ndimage/src/ni_measure.c 2007-09-23 19:17:48 UTC (rev 3366) +++ trunk/scipy/ndimage/src/ni_measure.c 2007-09-24 14:33:35 UTC (rev 3367) @@ -545,7 +545,7 @@ if (minimum) minimum[jj] = DBL_MAX; if (maximum) - maximum[jj] = DBL_MIN; + maximum[jj] = -DBL_MAX; if (min_pos) min_pos[jj] = 0; if (max_pos) @@ -576,7 +576,7 @@ if (min_pos) min_pos[idx] = jj; } - if (maximum && val > maximum[idx]) { + if (maximum && (val > maximum[idx])) { maximum[idx] = val; if (max_pos) max_pos[idx] = jj; @@ -596,7 +596,7 @@ } if (maximum) { for(jj = 0; jj < n_results; jj++) { - if (!(maximum[jj] > DBL_MIN)) + if (!(maximum[jj] > -DBL_MAX)) maximum[jj] = 0.0; } } Modified: trunk/scipy/ndimage/tests/test_ndimage.py =================================================================== --- trunk/scipy/ndimage/tests/test_ndimage.py 2007-09-23 19:17:48 UTC (rev 3366) +++ trunk/scipy/ndimage/tests/test_ndimage.py 2007-09-24 14:33:35 UTC (rev 3367) @@ -2933,6 +2933,11 @@ index = [2, 3, 8]) self.failUnless(output == [3.0, 4.0, 0.0]) + def test_maximum05(self): + "Ticket #501" + x = numpy.array([-3,-2,-1]) + assert_equal(ndimage.maximum(x),-1) + def test_variance01(self): "variance 1" for type in self.types: From scipy-svn at scipy.org Mon Sep 24 11:30:54 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Mon, 24 Sep 2007 10:30:54 -0500 (CDT) Subject: [Scipy-svn] r3368 - trunk/scipy/sandbox Message-ID: <20070924153054.015F039C1DF@new.scipy.org> Author: cookedm Date: 2007-09-24 10:30:52 -0500 (Mon, 24 Sep 2007) New Revision: 3368 Modified: trunk/scipy/sandbox/setup.py Log: Skip blank lines in scipy/sandbox/enabled_packages.txt. Should fix #504 Modified: trunk/scipy/sandbox/setup.py =================================================================== --- trunk/scipy/sandbox/setup.py 2007-09-24 14:33:35 UTC (rev 3367) +++ trunk/scipy/sandbox/setup.py 2007-09-24 15:30:52 UTC (rev 3368) @@ -15,7 +15,7 @@ else: for line in sandbox_file: p = line.strip() - if line.startswith('#'): + if not p or p.startswith('#'): continue sandbox_packages.append(p) sandbox_file.close() From scipy-svn at scipy.org Tue Sep 25 17:49:09 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 25 Sep 2007 16:49:09 -0500 (CDT) Subject: [Scipy-svn] r3369 - trunk/scipy/optimize Message-ID: <20070925214909.C512039C0C6@new.scipy.org> Author: stefan Date: 2007-09-25 16:48:55 -0500 (Tue, 25 Sep 2007) New Revision: 3369 Modified: trunk/scipy/optimize/optimize.py Log: Reformat optimize documentation. Modified: trunk/scipy/optimize/optimize.py =================================================================== --- trunk/scipy/optimize/optimize.py 2007-09-24 15:30:52 UTC (rev 3368) +++ trunk/scipy/optimize/optimize.py 2007-09-25 21:48:55 UTC (rev 3369) @@ -20,6 +20,8 @@ 'rosen_hess', 'rosen_hess_prod', 'brute', 'approx_fprime', 'line_search', 'check_grad'] +__docformat__ = "restructuredtext en" + import numpy from numpy import atleast_1d, eye, mgrid, argmin, zeros, shape, empty, \ squeeze, isscalar, vectorize, asarray, absolute, sqrt, Inf, asfarray, isinf @@ -98,76 +100,58 @@ def fmin(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None, full_output=0, disp=1, retall=0, callback=None): """Minimize a function using the downhill simplex algorithm. - - :Parameters: - func : the Python function or method to be minimized. 
- x0 : ndarray - the initial guess. - args : extra arguments for func. - callback : an optional user-supplied function to call after each - iteration. It is called as callback(xk), where xk is the - current parameter vector. + *Parameters*: - :Returns: (xopt, {fopt, iter, funcalls, warnflag}) + func : callable func(x,*args) + The objective function to be minimized. + x0 : ndarray + Initial guess. + args : tuple + Extra arguments passed to func, i.e. ``f(x,*args)``. + callback : callable + Called after each iteration, as callback(xk), where xk is the + current parameter vector. - xopt : ndarray - minimizer of function - fopt : number - value of function at minimum: fopt = func(xopt) - iter : number - number of iterations - funcalls : number - number of function calls - warnflag : number - Integer warning flag: - 1 : 'Maximum number of function evaluations.' - 2 : 'Maximum number of iterations.' - allvecs : Python list - a list of solutions at each iteration + *Returns*: (xopt, {fopt, iter, funcalls, warnflag}) - :OtherParameters: + xopt : ndarray + Parameter that minimizes function. + fopt : float + Value of function at minimum: ``fopt = func(xopt)``. + iter : int + Number of iterations performed. + funcalls : int + Number of function calls made. + warnflag : int + 1 : Maximum number of function evaluations made. + 2 : Maximum number of iterations reached. + allvecs : list + Solution at each iteration. - xtol : number - acceptable relative error in xopt for convergence. - ftol : number - acceptable relative error in func(xopt) for convergence. - maxiter : number - the maximum number of iterations to perform. - maxfun : number - the maximum number of function evaluations. - full_output : number - non-zero if fval and warnflag outputs are desired. - disp : number - non-zero to print convergence messages. - retall : number - non-zero to return list of solutions at each iteration + *Other Parameters*: - :SeeAlso: + xtol : float + Relative error in xopt acceptable for convergence. + ftol : number + Relative error in func(xopt) acceptable for convergence. + maxiter : int + Maximum number of iterations to perform. + maxfun : number + Maximum number of function evaluations to make. + full_output : bool + Set to True if fval and warnflag outputs are desired. + disp : bool + Set to True to print convergence messages. + retall : bool + Set to True to return list of solutions at each iteration. - fmin, fmin_powell, fmin_cg, - fmin_bfgs, fmin_ncg -- multivariate local optimizers - leastsq -- nonlinear least squares minimizer + *Notes* - fmin_l_bfgs_b, fmin_tnc, - fmin_cobyla -- constrained multivariate optimizers + Uses a Nelder-Mead simplex algorithm to find the minimum of + function of one or more variables. - anneal, brute -- global optimizers - - fminbound, brent, golden, bracket -- local scalar minimizers - - fsolve -- n-dimenstional root-finding - - brentq, brenth, ridder, bisect, newton -- one-dimensional root-finding - - fixed_point -- scalar fixed-point finder - - Notes - - ----------- - - Uses a Nelder-Mead simplex algorithm to find the minimum of function - of one or more variables. - """ + """ fcalls, func = wrap_function(func, args) x0 = asfarray(x0).flatten() N = len(x0) @@ -421,34 +405,44 @@ def line_search(f, myfprime, xk, pk, gfk, old_fval, old_old_fval, args=(), c1=1e-4, c2=0.9, amax=50): """Find alpha that satisfies strong Wolfe conditions. 
- - :Parameters: - - f : objective function - myfprime : objective function gradient (can be None) - xk : ndarray -- start point - pk : ndarray -- search direction - gfk : ndarray -- gradient value for x=xk - args : additional arguments for user functions - c1 : number -- parameter for Armijo condition rule - c2 : number - parameter for curvature condition rule - - :Returns: - - alpha0 : number -- required alpha (x_new = x0 + alpha * pk) - fc : number of function evaluations - gc : number of gradient evaluations - - - Notes - - -------------------------------- - - Uses the line search algorithm to enforce strong Wolfe conditions - Wright and Nocedal, 'Numerical Optimization', 1999, pg. 59-60 - For the zoom phase it uses an algorithm by + *Parameters*: + f : callable f(x,*args) + Objective function. + myfprime : callable f'(x,*args) + Objective function gradient (can be None). + xk : ndarray + Starting point. + pk : ndarray + Search direction. + gfk : ndarray + Gradient value for x=xk (xk being the current parameter + estimate). + args : tuple + Additional arguments passed to objective function. + c1 : float + Parameter for Armijo condition rule. + c2 : float + Parameter for curvature condition rule. + + *Returns*: + + alpha0 : float + Alpha for which ``x_new = x0 + alpha * pk``. + fc : int + Number of function evaluations made. + gc : int + Number of gradient evaluations made. + + *Notes* + + Uses the line search algorithm to enforce strong Wolfe + conditions. See Wright and Nocedal, 'Numerical Optimization', + 1999, pg. 59-60. + + For the zoom phase it uses an algorithm by [...]. + """ global _ls_fc, _ls_gc, _ls_ingfk @@ -541,28 +535,29 @@ break if fprime_star is not None: - # fprime_star is a number (derphi) -- so use the most recently calculated gradient - # used in computing it derphi = gfk*pk - # this is the gradient at the next step - # no need to compute it again in the outer loop. + # fprime_star is a number (derphi) -- so use the most recently + # calculated gradient used in computing it derphi = gfk*pk + # this is the gradient at the next step no need to compute it + # again in the outer loop. fprime_star = _ls_ingfk return alpha_star, _ls_fc, _ls_gc, fval_star, old_fval, fprime_star def line_search_BFGS(f, xk, pk, gfk, old_fval, args=(), c1=1e-4, alpha0=1): - """Minimize over alpha, the function f(xk+alpha pk) + """Minimize over alpha, the function ``f(xk+alpha pk)``. Uses the interpolation algorithm (Armiijo backtracking) as suggested by Wright and Nocedal in 'Numerical Optimization', 1999, pg. 56-57 - :Returns: (alpha, fc, gc) + *Returns*: (alpha, fc, gc) + """ - + xk = atleast_1d(xk) fc = 0 - phi0 = old_fval # compute f(xk) -- done in past loop - phi_a0 = f(*((xk+alpha0*pk,)+args)) + phi0 = old_fval # compute f(xk) -- done in past loop + phi_a0 = f(*((xk+alpha0*pk,)+args)) fc = fc + 1 derphi0 = numpy.dot(gfk,pk) @@ -631,87 +626,72 @@ epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0, callback=None): """Minimize a function using the BFGS algorithm. - - :Parameters: - f : the Python function or method to be minimized. + *Parameters*: + + f : callable f(x,*args) + Objective function to be minimized. x0 : ndarray - the initial guess for the minimizer. + Initial guess. + fprime : callable f'(x,*args) + Gradient of f. + args : tuple + Extra arguments passed to f and fprime. + gtol : float + Gradient norm must be less than gtol before succesful termination. 
+ norm : float + Order of norm (Inf is max, -Inf is min) + epsilon : int or ndarray + If fprime is approximated, use this value for the step size. + callback : callable + An optional user-supplied function to call after each + iteration. Called as callback(xk), where xk is the + current parameter vector. - fprime : a function to compute the gradient of f. - args : extra arguments to f and fprime. - gtol : number - gradient norm must be less than gtol before succesful termination - norm : number - order of norm (Inf is max, -Inf is min) - epsilon : number - if fprime is approximated use this value for - the step size (can be scalar or vector) - callback : an optional user-supplied function to call after each - iteration. It is called as callback(xk), where xk is the - current parameter vector. + *Returns*: (xopt, {fopt, gopt, Hopt, func_calls, grad_calls, warnflag}, ) - :Returns: (xopt, {fopt, gopt, Hopt, func_calls, grad_calls, warnflag}, ) + xopt : ndarray + Parameters which minimize f, i.e. f(xopt) == fopt. + fopt : float + Minimum value. + gopt : ndarray + Value of gradient at minimum, f'(xopt), which should be near 0. + Bopt : ndarray + Value of 1/f''(xopt), i.e. the inverse hessian matrix. + func_calls : int + Number of function_calls made. + grad_calls : int + Number of gradient calls made. + warnflag : integer + 1 : Maximum number of iterations exceeded. + 2 : Gradient and/or function calls not changing. + allvecs : list + Results at each iteration. Only returned if retall is True. - xopt : ndarray - the minimizer of f. + *Other Parameters*: + maxiter : int + Maximum number of iterations to perform. + full_output : bool + If True,return fopt, func_calls, grad_calls, and warnflag + in addition to xopt. + disp : bool + Print convergence message if True. + retall : bool + Return a list of results at each iteration if True. - fopt : number - the value of f(xopt). - gopt : ndarray - the value of f'(xopt). (Should be near 0) - Bopt : ndarray - the value of 1/f''(xopt). (inverse hessian matrix) - func_calls : number - the number of function_calls. - grad_calls : number - the number of gradient calls. - warnflag : integer - 1 : 'Maximum number of iterations exceeded.' - 2 : 'Gradient and/or function calls not changing' - allvecs : a list of all iterates (only returned if retall==1) + *Notes* - :OtherParameters: + Optimize the function, f, whose gradient is given by fprime + using the quasi-Newton method of Broyden, Fletcher, Goldfarb, + and Shanno (BFGS) See Wright, and Nocedal 'Numerical + Optimization', 1999, pg. 198. - maxiter : number - the maximum number of iterations. - full_output : number - if non-zero then return fopt, func_calls, grad_calls, - and warnflag in addition to xopt. - disp : number - print convergence message if non-zero. - retall : number - return a list of results at each iteration if non-zero + *See Also*: - :SeeAlso: - - scikits.openopt, which offers a unified syntax to call this and other solvers - - fmin, fmin_powell, fmin_cg, - fmin_bfgs, fmin_ncg -- multivariate local optimizers - leastsq -- nonlinear least squares minimizer + scikits.openopt : SciKit which offers a unified syntax to call + this and other solvers. 
- fmin_l_bfgs_b, fmin_tnc, - fmin_cobyla -- constrained multivariate optimizers - - anneal, brute -- global optimizers - - fminbound, brent, golden, bracket -- local scalar minimizers - - fsolve -- n-dimenstional root-finding - - brentq, brenth, ridder, bisect, newton -- one-dimensional root-finding - - fixed_point -- scalar fixed-point finder - - Notes - - ---------------------------------- - - Optimize the function, f, whose gradient is given by fprime using the - quasi-Newton method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS) - See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 198. - """ + """ x0 = asarray(x0).squeeze() if x0.ndim == 0: x0.shape = (1,) @@ -747,7 +727,7 @@ if alpha_k is None: # This line search also failed to find a better solution. warnflag = 2 - break + break xkp1 = xk + alpha_k * pk if retall: allvecs.append(xkp1) @@ -767,7 +747,7 @@ try: # this was handled in numeric, let it remain for more safety rhok = 1.0 / (numpy.dot(yk,sk)) - except ZeroDivisionError: + except ZeroDivisionError: rhok = 1000.0 print "Divide-by-zero encountered: rhok assumed large" if isinf(rhok): # this is patch for numpy @@ -782,7 +762,8 @@ fval = old_fval if warnflag == 2: if disp: - print "Warning: Desired error not necessarily achieved due to precision loss" + print "Warning: Desired error not necessarily achieved " \ + "due to precision loss" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % func_calls[0] @@ -818,81 +799,65 @@ def fmin_cg(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf, epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0, callback=None): - """Minimize a function with nonlinear conjugate gradient algorithm. + """Minimize a function using a nonlinear conjugate gradient algorithm. - :Parameters: + *Parameters*: + f : callable f(x,*args) + Objective function to be minimized. + x0 : ndarray + Initial guess. + fprime : callable f'(x,*args) + Function which computes the gradient of f. + args : tuple + Extra arguments passed to f and fprime. + gtol : float + Stop when norm of gradient is less than gtol. + norm : float + Order of vector norm to use. -Inf is min, Inf is max. + epsilon : float or ndarray + If fprime is approximated, use this value for the step + size (can be scalar or vector). + callback : callable + An optional user-supplied function, called after each + iteration. Called as callback(xk), where xk is the + current parameter vector. - f -- the Python function or method to be minimized. - x0 : ndarray -- the initial guess for the minimizer. + *Returns*: (xopt, {fopt, func_calls, grad_calls, warnflag}, {allvecs}) - fprime -- a function to compute the gradient of f. - args -- extra arguments to f and fprime. - gtol : number - stop when norm of gradient is less than gtol - norm : number - order of vector norm to use - epsilon :number - if fprime is approximated use this value for - the step size (can be scalar or vector) - callback -- an optional user-supplied function to call after each - iteration. It is called as callback(xk), where xk is the - current parameter vector + xopt : ndarray + Parameters which minimize f, i.e. f(xopt) == fopt. + fopt : float + Minimum value found, f(xopt). + func_calls : int + The number of function_calls made. + grad_calls : int + The number of gradient calls made. + warnflag : int + 1 : Maximum number of iterations exceeded. + 2 : Gradient and/or function calls not changing.
+ allvecs : ndarray + If retall is True (see other parameters below), then this + vector containing the result at each iteration is returned. - :Returns: (xopt, {fopt, func_calls, grad_calls, warnflag}, {allvecs}) + *Other Parameters*: + maxiter : int + Maximum number of iterations to perform. + full_output : bool + If True then return fopt, func_calls, grad_calls, and + warnflag in addition to xopt. + disp : bool + Print convergence message if True. + retall : bool + return a list of results at each iteration if True. - xopt : ndarray - the minimizer of f. - fopt :number - the value of f(xopt). - func_calls : number - the number of function_calls. - grad_calls : number - the number of gradient calls. - warnflag :number - an integer warning flag: - 1 : 'Maximum number of iterations exceeded.' - 2 : 'Gradient and/or function calls not changing' - allvecs : ndarray - if retall then this vector of the iterates is returned + *Notes* - :OtherParameters: + Optimize the function, f, whose gradient is given by fprime + using the nonlinear conjugate gradient algorithm of Polak and + Ribiere See Wright, and Nocedal 'Numerical Optimization', + 1999, pg. 120-122. - maxiter :number - the maximum number of iterations. - full_output : number - if non-zero then return fopt, func_calls, grad_calls, - and warnflag in addition to xopt. - disp : number - print convergence message if non-zero. - retall : number - return a list of results at each iteration if True - - :SeeAlso: - - fmin, fmin_powell, fmin_cg, - fmin_bfgs, fmin_ncg -- multivariate local optimizers - leastsq -- nonlinear least squares minimizer - - fmin_l_bfgs_b, fmin_tnc, - fmin_cobyla -- constrained multivariate optimizers - - anneal, brute -- global optimizers - - fminbound, brent, golden, bracket -- local scalar minimizers - - fsolve -- n-dimenstional root-finding - - brentq, brenth, ridder, bisect, newton -- one-dimensional root-finding - - fixed_point -- scalar fixed-point finder - - Notes - --------------------------------------------- - - Optimize the function, f, whose gradient is given by fprime using the - nonlinear conjugate gradient algorithm of Polak and Ribiere - See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 120-122. - """ + """ x0 = asarray(x0).flatten() if maxiter is None: maxiter = len(x0)*200 @@ -988,89 +953,75 @@ def fmin_ncg(f, x0, fprime, fhess_p=None, fhess=None, args=(), avextol=1e-5, epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0, callback=None): - """ Minimize the function f using the Newton-CG method. + """Minimize a function using the Newton-CG method. - :Parameters: + *Parameters*: - f -- the Python function or method to be minimized. - x0 : ndarray -- the initial guess for the minimizer. - fprime -- a function to compute the gradient of f: fprime(x, *args) - fhess_p -- a function to compute the Hessian of f times an - arbitrary vector: fhess_p (x, p, *args) - fhess -- a function to compute the Hessian matrix of f. - args -- extra arguments for f, fprime, fhess_p, and fhess (the same - set of extra arguments is supplied to all of these functions). + f : callable f(x,*args) + Objective function to be minimized. + x0 : ndarray + Initial guess. + fprime : callable f'(x,*args) + Gradient of f. + fhess_p : callable fhess_p(x,p,*args) + Function which computes the Hessian of f times an + arbitrary vector, p. + fhess : callable fhess(x,*args) + Function to compute the Hessian matrix of f. 
+ args : tuple + Extra arguments passed to f, fprime, fhess_p, and fhess + (the same set of extra arguments is supplied to all of + these functions). + epsilon : float or ndarray + If fhess is approximated, use this value for the step size. + callback : callable + An optional user-supplied function which is called after + each iteration. Called as callback(xk), where xk is the + current parameter vector. - epsilon : number - if fhess is approximated use this value for - the step size (can be scalar or vector) - callback -- an optional user-supplied function to call after each - iteration. It is called as callback(xk), where xk is the - current parameter vector. + *Returns*: (xopt, {fopt, fcalls, gcalls, hcalls, warnflag},{allvecs}) - :Returns: (xopt, {fopt, fcalls, gcalls, hcalls, warnflag},{allvecs}) + xopt : ndarray + Parameters which minimizer f, i.e. ``f(xopt) == fopt``. + fopt : float + Value of the function at xopt, i.e. ``fopt = f(xopt)``. + fcalls : int + Number of function calls made. + gcalls : int + Number of gradient calls made. + hcalls : int + Number of hessian calls made. + warnflag : int + Warnings generated by the algorithm. + 1 : Maximum number of iterations exceeded. + allvecs : list + The result at each iteration, if retall is True (see below). - xopt : ndarray - the minimizer of f - fopt : number - the value of the function at xopt: fopt = f(xopt) - fcalls : number - the number of function calls - gcalls : number - the number of gradient calls - hcalls : number - the number of hessian calls. - warnflag : number - algorithm warnings: - 1 : 'Maximum number of iterations exceeded.' - allvecs : Python list - a list of all tried iterates + *Other Parameters*: - :OtherParameters: + avextol : float + Convergence is assumed when the average relative error in + the minimizer falls below this amount. + maxiter : int + Maximum number of iterations to perform. + full_output : bool + If True, return the optional outputs. + disp : bool + If True, print convergence message. + retall : bool + If True, return a list of results at each iteration. - avextol : number - Convergence is assumed when the average relative error in - the minimizer falls below this amount. - maxiter : number - Maximum number of iterations to allow. - full_output : number - If non-zero return the optional outputs. - disp : number - If non-zero print convergence message. - retall : bool - return a list of results at each iteration if True + *Notes* - :SeeAlso: + Only one of `fhess_p` or `fhess` need to be given. If `fhess` + is provided, then `fhess_p` will be ignored. If neither `fhess` + nor `fhess_p` is provided, then the hessian product will be + approximated using finite differences on `fprime`. `fhess_p` + must compute the hessian times an arbitrary vector. If it is not + given, finite-differences on `fprime` are used to compute + it. See Wright, and Nocedal 'Numerical Optimization', 1999, + pg. 140. - fmin, fmin_powell, fmin_cg, - fmin_bfgs, fmin_ncg -- multivariate local optimizers - leastsq -- nonlinear least squares minimizer - - fmin_l_bfgs_b, fmin_tnc, - fmin_cobyla -- constrained multivariate optimizers - - anneal, brute -- global optimizers - - fminbound, brent, golden, bracket -- local scalar minimizers - - fsolve -- n-dimenstional root-finding - - brentq, brenth, ridder, bisect, newton -- one-dimensional root-finding - - fixed_point -- scalar fixed-point finder - - Notes - - --------------------------------------------- - - Only one of fhess_p or fhess need be given. 
If fhess is provided, - then fhess_p will be ignored. If neither fhess nor fhess_p is - provided, then the hessian product will be approximated using finite - differences on fprime. fhess_p must compute the hessian times an arbitrary - vector. If it is not given, finite-differences on fprime are used to - compute it. See Wright, and Nocedal 'Numerical Optimization', 1999, - pg. 140. - """ x0 = asarray(x0).flatten() fcalls, f = wrap_function(f, args) @@ -1181,68 +1132,50 @@ full_output=0, disp=1): """Bounded minimization for scalar functions. - :Parameters: + *Parameters*: - func -- the function to be minimized (must accept scalar input and return - scalar output). - x1, x2 : ndarray - the optimization bounds. - args -- extra arguments to pass to function. - xtol : number - the convergence tolerance. - maxfun : number - maximum function evaluations. - full_output : number - Non-zero to return optional outputs. - disp : number - Non-zero to print messages. + func : callable f(x,*args) + Objective function to be minimized (must accept and return scalars). + x1, x2 : ndarray + The optimization bounds. + args : tuple + Extra arguments passed to function. + xtol : float + The convergence tolerance. + maxfun : int + Maximum number of function evaluations allowed. + full_output : bool + If True, return optional outputs. + disp : int + If non-zero, print messages. 0 : no message printing. 1 : non-convergence notification messages only. 2 : print a message on convergence too. 3 : print iteration results. - :Returns: (xopt, {fval, ierr, numfunc}) + *Returns*: (xopt, {fval, ierr, numfunc}) xopt : ndarray - The minimizer of the function over the interval. + Parameters (over given interval) which minimize the + objective function. fval : number - The function value at the minimum point. - ierr : number - An error flag (0 if converged, 1 if maximum number of - function calls reached). - numfunc : number - The number of function calls. + The function value at the minimum point. + ierr : int + An error flag (0 if converged, 1 if maximum number of + function calls reached). + numfunc : int + The number of function calls made. - :SeeAlso: - fmin, fmin_powell, fmin_cg, - fmin_bfgs, fmin_ncg -- multivariate local optimizers - leastsq -- nonlinear least squares minimizer + *Notes* - fmin_l_bfgs_b, fmin_tnc, - fmin_cobyla -- constrained multivariate optimizers + Finds a local minimizer of the scalar function `func` in the + interval x1 < xopt < x2 using Brent's method. (See `brent` + for auto-bracketing). - anneal, brute -- global optimizers - fminbound, brent, golden, bracket -- local scalar minimizers - - fsolve -- n-dimenstional root-finding - - brentq, brenth, ridder, bisect, newton -- one-dimensional root-finding - - fixed_point -- scalar fixed-point finder - - Notes - - ------------------------------------------------------- - - Finds a local minimizer of the scalar function func in the interval - x1 < xopt < x2 using Brent's method. (See brent for auto-bracketing). - - """ - if x1 > x2: raise ValueError, "The lower bound exceeds the upper bound." 
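(A minimal usage sketch of fminbound as documented above, not part of the commit: the quadratic objective is hypothetical, and only the documented fminbound(func, x1, x2, xtol=...) call is assumed.)

    # Hypothetical example: minimize a one-variable quadratic on [0, 4].
    from scipy.optimize import fminbound

    def f(x):
        return (x - 2.0)**2 + 1.0   # minimum at x = 2

    xopt = fminbound(f, 0.0, 4.0, xtol=1e-5)
    print xopt   # expect a value close to 2.0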
@@ -1287,7 +1220,8 @@ e = rat # Check for acceptability of parabola - if ( (abs(p) < abs(0.5*q*r)) and (p > q*(a-xf)) and (p < q*(b-xf))): + if ( (abs(p) < abs(0.5*q*r)) and (p > q*(a-xf)) and \ + (p < q*(b-xf))): rat = (p+0.0) / q; x = xf + rat step = ' parabolic' @@ -1358,7 +1292,8 @@ class Brent: #need to rethink design of __init__ - def __init__(self, func, args=(), tol=1.48e-8, maxiter=500, full_output=0): + def __init__(self, func, args=(), tol=1.48e-8, maxiter=500, + full_output=0): self.func = func self.args = args self.tol = tol @@ -1369,7 +1304,7 @@ self.fval = None self.iter = 0 self.funcalls = 0 - + #need to rethink design of set_bracket (new options, etc) def set_bracket(self, brack = None): self.brack = brack @@ -1490,55 +1425,39 @@ def brent(func, args=(), brack=None, tol=1.48e-8, full_output=0, maxiter=500): - """ Given a function of one-variable and a possible bracketing interval, return the minimum of the function isolated to a fractional precision of - tol. - - :Parameters: - - func - objective func - args - additional arguments (if present) - brack - triple (a,b,c) where (a f(xb) < f(xc). It doesn't always mean that obtained solution will satisfy xa<=x<=xb - - :Parameters: - - func -- objective func - xa, xb : number - bracketing interval - args -- additional arguments (if present) - grow_limit : number - max grow limit - maxiter : number - max iterations number - - :Returns: xa, xb, xc, fa, fb, fc, funcalls - - xa, xb, xc : number - bracket - fa, fb, fc : number - objective function values in bracket - funcalls : number - number of function evaluations + """Given a function and distinct initial points, search in the + downhill direction (as defined by the initial points) and return + new points xa, xb, xc that bracket the minimum of the function + f(xa) > f(xb) < f(xc). This does not always mean that the obtained + solution will satisfy xa<=x<=xb. + + *Parameters*: + + func : callable f(x,*args) + Objective function to minimize. + xa, xb : float + Bracketing interval. + args : tuple + Additional arguments (if present), passed to `func`. + grow_limit : float + Maximum grow limit. + maxiter : int + Maximum number of iterations to perform. + + *Returns*: xa, xb, xc, fa, fb, fc, funcalls + + xa, xb, xc : float + Bracket. + fa, fb, fc : float + Objective function values in bracket. + funcalls : int + Number of function evaluations made. + """ _gold = 1.618034 _verysmall_num = 1e-21 @@ -1731,9 +1638,11 @@ def _linesearch_powell(func, p, xi, tol=1e-3): - # line-search algorithm using fminbound - # find the minimium of the function - # func(x0+ alpha*direc) + """Line-search algorithm using fminbound. + + Find the minimum of the function ``func(x0+ alpha*direc)``. + + """ def myfunc(alpha): return func(p + alpha * xi) alpha_min, fret, iter, num = brent(myfunc, full_output=1, tol=tol) @@ -1746,79 +1655,65 @@ direc=None): """Minimize a function using modified Powell's method. - :Parameters: + *Parameters*: - func -- the Python function or method to be minimized. + func : callable f(x,*args) + Objective function to be minimized. x0 : ndarray - the initial guess. - args -- extra arguments for func - callback -- an optional user-supplied function to call after each - iteration. It is called as callback(xk), where xk is the - current parameter vector - direc -- initial direction set + Initial guess. + args : tuple + Extra arguments passed to func.
+ callback : callable + An optional user-supplied function, called after each + iteration. Called as ``callback(xk)``, where ``xk`` is the + current parameter vector. + direc : ndarray + Initial direction set. - :Returns: (xopt, {fopt, xi, direc, iter, funcalls, warnflag}, {allvecs}) + *Returns*: (xopt, {fopt, xi, direc, iter, funcalls, warnflag}, {allvecs}) - xopt : ndarray - minimizer of function + xopt : ndarray + Parameter which minimizes `func`. + fopt : number + Value of function at minimum: ``fopt = func(xopt)``. + direc : ndarray + Current direction set. + iter : int + Number of iterations. + funcalls : int + Number of function calls made. + warnflag : int + Integer warning flag: + 1 : Maximum number of function evaluations. + 2 : Maximum number of iterations. + allvecs : list + List of solutions at each iteration. - fopt : number - value of function at minimum: fopt = func(xopt) - direc -- current direction set - iter : number - number of iterations - funcalls : number - number of function calls - warnflag : number - Integer warning flag: - 1 : 'Maximum number of function evaluations.' - 2 : 'Maximum number of iterations.' - allvecs : Python list - a list of solutions at each iteration + *Other Parameters*: - :OtherParameters: + xtol : float + Line-search error tolerance. + ftol : float + Relative error in ``func(xopt)`` acceptable for convergence. + maxiter : int + Maximum number of iterations to perform. + maxfun : int + Maximum number of function evaluations to make. + full_output : bool + If True, fopt, xi, direc, iter, funcalls, and + warnflag are returned. + disp : bool + If True, print convergence messages. + retall : bool + If True, return a list of the solution at each iteration. - xtol : number - line-search error tolerance. - ftol : number - acceptable relative error in func(xopt) for convergence. - maxiter : number - the maximum number of iterations to perform. - maxfun : number - the maximum number of function evaluations. - full_output : number - non-zero if fval and warnflag outputs are desired. - disp : number - non-zero to print convergence messages. - retall : number - non-zero to return a list of the solution at each iteration - :SeeAlso: + *Notes* - fmin, fmin_powell, fmin_cg, - fmin_bfgs, fmin_ncg -- multivariate local optimizers - leastsq -- nonlinear least squares minimizer + Uses a modification of Powell's method to find the minimum of + a function of N variables. 
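(A minimal usage sketch of fmin_powell with the keyword arguments documented above, not part of the commit; it assumes only the rosen test function that this module already exports.)

    # Hypothetical example: minimize the Rosenbrock function with
    # Powell's method; disp=0 suppresses the convergence printout.
    from scipy.optimize import fmin_powell, rosen

    x0 = [1.3, 0.7, 0.8]
    xopt = fmin_powell(rosen, x0, xtol=1e-4, ftol=1e-4, disp=0)
    print xopt   # expect a point near [1.0, 1.0, 1.0]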
- fmin_l_bfgs_b, fmin_tnc, - fmin_cobyla -- constrained multivariate optimizers - - anneal, brute -- global optimizers - - fminbound, brent, golden, bracket -- local scalar minimizers - - fsolve -- n-dimenstional root-finding - - brentq, brenth, ridder, bisect, newton -- one-dimensional root-finding - - fixed_point -- scalar fixed-point finder - - Notes - - ----------------------- - - Uses a modification of Powell's method to find the minimum of a function - of N variables - """ + """ # we need to use a mutable object here that we can update in the # wrapper function fcalls, func = wrap_function(func, args) @@ -1877,7 +1772,8 @@ temp = fx-fx2 t -= delta*temp*temp if t < 0.0: - fval, x, direc1 = _linesearch_powell(func, x, direc1, tol=xtol*100) + fval, x, direc1 = _linesearch_powell(func, x, direc1, + tol=xtol*100) direc[bigind] = direc[-1] direc[-1] = direc1 @@ -1917,67 +1813,54 @@ def _endprint(x, flag, fval, maxfun, xtol, disp): if flag == 0: if disp > 1: - print "\nOptimization terminated successfully;\n the returned value" + \ - " satisfies the termination criteria\n (using xtol = ", xtol, ")" + print "\nOptimization terminated successfully;\n" \ + "The returned value satisfies the termination criteria\n" \ + "(using xtol = ", xtol, ")" if flag == 1: - print "\nMaximum number of function evaluations exceeded --- increase maxfun argument.\n" + print "\nMaximum number of function evaluations exceeded --- " \ + "increase maxfun argument.\n" return def brute(func, ranges, args=(), Ns=20, full_output=0, finish=fmin): """Minimize a function over a given range by brute force. - :Parameters: + *Parameters*: - func -- Function to be optimized - ranges : tuple - Tuple where each element is a tuple of parameters - or a slice object to be handed to numpy.mgrid + func : callable f(x,*args) + Objective function to be minimized. + ranges : tuple + Each element is a tuple of parameters or a slice object to + be handed to ``numpy.mgrid``. + args : tuple + Extra arguments passed to function. + Ns : int + Default number of samples, if those are not provided. + full_output : bool + If True, return the evaluation grid. - args -- Extra arguments to function. - Ns : number - Default number of samples if not given - full_output : number - Nonzero to return evaluation grid. + *Returns*: (x0, fval, {grid, Jout}) - :Returns: (x0, fval, {grid, Jout}) + x0 : ndarray + Value of arguments to `func`, giving minimum over the grid. + fval : int + Function value at minimum. + grid : tuple + Representation of the evaluation grid. It has the same + length as x0. + Jout : ndarray + Function values over grid: ``Jout = func(*grid)``. - x0 : ndarray - Value of arguments giving minimum over the grird - fval : number - Function value at minimum - grid : tuple - tuple with same length as x0 representing the evaluation grid - Jout : ndarray -- Function values over grid: Jout = func(*grid) + *Notes* - :SeeAlso: + Find the minimum of a function evaluated on a grid given by + the tuple ranges. 
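(A minimal usage sketch of brute as documented above, not part of the commit; the two-dimensional quadratic objective and the grid are hypothetical.)

    # Hypothetical example: coarse grid search over a 2-d box; the
    # default finish=fmin then polishes the best grid point.
    from scipy.optimize import brute

    def f(z):
        x, y = z
        return (x - 1.0)**2 + (y + 0.5)**2

    ranges = (slice(-2, 2, 0.25), slice(-2, 2, 0.25))
    xmin = brute(f, ranges)
    print xmin   # expect a point near [1.0, -0.5]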
- fmin, fmin_powell, fmin_cg, - fmin_bfgs, fmin_ncg -- multivariate local optimizers - leastsq -- nonlinear least squares minimizer - - fmin_l_bfgs_b, fmin_tnc, - fmin_cobyla -- constrained multivariate optimizers - - anneal, brute -- global optimizers - - fminbound, brent, golden, bracket -- local scalar minimizers - - fsolve -- n-dimenstional root-finding - - brentq, brenth, ridder, bisect, newton -- one-dimensional root-finding - - fixed_point -- scalar fixed-point finder - - Notes - - ------------------ - - Find the minimum of a function evaluated on a grid given by the tuple ranges. """ N = len(ranges) if N > 40: - raise ValueError, "Brute Force not possible with more than 40 variables." + raise ValueError, "Brute Force not possible with more " \ + "than 40 variables." lrange = list(ranges) for k in range(N): if type(lrange[k]) is not type(slice(None)): From scipy-svn at scipy.org Wed Sep 26 15:49:16 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 26 Sep 2007 14:49:16 -0500 (CDT) Subject: [Scipy-svn] r3370 - trunk/scipy/special/tests Message-ID: <20070926194916.1A3C339C1F5@new.scipy.org> Author: stefan Date: 2007-09-26 14:48:46 -0500 (Wed, 26 Sep 2007) New Revision: 3370 Modified: trunk/scipy/special/tests/test_basic.py Log: Import special from scipy namespace. Modified: trunk/scipy/special/tests/test_basic.py =================================================================== --- trunk/scipy/special/tests/test_basic.py 2007-09-25 21:48:55 UTC (rev 3369) +++ trunk/scipy/special/tests/test_basic.py 2007-09-26 19:48:46 UTC (rev 3370) @@ -36,7 +36,7 @@ from numpy.testing import * set_package_path() -from special import * +from scipy.special import * import scipy.special._cephes as cephes restore_path() From scipy-svn at scipy.org Wed Sep 26 23:35:08 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 26 Sep 2007 22:35:08 -0500 (CDT) Subject: [Scipy-svn] r3371 - in trunk/scipy/sandbox/maskedarray: . 
alternative_versions tests Message-ID: <20070927033508.423CD39C00F@new.scipy.org> Author: pierregm Date: 2007-09-26 22:34:44 -0500 (Wed, 26 Sep 2007) New Revision: 3371 Added: trunk/scipy/sandbox/maskedarray/alternative_versions/core_initial.py trunk/scipy/sandbox/maskedarray/bench.py Modified: trunk/scipy/sandbox/maskedarray/__init__.py trunk/scipy/sandbox/maskedarray/core.py trunk/scipy/sandbox/maskedarray/extras.py trunk/scipy/sandbox/maskedarray/morestats.py trunk/scipy/sandbox/maskedarray/mrecords.py trunk/scipy/sandbox/maskedarray/mstats.py trunk/scipy/sandbox/maskedarray/tests/test_core.py trunk/scipy/sandbox/maskedarray/tests/test_mrecords.py trunk/scipy/sandbox/maskedarray/tests/test_subclassing.py Log: core : * simplified __getitem__ and __setitem__ * no automatic shrinking of the mask * force prefilling on function domain * prevent unnecessary filling on function arguments * introduced fix_invalid and masked_invalid * reintroduced _sharedmask to manage mask propagation * updated doc core._arraymethods: make sure the result has the same generic attributes as the initial data Modified: trunk/scipy/sandbox/maskedarray/__init__.py =================================================================== --- trunk/scipy/sandbox/maskedarray/__init__.py 2007-09-26 19:48:46 UTC (rev 3370) +++ trunk/scipy/sandbox/maskedarray/__init__.py 2007-09-27 03:34:44 UTC (rev 3371) @@ -17,6 +17,7 @@ import extras from extras import * +import _nfcore __all__ = ['core', 'extras'] __all__ += core.__all__ Added: trunk/scipy/sandbox/maskedarray/alternative_versions/core_initial.py =================================================================== --- trunk/scipy/sandbox/maskedarray/alternative_versions/core_initial.py 2007-09-26 19:48:46 UTC (rev 3370) +++ trunk/scipy/sandbox/maskedarray/alternative_versions/core_initial.py 2007-09-27 03:34:44 UTC (rev 3371) @@ -0,0 +1,2708 @@ +# pylint: disable-msg=E1002 +"""MA: a facility for dealing with missing observations +MA is generally used as a numpy.array look-alike. +by Paul F. Dubois. + +Copyright 1999, 2000, 2001 Regents of the University of California. +Released for unlimited redistribution. +Adapted for numpy_core 2005 by Travis Oliphant and +(mainly) Paul Dubois. + +Subclassing of the base ndarray 2006 by Pierre Gerard-Marchant. 
+pgmdevlist_AT_gmail_DOT_com +Improvements suggested by Reggie Dugard (reggie_AT_merfinllc_DOT_com) + +:author: Pierre Gerard-Marchant +:contact: pierregm_at_uga_dot_edu +:version: $Id: core.py 254 2007-08-15 04:11:52Z backtopop $ +""" +__author__ = "Pierre GF Gerard-Marchant ($Author: backtopop $)" +__version__ = '1.0' +__revision__ = "$Revision: 254 $" +__date__ = '$Date: 2007-08-15 00:11:52 -0400 (Wed, 15 Aug 2007) $' + +__all__ = ['MAError', 'MaskType', 'MaskedArray', + 'bool_', 'complex_', 'float_', 'int_', 'object_', + 'abs', 'absolute', 'add', 'all', 'allclose', 'allequal', 'alltrue', + 'amax', 'amin', 'anom', 'anomalies', 'any', 'arange', + 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2', + 'arctanh', 'argmax', 'argmin', 'argsort', 'around', + 'array', 'asarray', + 'bitwise_and', 'bitwise_or', 'bitwise_xor', + 'ceil', 'choose', 'compressed', 'concatenate', 'conjugate', + 'cos', 'cosh', 'count', + 'diagonal', 'divide', 'dump', 'dumps', + 'empty', 'empty_like', 'equal', 'exp', + 'fabs', 'fmod', 'filled', 'floor', 'floor_divide', + 'getmask', 'getmaskarray', 'greater', 'greater_equal', 'hypot', + 'ids', 'inner', 'innerproduct', + 'isMA', 'isMaskedArray', 'is_mask', 'is_masked', 'isarray', + 'left_shift', 'less', 'less_equal', 'load', 'loads', 'log', 'log10', + 'logical_and', 'logical_not', 'logical_or', 'logical_xor', + 'make_mask', 'make_mask_none', 'mask_or', 'masked', + 'masked_array', 'masked_equal', 'masked_greater', + 'masked_greater_equal', 'masked_inside', 'masked_less', + 'masked_less_equal', 'masked_not_equal', 'masked_object', + 'masked_outside', 'masked_print_option', 'masked_singleton', + 'masked_values', 'masked_where', 'max', 'maximum', 'mean', 'min', + 'minimum', 'multiply', + 'negative', 'nomask', 'nonzero', 'not_equal', + 'ones', 'outer', 'outerproduct', + 'power', 'product', 'ptp', 'put', 'putmask', + 'rank', 'ravel', 'remainder', 'repeat', 'reshape', 'resize', + 'right_shift', 'round_', + 'shape', 'sin', 'sinh', 'size', 'sometrue', 'sort', 'sqrt', 'std', + 'subtract', 'sum', 'swapaxes', + 'take', 'tan', 'tanh', 'transpose', 'true_divide', + 'var', 'where', + 'zeros'] + +import sys +import types +import cPickle +import operator +# +import numpy +from numpy import bool_, complex_, float_, int_, object_, str_ + +import numpy.core.umath as umath +import numpy.core.fromnumeric as fromnumeric +import numpy.core.numeric as numeric +import numpy.core.numerictypes as ntypes +from numpy import bool_, dtype, typecodes, amax, amin, ndarray +from numpy import expand_dims as n_expand_dims +import warnings + + +MaskType = bool_ +nomask = MaskType(0) + +divide_tolerance = 1.e-35 +numpy.seterr(all='ignore') + +# TODO: There's still a problem with N.add.reduce not working... +# TODO: ...neither does N.add.accumulate + +#####-------------------------------------------------------------------------- +#---- --- Exceptions --- +#####-------------------------------------------------------------------------- +class MAError(Exception): + "Class for MA related errors." + def __init__ (self, args=None): + "Creates an exception." + Exception.__init__(self,args) + self.args = args + def __str__(self): + "Calculates the string representation." 
+ return str(self.args) + __repr__ = __str__ + +#####-------------------------------------------------------------------------- +#---- --- Filling options --- +#####-------------------------------------------------------------------------- +# b: boolean - c: complex - f: floats - i: integer - O: object - S: string +default_filler = {'b': True, + 'c' : 1.e20 + 0.0j, + 'f' : 1.e20, + 'i' : 999999, + 'O' : '?', + 'S' : 'N/A', + 'u' : 999999, + 'V' : '???', + } +max_filler = ntypes._minvals +max_filler.update([(k,-numeric.inf) for k in [numpy.float32, numpy.float64]]) +min_filler = ntypes._maxvals +min_filler.update([(k,numeric.inf) for k in [numpy.float32, numpy.float64]]) +if 'float128' in ntypes.typeDict: + max_filler.update([(numpy.float128,-numeric.inf)]) + min_filler.update([(numpy.float128, numeric.inf)]) + + +def default_fill_value(obj): + "Calculates the default fill value for an object `obj`." + if hasattr(obj,'dtype'): + defval = default_filler[obj.dtype.kind] + elif isinstance(obj, numeric.dtype): + defval = default_filler[obj.kind] + elif isinstance(obj, float): + defval = default_filler['f'] + elif isinstance(obj, int) or isinstance(obj, long): + defval = default_filler['i'] + elif isinstance(obj, str): + defval = default_filler['S'] + elif isinstance(obj, complex): + defval = default_filler['c'] + else: + defval = default_filler['O'] + return defval + +def minimum_fill_value(obj): + "Calculates the default fill value suitable for taking the minimum of `obj`." + if hasattr(obj, 'dtype'): + objtype = obj.dtype + filler = min_filler[objtype] + if filler is None: + raise TypeError, 'Unsuitable type for calculating minimum.' + return filler + elif isinstance(obj, float): + return min_filler[ntypes.typeDict['float_']] + elif isinstance(obj, int): + return min_filler[ntypes.typeDict['int_']] + elif isinstance(obj, long): + return min_filler[ntypes.typeDict['uint']] + elif isinstance(obj, numeric.dtype): + return min_filler[obj] + else: + raise TypeError, 'Unsuitable type for calculating minimum.' + +def maximum_fill_value(obj): + "Calculates the default fill value suitable for taking the maximum of `obj`." + if hasattr(obj, 'dtype'): + objtype = obj.dtype + filler = max_filler[objtype] + if filler is None: + raise TypeError, 'Unsuitable type for calculating minimum.' + return filler + elif isinstance(obj, float): + return max_filler[ntypes.typeDict['float_']] + elif isinstance(obj, int): + return max_filler[ntypes.typeDict['int_']] + elif isinstance(obj, long): + return max_filler[ntypes.typeDict['uint']] + elif isinstance(obj, numeric.dtype): + return max_filler[obj] + else: + raise TypeError, 'Unsuitable type for calculating minimum.' + +def set_fill_value(a, fill_value): + "Sets the fill value of `a` if it is a masked array." + if isinstance(a, MaskedArray): + a.set_fill_value(fill_value) + +def get_fill_value(a): + """Returns the fill value of `a`, if any. + Otherwise, returns the default fill value for that type. + """ + if isinstance(a, MaskedArray): + result = a.fill_value + else: + result = default_fill_value(a) + return result + +def common_fill_value(a, b): + "Returns the common fill_value of `a` and `b`, if any, or `None`." + t1 = get_fill_value(a) + t2 = get_fill_value(b) + if t1 == t2: + return t1 + return None + +#................................................ +def filled(a, value = None): + """Returns `a` as an array with masked data replaced by `value`. +If `value` is `None` or the special element `masked`, `get_fill_value(a)` +is used instead. 
+ +If `a` is already a contiguous numeric array, `a` itself is returned. + +`filled(a)` can be used to be sure that the result is numeric when passing +an object a to other software ignorant of MA, in particular to numpy itself. + """ + if hasattr(a, 'filled'): + return a.filled(value) + elif isinstance(a, ndarray): # and a.flags['CONTIGUOUS']: + return a + elif isinstance(a, dict): + return numeric.array(a, 'O') + else: + return numeric.array(a) + +def get_masked_subclass(*arrays): + """Returns the youngest subclass of MaskedArray from a list of arrays, + or MaskedArray. In case of siblings, the first takes over.""" + if len(arrays) == 1: + arr = arrays[0] + if isinstance(arr, MaskedArray): + rcls = type(arr) + else: + rcls = MaskedArray + else: + arrcls = [type(a) for a in arrays] + rcls = arrcls[0] + if not issubclass(rcls, MaskedArray): + rcls = MaskedArray + for cls in arrcls[1:]: + if issubclass(cls, rcls): + rcls = cls + return rcls + +#####-------------------------------------------------------------------------- +#---- --- Ufuncs --- +#####-------------------------------------------------------------------------- +ufunc_domain = {} +ufunc_fills = {} + +class domain_check_interval: + """Defines a valid interval, +so that `domain_check_interval(a,b)(x) = true` where `x < a` or `x > b`.""" + def __init__(self, a, b): + "domain_check_interval(a,b)(x) = true where x < a or y > b" + if (a > b): + (a, b) = (b, a) + self.a = a + self.b = b + + def __call__ (self, x): + "Execute the call behavior." + return umath.logical_or(umath.greater (x, self.b), + umath.less(x, self.a)) +#............................ +class domain_tan: + """Defines a valid interval for the `tan` function, +so that `domain_tan(eps) = True where `abs(cos(x)) < eps`""" + def __init__(self, eps): + "domain_tan(eps) = true where abs(cos(x)) < eps)" + self.eps = eps + def __call__ (self, x): + "Execute the call behavior." + return umath.less(umath.absolute(umath.cos(x)), self.eps) +#............................ +class domain_safe_divide: + """defines a domain for safe division.""" + def __init__ (self, tolerance=divide_tolerance): + self.tolerance = tolerance + def __call__ (self, a, b): + return umath.absolute(a) * self.tolerance >= umath.absolute(b) +#............................ +class domain_greater: + "domain_greater(v)(x) = true where x <= v" + def __init__(self, critical_value): + "domain_greater(v)(x) = true where x <= v" + self.critical_value = critical_value + + def __call__ (self, x): + "Execute the call behavior." + return umath.less_equal(x, self.critical_value) +#............................ +class domain_greater_equal: + "domain_greater_equal(v)(x) = true where x < v" + def __init__(self, critical_value): + "domain_greater_equal(v)(x) = true where x < v" + self.critical_value = critical_value + + def __call__ (self, x): + "Execute the call behavior." + return umath.less(x, self.critical_value) +#.............................................................................. +class masked_unary_operation: + """Defines masked version of unary operations, +where invalid values are pre-masked. + +:IVariables: + - `f` : function. + - `fill` : Default filling value *[0]*. + - `domain` : Default domain *[None]*. + """ + def __init__ (self, mufunc, fill=0, domain=None): + """ masked_unary_operation(aufunc, fill=0, domain=None) + aufunc(fill) must be defined + self(x) returns aufunc(x) + with masked values where domain(x) is true or getmask(x) is true. 
+ """ + self.f = mufunc + self.fill = fill + self.domain = domain + self.__doc__ = getattr(mufunc, "__doc__", str(mufunc)) + self.__name__ = getattr(mufunc, "__name__", str(mufunc)) + ufunc_domain[mufunc] = domain + ufunc_fills[mufunc] = fill + # + def __call__ (self, a, *args, **kwargs): + "Execute the call behavior." +# numeric tries to return scalars rather than arrays when given scalars. + m = getmask(a) + d1 = filled(a, self.fill) + if self.domain is not None: + m = mask_or(m, numeric.asarray(self.domain(d1))) + # Take care of the masked singletong first ... + if m.ndim == 0 and m: + return masked + # Get the result.... + if isinstance(a, MaskedArray): + result = self.f(d1, *args, **kwargs).view(type(a)) + else: + result = self.f(d1, *args, **kwargs).view(MaskedArray) + # Fix the mask if we don't have a scalar + if result.ndim > 0: + result._mask = m + return result + # + def __str__ (self): + return "Masked version of %s. [Invalid values are masked]" % str(self.f) +#.............................................................................. +class masked_binary_operation: + """Defines masked version of binary operations, +where invalid values are pre-masked. + +:IVariables: + - `f` : function. + - `fillx` : Default filling value for first array*[0]*. + - `filly` : Default filling value for second array*[0]*. + - `domain` : Default domain *[None]*. + """ + def __init__ (self, mbfunc, fillx=0, filly=0): + """abfunc(fillx, filly) must be defined. + abfunc(x, filly) = x for all x to enable reduce. + """ + self.f = mbfunc + self.fillx = fillx + self.filly = filly + self.__doc__ = getattr(mbfunc, "__doc__", str(mbfunc)) + self.__name__ = getattr(mbfunc, "__name__", str(mbfunc)) + ufunc_domain[mbfunc] = None + ufunc_fills[mbfunc] = (fillx, filly) + # + def __call__ (self, a, b, *args, **kwargs): + "Execute the call behavior." + m = mask_or(getmask(a), getmask(b)) + if (not m.ndim) and m: + return masked + d1 = filled(a, self.fillx) + d2 = filled(b, self.filly) +# CHECK : Do we really need to fill the arguments ? Pro'ly not +# result = self.f(a, b, *args, **kwargs).view(get_masked_subclass(a,b)) + result = self.f(d1, d2, *args, **kwargs).view(get_masked_subclass(a,b)) + if result.ndim > 0: + result._mask = m + return result + # + def reduce (self, target, axis=0, dtype=None): + """Reduces `target` along the given `axis`.""" + if isinstance(target, MaskedArray): + tclass = type(target) + else: + tclass = MaskedArray + m = getmask(target) + t = filled(target, self.filly) + if t.shape == (): + t = t.reshape(1) + if m is not nomask: + m = make_mask(m, copy=1) + m.shape = (1,) + if m is nomask: + return self.f.reduce(t, axis).view(tclass) + t = t.view(tclass) + t._mask = m + # XXX: "or t.dtype" below is a workaround for what appears + # XXX: to be a bug in reduce. + tr = self.f.reduce(filled(t, self.filly), axis, dtype=dtype or t.dtype) + mr = umath.logical_and.reduce(m, axis) + tr = tr.view(tclass) + if mr.ndim > 0: + tr._mask = mr + return tr + elif mr: + return masked + return tr + + def outer (self, a, b): + "Returns the function applied to the outer product of a and b." 
+ ma = getmask(a) + mb = getmask(b) + if ma is nomask and mb is nomask: + m = nomask + else: + ma = getmaskarray(a) + mb = getmaskarray(b) + m = umath.logical_or.outer(ma, mb) + if (not m.ndim) and m: + return masked + rcls = get_masked_subclass(a,b) + d = self.f.outer(filled(a, self.fillx), filled(b, self.filly)).view(rcls) + if d.ndim > 0: + d._mask = m + return d + + def accumulate (self, target, axis=0): + """Accumulates `target` along `axis` after filling with y fill value.""" + if isinstance(target, MaskedArray): + tclass = type(target) + else: + tclass = masked_array + t = filled(target, self.filly) + return self.f.accumulate(t, axis).view(tclass) + + def __str__ (self): + return "Masked version of " + str(self.f) +#.............................................................................. +class domained_binary_operation: + """Defines binary operations that have a domain, like divide. + +These are complicated so they are a separate class. +They have no reduce, outer or accumulate. + +:IVariables: + - `f` : function. + - `fillx` : Default filling value for first array*[0]*. + - `filly` : Default filling value for second array*[0]*. + - `domain` : Default domain *[None]*. + """ + def __init__ (self, dbfunc, domain, fillx=0, filly=0): + """abfunc(fillx, filly) must be defined. + abfunc(x, filly) = x for all x to enable reduce. + """ + self.f = dbfunc + self.domain = domain + self.fillx = fillx + self.filly = filly + self.__doc__ = getattr(dbfunc, "__doc__", str(dbfunc)) + self.__name__ = getattr(dbfunc, "__name__", str(dbfunc)) + ufunc_domain[dbfunc] = domain + ufunc_fills[dbfunc] = (fillx, filly) + + def __call__(self, a, b): + "Execute the call behavior." + ma = getmask(a) + mb = getmask(b) + d1 = filled(a, self.fillx) + d2 = filled(b, self.filly) + t = numeric.asarray(self.domain(d1, d2)) + + if fromnumeric.sometrue(t, None): + d2 = numeric.where(t, self.filly, d2) + mb = mask_or(mb, t) + m = mask_or(ma, mb) + if (not m.ndim) and m: + return masked + result = self.f(d1, d2).view(get_masked_subclass(a,b)) + if result.ndim > 0: + result._mask = m + return result + + def __str__ (self): + return "Masked version of " + str(self.f) + +#.............................................................................. 
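(A standalone sketch of the domain idea implemented by domained_binary_operation above: operands where the domain test fires are replaced by fill values and the corresponding entries are reported as masked. Not part of the commit; it uses plain numpy rather than the module's machinery, and safe_divide is an illustrative name.)

    # Illustrative only: mirrors domain_safe_divide, which flags
    # abs(a)*tolerance >= abs(b) as invalid; divide on filled operands
    # and return the invalid-entry mask alongside the result.
    import numpy

    def safe_divide(a, b, tolerance=1.e-35):
        a = numpy.asarray(a, dtype=float)
        b = numpy.asarray(b, dtype=float)
        invalid = numpy.absolute(a) * tolerance >= numpy.absolute(b)
        filled_b = numpy.where(invalid, 1.0, b)   # filly = 1 avoids 0/0
        return a / filled_b, invalid

    result, mask = safe_divide([1.0, 2.0, 3.0], [2.0, 0.0, 1.5])
    print result   # [ 0.5  2.   2. ], second entry is to be masked
    print mask     # [False  True False]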
+# Unary ufuncs +exp = masked_unary_operation(umath.exp) +conjugate = masked_unary_operation(umath.conjugate) +sin = masked_unary_operation(umath.sin) +cos = masked_unary_operation(umath.cos) +tan = masked_unary_operation(umath.tan) +arctan = masked_unary_operation(umath.arctan) +arcsinh = masked_unary_operation(umath.arcsinh) +sinh = masked_unary_operation(umath.sinh) +cosh = masked_unary_operation(umath.cosh) +tanh = masked_unary_operation(umath.tanh) +abs = absolute = masked_unary_operation(umath.absolute) +fabs = masked_unary_operation(umath.fabs) +negative = masked_unary_operation(umath.negative) +floor = masked_unary_operation(umath.floor) +ceil = masked_unary_operation(umath.ceil) +around = masked_unary_operation(fromnumeric.round_) +logical_not = masked_unary_operation(umath.logical_not) +# Domained unary ufuncs +sqrt = masked_unary_operation(umath.sqrt, 0.0, domain_greater_equal(0.0)) +log = masked_unary_operation(umath.log, 1.0, domain_greater(0.0)) +log10 = masked_unary_operation(umath.log10, 1.0, domain_greater(0.0)) +tan = masked_unary_operation(umath.tan, 0.0, domain_tan(1.e-35)) +arcsin = masked_unary_operation(umath.arcsin, 0.0, + domain_check_interval(-1.0, 1.0)) +arccos = masked_unary_operation(umath.arccos, 0.0, + domain_check_interval(-1.0, 1.0)) +arccosh = masked_unary_operation(umath.arccosh, 1.0, domain_greater_equal(1.0)) +arctanh = masked_unary_operation(umath.arctanh, 0.0, + domain_check_interval(-1.0+1e-15, 1.0-1e-15)) +# Binary ufuncs +add = masked_binary_operation(umath.add) +subtract = masked_binary_operation(umath.subtract) +multiply = masked_binary_operation(umath.multiply, 1, 1) +arctan2 = masked_binary_operation(umath.arctan2, 0.0, 1.0) +equal = masked_binary_operation(umath.equal) +equal.reduce = None +not_equal = masked_binary_operation(umath.not_equal) +not_equal.reduce = None +less_equal = masked_binary_operation(umath.less_equal) +less_equal.reduce = None +greater_equal = masked_binary_operation(umath.greater_equal) +greater_equal.reduce = None +less = masked_binary_operation(umath.less) +less.reduce = None +greater = masked_binary_operation(umath.greater) +greater.reduce = None +logical_and = masked_binary_operation(umath.logical_and) +alltrue = masked_binary_operation(umath.logical_and, 1, 1).reduce +logical_or = masked_binary_operation(umath.logical_or) +sometrue = logical_or.reduce +logical_xor = masked_binary_operation(umath.logical_xor) +bitwise_and = masked_binary_operation(umath.bitwise_and) +bitwise_or = masked_binary_operation(umath.bitwise_or) +bitwise_xor = masked_binary_operation(umath.bitwise_xor) +hypot = masked_binary_operation(umath.hypot) +# Domained binary ufuncs +divide = domained_binary_operation(umath.divide, domain_safe_divide(), 0, 1) +true_divide = domained_binary_operation(umath.true_divide, + domain_safe_divide(), 0, 1) +floor_divide = domained_binary_operation(umath.floor_divide, + domain_safe_divide(), 0, 1) +remainder = domained_binary_operation(umath.remainder, + domain_safe_divide(), 0, 1) +fmod = domained_binary_operation(umath.fmod, domain_safe_divide(), 0, 1) + + +#####-------------------------------------------------------------------------- +#---- --- Mask creation functions --- +#####-------------------------------------------------------------------------- +def getmask(a): + """Returns the mask of `a`, if any, or `nomask`. +Returns `nomask` if `a` is not a masked array. 
+To get an array for sure use getmaskarray.""" + if hasattr(a, "_mask"): + return a._mask + else: + return nomask + +def getmaskarray(a): + """Returns the mask of `a`, if any. +Otherwise, returns an array of `False`, with the same shape as `a`. + """ + m = getmask(a) + if m is nomask: + return make_mask_none(fromnumeric.shape(a)) + else: + return m + +def is_mask(m): + """Returns `True` if `m` is a legal mask. +Does not check contents, only type. + """ + try: + return m.dtype.type is MaskType + except AttributeError: + return False +# +def make_mask(m, copy=False, small_mask=True, flag=None): + """make_mask(m, copy=0, small_mask=0) +Returns `m` as a mask, creating a copy if necessary or requested. +The function can accept any sequence of integers or `nomask`. +Does not check that contents must be 0s and 1s. +If `small_mask=True`, returns `nomask` if `m` contains no true elements. + +:Parameters: + - `m` (ndarray) : Mask. + - `copy` (boolean, *[False]*) : Returns a copy of `m` if true. + - `small_mask` (boolean, *[False]*): Flattens mask to `nomask` if `m` is all false. + """ + if flag is not None: + warnings.warn("The flag 'flag' is now called 'small_mask'!", + DeprecationWarning) + small_mask = flag + if m is nomask: + return nomask + elif isinstance(m, ndarray): + m = filled(m, True) + if m.dtype.type is MaskType: + if copy: + result = numeric.array(m, dtype=MaskType, copy=copy) + else: + result = m + else: + result = numeric.array(m, dtype=MaskType) + else: + result = numeric.array(filled(m, True), dtype=MaskType) + # Bas les masques ! + if small_mask and not result.any(): + return nomask + else: + return result + +def make_mask_none(s): + "Returns a mask of shape `s`, filled with `False`." + result = numeric.zeros(s, dtype=MaskType) + return result + +def mask_or (m1, m2, copy=False, small_mask=True): + """Returns the combination of two masks `m1` and `m2`. +The masks are combined with the `logical_or` operator, treating `nomask` as false. +The result may equal m1 or m2 if the other is nomask. + +:Parameters: + - `m` (ndarray) : Mask. + - `copy` (boolean, *[False]*) : Returns a copy of `m` if true. + - `small_mask` (boolean, *[False]*): Flattens mask to `nomask` if `m` is all false. + """ + if m1 is nomask: + return make_mask(m2, copy=copy, small_mask=small_mask) + if m2 is nomask: + return make_mask(m1, copy=copy, small_mask=small_mask) + if m1 is m2 and is_mask(m1): + return m1 + return make_mask(umath.logical_or(m1, m2), copy=copy, small_mask=small_mask) + +#####-------------------------------------------------------------------------- +#--- --- Masking functions --- +#####-------------------------------------------------------------------------- +def masked_where(condition, a, copy=True): + """Returns `x` as an array masked where `condition` is true. +Masked values of `x` or `condition` are kept. + +:Parameters: + - `condition` (ndarray) : Masking condition. + - `x` (ndarray) : Array to mask. + - `copy` (boolean, *[False]*) : Returns a copy of `m` if true. + """ + cond = filled(condition,1) + a = numeric.array(a, copy=copy, subok=True) + if hasattr(a, '_mask'): + cond = mask_or(cond, a._mask) + cls = type(a) + else: + cls = MaskedArray + result = a.view(cls) + result._mask = cond + return result + +def masked_greater(x, value, copy=1): + "Shortcut to `masked_where`, with ``condition = (x > value)``." + return masked_where(greater(x, value), x, copy=copy) + +def masked_greater_equal(x, value, copy=1): + "Shortcut to `masked_where`, with ``condition = (x >= value)``." 
+def masked_less(x, value, copy=True):
+    "Shortcut to `masked_where`, with ``condition = (x < value)``."
+    return masked_where(less(x, value), x, copy=copy)
+
+def masked_less_equal(x, value, copy=True):
+    "Shortcut to `masked_where`, with ``condition = (x <= value)``."
+    return masked_where(less_equal(x, value), x, copy=copy)
+
+def masked_not_equal(x, value, copy=True):
+    "Shortcut to `masked_where`, with ``condition = (x != value)``."
+    return masked_where((x != value), x, copy=copy)
+
+#
+def masked_equal(x, value, copy=True):
+    """Shortcut to `masked_where`, with ``condition = (x == value)``.
+For floating point, consider `masked_values(x, value)` instead.
+    """
+    return masked_where((x == value), x, copy=copy)
+#    d = filled(x, 0)
+#    c = umath.equal(d, value)
+#    m = mask_or(c, getmask(x))
+#    return array(d, mask=m, copy=copy)
+
+def masked_inside(x, v1, v2, copy=True):
+    """Shortcut to `masked_where`, where `condition` is True for x inside
+the interval `[v1,v2]` ``(v1 <= x <= v2)``.
+The boundaries `v1` and `v2` can be given in either order.
+    """
+    if v2 < v1:
+        (v1, v2) = (v2, v1)
+    xf = filled(x)
+    condition = (xf >= v1) & (xf <= v2)
+    return masked_where(condition, x, copy=copy)
+
+def masked_outside(x, v1, v2, copy=True):
+    """Shortcut to `masked_where`, where `condition` is True for x outside
+the interval `[v1,v2]` ``(x < v1)|(x > v2)``.
+The boundaries `v1` and `v2` can be given in either order.
+    """
+    if v2 < v1:
+        (v1, v2) = (v2, v1)
+    xf = filled(x)
+    condition = (xf < v1) | (xf > v2)
+    return masked_where(condition, x, copy=copy)
+
+#
+def masked_object(x, value, copy=True):
+    """Masks the array `x` where the data are exactly equal to `value`.
+This function is suitable only for `object` arrays: for floating point,
+please use `masked_values` instead.
+The mask is set to `nomask` if possible.
+
+:parameter copy (Boolean, *[True]*):  Returns a copy of `x` if true. """
+    if isMaskedArray(x):
+        condition = umath.equal(x._data, value)
+        mask = x._mask
+    else:
+        condition = umath.equal(fromnumeric.asarray(x), value)
+        mask = nomask
+    mask = mask_or(mask, make_mask(condition, small_mask=True))
+    return masked_array(x, mask=mask, copy=copy, fill_value=value)
+
+def masked_values(x, value, rtol=1.e-5, atol=1.e-8, copy=True):
+    """Masks the array `x` where the data are approximately equal to `value`
+(that is, ``abs(x - value) <= atol+rtol*abs(value)``).
+Suitable only for floating-point arrays. For integers, please use `masked_equal`.
+The mask is set to `nomask` if possible.
+
+:Parameters:
+    - `rtol` (Float, *[1e-5]*): Tolerance parameter.
+    - `atol` (Float, *[1e-8]*): Tolerance parameter.
+    - `copy` (boolean, *[True]*) : Returns a copy of `x` if True.
+ """ + abs = umath.absolute + xnew = filled(x, value) + if issubclass(xnew.dtype.type, numeric.floating): + condition = umath.less_equal(abs(xnew-value), atol+rtol*abs(value)) + try: + mask = x._mask + except AttributeError: + mask = nomask + else: + condition = umath.equal(xnew, value) + mask = nomask + mask = mask_or(mask, make_mask(condition, small_mask=True)) + return masked_array(xnew, mask=mask, copy=copy, fill_value=value) + +#####-------------------------------------------------------------------------- +#---- --- Printing options --- +#####-------------------------------------------------------------------------- +class _MaskedPrintOption: + """Handles the string used to represent missing data in a masked array.""" + def __init__ (self, display): + "Creates the masked_print_option object." + self._display = display + self._enabled = True + + def display(self): + "Displays the string to print for masked values." + return self._display + + def set_display (self, s): + "Sets the string to print for masked values." + self._display = s + + def enabled(self): + "Is the use of the display value enabled?" + return self._enabled + + def enable(self, small_mask=1): + "Set the enabling small_mask to `small_mask`." + self._enabled = small_mask + + def __str__ (self): + return str(self._display) + + __repr__ = __str__ + +#if you single index into a masked location you get this object. +masked_print_option = _MaskedPrintOption('--') + +#####-------------------------------------------------------------------------- +#---- --- MaskedArray class --- +#####-------------------------------------------------------------------------- +##def _getoptions(a_out, a_in): +## "Copies standards options of a_in to a_out." +## for att in ['] +#class _mathmethod(object): +# """Defines a wrapper for arithmetic methods. +#Instead of directly calling a ufunc, the corresponding method of the `array._data` +#object is called instead. +# """ +# def __init__ (self, methodname, fill_self=0, fill_other=0, domain=None): +# """ +#:Parameters: +# - `methodname` (String) : Method name. +# - `fill_self` (Float *[0]*) : Fill value for the instance. +# - `fill_other` (Float *[0]*) : Fill value for the target. +# - `domain` (Domain object *[None]*) : Domain of non-validity. +# """ +# self.methodname = methodname +# self.fill_self = fill_self +# self.fill_other = fill_other +# self.domain = domain +# self.obj = None +# self.__doc__ = self.getdoc() +# # +# def getdoc(self): +# "Returns the doc of the function (from the doc of the method)." +# try: +# return getattr(MaskedArray, self.methodname).__doc__ +# except: +# return getattr(ndarray, self.methodname).__doc__ +# # +# def __get__(self, obj, objtype=None): +# self.obj = obj +# return self +# # +# def __call__ (self, other, *args): +# "Execute the call behavior." +# instance = self.obj +# m_self = instance._mask +# m_other = getmask(other) +# base = instance.filled(self.fill_self) +# target = filled(other, self.fill_other) +# if self.domain is not None: +# # We need to force the domain to a ndarray only. +# if self.fill_other > self.fill_self: +# domain = self.domain(base, target) +# else: +# domain = self.domain(target, base) +# if domain.any(): +# #If `other` is a subclass of ndarray, `filled` must have the +# # same subclass, else we'll lose some info. +# #The easiest then is to fill `target` instead of creating +# # a pure ndarray. +# #Oh, and we better make a copy! 
+#            if isinstance(other, ndarray):
+#                # We don't want to modify other: let's copy target, then
+#                target = target.copy()
+#                target[fromnumeric.asarray(domain)] = self.fill_other
+#            else:
+#                target = numeric.where(fromnumeric.asarray(domain),
+#                                       self.fill_other, target)
+#            m_other = mask_or(m_other, domain)
+#        m = mask_or(m_self, m_other)
+#        method = getattr(base, self.methodname)
+#        result = method(target, *args).view(type(instance))
+#        try:
+#            result._mask = m
+#        except AttributeError:
+#            if m:
+#                result = masked
+#        return result
+#...............................................................................
+class _arraymethod(object):
+    """Defines a wrapper for basic array methods.
+Upon call, returns a masked array, where the new `_data` array is the output
+of the corresponding method called on the original `_data`.
+
+If `onmask` is True, the new mask is the output of the method called on the initial mask.
+If `onmask` is False, the new mask is just a reference to the initial mask.
+
+:Parameters:
+    `funcname` : String
+        Name of the function to apply on data.
+    `onmask` : Boolean *[True]*
+        Whether the mask must be processed also (True) or left alone (False).
+    """
+    def __init__(self, funcname, onmask=True):
+        self._name = funcname
+        self._onmask = onmask
+        self.obj = None
+        self.__doc__ = self.getdoc()
+    #
+    def getdoc(self):
+        "Returns the doc of the function (from the doc of the method)."
+        methdoc = getattr(ndarray, self._name, None)
+        methdoc = getattr(numpy, self._name, methdoc)
+#        methdoc = getattr(MaskedArray, self._name, methdoc)
+        if methdoc is not None:
+            return methdoc.__doc__
+#        try:
+#            return getattr(MaskedArray, self._name).__doc__
+#        except:
+#            try:
+#                return getattr(numpy, self._name).__doc__
+#            except:
+#                return getattr(ndarray, self._name).__doc__
+    #
+    def __get__(self, obj, objtype=None):
+        self.obj = obj
+        return self
+    #
+    def __call__(self, *args, **params):
+        methodname = self._name
+        data = self.obj._data
+        mask = self.obj._mask
+        cls = type(self.obj)
+        result = getattr(data, methodname)(*args, **params).view(cls)
+        result._smallmask = self.obj._smallmask
+        if result.ndim:
+            if not self._onmask:
+                result._mask = mask
+            elif mask is not nomask:
+                result.__setmask__(getattr(mask, methodname)(*args, **params))
+        return result
+#..........................................................
+
+class flatiter(object):
+    "Defines a flat iterator."
+    def __init__(self, ma):
+        self.ma = ma
+        self.ma_iter = numpy.asarray(ma).flat
+
+        if ma._mask is nomask:
+            self.maskiter = None
+        else:
+            self.maskiter = ma._mask.flat
+
+    def __iter__(self):
+        return self
+
+    ### This won't work if ravel makes a copy
+    def __setitem__(self, index, value):
+        a = self.ma.ravel()
+        a[index] = value
+
+    def next(self):
+        d = self.ma_iter.next()
+        if self.maskiter is not None and self.maskiter.next():
+            d = masked
+        return d
+
+
+class MaskedArray(numeric.ndarray):
+    """Arrays with possibly masked values.
+Elements whose mask value is True are excluded from any computation.
+
+Construction:
+    x = array(data, dtype=None, copy=True, order=False,
+              mask = nomask, fill_value=None, small_mask=True)
+
+If copy=False, every effort is made not to copy the data:
+If `data` is a MaskedArray, and argument mask=nomask, then the candidate data
+is `data._data` and the mask used is `data._mask`.
+If `data` is a numeric array, it is used as the candidate raw data.
+If `dtype` is not None and is different from data.dtype.char then a data copy is required.
+Otherwise, the candidate is used.
+ +If a data copy is required, the raw (unmasked) data stored is the result of: +numeric.array(data, dtype=dtype.char, copy=copy) + +If `mask` is `nomask` there are no masked values. +Otherwise mask must be convertible to an array of booleans with the same shape as x. +If `small_mask` is True, a mask consisting of zeros (False) only is compressed to `nomask`. +Otherwise, the mask is not compressed. + +fill_value is used to fill in masked values when necessary, such as when +printing and in method/function filled(). +The fill_value is not used for computation within this module. + """ + __array_priority__ = 10.1 + _defaultmask = nomask + _defaulthardmask = False + _baseclass = numeric.ndarray + def __new__(cls, data=None, mask=nomask, dtype=None, copy=False, fill_value=None, + keep_mask=True, small_mask=True, hard_mask=False, flag=None, + subok=True, **options): + """array(data, dtype=None, copy=True, mask=nomask, fill_value=None) + +If `data` is already a ndarray, its dtype becomes the default value of dtype. + """ + if flag is not None: + warnings.warn("The flag 'flag' is now called 'small_mask'!", + DeprecationWarning) + small_mask = flag + # Process data............ + _data = numeric.array(data, dtype=dtype, copy=copy, subok=subok) + _baseclass = getattr(data, '_baseclass', type(_data)) + _basedict = getattr(data, '_basedict', getattr(data, '__dict__', None)) + if not isinstance(data, MaskedArray): + _data = _data.view(cls) + elif not subok: + _data = data.view(cls) + else: + _data = _data.view(type(data)) + # Backwards compat ....... + if hasattr(data,'_mask') and not isinstance(data, ndarray): + _data._mask = data._mask + _sharedmask = True + # Process mask ........... + if mask is nomask: + if not keep_mask: + _data._mask = nomask + if copy: + _data._mask = _data._mask.copy() + else: + mask = numeric.array(mask, dtype=MaskType, copy=copy) + if mask.shape != _data.shape: + (nd, nm) = (_data.size, mask.size) + if nm == 1: + mask = numeric.resize(mask, _data.shape) + elif nm == nd: + mask = fromnumeric.reshape(mask, _data.shape) + else: + msg = "Mask and data not compatible: data size is %i, "+\ + "mask size is %i." + raise MAError, msg % (nd, nm) + if _data._mask is nomask: + _data._mask = mask + _data._sharedmask = True + else: + # Make a copy of the mask to avoid propagation + _data._sharedmask = False + if not keep_mask: + _data._mask = mask + else: + _data._mask = umath.logical_or(mask, _data._mask) + + + # Update fill_value....... + _data._fill_value = getattr(data, '_fill_value', fill_value) + if _data._fill_value is None: + _data._fill_value = default_fill_value(_data) + # Process extra options .. + _data._hardmask = hard_mask + _data._smallmask = small_mask + _data._baseclass = _baseclass + _data._basedict = _basedict + return _data + #........................ + def __array_finalize__(self,obj): + """Finalizes the masked array. + """ + # Finalize mask ............... + self._mask = getattr(obj, '_mask', nomask) + if self._mask is not nomask: + self._mask.shape = self.shape + # Get the remaining options ... + self._hardmask = getattr(obj, '_hardmask', self._defaulthardmask) + self._smallmask = getattr(obj, '_smallmask', True) + self._sharedmask = True + self._baseclass = getattr(obj, '_baseclass', type(obj)) + self._fill_value = getattr(obj, '_fill_value', None) + # Update special attributes ... 
+ self._basedict = getattr(obj, '_basedict', getattr(obj, '__dict__', None)) + if self._basedict is not None: + self.__dict__.update(self._basedict) + return + #.................................. + def __array_wrap__(self, obj, context=None): + """Special hook for ufuncs. +Wraps the numpy array and sets the mask according to context. + """ + #TODO : Should we check for type result + result = obj.view(type(self)) + #.......... + if context is not None: + result._mask = result._mask.copy() + (func, args, _) = context + m = reduce(mask_or, [getmask(arg) for arg in args]) + # Get domain mask + domain = ufunc_domain.get(func, None) + if domain is not None: + if len(args) > 2: + d = reduce(domain, args) + else: + d = domain(*args) + if m is nomask: + if d is not nomask: + m = d + else: + m |= d + if not m.ndim and m: + if m: + if result.shape == (): + return masked + result._mask = numeric.ones(result.shape, bool_) + else: + result._mask = m + #.... +# result._mask = m + result._fill_value = self._fill_value + result._hardmask = self._hardmask + result._smallmask = self._smallmask + result._baseclass = self._baseclass + return result + #............................................. + def __getitem__(self, indx): + """x.__getitem__(y) <==> x[y] +Returns the item described by i. Not a copy as in previous versions. + """ + # This test is useful, but we should keep things light... +# if getmask(indx) is not nomask: +# msg = "Masked arrays must be filled before they can be used as indices!" +# raise IndexError, msg + # super() can't work here if the underlying data is a matrix... + dout = (self._data).__getitem__(indx) + m = self._mask + if hasattr(dout, 'shape') and len(dout.shape) > 0: + # Not a scalar: make sure that dout is a MA + dout = dout.view(type(self)) + dout._smallmask = self._smallmask + if m is not nomask: + # use _set_mask to take care of the shape + dout.__setmask__(m[indx]) + elif m is not nomask and m[indx]: + return masked + return dout + #........................ + def __setitem__(self, indx, value): + """x.__setitem__(i, y) <==> x[i]=y +Sets item described by index. If value is masked, masks those locations. + """ + if self is masked: + raise MAError, 'Cannot alter the masked element.' +# if getmask(indx) is not nomask: +# msg = "Masked arrays must be filled before they can be used as indices!" +# raise IndexError, msg + #.... + if value is masked: + m = self._mask + if m is nomask: + m = make_mask_none(self.shape) +# else: +# m = m.copy() + m[indx] = True + self.__setmask__(m) + return + #.... + dval = numeric.asarray(value).astype(self.dtype) + valmask = getmask(value) + if self._mask is nomask: + if valmask is not nomask: + self._mask = make_mask_none(self.shape) + self._mask[indx] = valmask + elif not self._hardmask: + _mask = self._mask.copy() + if valmask is nomask: + _mask[indx] = False + else: + _mask[indx] = valmask + self._set_mask(_mask) + elif hasattr(indx, 'dtype') and (indx.dtype==bool_): + indx = indx * umath.logical_not(self._mask) + else: + mindx = mask_or(self._mask[indx], valmask, copy=True) + dindx = self._data[indx] + if dindx.size > 1: + dindx[~mindx] = dval + elif mindx is nomask: + dindx = dval + dval = dindx + self._mask[indx] = mindx + # Set data .......... + #dval = filled(value).astype(self.dtype) + ndarray.__setitem__(self._data,indx,dval) + #............................................ + def __getslice__(self, i, j): + """x.__getslice__(i, j) <==> x[i:j] +Returns the slice described by i, j. 
+The use of negative indices is not supported."""
+        return self.__getitem__(slice(i,j))
+    #........................
+    def __setslice__(self, i, j, value):
+        """x.__setslice__(i, j, value) <==> x[i:j]=value
+Sets a slice i:j to `value`.
+If `value` is masked, masks those locations."""
+        self.__setitem__(slice(i,j), value)
+    #............................................
+    def __setmask__(self, mask, copy=False):
+        newmask = make_mask(mask, copy=copy, small_mask=self._smallmask)
+#        self.unshare_mask()
+        if self._mask is nomask:
+            self._mask = newmask
+        elif self._hardmask:
+            if newmask is not nomask:
+                self._mask.__ior__(newmask)
+        else:
+            # This one is tricky: if we set the mask that way, we may break the
+            # propagation. But if we don't, we end up with a mask full of False
+            # and a test on nomask fails...
+            if newmask is nomask:
+                self._mask = nomask
+            else:
+                self._mask.flat = newmask
+        if self._mask.shape:
+            self._mask = numeric.reshape(self._mask, self.shape)
+    _set_mask = __setmask__
+
+    def _get_mask(self):
+        """Returns the current mask."""
+        return self._mask
+
+    mask = property(fget=_get_mask, fset=__setmask__, doc="Mask")
+    #............................................
+    def harden_mask(self):
+        "Forces the mask to hard."
+        self._hardmask = True
+
+    def soften_mask(self):
+        "Forces the mask to soft."
+        self._hardmask = False
+
+    def unshare_mask(self):
+        "Copies the mask and sets the sharedmask flag to False."
+        if self._sharedmask:
+            self._mask = self._mask.copy()
+            self._sharedmask = False
+
+    #............................................
+    def _get_data(self):
+        "Returns the current data (as a view of the original underlying data)."
+        return self.view(self._baseclass)
+    _data = property(fget=_get_data)
+    #............................................
+    def _get_flat(self):
+        """Returns a flat iterator over the array.
+        """
+        return flatiter(self)
+    #
+    def _set_flat (self, value):
+        "x.flat = value"
+        y = self.ravel()
+        y[:] = value
+    #
+    flat = property(fget=_get_flat, fset=_set_flat, doc="Flat version")
+    #............................................
+    def get_fill_value(self):
+        "Returns the filling value."
+        if self._fill_value is None:
+            self._fill_value = default_fill_value(self)
+        return self._fill_value
+
+    def set_fill_value(self, value=None):
+        """Sets the filling value to `value`.
+If None, uses the default, based on the data type."""
+        if value is None:
+            value = default_fill_value(self)
+        self._fill_value = value
+
+    fill_value = property(fget=get_fill_value, fset=set_fill_value,
+                          doc="Filling value")
+
+    def filled(self, fill_value=None):
+        """Returns an array of the same class as `_data`,
+ with masked values filled with `fill_value`.
+Subclassing is preserved.
+
+If `fill_value` is None, uses self.fill_value.
+        """
+        m = self._mask
+        if m is nomask or not m.any():
+            return self._data
+        #
+        if fill_value is None:
+            fill_value = self.fill_value
+        #
+        if self is masked_singleton:
+            result = numeric.asanyarray(fill_value)
+        else:
+            result = self._data.copy()
+            try:
+                numpy.putmask(result, m, fill_value)
+                #result[m] = fill_value
+            except (TypeError, AttributeError):
+                fill_value = numeric.array(fill_value, dtype=object)
+                d = result.astype(object)
+                result = fromnumeric.choose(m, (d, fill_value))
+            except IndexError:
+                #ok, if scalar
+                if self._data.shape:
+                    raise
+                elif m:
+                    result = numeric.array(fill_value, dtype=self.dtype)
+                else:
+                    result = self._data
+        return result
+
+    def compressed(self):
+        "A 1-D array of all the non-masked data."
+ d = self.ravel() + if self._mask is nomask: + return d + elif not self._smallmask and not self._mask.any(): + return d + else: + return d[numeric.logical_not(d._mask)] + #............................................ + def __str__(self): + """x.__str__() <==> str(x) +Calculates the string representation, using masked for fill if it is enabled. +Otherwise, fills with fill value. + """ + if masked_print_option.enabled(): + f = masked_print_option + if self is masked: + return str(f) + m = self._mask + if m is nomask: + res = self._data + else: + if m.shape == (): + if m: + return str(f) + else: + return str(self._data) + # convert to object array to make filled work +#CHECK: the two lines below seem more robust than the self._data.astype +# res = numeric.empty(self._data.shape, object_) +# numeric.putmask(res,~m,self._data) + res = self._data.astype("|O8") + res[m] = f + else: + res = self.filled(self.fill_value) + return str(res) + + def __repr__(self): + """x.__repr__() <==> repr(x) +Calculates the repr representation, using masked for fill if it is enabled. +Otherwise fill with fill value. + """ + with_mask = """\ +masked_%(name)s(data = + %(data)s, + mask = + %(mask)s, + fill_value=%(fill)s) +""" + with_mask1 = """\ +masked_%(name)s(data = %(data)s, + mask = %(mask)s, + fill_value=%(fill)s) +""" + n = len(self.shape) + name = repr(self._data).split('(')[0] + if n <= 1: + return with_mask1 % { + 'name': name, + 'data': str(self), + 'mask': str(self._mask), + 'fill': str(self.fill_value), + } + return with_mask % { + 'name': name, + 'data': str(self), + 'mask': str(self._mask), + 'fill': str(self.fill_value), + } + #............................................ + def __iadd__(self, other): + "Adds other to self in place." + ndarray.__iadd__(self._data,other) + m = getmask(other) + if self._mask is nomask: + self._mask = m + elif m is not nomask: + self._mask += m + return self + #.... + def __isub__(self, other): + "Subtracts other from self in place." + ndarray.__isub__(self._data,other) + m = getmask(other) + if self._mask is nomask: + self._mask = m + elif m is not nomask: + self._mask += m + return self + #.... + def __imul__(self, other): + "Multiplies self by other in place." + ndarray.__imul__(self._data,other) + m = getmask(other) + if self._mask is nomask: + self._mask = m + elif m is not nomask: + self._mask += m + return self + #.... + def __idiv__(self, other): + "Divides self by other in place." + dom_mask = domain_safe_divide().__call__(self, filled(other,1)) + other_mask = getmask(other) + new_mask = mask_or(other_mask, dom_mask) + ndarray.__idiv__(self._data, other) + self._mask = mask_or(self._mask, new_mask) + return self + #............................................ + def __float__(self): + "Converts self to float." + if self._mask is not nomask: + warnings.warn("Warning: converting a masked element to nan.") + return numpy.nan + #raise MAError, 'Cannot convert masked element to a Python float.' + return float(self.item()) + + def __int__(self): + "Converts self to int." + if self._mask is not nomask: + raise MAError, 'Cannot convert masked element to a Python int.' + return int(self.item()) + #............................................ + def count(self, axis=None): + """Counts the non-masked elements of the array along a given axis, +and returns a masked array where the mask is True where all data are masked. 
+If `axis` is None, counts all the non-masked elements, and returns either a
+scalar or the masked singleton."""
+        m = self._mask
+        s = self.shape
+        ls = len(s)
+        if m is nomask:
+            if ls == 0:
+                return 1
+            if ls == 1:
+                return s[0]
+            if axis is None:
+                return self.size
+            else:
+                n = s[axis]
+                t = list(s)
+                del t[axis]
+                return numeric.ones(t) * n
+        n1 = fromnumeric.size(m, axis)
+        n2 = m.astype(int_).sum(axis)
+        if axis is None:
+            return (n1-n2)
+        else:
+            return masked_array(n1 - n2)
+    #............................................
+    def reshape (self, *s):
+        """Reshapes the array to shape s.
+Returns a new masked array.
+If you want to modify the shape in place, please use `a.shape = s`"""
+        result = self._data.reshape(*s).view(type(self))
+        result.__dict__.update(self.__dict__)
+        if result._mask is not nomask:
+            result._mask = self._mask.copy()
+            result._mask.shape = result.shape
+        return result
+    #
+    repeat = _arraymethod('repeat')
+    #
+    def resize(self, newshape, refcheck=True, order=False):
+        """Attempts to modify the size and shape of self in place.
+        The array must own its own memory and not be referenced by other arrays.
+        Returns None.
+        """
+        try:
+            self._data.resize(newshape, refcheck, order)
+            if self.mask is not nomask:
+                self._mask.resize(newshape, refcheck, order)
+        except ValueError:
+            raise ValueError("Cannot resize an array that has been referenced "
+                             "or is referencing another array in this way.\n"
+                             "Use the resize function.")
+        return None
+    #
+    flatten = _arraymethod('flatten')
+    #
+    def put(self, indices, values, mode='raise'):
+        """Sets storage-indexed locations to corresponding values.
+a.put(indices, values, mode) sets a.flat[n] = values[n] for each n in indices.
+`values` can be scalar or an array shorter than indices, and it will be repeated,
+if necessary.
+If `values` has some masked values, the initial mask is updated accordingly,
+else the corresponding values are unmasked.
+        """
+        m = self._mask
+        # Hard mask: Get rid of the values/indices that fall on masked data
+        if self._hardmask and self._mask is not nomask:
+            mask = self._mask[indices]
+            indices = numeric.asarray(indices)
+            values = numeric.asanyarray(values)
+            values.resize(indices.shape)
+            indices = indices[~mask]
+            values = values[~mask]
+        #....
+        self._data.put(indices, values, mode=mode)
+        #....
+        if m is nomask:
+            m = getmask(values)
+        else:
+            m = m.copy()
+            if getmask(values) is nomask:
+                m.put(indices, False, mode=mode)
+            else:
+                m.put(indices, values._mask, mode=mode)
+            m = make_mask(m, copy=False, small_mask=True)
+        self._mask = m
+    #............................................
+    def ids (self):
+        """Returns the addresses of the data and mask areas."""
+        return (self.ctypes.data, self._mask.ctypes.data)
+    #............................................
+    def all(self, axis=None, out=None):
+        """a.all(axis) returns True if all entries along the axis are True.
+    Returns False otherwise. If axis is None, uses the flattened array.
+    Masked data are considered as True during computation.
+    Outputs a masked array, where the mask is True if all data are masked along the axis.
+    Note: the out argument is not really operational...
+        """
+        d = self.filled(True).all(axis=axis, out=out).view(type(self))
+        if d.ndim > 0:
+            d.__setmask__(self._mask.all(axis))
+        return d
+
+    def any(self, axis=None, out=None):
+        """a.any(axis) returns True if some or all entries along the axis are True.
+    Returns False otherwise. If axis is None, uses the flattened array.
+    Masked data are considered as False during computation.
+    Outputs a masked array, where the mask is True if all data are masked along the axis.
+    Note: the out argument is not really operational...
+        """
+        d = self.filled(False).any(axis=axis, out=out).view(type(self))
+        if d.ndim > 0:
+            d.__setmask__(self._mask.all(axis))
+        return d
+
+    def nonzero(self):
+        """a.nonzero() returns a tuple of arrays
+
+    Returns a tuple of arrays, one for each dimension of a,
+    containing the indices of the non-zero elements in that
+    dimension.  The corresponding non-zero values can be obtained
+    with
+        a[a.nonzero()].
+
+    To group the indices by element, rather than dimension, use
+        transpose(a.nonzero())
+    instead. The result of this is always a 2d array, with a row for
+    each non-zero element."""
+        return numeric.asarray(self.filled(0)).nonzero()
+    #............................................
+    def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None):
+        """a.trace(offset=0, axis1=0, axis2=1, dtype=None, out=None)
+Returns the sum along the offset diagonal of the array's indicated `axis1` and `axis2`.
+        """
+        # TODO: What are we doing with `out`?
+        m = self._mask
+        if m is nomask:
+            result = super(MaskedArray, self).trace(offset=offset, axis1=axis1,
+                                                    axis2=axis2, out=out)
+            return result.astype(dtype)
+        else:
+            D = self.diagonal(offset=offset, axis1=axis1, axis2=axis2)
+            return D.astype(dtype).sum(axis=None)
+    #............................................
+    def sum(self, axis=None, dtype=None):
+        """a.sum(axis=None, dtype=None)
+Sums the array `a` over the given axis `axis`.
+Masked values are set to 0.
+If `axis` is None, applies to a flattened version of the array.
+    """
+        if self._mask is nomask:
+            mask = nomask
+        else:
+            mask = self._mask.all(axis)
+            if (not mask.ndim) and mask:
+                return masked
+        result = self.filled(0).sum(axis, dtype=dtype).view(type(self))
+        if result.ndim > 0:
+            result.__setmask__(mask)
+        return result
+
+    def cumsum(self, axis=None, dtype=None):
+        """a.cumsum(axis=None, dtype=None)
+Returns the cumulative sum of the elements of array `a` along the given axis `axis`.
+Masked values are set to 0.
+If `axis` is None, applies to a flattened version of the array.
+        """
+        result = self.filled(0).cumsum(axis=axis, dtype=dtype).view(type(self))
+        result.__setmask__(self.mask)
+        return result
+
+    def prod(self, axis=None, dtype=None):
+        """a.prod(axis=None, dtype=None)
+Returns the product of the elements of array `a` along the given axis `axis`.
+Masked elements are set to 1.
+If `axis` is None, applies to a flattened version of the array.
+        """
+        if self._mask is nomask:
+            mask = nomask
+        else:
+            mask = self._mask.all(axis)
+            if (not mask.ndim) and mask:
+                return masked
+        result = self.filled(1).prod(axis=axis, dtype=dtype).view(type(self))
+        if result.ndim:
+            result.__setmask__(mask)
+        return result
+    product = prod
+
+    def cumprod(self, axis=None, dtype=None):
+        """a.cumprod(axis=None, dtype=None)
+Returns the cumulative product of the elements of array `a` along the given axis `axis`.
+Masked values are set to 1.
+If `axis` is None, applies to a flattened version of the array.
+        """
+        result = self.filled(1).cumprod(axis=axis, dtype=dtype).view(type(self))
+        result.__setmask__(self.mask)
+        return result
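+
+    # A minimal usage sketch of the reductions above (the import name
+    # `maskedarray` is an assumption):
+    #     import maskedarray as MA
+    #     x = MA.array([1., 2., 3., 4.], mask=[0, 0, 1, 0])
+    #     x.sum()       # masked entries count as 0 -> 7.0
+    #     x.prod()      # masked entries count as 1 -> 8.0
+    #     x.cumsum()    # running sum; the original mask is carried over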
+
+    def mean(self, axis=None, dtype=None):
+        """a.mean(axis=None, dtype=None)
+
+    Averages the array over the given axis.  If the axis is None,
+    averages over all dimensions of the array.  Equivalent to
+
+      a.sum(axis, dtype) / size(a, axis).
+
+    The optional dtype argument is the data type for intermediate
+    calculations in the sum.
+
+    Returns a masked array, of the same class as a.
+        """
+        if self._mask is nomask:
+            return super(MaskedArray, self).mean(axis=axis, dtype=dtype)
+        else:
+            dsum = self.sum(axis=axis, dtype=dtype)
+            cnt = self.count(axis=axis)
+            return dsum*1./cnt
+
+    def anom(self, axis=None, dtype=None):
+        """a.anom(axis=None, dtype=None)
+    Returns the anomalies, i.e. the deviations from the average.
+            """
+        m = self.mean(axis, dtype)
+        if axis is None:
+            return (self - m)
+        else:
+            return (self - expand_dims(m,axis))
+
+    def var(self, axis=None, dtype=None):
+        """a.var(axis=None, dtype=None)
+Returns the variance, a measure of the spread of a distribution.
+
+The variance is the average of the squared deviations from the mean,
+i.e. var = mean((x - x.mean())**2).
+        """
+        if self._mask is nomask:
+            # TODO: Do we keep super, or var _data and take a view ?
+            return super(MaskedArray, self).var(axis=axis, dtype=dtype)
+        else:
+            cnt = self.count(axis=axis)
+            danom = self.anom(axis=axis, dtype=dtype)
+            danom *= danom
+            dvar = numeric.array(danom.sum(axis) / cnt).view(type(self))
+            if axis is not None:
+                dvar._mask = mask_or(self._mask.all(axis), (cnt==1))
+            return dvar
+
+    def std(self, axis=None, dtype=None):
+        """a.std(axis=None, dtype=None)
+Returns the standard deviation, a measure of the spread of a distribution.
+
+The standard deviation is the square root of the average of the squared
+deviations from the mean, i.e. std = sqrt(mean((x - x.mean())**2)).
+        """
+        dvar = self.var(axis,dtype)
+        if axis is not None or dvar is not masked:
+            dvar = sqrt(dvar)
+        return dvar
+    #............................................
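+    # A minimal usage sketch (import name assumed):
+    #     x = MA.array([1., 2., 4.], mask=[0, 1, 0])
+    #     x.mean()      # average of the unmasked data -> 2.5
+    #     x.anom()      # deviations from that mean: [-1.5 -- 1.5]
+    #     x.std()       # standard deviation of the unmasked data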
+    def argsort(self, axis=None, fill_value=None, kind='quicksort',
+                order=None):
+        """Returns an array of indices that sort 'a' along the specified axis.
+    Masked values are filled beforehand to `fill_value`.
+    If `fill_value` is None, uses the default for the data type.
+    Returns a numpy array.
+
+:Keywords:
+    `axis` : Integer *[None]*
+        Axis to be indirectly sorted. If None, the flattened array is used.
+    `kind` : String *['quicksort']*
+        Sorting algorithm (default 'quicksort')
+        Possible values: 'quicksort', 'mergesort', or 'heapsort'
+
+    Returns: array of indices that sort 'a' along the specified axis.
+
+    This method executes an indirect sort along the given axis using the
+    algorithm specified by the kind keyword. It returns an array of indices of
+    the same shape as 'a' that index data along the given axis in sorted order.
+
+    The various sorts are characterized by average speed, worst case
+    performance, need for work space, and whether they are stable. A stable
+    sort keeps items with the same key in the same relative order. The three
+    available algorithms have the following properties:
+
+    |------------------------------------------------------|
+    |    kind   | speed |  worst case | work space | stable|
+    |------------------------------------------------------|
+    |'quicksort'|   1   | O(n^2)      |     0      |   no  |
+    |'mergesort'|   2   | O(n*log(n)) |    ~n/2    |   yes |
+    |'heapsort' |   3   | O(n*log(n)) |     0      |   no  |
+    |------------------------------------------------------|
+
+    All the sort algorithms make temporary copies of the data when the sort is not
+    along the last axis. Consequently, sorts along the last axis are faster and use
+    less space than sorts along other axes.
+        """
+        if fill_value is None:
+            fill_value = default_fill_value(self)
+        d = self.filled(fill_value).view(ndarray)
+        return d.argsort(axis=axis, kind=kind, order=order)
+    #........................
+    def argmin(self, axis=None, fill_value=None):
+        """Returns an ndarray of indices for the minimum values of `a` along the
+    specified axis.
+    Masked values are treated as if they had the value `fill_value`.
+    If `fill_value` is None, the default for the data type is used.
+    Returns a numpy array.
+
+:Keywords:
+    `axis` : Integer *[None]*
+        Axis to be indirectly sorted. If None, the flattened array is used.
+    `fill_value` : var *[None]*
+        Default filling value. If None, uses the minimum default for the data type.
+        """
+        if fill_value is None:
+            fill_value = minimum_fill_value(self)
+        d = self.filled(fill_value).view(ndarray)
+        return d.argmin(axis)
+    #........................
+    def argmax(self, axis=None, fill_value=None):
+        """Returns the array of indices for the maximum values of `a` along the
+    specified axis.
+    Masked values are treated as if they had the value `fill_value`.
+    If `fill_value` is None, the maximum default for the data type is used.
+    Returns a numpy array.
+
+:Keywords:
+    `axis` : Integer *[None]*
+        Axis to be indirectly sorted. If None, the flattened array is used.
+    `fill_value` : var *[None]*
+        Default filling value. If None, uses the data type default.
+        """
+        if fill_value is None:
+            fill_value = maximum_fill_value(self._data)
+        d = self.filled(fill_value).view(ndarray)
+        return d.argmax(axis)
+
+    def sort(self, axis=-1, kind='quicksort', order=None,
+             endwith=True, fill_value=None):
+        """
+        Sort a along the given axis.
+
+    Keyword arguments:
+
+    axis  -- axis to be sorted (default -1)
+    kind  -- sorting algorithm (default 'quicksort')
+             Possible values: 'quicksort', 'mergesort', or 'heapsort'.
+    order -- If a has fields defined, then the order keyword can be the
+             field name to sort on or a list (or tuple) of field names
+             to indicate the order that fields should be used to define
+             the sort.
+    endwith -- Boolean flag indicating whether missing values (if any) should
+             be forced in the upper indices (at the end of the array) or
+             lower indices (at the beginning).
+
+    Returns: None.
+
+    This method sorts 'a' in place along the given axis using the algorithm
+    specified by the kind keyword.
+
+    The various sorts may be characterized by average speed, worst case
+    performance, need for work space, and whether they are stable. A stable
+    sort keeps items with the same key in the same relative order and is most
+    useful when used with argsort where the key might differ from the items
+    being sorted. The three available algorithms have the following properties:
+
+    |------------------------------------------------------|
+    |    kind   | speed |  worst case | work space | stable|
+    |------------------------------------------------------|
+    |'quicksort'|   1   | O(n^2)      |     0      |   no  |
+    |'mergesort'|   2   | O(n*log(n)) |    ~n/2    |   yes |
+    |'heapsort' |   3   | O(n*log(n)) |     0      |   no  |
+    |------------------------------------------------------|
+
+        """
+        if self._mask is nomask:
+            ndarray.sort(self,axis=axis, kind=kind, order=order)
+        else:
+            if fill_value is None:
+                if endwith:
+                    filler = minimum_fill_value(self)
+                else:
+                    filler = maximum_fill_value(self)
+            else:
+                filler = fill_value
+            idx = numpy.indices(self.shape)
+            idx[axis] = self.filled(filler).argsort(axis=axis,kind=kind,order=order)
+            idx_l = idx.tolist()
+            tmp_mask = self._mask[idx_l].flat
+            tmp_data = self._data[idx_l].flat
+            self.flat = tmp_data
+            self._mask.flat = tmp_mask
+        return
+    #............................................
+    def min(self, axis=None, fill_value=None):
+        """Returns the minimum over the given axis.
+If `axis` is None, applies to the flattened array. Masked values are filled
+with `fill_value` during processing. If `fill_value` is None, it is set to the
+minimum_fill_value corresponding to the data type."""
+        mask = self._mask
+        # Check all/nothing case ......
+        if mask is nomask:
+            return super(MaskedArray, self).min(axis=axis)
+        elif (not mask.ndim) and mask:
+            return masked
+        # Get the mask ................
+        if axis is None:
+            mask = umath.logical_and.reduce(mask.flat)
+        else:
+            mask = umath.logical_and.reduce(mask, axis=axis)
+        # Get the fill value ..........
+        if fill_value is None:
+            fill_value = minimum_fill_value(self)
+        # Get the data ................
+        result = self.filled(fill_value).min(axis=axis).view(type(self))
+        if result.ndim > 0:
+            result._mask = mask
+        return result
+    #........................
+    def max(self, axis=None, fill_value=None):
+        """Returns the maximum over the given axis.
+If `axis` is None, applies to the flattened array. Masked values are filled
+with `fill_value` during processing. If `fill_value` is None, it is set to the
+maximum_fill_value corresponding to the data type."""
+        mask = self._mask
+        # Check all/nothing case ......
+        if mask is nomask:
+            return super(MaskedArray, self).max(axis=axis)
+        elif (not mask.ndim) and mask:
+            return masked
+        # Check the mask ..............
+        if axis is None:
+            mask = umath.logical_and.reduce(mask.flat)
+        else:
+            mask = umath.logical_and.reduce(mask, axis=axis)
+        # Get the fill value ..........
+        if fill_value is None:
+            fill_value = maximum_fill_value(self)
+        # Get the data ................
+        result = self.filled(fill_value).max(axis=axis).view(type(self))
+        if result.ndim > 0:
+            result._mask = mask
+        return result
+    #........................
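+    # A minimal usage sketch (import name assumed):
+    #     x = MA.array([3, -1, 7], mask=[0, 1, 0])
+    #     x.min()       # the masked -1 is filled with a huge value first -> 3
+    #     x.max()       # filled with a tiny value instead -> 7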
+    def ptp(self, axis=None, fill_value=None):
+        """Returns the visible data range (max-min) along the given axis.
+If the axis is `None`, applies on a flattened array. Masked values are filled
+with `fill_value` for processing. If `fill_value` is None, the maximum uses
+the maximum default and the minimum uses the minimum default."""
+        return self.max(axis, fill_value) - self.min(axis, fill_value)
+
+    # Array methods ---------------------------------------
+    conj = conjugate = _arraymethod('conjugate')
+    copy = _arraymethod('copy')
+    diagonal = _arraymethod('diagonal')
+    take = _arraymethod('take')
+    ravel = _arraymethod('ravel')
+    transpose = _arraymethod('transpose')
+    T = property(fget=lambda self:self.transpose())
+    swapaxes = _arraymethod('swapaxes')
+    clip = _arraymethod('clip', onmask=False)
+    compress = _arraymethod('compress')
+    squeeze = _arraymethod('squeeze')
+    #--------------------------------------------
+    def tolist(self, fill_value=None):
+        """Copies the data portion of the array to a hierarchical python list and
+    returns that list. Data items are converted to the nearest compatible Python
+    type.
+    Masked values are converted to `fill_value`. If `fill_value` is None, the
+    corresponding entries in the output list will be None.
+        """
+        if fill_value is not None:
+            return self.filled(fill_value).tolist()
+        result = self.filled().tolist()
+        if self._mask is nomask:
+            return result
+        if self.ndim == 0:
+            return [None]
+        elif self.ndim == 1:
+            maskedidx = self._mask.nonzero()[0].tolist()
+            [operator.setitem(result,i,None) for i in maskedidx]
+        else:
+            for idx in zip(*[i.tolist() for i in self._mask.nonzero()]):
+                tmp = result
+                for i in idx[:-1]:
+                    tmp = tmp[i]
+                tmp[idx[-1]] = None
+        return result
+
+
+    #........................
+    def tostring(self, fill_value=None, order='C'):
+        """a.tostring(order='C', fill_value=None) -> raw copy of array data as a Python string.
+
+    Keyword arguments:
+        order      : order of the data item in the copy {"C","F","A"} (default "C")
+        fill_value : value used in lieu of missing data
+
+    Construct a Python string containing the raw bytes in the array. The order
+    of the data in arrays with ndim > 1 is specified by the 'order' keyword and
+    this keyword overrides the order of the array. The
+    choices are:
+
+        "C"       -- C order (row major)
+        "Fortran" -- Fortran order (column major)
+        "Any"     -- Current order of array.
+        None      -- Same as "Any"
+
+    Masked data are filled with fill_value. If fill_value is None, the data-type-
+    dependent default is used."""
+        return self.filled(fill_value).tostring(order)
+    #--------------------------------------------
+    # Backwards Compatibility. Heck...
+    @property
+    def data(self):
+        """Returns the `_data` part of the MaskedArray."""
+        return self._data
+    def raw_data(self):
+        """Returns the `_data` part of the MaskedArray.
+You should really use `data` instead..."""
+        return self._data
+    #--------------------------------------------
+    # Pickling
+    def __getstate__(self):
+        "Returns the internal state of the masked array, for pickling purposes."
+        state = (1,
+                 self.shape,
+                 self.dtype,
+                 self.flags.fnc,
+                 self._data.tostring(),
+                 getmaskarray(self).tostring(),
+                 self._fill_value,
+                 )
+        return state
+    #
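+    # A minimal pickling sketch (Python 2 era; the import names are assumed):
+    #     import cPickle
+    #     import maskedarray as MA
+    #     x = MA.array([1, 2, 3], mask=[0, 1, 0])
+    #     y = cPickle.loads(cPickle.dumps(x))   # data, mask and fill_value
+    #                                           # survive the round trip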
+    def __setstate__(self, state):
+        """Restores the internal state of the masked array, for pickling purposes.
+    `state` is typically the output of ``__getstate__``, and is a 7-tuple:
+
+        - a version number
+        - a tuple giving the shape of the data
+        - a typecode for the data
+        - a boolean indicating Fortran order
+        - a binary string for the data
+        - a binary string for the mask
+        - the fill value
+        """
+        (ver, shp, typ, isf, raw, msk, flv) = state
+        ndarray.__setstate__(self, (shp, typ, isf, raw))
+        self._mask.__setstate__((shp, dtype(bool), isf, msk))
+        self.fill_value = flv
+    #
+    def __reduce__(self):
+        """Returns a 3-tuple for pickling a MaskedArray."""
+        return (_mareconstruct,
+                (self.__class__, self._baseclass, (0,), 'b', ),
+                self.__getstate__())
+
+
+def _mareconstruct(subtype, baseclass, baseshape, basetype,):
+    """Internal function that builds a new MaskedArray from the information stored
+in a pickle."""
+    _data = ndarray.__new__(baseclass, baseshape, basetype)
+    _mask = ndarray.__new__(ndarray, baseshape, 'b1')
+    return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype, small_mask=False)
+#MaskedArray.__dump__ = dump
+#MaskedArray.__dumps__ = dumps
+
+
+
+#####--------------------------------------------------------------------------
+#---- --- Shortcuts ---
+#####---------------------------------------------------------------------------
+def isMaskedArray(x):
+    "Is x a masked array, that is, an instance of MaskedArray?"
+    return isinstance(x, MaskedArray)
+isarray = isMaskedArray
+isMA = isMaskedArray  #backward compatibility
+#masked = MaskedArray(0, int, mask=1)
+masked_singleton = MaskedArray(0, dtype=int_, mask=True)
+masked = masked_singleton
+
+masked_array = MaskedArray
+def array(data, dtype=None, copy=False, order=False, mask=nomask, subok=True,
+          keep_mask=True, small_mask=True, hard_mask=None, fill_value=None):
+    """array(data, dtype=None, copy=False, order=False, mask=nomask,
+             keep_mask=True, small_mask=True, fill_value=None)
+Acts as a shortcut to MaskedArray, with options in a different order for
+convenience and backwards compatibility.
+    """
+    #TODO: we should try to put 'order' somewhere
+    return MaskedArray(data, mask=mask, dtype=dtype, copy=copy, subok=subok,
+                       keep_mask=keep_mask, small_mask=small_mask,
+                       hard_mask=hard_mask, fill_value=fill_value)
+
+def is_masked(x):
+    """Returns whether x has some masked values."""
+    m = getmask(x)
+    if m is nomask:
+        return False
+    elif m.any():
+        return True
+    return False
+
+
+#####---------------------------------------------------------------------------
+#---- --- Extrema functions ---
+#####---------------------------------------------------------------------------
+class _extrema_operation(object):
+    "Generic class for maximum/minimum functions."
+    def __call__(self, a, b=None):
+        "Executes the call behavior."
+        if b is None:
+            return self.reduce(a)
+        return where(self.compare(a, b), a, b)
+    #.........
+    def reduce(self, target, axis=None):
+        """Reduces target along the given axis."""
+        m = getmask(target)
+        if axis is not None:
+            kargs = { 'axis' : axis }
+        else:
+            kargs = {}
+            target = target.ravel()
+            if not (m is nomask):
+                m = m.ravel()
+        if m is nomask:
+            t = self.ufunc.reduce(target, **kargs)
+        else:
+            target = target.filled(self.fill_value_func(target)).view(type(target))
+            t = self.ufunc.reduce(target, **kargs)
+            m = umath.logical_and.reduce(m, **kargs)
+            if hasattr(t, '_mask'):
+                t._mask = m
+            elif m:
+                t = masked
+        return t
+    #.........
+    def outer (self, a, b):
+        "Returns the function applied to the outer product of a and b."
+        ma = getmask(a)
+        mb = getmask(b)
+        if ma is nomask and mb is nomask:
+            m = nomask
+        else:
+            ma = getmaskarray(a)
+            mb = getmaskarray(b)
+            m = logical_or.outer(ma, mb)
+        result = self.ufunc.outer(filled(a), filled(b)).view(MaskedArray)
+        result._mask = m
+        return result
+#............................
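+# A minimal usage sketch of the extrema objects built on the classes below
+# (import name assumed):
+#     a = MA.array([1, 5], mask=[0, 1])
+#     b = MA.array([4, 2])
+#     MA.minimum(a, b)   # elementwise: [1 --]; masked inputs stay masked
+#     MA.minimum(a)      # single-argument form reduces to the scalar minimum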
+class _minimum_operation(_extrema_operation):
+    "Object to calculate minima"
+    def __init__ (self):
+        """minimum(a, b) or minimum(a)
+In the one-argument case, returns the scalar minimum.
+        """
+        self.ufunc = umath.minimum
+        self.afunc = amin
+        self.compare = less
+        self.fill_value_func = minimum_fill_value
+#............................
+class _maximum_operation(_extrema_operation):
+    "Object to calculate maxima"
+    def __init__ (self):
+        """maximum(a, b) or maximum(a)
+In the one-argument case, returns the scalar maximum.
+        """
+        self.ufunc = umath.maximum
+        self.afunc = amax
+        self.compare = greater
+        self.fill_value_func = maximum_fill_value
+#..........................................................
+def min(array, axis=None, out=None):
+    """Returns the minima along the given axis.
+If `axis` is None, applies to the flattened array."""
+    if out is not None:
+        raise TypeError("Output arrays are not supported for masked arrays")
+    if axis is None:
+        return minimum(array)
+    else:
+        return minimum.reduce(array, axis)
+#............................
+def max(obj, axis=None, out=None):
+    """Returns the maxima along the given axis.
+If `axis` is None, applies to the flattened array."""
+    if out is not None:
+        raise TypeError("Output arrays are not supported for masked arrays")
+    if axis is None:
+        return maximum(obj)
+    else:
+        return maximum.reduce(obj, axis)
+#.............................
+def ptp(obj, axis=None):
+    """a.ptp(axis=None) = a.max(axis)-a.min(axis)"""
+    try:
+        return obj.max(axis)-obj.min(axis)
+    except AttributeError:
+        return max(obj, axis=axis) - min(obj, axis=axis)
+
+
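+# A minimal usage sketch (import name assumed):
+#     x = MA.array([[1, 9], [4, 4]], mask=[[0, 1], [0, 0]])
+#     MA.max(x)          # maximum over the unmasked data -> 4
+#     MA.ptp(x)          # spread of the unmasked data: 4 - 1 -> 3
+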
+#####---------------------------------------------------------------------------
+#---- --- Definition of functions from the corresponding methods ---
+#####---------------------------------------------------------------------------
+class _frommethod:
+    """Defines functions from existing MaskedArray methods.
+:ivar _methodname (String): Name of the method to transform.
+    """
+    def __init__(self, methodname):
+        self._methodname = methodname
+        self.__doc__ = self.getdoc()
+    def getdoc(self):
+        "Returns the doc of the function (from the doc of the method)."
+        try:
+            return getattr(MaskedArray, self._methodname).__doc__
+        except:
+            return getattr(numpy, self._methodname).__doc__
+    def __call__(self, a, *args, **params):
+        if isinstance(a, MaskedArray):
+            return getattr(a, self._methodname).__call__(*args, **params)
+        #FIXME ----
+        #As x is not a MaskedArray, we transform it to a ndarray with asarray
+        #... and call the corresponding method.
+        #Except that sometimes it doesn't work (try reshape([1,2,3,4],(2,2)))
+        #we end up with a "SystemError: NULL result without error in PyObject_Call"
+        #A dirty trick is then to call the initial numpy function...
+        method = getattr(fromnumeric.asarray(a), self._methodname)
+        try:
+            return method(*args, **params)
+        except SystemError:
+            return getattr(numpy,self._methodname).__call__(a, *args, **params)
+
+all = _frommethod('all')
+anomalies = anom = _frommethod('anom')
+any = _frommethod('any')
+conjugate = _frommethod('conjugate')
+ids = _frommethod('ids')
+nonzero = _frommethod('nonzero')
+diagonal = _frommethod('diagonal')
+maximum = _maximum_operation()
+mean = _frommethod('mean')
+minimum = _minimum_operation()
+product = _frommethod('prod')
+ptp = _frommethod('ptp')
+ravel = _frommethod('ravel')
+repeat = _frommethod('repeat')
+std = _frommethod('std')
+sum = _frommethod('sum')
+swapaxes = _frommethod('swapaxes')
+take = _frommethod('take')
+var = _frommethod('var')
+
+#..............................................................................
+def power(a, b, third=None):
+    """Computes a**b elementwise.
+    Masked values are set to 1."""
+    if third is not None:
+        raise MAError, "3-argument power not supported."
+    ma = getmask(a)
+    mb = getmask(b)
+    m = mask_or(ma, mb)
+    fa = filled(a, 1)
+    fb = filled(b, 1)
+    if fb.dtype.char in typecodes["Integer"]:
+        return masked_array(umath.power(fa, fb), m)
+    md = make_mask((fa < 0), small_mask=1)
+    m = mask_or(m, md)
+    if m is nomask:
+        return masked_array(umath.power(fa, fb))
+    else:
+        fa[m] = 1
+        return masked_array(umath.power(fa, fb), m)
+
+#..............................................................................
+def argsort(a, axis=None, kind='quicksort', order=None, fill_value=None):
+    """Returns an array of indices that sort 'a' along the specified axis.
+    Masked values are filled beforehand to `fill_value`.
+    If `fill_value` is None, uses the default for the data type.
+    Returns a numpy array.
+
+:Keywords:
+    `axis` : Integer *[None]*
+        Axis to be indirectly sorted. If None, the flattened array is used.
+    `kind` : String *['quicksort']*
+        Sorting algorithm (default 'quicksort')
+        Possible values: 'quicksort', 'mergesort', or 'heapsort'
+
+    Returns: array of indices that sort 'a' along the specified axis.
+
+    This function executes an indirect sort along the given axis using the
+    algorithm specified by the kind keyword. It returns an array of indices of
+    the same shape as 'a' that index data along the given axis in sorted order.
+
+    The various sorts are characterized by average speed, worst case
+    performance, need for work space, and whether they are stable. A stable
+    sort keeps items with the same key in the same relative order. The three
+    available algorithms have the following properties:
+
+    |------------------------------------------------------|
+    |    kind   | speed |  worst case | work space | stable|
+    |------------------------------------------------------|
+    |'quicksort'|   1   | O(n^2)      |     0      |   no  |
+    |'mergesort'|   2   | O(n*log(n)) |    ~n/2    |   yes |
+    |'heapsort' |   3   | O(n*log(n)) |     0      |   no  |
+    |------------------------------------------------------|
+
+    All the sort algorithms make temporary copies of the data when the sort is not
+    along the last axis. Consequently, sorts along the last axis are faster and use
+    less space than sorts along other axes.
+    """
+    if fill_value is None:
+        fill_value = default_fill_value(a)
+    d = filled(a, fill_value)
+    if axis is None:
+        return d.argsort(kind=kind, order=order)
+    return d.argsort(axis, kind=kind, order=order)
+
+def argmin(a, axis=None, fill_value=None):
+    """Returns the array of indices for the minimum values of `a` along the
+    specified axis.
+    Masked values are treated as if they had the value `fill_value`.
+    If `fill_value` is None, the default for the data type is used.
+    Returns a numpy array.
+
+:Keywords:
+    `axis` : Integer *[None]*
+        Axis to be indirectly sorted. If None, the flattened array is used.
+    `fill_value` : var *[None]*
+        Default filling value. If None, uses the data type default.
+    """
+    if fill_value is None:
+        fill_value = default_fill_value(a)
+    d = filled(a, fill_value)
+    return d.argmin(axis=axis)
+
+def argmax(a, axis=None, fill_value=None):
+    """Returns the array of indices for the maximum values of `a` along the
+    specified axis.
+    Masked values are treated as if they had the value `fill_value`.
+    If `fill_value` is None, the default for the data type is used.
+    Returns a numpy array.
+
+:Keywords:
+    `axis` : Integer *[None]*
+        Axis to be indirectly sorted. If None, the flattened array is used.
+    `fill_value` : var *[None]*
+        Default filling value. If None, uses the data type default.
+    """
+    if fill_value is None:
+        fill_value = default_fill_value(a)
+        try:
+            fill_value = - fill_value
+        except:
+            pass
+    d = filled(a, fill_value)
+    return d.argmax(axis=axis)
+
+def sort(a, axis=-1, kind='quicksort', order=None, endwith=True, fill_value=None):
+    """
+    Returns a copy of `a` sorted along the given axis.
+
+Keyword arguments:
+
+axis  -- axis to be sorted (default -1)
+kind  -- sorting algorithm (default 'quicksort')
+         Possible values: 'quicksort', 'mergesort', or 'heapsort'.
+order -- If a has fields defined, then the order keyword can be the
+         field name to sort on or a list (or tuple) of field names
+         to indicate the order that fields should be used to define
+         the sort.
+endwith -- Boolean flag indicating whether missing values (if any) should
+         be forced in the upper indices (at the end of the array) or
+         lower indices (at the beginning).
+
+Returns: the sorted array.
+
+This function sorts 'a' along the given axis using the algorithm
+specified by the kind keyword.
+
+The various sorts may be characterized by average speed, worst case
+performance, need for work space, and whether they are stable. A stable
+sort keeps items with the same key in the same relative order and is most
+useful when used with argsort where the key might differ from the items
+being sorted. The three available algorithms have the following properties:
+
+|------------------------------------------------------|
+|    kind   | speed |  worst case | work space | stable|
+|------------------------------------------------------|
+|'quicksort'|   1   | O(n^2)      |     0      |   no  |
+|'mergesort'|   2   | O(n*log(n)) |    ~n/2    |   yes |
+|'heapsort' |   3   | O(n*log(n)) |     0      |   no  |
+|------------------------------------------------------|
+
+All the sort algorithms make temporary copies of the data when the sort is
+not along the last axis. Consequently, sorts along the last axis are faster
+and use less space than sorts along other axes.
+
+"""
+    a = numeric.asanyarray(a)
+    if fill_value is None:
+        if endwith:
+            filler = minimum_fill_value(a)
+        else:
+            filler = maximum_fill_value(a)
+    else:
+        filler = fill_value
+    indx = numpy.indices(a.shape).tolist()
+    indx[axis] = filled(a,filler).argsort(axis=axis,kind=kind,order=order)
+    return a[indx]
+
+def compressed(x):
+    """Returns a compressed version of a masked array (or the array unchanged
+    if nothing is masked)."""
+    if getmask(x) is nomask:
+        return x
+    else:
+        return x.compressed()
+
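+# A minimal usage sketch (import name assumed):
+#     x = MA.array([3, 1, 2], mask=[0, 1, 0])
+#     MA.sort(x)           # masked entries pushed to the end: [2 3 --]
+#     MA.compressed(x)     # unmasked data only, in original order: [3 2]
+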
+def count(a, axis = None):
+    "Count of the non-masked elements in a, or along a certain axis."
+    a = masked_array(a)
+    return a.count(axis)
+
+def concatenate(arrays, axis=0):
+    "Concatenates the arrays along the given axis."
+    d = numeric.concatenate([filled(a) for a in arrays], axis)
+    rcls = get_masked_subclass(*arrays)
+    data = d.view(rcls)
+    for x in arrays:
+        if getmask(x) is not nomask:
+            break
+    else:
+        return data
+    dm = numeric.concatenate([getmaskarray(a) for a in arrays], axis)
+    dm = make_mask(dm, copy=False, small_mask=True)
+    data._mask = dm
+    return data
+
+def expand_dims(x,axis):
+    """Expands the shape of `x` by including a new axis before the given one."""
+    result = n_expand_dims(x,axis)
+    if isinstance(x, MaskedArray):
+        new_shape = result.shape
+        result = x.view()
+        result.shape = new_shape
+        if result._mask is not nomask:
+            result._mask.shape = new_shape
+    return result
+
+#......................................
+def left_shift (a, n):
+    "Left-shifts `a` by `n` bits."
+    m = getmask(a)
+    if m is nomask:
+        d = umath.left_shift(filled(a), n)
+        return masked_array(d)
+    else:
+        d = umath.left_shift(filled(a, 0), n)
+        return masked_array(d, mask=m)
+
+def right_shift (a, n):
+    "Right-shifts `a` by `n` bits."
+    m = getmask(a)
+    if m is nomask:
+        d = umath.right_shift(filled(a), n)
+        return masked_array(d)
+    else:
+        d = umath.right_shift(filled(a, 0), n)
+        return masked_array(d, mask=m)
+#......................................
+def put(a, indices, values, mode='raise'):
+    """Sets storage-indexed locations to corresponding values.
+    Values and indices are filled if necessary."""
+    # We can't use 'frommethod', the order of arguments is different
+    try:
+        return a.put(indices, values, mode=mode)
+    except AttributeError:
+        return fromnumeric.asarray(a).put(indices, values, mode=mode)
+
+def putmask(a, mask, values): #, mode='raise'):
+    """`putmask(a, mask, v)` results in `a = v` for all places where `mask` is true.
+If `v` is shorter than `mask`, it will be repeated as necessary.
+In particular `v` can be a scalar or a length 1 array."""
+    # We can't use 'frommethod', the order of arguments is different
+    try:
+        return a.putmask(values, mask)
+    except AttributeError:
+        return fromnumeric.asarray(a).putmask(values, mask)
+
+def transpose(a,axes=None):
+    """Returns a view of the array with dimensions permuted according to axes.
+If `axes` is None (default), returns the array with dimensions reversed.
+    """
+    #We can't use 'frommethod', as 'transpose' doesn't take keywords
+    try:
+        return a.transpose(axes)
+    except AttributeError:
+        return fromnumeric.asarray(a).transpose(axes)
+
+def reshape(a, new_shape):
+    """Changes the shape of the array `a` to `new_shape`."""
+    #We can't use 'frommethod': it complains about some parameters.
+    try:
+        return a.reshape(new_shape)
+    except AttributeError:
+        return fromnumeric.asarray(a).reshape(new_shape)
+
+def resize(x, new_shape):
+    """resize(a,new_shape) returns a new array with the specified shape.
+    The total size of the original array can be any size.
+    The new array is filled with repeated copies of a. If a was masked, the new
+    array will be masked, and the new mask will be a repetition of the old one.
+    """
+    # We can't use _frommethod here, as N.resize does not handle masked arrays.
+    m = getmask(x)
+    if m is not nomask:
+        m = fromnumeric.resize(m, new_shape)
+    result = fromnumeric.resize(x, new_shape).view(get_masked_subclass(x))
+    if result.ndim:
+        result._mask = m
+    return result
+
+
+#................................................
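+# A minimal usage sketch (import name assumed):
+#     x = MA.array([1, 2, 3, 4], mask=[0, 0, 1, 0])
+#     MA.put(x, [0, 3], [10, 40])    # storage-indexed assignment; slots 0 and 3
+#                                    # are overwritten and unmasked
+#     MA.resize(x, (2, 4))           # data and mask are both tiled to the new shape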
+def rank(obj):
+    """Returns the rank of `obj`, i.e. its number of dimensions (not a matrix
+rank). The rank of a scalar is zero."""
+    return fromnumeric.rank(filled(obj))
+#
+def shape(obj):
+    """Returns the shape of `obj` (as a function call, which also works on
+nested sequences).
+    """
+    return fromnumeric.shape(filled(obj))
+#
+def size(obj, axis=None):
+    """Returns the number of elements in the array along the given axis,
+or in the whole sequence if `axis` is None.
+    """
+    return fromnumeric.size(filled(obj), axis)
+#................................................
+
+#####--------------------------------------------------------------------------
+#---- --- Extra functions ---
+#####--------------------------------------------------------------------------
+def where(condition, x, y):
+    """where(condition, x, y) is x where condition is nonzero, y otherwise.
+    condition must be convertible to an integer array.
+    The result always has the shape of condition.
+    The type depends on x and y. An element of the result is masked where
+    the chosen value (from x or y) is itself masked, or where condition is
+    masked.
+    """
+    fc = filled(not_equal(condition, 0), 0)
+    xv = filled(x)
+    xm = getmask(x)
+    yv = filled(y)
+    ym = getmask(y)
+    d = numeric.choose(fc, (yv, xv))
+    md = numeric.choose(fc, (ym, xm))
+    m = getmask(condition)
+    m = make_mask(mask_or(m, md), copy=False, small_mask=True)
+    return masked_array(d, mask=m)
+
+def choose(indices, t, out=None, mode='raise'):
+    "Returns an array shaped like indices, with elements chosen from t."
+    #TODO: implement options `out` and `mode`, if possible.
+    def fmask(x):
+        "Returns the filled array, or True if ``masked``."
+        if x is masked:
+            return 1
+        return filled(x)
+    def nmask(x):
+        "Returns the mask, True if ``masked``, False if ``nomask``."
+        if x is masked:
+            return 1
+        m = getmask(x)
+        if m is nomask:
+            return 0
+        return m
+    c = filled(indices, 0)
+    masks = [nmask(x) for x in t]
+    a = [fmask(x) for x in t]
+    d = numeric.choose(c, a)
+    m = numeric.choose(c, masks)
+    m = make_mask(mask_or(m, getmask(indices)), copy=0, small_mask=1)
+    return masked_array(d, mask=m)
+
+def round_(a, decimals=0, out=None):
+    """Rounds a copy of `a` to 'decimals' places, and returns a reference to
+    the result.
+
+    Keyword arguments:
+        decimals -- number of decimals to round to (default 0). May be negative.
+        out -- existing array to use for output (default: a copy of a).
+
+    Return:
+        Reference to `out`; if `out` is None, a rounded copy of the original
+        array `a`.
+
+    Round to the specified number of decimals. When 'decimals' is negative, it
+    specifies the number of positions to the left of the decimal point. The
+    real and imaginary parts of complex numbers are rounded separately.
+    Nothing is done if the array is not of float type and 'decimals' is greater
+    than or equal to 0."""
+    result = fromnumeric.round_(filled(a), decimals, out)
+    if isinstance(a, MaskedArray):
+        result = result.view(type(a))
+        result._mask = a._mask
+    else:
+        result = result.view(MaskedArray)
+    return result
+
+def arange(start, stop=None, step=1, dtype=None):
+    """Just like range(), except it returns an array whose type can be
+    specified by the keyword argument dtype.
+    """
+    return array(numeric.arange(start, stop, step, dtype), mask=nomask)
+
+def inner(a, b):
+    """inner(a,b) returns the dot product of two arrays, which has
+    shape a.shape[:-1] + b.shape[:-1], with elements computed by summing the
+    product of the elements from the last dimensions of a and b.
+    Masked elements are replaced by zeros.
+ """ + fa = filled(a, 0) + fb = filled(b, 0) + if len(fa.shape) == 0: + fa.shape = (1,) + if len(fb.shape) == 0: + fb.shape = (1,) + return masked_array(numeric.inner(fa, fb)) +innerproduct = inner + +def outer(a, b): + """outer(a,b) = {a[i]*b[j]}, has shape (len(a),len(b))""" + fa = filled(a, 0).ravel() + fb = filled(b, 0).ravel() + d = numeric.outer(fa, fb) + ma = getmask(a) + mb = getmask(b) + if ma is nomask and mb is nomask: + return masked_array(d) + ma = getmaskarray(a) + mb = getmaskarray(b) + m = make_mask(1-numeric.outer(1-ma, 1-mb), copy=0) + return masked_array(d, mask=m) +outerproduct = outer + +def allequal (a, b, fill_value=True): + """ +Returns `True` if all entries of a and b are equal, using +fill_value as a truth value where either or both are masked. + """ + m = mask_or(getmask(a), getmask(b)) + if m is nomask: + x = filled(a) + y = filled(b) + d = umath.equal(x, y) + return d.all() + elif fill_value: + x = filled(a) + y = filled(b) + d = umath.equal(x, y) + dm = array(d, mask=m, copy=False) + return dm.filled(True).all(None) + else: + return False + +def allclose (a, b, fill_value=True, rtol=1.e-5, atol=1.e-8): + """ Returns `True` if all elements of `a` and `b` are equal subject to given tolerances. +If `fill_value` is True, masked values are considered equal. +If `fill_value` is False, masked values considered unequal. +The relative error rtol should be positive and << 1.0 +The absolute error `atol` comes into play for those elements of `b` + that are very small or zero; it says how small `a` must be also. + """ + m = mask_or(getmask(a), getmask(b)) + d1 = filled(a) + d2 = filled(b) + x = filled(array(d1, copy=0, mask=m), fill_value).astype(float) + y = filled(array(d2, copy=0, mask=m), 1).astype(float) + d = umath.less_equal(umath.absolute(x-y), atol + rtol * umath.absolute(y)) + return fromnumeric.alltrue(fromnumeric.ravel(d)) + +#.............................................................................. +def asarray(a, dtype=None): + """asarray(data, dtype) = array(data, dtype, copy=0) +Returns `a` as an masked array. +No copy is performed if `a` is already an array. +Subclasses are converted to base class MaskedArray. + """ + return masked_array(a, dtype=dtype, copy=False, keep_mask=True) + +def empty(new_shape, dtype=float): + """empty((d1,...,dn),dtype=float,order='C') +Returns a new array of shape (d1,...,dn) and given type with all its +entries uninitialized. This can be faster than zeros.""" + return numeric.empty(new_shape, dtype).view(MaskedArray) + +def empty_like(a): + """empty_like(a) +Returns an empty (uninitialized) array of the shape and typecode of a. +Note that this does NOT initialize the returned array. +If you require your array to be initialized, you should use zeros_like().""" + return numeric.empty_like(a).view(MaskedArray) + +def ones(new_shape, dtype=float): + """ones(shape, dtype=None) +Returns an array of the given dimensions, initialized to all ones.""" + return numeric.ones(new_shape, dtype).view(MaskedArray) + +def zeros(new_shape, dtype=float): + """zeros(new_shape, dtype=None) +Returns an array of the given dimensions, initialized to all zeros.""" + return numeric.zeros(new_shape, dtype).view(MaskedArray) + +#####-------------------------------------------------------------------------- +#---- --- Pickling --- +#####-------------------------------------------------------------------------- +def dump(a,F): + """Pickles the MaskedArray `a` to the file `F`. 
+`F` can either be the handle of an existing file, or a string representing a
+file name.
+    """
+    if not hasattr(F, 'readline'):
+        F = open(F, 'w')
+    return cPickle.dump(a, F)
+
+def dumps(a):
+    """Returns a string corresponding to the pickling of the MaskedArray."""
+    return cPickle.dumps(a)
+
+def load(F):
+    """Wrapper around ``cPickle.load`` which accepts either a file-like object
+    or a filename."""
+    if not hasattr(F, 'readline'):
+        F = open(F, 'r')
+    return cPickle.load(F)
+
+def loads(strg):
+    """Loads a pickle from the given string."""
+    return cPickle.loads(strg)
+
+
+################################################################################
+
+if __name__ == '__main__':
+    from testutils import assert_equal, assert_almost_equal
+    if 1:
+        narray = numpy.array
+        pi = numpy.pi
+        x = narray([1., 1., 1., -2., pi/2., 4., 5., -10., 10., 1., 2., 3.])
+        y = narray([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
+        a10 = 10.
+        m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
+        m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
+        xm = masked_array(x, mask=m1)
+        ym = masked_array(y, mask=m2)
+
+    #
+    if 1:
+        n = [0, 0, 1, 0, 1]
+        m = make_mask(n)
+        m2 = make_mask(m)
+        assert(m is m2)
+        m3 = make_mask(m, copy=1)
+        assert(m is not m3)
+
+        x1 = numpy.arange(5)
+        y1 = array(x1, mask=m)
+        #assert( y1._data is x1)
+        assert_equal(y1._data.__array_interface__, x1.__array_interface__)
+        assert( allequal(x1, y1.raw_data()))
+        #assert( y1.mask is m)
+        assert_equal(y1._mask.__array_interface__, m.__array_interface__)
+
+        y1a = array(y1)
+        #assert( y1a.raw_data() is y1.raw_data())
+        assert( y1a._data.__array_interface__ == y1._data.__array_interface__)
+        assert( y1a.mask is y1.mask)
+
+        y2 = array(x1, mask=m)
+        #assert( y2.raw_data() is x1)
+        assert (y2._data.__array_interface__ == x1.__array_interface__)
+        #assert( y2.mask is m)
+        assert (y2._mask.__array_interface__ == m.__array_interface__)
+        assert( y2[2] is masked)
+        y2[2] = 9
+        assert( y2[2] is not masked)
+        #assert( y2.mask is not m)
+        assert (y2._mask.__array_interface__ != m.__array_interface__)
+        assert( allequal(y2.mask, 0))
\ No newline at end of file

Added: trunk/scipy/sandbox/maskedarray/bench.py
===================================================================
--- trunk/scipy/sandbox/maskedarray/bench.py	2007-09-26 19:48:46 UTC (rev 3370)
+++ trunk/scipy/sandbox/maskedarray/bench.py	2007-09-27 03:34:44 UTC (rev 3371)
@@ -0,0 +1,199 @@
+#! python
+
+import timeit
+#import IPython.ipapi
+#ip = IPython.ipapi.get()
+#from IPython import ipmagic
+import numpy
+import maskedarray
+from maskedarray import filled
+from maskedarray.testutils import assert_equal
+
+
+#####---------------------------------------------------------------------------
+#---- --- Global variables ---
+#####---------------------------------------------------------------------------
+
+# Small arrays ..................................
+xs = numpy.random.uniform(-1,1,6).reshape(2,3)
+ys = numpy.random.uniform(-1,1,6).reshape(2,3)
+zs = xs + 1j * ys
+m1 = [[True, False, False], [False, False, True]]
+m2 = [[True, False, True], [False, False, True]]
+nmxs = numpy.ma.array(xs, mask=m1)
+nmys = numpy.ma.array(ys, mask=m2)
+nmzs = numpy.ma.array(zs, mask=m1)
+mmxs = maskedarray.array(xs, mask=m1)
+mmys = maskedarray.array(ys, mask=m2)
+mmzs = maskedarray.array(zs, mask=m1)
+# Big arrays ....................................
+xl = numpy.random.uniform(-1,1,100*100).reshape(100,100) +yl = numpy.random.uniform(-1,1,100*100).reshape(100,100) +zl = xl + 1j * yl +maskx = xl > 0.8 +masky = yl < -0.8 +nmxl = numpy.ma.array(xl, mask=maskx) +nmyl = numpy.ma.array(yl, mask=masky) +nmzl = numpy.ma.array(zl, mask=maskx) +mmxl = maskedarray.array(xl, mask=maskx, shrink=True) +mmyl = maskedarray.array(yl, mask=masky, shrink=True) +mmzl = maskedarray.array(zl, mask=maskx, shrink=True) + +#####--------------------------------------------------------------------------- +#---- --- Functions --- +#####--------------------------------------------------------------------------- + +def timer(s, v='', nloop=500, nrep=3): + units = ["s", "ms", "\xb5s", "ns"] + scaling = [1, 1e3, 1e6, 1e9] + print "%s : %-50s : " % (v,s), + varnames = ["%ss,nm%ss,mm%ss,%sl,nm%sl,mm%sl" % tuple(x*6) for x in 'xyz'] + setup = 'from __main__ import numpy, maskedarray, %s' % ','.join(varnames) + Timer = timeit.Timer(stmt=s, setup=setup) + best = min(Timer.repeat(nrep, nloop)) / nloop + if best > 0.0: + order = min(-int(numpy.floor(numpy.log10(best)) // 3), 3) + else: + order = 3 + print "%d loops, best of %d: %.*g %s per loop" % (nloop, nrep, + 3, + best * scaling[order], + units[order]) +# ip.magic('timeit -n%i %s' % (nloop,s)) + + + +def compare_functions_1v(func, nloop=500, test=True, + xs=xs, nmxs=nmxs, mmxs=mmxs, + xl=xl, nmxl=nmxl, mmxl=mmxl): + funcname = func.__name__ + print "-"*50 + print "%s on small arrays" % funcname + if test: + assert_equal(filled(eval("numpy.ma.%s(nmxs)" % funcname),0), + filled(eval("maskedarray.%s(mmxs)" % funcname),0)) + for (module, data) in zip(("numpy", "numpy.ma","maskedarray","maskedarray._nfcore"), + ("xs","nmxs","mmxs","mmxs")): + timer("%(module)s.%(funcname)s(%(data)s)" % locals()) + # + print "%s on large arrays" % funcname + if test: + assert_equal(filled(eval("numpy.ma.%s(nmxl)" % funcname),0), + filled(eval("maskedarray.%s(mmxl)" % funcname),0)) + for (module, data) in zip(("numpy", "numpy.ma","maskedarray","maskedarray._nfcore"), + ("xl","nmxl","mmxl","mmxl")): + timer("%(module)s.%(funcname)s(%(data)s)" % locals()) + return + +def compare_methods(methodname, args, vars='x', nloop=500, test=True, + xs=xs, nmxs=nmxs, mmxs=mmxs, + xl=xl, nmxl=nmxl, mmxl=mmxl): + print "-"*50 + print "%s on small arrays" % methodname + if test: + assert_equal(filled(eval("nm%ss.%s(%s)" % (vars,methodname,args)),0), + filled(eval("mm%ss.%s(%s)" % (vars,methodname,args)),0)) + for (data, ver) in zip(["nm%ss" % vars, "mm%ss" % vars], ('numpy.ma ','maskedarray')): + timer("%(data)s.%(methodname)s(%(args)s)" % locals(), v=ver, nloop=nloop) + # + print "%s on large arrays" % methodname + if test: + assert_equal(filled(eval("nm%sl.%s(%s)" % (vars,methodname,args)),0), + filled(eval("mm%sl.%s(%s)" % (vars,methodname,args)),0)) + for (data, ver) in zip(["nm%sl" % vars, "mm%sl" % vars], ('numpy.ma ','maskedarray')): + timer("%(data)s.%(methodname)s(%(args)s)" % locals(), v=ver, nloop=nloop) + return + +def compare_functions_2v(func, nloop=500, test=True, + xs=xs, nmxs=nmxs, mmxs=mmxs, + ys=ys, nmys=nmys, mmys=mmys, + xl=xl, nmxl=nmxl, mmxl=mmxl, + yl=yl, nmyl=nmyl, mmyl=mmyl): + funcname = func.__name__ + print "-"*50 + print "%s on small arrays" % funcname + if test: + assert_equal(filled(eval("numpy.ma.%s(nmxs,nmys)" % funcname),0), + filled(eval("maskedarray.%s(mmxs,mmys)" % funcname),0)) + for (module, data) in zip(("numpy", "numpy.ma","maskedarray","maskedarray._nfcore"), + ("xs,ys","nmxs,nmys","mmxs,mmys","mmxs,mmys")): + 
timer("%(module)s.%(funcname)s(%(data)s)" % locals()) + # + print "%s on large arrays" % funcname + if test: + assert_equal(filled(eval("numpy.ma.%s(nmxl, nmyl)" % funcname),0), + filled(eval("maskedarray.%s(mmxl, mmyl)" % funcname),0)) + for (module, data) in zip(("numpy", "numpy.ma","maskedarray","maskedarray._nfcore"), + ("xl,yl","nmxl,nmyl","mmxl,mmyl","mmxl,mmyl")): + timer("%(module)s.%(funcname)s(%(data)s)" % locals()) + return + + +############################################################################### + + +################################################################################ +if __name__ == '__main__': +# # Small arrays .................................. +# xs = numpy.random.uniform(-1,1,6).reshape(2,3) +# ys = numpy.random.uniform(-1,1,6).reshape(2,3) +# zs = xs + 1j * ys +# m1 = [[True, False, False], [False, False, True]] +# m2 = [[True, False, True], [False, False, True]] +# nmxs = numpy.ma.array(xs, mask=m1) +# nmys = numpy.ma.array(ys, mask=m2) +# nmzs = numpy.ma.array(zs, mask=m1) +# mmxs = maskedarray.array(xs, mask=m1) +# mmys = maskedarray.array(ys, mask=m2) +# mmzs = maskedarray.array(zs, mask=m1) +# # Big arrays .................................... +# xl = numpy.random.uniform(-1,1,100*100).reshape(100,100) +# yl = numpy.random.uniform(-1,1,100*100).reshape(100,100) +# zl = xl + 1j * yl +# maskx = xl > 0.8 +# masky = yl < -0.8 +# nmxl = numpy.ma.array(xl, mask=maskx) +# nmyl = numpy.ma.array(yl, mask=masky) +# nmzl = numpy.ma.array(zl, mask=maskx) +# mmxl = maskedarray.array(xl, mask=maskx, shrink=True) +# mmyl = maskedarray.array(yl, mask=masky, shrink=True) +# mmzl = maskedarray.array(zl, mask=maskx, shrink=True) +# + compare_functions_1v(numpy.sin) + compare_functions_1v(numpy.log) + compare_functions_1v(numpy.sqrt) + #.................................................................... + compare_functions_2v(numpy.multiply) + compare_functions_2v(numpy.divide) + compare_functions_2v(numpy.power) + #.................................................................... + compare_methods('ravel','', nloop=1000) + compare_methods('conjugate','','z', nloop=1000) + compare_methods('transpose','', nloop=1000) + compare_methods('compressed','', nloop=1000) + compare_methods('__getitem__','0', nloop=1000) + compare_methods('__getitem__','(0,0)', nloop=1000) + compare_methods('__getitem__','[0,-1]', nloop=1000) + compare_methods('__setitem__','0, 17', nloop=1000, test=False) + compare_methods('__setitem__','(0,0), 17', nloop=1000, test=False) + #.................................................................... + print "-"*50 + print "__setitem__ on small arrays" + timer('nmxs.__setitem__((-1,0),numpy.ma.masked)', 'numpy.ma ',nloop=10000) + timer('mmxs.__setitem__((-1,0),maskedarray.masked)', 'maskedarray',nloop=10000) + print "-"*50 + print "__setitem__ on large arrays" + timer('nmxl.__setitem__((-1,0),numpy.ma.masked)', 'numpy.ma ',nloop=10000) + timer('mmxl.__setitem__((-1,0),maskedarray.masked)', 'maskedarray',nloop=10000) + #.................................................................... 
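
The timer()/compare_* harness above translates almost directly to current Python. Below is a minimal sketch of the same best-of-n timing idea, assuming only numpy is installed and using numpy.ma as a stand-in for the sandbox maskedarray module:

import timeit
import numpy
import numpy.ma   # stand-in for the sandbox 'maskedarray' module

xs = numpy.random.uniform(-1, 1, (2, 3))
nmxs = numpy.ma.array(xs, mask=[[1, 0, 0], [0, 0, 1]])

def timer(stmt, label, nloop=500, nrep=3):
    # Best-of-nrep per-loop time, as in the timer() helper above
    best = min(timeit.Timer(stmt, globals=globals()).repeat(nrep, nloop)) / nloop
    print("%-12s: %.3g us per loop" % (label, best * 1e6))

timer("numpy.sin(xs)", "numpy")
timer("numpy.ma.sin(nmxs)", "numpy.ma")
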
+ print "-"*50 + print "where on small arrays" + assert_equal(eval("numpy.ma.where(nmxs>2,nmxs,nmys)"), + eval("maskedarray.where(mmxs>2, mmxs,mmys)")) + timer('numpy.ma.where(nmxs>2,nmxs,nmys)', 'numpy.ma ',nloop=1000) + timer('maskedarray.where(mmxs>2, mmxs,mmys)', 'maskedarray',nloop=1000) + print "-"*50 + print "where on large arrays" + timer('numpy.ma.where(nmxl>2,nmxl,nmyl)', 'numpy.ma ',nloop=100) + timer('maskedarray.where(mmxl>2, mmxl,mmyl)', 'maskedarray',nloop=100) + \ No newline at end of file Modified: trunk/scipy/sandbox/maskedarray/core.py =================================================================== --- trunk/scipy/sandbox/maskedarray/core.py 2007-09-26 19:48:46 UTC (rev 3370) +++ trunk/scipy/sandbox/maskedarray/core.py 2007-09-27 03:34:44 UTC (rev 3371) @@ -27,13 +27,13 @@ 'amax', 'amin', 'anom', 'anomalies', 'any', 'arange', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh', 'argmax', 'argmin', 'argsort', 'around', - 'array', 'asarray', + 'array', 'asarray','asanyarray', 'bitwise_and', 'bitwise_or', 'bitwise_xor', 'ceil', 'choose', 'compressed', 'concatenate', 'conjugate', 'cos', 'cosh', 'count', 'default_fill_value', 'diagonal', 'divide', 'dump', 'dumps', 'empty', 'empty_like', 'equal', 'exp', - 'fabs', 'fmod', 'filled', 'floor', 'floor_divide', + 'fabs', 'fmod', 'filled', 'floor', 'floor_divide','fix_invalid', 'getmask', 'getmaskarray', 'greater', 'greater_equal', 'hypot', 'ids', 'inner', 'innerproduct', 'isMA', 'isMaskedArray', 'is_mask', 'is_masked', 'isarray', @@ -71,6 +71,7 @@ import numpy.core.numerictypes as ntypes from numpy import bool_, dtype, typecodes, amax, amin, ndarray from numpy import expand_dims as n_expand_dims +from numpy import array as narray import warnings @@ -80,9 +81,8 @@ divide_tolerance = 1.e-35 numpy.seterr(all='ignore') -# TODO: There's still a problem with N.add.reduce not working... -# TODO: ...neither does N.add.accumulate + #####-------------------------------------------------------------------------- #---- --- Exceptions --- #####-------------------------------------------------------------------------- @@ -118,7 +118,6 @@ max_filler.update([(numpy.float128,-numpy.inf)]) min_filler.update([(numpy.float128, numpy.inf)]) - def default_fill_value(obj): "Calculates the default fill value for an object `obj`." if hasattr(obj,'dtype'): @@ -198,26 +197,23 @@ return t1 return None -#................................................ +#####-------------------------------------------------------------------------- def filled(a, value = None): - """Returns `a` as an array with masked data replaced by `value`. -If `value` is `None` or the special element `masked`, `get_fill_value(a)` -is used instead. + """Returns a as an array with masked data replaced by value. +If value is None, get_fill_value(a) is used instead. -If `a` is already a contiguous numeric array, `a` itself is returned. - -`filled(a)` can be used to be sure that the result is numeric when passing -an object a to other software ignorant of MA, in particular to numpy itself. +If a is already a ndarray, a itself is returned. 
""" if hasattr(a, 'filled'): return a.filled(value) elif isinstance(a, ndarray): # and a.flags['CONTIGUOUS']: return a elif isinstance(a, dict): - return numeric.array(a, 'O') + return narray(a, 'O') else: - return numeric.array(a) + return narray(a) +#####-------------------------------------------------------------------------- def get_masked_subclass(*arrays): """Returns the youngest subclass of MaskedArray from a list of arrays, or MaskedArray. In case of siblings, the first takes over.""" @@ -237,21 +233,47 @@ rcls = cls return rcls -def get_data(a, copy=False, subok=True): - """Return the ._data part of a (if any), or a as a ndarray.""" - if hasattr(a,'_data'): - if copy: - if subok: - return a._data.copy() - return a._data.view(ndarray).copy() - elif subok: - return a._data - return a._data.view(ndarray) - return numpy.ndarray(a, copy=copy, subok=subok) +#####-------------------------------------------------------------------------- +def get_data(a, subok=True): + """Returns the _data part of a (if any), or a as a ndarray. + +:Parameters: + a : ndarray + A ndarray or a subclass of. + subok : boolean *[True]* + Whether to force a to a 'pure' ndarray (False) or to return a subclass + of ndarray if approriate (True). + """ + data = getattr(a, '_data', numpy.array(a, subok=subok)) + if not subok: + return data.view(ndarray) + return data +getdata = get_data +def fix_invalid(a, copy=False, fill_value=None): + """Returns (a copy of) a where invalid data (nan/inf) are masked and replaced + by fill_value. + If fill_value is None, a.fill_value is used instead. + +:Parameters: + a : ndarray + A ndarray or a subclass of. + copy : boolean *[False]* + Whether to use a copy of a (True) or to fix a in place (False). + fill_value : var *[None]* + Value used for fixing invalid data. If None, use a default based on the + datatype. + """ + a = masked_array(a, copy=copy, subok=True) + invalid = (numpy.isnan(a._data) | numpy.isinf(a._data)) + a._mask |= invalid + if fill_value is None: + fill_value = a.fill_value + a._data[invalid] = fill_value + return a + + - - #####-------------------------------------------------------------------------- #---- --- Ufuncs --- #####-------------------------------------------------------------------------- @@ -269,7 +291,7 @@ self.b = b def __call__ (self, x): - "Execute the call behavior." + "Executes the call behavior." return umath.logical_or(umath.greater (x, self.b), umath.less(x, self.a)) #............................ @@ -280,11 +302,11 @@ "domain_tan(eps) = true where abs(cos(x)) < eps)" self.eps = eps def __call__ (self, x): - "Execute the call behavior." + "Executes the call behavior." return umath.less(umath.absolute(umath.cos(x)), self.eps) #............................ class domain_safe_divide: - """defines a domain for safe division.""" + """Defines a domain for safe division.""" def __init__ (self, tolerance=divide_tolerance): self.tolerance = tolerance def __call__ (self, a, b): @@ -297,7 +319,7 @@ self.critical_value = critical_value def __call__ (self, x): - "Execute the call behavior." + "Executes the call behavior." return umath.less_equal(x, self.critical_value) #............................ class domain_greater_equal: @@ -307,17 +329,18 @@ self.critical_value = critical_value def __call__ (self, x): - "Execute the call behavior." + "Executes the call behavior." return umath.less(x, self.critical_value) + #.............................................................................. 
class masked_unary_operation: """Defines masked version of unary operations, where invalid values are pre-masked. :IVariables: - - `f` : function. - - `fill` : Default filling value *[0]*. - - `domain` : Default domain *[None]*. + f : function. + fill : Default filling value *[0]*. + domain : Default domain *[None]*. """ def __init__ (self, mufunc, fill=0, domain=None): """ masked_unary_operation(aufunc, fill=0, domain=None) @@ -334,16 +357,19 @@ ufunc_fills[mufunc] = fill # def __call__ (self, a, *args, **kwargs): - "Execute the call behavior." -# numeric tries to return scalars rather than arrays when given scalars. + "Executes the call behavior." m = getmask(a) - d1 = filled(a, self.fill) + d1 = get_data(a) if self.domain is not None: - m = mask_or(m, numeric.asarray(self.domain(d1))) + dm = narray(self.domain(d1), copy=False) + m = mask_or(m, narray(self.domain(d1))) + # The following two lines control the domain filling methods. + d1 = d1.copy() + numpy.putmask(d1, dm, self.fill) # Take care of the masked singletong first ... - if m.ndim == 0 and m: + if not m.ndim and m: return masked - # Get the result.... + # Get the result.............................. if isinstance(a, MaskedArray): result = self.f(d1, *args, **kwargs).view(type(a)) else: @@ -355,16 +381,17 @@ # def __str__ (self): return "Masked version of %s. [Invalid values are masked]" % str(self.f) + #.............................................................................. class masked_binary_operation: """Defines masked version of binary operations, where invalid values are pre-masked. :IVariables: - - `f` : function. - - `fillx` : Default filling value for first array*[0]*. - - `filly` : Default filling value for second array*[0]*. - - `domain` : Default domain *[None]*. + f : function. + fillx : Default filling value for the first argument *[0]*. + filly : Default filling value for the second argument *[0]*. + domain : Default domain *[None]*. """ def __init__ (self, mbfunc, fillx=0, filly=0): """abfunc(fillx, filly) must be defined. @@ -381,15 +408,14 @@ def __call__ (self, a, b, *args, **kwargs): "Execute the call behavior." m = mask_or(getmask(a), getmask(b)) - if (not m.ndim) and m: + (d1, d2) = (get_data(a), get_data(b)) + result = self.f(d1, d2, *args, **kwargs).view(get_masked_subclass(a,b)) + if result.size > 1: + if m is not nomask: + result._mask = make_mask_none(result.shape) + result._mask.flat = m + elif m: return masked - d1 = filled(a, self.fillx) - d2 = filled(b, self.filly) -# CHECK : Do we really need to fill the arguments ? Pro'ly not -# result = self.f(a, b, *args, **kwargs).view(get_masked_subclass(a,b)) - result = self.f(d1, d2, *args, **kwargs).view(get_masked_subclass(a,b)) - if result.ndim > 0: - result._mask = m return result # def reduce (self, target, axis=0, dtype=None): @@ -409,9 +435,7 @@ return self.f.reduce(t, axis).view(tclass) t = t.view(tclass) t._mask = m - # XXX: "or t.dtype" below is a workaround for what appears - # XXX: to be a bug in reduce. 
- tr = self.f.reduce(filled(t, self.filly), axis, dtype=dtype or t.dtype) + tr = self.f.reduce(getdata(t), axis, dtype=dtype or t.dtype) mr = umath.logical_and.reduce(m, axis) tr = tr.view(tclass) if mr.ndim > 0: @@ -434,7 +458,8 @@ if (not m.ndim) and m: return masked rcls = get_masked_subclass(a,b) - d = self.f.outer(filled(a, self.fillx), filled(b, self.filly)).view(rcls) +# d = self.f.outer(filled(a, self.fillx), filled(b, self.filly)).view(rcls) + d = self.f.outer(getdata(a), getdata(b)).view(rcls) if d.ndim > 0: d._mask = m return d @@ -450,6 +475,7 @@ def __str__ (self): return "Masked version of " + str(self.f) + #.............................................................................. class domained_binary_operation: """Defines binary operations that have a domain, like divide. @@ -458,10 +484,10 @@ They have no reduce, outer or accumulate. :IVariables: - - `f` : function. - - `fillx` : Default filling value for first array*[0]*. - - `filly` : Default filling value for second array*[0]*. - - `domain` : Default domain *[None]*. + f : function. + domain : Default domain. + fillx : Default filling value for the first argument *[0]*. + filly : Default filling value for the second argument *[0]*. """ def __init__ (self, dbfunc, domain, fillx=0, filly=0): """abfunc(fillx, filly) must be defined. @@ -480,13 +506,14 @@ "Execute the call behavior." ma = getmask(a) mb = getmask(b) - d1 = filled(a, self.fillx) - d2 = filled(b, self.filly) - t = numeric.asarray(self.domain(d1, d2)) - - if fromnumeric.sometrue(t, None): - d2 = numeric.where(t, self.filly, d2) + d1 = getdata(a) + d2 = get_data(b) + t = narray(self.domain(d1, d2), copy=False) + if t.any(None): mb = mask_or(mb, t) + # The following two lines control the domain filling + d2 = d2.copy() + numpy.putmask(d2, t, self.filly) m = mask_or(ma, mb) if (not m.ndim) and m: return masked @@ -517,7 +544,7 @@ ceil = masked_unary_operation(umath.ceil) around = masked_unary_operation(fromnumeric.round_) logical_not = masked_unary_operation(umath.logical_not) -# Domained unary ufuncs +# Domained unary ufuncs ....................................................... sqrt = masked_unary_operation(umath.sqrt, 0.0, domain_greater_equal(0.0)) log = masked_unary_operation(umath.log, 1.0, domain_greater(0.0)) log10 = masked_unary_operation(umath.log10, 1.0, domain_greater(0.0)) @@ -529,7 +556,7 @@ arccosh = masked_unary_operation(umath.arccosh, 1.0, domain_greater_equal(1.0)) arctanh = masked_unary_operation(umath.arctanh, 0.0, domain_check_interval(-1.0+1e-15, 1.0-1e-15)) -# Binary ufuncs +# Binary ufuncs ............................................................... add = masked_binary_operation(umath.add) subtract = masked_binary_operation(umath.subtract) multiply = masked_binary_operation(umath.multiply, 1, 1) @@ -555,7 +582,7 @@ bitwise_or = masked_binary_operation(umath.bitwise_or) bitwise_xor = masked_binary_operation(umath.bitwise_xor) hypot = masked_binary_operation(umath.hypot) -# Domained binary ufuncs +# Domained binary ufuncs ...................................................... divide = domained_binary_operation(umath.divide, domain_safe_divide(), 0, 1) true_divide = domained_binary_operation(umath.true_divide, domain_safe_divide(), 0, 1) @@ -569,27 +596,22 @@ #####-------------------------------------------------------------------------- #---- --- Mask creation functions --- #####-------------------------------------------------------------------------- -def getmask(a): - """Returns the mask of `a`, if any, or `nomask`. 
-Returns `nomask` if `a` is not a masked array. -To get an array for sure use getmaskarray.""" - if hasattr(a, "_mask"): - return a._mask - else: - return nomask +def get_mask(a): + """Returns the mask of a, if any, or nomask. +To get a full array of booleans of the same shape as a, use getmaskarray.""" + return getattr(a, '_mask', nomask) +getmask = get_mask def getmaskarray(a): - """Returns the mask of `a`, if any. -Otherwise, returns an array of `False`, with the same shape as `a`. + """Returns the mask of a, if any, or an array of the shape of a, full of False. """ m = getmask(a) if m is nomask: - return make_mask_none(fromnumeric.shape(a)) - else: - return m + m = make_mask_none(fromnumeric.shape(a)) + return m def is_mask(m): - """Returns `True` if `m` is a legal mask. + """Returns True if m is a legal mask. Does not check contents, only type. """ try: @@ -597,78 +619,93 @@ except AttributeError: return False # -def make_mask(m, copy=False, small_mask=True, flag=None): - """make_mask(m, copy=0, small_mask=0) -Returns `m` as a mask, creating a copy if necessary or requested. -The function can accept any sequence of integers or `nomask`. +def make_mask(m, copy=False, shrink=True, flag=None): + """make_mask(m, copy=0, shrink=0) +Returns m as a mask, creating a copy if necessary or requested. +The function can accept any sequence of integers or nomask. Does not check that contents must be 0s and 1s. -If `small_mask=True`, returns `nomask` if `m` contains no true elements. :Parameters: - - `m` (ndarray) : Mask. - - `copy` (boolean, *[False]*) : Returns a copy of `m` if true. - - `small_mask` (boolean, *[False]*): Flattens mask to `nomask` if `m` is all false. + m : ndarray + Potential mask. + copy : boolean *[False]* + Whether to return a copy of m. + shrink : boolean *[True]* + Whether to shrink m to nomask if all its values are False. """ if flag is not None: - warnings.warn("The flag 'flag' is now called 'small_mask'!", + warnings.warn("The flag 'flag' is now called 'shrink'!", DeprecationWarning) - small_mask = flag + shrink = flag if m is nomask: return nomask elif isinstance(m, ndarray): m = filled(m, True) if m.dtype.type is MaskType: if copy: - result = numeric.array(m, dtype=MaskType, copy=copy) + result = narray(m, dtype=MaskType, copy=copy) else: result = m else: - result = numeric.array(m, dtype=MaskType) + result = narray(m, dtype=MaskType) else: - result = numeric.array(filled(m, True), dtype=MaskType) + result = narray(filled(m, True), dtype=MaskType) # Bas les masques ! - if small_mask and not result.any(): + if shrink and not result.any(): return nomask else: return result def make_mask_none(s): - "Returns a mask of shape `s`, filled with `False`." + """Returns a mask of shape s, filled with False. + +:Parameters: + s : tuple + A tuple indicating the shape of the final mask. + """ result = numeric.zeros(s, dtype=MaskType) return result -def mask_or (m1, m2, copy=False, small_mask=True): - """Returns the combination of two masks `m1` and `m2`. -The masks are combined with the `logical_or` operator, treating `nomask` as false. +def mask_or (m1, m2, copy=False, shrink=True): + """Returns the combination of two masks m1 and m2. +The masks are combined with the *logical_or* operator, treating nomask as False. The result may equal m1 or m2 if the other is nomask. :Parameters: - - `m` (ndarray) : Mask. - - `copy` (boolean, *[False]*) : Returns a copy of `m` if true. - - `small_mask` (boolean, *[False]*): Flattens mask to `nomask` if `m` is all false. + m1 : ndarray + First mask. 
+ m2 : ndarray + Second mask + copy : boolean *[False]* + Whether to return a copy. + shrink : boolean *[True]* + Whether to shrink m to nomask if all its values are False. """ if m1 is nomask: - return make_mask(m2, copy=copy, small_mask=small_mask) + return make_mask(m2, copy=copy, shrink=shrink) if m2 is nomask: - return make_mask(m1, copy=copy, small_mask=small_mask) + return make_mask(m1, copy=copy, shrink=shrink) if m1 is m2 and is_mask(m1): return m1 - return make_mask(umath.logical_or(m1, m2), copy=copy, small_mask=small_mask) + return make_mask(umath.logical_or(m1, m2), copy=copy, shrink=shrink) #####-------------------------------------------------------------------------- #--- --- Masking functions --- #####-------------------------------------------------------------------------- def masked_where(condition, a, copy=True): - """Returns `x` as an array masked where `condition` is true. -Masked values of `x` or `condition` are kept. + """Returns a as an array masked where condition is true. +Masked values of a or condition are kept. :Parameters: - - `condition` (ndarray) : Masking condition. - - `x` (ndarray) : Array to mask. - - `copy` (boolean, *[False]*) : Returns a copy of `m` if true. + condition : ndarray + Masking condition. + a : ndarray + Array to mask. + copy : boolean *[True]* + Whether to return a copy of a. """ cond = filled(condition,1) - a = numeric.array(a, copy=copy, subok=True) + a = narray(a, copy=copy, subok=True) if hasattr(a, '_mask'): cond = mask_or(cond, a._mask) cls = type(a) @@ -678,29 +715,29 @@ result._mask = cond return result -def masked_greater(x, value, copy=1): - "Shortcut to `masked_where`, with ``condition = (x > value)``." +def masked_greater(x, value, copy=True): + "Shortcut to masked_where, with condition = (x > value)." return masked_where(greater(x, value), x, copy=copy) -def masked_greater_equal(x, value, copy=1): - "Shortcut to `masked_where`, with ``condition = (x >= value)``." +def masked_greater_equal(x, value, copy=True): + "Shortcut to masked_where, with condition = (x >= value)." return masked_where(greater_equal(x, value), x, copy=copy) def masked_less(x, value, copy=True): - "Shortcut to `masked_where`, with ``condition = (x < value)``." + "Shortcut to masked_where, with condition = (x < value)." return masked_where(less(x, value), x, copy=copy) def masked_less_equal(x, value, copy=True): - "Shortcut to `masked_where`, with ``condition = (x <= value)``." + "Shortcut to masked_where, with condition = (x <= value)." return masked_where(less_equal(x, value), x, copy=copy) def masked_not_equal(x, value, copy=True): - "Shortcut to `masked_where`, with ``condition = (x != value)``." + "Shortcut to masked_where, with condition = (x != value)." return masked_where((x != value), x, copy=copy) # def masked_equal(x, value, copy=True): - """Shortcut to `masked_where`, with ``condition = (x == value)``. + """Shortcut to masked_where, with condition = (x == value). For floating point, consider `masked_values(x, value)` instead. """ return masked_where((x == value), x, copy=copy) @@ -710,9 +747,9 @@ # return array(d, mask=m, copy=copy) def masked_inside(x, v1, v2, copy=True): - """Shortcut to `masked_where`, where `condition` is True for x inside -the interval `[v1,v2]` ``(v1 <= x <= v2)``. -The boundaries `v1` and `v2` can be given in either order. + """Shortcut to masked_where, where condition is True for x inside +the interval [v1,v2] (v1 <= x <= v2). +The boundaries v1 and v2 can be given in either order. 
""" if v2 < v1: (v1, v2) = (v2, v1) @@ -721,9 +758,9 @@ return masked_where(condition, x, copy=copy) def masked_outside(x, v1, v2, copy=True): - """Shortcut to `masked_where`, where `condition` is True for x outside -the interval `[v1,v2]` ``(x < v1)|(x > v2)``. -The boundaries `v1` and `v2` can be given in either order. + """Shortcut to masked_where, where condition is True for x outside +the interval [v1,v2] (x < v1)|(x > v2). +The boundaries v1 and v2 can be given in either order. """ if v2 < v1: (v1, v2) = (v2, v1) @@ -733,46 +770,64 @@ # def masked_object(x, value, copy=True): - """Masks the array `x` where the data are exactly equal to `value`. -This function is suitable only for `object` arrays: for floating point, -please use `masked_values` instead. + """Masks the array x where the data are exactly equal to value. +This function is suitable only for object arrays: for floating point, +please use masked_values instead. The mask is set to `nomask` if posible. - -:parameter copy (Boolean, *[True]*): Returns a copy of `x` if true. """ + """ if isMaskedArray(x): condition = umath.equal(x._data, value) mask = x._mask else: condition = umath.equal(fromnumeric.asarray(x), value) mask = nomask - mask = mask_or(mask, make_mask(condition, small_mask=True)) + mask = mask_or(mask, make_mask(condition, shrink=True)) return masked_array(x, mask=mask, copy=copy, fill_value=value) def masked_values(x, value, rtol=1.e-5, atol=1.e-8, copy=True): - """Masks the array `x` where the data are approximately equal to `value` -(that is, ``abs(x - value) <= atol+rtol*abs(value)``). -Suitable only for floating points. For integers, please use `masked_equal`. -The mask is set to `nomask` if posible. + """Masks the array x where the data are approximately equal to value +(abs(x - value) <= atol+rtol*abs(value)). +Suitable only for floating points. For integers, please use masked_equal. +The mask is set to nomask if posible. :Parameters: - - `rtol` (Float, *[1e-5]*): Tolerance parameter. - - `atol` (Float, *[1e-8]*): Tolerance parameter. - - `copy` (boolean, *[False]*) : Returns a copy of `x` if True. + x : ndarray + Array to fill. + value : float + Masking value. + rtol : float *[1e-5]* + Tolerance parameter. + atol : float, *[1e-8]* + Tolerance parameter. + copy : boolean *[True]* + Whether to return a copy of x. """ abs = umath.absolute xnew = filled(x, value) if issubclass(xnew.dtype.type, numeric.floating): condition = umath.less_equal(abs(xnew-value), atol+rtol*abs(value)) - try: - mask = x._mask - except AttributeError: - mask = nomask + mask = getattr(x, '_mask', nomask) else: condition = umath.equal(xnew, value) mask = nomask - mask = mask_or(mask, make_mask(condition, small_mask=True)) + mask = mask_or(mask, make_mask(condition, shrink=True)) return masked_array(xnew, mask=mask, copy=copy, fill_value=value) +def masked_invalid(a, copy=True): + """Masks the array for invalid values (nans or infs). + Any preexisting mask is conserved.""" + a = narray(a, copy=copy, subok=True) + condition = (numpy.isnan(a) | numpy.isinf(a)) + if hasattr(a, '_mask'): + condition = mask_or(condition, a._mask) + cls = type(a) + else: + cls = MaskedArray + result = a.view(cls) + result._mask = cond + return result + + #####-------------------------------------------------------------------------- #---- --- Printing options --- #####-------------------------------------------------------------------------- @@ -795,9 +850,9 @@ "Is the use of the display value enabled?" 
return self._enabled - def enable(self, small_mask=1): - "Set the enabling small_mask to `small_mask`." - self._enabled = small_mask + def enable(self, shrink=1): + "Set the enabling shrink to `shrink`." + self._enabled = shrink def __str__ (self): return str(self._display) @@ -810,90 +865,23 @@ #####-------------------------------------------------------------------------- #---- --- MaskedArray class --- #####-------------------------------------------------------------------------- -##def _getoptions(a_out, a_in): -## "Copies standards options of a_in to a_out." -## for att in ['] -#class _mathmethod(object): -# """Defines a wrapper for arithmetic methods. -#Instead of directly calling a ufunc, the corresponding method of the `array._data` -#object is called instead. -# """ -# def __init__ (self, methodname, fill_self=0, fill_other=0, domain=None): -# """ -#:Parameters: -# - `methodname` (String) : Method name. -# - `fill_self` (Float *[0]*) : Fill value for the instance. -# - `fill_other` (Float *[0]*) : Fill value for the target. -# - `domain` (Domain object *[None]*) : Domain of non-validity. -# """ -# self.methodname = methodname -# self.fill_self = fill_self -# self.fill_other = fill_other -# self.domain = domain -# self.obj = None -# self.__doc__ = self.getdoc() -# # -# def getdoc(self): -# "Returns the doc of the function (from the doc of the method)." -# try: -# return getattr(MaskedArray, self.methodname).__doc__ -# except: -# return getattr(ndarray, self.methodname).__doc__ -# # -# def __get__(self, obj, objtype=None): -# self.obj = obj -# return self -# # -# def __call__ (self, other, *args): -# "Execute the call behavior." -# instance = self.obj -# m_self = instance._mask -# m_other = getmask(other) -# base = instance.filled(self.fill_self) -# target = filled(other, self.fill_other) -# if self.domain is not None: -# # We need to force the domain to a ndarray only. -# if self.fill_other > self.fill_self: -# domain = self.domain(base, target) -# else: -# domain = self.domain(target, base) -# if domain.any(): -# #If `other` is a subclass of ndarray, `filled` must have the -# # same subclass, else we'll lose some info. -# #The easiest then is to fill `target` instead of creating -# # a pure ndarray. -# #Oh, and we better make a copy! -# if isinstance(other, ndarray): -# # We don't want to modify other: let's copy target, then -# target = target.copy() -# target[fromnumeric.asarray(domain)] = self.fill_other -# else: -# target = numeric.where(fromnumeric.asarray(domain), -# self.fill_other, target) -# m_other = mask_or(m_other, domain) -# m = mask_or(m_self, m_other) -# method = getattr(base, self.methodname) -# result = method(target, *args).view(type(instance)) -# try: -# result._mask = m -# except AttributeError: -# if m: -# result = masked -# return result + #............................................................................... class _arraymethod(object): """Defines a wrapper for basic array methods. -Upon call, returns a masked array, where the new `_data` array is the output -of the corresponding method called on the original `_data`. +Upon call, returns a masked array, where the new _data array is the output +of the corresponding method called on the original _data. -If `onmask` is True, the new mask is the output of the method calld on the initial mask. -If `onmask` is False, the new mask is just a reference to the initial mask. +If onmask is True, the new mask is the output of the method called on the initial mask. 
+If onmask is False, the new mask is just a reference to the initial mask. -:Parameters: - `funcname` : String +:IVariables: + _name : String Name of the function to apply on data. - `onmask` : Boolean *[True]* + _onmask : Boolean *[True]* Whether the mask must be processed also (True) or left alone (False). + obj : Object + The object calling the arraymethod """ def __init__(self, funcname, onmask=True): self._name = funcname @@ -905,16 +893,8 @@ "Returns the doc of the function (from the doc of the method)." methdoc = getattr(ndarray, self._name, None) methdoc = getattr(numpy, self._name, methdoc) -# methdoc = getattr(MaskedArray, self._name, methdoc) if methdoc is not None: return methdoc.__doc__ -# try: -# return getattr(MaskedArray, self._name).__doc__ -# except: -# try: -# return getattr(numpy, self._name).__doc__ -# except: -# return getattr(ndarray, self._name).__doc # def __get__(self, obj, objtype=None): self.obj = obj @@ -926,10 +906,11 @@ mask = self.obj._mask cls = type(self.obj) result = getattr(data, methodname)(*args, **params).view(cls) - result._smallmask = self.obj._smallmask + result._update_from(self.obj) + #result._shrinkmask = self.obj._shrinkmask if result.ndim: if not self._onmask: - result._mask = mask + result.__setmask__(mask) elif mask is not nomask: result.__setmask__(getattr(mask, methodname)(*args, **params)) else: @@ -969,54 +950,60 @@ Masked values of True exclude the corresponding element from any computation. Construction: - x = array(data, dtype=None, copy=True, order=False, - mask = nomask, fill_value=None, small_mask=True) + x = MaskedArray(data, mask=nomask, dtype=None, copy=True, fill_value=None, + mask = nomask, fill_value=None, shrink=True) -If copy=False, every effort is made not to copy the data: -If `data` is a MaskedArray, and argument mask=nomask, then the candidate data -is `data._data` and the mask used is `data._mask`. -If `data` is a numeric array, it is used as the candidate raw data. -If `dtype` is not None and is different from data.dtype.char then a data copy is required. -Otherwise, the candidate is used. - -If a data copy is required, the raw (unmasked) data stored is the result of: -numeric.array(data, dtype=dtype.char, copy=copy) - -If `mask` is `nomask` there are no masked values. -Otherwise mask must be convertible to an array of booleans with the same shape as x. -If `small_mask` is True, a mask consisting of zeros (False) only is compressed to `nomask`. -Otherwise, the mask is not compressed. - -fill_value is used to fill in masked values when necessary, such as when -printing and in method/function filled(). -The fill_value is not used for computation within this module. +:Parameters: + data : var + Input data. + mask : sequence *[nomask]* + Mask. + Must be convertible to an array of booleans with the same shape as data: + True indicates a masked (eg., invalid) data. + dtype : dtype *[None]* + Data type of the output. If None, the type of the data argument is used. + If dtype is not None and different from data.dtype, a copy is performed. + copy : boolean *[False]* + Whether to copy the input data (True), or to use a reference instead. + fill_value : var *[None]* + Value used to fill in the masked values when necessary. If None, a default + based on the datatype is used. + keep_mask : boolean *[True]* + Whether to combine mask with the mask of the input data, if any (True), + or to use only mask for the output (False). + hard_mask : boolean *[False]* + Whether to use a hard mask or not. 
With a hard mask, masked values cannot + be unmasked. + subok : boolean *[True]* + Whether to return a subclass of MaskedArray (if possible) or a plain + MaskedArray. """ - __array_priority__ = 10.1 + + __array_priority__ = 15 _defaultmask = nomask _defaulthardmask = False _baseclass = numeric.ndarray + def __new__(cls, data=None, mask=nomask, dtype=None, copy=False, fill_value=None, - keep_mask=True, small_mask=True, hard_mask=False, flag=None, + keep_mask=True, hard_mask=False, flag=None, subok=True, **options): """array(data, dtype=None, copy=True, mask=nomask, fill_value=None) If `data` is already a ndarray, its dtype becomes the default value of dtype. """ if flag is not None: - warnings.warn("The flag 'flag' is now called 'small_mask'!", + warnings.warn("The flag 'flag' is now called 'shrink'!", DeprecationWarning) - small_mask = flag + shrink = flag # Process data............ - _data = numeric.array(data, dtype=dtype, copy=copy, subok=subok) + _data = narray(data, dtype=dtype, copy=copy, subok=True) _baseclass = getattr(data, '_baseclass', type(_data)) _basedict = getattr(data, '_basedict', getattr(data, '__dict__', None)) - if not isinstance(data, MaskedArray): + if not isinstance(data, MaskedArray) or not subok: _data = _data.view(cls) - elif not subok: - _data = data.view(cls) else: _data = _data.view(type(data)) - # Backwards compat ....... + # Backwards compatibility w/ numpy.core.ma ....... if hasattr(data,'_mask') and not isinstance(data, ndarray): _data._mask = data._mask _sharedmask = True @@ -1026,8 +1013,11 @@ _data._mask = nomask if copy: _data._mask = _data._mask.copy() + _data._sharedmask = False + else: + _data._sharedmask = True else: - mask = numeric.array(mask, dtype=MaskType, copy=copy) + mask = narray(mask, dtype=MaskType, copy=copy) if mask.shape != _data.shape: (nd, nm) = (_data.size, mask.size) if nm == 1: @@ -1038,18 +1028,18 @@ msg = "Mask and data not compatible: data size is %i, "+\ "mask size is %i." raise MAError, msg % (nd, nm) + copy = True if _data._mask is nomask: _data._mask = mask - _data._sharedmask = True + _data._sharedmask = not copy else: - # Make a copy of the mask to avoid propagation - _data._sharedmask = False if not keep_mask: _data._mask = mask + _data._sharedmask = not copy else: - _data._mask = umath.logical_or(mask, _data._mask) + _data._mask = umath.logical_or(mask, _data._mask) + _data._sharedmask = False - # Update fill_value....... if fill_value is None: _data._fill_value = getattr(data, '_fill_value', @@ -1058,66 +1048,80 @@ _data._fill_value = fill_value # Process extra options .. _data._hardmask = hard_mask - _data._smallmask = small_mask _data._baseclass = _baseclass _data._basedict = _basedict return _data + # + def _update_from(self, obj): + """Copies some attributes of obj to self. + """ + self._hardmask = getattr(obj, '_hardmask', self._defaulthardmask) + self._sharedmask = getattr(obj, '_sharedmask', False) + if obj is not None: + self._baseclass = getattr(obj, '_baseclass', type(obj)) + else: + self._baseclass = ndarray + self._fill_value = getattr(obj, '_fill_value', None) + return #........................ def __array_finalize__(self,obj): """Finalizes the masked array. """ - # Finalize mask ............... + # Get main attributes ......... self._mask = getattr(obj, '_mask', nomask) - if self._mask is not nomask: - self._mask.shape = self.shape - # Get the remaining options ... 
- self._hardmask = getattr(obj, '_hardmask', self._defaulthardmask) - self._smallmask = getattr(obj, '_smallmask', True) - self._sharedmask = True - self._baseclass = getattr(obj, '_baseclass', type(obj)) - self._fill_value = getattr(obj, '_fill_value', None) + self._update_from(obj) # Update special attributes ... self._basedict = getattr(obj, '_basedict', getattr(obj, '__dict__', None)) if self._basedict is not None: self.__dict__.update(self._basedict) + # Finalize the mask ........... + if self._mask is not nomask: + self._mask.shape = self.shape return #.................................. def __array_wrap__(self, obj, context=None): """Special hook for ufuncs. Wraps the numpy array and sets the mask according to context. """ - #TODO : Should we check for type result result = obj.view(type(self)) + result._update_from(self) #.......... if context is not None: result._mask = result._mask.copy() (func, args, _) = context m = reduce(mask_or, [getmask(arg) for arg in args]) - # Get domain mask + # Get the domain mask................ domain = ufunc_domain.get(func, None) if domain is not None: if len(args) > 2: d = reduce(domain, args) else: d = domain(*args) + # Fill the result where the domain is wrong + try: + # Binary domain: take the last value + fill_value = ufunc_fills[func][-1] + except TypeError: + # Unary domain: just use this one + fill_value = ufunc_fills[func] + except KeyError: + # Domain not recognized, use fill_value instead + fill_value = self.fill_value + result = result.copy() + numpy.putmask(result, d, fill_value) + # Update the mask if m is nomask: if d is not nomask: m = d else: m |= d - if not m.ndim and m: - if m: - if result.shape == (): - return masked - result._mask = numeric.ones(result.shape, bool_) + # Make sure the mask has the proper size + if result.shape == () and m: + return masked else: result._mask = m + result._sharedmask = False #.... -# result._mask = m - result._fill_value = self._fill_value - result._hardmask = self._hardmask - result._smallmask = self._smallmask - result._baseclass = self._baseclass return result #............................................. def __getitem__(self, indx): @@ -1128,20 +1132,26 @@ # if getmask(indx) is not nomask: # msg = "Masked arrays must be filled before they can be used as indices!" # raise IndexError, msg - # super() can't work here if the underlying data is a matrix... - dout = (self._data).__getitem__(indx) + dout = ndarray.__getitem__(self.view(ndarray), indx) m = self._mask - if hasattr(dout, 'shape') and len(dout.shape) > 0: - # Not a scalar: make sure that dout is a MA + if not getattr(dout,'ndim', False): + # Just a scalar............ + if m is not nomask and m[indx]: + return masked + else: + # Force dout to MA ........ dout = dout.view(type(self)) - dout._smallmask = self._smallmask - dout._hardmask = self._hardmask - dout._fill_value = self._fill_value + # Inherit attributes from self + dout._update_from(self) + # Update the mask if needed if m is not nomask: - # use _set_mask to take care of the shape - dout.__setmask__(m[indx]) - elif m is not nomask and m[indx]: - return masked + dout._mask = ndarray.__getitem__(m, indx).reshape(dout.shape) +# Note: Don't try to check for m.any(), that'll take too long... +# mask = ndarray.__getitem__(m, indx).reshape(dout.shape) +# if self._shrinkmask and not m.any(): +# dout._mask = nomask +# else: +# dout._mask = mask return dout #........................ 
def __setitem__(self, indx, value): @@ -1157,26 +1167,22 @@ if value is masked: m = self._mask if m is nomask: - m = make_mask_none(self.shape) -# else: -# m = m.copy() + m = numpy.zeros(self.shape, dtype=MaskType) m[indx] = True - self.__setmask__(m) + self._mask = m + self._sharedmask = False return #.... - dval = numeric.asarray(value).astype(self.dtype) + dval = getdata(value).astype(self.dtype) valmask = getmask(value) if self._mask is nomask: if valmask is not nomask: - self._mask = make_mask_none(self.shape) + self._mask = numpy.zeros(self.shape, dtype=MaskType) self._mask[indx] = valmask elif not self._hardmask: - _mask = self._mask.copy() - if valmask is nomask: - _mask[indx] = False - else: - _mask[indx] = valmask - self._set_mask(_mask) + # Unshare the mask if necessary to avoid propagation + self.unshare_mask() + self._mask[indx] = valmask elif hasattr(indx, 'dtype') and (indx.dtype==bool_): indx = indx * umath.logical_not(self._mask) else: @@ -1189,7 +1195,6 @@ dval = dindx self._mask[indx] = mindx # Set data .......... - #dval = filled(value).astype(self.dtype) ndarray.__setitem__(self._data,indx,dval) #............................................ def __getslice__(self, i, j): @@ -1205,29 +1210,33 @@ self.__setitem__(slice(i,j), value) #............................................ def __setmask__(self, mask, copy=False): - newmask = make_mask(mask, copy=copy, small_mask=self._smallmask) -# self.unshare_mask() + """Sets the mask.""" + if mask is not nomask: + mask = narray(mask, copy=copy, dtype=MaskType) +# if self._shrinkmask and not mask.any(): +# mask = nomask if self._mask is nomask: - self._mask = newmask + self._mask = mask elif self._hardmask: - if newmask is not nomask: - self._mask.__ior__(newmask) + if mask is not nomask: + self._mask.__ior__(mask) else: # This one is tricky: if we set the mask that way, we may break the # propagation. But if we don't, we end up with a mask full of False # and a test on nomask fails... - if newmask is nomask: + if mask is nomask: self._mask = nomask else: - self._mask.flat = newmask + self.unshare_mask() + self._mask.flat = mask if self._mask.shape: self._mask = numeric.reshape(self._mask, self.shape) _set_mask = __setmask__ - + #.... def _get_mask(self): """Returns the current mask.""" return self._mask - +# return self._mask.reshape(self.shape) mask = property(fget=_get_mask, fset=__setmask__, doc="Mask") #............................................ def harden_mask(self): @@ -1243,19 +1252,24 @@ if self._sharedmask: self._mask = self._mask.copy() self._sharedmask = False - #............................................ def _get_data(self): "Returns the current data (as a view of the original underlying data)>" return self.view(self._baseclass) _data = property(fget=_get_data) + data = property(fget=_get_data) + + def raw_data(self): + """Returns the `_data` part of the MaskedArray. +You should really use `data` instead...""" + return self._data #............................................ def _get_flat(self): - """Calculates the flat value. - """ + "Returns a flat iterator." return flatiter(self) # def _set_flat (self, value): + "Sets a flattened version of self to value." "x.flat = value" y = self.ravel() y[:] = value @@ -1269,8 +1283,8 @@ return self._fill_value def set_fill_value(self, value=None): - """Sets the filling value to `value`. -If None, uses the default, based on the data type.""" + """Sets the filling value to value. 
+If None, uses a default based on the data type."""
        if value is None:
            value = default_fill_value(self)
        self._fill_value = value
@@ -1279,11 +1293,18 @@
                           doc="Filling value")

    def filled(self, fill_value=None):
-        """Returns an array of the same class as `_data`,
-        with masked values filled with `fill_value`.
-Subclassing is preserved.
-
-If `fill_value` is None, uses self.fill_value.
+        """Returns a copy of self._data, where masked values are filled with
+    fill_value. If fill_value is None, self.fill_value is used instead.
+    Subclassing is preserved.
+    Note : the result is NOT a MaskedArray !
+
+Examples
+--------
+>>> x = array([1,2,3,4,5], mask=[0,0,1,0,1], fill_value=-999)
+>>> x.filled()
+array([1,2,-999,4,-999])
+>>> type(x.filled())
+<type 'numpy.ndarray'>
        """
        m = self._mask
        if m is nomask or not m.any():
@@ -1298,9 +1319,8 @@
            result = self._data.copy()
            try:
                numpy.putmask(result, m, fill_value)
-                #result[m] = fill_value
            except (TypeError, AttributeError):
-                fill_value = numeric.array(fill_value, dtype=object)
+                fill_value = narray(fill_value, dtype=object)
                d = result.astype(object)
                result = fromnumeric.choose(m, (d, fill_value))
            except IndexError:
@@ -1308,20 +1328,21 @@
                if self._data.shape:
                    raise
                elif m:
-                    result = numeric.array(fill_value, dtype=self.dtype)
+                    result = narray(fill_value, dtype=self.dtype)
                else:
                    result = self._data
        return result

    def compressed(self):
-        "A 1-D array of all the non-masked data."
-        d = self.ravel()
-        if self._mask is nomask:
-            return d
-        elif not self._smallmask and not self._mask.any():
-            return d
-        else:
-            return d[numeric.logical_not(d._mask)]
+        """Returns a 1-D array of all the non-masked data."""
+        data = ndarray.ravel(self._data).view(type(self))
+        data._update_from(self)
+        if self._mask is not nomask:
+            data = data[numpy.logical_not(ndarray.ravel(self._mask))]
+#        if not self._shrinkmask:
+#            data._mask = numpy.zeros(data.shape, dtype=MaskType)
+        return data
+
    #............................................
    def __str__(self):
        """x.__str__() <==> str(x)
@@ -1384,9 +1405,34 @@
                'fill': str(self.fill_value),
                }
    #............................................
+    def __add__(self, other):
+        "Adds other to self, and returns a new masked array."
+        return add(self, other)
+    #
+    def __sub__(self, other):
+        "Subtracts other from self, and returns a new masked array."
+        return subtract(self, other)
+    #
+    def __mul__(self, other):
+        "Multiplies self by other, and returns a new masked array."
+        return multiply(self, other)
+    #
+    def __div__(self, other):
+        "Divides self by other, and returns a new masked array."
+        return divide(self, other)
+    #
+    def __truediv__(self, other):
+        "Divides self by other (true division), and returns a new masked array."
+        return true_divide(self, other)
+    #
+    def __floordiv__(self, other):
+        "Divides self by other (floor division), and returns a new masked array."
+        return floor_divide(self, other)
+
+    #............................................
    def __iadd__(self, other):
        "Adds other to self in place."
-        ndarray.__iadd__(self._data,other)
+        ndarray.__iadd__(self._data, getdata(other))
        m = getmask(other)
        if self._mask is nomask:
            self._mask = m
@@ -1396,7 +1442,7 @@
    #....
    def __isub__(self, other):
        "Subtracts other from self in place."
-        ndarray.__isub__(self._data,other)
+        ndarray.__isub__(self._data, getdata(other))
        m = getmask(other)
        if self._mask is nomask:
            self._mask = m
@@ -1406,7 +1452,7 @@
    #....
    def __imul__(self, other):
        "Multiplies self by other in place."
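# A minimal sketch of the in-place pattern shared by __iadd__/__isub__/__imul__
# above: operate on the raw data, then merge the masks. Plain numpy arrays
# stand in for the data/mask pair; the names are illustrative only:
#     import numpy
#     data = numpy.array([1., 2., 3.])
#     mask = numpy.array([False, True, False])
#     other_data = numpy.array([10., 10., 10.])
#     other_mask = numpy.array([False, False, True])
#     data *= other_data                          # ndarray op on the data
#     mask |= other_mask                          # mask_or of the two masks
#     assert mask.tolist() == [False, True, True]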
- ndarray.__imul__(self._data,other) + ndarray.__imul__(self._data, getdata(other)) m = getmask(other) if self._mask is nomask: self._mask = m @@ -1416,10 +1462,15 @@ #.... def __idiv__(self, other): "Divides self by other in place." - dom_mask = domain_safe_divide().__call__(self, filled(other,1)) + other_data = getdata(other) + dom_mask = domain_safe_divide().__call__(self._data, other_data) other_mask = getmask(other) new_mask = mask_or(other_mask, dom_mask) - ndarray.__idiv__(self._data, other) + # The following 3 lines control the domain filling + if dom_mask.any(): + other_data = other_data.copy() + numpy.putmask(other_data, dom_mask, 1) + ndarray.__idiv__(self._data, other_data) self._mask = mask_or(self._mask, new_mask) return self #............................................ @@ -1440,7 +1491,7 @@ def count(self, axis=None): """Counts the non-masked elements of the array along a given axis, and returns a masked array where the mask is True where all data are masked. -If `axis` is None, counts all the non-masked elements, and returns either a +If axis is None, counts all the non-masked elements, and returns either a scalar or the masked singleton.""" m = self._mask s = self.shape @@ -1457,17 +1508,30 @@ t = list(s) del t[axis] return numeric.ones(t) * n - n1 = fromnumeric.size(m, axis) + n1 = numpy.size(m, axis) n2 = m.astype(int_).sum(axis) if axis is None: return (n1-n2) else: return masked_array(n1 - n2) #............................................ + flatten = _arraymethod('flatten') +# ravel = _arraymethod('ravel') + def ravel(self): + """Returns a 1D version of self, as a view.""" + r = ndarray.ravel(self._data).view(type(self)) + r._update_from(self) + if self._mask is not nomask: + r._mask = ndarray.ravel(self._mask).reshape(r.shape) + else: + r._mask = nomask + return r + repeat = _arraymethod('repeat') + # def reshape (self, *s): """Reshapes the array to shape s. -Returns a new masked array. -If you want to modify the shape in place, please use `a.shape = s`""" + Returns a new masked array. + If you want to modify the shape in place, please use a.shape = s""" result = self._data.reshape(*s).view(type(self)) result.__dict__.update(self.__dict__) if result._mask is not nomask: @@ -1475,12 +1539,10 @@ result._mask.shape = result.shape return result # - repeat = _arraymethod('repeat') - # def resize(self, newshape, refcheck=True, order=False): """Attempts to modify size and shape of self inplace. - The array must own its own memory and not be referenced by other arrays. - Returns None. + The array must own its own memory and not be referenced by other arrays. + Returns None. """ try: self._data.resize(newshape, refcheck, order) @@ -1492,22 +1554,19 @@ "Use the resize function.") return None # - flatten = _arraymethod('flatten') - # def put(self, indices, values, mode='raise'): """Sets storage-indexed locations to corresponding values. -a.put(values, indices, mode) sets a.flat[n] = values[n] for each n in indices. -`values` can be scalar or an array shorter than indices, and it will be repeated, -if necessary. -If `values` has some masked values, the initial mask is updated in consequence, -else the corresponding values are unmasked. + a.put(values, indices, mode) sets a.flat[n] = values[n] for each n in indices. + If values is shorter than indices then it will repeat. + If values has some masked values, the initial mask is updated in consequence, + else the corresponding values are unmasked. 
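+    Example (a hypothetical session; array() is the shortcut constructor
+    defined later in this module):
+        >>> x = array([10, 20, 30], mask=[0, 0, 1])
+        >>> x.put([0, 2], [-1, -2])
+        >>> print x
+        [-1 20 -2]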
""" m = self._mask # Hard mask: Get rid of the values/indices that fall on masked data if self._hardmask and self._mask is not nomask: mask = self._mask[indices] - indices = numeric.asarray(indices) - values = numeric.asanyarray(values) + indices = narray(indices, copy=False) + values = narray(values, copy=False, subok=True) values.resize(indices.shape) indices = indices[~mask] values = values[~mask] @@ -1522,17 +1581,17 @@ m.put(indices, False, mode=mode) else: m.put(indices, values._mask, mode=mode) - m = make_mask(m, copy=False, small_mask=True) + m = make_mask(m, copy=False, shrink=True) self._mask = m #............................................ def ids (self): - """Return the address of the data and mask areas.""" + """Returns the addresses of the data and mask areas.""" return (self.ctypes.data, self._mask.ctypes.data) #............................................ def all(self, axis=None, out=None): """a.all(axis) returns True if all entries along the axis are True. Returns False otherwise. If axis is None, uses the flatten array. - Masked data are considered as True during computation. + Masked values are considered as True during computation. Outputs a masked array, where the mask is True if all data are masked along the axis. Note: the out argument is not really operational... """ @@ -1554,19 +1613,19 @@ return d def nonzero(self): - """a.nonzero() returns a tuple of arrays + """a.nonzero() returns the indices of the elements of a that are not + zero nor masked, as a tuple of arrays. - Returns a tuple of arrays, one for each dimension of a, - containing the indices of the non-zero elements in that - dimension. The corresponding non-zero values can be obtained - with + There are as many tuples as dimensions of a, each tuple contains the indices + of the non-zero elements in that dimension. The corresponding non-zero values + can be obtained with a[a.nonzero()]. To group the indices by element, rather than dimension, use transpose(a.nonzero()) instead. The result of this is always a 2d array, with a row for each non-zero element.""" - return numeric.asarray(self.filled(0)).nonzero() + return narray(self.filled(0), copy=False).nonzero() #............................................ def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None): """a.trace(offset=0, axis1=0, axis2=1, dtype=None, out=None) @@ -1580,13 +1639,19 @@ return result.astype(dtype) else: D = self.diagonal(offset=offset, axis1=axis1, axis2=axis2) - return D.astype(dtype).sum(axis=None) + return D.astype(dtype).filled(0).sum(axis=None) #............................................ def sum(self, axis=None, dtype=None): """a.sum(axis=None, dtype=None) -Sums the array `a` over the given axis `axis`. -Masked values are set to 0. -If `axis` is None, applies to a flattened version of the array. + Sums the array a over the given axis. + Masked elements are set to 0. + +:Parameters: + axis : integer *[None]* + Axis along which to perform the operation. + If None, applies to a flattened version of the array. + dtype : dtype *[None]* + Datatype for the intermediary computation. """ if self._mask is nomask: mask = nomask @@ -1601,9 +1666,15 @@ def cumsum(self, axis=None, dtype=None): """a.cumprod(axis=None, dtype=None) -Returns the cumulative sum of the elements of array `a` along the given axis `axis`. -Masked values are set to 0. -If `axis` is None, applies to a flattened version of the array. + Returns the cumulative sum of the elements of a along the given axis. + Masked values are set to 0. 
+ +:Parameters: + axis : integer *[None]* + Axis along which to perform the operation. + If None, applies to a flattened version of the array. + dtype : dtype *[None]* + Datatype for the intermediary computation. """ result = self.filled(0).cumsum(axis=axis, dtype=dtype).view(type(self)) result.__setmask__(self.mask) @@ -1611,9 +1682,15 @@ def prod(self, axis=None, dtype=None): """a.prod(axis=None, dtype=None) -Returns the product of the elements of array `a` along the given axis `axis`. -Masked elements are set to 1. -If `axis` is None, applies to a flattened version of the array. + Returns the product of the elements of a along the given axis. + Masked elements are set to 1. + +:Parameters: + axis : integer *[None]* + Axis along which to perform the operation. + If None, applies to a flattened version of the array. + dtype : dtype *[None]* + Datatype for the intermediary computation. """ if self._mask is nomask: mask = nomask @@ -1629,9 +1706,15 @@ def cumprod(self, axis=None, dtype=None): """a.cumprod(axis=None, dtype=None) -Returns the cumulative product of ethe lements of array `a` along the given axis `axis`. -Masked values are set to 1. -If `axis` is None, applies to a flattened version of the array. + Returns the cumulative product of the elements of a along the given axis. + Masked values are set to 1. + +:Parameters: + axis : integer *[None]* + Axis along which to perform the operation. + If None, applies to a flattened version of the array. + dtype : dtype *[None]* + Datatype for the intermediary computation. """ result = self.filled(1).cumprod(axis=axis, dtype=dtype).view(type(self)) result.__setmask__(self.mask) @@ -1640,15 +1723,16 @@ def mean(self, axis=None, dtype=None): """a.mean(axis=None, dtype=None) - Averages the array over the given axis. If the axis is None, - averages over all dimensions of the array. Equivalent to + Averages the array over the given axis. Equivalent to - a.sum(axis, dtype) / size(a, axis). - - The optional dtype argument is the data type for intermediate - calculations in the sum. - - Returns a masked array, of the same class as a. + a.sum(axis, dtype) / size(a, axis). + +:Parameters: + axis : integer *[None]* + Axis along which to perform the operation. + If None, applies to a flattened version of the array. + dtype : dtype *[None]* + Datatype for the intermediary computation. """ if self._mask is nomask: return super(MaskedArray, self).mean(axis=axis, dtype=dtype) @@ -1660,6 +1744,13 @@ def anom(self, axis=None, dtype=None): """a.anom(axis=None, dtype=None) Returns the anomalies, or deviation from the average. + +:Parameters: + axis : integer *[None]* + Axis along which to perform the operation. + If None, applies to a flattened version of the array. + dtype : dtype *[None]* + Datatype for the intermediary computation. """ m = self.mean(axis, dtype) if not axis: @@ -1670,9 +1761,21 @@ def var(self, axis=None, dtype=None): """a.var(axis=None, dtype=None) Returns the variance, a measure of the spread of a distribution. - The variance is the average of the squared deviations from the mean, i.e. var = mean((x - x.mean())**2). + +:Parameters: + axis : integer *[None]* + Axis along which to perform the operation. + If None, applies to a flattened version of the array. + dtype : dtype *[None]* + Datatype for the intermediary computation. + + +Notes +----- +The value returned is a biased estimate of the true variance. +For the more standard unbiased estimate, use varu. """ if self._mask is nomask: # TODO: Do we keep super, or var _data and take a view ? 
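# The masked variance computed below is mean(anom**2) with n (not n-1) in the
# denominator, i.e. the biased estimate flagged in the Notes sections here.
# A quick unmasked numeric check of that formula, in plain numpy:
#     import numpy
#     x = numpy.array([1., 2., 3., 4.])
#     anom = x - x.mean()
#     assert (anom * anom).sum() / x.size == 1.25                       # n
#     assert abs((anom * anom).sum() / (x.size - 1) - 5. / 3) < 1e-12   # n-1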
@@ -1681,9 +1784,10 @@
        cnt = self.count(axis=axis)
        danom = self.anom(axis=axis, dtype=dtype)
        danom *= danom
-        dvar = numeric.array(danom.sum(axis) / cnt).view(type(self))
+        dvar = narray(danom.sum(axis) / cnt).view(type(self))
        if axis is not None:
            dvar._mask = mask_or(self._mask.all(axis), (cnt==1))
+        dvar._update_from(self)
        return dvar

    def std(self, axis=None, dtype=None):
@@ -1691,29 +1795,45 @@
    Returns the standard deviation, a measure of the spread of a distribution.

    The standard deviation is the square root of the average of the squared
-deviations from the mean, i.e. std = sqrt(mean((x - x.mean())**2)).
+deviations from the mean, i.e. std = sqrt(mean((x - x.mean())**2)).
+
+:Parameters:
+    axis : integer *[None]*
+        Axis along which to perform the operation.
+        If None, applies to a flattened version of the array.
+    dtype : dtype *[None]*
+        Datatype for the intermediary computation.
+
+
+Notes
+-----
+The value returned is a biased estimate of the true standard deviation.
+For the more standard unbiased estimate, use stdu.
        """
        dvar = self.var(axis,dtype)
        if axis is not None or dvar is not masked:
            dvar = sqrt(dvar)
        return dvar
+    #............................................
    def argsort(self, axis=None, fill_value=None, kind='quicksort',
                order=None):
-        """Returns an array of indices that sort 'a' along the specified axis.
-    Masked values are filled beforehand to `fill_value`.
-    If `fill_value` is None, uses the default for the data type.
-    Returns a numpy array.
+        """Returns an ndarray of indices that sort 'a' along the specified
+    axis. Masked values are filled beforehand to fill_value.
+    Returns a numpy array.

-:Keywords:
-    `axis` : Integer *[None]*
-        Axis to be indirectly sorted (default -1)
-    `kind` : String *['quicksort']*
+:Parameters:
+    axis : integer *[None]*
+        Axis to be indirectly sorted.
+    fill_value : var *[None]*
+        Value used to fill in the masked values.
+        If None, use self.fill_value instead.
+    kind : String *['quicksort']*
        Sorting algorithm (default 'quicksort')
        Possible values: 'quicksort', 'mergesort', or 'heapsort'

-    Returns: array of indices that sort 'a' along the specified axis.
-
+Notes
+-----
    This method executes an indirect sort along the given axis using the
    algorithm specified by the kind keyword. It returns an array of indices of
    the same shape as 'a' that index data along the given axis in sorted order.
@@ -1741,17 +1861,16 @@
        return d.argsort(axis=axis, kind=kind, order=order)
    #........................
    def argmin(self, axis=None, fill_value=None):
-        """Returns a ndarray of indices for the minimum values of `a` along the
+        """Returns an ndarray of indices for the minimum values of a along the
    specified axis.
-    Masked values are treated as if they had the value `fill_value`.
-    If `fill_value` is None, the default for the data type is used.
-    Returns a numpy array.
+    Masked values are treated as if they had the value fill_value.

-:Keywords:
-    `axis` : Integer *[None]*
-        Axis to be indirectly sorted (default -1)
-    `fill_value` : var *[None]*
-        Default filling value. If None, uses the minimum default for the data type.
+:Parameters:
+    axis : integer *[None]*
+        Axis to be indirectly sorted.
+    fill_value : var *[None]*
+        Value used to fill in the masked values.
+        If None, use the output of minimum_fill_value().
        """
        if fill_value is None:
            fill_value = minimum_fill_value(self)
@@ -1762,14 +1881,13 @@
        """Returns the array of indices for the maximum values of `a` along the
    specified axis.
    Masked values are treated as if they had the value `fill_value`.
-    If `fill_value` is None, the maximum default for the data type is used.
-    Returns a numpy array.
-
-:Keywords:
-    `axis` : Integer *[None]*
-        Axis to be indirectly sorted (default -1)
-    `fill_value` : var *[None]*
-        Default filling value. If None, uses the data type default.
+
+:Parameters:
+    axis : integer *[None]*
+        Axis to be indirectly sorted.
+    fill_value : var *[None]*
+        Value used to fill in the masked values.
+        If None, use the output of maximum_fill_value().
        """
        if fill_value is None:
            fill_value = maximum_fill_value(self._data)
@@ -1778,24 +1896,31 @@

    def sort(self, axis=-1, kind='quicksort', order=None,
             endwith=True, fill_value=None):
-        """
-        Sort a along the given axis.
+        """Sort a along the given axis.

-        Keyword arguments:
+:Parameters:
+    axis : integer *[-1]*
+        Axis to be indirectly sorted.
+    kind : String *['quicksort']*
+        Sorting algorithm (default 'quicksort')
+        Possible values: 'quicksort', 'mergesort', or 'heapsort'.
+    order : var *[None]*
+        If a has fields defined, then the order keyword can be the field
+        name to sort on or a list (or tuple) of field names to indicate
+        the order that fields should be used to define the sort.
+    fill_value : var *[None]*
+        Value used to fill in the masked values.
+        If None, use the output of minimum_fill_value().
+    endwith : boolean *[True]*
+        Whether missing values (if any) should be forced to the upper
+        indices (at the end of the array, True) or to the lower indices
+        (at the beginning, False).

-        axis -- axis to be sorted (default -1)
-        kind -- sorting algorithm (default 'quicksort')
-             Possible values: 'quicksort', 'mergesort', or 'heapsort'.
-        order -- If a has fields defined, then the order keyword can be the
-             field name to sort on or a list (or tuple) of field names
-             to indicate the order that fields should be used to define
-             the sort.
-        endwith--Boolean flag indicating whether missing values (if any) should
-             be forced in the upper indices (at the end of the array) or
-             lower indices (at the beginning).
+:Returns:
+    When used as a method, returns None.
+    When used as a function, returns an array.

-        Returns: None.
-
+Notes
+-----
    This method sorts 'a' in place along the given axis using the algorithm
    specified by the kind keyword.
@@ -1832,12 +1957,20 @@
        self.flat = tmp_data
        self._mask.flat = tmp_mask
        return
+    #............................................
    def min(self, axis=None, fill_value=None):
-        """Returns the minimum/a along the given axis.
-If `axis` is None, applies to the flattened array. Masked values are filled
-with `fill_value` during processing. If `fill_value is None, it is set to the
-maximum_fill_value corresponding to the data type."""
+        """Returns the minimum of a along the given axis.
+    Masked values are filled with fill_value.
+
+:Parameters:
+    axis : integer *[None]*
+        Axis along which to perform the operation.
+        If None, applies to a flattened version of the array.
+    fill_value : var *[None]*
+        Value used to fill in the masked values.
+        If None, use the output of minimum_fill_value().
+    """
        mask = self._mask
        # Check all/nothing case ......
        if mask is nomask:
@@ -1860,9 +1993,16 @@
    #........................
    def max(self, axis=None, fill_value=None):
        """Returns the maximum of a along the given axis.
-If `axis` is None, applies to the flattened array. Masked values are filled
-with `fill_value` during processing. If `fill_value is None, it is set to the
-maximum_fill_value corresponding to the data type."""
+    Masked values are filled with fill_value.
+
+:Parameters:
+    axis : integer *[None]*
+        Axis along which to perform the operation.
+        If None, applies to a flattened version of the array.
+    fill_value : var *[None]*
+        Value used to fill in the masked values.
+        If None, use the output of maximum_fill_value().
+    """
        mask = self._mask
        # Check all/nothing case ......
        if mask is nomask:
@@ -1885,17 +2025,25 @@
    #........................
    def ptp(self, axis=None, fill_value=None):
        """Returns the visible data range (max-min) along the given axis.
-If the axis is `None`, applies on a flattened array. Masked values are filled
-with `fill_value` for processing. If `fill_value` is None, the maximum is uses
-the maximum default, the minimum uses the minimum default."""
+
+:Parameters:
+    axis : integer *[None]*
+        Axis along which to perform the operation.
+        If None, applies to a flattened version of the array.
+    fill_value : var *[None]*
+        Value used to fill in the masked values.
+        If None, the maximum uses the maximum default, the minimum uses
+        the minimum default.
+    """
        return self.max(axis, fill_value) - self.min(axis, fill_value)
+
    # Array methods ---------------------------------------
-    conj = conjugate = _arraymethod('conjugate')
+#    conj = conjugate = _arraymethod('conjugate')
    copy = _arraymethod('copy')
    diagonal = _arraymethod('diagonal')
    take = _arraymethod('take')
-    ravel = _arraymethod('ravel')
+#    ravel = _arraymethod('ravel')
    transpose = _arraymethod('transpose')
    T = property(fget=lambda self:self.transpose())
    swapaxes = _arraymethod('swapaxes')
@@ -1908,7 +2056,7 @@
        """Copies the data portion of the array to a hierarchical python list and
    returns that list. Data items are converted to the nearest compatible Python
    type.
-    Masked values are converted to `fill_value`. If `fill_value` is None, the
+    Masked values are converted to fill_value. If fill_value is None, the
    corresponding entries in the output list will be None.
        """
        if fill_value is not None:
@@ -1931,37 +2079,25 @@
    #........................
-    def tostring(self, fill_value=None):
-        """a.tostring(order='C', fill_value=None) -> raw copy of array data as a Python string.
+    def tostring(self, fill_value=None, order='C'):
+        """a.tostring(order='C', fill_value=None)
+
+    Returns a copy of array data as a Python string containing the
+    raw bytes in the array.

-        Keyword arguments:
-            order      : order of the data item in the copy {"C","F","A"} (default "C")
-            fill_value : value used in lieu of missing data
-
-        Construct a Python string containing the raw bytes in the array. The order
-        of the data in arrays with ndim > 1 is specified by the 'order' keyword and
-        this keyword overrides the order of the array. The
-        choices are:
-
+:Parameters:
+    fill_value : var *[None]*
+        Value used to fill in the masked values.
+        If None, uses self.fill_value instead.
+    order : string *['C']*
+        Order of the data item in the copy {"C","F","A"}.
            "C"       -- C order (row major)
            "Fortran" -- Fortran order (column major)
            "Any"     -- Current order of array.
            None      -- Same as "Any"
-
-        Masked data are filled with fill_value. If fill_value is None, the data-type-
-        dependent default is used."""
-        return self.filled(fill_value).tostring()
+    """
+        return self.filled(fill_value).tostring(order=order)
    #--------------------------------------------
-    # Backwards Compatibility. Heck...
-    @property
-    def data(self):
-        """Returns the `_data` part of the MaskedArray."""
-        return self._data
-    def raw_data(self):
-        """Returns the `_data` part of the MaskedArray.
-You should really use `data` instead..."""
-        return self._data
-    #--------------------------------------------
    # Pickling
    def __getstate__(self):
        "Returns the internal state of the masked array, for pickling purposes."
@@ -2002,7 +2138,7 @@
    in a pickle."""
    _data = ndarray.__new__(baseclass, baseshape, basetype)
    _mask = ndarray.__new__(ndarray, baseshape, 'b1')
-    return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype, small_mask=False)
+    return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype, shrink=False)

#MaskedArray.__dump__ = dump
#MaskedArray.__dumps__ = dumps
@@ -2016,22 +2152,24 @@
    return isinstance(x, MaskedArray)
isarray = isMaskedArray
isMA = isMaskedArray  #backward compatibility
-#masked = MaskedArray(0, int, mask=1)
-masked_singleton = MaskedArray(0, dtype=int_, mask=True)
+# We define the masked singleton as a float for higher precedence...
+masked_singleton = MaskedArray(0, dtype=float_, mask=True)
masked = masked_singleton
masked_array = MaskedArray
+
def array(data, dtype=None, copy=False, order=False, mask=nomask, subok=True,
-          keep_mask=True, small_mask=True, hard_mask=None, fill_value=None):
-    """array(data, dtype=None, copy=True, order=False, mask=nomask,
-             keep_mask=True, small_mask=True, fill_value=None)
+          keep_mask=True, hard_mask=False, fill_value=None):
+    """array(data, dtype=None, copy=False, order=False, mask=nomask, subok=True,
+             keep_mask=True, hard_mask=False, fill_value=None)
    Acts as a shortcut to MaskedArray, with options in a different order for
    convenience. And backwards compatibility...
    """
    #TODO: we should try to put 'order' somewhere
    return MaskedArray(data, mask=mask, dtype=dtype, copy=copy, subok=subok,
-                       keep_mask=keep_mask, small_mask=small_mask,
-                       hard_mask=hard_mask, fill_value=fill_value)
+                       keep_mask=keep_mask, hard_mask=hard_mask,
+                       fill_value=fill_value)
+array.__doc__ = masked_array.__doc__

def is_masked(x):
    """Returns whether x has some masked values."""
@@ -2055,7 +2193,7 @@
        return where(self.compare(a, b), a, b)
    #.........
    def reduce(self, target, axis=None):
-        """Reduces target along the given axis."""
+        "Reduces target along the given axis."
        m = getmask(target)
        if axis is not None:
            kargs = { 'axis' : axis }
@@ -2089,6 +2227,7 @@
        result = self.ufunc.outer(filled(a), filled(b))
        result._mask = m
        return result
+
#............................
class _minimum_operation(_extrema_operation):
    "Object to calculate minima"
@@ -2100,6 +2239,7 @@
        self.afunc = amin
        self.compare = less
        self.fill_value_func = minimum_fill_value
+
#............................
class _maximum_operation(_extrema_operation):
    "Object to calculate maxima"
@@ -2111,6 +2251,7 @@
        self.afunc = amax
        self.compare = greater
        self.fill_value_func = maximum_fill_value
+
#..........................................................
def min(array, axis=None, out=None):
    """Returns the minima along the given axis.
@@ -2121,16 +2262,16 @@
        return minimum(array)
    else:
        return minimum.reduce(array, axis)
+min.__doc__ = MaskedArray.min.__doc__
#............................
def max(obj, axis=None, out=None):
-    """Returns the maxima along the given axis.
-If `axis` is None, applies to the flattened array."""
    if out is not None:
        raise TypeError("Output arrays unsupported for masked arrays")
    if axis is None:
        return maximum(obj)
    else:
        return maximum.reduce(obj, axis)
+max.__doc__ = MaskedArray.max.__doc__
#.............................
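# The extrema machinery above reduces in three steps: fill the masked slots
# so they cannot win, reduce, then re-mask whatever was entirely masked.
# A pure-numpy sketch of a row-wise minimum (names illustrative only):
#     import numpy
#     data = numpy.array([[1, 9], [7, 3], [5, 5]])
#     mask = numpy.array([[0, 0], [0, 0], [1, 1]], dtype=bool)
#     filler = numpy.iinfo(data.dtype).max     # the minimum_fill_value idea
#     mins = numpy.where(mask, filler, data).min(axis=1)
#     assert mins[:2].tolist() == [1, 3]
#     assert mask.all(axis=1).tolist() == [False, False, True]  # stays masked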
def ptp(obj, axis=None): """a.ptp(axis=None) = a.max(axis)-a.min(axis)""" @@ -2138,6 +2279,7 @@ return obj.max(axis)-obj.min(axis) except AttributeError: return max(obj, axis=axis) - min(obj, axis=axis) +ptp.__doc__ = MaskedArray.ptp.__doc__ #####--------------------------------------------------------------------------- @@ -2165,7 +2307,7 @@ #Except that sometimes it doesn't work (try reshape([1,2,3,4],(2,2))) #we end up with a "SystemError: NULL result without error in PyObject_Call" #A dirty trick is then to call the initial numpy function... - method = getattr(fromnumeric.asarray(a), self._methodname) + method = getattr(narray(a, copy=False), self._methodname) try: return method(*args, **params) except SystemError: @@ -2200,93 +2342,40 @@ ma = getmask(a) mb = getmask(b) m = mask_or(ma, mb) - fa = filled(a, 1) - fb = filled(b, 1) + fa = getdata(a) + fb = getdata(b) if fb.dtype.char in typecodes["Integer"]: return masked_array(umath.power(fa, fb), m) - md = make_mask((fa < 0), small_mask=1) + md = make_mask((fa < 0), shrink=True) m = mask_or(m, md) if m is nomask: return masked_array(umath.power(fa, fb)) else: - fa[m] = 1 + fa = fa.copy() + fa[(fa < 0)] = 1 return masked_array(umath.power(fa, fb), m) #.............................................................................. def argsort(a, axis=None, kind='quicksort', order=None, fill_value=None): - """Returns an array of indices that sort 'a' along the specified axis. - Masked values are filled beforehand to `fill_value`. - If `fill_value` is None, uses the default for the data type. - Returns a numpy array. - -:Keywords: - `axis` : Integer *[None]* - Axis to be indirectly sorted (default -1) - `kind` : String *['quicksort']* - Sorting algorithm (default 'quicksort') - Possible values: 'quicksort', 'mergesort', or 'heapsort' - - Returns: array of indices that sort 'a' along the specified axis. - - This method executes an indirect sort along the given axis using the - algorithm specified by the kind keyword. It returns an array of indices of - the same shape as 'a' that index data along the given axis in sorted order. - - The various sorts are characterized by average speed, worst case - performance, need for work space, and whether they are stable. A stable - sort keeps items with the same key in the same relative order. The three - available algorithms have the following properties: - - |------------------------------------------------------| - | kind | speed | worst case | work space | stable| - |------------------------------------------------------| - |'quicksort'| 1 | O(n^2) | 0 | no | - |'mergesort'| 2 | O(n*log(n)) | ~n/2 | yes | - |'heapsort' | 3 | O(n*log(n)) | 0 | no | - |------------------------------------------------------| - - All the sort algorithms make temporary copies of the data when the sort is not - along the last axis. Consequently, sorts along the last axis are faster and use - less space than sorts along other axis. - """ + "Function version of the eponymous method." if fill_value is None: fill_value = default_fill_value(a) d = filled(a, fill_value) if axis is None: return d.argsort(kind=kind, order=order) return d.argsort(axis, kind=kind, order=order) +argsort.__doc__ = MaskedArray.argsort.__doc__ def argmin(a, axis=None, fill_value=None): - """Returns the array of indices for the minimum values of `a` along the - specified axis. - Masked values are treated as if they had the value `fill_value`. - If `fill_value` is None, the default for the data type is used. - Returns a numpy array. 
-
-:Keywords:
-    `axis` : Integer *[None]*
-        Axis to be indirectly sorted (default -1)
-    `fill_value` : var *[None]*
-        Default filling value. If None, uses the data type default.
-    """
+    "Function version of the eponymous method."
    if fill_value is None:
        fill_value = default_fill_value(a)
    d = filled(a, fill_value)
    return d.argmin(axis=axis)
+argmin.__doc__ = MaskedArray.argmin.__doc__

def argmax(a, axis=None, fill_value=None):
-    """Returns the array of indices for the maximum values of `a` along the
-    specified axis.
-    Masked values are treated as if they had the value `fill_value`.
-    If `fill_value` is None, the default for the data type is used.
-    Returns a numpy array.
-
-:Keywords:
-    `axis` : Integer *[None]*
-        Axis to be indirectly sorted (default -1)
-    `fill_value` : var *[None]*
-        Default filling value. If None, uses the data type default.
-    """
+    "Function version of the eponymous method."
    if fill_value is None:
        fill_value = default_fill_value(a)
        try:
@@ -2295,49 +2384,11 @@
            pass
    d = filled(a, fill_value)
    return d.argmax(axis=axis)
+argmax.__doc__ = MaskedArray.argmax.__doc__

def sort(a, axis=-1, kind='quicksort', order=None, endwith=True, fill_value=None):
-    """
-    Sort a along the given axis.
-
-Keyword arguments:
-
-axis -- axis to be sorted (default -1)
-kind -- sorting algorithm (default 'quicksort')
-     Possible values: 'quicksort', 'mergesort', or 'heapsort'.
-order -- If a has fields defined, then the order keyword can be the
-     field name to sort on or a list (or tuple) of field names
-     to indicate the order that fields should be used to define
-     the sort.
-endwith--Boolean flag indicating whether missing values (if any) should
-     be forced in the upper indices (at the end of the array) or
-     lower indices (at the beginning).
-
-Returns: None.
-
-This method sorts 'a' in place along the given axis using the algorithm
-specified by the kind keyword.
-
-The various sorts may characterized by average speed, worst case
-performance, need for work space, and whether they are stable. A stable
-sort keeps items with the same key in the same relative order and is most
-useful when used with argsort where the key might differ from the items
-being sorted. The three available algorithms have the following properties:
-
-|------------------------------------------------------|
-|    kind   | speed |  worst case | work space | stable|
-|------------------------------------------------------|
-|'quicksort'|   1   | O(n^2)      |     0      |   no  |
-|'mergesort'|   2   | O(n*log(n)) |    ~n/2    |   yes |
-|'heapsort' |   3   | O(n*log(n)) |     0      |   no  |
-|------------------------------------------------------|
-
-All the sort algorithms make temporary copies of the data when the sort is
-not along the last axis. Consequently, sorts along the last axis are faster
-and use less space than sorts along other axis.
-
-"""
-    a = numeric.asanyarray(a)
+    "Function version of the eponymous method."
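# The filler chosen below is what implements endwith: masked slots are filled
# with an extreme value so that argsort pushes them to one end of the array.
# Pure-numpy sketch (names illustrative only):
#     import numpy
#     data = numpy.array([3, 1, 2])
#     mask = numpy.array([False, True, False])
#     filler = numpy.iinfo(data.dtype).max     # endwith=True -> sort last
#     idx = numpy.where(mask, filler, data).argsort()
#     assert data[idx].tolist() == [2, 3, 1]   # the masked 1 ends up last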
+ a = narray(a, copy=False, subok=True) if fill_value is None: if endwith: filler = minimum_fill_value(a) @@ -2349,37 +2400,42 @@ indx = numpy.indices(a.shape).tolist() indx[axis] = filled(a,filler).argsort(axis=axis,kind=kind,order=order) return a[indx] +sort.__doc__ = MaskedArray.sort.__doc__ + def compressed(x): - """Returns a compressed version of a masked array (or just the array if it - wasn't masked first).""" - if getmask(x) is None: + """Returns a 1-D array of all the non-masked data.""" + if getmask(x) is nomask: return x else: return x.compressed() -def count(a, axis = None): - "Count of the non-masked elements in a, or along a certain axis." - a = masked_array(a) - return a.count(axis) - def concatenate(arrays, axis=0): "Concatenates the arrays along the given axis" - d = numeric.concatenate([filled(a) for a in arrays], axis) + d = numpy.concatenate([getdata(a) for a in arrays], axis) rcls = get_masked_subclass(*arrays) data = d.view(rcls) + # Check whether one of the arrays has a non-empty mask... for x in arrays: if getmask(x) is not nomask: break + return data + # OK, so we have to concatenate the masks + dm = numpy.concatenate([getmaskarray(a) for a in arrays], axis) + shrink = numpy.logical_or.reduce([getattr(a,'_shrinkmask',True) for a in arrays]) + if shrink and not dm.any(): + data._mask = nomask else: - return data - dm = numeric.concatenate([getmaskarray(a) for a in arrays], axis) - dm = make_mask(dm, copy=False, small_mask=True) - data._mask = dm + data._mask = dm.reshape(d.shape) return data +def count(a, axis = None): + "Count of the non-masked elements in a, or along a certain axis." + return masked_array(a, copy=False).count(axis) + + def expand_dims(x,axis): - """Expand the shape of a by including newaxis before given axis.""" + """Expands the shape of a by including newaxis before given axis.""" result = n_expand_dims(x,axis) if isinstance(x, MaskedArray): new_shape = result.shape @@ -2409,6 +2465,7 @@ else: d = umath.right_shift(filled(a, 0), n) return masked_array(d, mask=m) + #...................................... def put(a, indices, values, mode='raise'): """Sets storage-indexed locations to corresponding values. @@ -2417,7 +2474,7 @@ try: return a.put(indices, values, mode=mode) except AttributeError: - return fromnumeric.asarray(a).put(indices, values, mode=mode) + return narray(a, copy=False).put(indices, values, mode=mode) def putmask(a, mask, values): #, mode='raise'): """`putmask(a, mask, v)` results in `a = v` for all places where `mask` is true. @@ -2427,7 +2484,7 @@ try: return a.putmask(values, mask) except AttributeError: - return fromnumeric.asarray(a).putmask(values, mask) + return narray(a, copy=False).putmask(values, mask) def transpose(a,axes=None): """Returns a view of the array with dimensions permuted according to axes. @@ -2437,15 +2494,15 @@ try: return a.transpose(axes) except AttributeError: - return fromnumeric.asarray(a).transpose(axes) + return narray(a, copy=False).transpose(axes) def reshape(a, new_shape): - """Changes the shape of the array `a` to `new_shape`.""" + """Changes the shape of the array a to new_shape.""" #We can't use 'frommethod', it whine about some parameters. Dmmit. try: return a.reshape(new_shape) except AttributeError: - return fromnumeric.asarray(a).reshape(new_shape) + return narray(a, copy=False).reshape(new_shape) def resize(x, new_shape): """resize(a,new_shape) returns a new array with the specified shape. @@ -2456,8 +2513,8 @@ # We can't use _frommethods here, as N.resize is notoriously whiny. 
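# resize() has to grow the data and the mask in lockstep, since numpy.resize
# repeats its input cyclically. A quick sketch (names illustrative only):
#     import numpy
#     data = numpy.array([1, 2, 3])
#     mask = numpy.array([False, True, False])
#     new_data = numpy.resize(data, (2, 3))    # [[1,2,3],[1,2,3]]
#     new_mask = numpy.resize(mask, (2, 3))
#     assert new_mask[1, 1] and new_data[1, 1] == 2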
    m = getmask(x)
    if m is not nomask:
-        m = fromnumeric.resize(m, new_shape)
-    result = fromnumeric.resize(x, new_shape).view(get_masked_subclass(x))
+        m = numpy.resize(m, new_shape)
+    result = numpy.resize(x, new_shape).view(get_masked_subclass(x))
    if result.ndim:
        result._mask = m
    return result
@@ -2467,46 +2524,82 @@
def rank(obj):
    """Gets the rank of sequence a (the number of dimensions, not a matrix rank)
The rank of a scalar is zero."""
-    return fromnumeric.rank(filled(obj))
+    return fromnumeric.rank(getdata(obj))
#
def shape(obj):
    """Returns the shape of `a` (as a function call which also works on nested sequences).
    """
-    return fromnumeric.shape(filled(obj))
+    return fromnumeric.shape(getdata(obj))
#
def size(obj, axis=None):
    """Returns the number of elements in the array along the given axis,
or in the sequence if `axis` is None.
    """
-    return fromnumeric.size(filled(obj), axis)
+    return fromnumeric.size(getdata(obj), axis)
#................................................

#####--------------------------------------------------------------------------
#---- --- Extra functions ---
#####--------------------------------------------------------------------------
-def where (condition, x, y):
-    """where(condition, x, y) is x where condition is nonzero, y otherwise.
-       condition must be convertible to an integer array.
-       Answer is always the shape of condition.
-       The type depends on x and y. It is integer if both x and y are
-       the value masked.
+def where (condition, x=None, y=None):
+    """where(condition, x=None, y=None)
+    Returns a (subclass of) masked array, shaped like condition, where
+    the elements are x when condition is True, and y otherwise.
+    condition must be convertible to an integer array.
+    If neither x nor y is given, returns a tuple of the indices where
+    condition is True (a la condition.nonzero()).
    """
-    fc = filled(not_equal(condition, 0), 0)
-    xv = filled(x)
-    xm = getmask(x)
-    yv = filled(y)
-    ym = getmask(y)
-    d = numeric.choose(fc, (yv, xv))
-    md = numeric.choose(fc, (ym, xm))
-    m = getmask(condition)
-    m = make_mask(mask_or(m, md), copy=False, small_mask=True)
-    return masked_array(d, mask=m)
+    if x is None and y is None:
+        return filled(condition, 0).nonzero()
+    elif x is None or y is None:
+        raise ValueError, "Either both or neither of x and y should be given."
+    # Get the condition ...............
+    fc = filled(condition, 0).astype(bool_)
+    notfc = numpy.logical_not(fc)
+    # Get the data ......................................
+    xv = getdata(x)
+    yv = getdata(y)
+    if x is masked:
+        ndtype = yv.dtype
+        xm = numpy.ones(fc.shape, dtype=MaskType)
+    elif y is masked:
+        ndtype = xv.dtype
+        ym = numpy.ones(fc.shape, dtype=MaskType)
+    else:
+        ndtype = numpy.max([xv.dtype, yv.dtype])
+        xm = getmask(x)
+    d = numpy.empty(fc.shape, dtype=ndtype).view(MaskedArray)
+    numpy.putmask(d._data, fc, xv.astype(ndtype))
+    numpy.putmask(d._data, notfc, yv.astype(ndtype))
+    d._mask = numpy.zeros(fc.shape, dtype=MaskType)
+    numpy.putmask(d._mask, fc, getmask(x))
+    numpy.putmask(d._mask, notfc, getmask(y))
+    d._mask |= getmaskarray(condition)
+    if not d._mask.any():
+        d._mask = nomask
+    return d
+#    # Get the data as a (subclass of) MaskedArray
+#    xv = getdata(x)
+#    yv = getdata(y)
+#    d = numpy.choose(fc, (yv, xv)).view(MaskedArray)
+#    # Get the mask ....
+#    xm = getmask(x)
+#    ym = getmask(y)
+#    d.mask = numpy.choose(fc, (ym, xm)) | getmask(condition)
+#    # Fix the dtype if one of the values was masked, to prevent an upcast to float
+#    if y is masked:
+#        ndtype = xv.dtype
+#    elif x is masked:
+#        ndtype = yv.dtype
+#    else:
+#        ndtype = d.dtype
+#    return d.astype(ndtype)

def choose (indices, t, out=None, mode='raise'):
    "Returns array shaped like indices with elements chosen from t"
    #TODO: implement options `out` and `mode`, if possible.
    def fmask (x):
-        "Returns the filled array, or True if ``masked``."
+        "Returns the filled array, or True if masked."
        if x is masked:
            return 1
        return filled(x)
@@ -2521,27 +2614,28 @@
    c = filled(indices, 0)
    masks = [nmask(x) for x in t]
    a = [fmask(x) for x in t]
-    d = numeric.choose(c, a)
-    m = numeric.choose(c, masks)
-    m = make_mask(mask_or(m, getmask(indices)), copy=0, small_mask=1)
+    d = numpy.choose(c, a)
+    m = numpy.choose(c, masks)
+    m = make_mask(mask_or(m, getmask(indices)), copy=0, shrink=True)
    return masked_array(d, mask=m)

def round_(a, decimals=0, out=None):
-    """Returns reference to result. Copies a and rounds to 'decimals' places.
+    """Returns a copy of a, rounded to 'decimals' places.

-    Keyword arguments:
-        decimals -- number of decimals to round to (default 0). May be negative.
-        out -- existing array to use for output (default copy of a).
+:Parameters:
+    decimals : integer *[0]*
+        Number of decimals to round to. May be negative.
+    out : ndarray
+        Existing array to use for output (default copy of a).

-    Return:
-        Reference to out, where None specifies a copy of the original array a.
-
-    Round to the specified number of decimals. When 'decimals' is negative it
-    specifies the number of positions to the left of the decimal point. The
-    real and imaginary parts of complex numbers are rounded separately.
+Notes
+-----
+    Rounds to the specified number of decimals. When 'decimals' is negative,
+    it specifies the number of positions to the left of the decimal point.
+    The real and imaginary parts of complex numbers are rounded separately.

    Nothing is done if the array is not of float type and 'decimals' is
    greater than or equal to 0."""
-    result = fromnumeric.round_(filled(a), decimals, out)
+    result = fromnumeric.round_(getdata(a), decimals, out)
    if isinstance(a,MaskedArray):
        result = result.view(type(a))
        result._mask = a._mask
@@ -2549,29 +2643,25 @@
        result = result.view(MaskedArray)
    return result

-def arange(start, stop=None, step=1, dtype=None):
-    """Just like range() except it returns a array whose type can be specified
-    by the keyword argument dtype.
-    """
-    return array(numeric.arange(start, stop, step, dtype),mask=nomask)
+def arange(start, stop=None, step=1, dtype=None):
+    "maskedarray version of the numpy function."
+    if stop is None:
+        start, stop = 0, start
+    return numpy.arange(start, stop, step, dtype).view(MaskedArray)
+arange.__doc__ = numpy.arange.__doc__

def inner(a, b):
-    """inner(a,b) returns the dot product of two arrays, which has
-    shape a.shape[:-1] + b.shape[:-1] with elements computed by summing the
-    product of the elements from the last dimensions of a and b.
-    Masked elements are replace by zeros.
-    """
+    "maskedarray version of the numpy function."
    fa = filled(a, 0)
    fb = filled(b, 0)
    if len(fa.shape) == 0:
        fa.shape = (1,)
    if len(fb.shape) == 0:
        fb.shape = (1,)
-    return masked_array(numeric.inner(fa, fb))
+    return numpy.inner(fa, fb).view(MaskedArray)
+inner.__doc__ = numpy.inner.__doc__ + "\nMasked values are replaced by 0."
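# Filling with 0 before inner() means a masked slot simply drops out of the
# dot product. A quick standalone check in plain numpy:
#     import numpy
#     a = numpy.array([1., 2., 3.])
#     mask = numpy.array([False, True, False])
#     b = numpy.array([10., 10., 10.])
#     assert numpy.inner(numpy.where(mask, 0., a), b) == 40.   # 1*10 + 3*10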
innerproduct = inner def outer(a, b): - """outer(a,b) = {a[i]*b[j]}, has shape (len(a),len(b))""" + "maskedarray version of the numpy function." fa = filled(a, 0).ravel() fb = filled(b, 0).ravel() d = numeric.outer(fa, fb) @@ -2583,22 +2673,22 @@ mb = getmaskarray(b) m = make_mask(1-numeric.outer(1-ma, 1-mb), copy=0) return masked_array(d, mask=m) +outer.__doc__ = numpy.outer.__doc__ + "\nMasked values are replaced by 0." outerproduct = outer def allequal (a, b, fill_value=True): + """Returns True if all entries of a and b are equal, using fill_value + as a truth value where either or both are masked. """ -Returns `True` if all entries of a and b are equal, using -fill_value as a truth value where either or both are masked. - """ m = mask_or(getmask(a), getmask(b)) if m is nomask: - x = filled(a) - y = filled(b) + x = getdata(a) + y = getdata(b) d = umath.equal(x, y) return d.all() elif fill_value: - x = filled(a) - y = filled(b) + x = getdata(a) + y = getdata(b) d = umath.equal(x, y) dm = array(d, mask=m, copy=False) return dm.filled(True).all(None) @@ -2606,16 +2696,16 @@ return False def allclose (a, b, fill_value=True, rtol=1.e-5, atol=1.e-8): - """ Returns `True` if all elements of `a` and `b` are equal subject to given tolerances. -If `fill_value` is True, masked values are considered equal. -If `fill_value` is False, masked values considered unequal. + """ Returns True if all elements of a and b are equal subject to given tolerances. +If fill_value is True, masked values are considered equal. +If fill_value is False, masked values considered unequal. The relative error rtol should be positive and << 1.0 -The absolute error `atol` comes into play for those elements of `b` - that are very small or zero; it says how small `a` must be also. +The absolute error atol comes into play for those elements of b that are very small +or zero; it says how small `a` must be also. """ m = mask_or(getmask(a), getmask(b)) - d1 = filled(a) - d2 = filled(b) + d1 = getdata(a) + d2 = getdata(b) x = filled(array(d1, copy=0, mask=m), fill_value).astype(float) y = filled(array(d2, copy=0, mask=m), 1).astype(float) d = umath.less_equal(umath.absolute(x-y), atol + rtol * umath.absolute(y)) @@ -2623,35 +2713,41 @@ #.............................................................................. def asarray(a, dtype=None): - """asarray(data, dtype) = array(data, dtype, copy=0) -Returns `a` as an masked array. -No copy is performed if `a` is already an array. -Subclasses are converted to base class MaskedArray. + """asarray(data, dtype) = array(data, dtype, copy=0, subok=0) +Returns a as a MaskedArray object. +No copy is performed if a is already an array. +Subclasses are converted to the base class MaskedArray. """ - return masked_array(a, dtype=dtype, copy=False, keep_mask=True) + return masked_array(a, dtype=dtype, copy=False, keep_mask=True, subok=False) +def asanyarray(a, dtype=None): + """asanyarray(data, dtype) = array(data, dtype, copy=0, subok=1) +Returns a as an masked array. +No copy is performed if a is already an array. +Subclasses are conserved. + """ + return masked_array(a, dtype=dtype, copy=False, keep_mask=True, subok=True) + + def empty(new_shape, dtype=float): - """empty((d1,...,dn),dtype=float,order='C') -Returns a new array of shape (d1,...,dn) and given type with all its -entries uninitialized. This can be faster than zeros.""" - return numeric.empty(new_shape, dtype).view(MaskedArray) + "maskedarray version of the numpy function." 
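# empty/empty_like/ones/zeros below all share one pattern: build a plain
# ndarray, then reinterpret it as the masked subclass with .view(), which
# costs no copy. Generic sketch with a hypothetical minimal subclass:
#     import numpy
#     class Tagged(numpy.ndarray):
#         pass
#     z = numpy.zeros((2, 2)).view(Tagged)
#     assert isinstance(z, Tagged) and z.sum() == 0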
+ return numpy.empty(new_shape, dtype).view(MaskedArray) +empty.__doc__ = numpy.empty.__doc__ def empty_like(a): - """empty_like(a) -Returns an empty (uninitialized) array of the shape and typecode of a. -Note that this does NOT initialize the returned array. -If you require your array to be initialized, you should use zeros_like().""" - return numeric.empty_like(a).view(MaskedArray) + "maskedarray version of the numpy function." + return numpy.empty_like(a).view(MaskedArray) +empty_like.__doc__ = numpy.empty_like.__doc__ def ones(new_shape, dtype=float): - """ones(shape, dtype=None) -Returns an array of the given dimensions, initialized to all ones.""" - return numeric.ones(new_shape, dtype).view(MaskedArray) + "maskedarray version of the numpy function." + return numpy.ones(new_shape, dtype).view(MaskedArray) +ones.__doc__ = numpy.ones.__doc__ def zeros(new_shape, dtype=float): - """zeros(new_shape, dtype=None) -Returns an array of the given dimensions, initialized to all zeros.""" - return numeric.zeros(new_shape, dtype).view(MaskedArray) + "maskedarray version of the numpy function." + return numpy.zeros(new_shape, dtype).view(MaskedArray) +zeros.__doc__ = numpy.zeros.__doc__ #####-------------------------------------------------------------------------- #---- --- Pickling --- @@ -2680,82 +2776,16 @@ return cPickle.loads(strg) -################################################################################ +############################################################################### -if __name__ == '__main__': - from testutils import assert_equal, assert_almost_equal - if 0: - x = arange(10) - assert(x.ctypes.data == x.filled().ctypes.data) - if 0: - a = array([1,2,3,4],mask=[0,0,0,0],small_mask=True) - a[1] = masked - a[1] = 1 - assert(a.ravel()._mask, [0,0,0,0]) - assert(a.compressed(), a) - a[0] = masked - assert(a.compressed()._mask, [0,0,0]) - if 0: - x = array(0, mask=0) - I = x.ctypes.data - J = x.filled().ctypes.data - print (I,J) - x = array([0,0], mask=0) - (I,J) = (x.ctypes.data, x.filled().ctypes.data) - print (I,J) - if 0: - x = array(numpy.arange(12)) - x[[1,-2]] = masked - xlist = x.tolist() - assert(xlist[1] is None) - assert(xlist[-2] is None) - # - x.shape = (3,4) - xlist = x.tolist() - # - assert_equal(xlist[0],[0,None,2,3]) - assert_equal(xlist[1],[4,5,6,7]) - assert_equal(xlist[2],[8,9,None,11]) - - if 0: - xl = numpy.random.rand(100,100) - yl = numpy.random.rand(100,100) - maskx = xl > 0.8 - masky = yl < 0.2 - mxl = array(xl, mask=maskx) - myl = array(yl, mask=masky) - - zz = mxl + myl +#if __name__ == '__main__': + #from maskedarray.testutils import assert_equal, assert_almost_equal - if 0: - print "x is ndarray" - x = array(numpy.random.rand(50,50)) - print "set x._mask" - x[x > 0.8] = masked - print "set y" - y = array(numpy.random.rand(50,50)) - print "set y._mask" - ymask = y._data < 0.2 - print "set y._mask" - y.__setmask__(ymask) - print "add x + y" - z = x + y - - r.__setmask__() - - if 1: - "Check that we don't lose the fill_value" - data = masked_array([1,2,3],fill_value=-999) - series = data[[0,2,1]] - assert_equal(series._fill_value, data._fill_value) - - if 1: - "Check squeeze" - data = masked_array([[1,2,3]]) - assert_equal(data.squeeze(), [1,2,3]) - data = masked_array([[1,2,3]], mask=[[1,1,1]]) - assert_equal(data.squeeze(), [1,2,3]) - assert_equal(data.squeeze()._mask, [1,1,1]) - data = masked_array([[1]], mask=True) - assert(data.squeeze() is masked) - + #xm = array(numpy.random.uniform(-1,1,25)) + #xm[xm>0.5] = masked + #xm.fill_value = -999 
+ ## + #z = 3//where(xm.mask,0,xm) + #assert_equal(z._mask, numpy.logical_or(xm==0,xm._mask)) + #assert_equal(z._data[xm._mask], 1) + Modified: trunk/scipy/sandbox/maskedarray/extras.py =================================================================== --- trunk/scipy/sandbox/maskedarray/extras.py 2007-09-26 19:48:46 UTC (rev 3370) +++ trunk/scipy/sandbox/maskedarray/extras.py 2007-09-27 03:34:44 UTC (rev 3371) @@ -672,12 +672,7 @@ import numpy as N from maskedarray.testutils import assert_equal if 1: - n = N.arange(1,7) - # - m = [1,0,0,0,0,0] - a = masked_array(n, mask=m).reshape(2,3) - b = masked_array(n, mask=m).reshape(3,2) - c = dot(a,b, True) - assert_equal(c.mask, [[1,1],[1,0]]) - c = dot(a,b,False) - assert_equal(c, N.dot(a.filled(0), b.filled(0))) \ No newline at end of file + b = ones(5) + m = [1,0,0,0,0] + d = masked_array(b,mask=m) + c = mr_[d,0,0,d] \ No newline at end of file Modified: trunk/scipy/sandbox/maskedarray/morestats.py =================================================================== --- trunk/scipy/sandbox/maskedarray/morestats.py 2007-09-26 19:48:46 UTC (rev 3370) +++ trunk/scipy/sandbox/maskedarray/morestats.py 2007-09-27 03:34:44 UTC (rev 3371) @@ -365,6 +365,37 @@ ############################################################################### if __name__ == '__main__': - data = numpy.arange(100).reshape(4,25) -# tmp = hdquantiles(data, prob=[0.25,0.75,0.5], axis=1, var=False) + if 0: + from maskedarray.testutils import assert_almost_equal + data = [0.706560797,0.727229578,0.990399276,0.927065621,0.158953014, + 0.887764025,0.239407086,0.349638551,0.972791145,0.149789972, + 0.936947700,0.132359948,0.046041972,0.641675031,0.945530547, + 0.224218684,0.771450991,0.820257774,0.336458052,0.589113496, + 0.509736129,0.696838829,0.491323573,0.622767425,0.775189248, + 0.641461450,0.118455200,0.773029450,0.319280007,0.752229111, + 0.047841438,0.466295911,0.583850781,0.840581845,0.550086491, + 0.466470062,0.504765074,0.226855960,0.362641207,0.891620942, + 0.127898691,0.490094097,0.044882048,0.041441695,0.317976349, + 0.504135618,0.567353033,0.434617473,0.636243375,0.231803616, + 0.230154113,0.160011327,0.819464108,0.854706985,0.438809221, + 0.487427267,0.786907310,0.408367937,0.405534192,0.250444460, + 0.995309248,0.144389588,0.739947527,0.953543606,0.680051621, + 0.388382017,0.863530727,0.006514031,0.118007779,0.924024803, + 0.384236354,0.893687694,0.626534881,0.473051932,0.750134705, + 0.241843555,0.432947602,0.689538104,0.136934797,0.150206859, + 0.474335206,0.907775349,0.525869295,0.189184225,0.854284286, + 0.831089744,0.251637345,0.587038213,0.254475554,0.237781276, + 0.827928620,0.480283781,0.594514455,0.213641488,0.024194386, + 0.536668589,0.699497811,0.892804071,0.093835427,0.731107772] + # + assert_almost_equal(hdquantiles(data,[0., 1.]), + [0.006514031, 0.995309248]) + hdq = hdquantiles(data,[0.25, 0.5, 0.75]) + assert_almost_equal(hdq, [0.253210762, 0.512847491, 0.762232442,]) + hdq = hdquantiles_sd(data,[0.25, 0.5, 0.75]) + assert_almost_equal(hdq, [0.03786954, 0.03805389, 0.03800152,], 4) + # + data = numpy.array(data).reshape(10,10) + hdq = hdquantiles(data,[0.25,0.5,0.75],axis=0) + Modified: trunk/scipy/sandbox/maskedarray/mrecords.py =================================================================== --- trunk/scipy/sandbox/maskedarray/mrecords.py 2007-09-26 19:48:46 UTC (rev 3370) +++ trunk/scipy/sandbox/maskedarray/mrecords.py 2007-09-27 03:34:44 UTC (rev 3371) @@ -15,6 +15,7 @@ import numpy from numpy import bool_, complex_, float_, int_, str_, 
object_ +from numpy import array as narray import numpy.core.numeric as numeric import numpy.core.numerictypes as ntypes from numpy.core.defchararray import chararray @@ -27,10 +28,9 @@ _byteorderconv = numpy.core.records._byteorderconv _typestr = ntypes._typestr -import maskedarray as MA -from maskedarray import masked, nomask, mask_or, filled, getmask, getmaskarray, \ - masked_array, make_mask -from maskedarray import MaskedArray +import maskedarray +from maskedarray import MaskedArray, masked, nomask, masked_array,\ + make_mask, mask_or, getmask, getmaskarray, filled from maskedarray.core import default_fill_value, masked_print_option import warnings @@ -194,9 +194,12 @@ if attr in _names: _data = self._data _mask = self._fieldmask - obj = numeric.asarray(_data.__getattribute__(attr)).view(MaskedArray) - obj.__setmask__(_mask.__getattribute__(attr)) - if (obj.ndim == 0) and obj._mask: +# obj = masked_array(_data.__getattribute__(attr), copy=False, +# mask=_mask.__getattribute__(attr)) + # Use a view in order to avoid the copy of the mask in MaskedArray.__new__ + obj = narray(_data.__getattribute__(attr), copy=False).view(MaskedArray) + obj._mask = _mask.__getattribute__(attr) + if not obj.ndim and obj._mask: return masked return obj raise AttributeError,"No attribute '%s' !" % attr @@ -213,7 +216,7 @@ exctype, value = sys.exc_info()[:2] raise exctype, value else: - if attr not in list(self.dtype.names) + ['_mask']: + if attr not in list(self.dtype.names) + ['_mask','mask']: return ret if newattr: # We just added this one try: # or this setattr worked on an internal @@ -245,6 +248,9 @@ if isinstance(indx, str): obj = _data[indx].view(MaskedArray) obj._set_mask(_localdict['_fieldmask'][indx]) + # Force to nomask if the mask is empty + if not obj._mask.any(): + obj._mask = nomask return obj # We want some elements .. # First, the data ........ @@ -309,6 +315,7 @@ else: for n in names: fmask[n].flat = newmask + return def _getmask(self): """Returns the mask of the mrecord: a record is masked when all the fields @@ -435,7 +442,7 @@ """ - arraylist = [MA.asarray(x) for x in arraylist] + arraylist = [masked_array(x) for x in arraylist] # Define/check the shape..................... if shape is None or shape == 0: shape = arraylist[0].shape @@ -611,7 +618,7 @@ if varnames is None: varnames = _varnames # Get the data .............................. - _variables = MA.asarray([line.strip().split(delimitor) for line in f + _variables = masked_array([line.strip().split(delimitor) for line in f if line[0] != commentchar and len(line) > 1]) (_, nfields) = _variables.shape # Try to guess the dtype .................... @@ -643,7 +650,7 @@ _mask = mrecord._fieldmask if newfieldname is None or newfieldname in reserved_fields: newfieldname = 'f%i' % len(_data.dtype) - newfield = MA.asarray(newfield) + newfield = masked_array(newfield) # Get the new data ............ 
# Create a new empty recarray newdtype = numeric.dtype(_data.dtype.descr + \ @@ -674,58 +681,23 @@ from maskedarray.testutils import assert_equal if 1: d = N.arange(5) - m = MA.make_mask([1,0,0,1,1]) + m = maskedarray.make_mask([1,0,0,1,1]) base_d = N.r_[d,d[::-1]].reshape(2,-1).T base_m = N.r_[[m, m[::-1]]].T - base = MA.array(base_d, mask=base_m).T + base = masked_array(base_d, mask=base_m).T mrecord = fromarrays(base,dtype=[('a',N.float_),('b',N.float_)]) mrec = MaskedRecords(mrecord.copy()) # - mrec.a[3:] = 5 - assert_equal(mrec.a, [0,1,2,5,5]) - assert_equal(mrec.a._mask, [1,0,0,0,0]) - # - mrec.b[3:] = masked - assert_equal(mrec.b, [4,3,2,1,0]) - assert_equal(mrec.b._mask, [1,1,0,1,1]) - # - mrec[:2] = masked - assert_equal(mrec._mask, [1,1,0,0,0]) - mrec[-1] = masked - assert_equal(mrec._mask, [1,1,0,0,1]) - if 1: - nrec = N.core.records.fromarrays(N.r_[[d,d[::-1]]], - dtype=[('a',N.float_),('b',N.float_)]) - mrec = mrecord - #.................... - mrecfr = fromrecords(nrec) - assert_equal(mrecfr.a, mrec.a) - assert_equal(mrecfr.dtype, mrec.dtype) - #.................... - tmp = mrec[::-1] #.tolist() - mrecfr = fromrecords(tmp) - assert_equal(mrecfr.a, mrec.a[::-1]) - #.................... - mrecfr = fromrecords(nrec.tolist(), names=nrec.dtype.names) - assert_equal(mrecfr.a, mrec.a) - assert_equal(mrecfr.dtype, mrec.dtype) - if 0: - assert_equal(mrec.a, MA.array(d,mask=m)) - assert_equal(mrec.b, MA.array(d[::-1],mask=m[::-1])) - assert((mrec._fieldmask == N.core.records.fromarrays([m, m[::-1]])).all()) + if 1: + mrec = mrec.copy() + mrec.harden_mask() + assert(mrec._hardmask) + mrec._mask = nomask assert_equal(mrec._mask, N.r_[[m,m[::-1]]].all(0)) - assert_equal(mrec.a[1], mrec[1].a) - - if 0: - x = [(1.,10.,'a'),(2.,20,'b'),(3.14,30,'c'),(5.55,40,'d')] - desc = [('ffloat', N.float_), ('fint', N.int_), ('fstr', 'S10')] - mr = MaskedRecords(x,dtype=desc) - mr[0] = masked - mr.ffloat[-1] = masked - # - mrlast = mr[-1] - assert(isinstance(mrlast,MaskedRecords)) - assert(hasattr(mrlast,'ffloat')) - assert_equal(mrlast.ffloat, masked) - + mrec.soften_mask() + assert(not mrec._hardmask) + mrec.mask = nomask + tmp = mrec['b']._mask + assert(mrec['b']._mask is nomask) + assert_equal(mrec['a']._mask,mrec['b']._mask) \ No newline at end of file Modified: trunk/scipy/sandbox/maskedarray/mstats.py =================================================================== --- trunk/scipy/sandbox/maskedarray/mstats.py 2007-09-26 19:48:46 UTC (rev 3370) +++ trunk/scipy/sandbox/maskedarray/mstats.py 2007-09-27 03:34:44 UTC (rev 3371) @@ -19,7 +19,7 @@ import numpy.core.numeric as numeric from numpy.core.numeric import concatenate -import maskedarray as MA +import maskedarray from maskedarray.core import masked, nomask, MaskedArray, masked_array from maskedarray.extras import apply_along_axis, dot @@ -391,4 +391,14 @@ nlo = (data[:,None] < points[None,:] - h).sum(0) return (nhi-nlo) / (2.*n*h) - \ No newline at end of file +################################################################################ +if __name__ == '__main__': + from maskedarray.testutils import assert_almost_equal + if 1: + a = maskedarray.arange(1,101) + a[1::2] = masked + b = maskedarray.resize(a, (100,100)) + assert_almost_equal(mquantiles(b), [25., 50., 75.]) + assert_almost_equal(mquantiles(b, axis=0), maskedarray.resize(a,(3,100))) + assert_almost_equal(mquantiles(b, axis=1), + maskedarray.resize([24.9, 50., 75.1], (100,3))) \ No newline at end of file Modified: trunk/scipy/sandbox/maskedarray/tests/test_core.py 
=================================================================== --- trunk/scipy/sandbox/maskedarray/tests/test_core.py 2007-09-26 19:48:46 UTC (rev 3370) +++ trunk/scipy/sandbox/maskedarray/tests/test_core.py 2007-09-27 03:34:44 UTC (rev 3371) @@ -229,6 +229,17 @@ assert_equal(x,y/a) assert_equal(xm,y/a) assert_equal(xm.mask, mask_or(mask_or(m,a.mask), (a==0))) + # + (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + z = xm/ym + assert_equal(z._mask, [1,1,1,0,0,1,1,0,0,0,1,1]) + assert_equal(z._data, [0.2,1.,1./3.,-1.,-pi/2.,-1.,5.,1.,1.,1.,2.,1.]) + xm = xm.copy() + xm /= ym + assert_equal(xm._mask, [1,1,1,0,0,1,1,0,0,0,1,1]) + assert_equal(xm._data, [1/5.,1.,1./3.,-1.,-pi/2.,-1.,5.,1.,1.,1.,2.,1.]) + + #.......................... def check_scalararithmetic(self): "Tests some scalar arithmetics on MaskedArrays." @@ -479,6 +490,46 @@ # assert_not_equal(id(y._mask), id(x._mask)) assert_not_equal(y._data.ctypes.data, x._data.ctypes.data) assert_not_equal(y._mask.ctypes.data, x._mask.ctypes.data) + #........................ + def check_where(self): + "Test the where function" + (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + d = where(xm>2,xm,-9) + assert_equal(d, [-9.,-9.,-9.,-9., -9., 4., -9., -9., 10., -9., -9., 3.]) + assert_equal(d._mask, xm._mask) + d = where(xm>2,-9,ym) + assert_equal(d, [5.,0.,3., 2., -1.,-9.,-9., -10., -9., 1., 0., -9.]) + assert_equal(d._mask, [1,0,1,0,0,0,1,0,0,0,0,0]) + d = where(xm>2, xm, masked) + assert_equal(d, [-9.,-9.,-9.,-9., -9., 4., -9., -9., 10., -9., -9., 3.]) + tmp = xm._mask.copy() + tmp[(xm<=2).filled(True)] = True + assert_equal(d._mask, tmp) + # + ixm = xm.astype(int_) + d = where(ixm>2, ixm, masked) + assert_equal(d, [-9,-9,-9,-9, -9, 4, -9, -9, 10, -9, -9, 3]) + assert_equal(d.dtype, ixm.dtype) + # + x = arange(10) + x[3] = masked + c = x >= 8 + z = where(c , x, masked) + assert z.dtype is x.dtype + assert z[3] is masked + assert z[4] is masked + assert z[7] is masked + assert z[8] is not masked + assert z[9] is not masked + assert_equal(x,z) + # + z = where(c , masked, x) + assert z.dtype is x.dtype + assert z[3] is masked + assert z[4] is not masked + assert z[7] is not masked + assert z[8] is masked + assert z[9] is masked #........................ def check_oddfeatures_1(self): @@ -500,23 +551,6 @@ assert count(where(c,masked,masked)) == 0 assert shape(where(c,masked,masked)) == c.shape # - z = where(c , x, masked) - assert z.dtype is x.dtype - assert z[3] is masked - assert z[4] is masked - assert z[7] is masked - assert z[8] is not masked - assert z[9] is not masked - assert_equal(x,z) - # - z = where(c , masked, x) - assert z.dtype is x.dtype - assert z[3] is masked - assert z[4] is not masked - assert z[7] is not masked - assert z[8] is masked - assert z[9] is masked - # z = masked_where(c, x) assert z.dtype is x.dtype assert z[3] is masked @@ -631,6 +665,8 @@ y = x * masked assert_equal(y.shape, x.shape) assert_equal(y._mask, [True, True]) + y = x[0] * masked + assert y is masked y = x + masked assert_equal(y.shape, x.shape) assert_equal(y._mask, [True, True]) @@ -734,6 +770,19 @@ data = masked_array([1,2,3],fill_value=-999) series = data[[0,2,1]] assert_equal(series._fill_value, data._fill_value) + # + def check_asarray(self): + (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + xmm = asarray(xm) + assert_equal(xmm._data, xm._data) + assert_equal(xmm._mask, xm._mask) + # + def check_fix_invalid(self): + "Checks fix_invalid." 
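# [Editorial note, not part of the committed diff: fix_invalid, exercised
# below, masks entries whose data are not finite and overwrites their data
# with the array's fill_value; here numpy.sqrt([-1., 0., 1.]) produces a
# nan in the first slot, so the fixed array is masked there and carries
# fill_value in ._data, which is what the two assertions check.]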
+ data = masked_array(numpy.sqrt([-1., 0., 1.]), mask=[0,0,1]) + data_fixed = fix_invalid(data) + assert_equal(data_fixed._data, [data.fill_value, 0., 1.]) + assert_equal(data_fixed._mask, [1., 0., 1.]) #............................................................................... @@ -784,6 +833,7 @@ assert(sometrue(a,axis=0)) assert_equal(sum(a[:3],axis=0), 0) assert_equal(product(a,axis=0), 0) + assert_equal(add.reduce(a), pi) #........................ def test_minmax(self): "Tests extrema on MaskedArrays." @@ -929,7 +979,7 @@ assert( x[3] is masked) assert( x[4] is masked) x[[1,4]] = [10,40] - assert( x.mask is not m) +# assert( x.mask is not m) assert( x[3] is masked) assert( x[4] is not masked) assert_equal(x, [0,10,2,-1,40]) @@ -1202,7 +1252,7 @@ assert_equal(a.shape,(1,5)) assert_equal(a._mask.shape, a.shape) # Checs that small_mask is preserved - a = array([1,2,3,4],mask=[0,0,0,0],small_mask=False) + a = array([1,2,3,4],mask=[0,0,0,0],shrink=False) assert_equal(a.ravel()._mask, [0,0,0,0]) def check_reshape(self): @@ -1217,17 +1267,13 @@ def check_compressed(self): "Tests compressed" - a = array([1,2,3,4],mask=[0,0,0,0],small_mask=False) + a = array([1,2,3,4],mask=[0,0,0,0]) b = a.compressed() assert_equal(b, a) - assert_equal(b._mask, a._mask) + assert_equal(b._mask, nomask) a[0] = masked b = a.compressed() assert_equal(b._data, [2,3,4]) - assert_equal(b._mask, [0,0,0]) - a._smallmask = True - b = a.compressed() - assert_equal(b._data, [2,3,4]) assert_equal(b._mask, nomask) def check_tolist(self): Modified: trunk/scipy/sandbox/maskedarray/tests/test_mrecords.py =================================================================== --- trunk/scipy/sandbox/maskedarray/tests/test_mrecords.py 2007-09-26 19:48:46 UTC (rev 3370) +++ trunk/scipy/sandbox/maskedarray/tests/test_mrecords.py 2007-09-27 03:34:44 UTC (rev 3371) @@ -20,7 +20,9 @@ import maskedarray.testutils from maskedarray.testutils import * -import maskedarray.core as MA +import maskedarray +from maskedarray import masked_array, masked, nomask + #import maskedarray.mrecords #from maskedarray.mrecords import mrecarray, fromarrays, fromtextfile, fromrecords import maskedarray.mrecords @@ -37,10 +39,10 @@ def setup(self): "Generic setup" d = N.arange(5) - m = MA.make_mask([1,0,0,1,1]) + m = maskedarray.make_mask([1,0,0,1,1]) base_d = N.r_[d,d[::-1]].reshape(2,-1).T base_m = N.r_[[m, m[::-1]]].T - base = MA.array(base_d, mask=base_m) + base = masked_array(base_d, mask=base_m) mrecord = fromarrays(base.T, dtype=[('a',N.float_),('b',N.float_)]) self.data = [d, m, mrecord] @@ -48,8 +50,8 @@ "Tests fields retrieval" [d, m, mrec] = self.data mrec = mrec.copy() - assert_equal(mrec.a, MA.array(d,mask=m)) - assert_equal(mrec.b, MA.array(d[::-1],mask=m[::-1])) + assert_equal(mrec.a, masked_array(d,mask=m)) + assert_equal(mrec.b, masked_array(d[::-1],mask=m[::-1])) assert((mrec._fieldmask == N.core.records.fromarrays([m, m[::-1]], dtype=mrec._fieldmask.dtype)).all()) assert_equal(mrec._mask, N.r_[[m,m[::-1]]].all(0)) assert_equal(mrec.a[1], mrec[1].a) @@ -65,13 +67,13 @@ mrecord.a = 1 assert_equal(mrecord['a']._data, [1]*5) assert_equal(getmaskarray(mrecord['a']), [0]*5) - mrecord.b = MA.masked + mrecord.b = masked assert_equal(mrecord.b.mask, [1]*5) assert_equal(getmaskarray(mrecord['b']), [1]*5) - mrecord._mask = MA.masked + mrecord._mask = masked assert_equal(getmaskarray(mrecord['b']), [1]*5) assert_equal(mrecord['a']._mask, mrecord['b']._mask) - mrecord._mask = MA.nomask + mrecord._mask = nomask assert_equal(getmaskarray(mrecord['b']), 
[0]*5) assert_equal(mrecord['a']._mask, mrecord['b']._mask) # Modified: trunk/scipy/sandbox/maskedarray/tests/test_subclassing.py =================================================================== --- trunk/scipy/sandbox/maskedarray/tests/test_subclassing.py 2007-09-26 19:48:46 UTC (rev 3370) +++ trunk/scipy/sandbox/maskedarray/tests/test_subclassing.py 2007-09-27 03:34:44 UTC (rev 3371) @@ -45,8 +45,8 @@ _data.info = subarr.info return _data def __array_finalize__(self,obj): - SubArray.__array_finalize__(self, obj) - MaskedArray.__array_finalize__(self,obj) + MaskedArray.__array_finalize__(self,obj) + SubArray.__array_finalize__(self, obj) return def _get_series(self): return self.view(MaskedArray) @@ -136,15 +136,38 @@ assert isinstance(mxsub, MaskedArray) assert_equal(mxsub._mask, m) # + mxsub = asarray(xsub) + assert not isinstance(mxsub, MSubArray) + assert isinstance(mxsub, MaskedArray) + assert_equal(mxsub._mask, m) + # mxsub = masked_array(xsub, subok=True) assert isinstance(mxsub, MSubArray) assert_equal(mxsub.info, xsub.info) assert_equal(mxsub._mask, xsub._mask) + # + mxsub = asanyarray(xsub) + assert isinstance(mxsub, MSubArray) + assert_equal(mxsub.info, xsub.info) + assert_equal(mxsub._mask, m) ################################################################################ if __name__ == '__main__': NumpyTest().run() + # + if 0: + x = array(arange(5), mask=[0]+[1]*4) + my = masked_array(subarray(x)) + ym = msubarray(x) + # + z = (my+1) + assert isinstance(z,MaskedArray) + assert not isinstance(z, MSubArray) + assert isinstance(z._data, SubArray) + assert_equal(z._data.info, {}) + # + z = ym+1 From scipy-svn at scipy.org Thu Sep 27 09:26:01 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 27 Sep 2007 08:26:01 -0500 (CDT) Subject: [Scipy-svn] r3372 - trunk/scipy/sandbox/dhuard Message-ID: <20070927132601.E48D039C05A@new.scipy.org> Author: dhuard Date: 2007-09-27 08:25:57 -0500 (Thu, 27 Sep 2007) New Revision: 3372 Added: trunk/scipy/sandbox/dhuard/stats.py Log: Created stub for statistical functions. Added: trunk/scipy/sandbox/dhuard/stats.py =================================================================== --- trunk/scipy/sandbox/dhuard/stats.py 2007-09-27 03:34:44 UTC (rev 3371) +++ trunk/scipy/sandbox/dhuard/stats.py 2007-09-27 13:25:57 UTC (rev 3372) @@ -0,0 +1,65 @@ +import scipy.interpolate as interpolate +import numpy as np + +def scoreatpercentile(data, per): + """Return the score at the given 'per' percentile of the data. + + Example + >>> scoreatpercentile(randn(100), 50) + will return the median of the sample. + """ + cdf = empiricalcdf(data) + interpolator = interpolate.interp1d(sort(cdf), sort(data)) + return interpolator(per/100.) + +def percentileofscore(data, score): + """Percentile-position of score relative to data. + + score: Array of scores at which the percentile is computed. + + Return percentiles (0-100). + + Example + x = linspace(-2,2,100) + percentileofscore(randn(50),x) + + Return an error if the score is outside the range of data. + """ + cdf = empiricalcdf(data) + interpolator = interpolate.interp1d(sort(data), sort(cdf)) + return interpolator(score)*100. + +def empiricalcdf(data, method='Hazen'): + """Return the empirical cdf. + + Methods available: + Hazen: (i-0.5)/N + Weibull: i/(N+1) + Chegodayev: (i-.3)/(N+.4) + Cunnane: (i-.4)/(N+.2) + Gringorten: (i-.44)/(N+.12) + California: (i-1)/N + + Where i goes from 1 to N. + """ + + i = np.argsort(np.argsort(data)) + 1. 
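# [Editorial note, not part of the committed diff: the double argsort just
# above is the usual rank trick. np.argsort(np.argsort(data)) gives each
# element its 0-based rank in sorted order, so adding 1. yields the 1-based
# plotting position i shared by all the formulas below; Hazen, for example,
# then assigns (i-0.5)/N. A minimal illustration with made-up values:
#     >>> x = np.array([0.3, 0.1, 0.2])
#     >>> np.argsort(np.argsort(x)) + 1.
#     array([ 3.,  1.,  2.])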
+ N = len(data) + method = method.lower() + + if method == 'weibull': + cdf = i/(N+1.) + elif method == 'hazen': + cdf = (i-0.5)/N + elif method == 'california': + cdf = (i-1.)/N + elif method == 'chegodayev': + cdf = (i-.3)/(N+.4) + elif method == 'cunnane': + cdf = (i-.4)/(N+.2) + elif method == 'gringorten': + cdf = (i-.44)/(N+.12) + else: + raise 'Unknown method. Choose among Weibull, Hazen, Chegodayev, Cunnane, Gringorten and California.' + + return cdf From scipy-svn at scipy.org Thu Sep 27 10:03:09 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 27 Sep 2007 09:03:09 -0500 (CDT) Subject: [Scipy-svn] r3373 - trunk/scipy/sandbox/dhuard Message-ID: <20070927140309.90C0139C030@new.scipy.org> Author: dhuard Date: 2007-09-27 09:03:00 -0500 (Thu, 27 Sep 2007) New Revision: 3373 Added: trunk/scipy/sandbox/dhuard/test_stats.py Modified: trunk/scipy/sandbox/dhuard/stats.py Log: Added tests for stats stub. Modified: trunk/scipy/sandbox/dhuard/stats.py =================================================================== --- trunk/scipy/sandbox/dhuard/stats.py 2007-09-27 13:25:57 UTC (rev 3372) +++ trunk/scipy/sandbox/dhuard/stats.py 2007-09-27 14:03:00 UTC (rev 3373) @@ -1,32 +1,36 @@ import scipy.interpolate as interpolate import numpy as np -def scoreatpercentile(data, per): - """Return the score at the given 'per' percentile of the data. +def scoreatpercentile(data, percentile): + """Return the score at the given percentile of the data. - Example - >>> scoreatpercentile(randn(100), 50) - will return the median of the sample. + Example: + >>> data = randn(100) + >>> scoreatpercentile(data, 50) + + will return the median of sample `data`. """ + per = np.array(percentile) cdf = empiricalcdf(data) - interpolator = interpolate.interp1d(sort(cdf), sort(data)) + interpolator = interpolate.interp1d(np.sort(cdf), np.sort(data)) return interpolator(per/100.) def percentileofscore(data, score): - """Percentile-position of score relative to data. + """Return the percentile-position of score relative to data. score: Array of scores at which the percentile is computed. Return percentiles (0-100). Example + r = randn(50) x = linspace(-2,2,100) - percentileofscore(randn(50),x) + percentileofscore(r,x) - Return an error if the score is outside the range of data. + Raise an error if the score is outside the range of data. """ cdf = empiricalcdf(data) - interpolator = interpolate.interp1d(sort(data), sort(cdf)) + interpolator = interpolate.interp1d(np.sort(data), np.sort(cdf)) return interpolator(score)*100. def empiricalcdf(data, method='Hazen'): @@ -46,11 +50,10 @@ i = np.argsort(np.argsort(data)) + 1. N = len(data) method = method.lower() - - if method == 'weibull': + if method == 'hazen': + cdf = (i-0.5)/N + elif method == 'weibull': cdf = i/(N+1.) - elif method == 'hazen': - cdf = (i-0.5)/N elif method == 'california': cdf = (i-1.)/N elif method == 'chegodayev': Added: trunk/scipy/sandbox/dhuard/test_stats.py =================================================================== --- trunk/scipy/sandbox/dhuard/test_stats.py 2007-09-27 13:25:57 UTC (rev 3372) +++ trunk/scipy/sandbox/dhuard/test_stats.py 2007-09-27 14:03:00 UTC (rev 3373) @@ -0,0 +1,45 @@ +""" +Test statistical functions. 
+""" + + +from numpy.testing import * +import stats +import numpy as np + +N = 100 +np.random.seed(2) +r = np.random.randn(N) + +class test_empiricalcdf(NumpyTestCase): + def check_hazen(self): + + f = stats.empiricalcdf(r) + assert_equal(len(f), len(r)) + assert_array_equal(np.argsort(r), np.argsort(f)) + assert_array_equal(np.sort(f), (np.arange(N)+.5)/N) + + def check_weibull(self): + f = stats.empiricalcdf(r, 'weibull') + assert_array_equal(np.sort(f), (np.arange(N)+1.)/(N+1.)) + + def check_california(self): + f = stats.empiricalcdf(r, 'california') + assert_array_equal(np.sort(f), (np.arange(N))/float(N)) + +class test_scoreatpercentile(NumpyTestCase): + def check_simple(self): + r = np.random.randn(1000) + s = stats.scoreatpercentile(r, [15.9,50,84.1]) + assert_array_almost_equal(s, [-1,0,1], 1) + +class test_percentileofscore(NumpyTestCase): + def check_simple(self): + r = np.random.randn(3000) + p = stats.percentileofscore(r, [-1,0,1]) + assert_array_almost_equal(p, [15.9, 50, 84.1], 0) + + + +if __name__ == '__main__': + NumpyTest().run() From scipy-svn at scipy.org Thu Sep 27 10:26:47 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 27 Sep 2007 09:26:47 -0500 (CDT) Subject: [Scipy-svn] r3374 - trunk/scipy/optimize Message-ID: <20070927142647.BA0C339C06A@new.scipy.org> Author: stefan Date: 2007-09-27 09:26:00 -0500 (Thu, 27 Sep 2007) New Revision: 3374 Modified: trunk/scipy/optimize/optimize.py Log: Fix typo in docstring. Modified: trunk/scipy/optimize/optimize.py =================================================================== --- trunk/scipy/optimize/optimize.py 2007-09-27 14:03:00 UTC (rev 3373) +++ trunk/scipy/optimize/optimize.py 2007-09-27 14:26:00 UTC (rev 3374) @@ -807,7 +807,7 @@ x0 : ndarray Initial guess. fprime : callable f'(x,*args) - Function which omputes the gradient of f. + Function which computes the gradient of f. args : tuple Extra arguments passed to f and fprime. 
gtol : float From scipy-svn at scipy.org Thu Sep 27 11:41:40 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 27 Sep 2007 10:41:40 -0500 (CDT) Subject: [Scipy-svn] r3375 - trunk/scipy/sandbox/maskedarray Message-ID: <20070927154140.CD26139C239@new.scipy.org> Author: pierregm Date: 2007-09-27 10:41:34 -0500 (Thu, 27 Sep 2007) New Revision: 3375 Modified: trunk/scipy/sandbox/maskedarray/__init__.py trunk/scipy/sandbox/maskedarray/bench.py trunk/scipy/sandbox/maskedarray/core.py Log: Fixed __init__ core : add the .shrink_mask() method Modified: trunk/scipy/sandbox/maskedarray/__init__.py =================================================================== --- trunk/scipy/sandbox/maskedarray/__init__.py 2007-09-27 14:26:00 UTC (rev 3374) +++ trunk/scipy/sandbox/maskedarray/__init__.py 2007-09-27 15:41:34 UTC (rev 3375) @@ -17,8 +17,6 @@ import extras from extras import * -import _nfcore - __all__ = ['core', 'extras'] __all__ += core.__all__ __all__ += extras.__all__ \ No newline at end of file Modified: trunk/scipy/sandbox/maskedarray/bench.py =================================================================== --- trunk/scipy/sandbox/maskedarray/bench.py 2007-09-27 14:26:00 UTC (rev 3374) +++ trunk/scipy/sandbox/maskedarray/bench.py 2007-09-27 15:41:34 UTC (rev 3375) @@ -72,17 +72,17 @@ if test: assert_equal(filled(eval("numpy.ma.%s(nmxs)" % funcname),0), filled(eval("maskedarray.%s(mmxs)" % funcname),0)) - for (module, data) in zip(("numpy", "numpy.ma","maskedarray","maskedarray._nfcore"), - ("xs","nmxs","mmxs","mmxs")): - timer("%(module)s.%(funcname)s(%(data)s)" % locals()) + for (module, data) in zip(("numpy", "numpy.ma","maskedarray"), + ("xs","nmxs","mmxs")): + timer("%(module)s.%(funcname)s(%(data)s)" % locals(), v="%11s" % module, nloop=nloop) # print "%s on large arrays" % funcname if test: assert_equal(filled(eval("numpy.ma.%s(nmxl)" % funcname),0), filled(eval("maskedarray.%s(mmxl)" % funcname),0)) - for (module, data) in zip(("numpy", "numpy.ma","maskedarray","maskedarray._nfcore"), - ("xl","nmxl","mmxl","mmxl")): - timer("%(module)s.%(funcname)s(%(data)s)" % locals()) + for (module, data) in zip(("numpy", "numpy.ma","maskedarray"), + ("xl","nmxl","mmxl")): + timer("%(module)s.%(funcname)s(%(data)s)" % locals(), v="%11s" % module, nloop=nloop) return def compare_methods(methodname, args, vars='x', nloop=500, test=True, @@ -115,17 +115,17 @@ if test: assert_equal(filled(eval("numpy.ma.%s(nmxs,nmys)" % funcname),0), filled(eval("maskedarray.%s(mmxs,mmys)" % funcname),0)) - for (module, data) in zip(("numpy", "numpy.ma","maskedarray","maskedarray._nfcore"), - ("xs,ys","nmxs,nmys","mmxs,mmys","mmxs,mmys")): - timer("%(module)s.%(funcname)s(%(data)s)" % locals()) + for (module, data) in zip(("numpy", "numpy.ma","maskedarray"), + ("xs,ys","nmxs,nmys","mmxs,mmys")): + timer("%(module)s.%(funcname)s(%(data)s)" % locals(), v="%11s" % module, nloop=nloop) # print "%s on large arrays" % funcname if test: assert_equal(filled(eval("numpy.ma.%s(nmxl, nmyl)" % funcname),0), filled(eval("maskedarray.%s(mmxl, mmyl)" % funcname),0)) - for (module, data) in zip(("numpy", "numpy.ma","maskedarray","maskedarray._nfcore"), - ("xl,yl","nmxl,nmyl","mmxl,mmyl","mmxl,mmyl")): - timer("%(module)s.%(funcname)s(%(data)s)" % locals()) + for (module, data) in zip(("numpy", "numpy.ma","maskedarray"), + ("xl,yl","nmxl,nmyl","mmxl,mmyl")): + timer("%(module)s.%(funcname)s(%(data)s)" % locals(), v="%11s" % module, nloop=nloop) return Modified: trunk/scipy/sandbox/maskedarray/core.py 
=================================================================== --- trunk/scipy/sandbox/maskedarray/core.py 2007-09-27 14:26:00 UTC (rev 3374) +++ trunk/scipy/sandbox/maskedarray/core.py 2007-09-27 15:41:34 UTC (rev 3375) @@ -362,9 +362,10 @@ d1 = get_data(a) if self.domain is not None: dm = narray(self.domain(d1), copy=False) - m = mask_or(m, narray(self.domain(d1))) + m = numpy.logical_or(m, dm) # The following two lines control the domain filling methods. d1 = d1.copy() +# d1[dm] = self.fill numpy.putmask(d1, dm, self.fill) # Take care of the masked singleton first ... if not m.ndim and m: @@ -1252,6 +1253,13 @@ if self._sharedmask: self._mask = self._mask.copy() self._sharedmask = False + + def shrink_mask(self): + "Reduces a mask to nomask when possible." + m = self._mask + if m.ndim and not m.any(): + self._mask = nomask + #............................................ def _get_data(self): "Returns the current data (as a view of the original underlying data)." @@ -2159,7 +2167,7 @@ masked_array = MaskedArray def array(data, dtype=None, copy=False, order=False, mask=nomask, subok=True, - keep_mask=True, hard_mask=False, fill_value=None): + keep_mask=True, hard_mask=False, fill_value=None, shrink=True): """array(data, dtype=None, copy=True, order=False, mask=nomask, keep_mask=True, shrink=True, fill_value=None) Acts as shortcut to MaskedArray, with options in a different order for convenience. @@ -2778,14 +2786,32 @@ ############################################################################### -#if __name__ == '__main__': - #from maskedarray.testutils import assert_equal, assert_almost_equal +if __name__ == '__main__': + from maskedarray.testutils import assert_equal, assert_almost_equal - #xm = array(numpy.random.uniform(-1,1,25)) - #xm[xm>0.5] = masked - #xm.fill_value = -999 - ## - #z = 3//where(xm.mask,0,xm) - #assert_equal(z._mask, numpy.logical_or(xm==0,xm._mask)) - #assert_equal(z._data[xm._mask], 1) - + # Small arrays .................................. + xs = numpy.random.uniform(-1,1,6).reshape(2,3) + ys = numpy.random.uniform(-1,1,6).reshape(2,3) + zs = xs + 1j * ys + m1 = [[True, False, False], [False, False, True]] + m2 = [[True, False, True], [False, False, True]] + nmxs = numpy.ma.array(xs, mask=m1) + nmys = numpy.ma.array(ys, mask=m2) + nmzs = numpy.ma.array(zs, mask=m1) + mmxs = array(xs, mask=m1) + mmys = array(ys, mask=m2) + mmzs = array(zs, mask=m1) + # Big arrays .................................... + xl = numpy.random.uniform(-1,1,100*100).reshape(100,100) + yl = numpy.random.uniform(-1,1,100*100).reshape(100,100) + zl = xl + 1j * yl + maskx = xl > 0.8 + masky = yl < -0.8 + nmxl = numpy.ma.array(xl, mask=maskx) + nmyl = numpy.ma.array(yl, mask=masky) + nmzl = numpy.ma.array(zl, mask=maskx) + mmxl = array(xl, mask=maskx, shrink=True) + mmyl = array(yl, mask=masky, shrink=True) + mmzl = array(zl, mask=maskx, shrink=True) + # + z = log(mmxl) \ No newline at end of file From scipy-svn at scipy.org Thu Sep 27 11:49:13 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 27 Sep 2007 10:49:13 -0500 (CDT) Subject: [Scipy-svn] r3376 - trunk/scipy/sandbox/maskedarray Message-ID: <20070927154913.E3CB1C7C017@new.scipy.org> Author: pierregm Date: 2007-09-27 10:49:05 -0500 (Thu, 27 Sep 2007) New Revision: 3376 Modified: trunk/scipy/sandbox/maskedarray/core.py Log: core : fixed arange.
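Editorial note on the one-line fix below: the wrapper is declared as arange(stop, start=None, step=1, dtype=None) to mirror numpy's calling convention, but the old body forwarded the first two arguments to numpy.arange in swapped positions, so any two-argument call came out reversed. A sketch of the observable difference (the interactive session is illustrative, not from the archive):

    >>> arange(2, 10)   # before r3376: forwarded as numpy.arange(10, 2, 1) -> empty array
    >>> arange(2, 10)   # after r3376:  forwarded as numpy.arange(2, 10, 1) -> 2, 3, ..., 9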
Modified: trunk/scipy/sandbox/maskedarray/core.py =================================================================== --- trunk/scipy/sandbox/maskedarray/core.py 2007-09-27 15:41:34 UTC (rev 3375) +++ trunk/scipy/sandbox/maskedarray/core.py 2007-09-27 15:49:05 UTC (rev 3376) @@ -2653,7 +2653,7 @@ def arange(stop, start=None, step=1, dtype=None): "maskedarray version of the numpy function." - return numpy.arange(start, stop, step, dtype).view(MaskedArray) + return numpy.arange(stop, start, step, dtype).view(MaskedArray) arange.__doc__ = numpy.arange.__doc__ def inner(a, b): From scipy-svn at scipy.org Thu Sep 27 14:37:28 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 27 Sep 2007 13:37:28 -0500 (CDT) Subject: [Scipy-svn] r3377 - trunk/scipy/sandbox/dhuard Message-ID: <20070927183728.C0D9FC7C024@new.scipy.org> Author: dhuard Date: 2007-09-27 13:37:13 -0500 (Thu, 27 Sep 2007) New Revision: 3377 Added: trunk/scipy/sandbox/dhuard/histogram.f trunk/scipy/sandbox/dhuard/histogram.py trunk/scipy/sandbox/dhuard/test_histogram.py Log: added histogram functions. Added: trunk/scipy/sandbox/dhuard/histogram.f =================================================================== --- trunk/scipy/sandbox/dhuard/histogram.f 2007-09-27 15:49:05 UTC (rev 3376) +++ trunk/scipy/sandbox/dhuard/histogram.f 2007-09-27 18:37:13 UTC (rev 3377) @@ -0,0 +1,348 @@ +C******************************************************************* +C RETURN THE HISTOGRAM OF ARRAY X, THAT IS, THE NUMBER OF ELEMENTS +C IN X FALLING INTO EACH BIN. +C THE BIN ARRAY CONSISTS IN N BINS STARTING AT BIN0 WITH WIDTH DELTA. +C HISTO H : | LOWER OUTLIERS | 1 | 2 | 3 | ... | N | UPPER OUTLIERS | +C INDEX i : | 1 | 2 | 3 | 4 | ... | N+1 | N+2 | + + SUBROUTINE FIXED_BINSIZE(X, BIN0, DELTA, N, NX, H) + +C PARAMETERS +C ---------- +C X : ARRAY +C BIN0 : LEFT BIN EDGE +C DELTA : BIN WIDTH +C N : NUMBER OF BINS +C H : HISTOGRAM + + IMPLICIT NONE + INTEGER :: N, NX, i, K + DOUBLE PRECISION :: X(NX), BIN0, DELTA + INTEGER :: H(N+2), UP, LOW + +CF2PY INTEGER INTENT(IN) :: N +CF2PY INTEGER INTENT(HIDE) :: NX = LEN(X) +CF2PY DOUBLE PRECISION DIMENSION(NX), INTENT(IN) :: X +CF2PY DOUBLE PRECISION INTENT(IN) :: BIN0, DELTA +CF2PY INTEGER DIMENSION(N+2), INTENT(OUT) :: H + + + DO i=1,N+2 + H(i) = 0 + ENDDO + +C OUTLIERS INDICES + UP = N+2 + LOW = 1 + + DO i=1,NX + IF (X(i) >= BIN0) THEN + K = INT((X(i)-BIN0)/DELTA)+1 + IF (K <= N) THEN + H(K+1) = H(K+1) + 1 + ELSE + H(UP) = H(UP) + 1 + ENDIF + ELSE + H(LOW) = H(LOW) + 1 + ENDIF + ENDDO + + END SUBROUTINE + + + +C******************************************************************* +C RETURN THE WEIGHTED HISTOGRAM OF ARRAY X, THAT IS, THE SUM OF THE +C WEIGHTS OF THE ELEMENTS OF X FALLING INTO EACH BIN. +C THE BIN ARRAY CONSISTS IN N BINS STARTING AT BIN0 WITH WIDTH DELTA. +C HISTO H : | LOWER OUTLIERS | 1 | 2 | 3 | ... | N | UPPER OUTLIERS | +C INDEX i : | 1 | 2 | 3 | 4 | ... 
| N+1 | N+2 | + + SUBROUTINE WEIGHTED_FIXED_BINSIZE(X, W, BIN0, DELTA, N, NX, H) + +C PARAMETERS +C ---------- +C X : ARRAY +C W : WEIGHTS +C BIN0 : LEFT BIN EDGE +C DELTA : BIN WIDTH +C N : NUMBER OF BINS +C H : HISTOGRAM + + IMPLICIT NONE + INTEGER :: N, NX, i, K + DOUBLE PRECISION :: X(NX), W(NX), BIN0, DELTA, H(N+2) + INTEGER :: UP, LOW + +CF2PY INTEGER INTENT(IN) :: N +CF2PY INTEGER INTENT(HIDE) :: NX = LEN(X) +CF2PY DOUBLE PRECISION DIMENSION(NX), INTENT(IN) :: X, W +CF2PY DOUBLE PRECISION INTENT(IN) :: BIN0, DELTA +CF2PY DOUBLE PRECISION DIMENSION(N+2), INTENT(OUT) :: H + + + DO i=1,N+2 + H(i) = 0.D0 + ENDDO + +C OUTLIERS INDICES + UP = N+2 + LOW = 1 + + DO i=1,NX + IF (X(i) >= BIN0) THEN + K = INT((X(i)-BIN0)/DELTA)+1 + IF (K <= N) THEN + H(K+1) = H(K+1) + W(i) + ELSE + H(UP) = H(UP) + W(i) + ENDIF + ELSE + H(LOW) = H(LOW) + W(i) + ENDIF + ENDDO + + END SUBROUTINE + + +C***************************************************************************** +C COMPUTE N DIMENSIONAL FLATTENED HISTOGRAM + + SUBROUTINE FIXED_BINSIZE_ND(X, BIN0, DELTA, N, COUNT, NX,D,NC) + +C PARAMETERS +C ---------- +C X : ARRAY (NXD) +C BIN0 : LEFT BIN EDGES (D) +C DELTA : BIN WIDTH (D) +C N : NUMBER OF BINS (D) +C COUNT : FLATTENED HISTOGRAM (NC) +C NC : PROD(N(:)+2) + + IMPLICIT NONE + INTEGER :: NX, D, NC,N(D), i, j, k, T + DOUBLE PRECISION :: X(NX,D), BIN0(D), DELTA(D) + INTEGER :: INDEX(NX), ORDER(D), MULT, COUNT(NC) + + +CF2PY DOUBLE PRECISION DIMENSION(NX,D), INTENT(IN) :: X +CF2PY DOUBLE PRECISION DIMENSION(D) :: BIN0, DELTA +CF2PY INTEGER INTENT(IN) :: N +CF2PY INTEGER DIMENSION(NC), INTENT(OUT) :: COUNT +CF2PY INTEGER INTENT(HIDE) :: NX=SHAPE(X,1) +CF2PY INTEGER INTENT(HIDE) :: D=SHAPE(X,2) + + +C INITIALIZE INDEX + DO i=1, NX + INDEX(i) = 0 + ENDDO + +C INITIALIZE COUNT + DO i=1,NC + COUNT(i) = 0 + ENDDO + +C ORDER THE BIN SIZE ARRAY N(D) + CALL QSORTI(ORDER, D, N) + +C INITIALIZE THE DIMENSIONAL MULTIPLIER + MULT=1 + +C FIND THE FLATTENED INDEX OF EACH SAMPLE + DO j=1, D + k = ORDER(j) + MULT=MULT*N(k) + + DO i=1, NX + IF (X(i,k) >= BIN0(k)) THEN + T = INT((X(i, k)-BIN0(k))/DELTA(k))+1 + IF (T <= N(k)) THEN + T = T+1 + ELSE + T = N(k)+2 + ENDIF + ELSE + T = 1 + ENDIF + + INDEX(i) = INDEX(I) + T*MULT + ENDDO + ENDDO + +C COUNT THE NUMBER OF SAMPLES FALLING INTO EACH BIN + DO i=1,NX + COUNT(INDEX(i)) = COUNT(INDEX(i)) + 1 + ENDDO + + END SUBROUTINE + + +C From HDK at psuvm.psu.edu Thu Dec 8 15:27:16 MST 1994 +C +C The following was converted from Algol recursive to Fortran iterative +C by a colleague at Penn State (a long time ago - Fortran 66, please +C excuse the GoTo's). The following code also corrects a bug in the +C Quicksort algorithm published in the ACM (see Algorithm 402, CACM, +C Sept. 1970, pp 563-567; also you younger folks who weren't born at +C that time might find interesting the history of the Quicksort +C algorithm beginning with the original published in CACM, July 1961, +C pp 321-322, Algorithm 64). Note that the following algorithm sorts +C integer data; actual data is not moved but sort is affected by sorting +C a companion index array (see leading comments). The data type being +C sorted can be changed by changing one line; see comments after +C declarations and subsequent one regarding comparisons(Fortran +C 77 takes care of character comparisons of course, so that comment +C is merely historical from the days when we had to write character +C compare subprograms, usually in assembler language for a specific +C mainframe platform at that time). 
But the following algorithm is +C good, still one of the best available. + + + SUBROUTINE QSORTI (ORD,N,A) +C +C==============SORTS THE ARRAY A(I),I=1,2,...,N BY PUTTING THE +C ASCENDING ORDER VECTOR IN ORD. THAT IS ASCENDING ORDERED A +C IS A(ORD(I)),I=1,2,...,N; DESCENDING ORDER A IS A(ORD(N-I+1)), +C I=1,2,...,N . THIS SORT RUNS IN TIME PROPORTIONAL TO N LOG N . +C +C +C ACM QUICKSORT - ALGORITHM #402 - IMPLEMENTED IN FORTRAN 66 BY +C WILLIAM H. VERITY, WHV at PSUVM.PSU.EDU +C CENTER FOR ACADEMIC COMPUTING +C THE PENNSYLVANIA STATE UNIVERSITY +C UNIVERSITY PARK, PA. 16802 +C + IMPLICIT INTEGER (A-Z) +C + DIMENSION ORD(N),POPLST(2,20) + INTEGER X,XX,Z,ZZ,Y +C +C TO SORT DIFFERENT INPUT TYPES, CHANGE THE FOLLOWING +C SPECIFICATION STATEMENTS; FOR EXAMPLE, FOR FORTRAN CHARACTER +C USE THE FOLLOWING: CHARACTER *(*) A(N) +C + INTEGER A(N) +C + NDEEP=0 + U1=N + L1=1 + DO 1 I=1,N + 1 ORD(I)=I + 2 IF (U1.LE.L1) RETURN +C + 3 L=L1 + U=U1 +C +C PART +C + 4 P=L + Q=U +C FOR CHARACTER SORTS, THE FOLLOWING 3 STATEMENTS WOULD BECOME +C X = ORD(P) +C Z = ORD(Q) +C IF (A(X) .LE. A(Z)) GO TO 2 +C +C WHERE "CLE" IS A LOGICAL FUNCTION WHICH RETURNS "TRUE" IF THE +C FIRST ARGUMENT IS LESS THAN OR EQUAL TO THE SECOND, BASED ON "LEN" +C CHARACTERS. +C + X=A(ORD(P)) + Z=A(ORD(Q)) + IF (X.LE.Z) GO TO 5 + Y=X + X=Z + Z=Y + YP=ORD(P) + ORD(P)=ORD(Q) + ORD(Q)=YP + 5 IF (U-L.LE.1) GO TO 15 + XX=X + IX=P + ZZ=Z + IZ=Q +C +C LEFT +C + 6 P=P+1 + IF (P.GE.Q) GO TO 7 + X=A(ORD(P)) + IF (X.GE.XX) GO TO 8 + GO TO 6 + 7 P=Q-1 + GO TO 13 +C +C RIGHT +C + 8 Q=Q-1 + IF (Q.LE.P) GO TO 9 + Z=A(ORD(Q)) + IF (Z.LE.ZZ) GO TO 10 + GO TO 8 + 9 Q=P + P=P-1 + Z=X + X=A(ORD(P)) +C +C DIST +C + 10 IF (X.LE.Z) GO TO 11 + Y=X + X=Z + Z=Y + IP=ORD(P) + ORD(P)=ORD(Q) + ORD(Q)=IP + 11 IF (X.LE.XX) GO TO 12 + XX=X + IX=P + 12 IF (Z.GE.ZZ) GO TO 6 + ZZ=Z + IZ=Q + GO TO 6 +C +C OUT +C + 13 CONTINUE + IF (.NOT.(P.NE.IX.AND.X.NE.XX)) GO TO 14 + IP=ORD(P) + ORD(P)=ORD(IX) + ORD(IX)=IP + 14 CONTINUE + IF (.NOT.(Q.NE.IZ.AND.Z.NE.ZZ)) GO TO 15 + IQ=ORD(Q) + ORD(Q)=ORD(IZ) + ORD(IZ)=IQ + 15 CONTINUE + IF (U-Q.LE.P-L) GO TO 16 + L1=L + U1=P-1 + L=Q+1 + GO TO 17 + 16 U1=U + L1=Q+1 + U=P-1 + 17 CONTINUE + IF (U1.LE.L1) GO TO 18 +C +C START RECURSIVE CALL +C + NDEEP=NDEEP+1 + POPLST(1,NDEEP)=U + POPLST(2,NDEEP)=L + GO TO 3 + 18 IF (U.GT.L) GO TO 4 +C +C POP BACK UP IN THE RECURSION LIST +C + IF (NDEEP.EQ.0) GO TO 2 + U=POPLST(1,NDEEP) + L=POPLST(2,NDEEP) + NDEEP=NDEEP-1 + GO TO 18 +C +C END SORT +C END QSORT +C + END Added: trunk/scipy/sandbox/dhuard/histogram.py =================================================================== --- trunk/scipy/sandbox/dhuard/histogram.py 2007-09-27 15:49:05 UTC (rev 3376) +++ trunk/scipy/sandbox/dhuard/histogram.py 2007-09-27 18:37:13 UTC (rev 3377) @@ -0,0 +1,272 @@ +import numpy as np +import subprocess + +try: + import flib +except: + print 'Building the flib fortran library.' + subprocess.call('f2py -c histogram.f -m flib', shell=True) + import flib + +def histogram(a, bins=10, range=None, normed=False, weights=None, axis=None, strategy=None): + """histogram(a, bins=10, range=None, normed=False, weights=None, axis=None) + -> H, dict + + Return the distribution of sample. + + :Parameters: + - `a` : Array sample. + - `bins` : Number of bins, or an array of bin edges, in which case the + range is not used. If 'Scott' or 'Freeman' is passed, then + the named method is used to find the optimal number of bins. + - `range` : Lower and upper bin edges, default: [min, max]. 
+ - `normed` :Boolean, if False, return the number of samples in each bin, + if True, return the density. + - `weights` : Sample weights. The weights are normed only if normed is + True. Should weights.sum() not equal len(a), the total bin count + will not be equal to the number of samples. + - `axis` : Specifies the dimension along which the histogram is computed. + Defaults to None, which aggregates the entire sample array. + - `strategy` : Histogramming method (binsize, searchsorted or digitize). + + :Return: + - `H` : The number of samples in each bin. + If normed is True, H is a frequency distribution. + - dict{ 'edges': The bin edges, including the rightmost edge. + 'upper': Upper outliers. + 'lower': Lower outliers. + 'bincenters': Center of bins. + 'strategy': the histogramming method employed.} + + :Examples: + >>> x = random.rand(100,10) + >>> H, D = histogram(x, bins=10, range=[0,1], normed=True) + >>> H2, D = histogram(x, bins=10, range=[0,1], normed=True, axis=0) + + :SeeAlso: histogramnd + """ + weighted = weights is not None + + a = np.asarray(a) + if axis is None: + a = np.atleast_1d(a.ravel()) + if weighted: + weights = np.atleast_1d(weights.ravel()) + axis = 0 + + # Define the range + if range is None: + mn, mx = a.min(), a.max() + if mn == mx: + mn = mn - .5 + mx = mx + .5 + range = [mn, mx] + + # Find the optimal number of bins. + if bins is None or type(bins) == str: + bins = _optimize_binning(a, range, bins) + + # Compute the bin edges if they are not given explicitely. + # For the rightmost bin, we want values equal to the right + # edge to be counted in the last bin, and not as an outlier. + # Hence, we shift the last bin by a tiny amount. + if not np.iterable(bins): + dr = np.diff(range)/bins*1e-10 + edges = np.linspace(range[0], range[1]+dr, bins+1, endpoint=True) + else: + edges = np.asarray(bins, float) + + dedges = np.diff(edges) + bincenters = edges[:-1] + dedges/2. + + # Number of bins + nbin = len(edges)-1 + + # Measure of bin precision. + decimal = int(-np.log10(dedges.min())+10) + + # Choose the fastest histogramming method + even = (len(set(np.around(dedges, decimal))) == 1) + if strategy is None: + if even: + strategy = 'binsize' + else: + if nbin > 30: # approximative threshold + strategy = 'searchsort' + else: + strategy = 'digitize' + else: + if strategy not in ['binsize', 'digitize', 'searchsort']: + raise 'Unknown histogramming strategy.', strategy + if strategy == 'binsize' and not even: + raise 'This binsize strategy cannot be used for uneven bins.' + + # Parameters for the fixed_binsize functions. 
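# [Editorial note, not part of the committed diff: two details of the code
# above, stated here for clarity. First, when `bins` is a method name,
# _optimize_binning (defined later in this file) picks the bin count from a
# classical width rule -- Freedman-Diaconis, width = 2*IQR*N**(-1./3), or
# Scott, width = 3.49*std*N**(-1./3) -- and returns int(diff(range)/width);
# e.g. N=1000 samples with std 1 on a range of length 6 give a Scott width
# of 3.49*1000**(-1./3) ~= 0.349, hence 17 bins. The docstring's 'Freeman'
# appears to be a typo for 'Freedman', which is what the implementation and
# the tests expect. Second, the 'binsize' strategy selected above is valid
# only for equal-width bins, where a sample x lands directly in bin
# k = int((x - start)/binwidth) -- the closed form the Fortran FIXED_BINSIZE
# routine uses (with two extra slots for lower and upper outliers), while
# 'searchsort' and 'digitize' handle uneven edges, where no such direct
# index exists.]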
+ start = float(edges[0]) + binwidth = float(dedges[0]) + + # Looping to reduce memory usage + block = 66600 + slices = [slice(None)]*a.ndim + for i in np.arange(0,len(a),block): + slices[axis] = slice(i,i+block) + at = a[slices] + if weighted: + at = np.concatenate((at, weights[slices]), axis) + if strategy == 'binsize': + count = np.apply_along_axis(_splitinmiddle,axis,at, + flib.weighted_fixed_binsize,start,binwidth,nbin) + elif strategy == 'searchsort': + count = np.apply_along_axis(_splitinmiddle,axis,at, \ + _histogram_searchsort_weighted, edges) + elif strategy == 'digitize': + count = np.apply_along_axis(_splitinmiddle,axis,at,\ + _histogram_digitize,edges,normed) + else: + if strategy == 'binsize': + count = np.apply_along_axis(flib.fixed_binsize,axis,at,start,binwidth,nbin) + elif strategy == 'searchsort': + count = np.apply_along_axis(_histogram_searchsort,axis,at,edges) + elif strategy == 'digitize': + count = np.apply_along_axis(_histogram_digitize,axis,at,None,edges, + normed) + + if i == 0: + total = count + else: + total += count + + # Outlier count + upper = total.take(np.array([-1]), axis) + lower = total.take(np.array([0]), axis) + + # Non-outlier count + core = a.ndim*[slice(None)] + core[axis] = slice(1, -1) + hist = total[core] + + if normed: + normalize = lambda x: np.atleast_1d(x/(x*dedges).sum()) + hist = np.apply_along_axis(normalize, axis, hist) + + return hist, {'edges':edges, 'lower':lower, 'upper':upper, \ + 'bincenters':bincenters, 'strategy':strategy} + + + +def _histogram_fixed_binsize(a, start, width, n): + """histogram_even(a, start, width, n) -> histogram + + Return an histogram where the first bin counts the number of lower + outliers and the last bin the number of upper outliers. Works only with + fixed width bins. + + :Parameters: + a : array + Array of samples. + start : float + Left-most bin edge. + width : float + Width of the bins. All bins are considered to have the same width. + n : int + Number of bins. + + :Return: + H : array + Array containing the number of elements in each bin. H[0] is the number + of samples smaller than start and H[-1] the number of samples + greater than start + n*width. + """ + + return flib.fixed_binsize(a, start, width, n) + + +def _histogram_binsize_weighted(a, w, start, width, n): + """histogram_even_weighted(a, start, width, n) -> histogram + + Return an histogram where the first bin counts the number of lower + outliers and the last bin the number of upper outliers. Works only with + fixed width bins. + + :Parameters: + a : array + Array of samples. + w : array + Weights of samples. + start : float + Left-most bin edge. + width : float + Width of the bins. All bins are considered to have the same width. + n : int + Number of bins. + + :Return: + H : array + Array containing the number of elements in each bin. H[0] is the number + of samples smaller than start and H[-1] the number of samples + greater than start + n*width. 
+ """ + return flib.weighted_fixed_binsize(a, w, start, width, n) + +def _histogram_searchsort(a, bins): + n = np.sort(a).searchsorted(bins) + n = np.concatenate([n, [len(a)]]) + count = np.concatenate([[n[0]], n[1:]-n[:-1]]) + return count + +def _histogram_searchsort_weighted(a, w, bins): + i = np.sort(a).searchsorted(bins) + sw = w[np.argsort(a)] + i = np.concatenate([i, [len(a)]]) + n = np.concatenate([[0],sw.cumsum()])[i] + count = np.concatenate([[n[0]], n[1:]-n[:-1]]) + return count + +def _splitinmiddle(x, function, *args, **kwds): + x1,x2 = np.hsplit(x, 2) + return function(x1,x2,*args, **kwds) + +def _histogram_digitize(a, w, edges, normed): + """Internal routine to compute the 1d weighted histogram for uneven bins. + a: sample + w: weights + edges: bin edges + weighted: Means that the weights are appended to array a. + Return the bin count or frequency if normed. + """ + weighted = w is not None + nbin = edges.shape[0]+1 + if weighted: + count = np.zeros(nbin, dtype=w.dtype) + if normed: + count = np.zeros(nbin, dtype=float) + w = w/w.mean() + else: + count = np.zeros(nbin, int) + + binindex = np.digitize(a, edges) + + # Count the number of identical indices. + flatcount = np.bincount(binindex, w) + + # Place the count in the histogram array. + count[:len(flatcount)] = flatcount + + return count + + +def _optimize_binning(x, range, method='Freedman'): + """Find the optimal number of bins. + Available methods : Freedman, Scott + """ + N = x.shape[0] + if method.lower()=='freedman': + s=np.sort(x) + IQR = s[int(N*.75)] - s[int(N*.25)] # Interquantile range (75% -25%) + width = 2* IQR*N**(-1./3) + + elif method.lower()=='scott': + width = 3.49 * x.std()* N**(-1./3) + else: + raise 'Method must be Scott or Freedman', method + return int(np.diff(range)/width) Added: trunk/scipy/sandbox/dhuard/test_histogram.py =================================================================== --- trunk/scipy/sandbox/dhuard/test_histogram.py 2007-09-27 15:49:05 UTC (rev 3376) +++ trunk/scipy/sandbox/dhuard/test_histogram.py 2007-09-27 18:37:13 UTC (rev 3377) @@ -0,0 +1,99 @@ +from numpy.testing import * +from histogram import _histogram_fixed_binsize, _histogram_digitize,\ + _histogram_searchsort, histogram,_optimize_binning +import numpy as np +from numpy.random import rand + +class test_histogram1d_functions(NumpyTestCase): + def check_consistency(self): + n = 100 + r = rand(n)*12-1 + bins = range(11) + a = _histogram_fixed_binsize(r, bins[0], bins[1]-bins[0], len(bins)-1) + b = _histogram_digitize(r, None, np.array(bins), False) + c = _histogram_searchsort(r,bins) + assert_array_equal(a,b) + assert_array_equal(c,b) + +class test_histogram(NumpyTestCase): + def check_simple(self): + n=100 + v=rand(n) + (a,b)=histogram(v) + #check if the sum of the bins equals the number of samples + assert_equal(np.sum(a,axis=0),n) + #check that the bin counts are evenly spaced when the data is from a linear function + (a,b)=histogram(np.linspace(0,10,100)) + assert_array_equal(a,10) + #Check the construction of the bin array + a, b = histogram(v, bins=4, range=[.2,.8]) + assert_array_almost_equal(b['edges'],np.linspace(.2, .8, 5),8) + #Check the number of outliers + assert_equal((v<.2).sum(), b['lower']) + assert_equal((v>.8).sum(),b['upper']) + #Check the normalization + bins = [0,.5,.75,1] + a,b = histogram(v, bins, normed=True) + assert_almost_equal((a*np.diff(bins)).sum(), 1) + + def check_axis(self): + n,m = 100,20 + v = rand(n,m) + a,b = histogram(v, bins=5) + # Check dimension is reduced (axis=None). 
+ assert_equal(a.ndim, 1) + #Check total number of count is equal to the number of samples. + assert_equal(a.sum(), n*m) + a,b = histogram(v, bins = 7, axis=0) + # Check shape of new array is ok. + assert(a.ndim == 2) + assert_array_equal(a.shape,[7, m]) + # Check normalization is consistent + a,b = histogram(v, bins = 7, axis=0, normed=True) + assert_array_almost_equal((a.T*np.diff(b['edges'])).sum(1), np.ones((m)),5) + a,b = histogram(v, bins = 7, axis=1, normed=True) + assert_array_equal(a.shape, [n,7]) + assert_array_almost_equal((a*np.diff(b['edges'])).sum(1), np.ones((n))) + # Check results are consistent with 1d estimate + a1, b1 = histogram(v[0,:], bins=b['edges'], normed=True) + assert_array_almost_equal(a1, a[0,:],7) + + def check_weights(self): + # Check weights = constant gives the same answer as no weights. + v = rand(100) + w = np.ones(100)*5 + a,b = histogram(v) + na,nb = histogram(v, normed=True) + wa,wb = histogram(v, weights=w) + nwa,nwb = histogram(v, weights=w, normed=True) + assert_array_equal(a*5, wa) + assert_array_almost_equal(na, nwa,8) + # Check weights are properly applied. + v = np.linspace(0,10,10) + w = np.concatenate((np.zeros(5), np.ones(5))) + wa,wb = histogram(v, bins=np.linspace(0,10.01, 11),weights=w) + assert_array_almost_equal(wa, w) + + def check_strategies(self): + v = rand(100) + ae,be = histogram(v, strategy='binsize') + ab,bb = histogram(v, strategy='digitize') + as,bs = histogram(v, strategy='searchsort') + assert_array_equal(ae, ab) + assert_array_equal(ae, as) + + w = rand(100) + ae,be = histogram(v, weights=w, strategy='binsize') + ab,bb = histogram(v, weights=w, strategy='digitize') + as,bs = histogram(v, weights=w, strategy='searchsort') + assert_array_almost_equal(ae, ab,8) + assert_array_almost_equal(ae, as,8) + + def check_automatic_binning(self): + v = rand(100) + h,b = histogram(v, 'Scott') + h,b = histogram(v, 'Freedman') + + +if __name__ == "__main__": + NumpyTest().run() From scipy-svn at scipy.org Thu Sep 27 22:43:16 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 27 Sep 2007 21:43:16 -0500 (CDT) Subject: [Scipy-svn] r3378 - in trunk/scipy/sandbox/multigrid: . 
tests tests/sample_data Message-ID: <20070928024316.C297239C00C@new.scipy.org> Author: wnbell Date: 2007-09-27 21:43:06 -0500 (Thu, 27 Sep 2007) New Revision: 3378 Added: trunk/scipy/sandbox/multigrid/tests/examples.py trunk/scipy/sandbox/multigrid/tests/sample_data/ trunk/scipy/sandbox/multigrid/tests/sample_data/336_triangle_A.mtx.gz trunk/scipy/sandbox/multigrid/tests/sample_data/336_triangle_B.mtx.gz trunk/scipy/sandbox/multigrid/tests/sample_data/rocker_arm_surface.mtx.gz trunk/scipy/sandbox/multigrid/tests/sample_data/torus.mtx.gz trunk/scipy/sandbox/multigrid/tests/test_adaptive.py Modified: trunk/scipy/sandbox/multigrid/adaptive.py trunk/scipy/sandbox/multigrid/coarsen.py trunk/scipy/sandbox/multigrid/multilevel.py trunk/scipy/sandbox/multigrid/relaxation.py trunk/scipy/sandbox/multigrid/tests/test_coarsen.py trunk/scipy/sandbox/multigrid/tests/test_relaxation.py trunk/scipy/sandbox/multigrid/tests/test_utils.py trunk/scipy/sandbox/multigrid/utils.py Log: some improvements to smoothed aggregation continued work towards adaptive SA Modified: trunk/scipy/sandbox/multigrid/adaptive.py =================================================================== --- trunk/scipy/sandbox/multigrid/adaptive.py 2007-09-27 18:37:13 UTC (rev 3377) +++ trunk/scipy/sandbox/multigrid/adaptive.py 2007-09-28 02:43:06 UTC (rev 3378) @@ -1,13 +1,13 @@ import numpy,scipy,scipy.sparse -from numpy import sqrt,ravel,diff,zeros,zeros_like,inner,concatenate +from numpy import sqrt,ravel,diff,zeros,zeros_like,inner,concatenate,asarray from scipy.sparse import csr_matrix,coo_matrix from relaxation import gauss_seidel from multilevel import multilevel_solver from coarsen import sa_constant_interpolation -from utils import infinity_norm +#from utils import infinity_norm +from utils import approximate_spectral_radius - def fit_candidate(I,x): """ For each aggregate in I (i.e. each column of I) compute vector R and @@ -18,9 +18,11 @@ In other words, find a prolongator Q with orthonormal columns so that x is represented exactly on the coarser level by R.
""" + x = asarray(x) Q = csr_matrix((x.copy(),I.indices,I.indptr),dims=I.shape,check=False) R = sqrt(ravel(csr_matrix((x*x,I.indices,I.indptr),dims=I.shape,check=False).sum(axis=0))) #column 2-norms - Q.data *= (1.0/R)[Q.indices] + + Q.data *= (1.0/R)[Q.indices] #normalize columns of Q #print "norm(R)",scipy.linalg.norm(R) #print "min(R),max(R)",min(R),max(R) @@ -30,6 +32,60 @@ return Q,R +def fit_candidates(AggOp,candidates): + K = len(candidates) + + N_fine,N_coarse = AggOp.shape + + if len(candidates[0]) == K*N_fine: + #see if fine space has been expanded (all levels except for first) + AggOp = csr_matrix((AggOp.data.repeat(K),AggOp.indices.repeat(K),arange(K*N_fine + 1)),dims=(K*N_fine,N_coarse)) + N_fine = K*N_fine + + R = zeros((K*N_coarse,K)) + + candidate_matrices = [] + for i,c in enumerate(candidates): + X = csr_matrix((c.copy(),AggOp.indices,AggOp.indptr),dims=AggOp.shape) + + #TODO optimize this + + #orthogonalize X against previous + for j,A in enumerate(candidate_matrices): + D_AtX = csr_matrix((A.data*X.data,X.indices,X.indptr),dims=X.shape).sum(axis=0).A.flatten() #same as diagonal of A.T * X + R[j::K,i] = D_AtX + X.data -= D_AtX[X.indices] * A.data + + #AtX = csr_matrix(A.T.tocsr() * X + #R[j::K,i] = AtX.data + #X = X - A * AtX + + #normalize X + XtX = X.T.tocsr() * X + col_norms = sqrt(XtX.sum(axis=0)).flatten() + R[i::K,i] = col_norms + col_norms = 1.0/col_norms + col_norms[isinf(col_norms)] = 0 + X.data *= col_norms[X.indices] + + candidate_matrices.append(X) + + + Q_indptr = K*AggOp.indptr + Q_indices = (K*AggOp.indices).repeat(K) + for i in range(K): + Q_indices[i::K] += i + Q_data = empty(N_fine * K) + for i,X in enumerate(candidate_matrices): + Q_data[i::K] = X.data + Q = csr_matrix((Q_data,Q_indices,Q_indptr),dims=(N_fine,K*N_coarse)) + + coarse_candidates = [R[:,i] for i in range(K)] + + return Q,coarse_candidates + + + ##def orthonormalize_candidate(I,x,basis): ## Px = csr_matrix((x,I.indices,I.indptr),dims=I.shape,check=False) ## Rs = [] @@ -110,16 +166,19 @@ def smoothed_prolongator(P,A): #just use Richardson for now - #omega = 4.0/(3.0*infinity_norm(A)) + #omega = 4.0/(3.0*approximate_spectral_radius(A)) #return P - omega*(A*P) - #return P + #return P #TEST + D = diag_sparse(A) D_inv_A = diag_sparse(1.0/D)*A - omega = 4.0/(3.0*infinity_norm(D_inv_A)) + omega = 4.0/(3.0*approximate_spectral_radius(D_inv_A)) + print "spectral radius",approximate_spectral_radius(D_inv_A) D_inv_A *= omega return P - D_inv_A*P + def sa_hierarchy(A,Ws,x): """ Construct multilevel hierarchy using Smoothed Aggregation @@ -138,7 +197,8 @@ Ps = [] for W in Ws: - P,x = fit_candidate(W,x) + #P,x = fit_candidate(W,x) + P,x = fit_candidates(W,x) I = smoothed_prolongator(P,A) A = I.T.tocsr() * A * I As.append(A) @@ -152,59 +212,57 @@ return csr_matrix((I.data,I.indices,ptr),dims=(N,I.shape[1]),check=False) class adaptive_sa_solver: - def __init__(self,A,options=None): + def __init__(self,A,options=None,max_levels=10,max_coarse=100,max_candidates=1,mu=5,epsilon=0.1): self.A = A self.Rs = [] - self.__construct_hierarchy(A) - - def __construct_hierarchy(self,A): + #if self.A.shape[0] <= self.opts['coarse: max size']: # raise ValueError,'small matrices not handled yet' - x,AggOps = self.__initialization_stage(A) #first candidate + x,AggOps = self.__initialization_stage(A,max_levels=max_levels,max_coarse=max_coarse,mu=mu,epsilon=epsilon) #first candidate + Ws = AggOps - #x[:] = 1 #TEST - self.candidates = [x] - #self.candidates = [1.0/D.data] #create SA using x here - As,Is,Ps = 
sa_hierarchy(A,Ws,x) + As,Is,Ps = sa_hierarchy(A,Ws,self.candidates) - for i in range(0): - x = self.__develop_candidate(A,As,Is,Ps,Ws,AggOps) + for i in range(max_candidates - 1): + x = self.__develop_candidate(A,As,Is,Ps,Ws,AggOps,mu=mu) + + self.candidates.append(x) + #if i == 0: - # x = arange(20).repeat(20).astype(float) + # x = arange(50).repeat(50).astype(float) #elif i == 1: - # x = arange(20).repeat(20).astype(float) - # x = numpy.ravel(transpose(x.reshape((20,20)))) + # x = arange(50).repeat(50).astype(float) + # x = numpy.ravel(transpose(x.reshape((50,50)))) + + #As,Is,Ps,Ws = self.__augment_cycle(A,As,Ps,Ws,AggOps,x) + As,Is,Ps = sa_hierarchy(A,AggOps,self.candidates) + + #random.seed(0) + #solver = multilevel_solver(As,Is) + #x = solver.solve(zeros(A.shape[0]), x0=rand(A.shape[0]), tol=1e-12, maxiter=30) + #self.candidates.append(x) - As,Is,Ps,Ws = self.__augment_cycle(A,As,Ps,Ws,AggOps,x) - - self.candidates.append(x) - self.Ps = Ps self.solver = multilevel_solver(As,Is) self.AggOps = AggOps - def __develop_candidate(self,A,As,Is,Ps,Ws,AggOps): + def __develop_candidate(self,A,As,Is,Ps,Ws,AggOps,mu): + #scipy.random.seed(0) #TEST x = scipy.rand(A.shape[0]) b = zeros_like(x) - - #x = arange(200).repeat(200).astype(float) - #x[:] = 1 #TEST - - mu = 5 - solver = multilevel_solver(As,Is) - for n in range(mu): - x = solver.solve(b, x0=x, tol=1e-8, maxiter=1) + x = solver.solve(b, x0=x, tol=1e-10, maxiter=mu) + #TEST FOR CONVERGENCE HERE A_l,P_l,W_l,x_l = As[0],Ps[0],Ws[0],x @@ -271,18 +329,15 @@ return new_As,new_Is,new_Ps,new_Ws - def __initialization_stage(self,A): - max_levels = 10 - max_coarse = 50 - + def __initialization_stage(self,A,max_levels,max_coarse,mu,epsilon): AggOps = [] Is = [] # aSA parameters - mu = 5 # number of test relaxation iterations - epsilon = 0.1 # minimum acceptable relaxation convergence factor + # mu - number of test relaxation iterations + # epsilon - minimum acceptable relaxation convergence factor - scipy.random.seed(0) + #scipy.random.seed(0) #TEST #step 1 A_l = A @@ -291,15 +346,15 @@ #step 2 b = zeros_like(x) - gauss_seidel(A_l,x,b,iterations=mu) + gauss_seidel(A_l,x,b,iterations=mu,sweep='symmetric') #step 3 #test convergence rate here As = [A] while len(AggOps) + 1 < max_levels and A_l.shape[0] > max_coarse: - W_l = sa_constant_interpolation(A_l,epsilon=0.08*0.5**(len(AggOps)-1)) #step 4b #TEST - #W_l = sa_constant_interpolation(A_l,epsilon=0) #step 4b #TEST + #W_l = sa_constant_interpolation(A_l,epsilon=0.08*0.5**(len(AggOps)-1)) #step 4b #TEST + W_l = sa_constant_interpolation(A_l,epsilon=0) #step 4b P_l,x = fit_candidate(W_l,x) #step 4c I_l = smoothed_prolongator(P_l,A_l) #step 4d A_l = I_l.T.tocsr() * A_l * I_l #step 4e @@ -312,10 +367,10 @@ if not skip_f_to_i: print "." 
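# [Editorial note, not part of the committed diff: the step 4g-4i block that
# follows is the adaptive-SA convergence probe. The candidate x is relaxed
# mu more times, and the energy ratio (<x, A*x> / <x_hat, A*x_hat>)**(1.0/mu)
# estimates the per-sweep convergence factor in the A-norm; once it falls
# below epsilon, relaxation alone is deemed effective on this level, and the
# fine-to-coarse candidate improvement is skipped via the skip_f_to_i flag.]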
- x_hat = x.copy() #step 4g - gauss_seidel(A_l,x,zeros_like(x),iterations=mu) #step 4h + x_hat = x.copy() #step 4g + gauss_seidel(A_l,x,zeros_like(x),iterations=mu,sweep='symmetric') #step 4h x_A_x = inner(x,A_l*x) - if (x_A_x/inner(x_hat,A_l*x_hat))**(1.0/mu) < epsilon: #step 4i + if (x_A_x/inner(x_hat,A_l*x_hat))**(1.0/mu) < epsilon: #step 4i print "sufficient convergence, skipping" skip_f_to_i = True if x_A_x == 0: @@ -323,7 +378,7 @@ #update fine-level candidate for A_l,I in reversed(zip(As[1:],Is)): - gauss_seidel(A_l,x,zeros_like(x),iterations=mu) #TEST + gauss_seidel(A_l,x,zeros_like(x),iterations=mu,sweep='symmetric') #TEST x = I * x gauss_seidel(A,x,b,iterations=mu) #TEST @@ -336,23 +391,39 @@ from scipy import * from utils import diag_sparse from multilevel import poisson_problem1D,poisson_problem2D -#A = poisson_problem2D(100) -A = io.mmread("tests/sample_data/laplacian_40_3dcube.mtx").tocsr() +A = poisson_problem2D(50) +#A = io.mmread("tests/sample_data/laplacian_41_3dcube.mtx").tocsr() +#A = io.mmread("laplacian_40_3dcube.mtx").tocsr() +#A = io.mmread("/home/nathan/Desktop/9pt/9pt-100x100.mtx").tocsr() +#A = io.mmread("/home/nathan/Desktop/BasisShift_W_EnergyMin_Luke/9pt-5x5.mtx").tocsr() #A = A*A #D = diag_sparse(1.0/sqrt(10**(12*rand(A.shape[0])-6))).tocsr() #A = D * A * D #A = io.mmread("nos2.mtx").tocsr() -asa = adaptive_sa_solver(A) +asa = adaptive_sa_solver(A,max_candidates=1) +#x = arange(A.shape[0]).astype('d') + 1 +scipy.random.seed(0) #TEST x = rand(A.shape[0]) b = zeros_like(x) print "solving" -x_sol,residuals = asa.solver.solve(b,x,tol=1e-12,maxiter=30,return_residuals=True) +#x_sol,residuals = asa.solver.solve(b,x,tol=1e-8,maxiter=30,return_residuals=True) +if True: + x_sol,residuals = asa.solver.solve(b,x0=x,maxiter=10,tol=1e-12,return_residuals=True) +else: + residuals = [] + def add_resid(x): + residuals.append(linalg.norm(b - A*x)) + A.psolve = asa.solver.psolve + x_sol = linalg.cg(A,b,x0=x,maxiter=20,tol=1e-100,callback=add_resid)[0] residuals = array(residuals)/residuals[0] print "residuals ",residuals +print "mean convergence factor",(residuals[-1]/residuals[0])**(1.0/len(residuals)) +print "last convergence factor",residuals[-1]/residuals[-2] +print print asa.solver print "constant Rayleigh quotient",dot(ones(A.shape[0]),A*ones(A.shape[0]))/float(A.shape[0]) Modified: trunk/scipy/sandbox/multigrid/coarsen.py =================================================================== --- trunk/scipy/sandbox/multigrid/coarsen.py 2007-09-27 18:37:13 UTC (rev 3377) +++ trunk/scipy/sandbox/multigrid/coarsen.py 2007-09-28 02:43:06 UTC (rev 3378) @@ -1,24 +1,31 @@ - import multigridtools -import scipy -import numpy - -from utils import diag_sparse,infinity_norm +import scipy,numpy,scipy.sparse +from scipy.sparse import csr_matrix,isspmatrix_csr +from utils import diag_sparse,approximate_spectral_radius + def rs_strong_connections(A,theta): - if not scipy.sparse.isspmatrix_csr(A): raise TypeError('expected sparse.csr_matrix') + """ + Return a strength of connection matrix using the method of Ruge and Stuben + An off-diagonal entry A[i.j] is a strong connection iff + + -A[i,j] >= theta * max( -A[i,k] ) where k != i + """ + if not isspmatrix_csr(A): raise TypeError('expected csr_matrix') + if not isspmatrix_csr(A): raise TypeError('expected csr_matrix') + Sp,Sj,Sx = multigridtools.rs_strong_connections(A.shape[0],theta,A.indptr,A.indices,A.data) return scipy.sparse.csr_matrix((Sx,Sj,Sp),A.shape) def rs_interpolation(A,theta=0.25): - if not scipy.sparse.isspmatrix_csr(A): 
raise TypeError('expected sparse.csr_matrix') + if not isspmatrix_csr(A): raise TypeError('expected csr_matrix') S = rs_strong_connections(A,theta) - T = S.T.tocsr() + T = S.T.tocsr() #transpose S for efficient column access Ip,Ij,Ix = multigridtools.rs_interpolation(A.shape[0],\ A.indptr,A.indices,A.data,\ @@ -29,44 +36,67 @@ def sa_strong_connections(A,epsilon): - if not scipy.sparse.isspmatrix_csr(A): raise TypeError('expected sparse.csr_matrix') + if not isspmatrix_csr(A): raise TypeError('expected csr_matrix') Sp,Sj,Sx = multigridtools.sa_strong_connections(A.shape[0],epsilon,A.indptr,A.indices,A.data) return scipy.sparse.csr_matrix((Sx,Sj,Sp),A.shape) -def sa_constant_interpolation(A,epsilon): - if not scipy.sparse.isspmatrix_csr(A): raise TypeError('expected sparse.csr_matrix') +def sa_constant_interpolation(A,epsilon,blocks=None): + if not isspmatrix_csr(A): raise TypeError('expected csr_matrix') - S = sa_strong_connections(A,epsilon) + if blocks is not None: + num_dofs = A.shape[0] + num_blocks = blocks.max() + + if num_dofs != len(blocks): + raise ValueError,'improper block specification' + + # for non-scalar problems, use pre-defined blocks in aggregation + # the strength of connection matrix is based on the Frobenius norms of the blocks + + B = csr_matrix((ones(num_dofs),blocks,arange(num_dofs + 1)),dims=(num_dofs,num_blocks)) + Block_Frob = B.T.tocsr() * csr_matrix((A.data**2,A.indices,A.indptr),dims=A.shape) * B #Frobenius norms of block entries of A - #S.ensure_sorted_indices() - - #tentative (non-smooth) interpolation operator I - Pj = multigridtools.sa_get_aggregates(S.shape[0],S.indptr,S.indices) - Pp = numpy.arange(len(Pj)+1) - Px = numpy.ones(len(Pj)) + S = sa_strong_connections(Block_Frob,epsilon) + Pj = multigridtools.sa_get_aggregates(S.shape[0],S.indptr,S.indices) + Pj = Pj[blocks] #expand block aggregates into constituent dofs + Pp = B.indptr + Px = B.data + else: + S = sa_strong_connections(A,epsilon) + + Pj = multigridtools.sa_get_aggregates(S.shape[0],S.indptr,S.indices) + Pp = numpy.arange(len(Pj)+1) + Px = numpy.ones(len(Pj)) + return scipy.sparse.csr_matrix((Px,Pj,Pp)) + +## S = sa_strong_connections(A,epsilon) +## +## #tentative (non-smooth) interpolation operator I +## Pj = multigridtools.sa_get_aggregates(S.shape[0],S.indptr,S.indices) +## Pp = numpy.arange(len(Pj)+1) +## Px = numpy.ones(len(Pj)) +## +## return scipy.sparse.csr_matrix((Px,Pj,Pp)) + ##def sa_smoother(A,S,omega): ## Bp,Bj,Bx = multigridtools.sa_smoother(A.shape[0],omega,A.indptr,A.indices,A.data,S.indptr,S.indices,S.data) ## ## return csr_matrix((Bx,Bj,Bp),dims=A.shape) -def sa_interpolation(A,epsilon,omega=4.0/3.0): - if not scipy.sparse.isspmatrix_csr(A): raise TypeError('expected sparse.csr_matrix') +def sa_interpolation(A,epsilon,omega=4.0/3.0,blocks=None): + if not isspmatrix_csr(A): raise TypeError('expected csr_matrix') - P = sa_constant_interpolation(A,epsilon) + P = sa_constant_interpolation(A,epsilon=epsilon,blocks=blocks) -## As = sa_strong_connections(A,epsilon) -## S = sa_smoother(A,S,omega) - - D_inv = diag_sparse(1.0/diag_sparse(A)) D_inv_A = D_inv * A - D_inv_A *= omega/infinity_norm(D_inv_A) + D_inv_A *= omega/approximate_spectral_radius(D_inv_A) I = P - (D_inv_A*P) #same as I=S*P, (faster?)
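For readers following the coarsen.py hunk above: sa_interpolation builds a Jacobi-smoothed prolongator, I = P - (omega/rho(D^-1 A)) * D^-1 A * P, with the damping now scaled by approximate_spectral_radius instead of infinity_norm. Below is a minimal self-contained sketch of that smoothing step, written with present-day scipy.sparse names rather than the sandbox helpers; the 1-D Poisson matrix and the two-point aggregates are illustrative assumptions, not taken from this commit.

import numpy as np
import scipy.sparse as sp

n = 8
# toy 1-D Poisson matrix standing in for A
A = sp.diags([-1.0, 2.0, -1.0], [-1, 0, 1], shape=(n, n), format='csr')

# tentative piecewise-constant prolongator: aggregates {0,1}, {2,3}, ...
agg = np.arange(n) // 2
P = sp.csr_matrix((np.ones(n), agg, np.arange(n + 1)), shape=(n, n // 2))

# one damped-Jacobi sweep applied to P, mirroring sa_interpolation above
omega = 4.0 / 3.0
D_inv_A = sp.diags(1.0 / A.diagonal()) @ A
rho = max(abs(np.linalg.eigvals(D_inv_A.toarray())))  # exact spectral radius; fine at toy sizes
I = P - (omega / rho) * (D_inv_A @ P)

Smoothing the columns of P this way is what makes the Galerkin coarse operator I.T * A * I effective compared with plain (unsmoothed) aggregation.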
Modified: trunk/scipy/sandbox/multigrid/multilevel.py =================================================================== --- trunk/scipy/sandbox/multigrid/multilevel.py 2007-09-27 18:37:13 UTC (rev 3377) +++ trunk/scipy/sandbox/multigrid/multilevel.py 2007-09-28 02:43:06 UTC (rev 3378) @@ -2,14 +2,13 @@ 'ruge_stuben_solver','smoothed_aggregation_solver', 'multilevel_solver'] - from numpy.linalg import norm from numpy import zeros,zeros_like,array import scipy import numpy from coarsen import sa_interpolation,rs_interpolation -from relaxation import gauss_seidel,jacobi +from relaxation import gauss_seidel,jacobi,sor from utils import infinity_norm @@ -59,7 +58,7 @@ return multilevel_solver(As,Ps) -def smoothed_aggregation_solver(A,max_levels=10,max_coarse=500,epsilon=0.08): +def smoothed_aggregation_solver(A,blocks=None,max_levels=10,max_coarse=500,epsilon=0.08,omega=4.0/3.0): """ Create a multilevel solver using Smoothed Aggregation (SA) @@ -73,7 +72,7 @@ Ps = [] while len(As) < max_levels and A.shape[0] > max_coarse: - P = sa_interpolation(A,epsilon=epsilon*0.5**(len(As)-1)) + P = sa_interpolation(A,blocks=blocks,epsilon=epsilon*0.5**(len(As)-1),omega=omega) #P = sa_interpolation(A,epsilon=0.0) A = (P.T.tocsr() * A) * P #galerkin operator @@ -172,28 +171,42 @@ def presmoother(self,A,x,b): gauss_seidel(A,x,b,iterations=1,sweep="forward") + gauss_seidel(A,x,b,iterations=1,sweep="backward") + #sor(A,x,b,omega=1.85,iterations=1,sweep="backward") + #x += 4.0/(3.0*infinity_norm(A))*(b - A*x) def postsmoother(self,A,x,b): + #sor(A,x,b,omega=1.85,iterations=1,sweep="forward") gauss_seidel(A,x,b,iterations=1,sweep="forward") - #gauss_seidel(A,x,b,iterations=1,sweep="backward") + gauss_seidel(A,x,b,iterations=1,sweep="backward") #x += 4.0/(3.0*infinity_norm(A))*(b - A*x) if __name__ == '__main__': from scipy import * - A = poisson_problem2D(200) + #A = poisson_problem2D(100) #A = io.mmread("rocker_arm_surface.mtx").tocsr() + #A = io.mmread("9pt-100x100.mtx").tocsr() + A = io.mmread("/home/nathan/Desktop/9pt/9pt-100x100.mtx").tocsr() + #A = io.mmread("/home/nathan/Desktop/BasisShift_W_EnergyMin_Luke/9pt-5x5.mtx").tocsr() - ml = smoothed_aggregation_solver(A) + ml = smoothed_aggregation_solver(A,max_coarse=100,max_levels=3) #ml = ruge_stuben_solver(A) x = rand(A.shape[0]) b = zeros_like(x) #b = rand(A.shape[0]) - x_sol,residuals = ml.solve(b,x0=x,maxiter=40,tol=1e-10,return_residuals=True) + if True: + x_sol,residuals = ml.solve(b,x0=x,maxiter=30,tol=1e-12,return_residuals=True) + else: + residuals = [] + def add_resid(x): + residuals.append(linalg.norm(b - A*x)) + A.psolve = ml.psolve + x_sol = linalg.cg(A,b,x0=x,maxiter=12,tol=1e-100,callback=add_resid)[0] residuals = array(residuals)/residuals[0] Modified: trunk/scipy/sandbox/multigrid/relaxation.py =================================================================== --- trunk/scipy/sandbox/multigrid/relaxation.py 2007-09-27 18:37:13 UTC (rev 3377) +++ trunk/scipy/sandbox/multigrid/relaxation.py 2007-09-28 02:43:06 UTC (rev 3378) @@ -1,6 +1,22 @@ import multigridtools -import numpy +from numpy import empty_like + +def sor(A,x,b,omega,iterations=1,sweep='forward'): + """ + Perform SOR iteration on the linear system Ax=b + """ + x_old = empty_like(x) + + for i in range(iterations): + x_old[:] = x + gauss_seidel(A,x,b,iterations=1,sweep=sweep) + + x *= omega + x_old *= (1-omega) + x += x_old + + def gauss_seidel(A,x,b,iterations=1,sweep='forward'): """ Perform Gauss-Seidel iteration on the linear system Ax=b @@ -11,7 +27,8 @@ b - rank 1 ndarray of 
length N Optional: iterations - number of iterations to perform (default: 1) - sweep - slice of unknowns to relax (default: all in forward direction) + sweep - direction of sweep: + 'forward' (default), 'backward', or 'symmetric' """ if A.shape[0] != A.shape[1]: raise ValueError,'expected symmetric matrix' @@ -21,16 +38,25 @@ if sweep == 'forward': row_start,row_stop,row_step = 0,len(x),1 + for iter in xrange(iterations): + multigridtools.gauss_seidel(A.shape[0], + A.indptr, A.indices, A.data, + x, b, + row_start, row_stop, row_step) elif sweep == 'backward': row_start,row_stop,row_step = len(x)-1,-1,-1 + for iter in xrange(iterations): + multigridtools.gauss_seidel(A.shape[0], + A.indptr, A.indices, A.data, + x, b, + row_start, row_stop, row_step) + elif sweep == 'symmetric': + for iter in xrange(iterations): + gauss_seidel(A,x,b,iterations=1,sweep='forward') + gauss_seidel(A,x,b,iterations=1,sweep='backward') else: - raise ValueError,'valid sweep directions are \'forward\' and \'backward\'' + raise ValueError,'valid sweep directions are \'forward\', \'backward\', and \'symmetric\'' - for iter in xrange(iterations): - multigridtools.gauss_seidel(A.shape[0], - A.indptr, A.indices, A.data, - x, b, - row_start, row_stop, row_step) def jacobi(A,x,b,iterations=1,omega=1.0): """ @@ -54,7 +80,7 @@ if (row_stop - row_start) * row_step <= 0: #no work to do return - temp = numpy.empty_like(x) + temp = empty_like(x) for iter in xrange(iterations): multigridtools.jacobi(A.shape[0], @@ -88,6 +114,8 @@ Note: Horner's Rule is applied to avoid computing A^k directly. """ + #TODO skip first matvec if x is all zero + residual = (b - A*x) h = coeffs[0]*residual Added: trunk/scipy/sandbox/multigrid/tests/examples.py =================================================================== --- trunk/scipy/sandbox/multigrid/tests/examples.py 2007-09-27 18:37:13 UTC (rev 3377) +++ trunk/scipy/sandbox/multigrid/tests/examples.py 2007-09-28 02:43:06 UTC (rev 3378) @@ -0,0 +1,27 @@ +import gzip +from scipy.io import mmread + + +def read_matrix(filename): + filename = "sample_data/" + filename + if filename.endswith(".gz"): + fid = gzip.open(filename) + else: + fid = open(filename) + + return mmread(fid).tocsr() + + +mesh2d_laplacians = ['torus.mtx.gz','rocker_arm_surface.mtx.gz', + '336_triangle_A.mtx.gz','336_triangle_B.mtx.gz'] + + +all_examples = mesh2d_laplacians + +if __name__ == '__main__': + print "All Available Examples Are Listed Below\n" + for filename in all_examples: + print filename + print repr(read_matrix(filename)) + print "\n" + Added: trunk/scipy/sandbox/multigrid/tests/sample_data/336_triangle_A.mtx.gz =================================================================== (Binary files differ) Property changes on: trunk/scipy/sandbox/multigrid/tests/sample_data/336_triangle_A.mtx.gz ___________________________________________________________________ Name: svn:mime-type + application/octet-stream Added: trunk/scipy/sandbox/multigrid/tests/sample_data/336_triangle_B.mtx.gz =================================================================== (Binary files differ) Property changes on: trunk/scipy/sandbox/multigrid/tests/sample_data/336_triangle_B.mtx.gz ___________________________________________________________________ Name: svn:mime-type + application/octet-stream Added: trunk/scipy/sandbox/multigrid/tests/sample_data/rocker_arm_surface.mtx.gz =================================================================== (Binary files differ) Property changes on: 
trunk/scipy/sandbox/multigrid/tests/sample_data/rocker_arm_surface.mtx.gz ___________________________________________________________________ Name: svn:mime-type + application/octet-stream Added: trunk/scipy/sandbox/multigrid/tests/sample_data/torus.mtx.gz =================================================================== (Binary files differ) Property changes on: trunk/scipy/sandbox/multigrid/tests/sample_data/torus.mtx.gz ___________________________________________________________________ Name: svn:mime-type + application/octet-stream Added: trunk/scipy/sandbox/multigrid/tests/test_adaptive.py =================================================================== --- trunk/scipy/sandbox/multigrid/tests/test_adaptive.py 2007-09-27 18:37:13 UTC (rev 3377) +++ trunk/scipy/sandbox/multigrid/tests/test_adaptive.py 2007-09-28 02:43:06 UTC (rev 3378) @@ -0,0 +1,53 @@ +from numpy.testing import * + +from scipy.sparse import csr_matrix +from scipy import arange,ones,zeros,array,eye + +set_package_path() +from scipy.sandbox.multigrid.adaptive import fit_candidates +restore_path() + + +class test_fit_candidates(NumpyTestCase): + def setUp(self): + self.cases = [] + + #one candidate + self.cases.append((csr_matrix((ones(5),array([0,0,0,1,1]),arange(6)),dims=(5,2)),[ones(5)])) + self.cases.append((csr_matrix((ones(5),array([1,1,0,0,0]),arange(6)),dims=(5,2)),[ones(5)])) + self.cases.append((csr_matrix((ones(9),array([0,0,0,1,1,1,2,2,2]),arange(10)),dims=(9,3)),[ones(9)])) + self.cases.append((csr_matrix((ones(9),array([2,1,0,0,1,2,1,0,2]),arange(10)),dims=(9,3)),[arange(9)])) + + #two candidates + self.cases.append((csr_matrix((ones(4),array([0,0,1,1]),arange(5)),dims=(4,2)),[ones(4),arange(4)])) + self.cases.append((csr_matrix((ones(9),array([0,0,0,1,1,1,2,2,2]),arange(10)),dims=(9,3)),[ones(9),arange(9)])) + self.cases.append((csr_matrix((ones(9),array([0,0,1,1,2,2,3,3,3]),arange(10)),dims=(9,4)),[ones(9),arange(9)])) + + def check_all(self): + for AggOp,fine_candidates in self.cases: + Q,coarse_candidates = fit_candidates(AggOp,fine_candidates) + + assert_equal(len(coarse_candidates),len(fine_candidates)) + assert_almost_equal((Q.T*Q).todense(),eye(Q.shape[1])) + + for fine,coarse in zip(fine_candidates,coarse_candidates): + assert_almost_equal(fine,Q*coarse) + + #aggregate one more level (to a single aggregate) + K = len(coarse_candidates) + N = K*AggOp.shape[1] + AggOp = csr_matrix((ones(N),zeros(N),arange(N + 1)),dims=(N,1)) + fine_candidates = coarse_candidates + + Q,coarse_candidates = fit_candidates(AggOp,fine_candidates) + + assert_equal(len(coarse_candidates),len(fine_candidates)) + assert_almost_equal((Q.T*Q).todense(),eye(Q.shape[1])) + + for fine,coarse in zip(fine_candidates,coarse_candidates): + assert_almost_equal(fine,Q*coarse) + +if __name__ == '__main__': + NumpyTest().run() + + Modified: trunk/scipy/sandbox/multigrid/tests/test_coarsen.py =================================================================== --- trunk/scipy/sandbox/multigrid/tests/test_coarsen.py 2007-09-27 18:37:13 UTC (rev 3377) +++ trunk/scipy/sandbox/multigrid/tests/test_coarsen.py 2007-09-28 02:43:06 UTC (rev 3378) @@ -6,9 +6,9 @@ import numpy set_package_path() -import scipy.multigrid -from scipy.multigrid.coarsen import sa_strong_connections,sa_constant_interpolation -from scipy.multigrid.multilevel import poisson_problem1D,poisson_problem2D +import scipy.sandbox.multigrid +from scipy.sandbox.multigrid.coarsen import sa_strong_connections,sa_constant_interpolation +from scipy.sandbox.multigrid.multilevel import 
poisson_problem1D,poisson_problem2D restore_path() @@ -39,7 +39,6 @@ aggregates = empty(n,dtype=A.indices.dtype) aggregates[:] = -1 - # Pass #1 for i,row in enumerate(S): Ni = set(row) | set([i]) @@ -120,17 +119,10 @@ S_expected = reference_sa_strong_connections(A,epsilon) assert_array_equal(S_result.todense(),S_expected.todense()) -## def check_sample_data(self): -## for filename in all_matrices: -## A = open_matrix(filename) - -S_result = None -S_expected = None class test_sa_constant_interpolation(NumpyTestCase): def check_random(self): numpy.random.seed(0) - for N in [2,3,5,10]: A = csr_matrix(rand(N,N)) for epsilon in [0.0,0.1,0.5,0.8,1.0]: @@ -154,7 +146,16 @@ S_expected = reference_sa_constant_interpolation(A,epsilon) assert_array_equal(S_result.todense(),S_expected.todense()) + def check_sample_data(self): + from examples import all_examples,read_matrix + for filename in all_examples: + A = read_matrix(filename) + for epsilon in [0.0,0.08,0.51,1.0]: + S_result = sa_constant_interpolation(A,epsilon) + S_expected = reference_sa_constant_interpolation(A,epsilon) + assert_array_equal((S_result - S_expected).nnz,0) + if __name__ == '__main__': NumpyTest().run() Modified: trunk/scipy/sandbox/multigrid/tests/test_relaxation.py =================================================================== --- trunk/scipy/sandbox/multigrid/tests/test_relaxation.py 2007-09-27 18:37:13 UTC (rev 3377) +++ trunk/scipy/sandbox/multigrid/tests/test_relaxation.py 2007-09-28 02:43:06 UTC (rev 3378) @@ -7,8 +7,8 @@ set_package_path() -import scipy.multigrid -from scipy.multigrid.relaxation import polynomial_smoother,gauss_seidel,jacobi +import scipy.sandbox.multigrid +from scipy.sandbox.multigrid.relaxation import polynomial_smoother,gauss_seidel,jacobi restore_path() Modified: trunk/scipy/sandbox/multigrid/tests/test_utils.py =================================================================== --- trunk/scipy/sandbox/multigrid/tests/test_utils.py 2007-09-27 18:37:13 UTC (rev 3377) +++ trunk/scipy/sandbox/multigrid/tests/test_utils.py 2007-09-28 02:43:06 UTC (rev 3378) @@ -7,7 +7,7 @@ set_package_path() -from scipy.multigrid.utils import infinity_norm,diag_sparse +from scipy.sandbox.multigrid.utils import infinity_norm,diag_sparse restore_path() Modified: trunk/scipy/sandbox/multigrid/utils.py =================================================================== --- trunk/scipy/sandbox/multigrid/utils.py 2007-09-27 18:37:13 UTC (rev 3377) +++ trunk/scipy/sandbox/multigrid/utils.py 2007-09-28 02:43:06 UTC (rev 3378) @@ -1,11 +1,20 @@ -__all__ =['inf_norm','diag_sparse'] +__all__ =['approximate_spectral_radius','infinity_norm','diag_sparse'] -import numpy,scipy,scipy.sparse,scipy.weave +import numpy,scipy,scipy.sparse from numpy import ravel,arange from scipy.sparse import isspmatrix,isspmatrix_csr,isspmatrix_csc, \ csr_matrix,csc_matrix,extract_diagonal +def approximate_spectral_radius(A,tol=0.1,maxiter=20): + """ + Approximate the spectral radius of a symmetric matrix using ARPACK + """ + from scipy.sandbox.arpack import eigen_symmetric + return eigen_symmetric(A, k=1, ncv=10, which='LM', maxiter=maxiter, tol=tol, return_eigenvectors=False)[0] + + + def infinity_norm(A): """ Infinity norm of a sparse matrix (maximum absolute row sum). 
This serves From scipy-svn at scipy.org Fri Sep 28 15:41:29 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Fri, 28 Sep 2007 14:41:29 -0500 (CDT) Subject: [Scipy-svn] r3379 - trunk/scipy/optimize Message-ID: <20070928194129.E011039C092@new.scipy.org> Author: dmitrey.kroshko Date: 2007-09-28 14:41:17 -0500 (Fri, 28 Sep 2007) New Revision: 3379 Modified: trunk/scipy/optimize/minpack.py Log: updates in fsolve docstring (possibility to call from OpenOpt) Modified: trunk/scipy/optimize/minpack.py =================================================================== --- trunk/scipy/optimize/minpack.py 2007-09-28 02:43:06 UTC (rev 3378) +++ trunk/scipy/optimize/minpack.py 2007-09-28 19:41:17 UTC (rev 3379) @@ -88,6 +88,8 @@ See also: + scikits.openopt, which offers a unified syntax to call this and other solvers + fmin, fmin_powell, fmin_cg, fmin_bfgs, fmin_ncg -- multivariate local optimizers leastsq -- nonlinear least squares minimizer From scipy-svn at scipy.org Fri Sep 28 16:58:08 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Fri, 28 Sep 2007 15:58:08 -0500 (CDT) Subject: [Scipy-svn] r3380 - in trunk/scipy/sandbox/multigrid: . tests Message-ID: <20070928205808.68AA239C092@new.scipy.org> Author: wnbell Date: 2007-09-28 15:58:04 -0500 (Fri, 28 Sep 2007) New Revision: 3380 Modified: trunk/scipy/sandbox/multigrid/setup.py trunk/scipy/sandbox/multigrid/tests/test_coarsen.py Log: disabled code that uses examples Modified: trunk/scipy/sandbox/multigrid/setup.py =================================================================== --- trunk/scipy/sandbox/multigrid/setup.py 2007-09-28 19:41:17 UTC (rev 3379) +++ trunk/scipy/sandbox/multigrid/setup.py 2007-09-28 20:58:04 UTC (rev 3380) @@ -10,6 +10,7 @@ config = Configuration('multigrid',parent_package,top_path) config.add_data_dir('tests') + config.add_data_dir(join('tests','sample_data')) # Adding a Python file as a "source" file for an extension is something of # a hack, but it works to put it in the right place. 
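A side note on the setup.py hunk above: add_data_dir is what makes the .mtx.gz matrices added in r3378 ship with the built package. A hypothetical minimal setup.py in the same numpy.distutils idiom is sketched below; only Configuration and the two add_data_dir calls come from the diff, the rest is the standard boilerplate pattern and the package name is illustrative.

from os.path import join

def configuration(parent_package='', top_path=None):
    from numpy.distutils.misc_util import Configuration
    config = Configuration('multigrid', parent_package, top_path)
    # ship the test suite and the sample matrices it reads at runtime
    config.add_data_dir('tests')
    config.add_data_dir(join('tests', 'sample_data'))
    return config

if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())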
Modified: trunk/scipy/sandbox/multigrid/tests/test_coarsen.py =================================================================== --- trunk/scipy/sandbox/multigrid/tests/test_coarsen.py 2007-09-28 19:41:17 UTC (rev 3379) +++ trunk/scipy/sandbox/multigrid/tests/test_coarsen.py 2007-09-28 20:58:04 UTC (rev 3380) @@ -146,16 +146,16 @@ S_expected = reference_sa_constant_interpolation(A,epsilon) assert_array_equal(S_result.todense(),S_expected.todense()) - def check_sample_data(self): - from examples import all_examples,read_matrix +## def check_sample_data(self): +## from examples import all_examples,read_matrix +## +## for filename in all_examples: +## A = read_matrix(filename) +## for epsilon in [0.0,0.08,0.51,1.0]: +## S_result = sa_constant_interpolation(A,epsilon) +## S_expected = reference_sa_constant_interpolation(A,epsilon) +## assert_array_equal((S_result - S_expected).nnz,0) - for filename in all_examples: - A = read_matrix(filename) - for epsilon in [0.0,0.08,0.51,1.0]: - S_result = sa_constant_interpolation(A,epsilon) - S_expected = reference_sa_constant_interpolation(A,epsilon) - assert_array_equal((S_result - S_expected).nnz,0) - if __name__ == '__main__': NumpyTest().run() From scipy-svn at scipy.org Fri Sep 28 19:01:25 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Fri, 28 Sep 2007 18:01:25 -0500 (CDT) Subject: [Scipy-svn] r3381 - trunk/scipy/io Message-ID: <20070928230125.0944239C06B@new.scipy.org> Author: chris.burns Date: 2007-09-28 18:01:20 -0500 (Fri, 28 Sep 2007) New Revision: 3381 Removed: trunk/scipy/io/nifti/ Log: Remove nifti until licensing is resolved. From scipy-svn at scipy.org Fri Sep 28 20:24:55 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Fri, 28 Sep 2007 19:24:55 -0500 (CDT) Subject: [Scipy-svn] r3382 - trunk/scipy/io Message-ID: <20070929002455.8463739C0F9@new.scipy.org> Author: chris.burns Date: 2007-09-28 19:24:52 -0500 (Fri, 28 Sep 2007) New Revision: 3382 Modified: trunk/scipy/io/datasource.py Log: Code cleanup and documentation. Modified: trunk/scipy/io/datasource.py =================================================================== --- trunk/scipy/io/datasource.py 2007-09-28 23:01:20 UTC (rev 3381) +++ trunk/scipy/io/datasource.py 2007-09-29 00:24:52 UTC (rev 3382) @@ -17,46 +17,84 @@ file_openers = {".gz":gzip.open, ".bz2":bz2.BZ2File, None:file} def iszip(filename): - """ Is this filename a zip file. + """Is filename a zip file. - :Returns: ``bool`` + *Parameters*: + + filename : {string} + Filename to test. + + *Returns*: + + bool + Results of test. + """ - _, ext = path(filename).splitext() + + _tmp, ext = path(filename).splitext() return ext in zipexts def unzip(filename): - """ Unzip the given file into another file. Return the new file's name. + """Unzip filename into another file. - :Returns: ``string`` + *Parameters*: + + filename : {string} + Filename to unzip. + + *Returns*: + + string + Name of the unzipped file. + """ + if not iszip(filename): raise ValueError("file %s is not zipped"%filename) unzip_name, zipext = splitzipext(filename) opener = file_openers[zipext] - outfile = file(unzip_name,'w') + outfile = file(unzip_name, 'w') outfile.write(opener(filename).read()) outfile.close() return unzip_name def iswritemode(mode): - """ Test if the given mode will open a file for writing. + """Test if the given mode will open a file for writing. - :Parameters: - `mode` : string - The mode to be checked + *Parameters*: - :Returns: ``bool`` + mode : {string} + The mode to be checked. 
+ + *Returns*: + + bool + Result of test. + """ + return mode.find("w")>-1 or mode.find("+")>-1 +def splitzipext(filename): + """Return a tuple containing the filename and the zip extension separated. -def splitzipext(filename): + If the filename does not have a zip extension then: + base -> filename + zip_ext -> None + + *Parameters*: + + filename : {string} + Filename to split. + + *Returns*: + + base, zip_ext : {tuple} + Tuple containing the base file... + """ - return (base, zip_extension) from filename. - If filename does not have a zip extention then - base = filename and zip_extension = None - """ + if iszip(filename): return path(filename).splitext() else: @@ -64,7 +102,6 @@ - def isurl(pathstr): """ Check whether a given string can be parsed as a URL. @@ -97,24 +134,27 @@ class Cache (object): + """A file cache. + + The path of the cache can be specified or else use ~/.scipy/cache + by default. + + """ - A file cache. The path of the cache can be specified - or else use ~/.nipy/cache by default. - """ def __init__(self, cachepath=None): if cachepath is not None: self.path = path(cachepath) elif os.name == 'posix': - self.path = path(os.environ["HOME"]).joinpath(".nipy","cache") + self.path = path(os.environ["HOME"]).joinpath(".scipy","cache") elif os.name == 'nt': - self.path = path(os.environ["HOMEPATH"]).joinpath(".nipy","cache") + self.path = path(os.environ["HOMEPATH"]).joinpath(".scipy","cache") if not self.path.exists(): ensuredirs(self.path) def tempfile(self, suffix='', prefix=''): """ Return an temporary file name in the cache""" - _, fname = mkstemp(suffix, prefix, self.path) + _tmp, fname = mkstemp(suffix, prefix, self.path) return fname def filepath(self, uri): @@ -153,8 +193,8 @@ :Returns: ``None`` """ - for file in self.path.files(): - file.rm() + for _file in self.path.files(): + _file.rm() def iscached(self, uri): """ Check if a file exists in the cache. @@ -176,12 +216,19 @@ class DataSource (object): + """A generic data source class. + Data could be from a file, URL, cached file. + + TODO: Improve DataSource docstring + + """ + def __init__(self, cachepath=os.curdir): self._cache = Cache(cachepath) def tempfile(self, suffix='', prefix=''): - ''' Return an temporary file name in the cache''' + """Return an temporary file name in the cache.""" return self._cache.tempfile(suffix, prefix) def _possible_names(self, filename): @@ -234,7 +281,13 @@ class Repository (DataSource): - """DataSource with an implied root.""" + """Multiple DataSource's that share one base url. + + TODO: Improve Repository docstring. + + """ + + #"""DataSource with an implied root.""" def __init__(self, baseurl, cachepath=None): DataSource.__init__(self, cachepath=cachepath) self._baseurl = baseurl From scipy-svn at scipy.org Sat Sep 29 15:23:48 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 29 Sep 2007 14:23:48 -0500 (CDT) Subject: [Scipy-svn] r3383 - in trunk/scipy/sandbox/timeseries: . 
lib Message-ID: <20070929192348.5D76939C060@new.scipy.org> Author: mattknox_ca Date: 2007-09-29 14:23:41 -0500 (Sat, 29 Sep 2007) New Revision: 3383 Modified: trunk/scipy/sandbox/timeseries/lib/interpolate.py trunk/scipy/sandbox/timeseries/lib/moving_funcs.py trunk/scipy/sandbox/timeseries/tseries.py Log: - removed copy_attributes method (used _update_from from MaskedArray instead) - minor code and documentation clean up Modified: trunk/scipy/sandbox/timeseries/lib/interpolate.py =================================================================== --- trunk/scipy/sandbox/timeseries/lib/interpolate.py 2007-09-29 00:24:52 UTC (rev 3382) +++ trunk/scipy/sandbox/timeseries/lib/interpolate.py 2007-09-29 19:23:41 UTC (rev 3383) @@ -10,7 +10,6 @@ __revision__ = "$Revision$" __date__ = '$Date$' - import numpy.core.numeric as numeric from scipy.interpolate import fitpack @@ -20,14 +19,10 @@ from maskedarray.extras import flatnotmasked_edges marray = MA.array - __all__ = [ 'forward_fill', 'backward_fill', 'interp_masked1d', ] - - - #####--------------------------------------------------------------------------- #---- --- Functions for filling in masked values in a masked array --- #####--------------------------------------------------------------------------- @@ -35,8 +30,9 @@ """forward_fill(marr, maxgap=None) Forward fills masked values in a 1-d array when there are less than maxgap -consecutive masked values. If maxgap is None, then forward fill all -masked values.""" +consecutive masked values. If maxgap is None, then forward fill all masked +values. +""" # Initialization .................. if numeric.ndim(marr) > 1: raise ValueError,"The input array should be 1D only!" @@ -63,17 +59,16 @@ marr._data[i] = marr._data[i-1] marr._mask[i] = False return marr -#.......................................................... +#............................................................................. def backward_fill(marr, maxgap=None): """backward_fill(marr, maxgap=None) Backward fills masked values in a 1-d array when there are less than maxgap consecutive masked values. If maxgap is None, then backward fills all masked values. - """ +""" return forward_fill(marr[::-1], maxgap=maxgap)[::-1] - -#.......................................................... +#............................................................................. def interp_masked1d(marr, kind='linear'): """interp_masked1d(marr, kind='linear') @@ -112,4 +107,3 @@ (maskedIndices < last_unmasked)] marr[interpIndices] = fitpack.splev(interpIndices, tck).astype(marr.dtype) return marr - Modified: trunk/scipy/sandbox/timeseries/lib/moving_funcs.py =================================================================== --- trunk/scipy/sandbox/timeseries/lib/moving_funcs.py 2007-09-29 00:24:52 UTC (rev 3382) +++ trunk/scipy/sandbox/timeseries/lib/moving_funcs.py 2007-09-29 19:23:41 UTC (rev 3383) @@ -34,12 +34,11 @@ "process the results from the c function" rarray = result_dict['array'] - rtype = result_dict['array'].dtype rmask = result_dict['mask'] # makes a copy of the appropriate type - data = orig_data.astype(rtype).copy() - data.flat = result_dict['array'].ravel() + data = orig_data.astype(rarray.dtype).copy() + data.flat = rarray.ravel() if not hasattr(data, '__setmask__'): data = data.view(MA.MaskedArray) data.__setmask__(rmask) @@ -77,7 +76,7 @@ def mov_sum(data, span, dtype=None): """Calculates the moving sum of a series.
-:Parameters: +*Parameters*: $$data$$ $$span$$ $$dtype$$""" @@ -91,7 +90,7 @@ def mov_median(data, span, dtype=None): """Calculates the moving median of a series. -:Parameters: +*Parameters*: $$data$$ $$span$$ $$dtype$$""" @@ -105,7 +104,7 @@ def mov_average(data, span, dtype=None): """Calculates the moving average of a series. -:Parameters: +*Parameters*: $$data$$ $$span$$ $$dtype$$""" @@ -131,7 +130,7 @@ def mov_var(data, span, bias=False, dtype=None): """Calculates the moving variance of a 1-D array. -:Parameters: +*Parameters*: $$data$$ $$span$$ $$bias$$ @@ -143,7 +142,7 @@ def mov_stddev(data, span, bias=False, dtype=None): """Calculates the moving standard deviation of a 1-D array. -:Parameters: +*Parameters*: $$data$$ $$span$$ $$bias$$ @@ -155,7 +154,7 @@ def mov_covar(x, y, span, bias=False, dtype=None): """Calculates the moving covariance of two 1-D arrays. -:Parameters: +*Parameters*: $$x$$ $$y$$ $$span$$ @@ -173,7 +172,7 @@ def mov_corr(x, y, span, dtype=None): """Calculates the moving correlation of two 1-D arrays. -:Parameters: +*Parameters*: $$x$$ $$y$$ $$span$$ @@ -188,15 +187,16 @@ def mov_average_expw(data, span, tol=1e-6): """Calculates the exponentially weighted moving average of a series. -:Parameters: +*Parameters*: $$data$$ span : int Time periods. The smoothing factor is 2/(span + 1) tol : float, *[1e-6]* Tolerance for the definition of the mask. When data contains masked - values, this parameter determinea what points in the result should be masked. - Values in the result that would not be "significantly" impacted (as - determined by this parameter) by the masked values are left unmasked.""" + values, this parameter determines what points in the result should be + masked. Values in the result that would not be "significantly" + impacted (as determined by this parameter) by the masked values are + left unmasked.""" data = marray(data, copy=True, subok=True) ismasked = (data._mask is not nomask) @@ -215,37 +215,38 @@ data._mask[0] = True # return data -#............................................................................... +#............................................................................. def cmov_window(data, span, window_type): - """Applies a centered moving window of type window_type and size span on the -data. + """Applies a centered moving window of type window_type and size span on +the data. Returns a (subclass of) MaskedArray. The k first and k last data are always masked (with k=span//2). When data has a missing value at position i, the result has missing values in the interval [i-k:i+k+1]. -:Parameters: - data : ndarray - Data to process. The array should be at most 2D. On 2D arrays, the window - is applied recursively on each column. - span : integer +*Parameters*: + data : {ndarray} + Data to process. The array should be at most 2D. On 2D arrays, the + window is applied recursively on each column. + span : {int} The width of the window. - window_type : string/tuple/float + window_type : {string/tuple/float} Window type (see Notes) -Notes ----- +*Notes*: -The recognized window types are: boxcar, triang, blackman, hamming, hanning, -bartlett, parzen, bohman, blackmanharris, nuttall, barthann, kaiser (needs beta), -gaussian (needs std), general_gaussian (needs power, width), slepian (needs width). -If the window requires parameters, the window_type argument should be a tuple -with the first argument the string name of the window, and the next arguments -the needed parameters.
If window_type is a floating point number, it is interpreted -as the beta parameter of the kaiser window. + The recognized window types are: boxcar, triang, blackman, hamming, + hanning, bartlett, parzen, bohman, blackmanharris, nuttall, barthann, + kaiser (needs beta), gaussian (needs std), general_gaussian (needs power, + width), slepian (needs width). If the window requires parameters, the + window_type argument should be a tuple with the first argument the string + name of the window, and the next arguments the needed parameters. If + window_type is a floating point number, it is interpreted as the beta + parameter of the kaiser window. -Note also that only boxcar has been thoroughly tested." + Note also that only boxcar has been thoroughly tested. +""" data = marray(data, copy=True, subok=True) if data._mask is nomask: @@ -269,57 +270,60 @@ def cmov_average(data, span): """Computes the centered moving average of size span on the data. - Returns a (subclass of) MaskedArray. The k first and k last data are always - masked (with k=span//2). When data has a missing value at position i, - the result has missing values in the interval [i-k:i+k+1]. - -:Parameters: - data : ndarray - Data to process. The array should be at most 2D. On 2D arrays, the window - is applied recursively on each column. - span : integer - The width of the window.""" +*Parameters*: + data : {ndarray} + Data to process. The array should be at most 2D. On 2D arrays, the + window is applied recursively on each column. + span : {int} + The width of the window. + +*Returns*: + A (subclass of) MaskedArray. The k first and k last data are always masked + (with k=span//2). When data has a missing value at position i, the result + has missing values in the interval [i-k:i+k+1]. +""" return cmov_window(data, span, 'boxcar') cmov_mean = cmov_average param_doc = {} param_doc['data'] = \ -"""data : ndarray +"""data : {ndarray} Data must be an ndarray (or subclass). In particular, note that TimeSeries objects are valid here.""" param_doc['x'] = \ -"""x : ndarray +"""x : {ndarray} First array to be included in the calculation. x must be an ndarray (or subclass). In particular, note that TimeSeries objects are valid here.""" param_doc['y'] = \ -"""y : ndarray +"""y : {ndarray} Second array to be included in the calculation. y must be an ndarray (or subclass). In particular, note that TimeSeries objects are valid here.""" param_doc['span'] = \ -"""span : int +"""span : {int} Time periods to use for each calculation.""" param_doc['bias'] = \ -"""bias : boolean (*False*) +"""bias : {False, True}, optional If False, normalization is by (N-1) where N == span (unbiased estimate). If True then normalization is by N.""" param_doc['dtype'] = \ -"""dtype : numpy data type specification (*None*) +"""dtype : {numpy data type specification}, optional dtype for the result""" mov_result_doc = \ """ -:Return value: +*Returns*: The result is always a masked array (preserves subclass attributes). The - result at index i uses values from [i-span:i+1], and will be masked for the - first `span` values. The result will also be masked at i if any of the - input values in the slice [i-span:i+1] are masked.
+""" _g = globals() Modified: trunk/scipy/sandbox/timeseries/tseries.py =================================================================== --- trunk/scipy/sandbox/timeseries/tseries.py 2007-09-29 00:24:52 UTC (rev 3382) +++ trunk/scipy/sandbox/timeseries/tseries.py 2007-09-29 19:23:41 UTC (rev 3383) @@ -19,8 +19,7 @@ import numpy from numpy import ndarray from numpy import bool_, complex_, float_, int_, object_ -from numpy.core.multiarray import dtype -import numpy.core.fromnumeric as fromnumeric +from numpy import dtype import numpy.core.numeric as numeric import numpy.core.umath as umath from numpy.core.records import recarray @@ -74,7 +73,7 @@ marray must be 1 dimensional. *Returns*: - val : {marray.dtype} + val : {singleton of type marray.dtype} first unmasked value in marray. If all values in marray are masked, the function returns the maskedarray.masked constant """ @@ -88,7 +87,7 @@ marray must be 1 dimensional. *Returns*: - val : {marray.dtype} + val : {singleton of type marray.dtype} last unmasked value in marray. If all values in marray are masked, the function returns the maskedarray.masked constant """ @@ -156,7 +155,6 @@ return False return True - def _timeseriescompat_multiple(*series): """Checks the date compatibility of multiple TimeSeries objects. Returns True if everything's fine, or raises an exception. Unlike @@ -173,26 +171,31 @@ if len(set(start_dates)) > 1: errItems = tuple(set(start_dates)) - raise TimeSeriesCompatibilityError('start_dates', errItems[0], errItems[1]) + raise TimeSeriesCompatibilityError('start_dates', + errItems[0], errItems[1]) - if max(steps) == True: bad_index = [x for x, val in enumerate(steps) if val][0] raise TimeSeriesCompatibilityError('time_steps', - series[0]._dates.get_steps(), series[bad_index]._dates.get_steps()) + series[0]._dates.get_steps(), + series[bad_index]._dates.get_steps()) if len(set(shapes)) > 1: errItems = tuple(set(shapes)) - raise TimeSeriesCompatibilityError('size', "1: %s" % str(errItems[0].shape), - "2: %s" % str(errItems[1].shape)) + raise TimeSeriesCompatibilityError('size', + "1: %s" % str(errItems[0].shape), + "2: %s" % str(errItems[1].shape)) return True +def _datadatescompat(data, dates): + """Checks the compatibility of dates and data at the creation of a +TimeSeries. -def _datadatescompat(data, dates): - """Checks the compatibility of dates and data at the creation of a TimeSeries. - Returns True if everything's fine, raises an exception otherwise.""" - # If there's only 1 element, the date is a Date object, which has no size... +Returns True if everything's fine, raises an exception otherwise. +""" + # If there's only 1 element, the date is a Date object, which has no + # size... tsize = numeric.size(dates) dsize = data.size # Only one data @@ -216,14 +219,17 @@ def _compare_frequencies(*series): """Compares the frequencies of a sequence of series. - Returns the common frequency, or raises an exception if series have different - frequencies.""" + +Returns the common frequency, or raises an exception if series have different +frequencies. +""" unique_freqs = numpy.unique([x.freqstr for x in series]) try: common_freq = unique_freqs.item() except ValueError: raise TimeSeriesError, \ - "All series must have same frequency! (got %s instead)" % unique_freqs + "All series must have same frequency! 
(got %s instead)" % \ + unique_freqs return common_freq ##### ------------------------------------------------------------------------ @@ -231,10 +237,10 @@ ##### ------------------------------------------------------------------------ class _tsmathmethod(object): """Defines a wrapper for arithmetic array methods (add, mul...). -When called, returns a new TimeSeries object, with the new series the result of -the method applied on the original series. -The `_dates` part remains unchanged. - """ +When called, returns a new TimeSeries object, with the new series the result +of the method applied on the original series. The `_dates` part remains +unchanged. +""" def __init__ (self, methodname): self._name = methodname @@ -287,11 +293,11 @@ result._dates = getattr(instance._dates, _name)(*args) else: result._dates = instance._dates - result.copy_attributes(instance) return result class _tsaxismethod(object): """Defines a wrapper for array methods working on an axis (mean...). + When called, returns a ndarray, as the result of the method applied on the series. """ @@ -347,7 +353,6 @@ it is typically recommended to use the `time_series` function for construction as it allows greater flexibility and convenience. """ - _genattributes = ['fill_value'] def __new__(cls, data, dates, mask=nomask, dtype=None, copy=False, fill_value=None, subok=True, keep_mask=True, small_mask=True, hard_mask=False, **options): @@ -405,7 +410,8 @@ if isinstance(indx, int): return (indx, indx) elif isinstance(indx, str): - indx = self._dates.date_to_index(Date(self._dates.freq, string=indx)) + indx = self._dates.date_to_index( + Date(self._dates.freq, string=indx)) return (indx, indx) elif isDate(indx): indx = self._dates.date_to_index(indx) @@ -432,7 +438,8 @@ elif isTimeSeries(indx): indx = indx._series if getmask(indx) is not nomask: - msg = "Masked arrays must be filled before they can be used as indices!" + msg = "Masked arrays must be filled before they can be used " + \ + "as indices!" raise IndexError, msg return (indx,indx) @@ -680,8 +687,9 @@ relation : {'AFTER', 'BEFORE'}, optional *Returns*: - a new TimeSeries (data copied) with the .dates DateArray at the specified - frequency (the .asfreq method of the .dates property will be called) + a new TimeSeries with the .dates DateArray at the specified frequency (the + .asfreq method of the .dates property will be called). The data in the + resulting series will be a VIEW of the original series. *Notes*: The parameters are the exact same as for DateArray.asfreq, please see the @@ -691,18 +699,23 @@ if freq is None: return self return TimeSeries(self._series, - dates=self._dates.asfreq(freq, relation=relation), - copy=True) + dates=self._dates.asfreq(freq, relation=relation)) #..................................................... def transpose(self, *axes): - """ a.transpose(*axes) + """Returns a view of the series with axes transposed - Returns a view of 'a' with axes transposed. If no axes are given, - or None is passed, switches the order of the axes. For a 2-d - array, this is the usual matrix transpose. If axes are given, - they describe how the axes are permuted. +*Parameters*: + *axes : {integers} + the axes to swap + +*Returns*: + a VIEW of the series with axes for both the data and dates transposed + +*Notes*: + If no axes are given, the order of the axes is switched. For a 2-d array, + this is the usual matrix transpose. If axes are given, they describe how + the axes are permuted.
+""" if self._dates.size == self.size: result = super(TimeSeries, self).transpose(*axes) result._dates = self._dates.transpose(*axes) @@ -716,7 +729,7 @@ return result def split(self): - """Split a multiple series into individual columns.""" + """Split a multi-dimensional series into individual columns.""" if self.ndim == 1: return [self] else: @@ -727,28 +740,18 @@ **_attrib_dict(self)) for a in arr] def filled(self, fill_value=None): - """Returns an array of the same class as `_data`, - with masked values filled with `fill_value`. -Subclassing is preserved. + """Returns an array of the same class as `_data`, with masked values +filled with `fill_value`. Subclassing is preserved. -If `fill_value` is None, uses self.fill_value. - """ +*Parameters*: + fill_value : {None, singleton of type self.dtype}, optional + The value to fill in masked values with. If `fill_value` is None, uses + self.fill_value. +""" result = self._series.filled(fill_value=fill_value).view(type(self)) result._dates = self._dates - result.copy_attributes(self) return result - #...................................................... - def copy_attributes(self, oldseries, exclude=[]): - "Copies the attributes from oldseries if they are not in the exclude list." - attrlist = type(self)._genattributes - if not isinstance(oldseries, TimeSeries): - msg = "Series should be a valid TimeSeries object! (got <%s> instead)" - raise TimeSeriesError, msg % type(oldseries) - for attr in attrlist: - if not attr in exclude: - setattr(self, attr, getattr(oldseries, attr)) - #...................................................... # Pickling def __getstate__(self): "Returns the internal state of the TimeSeries, for pickling purposes." @@ -855,7 +858,7 @@ if hasattr(method, '__call__'): return method.__call__(*args, **params) return method - method = getattr(fromnumeric.asarray(caller), self._methodname) + method = getattr(numpy.asarray(caller), self._methodname) try: return method(*args, **params) except SystemError: @@ -1168,7 +1171,7 @@ start_date = max(start_date, dstart) end_date = min(end_date, dend) + 1 newseries[start_date:end_date] = a[start_date:end_date] - newseries.copy_attributes(a) + newseries._update_from(a) return newseries #..................................................... def align_series(*series, **kwargs): @@ -1270,7 +1273,7 @@ newseries = tempData.view(type(series)) newseries._dates = date_array(start_date=start_date, length=len(newseries), freq=toFreq) - newseries.copy_attributes(series) + newseries._update_from(series) return newseries def convert(series, freq, func=None, position='END', *args, **kwargs): @@ -1362,7 +1365,7 @@ newdata = inidata newseries = newdata.view(type(series)) newseries._dates = series._dates - newseries.copy_attributes(series) + newseries._update_from(series) return newseries TimeSeries.tshift = tshift #............................................................................... @@ -1392,7 +1395,7 @@ newdata[nper:] = 100*(series._series[nper:]/series._series[:-nper] - 1) newseries = newdata.view(type(series)) newseries._dates = series._dates - newseries.copy_attributes(series) + newseries._update_from(series) return newseries TimeSeries.pct = pct #............................................................................... 
From scipy-svn at scipy.org Sun Sep 30 20:37:58 2007 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sun, 30 Sep 2007 19:37:58 -0500 (CDT) Subject: [Scipy-svn] r3384 - trunk/scipy/io Message-ID: <20071001003758.49769C7C005@new.scipy.org> Author: stefan Date: 2007-09-30 19:37:44 -0500 (Sun, 30 Sep 2007) New Revision: 3384 Modified: trunk/scipy/io/datasource.py Log: Add __docformat__ specifier. Remove whitespace. Modified: trunk/scipy/io/datasource.py =================================================================== --- trunk/scipy/io/datasource.py 2007-09-29 19:23:41 UTC (rev 3383) +++ trunk/scipy/io/datasource.py 2007-10-01 00:37:44 UTC (rev 3384) @@ -3,6 +3,8 @@ """ +__docformat__ = "restructuredtext en" + import os import gzip import bz2 @@ -23,14 +25,14 @@ filename : {string} Filename to test. - + *Returns*: bool Results of test. - + """ - + _tmp, ext = path(filename).splitext() return ext in zipexts @@ -41,14 +43,14 @@ filename : {string} Filename to unzip. - + *Returns*: string Name of the unzipped file. - + """ - + if not iszip(filename): raise ValueError("file %s is not zipped"%filename) unzip_name, zipext = splitzipext(filename) @@ -82,7 +84,7 @@ If the filename does not have a zip extension then: base -> filename zip_ext -> None - + *Parameters*: filename : {string} @@ -92,7 +94,7 @@ base, zip_ext : {tuple} Tuple containing the base file... - + """ if iszip(filename): @@ -109,7 +111,7 @@ :Parameters: `pathstr` : string The string to be checked. - + :Returns: ``bool`` """ scheme, netloc, _, _, _, _ = urlparse(pathstr) @@ -139,11 +141,11 @@ The path of the cache can be specified or else use ~/.scipy/cache by default. - + """ - + def __init__(self, cachepath=None): - if cachepath is not None: + if cachepath is not None: self.path = path(cachepath) elif os.name == 'posix': self.path = path(os.environ["HOME"]).joinpath(".scipy","cache") @@ -164,14 +166,14 @@ (_, netloc, upath, _, _, _) = urlparse(uri) return self.path.joinpath(netloc, upath[1:]) - def filename(self, uri): + def filename(self, uri): """ Return the complete path + filename within the cache. :Returns: ``string`` """ return str(self.filepath(uri)) - + def cache(self, uri): """ Copy a file into the cache. @@ -187,7 +189,7 @@ except: raise IOError("url not found: "+str(uri)) file(upath, 'w').write(openedurl.read()) - + def clear(self): """ Delete all files in the cache. @@ -195,14 +197,14 @@ """ for _file in self.path.files(): _file.rm() - + def iscached(self, uri): """ Check if a file exists in the cache. :Returns: ``bool`` """ return self.filepath(uri).exists() - + def retrieve(self, uri): """ Retrieve a file from the cache. @@ -223,7 +225,7 @@ TODO: Improve DataSource docstring """ - + def __init__(self, cachepath=os.curdir): self._cache = Cache(cachepath) @@ -245,7 +247,7 @@ def filename(self, pathstr): found = None for name in self._possible_names(pathstr): - try: + try: if isurl(name): self.cache(name) found = self._cache.filename(name) @@ -286,7 +288,7 @@ TODO: Improve Repository docstring. """ - + #"""DataSource with an implied root.""" def __init__(self, baseurl, cachepath=None): DataSource.__init__(self, cachepath=cachepath)
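The datasource.py module cleaned up in r3382 and r3384 keys its behavior on the file_openers table, dispatching on the filename extension to choose gzip, bz2, or a plain file object. A distilled stand-alone sketch of that dispatch follows, with the long-deprecated file constructor replaced by the built-in open; the helper name anyopen is made up for the example.

import bz2
import gzip
import os

file_openers = {'.gz': gzip.open, '.bz2': bz2.BZ2File}

def anyopen(filename, mode='rb'):
    # pick an opener by extension, falling back to the plain built-in open
    ext = os.path.splitext(filename)[1]
    return file_openers.get(ext, open)(filename, mode)

With this in place, anyopen('torus.mtx.gz') decompresses transparently while anyopen('torus.mtx') reads the raw bytes, which mirrors what DataSource and Repository rely on when probing candidate filenames.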