[Scipy-svn] r4683 - in trunk/scipy: cluster cluster/tests io io/arff io/arff/tests io/matlab io/tests misc/tests signal/tests sparse/linalg/eigen/arpack/tests special special/tests stats stats/tests

scipy-svn at scipy.org
Wed Sep 3 12:58:55 EDT 2008


Author: alan.mcintyre
Date: 2008-09-03 11:58:28 -0500 (Wed, 03 Sep 2008)
New Revision: 4683

Modified:
   trunk/scipy/cluster/tests/vq_test.py
   trunk/scipy/cluster/vq.py
   trunk/scipy/io/arff/arffread.py
   trunk/scipy/io/arff/tests/test_data.py
   trunk/scipy/io/matlab/mio4.py
   trunk/scipy/io/matlab/mio5.py
   trunk/scipy/io/matlab/miobase.py
   trunk/scipy/io/npfile.py
   trunk/scipy/io/tests/test_npfile.py
   trunk/scipy/io/tests/test_recaster.py
   trunk/scipy/misc/tests/test_pilutil.py
   trunk/scipy/signal/tests/test_wavelets.py
   trunk/scipy/sparse/linalg/eigen/arpack/tests/test_speigs.py
   trunk/scipy/special/spfun_stats.py
   trunk/scipy/special/tests/test_spfun_stats.py
   trunk/scipy/stats/_support.py
   trunk/scipy/stats/tests/test_morestats.py
Log:
Standardize NumPy import as "import numpy as np".
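
For reference, a minimal illustrative snippet of the adopted convention (not part of the commit itself; the variable names below are made up):

    import numpy as np            # preferred alias, replacing "import numpy as N"

    x = np.zeros((3, 2))          # NumPy functions are then called through the "np" prefix
    mu = np.mean(x, axis=0)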


Modified: trunk/scipy/cluster/tests/vq_test.py
===================================================================
--- trunk/scipy/cluster/tests/vq_test.py	2008-09-03 16:18:43 UTC (rev 4682)
+++ trunk/scipy/cluster/tests/vq_test.py	2008-09-03 16:58:28 UTC (rev 4683)
@@ -1,6 +1,5 @@
-import numpy as N
+import numpy as np
 from scipy.cluster import vq
-#import vq_c as vq
 
 def python_vq(all_data,code_book):
     import time
@@ -12,8 +11,8 @@
     print '  first dist:', dist1[:5]
     print '  last codes:', codes1[-5:]
     print '  last dist:', dist1[-5:]
-    float_obs = all_data.astype(N.float32)
-    float_code = code_book.astype(N.float32)
+    float_obs = all_data.astype(np.float32)
+    float_code = code_book.astype(np.float32)
     t1 = time.time()
     codes1,dist1 = vq.vq(float_obs,float_code)
     t2 = time.time()
@@ -34,12 +33,12 @@
     return array(data)
 
 def main():
-    N.random.seed((1000,1000))
+    np.random.seed((1000,1000))
     Ncodes = 40
     Nfeatures = 16
     Nobs = 4000
-    code_book = N.random.normal(0,1,(Ncodes,Nfeatures))
-    features = N.random.normal(0,1,(Nobs,Nfeatures))
+    code_book = np.random.normal(0,1,(Ncodes,Nfeatures))
+    features = np.random.normal(0,1,(Nobs,Nfeatures))
     codes,dist = python_vq(features,code_book)
 
 if __name__ == '__main__':

Modified: trunk/scipy/cluster/vq.py
===================================================================
--- trunk/scipy/cluster/vq.py	2008-09-03 16:18:43 UTC (rev 4682)
+++ trunk/scipy/cluster/vq.py	2008-09-03 16:58:28 UTC (rev 4683)
@@ -83,7 +83,7 @@
 from numpy import shape, zeros, sqrt, argmin, minimum, array, \
      newaxis, arange, compress, equal, common_type, single, double, take, \
      std, mean
-import numpy as N
+import numpy as np
 
 class ClusterError(Exception):
     pass
@@ -233,8 +233,8 @@
     """
     # n = number of observations
     # d = number of features
-    if N.ndim(obs) == 1:
-        if not N.ndim(obs) == N.ndim(code_book):
+    if np.ndim(obs) == 1:
+        if not np.ndim(obs) == np.ndim(code_book):
             raise ValueError(
                     "Observation and code_book should have the same rank")
         else:
@@ -244,7 +244,7 @@
 
     # code books and observations should have same number of features and same
     # shape
-    if not N.ndim(obs) == N.ndim(code_book):
+    if not np.ndim(obs) == np.ndim(code_book):
         raise ValueError("Observation and code_book should have the same rank")
     elif not d == code_book.shape[1]:
         raise ValueError("Code book(%d) and obs(%d) should have the same " \
@@ -254,7 +254,7 @@
     code = zeros(n, dtype=int)
     min_dist = zeros(n)
     for i in range(n):
-        dist = N.sum((obs[i] - code_book) ** 2, 1)
+        dist = np.sum((obs[i] - code_book) ** 2, 1)
         code[i] = argmin(dist)
         min_dist[i] = dist[code[i]]
 
@@ -281,9 +281,9 @@
     raise RuntimeError("_py_vq_1d buggy, do not use rank 1 arrays for now")
     n = obs.size
     nc = code_book.size
-    dist = N.zeros((n, nc))
+    dist = np.zeros((n, nc))
     for i in range(nc):
-        dist[:, i] = N.sum(obs - code_book[i])
+        dist[:, i] = np.sum(obs - code_book[i])
     print dist
     code = argmin(dist)
     min_dist = dist[code]
@@ -327,7 +327,7 @@
             number of features (eg columns)""" % (code_book.shape[1], d))
 
     diff = obs[newaxis, :, :] - code_book[:,newaxis,:]
-    dist = sqrt(N.sum(diff * diff, -1))
+    dist = sqrt(np.sum(diff * diff, -1))
     code = argmin(dist, 0)
     min_dist = minimum.reduce(dist, 0) #the next line I think is equivalent
                                       #  - and should be faster
@@ -520,7 +520,7 @@
     else:
         n = data.size
 
-    p = N.random.permutation(n)
+    p = np.random.permutation(n)
     x = data[p[:k], :].copy()
 
     return x
@@ -541,23 +541,23 @@
 
     """
     def init_rank1(data):
-        mu  = N.mean(data)
-        cov = N.cov(data)
-        x = N.random.randn(k)
-        x *= N.sqrt(cov)
+        mu  = np.mean(data)
+        cov = np.cov(data)
+        x = np.random.randn(k)
+        x *= np.sqrt(cov)
         x += mu
         return x
     def init_rankn(data):
-        mu  = N.mean(data, 0)
-        cov = N.atleast_2d(N.cov(data, rowvar = 0))
+        mu  = np.mean(data, 0)
+        cov = np.atleast_2d(np.cov(data, rowvar = 0))
 
         # k rows, d cols (one row = one obs)
         # Generate k sample of a random variable ~ Gaussian(mu, cov)
-        x = N.random.randn(k, mu.size)
-        x = N.dot(x, N.linalg.cholesky(cov).T) + mu
+        x = np.random.randn(k, mu.size)
+        x = np.dot(x, np.linalg.cholesky(cov).T) + mu
         return x
 
-    nd = N.ndim(data)
+    nd = np.ndim(data)
     if nd == 1:
         return init_rank1(data)
     else:
@@ -628,7 +628,7 @@
     if missing not in _valid_miss_meth.keys():
         raise ValueError("Unkown missing method: %s" % str(missing))
     # If data is rank 1, then we have 1 dimension problem.
-    nd  = N.ndim(data)
+    nd  = np.ndim(data)
     if nd == 1:
         d = 1
         #raise ValueError("Input of rank 1 not supported yet")
@@ -637,13 +637,13 @@
     else:
         raise ValueError("Input of rank > 2 not supported")
 
-    if N.size(data) < 1:
+    if np.size(data) < 1:
         raise ValueError("Input has 0 items.")
 
     # If k is not a single value, then it should be compatible with data's
     # shape
-    if N.size(k) > 1 or minit == 'matrix':
-        if not nd == N.ndim(k):
+    if np.size(k) > 1 or minit == 'matrix':
+        if not nd == np.ndim(k):
             raise ValueError("k is not an int and has not same rank than data")
         if d == 1:
             nc = len(k)
@@ -683,9 +683,9 @@
         label = vq(data, code)[0]
         # Update the code by computing centroids using the new code book
         for j in range(nc):
-            mbs = N.where(label==j)
+            mbs = np.where(label==j)
             if mbs[0].size > 0:
-                code[j] = N.mean(data[mbs], axis=0)
+                code[j] = np.mean(data[mbs], axis=0)
             else:
                 missing()
 
@@ -694,13 +694,13 @@
 if __name__  == '__main__':
     pass
     #import _vq
-    #a = N.random.randn(4, 2)
-    #b = N.random.randn(2, 2)
+    #a = np.random.randn(4, 2)
+    #b = np.random.randn(2, 2)
 
     #print _vq.vq(a, b)
-    #print _vq.vq(N.array([[1], [2], [3], [4], [5], [6.]]),
-    #        N.array([[2.], [5.]]))
-    #print _vq.vq(N.array([1, 2, 3, 4, 5, 6.]), N.array([2., 5.]))
-    #_vq.vq(a.astype(N.float32), b.astype(N.float32))
-    #_vq.vq(a, b.astype(N.float32))
+    #print _vq.vq(np.array([[1], [2], [3], [4], [5], [6.]]),
+    #             np.array([[2.], [5.]]))
+    #print _vq.vq(np.array([1, 2, 3, 4, 5, 6.]), np.array([2., 5.]))
+    #_vq.vq(a.astype(np.float32), b.astype(np.float32))
+    #_vq.vq(a, b.astype(np.float32))
     #_vq.vq([0], b)

Modified: trunk/scipy/io/arff/arffread.py
===================================================================
--- trunk/scipy/io/arff/arffread.py	2008-09-03 16:18:43 UTC (rev 4682)
+++ trunk/scipy/io/arff/arffread.py	2008-09-03 16:58:28 UTC (rev 4683)
@@ -4,7 +4,7 @@
 import itertools
 import sys
 
-import numpy as N
+import numpy as np
 
 from scipy.io.arff.utils import partial
 
@@ -271,9 +271,9 @@
     """given a string x, convert it to a float. If the stripped string is a ?,
     return a Nan (missing value)."""
     if x.strip() == '?':
-        return N.nan
+        return np.nan
     else:
-        return N.float(x)
+        return np.float(x)
 
 def safe_nominal(value, pvalue):
     svalue = value.strip()
@@ -409,7 +409,7 @@
 
     # This can be used once we want to support integer as integer values and
     # not as numeric anymore (using masked arrays ?).
-    acls2dtype = {'real' : N.float, 'integer' : N.float, 'numeric' : N.float}
+    acls2dtype = {'real' : np.float, 'integer' : np.float, 'numeric' : np.float}
     acls2conv = {'real' : safe_float, 'integer' : safe_float, 'numeric' : safe_float}
     descr = []
     convertors = []
@@ -489,7 +489,7 @@
 
     a = generator(ofile, delim = delim)
     # No error should happen here: it is a bug otherwise
-    data = N.fromiter(a, descr)
+    data = np.fromiter(a, descr)
     return data, meta
 
 #-----
@@ -497,7 +497,7 @@
 #-----
 def basic_stats(data):
     nbfac = data.size * 1. / (data.size - 1)
-    return N.nanmin(data), N.nanmax(data), N.mean(data), N.std(data) * nbfac
+    return np.nanmin(data), np.nanmax(data), np.mean(data), np.std(data) * nbfac
 
 def print_attribute(name, tp, data):
     type = tp[0]

Modified: trunk/scipy/io/arff/tests/test_data.py
===================================================================
--- trunk/scipy/io/arff/tests/test_data.py	2008-09-03 16:18:43 UTC (rev 4682)
+++ trunk/scipy/io/arff/tests/test_data.py	2008-09-03 16:58:28 UTC (rev 4683)
@@ -16,8 +16,8 @@
         (1, 2, 3, 4, 'class3')]
 
 missing = os.path.join(data_path, 'missing.arff')
-expect_missing_raw = N.array([[1, 5], [2, 4], [N.nan, N.nan]])
-expect_missing = N.empty(3, [('yop', N.float), ('yap', N.float)])
+expect_missing_raw = np.array([[1, 5], [2, 4], [np.nan, np.nan]])
+expect_missing = np.empty(3, [('yop', np.float), ('yap', np.float)])
 expect_missing['yop'] = expect_missing_raw[:, 0]
 expect_missing['yap'] = expect_missing_raw[:, 1]
 

Modified: trunk/scipy/io/matlab/mio4.py
===================================================================
--- trunk/scipy/io/matlab/mio4.py	2008-09-03 16:18:43 UTC (rev 4682)
+++ trunk/scipy/io/matlab/mio4.py	2008-09-03 16:58:28 UTC (rev 4683)
@@ -1,7 +1,7 @@
 ''' Classes for read / write of matlab (TM) 4 files
 '''
 
-import numpy as N
+import numpy as np
 
 from miobase import *
 
@@ -76,7 +76,7 @@
         header['mclass'] = T
         header['dims'] = (data['mrows'], data['ncols'])
         header['is_complex'] = data['imagf'] == 1
-        remaining_bytes = header['dtype'].itemsize * N.product(header['dims'])
+        remaining_bytes = header['dtype'].itemsize * np.product(header['dims'])
         if header['is_complex'] and not header['mclass'] == mxSPARSE_CLASS:
             remaining_bytes *= 2
         next_pos = self.mat_stream.tell() + remaining_bytes
@@ -109,10 +109,10 @@
         num_bytes = dt.itemsize
         for d in dims:
             num_bytes *= d
-        arr = N.ndarray(shape=dims,
-                      dtype=dt,
-                      buffer=self.mat_stream.read(num_bytes),
-                      order='F')
+        arr = np.ndarray(shape=dims,
+                         dtype=dt,
+                         buffer=self.mat_stream.read(num_bytes),
+                         order='F')
         if copy:
             arr = arr.copy()
         return arr
@@ -122,9 +122,9 @@
     def __init__(self, array_reader, header):
         super(Mat4FullGetter, self).__init__(array_reader, header)
         if header['is_complex']:
-            self.mat_dtype = N.dtype(N.complex128)
+            self.mat_dtype = np.dtype(np.complex128)
         else:
-            self.mat_dtype = N.dtype(N.float64)
+            self.mat_dtype = np.dtype(np.float64)
 
     def get_raw_array(self):
         if self.header['is_complex']:
@@ -137,12 +137,12 @@
 
 class Mat4CharGetter(Mat4MatrixGetter):
     def get_raw_array(self):
-        arr = self.read_array().astype(N.uint8)
+        arr = self.read_array().astype(np.uint8)
         # ascii to unicode
         S = arr.tostring().decode('ascii')
-        return N.ndarray(shape=self.header['dims'],
-                       dtype=N.dtype('U1'),
-                       buffer = N.array(S)).copy()
+        return np.ndarray(shape=self.header['dims'],
+                          dtype=np.dtype('U1'),
+                          buffer = np.array(S)).copy()
 
 
 class Mat4SparseGetter(Mat4MatrixGetter):
@@ -166,14 +166,14 @@
         res = self.read_array()
         tmp = res[:-1,:]
         dims = res[-1,0:2]
-        I = N.ascontiguousarray(tmp[:,0],dtype='intc') #fixes byte order also
-        J = N.ascontiguousarray(tmp[:,1],dtype='intc')
+        I = np.ascontiguousarray(tmp[:,0],dtype='intc') #fixes byte order also
+        J = np.ascontiguousarray(tmp[:,1],dtype='intc')
         I -= 1  # for 1-based indexing
         J -= 1
         if res.shape[1] == 3:
-            V = N.ascontiguousarray(tmp[:,2],dtype='float')
+            V = np.ascontiguousarray(tmp[:,2],dtype='float')
         else:
-            V = N.ascontiguousarray(tmp[:,2],dtype='complex')
+            V = np.ascontiguousarray(tmp[:,2],dtype='complex')
             V.imag = tmp[:,3]
         if have_sparse:
             return scipy.sparse.coo_matrix((V,(I,J)), dims)
@@ -201,15 +201,15 @@
     def format_looks_right(self):
         # Mat4 files have a zero somewhere in first 4 bytes
         self.mat_stream.seek(0)
-        mopt_bytes = N.ndarray(shape=(4,),
-                             dtype=N.uint8,
-                             buffer = self.mat_stream.read(4))
+        mopt_bytes = np.ndarray(shape=(4,),
+                                dtype=np.uint8,
+                                buffer = self.mat_stream.read(4))
         self.mat_stream.seek(0)
         return 0 in mopt_bytes
 
     def guess_byte_order(self):
         self.mat_stream.seek(0)
-        mopt = self.read_dtype(N.dtype('i4'))
+        mopt = self.read_dtype(np.dtype('i4'))
         self.mat_stream.seek(0)
         if mopt < 0 or mopt > 5000:
             return ByteOrder.swapped_code
@@ -227,7 +227,7 @@
         '''
         if dims is None:
             dims = self.arr.shape
-        header = N.empty((), mdtypes_template['header'])
+        header = np.empty((), mdtypes_template['header'])
         M = not ByteOrder.little_endian
         O = 0
         header['mopt'] = (M * 1000 +
@@ -242,7 +242,7 @@
         self.write_string(self.name + '\0')
 
     def arr_to_2d(self):
-        self.arr = N.atleast_2d(self.arr)
+        self.arr = np.atleast_2d(self.arr)
         dims = self.arr.shape
         if len(dims) > 2:
             self.arr = self.arr.reshape(-1,dims[-1])
@@ -284,12 +284,12 @@
                           T=mxCHAR_CLASS)
         if self.arr.dtype.kind == 'U':
             # Recode unicode to ascii
-            n_chars = N.product(dims)
-            st_arr = N.ndarray(shape=(),
-                             dtype=self.arr_dtype_number(n_chars),
-                             buffer=self.arr)
+            n_chars = np.product(dims)
+            st_arr = np.ndarray(shape=(),
+                                dtype=self.arr_dtype_number(n_chars),
+                                buffer=self.arr)
             st = st_arr.item().encode('ascii')
-            self.arr = N.ndarray(shape=dims, dtype='S1', buffer=st)
+            self.arr = np.ndarray(shape=dims, dtype='S1', buffer=st)
         self.write_bytes(self.arr)
 
 
@@ -301,7 +301,7 @@
         '''
         A = self.arr.tocoo() #convert to sparse COO format (ijv)
         imagf = A.dtype.kind == 'c'
-        ijv = N.zeros((A.nnz + 1, 3+imagf), dtype='f8')
+        ijv = np.zeros((A.nnz + 1, 3+imagf), dtype='f8')
         ijv[:-1,0] = A.row
         ijv[:-1,1] = A.col
         ijv[:-1,0:2] += 1 # 1 based indexing
@@ -326,13 +326,13 @@
     if have_sparse:
         if scipy.sparse.issparse(arr):
             return Mat4SparseWriter(stream, arr, name)
-    arr = N.array(arr)
+    arr = np.array(arr)
     dtt = arr.dtype.type
-    if dtt is N.object_:
+    if dtt is np.object_:
         raise TypeError, 'Cannot save object arrays in Mat4'
-    elif dtt is N.void:
+    elif dtt is np.void:
         raise TypeError, 'Cannot save void type arrays'
-    elif dtt in (N.unicode_, N.string_):
+    elif dtt in (np.unicode_, np.string_):
         return Mat4CharWriter(stream, arr, name)
     else:
         return Mat4NumericWriter(stream, arr, name)

Modified: trunk/scipy/io/matlab/mio5.py
===================================================================
--- trunk/scipy/io/matlab/mio5.py	2008-09-03 16:18:43 UTC (rev 4682)
+++ trunk/scipy/io/matlab/mio5.py	2008-09-03 16:58:28 UTC (rev 4683)
@@ -29,7 +29,7 @@
 import zlib
 from copy import copy as pycopy
 from cStringIO import StringIO
-import numpy as N
+import numpy as np
 
 from miobase import *
 
@@ -189,9 +189,9 @@
 
     def read_element(self, copy=True):
         raw_tag = self.mat_stream.read(8)
-        tag = N.ndarray(shape=(),
-                        dtype=self.dtypes['tag_full'],
-                        buffer=raw_tag)
+        tag = np.ndarray(shape=(),
+                         dtype=self.dtypes['tag_full'],
+                         buffer=raw_tag)
         mdtype = tag['mdtype'].item()
 
         byte_count = mdtype >> 16
@@ -201,9 +201,9 @@
             mdtype = mdtype & 0xFFFF
             dt = self.dtypes[mdtype]
             el_count = byte_count // dt.itemsize
-            return N.ndarray(shape=(el_count,),
-                             dtype=dt,
-                             buffer=raw_tag[4:])
+            return np.ndarray(shape=(el_count,),
+                              dtype=dt,
+                              buffer=raw_tag[4:])
 
         byte_count = tag['byte_count'].item()
         if mdtype == miMATRIX:
@@ -217,9 +217,9 @@
         else: # numeric data
             dt = self.dtypes[mdtype]
             el_count = byte_count // dt.itemsize
-            el = N.ndarray(shape=(el_count,),
-                         dtype=dt,
-                         buffer=self.mat_stream.read(byte_count))
+            el = np.ndarray(shape=(el_count,),
+                            dtype=dt,
+                            buffer=self.mat_stream.read(byte_count))
             if copy:
                 el = el.copy()
 
@@ -325,7 +325,7 @@
         self.mat_dtype = 'f8'
 
     def get_raw_array(self):
-        return N.array([[]])
+        return np.array([[]])
 
 
 class Mat5NumericMatrixGetter(Mat5MatrixGetter):
@@ -333,7 +333,7 @@
     def __init__(self, array_reader, header):
         super(Mat5NumericMatrixGetter, self).__init__(array_reader, header)
         if header['is_logical']:
-            self.mat_dtype = N.dtype('bool')
+            self.mat_dtype = np.dtype('bool')
         else:
             self.mat_dtype = self.class_dtypes[header['mclass']]
 
@@ -345,10 +345,10 @@
             res = res + (res_j * 1j)
         else:
             res = self.read_element()
-        return N.ndarray(shape=self.header['dims'],
-                       dtype=res.dtype,
-                       buffer=res,
-                       order='F')
+        return np.ndarray(shape=self.header['dims'],
+                          dtype=res.dtype,
+                          buffer=res,
+                          order='F')
 
 
 class Mat5SparseMatrixGetter(Mat5MatrixGetter):
@@ -390,28 +390,28 @@
     def get_raw_array(self):
         res = self.read_element()
         # Convert non-string types to unicode
-        if isinstance(res, N.ndarray):
-            if res.dtype.type == N.uint16:
+        if isinstance(res, np.ndarray):
+            if res.dtype.type == np.uint16:
                 codec = miUINT16_codec
                 if self.codecs['uint16_len'] == 1:
-                    res = res.astype(N.uint8)
-            elif res.dtype.type in (N.uint8, N.int8):
+                    res = res.astype(np.uint8)
+            elif res.dtype.type in (np.uint8, np.int8):
                 codec = 'ascii'
             else:
                 raise TypeError, 'Did not expect type %s' % res.dtype
             res = res.tostring().decode(codec)
-        return N.ndarray(shape=self.header['dims'],
-                       dtype=N.dtype('U1'),
-                       buffer=N.array(res),
-                       order='F').copy()
+        return np.ndarray(shape=self.header['dims'],
+                          dtype=np.dtype('U1'),
+                          buffer=np.array(res),
+                          order='F').copy()
 
 
 class Mat5CellMatrixGetter(Mat5MatrixGetter):
     def get_raw_array(self):
         # Account for fortran indexing of cells
         tupdims = tuple(self.header['dims'][::-1])
-        length = N.product(tupdims)
-        result = N.empty(length, dtype=object)
+        length = np.product(tupdims)
+        result = np.empty(length, dtype=object)
         for i in range(length):
             result[i] = self.get_item()
         return result.reshape(tupdims).T
@@ -551,16 +551,16 @@
     def format_looks_right(self):
         # Mat4 files have a zero somewhere in first 4 bytes
         self.mat_stream.seek(0)
-        mopt_bytes = N.ndarray(shape=(4,),
-                             dtype=N.uint8,
-                             buffer = self.mat_stream.read(4))
+        mopt_bytes = np.ndarray(shape=(4,),
+                                dtype=np.uint8,
+                                buffer = self.mat_stream.read(4))
         self.mat_stream.seek(0)
         return 0 not in mopt_bytes
 
 
 class Mat5MatrixWriter(MatStreamWriter):
 
-    mat_tag = N.zeros((), mdtypes_template['tag_full'])
+    mat_tag = np.zeros((), mdtypes_template['tag_full'])
     mat_tag['mdtype'] = miMATRIX
 
     def __init__(self, file_stream, arr, name, is_global=False):
@@ -572,7 +572,7 @@
 
     def write_element(self, arr, mdtype=None):
         # write tag, data
-        tag = N.zeros((), mdtypes_template['tag_full'])
+        tag = np.zeros((), mdtypes_template['tag_full'])
         if mdtype is None:
             tag['mdtype'] = np_to_mtypes[arr.dtype.str[1:]]
         else:
@@ -585,7 +585,7 @@
         self.write_bytes(arr)
 
         # pad to next 64-bit boundary
-        self.write_bytes(N.zeros((padding,),'u1'))
+        self.write_bytes(np.zeros((padding,),'u1'))
 
     def write_header(self, mclass,
                      is_global=False,
@@ -602,7 +602,7 @@
         self._mat_tag_pos = self.file_stream.tell()
         self.write_dtype(self.mat_tag)
         # write array flags (complex, global, logical, class, nzmax)
-        af = N.zeros((), mdtypes_template['array_flags'])
+        af = np.zeros((), mdtypes_template['array_flags'])
         af['data_type'] = miUINT32
         af['byte_count'] = 8
         flags = is_complex << 3 | is_global << 2 | is_logical << 1
@@ -611,13 +611,13 @@
         self.write_dtype(af)
         # write array shape
         if self.arr.ndim < 2:
-            new_arr = N.atleast_2d(self.arr)
+            new_arr = np.atleast_2d(self.arr)
             if type(new_arr) != type(self.arr):
                 raise ValueError("Array should be 2-dimensional.")
             self.arr = new_arr
-        self.write_element(N.array(self.arr.shape, dtype='i4'))
+        self.write_element(np.array(self.arr.shape, dtype='i4'))
         # write name
-        self.write_element(N.array([ord(c) for c in self.name], 'i1'))
+        self.write_element(np.array([ord(c) for c in self.name], 'i1'))
 
     def update_matrix_tag(self):
         curr_pos = self.file_stream.tell()
@@ -657,12 +657,12 @@
         self.write_header(mclass=mxCHAR_CLASS)
         if self.arr.dtype.kind == 'U':
             # Recode unicode using self.codec
-            n_chars = N.product(self.arr.shape)
-            st_arr = N.ndarray(shape=(),
-                             dtype=self.arr_dtype_number(n_chars),
-                             buffer=self.arr)
+            n_chars = np.product(self.arr.shape)
+            st_arr = np.ndarray(shape=(),
+                                dtype=self.arr_dtype_number(n_chars),
+                                buffer=self.arr)
             st = st_arr.item().encode(self.codec)
-            self.arr = N.ndarray(shape=(len(st)), dtype='u1', buffer=st)
+            self.arr = np.ndarray(shape=(len(st)), dtype='u1', buffer=st)
         self.write_element(self.arr,mdtype=miUTF8)
         self.update_matrix_tag()
 
@@ -709,7 +709,7 @@
         if have_sparse:
             if scipy.sparse.issparse(arr):
                 return Mat5SparseWriter(self.stream, arr, name, is_global)
-        arr = N.array(arr)
+        arr = np.array(arr)
         if arr.dtype.hasobject:
             types, arr_type = self.classify_mobjects(arr)
             if arr_type == 'c':
@@ -740,13 +740,13 @@
                         o  - object array
         '''
         n = objarr.size
-        types = N.empty((n,), dtype='S1')
+        types = np.empty((n,), dtype='S1')
         types[:] = 'i'
         type_set = set()
         flato = objarr.flat
         for i in range(n):
             obj = flato[i]
-            if isinstance(obj, N.ndarray):
+            if isinstance(obj, np.ndarray):
                 types[i] = 'a'
                 continue
             try:
@@ -784,11 +784,11 @@
             unicode_strings)
         # write header
         import os, time
-        hdr =  N.zeros((), mdtypes_template['file_header'])
+        hdr =  np.zeros((), mdtypes_template['file_header'])
         hdr['description']='MATLAB 5.0 MAT-file Platform: %s, Created on: %s' % (
                             os.name,time.asctime())
         hdr['version']= 0x0100
-        hdr['endian_test']=N.ndarray(shape=(),dtype='S2',buffer=N.uint16(0x4d49))
+        hdr['endian_test']=np.ndarray(shape=(),dtype='S2',buffer=np.uint16(0x4d49))
         file_stream.write(hdr.tostring())
 
     def get_unicode_strings(self):
@@ -812,7 +812,7 @@
             stream = self.writer_getter.stream
             if self.do_compression:
                 str = zlib.compress(stream.getvalue(stream.tell()))
-                tag = N.empty((), mdtypes_template['tag_full'])
+                tag = np.empty((), mdtypes_template['tag_full'])
                 tag['mdtype'] = miCOMPRESSED
                 tag['byte_count'] = len(str)
                 self.file_stream.write(tag.tostring() + str)

Modified: trunk/scipy/io/matlab/miobase.py
===================================================================
--- trunk/scipy/io/matlab/miobase.py	2008-09-03 16:18:43 UTC (rev 4682)
+++ trunk/scipy/io/matlab/miobase.py	2008-09-03 16:58:28 UTC (rev 4683)
@@ -6,7 +6,7 @@
 
 import sys
 
-import numpy as N
+import numpy as np
 
 try:
     import scipy.sparse
@@ -71,10 +71,10 @@
         a_dtype is assumed to be correct endianness
         '''
         num_bytes = a_dtype.itemsize
-        arr = N.ndarray(shape=(),
-                        dtype=a_dtype,
-                        buffer=self.mat_stream.read(num_bytes),
-                        order='F')
+        arr = np.ndarray(shape=(),
+                         dtype=a_dtype,
+                         buffer=self.mat_stream.read(num_bytes),
+                         order='F')
         return arr
 
     def read_ztstring(self, num_bytes):
@@ -182,8 +182,7 @@
     def convert_dtypes(self, dtype_template):
         dtypes = dtype_template.copy()
         for k in dtypes:
-            dtypes[k] = N.dtype(dtypes[k]).newbyteorder(
-                self.order_code)
+            dtypes[k] = np.dtype(dtypes[k]).newbyteorder(self.order_code)
         return dtypes
 
     def matrix_getter_factory(self):
@@ -228,7 +227,7 @@
                     str_arr = arr.reshape(
                         (small_product(n_dims),
                          dims[-1]))
-                    arr = N.empty(n_dims, dtype=object)
+                    arr = np.empty(n_dims, dtype=object)
                     for i in range(0, n_dims[-1]):
                         arr[...,i] = self.chars_to_str(str_arr[i])
                 else: # return string
@@ -239,9 +238,9 @@
                 if getter.mat_dtype is not None:
                     arr = arr.astype(getter.mat_dtype)
             if self.squeeze_me:
-                arr = N.squeeze(arr)
+                arr = np.squeeze(arr)
                 if not arr.size:
-                    arr = N.array([])
+                    arr = np.array([])
                 elif not arr.shape: # 0d coverted to scalar
                     arr = arr.item()
             return arr
@@ -249,10 +248,10 @@
 
     def chars_to_str(self, str_arr):
         ''' Convert string array to string '''
-        dt = N.dtype('U' + str(small_product(str_arr.shape)))
-        return N.ndarray(shape=(),
-                       dtype = dt,
-                       buffer = str_arr.copy()).item()
+        dt = np.dtype('U' + str(small_product(str_arr.shape)))
+        return np.ndarray(shape=(),
+                          dtype = dt,
+                          buffer = str_arr.copy()).item()
 
     def get_variables(self, variable_names=None):
         ''' get variables from stream as dictionary
@@ -353,7 +352,7 @@
 
     def arr_dtype_number(self, num):
         ''' Return dtype for given number of items per element'''
-        return N.dtype(self.arr.dtype.str[:2] + str(num))
+        return np.dtype(self.arr.dtype.str[:2] + str(num))
 
     def arr_to_chars(self):
         ''' Convert string array to char array '''
@@ -361,9 +360,9 @@
         if not dims:
             dims = [1]
         dims.append(int(self.arr.dtype.str[2:]))
-        self.arr = N.ndarray(shape=dims,
-                           dtype=self.arr_dtype_number(1),
-                           buffer=self.arr)
+        self.arr = np.ndarray(shape=dims,
+                              dtype=self.arr_dtype_number(1),
+                              buffer=self.arr)
 
     def write_bytes(self, arr):
         self.file_stream.write(arr.tostring(order='F'))

Modified: trunk/scipy/io/npfile.py
===================================================================
--- trunk/scipy/io/npfile.py	2008-09-03 16:18:43 UTC (rev 4682)
+++ trunk/scipy/io/npfile.py	2008-09-03 16:58:28 UTC (rev 4683)
@@ -6,7 +6,7 @@
 
 import sys
 
-import numpy as N
+import numpy as np
 
 __all__ = ['sys_endian_code', 'npfile']
 
@@ -40,9 +40,9 @@
 
     Example use:
     >>> from StringIO import StringIO
-    >>> import numpy as N
+    >>> import numpy as np
     >>> from scipy.io import npfile
-    >>> arr = N.arange(10).reshape(5,2)
+    >>> arr = np.arange(10).reshape(5,2)
     >>> # Make file-like object (could also be file name)
     >>> my_file = StringIO()
     >>> npf = npfile(my_file)
@@ -167,7 +167,7 @@
                     (if None from self.order)
         '''
         endian, order = self._endian_order(endian, order)
-        data = N.asarray(data)
+        data = np.asarray(data)
         dt_endian = self._endian_from_dtype(data.dtype)
         if not endian == 'dtype':
             if dt_endian != endian:
@@ -194,7 +194,7 @@
         arr       - array from file with given dtype (dt)
         '''
         endian, order = self._endian_order(endian, order)
-        dt = N.dtype(dt)
+        dt = np.dtype(dt)
         try:
             shape = list(shape)
         except TypeError:
@@ -203,7 +203,7 @@
         if minus_ones == 0:
             pass
         elif minus_ones == 1:
-            known_dimensions_size = -N.product(shape,axis=0) * dt.itemsize
+            known_dimensions_size = -np.product(shape,axis=0) * dt.itemsize
             unknown_dimension_size, illegal = divmod(self.remaining_bytes(),
                                                      known_dimensions_size)
             if illegal:
@@ -212,10 +212,10 @@
         else:
             raise ValueError(
                 "illegal -1 count; can only specify one unknown dimension")
-        sz = dt.itemsize * N.product(shape)
+        sz = dt.itemsize * np.product(shape)
         dt_endian = self._endian_from_dtype(dt)
         buf = self.file.read(sz)
-        arr = N.ndarray(shape=shape,
+        arr = np.ndarray(shape=shape,
                          dtype=dt,
                          buffer=buf,
                          order=order)
@@ -223,7 +223,7 @@
             return arr.byteswap()
         return arr.copy()
 
-npfile = N.deprecate_with_doc("""
+npfile = np.deprecate_with_doc("""
 You can achieve the same effect as using npfile, using ndarray.tofile
 and numpy.fromfile.
 

Modified: trunk/scipy/io/tests/test_npfile.py
===================================================================
--- trunk/scipy/io/tests/test_npfile.py	2008-09-03 16:18:43 UTC (rev 4682)
+++ trunk/scipy/io/tests/test_npfile.py	2008-09-03 16:58:28 UTC (rev 4683)
@@ -2,7 +2,7 @@
 from StringIO import StringIO
 from tempfile import mkstemp
 from numpy.testing import *
-import numpy as N
+import numpy as np
 
 from scipy.io.npfile import npfile, sys_endian_code
 
@@ -12,7 +12,7 @@
         fd, fname = mkstemp()
         os.close(fd)
         npf = npfile(fname)
-        arr = N.reshape(N.arange(10), (5,2))
+        arr = np.reshape(np.arange(10), (5,2))
         self.assertRaises(IOError, npf.write_array, arr)
         npf.close()
         npf = npfile(fname, 'w')
@@ -58,7 +58,7 @@
 
     def test_read_write_array(self):
         npf = npfile(StringIO())
-        arr = N.reshape(N.arange(10), (5,2))
+        arr = np.reshape(np.arange(10), (5,2))
         # Arr as read in fortran order
         f_arr = arr.reshape((2,5)).T
         # Arr written in fortran order read in C order

Modified: trunk/scipy/io/tests/test_recaster.py
===================================================================
--- trunk/scipy/io/tests/test_recaster.py	2008-09-03 16:18:43 UTC (rev 4682)
+++ trunk/scipy/io/tests/test_recaster.py	2008-09-03 16:58:28 UTC (rev 4683)
@@ -1,4 +1,4 @@
-import numpy as N
+import numpy as np
 from numpy.testing import *
 
 from scipy.io.recaster import sctype_attributes, Recaster, RecastError
@@ -15,14 +15,14 @@
         R = Recaster()
         assert set(R.sctype_list) == set(sctype_attributes().keys()), \
                                'Default recaster should include all system types'
-        T = N.float32
+        T = np.float32
         R = Recaster([T])
         assert R.sctype_list == [T], 'Scalar type list not correctly set'
         # Setting tolerances
         R = Recaster()
         tols = R.default_sctype_tols()
         assert tols == R.sctype_tols, 'Unexpected tols dictionary'
-        F = N.finfo(T)
+        F = np.finfo(T)
         R = Recaster(sctype_tols={T: {
             'rtol': F.eps*2,
             'atol': F.tiny*2,
@@ -31,8 +31,8 @@
                'Rtol not correctly set'
         assert R.sctype_tols[T]['atol'] == F.tiny*2, \
                'Atol not correctly set'
-        T = N.complex128
-        F = N.finfo(T)
+        T = np.complex128
+        F = np.finfo(T)
         assert R.sctype_tols[T]['rtol'] == F.eps, \
                'Rtol defaults not correctly set'
         assert R.sctype_tols[T]['atol'] == F.tiny, \
@@ -47,22 +47,22 @@
         # Define expected type output from fp recast of value
         sta = sctype_attributes()
         inp_outp = (
-            (1, N.complex128, 'c', sta[N.complex128]['size'], 0, N.complex128),
-            (1, N.complex128, 'c', sta[N.complex128]['size'], 1, N.complex64),
-            (1, N.complex128, 'c', sta[N.complex64]['size'], 0, N.complex64),
-            (1, N.complex128, 'f', sta[N.float64]['size'], 0, N.float64),
-            (1.0+1j, N.complex128, 'f', sta[N.complex128]['size'], 0, None),
-            (1, N.float64, 'f', sta[N.float64]['size'], 0, N.float64),
-            (1, N.float64, 'f', sta[N.float64]['size'], 1, N.float32),
-            (1, N.float64, 'f', sta[N.float32]['size'], 0, N.float32),
-            (1, N.float64, 'c', sta[N.complex128]['size'], 0, N.complex128),
-            (1, N.float64, 'c', sta[N.complex128]['size'], 1, N.complex64),
-            (1, N.int32, 'f', sta[N.float64]['size'], 0, N.float64),
-            (1, N.int32, 'f', sta[N.float64]['size'], 1, N.float32),
-            (1, N.float64, 'f', 0, 0, None),
+            (1, np.complex128, 'c', sta[np.complex128]['size'], 0, np.complex128),
+            (1, np.complex128, 'c', sta[np.complex128]['size'], 1, np.complex64),
+            (1, np.complex128, 'c', sta[np.complex64]['size'], 0, np.complex64),
+            (1, np.complex128, 'f', sta[np.float64]['size'], 0, np.float64),
+            (1.0+1j, np.complex128, 'f', sta[np.complex128]['size'], 0, None),
+            (1, np.float64, 'f', sta[np.float64]['size'], 0, np.float64),
+            (1, np.float64, 'f', sta[np.float64]['size'], 1, np.float32),
+            (1, np.float64, 'f', sta[np.float32]['size'], 0, np.float32),
+            (1, np.float64, 'c', sta[np.complex128]['size'], 0, np.complex128),
+            (1, np.float64, 'c', sta[np.complex128]['size'], 1, np.complex64),
+            (1, np.int32, 'f', sta[np.float64]['size'], 0, np.float64),
+            (1, np.int32, 'f', sta[np.float64]['size'], 1, np.float32),
+            (1, np.float64, 'f', 0, 0, None),
             )
         for value, inp, kind, max_size, continue_down, outp in inp_outp:
-            arr = N.array(value, dtype=inp)
+            arr = np.array(value, dtype=inp)
             arr = R.cast_to_fp(arr, kind, max_size, continue_down)
             if outp is None:
                 assert arr is None, \
@@ -79,29 +79,29 @@
         # Smallest int sctype with full recaster
         params = sctype_attributes()
         RF = Recaster()
-        test_triples = [(N.uint8, 0, 255),
-                      (N.int8, -128, 0),
-                      (N.uint16, 0, params[N.uint16]['max']),
-                      (N.int16, params[N.int16]['min'], 0),
-                      (N.uint32, 0, params[N.uint32]['max']),
-                      (N.int32, params[N.int32]['min'], 0),
-                      (N.uint64, 0, params[N.uint64]['max']),
-                      (N.int64, params[N.int64]['min'], 0)]
+        test_triples = [(np.uint8, 0, 255),
+                      (np.int8, -128, 0),
+                      (np.uint16, 0, params[np.uint16]['max']),
+                      (np.int16, params[np.int16]['min'], 0),
+                      (np.uint32, 0, params[np.uint32]['max']),
+                      (np.int32, params[np.int32]['min'], 0),
+                      (np.uint64, 0, params[np.uint64]['max']),
+                      (np.int64, params[np.int64]['min'], 0)]
         for T, mn, mx in test_triples:
             rt = RF.smallest_int_sctype(mx, mn)
-            assert N.dtype(rt) == N.dtype(T), \
+            assert np.dtype(rt) == np.dtype(T), \
                    'Expected %s, got %s type' % (T, rt)
         # Smallest int sctype with restricted recaster
-        mmax = params[N.int32]['max']
-        mmin = params[N.int32]['min']
-        RR = Recaster([N.int32])
+        mmax = params[np.int32]['max']
+        mmin = params[np.int32]['min']
+        RR = Recaster([np.int32])
         for kind in ('int', 'uint'):
-            for T in N.sctypes[kind]:
+            for T in np.sctypes[kind]:
                 mx = params[T]['max']
                 mn = params[T]['min']
                 rt = RR.smallest_int_sctype(mx, mn)
                 if mx <= mmax and mn >= mmin:
-                    assert rt == N.int32, \
+                    assert rt == np.int32, \
                            'Expected int32 type, got %s' % rt
                 else:
                     assert rt is None, \
@@ -110,62 +110,62 @@
         mx = 1000
         mn = 0
         rt = RF.smallest_int_sctype(mx, mn)
-        assert rt == N.int16, 'Expected int16, got %s' % rt
+        assert rt == np.int16, 'Expected int16, got %s' % rt
         rt = RF.smallest_int_sctype(mx, mn, 'i')
-        assert rt == N.int16, 'Expected int16, got %s' % rt
+        assert rt == np.int16, 'Expected int16, got %s' % rt
         rt = RF.smallest_int_sctype(mx, mn, prefer='u')
-        assert rt == N.uint16, 'Expected uint16, got %s' % rt
+        assert rt == np.uint16, 'Expected uint16, got %s' % rt
 
     def test_recasts(self):
-        valid_types = [N.int32, N.complex128, N.float64]
+        valid_types = [np.int32, np.complex128, np.float64]
         # Test smallest
         R = Recaster(valid_types, recast_options='smallest')
         inp_outp = (
-            (1, N.complex128, N.int32),
-            (1, N.complex64, N.int32),
-            (1.0+1j, N.complex128, N.complex128),
-            (1.0+1j, N.complex64, N.complex128),
-            (1, N.float64, N.int32),
-            (1, N.float32, N.int32),
-            (1.1, N.float64, N.float64),
-            (-1e12, N.int64, N.float64),
+            (1, np.complex128, np.int32),
+            (1, np.complex64, np.int32),
+            (1.0+1j, np.complex128, np.complex128),
+            (1.0+1j, np.complex64, np.complex128),
+            (1, np.float64, np.int32),
+            (1, np.float32, np.int32),
+            (1.1, np.float64, np.float64),
+            (-1e12, np.int64, np.float64),
             )
         self.run_io_recasts(R, inp_outp)
         # Test only_if_none
         R = Recaster(valid_types, recast_options='only_if_none')
         inp_outp = (
-            (1, N.complex128, N.complex128),
-            (1, N.complex64, N.int32),
-            (1.0+1j, N.complex128, N.complex128),
-            (1.0+1j, N.complex64, N.complex128),
-            (1, N.float64, N.float64),
-            (1, N.float32, N.int32),
-            (1.1, N.float64, N.float64),
-            (-1e12, N.int64, N.float64),
+            (1, np.complex128, np.complex128),
+            (1, np.complex64, np.int32),
+            (1.0+1j, np.complex128, np.complex128),
+            (1.0+1j, np.complex64, np.complex128),
+            (1, np.float64, np.float64),
+            (1, np.float32, np.int32),
+            (1.1, np.float64, np.float64),
+            (-1e12, np.int64, np.float64),
             )
         self.run_io_recasts(R, inp_outp)
         # Test preserve_precision
         R = Recaster(valid_types, recast_options='preserve_precision')
         inp_outp = (
-            (1, N.complex128, N.complex128),
-            (1, N.complex64, N.complex128),
-            (1.0+1j, N.complex128, N.complex128),
-            (1.0+1j, N.complex64, N.complex128),
-            (1, N.float64, N.float64),
-            (1, N.float32, N.float64),
-            (1.1, N.float64, N.float64),
-            (-1e12, N.int64, None),
+            (1, np.complex128, np.complex128),
+            (1, np.complex64, np.complex128),
+            (1.0+1j, np.complex128, np.complex128),
+            (1.0+1j, np.complex64, np.complex128),
+            (1, np.float64, np.float64),
+            (1, np.float32, np.float64),
+            (1.1, np.float64, np.float64),
+            (-1e12, np.int64, None),
             )
         self.run_io_recasts(R, inp_outp)
 
     def run_io_recasts(self, R, inp_outp):
         ''' Runs sets of value, input, output tests '''
         for value, inp, outp in inp_outp:
-            arr = N.array(value, inp)
+            arr = np.array(value, inp)
             if outp is None:
                 self.assertRaises(RecastError, R.recast, arr)
                 continue
-            arr = R.recast(N.array(value, inp))
+            arr = R.recast(np.array(value, inp))
             assert arr is not None, \
                    'Expected %s from %s, got None' % (outp, inp)
             dtt = arr.dtype.type

Modified: trunk/scipy/misc/tests/test_pilutil.py
===================================================================
--- trunk/scipy/misc/tests/test_pilutil.py	2008-09-03 16:18:43 UTC (rev 4682)
+++ trunk/scipy/misc/tests/test_pilutil.py	2008-09-03 16:58:28 UTC (rev 4683)
@@ -1,6 +1,6 @@
 import os.path
 import glob
-import numpy as N
+import numpy as np
 
 from numpy.testing import *
 
@@ -19,14 +19,14 @@
 
 class TestPILUtil(TestCase):
     def test_imresize(self):
-        im = N.random.random((10,20))
-        for T in N.sctypes['float'] + [float]:
+        im = np.random.random((10,20))
+        for T in np.sctypes['float'] + [float]:
             im1 = pilutil.imresize(im,T(1.1))
             assert_equal(im1.shape,(11,22))
 
     def test_bytescale(self):
-        x = N.array([0,1,2],N.uint8)
-        y = N.array([0,1,2])
+        x = np.array([0,1,2],np.uint8)
+        y = np.array([0,1,2])
         assert_equal(pilutil.bytescale(x),x)
         assert_equal(pilutil.bytescale(y),[0,127,255])
 

Modified: trunk/scipy/signal/tests/test_wavelets.py
===================================================================
--- trunk/scipy/signal/tests/test_wavelets.py	2008-09-03 16:18:43 UTC (rev 4682)
+++ trunk/scipy/signal/tests/test_wavelets.py	2008-09-03 16:58:28 UTC (rev 4683)
@@ -1,4 +1,4 @@
-import numpy as N
+import numpy as np
 from numpy.testing import *
 
 from scipy.signal import wavelets
@@ -36,15 +36,15 @@
         assert_equal(x,y)
 
         # miscellaneous tests:
-        x = N.array([1.73752399e-09 +9.84327394e-25j,
-                     6.49471756e-01 +0.00000000e+00j,
-                     1.73752399e-09 -9.84327394e-25j])
+        x = np.array([1.73752399e-09 +9.84327394e-25j,
+                      6.49471756e-01 +0.00000000e+00j,
+                      1.73752399e-09 -9.84327394e-25j])
         y = wavelets.morlet(3,w=2,complete=True)
         assert_array_almost_equal(x,y)
 
-        x = N.array([2.00947715e-09 +9.84327394e-25j,
-                     7.51125544e-01 +0.00000000e+00j,
-                     2.00947715e-09 -9.84327394e-25j])
+        x = np.array([2.00947715e-09 +9.84327394e-25j,
+                      7.51125544e-01 +0.00000000e+00j,
+                      2.00947715e-09 -9.84327394e-25j])
         y = wavelets.morlet(3,w=2,complete=False)
         assert_array_almost_equal(x,y,decimal=2)
 

Modified: trunk/scipy/sparse/linalg/eigen/arpack/tests/test_speigs.py
===================================================================
--- trunk/scipy/sparse/linalg/eigen/arpack/tests/test_speigs.py	2008-09-03 16:18:43 UTC (rev 4682)
+++ trunk/scipy/sparse/linalg/eigen/arpack/tests/test_speigs.py	2008-09-03 16:58:28 UTC (rev 4683)
@@ -5,20 +5,19 @@
 from scipy.sparse.linalg.interface import aslinearoperator
 from scipy.sparse.linalg.eigen.arpack.speigs import *
 
+import numpy as np
 
-import numpy as N
-
 class TestEigs(TestCase):
     def test(self):
         maxn=15                # Dimension of square matrix to be solved
         # Use a PDP^-1 factorisation to construct matrix with known
         # eiegevalues/vectors. Used random eiegenvectors initially.
-        P = N.mat(N.random.random((maxn,)*2))
-        P /= map(N.linalg.norm, P.T)            # Normalise the eigenvectors
-        D = N.mat(N.zeros((maxn,)*2))
-        D[range(maxn), range(maxn)] = (N.arange(maxn, dtype=float)+1)/N.sqrt(maxn)
-        A = P*D*N.linalg.inv(P)
-        vals = N.array(D.diagonal())[0]
+        P = np.mat(np.random.random((maxn,)*2))
+        P /= map(np.linalg.norm, P.T)            # Normalise the eigenvectors
+        D = np.mat(np.zeros((maxn,)*2))
+        D[range(maxn), range(maxn)] = (np.arange(maxn, dtype=float)+1)/np.sqrt(maxn)
+        A = P*D*np.linalg.inv(P)
+        vals = np.array(D.diagonal())[0]
         vecs = P
         uv_sortind = vals.argsort()
         vals = vals[uv_sortind]
@@ -26,14 +25,14 @@
 
         A=aslinearoperator(A)
         matvec = A.matvec
-        #= lambda x: N.asarray(A*x)[0]
+        #= lambda x: np.asarray(A*x)[0]
         nev=4
         eigvs = ARPACK_eigs(matvec, A.shape[0], nev=nev)
         calc_vals = eigvs[0]
         # Ensure the calculated eigenvectors have the same sign as the reference values
-        calc_vecs = eigvs[1] / [N.sign(x[0]) for x in eigvs[1].T]
+        calc_vecs = eigvs[1] / [np.sign(x[0]) for x in eigvs[1].T]
         assert_array_almost_equal(calc_vals, vals[0:nev], decimal=7)
-        assert_array_almost_equal(calc_vecs,  N.array(vecs)[:,0:nev], decimal=7)
+        assert_array_almost_equal(calc_vecs,  np.array(vecs)[:,0:nev], decimal=7)
 
 
 # class TestGeneigs(TestCase):

Modified: trunk/scipy/special/spfun_stats.py
===================================================================
--- trunk/scipy/special/spfun_stats.py	2008-09-03 16:18:43 UTC (rev 4682)
+++ trunk/scipy/special/spfun_stats.py	2008-09-03 16:58:28 UTC (rev 4683)
@@ -33,7 +33,7 @@
 """Some more special functions which may be useful for multivariate statistical
 analysis."""
 
-import numpy as N
+import numpy as np
 from scipy.special import gammaln as loggam
 
 def multigammaln(a, d):
@@ -71,17 +71,17 @@
 
     R. J. Muirhead, Aspects of multivariate statistical theory (Wiley Series in
     probability and mathematical statistics). """
-    a = N.asarray(a)
-    if not N.isscalar(d) or (N.floor(d) != d):
+    a = np.asarray(a)
+    if not np.isscalar(d) or (np.floor(d) != d):
         raise ValueError("d should be a positive integer (dimension)")
-    if N.any(a <= 0.5 * (d - 1)):
+    if np.any(a <= 0.5 * (d - 1)):
         raise ValueError("condition a (%f) > 0.5 * (d-1) (%f) not met" \
                          % (a, 0.5 * (d-1)))
 
-    res = (d * (d-1) * 0.25) * N.log(N.pi)
+    res = (d * (d-1) * 0.25) * np.log(np.pi)
     if a.size == 1:
         axis = -1
     else:
         axis = 0
-    res += N.sum(loggam([(a - (j - 1.)/2) for j in range(1, d+1)]), axis)
+    res += np.sum(loggam([(a - (j - 1.)/2) for j in range(1, d+1)]), axis)
     return res

Modified: trunk/scipy/special/tests/test_spfun_stats.py
===================================================================
--- trunk/scipy/special/tests/test_spfun_stats.py	2008-09-03 16:18:43 UTC (rev 4682)
+++ trunk/scipy/special/tests/test_spfun_stats.py	2008-09-03 16:58:28 UTC (rev 4683)
@@ -1,18 +1,16 @@
-import numpy as N
+import numpy as np
 from numpy.testing import *
 
-
 from scipy.special import gammaln, multigammaln
 
-
 class TestMultiGammaLn(TestCase):
     def test1(self):
-        a = N.abs(N.random.randn())
+        a = np.abs(np.random.randn())
         assert_array_equal(multigammaln(a, 1), gammaln(a))
 
     def test_ararg(self):
         d = 5
-        a = N.abs(N.random.randn(3, 2)) + d
+        a = np.abs(np.random.randn(3, 2)) + d
 
         tr = multigammaln(a, d)
         assert_array_equal(tr.shape, a.shape)
@@ -20,7 +18,7 @@
             assert_array_equal(tr.ravel()[i], multigammaln(a.ravel()[i], d))
 
         d = 5
-        a = N.abs(N.random.randn(1, 2)) + d
+        a = np.abs(np.random.randn(1, 2)) + d
 
         tr = multigammaln(a, d)
         assert_array_equal(tr.shape, a.shape)

Modified: trunk/scipy/stats/_support.py
===================================================================
--- trunk/scipy/stats/_support.py	2008-09-03 16:18:43 UTC (rev 4682)
+++ trunk/scipy/stats/_support.py	2008-09-03 16:58:28 UTC (rev 4683)
@@ -1,6 +1,6 @@
 from numpy import asarray
 import stats
-import numpy as N
+import numpy as np
 from types import ListType, TupleType, StringType
 import copy
 
@@ -17,20 +17,20 @@
     source = asarray(source)
     if len(source.shape)==1:
         width = 1
-        source = N.resize(source,[source.shape[0],width])
+        source = np.resize(source,[source.shape[0],width])
     else:
         width = source.shape[1]
     for addon in args:
         if len(addon.shape)==1:
             width = 1
-            addon = N.resize(addon,[source.shape[0],width])
+            addon = np.resize(addon,[source.shape[0],width])
         else:
             width = source.shape[1]
         if len(addon) < len(source):
-            addon = N.resize(addon,[source.shape[0],addon.shape[1]])
+            addon = np.resize(addon,[source.shape[0],addon.shape[1]])
         elif len(source) < len(addon):
-            source = N.resize(source,[addon.shape[0],source.shape[1]])
-        source = N.concatenate((source,addon),1)
+            source = np.resize(source,[addon.shape[0],source.shape[1]])
+        source = np.concatenate((source,addon),1)
     return source
 
 
@@ -39,37 +39,37 @@
     works on arrays NOT including string items (e.g., type 'O' or 'c').
     """
     inarray = asarray(inarray)
-    uniques = N.array([inarray[0]])
+    uniques = np.array([inarray[0]])
     if len(uniques.shape) == 1:            # IF IT'S A 1D ARRAY
         for item in inarray[1:]:
-            if N.add.reduce(N.equal(uniques,item).flat) == 0:
+            if np.add.reduce(np.equal(uniques,item).flat) == 0:
                 try:
-                    uniques = N.concatenate([uniques,N.array[N.newaxis,:]])
+                    uniques = np.concatenate([uniques,np.array[np.newaxis,:]])
                 except TypeError:
-                    uniques = N.concatenate([uniques,N.array([item])])
+                    uniques = np.concatenate([uniques,np.array([item])])
     else:                                  # IT MUST BE A 2+D ARRAY
         if inarray.typecode() != 'O':  # not an Object array
             for item in inarray[1:]:
-                if not N.sum(N.alltrue(N.equal(uniques,item),1),axis=0):
+                if not np.sum(np.alltrue(np.equal(uniques,item),1),axis=0):
                     try:
-                        uniques = N.concatenate( [uniques,item[N.newaxis,:]] )
+                        uniques = np.concatenate( [uniques,item[np.newaxis,:]] )
                     except TypeError:    # the item to add isn't a list
-                        uniques = N.concatenate([uniques,N.array([item])])
+                        uniques = np.concatenate([uniques,np.array([item])])
                 else:
                     pass  # this item is already in the uniques array
         else:   # must be an Object array, alltrue/equal functions don't work
             for item in inarray[1:]:
                 newflag = 1
                 for unq in uniques:  # NOTE: cmp --> 0=same, -1=<, 1=>
-                    test = N.sum(abs(N.array(map(cmp,item,unq))),axis=0)
+                    test = np.sum(abs(np.array(map(cmp,item,unq))),axis=0)
                     if test == 0:   # if item identical to any 1 row in uniques
                         newflag = 0 # then not a novel item to add
                         break
                 if newflag == 1:
                     try:
-                        uniques = N.concatenate( [uniques,item[N.newaxis,:]] )
+                        uniques = np.concatenate( [uniques,item[np.newaxis,:]] )
                     except TypeError:    # the item to add isn't a list
-                        uniques = N.concatenate([uniques,N.array([item])])
+                        uniques = np.concatenate([uniques,np.array([item])])
     return uniques
 
 def colex(a, indices, axis=1):
@@ -79,12 +79,12 @@
 
     Returns: the columns of a specified by indices\n"""
 
-    if type(indices) not in [ListType,TupleType,N.ndarray]:
+    if type(indices) not in [ListType,TupleType,np.ndarray]:
         indices = [indices]
-    if len(N.shape(a)) == 1:
-        cols = N.resize(a,[a.shape[0],1])
+    if len(np.shape(a)) == 1:
+        cols = np.resize(a,[a.shape[0],1])
     else:
-        cols = N.take(a,indices,axis)
+        cols = np.take(a,indices,axis)
     return cols
 
 def printcc(lst, extra=2):
@@ -137,9 +137,9 @@
     function = 'lines = filter(lambda x: '+criterion+',a)'
     exec(function)
     try:
-        lines = N.array(lines)
+        lines = np.array(lines)
     except:
-        lines = N.array(lines,'O')
+        lines = np.array(lines,'O')
     return lines
 
 
@@ -150,9 +150,9 @@
     Returns: the rows of a where columnlist[i]=valuelist[i] for ALL i\n"""
 
     a = asarray(a)
-    if type(columnlist) not in [ListType,TupleType,N.ndarray]:
+    if type(columnlist) not in [ListType,TupleType,np.ndarray]:
         columnlist = [columnlist]
-    if type(valuelist) not in [ListType,TupleType,N.ndarray]:
+    if type(valuelist) not in [ListType,TupleType,np.ndarray]:
         valuelist = [valuelist]
     criterion = ''
     for i in range(len(columnlist)):
@@ -183,14 +183,14 @@
         means = cfcn(avgcol)
         return means
     else:
-        if type(keepcols) not in [ListType,TupleType,N.ndarray]:
+        if type(keepcols) not in [ListType,TupleType,np.ndarray]:
             keepcols = [keepcols]
         values = colex(a,keepcols)   # so that "item" can be appended (below)
         uniques = unique(values)  # get a LIST, so .sort keeps rows intact
         uniques.sort()
         newlist = []
         for item in uniques:
-            if type(item) not in [ListType,TupleType,N.ndarray]:
+            if type(item) not in [ListType,TupleType,np.ndarray]:
                 item =[item]
             tmprows = linexand(a,keepcols,item)
             for col in collapsecols:
@@ -205,9 +205,9 @@
                     item.append(len(avgcol))
                 newlist.append(item)
         try:
-            new_a = N.array(newlist)
+            new_a = np.array(newlist)
         except TypeError:
-            new_a = N.array(newlist,'O')
+            new_a = np.array(newlist,'O')
         return new_a
 
 

Modified: trunk/scipy/stats/tests/test_morestats.py
===================================================================
--- trunk/scipy/stats/tests/test_morestats.py	2008-09-03 16:18:43 UTC (rev 4682)
+++ trunk/scipy/stats/tests/test_morestats.py	2008-09-03 16:58:28 UTC (rev 4683)
@@ -6,7 +6,7 @@
 
 import scipy.stats as stats
 
-import numpy as N
+import numpy as np
 from numpy.random import RandomState
 
 g1 = [1.006, 0.996, 0.998, 1.000, 0.992, 0.993, 1.002, 0.999, 0.994, 1.000]
@@ -63,10 +63,10 @@
         assert_almost_equal(pval,0.13499256881897437,11)
 
     def test_approx(self):
-        ramsay = N.array((111, 107, 100, 99, 102, 106, 109, 108, 104, 99,
-                  101, 96, 97, 102, 107, 113, 116, 113, 110, 98))
-        parekh = N.array((107, 108, 106, 98, 105, 103, 110, 105, 104,
-                  100, 96, 108, 103, 104, 114, 114, 113, 108, 106, 99))
+        ramsay = np.array((111, 107, 100, 99, 102, 106, 109, 108, 104, 99,
+                           101, 96, 97, 102, 107, 113, 116, 113, 110, 98))
+        parekh = np.array((107, 108, 106, 98, 105, 103, 110, 105, 104,
+                           100, 96, 108, 103, 104, 114, 114, 113, 108, 106, 99))
         W, pval = stats.ansari(ramsay, parekh)
         assert_almost_equal(W,185.5,11)
         assert_almost_equal(pval,0.18145819972867083,11)



