From numpy-svn at scipy.org Wed Nov 5 11:58:57 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 5 Nov 2008 10:58:57 -0600 (CST) Subject: [Numpy-svn] r5973 - trunk/numpy/core/src Message-ID: <20081105165857.D410739C0F1@scipy.org> Author: charris Date: 2008-11-05 10:58:53 -0600 (Wed, 05 Nov 2008) New Revision: 5973 Modified: trunk/numpy/core/src/umathmodule.c.src Log: More umath cleanups. Modified: trunk/numpy/core/src/umathmodule.c.src =================================================================== --- trunk/numpy/core/src/umathmodule.c.src 2008-10-28 11:23:10 UTC (rev 5972) +++ trunk/numpy/core/src/umathmodule.c.src 2008-11-05 16:58:53 UTC (rev 5973) @@ -498,18 +498,18 @@ */ #define OUTPUT_LOOP\ - char *op = args[1];\ - intp os = steps[1];\ + char *op1 = args[1];\ + intp os1 = steps[1];\ intp n = dimensions[0];\ intp i;\ - for(i = 0; i < n; i++, op += os) + for(i = 0; i < n; i++, op1 += os1) #define UNARY_LOOP\ - char *ip1 = args[0], *op = args[1];\ - intp is1 = steps[0], os = steps[1];\ + char *ip1 = args[0], *op1 = args[1];\ + intp is1 = steps[0], os1 = steps[1];\ intp n = dimensions[0];\ intp i;\ - for(i = 0; i < n; i++, ip1 += is1, op += os) + for(i = 0; i < n; i++, ip1 += is1, op1 += os1) #define UNARY_LOOP_TWO_OUT\ char *ip1 = args[0], *op1 = args[1], *op2 = args[2];\ @@ -519,11 +519,11 @@ for(i = 0; i < n; i++, ip1 += is1, op1 += os1, op2 += os2) #define BINARY_LOOP\ - char *ip1 = args[0], *ip2 = args[1], *op = args[2];\ - intp is1 = steps[0], is2 = steps[1], os = steps[2];\ + char *ip1 = args[0], *ip2 = args[1], *op1 = args[2];\ + intp is1 = steps[0], is2 = steps[1], os1 = steps[2];\ intp n = dimensions[0];\ intp i;\ - for(i = 0; i < n; i++, ip1 += is1, ip2 += is2, op += os) + for(i = 0; i < n; i++, ip1 += is1, ip2 += is2, op1 += os1) #define BINARY_LOOP_TWO_OUT\ char *ip1 = args[0], *ip2 = args[1], *op1 = args[2], *op2 = args[3];\ @@ -559,7 +559,7 @@ BINARY_LOOP { Bool in1 = *((Bool *)ip1) != 0; Bool in2 = *((Bool *)ip2) != 0; - *((Bool *)op)= in1 @OP@ in2; + *((Bool *)op1)= in1 @OP@ in2; } } /**end repeat**/ @@ -570,7 +570,7 @@ BINARY_LOOP { Bool in1 = *((Bool *)ip1) != 0; Bool in2 = *((Bool *)ip2) != 0; - *((Bool *)op)= (in1 && !in2) || (!in1 && in2); + *((Bool *)op1)= (in1 && !in2) || (!in1 && in2); } } @@ -584,7 +584,7 @@ BINARY_LOOP { Bool in1 = *((Bool *)ip1) != 0; Bool in2 = *((Bool *)ip2) != 0; - *((Bool *)op) = (in1 @OP@ in2) ? in1 : in2; + *((Bool *)op1) = (in1 @OP@ in2) ? 
in1 : in2; } } /**end repeat**/ @@ -598,7 +598,7 @@ { UNARY_LOOP { Bool in1 = *(Bool *)ip1; - *((Bool *)op) = in1 @OP@ 0; + *((Bool *)op1) = in1 @OP@ 0; } } /**end repeat**/ @@ -607,7 +607,7 @@ BOOL_ones_like(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)) { OUTPUT_LOOP { - *((Bool *)op) = 1; + *((Bool *)op1) = 1; } } @@ -636,7 +636,7 @@ @S@@TYPE at _ones_like(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)) { OUTPUT_LOOP { - *((@s@@type@ *)op) = 1; + *((@s@@type@ *)op1) = 1; } } @@ -645,7 +645,7 @@ { UNARY_LOOP { const @s@@type@ in1 = *(@s@@type@ *)ip1; - *((@s@@type@ *)op) = in1*in1; + *((@s@@type@ *)op1) = in1*in1; } } @@ -654,7 +654,7 @@ { UNARY_LOOP { const @s@@type@ in1 = *(@s@@type@ *)ip1; - *((@s@@type@ *)op) = (@s@@type@)(1.0/in1); + *((@s@@type@ *)op1) = (@s@@type@)(1.0/in1); } } @@ -663,7 +663,7 @@ { UNARY_LOOP { const @s@@type@ in1 = *(@s@@type@ *)ip1; - *((@s@@type@ *)op) = in1; + *((@s@@type@ *)op1) = in1; } } @@ -672,7 +672,7 @@ { UNARY_LOOP { const @s@@type@ in1 = *(@s@@type@ *)ip1; - *((@s@@type@ *)op) = (@s@@type@)(-(@type@)in1); + *((@s@@type@ *)op1) = (@s@@type@)(-(@type@)in1); } } @@ -681,7 +681,7 @@ { UNARY_LOOP { const @s@@type@ in1 = *(@s@@type@ *)ip1; - *((Bool *)op) = !in1; + *((Bool *)op1) = !in1; } } @@ -690,7 +690,7 @@ { UNARY_LOOP { const @s@@type@ in1 = *(@s@@type@ *)ip1; - *((@s@@type@ *)op) = ~in1; + *((@s@@type@ *)op1) = ~in1; } } @@ -706,7 +706,7 @@ BINARY_LOOP { const @s@@type@ in1 = *(@s@@type@ *)ip1; const @s@@type@ in2 = *(@s@@type@ *)ip2; - *((@s@@type@ *)op) = in1 @OP@ in2; + *((@s@@type@ *)op1) = in1 @OP@ in2; } } /**end repeat2**/ @@ -722,7 +722,7 @@ BINARY_LOOP { const @s@@type@ in1 = *(@s@@type@ *)ip1; const @s@@type@ in2 = *(@s@@type@ *)ip2; - *((Bool *)op) = in1 @OP@ in2; + *((Bool *)op1) = in1 @OP@ in2; } } /**end repeat2**/ @@ -733,7 +733,7 @@ BINARY_LOOP { const @s@@type@ in1 = *(@s@@type@ *)ip1; const @s@@type@ in2 = *(@s@@type@ *)ip2; - *((Bool *)op)= (in1 && !in2) || (!in1 && in2); + *((Bool *)op1)= (in1 && !in2) || (!in1 && in2); } } @@ -747,7 +747,7 @@ BINARY_LOOP { const @s@@type@ in1 = *(@s@@type@ *)ip1; const @s@@type@ in2 = *(@s@@type@ *)ip2; - *((@s@@type@ *)op) = (in1 @OP@ in2) ? in1 : in2; + *((@s@@type@ *)op1) = (in1 @OP@ in2) ? in1 : in2; } } /**end repeat2**/ @@ -760,10 +760,10 @@ const @s@@type@ in2 = *(@s@@type@ *)ip2; if (in2 == 0) { generate_divbyzero_error(); - *((@ftype@ *)op) = 0; + *((@ftype@ *)op1) = 0; } else { - *((@ftype@ *)op) = (@ftype@)in1 / (@ftype@)in2; + *((@ftype@ *)op1) = (@ftype@)in1 / (@ftype@)in2; } } } @@ -774,7 +774,7 @@ BINARY_LOOP { const @ftype@ in1 = (@ftype@)*(@s@@type@ *)ip1; const @ftype@ in2 = (@ftype@)*(@s@@type@ *)ip2; - *((@s@@type@ *)op) = (@s@@type@) pow(in1, in2); + *((@s@@type@ *)op1) = (@s@@type@) pow(in1, in2); } } @@ -786,10 +786,10 @@ const @s@@type@ in2 = *(@s@@type@ *)ip2; if (in2 == 0) { generate_divbyzero_error(); - *((@s@@type@ *)op) = 0; + *((@s@@type@ *)op1) = 0; } else { - *((@s@@type@ *)op)= in1 % in2; + *((@s@@type@ *)op1)= in1 % in2; } } @@ -802,7 +802,7 @@ { UNARY_LOOP { const u at type@ in1 = *(u at type@ *)ip1; - *((u at type@ *)op) = in1; + *((u at type@ *)op1) = in1; } } @@ -811,7 +811,7 @@ { UNARY_LOOP { const @type@ in1 = *(@type@ *)ip1; - *((@type@ *)op) = (in1 >= 0) ? in1 : -in1; + *((@type@ *)op1) = (in1 >= 0) ? in1 : -in1; } } @@ -820,7 +820,7 @@ { UNARY_LOOP { const u at type@ in1 = *(u at type@ *)ip1; - *((u at type@ *)op) = in1 > 0 ? 1 : 0; + *((u at type@ *)op1) = in1 > 0 ? 
1 : 0; } } @@ -829,7 +829,7 @@ { UNARY_LOOP { const @type@ in1 = *(@type@ *)ip1; - *((@type@ *)op) = in1 > 0 ? 1 : (in1 < 0 ? -1 : 0); + *((@type@ *)op1) = in1 > 0 ? 1 : (in1 < 0 ? -1 : 0); } } @@ -841,13 +841,13 @@ const @type@ in2 = *(@type@ *)ip2; if (in2 == 0) { generate_divbyzero_error(); - *((@type@ *)op) = 0; + *((@type@ *)op1) = 0; } else if (((in1 > 0) != (in2 > 0)) && (in1 % in2 != 0)) { - *((@type@ *)op) = in1/in2 - 1; + *((@type@ *)op1) = in1/in2 - 1; } else { - *((@type@ *)op) = in1/in2; + *((@type@ *)op1) = in1/in2; } } } @@ -860,10 +860,10 @@ const u at type@ in2 = *(u at type@ *)ip2; if (in2 == 0) { generate_divbyzero_error(); - *((u at type@ *)op) = 0; + *((u at type@ *)op1) = 0; } else { - *((u at type@ *)op)= in1/in2; + *((u at type@ *)op1)= in1/in2; } } } @@ -876,16 +876,16 @@ const @type@ in2 = *(@type@ *)ip2; if (in2 == 0) { generate_divbyzero_error(); - *((@type@ *)op) = 0; + *((@type@ *)op1) = 0; } else { /* handle mixed case the way Python does */ const @type@ rem = in1 % in2; if ((in1 > 0) == (in2 > 0) || rem == 0) { - *((@type@ *)op) = rem; + *((@type@ *)op1) = rem; } else { - *((@type@ *)op) = rem + in2; + *((@type@ *)op1) = rem + in2; } } } @@ -899,10 +899,10 @@ const u at type@ in2 = *(u at type@ *)ip2; if (in2 == 0) { generate_divbyzero_error(); - *((@type@ *)op) = 0; + *((@type@ *)op1) = 0; } else { - *((@type@ *)op) = in1 % in2; + *((@type@ *)op1) = in1 % in2; } } } @@ -924,8 +924,6 @@ * #C = F, , L# */ -#define ONE 1.0 at c@ -#define ZERO 0.0 at c@ /**begin repeat1 * Arithmetic @@ -938,7 +936,7 @@ BINARY_LOOP { const @type@ in1 = *(@type@ *)ip1; const @type@ in2 = *(@type@ *)ip2; - *((@type@ *)op) = in1 @OP@ in2; + *((@type@ *)op1) = in1 @OP@ in2; } } /**end repeat1**/ @@ -954,7 +952,7 @@ BINARY_LOOP { const @type@ in1 = *(@type@ *)ip1; const @type@ in2 = *(@type@ *)ip2; - *((Bool *)op) = in1 @OP@ in2; + *((Bool *)op1) = in1 @OP@ in2; } } /**end repeat1**/ @@ -965,7 +963,7 @@ BINARY_LOOP { const @type@ in1 = *(@type@ *)ip1; const @type@ in2 = *(@type@ *)ip2; - *((Bool *)op)= (in1 && !in2) || (!in1 && in2); + *((Bool *)op1)= (in1 && !in2) || (!in1 && in2); } } @@ -974,7 +972,7 @@ { UNARY_LOOP { const @type@ in1 = *(@type@ *)ip1; - *((Bool *)op) = !in1; + *((Bool *)op1) = !in1; } } @@ -987,7 +985,7 @@ { UNARY_LOOP { const @type@ in1 = *(@type@ *)ip1; - *((Bool *)op) = @func@(in1) != 0; + *((Bool *)op1) = @func@(in1) != 0; } } /**end repeat1**/ @@ -1003,7 +1001,7 @@ BINARY_LOOP { const @type@ in1 = *(@type@ *)ip1; const @type@ in2 = *(@type@ *)ip2; - *((@type@ *)op) = (in1 @OP@ in2 || isnan(in1)) ? in1 : in2; + *((@type@ *)op1) = (in1 @OP@ in2 || isnan(in1)) ? in1 : in2; } } /**end repeat1**/ @@ -1019,7 +1017,7 @@ BINARY_LOOP { const @type@ in1 = *(@type@ *)ip1; const @type@ in2 = *(@type@ *)ip2; - *((@type@ *)op) = (in1 @OP@ in2 || isnan(in2)) ? in1 : in2; + *((@type@ *)op1) = (in1 @OP@ in2 || isnan(in2)) ? 
in1 : in2; } } /**end repeat1**/ @@ -1030,7 +1028,7 @@ BINARY_LOOP { const @type@ in1 = *(@type@ *)ip1; const @type@ in2 = *(@type@ *)ip2; - *((@type@ *)op) = floor at c@(in1/in2); + *((@type@ *)op1) = floor at c@(in1/in2); } } @@ -1042,10 +1040,10 @@ const @type@ in2 = *(@type@ *)ip2; const @type@ res = fmod at c@(in1,in2); if (res && ((in2 < 0) != (res < 0))) { - *((@type@ *)op) = res + in2; + *((@type@ *)op1) = res + in2; } else { - *((@type@ *)op) = res; + *((@type@ *)op1) = res; } } } @@ -1055,7 +1053,7 @@ { UNARY_LOOP { const @type@ in1 = *(@type@ *)ip1; - *((@type@ *)op) = in1*in1; + *((@type@ *)op1) = in1*in1; } } @@ -1064,7 +1062,7 @@ { UNARY_LOOP { const @type@ in1 = *(@type@ *)ip1; - *((@type@ *)op) = ONE/in1; + *((@type@ *)op1) = 1/in1; } } @@ -1072,7 +1070,7 @@ @TYPE at _ones_like(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)) { OUTPUT_LOOP { - *((@type@ *)op) = ONE; + *((@type@ *)op1) = 1; } } @@ -1081,7 +1079,7 @@ { UNARY_LOOP { const @type@ in1 = *(@type@ *)ip1; - *((@type@ *)op) = in1; + *((@type@ *)op1) = in1; } } @@ -1092,7 +1090,7 @@ const @type@ in1 = *(@type@ *)ip1; const @type@ tmp = in1 > 0 ? in1 : -in1; /* add 0 to clear -0.0 */ - *((@type@ *)op) = tmp + 0; + *((@type@ *)op1) = tmp + 0; } } @@ -1101,25 +1099,17 @@ { UNARY_LOOP { const @type@ in1 = *(@type@ *)ip1; - *((@type@ *)op) = -in1; + *((@type@ *)op1) = -in1; } } static void @TYPE at _sign(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) { - /* */ + /* Sign of nan is currently 0 */ UNARY_LOOP { const @type@ in1 = *(@type@ *)ip1; - if (in1 > 0) { - *((@type@ *)op) = 1; - } - else if (in1 < 0) { - *((@type@ *)op) = -1; - } - else { - *((@type@ *)op) = 0; - } + *((@type@ *)op1) = in1 > 0 ? 1 : (in1 < 0 ? -1 : 0); } } @@ -1157,9 +1147,6 @@ #define @TYPE at _true_divide @TYPE at _divide -#undef ONE -#undef ZERO - /**end repeat**/ @@ -1169,53 +1156,54 @@ ***************************************************************************** */ +#define CGE(xr,xi,yr,yi) (xr > yr || (xr == yr && xi >= yi)) +#define CLE(xr,xi,yr,yi) (xr < yr || (xr == yr && xi <= yi)) +#define CGT(xr,xi,yr,yi) (xr > yr || (xr == yr && xi > yi)) +#define CLT(xr,xi,yr,yi) (xr < yr || (xr == yr && xi < yi)) +#define CEQ(xr,xi,yr,yi) (xr == yr && xi == yi) +#define CNE(xr,xi,yr,yi) (xr != yr || xi != yi) + /**begin repeat * complex types - * #ctype= cfloat, cdouble, clongdouble# - * #CTYPE= CFLOAT, CDOUBLE, CLONGDOUBLE# * #type = float, double, longdouble# + * #TYPE = FLOAT, DOUBLE, LONGDOUBLE# * #c = f, , l# */ -#define CGE(xr,xi,yr,yi) (xr > yr || (xr == yr && xi >= yi)) -#define CLE(xr,xi,yr,yi) (xr < yr || (xr == yr && xi <= yi)) -#define ONE 1.0 at c@ -#define ZERO 0.0 at c@ - /**begin repeat1 * arithmetic * #kind = add, subtract# * #OP = +, -# */ static void - at CTYPE@_ at kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +C at TYPE@_ at kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) { BINARY_LOOP { const @type@ in1r = ((@type@ *)ip1)[0]; const @type@ in1i = ((@type@ *)ip1)[1]; const @type@ in2r = ((@type@ *)ip2)[0]; const @type@ in2i = ((@type@ *)ip2)[1]; - ((@type@ *)op)[0] = in1r @OP@ in2r; - ((@type@ *)op)[1] = in1i @OP@ in2i; + ((@type@ *)op1)[0] = in1r @OP@ in2r; + ((@type@ *)op1)[1] = in1i @OP@ in2i; } } /**end repeat1**/ static void - at CTYPE@_multiply(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +C at TYPE@_multiply(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) { BINARY_LOOP { const @type@ in1r = 
((@type@ *)ip1)[0]; const @type@ in1i = ((@type@ *)ip1)[1]; const @type@ in2r = ((@type@ *)ip2)[0]; const @type@ in2i = ((@type@ *)ip2)[1]; - ((@type@ *)op)[0] = in1r*in2r - in1i*in2i; - ((@type@ *)op)[1] = in1r*in2i + in1i*in2r; + ((@type@ *)op1)[0] = in1r*in2r - in1i*in2i; + ((@type@ *)op1)[1] = in1r*in2i + in1i*in2r; } } static void - at CTYPE@_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +C at TYPE@_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) { BINARY_LOOP { const @type@ in1r = ((@type@ *)ip1)[0]; @@ -1223,13 +1211,13 @@ const @type@ in2r = ((@type@ *)ip2)[0]; const @type@ in2i = ((@type@ *)ip2)[1]; @type@ d = in2r*in2r + in2i*in2i; - ((@type@ *)op)[0] = (in1r*in2r + in1i*in2i)/d; - ((@type@ *)op)[1] = (in1i*in2r - in1r*in2i)/d; + ((@type@ *)op1)[0] = (in1r*in2r + in1i*in2i)/d; + ((@type@ *)op1)[1] = (in1i*in2r - in1r*in2i)/d; } } static void - at CTYPE@_floor_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +C at TYPE@_floor_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) { BINARY_LOOP { const @type@ in1r = ((@type@ *)ip1)[0]; @@ -1237,47 +1225,24 @@ const @type@ in2r = ((@type@ *)ip2)[0]; const @type@ in2i = ((@type@ *)ip2)[1]; @type@ d = in2r*in2r + in2i*in2i; - ((@type@ *)op)[0] = floor at c@((in1r*in2r + in1i*in2i)/d); - ((@type@ *)op)[1] = 0; + ((@type@ *)op1)[0] = floor at c@((in1r*in2r + in1i*in2i)/d); + ((@type@ *)op1)[1] = 0; } } /**begin repeat1 - #kind = equal, not_equal# - #OP1 = ==, !=# - #OP2 = &&, ||# -*/ -static void - at CTYPE@_ at kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const @type@ in1r = ((@type@ *)ip1)[0]; - const @type@ in1i = ((@type@ *)ip1)[1]; - const @type@ in2r = ((@type@ *)ip2)[0]; - const @type@ in2i = ((@type@ *)ip2)[1]; - *((Bool *)op) = (in1r @OP1@ in2r) @OP2@ (in1i @OP1@ in2i); - } -} -/**end repeat1**/ - -/**begin repeat1 - * #kind= greater, greater_equal, less, less_equal# - * #OP = >, >=, <, <=# + * #kind= greater, greater_equal, less, less_equal, equal, not_equal# + * #OP = CGT, CGE, CLT, CLE, CEQ, CNE# */ static void - at CTYPE@_ at kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +C at TYPE@_ at kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) { BINARY_LOOP { const @type@ in1r = ((@type@ *)ip1)[0]; const @type@ in1i = ((@type@ *)ip1)[1]; const @type@ in2r = ((@type@ *)ip2)[0]; const @type@ in2i = ((@type@ *)ip2)[1]; - if (in1r != in2r) { - *((Bool *)op) = in1r @OP@ in2r ? 1 : 0; - } - else { - *((Bool *)op) = in1i @OP@ in2i ? 
1 : 0; - } + *((Bool *)op1) = @OP@(in1r,in1i,in2r,in2i); } } /**end repeat1**/ @@ -1288,20 +1253,20 @@ #OP2 = &&, ||# */ static void - at CTYPE@_ at kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +C at TYPE@_ at kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) { BINARY_LOOP { const @type@ in1r = ((@type@ *)ip1)[0]; const @type@ in1i = ((@type@ *)ip1)[1]; const @type@ in2r = ((@type@ *)ip2)[0]; const @type@ in2i = ((@type@ *)ip2)[1]; - *((Bool *)op) = (in1r @OP1@ in1i) @OP2@ (in2r @OP1@ in2i); + *((Bool *)op1) = (in1r @OP1@ in1i) @OP2@ (in2r @OP1@ in2i); } } /**end repeat1**/ static void - at CTYPE@_logical_xor(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +C at TYPE@_logical_xor(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) { BINARY_LOOP { const @type@ in1r = ((@type@ *)ip1)[0]; @@ -1310,17 +1275,17 @@ const @type@ in2i = ((@type@ *)ip2)[1]; const Bool tmp1 = (in1r || in1i); const Bool tmp2 = (in2r || in2i); - *((Bool *)op) = (tmp1 && !tmp2) || (!tmp1 && tmp2); + *((Bool *)op1) = (tmp1 && !tmp2) || (!tmp1 && tmp2); } } static void - at CTYPE@_logical_not(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +C at TYPE@_logical_not(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) { UNARY_LOOP { const @type@ in1r = ((@type@ *)ip1)[0]; const @type@ in1i = ((@type@ *)ip1)[1]; - *((Bool *)op) = !(in1r || in1i); + *((Bool *)op1) = !(in1r || in1i); } } @@ -1330,29 +1295,29 @@ * #OP = ||, ||, &&# **/ static void - at CTYPE@_ at kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +C at TYPE@_ at kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) { UNARY_LOOP { const @type@ in1r = ((@type@ *)ip1)[0]; const @type@ in1i = ((@type@ *)ip1)[1]; - *((Bool *)op) = @func@(in1r) @OP@ @func@(in1i); + *((Bool *)op1) = @func@(in1r) @OP@ @func@(in1i); } } /**end repeat1**/ static void - at CTYPE@_square(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)) +C at TYPE@_square(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)) { UNARY_LOOP { const @type@ in1r = ((@type@ *)ip1)[0]; const @type@ in1i = ((@type@ *)ip1)[1]; - ((@type@ *)op)[0] = in1r*in1r - in1i*in1i; - ((@type@ *)op)[1] = in1r*in1i + in1i*in1r; + ((@type@ *)op1)[0] = in1r*in1r - in1i*in1i; + ((@type@ *)op1)[1] = in1r*in1i + in1i*in1r; } } static void - at CTYPE@_reciprocal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)) +C at TYPE@_reciprocal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)) { UNARY_LOOP { const @type@ in1r = ((@type@ *)ip1)[0]; @@ -1360,93 +1325,78 @@ if (fabs at c@(in1i) <= fabs at c@(in1r)) { const @type@ r = in1i/in1r; const @type@ d = in1r + in1i*r; - ((@type@ *)op)[0] = 1/d; - ((@type@ *)op)[1] = -r/d; + ((@type@ *)op1)[0] = 1/d; + ((@type@ *)op1)[1] = -r/d; } else { const @type@ r = in1r/in1i; const @type@ d = in1r*r + in1i; - ((@type@ *)op)[0] = r/d; - ((@type@ *)op)[1] = -1/d; + ((@type@ *)op1)[0] = r/d; + ((@type@ *)op1)[1] = -1/d; } } } static void - at CTYPE@_ones_like(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)) +C at TYPE@_ones_like(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)) { OUTPUT_LOOP { - ((@type@ *)op)[0] = ONE; - ((@type@ *)op)[1] = ZERO; + ((@type@ *)op1)[0] = 1; + ((@type@ *)op1)[1] = 0; } } static void - at CTYPE@_conjugate(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) { +C at TYPE@_conjugate(char 
**args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) { UNARY_LOOP { const @type@ in1r = ((@type@ *)ip1)[0]; const @type@ in1i = ((@type@ *)ip1)[1]; - ((@type@ *)op)[0] = in1r; - ((@type@ *)op)[1] = -in1i; + ((@type@ *)op1)[0] = in1r; + ((@type@ *)op1)[1] = -in1i; } } static void - at CTYPE@_absolute(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +C at TYPE@_absolute(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) { UNARY_LOOP { const @type@ in1r = ((@type@ *)ip1)[0]; const @type@ in1i = ((@type@ *)ip1)[1]; - *((@type@ *)op) = sqrt at c@(in1r*in1r + in1i*in1i); + *((@type@ *)op1) = sqrt at c@(in1r*in1r + in1i*in1i); } } static void - at CTYPE@_sign(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +C at TYPE@_sign(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) { + /* fixme: sign of nan is currently 0 */ UNARY_LOOP { const @type@ in1r = ((@type@ *)ip1)[0]; const @type@ in1i = ((@type@ *)ip1)[1]; - if (in1r > 0) { - ((@type@ *)op)[0] = ONE; - } - else if (in1r < 0) { - ((@type@ *)op)[0] = -ONE; - } - else { - if (in1i > 0) { - ((@type@ *)op)[0] = ONE; - } - else if (in1i < 0) { - ((@type@ *)op)[0] = -ONE; - } - else { - ((@type@ *)op)[0] = ZERO; - } - } - ((@type@ *)op)[1] = ZERO; + ((@type@ *)op1)[0] = CGT(in1r, in1i, 0, 0) ? 1 : + (CLT(in1r, in1i, 0, 0) ? -1 : 0); + ((@type@ *)op1)[1] = 0; } } /**begin repeat1 * #kind = maximum, minimum# - * #OP1 = CGE, CLE# - * #OP2 = CLE, CGE# + * #OP = CGE, CLE# */ static void - at CTYPE@_ at kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +C at TYPE@_ at kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) { BINARY_LOOP { const @type@ in1r = ((@type@ *)ip1)[0]; const @type@ in1i = ((@type@ *)ip1)[1]; const @type@ in2r = ((@type@ *)ip2)[0]; const @type@ in2i = ((@type@ *)ip2)[1]; - if (@OP1@(in1r, in1i, in2r, in2i) || isnan(in1r) || isnan(in1i)) { - ((@type@ *)op)[0] = in1r; - ((@type@ *)op)[1] = in1i; + if (@OP@(in1r, in1i, in2r, in2i) || isnan(in1r) || isnan(in1i)) { + ((@type@ *)op1)[0] = in1r; + ((@type@ *)op1)[1] = in1i; } else { - ((@type@ *)op)[0] = in2r; - ((@type@ *)op)[1] = in2i; + ((@type@ *)op1)[0] = in2r; + ((@type@ *)op1)[1] = in2i; } } } @@ -1454,37 +1404,39 @@ /**begin repeat1 * #kind = fmax, fmin# - * #OP1 = CGE, CLE# + * #OP = CGE, CLE# */ static void - at CTYPE@_ at kind@(char **args, intp *dimensions, intp *steps, void *func) +C at TYPE@_ at kind@(char **args, intp *dimensions, intp *steps, void *func) { BINARY_LOOP { const @type@ in1r = ((@type@ *)ip1)[0]; const @type@ in1i = ((@type@ *)ip1)[1]; const @type@ in2r = ((@type@ *)ip2)[0]; const @type@ in2i = ((@type@ *)ip2)[1]; - if (@OP1@(in1r, in1i, in2r, in2i) || isnan(in2r) || isnan(in2i)) { - ((@type@ *)op)[0] = in1r; - ((@type@ *)op)[1] = in1i; + if (@OP@(in1r, in1i, in2r, in2i) || isnan(in2r) || isnan(in2i)) { + ((@type@ *)op1)[0] = in1r; + ((@type@ *)op1)[1] = in1i; } else { - ((@type@ *)op)[0] = in2r; - ((@type@ *)op)[1] = in2i; + ((@type@ *)op1)[0] = in2r; + ((@type@ *)op1)[1] = in2i; } } } /**end repeat1**/ -#define @CTYPE at _true_divide @CTYPE at _divide +#define C at TYPE@_true_divide C at TYPE@_divide +/**end repeat**/ + #undef CGE #undef CLE -#undef ONE -#undef ZERO +#undef CGT +#undef CLT +#undef CEQ +#undef CNE -/**end repeat**/ - /* ***************************************************************************** ** OBJECT LOOPS ** From numpy-svn at scipy.org Wed Nov 5 11:59:11 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) 
Date: Wed, 5 Nov 2008 10:59:11 -0600 (CST) Subject: [Numpy-svn] r5974 - trunk/numpy/core/src Message-ID: <20081105165911.A51E939C0F1@scipy.org> Author: charris Date: 2008-11-05 10:59:07 -0600 (Wed, 05 Nov 2008) New Revision: 5974 Modified: trunk/numpy/core/src/umathmodule.c.src Log: Finish replacing op by op1 so it compiles. Modified: trunk/numpy/core/src/umathmodule.c.src =================================================================== --- trunk/numpy/core/src/umathmodule.c.src 2008-11-05 16:58:53 UTC (rev 5973) +++ trunk/numpy/core/src/umathmodule.c.src 2008-11-05 16:59:07 UTC (rev 5974) @@ -1118,7 +1118,7 @@ { UNARY_LOOP_TWO_OUT { const @type@ in1 = *(@type@ *)ip1; - *(@type@ *)op1 = modf at c@(in1, (@type@ *)op2); + *((@type@ *)op1) = modf at c@(in1, (@type@ *)op2); } } @@ -1128,7 +1128,7 @@ { UNARY_LOOP_TWO_OUT { const @type@ in1 = *(@type@ *)ip1; - *(@type@ *)op1 = frexp at c@(in1, (int *)op2); + *((@type@ *)op1) = frexp at c@(in1, (int *)op2); } } #endif @@ -1140,7 +1140,7 @@ BINARY_LOOP { const @type@ in1 = *(@type@ *)ip1; const int in2 = *(int *)ip2; - *(@type@ *)op = ldexp at c@(in1, in2); + *((@type@ *)op1) = ldexp at c@(in1, in2); } } #endif @@ -1452,7 +1452,7 @@ BINARY_LOOP { PyObject *in1 = *(PyObject **)ip1; PyObject *in2 = *(PyObject **)ip2; - *(Bool *)op = (Bool) PyObject_RichCompareBool(in1, in2, Py_ at OP@); + *((Bool *)op1) = (Bool) PyObject_RichCompareBool(in1, in2, Py_ at OP@); } } /**end repeat**/ @@ -1463,7 +1463,7 @@ PyObject *zero = PyInt_FromLong(0); UNARY_LOOP { PyObject *in1 = *(PyObject **)ip1; - *(PyObject **)op = PyInt_FromLong(PyObject_Compare(in1, zero)); + *((PyObject **)op1) = PyInt_FromLong(PyObject_Compare(in1, zero)); } Py_DECREF(zero); } From numpy-svn at scipy.org Wed Nov 5 14:40:35 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 5 Nov 2008 13:40:35 -0600 (CST) Subject: [Numpy-svn] r5975 - in trunk/numpy/core: code_generators src Message-ID: <20081105194035.67AEA39C0F1@scipy.org> Author: charris Date: 2008-11-05 13:40:28 -0600 (Wed, 05 Nov 2008) New Revision: 5975 Modified: trunk/numpy/core/code_generators/generate_umath.py trunk/numpy/core/src/math_c99.inc.src trunk/numpy/core/src/umathmodule.c.src Log: Add logsumexp ufunc and some small cleanups. Modified: trunk/numpy/core/code_generators/generate_umath.py =================================================================== --- trunk/numpy/core/code_generators/generate_umath.py 2008-11-05 16:59:07 UTC (rev 5974) +++ trunk/numpy/core/code_generators/generate_umath.py 2008-11-05 19:40:28 UTC (rev 5975) @@ -326,6 +326,11 @@ "", TD(inexact) ), +'logsumexp' : + Ufunc(2, 1, None, + "", + TD(flts, f="logsumexp") + ), 'bitwise_and' : Ufunc(2, 1, One, docstrings.get('numpy.core.umath.bitwise_and'), Modified: trunk/numpy/core/src/math_c99.inc.src =================================================================== --- trunk/numpy/core/src/math_c99.inc.src 2008-11-05 16:59:07 UTC (rev 5974) +++ trunk/numpy/core/src/math_c99.inc.src 2008-11-05 19:40:28 UTC (rev 5975) @@ -33,7 +33,7 @@ if (u == 1.0) { return x; } else { - return log(u) * x / (u-1.); + return log(u) * x / (u - 1); } } #endif @@ -187,78 +187,56 @@ * instead test for the macro, but I am lazy to do that for now. 
*/ -/* - * One value argument function - */ - /**begin repeat - - #kind=(sin,cos,tan,sinh,cosh,tanh,fabs,floor,ceil,rint,trunc,sqrt,log10,log,exp,expm1,asin,acos,atan,asinh,acosh,atanh,log1p)*2# - #KIND=(SIN,COS,TAN,SINH,COSH,TANH,FABS,FLOOR,CEIL,RINT,TRUNC,SQRT,LOG10,LOG,EXP,EXPM1,ASIN,ACOS,ATAN,ASINH,ACOSH,ATANH,LOG1P)*2# - #typ=longdouble*23, float*23# - #c=l*23,f*23# - #C=L*23,F*23# - #TYPE=LONGDOUBLE*23, FLOAT*23# + * #type = longdouble, float# + * #TYPE = LONGDOUBLE, FLOAT# + * #c = l,f# + * #C = L,F# */ +/**begin repeat1 + * #kind = sin,cos,tan,sinh,cosh,tanh,fabs,floor,ceil,rint,trunc,sqrt,log10, + * log,exp,expm1,asin,acos,atan,asinh,acosh,atanh,log1p# + * #KIND = SIN,COS,TAN,SINH,COSH,TANH,FABS,FLOOR,CEIL,RINT,TRUNC,SQRT,LOG10, + * LOG,EXP,EXPM1,ASIN,ACOS,ATAN,ASINH,ACOSH,ATANH,LOG1P# + */ #ifndef HAVE_ at KIND@@C@ #ifdef @kind@@c@ #undef @kind@@c@ #endif - at typ@ @kind@@c@(@typ@ x) + at type@ @kind@@c@(@type@ x) { - return (@typ@) @kind@((double)x); + return (@type@) @kind@((double)x); } #endif -/**end repeat**/ +/**end repeat1**/ -/* - * Two values arguments function +/**begin repeat1 + * #kind = atan2,hypot,pow,fmod# + * #KIND = ATAN2,HYPOT,POW,FMOD# */ - -/**begin repeat - - #kind=(atan2,hypot,pow,fmod)*2# - #KIND=(ATAN2,HYPOT,POW,FMOD)*2# - #typ=longdouble*4, float*4# - #c=l*4,f*4# - #C=L*4,F*4# - #TYPE=LONGDOUBLE*4,FLOAT*4# -*/ #ifndef HAVE_ at KIND@@C@ #ifdef @kind@@c@ #undef @kind@@c@ #endif - at typ@ @kind@@c@(@typ@ x, @typ@ y) + at type@ @kind@@c@(@type@ x, @type@ y) { - return (@typ@) @kind@((double)x, (double) y); + return (@type@) @kind@((double)x, (double) y); } #endif -/**end repeat**/ +/**end repeat1**/ -/* - * One value - one pointer argument function - */ - -/**begin repeat - #kind=modf*2# - #KIND=MODF*2# - #c=l,f# - #C=L,F# - #typ=longdouble, float# - #TYPE=LONGDOUBLE, FLOAT# -*/ -#ifndef HAVE_ at KIND@@C@ +#ifndef HAVE_MODF at C@ #ifdef modf at c@ #undef modf at c@ #endif - at typ@ modf at c@(@typ@ x, @typ@ *iptr) + at type@ modf at c@(@type@ x, @type@ *iptr) { - double nx, niptr, y; - nx = (double) x; - y = modf(nx, &niptr); - *iptr = (@typ@) niptr; - return (@typ@) y; + double niptr; + double y = modf((double)x, &niptr); + *iptr = (@type@) niptr; + return (@type@) y; } #endif + /**end repeat**/ Modified: trunk/numpy/core/src/umathmodule.c.src =================================================================== --- trunk/numpy/core/src/umathmodule.c.src 2008-11-05 16:59:07 UTC (rev 5974) +++ trunk/numpy/core/src/umathmodule.c.src 2008-11-05 19:40:28 UTC (rev 5975) @@ -23,26 +23,56 @@ #include "math_c99.inc" -float degreesf(float x) { - return x * (float)(180.0/M_PI); +/* + ***************************************************************************** + ** FLOAT FUNCTIONS ** + ***************************************************************************** + */ + +/**begin repeat + * #type = float, double, longdouble# + * #c = f, ,l# + * #C = F, ,L# + */ + +#define PI 3.14159265358979323846264338328 at c@ + +static @type@ +degrees at c@(@type@ x) { + return x*(180.0 at c@/PI); } -double degrees(double x) { - return x * (180.0/M_PI); + +static @type@ +radians at c@(@type@ x) { + return x*(PI/180.0 at c@); } -longdouble degreesl(longdouble x) { - return x * (180.0L/M_PI); + +static @type@ +rad2deg at c@(@type@ x) { + return x*(180.0 at c@/PI); } -float radiansf(float x) { - return x * (float)(M_PI/180.0); +static @type@ +deg2rad at c@(@type@ x) { + return x*(PI/180.0 at c@); } -double radians(double x) { - return x * (M_PI/180.0); + +static @type@ +logsumexp at 
c@(@type@ x, @type@ y) +{ + const @type@ tmp = x - y; + if (tmp > 0) { + return x + log1p at c@(exp at c@(-tmp)); + } + else { + return y + log1p at c@(exp at c@(tmp)); + } } -longdouble radiansl(longdouble x) { - return x * (M_PI/180.0L); -} +#undef PI + +/**end repeat**/ + /* ***************************************************************************** ** PYTHON OBJECT FUNCTIONS ** From numpy-svn at scipy.org Wed Nov 5 17:53:46 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 5 Nov 2008 16:53:46 -0600 (CST) Subject: [Numpy-svn] r5976 - in trunk/numpy/core: code_generators src Message-ID: <20081105225346.D5FDB39C0F1@scipy.org> Author: charris Date: 2008-11-05 16:53:39 -0600 (Wed, 05 Nov 2008) New Revision: 5976 Modified: trunk/numpy/core/code_generators/generate_umath.py trunk/numpy/core/src/umathmodule.c.src Log: Merge branch 'ufunc' Conflicts: numpy/core/src/umathmodule.c.src Modified: trunk/numpy/core/code_generators/generate_umath.py =================================================================== --- trunk/numpy/core/code_generators/generate_umath.py 2008-11-05 19:40:28 UTC (rev 5975) +++ trunk/numpy/core/code_generators/generate_umath.py 2008-11-05 22:53:39 UTC (rev 5976) @@ -221,7 +221,7 @@ 'ones_like' : Ufunc(1, 1, None, docstrings.get('numpy.core.umath.ones_like'), - TD(nobool_or_obj), + TD(noobj), TD(O, f='Py_get_one'), ), 'power' : @@ -319,18 +319,25 @@ 'fmax' : Ufunc(2, 1, None, "", - TD(inexact) + TD(noobj), + TD(O, f='_npy_ObjectMax') ), 'fmin' : Ufunc(2, 1, None, "", - TD(inexact) + TD(noobj), + TD(O, f='_npy_ObjectMin') ), 'logsumexp' : Ufunc(2, 1, None, "", TD(flts, f="logsumexp") ), +'logsumexp' : + Ufunc(2, 1, None, + "", + TD(flts, f="logsumexp") + ), 'bitwise_and' : Ufunc(2, 1, One, docstrings.get('numpy.core.umath.bitwise_and'), Modified: trunk/numpy/core/src/umathmodule.c.src =================================================================== --- trunk/numpy/core/src/umathmodule.c.src 2008-11-05 19:40:28 UTC (rev 5975) +++ trunk/numpy/core/src/umathmodule.c.src 2008-11-05 22:53:39 UTC (rev 5976) @@ -36,18 +36,10 @@ */ #define PI 3.14159265358979323846264338328 at c@ +#define degrees at c@ deg2rad at c@ +#define radians at c@ rad2deg at c@ static @type@ -degrees at c@(@type@ x) { - return x*(180.0 at c@/PI); -} - -static @type@ -radians at c@(@type@ x) { - return x*(PI/180.0 at c@); -} - -static @type@ rad2deg at c@(@type@ x) { return x*(180.0 at c@/PI); } @@ -136,7 +128,7 @@ */ -/* +/* * Don't pass structures between functions (only pointers) because how * structures are passed is compiler dependent and could cause * segfaults if ufuncobject.c is compiled with a different compiler @@ -576,6 +568,8 @@ #define BOOL_bitwise_xor BOOL_logical_xor #define BOOL_multiply BOOL_logical_and #define BOOL_subtract BOOL_logical_xor +#define BOOL_fmax BOOL_maximum +#define BOOL_fmin BOOL_minimum /**begin repeat * #kind = equal, not_equal, greater, greater_equal, less, less_equal, @@ -661,6 +655,8 @@ */ #define @S@@TYPE at _floor_divide @S@@TYPE at _divide +#define @S@@TYPE at _fmax @S@@TYPE at _maximum +#define @S@@TYPE at _fmin @S@@TYPE at _minimum static void @S@@TYPE at _ones_like(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)) From numpy-svn at scipy.org Wed Nov 5 18:11:00 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 5 Nov 2008 17:11:00 -0600 (CST) Subject: [Numpy-svn] r5977 - trunk/numpy/core/src Message-ID: <20081105231100.CF38639C0F1@scipy.org> Author: charris Date: 2008-11-05 17:10:56 -0600 (Wed, 05 Nov 
2008) New Revision: 5977 Modified: trunk/numpy/core/src/umathmodule.c.src Log: Fix reversal between radians and degrees. Modified: trunk/numpy/core/src/umathmodule.c.src =================================================================== --- trunk/numpy/core/src/umathmodule.c.src 2008-11-05 22:53:39 UTC (rev 5976) +++ trunk/numpy/core/src/umathmodule.c.src 2008-11-05 23:10:56 UTC (rev 5977) @@ -36,8 +36,8 @@ */ #define PI 3.14159265358979323846264338328 at c@ -#define degrees at c@ deg2rad at c@ -#define radians at c@ rad2deg at c@ +#define degrees at c@ rad2deg at c@ +#define radians at c@ deg2rad at c@ static @type@ rad2deg at c@(@type@ x) { From numpy-svn at scipy.org Thu Nov 6 07:52:37 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 6 Nov 2008 06:52:37 -0600 (CST) Subject: [Numpy-svn] r5978 - trunk/numpy/distutils/fcompiler Message-ID: <20081106125237.90EB439C089@scipy.org> Author: cdavid Date: 2008-11-06 06:52:26 -0600 (Thu, 06 Nov 2008) New Revision: 5978 Modified: trunk/numpy/distutils/fcompiler/gnu.py Log: Remove cpu/arch specific optimization flags for GNU fortran compilers: they are too fragile, and are more likely not that useful anyway. Modified: trunk/numpy/distutils/fcompiler/gnu.py =================================================================== --- trunk/numpy/distutils/fcompiler/gnu.py 2008-11-05 23:10:56 UTC (rev 5977) +++ trunk/numpy/distutils/fcompiler/gnu.py 2008-11-06 12:52:26 UTC (rev 5978) @@ -178,104 +178,8 @@ return opt def get_flags_arch(self): - opt = [] - if sys.platform == 'darwin': - # Since Apple doesn't distribute a GNU Fortran compiler, we - # can't add -arch ppc or -arch i386, as only their version - # of the GNU compilers accepts those. - for a in '601 602 603 603e 604 604e 620 630 740 7400 7450 750'\ - '403 505 801 821 823 860'.split(): - if getattr(cpu,'is_ppc%s'%a)(): - opt.append('-mcpu='+a) - opt.append('-mtune='+a) - break - return opt + return [] - # default march options in case we find nothing better - if cpu.is_i686(): - march_opt = '-march=i686' - elif cpu.is_i586(): - march_opt = '-march=i586' - elif cpu.is_i486(): - march_opt = '-march=i486' - elif cpu.is_i386(): - march_opt = '-march=i386' - else: - march_opt = '' - - gnu_ver = self.get_version() - - if gnu_ver >= '0.5.26': # gcc 3.0 - if cpu.is_AthlonK6(): - march_opt = '-march=k6' - elif cpu.is_AthlonK7(): - march_opt = '-march=athlon' - - if gnu_ver >= '3.1.1': - if cpu.is_AthlonK6_2(): - march_opt = '-march=k6-2' - elif cpu.is_AthlonK6_3(): - march_opt = '-march=k6-3' - elif cpu.is_AthlonMP(): - march_opt = '-march=athlon-mp' - # there's also: athlon-tbird, athlon-4, athlon-xp - elif cpu.is_Nocona(): - march_opt = '-march=nocona' - elif cpu.is_Core2(): - march_opt = '-march=nocona' - elif cpu.is_Xeon() and cpu.is_64bit(): - march_opt = '-march=nocona' - elif cpu.is_Prescott(): - march_opt = '-march=prescott' - elif cpu.is_PentiumIV(): - march_opt = '-march=pentium4' - elif cpu.is_PentiumIII(): - march_opt = '-march=pentium3' - elif cpu.is_PentiumM(): - march_opt = '-march=pentium3' - elif cpu.is_PentiumII(): - march_opt = '-march=pentium2' - - if gnu_ver >= '3.4': - # Actually, I think these all do the same things - if cpu.is_Opteron(): - march_opt = '-march=opteron' - elif cpu.is_Athlon64(): - march_opt = '-march=athlon64' - elif cpu.is_AMD64(): - march_opt = '-march=k8' - - if gnu_ver >= '3.4.4': - if cpu.is_PentiumM(): - march_opt = '-march=pentium-m' - # Future: - # if gnu_ver >= '4.3': - # if cpu.is_Core2(): - # march_opt = '-march=core2' - - # Note: gcc 
3.2 on win32 has breakage with -march specified - if '3.1.1' <= gnu_ver <= '3.4' and sys.platform=='win32': - march_opt = '' - - if march_opt: - opt.append(march_opt) - - # other CPU flags - if gnu_ver >= '3.1.1': - if cpu.has_mmx(): opt.append('-mmmx') - if cpu.has_3dnow(): opt.append('-m3dnow') - - if gnu_ver > '3.2.2': - if cpu.has_sse2(): opt.append('-msse2') - if cpu.has_sse(): opt.append('-msse') - if gnu_ver >= '3.4': - if cpu.has_sse3(): opt.append('-msse3') - if cpu.is_Intel(): - opt.append('-fomit-frame-pointer') - if cpu.is_32bit(): - opt.append('-malign-double') - return opt - class Gnu95FCompiler(GnuFCompiler): compiler_type = 'gnu95' compiler_aliases = ('gfortran',) From numpy-svn at scipy.org Thu Nov 6 10:25:02 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 6 Nov 2008 09:25:02 -0600 (CST) Subject: [Numpy-svn] r5979 - in trunk/numpy/core: code_generators src Message-ID: <20081106152502.0E41139C4BF@scipy.org> Author: charris Date: 2008-11-06 09:24:51 -0600 (Thu, 06 Nov 2008) New Revision: 5979 Modified: trunk/numpy/core/code_generators/generate_umath.py trunk/numpy/core/src/umathmodule.c.src Log: Rename logsumexp to logaddexp. Add ufuncs deg2rad and rad2deg. Modified: trunk/numpy/core/code_generators/generate_umath.py =================================================================== --- trunk/numpy/core/code_generators/generate_umath.py 2008-11-06 12:52:26 UTC (rev 5978) +++ trunk/numpy/core/code_generators/generate_umath.py 2008-11-06 15:24:51 UTC (rev 5979) @@ -328,10 +328,10 @@ TD(noobj), TD(O, f='_npy_ObjectMin') ), -'logsumexp' : +'logaddexp' : Ufunc(2, 1, None, "", - TD(flts, f="logsumexp") + TD(flts, f="logaddexp") ), 'logsumexp' : Ufunc(2, 1, None, @@ -379,11 +379,21 @@ docstrings.get('numpy.core.umath.degrees'), TD(fltsM, f='degrees'), ), +'rad2deg' : + Ufunc(1, 1, None, + '', + TD(fltsM, f='rad2deg'), + ), 'radians' : Ufunc(1, 1, None, docstrings.get('numpy.core.umath.radians'), TD(fltsM, f='radians'), ), +'deg2rad' : + Ufunc(1, 1, None, + '', + TD(fltsM, f='deg2rad'), + ), 'arccos' : Ufunc(1, 1, None, docstrings.get('numpy.core.umath.arccos'), Modified: trunk/numpy/core/src/umathmodule.c.src =================================================================== --- trunk/numpy/core/src/umathmodule.c.src 2008-11-06 12:52:26 UTC (rev 5978) +++ trunk/numpy/core/src/umathmodule.c.src 2008-11-06 15:24:51 UTC (rev 5979) @@ -50,7 +50,7 @@ } static @type@ -logsumexp at c@(@type@ x, @type@ y) +logaddexp at c@(@type@ x, @type@ y) { const @type@ tmp = x - y; if (tmp > 0) { From numpy-svn at scipy.org Thu Nov 6 10:25:25 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 6 Nov 2008 09:25:25 -0600 (CST) Subject: [Numpy-svn] r5980 - trunk/numpy/core/code_generators Message-ID: <20081106152525.8601D39C4C7@scipy.org> Author: charris Date: 2008-11-06 09:25:12 -0600 (Thu, 06 Nov 2008) New Revision: 5980 Modified: trunk/numpy/core/code_generators/generate_umath.py Log: Remove remove logsumexp and replace with logaddexp. 
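For reference, the logaddexp kernel this entry now points at (added to umathmodule.c.src in r5975) computes log(exp(x) + exp(y)) without overflowing for large arguments by factoring out the larger one. A standalone Python sketch of the same branch structure follows; the function name and the use of the math module are mine, and the templated C source actually generates float, double and long double variants:

    import math

    def logaddexp(x, y):
        # Overflow-safe log(exp(x) + exp(y)): subtract the larger argument
        # so the exponential that is actually evaluated never exceeds 1.
        tmp = x - y
        if tmp > 0:
            return x + math.log1p(math.exp(-tmp))
        else:
            return y + math.log1p(math.exp(tmp))

For example, logaddexp(1000.0, 1000.0) returns about 1000.6931, whereas evaluating math.log(math.exp(1000.0) + math.exp(1000.0)) directly overflows.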
Modified: trunk/numpy/core/code_generators/generate_umath.py =================================================================== --- trunk/numpy/core/code_generators/generate_umath.py 2008-11-06 15:24:51 UTC (rev 5979) +++ trunk/numpy/core/code_generators/generate_umath.py 2008-11-06 15:25:12 UTC (rev 5980) @@ -333,11 +333,6 @@ "", TD(flts, f="logaddexp") ), -'logsumexp' : - Ufunc(2, 1, None, - "", - TD(flts, f="logsumexp") - ), 'bitwise_and' : Ufunc(2, 1, One, docstrings.get('numpy.core.umath.bitwise_and'), From numpy-svn at scipy.org Thu Nov 6 13:58:12 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 6 Nov 2008 12:58:12 -0600 (CST) Subject: [Numpy-svn] r5981 - trunk/numpy/core/code_generators Message-ID: <20081106185812.42BED39C4BD@scipy.org> Author: charris Date: 2008-11-06 12:58:06 -0600 (Thu, 06 Nov 2008) New Revision: 5981 Modified: trunk/numpy/core/code_generators/generate_umath.py Log: Merge branch 'ufunc' Modified: trunk/numpy/core/code_generators/generate_umath.py =================================================================== --- trunk/numpy/core/code_generators/generate_umath.py 2008-11-06 15:25:12 UTC (rev 5980) +++ trunk/numpy/core/code_generators/generate_umath.py 2008-11-06 18:58:06 UTC (rev 5981) @@ -503,6 +503,12 @@ TD(flts, f='ceil'), TD(M, f='ceil'), ), +'trunc' : + Ufunc(1, 1, None, + '', + TD(flts, f='trunc'), + TD(M, f='trunc'), + ), 'fabs' : Ufunc(1, 1, None, docstrings.get('numpy.core.umath.fabs'), From numpy-svn at scipy.org Thu Nov 6 16:24:51 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 6 Nov 2008 15:24:51 -0600 (CST) Subject: [Numpy-svn] r5982 - numpy-docs/trunk/source/reference Message-ID: <20081106212451.E3BBF39C3F0@scipy.org> Author: stefan Date: 2008-11-06 15:24:37 -0600 (Thu, 06 Nov 2008) New Revision: 5982 Modified: numpy-docs/trunk/source/reference/ufuncs.rst Log: Alphabetise some lists. Add logaddexp, deg2rad, rad2deg and trunc ufuncs. Modified: numpy-docs/trunk/source/reference/ufuncs.rst =================================================================== --- numpy-docs/trunk/source/reference/ufuncs.rst 2008-11-06 18:58:06 UTC (rev 5981) +++ numpy-docs/trunk/source/reference/ufuncs.rst 2008-11-06 21:24:37 UTC (rev 5982) @@ -393,30 +393,31 @@ .. autosummary:: + absolute add - subtract - multiply + conj divide - true_divide + exp + expm1 floor_divide + fmod + log + log1p + log10 + logaddexp + mod + multiply negative + ones_like power remainder - mod - fmod - absolute rint + reciprocal sign - conj - exp - log - expm1 - log1p - log10 sqrt square - reciprocal - ones_like + subtract + true_divide .. tip:: @@ -436,20 +437,22 @@ .. autosummary:: - sin - cos - tan + arccos arcsin - arccos arctan + arcsinh + arccosh + arctanh arctan2 + cos + cosh + deg2rad hypot + rad2deg + sin sinh - cosh + tan tanh - arcsinh - arccosh - arctanh Bit-twiddling functions ----------------------- @@ -537,15 +540,16 @@ .. autosummary:: - isreal + ceil iscomplex isfinite isinf isnan - signbit + isreal + ldexp modf - ldexp + floor + fmod frexp - fmod - floor - ceil + signbit + trunk From numpy-svn at scipy.org Fri Nov 7 02:46:55 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 7 Nov 2008 01:46:55 -0600 (CST) Subject: [Numpy-svn] r5983 - trunk/numpy/distutils Message-ID: <20081107074655.E03EB39C260@scipy.org> Author: cdavid Date: 2008-11-07 01:46:47 -0600 (Fri, 07 Nov 2008) New Revision: 5983 Modified: trunk/numpy/distutils/system_info.py Log: Deal with g3f extension used by Debian/Ubuntu for gfortrab ABI transition. 
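For context, the extension list being extended below is what system_info walks through when it probes a directory for a candidate library. Conceptually the lookup reduces to the following sketch; the helper name and the hard-coded suffix tuple are illustrative only, not the actual check_libs code:

    import os

    def find_library_file(lib_dir, name, exts=('.a', '.so', '.so.3gf')):
        # Try each known suffix in turn; on Ubuntu Intrepid the ATLAS/LAPACK
        # shared libraries may only exist with the '.so.3gf' suffix.
        for ext in exts:
            candidate = os.path.join(lib_dir, 'lib' + name + ext)
            if os.path.exists(candidate):
                return candidate
        return None

Without the extra suffix in the list, a library installed only as liblapack.so.3gf would simply not be found.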
Modified: trunk/numpy/distutils/system_info.py =================================================================== --- trunk/numpy/distutils/system_info.py 2008-11-06 21:24:37 UTC (rev 5982) +++ trunk/numpy/distutils/system_info.py 2008-11-07 07:46:47 UTC (rev 5983) @@ -518,6 +518,10 @@ exts.append('.dll.a') if sys.platform == 'darwin': exts.append('.dylib') + # Debian and Ubuntu added a g3f suffix to shared library to deal with + # g77 -> gfortran ABI transition + if sys.platform[:5] == 'linux': + exts.append('.so.g3f') return exts def check_libs(self,lib_dir,libs,opt_libs =[]): From numpy-svn at scipy.org Fri Nov 7 02:50:49 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 7 Nov 2008 01:50:49 -0600 (CST) Subject: [Numpy-svn] r5984 - trunk/numpy/distutils Message-ID: <20081107075049.5E71C39C4B4@scipy.org> Author: cdavid Date: 2008-11-07 01:50:45 -0600 (Fri, 07 Nov 2008) New Revision: 5984 Modified: trunk/numpy/distutils/system_info.py Log: Fix typo (g3f->3gf). Modified: trunk/numpy/distutils/system_info.py =================================================================== --- trunk/numpy/distutils/system_info.py 2008-11-07 07:46:47 UTC (rev 5983) +++ trunk/numpy/distutils/system_info.py 2008-11-07 07:50:45 UTC (rev 5984) @@ -521,7 +521,7 @@ # Debian and Ubuntu added a g3f suffix to shared library to deal with # g77 -> gfortran ABI transition if sys.platform[:5] == 'linux': - exts.append('.so.g3f') + exts.append('.so.3gf') return exts def check_libs(self,lib_dir,libs,opt_libs =[]): From numpy-svn at scipy.org Fri Nov 7 03:09:33 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 7 Nov 2008 02:09:33 -0600 (CST) Subject: [Numpy-svn] r5985 - trunk Message-ID: <20081107080933.0E01F39C4B4@scipy.org> Author: cdavid Date: 2008-11-07 02:09:30 -0600 (Fri, 07 Nov 2008) New Revision: 5985 Modified: trunk/INSTALL.txt Log: Mention which packages to install for atlas on Ubuntu (Pre intrepid and intrepid). Modified: trunk/INSTALL.txt =================================================================== --- trunk/INSTALL.txt 2008-11-07 07:50:45 UTC (rev 5984) +++ trunk/INSTALL.txt 2008-11-07 08:09:30 UTC (rev 5985) @@ -28,3 +28,32 @@ Python must also be compiled with the zlib module enabled. __ http://www.python.org + +Building with ATLAS support +=========================== + +Ubuntu 8.10 (Intrepid) +---------------------- + +You can install the necessary packages for optimized ATLAS with this command: + + sudo apt-get install libatlas-base-dev + +If you have a recent CPU with SIMD suppport (SSE, SSE2, etc...), you should +also install the corresponding package for optimal performances. For example, +for SSE2: + + sudo apt-get install libatlas3gf-sse2 + +Ubuntu 8.04 and lower +--------------------- + +You can install the necessary packages for optimized ATLAS with this command: + + sudo apt-get install atlas3-base-dev + +If you have a recent CPU with SIMD suppport (SSE, SSE2, etc...), you should +also install the corresponding package for optimal performances. For example, +for SSE2: + + sudo apt-get install atlas3-sse2 From numpy-svn at scipy.org Fri Nov 7 03:14:11 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 7 Nov 2008 02:14:11 -0600 (CST) Subject: [Numpy-svn] r5986 - in trunk: . 
numpy/distutils Message-ID: <20081107081411.87B2239C260@scipy.org> Author: cdavid Date: 2008-11-07 02:14:06 -0600 (Fri, 07 Nov 2008) New Revision: 5986 Modified: trunk/INSTALL.txt trunk/numpy/distutils/system_info.py Log: Disable looking for 3gf libraries; it causes more problem than it solves. Modified: trunk/INSTALL.txt =================================================================== --- trunk/INSTALL.txt 2008-11-07 08:09:30 UTC (rev 5985) +++ trunk/INSTALL.txt 2008-11-07 08:14:06 UTC (rev 5986) @@ -45,6 +45,10 @@ sudo apt-get install libatlas3gf-sse2 +*NOTE*: if you build your own atlas, Intrepid changed its default fortran +compiler to gfortran. So you should rebuild everything from scratch, including +lapack, to use it on Intrepid. + Ubuntu 8.04 and lower --------------------- Modified: trunk/numpy/distutils/system_info.py =================================================================== --- trunk/numpy/distutils/system_info.py 2008-11-07 08:09:30 UTC (rev 5985) +++ trunk/numpy/distutils/system_info.py 2008-11-07 08:14:06 UTC (rev 5986) @@ -520,8 +520,9 @@ exts.append('.dylib') # Debian and Ubuntu added a g3f suffix to shared library to deal with # g77 -> gfortran ABI transition - if sys.platform[:5] == 'linux': - exts.append('.so.3gf') + # XXX: disabled, it hides more problem than it solves. + #if sys.platform[:5] == 'linux': + # exts.append('.so.3gf') return exts def check_libs(self,lib_dir,libs,opt_libs =[]): From numpy-svn at scipy.org Sat Nov 8 01:51:49 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 8 Nov 2008 00:51:49 -0600 (CST) Subject: [Numpy-svn] r5987 - trunk/numpy/linalg/tests Message-ID: <20081108065149.7071F39C226@scipy.org> Author: cdavid Date: 2008-11-08 00:51:42 -0600 (Sat, 08 Nov 2008) New Revision: 5987 Added: trunk/numpy/linalg/tests/test_build.py Log: Start working on test to detect fortran compiler mismatch. Added: trunk/numpy/linalg/tests/test_build.py =================================================================== --- trunk/numpy/linalg/tests/test_build.py 2008-11-07 08:14:06 UTC (rev 5986) +++ trunk/numpy/linalg/tests/test_build.py 2008-11-08 06:51:42 UTC (rev 5987) @@ -0,0 +1,35 @@ +from subprocess import call, PIPE, Popen +import sys +import re + +import numpy as np +from numpy.testing import TestCase + +class FindDependenciesLdd: + def __init__(self): + self.cmd = ['ldd'] + + try: + st = call(self.cmd, stdout=PIPE, stderr=PIPE) + except OSError: + raise RuntimeError("command %s cannot be run" % self.cmd) + + def get_dependencies(self, file): + p = Popen(self.cmd + [file], stdout=PIPE, stderr=PIPE) + stdout, stderr = p.communicate() + if not (p.returncode == 0): + raise RuntimeError("Failed to check dependencies for %s" % libfile) + + return stdout + + def grep_dependencies(self, file, deps): + stdout = self.get_dependencies(file) + + rdeps = dict([(dep, re.compile(dep)) for dep in deps]) + founds = [] + for l in stdout.splitlines(): + for k, v in rdeps.items(): + if v.search(l): + founds.append(k) + + return founds From numpy-svn at scipy.org Sat Nov 8 01:52:06 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 8 Nov 2008 00:52:06 -0600 (CST) Subject: [Numpy-svn] r5988 - trunk/numpy/linalg/tests Message-ID: <20081108065206.F096D39C226@scipy.org> Author: cdavid Date: 2008-11-08 00:51:59 -0600 (Sat, 08 Nov 2008) New Revision: 5988 Modified: trunk/numpy/linalg/tests/test_build.py Log: Detect gfrotran/g77 mismatch. 
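The check added below amounts to running ldd on the compiled lapack_lite extension and looking for both Fortran runtimes at once. Done by hand it is roughly the following (Linux only, since it relies on ldd being available; the printed message is mine):

    from subprocess import Popen, PIPE
    from numpy.linalg import lapack_lite

    # List the shared libraries the extension module was linked against.
    p = Popen(['ldd', lapack_lite.__file__], stdout=PIPE, stderr=PIPE)
    stdout, _ = p.communicate()
    deps = stdout.decode('ascii', 'replace')
    if 'libg2c' in deps and 'libgfortran' in deps:
        print('both g77 and gfortran runtimes are linked in; rebuild with a single compiler')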
Modified: trunk/numpy/linalg/tests/test_build.py =================================================================== --- trunk/numpy/linalg/tests/test_build.py 2008-11-08 06:51:42 UTC (rev 5987) +++ trunk/numpy/linalg/tests/test_build.py 2008-11-08 06:51:59 UTC (rev 5988) @@ -3,7 +3,8 @@ import re import numpy as np -from numpy.testing import TestCase +from numpy.linalg import lapack_lite +from numpy.testing import TestCase, dec class FindDependenciesLdd: def __init__(self): @@ -33,3 +34,14 @@ founds.append(k) return founds + + at dec.skipif(not(sys.platform[:5] == 'linux'), + "Skipping fortran compiler mismatch on non Linux platform") +def test_f77_mismatch(): + f = FindDependenciesLdd() + deps = f.grep_dependencies(lapack_lite.__file__, + ['libg2c', 'libgfortran']) + if len(deps) > 1: + raise AssertionFailure("Both g77 and gfortran runtimes linked in "\ + "lapack_lite ! This is likely to cause random crashes and wrong "\ + "results") From numpy-svn at scipy.org Sat Nov 8 02:01:21 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 8 Nov 2008 01:01:21 -0600 (CST) Subject: [Numpy-svn] r5989 - trunk/numpy/linalg/tests Message-ID: <20081108070121.B603039C226@scipy.org> Author: cdavid Date: 2008-11-08 01:01:16 -0600 (Sat, 08 Nov 2008) New Revision: 5989 Modified: trunk/numpy/linalg/tests/test_build.py Log: Fix fortrab ABI mismatch test. Modified: trunk/numpy/linalg/tests/test_build.py =================================================================== --- trunk/numpy/linalg/tests/test_build.py 2008-11-08 06:51:59 UTC (rev 5988) +++ trunk/numpy/linalg/tests/test_build.py 2008-11-08 07:01:16 UTC (rev 5989) @@ -35,13 +35,14 @@ return founds - at dec.skipif(not(sys.platform[:5] == 'linux'), - "Skipping fortran compiler mismatch on non Linux platform") -def test_f77_mismatch(): - f = FindDependenciesLdd() - deps = f.grep_dependencies(lapack_lite.__file__, - ['libg2c', 'libgfortran']) - if len(deps) > 1: - raise AssertionFailure("Both g77 and gfortran runtimes linked in "\ - "lapack_lite ! This is likely to cause random crashes and wrong "\ - "results") +class TestF77Mismatch(TestCase): + @dec.skipif(not(sys.platform[:5] == 'linux'), + "Skipping fortran compiler mismatch on non Linux platform") + def test_lapack(self): + f = FindDependenciesLdd() + deps = f.grep_dependencies(lapack_lite.__file__, + ['libg2c', 'libgfortran']) + self.failIf(len(deps) > 1, +"""Both g77 and gfortran runtimes linked in lapack_lite ! This is likely to +cause random crashes and wrong results. See numpy INSTALL.txt for more +information.""") From numpy-svn at scipy.org Sat Nov 8 02:07:29 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 8 Nov 2008 01:07:29 -0600 (CST) Subject: [Numpy-svn] r5990 - trunk Message-ID: <20081108070729.C916239C226@scipy.org> Author: cdavid Date: 2008-11-08 01:07:17 -0600 (Sat, 08 Nov 2008) New Revision: 5990 Modified: trunk/INSTALL.txt Log: Add mention of g77 vs gfortran in INSTALL notes. Modified: trunk/INSTALL.txt =================================================================== --- trunk/INSTALL.txt 2008-11-08 07:01:16 UTC (rev 5989) +++ trunk/INSTALL.txt 2008-11-08 07:07:17 UTC (rev 5990) @@ -29,6 +29,36 @@ __ http://www.python.org +Fortran ABI mismatch +==================== + +The two most popular open source fortran compilers are g77 and gfortran. +Unfortunately, they are not ABI compatible, which means that concretely you +should avoid mixing libraries built with one with another. 
In particular, if +your blas/lapack/atlas is built with g77, you *must* use g77 when building +numpy and scipy; on the contrary, if your atlas is built with gfortran, you +*must* build numpy/scipy with gfortran. + +Choosing the fortran compiler +----------------------------- + +To build with g77: + + python setup.py build --fcompiler=gnu + +To build with gfortran: + + python setup.py build --fcompiler=gnu95 + +How to check the ABI of blas/lapack/atlas +----------------------------------------- + +One relatively simple and reliable way to check for the compiler used to build +a library is to use ldd on the library. If libg2c.so is a dependency, this +means that g77 has been used. If libgfortran.so is a a dependency, gfortran has +been used. If both are dependencies, this means both have been used, which is +almost always a very bad idea. + Building with ATLAS support =========================== From numpy-svn at scipy.org Sat Nov 8 02:11:51 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 8 Nov 2008 01:11:51 -0600 (CST) Subject: [Numpy-svn] r5991 - trunk/numpy/linalg/tests Message-ID: <20081108071151.2F25339C226@scipy.org> Author: cdavid Date: 2008-11-08 01:11:33 -0600 (Sat, 08 Nov 2008) New Revision: 5991 Modified: trunk/numpy/linalg/tests/test_build.py Log: Trailing spaces. Modified: trunk/numpy/linalg/tests/test_build.py =================================================================== --- trunk/numpy/linalg/tests/test_build.py 2008-11-08 07:07:17 UTC (rev 5990) +++ trunk/numpy/linalg/tests/test_build.py 2008-11-08 07:11:33 UTC (rev 5991) @@ -36,11 +36,11 @@ return founds class TestF77Mismatch(TestCase): - @dec.skipif(not(sys.platform[:5] == 'linux'), + @dec.skipif(not(sys.platform[:5] == 'linux'), "Skipping fortran compiler mismatch on non Linux platform") def test_lapack(self): f = FindDependenciesLdd() - deps = f.grep_dependencies(lapack_lite.__file__, + deps = f.grep_dependencies(lapack_lite.__file__, ['libg2c', 'libgfortran']) self.failIf(len(deps) > 1, """Both g77 and gfortran runtimes linked in lapack_lite ! This is likely to From numpy-svn at scipy.org Sun Nov 9 18:12:34 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 9 Nov 2008 17:12:34 -0600 (CST) Subject: [Numpy-svn] r5992 - numpy-docs/trunk/source/reference Message-ID: <20081109231234.6116639C0F1@scipy.org> Author: ptvirtan Date: 2008-11-09 17:12:21 -0600 (Sun, 09 Nov 2008) New Revision: 5992 Modified: numpy-docs/trunk/source/reference/ufuncs.rst Log: numpy-docs: group functions by type instead of alphabetical order Modified: numpy-docs/trunk/source/reference/ufuncs.rst =================================================================== --- numpy-docs/trunk/source/reference/ufuncs.rst 2008-11-08 07:11:33 UTC (rev 5991) +++ numpy-docs/trunk/source/reference/ufuncs.rst 2008-11-09 23:12:21 UTC (rev 5992) @@ -393,31 +393,31 @@ .. autosummary:: - absolute add - conj + subtract + multiply divide - exp - expm1 + logaddexp + true_divide floor_divide - fmod - log - log1p - log10 - logaddexp - mod - multiply negative - ones_like power remainder + mod + fmod + absolute rint - reciprocal sign + conj + exp + log + expm1 + log1p + log10 sqrt square - subtract - true_divide + reciprocal + ones_like .. tip:: @@ -437,22 +437,22 @@ .. 
autosummary:: + sin + cos + tan + arcsin arccos - arcsin arctan + arctan2 + hypot + sinh + cosh + tanh arcsinh arccosh arctanh - arctan2 - cos - cosh deg2rad - hypot rad2deg - sin - sinh - tan - tanh Bit-twiddling functions ----------------------- @@ -540,16 +540,16 @@ .. autosummary:: - ceil + isreal iscomplex isfinite isinf isnan - isreal + signbit + modf ldexp - modf + frexp + fmod floor - fmod - frexp - signbit - trunk + ceil + trunc From numpy-svn at scipy.org Sun Nov 9 19:27:26 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 9 Nov 2008 18:27:26 -0600 (CST) Subject: [Numpy-svn] r5993 - in trunk/numpy/core: code_generators include/numpy src Message-ID: <20081110002726.947E339C088@scipy.org> Author: stefan Date: 2008-11-09 18:27:06 -0600 (Sun, 09 Nov 2008) New Revision: 5993 Modified: trunk/numpy/core/code_generators/ufunc_api_order.txt trunk/numpy/core/include/numpy/ufuncobject.h trunk/numpy/core/src/ufuncobject.c Log: Generalised ufuncs patch by Wenjie Fu and Hans-Andreas Engel (see ticket Modified: trunk/numpy/core/code_generators/ufunc_api_order.txt =================================================================== --- trunk/numpy/core/code_generators/ufunc_api_order.txt 2008-11-09 23:12:21 UTC (rev 5992) +++ trunk/numpy/core/code_generators/ufunc_api_order.txt 2008-11-10 00:27:06 UTC (rev 5993) @@ -2,6 +2,7 @@ # here so that the order is set. Append new functions # to the end of the list. PyUFunc_FromFuncAndData +PyUFunc_FromFuncAndDataAndSignature PyUFunc_RegisterLoopForType PyUFunc_GenericFunction PyUFunc_f_f_As_d_d Modified: trunk/numpy/core/include/numpy/ufuncobject.h =================================================================== --- trunk/numpy/core/include/numpy/ufuncobject.h 2008-11-09 23:12:21 UTC (rev 5992) +++ trunk/numpy/core/include/numpy/ufuncobject.h 2008-11-10 00:27:06 UTC (rev 5993) @@ -19,6 +19,20 @@ void *ptr; PyObject *obj; PyObject *userloops; + + /* generalized ufunc */ + int core_enabled; /* 0 for scalar ufunc; 1 for generalized ufunc */ + int core_num_dim_ix; /* number of distinct dimension names in + signature */ + + /* dimension indices of input/output argument k are stored in + core_dim_ixs[core_offsets[k]..core_offsets[k]+core_num_dims[k]-1] */ + int *core_num_dims; /* numbers of core dimensions of each argument */ + int *core_dim_ixs; /* dimension indices in a flatted form; indices + are in the range of [0,core_num_dim_ix) */ + int *core_offsets; /* positions of 1st core dimensions of each + argument in core_dim_ixs */ + char *core_signature; /* signature string for printing purpose */ } PyUFuncObject; #include "arrayobject.h" @@ -122,6 +136,11 @@ int notimplemented; /* The loop caused notimplemented */ int objfunc; /* This loop calls object functions (an inner-loop function with argument types */ + + /* generalized ufunc */ + npy_intp *core_dim_sizes; /* stores sizes of core dimensions; + contains 1 + core_num_dim_ix elements */ + npy_intp *core_strides; /* strides of loop and core dimensions */ } PyUFuncLoopObject; /* Could make this more clever someday */ Modified: trunk/numpy/core/src/ufuncobject.c =================================================================== --- trunk/numpy/core/src/ufuncobject.c 2008-11-09 23:12:21 UTC (rev 5992) +++ trunk/numpy/core/src/ufuncobject.c 2008-11-10 00:27:06 UTC (rev 5993) @@ -795,6 +795,7 @@ #define NOBUFFER_REDUCELOOP 2 #define BUFFER_UFUNCLOOP 3 #define BUFFER_REDUCELOOP 3 +#define SIGNATURE_NOBUFFER_UFUNCLOOP 4 static char @@ -1259,7 +1260,7 @@ } /* Create copies for any arrays 
that are less than loop->bufsize - in total size and are mis-behaved or in need + in total size (or core_enabled) and are mis-behaved or in need of casting. */ @@ -1287,7 +1288,7 @@ } Py_DECREF(atype); } - if (size < loop->bufsize) { + if (size < loop->bufsize || loop->ufunc->core_enabled) { if (!(PyArray_ISBEHAVED_RO(mps[i])) || \ PyArray_TYPE(mps[i]) != arg_types[i]) { ntype = PyArray_DescrFromType(arg_types[i]); @@ -1328,6 +1329,280 @@ #undef _GETATTR_ + +/* Return the position of next non-white-space char in the string +*/ +static int +_next_non_white_space(const char* str, int offset) +{ + int ret = offset; + while (str[ret] == ' ' || str[ret] == '\t') ret++; + return ret; +} + +static int +_is_alpha_underscore(char ch) +{ + return (ch >= 'A' && ch <= 'Z') || (ch >= 'a' && ch <= 'z') || ch == '_'; +} + +static int +_is_alnum_underscore(char ch) +{ + return _is_alpha_underscore(ch) || (ch >= '0' && ch <= '9'); +} + +/* Return the ending position of a variable name +*/ +static int +_get_end_of_name(const char* str, int offset) +{ + int ret = offset; + while (_is_alnum_underscore(str[ret])) ret++; + return ret; +} + +/* Returns 1 if the dimension names pointed by s1 and s2 are the same, + otherwise returns 0. +*/ +static int +_is_same_name(const char* s1, const char* s2) +{ + while (_is_alnum_underscore(*s1) && _is_alnum_underscore(*s2)) { + if (*s1 != *s2) return 0; + s1++; + s2++; + } + return !_is_alnum_underscore(*s1) && !_is_alnum_underscore(*s2); +} + +/* Sets core_num_dim_ix, core_num_dims, core_dim_ixs, core_offsets, + and core_signature in PyUFuncObject "self". Returns 0 unless an + error occured. +*/ +static int +_parse_signature(PyUFuncObject *self, const char *signature) +{ + size_t len; + char const **var_names; + int nd = 0; /* number of dimension of the current argument */ + int cur_arg = 0; /* index into core_num_dims&core_offsets */ + int cur_core_dim = 0; /* index into core_dim_ixs */ + int i = 0; + char *parse_error = NULL; + + if (signature == NULL) { + PyErr_SetString(PyExc_RuntimeError, + "_parse_signature with NULL signature"); + return -1; + } + + len = strlen(signature); + self->core_signature = _pya_malloc(sizeof(char) * (len+1)); + if (self->core_signature) + strcpy(self->core_signature, signature); + + /* Allocate sufficient memory to store pointers to all dimension names */ + var_names = _pya_malloc(sizeof(char const*) * len); + if (var_names == NULL) { + PyErr_NoMemory(); + return -1; + } + + self->core_enabled = 1; + self->core_num_dim_ix = 0; + self->core_num_dims = _pya_malloc(sizeof(int) * self->nargs); + self->core_dim_ixs = _pya_malloc(sizeof(int) * len); /* shrink this later */ + self->core_offsets = _pya_malloc(sizeof(int) * self->nargs); + if (self->core_num_dims == NULL || self->core_dim_ixs == NULL || + self->core_offsets == NULL) { + PyErr_NoMemory(); + goto fail; + } + + i = _next_non_white_space(signature, 0); + + while (signature[i] != '\0') { /* loop over input/output arguments */ + if (cur_arg == self->nin) { + /* expect "->" */ + if (signature[i] != '-' || signature[i+1] != '>') { + parse_error = "expect '->'"; + goto fail; + } + i = _next_non_white_space(signature, i+2); + } + + /* parse core dimensions of one argument, e.g. 
"()", "(i)", or + "(i,j)" */ + if (signature[i] != '(') { + parse_error = "expect '('"; + goto fail; + } + i = _next_non_white_space(signature, i+1); + while (signature[i] != ')') { /* loop over core dimensions */ + int j = 0; + if (!_is_alpha_underscore(signature[i])) { + parse_error = "expect dimension name"; + goto fail; + } + while (j < self->core_num_dim_ix) { + if (_is_same_name(signature+i, var_names[j])) break; + j++; + } + if (j >= self->core_num_dim_ix) { + var_names[j] = signature+i; + self->core_num_dim_ix++; + } + self->core_dim_ixs[cur_core_dim] = j; + cur_core_dim++; + nd++; + i = _get_end_of_name(signature, i); + i = _next_non_white_space(signature, i); + if (signature[i] != ',' && signature[i] != ')') { + parse_error = "expect ',' or ')'"; + goto fail; + } + if (signature[i] == ',') + { + i = _next_non_white_space(signature, i+1); + if (signature[i] == ')') { + parse_error = "',' must not be followed by ')'"; + goto fail; + } + } + } + self->core_num_dims[cur_arg] = nd; + self->core_offsets[cur_arg] = cur_core_dim-nd; + cur_arg++; + nd = 0; + i = _next_non_white_space(signature, i+1); + + if (cur_arg != self->nin && cur_arg != self->nargs) { + /* The list of input arguments (or output arguments) was + only read partially */ + if (signature[i] != ',') { + parse_error = "expect ','"; + goto fail; + } + i = _next_non_white_space(signature, i+1); + } + } + if (cur_arg != self->nargs) { + parse_error = "incomplete signature: not all arguments found"; + goto fail; + } + self->core_dim_ixs = _pya_realloc(self->core_dim_ixs, + sizeof(int) * cur_core_dim); + /* check for trivial core-signature, e.g. "(),()->()" */ + if (cur_core_dim == 0) + self->core_enabled = 0; + _pya_free((void*)var_names); + return 0; +fail: + _pya_free((void*)var_names); + if (parse_error) { + char *buf = _pya_malloc(sizeof(char) * (len + 200)); + if (buf) { + sprintf(buf, "%s at position %d in \"%s\"", + parse_error, i, signature); + PyErr_SetString(PyExc_ValueError, signature); + _pya_free(buf); + } + else { + PyErr_NoMemory(); + } + } + return -1; +} + +/* Concatenate the loop and core dimensions of + PyArrayMultiIterObject's iarg-th argument, to recover a full + dimension array (used for output arguments). +*/ +static npy_intp* +_compute_output_dims(PyUFuncLoopObject *loop, int iarg, + int *out_nd, npy_intp *tmp_dims) +{ + int i; + PyUFuncObject *ufunc = loop->ufunc; + if (ufunc->core_enabled == 0) { + /* case of ufunc with trivial core-signature */ + *out_nd = loop->nd; + return loop->dimensions; + } + + *out_nd = loop->nd + ufunc->core_num_dims[iarg]; + if (*out_nd > NPY_MAXARGS) { + PyErr_SetString(PyExc_ValueError, + "dimension of output variable exceeds limit"); + return NULL; + } + + /* copy loop dimensions */ + memcpy(tmp_dims, loop->dimensions, sizeof(npy_intp) * loop->nd); + + /* copy core dimension */ + for (i = 0; i < ufunc->core_num_dims[iarg]; i++) + tmp_dims[loop->nd + i] = loop->core_dim_sizes[1 + + ufunc->core_dim_ixs[ufunc->core_offsets[iarg]+i]]; + return tmp_dims; +} + +/* Check and set core_dim_sizes and core_strides for the i-th argument. +*/ +static int +_compute_dimension_size(PyUFuncLoopObject *loop, PyArrayObject **mps, int i) +{ + PyUFuncObject *ufunc = loop->ufunc; + int j = ufunc->core_offsets[i]; + int k = PyArray_NDIM(mps[i]) - ufunc->core_num_dims[i]; + int ind; + for (ind = 0; ind < ufunc->core_num_dims[i]; ind++, j++, k++) { + npy_intp dim = k<0 ? 
1 : PyArray_DIM(mps[i], k); + /* First element of core_dim_sizes will be used for looping */ + int dim_ix = ufunc->core_dim_ixs[j] + 1; + if (loop->core_dim_sizes[dim_ix] == 1) { + /* broadcast core dimension */ + loop->core_dim_sizes[dim_ix] = dim; + } + else if (dim != 1 && dim != loop->core_dim_sizes[dim_ix]) { + PyErr_SetString(PyExc_ValueError, + "core dimensions mismatch"); + return -1; + } + /* First ufunc->nargs elements will be used for looping */ + loop->core_strides[ufunc->nargs + j] = + dim == 1 ? 0 : PyArray_STRIDE(mps[i], k); + } + return 0; +} + +/* Return a view of array "ap" with "core_nd" dimensions cut from tail. */ +static PyArrayObject * +_trunc_coredim(PyArrayObject *ap, int core_nd) +{ + PyArrayObject *ret; + int nd = ap->nd - core_nd; + if (nd < 0) nd = 0; + + /* The following code is basically taken from PyArray_Transpose */ + Py_INCREF(ap->descr); /* NewFromDescr will steal this reference */ + ret = (PyArrayObject *) + PyArray_NewFromDescr(ap->ob_type, ap->descr, + nd, ap->dimensions, + ap->strides, ap->data, ap->flags, + (PyObject *)ap); + if (ret == NULL) return NULL; + + /* point at true owner of memory: */ + ret->base = (PyObject *)ap; + Py_INCREF(ap); + + PyArray_UpdateFlags(ret, CONTIGUOUS | FORTRAN); + + return ret; +} + static Py_ssize_t construct_arrays(PyUFuncLoopObject *loop, PyObject *args, PyArrayObject **mps, PyObject *typetup) @@ -1345,6 +1620,10 @@ int flexible = 0; int object = 0; + npy_intp temp_dims[NPY_MAXDIMS]; + npy_intp *out_dims; + int out_nd; + /* Check number of arguments */ nargs = PyTuple_Size(args); if ((nargs < self->nin) || (nargs > self->nargs)) { @@ -1451,6 +1730,23 @@ return -1; } + /* Only use loop dimensions when constructing Iterator: + * temporarily replace mps[i] (will be recovered below). + */ + if (self->core_enabled) { + for (i = 0; i < self->nin; i++) { + PyArrayObject *ao; + + if (_compute_dimension_size(loop, mps, i) < 0) + return -1; + + ao = _trunc_coredim(mps[i], self->core_num_dims[i]); + if (ao == NULL) + return -1; + mps[i] = ao; + } + } + /* Create Iterators for the Inputs */ for(i = 0; i < self->nin; i++) { loop->iters[i] = (PyArrayIterObject *) \ @@ -1460,6 +1756,16 @@ } } + + /* Recover mps[i]. 
*/ + if (self->core_enabled) { + for (i = 0; i < self->nin; i++) { + PyArrayObject *ao = mps[i]; + mps[i] = (PyArrayObject *)mps[i]->base; + Py_DECREF(ao); + } + } + /* Broadcast the result */ loop->numiter = self->nin; if (PyArray_Broadcast((PyArrayMultiIterObject *)loop) < 0) { @@ -1491,9 +1797,18 @@ return -1; } } - if (mps[i]->nd != loop->nd || + + + if (self->core_enabled) { + if (_compute_dimension_size(loop, mps, i) < 0) + return -1; + } + out_dims = _compute_output_dims(loop, i, &out_nd, temp_dims); + if (!out_dims) return -1; + + if (mps[i]->nd != out_nd || !PyArray_CompareLists(mps[i]->dimensions, - loop->dimensions, loop->nd)) { + out_dims, out_nd)) { PyErr_SetString(PyExc_ValueError, "invalid return array shape"); Py_DECREF(mps[i]); @@ -1514,9 +1829,12 @@ PyArray_Descr *ntype; if (mps[i] == NULL) { + out_dims = _compute_output_dims(loop, i, &out_nd, temp_dims); + if (!out_dims) return -1; + mps[i] = (PyArrayObject *)PyArray_New(subtype, - loop->nd, - loop->dimensions, + out_nd, + out_dims, arg_types[i], NULL, NULL, 0, 0, NULL); @@ -1543,7 +1861,7 @@ /* still not the same -- or will we have to use buffers?*/ if (mps[i]->descr->type_num != arg_types[i] || !PyArray_ISBEHAVED_RO(mps[i])) { - if (loop->size < loop->bufsize) { + if (loop->size < loop->bufsize || self->core_enabled) { PyObject *new; /* * Copy the array to a temporary copy @@ -1563,14 +1881,35 @@ } } + if (self->core_enabled) { + PyArrayObject *ao; + + /* computer for all output arguments, and set strides in "loop" */ + if (_compute_dimension_size(loop, mps, i) < 0) + return -1; + + ao = _trunc_coredim(mps[i], self->core_num_dims[i]); + if (ao == NULL) + return -1; + /* Temporarily modify mps[i] for constructing iterator. */ + mps[i] = ao; + } + loop->iters[i] = (PyArrayIterObject *) \ PyArray_IterNew((PyObject *)mps[i]); if (loop->iters[i] == NULL) { return -1; } + + /* Recover mps[i]. */ + if (self->core_enabled) { + PyArrayObject *ao = mps[i]; + mps[i] = (PyArrayObject *)mps[i]->base; + Py_DECREF(ao); + } + } - /* * If any of different type, or misaligned or swapped * then must use buffers @@ -1585,10 +1924,19 @@ return nargs; } + if (self->core_enabled) { + loop->meth = SIGNATURE_NOBUFFER_UFUNCLOOP; + } + for(i = 0; i < self->nargs; i++) { loop->needbuffer[i] = 0; if (arg_types[i] != mps[i]->descr->type_num || !PyArray_ISBEHAVED_RO(mps[i])) { + if (self->core_enabled) { + PyErr_SetString(PyExc_RuntimeError, + "never reached; copy should have been made"); + return -1; + } loop->meth = BUFFER_UFUNCLOOP; loop->needbuffer[i] = 1; } @@ -1598,6 +1946,13 @@ } } + + if (self->core_enabled && loop->obj) { + PyErr_SetString(PyExc_TypeError, + "Object type not allowed in ufunc with signature"); + return -1; + } + if (loop->meth == NO_UFUNCLOOP) { loop->meth = ONE_UFUNCLOOP; @@ -1625,8 +1980,11 @@ loop->numiter = self->nargs; - /* Fill in steps */ - if (loop->meth != ONE_UFUNCLOOP) { + /* Fill in steps */ + if (loop->meth == SIGNATURE_NOBUFFER_UFUNCLOOP && loop->nd == 0) { + /* Use default core_strides */ + } + else if (loop->meth != ONE_UFUNCLOOP) { int ldim; intp minsum; intp maxdim; @@ -1695,6 +2053,16 @@ } /* + * Set looping part of core_dim_sizes and core_strides. + */ + if (loop->meth == SIGNATURE_NOBUFFER_UFUNCLOOP) { + loop->core_dim_sizes[0] = maxdim; + for (i = 0; i < self->nargs; i++) { + loop->core_strides[i] = loop->steps[i]; + } + } + + /* * fix up steps where we will be copying data to * buffers and calculate the ninnerloops and leftover * values -- if step size is already zero that is not changed... 
@@ -1710,8 +2078,8 @@ } } } - else { - /* uniformly-strided case ONE_UFUNCLOOP */ + else if (loop->meth == ONE_UFUNCLOOP) { + /* uniformly-strided case */ for(i = 0; i < self->nargs; i++) { if (PyArray_SIZE(mps[i]) == 1) loop->steps[i] = 0; @@ -1850,6 +2218,10 @@ int i; if (self->ufunc != NULL) { + if (self->core_dim_sizes) + _pya_free(self->core_dim_sizes); + if (self->core_strides) + _pya_free(self->core_strides); for(i = 0; i < self->ufunc->nargs; i++) Py_XDECREF(self->iters[i]); if (self->buffer[0]) { @@ -1890,7 +2262,24 @@ loop->errobj = NULL; loop->notimplemented = 0; loop->first = 1; + loop->core_dim_sizes = NULL; + loop->core_strides = NULL; + if (self->core_enabled) { + int num_dim_ix = 1 + self->core_num_dim_ix; + int nstrides = self->nargs + self->core_offsets[self->nargs-1] + + self->core_num_dims[self->nargs-1]; + loop->core_dim_sizes = _pya_malloc(sizeof(npy_intp) * num_dim_ix); + loop->core_strides = _pya_malloc(sizeof(npy_intp) * nstrides); + if (loop->core_dim_sizes == NULL || loop->core_strides == NULL) { + PyErr_NoMemory(); + goto fail; + } + memset(loop->core_strides, 0, sizeof(npy_intp) * nstrides); + for (i = 0; i < num_dim_ix; i++) + loop->core_dim_sizes[i] = 1; + } + name = self->name ? self->name : ""; /* @@ -2036,6 +2425,11 @@ ufuncloop_dealloc(loop); return -2; } + if (self->core_enabled && loop->meth != SIGNATURE_NOBUFFER_UFUNCLOOP) { + PyErr_SetString(PyExc_RuntimeError, + "illegal loop method for ufunc with signature"); + goto fail; + } NPY_LOOP_BEGIN_THREADS; switch(loop->meth) { @@ -2058,7 +2452,8 @@ * right type but not contiguous. -- Almost as fast. */ /*fprintf(stderr, "NOBUFFER...%d\n", loop->size);*/ - while (loop->index < loop->size) { + + while (loop->index < loop->size) { for(i = 0; i < self->nargs; i++) { loop->bufptr[i] = loop->iters[i]->dataptr; } @@ -2074,6 +2469,23 @@ } break; + case SIGNATURE_NOBUFFER_UFUNCLOOP: + while (loop->index < loop->size) { + for(i = 0; i < self->nargs; i++) { + loop->bufptr[i] = loop->iters[i]->dataptr; + } + loop->function((char **)loop->bufptr, loop->core_dim_sizes, + loop->core_strides, loop->funcdata); + UFUNC_CHECK_ERROR(loop); + + /* Adjust loop pointers */ + for(i = 0; i < self->nargs; i++) { + PyArray_ITER_NEXT(loop->iters[i]); + } + loop->index++; + } + break; + case BUFFER_UFUNCLOOP: { PyArray_CopySwapNFunc *copyswapn[NPY_MAXARGS]; PyArrayIterObject **iters=loop->iters; @@ -2400,6 +2812,12 @@ /* Reduce type is the type requested of the input during reduction */ + if (self->core_enabled) { + PyErr_Format(PyExc_RuntimeError, + "construct_reduce not allowed on ufunc with signature"); + return NULL; + } + nd = (*arr)->nd; arg_types[0] = otype; arg_types[1] = otype; @@ -3118,6 +3536,12 @@ return NULL; } + if (self->core_enabled) { + PyErr_Format(PyExc_RuntimeError, + "Reduction not defined on ufunc with signature"); + return NULL; + } + if (self->nin != 2) { PyErr_Format(PyExc_ValueError, "%s only supported for binary functions", @@ -3631,6 +4055,14 @@ self->ntypes = 1; self->check_return = 0; + + /* generalized ufunc */ + self->core_enabled = 0; + self->core_num_dim_ix = 0; + self->core_num_dims = NULL; + self->core_dim_ixs = NULL; + self->core_offsets = NULL; + self->core_signature = NULL; pyname = PyObject_GetAttrString(function, "__name__"); if (pyname) @@ -3725,6 +4157,18 @@ int nin, int nout, int identity, char *name, char *doc, int check_return) { + return PyUFunc_FromFuncAndDataAndSignature(func, data, types, ntypes, + nin, nout, identity, name, doc, check_return, NULL); +} + +/*UFUNC_API*/ +static 
PyObject * +PyUFunc_FromFuncAndDataAndSignature(PyUFuncGenericFunction *func, void **data, + char *types, int ntypes, + int nin, int nout, int identity, + char *name, char *doc, + int check_return, const char *signature) +{ PyUFuncObject *self; self = _pya_malloc(sizeof(PyUFuncObject)); @@ -3750,6 +4194,18 @@ if (doc == NULL) self->doc = "NULL"; else self->doc = doc; + + /* generalized ufunc */ + self->core_enabled = 0; + self->core_num_dim_ix = 0; + self->core_num_dims = NULL; + self->core_dim_ixs = NULL; + self->core_offsets = NULL; + self->core_signature = NULL; + if (signature != NULL) { + if (_parse_signature(self, signature) != 0) + return NULL; + } return (PyObject *)self; } @@ -3915,6 +4371,10 @@ static void ufunc_dealloc(PyUFuncObject *self) { + if (self->core_num_dims) _pya_free(self->core_num_dims); + if (self->core_dim_ixs) _pya_free(self->core_dim_ixs); + if (self->core_offsets) _pya_free(self->core_offsets); + if (self->core_signature) _pya_free(self->core_signature); if (self->ptr) _pya_free(self->ptr); Py_XDECREF(self->userloops); Py_XDECREF(self->obj); @@ -3949,6 +4409,13 @@ PyObject *new_args, *tmp; PyObject *shape1, *shape2, *newshape; + if (self->core_enabled) { + PyErr_Format(PyExc_TypeError, + "method outer is not allowed in ufunc with non-trivial"\ + " signature"); + return NULL; + } + if(self->nin != 2) { PyErr_SetString(PyExc_ValueError, "outer product only supported "\ @@ -4186,6 +4653,13 @@ return Py_None; } +static PyObject * +ufunc_get_signature(PyUFuncObject *self) +{ + if (!self->core_enabled) + Py_RETURN_NONE; + return PyString_FromString(self->core_signature); +} #undef _typecharfromnum @@ -4201,6 +4675,7 @@ {"types", (getter)ufunc_get_types, NULL, "return a list with types grouped input->output", NULL}, {"__name__", (getter)ufunc_get_name, NULL, "function name", NULL}, {"identity", (getter)ufunc_get_identity, NULL, "identity value", NULL}, + {"signature",(getter)ufunc_get_signature,NULL, "signature"}, {NULL, NULL, NULL, NULL, NULL}, /* Sentinel */ }; From numpy-svn at scipy.org Sun Nov 9 19:28:24 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 9 Nov 2008 18:28:24 -0600 (CST) Subject: [Numpy-svn] r5994 - in trunk/numpy/core: . src tests Message-ID: <20081110002824.6019139C088@scipy.org> Author: stefan Date: 2008-11-09 18:28:04 -0600 (Sun, 09 Nov 2008) New Revision: 5994 Added: trunk/numpy/core/src/umath_tests.c.src Modified: trunk/numpy/core/setup.py trunk/numpy/core/tests/test_ufunc.py Log: Add tests for generalized ufuncs. 
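For reference, a minimal pure-NumPy sketch (not part of this patch; the _reference name is only for illustration) of the behaviour the "(i),(i)->()" signature promises for the inner1d test ufunc defined below: the trailing (core) dimension of each input is reduced, and the remaining loop dimensions broadcast against each other like an ordinary elementwise ufunc.

    import numpy as np

    def inner1d_reference(a, b):
        # Reduce the trailing core dimension; the leading loop dimensions
        # broadcast exactly as for a plain elementwise ufunc.
        a = np.asarray(a)
        b = np.asarray(b)
        return np.sum(a * b, axis=-1)

    # (2,1,2) against (1,2,2): the loop dimensions broadcast to (2,2).
    a = np.arange(4).reshape((2, 1, 2))
    b = np.arange(4).reshape((1, 2, 2))
    assert inner1d_reference(a, b).shape == (2, 2)
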
Modified: trunk/numpy/core/setup.py =================================================================== --- trunk/numpy/core/setup.py 2008-11-10 00:27:06 UTC (rev 5993) +++ trunk/numpy/core/setup.py 2008-11-10 00:28:04 UTC (rev 5994) @@ -399,7 +399,15 @@ extra_info = blas_info ) + config.add_extension('umath_tests', + sources = [join('src','umath_tests.c.src'), + ], + depends = [join('blasdot','cblas.h'),] + deps, + include_dirs = ['blasdot'], + extra_info = blas_info + ) + config.add_data_dir('tests') config.add_data_dir('tests/data') Added: trunk/numpy/core/src/umath_tests.c.src =================================================================== --- trunk/numpy/core/src/umath_tests.c.src 2008-11-10 00:27:06 UTC (rev 5993) +++ trunk/numpy/core/src/umath_tests.c.src 2008-11-10 00:28:04 UTC (rev 5994) @@ -0,0 +1,417 @@ +/* -*- c -*- */ + +/* + ***************************************************************************** + ** INCLUDES ** + ***************************************************************************** + */ +#include "Python.h" +#include "numpy/arrayobject.h" +#include "numpy/ufuncobject.h" + +#ifndef CBLAS_HEADER +#define CBLAS_HEADER "cblas.h" +#endif +#include CBLAS_HEADER + +/* + ***************************************************************************** + ** BASICS ** + ***************************************************************************** + */ + +typedef npy_intp intp; + +#define INIT_OUTER_LOOP_1 \ + intp dN = *dimensions++; \ + intp N_; \ + intp s0 = *steps++; + +#define INIT_OUTER_LOOP_2 \ + INIT_OUTER_LOOP_1 \ + intp s1 = *steps++; + +#define INIT_OUTER_LOOP_3 \ + INIT_OUTER_LOOP_2 \ + intp s2 = *steps++; + +#define INIT_OUTER_LOOP_4 \ + INIT_OUTER_LOOP_3 \ + intp s3 = *steps++; + +#define BEGIN_OUTER_LOOP_3 \ + for (N_ = 0; N_ < dN; N_++, args[0] += s0, args[1] += s1, args[2] += s2) { + +#define BEGIN_OUTER_LOOP_4 \ + for (N_ = 0; N_ < dN; N_++, args[0] += s0, args[1] += s1, args[2] += s2, args[3] += s3) { + +#define END_OUTER_LOOP } + + +/* + ***************************************************************************** + ** UFUNC LOOPS ** + ***************************************************************************** + */ + +char *inner1d_signature = "(i),(i)->()"; + +/**begin repeat + + #TYPE=LONG,DOUBLE# + #typ=npy_long, npy_double# +*/ + +/* + * This implements the function + * out[n] = sum_i { in1[n, i] * in2[n, i] }. + */ +static void + at TYPE@_inner1d(char **args, intp *dimensions, intp *steps, void *func) +{ + INIT_OUTER_LOOP_3 + intp di = dimensions[0]; + intp i; + intp is1=steps[0], is2=steps[1]; + BEGIN_OUTER_LOOP_3 + char *ip1=args[0], *ip2=args[1], *op=args[2]; + @typ@ sum = 0; + for (i = 0; i < di; i++) { + sum += (*(@typ@ *)ip1) * (*(@typ@ *)ip2); + ip1 += is1; + ip2 += is2; + } + *(@typ@ *)op = sum; + END_OUTER_LOOP +} + +/**end repeat**/ + +char *innerwt_signature = "(i),(i),(i)->()"; + +/**begin repeat + + #TYPE=LONG,DOUBLE# + #typ=npy_long, npy_double# +*/ + + +/* + * This implements the function + * out[n] = sum_i { in1[n, i] * in2[n, i] * in3[n, i] }. 
+ */ + +static void + at TYPE@_innerwt(char **args, intp *dimensions, intp *steps, void *func) +{ + INIT_OUTER_LOOP_4 + intp di = dimensions[0]; + intp i; + intp is1=steps[0], is2=steps[1], is3=steps[2]; + BEGIN_OUTER_LOOP_4 + char *ip1=args[0], *ip2=args[1], *ip3=args[2], *op=args[3]; + @typ@ sum = 0; + for (i = 0; i < di; i++) { + sum += (*(@typ@ *)ip1) * (*(@typ@ *)ip2) * (*(@typ@ *)ip3); + ip1 += is1; + ip2 += is2; + ip3 += is3; + } + *(@typ@ *)op = sum; + END_OUTER_LOOP +} + +/**end repeat**/ + +char *matrix_multiply_signature = "(m,n),(n,p)->(m,p)"; + +/**begin repeat + + #TYPE=LONG# + #typ=npy_long# +*/ + +/* + * This implements the function + * out[k, m, p] = sum_n { in1[k, m, n] * in2[k, n, p] }. + */ + + +static void + at TYPE@_matrix_multiply(char **args, intp *dimensions, intp *steps, void *func) +{ + /* no BLAS is available */ + INIT_OUTER_LOOP_3 + intp dm = dimensions[0]; + intp dn = dimensions[1]; + intp dp = dimensions[2]; + intp m,n,p; + intp is1_m=steps[0], is1_n=steps[1], is2_n=steps[2], is2_p=steps[3], + os_m=steps[4], os_p=steps[5]; + intp ib1_n = is1_n*dn; + intp ib2_n = is2_n*dn; + intp ib2_p = is2_p*dp; + intp ob_p = os_p *dp; + BEGIN_OUTER_LOOP_3 + char *ip1=args[0], *ip2=args[1], *op=args[2]; + for (m = 0; m < dm; m++) { + for (n = 0; n < dn; n++) { + register @typ@ val1 = (*(@typ@ *)ip1); + for (p = 0; p < dp; p++) { + if (n == 0) *(@typ@ *)op = 0; + *(@typ@ *)op += val1 * (*(@typ@ *)ip2); + ip2 += is2_p; + op += os_p; + } + ip2 -= ib2_p; + op -= ob_p; + ip1 += is1_n; + ip2 += is2_n; + } + ip1 -= ib1_n; + ip2 -= ib2_n; + ip1 += is1_m; + op += os_m; + } + END_OUTER_LOOP +} + +/**end repeat**/ + +/**begin repeat + + #TYPE=FLOAT,DOUBLE# + #B_TYPE=s, d# + #typ=npy_float, npy_double# +*/ + +static void + at TYPE@_matrix_multiply(char **args, intp *dimensions, intp *steps, void *func) +{ + INIT_OUTER_LOOP_3 + intp dm = dimensions[0]; + intp dn = dimensions[1]; + intp dp = dimensions[2]; + intp m,n,p; + intp is1_m=steps[0], is1_n=steps[1], is2_n=steps[2], is2_p=steps[3], + os_m=steps[4], os_p=steps[5]; + intp ib1_n = is1_n*dn; + intp ib2_n = is2_n*dn; + intp ib2_p = is2_p*dp; + intp ob_p = os_p *dp; + + enum CBLAS_ORDER Order = CblasRowMajor; + enum CBLAS_TRANSPOSE Trans1, Trans2; + int M, N, L; + int lda, ldb, ldc; + int typeSize = sizeof(@typ@); + + /* + * BLAS requires each array to have contiguous memory layout on one + * dimension and a positive stride for the other dimension. + */ + if (is1_m <= 0 || is1_n <= 0 || is2_n <= 0 || is2_p <= 0) + goto no_blas; + + if (is1_n == typeSize && is1_m % typeSize == 0) { + Trans1 = CblasNoTrans; + lda = is1_m / typeSize; + } + else if (is1_m == typeSize && is1_n % typeSize == 0) { + Trans1 = CblasTrans; + lda = is1_n / typeSize; + } + else { + goto no_blas; + } + + if (is2_p == typeSize && is2_n % typeSize == 0) { + Trans2 = CblasNoTrans; + ldb = is2_n / typeSize; + } + else if (is2_n == typeSize && is2_p % typeSize == 0) { + Trans2 = CblasTrans; + ldb = is2_p / typeSize; + } + else { + goto no_blas; + } + + M = dm; + N = dp; + L = dn; + if (os_p == typeSize && os_m % typeSize == 0) { + ldc = os_m / typeSize; + BEGIN_OUTER_LOOP_3 + cblas_ at B_TYPE@gemm(Order, Trans1, Trans2, + M, N, L, + 1.0, (@typ@*)args[0], lda, + (@typ@*)args[1], ldb, + 0.0, (@typ@*)args[2], ldc); + END_OUTER_LOOP + return; + } + else if (os_m == typeSize && os_p % typeSize == 0) { + enum CBLAS_TRANSPOSE Trans1r, Trans2r; + ldc = os_p / typeSize; + Trans1r = (Trans1 == CblasTrans) ? CblasNoTrans : CblasTrans; + Trans2r = (Trans2 == CblasTrans) ? 
CblasNoTrans : CblasTrans; + BEGIN_OUTER_LOOP_3 + /* compute C^T = B^T * A^T */ + cblas_ at B_TYPE@gemm(Order, Trans2r, Trans1r, + N, M, L, + 1.0, (@typ@*)args[1], ldb, + (@typ@*)args[0], lda, + 0.0, (@typ@*)args[2], ldc); + END_OUTER_LOOP + return; + } + + +no_blas: + BEGIN_OUTER_LOOP_3 + char *ip1=args[0], *ip2=args[1], *op=args[2]; + for (m = 0; m < dm; m++) { + for (n = 0; n < dn; n++) { + register @typ@ val1 = (*(@typ@ *)ip1); + for (p = 0; p < dp; p++) { + if (n == 0) *(@typ@ *)op = 0; + *(@typ@ *)op += val1 * (*(@typ@ *)ip2); + ip2 += is2_p; + op += os_p; + } + ip2 -= ib2_p; + op -= ob_p; + ip1 += is1_n; + ip2 += is2_n; + } + ip1 -= ib1_n; + ip2 -= ib2_n; + ip1 += is1_m; + op += os_m; + } + END_OUTER_LOOP +} + +/**end repeat**/ + +/* The following lines were generated using a slightly modified + version of code_generators/generate_umath.py and adding these + lines to defdict: + +defdict = { +'inner1d' : + Ufunc(2, 1, None_, + r'''inner on the last dimension and broadcast on the rest \n" + " \"(i),(i)->()\" \n''', + TD('ld'), + ), +'innerwt' : + Ufunc(3, 1, None_, + r'''inner1d with a weight argument \n" + " \"(i),(i),(i)->()\" \n''', + TD('ld'), + ), +} + +*/ + +static PyUFuncGenericFunction inner1d_functions[] = { LONG_inner1d, DOUBLE_inner1d }; +static void * inner1d_data[] = { (void *)NULL, (void *)NULL }; +static char inner1d_signatures[] = { PyArray_LONG, PyArray_LONG, PyArray_LONG, PyArray_DOUBLE, PyArray_DOUBLE, PyArray_DOUBLE }; +static PyUFuncGenericFunction innerwt_functions[] = { LONG_innerwt, DOUBLE_innerwt }; +static void * innerwt_data[] = { (void *)NULL, (void *)NULL }; +static char innerwt_signatures[] = { PyArray_LONG, PyArray_LONG, PyArray_LONG, PyArray_LONG, PyArray_DOUBLE, PyArray_DOUBLE, PyArray_DOUBLE, PyArray_DOUBLE }; +static PyUFuncGenericFunction matrix_multiply_functions[] = { LONG_matrix_multiply, FLOAT_matrix_multiply, DOUBLE_matrix_multiply }; +static void *matrix_multiply_data[] = { (void *)NULL, (void *)NULL, (void *)NULL }; +static char matrix_multiply_signatures[] = { PyArray_LONG, PyArray_LONG, PyArray_LONG, PyArray_FLOAT, PyArray_FLOAT, PyArray_FLOAT, PyArray_DOUBLE, PyArray_DOUBLE, PyArray_DOUBLE }; + +static void +addUfuncs(PyObject *dictionary) { + PyObject *f; + + f = PyUFunc_FromFuncAndDataAndSignature(inner1d_functions, inner1d_data, inner1d_signatures, 2, + 2, 1, PyUFunc_None, "inner1d", + "inner on the last dimension and broadcast on the rest \n"\ + " \"(i),(i)->()\" \n", + 0, inner1d_signature); + PyDict_SetItemString(dictionary, "inner1d", f); + Py_DECREF(f); + f = PyUFunc_FromFuncAndDataAndSignature(innerwt_functions, innerwt_data, innerwt_signatures, 2, + 3, 1, PyUFunc_None, "innerwt", + "inner1d with a weight argument \n"\ + " \"(i),(i),(i)->()\" \n", + 0, innerwt_signature); + PyDict_SetItemString(dictionary, "innerwt", f); + Py_DECREF(f); + f = PyUFunc_FromFuncAndDataAndSignature(matrix_multiply_functions, + matrix_multiply_data, matrix_multiply_signatures, + 3, 2, 1, PyUFunc_None, "matrix_multiply", + "matrix multiplication on last two dimensions \n"\ + " \"(m,n),(n,p)->(m,p)\" \n", + 0, matrix_multiply_signature); + PyDict_SetItemString(dictionary, "matrix_multiply", f); + Py_DECREF(f); +} + +/* + End of auto-generated code. 
+*/ + + + +static PyObject * +UMath_Tests_test_signature(PyObject *dummy, PyObject *args) +{ + int nin, nout; + PyObject *signature; + PyObject *f; + int core_enabled; + + if (!PyArg_ParseTuple(args, "iiO", &nin, &nout, &signature)) return NULL; + f = PyUFunc_FromFuncAndDataAndSignature(NULL, NULL, NULL, + 0, nin, nout, PyUFunc_None, "no name", + "doc:none", + 1, PyString_AS_STRING(signature)); + if (f == NULL) return NULL; + core_enabled = ((PyUFuncObject*)f)->core_enabled; + return Py_BuildValue("i", core_enabled); +} + +static PyMethodDef UMath_TestsMethods[] = { + {"test_signature", UMath_Tests_test_signature, METH_VARARGS, + "Test signature parsing of ufunc. \n" + "Arguments: nin nout signature \n" + "If fails, it returns NULL. Otherwise it will returns 0 for scalar ufunc " + "and 1 for generalized ufunc. \n", + }, + {NULL, NULL, 0, NULL} /* Sentinel */ +}; + +PyMODINIT_FUNC +initumath_tests(void) +{ + PyObject *m; + PyObject *d; + PyObject *version; + + m = Py_InitModule("umath_tests", UMath_TestsMethods); + if (m == NULL) return; + + import_array(); + import_ufunc(); + + d = PyModule_GetDict(m); + + version = PyString_FromString("0.1"); + PyDict_SetItemString(d, "__version__", version); + Py_DECREF(version); + + /* Load the ufunc operators into the module's namespace */ + addUfuncs(d); + + if (PyErr_Occurred()) { + PyErr_SetString(PyExc_RuntimeError, + "cannot load umath_tests module."); + } +} Modified: trunk/numpy/core/tests/test_ufunc.py =================================================================== --- trunk/numpy/core/tests/test_ufunc.py 2008-11-10 00:27:06 UTC (rev 5993) +++ trunk/numpy/core/tests/test_ufunc.py 2008-11-10 00:28:04 UTC (rev 5994) @@ -1,5 +1,6 @@ import numpy as np from numpy.testing import * +import numpy.core.umath_tests as umt class TestUfunc(TestCase): def test_reduceat_shifting_sum(self) : @@ -230,6 +231,193 @@ """ pass + def test_signature(self): + # the arguments to test_signature are: nin, nout, core_signature + # pass + assert_equal(umt.test_signature(2,1,"(i),(i)->()"), 1) + # pass. empty core signature; treat as plain ufunc (with trivial core) + assert_equal(umt.test_signature(2,1,"(),()->()"), 0) + + # in the following calls, a ValueError should be raised because + # of error in core signature + # error: extra parenthesis + msg = "core_sig: extra parenthesis" + try: + ret = umt.test_signature(2,1,"((i)),(i)->()") + assert_equal(ret, None, err_msg=msg) + except ValueError: None + # error: parenthesis matching + msg = "core_sig: parenthesis matching" + try: + ret = umt.test_signature(2,1,"(i),)i(->()") + assert_equal(ret, None, err_msg=msg) + except ValueError: None + # error: incomplete signature. letters outside of parenthesis are ignored + msg = "core_sig: incomplete signature" + try: + ret = umt.test_signature(2,1,"(i),->()") + assert_equal(ret, None, err_msg=msg) + except ValueError: None + # error: incomplete signature. 
2 output arguments are specified + msg = "core_sig: incomplete signature" + try: + ret = umt.test_signature(2,2,"(i),(i)->()") + assert_equal(ret, None, err_msg=msg) + except ValueError: None + + # more complicated names for variables + assert_equal(umt.test_signature(2,1,"(i1,i2),(J_1)->(_kAB)"),1) + + def test_get_signature(self): + assert_equal(umt.inner1d.signature, "(i),(i)->()") + + def test_inner1d(self): + a = np.arange(6).reshape((2,3)) + assert_array_equal(umt.inner1d(a,a), np.sum(a*a,axis=-1)) + + def test_broadcast(self): + msg = "broadcast" + a = np.arange(4).reshape((2,1,2)) + b = np.arange(4).reshape((1,2,2)) + assert_array_equal(umt.inner1d(a,b), np.sum(a*b,axis=-1), err_msg=msg) + msg = "extend & broadcast loop dimensions" + b = np.arange(4).reshape((2,2)) + assert_array_equal(umt.inner1d(a,b), np.sum(a*b,axis=-1), err_msg=msg) + msg = "broadcast in core dimensions" + a = np.arange(8).reshape((4,2)) + b = np.arange(4).reshape((4,1)) + assert_array_equal(umt.inner1d(a,b), np.sum(a*b,axis=-1), err_msg=msg) + msg = "extend & broadcast core and loop dimensions" + a = np.arange(8).reshape((4,2)) + b = np.array(7) + assert_array_equal(umt.inner1d(a,b), np.sum(a*b,axis=-1), err_msg=msg) + msg = "broadcast should fail" + a = np.arange(2).reshape((2,1,1)) + b = np.arange(3).reshape((3,1,1)) + try: + ret = umt.inner1d(a,b) + assert_equal(ret, None, err_msg=msg) + except ValueError: None + + def test_type_cast(self): + msg = "type cast" + a = np.arange(6, dtype='short').reshape((2,3)) + assert_array_equal(umt.inner1d(a,a), np.sum(a*a,axis=-1), err_msg=msg) + msg = "type cast on one argument" + a = np.arange(6).reshape((2,3)) + b = a+0.1 + assert_array_almost_equal(umt.inner1d(a,a), np.sum(a*a,axis=-1), + err_msg=msg) + + def test_endian(self): + msg = "big endian" + a = np.arange(6, dtype='>i4').reshape((2,3)) + assert_array_equal(umt.inner1d(a,a), np.sum(a*a,axis=-1), err_msg=msg) + msg = "little endian" + a = np.arange(6, dtype=' Author: charris Date: 2008-11-09 22:49:05 -0600 (Sun, 09 Nov 2008) New Revision: 5995 Modified: trunk/numpy/core/code_generators/ufunc_api_order.txt Log: Move new generalized ufunc function to end of api list. Modified: trunk/numpy/core/code_generators/ufunc_api_order.txt =================================================================== --- trunk/numpy/core/code_generators/ufunc_api_order.txt 2008-11-10 00:28:04 UTC (rev 5994) +++ trunk/numpy/core/code_generators/ufunc_api_order.txt 2008-11-10 04:49:05 UTC (rev 5995) @@ -2,7 +2,6 @@ # here so that the order is set. Append new functions # to the end of the list. PyUFunc_FromFuncAndData -PyUFunc_FromFuncAndDataAndSignature PyUFunc_RegisterLoopForType PyUFunc_GenericFunction PyUFunc_f_f_As_d_d @@ -32,3 +31,4 @@ PyUFunc_getfperr PyUFunc_handlefperr PyUFunc_ReplaceLoopBySignature +PyUFunc_FromFuncAndDataAndSignature From numpy-svn at scipy.org Mon Nov 10 19:00:14 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 10 Nov 2008 18:00:14 -0600 (CST) Subject: [Numpy-svn] r5996 - in trunk/numpy: core core/code_generators core/src lib Message-ID: <20081111000014.9266D39C05F@scipy.org> Author: charris Date: 2008-11-10 18:00:05 -0600 (Mon, 10 Nov 2008) New Revision: 5996 Modified: trunk/numpy/core/code_generators/generate_umath.py trunk/numpy/core/setup.py trunk/numpy/core/src/math_c99.inc.src trunk/numpy/lib/scimath.py Log: Add log2 and exp2. Fix scimath to use log2. Complex versions of these functions need to be added. MPL also defines log2 with slightly different properties. 
For instance, it returns an integer value for log2(2). Modified: trunk/numpy/core/code_generators/generate_umath.py =================================================================== --- trunk/numpy/core/code_generators/generate_umath.py 2008-11-10 04:49:05 UTC (rev 5995) +++ trunk/numpy/core/code_generators/generate_umath.py 2008-11-11 00:00:05 UTC (rev 5996) @@ -467,6 +467,12 @@ TD(inexact, f='exp'), TD(M, f='exp'), ), +'exp2' : + Ufunc(1, 1, None, + '', + TD(flts, f='exp2'), + TD(M, f='exp2'), + ), 'expm1' : Ufunc(1, 1, None, docstrings.get('numpy.core.umath.expm1'), @@ -479,6 +485,12 @@ TD(inexact, f='log'), TD(M, f='log'), ), +'log2' : + Ufunc(1, 1, None, + '', + TD(flts, f='log2'), + TD(M, f='log2'), + ), 'log10' : Ufunc(1, 1, None, docstrings.get('numpy.core.umath.log10'), Modified: trunk/numpy/core/setup.py =================================================================== --- trunk/numpy/core/setup.py 2008-11-10 04:49:05 UTC (rev 5995) +++ trunk/numpy/core/setup.py 2008-11-11 00:00:05 UTC (rev 5996) @@ -75,11 +75,11 @@ " available (the list is %s)." % str(mandatory_funcs)) # Standard functions which may not be available and for which we have a - # replacement implementation + # replacement implementation. Note that some of these are C99 functions. # XXX: we do not test for hypot because python checks for it (HAVE_HYPOT in # python.h... I wish they would clean their public headers someday) optional_stdfuncs = ["expm1", "log1p", "acosh", "asinh", "atanh", - "rint", "trunc"] + "rint", "trunc", "exp2", "log2"] check_funcs(optional_stdfuncs) @@ -87,7 +87,8 @@ c99_funcs = ["sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs", "floor", "ceil", "rint", "trunc", "sqrt", "log10", "log", "exp", "expm1", "asin", "acos", "atan", "asinh", "acosh", "atanh", - "hypot", "atan2", "pow", "fmod", "modf", 'frexp', 'ldexp'] + "hypot", "atan2", "pow", "fmod", "modf", 'frexp', 'ldexp', + "exp2", "log2"] for prec in ['l', 'f']: fns = [f + prec for f in c99_funcs] Modified: trunk/numpy/core/src/math_c99.inc.src =================================================================== --- trunk/numpy/core/src/math_c99.inc.src 2008-11-10 04:49:05 UTC (rev 5995) +++ trunk/numpy/core/src/math_c99.inc.src 2008-11-11 00:00:05 UTC (rev 5996) @@ -120,16 +120,28 @@ #ifndef HAVE_TRUNC double trunc(double x) { - if (x < 0) { - return ceil(x); - } - else { - return floor(x); - } + return x < 0 ? 
ceil(x) : floor(x); +} +#endif +#ifndef HAVE_EXP2 +#define LOG2 0.69314718055994530943 +double trunc(double x) +{ + return exp(LOG2*x) } +#undef LOG2 #endif +#ifndef HAVE_LOG2 +#define INVLOG2 1.4426950408889634074 +double trunc(double x) +{ + return INVLOG2*log(x) +} +#undef INVLOG2 +#endif + /* ***************************************************************************** ** IEEE 754 FPU HANDLING ** @@ -196,9 +208,9 @@ /**begin repeat1 * #kind = sin,cos,tan,sinh,cosh,tanh,fabs,floor,ceil,rint,trunc,sqrt,log10, - * log,exp,expm1,asin,acos,atan,asinh,acosh,atanh,log1p# + * log,exp,expm1,asin,acos,atan,asinh,acosh,atanh,log1p,exp2,log2# * #KIND = SIN,COS,TAN,SINH,COSH,TANH,FABS,FLOOR,CEIL,RINT,TRUNC,SQRT,LOG10, - * LOG,EXP,EXPM1,ASIN,ACOS,ATAN,ASINH,ACOSH,ATANH,LOG1P# + * LOG,EXP,EXPM1,ASIN,ACOS,ATAN,ASINH,ACOSH,ATANH,LOG1P,EXP2,LOG2# */ #ifndef HAVE_ at KIND@@C@ #ifdef @kind@@c@ Modified: trunk/numpy/lib/scimath.py =================================================================== --- trunk/numpy/lib/scimath.py 2008-11-10 04:49:05 UTC (rev 5995) +++ trunk/numpy/lib/scimath.py 2008-11-11 00:00:05 UTC (rev 5996) @@ -328,7 +328,7 @@ array([ 2.+4.5324j, 3.+4.5324j, 3.+0.j ]) """ x = _fix_real_lt_zero(x) - return nx.log(x)/_ln2 + return nx.log2(x) def power(x, p): """Return x**p. From numpy-svn at scipy.org Mon Nov 10 20:42:06 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 10 Nov 2008 19:42:06 -0600 (CST) Subject: [Numpy-svn] r5997 - trunk/numpy/core/src Message-ID: <20081111014206.383BD39C1EC@scipy.org> Author: charris Date: 2008-11-10 19:41:59 -0600 (Mon, 10 Nov 2008) New Revision: 5997 Modified: trunk/numpy/core/src/math_c99.inc.src Log: Fix exp2, log2 fallback functions. Modified: trunk/numpy/core/src/math_c99.inc.src =================================================================== --- trunk/numpy/core/src/math_c99.inc.src 2008-11-11 00:00:05 UTC (rev 5996) +++ trunk/numpy/core/src/math_c99.inc.src 2008-11-11 01:41:59 UTC (rev 5997) @@ -126,18 +126,18 @@ #ifndef HAVE_EXP2 #define LOG2 0.69314718055994530943 -double trunc(double x) +double exp2(double x) { - return exp(LOG2*x) + return exp(LOG2*x); } #undef LOG2 #endif #ifndef HAVE_LOG2 #define INVLOG2 1.4426950408889634074 -double trunc(double x) +double log2(double x) { - return INVLOG2*log(x) + return INVLOG2*log(x); } #undef INVLOG2 #endif From numpy-svn at scipy.org Mon Nov 10 21:58:06 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 10 Nov 2008 20:58:06 -0600 (CST) Subject: [Numpy-svn] r5998 - trunk/numpy/core/tests Message-ID: <20081111025806.D52D139C0EA@scipy.org> Author: charris Date: 2008-11-10 20:57:58 -0600 (Mon, 10 Nov 2008) New Revision: 5998 Modified: trunk/numpy/core/tests/test_umath.py Log: Add tests for log2, exp2, and logaddexp. 
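As a reminder of the identity these tests exercise, here is a pure-Python sketch (not part of the patch): logaddexp(x, y) is log(exp(x) + exp(y)), and the C inner loop in umathmodule.c.src (shown as context in the r6000 diff further below) rearranges it around the larger argument with log1p so the result stays finite even when exp would overflow, which is what the *_range tests with values near +/-1e6 are meant to check.

    import numpy as np

    def logaddexp_reference(x, y):
        # log(exp(x) + exp(y)), rewritten so that exp is only ever taken
        # of a non-positive number.
        hi = np.maximum(x, y)
        lo = np.minimum(x, y)
        return hi + np.log1p(np.exp(lo - hi))

    assert np.allclose(logaddexp_reference(np.log(2.0), np.log(3.0)), np.log(5.0))
    # the naive form would overflow here; the rearranged form stays finite
    assert np.isfinite(logaddexp_reference(1000200.0, 1000000.0))
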
Modified: trunk/numpy/core/tests/test_umath.py =================================================================== --- trunk/numpy/core/tests/test_umath.py 2008-11-11 01:41:59 UTC (rev 5997) +++ trunk/numpy/core/tests/test_umath.py 2008-11-11 02:57:58 UTC (rev 5998) @@ -35,6 +35,54 @@ assert_almost_equal(x**14, [-76443+16124j, 23161315+58317492j, 5583548873 + 2465133864j]) +class TestLog2(TestCase): + def test_log2_values(self) : + x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] + y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + for dt in ['f','d','g'] : + xf = np.array(x, dtype=dt) + assert_almost_equal(np.log2(xf), y) + +class TestExp2(TestCase): + def test_exp2_values(self) : + x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] + y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + for dt in ['f','d','g'] : + yf = np.array(y, dtype=dt) + assert_almost_equal(np.exp2(yf), x) + +class TestLogAddExp(TestCase): + def test_logaddexp_values(self) : + x = [1, 2, 3, 4, 5] + y = [5, 4, 3, 2, 1] + z = [6, 6, 6, 6, 6] + for dt in ['f','d','g'] : + logxf = np.log(np.array(x, dtype=dt)) + logyf = np.log(np.array(y, dtype=dt)) + logzf = np.log(np.array(z, dtype=dt)) + assert_almost_equal(np.logaddexp(logxf, logyf), logzf) + + def test_logaddexp_range(self) : + x = [1000000, -1000000, 2000000, -2000000] + y = [2000000, -2000000, 1000000, -1000000] + z = [2000000, -1000000, 2000000, -1000000] + for dt in ['f','d','g'] : + logxf = np.array(x, dtype=dt) + logyf = np.array(y, dtype=dt) + logzf = np.array(z, dtype=dt)) + assert_almost_equal(np.logaddexp(logxf, logyf), logzf) + +class TestLogAddExp(TestCase): + def test_logaddexp(self) : + x = [1, 2, 3, 4, 5] + y = [5, 4, 3, 2, 1] + z = [6, 6, 6, 6, 6] + for dt in ['f','d','g'] : + logxf = np.log2(np.array(x, dtype=dt)) + logyf = np.log2(np.array(y, dtype=dt)) + logzf = np.log2(np.array(z, dtype=dt)) + assert_almost_equal(np.logaddexp2(logxf, logyf), logzf) + class TestLog1p(TestCase): def test_log1p(self): assert_almost_equal(ncu.log1p(0.2), ncu.log(1.2)) From numpy-svn at scipy.org Mon Nov 10 22:26:55 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 10 Nov 2008 21:26:55 -0600 (CST) Subject: [Numpy-svn] r5999 - trunk/numpy/core/tests Message-ID: <20081111032655.1620C39C0EA@scipy.org> Author: jarrod.millman Date: 2008-11-10 21:26:53 -0600 (Mon, 10 Nov 2008) New Revision: 5999 Modified: trunk/numpy/core/tests/test_umath.py Log: typo Modified: trunk/numpy/core/tests/test_umath.py =================================================================== --- trunk/numpy/core/tests/test_umath.py 2008-11-11 02:57:58 UTC (rev 5998) +++ trunk/numpy/core/tests/test_umath.py 2008-11-11 03:26:53 UTC (rev 5999) @@ -69,7 +69,7 @@ for dt in ['f','d','g'] : logxf = np.array(x, dtype=dt) logyf = np.array(y, dtype=dt) - logzf = np.array(z, dtype=dt)) + logzf = np.array(z, dtype=dt) assert_almost_equal(np.logaddexp(logxf, logyf), logzf) class TestLogAddExp(TestCase): From numpy-svn at scipy.org Mon Nov 10 23:42:34 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 10 Nov 2008 22:42:34 -0600 (CST) Subject: [Numpy-svn] r6000 - in trunk/numpy/core: code_generators src tests Message-ID: <20081111044234.330C739C05F@scipy.org> Author: charris Date: 2008-11-10 22:42:25 -0600 (Mon, 10 Nov 2008) New Revision: 6000 Modified: trunk/numpy/core/code_generators/generate_umath.py trunk/numpy/core/src/umathmodule.c.src trunk/numpy/core/tests/test_umath.py Log: Add logaddexp2. Add tests for log, exp, logaddexp2. 
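The new log2_1p helper in the umathmodule.c.src hunk below uses the standard trick for evaluating log(1+x) accurately when 1+x rounds to exactly 1. A pure-Python sketch of that helper and of the resulting logaddexp2 (not part of the patch; the _reference name is only for illustration):

    import numpy as np

    def log2_1p(x):
        # log2(1 + x); for tiny x the direct form returns 0 because
        # 1 + x rounds to 1, so rewrite it via u = fl(1 + x).
        u = 1.0 + x
        if u == 1.0:
            return x / np.log(2.0)      # x * INVLOG2
        return np.log2(u) * x / (u - 1.0)

    def logaddexp2_reference(x, y):
        # log2(2**x + 2**y), arranged around the larger argument as in
        # the C loop so that only 2**(non-positive) is evaluated.
        hi, lo = max(x, y), min(x, y)
        return hi + log2_1p(2.0 ** (lo - hi))

    assert np.allclose(logaddexp2_reference(np.log2(2.0), np.log2(3.0)),
                       np.log2(5.0))
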
Modified: trunk/numpy/core/code_generators/generate_umath.py =================================================================== --- trunk/numpy/core/code_generators/generate_umath.py 2008-11-11 03:26:53 UTC (rev 5999) +++ trunk/numpy/core/code_generators/generate_umath.py 2008-11-11 04:42:25 UTC (rev 6000) @@ -333,6 +333,11 @@ "", TD(flts, f="logaddexp") ), +'logaddexp2' : + Ufunc(2, 1, None, + "", + TD(flts, f="logaddexp2") + ), 'bitwise_and' : Ufunc(2, 1, One, docstrings.get('numpy.core.umath.bitwise_and'), Modified: trunk/numpy/core/src/umathmodule.c.src =================================================================== --- trunk/numpy/core/src/umathmodule.c.src 2008-11-11 03:26:53 UTC (rev 5999) +++ trunk/numpy/core/src/umathmodule.c.src 2008-11-11 04:42:25 UTC (rev 6000) @@ -35,7 +35,11 @@ * #C = F, ,L# */ +/* fixme: need more precision for LOG2 and INVLOG2 */ + #define PI 3.14159265358979323846264338328 at c@ +#define LOG2 0.69314718055994530943 at c@ +#define INVLOG2 1.4426950408889634074 at c@ #define degrees at c@ rad2deg at c@ #define radians at c@ deg2rad at c@ @@ -50,6 +54,30 @@ } static @type@ +log2_1p at c@(@type@ x) +{ + @type@ u = 1 + x; + if (u == 1) { + return INVLOG2*x; + } else { + return log2 at c@(u) * x / (u - 1); + } +} + +static @type@ +exp2_1m at c@(@type@ x) +{ + @type@ u = exp at c@(x); + if (u == 1.0) { + return LOG2*x; + } else if (u - 1 == -1) { + return -LOG2; + } else { + return (u - 1) * x/log2 at c@(u); + } +} + +static @type@ logaddexp at c@(@type@ x, @type@ y) { const @type@ tmp = x - y; @@ -61,7 +89,21 @@ } } +static @type@ +logaddexp2 at c@(@type@ x, @type@ y) +{ + const @type@ tmp = x - y; + if (tmp > 0) { + return x + log2_1p at c@(exp2 at c@(-tmp)); + } + else { + return y + log2_1p at c@(exp2 at c@(tmp)); + } +} + #undef PI +#undef LOG2 +#undef INVLOG2 /**end repeat**/ Modified: trunk/numpy/core/tests/test_umath.py =================================================================== --- trunk/numpy/core/tests/test_umath.py 2008-11-11 03:26:53 UTC (rev 5999) +++ trunk/numpy/core/tests/test_umath.py 2008-11-11 04:42:25 UTC (rev 6000) @@ -41,28 +41,31 @@ y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] for dt in ['f','d','g'] : xf = np.array(x, dtype=dt) - assert_almost_equal(np.log2(xf), y) + yf = np.array(y, dtype=dt) + assert_almost_equal(np.log2(xf), yf) class TestExp2(TestCase): def test_exp2_values(self) : x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] for dt in ['f','d','g'] : + xf = np.array(x, dtype=dt) yf = np.array(y, dtype=dt) - assert_almost_equal(np.exp2(yf), x) + assert_almost_equal(np.exp2(yf), xf) -class TestLogAddExp(TestCase): - def test_logaddexp_values(self) : +class TestLogAddExp2(TestCase): + # Need test for intermediate precisions + def test_logaddexp2_values(self) : x = [1, 2, 3, 4, 5] y = [5, 4, 3, 2, 1] z = [6, 6, 6, 6, 6] - for dt in ['f','d','g'] : - logxf = np.log(np.array(x, dtype=dt)) - logyf = np.log(np.array(y, dtype=dt)) - logzf = np.log(np.array(z, dtype=dt)) - assert_almost_equal(np.logaddexp(logxf, logyf), logzf) + for dt, dec in zip(['f','d','g'],[6, 15, 15]) : + xf = np.log2(np.array(x, dtype=dt)) + yf = np.log2(np.array(y, dtype=dt)) + zf = np.log2(np.array(z, dtype=dt)) + assert_almost_equal(np.logaddexp2(xf, yf), zf, decimal=dec) - def test_logaddexp_range(self) : + def test_logaddexp2_range(self) : x = [1000000, -1000000, 2000000, -2000000] y = [2000000, -2000000, 1000000, -1000000] z = [2000000, -1000000, 2000000, -1000000] @@ -72,16 +75,46 @@ logzf = np.array(z, dtype=dt) 
assert_almost_equal(np.logaddexp(logxf, logyf), logzf) +class TestLog(TestCase): + def test_log_values(self) : + x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] + y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + for dt in ['f','d','g'] : + log2_ = 0.69314718055994530943 + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt)*log2_ + assert_almost_equal(np.log(xf), yf) + +class TestExp(TestCase): + def test_exp_values(self) : + x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] + y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + for dt in ['f','d','g'] : + log2_ = 0.69314718055994530943 + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt)*log2_ + assert_almost_equal(np.exp(yf), xf) + class TestLogAddExp(TestCase): - def test_logaddexp(self) : + def test_logaddexp_values(self) : x = [1, 2, 3, 4, 5] y = [5, 4, 3, 2, 1] z = [6, 6, 6, 6, 6] + for dt, dec in zip(['f','d','g'],[6, 15, 15]) : + xf = np.log(np.array(x, dtype=dt)) + yf = np.log(np.array(y, dtype=dt)) + zf = np.log(np.array(z, dtype=dt)) + assert_almost_equal(np.logaddexp(xf, yf), zf, decimal=dec) + + def test_logaddexp_range(self) : + x = [1000000, -1000000, 2000000, -2000000] + y = [2000000, -2000000, 1000000, -1000000] + z = [2000000, -1000000, 2000000, -1000000] for dt in ['f','d','g'] : - logxf = np.log2(np.array(x, dtype=dt)) - logyf = np.log2(np.array(y, dtype=dt)) - logzf = np.log2(np.array(z, dtype=dt)) - assert_almost_equal(np.logaddexp2(logxf, logyf), logzf) + logxf = np.array(x, dtype=dt) + logyf = np.array(y, dtype=dt) + logzf = np.array(z, dtype=dt) + assert_almost_equal(np.logaddexp(logxf, logyf), logzf) class TestLog1p(TestCase): def test_log1p(self): From numpy-svn at scipy.org Tue Nov 11 01:52:19 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 11 Nov 2008 00:52:19 -0600 (CST) Subject: [Numpy-svn] r6001 - trunk/numpy/core/tests Message-ID: <20081111065219.D50AA39C05F@scipy.org> Author: charris Date: 2008-11-11 00:52:15 -0600 (Tue, 11 Nov 2008) New Revision: 6001 Modified: trunk/numpy/core/tests/test_ufunc.py Log: Comment out generalized ufunc test until linkage problem is resolved. Modified: trunk/numpy/core/tests/test_ufunc.py =================================================================== --- trunk/numpy/core/tests/test_ufunc.py 2008-11-11 04:42:25 UTC (rev 6000) +++ trunk/numpy/core/tests/test_ufunc.py 2008-11-11 06:52:15 UTC (rev 6001) @@ -1,6 +1,6 @@ import numpy as np from numpy.testing import * -import numpy.core.umath_tests as umt +#import numpy.core.umath_tests as umt class TestUfunc(TestCase): def test_reduceat_shifting_sum(self) : @@ -231,193 +231,193 @@ """ pass - def test_signature(self): - # the arguments to test_signature are: nin, nout, core_signature - # pass - assert_equal(umt.test_signature(2,1,"(i),(i)->()"), 1) - # pass. empty core signature; treat as plain ufunc (with trivial core) - assert_equal(umt.test_signature(2,1,"(),()->()"), 0) +# def test_signature(self): +# # the arguments to test_signature are: nin, nout, core_signature +# # pass +# assert_equal(umt.test_signature(2,1,"(i),(i)->()"), 1) +# +# # pass. 
empty core signature; treat as plain ufunc (with trivial core) +# assert_equal(umt.test_signature(2,1,"(),()->()"), 0) +# +# # in the following calls, a ValueError should be raised because +# # of error in core signature +# # error: extra parenthesis +# msg = "core_sig: extra parenthesis" +# try: +# ret = umt.test_signature(2,1,"((i)),(i)->()") +# assert_equal(ret, None, err_msg=msg) +# except ValueError: None +# # error: parenthesis matching +# msg = "core_sig: parenthesis matching" +# try: +# ret = umt.test_signature(2,1,"(i),)i(->()") +# assert_equal(ret, None, err_msg=msg) +# except ValueError: None +# # error: incomplete signature. letters outside of parenthesis are ignored +# msg = "core_sig: incomplete signature" +# try: +# ret = umt.test_signature(2,1,"(i),->()") +# assert_equal(ret, None, err_msg=msg) +# except ValueError: None +# # error: incomplete signature. 2 output arguments are specified +# msg = "core_sig: incomplete signature" +# try: +# ret = umt.test_signature(2,2,"(i),(i)->()") +# assert_equal(ret, None, err_msg=msg) +# except ValueError: None +# +# # more complicated names for variables +# assert_equal(umt.test_signature(2,1,"(i1,i2),(J_1)->(_kAB)"),1) +# +# def test_get_signature(self): +# assert_equal(umt.inner1d.signature, "(i),(i)->()") +# +# def test_inner1d(self): +# a = np.arange(6).reshape((2,3)) +# assert_array_equal(umt.inner1d(a,a), np.sum(a*a,axis=-1)) +# +# def test_broadcast(self): +# msg = "broadcast" +# a = np.arange(4).reshape((2,1,2)) +# b = np.arange(4).reshape((1,2,2)) +# assert_array_equal(umt.inner1d(a,b), np.sum(a*b,axis=-1), err_msg=msg) +# msg = "extend & broadcast loop dimensions" +# b = np.arange(4).reshape((2,2)) +# assert_array_equal(umt.inner1d(a,b), np.sum(a*b,axis=-1), err_msg=msg) +# msg = "broadcast in core dimensions" +# a = np.arange(8).reshape((4,2)) +# b = np.arange(4).reshape((4,1)) +# assert_array_equal(umt.inner1d(a,b), np.sum(a*b,axis=-1), err_msg=msg) +# msg = "extend & broadcast core and loop dimensions" +# a = np.arange(8).reshape((4,2)) +# b = np.array(7) +# assert_array_equal(umt.inner1d(a,b), np.sum(a*b,axis=-1), err_msg=msg) +# msg = "broadcast should fail" +# a = np.arange(2).reshape((2,1,1)) +# b = np.arange(3).reshape((3,1,1)) +# try: +# ret = umt.inner1d(a,b) +# assert_equal(ret, None, err_msg=msg) +# except ValueError: None +# +# def test_type_cast(self): +# msg = "type cast" +# a = np.arange(6, dtype='short').reshape((2,3)) +# assert_array_equal(umt.inner1d(a,a), np.sum(a*a,axis=-1), err_msg=msg) +# msg = "type cast on one argument" +# a = np.arange(6).reshape((2,3)) +# b = a+0.1 +# assert_array_almost_equal(umt.inner1d(a,a), np.sum(a*a,axis=-1), +# err_msg=msg) +# +# def test_endian(self): +# msg = "big endian" +# a = np.arange(6, dtype='>i4').reshape((2,3)) +# assert_array_equal(umt.inner1d(a,a), np.sum(a*a,axis=-1), err_msg=msg) +# msg = "little endian" +# a = np.arange(6, dtype='()") - assert_equal(ret, None, err_msg=msg) - except ValueError: None - # error: parenthesis matching - msg = "core_sig: parenthesis matching" - try: - ret = umt.test_signature(2,1,"(i),)i(->()") - assert_equal(ret, None, err_msg=msg) - except ValueError: None - # error: incomplete signature. letters outside of parenthesis are ignored - msg = "core_sig: incomplete signature" - try: - ret = umt.test_signature(2,1,"(i),->()") - assert_equal(ret, None, err_msg=msg) - except ValueError: None - # error: incomplete signature. 
2 output arguments are specified - msg = "core_sig: incomplete signature" - try: - ret = umt.test_signature(2,2,"(i),(i)->()") - assert_equal(ret, None, err_msg=msg) - except ValueError: None - - # more complicated names for variables - assert_equal(umt.test_signature(2,1,"(i1,i2),(J_1)->(_kAB)"),1) - - def test_get_signature(self): - assert_equal(umt.inner1d.signature, "(i),(i)->()") - - def test_inner1d(self): - a = np.arange(6).reshape((2,3)) - assert_array_equal(umt.inner1d(a,a), np.sum(a*a,axis=-1)) - - def test_broadcast(self): - msg = "broadcast" - a = np.arange(4).reshape((2,1,2)) - b = np.arange(4).reshape((1,2,2)) - assert_array_equal(umt.inner1d(a,b), np.sum(a*b,axis=-1), err_msg=msg) - msg = "extend & broadcast loop dimensions" - b = np.arange(4).reshape((2,2)) - assert_array_equal(umt.inner1d(a,b), np.sum(a*b,axis=-1), err_msg=msg) - msg = "broadcast in core dimensions" - a = np.arange(8).reshape((4,2)) - b = np.arange(4).reshape((4,1)) - assert_array_equal(umt.inner1d(a,b), np.sum(a*b,axis=-1), err_msg=msg) - msg = "extend & broadcast core and loop dimensions" - a = np.arange(8).reshape((4,2)) - b = np.array(7) - assert_array_equal(umt.inner1d(a,b), np.sum(a*b,axis=-1), err_msg=msg) - msg = "broadcast should fail" - a = np.arange(2).reshape((2,1,1)) - b = np.arange(3).reshape((3,1,1)) - try: - ret = umt.inner1d(a,b) - assert_equal(ret, None, err_msg=msg) - except ValueError: None - - def test_type_cast(self): - msg = "type cast" - a = np.arange(6, dtype='short').reshape((2,3)) - assert_array_equal(umt.inner1d(a,a), np.sum(a*a,axis=-1), err_msg=msg) - msg = "type cast on one argument" - a = np.arange(6).reshape((2,3)) - b = a+0.1 - assert_array_almost_equal(umt.inner1d(a,a), np.sum(a*a,axis=-1), - err_msg=msg) - - def test_endian(self): - msg = "big endian" - a = np.arange(6, dtype='>i4').reshape((2,3)) - assert_array_equal(umt.inner1d(a,a), np.sum(a*a,axis=-1), err_msg=msg) - msg = "little endian" - a = np.arange(6, dtype=' Author: charris Date: 2008-11-11 01:34:25 -0600 (Tue, 11 Nov 2008) New Revision: 6002 Modified: trunk/numpy/core/tests/test_umath.py Log: Test to see what problem with logaddexp is on some machines. Modified: trunk/numpy/core/tests/test_umath.py =================================================================== --- trunk/numpy/core/tests/test_umath.py 2008-11-11 06:52:15 UTC (rev 6001) +++ trunk/numpy/core/tests/test_umath.py 2008-11-11 07:34:25 UTC (rev 6002) @@ -53,7 +53,7 @@ yf = np.array(y, dtype=dt) assert_almost_equal(np.exp2(yf), xf) -class TestLogAddExp2(TestCase): +class TestLogAddExp2(object): # Need test for intermediate precisions def test_logaddexp2_values(self) : x = [1, 2, 3, 4, 5] @@ -66,9 +66,9 @@ assert_almost_equal(np.logaddexp2(xf, yf), zf, decimal=dec) def test_logaddexp2_range(self) : - x = [1000000, -1000000, 2000000, -2000000] - y = [2000000, -2000000, 1000000, -1000000] - z = [2000000, -1000000, 2000000, -1000000] + x = [1000000., -1000000., 2000000., -2000000.] + y = [2000000., -2000000., 1000000., -1000000.] + z = [2000000., -1000000., 2000000., -1000000.] 
for dt in ['f','d','g'] : logxf = np.array(x, dtype=dt) logyf = np.array(y, dtype=dt) @@ -95,7 +95,7 @@ yf = np.array(y, dtype=dt)*log2_ assert_almost_equal(np.exp(yf), xf) -class TestLogAddExp(TestCase): +class TestLogAddExp(object): def test_logaddexp_values(self) : x = [1, 2, 3, 4, 5] y = [5, 4, 3, 2, 1] @@ -107,9 +107,9 @@ assert_almost_equal(np.logaddexp(xf, yf), zf, decimal=dec) def test_logaddexp_range(self) : - x = [1000000, -1000000, 2000000, -2000000] - y = [2000000, -2000000, 1000000, -1000000] - z = [2000000, -1000000, 2000000, -1000000] + x = [1000000., -1000000., 2000000., -2000000.] + y = [2000000., -2000000., 1000000., -1000000.] + z = [2000000., -1000000., 2000000., -1000000.] for dt in ['f','d','g'] : logxf = np.array(x, dtype=dt) logyf = np.array(y, dtype=dt) From numpy-svn at scipy.org Tue Nov 11 02:55:49 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 11 Nov 2008 01:55:49 -0600 (CST) Subject: [Numpy-svn] r6003 - trunk/numpy/core/tests Message-ID: <20081111075549.2B94B39C05F@scipy.org> Author: charris Date: 2008-11-11 01:55:43 -0600 (Tue, 11 Nov 2008) New Revision: 6003 Modified: trunk/numpy/core/tests/test_umath.py Log: Make logaddexp range test less severe. Modified: trunk/numpy/core/tests/test_umath.py =================================================================== --- trunk/numpy/core/tests/test_umath.py 2008-11-11 07:34:25 UTC (rev 6002) +++ trunk/numpy/core/tests/test_umath.py 2008-11-11 07:55:43 UTC (rev 6003) @@ -66,9 +66,9 @@ assert_almost_equal(np.logaddexp2(xf, yf), zf, decimal=dec) def test_logaddexp2_range(self) : - x = [1000000., -1000000., 2000000., -2000000.] - y = [2000000., -2000000., 1000000., -1000000.] - z = [2000000., -1000000., 2000000., -1000000.] + x = [1000000., -1000000., 1000050., -1000050.] + y = [1000050., -1000050., 1000000., -1000000.] + z = [1000050., -1000000., 1000050., -1000000.] for dt in ['f','d','g'] : logxf = np.array(x, dtype=dt) logyf = np.array(y, dtype=dt) @@ -107,9 +107,9 @@ assert_almost_equal(np.logaddexp(xf, yf), zf, decimal=dec) def test_logaddexp_range(self) : - x = [1000000., -1000000., 2000000., -2000000.] - y = [2000000., -2000000., 1000000., -1000000.] - z = [2000000., -1000000., 2000000., -1000000.] + x = [1000000., -1000000., 1000050., -1000050.] + y = [1000050., -1000050., 1000000., -1000000.] + z = [1000050., -1000000., 1000050., -1000000.] for dt in ['f','d','g'] : logxf = np.array(x, dtype=dt) logyf = np.array(y, dtype=dt) From numpy-svn at scipy.org Tue Nov 11 02:55:58 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 11 Nov 2008 01:55:58 -0600 (CST) Subject: [Numpy-svn] r6004 - trunk/numpy/core/tests Message-ID: <20081111075558.3F7BA39C088@scipy.org> Author: charris Date: 2008-11-11 01:55:54 -0600 (Tue, 11 Nov 2008) New Revision: 6004 Modified: trunk/numpy/core/tests/test_umath.py Log: Make logaddexp just a bit more severe. Modified: trunk/numpy/core/tests/test_umath.py =================================================================== --- trunk/numpy/core/tests/test_umath.py 2008-11-11 07:55:43 UTC (rev 6003) +++ trunk/numpy/core/tests/test_umath.py 2008-11-11 07:55:54 UTC (rev 6004) @@ -66,9 +66,9 @@ assert_almost_equal(np.logaddexp2(xf, yf), zf, decimal=dec) def test_logaddexp2_range(self) : - x = [1000000., -1000000., 1000050., -1000050.] - y = [1000050., -1000050., 1000000., -1000000.] - z = [1000050., -1000000., 1000050., -1000000.] + x = [1000000., -1000000., 1000200., -1000200.] + y = [1000200., -1000200., 1000000., -1000000.] 
+ z = [1000200., -1000000., 1000200., -1000000.] for dt in ['f','d','g'] : logxf = np.array(x, dtype=dt) logyf = np.array(y, dtype=dt) @@ -107,9 +107,9 @@ assert_almost_equal(np.logaddexp(xf, yf), zf, decimal=dec) def test_logaddexp_range(self) : - x = [1000000., -1000000., 1000050., -1000050.] - y = [1000050., -1000050., 1000000., -1000000.] - z = [1000050., -1000000., 1000050., -1000000.] + x = [1000000., -1000000., 1000200., -1000200.] + y = [1000200., -1000200., 1000000., -1000000.] + z = [1000200., -1000000., 1000200., -1000000.] for dt in ['f','d','g'] : logxf = np.array(x, dtype=dt) logyf = np.array(y, dtype=dt) From numpy-svn at scipy.org Tue Nov 11 03:36:26 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 11 Nov 2008 02:36:26 -0600 (CST) Subject: [Numpy-svn] r6005 - trunk/numpy/core/tests Message-ID: <20081111083626.6FDE139C0F1@scipy.org> Author: charris Date: 2008-11-11 02:36:22 -0600 (Tue, 11 Nov 2008) New Revision: 6005 Modified: trunk/numpy/core/tests/test_umath.py Log: Comment out tests that hang on some builtbots. I think this is due to buggy versions of log1p, so add a log1p evaluation with small number in test_log1p. This might also be a problem with exp, so test that next it this works. Modified: trunk/numpy/core/tests/test_umath.py =================================================================== --- trunk/numpy/core/tests/test_umath.py 2008-11-11 07:55:54 UTC (rev 6004) +++ trunk/numpy/core/tests/test_umath.py 2008-11-11 08:36:22 UTC (rev 6005) @@ -65,15 +65,15 @@ zf = np.log2(np.array(z, dtype=dt)) assert_almost_equal(np.logaddexp2(xf, yf), zf, decimal=dec) - def test_logaddexp2_range(self) : - x = [1000000., -1000000., 1000200., -1000200.] - y = [1000200., -1000200., 1000000., -1000000.] - z = [1000200., -1000000., 1000200., -1000000.] - for dt in ['f','d','g'] : - logxf = np.array(x, dtype=dt) - logyf = np.array(y, dtype=dt) - logzf = np.array(z, dtype=dt) - assert_almost_equal(np.logaddexp(logxf, logyf), logzf) +# def test_logaddexp2_range(self) : +# x = [1000000., -1000000., 1000200., -1000200.] +# y = [1000200., -1000200., 1000000., -1000000.] +# z = [1000200., -1000000., 1000200., -1000000.] +# for dt in ['f','d','g'] : +# logxf = np.array(x, dtype=dt) +# logyf = np.array(y, dtype=dt) +# logzf = np.array(z, dtype=dt) +# assert_almost_equal(np.logaddexp(logxf, logyf), logzf) class TestLog(TestCase): def test_log_values(self) : @@ -106,18 +106,19 @@ zf = np.log(np.array(z, dtype=dt)) assert_almost_equal(np.logaddexp(xf, yf), zf, decimal=dec) - def test_logaddexp_range(self) : - x = [1000000., -1000000., 1000200., -1000200.] - y = [1000200., -1000200., 1000000., -1000000.] - z = [1000200., -1000000., 1000200., -1000000.] - for dt in ['f','d','g'] : - logxf = np.array(x, dtype=dt) - logyf = np.array(y, dtype=dt) - logzf = np.array(z, dtype=dt) - assert_almost_equal(np.logaddexp(logxf, logyf), logzf) +# def test_logaddexp_range(self) : +# x = [1000000., -1000000., 1000200., -1000200.] +# y = [1000200., -1000200., 1000000., -1000000.] +# z = [1000200., -1000000., 1000200., -1000000.] 
+# for dt in ['f','d','g'] : +# logxf = np.array(x, dtype=dt) +# logyf = np.array(y, dtype=dt) +# logzf = np.array(z, dtype=dt) +# assert_almost_equal(np.logaddexp(logxf, logyf), logzf) class TestLog1p(TestCase): def test_log1p(self): + np.log1p(1e-100) assert_almost_equal(ncu.log1p(0.2), ncu.log(1.2)) assert_almost_equal(ncu.log1p(1e-6), ncu.log(1+1e-6)) From numpy-svn at scipy.org Tue Nov 11 13:03:51 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 11 Nov 2008 12:03:51 -0600 (CST) Subject: [Numpy-svn] r6006 - trunk/numpy/core/tests Message-ID: <20081111180351.A5CCD39C088@scipy.org> Author: charris Date: 2008-11-11 12:03:47 -0600 (Tue, 11 Nov 2008) New Revision: 6006 Modified: trunk/numpy/core/tests/test_umath.py Log: Comment out tests that hang on some machines. Add tests that might hang in more informative places. Modified: trunk/numpy/core/tests/test_umath.py =================================================================== --- trunk/numpy/core/tests/test_umath.py 2008-11-11 08:36:22 UTC (rev 6005) +++ trunk/numpy/core/tests/test_umath.py 2008-11-11 18:03:47 UTC (rev 6006) @@ -54,16 +54,17 @@ assert_almost_equal(np.exp2(yf), xf) class TestLogAddExp2(object): + pass # Need test for intermediate precisions - def test_logaddexp2_values(self) : - x = [1, 2, 3, 4, 5] - y = [5, 4, 3, 2, 1] - z = [6, 6, 6, 6, 6] - for dt, dec in zip(['f','d','g'],[6, 15, 15]) : - xf = np.log2(np.array(x, dtype=dt)) - yf = np.log2(np.array(y, dtype=dt)) - zf = np.log2(np.array(z, dtype=dt)) - assert_almost_equal(np.logaddexp2(xf, yf), zf, decimal=dec) +# def test_logaddexp2_values(self) : +# x = [1, 2, 3, 4, 5] +# y = [5, 4, 3, 2, 1] +# z = [6, 6, 6, 6, 6] +# for dt, dec in zip(['f','d','g'],[6, 15, 15]) : +# xf = np.log2(np.array(x, dtype=dt)) +# yf = np.log2(np.array(y, dtype=dt)) +# zf = np.log2(np.array(z, dtype=dt)) +# assert_almost_equal(np.logaddexp2(xf, yf), zf, decimal=dec) # def test_logaddexp2_range(self) : # x = [1000000., -1000000., 1000200., -1000200.] @@ -96,15 +97,16 @@ assert_almost_equal(np.exp(yf), xf) class TestLogAddExp(object): - def test_logaddexp_values(self) : - x = [1, 2, 3, 4, 5] - y = [5, 4, 3, 2, 1] - z = [6, 6, 6, 6, 6] - for dt, dec in zip(['f','d','g'],[6, 15, 15]) : - xf = np.log(np.array(x, dtype=dt)) - yf = np.log(np.array(y, dtype=dt)) - zf = np.log(np.array(z, dtype=dt)) - assert_almost_equal(np.logaddexp(xf, yf), zf, decimal=dec) + pass +# def test_logaddexp_values(self) : +# x = [1, 2, 3, 4, 5] +# y = [5, 4, 3, 2, 1] +# z = [6, 6, 6, 6, 6] +# for dt, dec in zip(['f','d','g'],[6, 15, 15]) : +# xf = np.log(np.array(x, dtype=dt)) +# yf = np.log(np.array(y, dtype=dt)) +# zf = np.log(np.array(z, dtype=dt)) +# assert_almost_equal(np.logaddexp(xf, yf), zf, decimal=dec) # def test_logaddexp_range(self) : # x = [1000000., -1000000., 1000200., -1000200.] 
@@ -117,8 +119,16 @@ # assert_almost_equal(np.logaddexp(logxf, logyf), logzf) class TestLog1p(TestCase): + def test_log1p_d(self): + np.log1p(np.array(1e-100, dtype='d')) + + def test_log1p_f(self): + np.log1p(np.array(1e-100, dtype='f')) + + def test_log1p_g(self): + np.log1p(np.array(1e-100, dtype='g')) + def test_log1p(self): - np.log1p(1e-100) assert_almost_equal(ncu.log1p(0.2), ncu.log(1.2)) assert_almost_equal(ncu.log1p(1e-6), ncu.log(1+1e-6)) From numpy-svn at scipy.org Tue Nov 11 13:44:54 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 11 Nov 2008 12:44:54 -0600 (CST) Subject: [Numpy-svn] r6007 - trunk/numpy/core/tests Message-ID: <20081111184454.AC6AC39C089@scipy.org> Author: charris Date: 2008-11-11 12:44:50 -0600 (Tue, 11 Nov 2008) New Revision: 6007 Modified: trunk/numpy/core/tests/test_umath.py Log: Comment out some tests that use functions broken on some machines. Modified: trunk/numpy/core/tests/test_umath.py =================================================================== --- trunk/numpy/core/tests/test_umath.py 2008-11-11 18:03:47 UTC (rev 6006) +++ trunk/numpy/core/tests/test_umath.py 2008-11-11 18:44:50 UTC (rev 6007) @@ -54,27 +54,26 @@ assert_almost_equal(np.exp2(yf), xf) class TestLogAddExp2(object): - pass # Need test for intermediate precisions -# def test_logaddexp2_values(self) : -# x = [1, 2, 3, 4, 5] -# y = [5, 4, 3, 2, 1] -# z = [6, 6, 6, 6, 6] -# for dt, dec in zip(['f','d','g'],[6, 15, 15]) : -# xf = np.log2(np.array(x, dtype=dt)) -# yf = np.log2(np.array(y, dtype=dt)) -# zf = np.log2(np.array(z, dtype=dt)) -# assert_almost_equal(np.logaddexp2(xf, yf), zf, decimal=dec) + def test_logaddexp2_values(self) : + x = [1, 2, 3, 4, 5] + y = [5, 4, 3, 2, 1] + z = [6, 6, 6, 6, 6] + for dt, dec in zip(['f','d','g'],[6, 15, 15]) : + xf = np.log2(np.array(x, dtype=dt)) + yf = np.log2(np.array(y, dtype=dt)) + zf = np.log2(np.array(z, dtype=dt)) + assert_almost_equal(np.logaddexp2(xf, yf), zf, decimal=dec) -# def test_logaddexp2_range(self) : -# x = [1000000., -1000000., 1000200., -1000200.] -# y = [1000200., -1000200., 1000000., -1000000.] -# z = [1000200., -1000000., 1000200., -1000000.] -# for dt in ['f','d','g'] : -# logxf = np.array(x, dtype=dt) -# logyf = np.array(y, dtype=dt) -# logzf = np.array(z, dtype=dt) -# assert_almost_equal(np.logaddexp(logxf, logyf), logzf) + def test_logaddexp2_range(self) : + x = [1000000., -1000000., 1000200., -1000200.] + y = [1000200., -1000200., 1000000., -1000000.] + z = [1000200., -1000000., 1000200., -1000000.] 
+ for dt in ['f','d','g'] : + logxf = np.array(x, dtype=dt) + logyf = np.array(y, dtype=dt) + logzf = np.array(z, dtype=dt) + assert_almost_equal(np.logaddexp(logxf, logyf), logzf) class TestLog(TestCase): def test_log_values(self) : @@ -97,36 +96,37 @@ assert_almost_equal(np.exp(yf), xf) class TestLogAddExp(object): - pass -# def test_logaddexp_values(self) : -# x = [1, 2, 3, 4, 5] -# y = [5, 4, 3, 2, 1] -# z = [6, 6, 6, 6, 6] -# for dt, dec in zip(['f','d','g'],[6, 15, 15]) : -# xf = np.log(np.array(x, dtype=dt)) -# yf = np.log(np.array(y, dtype=dt)) -# zf = np.log(np.array(z, dtype=dt)) -# assert_almost_equal(np.logaddexp(xf, yf), zf, decimal=dec) + def test_logaddexp_values(self) : + x = [1, 2, 3, 4, 5] + y = [5, 4, 3, 2, 1] + z = [6, 6, 6, 6, 6] + #for dt, dec in zip(['f','d','g'],[6, 15, 15]) : + for dt, dec in zip(['d'],[15]) : + xf = np.log(np.array(x, dtype=dt)) + yf = np.log(np.array(y, dtype=dt)) + zf = np.log(np.array(z, dtype=dt)) + assert_almost_equal(np.logaddexp(xf, yf), zf, decimal=dec) -# def test_logaddexp_range(self) : -# x = [1000000., -1000000., 1000200., -1000200.] -# y = [1000200., -1000200., 1000000., -1000000.] -# z = [1000200., -1000000., 1000200., -1000000.] -# for dt in ['f','d','g'] : -# logxf = np.array(x, dtype=dt) -# logyf = np.array(y, dtype=dt) -# logzf = np.array(z, dtype=dt) -# assert_almost_equal(np.logaddexp(logxf, logyf), logzf) + def test_logaddexp_range(self) : + x = [1000000., -1000000., 1000200., -1000200.] + y = [1000200., -1000200., 1000000., -1000000.] + z = [1000200., -1000000., 1000200., -1000000.] + #for dt in ['f','d','g'] : + for dt in ['d'] : + logxf = np.array(x, dtype=dt) + logyf = np.array(y, dtype=dt) + logzf = np.array(z, dtype=dt) + assert_almost_equal(np.logaddexp(logxf, logyf), logzf) class TestLog1p(TestCase): def test_log1p_d(self): np.log1p(np.array(1e-100, dtype='d')) - def test_log1p_f(self): - np.log1p(np.array(1e-100, dtype='f')) +# def test_log1p_f(self): +# np.log1p(np.array(1e-100, dtype='f')) - def test_log1p_g(self): - np.log1p(np.array(1e-100, dtype='g')) +# def test_log1p_g(self): +# np.log1p(np.array(1e-100, dtype='g')) def test_log1p(self): assert_almost_equal(ncu.log1p(0.2), ncu.log(1.2)) From numpy-svn at scipy.org Tue Nov 11 14:18:52 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 11 Nov 2008 13:18:52 -0600 (CST) Subject: [Numpy-svn] r6008 - trunk/numpy/core/tests Message-ID: <20081111191852.B3F9239C088@scipy.org> Author: charris Date: 2008-11-11 13:18:48 -0600 (Tue, 11 Nov 2008) New Revision: 6008 Modified: trunk/numpy/core/tests/test_umath.py Log: Comment out all tests for data types that seem broken on some machines. 
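For reference, the logaddexp range tests being trimmed here reduce to the identity logaddexp(a, b) = max(a, b) + log1p(exp(-|a - b|)): once the arguments differ by a couple of hundred, the log1p term falls far below double precision and the result collapses to the larger argument, which is exactly the value the tests expect. A minimal sketch of that check, assuming only numpy and a correct build (illustrative only, not taken from the patches):

    import numpy as np

    a, b = 1000000.0, 1000200.0
    # The stable form never evaluates exp(a) or exp(b) directly, so it cannot
    # overflow the way a literal log(exp(a) + exp(b)) would.
    stable = max(a, b) + np.log1p(np.exp(-abs(a - b)))
    assert stable == max(a, b)              # exp(-200) ~ 1e-87, below double eps
    assert np.logaddexp(a, b) == max(a, b)  # what test_logaddexp_range asserts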
Modified: trunk/numpy/core/tests/test_umath.py =================================================================== --- trunk/numpy/core/tests/test_umath.py 2008-11-11 18:44:50 UTC (rev 6007) +++ trunk/numpy/core/tests/test_umath.py 2008-11-11 19:18:48 UTC (rev 6008) @@ -39,7 +39,8 @@ def test_log2_values(self) : x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] - for dt in ['f','d','g'] : + #for dt in ['f','d','g'] : + for dt in ['d'] : xf = np.array(x, dtype=dt) yf = np.array(y, dtype=dt) assert_almost_equal(np.log2(xf), yf) @@ -48,7 +49,8 @@ def test_exp2_values(self) : x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] - for dt in ['f','d','g'] : + #for dt in ['f','d','g'] : + for dt in ['d'] : xf = np.array(x, dtype=dt) yf = np.array(y, dtype=dt) assert_almost_equal(np.exp2(yf), xf) @@ -59,7 +61,8 @@ x = [1, 2, 3, 4, 5] y = [5, 4, 3, 2, 1] z = [6, 6, 6, 6, 6] - for dt, dec in zip(['f','d','g'],[6, 15, 15]) : + #for dt, dec in zip(['f','d','g'],[6, 15, 15]) : + for dt, dec in zip(['d'],[15]) : xf = np.log2(np.array(x, dtype=dt)) yf = np.log2(np.array(y, dtype=dt)) zf = np.log2(np.array(z, dtype=dt)) @@ -69,7 +72,8 @@ x = [1000000., -1000000., 1000200., -1000200.] y = [1000200., -1000200., 1000000., -1000000.] z = [1000200., -1000000., 1000200., -1000000.] - for dt in ['f','d','g'] : + #for dt in ['f','d','g'] : + for dt in ['d'] : logxf = np.array(x, dtype=dt) logyf = np.array(y, dtype=dt) logzf = np.array(z, dtype=dt) @@ -79,7 +83,8 @@ def test_log_values(self) : x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] - for dt in ['f','d','g'] : + #for dt in ['f','d','g'] : + for dt in ['d'] : log2_ = 0.69314718055994530943 xf = np.array(x, dtype=dt) yf = np.array(y, dtype=dt)*log2_ @@ -89,7 +94,8 @@ def test_exp_values(self) : x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] - for dt in ['f','d','g'] : + #for dt in ['f','d','g'] : + for dt in ['d'] : log2_ = 0.69314718055994530943 xf = np.array(x, dtype=dt) yf = np.array(y, dtype=dt)*log2_ From numpy-svn at scipy.org Tue Nov 11 16:09:26 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 11 Nov 2008 15:09:26 -0600 (CST) Subject: [Numpy-svn] r6009 - in trunk/numpy/core: . tests Message-ID: <20081111210926.8B27239C05F@scipy.org> Author: charris Date: 2008-11-11 15:09:17 -0600 (Tue, 11 Nov 2008) New Revision: 6009 Modified: trunk/numpy/core/setup.py trunk/numpy/core/tests/test_umath.py Log: Debugging log1p problem. Add log1p to the search list for f,l types. Add back tests of log1pf, log1pl to see if they work. 
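The probe these commits re-enable is easy to run by hand: on a working libm each call returns immediately with the tiny argument essentially unchanged, whereas the failure mode reported earlier in this thread is a hang. An illustrative sketch, assuming only numpy (not taken from the patches):

    import numpy as np

    for dt in ['f', 'd', 'g']:
        x = np.array(1e-100, dtype=dt)   # underflows to 0.0 for 'f'
        y = np.log1p(x)                  # should return promptly, never hang
        assert y == x                    # log1p(x) == x to working precision for tiny x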
Modified: trunk/numpy/core/setup.py =================================================================== --- trunk/numpy/core/setup.py 2008-11-11 19:18:48 UTC (rev 6008) +++ trunk/numpy/core/setup.py 2008-11-11 21:09:17 UTC (rev 6009) @@ -85,7 +85,7 @@ # C99 functions: float and long double versions c99_funcs = ["sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs", "floor", - "ceil", "rint", "trunc", "sqrt", "log10", "log", "exp", + "ceil", "rint", "trunc", "sqrt", "log10", "log", "log1p", "exp", "expm1", "asin", "acos", "atan", "asinh", "acosh", "atanh", "hypot", "atan2", "pow", "fmod", "modf", 'frexp', 'ldexp', "exp2", "log2"] Modified: trunk/numpy/core/tests/test_umath.py =================================================================== --- trunk/numpy/core/tests/test_umath.py 2008-11-11 19:18:48 UTC (rev 6008) +++ trunk/numpy/core/tests/test_umath.py 2008-11-11 21:09:17 UTC (rev 6009) @@ -128,11 +128,11 @@ def test_log1p_d(self): np.log1p(np.array(1e-100, dtype='d')) -# def test_log1p_f(self): -# np.log1p(np.array(1e-100, dtype='f')) + def test_log1p_f(self): + np.log1p(np.array(1e-100, dtype='f')) -# def test_log1p_g(self): -# np.log1p(np.array(1e-100, dtype='g')) + def test_log1p_g(self): + np.log1p(np.array(1e-100, dtype='g')) def test_log1p(self): assert_almost_equal(ncu.log1p(0.2), ncu.log(1.2)) From numpy-svn at scipy.org Tue Nov 11 16:57:03 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 11 Nov 2008 15:57:03 -0600 (CST) Subject: [Numpy-svn] r6010 - trunk/numpy/core/tests Message-ID: <20081111215703.3CA7839C088@scipy.org> Author: charris Date: 2008-11-11 15:57:01 -0600 (Tue, 11 Nov 2008) New Revision: 6010 Modified: trunk/numpy/core/tests/test_umath.py Log: Debug commit seemed to work. Add back all logaddexp, logaddexp2 tests. Remove special log1p tests. The existing log1p tests need some additions... Modified: trunk/numpy/core/tests/test_umath.py =================================================================== --- trunk/numpy/core/tests/test_umath.py 2008-11-11 21:09:17 UTC (rev 6009) +++ trunk/numpy/core/tests/test_umath.py 2008-11-11 21:57:01 UTC (rev 6010) @@ -39,8 +39,7 @@ def test_log2_values(self) : x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] - #for dt in ['f','d','g'] : - for dt in ['d'] : + for dt in ['f','d','g'] : xf = np.array(x, dtype=dt) yf = np.array(y, dtype=dt) assert_almost_equal(np.log2(xf), yf) @@ -49,8 +48,7 @@ def test_exp2_values(self) : x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] - #for dt in ['f','d','g'] : - for dt in ['d'] : + for dt in ['f','d','g'] : xf = np.array(x, dtype=dt) yf = np.array(y, dtype=dt) assert_almost_equal(np.exp2(yf), xf) @@ -61,19 +59,17 @@ x = [1, 2, 3, 4, 5] y = [5, 4, 3, 2, 1] z = [6, 6, 6, 6, 6] - #for dt, dec in zip(['f','d','g'],[6, 15, 15]) : - for dt, dec in zip(['d'],[15]) : + for dt, dec in zip(['f','d','g'],[6, 15, 15]) : xf = np.log2(np.array(x, dtype=dt)) yf = np.log2(np.array(y, dtype=dt)) zf = np.log2(np.array(z, dtype=dt)) assert_almost_equal(np.logaddexp2(xf, yf), zf, decimal=dec) def test_logaddexp2_range(self) : - x = [1000000., -1000000., 1000200., -1000200.] - y = [1000200., -1000200., 1000000., -1000000.] - z = [1000200., -1000000., 1000200., -1000000.] 
- #for dt in ['f','d','g'] : - for dt in ['d'] : + x = [1000000, -1000000, 1000200, -1000200] + y = [1000200, -1000200, 1000000, -1000000] + z = [1000200, -1000000, 1000200, -1000000] + for dt in ['f','d','g'] : logxf = np.array(x, dtype=dt) logyf = np.array(y, dtype=dt) logzf = np.array(z, dtype=dt) @@ -83,8 +79,7 @@ def test_log_values(self) : x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] - #for dt in ['f','d','g'] : - for dt in ['d'] : + for dt in ['f','d','g'] : log2_ = 0.69314718055994530943 xf = np.array(x, dtype=dt) yf = np.array(y, dtype=dt)*log2_ @@ -94,8 +89,7 @@ def test_exp_values(self) : x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] - #for dt in ['f','d','g'] : - for dt in ['d'] : + for dt in ['f','d','g'] : log2_ = 0.69314718055994530943 xf = np.array(x, dtype=dt) yf = np.array(y, dtype=dt)*log2_ @@ -106,34 +100,23 @@ x = [1, 2, 3, 4, 5] y = [5, 4, 3, 2, 1] z = [6, 6, 6, 6, 6] - #for dt, dec in zip(['f','d','g'],[6, 15, 15]) : - for dt, dec in zip(['d'],[15]) : + for dt, dec in zip(['f','d','g'],[6, 15, 15]) : xf = np.log(np.array(x, dtype=dt)) yf = np.log(np.array(y, dtype=dt)) zf = np.log(np.array(z, dtype=dt)) assert_almost_equal(np.logaddexp(xf, yf), zf, decimal=dec) def test_logaddexp_range(self) : - x = [1000000., -1000000., 1000200., -1000200.] - y = [1000200., -1000200., 1000000., -1000000.] - z = [1000200., -1000000., 1000200., -1000000.] - #for dt in ['f','d','g'] : - for dt in ['d'] : + x = [1000000, -1000000, 1000200, -1000200] + y = [1000200, -1000200, 1000000, -1000000] + z = [1000200, -1000000, 1000200, -1000000] + for dt in ['f','d','g'] : logxf = np.array(x, dtype=dt) logyf = np.array(y, dtype=dt) logzf = np.array(z, dtype=dt) assert_almost_equal(np.logaddexp(logxf, logyf), logzf) class TestLog1p(TestCase): - def test_log1p_d(self): - np.log1p(np.array(1e-100, dtype='d')) - - def test_log1p_f(self): - np.log1p(np.array(1e-100, dtype='f')) - - def test_log1p_g(self): - np.log1p(np.array(1e-100, dtype='g')) - def test_log1p(self): assert_almost_equal(ncu.log1p(0.2), ncu.log(1.2)) assert_almost_equal(ncu.log1p(1e-6), ncu.log(1+1e-6)) From numpy-svn at scipy.org Tue Nov 11 17:20:49 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 11 Nov 2008 16:20:49 -0600 (CST) Subject: [Numpy-svn] r6011 - trunk/numpy/ma Message-ID: <20081111222049.B198739C0EA@scipy.org> Author: pierregm Date: 2008-11-11 16:20:46 -0600 (Tue, 11 Nov 2008) New Revision: 6011 Modified: trunk/numpy/ma/mrecords.py Log: MaskedRecords : reintroduced _fieldmask as a property (for backcompatibility). 
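The reintroduced attribute is a thin alias for the regular mask, so third-party code that still reads _fieldmask keeps working unchanged. A hedged usage sketch (the sample records below are made up for illustration; only the _fieldmask/_mask relationship comes from the patch):

    import numpy.ma as ma
    from numpy.ma.mrecords import mrecarray

    base = ma.array([(1, 1.5), (2, 2.5)],
                    mask=[0, 1],
                    dtype=[('a', int), ('b', float)])
    mbase = base.view(mrecarray)
    # Old code that read the per-field mask through _fieldmask still works;
    # the property simply returns _mask.
    assert mbase._fieldmask is mbase._mask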
Modified: trunk/numpy/ma/mrecords.py =================================================================== --- trunk/numpy/ma/mrecords.py 2008-11-11 21:57:01 UTC (rev 6010) +++ trunk/numpy/ma/mrecords.py 2008-11-11 22:20:46 UTC (rev 6011) @@ -174,6 +174,11 @@ return ndarray.view(self,recarray) _data = property(fget=_getdata) + def _getfieldmask(self): + "Alias to mask" + return self._mask + _fieldmask = property(fget=_getfieldmask) + def __len__(self): "Returns the length" # We have more than one record From numpy-svn at scipy.org Tue Nov 11 20:12:51 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 11 Nov 2008 19:12:51 -0600 (CST) Subject: [Numpy-svn] r6012 - trunk/numpy/core/src Message-ID: <20081112011251.9BADA39C088@scipy.org> Author: charris Date: 2008-11-11 19:12:43 -0600 (Tue, 11 Nov 2008) New Revision: 6012 Modified: trunk/numpy/core/src/math_c99.inc.src Log: Add C99 prototypes. Attempt to cure problems of SPARC Debian Etch. Modified: trunk/numpy/core/src/math_c99.inc.src =================================================================== --- trunk/numpy/core/src/math_c99.inc.src 2008-11-11 22:20:46 UTC (rev 6011) +++ trunk/numpy/core/src/math_c99.inc.src 2008-11-12 01:12:43 UTC (rev 6012) @@ -7,6 +7,37 @@ /* ***************************************************************************** + ** C99 PROTOTYPES ** + ***************************************************************************** + */ + +/**begin repeat + * #type = float, double, longdouble# + * #c = f, ,l# +*/ + +/**begin repeat1 + * #kind = sin,cos,tan,sinh,cosh,tanh,fabs,floor,ceil,rint,trunc,sqrt,log10, + * log,exp,expm1,asin,acos,atan,asinh,acosh,atanh,log1p,exp2,log2# + */ + + at type@ @kind@@c@(@type@ x); + +/**end repeat1**/ + +/**begin repeat1 + * #kind = atan2,hypot,pow,fmod# + */ + + at type@ @kind@@c@(@type@ x, @type@ y) + +/**end repeat1**/ + + at type@ modf at c@(@type@ x, @type@ *iptr) + +/**end repeat**/ +/* + ***************************************************************************** ** BASIC MATH FUNCTIONS ** ***************************************************************************** */ From numpy-svn at scipy.org Tue Nov 11 20:28:32 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 11 Nov 2008 19:28:32 -0600 (CST) Subject: [Numpy-svn] r6013 - trunk/numpy/core/src Message-ID: <20081112012832.3CCCB39C088@scipy.org> Author: charris Date: 2008-11-11 19:28:23 -0600 (Tue, 11 Nov 2008) New Revision: 6013 Modified: trunk/numpy/core/src/math_c99.inc.src Log: Fix missing semicolons on some function declarations. 
Modified: trunk/numpy/core/src/math_c99.inc.src =================================================================== --- trunk/numpy/core/src/math_c99.inc.src 2008-11-12 01:12:43 UTC (rev 6012) +++ trunk/numpy/core/src/math_c99.inc.src 2008-11-12 01:28:23 UTC (rev 6013) @@ -29,11 +29,11 @@ * #kind = atan2,hypot,pow,fmod# */ - at type@ @kind@@c@(@type@ x, @type@ y) + at type@ @kind@@c@(@type@ x, @type@ y); /**end repeat1**/ - at type@ modf at c@(@type@ x, @type@ *iptr) + at type@ modf at c@(@type@ x, @type@ *iptr); /**end repeat**/ /* From numpy-svn at scipy.org Wed Nov 12 00:47:16 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 11 Nov 2008 23:47:16 -0600 (CST) Subject: [Numpy-svn] r6014 - in trunk/numpy: core/src distutils/command Message-ID: <20081112054716.99A5B39C05F@scipy.org> Author: charris Date: 2008-11-11 23:47:09 -0600 (Tue, 11 Nov 2008) New Revision: 6014 Modified: trunk/numpy/core/src/math_c99.inc.src trunk/numpy/distutils/command/config.py Log: Some spelling corrections. Some experiments for MSVC. Modified: trunk/numpy/core/src/math_c99.inc.src =================================================================== --- trunk/numpy/core/src/math_c99.inc.src 2008-11-12 01:28:23 UTC (rev 6013) +++ trunk/numpy/core/src/math_c99.inc.src 2008-11-12 05:47:09 UTC (rev 6014) @@ -11,6 +11,8 @@ ***************************************************************************** */ +#ifndef _MSC_VER + /**begin repeat * #type = float, double, longdouble# * #c = f, ,l# @@ -18,7 +20,8 @@ /**begin repeat1 * #kind = sin,cos,tan,sinh,cosh,tanh,fabs,floor,ceil,rint,trunc,sqrt,log10, - * log,exp,expm1,asin,acos,atan,asinh,acosh,atanh,log1p,exp2,log2# + * log,exp,expm1,asin,acos,atan,asinh,acosh,atanh,log1p,exp2,log2, + * logb# */ @type@ @kind@@c@(@type@ x); @@ -26,7 +29,7 @@ /**end repeat1**/ /**begin repeat1 - * #kind = atan2,hypot,pow,fmod# + * #kind = atan2,hypot,pow,fmod,scalb# */ @type@ @kind@@c@(@type@ x, @type@ y); @@ -34,7 +37,13 @@ /**end repeat1**/ @type@ modf at c@(@type@ x, @type@ *iptr); + at type@ ldexp at c@(@type@ x, int iexp); + at type@ frexp at c@(@type@ x, int *iptr); + at type@ scalbn at c@(@type@ x, int pow); + at type@ scalbln at c@(@type@ x, long pow); +#endif + /**end repeat**/ /* ***************************************************************************** Modified: trunk/numpy/distutils/command/config.py =================================================================== --- trunk/numpy/distutils/command/config.py 2008-11-12 01:28:23 UTC (rev 6013) +++ trunk/numpy/distutils/command/config.py 2008-11-12 05:47:09 UTC (rev 6014) @@ -126,9 +126,9 @@ body = [] if decl: body.append("int %s (void);" % func) - # Handle MSVC intrisincs: force MS compiler to make a function call. + # Handle MSVC intrinsics: force MS compiler to make a function call. # Useful to test for some functions when built with optimization on, to - # avoid build error because the intrisinc and our 'fake' test + # avoid build error because the intrinsic and our 'fake' test # declaration do not match. body.append("#ifdef _MSC_VER") body.append("#pragma function(%s)" % func) From numpy-svn at scipy.org Wed Nov 12 01:20:42 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 12 Nov 2008 00:20:42 -0600 (CST) Subject: [Numpy-svn] r6015 - trunk/numpy/core/src Message-ID: <20081112062042.4BCCC39C05F@scipy.org> Author: charris Date: 2008-11-12 00:20:36 -0600 (Wed, 12 Nov 2008) New Revision: 6015 Modified: trunk/numpy/core/src/math_c99.inc.src Log: Fix misplaced end statement. 
Modified: trunk/numpy/core/src/math_c99.inc.src =================================================================== --- trunk/numpy/core/src/math_c99.inc.src 2008-11-12 05:47:09 UTC (rev 6014) +++ trunk/numpy/core/src/math_c99.inc.src 2008-11-12 06:20:36 UTC (rev 6015) @@ -42,9 +42,10 @@ @type@ scalbn at c@(@type@ x, int pow); @type@ scalbln at c@(@type@ x, long pow); +/**end repeat**/ + #endif -/**end repeat**/ /* ***************************************************************************** ** BASIC MATH FUNCTIONS ** From numpy-svn at scipy.org Wed Nov 12 02:27:00 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 12 Nov 2008 01:27:00 -0600 (CST) Subject: [Numpy-svn] r6016 - trunk/numpy/core/src Message-ID: <20081112072700.6B6E039C05F@scipy.org> Author: charris Date: 2008-11-12 01:26:55 -0600 (Wed, 12 Nov 2008) New Revision: 6016 Modified: trunk/numpy/core/src/math_c99.inc.src Log: Another test: make functions defined in math_c99 static. Modified: trunk/numpy/core/src/math_c99.inc.src =================================================================== --- trunk/numpy/core/src/math_c99.inc.src 2008-11-12 06:20:36 UTC (rev 6015) +++ trunk/numpy/core/src/math_c99.inc.src 2008-11-12 07:26:55 UTC (rev 6016) @@ -11,7 +11,7 @@ ***************************************************************************** */ -#ifndef _MSC_VER +#if 0 /**begin repeat * #type = float, double, longdouble# @@ -54,7 +54,7 @@ /* Original code by Konrad Hinsen. */ #ifndef HAVE_EXPM1 -double expm1(double x) +static double expm1(double x) { double u = exp(x); if (u == 1.0) { @@ -68,7 +68,7 @@ #endif #ifndef HAVE_LOG1P -double log1p(double x) +static double log1p(double x) { double u = 1. + x; if (u == 1.0) { @@ -80,7 +80,7 @@ #endif #ifndef HAVE_HYPOT -double hypot(double x, double y) +static double hypot(double x, double y) { double yx; @@ -101,14 +101,14 @@ #endif #ifndef HAVE_ACOSH -double acosh(double x) +static double acosh(double x) { return 2*log(sqrt((x+1.0)/2)+sqrt((x-1.0)/2)); } #endif #ifndef HAVE_ASINH -double asinh(double xx) +static double asinh(double xx) { double x, d; int sign; @@ -137,7 +137,7 @@ #endif #ifndef HAVE_RINT -double rint(double x) +static double rint(double x) { double y, r; @@ -159,7 +159,7 @@ #endif #ifndef HAVE_TRUNC -double trunc(double x) +static double trunc(double x) { return x < 0 ? 
ceil(x) : floor(x); } @@ -167,7 +167,7 @@ #ifndef HAVE_EXP2 #define LOG2 0.69314718055994530943 -double exp2(double x) +static double exp2(double x) { return exp(LOG2*x); } @@ -176,7 +176,7 @@ #ifndef HAVE_LOG2 #define INVLOG2 1.4426950408889634074 -double log2(double x) +static double log2(double x) { return INVLOG2*log(x); } @@ -257,7 +257,7 @@ #ifdef @kind@@c@ #undef @kind@@c@ #endif - at type@ @kind@@c@(@type@ x) +static @type@ @kind@@c@(@type@ x) { return (@type@) @kind@((double)x); } @@ -272,7 +272,7 @@ #ifdef @kind@@c@ #undef @kind@@c@ #endif - at type@ @kind@@c@(@type@ x, @type@ y) +static @type@ @kind@@c@(@type@ x, @type@ y) { return (@type@) @kind@((double)x, (double) y); } @@ -283,7 +283,7 @@ #ifdef modf at c@ #undef modf at c@ #endif - at type@ modf at c@(@type@ x, @type@ *iptr) +static @type@ modf at c@(@type@ x, @type@ *iptr) { double niptr; double y = modf((double)x, &niptr); From numpy-svn at scipy.org Wed Nov 12 04:28:36 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 12 Nov 2008 03:28:36 -0600 (CST) Subject: [Numpy-svn] r6017 - trunk/numpy/core/src Message-ID: <20081112092836.59FE139C089@scipy.org> Author: charris Date: 2008-11-12 03:28:30 -0600 (Wed, 12 Nov 2008) New Revision: 6017 Modified: trunk/numpy/core/src/math_c99.inc.src Log: Another test for microsoft buildbot. Modified: trunk/numpy/core/src/math_c99.inc.src =================================================================== --- trunk/numpy/core/src/math_c99.inc.src 2008-11-12 07:26:55 UTC (rev 6016) +++ trunk/numpy/core/src/math_c99.inc.src 2008-11-12 09:28:30 UTC (rev 6017) @@ -11,12 +11,12 @@ ***************************************************************************** */ -#if 0 +#ifndef _MSC_VER /**begin repeat * #type = float, double, longdouble# * #c = f, ,l# -*/ + */ /**begin repeat1 * #kind = sin,cos,tan,sinh,cosh,tanh,fabs,floor,ceil,rint,trunc,sqrt,log10, @@ -44,6 +44,10 @@ /**end repeat**/ +#else +#include +#define HAVE_TANHF + #endif /* @@ -245,7 +249,7 @@ * #TYPE = LONGDOUBLE, FLOAT# * #c = l,f# * #C = L,F# -*/ + */ /**begin repeat1 * #kind = sin,cos,tan,sinh,cosh,tanh,fabs,floor,ceil,rint,trunc,sqrt,log10, From numpy-svn at scipy.org Wed Nov 12 06:28:16 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 12 Nov 2008 05:28:16 -0600 (CST) Subject: [Numpy-svn] r6018 - trunk/numpy/core/src Message-ID: <20081112112816.E436839C05F@scipy.org> Author: charris Date: 2008-11-12 05:28:11 -0600 (Wed, 12 Nov 2008) New Revision: 6018 Modified: trunk/numpy/core/src/math_c99.inc.src Log: Another stab at MSVC voodoo. 
Modified: trunk/numpy/core/src/math_c99.inc.src =================================================================== --- trunk/numpy/core/src/math_c99.inc.src 2008-11-12 09:28:30 UTC (rev 6017) +++ trunk/numpy/core/src/math_c99.inc.src 2008-11-12 11:28:11 UTC (rev 6018) @@ -7,49 +7,11 @@ /* ***************************************************************************** - ** C99 PROTOTYPES ** + ** DISTRO VOODOO ** ***************************************************************************** */ -#ifndef _MSC_VER -/**begin repeat - * #type = float, double, longdouble# - * #c = f, ,l# - */ - -/**begin repeat1 - * #kind = sin,cos,tan,sinh,cosh,tanh,fabs,floor,ceil,rint,trunc,sqrt,log10, - * log,exp,expm1,asin,acos,atan,asinh,acosh,atanh,log1p,exp2,log2, - * logb# - */ - - at type@ @kind@@c@(@type@ x); - -/**end repeat1**/ - -/**begin repeat1 - * #kind = atan2,hypot,pow,fmod,scalb# - */ - - at type@ @kind@@c@(@type@ x, @type@ y); - -/**end repeat1**/ - - at type@ modf at c@(@type@ x, @type@ *iptr); - at type@ ldexp at c@(@type@ x, int iexp); - at type@ frexp at c@(@type@ x, int *iptr); - at type@ scalbn at c@(@type@ x, int pow); - at type@ scalbln at c@(@type@ x, long pow); - -/**end repeat**/ - -#else -#include -#define HAVE_TANHF - -#endif - /* ***************************************************************************** ** BASIC MATH FUNCTIONS ** @@ -58,7 +20,7 @@ /* Original code by Konrad Hinsen. */ #ifndef HAVE_EXPM1 -static double expm1(double x) +double expm1(double x) { double u = exp(x); if (u == 1.0) { @@ -69,10 +31,12 @@ return (u-1.0) * x/log(u); } } +#else +double expm1(double x); #endif #ifndef HAVE_LOG1P -static double log1p(double x) +double log1p(double x) { double u = 1. + x; if (u == 1.0) { @@ -81,10 +45,12 @@ return log(u) * x / (u - 1); } } +#else +double log1p(double x); #endif #ifndef HAVE_HYPOT -static double hypot(double x, double y) +double hypot(double x, double y) { double yx; @@ -102,17 +68,21 @@ return x*sqrt(1.+yx*yx); } } +#else +double hypot(double x, double y); #endif #ifndef HAVE_ACOSH -static double acosh(double x) +double acosh(double x) { return 2*log(sqrt((x+1.0)/2)+sqrt((x-1.0)/2)); } +#else +double acosh(double x); #endif #ifndef HAVE_ASINH -static double asinh(double xx) +double asinh(double xx) { double x, d; int sign; @@ -131,17 +101,21 @@ } return sign*log1p(x*(1.0 + x/(d+1))); } +#else +double asinh(double xx); #endif #ifndef HAVE_ATANH -static double atanh(double x) +double atanh(double x) { return 0.5*log1p(2.0*x/(1.0-x)); } +#else +double atanh(double x); #endif #ifndef HAVE_RINT -static double rint(double x) +double rint(double x) { double y, r; @@ -160,31 +134,39 @@ } return y; } +#else +double rint(double x); #endif #ifndef HAVE_TRUNC -static double trunc(double x) +double trunc(double x) { return x < 0 ? 
ceil(x) : floor(x); } +#else +double trunc(double x); #endif #ifndef HAVE_EXP2 #define LOG2 0.69314718055994530943 -static double exp2(double x) +double exp2(double x) { return exp(LOG2*x); } #undef LOG2 +#else +double exp2(double x); #endif #ifndef HAVE_LOG2 #define INVLOG2 1.4426950408889634074 -static double log2(double x) +double log2(double x) { return INVLOG2*log(x); } #undef INVLOG2 +#else +double log2(double x); #endif /* @@ -257,43 +239,54 @@ * #KIND = SIN,COS,TAN,SINH,COSH,TANH,FABS,FLOOR,CEIL,RINT,TRUNC,SQRT,LOG10, * LOG,EXP,EXPM1,ASIN,ACOS,ATAN,ASINH,ACOSH,ATANH,LOG1P,EXP2,LOG2# */ -#ifndef HAVE_ at KIND@@C@ + #ifdef @kind@@c@ #undef @kind@@c@ #endif -static @type@ @kind@@c@(@type@ x) +#ifndef HAVE_ at KIND@@C@ + at type@ npy_ at kind@@c@(@type@ x) { return (@type@) @kind@((double)x); } +#define @kind@@c@ npy_ at kind@@c@ +#else + at type@ @kind@@c@(@type@ x); #endif + /**end repeat1**/ /**begin repeat1 * #kind = atan2,hypot,pow,fmod# * #KIND = ATAN2,HYPOT,POW,FMOD# */ -#ifndef HAVE_ at KIND@@C@ #ifdef @kind@@c@ #undef @kind@@c@ #endif -static @type@ @kind@@c@(@type@ x, @type@ y) +#ifndef HAVE_ at KIND@@C@ + at type@ npy_ at kind@@c@(@type@ x, @type@ y) { return (@type@) @kind@((double)x, (double) y); } +#define @kind@@c@ npy_ at kind@@c@ +#else + at type@ @kind@@c@(@type@ x, @type@ y); #endif /**end repeat1**/ -#ifndef HAVE_MODF at C@ #ifdef modf at c@ #undef modf at c@ #endif -static @type@ modf at c@(@type@ x, @type@ *iptr) +#ifndef HAVE_MODF at C@ + at type@ npy_modf at c@(@type@ x, @type@ *iptr) { double niptr; double y = modf((double)x, &niptr); *iptr = (@type@) niptr; return (@type@) y; } +#define modf at c@ npy_modf at c@ +#else + at type@ modf at c@(@type@ x, @type@ *iptr); #endif /**end repeat**/ From numpy-svn at scipy.org Wed Nov 12 07:39:22 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 12 Nov 2008 06:39:22 -0600 (CST) Subject: [Numpy-svn] r6019 - trunk/numpy/core Message-ID: <20081112123922.DD9E639C089@scipy.org> Author: charris Date: 2008-11-12 06:39:18 -0600 (Wed, 12 Nov 2008) New Revision: 6019 Modified: trunk/numpy/core/setup.py Log: Temporarily remove umath_test from build, it isn't linking on windows. The problem is the blas_gemm problem seen elsewhere and I want to get windows compiling first. It's almost there. Modified: trunk/numpy/core/setup.py =================================================================== --- trunk/numpy/core/setup.py 2008-11-12 11:28:11 UTC (rev 6018) +++ trunk/numpy/core/setup.py 2008-11-12 12:39:18 UTC (rev 6019) @@ -400,13 +400,13 @@ extra_info = blas_info ) - config.add_extension('umath_tests', - sources = [join('src','umath_tests.c.src'), - ], - depends = [join('blasdot','cblas.h'),] + deps, - include_dirs = ['blasdot'], - extra_info = blas_info - ) +# config.add_extension('umath_tests', +# sources = [join('src','umath_tests.c.src'), +# ], +# depends = [join('blasdot','cblas.h'),] + deps, +# include_dirs = ['blasdot'], +# extra_info = blas_info +# ) config.add_data_dir('tests') From numpy-svn at scipy.org Wed Nov 12 15:42:56 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 12 Nov 2008 14:42:56 -0600 (CST) Subject: [Numpy-svn] r6020 - in trunk/numpy/lib: . 
tests Message-ID: <20081112204256.4816F39C05F@scipy.org> Author: dhuard Date: 2008-11-12 14:42:53 -0600 (Wed, 12 Nov 2008) New Revision: 6020 Modified: trunk/numpy/lib/function_base.py trunk/numpy/lib/tests/test_function_base.py Log: Follow up on changes to histogram: new=False now raises a DeprecationWarning, new=True warns users that `new` will disappear in 1.4. Modified: trunk/numpy/lib/function_base.py =================================================================== --- trunk/numpy/lib/function_base.py 2008-11-12 12:39:18 UTC (rev 6019) +++ trunk/numpy/lib/function_base.py 2008-11-12 20:42:53 UTC (rev 6020) @@ -225,10 +225,13 @@ The `weights` keyword is only available with `new` set to True. new : {None, True, False}, optional Whether to use the new semantics for histogram: - * None : the new behaviour is used, and a warning is printed, - * True : the new behaviour is used and no warning is printed, - * False : the old behaviour is used and a message is printed - warning about future deprecation. + * None : the new behaviour is used, no warning is printed. + * True : the new behaviour is used and a warning is raised about + the future removal of the `new` keyword. + * False : the old behaviour is used and a DeprecationWarning + is raised. + As of NumPy 1.3, this keyword should not be used explicitly since it + will disappear in NumPy 1.4. Returns ------- @@ -264,18 +267,11 @@ # Old behavior if new == False: warnings.warn(""" - The original semantics of histogram is scheduled to be - deprecated in NumPy 1.3. The new semantics fixes - long-standing issues with outliers handling. The main - changes concern - 1. the definition of the bin edges, - now including the rightmost edge, and - 2. the handling of upper outliers, - now ignored rather than tallied in the rightmost bin. + The histogram semantics being used is now deprecated and + will disappear in NumPy 1.4. Please update your code to + use the default semantics. + """, DeprecationWarning) - Please read the docstring for more information. - """, Warning) - a = asarray(a).ravel() if (range is not None): @@ -322,24 +318,10 @@ # New behavior elif new in [True, None]: - if new is None: + if new is True: warnings.warn(""" - The semantics of histogram has been modified in - the current release to fix long-standing issues with - outliers handling. The main changes concern - 1. the definition of the bin edges, - now including the rightmost edge, and - 2. the handling of upper outliers, now ignored rather - than tallied in the rightmost bin. - The previous behaviour is still accessible using - `new=False`, but is scheduled to be deprecated in the - next release (1.3). - - *This warning will not printed in the 1.3 release.* - - Use `new=True` to bypass this warning. - - Please read the docstring for more information. + The new semantics of histogram is now the default and the `new` + keyword will be removed in NumPy 1.4. 
""", Warning) a = asarray(a) if weights is not None: Modified: trunk/numpy/lib/tests/test_function_base.py =================================================================== --- trunk/numpy/lib/tests/test_function_base.py 2008-11-12 12:39:18 UTC (rev 6019) +++ trunk/numpy/lib/tests/test_function_base.py 2008-11-12 20:42:53 UTC (rev 6020) @@ -439,7 +439,7 @@ class TestHistogram(TestCase): def setUp(self): - warnings.simplefilter('ignore', Warning) + warnings.simplefilter('ignore', DeprecationWarning) def tearDown(self): warnings.resetwarnings() From numpy-svn at scipy.org Wed Nov 12 15:53:56 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 12 Nov 2008 14:53:56 -0600 (CST) Subject: [Numpy-svn] r6021 - trunk/numpy/lib Message-ID: <20081112205356.C0C4E39C05F@scipy.org> Author: dhuard Date: 2008-11-12 14:53:55 -0600 (Wed, 12 Nov 2008) New Revision: 6021 Modified: trunk/numpy/lib/function_base.py Log: removed the `new` argument in the histogram docstring example Modified: trunk/numpy/lib/function_base.py =================================================================== --- trunk/numpy/lib/function_base.py 2008-11-12 20:42:53 UTC (rev 6020) +++ trunk/numpy/lib/function_base.py 2008-11-12 20:53:55 UTC (rev 6021) @@ -260,7 +260,7 @@ Examples -------- - >>> np.histogram([1,2,1], bins=[0,1,2,3], new=True) + >>> np.histogram([1,2,1], bins=[0,1,2,3]) (array([0, 2, 1]), array([0, 1, 2, 3])) """ From numpy-svn at scipy.org Wed Nov 12 23:55:10 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 12 Nov 2008 22:55:10 -0600 (CST) Subject: [Numpy-svn] r6022 - trunk/numpy/core/src Message-ID: <20081113045510.C98DB39C05F@scipy.org> Author: charris Date: 2008-11-12 22:55:06 -0600 (Wed, 12 Nov 2008) New Revision: 6022 Modified: trunk/numpy/core/src/math_c99.inc.src Log: Add some documentation to math_c99. Modified: trunk/numpy/core/src/math_c99.inc.src =================================================================== --- trunk/numpy/core/src/math_c99.inc.src 2008-11-12 20:53:55 UTC (rev 6021) +++ trunk/numpy/core/src/math_c99.inc.src 2008-11-13 04:55:06 UTC (rev 6022) @@ -3,6 +3,65 @@ * A small module to implement missing C99 math capabilities required by numpy * * Please keep this independant of python ! + * + * How to add a function to this section + * ------------------------------------- + * + * Say you want to add `foo`, these are the steps and the reasons for them. + * + * 1) Add foo to the appropriate list in the configuration system. The + * lists can be found in numpy/core/setup.py lines 63-105. Read the + * comments that come with them, they are very helpful. + * + * 2) The configuration system will define a macro HAVE_FOO if your function + * can be linked from the math library. The result can depend on the + * optimization flags as well as the compiler, so can't be known ahead of + * time. If the function can't be linked, then either it is absent, defined + * as a macro, or is an intrinsic (hardware) function. If it is linkable it + * may still be the case that no prototype is available. So to cover all the + * cases requires the following construction. + * + * i) Undefine any possible macros: + * + * #ifdef foo + * #undef foo + * #endif + * + * ii) Check if the function was in the library, If not, define the + * function with npy_ prepended to its name to avoid conflict with any + * intrinsic versions, then use a define so that the preprocessor will + * replace foo with npy_foo before the compilation pass. 
+ * + * #ifdef foo + * #undef foo + * #endif + * #ifndef HAVE_FOO + * double npy_foo(double x) + * { + * return x; + * } + * #define foo npy_foo + * + * iii) Finally, even if foo is in the library, add a prototype. Just being + * in the library doesn't guarantee a prototype in math.h, and in any case + * you want to make sure the prototype is what you think it is. Count on it, + * whatever can go wrong will go wrong. Think defensively! The result: + * + * #ifdef foo + * #undef foo + * #endif + * #ifndef HAVE_FOO + * double npy_foo(double x) + * { + * return x; + * } + * #define foo npy_foo + * #else + * double foo(double x); + * #end + * + * And there you have it. + * */ /* From numpy-svn at scipy.org Thu Nov 13 23:25:24 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 13 Nov 2008 22:25:24 -0600 (CST) Subject: [Numpy-svn] r6023 - trunk/numpy/core/tests Message-ID: <20081114042524.560DC39C05F@scipy.org> Author: charris Date: 2008-11-13 22:25:19 -0600 (Thu, 13 Nov 2008) New Revision: 6023 Modified: trunk/numpy/core/tests/test_ufunc.py Log: Correct spelling errors. Modified: trunk/numpy/core/tests/test_ufunc.py =================================================================== --- trunk/numpy/core/tests/test_ufunc.py 2008-11-13 04:55:06 UTC (rev 6022) +++ trunk/numpy/core/tests/test_ufunc.py 2008-11-14 04:25:19 UTC (rev 6023) @@ -123,22 +123,22 @@ def logical_and(self, obj) : return np.bool_(1) - # check unary PyUFunc_O_0 + # check unary PyUFunc_O_O msg = "PyUFunc_O_O" x = np.ones(10, dtype=np.object)[0::2] assert np.all(np.abs(x) == 1), msg - # check unary PyUFunc_O_0_method + # check unary PyUFunc_O_O_method msg = "PyUFunc_O_O_method" x = np.zeros(10, dtype=np.object)[0::2] for i in range(len(x)) : x[i] = foo() assert np.all(np.logical_not(x) == True), msg - # check binary PyUFunc_OO_0 + # check binary PyUFunc_OO_O msg = "PyUFunc_OO_O" x = np.ones(10, dtype=np.object)[0::2] assert np.all(np.add(x,x) == 2), msg - # check binary PyUFunc_OO_0_method + # check binary PyUFunc_OO_O_method msg = "PyUFunc_OO_O_method" x = np.zeros(10, dtype=np.object)[0::2] for i in range(len(x)) : From numpy-svn at scipy.org Thu Nov 13 23:25:34 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 13 Nov 2008 22:25:34 -0600 (CST) Subject: [Numpy-svn] r6024 - trunk/numpy/core/src Message-ID: <20081114042534.41EED39C05F@scipy.org> Author: charris Date: 2008-11-13 22:25:29 -0600 (Thu, 13 Nov 2008) New Revision: 6024 Modified: trunk/numpy/core/src/ufuncobject.c Log: Use loop macros in generic loops. This is a bit fragile at the moment because the macros a defined in the umathmodule.c.src file. This will be fixed... 
Modified: trunk/numpy/core/src/ufuncobject.c =================================================================== --- trunk/numpy/core/src/ufuncobject.c 2008-11-14 04:25:19 UTC (rev 6023) +++ trunk/numpy/core/src/ufuncobject.c 2008-11-14 04:25:29 UTC (rev 6024) @@ -33,11 +33,11 @@ *****************************************************************************/ -typedef double doubleUnaryFunc(double x); typedef float floatUnaryFunc(float x); +typedef double doubleUnaryFunc(double x); typedef longdouble longdoubleUnaryFunc(longdouble x); -typedef double doubleBinaryFunc(double x, double y); typedef float floatBinaryFunc(float x, float y); +typedef double doubleBinaryFunc(double x, double y); typedef longdouble longdoubleBinaryFunc(longdouble x, longdouble y); @@ -45,175 +45,96 @@ static void PyUFunc_f_f(char **args, intp *dimensions, intp *steps, void *func) { - intp n = dimensions[0]; - intp is1 = steps[0]; - intp os = steps[1]; - char *ip1 = args[0]; - char *op = args[1]; floatUnaryFunc *f = (floatUnaryFunc *)func; - intp i; - - for(i = 0; i < n; i++, ip1 += is1, op += os) { - float *in1 = (float *)ip1; - float *out = (float *)op; - - *out = f(*in1); + UNARY_LOOP { + const float in1 = *(float *)ip1; + *(float *)op1 = f(in1); } } /*UFUNC_API*/ static void -PyUFunc_ff_f(char **args, intp *dimensions, intp *steps, void *func) +PyUFunc_f_f_As_d_d(char **args, intp *dimensions, intp *steps, void *func) { - intp n = dimensions[0]; - intp is1 = steps[0]; - intp is2 = steps[1]; - intp os = steps[2]; - char *ip1 = args[0]; - char *ip2 = args[1]; - char *op = args[2]; - floatBinaryFunc *f = (floatBinaryFunc *)func; - intp i; - - for(i = 0; i < n; i++, ip1 += is1, ip2 += is2, op += os) { - float *in1 = (float *)ip1; - float *in2 = (float *)ip2; - float *out = (float *)op; - - *out = f(*in1, *in2); + doubleUnaryFunc *f = (doubleUnaryFunc *)func; + UNARY_LOOP { + const float in1 = *(float *)ip1; + *(float *)op1 = (float)f((double)in1); } } /*UFUNC_API*/ static void -PyUFunc_d_d(char **args, intp *dimensions, intp *steps, void *func) +PyUFunc_ff_f(char **args, intp *dimensions, intp *steps, void *func) { - intp n = dimensions[0]; - intp is1 = steps[0]; - intp os = steps[1]; - char *ip1 = args[0]; - char *op = args[1]; - doubleUnaryFunc *f = (doubleUnaryFunc *)func; - intp i; - - for(i = 0; i < n; i++, ip1 += is1, op += os) { - double *in1 = (double *)ip1; - double *out = (double *)op; - - *out = f(*in1); + floatBinaryFunc *f = (floatBinaryFunc *)func; + BINARY_LOOP { + float in1 = *(float *)ip1; + float in2 = *(float *)ip2; + *(float *)op1 = f(in1, in2); } } /*UFUNC_API*/ static void -PyUFunc_dd_d(char **args, intp *dimensions, intp *steps, void *func) +PyUFunc_ff_f_As_dd_d(char **args, intp *dimensions, intp *steps, void *func) { - intp n = dimensions[0]; - intp is1 = steps[0]; - intp is2 = steps[1]; - intp os = steps[2]; - char *ip1 = args[0]; - char *ip2 = args[1]; - char *op = args[2]; doubleBinaryFunc *f = (doubleBinaryFunc *)func; - intp i; - - for(i = 0; i < n; i++, ip1 += is1, ip2 += is2, op += os) { - double *in1 = (double *)ip1; - double *in2 = (double *)ip2; - double *out = (double *)op; - - *out = f(*in1, *in2); + BINARY_LOOP { + float in1 = *(float *)ip1; + float in2 = *(float *)ip2; + *(float *)op1 = (double)f((double)in1, (double)in2); } } /*UFUNC_API*/ static void -PyUFunc_g_g(char **args, intp *dimensions, intp *steps, void *func) +PyUFunc_d_d(char **args, intp *dimensions, intp *steps, void *func) { - intp n = dimensions[0]; - intp is1 = steps[0]; - intp os = steps[1]; - char *ip1 = 
args[0]; - char *op = args[1]; - longdoubleUnaryFunc *f = (longdoubleUnaryFunc *)func; - intp i; - - for(i = 0; i < n; i++, ip1 += is1, op += os) { - longdouble *in1 = (longdouble *)ip1; - longdouble *out = (longdouble *)op; - - *out = f(*in1); + doubleUnaryFunc *f = (floatUnaryFunc *)func; + UNARY_LOOP { + double in1 = *(double *)ip1; + *(double *)op1 = f(in1); } } /*UFUNC_API*/ static void -PyUFunc_gg_g(char **args, intp *dimensions, intp *steps, void *func) +PyUFunc_dd_d(char **args, intp *dimensions, intp *steps, void *func) { - intp n = dimensions[0]; - intp is1 = steps[0]; - intp is2 = steps[1]; - intp os = steps[2]; - char *ip1 = args[0]; - char *ip2 = args[1]; - char *op = args[2]; - longdoubleBinaryFunc *f = (longdoubleBinaryFunc *)func; - intp i; - - for(i = 0; i < n; i++, ip1 += is1, ip2 += is2, op += os) { - longdouble *in1 = (longdouble *)ip1; - longdouble *in2 = (longdouble *)ip2; - longdouble *out = (longdouble *)op; - - *out = f(*in1, *in2); + doubleBinaryFunc *f = (doubleBinaryFunc *)func; + BINARY_LOOP { + double in1 = *(double *)ip1; + double in2 = *(double *)ip2; + *(double *)op1 = f(in1, in2); } } /*UFUNC_API*/ static void -PyUFunc_f_f_As_d_d(char **args, intp *dimensions, intp *steps, void *func) +PyUFunc_g_g(char **args, intp *dimensions, intp *steps, void *func) { - intp n = dimensions[0]; - intp is1 = steps[0]; - intp os = steps[1]; - char *ip1 = args[0]; - char *op = args[1]; - doubleUnaryFunc *f = (doubleUnaryFunc *)func; - intp i; - - for(i = 0; i < n; i++, ip1 += is1, op += os) { - float *in1 = (float *)ip1; - float *out = (float *)op; - - *out = (float)f((double)*in1); + longdoubleUnaryFunc *f = (longdoubleUnaryFunc *)func; + UNARY_LOOP { + longdouble in1 = *(longdouble *)ip1; + *(longdouble *)op1 = f(in1); } } /*UFUNC_API*/ static void -PyUFunc_ff_f_As_dd_d(char **args, intp *dimensions, intp *steps, void *func) +PyUFunc_gg_g(char **args, intp *dimensions, intp *steps, void *func) { - intp n = dimensions[0]; - intp is1 = steps[0]; - intp is2 = steps[1]; - intp os = steps[2]; - char *ip1 = args[0]; - char *ip2 = args[1]; - char *op = args[2]; - doubleBinaryFunc *f = (doubleBinaryFunc *)func; - intp i; - - for(i = 0; i < n; i++, ip1 += is1, ip2 += is2, op += os) { - float *in1 = (float *)ip1; - float *in2 = (float *)ip2; - float *out = (float *)op; - - *out = (float)f((double)*in1, (double)*in2); + longdoubleBinaryFunc *f = (longdoubleBinaryFunc *)func; + BINARY_LOOP { + longdouble in1 = *(longdouble *)ip1; + longdouble in2 = *(longdouble *)ip2; + *(longdouble *)op1 = f(in1, in2); } } + /****************************************************************************** * Generic Complex Floating Type Loops *****************************************************************************/ @@ -231,219 +152,106 @@ static void PyUFunc_F_F(char **args, intp *dimensions, intp *steps, void *func) { - intp n = dimensions[0]; - intp is1 = steps[0]; - intp os = steps[1]; - char *ip1 = args[0]; - char *op = args[1]; cfloatUnaryFunc *f = (cfloatUnaryFunc *)func; - intp i; - - for(i = 0; i < n; i++, ip1 += is1, op += os) { - float *in1 = (float *)ip1; - float *out = (float *)op; - cfloat x, r; - - x.real = in1[0]; - x.imag = in1[1]; - f(&x, &r); - out[0] = r.real; - out[1] = r.imag; + UNARY_LOOP { + cfloat in1 = *(cfloat *)ip1; + cfloat *out = (cfloat *)op1; + f(&in1, out); } } /*UFUNC_API*/ static void -PyUFunc_FF_F(char **args, intp *dimensions, intp *steps, void *func) +PyUFunc_F_F_As_D_D(char **args, intp *dimensions, intp *steps, void *func) { - intp n = dimensions[0]; - intp 
is1 = steps[0]; - intp is2 = steps[1]; - intp os = steps[2]; - char *ip1 = args[0]; - char *ip2 = args[1]; - char *op = args[2]; - cfloatBinaryFunc *f = (cfloatBinaryFunc *)func; - intp i; - - for(i = 0; i < n; i++, ip1 += is1, ip2 += is2, op += os) { - float *in1 = (float *)ip1; - float *in2 = (float *)ip2; - float *out = (float *)op; - cfloat x,y,r; - - x.real = in1[0]; - x.imag = in1[1]; - y.real = in2[0]; - y.imag = in2[1]; - f(&x, &y, &r); - out[0] = r.real; - out[1] = r.imag; + cdoubleUnaryFunc *f = (cdoubleUnaryFunc *)func; + UNARY_LOOP { + const float *in1 = (float *)ip1; + cdouble tmp = {(double)(in1[0]),(double)in1[1]}; + cdouble out; + f(&tmp, &out); + ((float *)op1)[0] = (float)out.real; + ((float *)op1)[1] = (float)out.imag; } } /*UFUNC_API*/ static void -PyUFunc_D_D(char **args, intp *dimensions, intp *steps, void *func) +PyUFunc_FF_F(char **args, intp *dimensions, intp *steps, void *func) { - intp n = dimensions[0]; - intp is1 = steps[0]; - intp os = steps[1]; - char *ip1 = args[0]; - char *op = args[1]; - cdoubleUnaryFunc *f = (cdoubleUnaryFunc *)func; - intp i; - - for(i = 0; i < n; i++, ip1 += is1, op += os) { - double *in1 = (double *)ip1; - double *out = (double *)op; - cdouble x, r; - - x.real = in1[0]; - x.imag = in1[1]; - f(&x, &r); - out[0] = r.real; - out[1] = r.imag; + cfloatBinaryFunc *f = (cfloatBinaryFunc *)func; + BINARY_LOOP { + cfloat in1 = *(cfloat *)ip1; + cfloat in2 = *(cfloat *)ip2; + cfloat *out = (cfloat *)op1; + f(&in1, &in2, out); } } /*UFUNC_API*/ static void -PyUFunc_DD_D(char **args, intp *dimensions, intp *steps, void *func) +PyUFunc_FF_F_As_DD_D(char **args, intp *dimensions, intp *steps, void *func) { - intp n = dimensions[0]; - intp is1 = steps[0]; - intp is2 = steps[1]; - intp os = steps[2]; - char *ip1 = args[0]; - char *ip2 = args[1]; - char *op = args[2]; cdoubleBinaryFunc *f = (cdoubleBinaryFunc *)func; - intp i; - - for(i = 0; i < n; i++, ip1 += is1, ip2 += is2, op += os) { - double *in1 = (double *)ip1; - double *in2 = (double *)ip2; - double *out = (double *)op; - cdouble x,y,r; - - x.real = in1[0]; - x.imag = in1[1]; - y.real = in2[0]; - y.imag = in2[1]; - f(&x, &y, &r); - out[0] = r.real; - out[1] = r.imag; + BINARY_LOOP { + const float *in1 = (float *)ip1; + const float *in2 = (float *)ip2; + cdouble tmp1 = {(double)(in1[0]),(double)in1[1]}; + cdouble tmp2 = {(double)(in2[0]),(double)in2[1]}; + cdouble out; + f(&tmp1, &tmp2, &out); + ((float *)op1)[0] = (float)out.real; + ((float *)op1)[1] = (float)out.imag; } } /*UFUNC_API*/ static void -PyUFunc_G_G(char **args, intp *dimensions, intp *steps, void *func) +PyUFunc_D_D(char **args, intp *dimensions, intp *steps, void *func) { - intp n = dimensions[0]; - intp is1 = steps[0]; - intp os = steps[1]; - char *ip1 = args[0]; - char *op = args[1]; - clongdoubleUnaryFunc *f = (clongdoubleUnaryFunc *)func; - intp i; - - for(i = 0; i < n; i++, ip1 += is1, op += os) { - longdouble *in1 = (longdouble *)ip1; - longdouble *out = (longdouble *)op; - clongdouble x, r; - - x.real = in1[0]; - x.imag = in1[1]; - f(&x, &r); - out[0] = r.real; - out[1] = r.imag; + cdoubleUnaryFunc *f = (cdoubleUnaryFunc *)func; + UNARY_LOOP { + cdouble in1 = *(cdouble *)ip1; + cdouble *out = (cdouble *)op1; + f(&in1, out); } } /*UFUNC_API*/ static void -PyUFunc_GG_G(char **args, intp *dimensions, intp *steps, void *func) +PyUFunc_DD_D(char **args, intp *dimensions, intp *steps, void *func) { - intp n = dimensions[0]; - intp is1 = steps[0]; - intp is2 = steps[1]; - intp os = steps[2]; - char *ip1 = args[0]; - char *ip2 = 
args[1]; - char *op = args[2]; - clongdoubleBinaryFunc *f = (clongdoubleBinaryFunc *)func; - intp i; - - for(i = 0; i < n; i++, ip1 += is1, ip2 += is2, op += os) { - longdouble *in1 = (longdouble *)ip1; - longdouble *in2 = (longdouble *)ip2; - longdouble *out = (longdouble *)op; - clongdouble x,y,r; - - x.real = in1[0]; - x.imag = in1[1]; - y.real = in2[0]; - y.imag = in2[1]; - f(&x, &y, &r); - out[0] = r.real; - out[1] = r.imag; + cdoubleBinaryFunc *f = (cdoubleBinaryFunc *)func; + BINARY_LOOP { + cdouble in1 = *(cdouble *)ip1; + cdouble in2 = *(cdouble *)ip2; + cdouble *out = (cdouble *)op1; + f(&in1, &in2, out); } } /*UFUNC_API*/ static void -PyUFunc_F_F_As_D_D(char **args, intp *dimensions, intp *steps, void *func) +PyUFunc_G_G(char **args, intp *dimensions, intp *steps, void *func) { - intp n = dimensions[0]; - intp is1 = steps[0]; - intp os = steps[1]; - char *ip1 = args[0]; - char *op = args[1]; - cdoubleUnaryFunc *f = (cdoubleUnaryFunc *)func; - intp i; - - for(i = 0; i < n; i++, ip1 += is1, op += os) { - float *in1 = (float *)ip1; - float *out = (float *)op; - cdouble x, r; - - x.real = in1[0]; - x.imag = in1[1]; - f(&x, &r); - out[0] = (float)r.real; - out[1] = (float)r.imag; + clongdoubleUnaryFunc *f = (clongdoubleUnaryFunc *)func; + UNARY_LOOP { + clongdouble in1 = *(clongdouble *)ip1; + clongdouble *out = (clongdouble *)op1; + f(&in1, out); } } /*UFUNC_API*/ static void -PyUFunc_FF_F_As_DD_D(char **args, intp *dimensions, intp *steps, void *func) +PyUFunc_GG_G(char **args, intp *dimensions, intp *steps, void *func) { - intp n = dimensions[0]; - intp is1 = steps[0]; - intp is2 = steps[1]; - intp os = steps[2]; - char *ip1 = args[0]; - char *ip2 = args[1]; - char *op = args[2]; - cdoubleBinaryFunc *f = (cdoubleBinaryFunc *)func; - intp i; - - for(i = 0; i < n; i++, ip1 += is1, ip2 += is2, op += os) { - float *in1 = (float *)ip1; - float *in2 = (float *)ip2; - float *out = (float *)op; - cdouble x,y,r; - - x.real = in1[0]; - x.imag = in1[1]; - y.real = in2[0]; - y.imag = in2[1]; - f(&x, &y, &r); - out[0] = (float)r.real; - out[1] = (float)r.imag; + clongdoubleBinaryFunc *f = (clongdoubleBinaryFunc *)func; + BINARY_LOOP { + clongdouble in1 = *(clongdouble *)ip1; + clongdouble in2 = *(clongdouble *)ip2; + clongdouble *out = (clongdouble *)op1; + f(&in1, &in2, out); } } @@ -456,17 +264,10 @@ static void PyUFunc_O_O(char **args, intp *dimensions, intp *steps, void *func) { - intp n = dimensions[0]; - intp is1 = steps[0]; - intp os = steps[1]; - char *ip1 = args[0]; - char *op = args[1]; unaryfunc f = (unaryfunc)func; - intp i; - - for(i = 0; i < n; i++, ip1 += is1, op += os) { + UNARY_LOOP { PyObject *in1 = *(PyObject **)ip1; - PyObject **out = (PyObject **)op; + PyObject **out = (PyObject **)op1; PyObject *ret; if (in1 == NULL) { @@ -485,17 +286,10 @@ static void PyUFunc_O_O_method(char **args, intp *dimensions, intp *steps, void *func) { - intp n = dimensions[0]; - intp is1 = steps[0]; - intp os = steps[1]; - char *ip1 = args[0]; - char *op = args[1]; char *meth = (char *)func; - intp i; - - for(i = 0; i < n; i++, ip1 += is1, op += os) { + UNARY_LOOP { PyObject *in1 = *(PyObject **)ip1; - PyObject **out = (PyObject **)op; + PyObject **out = (PyObject **)op1; PyObject *ret = PyObject_CallMethod(in1, meth, NULL); if (ret == NULL) { @@ -510,19 +304,10 @@ static void PyUFunc_OO_O(char **args, intp *dimensions, intp *steps, void *func) { - intp n = dimensions[0]; - intp is1 = steps[0]; - intp is2 = steps[1]; - intp os = steps[2]; - char *ip1 = args[0]; - char *ip2 = args[1]; - char *op 
= args[2]; - intp i; - - for(i = 0; i < n; i++, ip1 += is1, ip2 += is2, op += os) { + BINARY_LOOP { PyObject *in1 = *(PyObject **)ip1; PyObject *in2 = *(PyObject **)ip2; - PyObject **out = (PyObject **)op; + PyObject **out = (PyObject **)op1; PyObject *ret; if ((in1 == NULL) || (in2 == NULL)) { @@ -546,20 +331,11 @@ static void PyUFunc_OO_O_method(char **args, intp *dimensions, intp *steps, void *func) { - intp n = dimensions[0]; - intp is1 = steps[0]; - intp is2 = steps[1]; - intp os = steps[2]; - char *ip1 = args[0]; - char *ip2 = args[1]; - char *op = args[2]; char *meth = (char *)func; - intp i; - - for(i = 0; i < n; i++, ip1 += is1, ip2 += is2, op += os) { + BINARY_LOOP { PyObject *in1 = *(PyObject **)ip1; PyObject *in2 = *(PyObject **)ip2; - PyObject **out = (PyObject **)op; + PyObject **out = (PyObject **)op1; PyObject *ret = PyObject_CallMethod(in1, meth, "(O)", in2); if (ret == NULL) { From numpy-svn at scipy.org Thu Nov 13 23:25:43 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 13 Nov 2008 22:25:43 -0600 (CST) Subject: [Numpy-svn] r6025 - trunk/numpy/core/src Message-ID: <20081114042543.5CF6F39C05F@scipy.org> Author: charris Date: 2008-11-13 22:25:38 -0600 (Thu, 13 Nov 2008) New Revision: 6025 Modified: trunk/numpy/core/src/ufuncobject.c Log: Remove trailing whitespace. Modified: trunk/numpy/core/src/ufuncobject.c =================================================================== --- trunk/numpy/core/src/ufuncobject.c 2008-11-14 04:25:29 UTC (rev 6024) +++ trunk/numpy/core/src/ufuncobject.c 2008-11-14 04:25:38 UTC (rev 6025) @@ -168,7 +168,7 @@ UNARY_LOOP { const float *in1 = (float *)ip1; cdouble tmp = {(double)(in1[0]),(double)in1[1]}; - cdouble out; + cdouble out; f(&tmp, &out); ((float *)op1)[0] = (float)out.real; ((float *)op1)[1] = (float)out.imag; @@ -198,7 +198,7 @@ const float *in2 = (float *)ip2; cdouble tmp1 = {(double)(in1[0]),(double)in1[1]}; cdouble tmp2 = {(double)(in2[0]),(double)in2[1]}; - cdouble out; + cdouble out; f(&tmp1, &tmp2, &out); ((float *)op1)[0] = (float)out.real; ((float *)op1)[1] = (float)out.imag; From numpy-svn at scipy.org Fri Nov 14 03:27:08 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 14 Nov 2008 02:27:08 -0600 (CST) Subject: [Numpy-svn] r6026 - trunk/numpy/core/src Message-ID: <20081114082708.EC02F39C05F@scipy.org> Author: charris Date: 2008-11-14 02:27:02 -0600 (Fri, 14 Nov 2008) New Revision: 6026 Modified: trunk/numpy/core/src/math_c99.inc.src trunk/numpy/core/src/ufuncobject.c Log: Merge branch 'ufunc' Modified: trunk/numpy/core/src/math_c99.inc.src =================================================================== --- trunk/numpy/core/src/math_c99.inc.src 2008-11-14 04:25:38 UTC (rev 6025) +++ trunk/numpy/core/src/math_c99.inc.src 2008-11-14 08:27:02 UTC (rev 6026) @@ -30,13 +30,15 @@ * ii) Check if the function was in the library, If not, define the * function with npy_ prepended to its name to avoid conflict with any * intrinsic versions, then use a define so that the preprocessor will - * replace foo with npy_foo before the compilation pass. + * replace foo with npy_foo before the compilation pass. Make the + * function static to avoid poluting the module library. 
* * #ifdef foo * #undef foo * #endif * #ifndef HAVE_FOO - * double npy_foo(double x) + * static double + * npy_foo(double x) * { * return x; * } @@ -51,7 +53,8 @@ * #undef foo * #endif * #ifndef HAVE_FOO - * double npy_foo(double x) + * static double + * npy_foo(double x) * { * return x; * } @@ -79,7 +82,8 @@ /* Original code by Konrad Hinsen. */ #ifndef HAVE_EXPM1 -double expm1(double x) +static double +npy_expm1(double x) { double u = exp(x); if (u == 1.0) { @@ -90,12 +94,14 @@ return (u-1.0) * x/log(u); } } +#define expm1 npy_expm1 #else double expm1(double x); #endif #ifndef HAVE_LOG1P -double log1p(double x) +static double +npy_log1p(double x) { double u = 1. + x; if (u == 1.0) { @@ -104,12 +110,14 @@ return log(u) * x / (u - 1); } } +#define log1p npy_log1p #else double log1p(double x); #endif #ifndef HAVE_HYPOT -double hypot(double x, double y) +static double +npy_hypot(double x, double y) { double yx; @@ -127,21 +135,25 @@ return x*sqrt(1.+yx*yx); } } +#define hypot npy_hypot #else double hypot(double x, double y); #endif #ifndef HAVE_ACOSH -double acosh(double x) +static double +npy_acosh(double x) { return 2*log(sqrt((x+1.0)/2)+sqrt((x-1.0)/2)); } +#define acosh npy_acosh #else double acosh(double x); #endif #ifndef HAVE_ASINH -double asinh(double xx) +static double +npy_asinh(double xx) { double x, d; int sign; @@ -160,21 +172,25 @@ } return sign*log1p(x*(1.0 + x/(d+1))); } +#define asinh npy_asinh #else double asinh(double xx); #endif #ifndef HAVE_ATANH -double atanh(double x) +static double +npy_atanh(double x) { return 0.5*log1p(2.0*x/(1.0-x)); } +#define atanh npy_atanh #else double atanh(double x); #endif #ifndef HAVE_RINT -double rint(double x) +static double +npy_rint(double x) { double y, r; @@ -193,25 +209,30 @@ } return y; } +#define rint npy_rint #else double rint(double x); #endif #ifndef HAVE_TRUNC -double trunc(double x) +static double +npy_trunc(double x) { return x < 0 ? 
ceil(x) : floor(x); } +#define trunc npy_trunc #else double trunc(double x); #endif #ifndef HAVE_EXP2 #define LOG2 0.69314718055994530943 -double exp2(double x) +static double +npy_exp2(double x) { return exp(LOG2*x); } +#define exp2 npy_exp2 #undef LOG2 #else double exp2(double x); @@ -219,10 +240,12 @@ #ifndef HAVE_LOG2 #define INVLOG2 1.4426950408889634074 -double log2(double x) +static double +npy_log2(double x) { return INVLOG2*log(x); } +#define log2 npy_log2 #undef INVLOG2 #else double log2(double x); @@ -303,7 +326,8 @@ #undef @kind@@c@ #endif #ifndef HAVE_ at KIND@@C@ - at type@ npy_ at kind@@c@(@type@ x) +static @type@ +npy_ at kind@@c@(@type@ x) { return (@type@) @kind@((double)x); } @@ -322,7 +346,8 @@ #undef @kind@@c@ #endif #ifndef HAVE_ at KIND@@C@ - at type@ npy_ at kind@@c@(@type@ x, @type@ y) +static @type@ +npy_ at kind@@c@(@type@ x, @type@ y) { return (@type@) @kind@((double)x, (double) y); } @@ -336,7 +361,8 @@ #undef modf at c@ #endif #ifndef HAVE_MODF at C@ - at type@ npy_modf at c@(@type@ x, @type@ *iptr) +static @type@ +npy_modf at c@(@type@ x, @type@ *iptr) { double niptr; double y = modf((double)x, &niptr); Modified: trunk/numpy/core/src/ufuncobject.c =================================================================== --- trunk/numpy/core/src/ufuncobject.c 2008-11-14 04:25:38 UTC (rev 6025) +++ trunk/numpy/core/src/ufuncobject.c 2008-11-14 08:27:02 UTC (rev 6026) @@ -91,7 +91,7 @@ static void PyUFunc_d_d(char **args, intp *dimensions, intp *steps, void *func) { - doubleUnaryFunc *f = (floatUnaryFunc *)func; + doubleUnaryFunc *f = (doubleUnaryFunc *)func; UNARY_LOOP { double in1 = *(double *)ip1; *(double *)op1 = f(in1); From numpy-svn at scipy.org Fri Nov 14 11:10:37 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 14 Nov 2008 10:10:37 -0600 (CST) Subject: [Numpy-svn] r6027 - trunk/numpy/random Message-ID: <20081114161037.8FDEE39C05F@scipy.org> Author: cdavid Date: 2008-11-14 10:10:21 -0600 (Fri, 14 Nov 2008) New Revision: 6027 Modified: trunk/numpy/random/setup.py Log: Add code to detect msvc used to build python. Modified: trunk/numpy/random/setup.py =================================================================== --- trunk/numpy/random/setup.py 2008-11-14 08:27:02 UTC (rev 6026) +++ trunk/numpy/random/setup.py 2008-11-14 16:10:21 UTC (rev 6027) @@ -1,5 +1,14 @@ from os.path import join, split +import sys +def msvc_version(): + """Return the msvc version used to build the running python, None if not + built with MSVC.""" + msc_pos = sys.version.find('MSC v.') + if msc_pos != -1: + return sys.version[msc_pos+6:msc_pos+10] + return None + def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration, get_mathlibs config = Configuration('random',parent_package,top_path) From numpy-svn at scipy.org Fri Nov 14 11:52:05 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 14 Nov 2008 10:52:05 -0600 (CST) Subject: [Numpy-svn] r6028 - in trunk/numpy/random: . mtrand Message-ID: <20081114165205.5B66939C05F@scipy.org> Author: cdavid Date: 2008-11-14 10:51:51 -0600 (Fri, 14 Nov 2008) New Revision: 6028 Modified: trunk/numpy/random/mtrand/randomkit.c trunk/numpy/random/setup.py Log: Generate config header for random kit (empty for now). 
Modified: trunk/numpy/random/mtrand/randomkit.c =================================================================== --- trunk/numpy/random/mtrand/randomkit.c 2008-11-14 16:10:21 UTC (rev 6027) +++ trunk/numpy/random/mtrand/randomkit.c 2008-11-14 16:51:51 UTC (rev 6028) @@ -64,6 +64,7 @@ /* static char const rcsid[] = "@(#) $Jeannot: randomkit.c,v 1.28 2005/07/21 22:14:09 js Exp $"; */ +#include "config.h" #include #include Modified: trunk/numpy/random/setup.py =================================================================== --- trunk/numpy/random/setup.py 2008-11-14 16:10:21 UTC (rev 6027) +++ trunk/numpy/random/setup.py 2008-11-14 16:51:51 UTC (rev 6028) @@ -1,5 +1,7 @@ -from os.path import join, split +from os.path import join, split, dirname +import os import sys +from distutils.dep_util import newer def msvc_version(): """Return the msvc version used to build the running python, None if not @@ -22,12 +24,27 @@ ext.libraries.extend(libs) return None + def generate_config_h(ext, build_dir): + defs = [] + target = join(build_dir, "mtrand", 'config.h') + dir = dirname(target) + if not os.path.exists(dir): + os.makedirs(dir) + + if newer(__file__, target): + target_f = open(target, 'a') + for d in defs: + if isinstance(d, str): + target_f.write('#define %s\n' % (d)) + target_f.close() + libs = [] # Configure mtrand config.add_extension('mtrand', sources=[join('mtrand', x) for x in ['mtrand.c', 'randomkit.c', 'initarray.c', - 'distributions.c']]+[generate_libraries], + 'distributions.c']]+[generate_libraries] + + [generate_config_h], libraries=libs, depends = [join('mtrand','*.h'), join('mtrand','*.pyx'), From numpy-svn at scipy.org Fri Nov 14 11:52:29 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 14 Nov 2008 10:52:29 -0600 (CST) Subject: [Numpy-svn] r6029 - trunk/numpy/random/mtrand Message-ID: <20081114165229.4B30539C05F@scipy.org> Author: cdavid Date: 2008-11-14 10:52:18 -0600 (Fri, 14 Nov 2008) New Revision: 6029 Modified: trunk/numpy/random/mtrand/randomkit.c Log: Postpone time.h include because we will need to customize it on windows. Modified: trunk/numpy/random/mtrand/randomkit.c =================================================================== --- trunk/numpy/random/mtrand/randomkit.c 2008-11-14 16:51:51 UTC (rev 6028) +++ trunk/numpy/random/mtrand/randomkit.c 2008-11-14 16:52:18 UTC (rev 6029) @@ -70,12 +70,12 @@ #include #include #include -#include #include #include #ifdef _WIN32 /* Windows */ +#include #include #ifndef RK_NO_WINCRYPT /* Windows crypto */ @@ -87,6 +87,7 @@ #endif #else /* Unix */ +#include #include #include #endif From numpy-svn at scipy.org Fri Nov 14 11:52:49 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 14 Nov 2008 10:52:49 -0600 (CST) Subject: [Numpy-svn] r6030 - trunk/numpy/random Message-ID: <20081114165249.979F639C05F@scipy.org> Author: cdavid Date: 2008-11-14 10:52:41 -0600 (Fri, 14 Nov 2008) New Revision: 6030 Modified: trunk/numpy/random/setup.py Log: Add a macro to know whether we need the mingw workaround for _ftime + add a define for the MSVCR version. 
Modified: trunk/numpy/random/setup.py =================================================================== --- trunk/numpy/random/setup.py 2008-11-14 16:52:18 UTC (rev 6029) +++ trunk/numpy/random/setup.py 2008-11-14 16:52:41 UTC (rev 6030) @@ -2,6 +2,7 @@ import os import sys from distutils.dep_util import newer +from numpy.distutils.misc_util import msvc_runtime_library def msvc_version(): """Return the msvc version used to build the running python, None if not @@ -11,6 +12,11 @@ return sys.version[msc_pos+6:msc_pos+10] return None +def msvcrt_to_hex(msvc): + major = msvc / 100 + minor = msvc - major * 100 + return hex(major * 256 + minor) + def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration, get_mathlibs config = Configuration('random',parent_package,top_path) @@ -31,6 +37,17 @@ if not os.path.exists(dir): os.makedirs(dir) + msv = msvc_version() + if msv and msv >= 1400: + msvcrt = msvc_runtime_library() + if msvcrt is None: + raise ValueError("Discrepancy between " \ + "msvc_runtime_library " \ + "and our msvc detection scheme ?" + hmsvc = msvc_to_hex(msvcrt) + defs.append("NPY_NEEDS_MINGW_TIME_WORKAROUND") + defs.append(("NPY_MSVCRT_VERSION", str(hmsvc))) + if newer(__file__, target): target_f = open(target, 'a') for d in defs: From numpy-svn at scipy.org Fri Nov 14 11:54:12 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 14 Nov 2008 10:54:12 -0600 (CST) Subject: [Numpy-svn] r6031 - trunk/numpy/random Message-ID: <20081114165412.238A139C05F@scipy.org> Author: cdavid Date: 2008-11-14 10:54:03 -0600 (Fri, 14 Nov 2008) New Revision: 6031 Modified: trunk/numpy/random/setup.py Log: Typo. Modified: trunk/numpy/random/setup.py =================================================================== --- trunk/numpy/random/setup.py 2008-11-14 16:52:41 UTC (rev 6030) +++ trunk/numpy/random/setup.py 2008-11-14 16:54:03 UTC (rev 6031) @@ -43,7 +43,7 @@ if msvcrt is None: raise ValueError("Discrepancy between " \ "msvc_runtime_library " \ - "and our msvc detection scheme ?" + "and our msvc detection scheme ?") hmsvc = msvc_to_hex(msvcrt) defs.append("NPY_NEEDS_MINGW_TIME_WORKAROUND") defs.append(("NPY_MSVCRT_VERSION", str(hmsvc))) From numpy-svn at scipy.org Fri Nov 14 11:56:02 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 14 Nov 2008 10:56:02 -0600 (CST) Subject: [Numpy-svn] r6032 - trunk/numpy/random Message-ID: <20081114165602.BE9E039C05F@scipy.org> Author: cdavid Date: 2008-11-14 10:55:54 -0600 (Fri, 14 Nov 2008) New Revision: 6032 Modified: trunk/numpy/random/setup.py Log: Typo. 
Modified: trunk/numpy/random/setup.py =================================================================== --- trunk/numpy/random/setup.py 2008-11-14 16:54:03 UTC (rev 6031) +++ trunk/numpy/random/setup.py 2008-11-14 16:55:54 UTC (rev 6032) @@ -44,7 +44,7 @@ raise ValueError("Discrepancy between " \ "msvc_runtime_library " \ "and our msvc detection scheme ?") - hmsvc = msvc_to_hex(msvcrt) + hmsvc = msvcrt_to_hex(msvcrt) defs.append("NPY_NEEDS_MINGW_TIME_WORKAROUND") defs.append(("NPY_MSVCRT_VERSION", str(hmsvc))) From numpy-svn at scipy.org Fri Nov 14 11:58:28 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 14 Nov 2008 10:58:28 -0600 (CST) Subject: [Numpy-svn] r6033 - trunk/numpy/random Message-ID: <20081114165828.3DBB639C05F@scipy.org> Author: cdavid Date: 2008-11-14 10:58:16 -0600 (Fri, 14 Nov 2008) New Revision: 6033 Modified: trunk/numpy/random/setup.py Log: Fix hex conversion for msvcrt (we do not need to convert VS verion, but MSVCRT version). Modified: trunk/numpy/random/setup.py =================================================================== --- trunk/numpy/random/setup.py 2008-11-14 16:55:54 UTC (rev 6032) +++ trunk/numpy/random/setup.py 2008-11-14 16:58:16 UTC (rev 6033) @@ -13,8 +13,8 @@ return None def msvcrt_to_hex(msvc): - major = msvc / 100 - minor = msvc - major * 100 + major = msvc / 10 + minor = msvc - major * 10 return hex(major * 256 + minor) def configuration(parent_package='',top_path=None): @@ -44,7 +44,7 @@ raise ValueError("Discrepancy between " \ "msvc_runtime_library " \ "and our msvc detection scheme ?") - hmsvc = msvcrt_to_hex(msvcrt) + hmsvc = msvcrt_to_hex(int(msvcrt[5:]) defs.append("NPY_NEEDS_MINGW_TIME_WORKAROUND") defs.append(("NPY_MSVCRT_VERSION", str(hmsvc))) From numpy-svn at scipy.org Fri Nov 14 11:59:24 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 14 Nov 2008 10:59:24 -0600 (CST) Subject: [Numpy-svn] r6034 - trunk/numpy/random Message-ID: <20081114165924.9F70439C05F@scipy.org> Author: cdavid Date: 2008-11-14 10:59:14 -0600 (Fri, 14 Nov 2008) New Revision: 6034 Modified: trunk/numpy/random/setup.py Log: Another typo. Modified: trunk/numpy/random/setup.py =================================================================== --- trunk/numpy/random/setup.py 2008-11-14 16:58:16 UTC (rev 6033) +++ trunk/numpy/random/setup.py 2008-11-14 16:59:14 UTC (rev 6034) @@ -44,7 +44,7 @@ raise ValueError("Discrepancy between " \ "msvc_runtime_library " \ "and our msvc detection scheme ?") - hmsvc = msvcrt_to_hex(int(msvcrt[5:]) + hmsvc = msvcrt_to_hex(int(msvcrt[5:])) defs.append("NPY_NEEDS_MINGW_TIME_WORKAROUND") defs.append(("NPY_MSVCRT_VERSION", str(hmsvc))) From numpy-svn at scipy.org Sat Nov 15 23:01:08 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 15 Nov 2008 22:01:08 -0600 (CST) Subject: [Numpy-svn] r6035 - trunk/numpy/core/src Message-ID: <20081116040108.7242539C249@scipy.org> Author: charris Date: 2008-11-15 22:01:01 -0600 (Sat, 15 Nov 2008) New Revision: 6035 Modified: trunk/numpy/core/src/ufuncobject.c trunk/numpy/core/src/umathmodule.c.src Log: Add error checking to Object loops. Fix reference leak in Sign Object loop. Define a binary version of PyNumber_Power so that the generic object loop doesn't have to check for that function. Clean up generic object loops. 
Modified: trunk/numpy/core/src/ufuncobject.c =================================================================== --- trunk/numpy/core/src/ufuncobject.c 2008-11-14 16:59:14 UTC (rev 6034) +++ trunk/numpy/core/src/ufuncobject.c 2008-11-16 04:01:01 UTC (rev 6035) @@ -268,12 +268,7 @@ UNARY_LOOP { PyObject *in1 = *(PyObject **)ip1; PyObject **out = (PyObject **)op1; - PyObject *ret; - - if (in1 == NULL) { - return; - } - ret = f(in1); + PyObject *ret = f(in1); if ((ret == NULL) || PyErr_Occurred()) { return; } @@ -291,7 +286,6 @@ PyObject *in1 = *(PyObject **)ip1; PyObject **out = (PyObject **)op1; PyObject *ret = PyObject_CallMethod(in1, meth, NULL); - if (ret == NULL) { return; } @@ -304,21 +298,12 @@ static void PyUFunc_OO_O(char **args, intp *dimensions, intp *steps, void *func) { + binaryfunc f = (binaryfunc)func; BINARY_LOOP { PyObject *in1 = *(PyObject **)ip1; PyObject *in2 = *(PyObject **)ip2; PyObject **out = (PyObject **)op1; - PyObject *ret; - - if ((in1 == NULL) || (in2 == NULL)) { - return; - } - if ( (void *) func == (void *) PyNumber_Power) { - ret = ((ternaryfunc)func)(in1, in2, Py_None); - } - else { - ret = ((binaryfunc)func)(in1, in2); - } + PyObject *ret = f(in1, in2); if (PyErr_Occurred()) { return; } @@ -337,7 +322,6 @@ PyObject *in2 = *(PyObject **)ip2; PyObject **out = (PyObject **)op1; PyObject *ret = PyObject_CallMethod(in1, meth, "(O)", in2); - if (ret == NULL) { return; } Modified: trunk/numpy/core/src/umathmodule.c.src =================================================================== --- trunk/numpy/core/src/umathmodule.c.src 2008-11-14 16:59:14 UTC (rev 6034) +++ trunk/numpy/core/src/umathmodule.c.src 2008-11-16 04:01:01 UTC (rev 6035) @@ -139,6 +139,16 @@ return result; } +/* + * Define numpy version of PyNumber_Power as binary function. + */ +static PyObject * +npy_PyNumber_Power(PyObject *x, PyObject *y) +{ + PyNumber_Power(x, y, Py_None); +} +#define PyNumber_Power npy_PyNumber_Power + /**begin repeat * #Kind = Max, Min# * #OP = >=, <=# @@ -1520,7 +1530,11 @@ BINARY_LOOP { PyObject *in1 = *(PyObject **)ip1; PyObject *in2 = *(PyObject **)ip2; - *((Bool *)op1) = (Bool) PyObject_RichCompareBool(in1, in2, Py_ at OP@); + int ret = PyObject_RichCompareBool(in1, in2, Py_ at OP@); + if (ret == -1) { + return; + } + *((Bool *)op1) = (Bool)ret; } } /**end repeat**/ @@ -1531,7 +1545,13 @@ PyObject *zero = PyInt_FromLong(0); UNARY_LOOP { PyObject *in1 = *(PyObject **)ip1; - *((PyObject **)op1) = PyInt_FromLong(PyObject_Compare(in1, zero)); + PyObject **out = (PyObject **)op1; + PyObject *ret = PyInt_FromLong(PyObject_Compare(in1, zero)); + if (PyErr_Occurred()) { + return; + } + Py_XDECREF(*out); + *out = ret; } Py_DECREF(zero); } From numpy-svn at scipy.org Sun Nov 16 01:23:36 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 16 Nov 2008 00:23:36 -0600 (CST) Subject: [Numpy-svn] r6036 - in trunk/numpy/core: code_generators src Message-ID: <20081116062336.C109439C249@scipy.org> Author: charris Date: 2008-11-16 00:23:29 -0600 (Sun, 16 Nov 2008) New Revision: 6036 Modified: trunk/numpy/core/code_generators/generate_umath.py trunk/numpy/core/src/umathmodule.c.src Log: Add missing return. Remove pointless define. Small name cleanup. 
Modified: trunk/numpy/core/code_generators/generate_umath.py =================================================================== --- trunk/numpy/core/code_generators/generate_umath.py 2008-11-16 04:01:01 UTC (rev 6035) +++ trunk/numpy/core/code_generators/generate_umath.py 2008-11-16 06:23:29 UTC (rev 6036) @@ -229,7 +229,7 @@ docstrings.get('numpy.core.umath.power'), TD(ints), TD(inexact, f='pow'), - TD(O, f='PyNumber_Power'), + TD(O, f='npy_ObjectPower'), ), 'absolute' : Ufunc(1, 1, None, @@ -308,25 +308,25 @@ Ufunc(2, 1, None, docstrings.get('numpy.core.umath.maximum'), TD(noobj), - TD(O, f='_npy_ObjectMax') + TD(O, f='npy_ObjectMax') ), 'minimum' : Ufunc(2, 1, None, docstrings.get('numpy.core.umath.minimum'), TD(noobj), - TD(O, f='_npy_ObjectMin') + TD(O, f='npy_ObjectMin') ), 'fmax' : Ufunc(2, 1, None, "", TD(noobj), - TD(O, f='_npy_ObjectMax') + TD(O, f='npy_ObjectMax') ), 'fmin' : Ufunc(2, 1, None, "", TD(noobj), - TD(O, f='_npy_ObjectMin') + TD(O, f='npy_ObjectMin') ), 'logaddexp' : Ufunc(2, 1, None, Modified: trunk/numpy/core/src/umathmodule.c.src =================================================================== --- trunk/numpy/core/src/umathmodule.c.src 2008-11-16 04:01:01 UTC (rev 6035) +++ trunk/numpy/core/src/umathmodule.c.src 2008-11-16 06:23:29 UTC (rev 6036) @@ -143,18 +143,17 @@ * Define numpy version of PyNumber_Power as binary function. */ static PyObject * -npy_PyNumber_Power(PyObject *x, PyObject *y) +npy_ObjectPower(PyObject *x, PyObject *y) { - PyNumber_Power(x, y, Py_None); + return PyNumber_Power(x, y, Py_None); } -#define PyNumber_Power npy_PyNumber_Power /**begin repeat * #Kind = Max, Min# * #OP = >=, <=# */ static PyObject * -_npy_Object at Kind@(PyObject *i1, PyObject *i2) +npy_Object at Kind@(PyObject *i1, PyObject *i2) { PyObject *result; int cmp; @@ -173,6 +172,7 @@ } /**end repeat**/ + /* ***************************************************************************** ** COMPLEX FUNCTIONS ** From numpy-svn at scipy.org Sun Nov 16 03:34:38 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 16 Nov 2008 02:34:38 -0600 (CST) Subject: [Numpy-svn] r6037 - trunk/doc Message-ID: <20081116083438.9282F39C088@scipy.org> Author: jarrod.millman Date: 2008-11-16 02:34:36 -0600 (Sun, 16 Nov 2008) New Revision: 6037 Added: trunk/doc/release/ Log: directory for release notes From numpy-svn at scipy.org Sun Nov 16 03:49:18 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 16 Nov 2008 02:49:18 -0600 (CST) Subject: [Numpy-svn] r6038 - trunk/doc/neps Message-ID: <20081116084918.0C19D39C088@scipy.org> Author: jarrod.millman Date: 2008-11-16 02:49:17 -0600 (Sun, 16 Nov 2008) New Revision: 6038 Added: trunk/doc/neps/generalized-ufuncs.rst Log: moved generalized ufunc proposal from the wiki Added: trunk/doc/neps/generalized-ufuncs.rst =================================================================== --- trunk/doc/neps/generalized-ufuncs.rst 2008-11-16 08:34:36 UTC (rev 6037) +++ trunk/doc/neps/generalized-ufuncs.rst 2008-11-16 08:49:17 UTC (rev 6038) @@ -0,0 +1,170 @@ +=============================== +Generalized Universal Functions +=============================== + +There is a general need for looping over not only functions on scalars +but also over functions on vectors (or arrays), as explained on +http://scipy.org/scipy/numpy/wiki/GeneralLoopingFunctions. We propose +to realize this concept by generalizing the universal functions +(ufuncs), and provide a C implementation that adds ~500 lines +to the numpy code base. 
In current (specialized) ufuncs, the elementary +function is limited to element-by-element operations, whereas the +generalized version supports "sub-array" by "sub-array" operations. +The Perl vector library PDL provides a similar functionality and its +terms are re-used in the following. + +Each generalized ufunc has information associated with it that states +what the "core" dimensionality of the inputs is, as well as the +corresponding dimensionality of the outputs (the element-wise ufuncs +have zero core dimensions). The list of the core dimensions for all +arguments is called the "signature" of a ufunc. For example, the +ufunc numpy.add has signature ``"(),()->()"`` defining two scalar inputs +and one scalar output. + +Another example is (see the GeneralLoopingFunctions page) the function +``inner1d(a,b)`` with a signature of ``"(i),(i)->()"``. This applies the +inner product along the last axis of each input, but keeps the +remaining indices intact. For example, where ``a`` is of shape ``(3,5,N)`` +and ``b`` is of shape ``(5,N)``, this will return an output of shape ``(3,5)``. +The underlying elementary function is called 3*5 times. In the +signature, we specify one core dimension ``"(i)"`` for each input and zero core +dimensions ``"()"`` for the output, since it takes two 1-d arrays and +returns a scalar. By using the same name ``"i"``, we specify that the two +corresponding dimensions should be of the same size (or one of them is +of size 1 and will be broadcasted). + +The dimensions beyond the core dimensions are called "loop" dimensions. In +the above example, this corresponds to ``(3,5)``. + +The usual numpy "broadcasting" rules apply, where the signature +determines how the dimensions of each input/output object are split +into core and loop dimensions: + +#. While an input array has a smaller dimensionality than the corresponding + number of core dimensions, 1's are pre-pended to its shape. +#. The core dimensions are removed from all inputs and the remaining + dimensions are broadcasted; defining the loop dimensions. +#. The output is given by the loop dimensions plus the output core dimensions. + + + +Definitions +----------- + +Elementary Function + Each ufunc consists of an elementary function that performs the + most basic operation on the smallest portion of array arguments + (e.g. adding two numbers is the most basic operation in adding two + arrays). The ufunc applies the elementary function multiple times + on different parts of the arrays. The input/output of elementary + functions can be vectors; e.g., the elementary function of inner1d + takes two vectors as input. + +Signature + A signature is a string describing the input/output dimensions of + the elementary function of a ufunc. See section below for more + details. + +Core Dimension + The dimensionality of each input/output of an elementary function + is defined by its core dimensions (zero core dimensions correspond + to a scalar input/output). The core dimensions are mapped to the + last dimensions of the input/output arrays. + +Dimension Name + A dimension name represents a core dimension in the signature. + Different dimensions may share a name, indicating that they are of + the same size (or are broadcastable). + +Dimension Index + A dimension index is an integer representing a dimension name. It + enumerates the dimension names according to the order of the first + occurrence of each name in the signature. 
+ + +Details of Signature +-------------------- + +The signature defines "core" dimensionality of input and output +variables, and thereby also defines the contraction of the +dimensions. The signature is represented by a string of the +following format: + +* Core dimensions of each input or output array are represented by a + list of dimension names in parentheses, ``"(i_1,...,i_N)"``; a scalar + input/output is denoted by ``"()"``. Instead of ``"i_1"``, ``"i_2"``, + etc, one can use any valid Python variable name. +* Dimension lists for different arguments are separated by ``","``. + Input/output arguments are separated by ``"->"``. +* If one uses the same dimension name in multiple locations, this + enforces the same size (or broadcastable size) of the corresponding + dimensions. + +The formal syntax of signatures is as follows:: + + ::= "->" + ::= + ::= + ::= nil | | "," + ::= "(" ")" + ::= nil | | + "," + ::= valid Python variable name + + +Notes: + +#. All quotes are for clarity. +#. Core dimensions that share the same name must be broadcastable, as + the two ``i`` in our example above. Each dimension name typically + corresponding to one level of looping in the elementary function's + implementation. +#. White spaces are ignored. + +Here are some examples of signatures: + + + || add || `"(),()->()"` || || + || inner1d || `"(i),(i)->()"` || || + || sum1d || `"(i)->()"` || || + || dot2d || `"(m,n),(n,p)->(m,p)"` || (matrix multiplication) || + || outer_inner || `"(i,t),(j,t)->(i,j)"` || (inner over the last dimension, outer over the second to last, and loop/broadcast over the rest.) || + + + +C-API for implementing Elementary Functions +------------------------------------------- + +The current interface remains unchanged, and ``PyUFunc_FromFuncAndData`` +can still be used to implement (specialized) ufuncs, consisting of +scalar elementary functions. + +One can use ``PyUFunc_FromFuncAndDataAndSignature`` to declare a more +general ufunc. The argument list is the same as +``PyUFunc_FromFuncAndData``, with an additional argument specifying the +signature as C string. + +Furthermore, the callback function is of the same type as before, +``void (*foo)(char **args, intp *dimensions, intp *steps, void *func)``. +When invoked, ``args`` is a list of length ``nargs`` containing +the data of all input/output arguments. For a scalar elementary +function, ``steps`` is also of length ``nargs``, denoting the strides used +for the arguments. ``dimensions`` is a pointer to a single integer +defining the size of the axis to be looped over. + +For a non-trivial signature, ``dimensions`` will also contain the sizes +of the core dimensions as well, starting at the second entry. Only +one size is provided for each unique dimension name and the sizes are +given according to the first occurrence of a dimension name in the +signature. + +The first ``nargs`` elements of ``steps`` remain the same as for scalar +ufuncs. The following elements contain the strides of all core +dimensions for all arguments in order. + +For example, consider a ufunc with signature ``"(i,j),(i)->()"``. In +this case, ``args`` will contain three pointers to the data of the +input/output arrays ``a``, ``b``, ``c``. Furthermore, ``dimensions`` will be +``[N, I, J]`` to define the size of ``N`` of the loop and the sizes ``I`` and ``J`` +for the core dimensions ``i`` and ``j``. Finally, ``steps`` will be +``[a_N, b_N, c_N, a_i, a_j, b_i]``, containing all necessary strides. 
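A small NumPy sketch of the shape semantics described in the proposal above (not part of the committed NEP; the helper name ``inner1d_ref`` is made up for illustration): the proposed ``inner1d`` with signature ``(i),(i)->()`` reduces over one core dimension and broadcasts the remaining loop dimensions, so a pure-Python reference for the expected shapes can be written as::

    import numpy as np

    def inner1d_ref(a, b):
        # core dimension i: the last axis of each input (must match or broadcast)
        # loop dimensions: everything in front of it, broadcast by the usual rules
        return (np.asarray(a) * np.asarray(b)).sum(axis=-1)

    a = np.ones((3, 5, 7))            # loop dims (3, 5), core dim i = 7
    b = np.ones((5, 7))               # loop dim  (5,),   core dim i = 7
    print inner1d_ref(a, b).shape     # -> (3, 5): only the loop dims remain

A real generalized ufunc would perform the same reduction inside its C elementary function, which receives the loop size and the core size ``i`` through ``dimensions`` and the per-argument core strides appended to ``steps``, as the proposal describes.
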
From numpy-svn at scipy.org Sun Nov 16 04:00:45 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 16 Nov 2008 03:00:45 -0600 (CST) Subject: [Numpy-svn] r6039 - trunk/doc/neps Message-ID: <20081116090045.7871939C088@scipy.org> Author: jarrod.millman Date: 2008-11-16 03:00:44 -0600 (Sun, 16 Nov 2008) New Revision: 6039 Modified: trunk/doc/neps/generalized-ufuncs.rst Log: fixed table Modified: trunk/doc/neps/generalized-ufuncs.rst =================================================================== --- trunk/doc/neps/generalized-ufuncs.rst 2008-11-16 08:49:17 UTC (rev 6038) +++ trunk/doc/neps/generalized-ufuncs.rst 2008-11-16 09:00:44 UTC (rev 6039) @@ -123,15 +123,20 @@ Here are some examples of signatures: ++-------------+--------------------------+-----------------------------------+ +| add | ``"(),()->()"`` | | ++-------------+--------------------------+-----------------------------------+ +| inner1d | ``"(i),(i)->()"`` | | ++-------------+--------------------------+-----------------------------------+ +| sum1d | ``"(i)->()"`` | | ++-------------+--------------------------+-----------------------------------+ +| dot2d | ``"(m,n),(n,p)->(m,p)"`` | matrix multiplication | ++-------------+--------------------------+-----------------------------------+ +| outer_inner | ``"(i,t),(j,t)->(i,j)"`` | inner over the last dimension, | +| | | outer over the second to last, | +| | | and loop/broadcast over the rest. | ++-------------+--------------------------+-----------------------------------+ - || add || `"(),()->()"` || || - || inner1d || `"(i),(i)->()"` || || - || sum1d || `"(i)->()"` || || - || dot2d || `"(m,n),(n,p)->(m,p)"` || (matrix multiplication) || - || outer_inner || `"(i,t),(j,t)->(i,j)"` || (inner over the last dimension, outer over the second to last, and loop/broadcast over the rest.) 
|| - - - C-API for implementing Elementary Functions ------------------------------------------- From numpy-svn at scipy.org Sun Nov 16 04:02:13 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 16 Nov 2008 03:02:13 -0600 (CST) Subject: [Numpy-svn] r6040 - branches Message-ID: <20081116090213.3AE1939C088@scipy.org> Author: cdavid Date: 2008-11-16 03:02:07 -0600 (Sun, 16 Nov 2008) New Revision: 6040 Added: branches/visualstudio_manifest/ Log: Start a branch to deal with visual studio manifests in the numpy build system Copied: branches/visualstudio_manifest (from rev 6039, trunk) From numpy-svn at scipy.org Sun Nov 16 04:03:09 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 16 Nov 2008 03:03:09 -0600 (CST) Subject: [Numpy-svn] r6041 - trunk Message-ID: <20081116090309.3312F39C088@scipy.org> Author: cdavid Date: 2008-11-16 03:03:06 -0600 (Sun, 16 Nov 2008) New Revision: 6041 Modified: trunk/ Log: Initialized merge tracking via "svnmerge" with revisions "1-6040" from http://svn.scipy.org/svn/numpy/branches/visualstudio_manifest Property changes on: trunk ___________________________________________________________________ Name: svnmerge-integrated - /branches/distutils-revamp:1-2752 /branches/multicore:1-3687 /trunk:1-2871 + /branches/distutils-revamp:1-2752 /branches/multicore:1-3687 /branches/visualstudio_manifest:1-6040 /trunk:1-2871 From numpy-svn at scipy.org Sun Nov 16 04:04:40 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 16 Nov 2008 03:04:40 -0600 (CST) Subject: [Numpy-svn] r6042 - branches/visualstudio_manifest Message-ID: <20081116090440.41D5A39C088@scipy.org> Author: cdavid Date: 2008-11-16 03:04:37 -0600 (Sun, 16 Nov 2008) New Revision: 6042 Modified: branches/visualstudio_manifest/ Log: Initialized merge tracking via "svnmerge" with revisions "1-6041" from http://svn.scipy.org/svn/numpy/trunk Property changes on: branches/visualstudio_manifest ___________________________________________________________________ Name: svnmerge-integrated - /branches/distutils-revamp:1-2752 /branches/multicore:1-3687 /trunk:1-2871 + /branches/distutils-revamp:1-2752 /branches/multicore:1-3687 /trunk:1-6041 From numpy-svn at scipy.org Sun Nov 16 04:14:03 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 16 Nov 2008 03:14:03 -0600 (CST) Subject: [Numpy-svn] r6043 - trunk/doc/neps Message-ID: <20081116091403.3EA6739C088@scipy.org> Author: jarrod.millman Date: 2008-11-16 03:14:01 -0600 (Sun, 16 Nov 2008) New Revision: 6043 Modified: trunk/doc/neps/generalized-ufuncs.rst Log: removing some quotes Modified: trunk/doc/neps/generalized-ufuncs.rst =================================================================== --- trunk/doc/neps/generalized-ufuncs.rst 2008-11-16 09:04:37 UTC (rev 6042) +++ trunk/doc/neps/generalized-ufuncs.rst 2008-11-16 09:14:01 UTC (rev 6043) @@ -18,18 +18,18 @@ corresponding dimensionality of the outputs (the element-wise ufuncs have zero core dimensions). The list of the core dimensions for all arguments is called the "signature" of a ufunc. For example, the -ufunc numpy.add has signature ``"(),()->()"`` defining two scalar inputs +ufunc numpy.add has signature ``(),()->()`` defining two scalar inputs and one scalar output. Another example is (see the GeneralLoopingFunctions page) the function -``inner1d(a,b)`` with a signature of ``"(i),(i)->()"``. This applies the +``inner1d(a,b)`` with a signature of ``(i),(i)->()``. 
This applies the inner product along the last axis of each input, but keeps the remaining indices intact. For example, where ``a`` is of shape ``(3,5,N)`` and ``b`` is of shape ``(5,N)``, this will return an output of shape ``(3,5)``. The underlying elementary function is called 3*5 times. In the -signature, we specify one core dimension ``"(i)"`` for each input and zero core -dimensions ``"()"`` for the output, since it takes two 1-d arrays and -returns a scalar. By using the same name ``"i"``, we specify that the two +signature, we specify one core dimension ``(i)`` for each input and zero core +dimensions ``()`` for the output, since it takes two 1-d arrays and +returns a scalar. By using the same name ``i``, we specify that the two corresponding dimensions should be of the same size (or one of them is of size 1 and will be broadcasted). @@ -91,11 +91,11 @@ following format: * Core dimensions of each input or output array are represented by a - list of dimension names in parentheses, ``"(i_1,...,i_N)"``; a scalar - input/output is denoted by ``"()"``. Instead of ``"i_1"``, ``"i_2"``, + list of dimension names in parentheses, ``(i_1,...,i_N)``; a scalar + input/output is denoted by ``()``. Instead of ``i_1``, ``i_2``, etc, one can use any valid Python variable name. -* Dimension lists for different arguments are separated by ``","``. - Input/output arguments are separated by ``"->"``. +* Dimension lists for different arguments are separated by ``*,*``. + Input/output arguments are separated by ``->``. * If one uses the same dimension name in multiple locations, this enforces the same size (or broadcastable size) of the corresponding dimensions. @@ -123,19 +123,19 @@ Here are some examples of signatures: -+-------------+--------------------------+-----------------------------------+ -| add | ``"(),()->()"`` | | -+-------------+--------------------------+-----------------------------------+ -| inner1d | ``"(i),(i)->()"`` | | -+-------------+--------------------------+-----------------------------------+ -| sum1d | ``"(i)->()"`` | | -+-------------+--------------------------+-----------------------------------+ -| dot2d | ``"(m,n),(n,p)->(m,p)"`` | matrix multiplication | -+-------------+--------------------------+-----------------------------------+ -| outer_inner | ``"(i,t),(j,t)->(i,j)"`` | inner over the last dimension, | -| | | outer over the second to last, | -| | | and loop/broadcast over the rest. | -+-------------+--------------------------+-----------------------------------+ ++-------------+------------------------+-----------------------------------+ +| add | ``(),()->()`` | | ++-------------+------------------------+-----------------------------------+ +| inner1d | ``(i),(i)->()`` | | ++-------------+------------------------+-----------------------------------+ +| sum1d | ``(i)->()`` | | ++-------------+------------------------+-----------------------------------+ +| dot2d | ``(m,n),(n,p)->(m,p)`` | matrix multiplication | ++-------------+------------------------+-----------------------------------+ +| outer_inner | ``(i,t),(j,t)->(i,j)`` | inner over the last dimension, | +| | | outer over the second to last, | +| | | and loop/broadcast over the rest. | ++-------------+------------------------+-----------------------------------+ C-API for implementing Elementary Functions ------------------------------------------- @@ -167,7 +167,7 @@ ufuncs. The following elements contain the strides of all core dimensions for all arguments in order. 
-For example, consider a ufunc with signature ``"(i,j),(i)->()"``. In +For example, consider a ufunc with signature ``(i,j),(i)->()``. In this case, ``args`` will contain three pointers to the data of the input/output arrays ``a``, ``b``, ``c``. Furthermore, ``dimensions`` will be ``[N, I, J]`` to define the size of ``N`` of the loop and the sizes ``I`` and ``J`` From numpy-svn at scipy.org Sun Nov 16 04:16:01 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 16 Nov 2008 03:16:01 -0600 (CST) Subject: [Numpy-svn] r6044 - trunk/doc/neps Message-ID: <20081116091601.2DA0C39C088@scipy.org> Author: jarrod.millman Date: 2008-11-16 03:15:59 -0600 (Sun, 16 Nov 2008) New Revision: 6044 Modified: trunk/doc/neps/generalized-ufuncs.rst Log: adding back a few quotes Modified: trunk/doc/neps/generalized-ufuncs.rst =================================================================== --- trunk/doc/neps/generalized-ufuncs.rst 2008-11-16 09:14:01 UTC (rev 6043) +++ trunk/doc/neps/generalized-ufuncs.rst 2008-11-16 09:15:59 UTC (rev 6044) @@ -94,8 +94,8 @@ list of dimension names in parentheses, ``(i_1,...,i_N)``; a scalar input/output is denoted by ``()``. Instead of ``i_1``, ``i_2``, etc, one can use any valid Python variable name. -* Dimension lists for different arguments are separated by ``*,*``. - Input/output arguments are separated by ``->``. +* Dimension lists for different arguments are separated by ``","``. + Input/output arguments are separated by ``"->"``. * If one uses the same dimension name in multiple locations, this enforces the same size (or broadcastable size) of the corresponding dimensions. From numpy-svn at scipy.org Sun Nov 16 06:44:09 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 16 Nov 2008 05:44:09 -0600 (CST) Subject: [Numpy-svn] r6045 - branches/visualstudio_manifest/numpy/distutils Message-ID: <20081116114409.8F53839C088@scipy.org> Author: cdavid Date: 2008-11-16 05:44:02 -0600 (Sun, 16 Nov 2008) New Revision: 6045 Modified: branches/visualstudio_manifest/numpy/distutils/mingw32ccompiler.py Log: Add a function to get the content of the xml version of manifest to deal with VS. Modified: branches/visualstudio_manifest/numpy/distutils/mingw32ccompiler.py =================================================================== --- branches/visualstudio_manifest/numpy/distutils/mingw32ccompiler.py 2008-11-16 09:15:59 UTC (rev 6044) +++ branches/visualstudio_manifest/numpy/distutils/mingw32ccompiler.py 2008-11-16 11:44:02 UTC (rev 6045) @@ -225,3 +225,49 @@ # msg = "Couldn't find import library, and failed to build it." # raise DistutilsPlatformError, msg return + +# Functions to deal with visual studio manifests. Manifest are a mechanism to +# enforce strong DLL versioning on windows, and has nothing to do with +# distutils MANIFEST. manifests are XML files with version info, and used by +# the OS loader; they are necessary when linking against a DLL no in the system +# path; in particular, python 2.6 is built against the MS runtime 9 (the one +# from VS 2008), which is not available on most windows systems; python 2.6 +# installer does install it in the Win SxS (Side by side) directory, but this +# requires the manifest too. This is a big mess, thanks MS for a wonderful +# system. + +# XXX: ideally, we should use exactly the same version as used by python, but I +# have no idea how to obtain the exact version. 
+_MSVCRVER_TO_FULLVER = {'90': "9.0.21022.8"} + +def msvc_manifest_xml(maj, min): + """Given a major and minor version of the MSVCR, returns the + corresponding XML file.""" + try: + fullver = _MSVCRVER_TO_FULLVER[str(maj * 10 + min)] + except KeyError: + raise ValueError("Version %d,%d of MSVCRT not supported yet" \ + % (maj, min)) + # Don't be fooled, it looks like an XML, but it is not. In particular, it + # should not have any space before starting, and its size should be + # divisible by 4, most likely for alignement constraints when the xml is + # embedded in the binary... + # This template was copied directly from the python 2.6 binary (using + # strings.exe from mingw on python.exe). + template = """\ + + + + + + + + + + + + + +""" + + return template % {'fullver': fullver, 'maj': maj, 'min': min} From numpy-svn at scipy.org Sun Nov 16 06:44:25 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 16 Nov 2008 05:44:25 -0600 (CST) Subject: [Numpy-svn] r6046 - branches/visualstudio_manifest/numpy/distutils Message-ID: <20081116114425.1ED6639C088@scipy.org> Author: cdavid Date: 2008-11-16 05:44:18 -0600 (Sun, 16 Nov 2008) New Revision: 6046 Modified: branches/visualstudio_manifest/numpy/distutils/mingw32ccompiler.py Log: Add comment on how to get exact MSVCR version. Modified: branches/visualstudio_manifest/numpy/distutils/mingw32ccompiler.py =================================================================== --- branches/visualstudio_manifest/numpy/distutils/mingw32ccompiler.py 2008-11-16 11:44:02 UTC (rev 6045) +++ branches/visualstudio_manifest/numpy/distutils/mingw32ccompiler.py 2008-11-16 11:44:18 UTC (rev 6046) @@ -237,7 +237,8 @@ # system. # XXX: ideally, we should use exactly the same version as used by python, but I -# have no idea how to obtain the exact version. +# have no idea how to obtain the exact version from python. We could use the +# strings utility on python.exe, maybe ? _MSVCRVER_TO_FULLVER = {'90': "9.0.21022.8"} def msvc_manifest_xml(maj, min): From numpy-svn at scipy.org Sun Nov 16 06:44:45 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 16 Nov 2008 05:44:45 -0600 (CST) Subject: [Numpy-svn] r6047 - branches/visualstudio_manifest/numpy/distutils Message-ID: <20081116114445.76F4939C088@scipy.org> Author: cdavid Date: 2008-11-16 05:44:33 -0600 (Sun, 16 Nov 2008) New Revision: 6047 Modified: branches/visualstudio_manifest/numpy/distutils/mingw32ccompiler.py Log: Add manifest_rc to generate the .rc file which will be used to embed the manifest file. 
Modified: branches/visualstudio_manifest/numpy/distutils/mingw32ccompiler.py =================================================================== --- branches/visualstudio_manifest/numpy/distutils/mingw32ccompiler.py 2008-11-16 11:44:18 UTC (rev 6046) +++ branches/visualstudio_manifest/numpy/distutils/mingw32ccompiler.py 2008-11-16 11:44:33 UTC (rev 6047) @@ -272,3 +272,17 @@ """ return template % {'fullver': fullver, 'maj': maj, 'min': min} + +def manifest_rc(biname, type='dll'): + """Return the rc file used to generate the res file which will be embedded + as manifest for binary biname, of given type ('dll' or 'exe').""" + if type == 'dll': + rctype = 2 + elif type == 'exe': + rctype = 1 + else: + raise ValueError("Type %s not supported" % type) + + return """\ +#include "winuser.h" +%d RT_MANIFEST %s.manifest""" % (rctype, biname) From numpy-svn at scipy.org Sun Nov 16 06:45:00 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 16 Nov 2008 05:45:00 -0600 (CST) Subject: [Numpy-svn] r6048 - branches/visualstudio_manifest/numpy/distutils Message-ID: <20081116114500.12C5839C088@scipy.org> Author: cdavid Date: 2008-11-16 05:44:54 -0600 (Sun, 16 Nov 2008) New Revision: 6048 Modified: branches/visualstudio_manifest/numpy/distutils/mingw32ccompiler.py Log: Use directly the manifest name for generating the rc file + add docstring. Modified: branches/visualstudio_manifest/numpy/distutils/mingw32ccompiler.py =================================================================== --- branches/visualstudio_manifest/numpy/distutils/mingw32ccompiler.py 2008-11-16 11:44:33 UTC (rev 6047) +++ branches/visualstudio_manifest/numpy/distutils/mingw32ccompiler.py 2008-11-16 11:44:54 UTC (rev 6048) @@ -275,7 +275,15 @@ def manifest_rc(biname, type='dll'): """Return the rc file used to generate the res file which will be embedded - as manifest for binary biname, of given type ('dll' or 'exe').""" + as manifest for given manifest file name, of given type ('dll' or + 'exe'). + + Parameters + ---------- + name: str + name of the manifest file to embed + type: str ('dll', 'exe') + type of the binary which will embed the manifest""" if type == 'dll': rctype = 2 elif type == 'exe': @@ -285,4 +293,4 @@ return """\ #include "winuser.h" -%d RT_MANIFEST %s.manifest""" % (rctype, biname) +%d RT_MANIFEST %s""" % (rctype, name) From numpy-svn at scipy.org Sun Nov 16 06:45:16 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 16 Nov 2008 05:45:16 -0600 (CST) Subject: [Numpy-svn] r6049 - branches/visualstudio_manifest/numpy/distutils Message-ID: <20081116114516.8BC2839C088@scipy.org> Author: cdavid Date: 2008-11-16 05:45:09 -0600 (Sun, 16 Nov 2008) New Revision: 6049 Modified: branches/visualstudio_manifest/numpy/distutils/mingw32ccompiler.py Log: Forgot to change argument of manifest_rc. Modified: branches/visualstudio_manifest/numpy/distutils/mingw32ccompiler.py =================================================================== --- branches/visualstudio_manifest/numpy/distutils/mingw32ccompiler.py 2008-11-16 11:44:54 UTC (rev 6048) +++ branches/visualstudio_manifest/numpy/distutils/mingw32ccompiler.py 2008-11-16 11:45:09 UTC (rev 6049) @@ -273,7 +273,7 @@ return template % {'fullver': fullver, 'maj': maj, 'min': min} -def manifest_rc(biname, type='dll'): +def manifest_rc(name, type='dll'): """Return the rc file used to generate the res file which will be embedded as manifest for given manifest file name, of given type ('dll' or 'exe'). 
From numpy-svn at scipy.org Sun Nov 16 07:30:24 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 16 Nov 2008 06:30:24 -0600 (CST) Subject: [Numpy-svn] r6050 - trunk/numpy/distutils/command Message-ID: <20081116123024.BE64239C088@scipy.org> Author: cdavid Date: 2008-11-16 06:30:17 -0600 (Sun, 16 Nov 2008) New Revision: 6050 Modified: trunk/numpy/distutils/command/config.py Log: Add deprecation warning for get_output and try_run: we should not use it anymore. Modified: trunk/numpy/distutils/command/config.py =================================================================== --- trunk/numpy/distutils/command/config.py 2008-11-16 11:45:09 UTC (rev 6049) +++ trunk/numpy/distutils/command/config.py 2008-11-16 12:30:17 UTC (rev 6050) @@ -4,6 +4,8 @@ # Pearu Peterson import os, signal +import warnings + from distutils.command.config import config as old_config from distutils.command.config import LANG_EXT from distutils import log @@ -22,6 +24,17 @@ self.fcompiler = None old_config.initialize_options(self) + def try_run(self, body, headers=None, include_dirs=None, + libraries=None, library_dirs=None, lang="c"): + warnings.warn("\n+++++++++++++++++++++++++++++++++++++++++++++++++\n" \ + "Usage of try_run is deprecated: please do not \n" \ + "use it anymore, and avoid configuration checks \n" \ + "involving running executable on the target machine.\n" \ + "+++++++++++++++++++++++++++++++++++++++++++++++++\n", + DeprecationWarning) + return old_config.try_run(self, body, headers, include_dirs, libraries, + library_dirs, lang) + def _check_compiler (self): old_config._check_compiler(self) from numpy.distutils.fcompiler import FCompiler, new_fcompiler @@ -215,6 +228,12 @@ built from 'body' and 'headers'. Returns the exit status code of the program and its output. """ + warnings.warn("\n+++++++++++++++++++++++++++++++++++++++++++++++++\n" \ + "Usage of get_output is deprecated: please do not \n" \ + "use it anymore, and avoid configuration checks \n" \ + "involving running executable on the target machine.\n" \ + "+++++++++++++++++++++++++++++++++++++++++++++++++\n", + DeprecationWarning) from distutils.ccompiler import CompileError, LinkError self._check_compiler() exitcode, output = 255, '' From numpy-svn at scipy.org Sun Nov 16 08:02:17 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 16 Nov 2008 07:02:17 -0600 (CST) Subject: [Numpy-svn] r6051 - in branches/visualstudio_manifest: . doc/neps numpy/distutils/command Message-ID: <20081116130217.6BD4A39C088@scipy.org> Author: cdavid Date: 2008-11-16 07:02:09 -0600 (Sun, 16 Nov 2008) New Revision: 6051 Modified: branches/visualstudio_manifest/ branches/visualstudio_manifest/doc/neps/generalized-ufuncs.rst branches/visualstudio_manifest/numpy/distutils/command/config.py Log: Merged revisions 6042-6050 via svnmerge from http://svn.scipy.org/svn/numpy/trunk ........ r6043 | jarrod.millman | 2008-11-16 18:14:01 +0900 (Sun, 16 Nov 2008) | 2 lines removing some quotes ........ r6044 | jarrod.millman | 2008-11-16 18:15:59 +0900 (Sun, 16 Nov 2008) | 2 lines adding back a few quotes ........ r6050 | cdavid | 2008-11-16 21:30:17 +0900 (Sun, 16 Nov 2008) | 1 line Add deprecation warning for get_output and try_run: we should not use it anymore. ........ 
Property changes on: branches/visualstudio_manifest ___________________________________________________________________ Name: svnmerge-integrated - /branches/distutils-revamp:1-2752 /branches/multicore:1-3687 /trunk:1-6041 + /branches/distutils-revamp:1-2752 /branches/multicore:1-3687 /trunk:1-6050 Modified: branches/visualstudio_manifest/doc/neps/generalized-ufuncs.rst =================================================================== --- branches/visualstudio_manifest/doc/neps/generalized-ufuncs.rst 2008-11-16 12:30:17 UTC (rev 6050) +++ branches/visualstudio_manifest/doc/neps/generalized-ufuncs.rst 2008-11-16 13:02:09 UTC (rev 6051) @@ -18,18 +18,18 @@ corresponding dimensionality of the outputs (the element-wise ufuncs have zero core dimensions). The list of the core dimensions for all arguments is called the "signature" of a ufunc. For example, the -ufunc numpy.add has signature ``"(),()->()"`` defining two scalar inputs +ufunc numpy.add has signature ``(),()->()`` defining two scalar inputs and one scalar output. Another example is (see the GeneralLoopingFunctions page) the function -``inner1d(a,b)`` with a signature of ``"(i),(i)->()"``. This applies the +``inner1d(a,b)`` with a signature of ``(i),(i)->()``. This applies the inner product along the last axis of each input, but keeps the remaining indices intact. For example, where ``a`` is of shape ``(3,5,N)`` and ``b`` is of shape ``(5,N)``, this will return an output of shape ``(3,5)``. The underlying elementary function is called 3*5 times. In the -signature, we specify one core dimension ``"(i)"`` for each input and zero core -dimensions ``"()"`` for the output, since it takes two 1-d arrays and -returns a scalar. By using the same name ``"i"``, we specify that the two +signature, we specify one core dimension ``(i)`` for each input and zero core +dimensions ``()`` for the output, since it takes two 1-d arrays and +returns a scalar. By using the same name ``i``, we specify that the two corresponding dimensions should be of the same size (or one of them is of size 1 and will be broadcasted). @@ -91,8 +91,8 @@ following format: * Core dimensions of each input or output array are represented by a - list of dimension names in parentheses, ``"(i_1,...,i_N)"``; a scalar - input/output is denoted by ``"()"``. Instead of ``"i_1"``, ``"i_2"``, + list of dimension names in parentheses, ``(i_1,...,i_N)``; a scalar + input/output is denoted by ``()``. Instead of ``i_1``, ``i_2``, etc, one can use any valid Python variable name. * Dimension lists for different arguments are separated by ``","``. Input/output arguments are separated by ``"->"``. @@ -123,19 +123,19 @@ Here are some examples of signatures: -+-------------+--------------------------+-----------------------------------+ -| add | ``"(),()->()"`` | | -+-------------+--------------------------+-----------------------------------+ -| inner1d | ``"(i),(i)->()"`` | | -+-------------+--------------------------+-----------------------------------+ -| sum1d | ``"(i)->()"`` | | -+-------------+--------------------------+-----------------------------------+ -| dot2d | ``"(m,n),(n,p)->(m,p)"`` | matrix multiplication | -+-------------+--------------------------+-----------------------------------+ -| outer_inner | ``"(i,t),(j,t)->(i,j)"`` | inner over the last dimension, | -| | | outer over the second to last, | -| | | and loop/broadcast over the rest. 
| -+-------------+--------------------------+-----------------------------------+ ++-------------+------------------------+-----------------------------------+ +| add | ``(),()->()`` | | ++-------------+------------------------+-----------------------------------+ +| inner1d | ``(i),(i)->()`` | | ++-------------+------------------------+-----------------------------------+ +| sum1d | ``(i)->()`` | | ++-------------+------------------------+-----------------------------------+ +| dot2d | ``(m,n),(n,p)->(m,p)`` | matrix multiplication | ++-------------+------------------------+-----------------------------------+ +| outer_inner | ``(i,t),(j,t)->(i,j)`` | inner over the last dimension, | +| | | outer over the second to last, | +| | | and loop/broadcast over the rest. | ++-------------+------------------------+-----------------------------------+ C-API for implementing Elementary Functions ------------------------------------------- @@ -167,7 +167,7 @@ ufuncs. The following elements contain the strides of all core dimensions for all arguments in order. -For example, consider a ufunc with signature ``"(i,j),(i)->()"``. In +For example, consider a ufunc with signature ``(i,j),(i)->()``. In this case, ``args`` will contain three pointers to the data of the input/output arrays ``a``, ``b``, ``c``. Furthermore, ``dimensions`` will be ``[N, I, J]`` to define the size of ``N`` of the loop and the sizes ``I`` and ``J`` Modified: branches/visualstudio_manifest/numpy/distutils/command/config.py =================================================================== --- branches/visualstudio_manifest/numpy/distutils/command/config.py 2008-11-16 12:30:17 UTC (rev 6050) +++ branches/visualstudio_manifest/numpy/distutils/command/config.py 2008-11-16 13:02:09 UTC (rev 6051) @@ -4,6 +4,8 @@ # Pearu Peterson import os, signal +import warnings + from distutils.command.config import config as old_config from distutils.command.config import LANG_EXT from distutils import log @@ -22,6 +24,17 @@ self.fcompiler = None old_config.initialize_options(self) + def try_run(self, body, headers=None, include_dirs=None, + libraries=None, library_dirs=None, lang="c"): + warnings.warn("\n+++++++++++++++++++++++++++++++++++++++++++++++++\n" \ + "Usage of try_run is deprecated: please do not \n" \ + "use it anymore, and avoid configuration checks \n" \ + "involving running executable on the target machine.\n" \ + "+++++++++++++++++++++++++++++++++++++++++++++++++\n", + DeprecationWarning) + return old_config.try_run(self, body, headers, include_dirs, libraries, + library_dirs, lang) + def _check_compiler (self): old_config._check_compiler(self) from numpy.distutils.fcompiler import FCompiler, new_fcompiler @@ -215,6 +228,12 @@ built from 'body' and 'headers'. Returns the exit status code of the program and its output. 
""" + warnings.warn("\n+++++++++++++++++++++++++++++++++++++++++++++++++\n" \ + "Usage of get_output is deprecated: please do not \n" \ + "use it anymore, and avoid configuration checks \n" \ + "involving running executable on the target machine.\n" \ + "+++++++++++++++++++++++++++++++++++++++++++++++++\n", + DeprecationWarning) from distutils.ccompiler import CompileError, LinkError self._check_compiler() exitcode, output = 255, '' From numpy-svn at scipy.org Sun Nov 16 08:05:06 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 16 Nov 2008 07:05:06 -0600 (CST) Subject: [Numpy-svn] r6052 - branches/visualstudio_manifest/numpy/distutils Message-ID: <20081116130506.1E0D839C088@scipy.org> Author: cdavid Date: 2008-11-16 07:04:59 -0600 (Sun, 16 Nov 2008) New Revision: 6052 Modified: branches/visualstudio_manifest/numpy/distutils/mingw32ccompiler.py Log: Trailing spaces. Modified: branches/visualstudio_manifest/numpy/distutils/mingw32ccompiler.py =================================================================== --- branches/visualstudio_manifest/numpy/distutils/mingw32ccompiler.py 2008-11-16 13:02:09 UTC (rev 6051) +++ branches/visualstudio_manifest/numpy/distutils/mingw32ccompiler.py 2008-11-16 13:04:59 UTC (rev 6052) @@ -277,7 +277,7 @@ """Return the rc file used to generate the res file which will be embedded as manifest for given manifest file name, of given type ('dll' or 'exe'). - + Parameters ---------- name: str From numpy-svn at scipy.org Sun Nov 16 22:27:35 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 16 Nov 2008 21:27:35 -0600 (CST) Subject: [Numpy-svn] r6053 - trunk/numpy/core/src Message-ID: <20081117032735.4F18539C05F@scipy.org> Author: charris Date: 2008-11-16 21:27:28 -0600 (Sun, 16 Nov 2008) New Revision: 6053 Modified: trunk/numpy/core/src/ufuncobject.c trunk/numpy/core/src/umathmodule.c.src Log: Test moving generic loops to umathmodule. 
Modified: trunk/numpy/core/src/ufuncobject.c =================================================================== --- trunk/numpy/core/src/ufuncobject.c 2008-11-16 13:04:59 UTC (rev 6052) +++ trunk/numpy/core/src/ufuncobject.c 2008-11-17 03:27:28 UTC (rev 6053) @@ -28,377 +28,8 @@ #define USE_USE_DEFAULTS 1 -/****************************************************************************** - * Generic Real Floating Type Loops - *****************************************************************************/ -typedef float floatUnaryFunc(float x); -typedef double doubleUnaryFunc(double x); -typedef longdouble longdoubleUnaryFunc(longdouble x); -typedef float floatBinaryFunc(float x, float y); -typedef double doubleBinaryFunc(double x, double y); -typedef longdouble longdoubleBinaryFunc(longdouble x, longdouble y); - - -/*UFUNC_API*/ -static void -PyUFunc_f_f(char **args, intp *dimensions, intp *steps, void *func) -{ - floatUnaryFunc *f = (floatUnaryFunc *)func; - UNARY_LOOP { - const float in1 = *(float *)ip1; - *(float *)op1 = f(in1); - } -} - -/*UFUNC_API*/ -static void -PyUFunc_f_f_As_d_d(char **args, intp *dimensions, intp *steps, void *func) -{ - doubleUnaryFunc *f = (doubleUnaryFunc *)func; - UNARY_LOOP { - const float in1 = *(float *)ip1; - *(float *)op1 = (float)f((double)in1); - } -} - -/*UFUNC_API*/ -static void -PyUFunc_ff_f(char **args, intp *dimensions, intp *steps, void *func) -{ - floatBinaryFunc *f = (floatBinaryFunc *)func; - BINARY_LOOP { - float in1 = *(float *)ip1; - float in2 = *(float *)ip2; - *(float *)op1 = f(in1, in2); - } -} - -/*UFUNC_API*/ -static void -PyUFunc_ff_f_As_dd_d(char **args, intp *dimensions, intp *steps, void *func) -{ - doubleBinaryFunc *f = (doubleBinaryFunc *)func; - BINARY_LOOP { - float in1 = *(float *)ip1; - float in2 = *(float *)ip2; - *(float *)op1 = (double)f((double)in1, (double)in2); - } -} - -/*UFUNC_API*/ -static void -PyUFunc_d_d(char **args, intp *dimensions, intp *steps, void *func) -{ - doubleUnaryFunc *f = (doubleUnaryFunc *)func; - UNARY_LOOP { - double in1 = *(double *)ip1; - *(double *)op1 = f(in1); - } -} - -/*UFUNC_API*/ -static void -PyUFunc_dd_d(char **args, intp *dimensions, intp *steps, void *func) -{ - doubleBinaryFunc *f = (doubleBinaryFunc *)func; - BINARY_LOOP { - double in1 = *(double *)ip1; - double in2 = *(double *)ip2; - *(double *)op1 = f(in1, in2); - } -} - -/*UFUNC_API*/ -static void -PyUFunc_g_g(char **args, intp *dimensions, intp *steps, void *func) -{ - longdoubleUnaryFunc *f = (longdoubleUnaryFunc *)func; - UNARY_LOOP { - longdouble in1 = *(longdouble *)ip1; - *(longdouble *)op1 = f(in1); - } -} - -/*UFUNC_API*/ -static void -PyUFunc_gg_g(char **args, intp *dimensions, intp *steps, void *func) -{ - longdoubleBinaryFunc *f = (longdoubleBinaryFunc *)func; - BINARY_LOOP { - longdouble in1 = *(longdouble *)ip1; - longdouble in2 = *(longdouble *)ip2; - *(longdouble *)op1 = f(in1, in2); - } -} - - - -/****************************************************************************** - * Generic Complex Floating Type Loops - *****************************************************************************/ - - -typedef void cdoubleUnaryFunc(cdouble *x, cdouble *r); -typedef void cfloatUnaryFunc(cfloat *x, cfloat *r); -typedef void clongdoubleUnaryFunc(clongdouble *x, clongdouble *r); -typedef void cdoubleBinaryFunc(cdouble *x, cdouble *y, cdouble *r); -typedef void cfloatBinaryFunc(cfloat *x, cfloat *y, cfloat *r); -typedef void clongdoubleBinaryFunc(clongdouble *x, clongdouble *y, - clongdouble *r); - -/*UFUNC_API*/ -static void 
-PyUFunc_F_F(char **args, intp *dimensions, intp *steps, void *func) -{ - cfloatUnaryFunc *f = (cfloatUnaryFunc *)func; - UNARY_LOOP { - cfloat in1 = *(cfloat *)ip1; - cfloat *out = (cfloat *)op1; - f(&in1, out); - } -} - -/*UFUNC_API*/ -static void -PyUFunc_F_F_As_D_D(char **args, intp *dimensions, intp *steps, void *func) -{ - cdoubleUnaryFunc *f = (cdoubleUnaryFunc *)func; - UNARY_LOOP { - const float *in1 = (float *)ip1; - cdouble tmp = {(double)(in1[0]),(double)in1[1]}; - cdouble out; - f(&tmp, &out); - ((float *)op1)[0] = (float)out.real; - ((float *)op1)[1] = (float)out.imag; - } -} - -/*UFUNC_API*/ -static void -PyUFunc_FF_F(char **args, intp *dimensions, intp *steps, void *func) -{ - cfloatBinaryFunc *f = (cfloatBinaryFunc *)func; - BINARY_LOOP { - cfloat in1 = *(cfloat *)ip1; - cfloat in2 = *(cfloat *)ip2; - cfloat *out = (cfloat *)op1; - f(&in1, &in2, out); - } -} - -/*UFUNC_API*/ -static void -PyUFunc_FF_F_As_DD_D(char **args, intp *dimensions, intp *steps, void *func) -{ - cdoubleBinaryFunc *f = (cdoubleBinaryFunc *)func; - BINARY_LOOP { - const float *in1 = (float *)ip1; - const float *in2 = (float *)ip2; - cdouble tmp1 = {(double)(in1[0]),(double)in1[1]}; - cdouble tmp2 = {(double)(in2[0]),(double)in2[1]}; - cdouble out; - f(&tmp1, &tmp2, &out); - ((float *)op1)[0] = (float)out.real; - ((float *)op1)[1] = (float)out.imag; - } -} - -/*UFUNC_API*/ -static void -PyUFunc_D_D(char **args, intp *dimensions, intp *steps, void *func) -{ - cdoubleUnaryFunc *f = (cdoubleUnaryFunc *)func; - UNARY_LOOP { - cdouble in1 = *(cdouble *)ip1; - cdouble *out = (cdouble *)op1; - f(&in1, out); - } -} - -/*UFUNC_API*/ -static void -PyUFunc_DD_D(char **args, intp *dimensions, intp *steps, void *func) -{ - cdoubleBinaryFunc *f = (cdoubleBinaryFunc *)func; - BINARY_LOOP { - cdouble in1 = *(cdouble *)ip1; - cdouble in2 = *(cdouble *)ip2; - cdouble *out = (cdouble *)op1; - f(&in1, &in2, out); - } -} - -/*UFUNC_API*/ -static void -PyUFunc_G_G(char **args, intp *dimensions, intp *steps, void *func) -{ - clongdoubleUnaryFunc *f = (clongdoubleUnaryFunc *)func; - UNARY_LOOP { - clongdouble in1 = *(clongdouble *)ip1; - clongdouble *out = (clongdouble *)op1; - f(&in1, out); - } -} - -/*UFUNC_API*/ -static void -PyUFunc_GG_G(char **args, intp *dimensions, intp *steps, void *func) -{ - clongdoubleBinaryFunc *f = (clongdoubleBinaryFunc *)func; - BINARY_LOOP { - clongdouble in1 = *(clongdouble *)ip1; - clongdouble in2 = *(clongdouble *)ip2; - clongdouble *out = (clongdouble *)op1; - f(&in1, &in2, out); - } -} - - -/****************************************************************************** - * Generic Object Type Loops - *****************************************************************************/ - -/*UFUNC_API*/ -static void -PyUFunc_O_O(char **args, intp *dimensions, intp *steps, void *func) -{ - unaryfunc f = (unaryfunc)func; - UNARY_LOOP { - PyObject *in1 = *(PyObject **)ip1; - PyObject **out = (PyObject **)op1; - PyObject *ret = f(in1); - if ((ret == NULL) || PyErr_Occurred()) { - return; - } - Py_XDECREF(*out); - *out = ret; - } -} - -/*UFUNC_API*/ -static void -PyUFunc_O_O_method(char **args, intp *dimensions, intp *steps, void *func) -{ - char *meth = (char *)func; - UNARY_LOOP { - PyObject *in1 = *(PyObject **)ip1; - PyObject **out = (PyObject **)op1; - PyObject *ret = PyObject_CallMethod(in1, meth, NULL); - if (ret == NULL) { - return; - } - Py_XDECREF(*out); - *out = ret; - } -} - -/*UFUNC_API*/ -static void -PyUFunc_OO_O(char **args, intp *dimensions, intp *steps, void *func) -{ - binaryfunc f = 
(binaryfunc)func; - BINARY_LOOP { - PyObject *in1 = *(PyObject **)ip1; - PyObject *in2 = *(PyObject **)ip2; - PyObject **out = (PyObject **)op1; - PyObject *ret = f(in1, in2); - if (PyErr_Occurred()) { - return; - } - Py_XDECREF(*out); - *out = ret; - } -} - -/*UFUNC_API*/ -static void -PyUFunc_OO_O_method(char **args, intp *dimensions, intp *steps, void *func) -{ - char *meth = (char *)func; - BINARY_LOOP { - PyObject *in1 = *(PyObject **)ip1; - PyObject *in2 = *(PyObject **)ip2; - PyObject **out = (PyObject **)op1; - PyObject *ret = PyObject_CallMethod(in1, meth, "(O)", in2); - if (ret == NULL) { - return; - } - Py_XDECREF(*out); - *out = ret; - } -} - -/* - * A general-purpose ufunc that deals with general-purpose Python callable. - * func is a structure with nin, nout, and a Python callable function - */ - -/*UFUNC_API*/ -static void -PyUFunc_On_Om(char **args, intp *dimensions, intp *steps, void *func) -{ - intp n = dimensions[0]; - PyUFunc_PyFuncData *data = (PyUFunc_PyFuncData *)func; - int nin = data->nin; - int nout = data->nout; - PyObject *tocall = data->callable; - char *ptrs[NPY_MAXARGS]; - PyObject *arglist, *result; - PyObject *in, **op; - intp i, j, ntot; - - ntot = nin+nout; - - for(j = 0; j < ntot; j++) { - ptrs[j] = args[j]; - } - for(i = 0; i < n; i++) { - arglist = PyTuple_New(nin); - if (arglist == NULL) { - return; - } - for(j = 0; j < nin; j++) { - in = *((PyObject **)ptrs[j]); - if (in == NULL) { - Py_DECREF(arglist); - return; - } - PyTuple_SET_ITEM(arglist, j, in); - Py_INCREF(in); - } - result = PyEval_CallObject(tocall, arglist); - Py_DECREF(arglist); - if (result == NULL) { - return; - } - if PyTuple_Check(result) { - if (nout != PyTuple_Size(result)) { - Py_DECREF(result); - return; - } - for(j = 0; j < nout; j++) { - op = (PyObject **)ptrs[j+nin]; - Py_XDECREF(*op); - *op = PyTuple_GET_ITEM(result, j); - Py_INCREF(*op); - } - Py_DECREF(result); - } - else { - op = (PyObject **)ptrs[nin]; - Py_XDECREF(*op); - *op = result; - } - for(j = 0; j < ntot; j++) { - ptrs[j] += steps[j]; - } - } -} - - - /* ---------------------------------------------------------------- */ Modified: trunk/numpy/core/src/umathmodule.c.src =================================================================== --- trunk/numpy/core/src/umathmodule.c.src 2008-11-16 13:04:59 UTC (rev 6052) +++ trunk/numpy/core/src/umathmodule.c.src 2008-11-17 03:27:28 UTC (rev 6053) @@ -606,7 +606,376 @@ intp i;\ for(i = 0; i < n; i++, ip1 += is1, ip2 += is2, op1 += os1, op2 += os2) +/****************************************************************************** + ** GENERIC FLOAT LOOPS ** + *****************************************************************************/ + + +typedef float floatUnaryFunc(float x); +typedef double doubleUnaryFunc(double x); +typedef longdouble longdoubleUnaryFunc(longdouble x); +typedef float floatBinaryFunc(float x, float y); +typedef double doubleBinaryFunc(double x, double y); +typedef longdouble longdoubleBinaryFunc(longdouble x, longdouble y); + + +/*UFUNC_API*/ +static void +PyUFunc_f_f(char **args, intp *dimensions, intp *steps, void *func) +{ + floatUnaryFunc *f = (floatUnaryFunc *)func; + UNARY_LOOP { + const float in1 = *(float *)ip1; + *(float *)op1 = f(in1); + } +} + +/*UFUNC_API*/ +static void +PyUFunc_f_f_As_d_d(char **args, intp *dimensions, intp *steps, void *func) +{ + doubleUnaryFunc *f = (doubleUnaryFunc *)func; + UNARY_LOOP { + const float in1 = *(float *)ip1; + *(float *)op1 = (float)f((double)in1); + } +} + +/*UFUNC_API*/ +static void +PyUFunc_ff_f(char 
**args, intp *dimensions, intp *steps, void *func) +{ + floatBinaryFunc *f = (floatBinaryFunc *)func; + BINARY_LOOP { + float in1 = *(float *)ip1; + float in2 = *(float *)ip2; + *(float *)op1 = f(in1, in2); + } +} + +/*UFUNC_API*/ +static void +PyUFunc_ff_f_As_dd_d(char **args, intp *dimensions, intp *steps, void *func) +{ + doubleBinaryFunc *f = (doubleBinaryFunc *)func; + BINARY_LOOP { + float in1 = *(float *)ip1; + float in2 = *(float *)ip2; + *(float *)op1 = (double)f((double)in1, (double)in2); + } +} + +/*UFUNC_API*/ +static void +PyUFunc_d_d(char **args, intp *dimensions, intp *steps, void *func) +{ + doubleUnaryFunc *f = (doubleUnaryFunc *)func; + UNARY_LOOP { + double in1 = *(double *)ip1; + *(double *)op1 = f(in1); + } +} + +/*UFUNC_API*/ +static void +PyUFunc_dd_d(char **args, intp *dimensions, intp *steps, void *func) +{ + doubleBinaryFunc *f = (doubleBinaryFunc *)func; + BINARY_LOOP { + double in1 = *(double *)ip1; + double in2 = *(double *)ip2; + *(double *)op1 = f(in1, in2); + } +} + +/*UFUNC_API*/ +static void +PyUFunc_g_g(char **args, intp *dimensions, intp *steps, void *func) +{ + longdoubleUnaryFunc *f = (longdoubleUnaryFunc *)func; + UNARY_LOOP { + longdouble in1 = *(longdouble *)ip1; + *(longdouble *)op1 = f(in1); + } +} + +/*UFUNC_API*/ +static void +PyUFunc_gg_g(char **args, intp *dimensions, intp *steps, void *func) +{ + longdoubleBinaryFunc *f = (longdoubleBinaryFunc *)func; + BINARY_LOOP { + longdouble in1 = *(longdouble *)ip1; + longdouble in2 = *(longdouble *)ip2; + *(longdouble *)op1 = f(in1, in2); + } +} + + + +/****************************************************************************** + ** GENERIC COMPLEX LOOPS ** + *****************************************************************************/ + + +typedef void cdoubleUnaryFunc(cdouble *x, cdouble *r); +typedef void cfloatUnaryFunc(cfloat *x, cfloat *r); +typedef void clongdoubleUnaryFunc(clongdouble *x, clongdouble *r); +typedef void cdoubleBinaryFunc(cdouble *x, cdouble *y, cdouble *r); +typedef void cfloatBinaryFunc(cfloat *x, cfloat *y, cfloat *r); +typedef void clongdoubleBinaryFunc(clongdouble *x, clongdouble *y, + clongdouble *r); + +/*UFUNC_API*/ +static void +PyUFunc_F_F(char **args, intp *dimensions, intp *steps, void *func) +{ + cfloatUnaryFunc *f = (cfloatUnaryFunc *)func; + UNARY_LOOP { + cfloat in1 = *(cfloat *)ip1; + cfloat *out = (cfloat *)op1; + f(&in1, out); + } +} + +/*UFUNC_API*/ +static void +PyUFunc_F_F_As_D_D(char **args, intp *dimensions, intp *steps, void *func) +{ + cdoubleUnaryFunc *f = (cdoubleUnaryFunc *)func; + UNARY_LOOP { + const float *in1 = (float *)ip1; + cdouble tmp = {(double)(in1[0]),(double)in1[1]}; + cdouble out; + f(&tmp, &out); + ((float *)op1)[0] = (float)out.real; + ((float *)op1)[1] = (float)out.imag; + } +} + +/*UFUNC_API*/ +static void +PyUFunc_FF_F(char **args, intp *dimensions, intp *steps, void *func) +{ + cfloatBinaryFunc *f = (cfloatBinaryFunc *)func; + BINARY_LOOP { + cfloat in1 = *(cfloat *)ip1; + cfloat in2 = *(cfloat *)ip2; + cfloat *out = (cfloat *)op1; + f(&in1, &in2, out); + } +} + +/*UFUNC_API*/ +static void +PyUFunc_FF_F_As_DD_D(char **args, intp *dimensions, intp *steps, void *func) +{ + cdoubleBinaryFunc *f = (cdoubleBinaryFunc *)func; + BINARY_LOOP { + const float *in1 = (float *)ip1; + const float *in2 = (float *)ip2; + cdouble tmp1 = {(double)(in1[0]),(double)in1[1]}; + cdouble tmp2 = {(double)(in2[0]),(double)in2[1]}; + cdouble out; + f(&tmp1, &tmp2, &out); + ((float *)op1)[0] = (float)out.real; + ((float *)op1)[1] = (float)out.imag; + } +} 
+ +/*UFUNC_API*/ +static void +PyUFunc_D_D(char **args, intp *dimensions, intp *steps, void *func) +{ + cdoubleUnaryFunc *f = (cdoubleUnaryFunc *)func; + UNARY_LOOP { + cdouble in1 = *(cdouble *)ip1; + cdouble *out = (cdouble *)op1; + f(&in1, out); + } +} + +/*UFUNC_API*/ +static void +PyUFunc_DD_D(char **args, intp *dimensions, intp *steps, void *func) +{ + cdoubleBinaryFunc *f = (cdoubleBinaryFunc *)func; + BINARY_LOOP { + cdouble in1 = *(cdouble *)ip1; + cdouble in2 = *(cdouble *)ip2; + cdouble *out = (cdouble *)op1; + f(&in1, &in2, out); + } +} + +/*UFUNC_API*/ +static void +PyUFunc_G_G(char **args, intp *dimensions, intp *steps, void *func) +{ + clongdoubleUnaryFunc *f = (clongdoubleUnaryFunc *)func; + UNARY_LOOP { + clongdouble in1 = *(clongdouble *)ip1; + clongdouble *out = (clongdouble *)op1; + f(&in1, out); + } +} + +/*UFUNC_API*/ +static void +PyUFunc_GG_G(char **args, intp *dimensions, intp *steps, void *func) +{ + clongdoubleBinaryFunc *f = (clongdoubleBinaryFunc *)func; + BINARY_LOOP { + clongdouble in1 = *(clongdouble *)ip1; + clongdouble in2 = *(clongdouble *)ip2; + clongdouble *out = (clongdouble *)op1; + f(&in1, &in2, out); + } +} + + +/****************************************************************************** + ** GENERIC OBJECT lOOPS ** + *****************************************************************************/ + +/*UFUNC_API*/ +static void +PyUFunc_O_O(char **args, intp *dimensions, intp *steps, void *func) +{ + unaryfunc f = (unaryfunc)func; + UNARY_LOOP { + PyObject *in1 = *(PyObject **)ip1; + PyObject **out = (PyObject **)op1; + PyObject *ret = f(in1); + if ((ret == NULL) || PyErr_Occurred()) { + return; + } + Py_XDECREF(*out); + *out = ret; + } +} + +/*UFUNC_API*/ +static void +PyUFunc_O_O_method(char **args, intp *dimensions, intp *steps, void *func) +{ + char *meth = (char *)func; + UNARY_LOOP { + PyObject *in1 = *(PyObject **)ip1; + PyObject **out = (PyObject **)op1; + PyObject *ret = PyObject_CallMethod(in1, meth, NULL); + if (ret == NULL) { + return; + } + Py_XDECREF(*out); + *out = ret; + } +} + +/*UFUNC_API*/ +static void +PyUFunc_OO_O(char **args, intp *dimensions, intp *steps, void *func) +{ + binaryfunc f = (binaryfunc)func; + BINARY_LOOP { + PyObject *in1 = *(PyObject **)ip1; + PyObject *in2 = *(PyObject **)ip2; + PyObject **out = (PyObject **)op1; + PyObject *ret = f(in1, in2); + if (PyErr_Occurred()) { + return; + } + Py_XDECREF(*out); + *out = ret; + } +} + +/*UFUNC_API*/ +static void +PyUFunc_OO_O_method(char **args, intp *dimensions, intp *steps, void *func) +{ + char *meth = (char *)func; + BINARY_LOOP { + PyObject *in1 = *(PyObject **)ip1; + PyObject *in2 = *(PyObject **)ip2; + PyObject **out = (PyObject **)op1; + PyObject *ret = PyObject_CallMethod(in1, meth, "(O)", in2); + if (ret == NULL) { + return; + } + Py_XDECREF(*out); + *out = ret; + } +} + /* + * A general-purpose ufunc that deals with general-purpose Python callable. 
+ * func is a structure with nin, nout, and a Python callable function + */ + +/*UFUNC_API*/ +static void +PyUFunc_On_Om(char **args, intp *dimensions, intp *steps, void *func) +{ + intp n = dimensions[0]; + PyUFunc_PyFuncData *data = (PyUFunc_PyFuncData *)func; + int nin = data->nin; + int nout = data->nout; + PyObject *tocall = data->callable; + char *ptrs[NPY_MAXARGS]; + PyObject *arglist, *result; + PyObject *in, **op; + intp i, j, ntot; + + ntot = nin+nout; + + for(j = 0; j < ntot; j++) { + ptrs[j] = args[j]; + } + for(i = 0; i < n; i++) { + arglist = PyTuple_New(nin); + if (arglist == NULL) { + return; + } + for(j = 0; j < nin; j++) { + in = *((PyObject **)ptrs[j]); + if (in == NULL) { + Py_DECREF(arglist); + return; + } + PyTuple_SET_ITEM(arglist, j, in); + Py_INCREF(in); + } + result = PyEval_CallObject(tocall, arglist); + Py_DECREF(arglist); + if (result == NULL) { + return; + } + if PyTuple_Check(result) { + if (nout != PyTuple_Size(result)) { + Py_DECREF(result); + return; + } + for(j = 0; j < nout; j++) { + op = (PyObject **)ptrs[j+nin]; + Py_XDECREF(*op); + *op = PyTuple_GET_ITEM(result, j); + Py_INCREF(*op); + } + Py_DECREF(result); + } + else { + op = (PyObject **)ptrs[nin]; + Py_XDECREF(*op); + *op = result; + } + for(j = 0; j < ntot; j++) { + ptrs[j] += steps[j]; + } + } +} + +/* ***************************************************************************** ** BOOLEAN LOOPS ** ***************************************************************************** From numpy-svn at scipy.org Sun Nov 16 22:27:50 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 16 Nov 2008 21:27:50 -0600 (CST) Subject: [Numpy-svn] r6054 - trunk/numpy/core/code_generators Message-ID: <20081117032750.BC99E39C05F@scipy.org> Author: charris Date: 2008-11-16 21:27:46 -0600 (Sun, 16 Nov 2008) New Revision: 6054 Modified: trunk/numpy/core/code_generators/genapi.py Log: Add umathmodule.c.src to files scanned for ufunc api. This is preparation for splitting the umathmodule.c.src file. Modified: trunk/numpy/core/code_generators/genapi.py =================================================================== --- trunk/numpy/core/code_generators/genapi.py 2008-11-17 03:27:28 UTC (rev 6053) +++ trunk/numpy/core/code_generators/genapi.py 2008-11-17 03:27:46 UTC (rev 6054) @@ -18,6 +18,7 @@ 'multiarraymodule.c', 'scalartypes.inc.src', 'ufuncobject.c', + 'umathmodule.c.src' ] THIS_DIR = os.path.dirname(__file__) API_FILES = [os.path.join(THIS_DIR, '..', 'src', a) for a in API_FILES] From numpy-svn at scipy.org Mon Nov 17 02:00:45 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 17 Nov 2008 01:00:45 -0600 (CST) Subject: [Numpy-svn] r6055 - trunk/numpy/ma Message-ID: <20081117070045.9718C39C05F@scipy.org> Author: pierregm Date: 2008-11-17 01:00:42 -0600 (Mon, 17 Nov 2008) New Revision: 6055 Modified: trunk/numpy/ma/core.py Log: simplify MAError Modified: trunk/numpy/ma/core.py =================================================================== --- trunk/numpy/ma/core.py 2008-11-17 03:27:46 UTC (rev 6054) +++ trunk/numpy/ma/core.py 2008-11-17 07:00:42 UTC (rev 6055) @@ -95,15 +95,9 @@ #####-------------------------------------------------------------------------- class MAError(Exception): "Class for MA related errors." - def __init__ (self, args=None): - "Creates an exception." - Exception.__init__(self, args) - self.args = args - def __str__(self): - "Calculates the string representation." 
- return str(self.args) - __repr__ = __str__ + pass + #####-------------------------------------------------------------------------- #---- --- Filling options --- #####-------------------------------------------------------------------------- @@ -4102,8 +4096,27 @@ #.............................................................................. def asarray(a, dtype=None): - """asarray(data, dtype) = array(data, dtype, copy=0, subok=0) - + """ + Convert the input to a masked array. + + Parameters + ---------- + a : array_like + Input data, in any form that can be converted to an array. This + includes lists, lists of tuples, tuples, tuples of tuples, tuples + of lists and ndarrays. + dtype : data-type, optional + By default, the data-type is inferred from the input data. + order : {'C', 'F'}, optional + Whether to use row-major ('C') or column-major ('FORTRAN') memory + representation. Defaults to 'C'. + + Returns + ------- + out : ndarray + MaskedArray interpretation of `a`. No copy is performed if the input + is already an ndarray. If `a` is a subclass of ndarray, a base + class ndarray is returned. Return a as a MaskedArray object of the given dtype. If dtype is not given or None, is is set to the dtype of a. No copy is performed if a is already an array. From numpy-svn at scipy.org Mon Nov 17 07:04:11 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 17 Nov 2008 06:04:11 -0600 (CST) Subject: [Numpy-svn] r6056 - branches/visualstudio_manifest/numpy/distutils/command Message-ID: <20081117120411.0C80239C05F@scipy.org> Author: cdavid Date: 2008-11-17 06:03:57 -0600 (Mon, 17 Nov 2008) New Revision: 6056 Modified: branches/visualstudio_manifest/numpy/distutils/command/config.py Log: Detect whether config link needs embedding the manifest for the MSVC runtime. Modified: branches/visualstudio_manifest/numpy/distutils/command/config.py =================================================================== --- branches/visualstudio_manifest/numpy/distutils/command/config.py 2008-11-17 07:00:42 UTC (rev 6055) +++ branches/visualstudio_manifest/numpy/distutils/command/config.py 2008-11-17 12:03:57 UTC (rev 6056) @@ -10,7 +10,10 @@ from distutils.command.config import LANG_EXT from distutils import log from distutils.file_util import copy_file +from distutils.msvccompiler import get_build_version as get_build_msvc_version from numpy.distutils.exec_command import exec_command +from numpy.distutils.misc_util import msvc_runtime_library +from numpy.distutils.mingw32compiler import msvc_manifest_xml LANG_EXT['f77'] = '.f' LANG_EXT['f90'] = '.f90' @@ -110,6 +113,21 @@ if fileexists: continue log.warn('could not find library %r in directories %s' \ % (libname, library_dirs)) + elif self.compiler.compiler_type == 'mingw32': + msver = get_build_msvc_version() + if msver is not None: + if msver >= 8: + # check msvcr major version are the same for linking and + # embedding + msvcv = msvc_runtime_library() + if msvcv: + maj = msvcv[5:6] + if not maj == int(msver): + raise ValueError, + "Dyscrepancy between linked msvcr " \ + "(%f) and the one about to be embedded " \ + "(%f)" % (int(msver), maj) + return self._wrap_method(old_config._link,lang, (body, headers, include_dirs, libraries, library_dirs, lang)) From numpy-svn at scipy.org Mon Nov 17 07:04:43 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 17 Nov 2008 06:04:43 -0600 (CST) Subject: [Numpy-svn] r6057 - in branches/visualstudio_manifest/numpy/distutils: . 
command Message-ID: <20081117120443.C6E6839C05F@scipy.org> Author: cdavid Date: 2008-11-17 06:04:27 -0600 (Mon, 17 Nov 2008) New Revision: 6057 Modified: branches/visualstudio_manifest/numpy/distutils/command/config.py branches/visualstudio_manifest/numpy/distutils/mingw32ccompiler.py Log: Refactor msvc runtime checking, put it into mingw32compiler Modified: branches/visualstudio_manifest/numpy/distutils/command/config.py =================================================================== --- branches/visualstudio_manifest/numpy/distutils/command/config.py 2008-11-17 12:03:57 UTC (rev 6056) +++ branches/visualstudio_manifest/numpy/distutils/command/config.py 2008-11-17 12:04:27 UTC (rev 6057) @@ -12,8 +12,8 @@ from distutils.file_util import copy_file from distutils.msvccompiler import get_build_version as get_build_msvc_version from numpy.distutils.exec_command import exec_command -from numpy.distutils.misc_util import msvc_runtime_library -from numpy.distutils.mingw32compiler import msvc_manifest_xml +from numpy.distutils.mingw32compiler import msvc_manifest_xml, + check_embedded_match_linked LANG_EXT['f77'] = '.f' LANG_EXT['f90'] = '.f90' @@ -117,16 +117,7 @@ msver = get_build_msvc_version() if msver is not None: if msver >= 8: - # check msvcr major version are the same for linking and - # embedding - msvcv = msvc_runtime_library() - if msvcv: - maj = msvcv[5:6] - if not maj == int(msver): - raise ValueError, - "Dyscrepancy between linked msvcr " \ - "(%f) and the one about to be embedded " \ - "(%f)" % (int(msver), maj) + check_embedded_msvcr_match_linked(msver) return self._wrap_method(old_config._link,lang, (body, headers, include_dirs, Modified: branches/visualstudio_manifest/numpy/distutils/mingw32ccompiler.py =================================================================== --- branches/visualstudio_manifest/numpy/distutils/mingw32ccompiler.py 2008-11-17 12:03:57 UTC (rev 6056) +++ branches/visualstudio_manifest/numpy/distutils/mingw32ccompiler.py 2008-11-17 12:04:27 UTC (rev 6057) @@ -294,3 +294,16 @@ return """\ #include "winuser.h" %d RT_MANIFEST %s""" % (rctype, name) + +def check_embedded_msvcr_match_linked(msver): + """msver is the ms runtime version used for the MANIFEST.""" + # check msvcr major version are the same for linking and + # embedding + msvcv = msvc_runtime_library() + if msvcv: + maj = msvcv[5:6] + if not maj == int(msver): + raise ValueError, + "Dyscrepancy between linked msvcr " \ + "(%f) and the one about to be embedded " \ + "(%f)" % (int(msver), maj) From numpy-svn at scipy.org Mon Nov 17 07:05:12 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 17 Nov 2008 06:05:12 -0600 (CST) Subject: [Numpy-svn] r6058 - branches/visualstudio_manifest/numpy/distutils Message-ID: <20081117120512.9E6BA39C088@scipy.org> Author: cdavid Date: 2008-11-17 06:05:01 -0600 (Mon, 17 Nov 2008) New Revision: 6058 Modified: branches/visualstudio_manifest/numpy/distutils/mingw32ccompiler.py Log: Fix string formatting. 
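For context, the check corrected by the diff below compares the major version of the msvcr runtime that mingw links against with the MSVC version whose manifest is about to be embedded. A standalone sketch of that logic follows; the helper names are invented here, and the "msvcr90"-style library name is an assumption inferred from the msvcv[5:6] slice in the real code:

    def _msvcr_major(runtime_name):
        # "msvcr90" -> 9, "msvcr71" -> 7 (same idea as the [5:6] slice below)
        return int(runtime_name[5:6])

    def _check_match(msver, runtime_name):
        maj = _msvcr_major(runtime_name)
        if maj != int(msver):
            raise ValueError("linked msvcr (%d) does not match the runtime "
                             "to be embedded (%d)" % (maj, int(msver)))

    _check_match(9.0, "msvcr90")      # OK: both are major version 9
    # _check_match(8.0, "msvcr90")    # would raise ValueError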
Modified: branches/visualstudio_manifest/numpy/distutils/mingw32ccompiler.py =================================================================== --- branches/visualstudio_manifest/numpy/distutils/mingw32ccompiler.py 2008-11-17 12:04:27 UTC (rev 6057) +++ branches/visualstudio_manifest/numpy/distutils/mingw32ccompiler.py 2008-11-17 12:05:01 UTC (rev 6058) @@ -301,9 +301,9 @@ # embedding msvcv = msvc_runtime_library() if msvcv: - maj = msvcv[5:6] + maj = int(msvcv[5:6]) if not maj == int(msver): - raise ValueError, + raise ValueError, \ "Dyscrepancy between linked msvcr " \ - "(%f) and the one about to be embedded " \ - "(%f)" % (int(msver), maj) + "(%d) and the one about to be embedded " \ + "(%d)" % (int(msver), maj) From numpy-svn at scipy.org Mon Nov 17 07:05:41 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 17 Nov 2008 06:05:41 -0600 (CST) Subject: [Numpy-svn] r6059 - branches/visualstudio_manifest/numpy/distutils/command Message-ID: <20081117120541.788DF39C05F@scipy.org> Author: cdavid Date: 2008-11-17 06:05:29 -0600 (Mon, 17 Nov 2008) New Revision: 6059 Modified: branches/visualstudio_manifest/numpy/distutils/command/config.py Log: fix imports. Modified: branches/visualstudio_manifest/numpy/distutils/command/config.py =================================================================== --- branches/visualstudio_manifest/numpy/distutils/command/config.py 2008-11-17 12:05:01 UTC (rev 6058) +++ branches/visualstudio_manifest/numpy/distutils/command/config.py 2008-11-17 12:05:29 UTC (rev 6059) @@ -12,8 +12,8 @@ from distutils.file_util import copy_file from distutils.msvccompiler import get_build_version as get_build_msvc_version from numpy.distutils.exec_command import exec_command -from numpy.distutils.mingw32compiler import msvc_manifest_xml, - check_embedded_match_linked +from numpy.distutils.mingw32ccompiler import msvc_manifest_xml, \ + check_embedded_msvcr_match_linked LANG_EXT['f77'] = '.f' LANG_EXT['f90'] = '.f90' From numpy-svn at scipy.org Mon Nov 17 07:06:12 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 17 Nov 2008 06:06:12 -0600 (CST) Subject: [Numpy-svn] r6060 - in branches/visualstudio_manifest/numpy/distutils: . command Message-ID: <20081117120612.24BD539C05F@scipy.org> Author: cdavid Date: 2008-11-17 06:05:56 -0600 (Mon, 17 Nov 2008) New Revision: 6060 Modified: branches/visualstudio_manifest/numpy/distutils/command/config.py branches/visualstudio_manifest/numpy/distutils/mingw32ccompiler.py Log: Generate the xml manifest file. 
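The diff below turns the MSVC build version (a float such as 9.0 from get_build_version()) into the major/minor pair that msvc_manifest_xml() takes, and writes the XML next to the config test executable. A minimal sketch of just that arithmetic and of the name produced by manifest_name(), assuming distutils' usual "_configtest" stem and a ".exe" extension on win32:

    msver = 9.0                       # e.g. a Visual Studio 2008 build
    ma = int(msver)                   # -> 9
    mi = int((msver - ma) * 10)       # -> 0   (7.1 would give ma=7, mi=1)
    # name pattern from manifest_name(): <configtest stem> + exe ext + ".manifest"
    manifest = "_configtest" + ".exe" + ".manifest"
    assert manifest == "_configtest.exe.manifest"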
Modified: branches/visualstudio_manifest/numpy/distutils/command/config.py =================================================================== --- branches/visualstudio_manifest/numpy/distutils/command/config.py 2008-11-17 12:05:29 UTC (rev 6059) +++ branches/visualstudio_manifest/numpy/distutils/command/config.py 2008-11-17 12:05:56 UTC (rev 6060) @@ -13,7 +13,7 @@ from distutils.msvccompiler import get_build_version as get_build_msvc_version from numpy.distutils.exec_command import exec_command from numpy.distutils.mingw32ccompiler import msvc_manifest_xml, \ - check_embedded_msvcr_match_linked + check_embedded_msvcr_match_linked, manifest_name LANG_EXT['f77'] = '.f' LANG_EXT['f90'] = '.f90' @@ -118,7 +118,14 @@ if msver is not None: if msver >= 8: check_embedded_msvcr_match_linked(msver) - + ma = int(msver) + mi = int((msver - ma) * 10) + # Write the manifest file + manxml = msvc_manifest_xml(ma, mi) + man = open(manifest_name(self), "w") + self.temp_files.append(man) + man.write(manxml) + man.close() return self._wrap_method(old_config._link,lang, (body, headers, include_dirs, libraries, library_dirs, lang)) Modified: branches/visualstudio_manifest/numpy/distutils/mingw32ccompiler.py =================================================================== --- branches/visualstudio_manifest/numpy/distutils/mingw32ccompiler.py 2008-11-17 12:05:29 UTC (rev 6059) +++ branches/visualstudio_manifest/numpy/distutils/mingw32ccompiler.py 2008-11-17 12:05:56 UTC (rev 6060) @@ -279,8 +279,7 @@ 'exe'). Parameters - ---------- - name: str + ---------- name: str name of the manifest file to embed type: str ('dll', 'exe') type of the binary which will embed the manifest""" @@ -307,3 +306,10 @@ "Dyscrepancy between linked msvcr " \ "(%d) and the one about to be embedded " \ "(%d)" % (int(msver), maj) + +def manifest_name(config): + # Get configest name (including suffix) + base = os.path.basename(config._gen_temp_sourcefile("yo", [], lang)) + root, ext = os.path.splitext(base) + exext = self.compiler.exe_extension + return root + exect + ".manifest" From numpy-svn at scipy.org Mon Nov 17 07:06:39 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 17 Nov 2008 06:06:39 -0600 (CST) Subject: [Numpy-svn] r6061 - branches/visualstudio_manifest/numpy/distutils Message-ID: <20081117120639.33D2039C05F@scipy.org> Author: cdavid Date: 2008-11-17 06:06:26 -0600 (Mon, 17 Nov 2008) New Revision: 6061 Modified: branches/visualstudio_manifest/numpy/distutils/mingw32ccompiler.py Log: Separate function to get the configtest name. 
Modified: branches/visualstudio_manifest/numpy/distutils/mingw32ccompiler.py =================================================================== --- branches/visualstudio_manifest/numpy/distutils/mingw32ccompiler.py 2008-11-17 12:05:56 UTC (rev 6060) +++ branches/visualstudio_manifest/numpy/distutils/mingw32ccompiler.py 2008-11-17 12:06:26 UTC (rev 6061) @@ -307,9 +307,12 @@ "(%d) and the one about to be embedded " \ "(%d)" % (int(msver), maj) +def configtest_name(config): + base = os.path.basename(config._gen_temp_sourcefile("yo", [], lang)) + return os.path.splitext(base)[0] + def manifest_name(config): # Get configest name (including suffix) - base = os.path.basename(config._gen_temp_sourcefile("yo", [], lang)) - root, ext = os.path.splitext(base) + root = configtest_name(config) exext = self.compiler.exe_extension return root + exect + ".manifest" From numpy-svn at scipy.org Mon Nov 17 07:07:16 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 17 Nov 2008 06:07:16 -0600 (CST) Subject: [Numpy-svn] r6062 - in branches/visualstudio_manifest/numpy/distutils: . command Message-ID: <20081117120716.065CA39C05F@scipy.org> Author: cdavid Date: 2008-11-17 06:06:58 -0600 (Mon, 17 Nov 2008) New Revision: 6062 Modified: branches/visualstudio_manifest/numpy/distutils/command/config.py branches/visualstudio_manifest/numpy/distutils/mingw32ccompiler.py Log: Generate the rc file for manifest embedding. Modified: branches/visualstudio_manifest/numpy/distutils/command/config.py =================================================================== --- branches/visualstudio_manifest/numpy/distutils/command/config.py 2008-11-17 12:06:26 UTC (rev 6061) +++ branches/visualstudio_manifest/numpy/distutils/command/config.py 2008-11-17 12:06:58 UTC (rev 6062) @@ -13,7 +13,7 @@ from distutils.msvccompiler import get_build_version as get_build_msvc_version from numpy.distutils.exec_command import exec_command from numpy.distutils.mingw32ccompiler import msvc_manifest_xml, \ - check_embedded_msvcr_match_linked, manifest_name + check_embedded_msvcr_match_linked, manifest_name, rc_name LANG_EXT['f77'] = '.f' LANG_EXT['f90'] = '.f90' @@ -126,6 +126,12 @@ self.temp_files.append(man) man.write(manxml) man.close() + # Write the rc file + manrc = manifest_rc(manifest_name(self), "exe") + rc = open(rc_name(self), "w") + self.temp_files.append(rc) + rc.write(manrc) + rc.close() return self._wrap_method(old_config._link,lang, (body, headers, include_dirs, libraries, library_dirs, lang)) Modified: branches/visualstudio_manifest/numpy/distutils/mingw32ccompiler.py =================================================================== --- branches/visualstudio_manifest/numpy/distutils/mingw32ccompiler.py 2008-11-17 12:06:26 UTC (rev 6061) +++ branches/visualstudio_manifest/numpy/distutils/mingw32ccompiler.py 2008-11-17 12:06:58 UTC (rev 6062) @@ -316,3 +316,8 @@ root = configtest_name(config) exext = self.compiler.exe_extension return root + exect + ".manifest" + +def rc_name(config): + # Get configest name (including suffix) + root = configtest_name(config) + return root + ".rc" From numpy-svn at scipy.org Mon Nov 17 07:07:52 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 17 Nov 2008 06:07:52 -0600 (CST) Subject: [Numpy-svn] r6063 - in branches/visualstudio_manifest/numpy/distutils: . 
command Message-ID: <20081117120752.13D3739C05F@scipy.org> Author: cdavid Date: 2008-11-17 06:07:36 -0600 (Mon, 17 Nov 2008) New Revision: 6063 Modified: branches/visualstudio_manifest/numpy/distutils/command/config.py branches/visualstudio_manifest/numpy/distutils/mingw32ccompiler.py Log: Fix configtest and manifest_name. Modified: branches/visualstudio_manifest/numpy/distutils/command/config.py =================================================================== --- branches/visualstudio_manifest/numpy/distutils/command/config.py 2008-11-17 12:06:58 UTC (rev 6062) +++ branches/visualstudio_manifest/numpy/distutils/command/config.py 2008-11-17 12:07:36 UTC (rev 6063) @@ -13,7 +13,7 @@ from distutils.msvccompiler import get_build_version as get_build_msvc_version from numpy.distutils.exec_command import exec_command from numpy.distutils.mingw32ccompiler import msvc_manifest_xml, \ - check_embedded_msvcr_match_linked, manifest_name, rc_name + check_embedded_msvcr_match_linked, manifest_name, rc_name, manifest_rc LANG_EXT['f77'] = '.f' LANG_EXT['f90'] = '.f90' Modified: branches/visualstudio_manifest/numpy/distutils/mingw32ccompiler.py =================================================================== --- branches/visualstudio_manifest/numpy/distutils/mingw32ccompiler.py 2008-11-17 12:06:58 UTC (rev 6062) +++ branches/visualstudio_manifest/numpy/distutils/mingw32ccompiler.py 2008-11-17 12:07:36 UTC (rev 6063) @@ -308,14 +308,14 @@ "(%d)" % (int(msver), maj) def configtest_name(config): - base = os.path.basename(config._gen_temp_sourcefile("yo", [], lang)) + base = os.path.basename(config._gen_temp_sourcefile("yo", [], "c")) return os.path.splitext(base)[0] def manifest_name(config): # Get configest name (including suffix) root = configtest_name(config) - exext = self.compiler.exe_extension - return root + exect + ".manifest" + exext = config.compiler.exe_extension + return root + exext + ".manifest" def rc_name(config): # Get configest name (including suffix) From numpy-svn at scipy.org Mon Nov 17 07:08:22 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 17 Nov 2008 06:08:22 -0600 (CST) Subject: [Numpy-svn] r6064 - branches/visualstudio_manifest/numpy/distutils/command Message-ID: <20081117120822.675D239C05F@scipy.org> Author: cdavid Date: 2008-11-17 06:08:08 -0600 (Mon, 17 Nov 2008) New Revision: 6064 Modified: branches/visualstudio_manifest/numpy/distutils/command/config.py Log: temp_files is a list of filenames, not files. 
Modified: branches/visualstudio_manifest/numpy/distutils/command/config.py =================================================================== --- branches/visualstudio_manifest/numpy/distutils/command/config.py 2008-11-17 12:07:36 UTC (rev 6063) +++ branches/visualstudio_manifest/numpy/distutils/command/config.py 2008-11-17 12:08:08 UTC (rev 6064) @@ -123,13 +123,13 @@ # Write the manifest file manxml = msvc_manifest_xml(ma, mi) man = open(manifest_name(self), "w") - self.temp_files.append(man) + self.temp_files.append(manxml) man.write(manxml) man.close() # Write the rc file manrc = manifest_rc(manifest_name(self), "exe") rc = open(rc_name(self), "w") - self.temp_files.append(rc) + self.temp_files.append(manrc) rc.write(manrc) rc.close() return self._wrap_method(old_config._link,lang, From numpy-svn at scipy.org Mon Nov 17 07:08:53 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 17 Nov 2008 06:08:53 -0600 (CST) Subject: [Numpy-svn] r6065 - branches/visualstudio_manifest/numpy/distutils/command Message-ID: <20081117120853.718FE39C05F@scipy.org> Author: cdavid Date: 2008-11-17 06:08:38 -0600 (Mon, 17 Nov 2008) New Revision: 6065 Modified: branches/visualstudio_manifest/numpy/distutils/command/config.py Log: Disable .rc generation for manifest: having the xml file in the same dir as the _configtest.exe is enough for now. Modified: branches/visualstudio_manifest/numpy/distutils/command/config.py =================================================================== --- branches/visualstudio_manifest/numpy/distutils/command/config.py 2008-11-17 12:08:08 UTC (rev 6064) +++ branches/visualstudio_manifest/numpy/distutils/command/config.py 2008-11-17 12:08:38 UTC (rev 6065) @@ -126,12 +126,12 @@ self.temp_files.append(manxml) man.write(manxml) man.close() - # Write the rc file - manrc = manifest_rc(manifest_name(self), "exe") - rc = open(rc_name(self), "w") - self.temp_files.append(manrc) - rc.write(manrc) - rc.close() + # # Write the rc file + # manrc = manifest_rc(manifest_name(self), "exe") + # rc = open(rc_name(self), "w") + # self.temp_files.append(manrc) + # rc.write(manrc) + # rc.close() return self._wrap_method(old_config._link,lang, (body, headers, include_dirs, libraries, library_dirs, lang)) From numpy-svn at scipy.org Mon Nov 17 07:09:25 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 17 Nov 2008 06:09:25 -0600 (CST) Subject: [Numpy-svn] r6066 - in branches/visualstudio_manifest/numpy/distutils: . command Message-ID: <20081117120925.9B3E239C05F@scipy.org> Author: cdavid Date: 2008-11-17 06:09:08 -0600 (Mon, 17 Nov 2008) New Revision: 6066 Modified: branches/visualstudio_manifest/numpy/distutils/command/config.py branches/visualstudio_manifest/numpy/distutils/mingw32ccompiler.py Log: Refactor manifest generation and put it back into mingw32ccompiler module. 
Modified: branches/visualstudio_manifest/numpy/distutils/command/config.py =================================================================== --- branches/visualstudio_manifest/numpy/distutils/command/config.py 2008-11-17 12:08:38 UTC (rev 6065) +++ branches/visualstudio_manifest/numpy/distutils/command/config.py 2008-11-17 12:09:08 UTC (rev 6066) @@ -10,10 +10,8 @@ from distutils.command.config import LANG_EXT from distutils import log from distutils.file_util import copy_file -from distutils.msvccompiler import get_build_version as get_build_msvc_version from numpy.distutils.exec_command import exec_command -from numpy.distutils.mingw32ccompiler import msvc_manifest_xml, \ - check_embedded_msvcr_match_linked, manifest_name, rc_name, manifest_rc +from numpy.distutils.mingw32ccompiler import generate_manifest LANG_EXT['f77'] = '.f' LANG_EXT['f90'] = '.f90' @@ -114,24 +112,7 @@ log.warn('could not find library %r in directories %s' \ % (libname, library_dirs)) elif self.compiler.compiler_type == 'mingw32': - msver = get_build_msvc_version() - if msver is not None: - if msver >= 8: - check_embedded_msvcr_match_linked(msver) - ma = int(msver) - mi = int((msver - ma) * 10) - # Write the manifest file - manxml = msvc_manifest_xml(ma, mi) - man = open(manifest_name(self), "w") - self.temp_files.append(manxml) - man.write(manxml) - man.close() - # # Write the rc file - # manrc = manifest_rc(manifest_name(self), "exe") - # rc = open(rc_name(self), "w") - # self.temp_files.append(manrc) - # rc.write(manrc) - # rc.close() + generate_manifest(self) return self._wrap_method(old_config._link,lang, (body, headers, include_dirs, libraries, library_dirs, lang)) Modified: branches/visualstudio_manifest/numpy/distutils/mingw32ccompiler.py =================================================================== --- branches/visualstudio_manifest/numpy/distutils/mingw32ccompiler.py 2008-11-17 12:08:38 UTC (rev 6065) +++ branches/visualstudio_manifest/numpy/distutils/mingw32ccompiler.py 2008-11-17 12:09:08 UTC (rev 6066) @@ -27,6 +27,7 @@ from distutils.errors import DistutilsExecError, CompileError, UnknownFileError from distutils.unixccompiler import UnixCCompiler +from distutils.msvccompiler import get_build_version as get_build_msvc_version from numpy.distutils.misc_util import msvc_runtime_library # the same as cygwin plus some additional parameters @@ -321,3 +322,23 @@ # Get configest name (including suffix) root = configtest_name(config) return root + ".rc" + +def generate_manifest(config): + msver = get_build_msvc_version() + if msver is not None: + if msver >= 8: + check_embedded_msvcr_match_linked(msver) + ma = int(msver) + mi = int((msver - ma) * 10) + # Write the manifest file + manxml = msvc_manifest_xml(ma, mi) + man = open(manifest_name(config), "w") + config.temp_files.append(manifest_name(config)) + man.write(manxml) + man.close() + # # Write the rc file + # manrc = manifest_rc(manifest_name(self), "exe") + # rc = open(rc_name(self), "w") + # self.temp_files.append(manrc) + # rc.write(manrc) + # rc.close() From numpy-svn at scipy.org Mon Nov 17 07:32:50 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 17 Nov 2008 06:32:50 -0600 (CST) Subject: [Numpy-svn] r6067 - branches/visualstudio_manifest/numpy/distutils Message-ID: <20081117123250.E0AB839C1EC@scipy.org> Author: cdavid Date: 2008-11-17 06:32:46 -0600 (Mon, 17 Nov 2008) New Revision: 6067 Modified: branches/visualstudio_manifest/numpy/distutils/mingw32ccompiler.py Log: Fix spelling. 
Modified: branches/visualstudio_manifest/numpy/distutils/mingw32ccompiler.py =================================================================== --- branches/visualstudio_manifest/numpy/distutils/mingw32ccompiler.py 2008-11-17 12:09:08 UTC (rev 6066) +++ branches/visualstudio_manifest/numpy/distutils/mingw32ccompiler.py 2008-11-17 12:32:46 UTC (rev 6067) @@ -304,7 +304,7 @@ maj = int(msvcv[5:6]) if not maj == int(msver): raise ValueError, \ - "Dyscrepancy between linked msvcr " \ + "Discrepancy between linked msvcr " \ "(%d) and the one about to be embedded " \ "(%d)" % (int(msver), maj) From numpy-svn at scipy.org Mon Nov 17 07:35:54 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 17 Nov 2008 06:35:54 -0600 (CST) Subject: [Numpy-svn] r6068 - trunk/numpy/random Message-ID: <20081117123554.A135039C088@scipy.org> Author: cdavid Date: 2008-11-17 06:35:43 -0600 (Mon, 17 Nov 2008) New Revision: 6068 Modified: trunk/numpy/random/setup.py Log: Simplify ftime workaround for python 2.6 under mingw: we only need to make _ftime an alias to _ftime64 when needed. Modified: trunk/numpy/random/setup.py =================================================================== --- trunk/numpy/random/setup.py 2008-11-17 12:32:46 UTC (rev 6067) +++ trunk/numpy/random/setup.py 2008-11-17 12:35:43 UTC (rev 6068) @@ -2,20 +2,17 @@ import os import sys from distutils.dep_util import newer -from numpy.distutils.misc_util import msvc_runtime_library +from distutils.msvccompiler import get_build_version as get_msvc_build_version -def msvc_version(): - """Return the msvc version used to build the running python, None if not - built with MSVC.""" - msc_pos = sys.version.find('MSC v.') - if msc_pos != -1: - return sys.version[msc_pos+6:msc_pos+10] - return None +def needs_mingw_ftime_workaround(config): + # We need the mingw workaround for _ftime if the msvc runtime version is + # 7.1 or above and we build with mingw + if config.compiler.compiler_type == 'mingw32': + msver = get_msvc_build_version() + if msver and msver > 7: + return True -def msvcrt_to_hex(msvc): - major = msvc / 10 - minor = msvc - major * 10 - return hex(major * 256 + minor) + return False def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration, get_mathlibs @@ -37,16 +34,9 @@ if not os.path.exists(dir): os.makedirs(dir) - msv = msvc_version() - if msv and msv >= 1400: - msvcrt = msvc_runtime_library() - if msvcrt is None: - raise ValueError("Discrepancy between " \ - "msvc_runtime_library " \ - "and our msvc detection scheme ?") - hmsvc = msvcrt_to_hex(int(msvcrt[5:])) + config_cmd = config.get_config_cmd() + if needs_mingw_ftime_workaround(config_cmd): defs.append("NPY_NEEDS_MINGW_TIME_WORKAROUND") - defs.append(("NPY_MSVCRT_VERSION", str(hmsvc))) if newer(__file__, target): target_f = open(target, 'a') From numpy-svn at scipy.org Mon Nov 17 07:36:30 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 17 Nov 2008 06:36:30 -0600 (CST) Subject: [Numpy-svn] r6069 - trunk/numpy/random/mtrand Message-ID: <20081117123630.22E6939C088@scipy.org> Author: cdavid Date: 2008-11-17 06:36:17 -0600 (Mon, 17 Nov 2008) New Revision: 6069 Modified: trunk/numpy/random/mtrand/randomkit.c Log: Use a wrapper around _ftime to work around a mingw bug in msvc runtimes import libraries. 
Modified: trunk/numpy/random/mtrand/randomkit.c =================================================================== --- trunk/numpy/random/mtrand/randomkit.c 2008-11-17 12:35:43 UTC (rev 6068) +++ trunk/numpy/random/mtrand/randomkit.c 2008-11-17 12:36:17 UTC (rev 6069) @@ -76,6 +76,15 @@ #ifdef _WIN32 /* Windows */ #include +#ifdef NPY_NEEDS_MINGW_TIME_WORKAROUND +/* mingw msvcr lib import wrongly export _ftime, which does not exist in the + * actual msvc runtime for version >= 8; we make it an alist to _ftime64, which + * is available in those versions of the runtime and should be ABI compatible + */ +#define _FTIME(x) _ftime64((x)) +#else +#define _FTIME(x) _ftime((x)) +#endif #include #ifndef RK_NO_WINCRYPT /* Windows crypto */ @@ -169,7 +178,7 @@ rk_seed(rk_hash(getpid()) ^ rk_hash(tv.tv_sec) ^ rk_hash(tv.tv_usec) ^ rk_hash(clock()), state); #else - _ftime(&tv); + _FTIME(&tv); rk_seed(rk_hash(tv.time) ^ rk_hash(tv.millitm) ^ rk_hash(clock()), state); #endif From numpy-svn at scipy.org Mon Nov 17 07:41:21 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 17 Nov 2008 06:41:21 -0600 (CST) Subject: [Numpy-svn] r6070 - trunk/numpy/random/mtrand Message-ID: <20081117124121.EFEE739C088@scipy.org> Author: cdavid Date: 2008-11-17 06:41:13 -0600 (Mon, 17 Nov 2008) New Revision: 6070 Modified: trunk/numpy/random/mtrand/randomkit.c Log: Set __MSVCRT_VERSION__ to make _ftime64 visible from time.h header when we need to workaround mingw bug. Modified: trunk/numpy/random/mtrand/randomkit.c =================================================================== --- trunk/numpy/random/mtrand/randomkit.c 2008-11-17 12:36:17 UTC (rev 6069) +++ trunk/numpy/random/mtrand/randomkit.c 2008-11-17 12:41:13 UTC (rev 6070) @@ -75,8 +75,10 @@ #ifdef _WIN32 /* Windows */ -#include #ifdef NPY_NEEDS_MINGW_TIME_WORKAROUND +/* FIXME: ideally, we should set this to the real version of MSVCRT. We need + * something higher than 0x601 to enable _ftime64 and co */ +#define __MSVCRT_VERSION__ 0x0700 /* mingw msvcr lib import wrongly export _ftime, which does not exist in the * actual msvc runtime for version >= 8; we make it an alist to _ftime64, which * is available in those versions of the runtime and should be ABI compatible @@ -85,6 +87,7 @@ #else #define _FTIME(x) _ftime((x)) #endif +#include #include #ifndef RK_NO_WINCRYPT /* Windows crypto */ From numpy-svn at scipy.org Mon Nov 17 07:49:51 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 17 Nov 2008 06:49:51 -0600 (CST) Subject: [Numpy-svn] r6071 - in branches/visualstudio_manifest: . numpy/core/code_generators numpy/core/src numpy/ma numpy/random numpy/random/mtrand Message-ID: <20081117124951.898B639C088@scipy.org> Author: cdavid Date: 2008-11-17 06:49:26 -0600 (Mon, 17 Nov 2008) New Revision: 6071 Modified: branches/visualstudio_manifest/ branches/visualstudio_manifest/numpy/core/code_generators/genapi.py branches/visualstudio_manifest/numpy/core/src/ufuncobject.c branches/visualstudio_manifest/numpy/core/src/umathmodule.c.src branches/visualstudio_manifest/numpy/ma/core.py branches/visualstudio_manifest/numpy/random/mtrand/randomkit.c branches/visualstudio_manifest/numpy/random/setup.py Log: Merged revisions 6051-6070 via svnmerge from http://svn.scipy.org/svn/numpy/trunk ........ r6053 | charris | 2008-11-17 12:27:28 +0900 (Mon, 17 Nov 2008) | 1 line Test moving generic loops to umathmodule. ........ r6054 | charris | 2008-11-17 12:27:46 +0900 (Mon, 17 Nov 2008) | 2 lines Add umathmodule.c.src to files scanned for ufunc api. 
This is preparation for splitting the umathmodule.c.src file. ........ r6055 | pierregm | 2008-11-17 16:00:42 +0900 (Mon, 17 Nov 2008) | 1 line simplify MAError ........ r6068 | cdavid | 2008-11-17 21:35:43 +0900 (Mon, 17 Nov 2008) | 1 line Simplify ftime workaround for python 2.6 under mingw: we only need to make _ftime an alias to _ftime64 when needed. ........ r6069 | cdavid | 2008-11-17 21:36:17 +0900 (Mon, 17 Nov 2008) | 1 line Use a wrapper around _ftime to work around a mingw bug in msvc runtimes import libraries. ........ r6070 | cdavid | 2008-11-17 21:41:13 +0900 (Mon, 17 Nov 2008) | 1 line Set __MSVCRT_VERSION__ to make _ftime64 visible from time.h header when we need to workaround mingw bug. ........ Property changes on: branches/visualstudio_manifest ___________________________________________________________________ Name: svnmerge-integrated - /branches/distutils-revamp:1-2752 /branches/multicore:1-3687 /trunk:1-6050 + /branches/distutils-revamp:1-2752 /branches/multicore:1-3687 /trunk:1-6070 Modified: branches/visualstudio_manifest/numpy/core/code_generators/genapi.py =================================================================== --- branches/visualstudio_manifest/numpy/core/code_generators/genapi.py 2008-11-17 12:41:13 UTC (rev 6070) +++ branches/visualstudio_manifest/numpy/core/code_generators/genapi.py 2008-11-17 12:49:26 UTC (rev 6071) @@ -18,6 +18,7 @@ 'multiarraymodule.c', 'scalartypes.inc.src', 'ufuncobject.c', + 'umathmodule.c.src' ] THIS_DIR = os.path.dirname(__file__) API_FILES = [os.path.join(THIS_DIR, '..', 'src', a) for a in API_FILES] Modified: branches/visualstudio_manifest/numpy/core/src/ufuncobject.c =================================================================== --- branches/visualstudio_manifest/numpy/core/src/ufuncobject.c 2008-11-17 12:41:13 UTC (rev 6070) +++ branches/visualstudio_manifest/numpy/core/src/ufuncobject.c 2008-11-17 12:49:26 UTC (rev 6071) @@ -28,377 +28,8 @@ #define USE_USE_DEFAULTS 1 -/****************************************************************************** - * Generic Real Floating Type Loops - *****************************************************************************/ -typedef float floatUnaryFunc(float x); -typedef double doubleUnaryFunc(double x); -typedef longdouble longdoubleUnaryFunc(longdouble x); -typedef float floatBinaryFunc(float x, float y); -typedef double doubleBinaryFunc(double x, double y); -typedef longdouble longdoubleBinaryFunc(longdouble x, longdouble y); - - -/*UFUNC_API*/ -static void -PyUFunc_f_f(char **args, intp *dimensions, intp *steps, void *func) -{ - floatUnaryFunc *f = (floatUnaryFunc *)func; - UNARY_LOOP { - const float in1 = *(float *)ip1; - *(float *)op1 = f(in1); - } -} - -/*UFUNC_API*/ -static void -PyUFunc_f_f_As_d_d(char **args, intp *dimensions, intp *steps, void *func) -{ - doubleUnaryFunc *f = (doubleUnaryFunc *)func; - UNARY_LOOP { - const float in1 = *(float *)ip1; - *(float *)op1 = (float)f((double)in1); - } -} - -/*UFUNC_API*/ -static void -PyUFunc_ff_f(char **args, intp *dimensions, intp *steps, void *func) -{ - floatBinaryFunc *f = (floatBinaryFunc *)func; - BINARY_LOOP { - float in1 = *(float *)ip1; - float in2 = *(float *)ip2; - *(float *)op1 = f(in1, in2); - } -} - -/*UFUNC_API*/ -static void -PyUFunc_ff_f_As_dd_d(char **args, intp *dimensions, intp *steps, void *func) -{ - doubleBinaryFunc *f = (doubleBinaryFunc *)func; - BINARY_LOOP { - float in1 = *(float *)ip1; - float in2 = *(float *)ip2; - *(float *)op1 = (double)f((double)in1, (double)in2); - } -} - -/*UFUNC_API*/ 
-static void -PyUFunc_d_d(char **args, intp *dimensions, intp *steps, void *func) -{ - doubleUnaryFunc *f = (doubleUnaryFunc *)func; - UNARY_LOOP { - double in1 = *(double *)ip1; - *(double *)op1 = f(in1); - } -} - -/*UFUNC_API*/ -static void -PyUFunc_dd_d(char **args, intp *dimensions, intp *steps, void *func) -{ - doubleBinaryFunc *f = (doubleBinaryFunc *)func; - BINARY_LOOP { - double in1 = *(double *)ip1; - double in2 = *(double *)ip2; - *(double *)op1 = f(in1, in2); - } -} - -/*UFUNC_API*/ -static void -PyUFunc_g_g(char **args, intp *dimensions, intp *steps, void *func) -{ - longdoubleUnaryFunc *f = (longdoubleUnaryFunc *)func; - UNARY_LOOP { - longdouble in1 = *(longdouble *)ip1; - *(longdouble *)op1 = f(in1); - } -} - -/*UFUNC_API*/ -static void -PyUFunc_gg_g(char **args, intp *dimensions, intp *steps, void *func) -{ - longdoubleBinaryFunc *f = (longdoubleBinaryFunc *)func; - BINARY_LOOP { - longdouble in1 = *(longdouble *)ip1; - longdouble in2 = *(longdouble *)ip2; - *(longdouble *)op1 = f(in1, in2); - } -} - - - -/****************************************************************************** - * Generic Complex Floating Type Loops - *****************************************************************************/ - - -typedef void cdoubleUnaryFunc(cdouble *x, cdouble *r); -typedef void cfloatUnaryFunc(cfloat *x, cfloat *r); -typedef void clongdoubleUnaryFunc(clongdouble *x, clongdouble *r); -typedef void cdoubleBinaryFunc(cdouble *x, cdouble *y, cdouble *r); -typedef void cfloatBinaryFunc(cfloat *x, cfloat *y, cfloat *r); -typedef void clongdoubleBinaryFunc(clongdouble *x, clongdouble *y, - clongdouble *r); - -/*UFUNC_API*/ -static void -PyUFunc_F_F(char **args, intp *dimensions, intp *steps, void *func) -{ - cfloatUnaryFunc *f = (cfloatUnaryFunc *)func; - UNARY_LOOP { - cfloat in1 = *(cfloat *)ip1; - cfloat *out = (cfloat *)op1; - f(&in1, out); - } -} - -/*UFUNC_API*/ -static void -PyUFunc_F_F_As_D_D(char **args, intp *dimensions, intp *steps, void *func) -{ - cdoubleUnaryFunc *f = (cdoubleUnaryFunc *)func; - UNARY_LOOP { - const float *in1 = (float *)ip1; - cdouble tmp = {(double)(in1[0]),(double)in1[1]}; - cdouble out; - f(&tmp, &out); - ((float *)op1)[0] = (float)out.real; - ((float *)op1)[1] = (float)out.imag; - } -} - -/*UFUNC_API*/ -static void -PyUFunc_FF_F(char **args, intp *dimensions, intp *steps, void *func) -{ - cfloatBinaryFunc *f = (cfloatBinaryFunc *)func; - BINARY_LOOP { - cfloat in1 = *(cfloat *)ip1; - cfloat in2 = *(cfloat *)ip2; - cfloat *out = (cfloat *)op1; - f(&in1, &in2, out); - } -} - -/*UFUNC_API*/ -static void -PyUFunc_FF_F_As_DD_D(char **args, intp *dimensions, intp *steps, void *func) -{ - cdoubleBinaryFunc *f = (cdoubleBinaryFunc *)func; - BINARY_LOOP { - const float *in1 = (float *)ip1; - const float *in2 = (float *)ip2; - cdouble tmp1 = {(double)(in1[0]),(double)in1[1]}; - cdouble tmp2 = {(double)(in2[0]),(double)in2[1]}; - cdouble out; - f(&tmp1, &tmp2, &out); - ((float *)op1)[0] = (float)out.real; - ((float *)op1)[1] = (float)out.imag; - } -} - -/*UFUNC_API*/ -static void -PyUFunc_D_D(char **args, intp *dimensions, intp *steps, void *func) -{ - cdoubleUnaryFunc *f = (cdoubleUnaryFunc *)func; - UNARY_LOOP { - cdouble in1 = *(cdouble *)ip1; - cdouble *out = (cdouble *)op1; - f(&in1, out); - } -} - -/*UFUNC_API*/ -static void -PyUFunc_DD_D(char **args, intp *dimensions, intp *steps, void *func) -{ - cdoubleBinaryFunc *f = (cdoubleBinaryFunc *)func; - BINARY_LOOP { - cdouble in1 = *(cdouble *)ip1; - cdouble in2 = *(cdouble *)ip2; - cdouble *out = 
(cdouble *)op1; - f(&in1, &in2, out); - } -} - -/*UFUNC_API*/ -static void -PyUFunc_G_G(char **args, intp *dimensions, intp *steps, void *func) -{ - clongdoubleUnaryFunc *f = (clongdoubleUnaryFunc *)func; - UNARY_LOOP { - clongdouble in1 = *(clongdouble *)ip1; - clongdouble *out = (clongdouble *)op1; - f(&in1, out); - } -} - -/*UFUNC_API*/ -static void -PyUFunc_GG_G(char **args, intp *dimensions, intp *steps, void *func) -{ - clongdoubleBinaryFunc *f = (clongdoubleBinaryFunc *)func; - BINARY_LOOP { - clongdouble in1 = *(clongdouble *)ip1; - clongdouble in2 = *(clongdouble *)ip2; - clongdouble *out = (clongdouble *)op1; - f(&in1, &in2, out); - } -} - - -/****************************************************************************** - * Generic Object Type Loops - *****************************************************************************/ - -/*UFUNC_API*/ -static void -PyUFunc_O_O(char **args, intp *dimensions, intp *steps, void *func) -{ - unaryfunc f = (unaryfunc)func; - UNARY_LOOP { - PyObject *in1 = *(PyObject **)ip1; - PyObject **out = (PyObject **)op1; - PyObject *ret = f(in1); - if ((ret == NULL) || PyErr_Occurred()) { - return; - } - Py_XDECREF(*out); - *out = ret; - } -} - -/*UFUNC_API*/ -static void -PyUFunc_O_O_method(char **args, intp *dimensions, intp *steps, void *func) -{ - char *meth = (char *)func; - UNARY_LOOP { - PyObject *in1 = *(PyObject **)ip1; - PyObject **out = (PyObject **)op1; - PyObject *ret = PyObject_CallMethod(in1, meth, NULL); - if (ret == NULL) { - return; - } - Py_XDECREF(*out); - *out = ret; - } -} - -/*UFUNC_API*/ -static void -PyUFunc_OO_O(char **args, intp *dimensions, intp *steps, void *func) -{ - binaryfunc f = (binaryfunc)func; - BINARY_LOOP { - PyObject *in1 = *(PyObject **)ip1; - PyObject *in2 = *(PyObject **)ip2; - PyObject **out = (PyObject **)op1; - PyObject *ret = f(in1, in2); - if (PyErr_Occurred()) { - return; - } - Py_XDECREF(*out); - *out = ret; - } -} - -/*UFUNC_API*/ -static void -PyUFunc_OO_O_method(char **args, intp *dimensions, intp *steps, void *func) -{ - char *meth = (char *)func; - BINARY_LOOP { - PyObject *in1 = *(PyObject **)ip1; - PyObject *in2 = *(PyObject **)ip2; - PyObject **out = (PyObject **)op1; - PyObject *ret = PyObject_CallMethod(in1, meth, "(O)", in2); - if (ret == NULL) { - return; - } - Py_XDECREF(*out); - *out = ret; - } -} - -/* - * A general-purpose ufunc that deals with general-purpose Python callable. 
- * func is a structure with nin, nout, and a Python callable function - */ - -/*UFUNC_API*/ -static void -PyUFunc_On_Om(char **args, intp *dimensions, intp *steps, void *func) -{ - intp n = dimensions[0]; - PyUFunc_PyFuncData *data = (PyUFunc_PyFuncData *)func; - int nin = data->nin; - int nout = data->nout; - PyObject *tocall = data->callable; - char *ptrs[NPY_MAXARGS]; - PyObject *arglist, *result; - PyObject *in, **op; - intp i, j, ntot; - - ntot = nin+nout; - - for(j = 0; j < ntot; j++) { - ptrs[j] = args[j]; - } - for(i = 0; i < n; i++) { - arglist = PyTuple_New(nin); - if (arglist == NULL) { - return; - } - for(j = 0; j < nin; j++) { - in = *((PyObject **)ptrs[j]); - if (in == NULL) { - Py_DECREF(arglist); - return; - } - PyTuple_SET_ITEM(arglist, j, in); - Py_INCREF(in); - } - result = PyEval_CallObject(tocall, arglist); - Py_DECREF(arglist); - if (result == NULL) { - return; - } - if PyTuple_Check(result) { - if (nout != PyTuple_Size(result)) { - Py_DECREF(result); - return; - } - for(j = 0; j < nout; j++) { - op = (PyObject **)ptrs[j+nin]; - Py_XDECREF(*op); - *op = PyTuple_GET_ITEM(result, j); - Py_INCREF(*op); - } - Py_DECREF(result); - } - else { - op = (PyObject **)ptrs[nin]; - Py_XDECREF(*op); - *op = result; - } - for(j = 0; j < ntot; j++) { - ptrs[j] += steps[j]; - } - } -} - - - /* ---------------------------------------------------------------- */ Modified: branches/visualstudio_manifest/numpy/core/src/umathmodule.c.src =================================================================== --- branches/visualstudio_manifest/numpy/core/src/umathmodule.c.src 2008-11-17 12:41:13 UTC (rev 6070) +++ branches/visualstudio_manifest/numpy/core/src/umathmodule.c.src 2008-11-17 12:49:26 UTC (rev 6071) @@ -606,7 +606,376 @@ intp i;\ for(i = 0; i < n; i++, ip1 += is1, ip2 += is2, op1 += os1, op2 += os2) +/****************************************************************************** + ** GENERIC FLOAT LOOPS ** + *****************************************************************************/ + + +typedef float floatUnaryFunc(float x); +typedef double doubleUnaryFunc(double x); +typedef longdouble longdoubleUnaryFunc(longdouble x); +typedef float floatBinaryFunc(float x, float y); +typedef double doubleBinaryFunc(double x, double y); +typedef longdouble longdoubleBinaryFunc(longdouble x, longdouble y); + + +/*UFUNC_API*/ +static void +PyUFunc_f_f(char **args, intp *dimensions, intp *steps, void *func) +{ + floatUnaryFunc *f = (floatUnaryFunc *)func; + UNARY_LOOP { + const float in1 = *(float *)ip1; + *(float *)op1 = f(in1); + } +} + +/*UFUNC_API*/ +static void +PyUFunc_f_f_As_d_d(char **args, intp *dimensions, intp *steps, void *func) +{ + doubleUnaryFunc *f = (doubleUnaryFunc *)func; + UNARY_LOOP { + const float in1 = *(float *)ip1; + *(float *)op1 = (float)f((double)in1); + } +} + +/*UFUNC_API*/ +static void +PyUFunc_ff_f(char **args, intp *dimensions, intp *steps, void *func) +{ + floatBinaryFunc *f = (floatBinaryFunc *)func; + BINARY_LOOP { + float in1 = *(float *)ip1; + float in2 = *(float *)ip2; + *(float *)op1 = f(in1, in2); + } +} + +/*UFUNC_API*/ +static void +PyUFunc_ff_f_As_dd_d(char **args, intp *dimensions, intp *steps, void *func) +{ + doubleBinaryFunc *f = (doubleBinaryFunc *)func; + BINARY_LOOP { + float in1 = *(float *)ip1; + float in2 = *(float *)ip2; + *(float *)op1 = (double)f((double)in1, (double)in2); + } +} + +/*UFUNC_API*/ +static void +PyUFunc_d_d(char **args, intp *dimensions, intp *steps, void *func) +{ + doubleUnaryFunc *f = (doubleUnaryFunc *)func; + 
UNARY_LOOP { + double in1 = *(double *)ip1; + *(double *)op1 = f(in1); + } +} + +/*UFUNC_API*/ +static void +PyUFunc_dd_d(char **args, intp *dimensions, intp *steps, void *func) +{ + doubleBinaryFunc *f = (doubleBinaryFunc *)func; + BINARY_LOOP { + double in1 = *(double *)ip1; + double in2 = *(double *)ip2; + *(double *)op1 = f(in1, in2); + } +} + +/*UFUNC_API*/ +static void +PyUFunc_g_g(char **args, intp *dimensions, intp *steps, void *func) +{ + longdoubleUnaryFunc *f = (longdoubleUnaryFunc *)func; + UNARY_LOOP { + longdouble in1 = *(longdouble *)ip1; + *(longdouble *)op1 = f(in1); + } +} + +/*UFUNC_API*/ +static void +PyUFunc_gg_g(char **args, intp *dimensions, intp *steps, void *func) +{ + longdoubleBinaryFunc *f = (longdoubleBinaryFunc *)func; + BINARY_LOOP { + longdouble in1 = *(longdouble *)ip1; + longdouble in2 = *(longdouble *)ip2; + *(longdouble *)op1 = f(in1, in2); + } +} + + + +/****************************************************************************** + ** GENERIC COMPLEX LOOPS ** + *****************************************************************************/ + + +typedef void cdoubleUnaryFunc(cdouble *x, cdouble *r); +typedef void cfloatUnaryFunc(cfloat *x, cfloat *r); +typedef void clongdoubleUnaryFunc(clongdouble *x, clongdouble *r); +typedef void cdoubleBinaryFunc(cdouble *x, cdouble *y, cdouble *r); +typedef void cfloatBinaryFunc(cfloat *x, cfloat *y, cfloat *r); +typedef void clongdoubleBinaryFunc(clongdouble *x, clongdouble *y, + clongdouble *r); + +/*UFUNC_API*/ +static void +PyUFunc_F_F(char **args, intp *dimensions, intp *steps, void *func) +{ + cfloatUnaryFunc *f = (cfloatUnaryFunc *)func; + UNARY_LOOP { + cfloat in1 = *(cfloat *)ip1; + cfloat *out = (cfloat *)op1; + f(&in1, out); + } +} + +/*UFUNC_API*/ +static void +PyUFunc_F_F_As_D_D(char **args, intp *dimensions, intp *steps, void *func) +{ + cdoubleUnaryFunc *f = (cdoubleUnaryFunc *)func; + UNARY_LOOP { + const float *in1 = (float *)ip1; + cdouble tmp = {(double)(in1[0]),(double)in1[1]}; + cdouble out; + f(&tmp, &out); + ((float *)op1)[0] = (float)out.real; + ((float *)op1)[1] = (float)out.imag; + } +} + +/*UFUNC_API*/ +static void +PyUFunc_FF_F(char **args, intp *dimensions, intp *steps, void *func) +{ + cfloatBinaryFunc *f = (cfloatBinaryFunc *)func; + BINARY_LOOP { + cfloat in1 = *(cfloat *)ip1; + cfloat in2 = *(cfloat *)ip2; + cfloat *out = (cfloat *)op1; + f(&in1, &in2, out); + } +} + +/*UFUNC_API*/ +static void +PyUFunc_FF_F_As_DD_D(char **args, intp *dimensions, intp *steps, void *func) +{ + cdoubleBinaryFunc *f = (cdoubleBinaryFunc *)func; + BINARY_LOOP { + const float *in1 = (float *)ip1; + const float *in2 = (float *)ip2; + cdouble tmp1 = {(double)(in1[0]),(double)in1[1]}; + cdouble tmp2 = {(double)(in2[0]),(double)in2[1]}; + cdouble out; + f(&tmp1, &tmp2, &out); + ((float *)op1)[0] = (float)out.real; + ((float *)op1)[1] = (float)out.imag; + } +} + +/*UFUNC_API*/ +static void +PyUFunc_D_D(char **args, intp *dimensions, intp *steps, void *func) +{ + cdoubleUnaryFunc *f = (cdoubleUnaryFunc *)func; + UNARY_LOOP { + cdouble in1 = *(cdouble *)ip1; + cdouble *out = (cdouble *)op1; + f(&in1, out); + } +} + +/*UFUNC_API*/ +static void +PyUFunc_DD_D(char **args, intp *dimensions, intp *steps, void *func) +{ + cdoubleBinaryFunc *f = (cdoubleBinaryFunc *)func; + BINARY_LOOP { + cdouble in1 = *(cdouble *)ip1; + cdouble in2 = *(cdouble *)ip2; + cdouble *out = (cdouble *)op1; + f(&in1, &in2, out); + } +} + +/*UFUNC_API*/ +static void +PyUFunc_G_G(char **args, intp *dimensions, intp *steps, void *func) +{ + 
clongdoubleUnaryFunc *f = (clongdoubleUnaryFunc *)func; + UNARY_LOOP { + clongdouble in1 = *(clongdouble *)ip1; + clongdouble *out = (clongdouble *)op1; + f(&in1, out); + } +} + +/*UFUNC_API*/ +static void +PyUFunc_GG_G(char **args, intp *dimensions, intp *steps, void *func) +{ + clongdoubleBinaryFunc *f = (clongdoubleBinaryFunc *)func; + BINARY_LOOP { + clongdouble in1 = *(clongdouble *)ip1; + clongdouble in2 = *(clongdouble *)ip2; + clongdouble *out = (clongdouble *)op1; + f(&in1, &in2, out); + } +} + + +/****************************************************************************** + ** GENERIC OBJECT lOOPS ** + *****************************************************************************/ + +/*UFUNC_API*/ +static void +PyUFunc_O_O(char **args, intp *dimensions, intp *steps, void *func) +{ + unaryfunc f = (unaryfunc)func; + UNARY_LOOP { + PyObject *in1 = *(PyObject **)ip1; + PyObject **out = (PyObject **)op1; + PyObject *ret = f(in1); + if ((ret == NULL) || PyErr_Occurred()) { + return; + } + Py_XDECREF(*out); + *out = ret; + } +} + +/*UFUNC_API*/ +static void +PyUFunc_O_O_method(char **args, intp *dimensions, intp *steps, void *func) +{ + char *meth = (char *)func; + UNARY_LOOP { + PyObject *in1 = *(PyObject **)ip1; + PyObject **out = (PyObject **)op1; + PyObject *ret = PyObject_CallMethod(in1, meth, NULL); + if (ret == NULL) { + return; + } + Py_XDECREF(*out); + *out = ret; + } +} + +/*UFUNC_API*/ +static void +PyUFunc_OO_O(char **args, intp *dimensions, intp *steps, void *func) +{ + binaryfunc f = (binaryfunc)func; + BINARY_LOOP { + PyObject *in1 = *(PyObject **)ip1; + PyObject *in2 = *(PyObject **)ip2; + PyObject **out = (PyObject **)op1; + PyObject *ret = f(in1, in2); + if (PyErr_Occurred()) { + return; + } + Py_XDECREF(*out); + *out = ret; + } +} + +/*UFUNC_API*/ +static void +PyUFunc_OO_O_method(char **args, intp *dimensions, intp *steps, void *func) +{ + char *meth = (char *)func; + BINARY_LOOP { + PyObject *in1 = *(PyObject **)ip1; + PyObject *in2 = *(PyObject **)ip2; + PyObject **out = (PyObject **)op1; + PyObject *ret = PyObject_CallMethod(in1, meth, "(O)", in2); + if (ret == NULL) { + return; + } + Py_XDECREF(*out); + *out = ret; + } +} + /* + * A general-purpose ufunc that deals with general-purpose Python callable. 
+ * func is a structure with nin, nout, and a Python callable function + */ + +/*UFUNC_API*/ +static void +PyUFunc_On_Om(char **args, intp *dimensions, intp *steps, void *func) +{ + intp n = dimensions[0]; + PyUFunc_PyFuncData *data = (PyUFunc_PyFuncData *)func; + int nin = data->nin; + int nout = data->nout; + PyObject *tocall = data->callable; + char *ptrs[NPY_MAXARGS]; + PyObject *arglist, *result; + PyObject *in, **op; + intp i, j, ntot; + + ntot = nin+nout; + + for(j = 0; j < ntot; j++) { + ptrs[j] = args[j]; + } + for(i = 0; i < n; i++) { + arglist = PyTuple_New(nin); + if (arglist == NULL) { + return; + } + for(j = 0; j < nin; j++) { + in = *((PyObject **)ptrs[j]); + if (in == NULL) { + Py_DECREF(arglist); + return; + } + PyTuple_SET_ITEM(arglist, j, in); + Py_INCREF(in); + } + result = PyEval_CallObject(tocall, arglist); + Py_DECREF(arglist); + if (result == NULL) { + return; + } + if PyTuple_Check(result) { + if (nout != PyTuple_Size(result)) { + Py_DECREF(result); + return; + } + for(j = 0; j < nout; j++) { + op = (PyObject **)ptrs[j+nin]; + Py_XDECREF(*op); + *op = PyTuple_GET_ITEM(result, j); + Py_INCREF(*op); + } + Py_DECREF(result); + } + else { + op = (PyObject **)ptrs[nin]; + Py_XDECREF(*op); + *op = result; + } + for(j = 0; j < ntot; j++) { + ptrs[j] += steps[j]; + } + } +} + +/* ***************************************************************************** ** BOOLEAN LOOPS ** ***************************************************************************** Modified: branches/visualstudio_manifest/numpy/ma/core.py =================================================================== --- branches/visualstudio_manifest/numpy/ma/core.py 2008-11-17 12:41:13 UTC (rev 6070) +++ branches/visualstudio_manifest/numpy/ma/core.py 2008-11-17 12:49:26 UTC (rev 6071) @@ -95,15 +95,9 @@ #####-------------------------------------------------------------------------- class MAError(Exception): "Class for MA related errors." - def __init__ (self, args=None): - "Creates an exception." - Exception.__init__(self, args) - self.args = args - def __str__(self): - "Calculates the string representation." - return str(self.args) - __repr__ = __str__ + pass + #####-------------------------------------------------------------------------- #---- --- Filling options --- #####-------------------------------------------------------------------------- @@ -4102,8 +4096,27 @@ #.............................................................................. def asarray(a, dtype=None): - """asarray(data, dtype) = array(data, dtype, copy=0, subok=0) - + """ + Convert the input to a masked array. + + Parameters + ---------- + a : array_like + Input data, in any form that can be converted to an array. This + includes lists, lists of tuples, tuples, tuples of tuples, tuples + of lists and ndarrays. + dtype : data-type, optional + By default, the data-type is inferred from the input data. + order : {'C', 'F'}, optional + Whether to use row-major ('C') or column-major ('FORTRAN') memory + representation. Defaults to 'C'. + + Returns + ------- + out : ndarray + MaskedArray interpretation of `a`. No copy is performed if the input + is already an ndarray. If `a` is a subclass of ndarray, a base + class ndarray is returned. Return a as a MaskedArray object of the given dtype. If dtype is not given or None, is is set to the dtype of a. No copy is performed if a is already an array. 
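Before the randomkit changes below, a brief aside on the generic loops relocated above: PyUFunc_On_Om, as its comment says, runs an arbitrary Python callable over the inputs, and it is the loop that backs ufuncs created from Python functions. A minimal illustration from the Python side (plain numpy usage added here for orientation, not part of the patch):

    import numpy as np

    # np.frompyfunc builds a ufunc around a Python callable; its inner loop is
    # the generic object loop above (nin inputs, nout outputs, object dtype).
    add3 = np.frompyfunc(lambda a, b, c: a + b + c, 3, 1)
    print(add3([1, 2], [10, 20], 100))    # -> [111 122], object dtype

Every element is a round trip through the interpreter, which is why these object loops carry the PyErr_Occurred checks and reference-count bookkeeping seen above rather than the plain C arithmetic of the typed loops.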
Modified: branches/visualstudio_manifest/numpy/random/mtrand/randomkit.c =================================================================== --- branches/visualstudio_manifest/numpy/random/mtrand/randomkit.c 2008-11-17 12:41:13 UTC (rev 6070) +++ branches/visualstudio_manifest/numpy/random/mtrand/randomkit.c 2008-11-17 12:49:26 UTC (rev 6071) @@ -75,6 +75,18 @@ #ifdef _WIN32 /* Windows */ +#ifdef NPY_NEEDS_MINGW_TIME_WORKAROUND +/* FIXME: ideally, we should set this to the real version of MSVCRT. We need + * something higher than 0x601 to enable _ftime64 and co */ +#define __MSVCRT_VERSION__ 0x0700 +/* mingw msvcr lib import wrongly export _ftime, which does not exist in the + * actual msvc runtime for version >= 8; we make it an alist to _ftime64, which + * is available in those versions of the runtime and should be ABI compatible + */ +#define _FTIME(x) _ftime64((x)) +#else +#define _FTIME(x) _ftime((x)) +#endif #include #include #ifndef RK_NO_WINCRYPT @@ -169,7 +181,7 @@ rk_seed(rk_hash(getpid()) ^ rk_hash(tv.tv_sec) ^ rk_hash(tv.tv_usec) ^ rk_hash(clock()), state); #else - _ftime(&tv); + _FTIME(&tv); rk_seed(rk_hash(tv.time) ^ rk_hash(tv.millitm) ^ rk_hash(clock()), state); #endif Modified: branches/visualstudio_manifest/numpy/random/setup.py =================================================================== --- branches/visualstudio_manifest/numpy/random/setup.py 2008-11-17 12:41:13 UTC (rev 6070) +++ branches/visualstudio_manifest/numpy/random/setup.py 2008-11-17 12:49:26 UTC (rev 6071) @@ -2,20 +2,17 @@ import os import sys from distutils.dep_util import newer -from numpy.distutils.misc_util import msvc_runtime_library +from distutils.msvccompiler import get_build_version as get_msvc_build_version -def msvc_version(): - """Return the msvc version used to build the running python, None if not - built with MSVC.""" - msc_pos = sys.version.find('MSC v.') - if msc_pos != -1: - return sys.version[msc_pos+6:msc_pos+10] - return None +def needs_mingw_ftime_workaround(config): + # We need the mingw workaround for _ftime if the msvc runtime version is + # 7.1 or above and we build with mingw + if config.compiler.compiler_type == 'mingw32': + msver = get_msvc_build_version() + if msver and msver > 7: + return True -def msvcrt_to_hex(msvc): - major = msvc / 10 - minor = msvc - major * 10 - return hex(major * 256 + minor) + return False def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration, get_mathlibs @@ -37,16 +34,9 @@ if not os.path.exists(dir): os.makedirs(dir) - msv = msvc_version() - if msv and msv >= 1400: - msvcrt = msvc_runtime_library() - if msvcrt is None: - raise ValueError("Discrepancy between " \ - "msvc_runtime_library " \ - "and our msvc detection scheme ?") - hmsvc = msvcrt_to_hex(int(msvcrt[5:])) + config_cmd = config.get_config_cmd() + if needs_mingw_ftime_workaround(config_cmd): defs.append("NPY_NEEDS_MINGW_TIME_WORKAROUND") - defs.append(("NPY_MSVCRT_VERSION", str(hmsvc))) if newer(__file__, target): target_f = open(target, 'a') From numpy-svn at scipy.org Mon Nov 17 08:52:55 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 17 Nov 2008 07:52:55 -0600 (CST) Subject: [Numpy-svn] r6072 - trunk/numpy/random/mtrand Message-ID: <20081117135255.DAD2C39C088@scipy.org> Author: cdavid Date: 2008-11-17 07:52:47 -0600 (Mon, 17 Nov 2008) New Revision: 6072 Modified: trunk/numpy/random/mtrand/randomkit.c Log: Fix the inaccurate comment regarding _ftime issues with mingw. 
Modified: trunk/numpy/random/mtrand/randomkit.c =================================================================== --- trunk/numpy/random/mtrand/randomkit.c 2008-11-17 12:49:26 UTC (rev 6071) +++ trunk/numpy/random/mtrand/randomkit.c 2008-11-17 13:52:47 UTC (rev 6072) @@ -80,8 +80,8 @@ * something higher than 0x601 to enable _ftime64 and co */ #define __MSVCRT_VERSION__ 0x0700 /* mingw msvcr lib import wrongly export _ftime, which does not exist in the - * actual msvc runtime for version >= 8; we make it an alist to _ftime64, which - * is available in those versions of the runtime and should be ABI compatible + * actual msvc runtime for version >= 8; we make it an alias to _ftime64, which + * is available in those versions of the runtime */ #define _FTIME(x) _ftime64((x)) #else From numpy-svn at scipy.org Mon Nov 17 08:53:15 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 17 Nov 2008 07:53:15 -0600 (CST) Subject: [Numpy-svn] r6073 - in trunk/numpy/random: . mtrand Message-ID: <20081117135315.7E09B39C088@scipy.org> Author: cdavid Date: 2008-11-17 07:53:05 -0600 (Mon, 17 Nov 2008) New Revision: 6073 Modified: trunk/numpy/random/mtrand/randomkit.c trunk/numpy/random/setup.py Log: Do not generate a config.h for randomkit: it does not work as it is, and adding per-subpackage include path is a PITA with distutils. Modified: trunk/numpy/random/mtrand/randomkit.c =================================================================== --- trunk/numpy/random/mtrand/randomkit.c 2008-11-17 13:52:47 UTC (rev 6072) +++ trunk/numpy/random/mtrand/randomkit.c 2008-11-17 13:53:05 UTC (rev 6073) @@ -64,8 +64,6 @@ /* static char const rcsid[] = "@(#) $Jeannot: randomkit.c,v 1.28 2005/07/21 22:14:09 js Exp $"; */ -#include "config.h" - #include #include #include Modified: trunk/numpy/random/setup.py =================================================================== --- trunk/numpy/random/setup.py 2008-11-17 13:52:47 UTC (rev 6072) +++ trunk/numpy/random/setup.py 2008-11-17 13:53:05 UTC (rev 6073) @@ -6,11 +6,12 @@ def needs_mingw_ftime_workaround(config): # We need the mingw workaround for _ftime if the msvc runtime version is - # 7.1 or above and we build with mingw - if config.compiler.compiler_type == 'mingw32': - msver = get_msvc_build_version() - if msver and msver > 7: - return True + # 7.1 or above and we build with mingw ... + # ... 
but we can't easily detect compiler version outside distutils command + # context, so we will need to detect in randomkit whether we build with gcc + msver = get_msvc_build_version() + if msver and msver > 7: + return True return False @@ -27,36 +28,22 @@ ext.libraries.extend(libs) return None - def generate_config_h(ext, build_dir): - defs = [] - target = join(build_dir, "mtrand", 'config.h') - dir = dirname(target) - if not os.path.exists(dir): - os.makedirs(dir) + defs = [] + if needs_mingw_ftime_workaround(): + defs.append(("NPY_NEEDS_MINGW_TIME_WORKAROUND", None)) - config_cmd = config.get_config_cmd() - if needs_mingw_ftime_workaround(config_cmd): - defs.append("NPY_NEEDS_MINGW_TIME_WORKAROUND") - - if newer(__file__, target): - target_f = open(target, 'a') - for d in defs: - if isinstance(d, str): - target_f.write('#define %s\n' % (d)) - target_f.close() - libs = [] # Configure mtrand config.add_extension('mtrand', sources=[join('mtrand', x) for x in ['mtrand.c', 'randomkit.c', 'initarray.c', - 'distributions.c']]+[generate_libraries] - + [generate_config_h], + 'distributions.c']]+[generate_libraries], libraries=libs, depends = [join('mtrand','*.h'), join('mtrand','*.pyx'), join('mtrand','*.pxi'), - ] + ], + define_macros = defs, ) config.add_data_files(('.', join('mtrand', 'randomkit.h'))) From numpy-svn at scipy.org Mon Nov 17 08:53:33 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 17 Nov 2008 07:53:33 -0600 (CST) Subject: [Numpy-svn] r6074 - trunk/numpy/random/mtrand Message-ID: <20081117135333.B703539C088@scipy.org> Author: cdavid Date: 2008-11-17 07:53:25 -0600 (Mon, 17 Nov 2008) New Revision: 6074 Modified: trunk/numpy/random/mtrand/randomkit.c Log: Conditionally setup mingw workaround on __GNUC__ since we can't detect if we are built with mingw in distutils setup.py. Modified: trunk/numpy/random/mtrand/randomkit.c =================================================================== --- trunk/numpy/random/mtrand/randomkit.c 2008-11-17 13:53:05 UTC (rev 6073) +++ trunk/numpy/random/mtrand/randomkit.c 2008-11-17 13:53:25 UTC (rev 6074) @@ -73,7 +73,9 @@ #ifdef _WIN32 /* Windows */ -#ifdef NPY_NEEDS_MINGW_TIME_WORKAROUND +/* XXX: we have to use this ugly defined(__GNUC__) because it is not easy to + * detect the compiler used in distutils itself */ +#if (defined(__GNUC__) && defined(NPY_NEEDS_MINGW_TIME_WORKAROUND)) /* FIXME: ideally, we should set this to the real version of MSVCRT. We need * something higher than 0x601 to enable _ftime64 and co */ #define __MSVCRT_VERSION__ 0x0700 From numpy-svn at scipy.org Mon Nov 17 08:53:48 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 17 Nov 2008 07:53:48 -0600 (CST) Subject: [Numpy-svn] r6075 - trunk/numpy/random Message-ID: <20081117135348.127F239C088@scipy.org> Author: cdavid Date: 2008-11-17 07:53:42 -0600 (Mon, 17 Nov 2008) New Revision: 6075 Modified: trunk/numpy/random/setup.py Log: Forgot to update needs_mingw_ftime_workaround function. 
Modified: trunk/numpy/random/setup.py =================================================================== --- trunk/numpy/random/setup.py 2008-11-17 13:53:25 UTC (rev 6074) +++ trunk/numpy/random/setup.py 2008-11-17 13:53:42 UTC (rev 6075) @@ -4,7 +4,7 @@ from distutils.dep_util import newer from distutils.msvccompiler import get_build_version as get_msvc_build_version -def needs_mingw_ftime_workaround(config): +def needs_mingw_ftime_workaround(): # We need the mingw workaround for _ftime if the msvc runtime version is # 7.1 or above and we build with mingw ... # ... but we can't easily detect compiler version outside distutils command From numpy-svn at scipy.org Mon Nov 17 08:54:06 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 17 Nov 2008 07:54:06 -0600 (CST) Subject: [Numpy-svn] r6076 - trunk/numpy/random/mtrand Message-ID: <20081117135406.8420139C088@scipy.org> Author: cdavid Date: 2008-11-17 07:53:58 -0600 (Mon, 17 Nov 2008) New Revision: 6076 Modified: trunk/numpy/random/mtrand/randomkit.c Log: Include time.h and sys/timeb.h just after defining our custom __MSVCRT_VERSION__ to avoid possible duplicate. Modified: trunk/numpy/random/mtrand/randomkit.c =================================================================== --- trunk/numpy/random/mtrand/randomkit.c 2008-11-17 13:53:42 UTC (rev 6075) +++ trunk/numpy/random/mtrand/randomkit.c 2008-11-17 13:53:58 UTC (rev 6076) @@ -79,6 +79,8 @@ /* FIXME: ideally, we should set this to the real version of MSVCRT. We need * something higher than 0x601 to enable _ftime64 and co */ #define __MSVCRT_VERSION__ 0x0700 +#include <time.h> +#include <sys/timeb.h> /* mingw msvcr lib import wrongly export _ftime, which does not exist in the * actual msvc runtime for version >= 8; we make it an alias to _ftime64, which * is available in those versions of the runtime @@ -87,8 +89,6 @@ */ #define _FTIME(x) _ftime64((x)) #else #define _FTIME(x) _ftime((x)) #endif -#include <time.h> -#include <sys/timeb.h> #ifndef RK_NO_WINCRYPT /* Windows crypto */ #ifndef _WIN32_WINNT From numpy-svn at scipy.org Mon Nov 17 08:58:31 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 17 Nov 2008 07:58:31 -0600 (CST) Subject: [Numpy-svn] r6077 - in branches/visualstudio_manifest: . numpy/random numpy/random/mtrand Message-ID: <20081117135831.154EC39C088@scipy.org> Author: cdavid Date: 2008-11-17 07:58:23 -0600 (Mon, 17 Nov 2008) New Revision: 6077 Modified: branches/visualstudio_manifest/ branches/visualstudio_manifest/numpy/random/mtrand/randomkit.c branches/visualstudio_manifest/numpy/random/setup.py Log: Merged revisions 6071-6076 via svnmerge from http://svn.scipy.org/svn/numpy/trunk ........ r6072 | cdavid | 2008-11-17 22:52:47 +0900 (Mon, 17 Nov 2008) | 1 line Fix the inaccurate comment regarding _ftime issues with mingw. ........ r6073 | cdavid | 2008-11-17 22:53:05 +0900 (Mon, 17 Nov 2008) | 1 line Do not generate a config.h for randomkit: it does not work as it is, and adding per-subpackage include path is a PITA with distutils. ........ r6074 | cdavid | 2008-11-17 22:53:25 +0900 (Mon, 17 Nov 2008) | 1 line Conditionally setup mingw workaround on __GNUC__ since we can't detect if we are built with mingw in distutils setup.py. ........ r6075 | cdavid | 2008-11-17 22:53:42 +0900 (Mon, 17 Nov 2008) | 1 line Forgot to update needs_mingw_ftime_workaround function. ........ r6076 | cdavid | 2008-11-17 22:53:58 +0900 (Mon, 17 Nov 2008) | 1 line Include time.h and sys/timeb.h just after defining our custom __MSVCRT_VERSION__ to avoid possible duplicate. ........
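Taken together, r6073-r6076 drop the generated config.h in favour of a preprocessor define handed straight to the compiler. A condensed sketch of the resulting distutils-side flow (names taken from the diffs above; simplified, not the literal numpy/random/setup.py):

    from distutils.msvccompiler import get_build_version as get_msvc_build_version

    def needs_mingw_ftime_workaround():
        # r6075: the check looks only at the msvc runtime version python was
        # built against; whether we really compile with mingw/gcc is decided
        # later, inside randomkit.c, by the __GNUC__ guard added in r6074.
        msver = get_msvc_build_version()
        return bool(msver and msver > 7)

    defs = []
    if needs_mingw_ftime_workaround():
        defs.append(("NPY_NEEDS_MINGW_TIME_WORKAROUND", None))
    # defs is then passed as define_macros to config.add_extension(), so the
    # macro reaches randomkit.c without any per-subpackage config.h.

r6079 further down tightens the version test to >= 8, so the 7.1 runtime no longer triggers the workaround.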
Property changes on: branches/visualstudio_manifest ___________________________________________________________________ Name: svnmerge-integrated - /branches/distutils-revamp:1-2752 /branches/multicore:1-3687 /trunk:1-6070 + /branches/distutils-revamp:1-2752 /branches/multicore:1-3687 /trunk:1-6076 Modified: branches/visualstudio_manifest/numpy/random/mtrand/randomkit.c =================================================================== --- branches/visualstudio_manifest/numpy/random/mtrand/randomkit.c 2008-11-17 13:53:58 UTC (rev 6076) +++ branches/visualstudio_manifest/numpy/random/mtrand/randomkit.c 2008-11-17 13:58:23 UTC (rev 6077) @@ -64,8 +64,6 @@ /* static char const rcsid[] = "@(#) $Jeannot: randomkit.c,v 1.28 2005/07/21 22:14:09 js Exp $"; */ -#include "config.h" - #include #include #include @@ -75,20 +73,22 @@ #ifdef _WIN32 /* Windows */ -#ifdef NPY_NEEDS_MINGW_TIME_WORKAROUND +/* XXX: we have to use this ugly defined(__GNUC__) because it is not easy to + * detect the compiler used in distutils itself */ +#if (defined(__GNUC__) && defined(NPY_NEEDS_MINGW_TIME_WORKAROUND)) /* FIXME: ideally, we should set this to the real version of MSVCRT. We need * something higher than 0x601 to enable _ftime64 and co */ #define __MSVCRT_VERSION__ 0x0700 +#include +#include /* mingw msvcr lib import wrongly export _ftime, which does not exist in the - * actual msvc runtime for version >= 8; we make it an alist to _ftime64, which - * is available in those versions of the runtime and should be ABI compatible + * actual msvc runtime for version >= 8; we make it an alias to _ftime64, which + * is available in those versions of the runtime */ #define _FTIME(x) _ftime64((x)) #else #define _FTIME(x) _ftime((x)) #endif -#include -#include #ifndef RK_NO_WINCRYPT /* Windows crypto */ #ifndef _WIN32_WINNT Modified: branches/visualstudio_manifest/numpy/random/setup.py =================================================================== --- branches/visualstudio_manifest/numpy/random/setup.py 2008-11-17 13:53:58 UTC (rev 6076) +++ branches/visualstudio_manifest/numpy/random/setup.py 2008-11-17 13:58:23 UTC (rev 6077) @@ -4,13 +4,14 @@ from distutils.dep_util import newer from distutils.msvccompiler import get_build_version as get_msvc_build_version -def needs_mingw_ftime_workaround(config): +def needs_mingw_ftime_workaround(): # We need the mingw workaround for _ftime if the msvc runtime version is - # 7.1 or above and we build with mingw - if config.compiler.compiler_type == 'mingw32': - msver = get_msvc_build_version() - if msver and msver > 7: - return True + # 7.1 or above and we build with mingw ... + # ... 
but we can't easily detect compiler version outside distutils command + # context, so we will need to detect in randomkit whether we build with gcc + msver = get_msvc_build_version() + if msver and msver > 7: + return True return False @@ -27,36 +28,22 @@ ext.libraries.extend(libs) return None - def generate_config_h(ext, build_dir): - defs = [] - target = join(build_dir, "mtrand", 'config.h') - dir = dirname(target) - if not os.path.exists(dir): - os.makedirs(dir) + defs = [] + if needs_mingw_ftime_workaround(): + defs.append(("NPY_NEEDS_MINGW_TIME_WORKAROUND", None)) - config_cmd = config.get_config_cmd() - if needs_mingw_ftime_workaround(config_cmd): - defs.append("NPY_NEEDS_MINGW_TIME_WORKAROUND") - - if newer(__file__, target): - target_f = open(target, 'a') - for d in defs: - if isinstance(d, str): - target_f.write('#define %s\n' % (d)) - target_f.close() - libs = [] # Configure mtrand config.add_extension('mtrand', sources=[join('mtrand', x) for x in ['mtrand.c', 'randomkit.c', 'initarray.c', - 'distributions.c']]+[generate_libraries] - + [generate_config_h], + 'distributions.c']]+[generate_libraries], libraries=libs, depends = [join('mtrand','*.h'), join('mtrand','*.pyx'), join('mtrand','*.pxi'), - ] + ], + define_macros = defs, ) config.add_data_files(('.', join('mtrand', 'randomkit.h'))) From numpy-svn at scipy.org Mon Nov 17 09:09:50 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 17 Nov 2008 08:09:50 -0600 (CST) Subject: [Numpy-svn] r6078 - in trunk: . numpy/distutils numpy/distutils/command Message-ID: <20081117140950.08D9039C088@scipy.org> Author: cdavid Date: 2008-11-17 08:09:39 -0600 (Mon, 17 Nov 2008) New Revision: 6078 Modified: trunk/ trunk/numpy/distutils/command/config.py trunk/numpy/distutils/mingw32ccompiler.py Log: Merged revisions 6041-6077 via svnmerge from http://svn.scipy.org/svn/numpy/branches/visualstudio_manifest ................ r6042 | cdavid | 2008-11-16 18:04:37 +0900 (Sun, 16 Nov 2008) | 3 lines Initialized merge tracking via "svnmerge" with revisions "1-6041" from http://svn.scipy.org/svn/numpy/trunk ................ r6045 | cdavid | 2008-11-16 20:44:02 +0900 (Sun, 16 Nov 2008) | 1 line Add a function to get the content of the xml version of manifest to deal with VS. ................ r6046 | cdavid | 2008-11-16 20:44:18 +0900 (Sun, 16 Nov 2008) | 1 line Add comment on how to get exact MSVCR version. ................ r6047 | cdavid | 2008-11-16 20:44:33 +0900 (Sun, 16 Nov 2008) | 1 line Add manifest_rc to generate the .rc file which will be used to embed the manifest file. ................ r6048 | cdavid | 2008-11-16 20:44:54 +0900 (Sun, 16 Nov 2008) | 1 line Use directly the manifest name for generating the rc file + add docstring. ................ r6049 | cdavid | 2008-11-16 20:45:09 +0900 (Sun, 16 Nov 2008) | 1 line Forgot to change argument of manifest_rc. ................ r6051 | cdavid | 2008-11-16 22:02:09 +0900 (Sun, 16 Nov 2008) | 17 lines Merged revisions 6042-6050 via svnmerge from http://svn.scipy.org/svn/numpy/trunk ........ r6043 | jarrod.millman | 2008-11-16 18:14:01 +0900 (Sun, 16 Nov 2008) | 2 lines removing some quotes ........ r6044 | jarrod.millman | 2008-11-16 18:15:59 +0900 (Sun, 16 Nov 2008) | 2 lines adding back a few quotes ........ r6050 | cdavid | 2008-11-16 21:30:17 +0900 (Sun, 16 Nov 2008) | 1 line Add deprecation warning for get_output and try_run: we should not use it anymore. ........ ................ r6052 | cdavid | 2008-11-16 22:04:59 +0900 (Sun, 16 Nov 2008) | 1 line Trailing spaces. 
................ r6056 | cdavid | 2008-11-17 21:03:57 +0900 (Mon, 17 Nov 2008) | 1 line Detect whether config link needs embedding the manifest for the MSVC runtime. ................ r6057 | cdavid | 2008-11-17 21:04:27 +0900 (Mon, 17 Nov 2008) | 1 line Refactor msvc runtime checking, put it into mingw32compiler ................ r6058 | cdavid | 2008-11-17 21:05:01 +0900 (Mon, 17 Nov 2008) | 1 line Fix string formatting. ................ r6059 | cdavid | 2008-11-17 21:05:29 +0900 (Mon, 17 Nov 2008) | 1 line fix imports. ................ r6060 | cdavid | 2008-11-17 21:05:56 +0900 (Mon, 17 Nov 2008) | 1 line Generate the xml manifest file. ................ r6061 | cdavid | 2008-11-17 21:06:26 +0900 (Mon, 17 Nov 2008) | 1 line Separate function to get the configtest name. ................ r6062 | cdavid | 2008-11-17 21:06:58 +0900 (Mon, 17 Nov 2008) | 1 line Generate the rc file for manifest embedding. ................ r6063 | cdavid | 2008-11-17 21:07:36 +0900 (Mon, 17 Nov 2008) | 1 line Fix configtest and manifest_name. ................ r6064 | cdavid | 2008-11-17 21:08:08 +0900 (Mon, 17 Nov 2008) | 1 line temp_files is a list of filenames, not files. ................ r6065 | cdavid | 2008-11-17 21:08:38 +0900 (Mon, 17 Nov 2008) | 1 line Disable .rc generation for manifest: having the xml file in the same dir as the _configtest.exe is enough for now. ................ r6066 | cdavid | 2008-11-17 21:09:08 +0900 (Mon, 17 Nov 2008) | 1 line Refactor manifest generation and put it back into mingw32ccompiler module. ................ r6067 | cdavid | 2008-11-17 21:32:46 +0900 (Mon, 17 Nov 2008) | 1 line Fix spelling. ................ r6071 | cdavid | 2008-11-17 21:49:26 +0900 (Mon, 17 Nov 2008) | 30 lines Merged revisions 6051-6070 via svnmerge from http://svn.scipy.org/svn/numpy/trunk ........ r6053 | charris | 2008-11-17 12:27:28 +0900 (Mon, 17 Nov 2008) | 1 line Test moving generic loops to umathmodule. ........ r6054 | charris | 2008-11-17 12:27:46 +0900 (Mon, 17 Nov 2008) | 2 lines Add umathmodule.c.src to files scanned for ufunc api. This is preparation for splitting the umathmodule.c.src file. ........ r6055 | pierregm | 2008-11-17 16:00:42 +0900 (Mon, 17 Nov 2008) | 1 line simplify MAError ........ r6068 | cdavid | 2008-11-17 21:35:43 +0900 (Mon, 17 Nov 2008) | 1 line Simplify ftime workaround for python 2.6 under mingw: we only need to make _ftime an alias to _ftime64 when needed. ........ r6069 | cdavid | 2008-11-17 21:36:17 +0900 (Mon, 17 Nov 2008) | 1 line Use a wrapper around _ftime to work around a mingw bug in msvc runtimes import libraries. ........ r6070 | cdavid | 2008-11-17 21:41:13 +0900 (Mon, 17 Nov 2008) | 1 line Set __MSVCRT_VERSION__ to make _ftime64 visible from time.h header when we need to workaround mingw bug. ........ ................ r6077 | cdavid | 2008-11-17 22:58:23 +0900 (Mon, 17 Nov 2008) | 25 lines Merged revisions 6071-6076 via svnmerge from http://svn.scipy.org/svn/numpy/trunk ........ r6072 | cdavid | 2008-11-17 22:52:47 +0900 (Mon, 17 Nov 2008) | 1 line Fix the inaccurate comment regarding _ftime issues with mingw. ........ r6073 | cdavid | 2008-11-17 22:53:05 +0900 (Mon, 17 Nov 2008) | 1 line Do not generate a config.h for randomkit: it does not work as it is, and adding per-subpackage include path is a PITA with distutils. ........ r6074 | cdavid | 2008-11-17 22:53:25 +0900 (Mon, 17 Nov 2008) | 1 line Conditionally setup mingw workaround on __GNUC__ since we can't detect if we are built with mingw in distutils setup.py. ........ 
r6075 | cdavid | 2008-11-17 22:53:42 +0900 (Mon, 17 Nov 2008) | 1 line Forgot to update needs_mingw_ftime_workaround function. ........ r6076 | cdavid | 2008-11-17 22:53:58 +0900 (Mon, 17 Nov 2008) | 1 line Include time.h and sys/timeb.h just after defining our custom __MSVCRT_VERSION__ to avoid possible duplicate. ........ ................ Property changes on: trunk ___________________________________________________________________ Name: svnmerge-integrated - /branches/distutils-revamp:1-2752 /branches/multicore:1-3687 /branches/visualstudio_manifest:1-6040 /trunk:1-2871 + /branches/distutils-revamp:1-2752 /branches/multicore:1-3687 /branches/visualstudio_manifest:1-6077 /trunk:1-2871 Modified: trunk/numpy/distutils/command/config.py =================================================================== --- trunk/numpy/distutils/command/config.py 2008-11-17 13:58:23 UTC (rev 6077) +++ trunk/numpy/distutils/command/config.py 2008-11-17 14:09:39 UTC (rev 6078) @@ -11,6 +11,7 @@ from distutils import log from distutils.file_util import copy_file from numpy.distutils.exec_command import exec_command +from numpy.distutils.mingw32ccompiler import generate_manifest LANG_EXT['f77'] = '.f' LANG_EXT['f90'] = '.f90' @@ -110,6 +111,8 @@ if fileexists: continue log.warn('could not find library %r in directories %s' \ % (libname, library_dirs)) + elif self.compiler.compiler_type == 'mingw32': + generate_manifest(self) return self._wrap_method(old_config._link,lang, (body, headers, include_dirs, libraries, library_dirs, lang)) Modified: trunk/numpy/distutils/mingw32ccompiler.py =================================================================== --- trunk/numpy/distutils/mingw32ccompiler.py 2008-11-17 13:58:23 UTC (rev 6077) +++ trunk/numpy/distutils/mingw32ccompiler.py 2008-11-17 14:09:39 UTC (rev 6078) @@ -27,6 +27,7 @@ from distutils.errors import DistutilsExecError, CompileError, UnknownFileError from distutils.unixccompiler import UnixCCompiler +from distutils.msvccompiler import get_build_version as get_build_msvc_version from numpy.distutils.misc_util import msvc_runtime_library # the same as cygwin plus some additional parameters @@ -225,3 +226,119 @@ # msg = "Couldn't find import library, and failed to build it." # raise DistutilsPlatformError, msg return + +# Functions to deal with visual studio manifests. Manifest are a mechanism to +# enforce strong DLL versioning on windows, and has nothing to do with +# distutils MANIFEST. manifests are XML files with version info, and used by +# the OS loader; they are necessary when linking against a DLL no in the system +# path; in particular, python 2.6 is built against the MS runtime 9 (the one +# from VS 2008), which is not available on most windows systems; python 2.6 +# installer does install it in the Win SxS (Side by side) directory, but this +# requires the manifest too. This is a big mess, thanks MS for a wonderful +# system. + +# XXX: ideally, we should use exactly the same version as used by python, but I +# have no idea how to obtain the exact version from python. We could use the +# strings utility on python.exe, maybe ? +_MSVCRVER_TO_FULLVER = {'90': "9.0.21022.8"} + +def msvc_manifest_xml(maj, min): + """Given a major and minor version of the MSVCR, returns the + corresponding XML file.""" + try: + fullver = _MSVCRVER_TO_FULLVER[str(maj * 10 + min)] + except KeyError: + raise ValueError("Version %d,%d of MSVCRT not supported yet" \ + % (maj, min)) + # Don't be fooled, it looks like an XML, but it is not. 
In particular, it + # should not have any space before starting, and its size should be + # divisible by 4, most likely for alignement constraints when the xml is + # embedded in the binary... + # This template was copied directly from the python 2.6 binary (using + # strings.exe from mingw on python.exe). + template = """\ + + + + + + + + + + + + + +""" + + return template % {'fullver': fullver, 'maj': maj, 'min': min} + +def manifest_rc(name, type='dll'): + """Return the rc file used to generate the res file which will be embedded + as manifest for given manifest file name, of given type ('dll' or + 'exe'). + + Parameters + ---------- name: str + name of the manifest file to embed + type: str ('dll', 'exe') + type of the binary which will embed the manifest""" + if type == 'dll': + rctype = 2 + elif type == 'exe': + rctype = 1 + else: + raise ValueError("Type %s not supported" % type) + + return """\ +#include "winuser.h" +%d RT_MANIFEST %s""" % (rctype, name) + +def check_embedded_msvcr_match_linked(msver): + """msver is the ms runtime version used for the MANIFEST.""" + # check msvcr major version are the same for linking and + # embedding + msvcv = msvc_runtime_library() + if msvcv: + maj = int(msvcv[5:6]) + if not maj == int(msver): + raise ValueError, \ + "Discrepancy between linked msvcr " \ + "(%d) and the one about to be embedded " \ + "(%d)" % (int(msver), maj) + +def configtest_name(config): + base = os.path.basename(config._gen_temp_sourcefile("yo", [], "c")) + return os.path.splitext(base)[0] + +def manifest_name(config): + # Get configest name (including suffix) + root = configtest_name(config) + exext = config.compiler.exe_extension + return root + exext + ".manifest" + +def rc_name(config): + # Get configest name (including suffix) + root = configtest_name(config) + return root + ".rc" + +def generate_manifest(config): + msver = get_build_msvc_version() + if msver is not None: + if msver >= 8: + check_embedded_msvcr_match_linked(msver) + ma = int(msver) + mi = int((msver - ma) * 10) + # Write the manifest file + manxml = msvc_manifest_xml(ma, mi) + man = open(manifest_name(config), "w") + config.temp_files.append(manifest_name(config)) + man.write(manxml) + man.close() + # # Write the rc file + # manrc = manifest_rc(manifest_name(self), "exe") + # rc = open(rc_name(self), "w") + # self.temp_files.append(manrc) + # rc.write(manrc) + # rc.close() From numpy-svn at scipy.org Mon Nov 17 09:19:32 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 17 Nov 2008 08:19:32 -0600 (CST) Subject: [Numpy-svn] r6079 - trunk/numpy/random Message-ID: <20081117141932.0C83639C088@scipy.org> Author: cdavid Date: 2008-11-17 08:19:19 -0600 (Mon, 17 Nov 2008) New Revision: 6079 Modified: trunk/numpy/random/setup.py Log: Do not use mingw workaround for msvc runtime 7.1. Modified: trunk/numpy/random/setup.py =================================================================== --- trunk/numpy/random/setup.py 2008-11-17 14:09:39 UTC (rev 6078) +++ trunk/numpy/random/setup.py 2008-11-17 14:19:19 UTC (rev 6079) @@ -10,7 +10,7 @@ # ... 
but we can't easily detect compiler version outside distutils command # context, so we will need to detect in randomkit whether we build with gcc msver = get_msvc_build_version() - if msver and msver > 7: + if msver and msver >= 8: return True return False From numpy-svn at scipy.org Mon Nov 17 09:19:58 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 17 Nov 2008 08:19:58 -0600 (CST) Subject: [Numpy-svn] r6080 - trunk/numpy/random/mtrand Message-ID: <20081117141958.3CD7B39C088@scipy.org> Author: cdavid Date: 2008-11-17 08:19:54 -0600 (Mon, 17 Nov 2008) New Revision: 6080 Modified: trunk/numpy/random/mtrand/randomkit.c Log: Forgot to add time headers when mingw workaround is not used on windows. Modified: trunk/numpy/random/mtrand/randomkit.c =================================================================== --- trunk/numpy/random/mtrand/randomkit.c 2008-11-17 14:19:19 UTC (rev 6079) +++ trunk/numpy/random/mtrand/randomkit.c 2008-11-17 14:19:54 UTC (rev 6080) @@ -87,6 +87,8 @@ */ #define _FTIME(x) _ftime64((x)) #else +#include +#include #define _FTIME(x) _ftime((x)) #endif #ifndef RK_NO_WINCRYPT From numpy-svn at scipy.org Wed Nov 19 07:30:30 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 19 Nov 2008 06:30:30 -0600 (CST) Subject: [Numpy-svn] r6081 - trunk/numpy/random Message-ID: <20081119123030.2C25B39C05F@scipy.org> Author: cdavid Date: 2008-11-19 06:30:26 -0600 (Wed, 19 Nov 2008) New Revision: 6081 Modified: trunk/numpy/random/SConscript Log: Handle ftime mingw problem with numscons. Modified: trunk/numpy/random/SConscript =================================================================== --- trunk/numpy/random/SConscript 2008-11-17 14:19:54 UTC (rev 6080) +++ trunk/numpy/random/SConscript 2008-11-19 12:30:26 UTC (rev 6081) @@ -1,9 +1,11 @@ -# Last Change: Thu Jun 12 06:00 PM 2008 J +# Last Change: Wed Nov 19 09:00 PM 2008 J # vim:syntax=python import os from numscons import GetNumpyEnvironment, scons_get_mathlib +from setup import needs_mingw_ftime_workaround + def CheckWincrypt(context): from copy import deepcopy src = """\ @@ -37,6 +39,9 @@ if config.CheckWincrypt: config.env.AppendUnique(LIBS = 'Advapi32') +if needs_mingw_ftime_workaround(): + env.Append(CPPDEFINES=['NPY_NEEDS_MINGW_TIME_WORKAROUND']) + sources = [os.path.join('mtrand', x) for x in ['mtrand.c', 'randomkit.c', 'initarray.c', 'distributions.c']] From numpy-svn at scipy.org Wed Nov 19 07:30:58 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 19 Nov 2008 06:30:58 -0600 (CST) Subject: [Numpy-svn] r6082 - trunk/numpy/random Message-ID: <20081119123058.906E439C05F@scipy.org> Author: cdavid Date: 2008-11-19 06:30:55 -0600 (Wed, 19 Nov 2008) New Revision: 6082 Modified: trunk/numpy/random/SConscript Log: Forgot to finish the configure context in numscons build of random. 
Modified: trunk/numpy/random/SConscript =================================================================== --- trunk/numpy/random/SConscript 2008-11-19 12:30:26 UTC (rev 6081) +++ trunk/numpy/random/SConscript 2008-11-19 12:30:55 UTC (rev 6082) @@ -38,6 +38,7 @@ config = env.NumpyConfigure(custom_tests = {'CheckWincrypt' : CheckWincrypt}) if config.CheckWincrypt: config.env.AppendUnique(LIBS = 'Advapi32') + config.Finish() if needs_mingw_ftime_workaround(): env.Append(CPPDEFINES=['NPY_NEEDS_MINGW_TIME_WORKAROUND']) From numpy-svn at scipy.org Wed Nov 19 10:00:56 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 19 Nov 2008 09:00:56 -0600 (CST) Subject: [Numpy-svn] r6083 - trunk/numpy/ma Message-ID: <20081119150056.1115039C088@scipy.org> Author: pierregm Date: 2008-11-19 09:00:37 -0600 (Wed, 19 Nov 2008) New Revision: 6083 Modified: trunk/numpy/ma/core.py Log: make_mask : Added a dtype keyword to support flexible-dtype mask_or : Added support to flexible-dtype. Modified: trunk/numpy/ma/core.py =================================================================== --- trunk/numpy/ma/core.py 2008-11-19 12:30:55 UTC (rev 6082) +++ trunk/numpy/ma/core.py 2008-11-19 15:00:37 UTC (rev 6083) @@ -13,13 +13,14 @@ Improvements suggested by Reggie Dugard (reggie_AT_merfinllc_DOT_com) :author: Pierre Gerard-Marchant -:contact: pierregm_at_uga_dot_edu + + """ __author__ = "Pierre GF Gerard-Marchant" __docformat__ = "restructuredtext en" __all__ = ['MAError', 'MaskType', 'MaskedArray', - 'bool_', 'complex_', 'float_', 'int_', 'object_', + 'bool_', 'abs', 'absolute', 'add', 'all', 'allclose', 'allequal', 'alltrue', 'amax', 'amin', 'anom', 'anomalies', 'any', 'arange', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2', @@ -38,8 +39,8 @@ 'isMA', 'isMaskedArray', 'is_mask', 'is_masked', 'isarray', 'left_shift', 'less', 'less_equal', 'load', 'loads', 'log', 'log10', 'logical_and', 'logical_not', 'logical_or', 'logical_xor', - 'make_mask', 'make_mask_descr', 'make_mask_none', 'mask_or', 'masked', - 'masked_array', 'masked_equal', 'masked_greater', + 'make_mask', 'make_mask_descr', 'make_mask_none', 'mask_or', + 'masked', 'masked_array', 'masked_equal', 'masked_greater', 'masked_greater_equal', 'masked_inside', 'masked_invalid', 'masked_less','masked_less_equal', 'masked_not_equal', 'masked_object','masked_outside', 'masked_print_option', @@ -61,8 +62,7 @@ import operator import numpy as np -from numpy import ndarray, amax, amin, iscomplexobj, bool_, complex_, float_,\ - int_, object_ +from numpy import ndarray, amax, amin, iscomplexobj, bool_ from numpy import array as narray import numpy.core.umath as umath @@ -79,8 +79,13 @@ def doc_note(initialdoc, note): + """ + Adds a Notes section to an existing docstring. + """ if initialdoc is None: return + if note is None: + return initialdoc newdoc = """ %s @@ -121,7 +126,8 @@ def default_fill_value(obj): - """Calculate the default fill value for the argument object. + """ + Calculate the default fill value for the argument object. """ if hasattr(obj,'dtype'): @@ -144,15 +150,16 @@ return defval def minimum_fill_value(obj): - """Calculate the default fill value suitable for taking the - minimum of ``obj``. + """ + Calculate the default fill value suitable for taking the minimum of ``obj``. """ + errmsg = "Unsuitable type for calculating minimum." if hasattr(obj, 'dtype'): objtype = obj.dtype filler = min_filler[objtype] if filler is None: - raise TypeError, 'Unsuitable type for calculating minimum.' 
+ raise TypeError(errmsg) return filler elif isinstance(obj, float): return min_filler[ntypes.typeDict['float_']] @@ -163,18 +170,19 @@ elif isinstance(obj, np.dtype): return min_filler[obj] else: - raise TypeError, 'Unsuitable type for calculating minimum.' + raise TypeError(errmsg) def maximum_fill_value(obj): - """Calculate the default fill value suitable for taking the maximum - of ``obj``. + """ + Calculate the default fill value suitable for taking the maximum of ``obj``. """ + errmsg = "Unsuitable type for calculating maximum." if hasattr(obj, 'dtype'): objtype = obj.dtype filler = max_filler[objtype] if filler is None: - raise TypeError, 'Unsuitable type for calculating minimum.' + raise TypeError(errmsg) return filler elif isinstance(obj, float): return max_filler[ntypes.typeDict['float_']] @@ -185,10 +193,21 @@ elif isinstance(obj, np.dtype): return max_filler[obj] else: - raise TypeError, 'Unsuitable type for calculating minimum.' + raise TypeError(errmsg) def _check_fill_value(fill_value, ndtype): + """ + Private function validating the given `fill_value` for the given dtype. + + If fill_value is None, it is set to the default corresponding to the dtype + if this latter is standard (no fields). If the datatype is flexible (named + fields), fill_value is set to a tuple whose elements are the default fill + values corresponding to each field. + + If fill_value is not None, its value is forced to the given dtype. + + """ ndtype = np.dtype(ndtype) fields = ndtype.fields if fill_value is None: @@ -206,7 +225,7 @@ fill_value = np.array(fill_value, copy=False, dtype=fdtype) except ValueError: err_msg = "Unable to transform %s to dtype %s" - raise ValueError(err_msg % (fill_value,fdtype)) + raise ValueError(err_msg % (fill_value, fdtype)) else: fval = np.resize(fill_value, len(ndtype.descr)) fill_value = [np.asarray(f).astype(desc[1]).item() @@ -225,9 +244,18 @@ def set_fill_value(a, fill_value): - """Set the filling value of a, if a is a masked array. Otherwise, + """ + Set the filling value of a, if a is a masked array. Otherwise, do nothing. + Parameters + ---------- + a : ndarray + Input array + fill_value : var + Filling value. A consistency test is performed to make sure + the value is compatible with the dtype of a. + Returns ------- None @@ -238,7 +266,8 @@ return def get_fill_value(a): - """Return the filling value of a, if any. Otherwise, returns the + """ + Return the filling value of a, if any. Otherwise, returns the default filling value for that type. """ @@ -249,7 +278,8 @@ return result def common_fill_value(a, b): - """Return the common filling value of a and b, if any. + """ + Return the common filling value of a and b, if any. If a and b have different filling values, returns None. """ @@ -261,18 +291,21 @@ #####-------------------------------------------------------------------------- -def filled(a, value = None): - """Return a as an array with masked data replaced by value. If - value is None, get_fill_value(a) is used instead. If a is already - a ndarray, a itself is returned. +def filled(a, fill_value = None): + """ + Return `a` as an array where masked data have been replaced by `value`. + + If `a` is not a MaskedArray, `a` itself is returned. + If `a` is a MaskedArray and `fill_value` is None, `fill_value` is set to + `a.fill_value`. Parameters ---------- a : maskedarray or array_like An input object. - value : {var}, optional - Filling value. If not given, the output of get_fill_value(a) - is used instead. + fill_value : {var}, optional + Filling value. 
If None, the output of :func:`get_fill_value(a)` is used + instead. Returns ------- @@ -280,7 +313,7 @@ """ if hasattr(a, 'filled'): - return a.filled(value) + return a.filled(fill_value) elif isinstance(a, ndarray): # Should we check for contiguity ? and a.flags['CONTIGUOUS']: return a @@ -291,8 +324,9 @@ #####-------------------------------------------------------------------------- def get_masked_subclass(*arrays): - """Return the youngest subclass of MaskedArray from a list of - (masked) arrays. In case of siblings, the first takes over. + """ + Return the youngest subclass of MaskedArray from a list of (masked) arrays. + In case of siblings, the first listed takes over. """ if len(arrays) == 1: @@ -313,13 +347,14 @@ #####-------------------------------------------------------------------------- def get_data(a, subok=True): - """Return the _data part of a (if any), or a as a ndarray. + """ + Return the `_data` part of `a` if `a` is a MaskedArray, or `a` itself. Parameters ---------- a : array_like A ndarray or a subclass of. - subok : bool + subok : {True, False}, optional Whether to force the output to a 'pure' ndarray (False) or to return a subclass of ndarray if approriate (True). @@ -332,7 +367,8 @@ getdata = get_data def fix_invalid(a, mask=nomask, copy=True, fill_value=None): - """Return (a copy of) a where invalid data (nan/inf) are masked + """ + Return (a copy of) a where invalid data (nan/inf) are masked and replaced by fill_value. Note that a copy is performed by default (just in case...). @@ -342,7 +378,7 @@ a : array_like A (subclass of) ndarray. copy : bool - Whether to use a copy of a (True) or to fix a in place (False). + Whether to use a copy of `a` (True) or to fix `a` in place (False). fill_value : {var}, optional Value used for fixing invalid data. If not given, the output of get_fill_value(a) is used instead. @@ -643,9 +679,9 @@ mb = mask_or(mb, t) # The following line controls the domain filling if t.size == d2.size: - d2 = np.where(t,self.filly,d2) + d2 = np.where(t, self.filly, d2) else: - d2 = np.where(np.resize(t, d2.shape),self.filly, d2) + d2 = np.where(np.resize(t, d2.shape), self.filly, d2) m = mask_or(ma, mb) if (not m.ndim) and m: return masked @@ -781,9 +817,13 @@ return mask def is_mask(m): - """Return True if m is a legal mask. + """ + Return True if m is a valid, standard mask. - Does not check contents, only type. + Notes + ----- + This function does not check contents, only the type. In particular, + this function returns False if the mask has a flexible dtype. """ try: @@ -791,8 +831,9 @@ except AttributeError: return False -def make_mask(m, copy=False, shrink=True, flag=None): - """Return m as a mask, creating a copy if necessary or requested. +def make_mask(m, copy=False, shrink=True, flag=None, dtype=MaskType): + """ + Return m as a mask, creating a copy if necessary or requested. The function can accept any sequence of integers or nomask. Does not check that contents must be 0s and 1s. @@ -805,6 +846,10 @@ Whether to return a copy of m (True) or m itself (False). shrink : bool Whether to shrink m to nomask if all its values are False. + dtype : dtype + Data-type of the output mask. By default, the output mask has + a dtype of MaskType (bool). If the dtype is flexible, each field + has a boolean dtype. 
""" if flag is not None: @@ -814,31 +859,38 @@ if m is nomask: return nomask elif isinstance(m, ndarray): + # We won't return after this point to make sure we can shrink the mask + # Fill the mask in case there are missing data m = filled(m, True) - if m.dtype.type is MaskType: + # Make sure the input dtype is valid + dtype = make_mask_descr(dtype) + if m.dtype == dtype: if copy: - result = narray(m, dtype=MaskType, copy=copy) + result = m.copy() else: result = m else: - result = narray(m, dtype=MaskType) + result = np.array(m, dtype=dtype, copy=copy) else: - result = narray(filled(m, True), dtype=MaskType) + result = np.array(filled(m, True), dtype=MaskType) # Bas les masques ! if shrink and not result.any(): return nomask else: return result + def make_mask_none(newshape, dtype=None): - """Return a mask of shape s, filled with False. + """ + Return a mask of shape s, filled with False. Parameters ---------- news : tuple A tuple indicating the shape of the final mask. dtype: {None, dtype}, optional - A dtype. + If None, use MaskType. Otherwise, use a new datatype with the same fields + as `dtype` with boolean type. """ if dtype is None: @@ -848,7 +900,8 @@ return result def mask_or (m1, m2, copy=False, shrink=True): - """Return the combination of two masks m1 and m2. + """ + Return the combination of two masks m1 and m2. The masks are combined with the *logical_or* operator, treating nomask as False. The result may equal m1 or m2 if the other is @@ -865,13 +918,28 @@ shrink : {True, False}, optional Whether to shrink m to nomask if all its values are False. + Raises + ------ + ValueError + If m1 and m2 have different flexible dtypes. + """ - if m1 is nomask: - return make_mask(m2, copy=copy, shrink=shrink) - if m2 is nomask: - return make_mask(m1, copy=copy, shrink=shrink) + if (m1 is nomask) or (m1 is False): + dtype = getattr(m2, 'dtype', MaskType) + return make_mask(m2, copy=copy, shrink=shrink, dtype=dtype) + if (m2 is nomask) or (m2 is False): + dtype = getattr(m1, 'dtype', MaskType) + return make_mask(m1, copy=copy, shrink=shrink, dtype=dtype) if m1 is m2 and is_mask(m1): return m1 + (dtype1, dtype2) = (getattr(m1, 'dtype', None), getattr(m2, 'dtype', None)) + if (dtype1 != dtype2): + raise ValueError("Incompatible dtypes '%s'<>'%s'" % (dtype1, dtype2)) + if dtype1.names: + newmask = np.empty_like(m1) + for n in dtype1.names: + newmask[n] = umath.logical_or(m1[n], m2[n]) + return newmask return make_mask(umath.logical_or(m1, m2), copy=copy, shrink=shrink) @@ -894,6 +962,7 @@ Whether to return a copy of ``a`` (True) or modify ``a`` in place (False). """ + # Make sure that condition is a valid standard-type mask. cond = make_mask(condition) a = np.array(a, copy=copy, subok=True) @@ -911,7 +980,11 @@ return result def masked_greater(x, value, copy=True): - "Shortcut to masked_where, with condition = (x > value)." + """ + Return the array `x` masked where (x > value). + Any value of mask already masked is kept masked. + + """ return masked_where(greater(x, value), x, copy=copy) def masked_greater_equal(x, value, copy=True): @@ -1181,6 +1254,7 @@ a[index] = value def next(self): + "Returns the next element of the iterator." d = self.ma_iter.next() if self.maskiter is not None and self.maskiter.next(): d = masked @@ -1253,7 +1327,7 @@ _data = np.array(data, dtype=dtype, copy=copy, subok=True, ndmin=ndmin) _baseclass = getattr(data, '_baseclass', type(_data)) # Check that we'ew not erasing the mask.......... 
- if isinstance(data,MaskedArray) and (data.shape != _data.shape): + if isinstance(data, MaskedArray) and (data.shape != _data.shape): copy = True # Careful, cls might not always be MaskedArray... if not isinstance(data, cls) or not subok: @@ -1358,7 +1432,7 @@ # We need to copy the _basedict to avoid backward propagation _optinfo = {} _optinfo.update(getattr(obj, '_optinfo', {})) - _optinfo.update(getattr(obj, '_basedict',{})) + _optinfo.update(getattr(obj, '_basedict', {})) if not isinstance(obj, MaskedArray): _optinfo.update(getattr(obj, '__dict__', {})) _dict = dict(_fill_value=getattr(obj, '_fill_value', None), @@ -1454,7 +1528,7 @@ else: output = ndarray.view(self, dtype, type) # Should we update the mask ? - if (getattr(output,'_mask', nomask) is not nomask): + if (getattr(output, '_mask', nomask) is not nomask): if dtype is None: dtype = output.dtype mdtype = make_mask_descr(dtype) @@ -1633,13 +1707,13 @@ indices is not supported. """ - return self.__getitem__(slice(i,j)) + return self.__getitem__(slice(i, j)) #........................ def __setslice__(self, i, j, value): """x.__setslice__(i, j, value) <==> x[i:j]=value - Set the slice (i,j) of a to value. If value is masked, mask - those locations. + Set the slice (i,j) of a to value. If value is masked, mask + those locations. """ self.__setitem__(slice(i,j), value) @@ -2022,7 +2096,7 @@ parameters = dict(name=name, data=str(self), mask=str(self._mask), fill=str(self.fill_value), dtype=str(self.dtype)) if self.dtype.names: - if n<= 1: + if n <= 1: return with_mask1_flx % parameters return with_mask_flx % parameters elif n <= 1: @@ -2149,13 +2223,13 @@ result = self._data.imag.view(type(self)) result.__setmask__(self._mask) return result - imag = property(fget=get_imag,doc="Imaginary part") + imag = property(fget=get_imag, doc="Imaginary part.") def get_real(self): result = self._data.real.view(type(self)) result.__setmask__(self._mask) return result - real = property(fget=get_real,doc="Real part") + real = property(fget=get_real, doc="Real part") #............................................ @@ -2310,7 +2384,7 @@ Parameters ---------- - indicies : 1-D array_like + indices : 1-D array_like Target indices, interpreted as integers. values : array_like Values to place in self._data copy at target indices. @@ -2380,34 +2454,34 @@ #............................................ def all(self, axis=None, out=None): """ - Check if all of the elements of `a` are true. + Check if all of the elements of `a` are true. - Performs a :func:`logical_and` over the given axis and returns the result. - Masked values are considered as True during computation. - For convenience, the output array is masked where ALL the values along the - current axis are masked: if the output would have been a scalar and that - all the values are masked, then the output is `masked`. + Performs a :func:`logical_and` over the given axis and returns the result. + Masked values are considered as True during computation. + For convenience, the output array is masked where ALL the values along the + current axis are masked: if the output would have been a scalar and that + all the values are masked, then the output is `masked`. - Parameters - ---------- - axis : {None, integer} - Axis to perform the operation over. - If None, perform over flattened array. - out : {None, array}, optional - Array into which the result can be placed. Its type is preserved - and it must be of the right shape to hold the output. 
+ Parameters + ---------- + axis : {None, integer} + Axis to perform the operation over. + If None, perform over flattened array. + out : {None, array}, optional + Array into which the result can be placed. Its type is preserved + and it must be of the right shape to hold the output. - See Also - -------- - all : equivalent function + See Also + -------- + all : equivalent function - Examples - -------- - >>> np.ma.array([1,2,3]).all() - True - >>> a = np.ma.array([1,2,3], mask=True) - >>> (a.all() is np.ma.masked) - True + Examples + -------- + >>> np.ma.array([1,2,3]).all() + True + >>> a = np.ma.array([1,2,3], mask=True) + >>> (a.all() is np.ma.masked) + True """ mask = self._mask.all(axis) @@ -2462,19 +2536,20 @@ def nonzero(self): - """Return the indices of the elements of a that are not zero - nor masked, as a tuple of arrays. + """ + Return the indices of the elements of a that are not zero + nor masked, as a tuple of arrays. - There are as many tuples as dimensions of a, each tuple - contains the indices of the non-zero elements in that - dimension. The corresponding non-zero values can be obtained - with ``a[a.nonzero()]``. + There are as many tuples as dimensions of a, each tuple + contains the indices of the non-zero elements in that + dimension. The corresponding non-zero values can be obtained + with ``a[a.nonzero()]``. - To group the indices by element, rather than dimension, use - instead: ``transpose(a.nonzero())``. + To group the indices by element, rather than dimension, use + instead: ``transpose(a.nonzero())``. - The result of this is always a 2d array, with a row for each - non-zero element. + The result of this is always a 2d array, with a row for each + non-zero element. """ return narray(self.filled(0), copy=False).nonzero() @@ -2684,7 +2759,7 @@ return result # Explicit output result = self.filled(1).prod(axis, dtype=dtype, out=out) - if isinstance(out,MaskedArray): + if isinstance(out, MaskedArray): outmask = getattr(out, '_mask', nomask) if (outmask is nomask): outmask = out._mask = make_mask_none(out.shape) @@ -2824,7 +2899,7 @@ def std(self, axis=None, dtype=None, out=None, ddof=0): "" - dvar = self.var(axis=axis,dtype=dtype,out=out, ddof=ddof) + dvar = self.var(axis=axis, dtype=dtype, out=out, ddof=ddof) if dvar is not masked: dvar = sqrt(dvar) if out is not None: @@ -3039,7 +3114,7 @@ """ if self._mask is nomask: - ndarray.sort(self,axis=axis, kind=kind, order=order) + ndarray.sort(self, axis=axis, kind=kind, order=order) else: if fill_value is None: if endwith: @@ -3700,7 +3775,15 @@ def compressed(x): - """Return a 1-D array of all the non-masked data.""" + """ + Return a 1-D array of all the non-masked data. + + See Also + -------- + MaskedArray.compressed + equivalent method + + """ if getmask(x) is nomask: return np.asanyarray(x) else: @@ -3737,11 +3820,12 @@ def expand_dims(x,axis): - """Expand the shape of the array by including a new axis before + """ + Expand the shape of the array by including a new axis before the given one. 
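For the nonzero docstring reflowed above, the documented behaviour boils down to treating masked entries like zeros; a quick illustrative sketch, not part of the patch:

    import numpy as np
    import numpy.ma as ma

    x = ma.array([0, 1, 2, 3], mask=[0, 0, 1, 0])
    x.nonzero()                # (array([1, 3]),): zero and masked entries are skipped
    np.transpose(x.nonzero())  # indices grouped per element, one row each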
""" - result = n_expand_dims(x,axis) + result = n_expand_dims(x, axis) if isinstance(x, MaskedArray): new_shape = result.shape result = x.view() @@ -4148,8 +4232,8 @@ """ if not hasattr(F,'readline'): - F = open(F,'w') - return cPickle.dump(a,F) + F = open(F, 'w') + return cPickle.dump(a, F) def dumps(a): """ From numpy-svn at scipy.org Thu Nov 20 04:01:59 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 20 Nov 2008 03:01:59 -0600 (CST) Subject: [Numpy-svn] r6084 - trunk/numpy/ma/tests Message-ID: <20081120090159.9C28439C343@scipy.org> Author: cdavid Date: 2008-11-20 03:01:53 -0600 (Thu, 20 Nov 2008) New Revision: 6084 Modified: trunk/numpy/ma/tests/test_core.py Log: Import np_ in ma tests since the import is not available in numpy.ma anymore. Modified: trunk/numpy/ma/tests/test_core.py =================================================================== --- trunk/numpy/ma/tests/test_core.py 2008-11-19 15:00:37 UTC (rev 6083) +++ trunk/numpy/ma/tests/test_core.py 2008-11-20 09:01:53 UTC (rev 6084) @@ -12,6 +12,7 @@ import numpy as np import numpy.core.fromnumeric as fromnumeric from numpy import ndarray +from numpy import int_ from numpy.ma.testutils import * import numpy.ma.core From numpy-svn at scipy.org Thu Nov 20 04:13:17 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 20 Nov 2008 03:13:17 -0600 (CST) Subject: [Numpy-svn] r6085 - trunk/numpy/lib/tests Message-ID: <20081120091317.DB8DD39C343@scipy.org> Author: cdavid Date: 2008-11-20 03:13:07 -0600 (Thu, 20 Nov 2008) New Revision: 6085 Modified: trunk/numpy/lib/tests/test_io.py Log: Fix ambiguous assert. Modified: trunk/numpy/lib/tests/test_io.py =================================================================== --- trunk/numpy/lib/tests/test_io.py 2008-11-20 09:01:53 UTC (rev 6084) +++ trunk/numpy/lib/tests/test_io.py 2008-11-20 09:13:07 UTC (rev 6085) @@ -65,7 +65,7 @@ c = StringIO.StringIO() np.savetxt(c, a) c.seek(0) - assert(c.readlines(), + assert(c.readlines() == ['1.000000000000000000e+00 2.000000000000000000e+00\n', '3.000000000000000000e+00 4.000000000000000000e+00\n']) From numpy-svn at scipy.org Fri Nov 21 12:12:49 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 21 Nov 2008 11:12:49 -0600 (CST) Subject: [Numpy-svn] r6086 - in trunk/numpy/ma: . tests Message-ID: <20081121171249.2447C39C088@scipy.org> Author: pierregm Date: 2008-11-21 11:12:47 -0600 (Fri, 21 Nov 2008) New Revision: 6086 Modified: trunk/numpy/ma/core.py trunk/numpy/ma/tests/test_core.py Log: * Prevent the shape of a 1d-singleton to be lost when interacting with masked (bug fix #948) Modified: trunk/numpy/ma/core.py =================================================================== --- trunk/numpy/ma/core.py 2008-11-20 09:13:07 UTC (rev 6085) +++ trunk/numpy/ma/core.py 2008-11-21 17:12:47 UTC (rev 6086) @@ -565,7 +565,7 @@ m = mask_or(getmask(a), getmask(b)) (d1, d2) = (get_data(a), get_data(b)) result = self.f(d1, d2, *args, **kwargs).view(get_masked_subclass(a, b)) - if result.size > 1: + if len(result.shape): if m is not nomask: result._mask = make_mask_none(result.shape) result._mask.flat = m @@ -874,7 +874,7 @@ else: result = np.array(filled(m, True), dtype=MaskType) # Bas les masques ! - if shrink and not result.any(): + if shrink and (not result.dtype.names) and (not result.any()): return nomask else: return result @@ -1590,17 +1590,17 @@ # if getmask(indx) is not nomask: # msg = "Masked arrays must be filled before they can be used as indices!" 
# raise IndexError, msg - dout = ndarray.__getitem__(ndarray.view(self,ndarray), indx) + dout = ndarray.__getitem__(ndarray.view(self, ndarray), indx) # We could directly use ndarray.__getitem__ on self... # But then we would have to modify __array_finalize__ to prevent the # mask of being reshaped if it hasn't been set up properly yet... # So it's easier to stick to the current version _mask = self._mask - if not getattr(dout,'ndim', False): + if not getattr(dout, 'ndim', False): # A record ................ if isinstance(dout, np.void): mask = _mask[indx] - if mask.view((bool,len(mask.dtype))).any(): + if mask.view((bool, len(mask.dtype))).any(): dout = masked_array(dout, mask=mask) else: return dout @@ -1656,7 +1656,7 @@ _mask = self._mask = make_mask_none(self.shape, _dtype) # Now, set the mask to its value. if nbfields: - _mask[indx] = tuple([True,] * nbfields) + _mask[indx] = tuple([True] * nbfields) else: _mask[indx] = True if not self._isfield: @@ -1795,7 +1795,7 @@ if _mask.size > 1: axis = 1 else: - axis=None + axis = None # try: return _mask.view((bool_, len(self.dtype))).all(axis) @@ -2197,7 +2197,7 @@ new_mask = mask_or(other_mask, invalid) self._mask = mask_or(self._mask, new_mask) # The following line is potentially problematic, as we change _data... - np.putmask(self._data,invalid,self.fill_value) + np.putmask(self._data, invalid, self.fill_value) return self #............................................ def __float__(self): @@ -2857,7 +2857,7 @@ if not axis: return (self - m) else: - return (self - expand_dims(m,axis)) + return (self - expand_dims(m, axis)) def var(self, axis=None, dtype=None, out=None, ddof=0): "" @@ -3124,7 +3124,8 @@ else: filler = fill_value idx = np.indices(self.shape) - idx[axis] = self.filled(filler).argsort(axis=axis,kind=kind,order=order) + idx[axis] = self.filled(filler).argsort(axis=axis, kind=kind, + order=order) idx_l = idx.tolist() tmp_mask = self._mask[idx_l].flat tmp_data = self._data[idx_l].flat @@ -3315,11 +3316,11 @@ nbdims = self.ndim dtypesize = len(self.dtype) if nbdims == 0: - return tuple([None]*dtypesize) + return tuple([None] * dtypesize) elif nbdims == 1: maskedidx = _mask.nonzero()[0].tolist() if dtypesize: - nodata = tuple([None]*dtypesize) + nodata = tuple([None] * dtypesize) else: nodata = None [operator.setitem(result,i,nodata) for i in maskedidx] Modified: trunk/numpy/ma/tests/test_core.py =================================================================== --- trunk/numpy/ma/tests/test_core.py 2008-11-20 09:13:07 UTC (rev 6085) +++ trunk/numpy/ma/tests/test_core.py 2008-11-21 17:12:47 UTC (rev 6086) @@ -12,7 +12,6 @@ import numpy as np import numpy.core.fromnumeric as fromnumeric from numpy import ndarray -from numpy import int_ from numpy.ma.testutils import * import numpy.ma.core @@ -70,7 +69,7 @@ self.failUnless(not isMaskedArray(x)) self.failUnless(isMaskedArray(xm)) self.failUnless((xm-ym).filled(0).any()) - fail_if_equal(xm.mask.astype(int_), ym.mask.astype(int_)) + fail_if_equal(xm.mask.astype(int), ym.mask.astype(int)) s = x.shape assert_equal(np.shape(xm), s) assert_equal(xm.shape, s) @@ -594,8 +593,13 @@ assert_equal(y.shape, x.shape) assert_equal(y._mask, [True, True]) + def test_arithmetic_with_masked_singleton_on_1d_singleton(self): + "Check that we're not losing the shape of a singleton" + x = masked_array([1, ]) + y = x + masked + assert_equal(y.shape, x.shape) + assert_equal(y.mask, [True, ]) - def test_scalar_arithmetic(self): x = array(0, mask=0) assert_equal(x.filled().ctypes.data, x.ctypes.data) @@ 
-1728,7 +1732,7 @@ x = array(zip([1,2,3], [1.1,2.2,3.3], ['one','two','thr']), - dtype=[('a',int_),('b',float),('c','|S8')]) + dtype=[('a',int),('b',float),('c','|S8')]) x[-1] = masked assert_equal(x.tolist(), [(1,1.1,'one'),(2,2.2,'two'),(None,None,None)]) @@ -1953,7 +1957,7 @@ m2XX = array(data=XX,mask=m2.reshape(XX.shape)) self.d = (x,X,XX,m,mx,mX,mXX,m2x,m2X,m2XX) - #------------------------------------------------------ + def test_varstd(self): "Tests var & std on MaskedArrays." (x,X,XX,m,mx,mX,mXX,m2x,m2X,m2XX) = self.d @@ -2017,6 +2021,7 @@ assert_equal(masked_where(not_equal(x,2), x), masked_not_equal(x,2)) assert_equal(masked_where([1,1,0,0,0], [1,2,3,4,5]), [99,99,3,4,5]) + def test_masked_where_oddities(self): """Tests some generic features.""" atest = ones((10,10,10), dtype=float) @@ -2024,6 +2029,7 @@ ctest = masked_where(btest,atest) assert_equal(atest,ctest) + def test_masked_where_shape_constraint(self): a = arange(10) try: @@ -2141,11 +2147,12 @@ tmp[(xm<=2).filled(True)] = True assert_equal(d._mask, tmp) # - ixm = xm.astype(int_) + ixm = xm.astype(int) d = where(ixm>2, ixm, masked) assert_equal(d, [-9,-9,-9,-9, -9, 4, -9, -9, 10, -9, -9, 3]) assert_equal(d.dtype, ixm.dtype) + def test_where_with_masked_choice(self): x = arange(10) x[3] = masked @@ -2286,6 +2293,70 @@ test = make_mask_descr(ntype) assert_equal(test, np.dtype(np.bool)) + + def test_make_mask(self): + "Test make_mask" + # w/ a list as an input + mask = [0,1] + test = make_mask(mask) + assert_equal(test.dtype, MaskType) + assert_equal(test, [0,1]) + # w/ a ndarray as an input + mask = np.array([0,1], dtype=np.bool) + test = make_mask(mask) + assert_equal(test.dtype, MaskType) + assert_equal(test, [0,1]) + # w/ a flexible-type ndarray as an input - use default + mdtype = [('a', np.bool), ('b', np.bool)] + mask = np.array([(0, 0), (0, 1)], dtype=mdtype) + test = make_mask(mask) + assert_equal(test.dtype, MaskType) + assert_equal(test, [1,1]) + # w/ a flexible-type ndarray as an input - use input dtype + mdtype = [('a', np.bool), ('b', np.bool)] + mask = np.array([(0, 0), (0, 1)], dtype=mdtype) + test = make_mask(mask, dtype=mask.dtype) + assert_equal(test.dtype, mdtype) + assert_equal(test, mask) + # w/ a flexible-type ndarray as an input - use input dtype + mdtype = [('a', np.float), ('b', np.float)] + bdtype = [('a', np.bool), ('b', np.bool)] + mask = np.array([(0, 0), (0, 1)], dtype=mdtype) + test = make_mask(mask, dtype=mask.dtype) + assert_equal(test.dtype, bdtype) + assert_equal(test, np.array([(0, 0), (0, 1)], dtype=bdtype)) + + + def test_mask_or(self): + # Initialize + mtype = [('a', np.bool), ('b', np.bool)] + mask = np.array([(0, 0), (0, 1), (1, 0), (0, 0)], dtype=mtype) + # Test using nomask as input + test = mask_or(mask, nomask) + assert_equal(test, mask) + test = mask_or(nomask, mask) + assert_equal(test, mask) + # Using False as input + test = mask_or(mask, False) + assert_equal(test, mask) + # Using True as input. 
Won't work, but keep it for the kicks + #test = ma.mask_or(mask, True) + #control = np.array([(1, 1), (1, 1), (1, 1), (1, 1)], dtype=mtype) + #assert_equal(test, control) + # Using another array w/ the same dtype + other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=mtype) + test = mask_or(mask, other) + control = np.array([(0, 1), (0, 1), (1, 1), (0, 1)], dtype=mtype) + assert_equal(test, control) + # Using another array w/ a different dtype + othertype = [('A', np.bool), ('B', np.bool)] + other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=othertype) + try: + test = mask_or(mask, other) + except ValueError: + pass + + #------------------------------------------------------------------------------ class TestMaskedFields(TestCase): @@ -2408,6 +2479,8 @@ assert_equal_records(a[-2]._mask, a._mask[-2]) +#------------------------------------------------------------------------------ + class TestMaskedView(TestCase): # def setUp(self): From numpy-svn at scipy.org Fri Nov 21 15:49:35 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 21 Nov 2008 14:49:35 -0600 (CST) Subject: [Numpy-svn] r6087 - in trunk/numpy/ma: . tests Message-ID: <20081121204935.E17B939C088@scipy.org> Author: pierregm Date: 2008-11-21 14:49:33 -0600 (Fri, 21 Nov 2008) New Revision: 6087 Modified: trunk/numpy/ma/core.py trunk/numpy/ma/tests/test_core.py Log: Rewrote allclose to allow comparison with a scalar Modified: trunk/numpy/ma/core.py =================================================================== --- trunk/numpy/ma/core.py 2008-11-21 17:12:47 UTC (rev 6086) +++ trunk/numpy/ma/core.py 2008-11-21 20:49:33 UTC (rev 6087) @@ -1105,10 +1105,10 @@ Whether to collapse a mask full of False to nomask """ - abs = umath.absolute + mabs = umath.absolute xnew = filled(x, value) if issubclass(xnew.dtype.type, np.floating): - condition = umath.less_equal(abs(xnew-value), atol+rtol*abs(value)) + condition = umath.less_equal(mabs(xnew-value), atol + rtol*mabs(value)) mask = getattr(x, '_mask', nomask) else: condition = umath.equal(xnew, value) @@ -1446,7 +1446,7 @@ self.__dict__.update(_optinfo) return #........................ - def __array_finalize__(self,obj): + def __array_finalize__(self, obj): """Finalizes the masked array. """ # Get main attributes ......... @@ -1574,7 +1574,7 @@ if self._mask is nomask: output._mask = nomask else: - output._mask = self._mask.astype([(n,bool) for n in names]) + output._mask = self._mask.astype([(n, bool) for n in names]) # Don't check _fill_value if it's None, that'll speed things up if self._fill_value is not None: output._fill_value = _check_fill_value(self._fill_value, newtype) @@ -1685,7 +1685,7 @@ ndarray.__setitem__(_mask, indx, mval) elif hasattr(indx, 'dtype') and (indx.dtype==MaskType): indx = indx * umath.logical_not(_mask) - ndarray.__setitem__(_data,indx,dval) + ndarray.__setitem__(_data, indx, dval) else: if nbfields: err_msg = "Flexible 'hard' masks are not yet supported..." @@ -1716,7 +1716,7 @@ those locations. """ - self.__setitem__(slice(i,j), value) + self.__setitem__(slice(i, j), value) #............................................ def __setmask__(self, mask, copy=False): """Set the mask. @@ -2220,12 +2220,14 @@ return int(self.item()) #............................................ def get_imag(self): + "Returns the imaginary part." result = self._data.imag.view(type(self)) result.__setmask__(self._mask) return result imag = property(fget=get_imag, doc="Imaginary part.") def get_real(self): + "Returns the real part." 
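The get_imag/get_real docstrings added above describe views that carry the mask along; roughly, as an illustrative sketch and not part of the patch:

    import numpy.ma as ma

    z = ma.array([1 + 2j, 3 + 4j], mask=[0, 1])
    z.real    # [1.0, --]: masked wherever z is masked
    z.imag    # [2.0, --]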
result = self._data.real.view(type(self)) result.__setmask__(self._mask) return result @@ -2234,14 +2236,14 @@ #............................................ def count(self, axis=None): - """Count the non-masked elements of the array along the given - axis. + """ + Count the non-masked elements of the array along the given axis. Parameters ---------- axis : int, optional - Axis along which to count the non-masked elements. If - not given, all the non masked elements are counted. + Axis along which to count the non-masked elements. If axis is None, + all the non masked elements are counted. Returns ------- @@ -3447,9 +3449,11 @@ (self.__class__, self._baseclass, (0,), 'b', ), self.__getstate__()) # - def __deepcopy__(self, memo={}): + def __deepcopy__(self, memo=None): from copy import deepcopy copied = MaskedArray.__new__(type(self), self, copy=True) + if memo is None: + memo = {} memo[id(self)] = copied for (k,v) in self.__dict__.iteritems(): copied.__dict__[k] = deepcopy(v, memo) @@ -3687,16 +3691,16 @@ fa = getdata(a) fb = getdata(b) # Get the type of the result (so that we preserve subclasses) - if isinstance(a,MaskedArray): + if isinstance(a, MaskedArray): basetype = type(a) else: basetype = MaskedArray # Get the result and view it as a (subclass of) MaskedArray - result = umath.power(fa,fb).view(basetype) + result = umath.power(fa, fb).view(basetype) # Find where we're in trouble w/ NaNs and Infs invalid = np.logical_not(np.isfinite(result.view(ndarray))) # Retrieve some extra attributes if needed - if isinstance(result,MaskedArray): + if isinstance(result, MaskedArray): result._update_from(a) # Add the initial mask if m is not nomask: @@ -3770,7 +3774,7 @@ filler = fill_value # return indx = np.indices(a.shape).tolist() - indx[axis] = filled(a,filler).argsort(axis=axis,kind=kind,order=order) + indx[axis] = filled(a, filler).argsort(axis=axis, kind=kind, order=order) return a[indx] sort.__doc__ = MaskedArray.sort.__doc__ @@ -3820,7 +3824,7 @@ count.__doc__ = MaskedArray.count.__doc__ -def expand_dims(x,axis): +def expand_dims(x, axis): """ Expand the shape of the array by including a new axis before the given one. @@ -4160,24 +4164,76 @@ else: return False -def allclose (a, b, fill_value=True, rtol=1.e-5, atol=1.e-8): - """ Return True if all elements of a and b are equal subject to +def allclose (a, b, masked_equal=True, rtol=1.e-5, atol=1.e-8, fill_value=None): + """ + Returns True if two arrays are element-wise equal within a tolerance. + + The tolerance values are positive, typically very small numbers. The + relative difference (`rtol` * `b`) and the absolute difference (`atol`) + are added together to compare against the absolute difference between `a` + and `b`. + + Parameters + ---------- + a, b : array_like + Input arrays to compare. + fill_value : boolean, optional + Whether masked values in a or b are considered equal (True) or not + (False). + + rtol : Relative tolerance + The relative difference is equal to `rtol` * `b`. + atol : Absolute tolerance + The absolute difference is equal to `atol`. + + Returns + ------- + y : bool + Returns True if the two arrays are equal within the given + tolerance; False otherwise. If either array contains NaN, then + False is returned. + + See Also + -------- + all, any, alltrue, sometrue + + Notes + ----- + If the following equation is element-wise True, then allclose returns + True. + + absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`)) + + Return True if all elements of a and b are equal subject to given tolerances. 
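The rewritten allclose above is driven by the new masked_equal keyword (fill_value is kept only as a deprecated alias) and by the ability to compare against a scalar. A short sketch of the intended use, illustrative only and not part of the patch:

    import numpy as np
    import numpy.ma as ma

    a = ma.array([1.0, 2.0, 3.0], mask=[0, 0, 1])
    b = np.array([1.0, 2.0, 999.0])

    ma.allclose(a, b)                       # True: masked entries compare equal by default
    ma.allclose(a, b, masked_equal=False)   # False: masked entries now count as unequal

    # comparison with a plain scalar, which the old implementation did not support
    ma.allclose(ma.array([0.0, 1e-9, 1.0], mask=[0, 0, 1]), 0.0)   # True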
- If fill_value is True, masked values are considered equal. - If fill_value is False, masked values considered unequal. - The relative error rtol should be positive and << 1.0 - The absolute error atol comes into play for those elements of b - that are very small or zero; it says how small `a` must be also. - """ - m = mask_or(getmask(a), getmask(b)) - d1 = getdata(a) - d2 = getdata(b) - x = filled(array(d1, copy=0, mask=m), fill_value).astype(float) - y = filled(array(d2, copy=0, mask=m), 1).astype(float) - d = umath.less_equal(umath.absolute(x-y), atol + rtol * umath.absolute(y)) - return np.alltrue(np.ravel(d)) + if fill_value is not None: + warnings.warn("The use of fill_value is deprecated."\ + " Please use masked_equal instead.") + masked_equal = fill_value + # + x = masked_array(a, copy=False) + y = masked_array(b, copy=False) + m = mask_or(getmask(x), getmask(y)) + xinf = np.isinf(masked_array(x, copy=False, mask=m)).filled(False) + # If we have some infs, they should fall at the same place. + if not np.all(xinf == filled(np.isinf(y), False)): + return False + # No infs at all + if not np.any(xinf): + d = filled(umath.less_equal(umath.absolute(x-y), + atol + rtol * umath.absolute(y)), + masked_equal) + return np.all(d) + if not np.all(filled(x[xinf] == y[xinf], masked_equal)): + return False + x = x[~xinf] + y = y[~xinf] + d = filled(umath.less_equal(umath.absolute(x-y), + atol + rtol * umath.absolute(y)), + masked_equal) + return np.all(d) #.............................................................................. def asarray(a, dtype=None): @@ -4225,7 +4281,7 @@ #####-------------------------------------------------------------------------- #---- --- Pickling --- #####-------------------------------------------------------------------------- -def dump(a,F): +def dump(a, F): """ Pickle the MaskedArray `a` to the file `F`. `F` can either be the handle of an exiting file, or a string representing a file Modified: trunk/numpy/ma/tests/test_core.py =================================================================== --- trunk/numpy/ma/tests/test_core.py 2008-11-21 17:12:47 UTC (rev 6086) +++ trunk/numpy/ma/tests/test_core.py 2008-11-21 20:49:33 UTC (rev 6087) @@ -1298,6 +1298,27 @@ assert_equal(m.transpose(), m._data.transpose()) + def test_allclose(self): + "Tests allclose on arrays" + a = np.random.rand(10) + b = a + np.random.rand(10) * 1e-8 + self.failUnless(allclose(a,b)) + # Test allclose w/ infs + a[0] = np.inf + self.failUnless(not allclose(a,b)) + b[0] = np.inf + self.failUnless(allclose(a,b)) + # Test all close w/ masked + a = masked_array(a) + a[-1] = masked + self.failUnless(allclose(a,b, masked_equal=True)) + self.failUnless(not allclose(a, b, masked_equal=False)) + # Test comparison w/ scalar + a *= 1e-8 + a[0] = 0 + self.failUnless(allclose(a, 0, masked_equal=True)) + + def test_allany(self): """Checks the any/all methods/functions.""" x = np.array([[ 0.13, 0.26, 0.90], @@ -1467,7 +1488,7 @@ def test_empty(self): "Tests empty/like" - datatype = [('a',int_),('b',float),('c','|S8')] + datatype = [('a',int),('b',float),('c','|S8')] a = masked_array([(1,1.1,'1.1'),(2,2.2,'2.2'),(3,3.3,'3.3')], dtype=datatype) assert_equal(len(a.fill_value.item()), len(datatype)) From numpy-svn at scipy.org Fri Nov 21 16:50:19 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 21 Nov 2008 15:50:19 -0600 (CST) Subject: [Numpy-svn] r6088 - in branches/1.2.x/numpy/ma: . 
tests Message-ID: <20081121215019.BA4F139C088@scipy.org> Author: pierregm Date: 2008-11-21 15:50:17 -0600 (Fri, 21 Nov 2008) New Revision: 6088 Modified: branches/1.2.x/numpy/ma/core.py branches/1.2.x/numpy/ma/tests/test_core.py Log: Backporting bug fixes (6055, 6083, 6086, 6087) Modified: branches/1.2.x/numpy/ma/core.py =================================================================== --- branches/1.2.x/numpy/ma/core.py 2008-11-21 20:49:33 UTC (rev 6087) +++ branches/1.2.x/numpy/ma/core.py 2008-11-21 21:50:17 UTC (rev 6088) @@ -19,7 +19,7 @@ __docformat__ = "restructuredtext en" __all__ = ['MAError', 'MaskType', 'MaskedArray', - 'bool_', 'complex_', 'float_', 'int_', 'object_', + 'bool_', 'abs', 'absolute', 'add', 'all', 'allclose', 'allequal', 'alltrue', 'amax', 'amin', 'anom', 'anomalies', 'any', 'arange', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2', @@ -38,8 +38,8 @@ 'isMA', 'isMaskedArray', 'is_mask', 'is_masked', 'isarray', 'left_shift', 'less', 'less_equal', 'load', 'loads', 'log', 'log10', 'logical_and', 'logical_not', 'logical_or', 'logical_xor', - 'make_mask', 'make_mask_descr', 'make_mask_none', 'mask_or', 'masked', - 'masked_array', 'masked_equal', 'masked_greater', + 'make_mask', 'make_mask_descr', 'make_mask_none', 'mask_or', + 'masked', 'masked_array', 'masked_equal', 'masked_greater', 'masked_greater_equal', 'masked_inside', 'masked_invalid', 'masked_less','masked_less_equal', 'masked_not_equal', 'masked_object','masked_outside', 'masked_print_option', @@ -61,8 +61,7 @@ import operator import numpy as np -from numpy import ndarray, amax, amin, iscomplexobj, bool_, complex_, float_,\ - int_, object_ +from numpy import ndarray, amax, amin, iscomplexobj, bool_ from numpy import array as narray import numpy.core.umath as umath @@ -79,8 +78,13 @@ def doc_note(initialdoc, note): + """ + Adds a Notes section to an existing docstring. + """ if initialdoc is None: return + if note is None: + return initialdoc newdoc = """ %s @@ -95,15 +99,9 @@ #####-------------------------------------------------------------------------- class MAError(Exception): "Class for MA related errors." - def __init__ (self, args=None): - "Creates an exception." - Exception.__init__(self, args) - self.args = args - def __str__(self): - "Calculates the string representation." - return str(self.args) - __repr__ = __str__ + pass + #####-------------------------------------------------------------------------- #---- --- Filling options --- #####-------------------------------------------------------------------------- @@ -127,7 +125,8 @@ def default_fill_value(obj): - """Calculate the default fill value for the argument object. + """ + Calculate the default fill value for the argument object. """ if hasattr(obj,'dtype'): @@ -150,15 +149,16 @@ return defval def minimum_fill_value(obj): - """Calculate the default fill value suitable for taking the - minimum of ``obj``. + """ + Calculate the default fill value suitable for taking the minimum of ``obj``. """ + errmsg = "Unsuitable type for calculating minimum." if hasattr(obj, 'dtype'): objtype = obj.dtype filler = min_filler[objtype] if filler is None: - raise TypeError, 'Unsuitable type for calculating minimum.' + raise TypeError(errmsg) return filler elif isinstance(obj, float): return min_filler[ntypes.typeDict['float_']] @@ -169,18 +169,19 @@ elif isinstance(obj, np.dtype): return min_filler[obj] else: - raise TypeError, 'Unsuitable type for calculating minimum.' 
+ raise TypeError(errmsg) def maximum_fill_value(obj): - """Calculate the default fill value suitable for taking the maximum - of ``obj``. + """ + Calculate the default fill value suitable for taking the maximum of ``obj``. """ + errmsg = "Unsuitable type for calculating maximum." if hasattr(obj, 'dtype'): objtype = obj.dtype filler = max_filler[objtype] if filler is None: - raise TypeError, 'Unsuitable type for calculating minimum.' + raise TypeError(errmsg) return filler elif isinstance(obj, float): return max_filler[ntypes.typeDict['float_']] @@ -191,10 +192,21 @@ elif isinstance(obj, np.dtype): return max_filler[obj] else: - raise TypeError, 'Unsuitable type for calculating minimum.' + raise TypeError(errmsg) def _check_fill_value(fill_value, ndtype): + """ + Private function validating the given `fill_value` for the given dtype. + + If fill_value is None, it is set to the default corresponding to the dtype + if this latter is standard (no fields). If the datatype is flexible (named + fields), fill_value is set to a tuple whose elements are the default fill + values corresponding to each field. + + If fill_value is not None, its value is forced to the given dtype. + + """ ndtype = np.dtype(ndtype) fields = ndtype.fields if fill_value is None: @@ -212,7 +224,7 @@ fill_value = np.array(fill_value, copy=False, dtype=fdtype) except ValueError: err_msg = "Unable to transform %s to dtype %s" - raise ValueError(err_msg % (fill_value,fdtype)) + raise ValueError(err_msg % (fill_value, fdtype)) else: fval = np.resize(fill_value, len(ndtype.descr)) fill_value = [np.asarray(f).astype(desc[1]).item() @@ -231,9 +243,18 @@ def set_fill_value(a, fill_value): - """Set the filling value of a, if a is a masked array. Otherwise, + """ + Set the filling value of a, if a is a masked array. Otherwise, do nothing. + Parameters + ---------- + a : ndarray + Input array + fill_value : var + Filling value. A consistency test is performed to make sure + the value is compatible with the dtype of a. + Returns ------- None @@ -244,7 +265,8 @@ return def get_fill_value(a): - """Return the filling value of a, if any. Otherwise, returns the + """ + Return the filling value of a, if any. Otherwise, returns the default filling value for that type. """ @@ -255,7 +277,8 @@ return result def common_fill_value(a, b): - """Return the common filling value of a and b, if any. + """ + Return the common filling value of a and b, if any. If a and b have different filling values, returns None. """ @@ -267,7 +290,7 @@ #####-------------------------------------------------------------------------- -def filled(a, value = None): +def filled(a, fill_value = None): """Return a as an array with masked data replaced by value. If value is None, get_fill_value(a) is used instead. If a is already a ndarray, a itself is returned. @@ -276,9 +299,9 @@ ---------- a : maskedarray or array_like An input object. - value : {var}, optional - Filling value. If not given, the output of get_fill_value(a) - is used instead. + fill_value : {var}, optional + Filling value. If None, the output of :func:`get_fill_value(a)` is used + instead. Returns ------- @@ -286,7 +309,7 @@ """ if hasattr(a, 'filled'): - return a.filled(value) + return a.filled(fill_value) elif isinstance(a, ndarray): # Should we check for contiguity ? 
and a.flags['CONTIGUOUS']: return a @@ -297,8 +320,9 @@ #####-------------------------------------------------------------------------- def get_masked_subclass(*arrays): - """Return the youngest subclass of MaskedArray from a list of - (masked) arrays. In case of siblings, the first takes over. + """ + Return the youngest subclass of MaskedArray from a list of (masked) arrays. + In case of siblings, the first listed takes over. """ if len(arrays) == 1: @@ -319,13 +343,14 @@ #####-------------------------------------------------------------------------- def get_data(a, subok=True): - """Return the _data part of a (if any), or a as a ndarray. + """ + Return the `_data` part of `a` if `a` is a MaskedArray, or `a` itself. Parameters ---------- a : array_like A ndarray or a subclass of. - subok : bool + subok : {True, False}, optional Whether to force the output to a 'pure' ndarray (False) or to return a subclass of ndarray if approriate (True). @@ -338,7 +363,8 @@ getdata = get_data def fix_invalid(a, mask=nomask, copy=True, fill_value=None): - """Return (a copy of) a where invalid data (nan/inf) are masked + """ + Return (a copy of) a where invalid data (nan/inf) are masked and replaced by fill_value. Note that a copy is performed by default (just in case...). @@ -348,7 +374,7 @@ a : array_like A (subclass of) ndarray. copy : bool - Whether to use a copy of a (True) or to fix a in place (False). + Whether to use a copy of `a` (True) or to fix `a` in place (False). fill_value : {var}, optional Value used for fixing invalid data. If not given, the output of get_fill_value(a) is used instead. @@ -535,7 +561,7 @@ m = mask_or(getmask(a), getmask(b)) (d1, d2) = (get_data(a), get_data(b)) result = self.f(d1, d2, *args, **kwargs).view(get_masked_subclass(a, b)) - if result.size > 1: + if len(result.shape): if m is not nomask: result._mask = make_mask_none(result.shape) result._mask.flat = m @@ -649,9 +675,9 @@ mb = mask_or(mb, t) # The following line controls the domain filling if t.size == d2.size: - d2 = np.where(t,self.filly,d2) + d2 = np.where(t, self.filly, d2) else: - d2 = np.where(np.resize(t, d2.shape),self.filly, d2) + d2 = np.where(np.resize(t, d2.shape), self.filly, d2) m = mask_or(ma, mb) if (not m.ndim) and m: return masked @@ -787,9 +813,13 @@ return mask def is_mask(m): - """Return True if m is a legal mask. + """ + Return True if m is a valid, standard mask. - Does not check contents, only type. + Notes + ----- + This function does not check contents, only the type. In particular, + this function returns False if the mask has a flexible dtype. """ try: @@ -797,8 +827,9 @@ except AttributeError: return False -def make_mask(m, copy=False, shrink=True, flag=None): - """Return m as a mask, creating a copy if necessary or requested. +def make_mask(m, copy=False, shrink=True, flag=None, dtype=MaskType): + """ + Return m as a mask, creating a copy if necessary or requested. The function can accept any sequence of integers or nomask. Does not check that contents must be 0s and 1s. @@ -811,6 +842,10 @@ Whether to return a copy of m (True) or m itself (False). shrink : bool Whether to shrink m to nomask if all its values are False. + dtype : dtype + Data-type of the output mask. By default, the output mask has + a dtype of MaskType (bool). If the dtype is flexible, each field + has a boolean dtype. 
""" if flag is not None: @@ -820,24 +855,29 @@ if m is nomask: return nomask elif isinstance(m, ndarray): + # We won't return after this point to make sure we can shrink the mask + # Fill the mask in case there are missing data m = filled(m, True) - if m.dtype.type is MaskType: + # Make sure the input dtype is valid + dtype = make_mask_descr(dtype) + if m.dtype == dtype: if copy: - result = narray(m, dtype=MaskType, copy=copy) + result = m.copy() else: result = m else: - result = narray(m, dtype=MaskType) + result = np.array(m, dtype=dtype, copy=copy) else: - result = narray(filled(m, True), dtype=MaskType) + result = np.array(filled(m, True), dtype=MaskType) # Bas les masques ! - if shrink and not result.any(): + if shrink and (not result.dtype.names) and (not result.any()): return nomask else: return result def make_mask_none(newshape, dtype=None): - """Return a mask of shape s, filled with False. + """ + Return a mask of shape s, filled with False. Parameters ---------- @@ -854,7 +894,8 @@ return result def mask_or (m1, m2, copy=False, shrink=True): - """Return the combination of two masks m1 and m2. + """ + Return the combination of two masks m1 and m2. The masks are combined with the *logical_or* operator, treating nomask as False. The result may equal m1 or m2 if the other is @@ -871,13 +912,28 @@ shrink : {True, False}, optional Whether to shrink m to nomask if all its values are False. + Raises + ------ + ValueError + If m1 and m2 have different flexible dtypes. + """ - if m1 is nomask: - return make_mask(m2, copy=copy, shrink=shrink) - if m2 is nomask: - return make_mask(m1, copy=copy, shrink=shrink) + if (m1 is nomask) or (m1 is False): + dtype = getattr(m2, 'dtype', MaskType) + return make_mask(m2, copy=copy, shrink=shrink, dtype=dtype) + if (m2 is nomask) or (m2 is False): + dtype = getattr(m1, 'dtype', MaskType) + return make_mask(m1, copy=copy, shrink=shrink, dtype=dtype) if m1 is m2 and is_mask(m1): return m1 + (dtype1, dtype2) = (getattr(m1, 'dtype', None), getattr(m2, 'dtype', None)) + if (dtype1 != dtype2): + raise ValueError("Incompatible dtypes '%s'<>'%s'" % (dtype1, dtype2)) + if dtype1.names: + newmask = np.empty_like(m1) + for n in dtype1.names: + newmask[n] = umath.logical_or(m1[n], m2[n]) + return newmask return make_mask(umath.logical_or(m1, m2), copy=copy, shrink=shrink) @@ -900,6 +956,7 @@ Whether to return a copy of ``a`` (True) or modify ``a`` in place (False). """ + # Make sure that condition is a valid standard-type mask. cond = make_mask(condition) a = np.array(a, copy=copy, subok=True) @@ -917,7 +974,11 @@ return result def masked_greater(x, value, copy=True): - "Shortcut to masked_where, with condition = (x > value)." + """ + Return the array `x` masked where (x > value). + Any value of mask already masked is kept masked. + + """ return masked_where(greater(x, value), x, copy=copy) def masked_greater_equal(x, value, copy=True): @@ -1038,10 +1099,10 @@ Whether to collapse a mask full of False to nomask """ - abs = umath.absolute + mabs = umath.absolute xnew = filled(x, value) if issubclass(xnew.dtype.type, np.floating): - condition = umath.less_equal(abs(xnew-value), atol+rtol*abs(value)) + condition = umath.less_equal(mabs(xnew-value), atol + rtol*mabs(value)) mask = getattr(x, '_mask', nomask) else: condition = umath.equal(xnew, value) @@ -1187,6 +1248,7 @@ a[index] = value def next(self): + "Returns the next element of the iterator." 
d = self.ma_iter.next() if self.maskiter is not None and self.maskiter.next(): d = masked @@ -1259,7 +1321,7 @@ _data = np.array(data, dtype=dtype, copy=copy, subok=True, ndmin=ndmin) _baseclass = getattr(data, '_baseclass', type(_data)) # Check that we'ew not erasing the mask.......... - if isinstance(data,MaskedArray) and (data.shape != _data.shape): + if isinstance(data, MaskedArray) and (data.shape != _data.shape): copy = True # Careful, cls might not always be MaskedArray... if not isinstance(data, cls) or not subok: @@ -1364,7 +1426,7 @@ # We need to copy the _basedict to avoid backward propagation _optinfo = {} _optinfo.update(getattr(obj, '_optinfo', {})) - _optinfo.update(getattr(obj, '_basedict',{})) + _optinfo.update(getattr(obj, '_basedict', {})) if not isinstance(obj, MaskedArray): _optinfo.update(getattr(obj, '__dict__', {})) _dict = dict(_fill_value=getattr(obj, '_fill_value', None), @@ -1378,7 +1440,7 @@ self.__dict__.update(_optinfo) return #........................ - def __array_finalize__(self,obj): + def __array_finalize__(self, obj): """Finalizes the masked array. """ # Get main attributes ......... @@ -1460,7 +1522,7 @@ else: output = ndarray.view(self, dtype, type) # Should we update the mask ? - if (getattr(output,'_mask', nomask) is not nomask): + if (getattr(output, '_mask', nomask) is not nomask): if dtype is None: dtype = output.dtype mdtype = make_mask_descr(dtype) @@ -1506,7 +1568,7 @@ if self._mask is nomask: output._mask = nomask else: - output._mask = self._mask.astype([(n,bool) for n in names]) + output._mask = self._mask.astype([(n, bool) for n in names]) # Don't check _fill_value if it's None, that'll speed things up if self._fill_value is not None: output._fill_value = _check_fill_value(self._fill_value, newtype) @@ -1522,17 +1584,17 @@ # if getmask(indx) is not nomask: # msg = "Masked arrays must be filled before they can be used as indices!" # raise IndexError, msg - dout = ndarray.__getitem__(ndarray.view(self,ndarray), indx) + dout = ndarray.__getitem__(ndarray.view(self, ndarray), indx) # We could directly use ndarray.__getitem__ on self... # But then we would have to modify __array_finalize__ to prevent the # mask of being reshaped if it hasn't been set up properly yet... # So it's easier to stick to the current version _mask = self._mask - if not getattr(dout,'ndim', False): + if not getattr(dout, 'ndim', False): # A record ................ if isinstance(dout, np.void): mask = _mask[indx] - if mask.view((bool,len(mask.dtype))).any(): + if mask.view((bool, len(mask.dtype))).any(): dout = masked_array(dout, mask=mask) else: return dout @@ -1588,7 +1650,7 @@ _mask = self._mask = make_mask_none(self.shape, _dtype) # Now, set the mask to its value. if nbfields: - _mask[indx] = tuple([True,] * nbfields) + _mask[indx] = tuple([True] * nbfields) else: _mask[indx] = True if not self._isfield: @@ -1617,7 +1679,7 @@ ndarray.__setitem__(_mask, indx, mval) elif hasattr(indx, 'dtype') and (indx.dtype==MaskType): indx = indx * umath.logical_not(_mask) - ndarray.__setitem__(_data,indx,dval) + ndarray.__setitem__(_data, indx, dval) else: if nbfields: err_msg = "Flexible 'hard' masks are not yet supported..." @@ -1639,16 +1701,16 @@ indices is not supported. """ - return self.__getitem__(slice(i,j)) + return self.__getitem__(slice(i, j)) #........................ def __setslice__(self, i, j, value): """x.__setslice__(i, j, value) <==> x[i:j]=value - Set the slice (i,j) of a to value. If value is masked, mask - those locations. + Set the slice (i,j) of a to value. 
If value is masked, mask + those locations. """ - self.__setitem__(slice(i,j), value) + self.__setitem__(slice(i, j), value) #............................................ def __setmask__(self, mask, copy=False): """Set the mask. @@ -1727,7 +1789,7 @@ if _mask.size > 1: axis = 1 else: - axis=None + axis = None # try: return _mask.view((bool_, len(self.dtype))).all(axis) @@ -2028,7 +2090,7 @@ parameters = dict(name=name, data=str(self), mask=str(self._mask), fill=str(self.fill_value), dtype=str(self.dtype)) if self.dtype.names: - if n<= 1: + if n <= 1: return with_mask1_flx % parameters return with_mask_flx % parameters elif n <= 1: @@ -2129,7 +2191,7 @@ new_mask = mask_or(other_mask, invalid) self._mask = mask_or(self._mask, new_mask) # The following line is potentially problematic, as we change _data... - np.putmask(self._data,invalid,self.fill_value) + np.putmask(self._data, invalid, self.fill_value) return self #............................................ def __float__(self): @@ -2152,28 +2214,30 @@ return int(self.item()) #............................................ def get_imag(self): + "Returns the imaginary part." result = self._data.imag.view(type(self)) result.__setmask__(self._mask) return result - imag = property(fget=get_imag,doc="Imaginary part") + imag = property(fget=get_imag, doc="Imaginary part.") def get_real(self): + "Returns the real part." result = self._data.real.view(type(self)) result.__setmask__(self._mask) return result - real = property(fget=get_real,doc="Real part") + real = property(fget=get_real, doc="Real part") #............................................ def count(self, axis=None): - """Count the non-masked elements of the array along the given - axis. + """ + Count the non-masked elements of the array along the given axis. Parameters ---------- axis : int, optional - Axis along which to count the non-masked elements. If - not given, all the non masked elements are counted. + Axis along which to count the non-masked elements. If axis is None, + all the non masked elements are counted. Returns ------- @@ -2316,7 +2380,7 @@ Parameters ---------- - indicies : 1-D array_like + indices : 1-D array_like Target indices, interpreted as integers. values : array_like Values to place in self._data copy at target indices. @@ -2386,34 +2450,34 @@ #............................................ def all(self, axis=None, out=None): """ - Check if all of the elements of `a` are true. + Check if all of the elements of `a` are true. - Performs a :func:`logical_and` over the given axis and returns the result. - Masked values are considered as True during computation. - For convenience, the output array is masked where ALL the values along the - current axis are masked: if the output would have been a scalar and that - all the values are masked, then the output is `masked`. + Performs a :func:`logical_and` over the given axis and returns the result. + Masked values are considered as True during computation. + For convenience, the output array is masked where ALL the values along the + current axis are masked: if the output would have been a scalar and that + all the values are masked, then the output is `masked`. - Parameters - ---------- - axis : {None, integer} - Axis to perform the operation over. - If None, perform over flattened array. - out : {None, array}, optional - Array into which the result can be placed. Its type is preserved - and it must be of the right shape to hold the output. 
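The count docstring touched above describes per-axis counting of unmasked elements; as a rough illustration, not part of the patch:

    import numpy.ma as ma

    x = ma.array([[1, 2, 3],
                  [4, 5, 6]], mask=[[0, 1, 0],
                                    [0, 0, 1]])
    x.count()          # 4: unmasked elements in the whole array
    x.count(axis=0)    # [2, 1, 1]: unmasked elements per column
    x.count(axis=1)    # [2, 2]:    unmasked elements per row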
+ Parameters + ---------- + axis : {None, integer} + Axis to perform the operation over. + If None, perform over flattened array. + out : {None, array}, optional + Array into which the result can be placed. Its type is preserved + and it must be of the right shape to hold the output. - See Also - -------- - all : equivalent function + See Also + -------- + all : equivalent function - Examples - -------- - >>> np.ma.array([1,2,3]).all() - True - >>> a = np.ma.array([1,2,3], mask=True) - >>> (a.all() is np.ma.masked) - True + Examples + -------- + >>> np.ma.array([1,2,3]).all() + True + >>> a = np.ma.array([1,2,3], mask=True) + >>> (a.all() is np.ma.masked) + True """ mask = self._mask.all(axis) @@ -2468,19 +2532,20 @@ def nonzero(self): - """Return the indices of the elements of a that are not zero - nor masked, as a tuple of arrays. + """ + Return the indices of the elements of a that are not zero + nor masked, as a tuple of arrays. - There are as many tuples as dimensions of a, each tuple - contains the indices of the non-zero elements in that - dimension. The corresponding non-zero values can be obtained - with ``a[a.nonzero()]``. + There are as many tuples as dimensions of a, each tuple + contains the indices of the non-zero elements in that + dimension. The corresponding non-zero values can be obtained + with ``a[a.nonzero()]``. - To group the indices by element, rather than dimension, use - instead: ``transpose(a.nonzero())``. + To group the indices by element, rather than dimension, use + instead: ``transpose(a.nonzero())``. - The result of this is always a 2d array, with a row for each - non-zero element. + The result of this is always a 2d array, with a row for each + non-zero element. """ return narray(self.filled(0), copy=False).nonzero() @@ -2690,7 +2755,7 @@ return result # Explicit output result = self.filled(1).prod(axis, dtype=dtype, out=out) - if isinstance(out,MaskedArray): + if isinstance(out, MaskedArray): outmask = getattr(out, '_mask', nomask) if (outmask is nomask): outmask = out._mask = make_mask_none(out.shape) @@ -2788,7 +2853,7 @@ if not axis: return (self - m) else: - return (self - expand_dims(m,axis)) + return (self - expand_dims(m, axis)) def var(self, axis=None, dtype=None, out=None, ddof=0): "" @@ -2830,7 +2895,7 @@ def std(self, axis=None, dtype=None, out=None, ddof=0): "" - dvar = self.var(axis=axis,dtype=dtype,out=out, ddof=ddof) + dvar = self.var(axis=axis, dtype=dtype, out=out, ddof=ddof) if dvar is not masked: dvar = sqrt(dvar) if out is not None: @@ -3045,7 +3110,7 @@ """ if self._mask is nomask: - ndarray.sort(self,axis=axis, kind=kind, order=order) + ndarray.sort(self, axis=axis, kind=kind, order=order) else: if fill_value is None: if endwith: @@ -3055,7 +3120,8 @@ else: filler = fill_value idx = np.indices(self.shape) - idx[axis] = self.filled(filler).argsort(axis=axis,kind=kind,order=order) + idx[axis] = self.filled(filler).argsort(axis=axis, kind=kind, + order=order) idx_l = idx.tolist() tmp_mask = self._mask[idx_l].flat tmp_data = self._data[idx_l].flat @@ -3246,11 +3312,11 @@ nbdims = self.ndim dtypesize = len(self.dtype) if nbdims == 0: - return tuple([None]*dtypesize) + return tuple([None] * dtypesize) elif nbdims == 1: maskedidx = _mask.nonzero()[0].tolist() if dtypesize: - nodata = tuple([None]*dtypesize) + nodata = tuple([None] * dtypesize) else: nodata = None [operator.setitem(result,i,nodata) for i in maskedidx] @@ -3377,9 +3443,11 @@ (self.__class__, self._baseclass, (0,), 'b', ), self.__getstate__()) # - def __deepcopy__(self, 
memo={}): + def __deepcopy__(self, memo=None): from copy import deepcopy copied = MaskedArray.__new__(type(self), self, copy=True) + if memo is None: + memo = {} memo[id(self)] = copied for (k,v) in self.__dict__.iteritems(): copied.__dict__[k] = deepcopy(v, memo) @@ -3617,16 +3685,16 @@ fa = getdata(a) fb = getdata(b) # Get the type of the result (so that we preserve subclasses) - if isinstance(a,MaskedArray): + if isinstance(a, MaskedArray): basetype = type(a) else: basetype = MaskedArray # Get the result and view it as a (subclass of) MaskedArray - result = umath.power(fa,fb).view(basetype) + result = umath.power(fa, fb).view(basetype) # Find where we're in trouble w/ NaNs and Infs invalid = np.logical_not(np.isfinite(result.view(ndarray))) # Retrieve some extra attributes if needed - if isinstance(result,MaskedArray): + if isinstance(result, MaskedArray): result._update_from(a) # Add the initial mask if m is not nomask: @@ -3700,13 +3768,21 @@ filler = fill_value # return indx = np.indices(a.shape).tolist() - indx[axis] = filled(a,filler).argsort(axis=axis,kind=kind,order=order) + indx[axis] = filled(a, filler).argsort(axis=axis, kind=kind, order=order) return a[indx] sort.__doc__ = MaskedArray.sort.__doc__ def compressed(x): - """Return a 1-D array of all the non-masked data.""" + """ + Return a 1-D array of all the non-masked data. + + See Also + -------- + MaskedArray.compressed + equivalent method + + """ if getmask(x) is nomask: return np.asanyarray(x) else: @@ -3742,12 +3818,13 @@ count.__doc__ = MaskedArray.count.__doc__ -def expand_dims(x,axis): - """Expand the shape of the array by including a new axis before +def expand_dims(x, axis): + """ + Expand the shape of the array by including a new axis before the given one. """ - result = n_expand_dims(x,axis) + result = n_expand_dims(x, axis) if isinstance(x, MaskedArray): new_shape = result.shape result = x.view() @@ -4081,29 +4158,100 @@ else: return False -def allclose (a, b, fill_value=True, rtol=1.e-5, atol=1.e-8): - """ Return True if all elements of a and b are equal subject to +def allclose (a, b, masked_equal=True, rtol=1.e-5, atol=1.e-8, fill_value=None): + """ + Returns True if two arrays are element-wise equal within a tolerance. + + The tolerance values are positive, typically very small numbers. The + relative difference (`rtol` * `b`) and the absolute difference (`atol`) + are added together to compare against the absolute difference between `a` + and `b`. + + Parameters + ---------- + a, b : array_like + Input arrays to compare. + fill_value : boolean, optional + Whether masked values in a or b are considered equal (True) or not + (False). + + rtol : Relative tolerance + The relative difference is equal to `rtol` * `b`. + atol : Absolute tolerance + The absolute difference is equal to `atol`. + + Returns + ------- + y : bool + Returns True if the two arrays are equal within the given + tolerance; False otherwise. If either array contains NaN, then + False is returned. + + See Also + -------- + all, any, alltrue, sometrue + + Notes + ----- + If the following equation is element-wise True, then allclose returns + True. + + absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`)) + + Return True if all elements of a and b are equal subject to given tolerances. - If fill_value is True, masked values are considered equal. - If fill_value is False, masked values considered unequal. 
- The relative error rtol should be positive and << 1.0 - The absolute error atol comes into play for those elements of b - that are very small or zero; it says how small `a` must be also. - """ - m = mask_or(getmask(a), getmask(b)) - d1 = getdata(a) - d2 = getdata(b) - x = filled(array(d1, copy=0, mask=m), fill_value).astype(float) - y = filled(array(d2, copy=0, mask=m), 1).astype(float) - d = umath.less_equal(umath.absolute(x-y), atol + rtol * umath.absolute(y)) - return np.alltrue(np.ravel(d)) + if fill_value is not None: + warnings.warn("The use of fill_value is deprecated."\ + " Please use masked_equal instead.") + masked_equal = fill_value + # + x = masked_array(a, copy=False) + y = masked_array(b, copy=False) + m = mask_or(getmask(x), getmask(y)) + xinf = np.isinf(masked_array(x, copy=False, mask=m)).filled(False) + # If we have some infs, they should fall at the same place. + if not np.all(xinf == filled(np.isinf(y), False)): + return False + # No infs at all + if not np.any(xinf): + d = filled(umath.less_equal(umath.absolute(x-y), + atol + rtol * umath.absolute(y)), + masked_equal) + return np.all(d) + if not np.all(filled(x[xinf] == y[xinf], masked_equal)): + return False + x = x[~xinf] + y = y[~xinf] + d = filled(umath.less_equal(umath.absolute(x-y), + atol + rtol * umath.absolute(y)), + masked_equal) + return np.all(d) #.............................................................................. def asarray(a, dtype=None): - """asarray(data, dtype) = array(data, dtype, copy=0, subok=0) - + """ + Convert the input to a masked array. + + Parameters + ---------- + a : array_like + Input data, in any form that can be converted to an array. This + includes lists, lists of tuples, tuples, tuples of tuples, tuples + of lists and ndarrays. + dtype : data-type, optional + By default, the data-type is inferred from the input data. + order : {'C', 'F'}, optional + Whether to use row-major ('C') or column-major ('FORTRAN') memory + representation. Defaults to 'C'. + + Returns + ------- + out : ndarray + MaskedArray interpretation of `a`. No copy is performed if the input + is already an ndarray. If `a` is a subclass of ndarray, a base + class ndarray is returned. Return a as a MaskedArray object of the given dtype. If dtype is not given or None, is is set to the dtype of a. No copy is performed if a is already an array. @@ -4127,7 +4275,7 @@ #####-------------------------------------------------------------------------- #---- --- Pickling --- #####-------------------------------------------------------------------------- -def dump(a,F): +def dump(a, F): """ Pickle the MaskedArray `a` to the file `F`. 
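As a quick illustration of the rewritten allclose above, here is a usage sketch of the masked_equal switch and the special-cased infinities (the arrays are invented for the example):

    import numpy as np
    import numpy.ma as ma

    a = ma.array([1.0, np.inf, 3.0], mask=[0, 0, 1])
    b = np.array([1.0 + 5e-9, np.inf, 999.0])

    print(ma.allclose(a, b))                      # True: infs line up, masked entry ignored
    print(ma.allclose(a, b, masked_equal=False))  # False: masked entries now count as unequal
    print(ma.allclose(ma.array([1.0, np.inf]), [1.0, 2.0]))   # False: an inf with no partner
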
`F` can either be the handle of an exiting file, or a string representing a file @@ -4135,8 +4283,8 @@ """ if not hasattr(F,'readline'): - F = open(F,'w') - return cPickle.dump(a,F) + F = open(F, 'w') + return cPickle.dump(a, F) def dumps(a): """ Modified: branches/1.2.x/numpy/ma/tests/test_core.py =================================================================== --- branches/1.2.x/numpy/ma/tests/test_core.py 2008-11-21 20:49:33 UTC (rev 6087) +++ branches/1.2.x/numpy/ma/tests/test_core.py 2008-11-21 21:50:17 UTC (rev 6088) @@ -69,7 +69,7 @@ self.failUnless(not isMaskedArray(x)) self.failUnless(isMaskedArray(xm)) self.failUnless((xm-ym).filled(0).any()) - fail_if_equal(xm.mask.astype(int_), ym.mask.astype(int_)) + fail_if_equal(xm.mask.astype(int), ym.mask.astype(int)) s = x.shape assert_equal(np.shape(xm), s) assert_equal(xm.shape, s) @@ -593,8 +593,13 @@ assert_equal(y.shape, x.shape) assert_equal(y._mask, [True, True]) + def test_arithmetic_with_masked_singleton_on_1d_singleton(self): + "Check that we're not losing the shape of a singleton" + x = masked_array([1, ]) + y = x + masked + assert_equal(y.shape, x.shape) + assert_equal(y.mask, [True, ]) - def test_scalar_arithmetic(self): x = array(0, mask=0) assert_equal(x.filled().ctypes.data, x.ctypes.data) @@ -1293,6 +1298,27 @@ assert_equal(m.transpose(), m._data.transpose()) + def test_allclose(self): + "Tests allclose on arrays" + a = np.random.rand(10) + b = a + np.random.rand(10) * 1e-8 + self.failUnless(allclose(a,b)) + # Test allclose w/ infs + a[0] = np.inf + self.failUnless(not allclose(a,b)) + b[0] = np.inf + self.failUnless(allclose(a,b)) + # Test all close w/ masked + a = masked_array(a) + a[-1] = masked + self.failUnless(allclose(a,b, masked_equal=True)) + self.failUnless(not allclose(a, b, masked_equal=False)) + # Test comparison w/ scalar + a *= 1e-8 + a[0] = 0 + self.failUnless(allclose(a, 0, masked_equal=True)) + + def test_allany(self): """Checks the any/all methods/functions.""" x = np.array([[ 0.13, 0.26, 0.90], @@ -1462,7 +1488,7 @@ def test_empty(self): "Tests empty/like" - datatype = [('a',int_),('b',float),('c','|S8')] + datatype = [('a',int),('b',float),('c','|S8')] a = masked_array([(1,1.1,'1.1'),(2,2.2,'2.2'),(3,3.3,'3.3')], dtype=datatype) assert_equal(len(a.fill_value.item()), len(datatype)) @@ -1727,7 +1753,7 @@ x = array(zip([1,2,3], [1.1,2.2,3.3], ['one','two','thr']), - dtype=[('a',int_),('b',float),('c','|S8')]) + dtype=[('a',int),('b',float),('c','|S8')]) x[-1] = masked assert_equal(x.tolist(), [(1,1.1,'one'),(2,2.2,'two'),(None,None,None)]) @@ -1952,7 +1978,7 @@ m2XX = array(data=XX,mask=m2.reshape(XX.shape)) self.d = (x,X,XX,m,mx,mX,mXX,m2x,m2X,m2XX) - #------------------------------------------------------ + def test_varstd(self): "Tests var & std on MaskedArrays." 
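The new test_arithmetic_with_masked_singleton_on_1d_singleton above pins down a small but useful behaviour; in sketch form (output assumes a numpy.ma where the fix is in place):

    import numpy.ma as ma

    x = ma.masked_array([1])
    y = x + ma.masked        # adding the masked singleton
    print(y.shape)           # (1,)      -- the 1-d shape is preserved
    print(y.mask)            # [ True]   -- and the result is fully masked
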
(x,X,XX,m,mx,mX,mXX,m2x,m2X,m2XX) = self.d @@ -2016,6 +2042,7 @@ assert_equal(masked_where(not_equal(x,2), x), masked_not_equal(x,2)) assert_equal(masked_where([1,1,0,0,0], [1,2,3,4,5]), [99,99,3,4,5]) + def test_masked_where_oddities(self): """Tests some generic features.""" atest = ones((10,10,10), dtype=float) @@ -2023,6 +2050,7 @@ ctest = masked_where(btest,atest) assert_equal(atest,ctest) + def test_masked_where_shape_constraint(self): a = arange(10) try: @@ -2140,11 +2168,12 @@ tmp[(xm<=2).filled(True)] = True assert_equal(d._mask, tmp) # - ixm = xm.astype(int_) + ixm = xm.astype(int) d = where(ixm>2, ixm, masked) assert_equal(d, [-9,-9,-9,-9, -9, 4, -9, -9, 10, -9, -9, 3]) assert_equal(d.dtype, ixm.dtype) + def test_where_with_masked_choice(self): x = arange(10) x[3] = masked @@ -2285,6 +2314,70 @@ test = make_mask_descr(ntype) assert_equal(test, np.dtype(np.bool)) + + def test_make_mask(self): + "Test make_mask" + # w/ a list as an input + mask = [0,1] + test = make_mask(mask) + assert_equal(test.dtype, MaskType) + assert_equal(test, [0,1]) + # w/ a ndarray as an input + mask = np.array([0,1], dtype=np.bool) + test = make_mask(mask) + assert_equal(test.dtype, MaskType) + assert_equal(test, [0,1]) + # w/ a flexible-type ndarray as an input - use default + mdtype = [('a', np.bool), ('b', np.bool)] + mask = np.array([(0, 0), (0, 1)], dtype=mdtype) + test = make_mask(mask) + assert_equal(test.dtype, MaskType) + assert_equal(test, [1,1]) + # w/ a flexible-type ndarray as an input - use input dtype + mdtype = [('a', np.bool), ('b', np.bool)] + mask = np.array([(0, 0), (0, 1)], dtype=mdtype) + test = make_mask(mask, dtype=mask.dtype) + assert_equal(test.dtype, mdtype) + assert_equal(test, mask) + # w/ a flexible-type ndarray as an input - use input dtype + mdtype = [('a', np.float), ('b', np.float)] + bdtype = [('a', np.bool), ('b', np.bool)] + mask = np.array([(0, 0), (0, 1)], dtype=mdtype) + test = make_mask(mask, dtype=mask.dtype) + assert_equal(test.dtype, bdtype) + assert_equal(test, np.array([(0, 0), (0, 1)], dtype=bdtype)) + + + def test_mask_or(self): + # Initialize + mtype = [('a', np.bool), ('b', np.bool)] + mask = np.array([(0, 0), (0, 1), (1, 0), (0, 0)], dtype=mtype) + # Test using nomask as input + test = mask_or(mask, nomask) + assert_equal(test, mask) + test = mask_or(nomask, mask) + assert_equal(test, mask) + # Using False as input + test = mask_or(mask, False) + assert_equal(test, mask) + # Using True as input. 
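The make_mask and mask_or behaviour exercised by the new tests above, as a short sketch (plain boolean masks only; the flexible-dtype cases follow the same pattern):

    import numpy as np
    import numpy.ma as ma

    print(ma.make_mask([0, 1]))              # [False  True], dtype is MaskType (bool)

    m1 = np.array([True, False, False, True])
    m2 = np.array([False, False, True, True])
    print(ma.mask_or(m1, m2))                # [ True False  True  True]
    print(ma.mask_or(m1, ma.nomask))         # [ True False False  True]  -- nomask is a no-op
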
Won't work, but keep it for the kicks + #test = ma.mask_or(mask, True) + #control = np.array([(1, 1), (1, 1), (1, 1), (1, 1)], dtype=mtype) + #assert_equal(test, control) + # Using another array w/ the same dtype + other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=mtype) + test = mask_or(mask, other) + control = np.array([(0, 1), (0, 1), (1, 1), (0, 1)], dtype=mtype) + assert_equal(test, control) + # Using another array w/ a different dtype + othertype = [('A', np.bool), ('B', np.bool)] + other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=othertype) + try: + test = mask_or(mask, other) + except ValueError: + pass + + #------------------------------------------------------------------------------ class TestMaskedFields(TestCase): @@ -2407,6 +2500,8 @@ assert_equal_records(a[-2]._mask, a._mask[-2]) +#------------------------------------------------------------------------------ + class TestMaskedView(TestCase): # def setUp(self): From numpy-svn at scipy.org Fri Nov 21 20:29:04 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 21 Nov 2008 19:29:04 -0600 (CST) Subject: [Numpy-svn] r6089 - in trunk/numpy/core: . code_generators src Message-ID: <20081122012904.D897739C088@scipy.org> Author: charris Date: 2008-11-21 19:28:52 -0600 (Fri, 21 Nov 2008) New Revision: 6089 Added: trunk/numpy/core/src/umath_funcs_c99.inc.src trunk/numpy/core/src/umath_ufunc_object.inc Removed: trunk/numpy/core/src/math_c99.inc.src trunk/numpy/core/src/ufuncobject.c Modified: trunk/numpy/core/SConscript trunk/numpy/core/code_generators/genapi.py trunk/numpy/core/setup.py trunk/numpy/core/src/umathmodule.c.src Log: Merge branch 'ufunc' Conflicts: numpy/core/code_generators/genapi.py Modified: trunk/numpy/core/SConscript =================================================================== --- trunk/numpy/core/SConscript 2008-11-21 21:50:17 UTC (rev 6088) +++ trunk/numpy/core/SConscript 2008-11-22 01:28:52 UTC (rev 6089) @@ -251,7 +251,7 @@ # Generate generated code #------------------------ scalartypes_src = env.GenerateFromTemplate(pjoin('src', 'scalartypes.inc.src')) -math_c99_src = env.GenerateFromTemplate(pjoin('src', 'math_c99.inc.src')) +math_c99_src = env.GenerateFromTemplate(pjoin('src', 'umath_funcs_c99.inc.src')) arraytypes_src = env.GenerateFromTemplate(pjoin('src', 'arraytypes.inc.src')) sortmodule_src = env.GenerateFromTemplate(pjoin('src', '_sortmodule.c.src')) umathmodule_src = env.GenerateFromTemplate(pjoin('src', 'umathmodule.c.src')) Modified: trunk/numpy/core/code_generators/genapi.py =================================================================== --- trunk/numpy/core/code_generators/genapi.py 2008-11-21 21:50:17 UTC (rev 6088) +++ trunk/numpy/core/code_generators/genapi.py 2008-11-22 01:28:52 UTC (rev 6089) @@ -17,7 +17,7 @@ 'arraytypes.inc.src', 'multiarraymodule.c', 'scalartypes.inc.src', - 'ufuncobject.c', + 'umath_ufunc_object.inc', 'umathmodule.c.src' ] THIS_DIR = os.path.dirname(__file__) Modified: trunk/numpy/core/setup.py =================================================================== --- trunk/numpy/core/setup.py 2008-11-21 21:50:17 UTC (rev 6088) +++ trunk/numpy/core/setup.py 2008-11-22 01:28:52 UTC (rev 6089) @@ -357,9 +357,9 @@ generate_ufunc_api, join('src','scalartypes.inc.src'), join('src','arraytypes.inc.src'), - join('src','math_c99.inc.src'), + join('src','umath_funcs_c99.inc.src'), ], - depends = [join('src','ufuncobject.c'), + depends = [join('src','umath_ufunc_object.inc'), generate_umath_py, join(codegen_dir,'generate_ufunc_api.py'), ]+deps, 
Deleted: trunk/numpy/core/src/math_c99.inc.src =================================================================== --- trunk/numpy/core/src/math_c99.inc.src 2008-11-21 21:50:17 UTC (rev 6088) +++ trunk/numpy/core/src/math_c99.inc.src 2008-11-22 01:28:52 UTC (rev 6089) @@ -1,377 +0,0 @@ -/* - * vim:syntax=c - * A small module to implement missing C99 math capabilities required by numpy - * - * Please keep this independant of python ! - * - * How to add a function to this section - * ------------------------------------- - * - * Say you want to add `foo`, these are the steps and the reasons for them. - * - * 1) Add foo to the appropriate list in the configuration system. The - * lists can be found in numpy/core/setup.py lines 63-105. Read the - * comments that come with them, they are very helpful. - * - * 2) The configuration system will define a macro HAVE_FOO if your function - * can be linked from the math library. The result can depend on the - * optimization flags as well as the compiler, so can't be known ahead of - * time. If the function can't be linked, then either it is absent, defined - * as a macro, or is an intrinsic (hardware) function. If it is linkable it - * may still be the case that no prototype is available. So to cover all the - * cases requires the following construction. - * - * i) Undefine any possible macros: - * - * #ifdef foo - * #undef foo - * #endif - * - * ii) Check if the function was in the library, If not, define the - * function with npy_ prepended to its name to avoid conflict with any - * intrinsic versions, then use a define so that the preprocessor will - * replace foo with npy_foo before the compilation pass. Make the - * function static to avoid poluting the module library. - * - * #ifdef foo - * #undef foo - * #endif - * #ifndef HAVE_FOO - * static double - * npy_foo(double x) - * { - * return x; - * } - * #define foo npy_foo - * - * iii) Finally, even if foo is in the library, add a prototype. Just being - * in the library doesn't guarantee a prototype in math.h, and in any case - * you want to make sure the prototype is what you think it is. Count on it, - * whatever can go wrong will go wrong. Think defensively! The result: - * - * #ifdef foo - * #undef foo - * #endif - * #ifndef HAVE_FOO - * static double - * npy_foo(double x) - * { - * return x; - * } - * #define foo npy_foo - * #else - * double foo(double x); - * #end - * - * And there you have it. - * - */ - -/* - ***************************************************************************** - ** DISTRO VOODOO ** - ***************************************************************************** - */ - - -/* - ***************************************************************************** - ** BASIC MATH FUNCTIONS ** - ***************************************************************************** - */ - -/* Original code by Konrad Hinsen. */ -#ifndef HAVE_EXPM1 -static double -npy_expm1(double x) -{ - double u = exp(x); - if (u == 1.0) { - return x; - } else if (u-1.0 == -1.0) { - return -1; - } else { - return (u-1.0) * x/log(u); - } -} -#define expm1 npy_expm1 -#else -double expm1(double x); -#endif - -#ifndef HAVE_LOG1P -static double -npy_log1p(double x) -{ - double u = 1. 
+ x; - if (u == 1.0) { - return x; - } else { - return log(u) * x / (u - 1); - } -} -#define log1p npy_log1p -#else -double log1p(double x); -#endif - -#ifndef HAVE_HYPOT -static double -npy_hypot(double x, double y) -{ - double yx; - - x = fabs(x); - y = fabs(y); - if (x < y) { - double temp = x; - x = y; - y = temp; - } - if (x == 0.) - return 0.; - else { - yx = y/x; - return x*sqrt(1.+yx*yx); - } -} -#define hypot npy_hypot -#else -double hypot(double x, double y); -#endif - -#ifndef HAVE_ACOSH -static double -npy_acosh(double x) -{ - return 2*log(sqrt((x+1.0)/2)+sqrt((x-1.0)/2)); -} -#define acosh npy_acosh -#else -double acosh(double x); -#endif - -#ifndef HAVE_ASINH -static double -npy_asinh(double xx) -{ - double x, d; - int sign; - if (xx < 0.0) { - sign = -1; - x = -xx; - } - else { - sign = 1; - x = xx; - } - if (x > 1e8) { - d = x; - } else { - d = sqrt(x*x + 1); - } - return sign*log1p(x*(1.0 + x/(d+1))); -} -#define asinh npy_asinh -#else -double asinh(double xx); -#endif - -#ifndef HAVE_ATANH -static double -npy_atanh(double x) -{ - return 0.5*log1p(2.0*x/(1.0-x)); -} -#define atanh npy_atanh -#else -double atanh(double x); -#endif - -#ifndef HAVE_RINT -static double -npy_rint(double x) -{ - double y, r; - - y = floor(x); - r = x - y; - - if (r > 0.5) goto rndup; - - /* Round to nearest even */ - if (r==0.5) { - r = y - 2.0*floor(0.5*y); - if (r==1.0) { - rndup: - y+=1.0; - } - } - return y; -} -#define rint npy_rint -#else -double rint(double x); -#endif - -#ifndef HAVE_TRUNC -static double -npy_trunc(double x) -{ - return x < 0 ? ceil(x) : floor(x); -} -#define trunc npy_trunc -#else -double trunc(double x); -#endif - -#ifndef HAVE_EXP2 -#define LOG2 0.69314718055994530943 -static double -npy_exp2(double x) -{ - return exp(LOG2*x); -} -#define exp2 npy_exp2 -#undef LOG2 -#else -double exp2(double x); -#endif - -#ifndef HAVE_LOG2 -#define INVLOG2 1.4426950408889634074 -static double -npy_log2(double x) -{ - return INVLOG2*log(x); -} -#define log2 npy_log2 -#undef INVLOG2 -#else -double log2(double x); -#endif - -/* - ***************************************************************************** - ** IEEE 754 FPU HANDLING ** - ***************************************************************************** - */ -#if !defined(HAVE_DECL_ISNAN) - # define isnan(x) ((x) != (x)) -#endif - -/* VS 2003 with /Ox optimizes (x)-(x) to 0, which is not IEEE compliant. So we - * force (x) + (-x), which seems to work. */ -#if !defined(HAVE_DECL_ISFINITE) - # define isfinite(x) !isnan((x) + (-x)) -#endif - -#if !defined(HAVE_DECL_ISINF) -#define isinf(x) (!isfinite(x) && !isnan(x)) -#endif - -#if !defined(HAVE_DECL_SIGNBIT) - #include "_signbit.c" - # define signbit(x) \ - (sizeof (x) == sizeof (long double) ? signbit_ld (x) \ - : sizeof (x) == sizeof (double) ? signbit_d (x) \ - : signbit_f (x)) - -static int signbit_f (float x) -{ - return signbit_d((double)x); -} - -static int signbit_ld (long double x) -{ - return signbit_d((double)x); -} -#endif - -/* - * if C99 extensions not available then define dummy functions that use the - * double versions for - * - * sin, cos, tan - * sinh, cosh, tanh, - * fabs, floor, ceil, rint, trunc - * sqrt, log10, log, exp, expm1 - * asin, acos, atan, - * asinh, acosh, atanh - * - * hypot, atan2, pow, fmod, modf - * - * We assume the above are always available in their double versions. - * - * NOTE: some facilities may be available as macro only instead of functions. - * For simplicity, we define our own functions and undef the macros. 
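The deleted fallbacks are compact enough to transcribe; below is a Python rendering of the expm1 and hypot substitutes shown above (the same C code is carried over to the new umath_funcs_c99.inc.src), which makes it easy to see why the naive formulas are avoided:

    import math

    def expm1_fallback(x):
        # Mirrors npy_expm1 above: compute exp(x) - 1 as (u - 1) * x / log(u)
        # to avoid catastrophic cancellation for small x.
        u = math.exp(x)
        if u == 1.0:
            return x
        if u - 1.0 == -1.0:
            return -1.0
        return (u - 1.0) * x / math.log(u)

    def hypot_fallback(x, y):
        # Mirrors npy_hypot above: factor out the larger magnitude so that
        # x*x + y*y cannot overflow prematurely.
        x, y = abs(x), abs(y)
        if x < y:
            x, y = y, x
        if x == 0.0:
            return 0.0
        r = y / x
        return x * math.sqrt(1.0 + r * r)

    print(expm1_fallback(1e-12), math.expm1(1e-12))             # both about 1e-12
    print(hypot_fallback(1e200, 1e200), math.hypot(1e200, 1e200))
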
We could - * instead test for the macro, but I am lazy to do that for now. - */ - -/**begin repeat - * #type = longdouble, float# - * #TYPE = LONGDOUBLE, FLOAT# - * #c = l,f# - * #C = L,F# - */ - -/**begin repeat1 - * #kind = sin,cos,tan,sinh,cosh,tanh,fabs,floor,ceil,rint,trunc,sqrt,log10, - * log,exp,expm1,asin,acos,atan,asinh,acosh,atanh,log1p,exp2,log2# - * #KIND = SIN,COS,TAN,SINH,COSH,TANH,FABS,FLOOR,CEIL,RINT,TRUNC,SQRT,LOG10, - * LOG,EXP,EXPM1,ASIN,ACOS,ATAN,ASINH,ACOSH,ATANH,LOG1P,EXP2,LOG2# - */ - -#ifdef @kind@@c@ -#undef @kind@@c@ -#endif -#ifndef HAVE_ at KIND@@C@ -static @type@ -npy_ at kind@@c@(@type@ x) -{ - return (@type@) @kind@((double)x); -} -#define @kind@@c@ npy_ at kind@@c@ -#else - at type@ @kind@@c@(@type@ x); -#endif - -/**end repeat1**/ - -/**begin repeat1 - * #kind = atan2,hypot,pow,fmod# - * #KIND = ATAN2,HYPOT,POW,FMOD# - */ -#ifdef @kind@@c@ -#undef @kind@@c@ -#endif -#ifndef HAVE_ at KIND@@C@ -static @type@ -npy_ at kind@@c@(@type@ x, @type@ y) -{ - return (@type@) @kind@((double)x, (double) y); -} -#define @kind@@c@ npy_ at kind@@c@ -#else - at type@ @kind@@c@(@type@ x, @type@ y); -#endif -/**end repeat1**/ - -#ifdef modf at c@ -#undef modf at c@ -#endif -#ifndef HAVE_MODF at C@ -static @type@ -npy_modf at c@(@type@ x, @type@ *iptr) -{ - double niptr; - double y = modf((double)x, &niptr); - *iptr = (@type@) niptr; - return (@type@) y; -} -#define modf at c@ npy_modf at c@ -#else - at type@ modf at c@(@type@ x, @type@ *iptr); -#endif - -/**end repeat**/ Deleted: trunk/numpy/core/src/ufuncobject.c =================================================================== --- trunk/numpy/core/src/ufuncobject.c 2008-11-21 21:50:17 UTC (rev 6088) +++ trunk/numpy/core/src/ufuncobject.c 2008-11-22 01:28:52 UTC (rev 6089) @@ -1,4134 +0,0 @@ -/* - * Python Universal Functions Object -- Math for all types, plus fast - * arrays math - * - * Full description - * - * This supports mathematical (and Boolean) functions on arrays and other python - * objects. Math on large arrays of basic C types is rather efficient. - * - * Travis E. Oliphant 2005, 2006 oliphant at ee.byu.edu (oliphant.travis at ieee.org) - * Brigham Young University - * - * based on the - * - * Original Implementation: - * Copyright (c) 1995, 1996, 1997 Jim Hugunin, hugunin at mit.edu - * - * with inspiration and code from - * Numarray - * Space Science Telescope Institute - * J. Todd Miller - * Perry Greenfield - * Rick White - * - */ - - -#define USE_USE_DEFAULTS 1 - - - - -/* ---------------------------------------------------------------- */ - - -/* fpstatus is the ufunc_formatted hardware status - errmask is the handling mask specified by the user. - errobj is a Python object with (string, callable object or None) - or NULL -*/ - -/* - 2. for each of the flags - determine whether to ignore, warn, raise error, or call Python function. 
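The ignore/warn/raise/call dispatch described in the comment above is what np.seterr and np.seterrcall expose at the Python level; a minimal sketch (the handler name is arbitrary):

    import numpy as np

    def handler(err_type, flag):
        # Counterpart of the UFUNC_ERR_CALL branch: invoked with the error
        # kind and the raw hardware status flag.
        print("caught:", err_type, "status flag:", flag)

    old_err = np.seterr(divide='call')
    old_call = np.seterrcall(handler)
    np.array([1.0]) / np.array([0.0])   # 'divide by zero' -> handler runs

    np.seterr(**old_err)                # restore the previous settings
    np.seterrcall(old_call)
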
- If ignore, do nothing - If warn, print a warning and continue - If raise return an error - If call, call a user-defined function with string -*/ - -static int -_error_handler(int method, PyObject *errobj, char *errtype, int retstatus, int *first) -{ - PyObject *pyfunc, *ret, *args; - char *name=PyString_AS_STRING(PyTuple_GET_ITEM(errobj,0)); - char msg[100]; - - ALLOW_C_API_DEF - - ALLOW_C_API - - switch(method) { - case UFUNC_ERR_WARN: - PyOS_snprintf(msg, sizeof(msg), - "%s encountered in %s", errtype, name); - if (PyErr_Warn(PyExc_RuntimeWarning, msg) < 0) goto fail; - break; - case UFUNC_ERR_RAISE: - PyErr_Format(PyExc_FloatingPointError, - "%s encountered in %s", - errtype, name); - goto fail; - case UFUNC_ERR_CALL: - pyfunc = PyTuple_GET_ITEM(errobj, 1); - - if (pyfunc == Py_None) { - PyErr_Format(PyExc_NameError, - "python callback specified for %s (in " \ - " %s) but no function found.", - errtype, name); - goto fail; - } - args = Py_BuildValue("NN", PyString_FromString(errtype), - PyInt_FromLong((long) retstatus)); - if (args == NULL) goto fail; - ret = PyObject_CallObject(pyfunc, args); - Py_DECREF(args); - if (ret == NULL) goto fail; - Py_DECREF(ret); - - break; - case UFUNC_ERR_PRINT: - if (*first) { - fprintf(stderr, "Warning: %s encountered in %s\n", errtype, name); - *first = 0; - } - break; - case UFUNC_ERR_LOG: - if (first) { - *first = 0; - pyfunc = PyTuple_GET_ITEM(errobj, 1); - if (pyfunc == Py_None) { - PyErr_Format(PyExc_NameError, - "log specified for %s (in %s) but no " \ - "object with write method found.", - errtype, name); - goto fail; - } - PyOS_snprintf(msg, sizeof(msg), - "Warning: %s encountered in %s\n", errtype, name); - ret = PyObject_CallMethod(pyfunc, "write", "s", msg); - if (ret == NULL) goto fail; - Py_DECREF(ret); - } - break; - } - DISABLE_C_API - return 0; - - fail: - DISABLE_C_API - return -1; -} - - -/*UFUNC_API*/ -static int -PyUFunc_getfperr(void) -{ - int retstatus; - UFUNC_CHECK_STATUS(retstatus); - return retstatus; -} - -#define HANDLEIT(NAME, str) {if (retstatus & UFUNC_FPE_##NAME) { \ - handle = errmask & UFUNC_MASK_##NAME; \ - if (handle && \ - _error_handler(handle >> UFUNC_SHIFT_##NAME, \ - errobj, str, retstatus, first) < 0) \ - return -1; \ - }} - -/*UFUNC_API*/ -static int -PyUFunc_handlefperr(int errmask, PyObject *errobj, int retstatus, int *first) -{ - int handle; - if (errmask && retstatus) { - HANDLEIT(DIVIDEBYZERO, "divide by zero"); - HANDLEIT(OVERFLOW, "overflow"); - HANDLEIT(UNDERFLOW, "underflow"); - HANDLEIT(INVALID, "invalid value"); - } - return 0; -} - -#undef HANDLEIT - - -/*UFUNC_API*/ -static int -PyUFunc_checkfperr(int errmask, PyObject *errobj, int *first) -{ - int retstatus; - - /* 1. 
check hardware flag --- this is platform dependent code */ - retstatus = PyUFunc_getfperr(); - return PyUFunc_handlefperr(errmask, errobj, retstatus, first); -} - - -/* Checking the status flag clears it */ -/*UFUNC_API*/ -static void -PyUFunc_clearfperr() -{ - PyUFunc_getfperr(); -} - - -#define NO_UFUNCLOOP 0 -#define ZERO_EL_REDUCELOOP 0 -#define ONE_UFUNCLOOP 1 -#define ONE_EL_REDUCELOOP 1 -#define NOBUFFER_UFUNCLOOP 2 -#define NOBUFFER_REDUCELOOP 2 -#define BUFFER_UFUNCLOOP 3 -#define BUFFER_REDUCELOOP 3 -#define SIGNATURE_NOBUFFER_UFUNCLOOP 4 - - -static char -_lowest_type(char intype) -{ - switch(intype) { - /* case PyArray_BYTE */ - case PyArray_SHORT: - case PyArray_INT: - case PyArray_LONG: - case PyArray_LONGLONG: - return PyArray_BYTE; - /* case PyArray_UBYTE */ - case PyArray_USHORT: - case PyArray_UINT: - case PyArray_ULONG: - case PyArray_ULONGLONG: - return PyArray_UBYTE; - /* case PyArray_FLOAT:*/ - case PyArray_DOUBLE: - case PyArray_LONGDOUBLE: - return PyArray_FLOAT; - /* case PyArray_CFLOAT:*/ - case PyArray_CDOUBLE: - case PyArray_CLONGDOUBLE: - return PyArray_CFLOAT; - default: - return intype; - } -} - -static char *_types_msg = "function not supported for these types, " \ - "and can't coerce safely to supported types"; - -/* Called for non-NULL user-defined functions. - The object should be a CObject pointing to a linked-list of functions - storing the function, data, and signature of all user-defined functions. - There must be a match with the input argument types or an error - will occur. -*/ -static int -_find_matching_userloop(PyObject *obj, int *arg_types, - PyArray_SCALARKIND *scalars, - PyUFuncGenericFunction *function, void **data, - int nargs, int nin) -{ - PyUFunc_Loop1d *funcdata; - int i; - funcdata = (PyUFunc_Loop1d *)PyCObject_AsVoidPtr(obj); - while (funcdata != NULL) { - for(i=0; iarg_types[i], - scalars[i])) - break; - } - if (i==nin) { /* match found */ - *function = funcdata->func; - *data = funcdata->data; - /* Make sure actual arg_types supported - by the loop are used */ - for(i=0; iarg_types[i]; - } - return 0; - } - funcdata = funcdata->next; - } - return -1; -} - -/* if only one type is specified then it is the "first" output data-type - and the first signature matching this output data-type is returned. - - if a tuple of types is specified then an exact match to the signature - is searched and it much match exactly or an error occurs -*/ -static int -extract_specified_loop(PyUFuncObject *self, int *arg_types, - PyUFuncGenericFunction *function, void **data, - PyObject *type_tup, int userdef) -{ - Py_ssize_t n=1; - int *rtypenums; - static char msg[] = "loop written to specified type(s) not found"; - PyArray_Descr *dtype; - int nargs; - int i, j; - int strtype=0; - - nargs = self->nargs; - - if (PyTuple_Check(type_tup)) { - n = PyTuple_GET_SIZE(type_tup); - if (n != 1 && n != nargs) { - PyErr_Format(PyExc_ValueError, - "a type-tuple must be specified " \ - "of length 1 or %d for %s", nargs, - self->name ? self->name : "(unknown)"); - return -1; - } - } - else if PyString_Check(type_tup) { - Py_ssize_t slen; - char *thestr; - slen = PyString_GET_SIZE(type_tup); - thestr = PyString_AS_STRING(type_tup); - for(i=0; i < slen-2; i++) { - if (thestr[i] == '-' && thestr[i+1] == '>') - break; - } - if (i < slen-2) { - strtype = 1; - n = slen-2; - if (i != self->nin || - slen-2-i != self->nout) { - PyErr_Format(PyExc_ValueError, - "a type-string for %s, " \ - "requires %d typecode(s) before " \ - "and %d after the -> sign", - self->name ? 
self->name : "(unknown)", - self->nin, self->nout); - return -1; - } - } - } - rtypenums = (int *)_pya_malloc(n*sizeof(int)); - if (rtypenums==NULL) { - PyErr_NoMemory(); - return -1; - } - - if (strtype) { - char *ptr; - ptr = PyString_AS_STRING(type_tup); - i = 0; - while (i < n) { - if (*ptr == '-' || *ptr == '>') { - ptr++; - continue; - } - dtype = PyArray_DescrFromType((int) *ptr); - if (dtype == NULL) goto fail; - rtypenums[i] = dtype->type_num; - Py_DECREF(dtype); - ptr++; i++; - } - } - else if (PyTuple_Check(type_tup)) { - for(i=0; itype_num; - Py_DECREF(dtype); - } - } - else { - if (PyArray_DescrConverter(type_tup, &dtype) == NPY_FAIL) { - goto fail; - } - rtypenums[0] = dtype->type_num; - Py_DECREF(dtype); - } - - if (userdef > 0) { /* search in the user-defined functions */ - PyObject *key, *obj; - PyUFunc_Loop1d *funcdata; - obj = NULL; - key = PyInt_FromLong((long) userdef); - if (key == NULL) goto fail; - obj = PyDict_GetItem(self->userloops, key); - Py_DECREF(key); - if (obj == NULL) { - PyErr_SetString(PyExc_TypeError, - "user-defined type used in ufunc" \ - " with no registered loops"); - goto fail; - } - /* extract the correct function - data and argtypes - */ - funcdata = (PyUFunc_Loop1d *)PyCObject_AsVoidPtr(obj); - while (funcdata != NULL) { - if (n != 1) { - for(i=0; iarg_types[i]) - break; - } - } - else if (rtypenums[0] == funcdata->arg_types[self->nin]) { - i = nargs; - } - else i = -1; - if (i == nargs) { - *function = funcdata->func; - *data = funcdata->data; - for(i=0; iarg_types[i]; - } - Py_DECREF(obj); - goto finish; - } - funcdata = funcdata->next; - } - PyErr_SetString(PyExc_TypeError, msg); - goto fail; - } - - /* look for match in self->functions */ - - for(j=0; jntypes; j++) { - if (n != 1) { - for(i=0; itypes[j*nargs + i]) - break; - } - } - else if (rtypenums[0] == self->types[j*nargs+self->nin]) { - i = nargs; - } - else i = -1; - if (i == nargs) { - *function = self->functions[j]; - *data = self->data[j]; - for(i=0; itypes[j*nargs+i]; - } - goto finish; - } - } - PyErr_SetString(PyExc_TypeError, msg); - - - fail: - _pya_free(rtypenums); - return -1; - - finish: - _pya_free(rtypenums); - return 0; - -} - - -/* - * Called to determine coercion - * Can change arg_types. - */ - -static int -select_types(PyUFuncObject *self, int *arg_types, - PyUFuncGenericFunction *function, void **data, - PyArray_SCALARKIND *scalars, - PyObject *typetup) -{ - int i, j; - char start_type; - int userdef = -1; - int userdef_ind = -1; - - if (self->userloops) { - for(i = 0; i < self->nin; i++) { - if (PyTypeNum_ISUSERDEF(arg_types[i])) { - userdef = arg_types[i]; - userdef_ind = i; - break; - } - } - } - - if (typetup != NULL) - return extract_specified_loop(self, arg_types, function, data, - typetup, userdef); - - if (userdef > 0) { - PyObject *key, *obj; - int ret = -1; - obj = NULL; - - /* - * Look through all the registered loops for all the user-defined - * types to find a match. - */ - while (ret == -1) { - if (userdef_ind >= self->nin) { - break; - } - userdef = arg_types[userdef_ind++]; - if (!(PyTypeNum_ISUSERDEF(userdef))) { - continue; - } - key = PyInt_FromLong((long) userdef); - if (key == NULL) { - return -1; - } - obj = PyDict_GetItem(self->userloops, key); - Py_DECREF(key); - if (obj == NULL) { - continue; - } - /* - * extract the correct function - * data and argtypes for this user-defined type. 
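From Python, the per-loop signatures that this selection code searches are visible as ufunc.types, and the scalar-kind rule is why an int scalar does not upcast a float32 array; a small sketch (the exact list depends on the build):

    import numpy as np

    print(np.add.types[:4])      # e.g. ['??->?', 'bb->b', 'BB->B', 'hh->h']

    a = np.float32([1, 2, 3])
    print((a + 3).dtype)         # float32: the scalar 3 is coerced, not the array
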
- */ - ret = _find_matching_userloop(obj, arg_types, scalars, - function, data, self->nargs, - self->nin); - } - if (ret == 0) { - return ret; - } - PyErr_SetString(PyExc_TypeError, _types_msg); - return ret; - } - - start_type = arg_types[0]; - /* - * If the first argument is a scalar we need to place - * the start type as the lowest type in the class - */ - if (scalars[0] != PyArray_NOSCALAR) { - start_type = _lowest_type(start_type); - } - - i = 0; - while (i < self->ntypes && start_type > self->types[i*self->nargs]) { - i++; - } - for (; i < self->ntypes; i++) { - for (j = 0; j < self->nin; j++) { - if (!PyArray_CanCoerceScalar(arg_types[j], - self->types[i*self->nargs + j], - scalars[j])) - break; - } - if (j == self->nin) { - break; - } - } - if (i >= self->ntypes) { - PyErr_SetString(PyExc_TypeError, _types_msg); - return -1; - } - for (j = 0; j < self->nargs; j++) { - arg_types[j] = self->types[i*self->nargs+j]; - } - if (self->data) { - *data = self->data[i]; - } - else { - *data = NULL; - } - *function = self->functions[i]; - - return 0; -} - -#if USE_USE_DEFAULTS==1 -static int PyUFunc_NUM_NODEFAULTS=0; -#endif -static PyObject *PyUFunc_PYVALS_NAME=NULL; - - -static int -_extract_pyvals(PyObject *ref, char *name, int *bufsize, - int *errmask, PyObject **errobj) -{ - PyObject *retval; - - *errobj = NULL; - if (!PyList_Check(ref) || (PyList_GET_SIZE(ref)!=3)) { - PyErr_Format(PyExc_TypeError, "%s must be a length 3 list.", - UFUNC_PYVALS_NAME); - return -1; - } - - *bufsize = PyInt_AsLong(PyList_GET_ITEM(ref, 0)); - if ((*bufsize == -1) && PyErr_Occurred()) { - return -1; - } - if ((*bufsize < PyArray_MIN_BUFSIZE) || - (*bufsize > PyArray_MAX_BUFSIZE) || - (*bufsize % 16 != 0)) { - PyErr_Format(PyExc_ValueError, - "buffer size (%d) is not in range " - "(%"INTP_FMT" - %"INTP_FMT") or not a multiple of 16", - *bufsize, (intp) PyArray_MIN_BUFSIZE, - (intp) PyArray_MAX_BUFSIZE); - return -1; - } - - *errmask = PyInt_AsLong(PyList_GET_ITEM(ref, 1)); - if (*errmask < 0) { - if (PyErr_Occurred()) { - return -1; - } - PyErr_Format(PyExc_ValueError, - "invalid error mask (%d)", - *errmask); - return -1; - } - - retval = PyList_GET_ITEM(ref, 2); - if (retval != Py_None && !PyCallable_Check(retval)) { - PyObject *temp; - temp = PyObject_GetAttrString(retval, "write"); - if (temp == NULL || !PyCallable_Check(temp)) { - PyErr_SetString(PyExc_TypeError, - "python object must be callable or have " \ - "a callable write method"); - Py_XDECREF(temp); - return -1; - } - Py_DECREF(temp); - } - - *errobj = Py_BuildValue("NO", - PyString_FromString(name), - retval); - if (*errobj == NULL) { - return -1; - } - - return 0; -} - - - -/*UFUNC_API*/ -static int -PyUFunc_GetPyValues(char *name, int *bufsize, int *errmask, PyObject **errobj) -{ - PyObject *thedict; - PyObject *ref = NULL; - -#if USE_USE_DEFAULTS==1 - if (PyUFunc_NUM_NODEFAULTS != 0) { -#endif - if (PyUFunc_PYVALS_NAME == NULL) { - PyUFunc_PYVALS_NAME = \ - PyString_InternFromString(UFUNC_PYVALS_NAME); - } - thedict = PyThreadState_GetDict(); - if (thedict == NULL) { - thedict = PyEval_GetBuiltins(); - } - ref = PyDict_GetItem(thedict, PyUFunc_PYVALS_NAME); -#if USE_USE_DEFAULTS==1 - } -#endif - if (ref == NULL) { - *errmask = UFUNC_ERR_DEFAULT; - *errobj = Py_BuildValue("NO", - PyString_FromString(name), - Py_None); - *bufsize = PyArray_BUFSIZE; - return 0; - } - return _extract_pyvals(ref, name, bufsize, errmask, errobj); -} - -/* Create copies for any arrays that are less than loop->bufsize - in total size (or core_enabled) and are 
mis-behaved or in need - of casting. -*/ - -static int -_create_copies(PyUFuncLoopObject *loop, int *arg_types, PyArrayObject **mps) -{ - int nin = loop->ufunc->nin; - int i; - intp size; - PyObject *new; - PyArray_Descr *ntype; - PyArray_Descr *atype; - - for(i=0; idescr; - atype = PyArray_DescrFromType(arg_types[i]); - if (PyArray_EquivTypes(atype, ntype)) { - arg_types[i] = ntype->type_num; - } - Py_DECREF(atype); - } - if (size < loop->bufsize || loop->ufunc->core_enabled) { - if (!(PyArray_ISBEHAVED_RO(mps[i])) || \ - PyArray_TYPE(mps[i]) != arg_types[i]) { - ntype = PyArray_DescrFromType(arg_types[i]); - new = PyArray_FromAny((PyObject *)mps[i], - ntype, 0, 0, - FORCECAST | ALIGNED, NULL); - if (new == NULL) return -1; - Py_DECREF(mps[i]); - mps[i] = (PyArrayObject *)new; - } - } - } - - return 0; -} - -#define _GETATTR_(str, rstr) do {if (strcmp(name, #str) == 0) \ - return PyObject_HasAttrString(op, "__" #rstr "__");} while (0); - -static int -_has_reflected_op(PyObject *op, char *name) -{ - _GETATTR_(add, radd); - _GETATTR_(subtract, rsub); - _GETATTR_(multiply, rmul); - _GETATTR_(divide, rdiv); - _GETATTR_(true_divide, rtruediv); - _GETATTR_(floor_divide, rfloordiv); - _GETATTR_(remainder, rmod); - _GETATTR_(power, rpow); - _GETATTR_(left_shift, rlshift); - _GETATTR_(right_shift, rrshift); - _GETATTR_(bitwise_and, rand); - _GETATTR_(bitwise_xor, rxor); - _GETATTR_(bitwise_or, ror); - return 0; -} - -#undef _GETATTR_ - - -/* Return the position of next non-white-space char in the string -*/ -static int -_next_non_white_space(const char* str, int offset) -{ - int ret = offset; - while (str[ret] == ' ' || str[ret] == '\t') ret++; - return ret; -} - -static int -_is_alpha_underscore(char ch) -{ - return (ch >= 'A' && ch <= 'Z') || (ch >= 'a' && ch <= 'z') || ch == '_'; -} - -static int -_is_alnum_underscore(char ch) -{ - return _is_alpha_underscore(ch) || (ch >= '0' && ch <= '9'); -} - -/* Return the ending position of a variable name -*/ -static int -_get_end_of_name(const char* str, int offset) -{ - int ret = offset; - while (_is_alnum_underscore(str[ret])) ret++; - return ret; -} - -/* Returns 1 if the dimension names pointed by s1 and s2 are the same, - otherwise returns 0. -*/ -static int -_is_same_name(const char* s1, const char* s2) -{ - while (_is_alnum_underscore(*s1) && _is_alnum_underscore(*s2)) { - if (*s1 != *s2) return 0; - s1++; - s2++; - } - return !_is_alnum_underscore(*s1) && !_is_alnum_underscore(*s2); -} - -/* Sets core_num_dim_ix, core_num_dims, core_dim_ixs, core_offsets, - and core_signature in PyUFuncObject "self". Returns 0 unless an - error occured. 
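The signature strings handled here are the generalized-ufunc notation such as "(i,j),(j,k)->(i,k)"; the following toy Python parser is not the C routine, just a sketch of the same idea of collecting named core dimensions per argument:

    import re

    def parse_gufunc_signature(sig):
        # Split "(i,j),(j,k)->(i,k)" into per-argument tuples of core
        # dimension names, roughly the information _parse_signature stores
        # in core_num_dims and core_dim_ixs.
        sig = sig.replace(' ', '')
        ins, outs = sig.split('->')
        grab = lambda part: [tuple(d for d in arg.split(',') if d)
                             for arg in re.findall(r'\(([^)]*)\)', part)]
        return grab(ins), grab(outs)

    print(parse_gufunc_signature('(i,j),(j,k)->(i,k)'))
    # ([('i', 'j'), ('j', 'k')], [('i', 'k')])
    print(parse_gufunc_signature('(),()->()'))
    # ([(), ()], [()])  -- the trivial core signature
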
-*/ -static int -_parse_signature(PyUFuncObject *self, const char *signature) -{ - size_t len; - char const **var_names; - int nd = 0; /* number of dimension of the current argument */ - int cur_arg = 0; /* index into core_num_dims&core_offsets */ - int cur_core_dim = 0; /* index into core_dim_ixs */ - int i = 0; - char *parse_error = NULL; - - if (signature == NULL) { - PyErr_SetString(PyExc_RuntimeError, - "_parse_signature with NULL signature"); - return -1; - } - - len = strlen(signature); - self->core_signature = _pya_malloc(sizeof(char) * (len+1)); - if (self->core_signature) - strcpy(self->core_signature, signature); - - /* Allocate sufficient memory to store pointers to all dimension names */ - var_names = _pya_malloc(sizeof(char const*) * len); - if (var_names == NULL) { - PyErr_NoMemory(); - return -1; - } - - self->core_enabled = 1; - self->core_num_dim_ix = 0; - self->core_num_dims = _pya_malloc(sizeof(int) * self->nargs); - self->core_dim_ixs = _pya_malloc(sizeof(int) * len); /* shrink this later */ - self->core_offsets = _pya_malloc(sizeof(int) * self->nargs); - if (self->core_num_dims == NULL || self->core_dim_ixs == NULL || - self->core_offsets == NULL) { - PyErr_NoMemory(); - goto fail; - } - - i = _next_non_white_space(signature, 0); - - while (signature[i] != '\0') { /* loop over input/output arguments */ - if (cur_arg == self->nin) { - /* expect "->" */ - if (signature[i] != '-' || signature[i+1] != '>') { - parse_error = "expect '->'"; - goto fail; - } - i = _next_non_white_space(signature, i+2); - } - - /* parse core dimensions of one argument, e.g. "()", "(i)", or - "(i,j)" */ - if (signature[i] != '(') { - parse_error = "expect '('"; - goto fail; - } - i = _next_non_white_space(signature, i+1); - while (signature[i] != ')') { /* loop over core dimensions */ - int j = 0; - if (!_is_alpha_underscore(signature[i])) { - parse_error = "expect dimension name"; - goto fail; - } - while (j < self->core_num_dim_ix) { - if (_is_same_name(signature+i, var_names[j])) break; - j++; - } - if (j >= self->core_num_dim_ix) { - var_names[j] = signature+i; - self->core_num_dim_ix++; - } - self->core_dim_ixs[cur_core_dim] = j; - cur_core_dim++; - nd++; - i = _get_end_of_name(signature, i); - i = _next_non_white_space(signature, i); - if (signature[i] != ',' && signature[i] != ')') { - parse_error = "expect ',' or ')'"; - goto fail; - } - if (signature[i] == ',') - { - i = _next_non_white_space(signature, i+1); - if (signature[i] == ')') { - parse_error = "',' must not be followed by ')'"; - goto fail; - } - } - } - self->core_num_dims[cur_arg] = nd; - self->core_offsets[cur_arg] = cur_core_dim-nd; - cur_arg++; - nd = 0; - i = _next_non_white_space(signature, i+1); - - if (cur_arg != self->nin && cur_arg != self->nargs) { - /* The list of input arguments (or output arguments) was - only read partially */ - if (signature[i] != ',') { - parse_error = "expect ','"; - goto fail; - } - i = _next_non_white_space(signature, i+1); - } - } - if (cur_arg != self->nargs) { - parse_error = "incomplete signature: not all arguments found"; - goto fail; - } - self->core_dim_ixs = _pya_realloc(self->core_dim_ixs, - sizeof(int) * cur_core_dim); - /* check for trivial core-signature, e.g. 
"(),()->()" */ - if (cur_core_dim == 0) - self->core_enabled = 0; - _pya_free((void*)var_names); - return 0; -fail: - _pya_free((void*)var_names); - if (parse_error) { - char *buf = _pya_malloc(sizeof(char) * (len + 200)); - if (buf) { - sprintf(buf, "%s at position %d in \"%s\"", - parse_error, i, signature); - PyErr_SetString(PyExc_ValueError, signature); - _pya_free(buf); - } - else { - PyErr_NoMemory(); - } - } - return -1; -} - -/* Concatenate the loop and core dimensions of - PyArrayMultiIterObject's iarg-th argument, to recover a full - dimension array (used for output arguments). -*/ -static npy_intp* -_compute_output_dims(PyUFuncLoopObject *loop, int iarg, - int *out_nd, npy_intp *tmp_dims) -{ - int i; - PyUFuncObject *ufunc = loop->ufunc; - if (ufunc->core_enabled == 0) { - /* case of ufunc with trivial core-signature */ - *out_nd = loop->nd; - return loop->dimensions; - } - - *out_nd = loop->nd + ufunc->core_num_dims[iarg]; - if (*out_nd > NPY_MAXARGS) { - PyErr_SetString(PyExc_ValueError, - "dimension of output variable exceeds limit"); - return NULL; - } - - /* copy loop dimensions */ - memcpy(tmp_dims, loop->dimensions, sizeof(npy_intp) * loop->nd); - - /* copy core dimension */ - for (i = 0; i < ufunc->core_num_dims[iarg]; i++) - tmp_dims[loop->nd + i] = loop->core_dim_sizes[1 + - ufunc->core_dim_ixs[ufunc->core_offsets[iarg]+i]]; - return tmp_dims; -} - -/* Check and set core_dim_sizes and core_strides for the i-th argument. -*/ -static int -_compute_dimension_size(PyUFuncLoopObject *loop, PyArrayObject **mps, int i) -{ - PyUFuncObject *ufunc = loop->ufunc; - int j = ufunc->core_offsets[i]; - int k = PyArray_NDIM(mps[i]) - ufunc->core_num_dims[i]; - int ind; - for (ind = 0; ind < ufunc->core_num_dims[i]; ind++, j++, k++) { - npy_intp dim = k<0 ? 1 : PyArray_DIM(mps[i], k); - /* First element of core_dim_sizes will be used for looping */ - int dim_ix = ufunc->core_dim_ixs[j] + 1; - if (loop->core_dim_sizes[dim_ix] == 1) { - /* broadcast core dimension */ - loop->core_dim_sizes[dim_ix] = dim; - } - else if (dim != 1 && dim != loop->core_dim_sizes[dim_ix]) { - PyErr_SetString(PyExc_ValueError, - "core dimensions mismatch"); - return -1; - } - /* First ufunc->nargs elements will be used for looping */ - loop->core_strides[ufunc->nargs + j] = - dim == 1 ? 0 : PyArray_STRIDE(mps[i], k); - } - return 0; -} - -/* Return a view of array "ap" with "core_nd" dimensions cut from tail. 
*/ -static PyArrayObject * -_trunc_coredim(PyArrayObject *ap, int core_nd) -{ - PyArrayObject *ret; - int nd = ap->nd - core_nd; - if (nd < 0) nd = 0; - - /* The following code is basically taken from PyArray_Transpose */ - Py_INCREF(ap->descr); /* NewFromDescr will steal this reference */ - ret = (PyArrayObject *) - PyArray_NewFromDescr(ap->ob_type, ap->descr, - nd, ap->dimensions, - ap->strides, ap->data, ap->flags, - (PyObject *)ap); - if (ret == NULL) return NULL; - - /* point at true owner of memory: */ - ret->base = (PyObject *)ap; - Py_INCREF(ap); - - PyArray_UpdateFlags(ret, CONTIGUOUS | FORTRAN); - - return ret; -} - -static Py_ssize_t -construct_arrays(PyUFuncLoopObject *loop, PyObject *args, PyArrayObject **mps, - PyObject *typetup) -{ - Py_ssize_t nargs; - int i; - int arg_types[NPY_MAXARGS]; - PyArray_SCALARKIND scalars[NPY_MAXARGS]; - PyArray_SCALARKIND maxarrkind, maxsckind, new; - PyUFuncObject *self = loop->ufunc; - Bool allscalars = TRUE; - PyTypeObject *subtype = &PyArray_Type; - PyObject *context = NULL; - PyObject *obj; - int flexible = 0; - int object = 0; - - npy_intp temp_dims[NPY_MAXDIMS]; - npy_intp *out_dims; - int out_nd; - - /* Check number of arguments */ - nargs = PyTuple_Size(args); - if ((nargs < self->nin) || (nargs > self->nargs)) { - PyErr_SetString(PyExc_ValueError, - "invalid number of arguments"); - return -1; - } - - /* Get each input argument */ - maxarrkind = PyArray_NOSCALAR; - maxsckind = PyArray_NOSCALAR; - for(i = 0; i < self->nin; i++) { - obj = PyTuple_GET_ITEM(args,i); - if (!PyArray_Check(obj) && !PyArray_IsScalar(obj, Generic)) { - context = Py_BuildValue("OOi", self, args, i); - } - else { - context = NULL; - } - mps[i] = (PyArrayObject *)PyArray_FromAny(obj, NULL, 0, 0, 0, context); - Py_XDECREF(context); - if (mps[i] == NULL) { - return -1; - } - arg_types[i] = PyArray_TYPE(mps[i]); - if (!flexible && PyTypeNum_ISFLEXIBLE(arg_types[i])) { - flexible = 1; - } - if (!object && PyTypeNum_ISOBJECT(arg_types[i])) { - object = 1; - } - /* debug - * fprintf(stderr, "array %d has reference %d\n", i, - * (mps[i])->ob_refcnt); - */ - - /* - * Scalars are 0-dimensional arrays at this point - */ - - /* - * We need to keep track of whether or not scalars - * are mixed with arrays of different kinds. - */ - - if (mps[i]->nd > 0) { - scalars[i] = PyArray_NOSCALAR; - allscalars = FALSE; - new = PyArray_ScalarKind(arg_types[i], NULL); - maxarrkind = NPY_MAX(new, maxarrkind); - } - else { - scalars[i] = PyArray_ScalarKind(arg_types[i], &(mps[i])); - maxsckind = NPY_MAX(scalars[i], maxsckind); - } - } - - /* We don't do strings */ - if (flexible && !object) { - loop->notimplemented = 1; - return nargs; - } - - /* - * If everything is a scalar, or scalars mixed with arrays of - * different kinds of lesser kinds then use normal coercion rules - */ - if (allscalars || (maxsckind > maxarrkind)) { - for(i = 0; i < self->nin; i++) { - scalars[i] = PyArray_NOSCALAR; - } - } - - /* Select an appropriate function for these argument types. */ - if (select_types(loop->ufunc, arg_types, &(loop->function), - &(loop->funcdata), scalars, typetup) == -1) - return -1; - - /* - * FAIL with NotImplemented if the other object has - * the __r__ method and has __array_priority__ as - * an attribute (signalling it can handle ndarray's) - * and is not already an ndarray or a subtype of the same type. 
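The __array_priority__ plus reflected-method check described above is how a non-array operand got to win in this era; the modern user-facing equivalent is to set __array_ufunc__ = None, as in this hypothetical sketch (class name and return value are invented):

    import numpy as np

    class Box(object):
        # Opting out of NumPy's ufunc handling makes ndarray.__add__ return
        # NotImplemented, so Python falls back to the reflected method --
        # the same outcome the check on __array_priority__ and __radd__
        # was after.
        __array_ufunc__ = None
        __array_priority__ = 20.0

        def __radd__(self, other):
            return "Box handled the addition"

    print(np.arange(3) + Box())     # Box handled the addition
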
- */ - if ((arg_types[1] == PyArray_OBJECT) && \ - (loop->ufunc->nin==2) && (loop->ufunc->nout == 1)) { - PyObject *_obj = PyTuple_GET_ITEM(args, 1); - if (!PyArray_CheckExact(_obj) && - /* If both are same subtype of object arrays, then proceed */ - !(_obj->ob_type == (PyTuple_GET_ITEM(args, 0))->ob_type) && \ - - PyObject_HasAttrString(_obj, "__array_priority__") && \ - _has_reflected_op(_obj, loop->ufunc->name)) { - loop->notimplemented = 1; - return nargs; - } - } - - /* - * Create copies for some of the arrays if they are small - * enough and not already contiguous - */ - if (_create_copies(loop, arg_types, mps) < 0) { - return -1; - } - - /* Only use loop dimensions when constructing Iterator: - * temporarily replace mps[i] (will be recovered below). - */ - if (self->core_enabled) { - for (i = 0; i < self->nin; i++) { - PyArrayObject *ao; - - if (_compute_dimension_size(loop, mps, i) < 0) - return -1; - - ao = _trunc_coredim(mps[i], self->core_num_dims[i]); - if (ao == NULL) - return -1; - mps[i] = ao; - } - } - - /* Create Iterators for the Inputs */ - for(i = 0; i < self->nin; i++) { - loop->iters[i] = (PyArrayIterObject *) \ - PyArray_IterNew((PyObject *)mps[i]); - if (loop->iters[i] == NULL) { - return -1; - } - } - - - /* Recover mps[i]. */ - if (self->core_enabled) { - for (i = 0; i < self->nin; i++) { - PyArrayObject *ao = mps[i]; - mps[i] = (PyArrayObject *)mps[i]->base; - Py_DECREF(ao); - } - } - - /* Broadcast the result */ - loop->numiter = self->nin; - if (PyArray_Broadcast((PyArrayMultiIterObject *)loop) < 0) { - return -1; - } - - /* Get any return arguments */ - for(i = self->nin; i < nargs; i++) { - mps[i] = (PyArrayObject *)PyTuple_GET_ITEM(args, i); - if (((PyObject *)mps[i])==Py_None) { - mps[i] = NULL; - continue; - } - Py_INCREF(mps[i]); - if (!PyArray_Check((PyObject *)mps[i])) { - PyObject *new; - if (PyArrayIter_Check(mps[i])) { - new = PyObject_CallMethod((PyObject *)mps[i], - "__array__", NULL); - Py_DECREF(mps[i]); - mps[i] = (PyArrayObject *)new; - } - else { - PyErr_SetString(PyExc_TypeError, - "return arrays must be "\ - "of ArrayType"); - Py_DECREF(mps[i]); - mps[i] = NULL; - return -1; - } - } - - - if (self->core_enabled) { - if (_compute_dimension_size(loop, mps, i) < 0) - return -1; - } - out_dims = _compute_output_dims(loop, i, &out_nd, temp_dims); - if (!out_dims) return -1; - - if (mps[i]->nd != out_nd || - !PyArray_CompareLists(mps[i]->dimensions, - out_dims, out_nd)) { - PyErr_SetString(PyExc_ValueError, - "invalid return array shape"); - Py_DECREF(mps[i]); - mps[i] = NULL; - return -1; - } - if (!PyArray_ISWRITEABLE(mps[i])) { - PyErr_SetString(PyExc_ValueError, - "return array is not writeable"); - Py_DECREF(mps[i]); - mps[i] = NULL; - return -1; - } - } - - /* construct any missing return arrays and make output iterators */ - for(i = self->nin; i < self->nargs; i++) { - PyArray_Descr *ntype; - - if (mps[i] == NULL) { - out_dims = _compute_output_dims(loop, i, &out_nd, temp_dims); - if (!out_dims) return -1; - - mps[i] = (PyArrayObject *)PyArray_New(subtype, - out_nd, - out_dims, - arg_types[i], - NULL, NULL, - 0, 0, NULL); - if (mps[i] == NULL) { - return -1; - } - } - - /* - * reset types for outputs that are equivalent - * -- no sense casting uselessly - */ - else { - if (mps[i]->descr->type_num != arg_types[i]) { - PyArray_Descr *atype; - ntype = mps[i]->descr; - atype = PyArray_DescrFromType(arg_types[i]); - if (PyArray_EquivTypes(atype, ntype)) { - arg_types[i] = ntype->type_num; - } - Py_DECREF(atype); - } - - /* still not the same -- 
or will we have to use buffers?*/ - if (mps[i]->descr->type_num != arg_types[i] || - !PyArray_ISBEHAVED_RO(mps[i])) { - if (loop->size < loop->bufsize || self->core_enabled) { - PyObject *new; - /* - * Copy the array to a temporary copy - * and set the UPDATEIFCOPY flag - */ - ntype = PyArray_DescrFromType(arg_types[i]); - new = PyArray_FromAny((PyObject *)mps[i], - ntype, 0, 0, - FORCECAST | ALIGNED | - UPDATEIFCOPY, NULL); - if (new == NULL) { - return -1; - } - Py_DECREF(mps[i]); - mps[i] = (PyArrayObject *)new; - } - } - } - - if (self->core_enabled) { - PyArrayObject *ao; - - /* computer for all output arguments, and set strides in "loop" */ - if (_compute_dimension_size(loop, mps, i) < 0) - return -1; - - ao = _trunc_coredim(mps[i], self->core_num_dims[i]); - if (ao == NULL) - return -1; - /* Temporarily modify mps[i] for constructing iterator. */ - mps[i] = ao; - } - - loop->iters[i] = (PyArrayIterObject *) \ - PyArray_IterNew((PyObject *)mps[i]); - if (loop->iters[i] == NULL) { - return -1; - } - - /* Recover mps[i]. */ - if (self->core_enabled) { - PyArrayObject *ao = mps[i]; - mps[i] = (PyArrayObject *)mps[i]->base; - Py_DECREF(ao); - } - - } - - /* - * If any of different type, or misaligned or swapped - * then must use buffers - */ - loop->bufcnt = 0; - loop->obj = 0; - - /* Determine looping method needed */ - loop->meth = NO_UFUNCLOOP; - - if (loop->size == 0) { - return nargs; - } - - if (self->core_enabled) { - loop->meth = SIGNATURE_NOBUFFER_UFUNCLOOP; - } - - for(i = 0; i < self->nargs; i++) { - loop->needbuffer[i] = 0; - if (arg_types[i] != mps[i]->descr->type_num || - !PyArray_ISBEHAVED_RO(mps[i])) { - if (self->core_enabled) { - PyErr_SetString(PyExc_RuntimeError, - "never reached; copy should have been made"); - return -1; - } - loop->meth = BUFFER_UFUNCLOOP; - loop->needbuffer[i] = 1; - } - if (!loop->obj && ((mps[i]->descr->type_num == PyArray_OBJECT) || - (arg_types[i] == PyArray_OBJECT))) { - loop->obj = 1; - } - } - - - if (self->core_enabled && loop->obj) { - PyErr_SetString(PyExc_TypeError, - "Object type not allowed in ufunc with signature"); - return -1; - } - - if (loop->meth == NO_UFUNCLOOP) { - loop->meth = ONE_UFUNCLOOP; - - /* All correct type and BEHAVED */ - /* Check for non-uniform stridedness */ - for(i = 0; i < self->nargs; i++) { - if (!(loop->iters[i]->contiguous)) { - /* - * May still have uniform stride - * if (broadcast result) <= 1-d - */ - if (mps[i]->nd != 0 && \ - (loop->iters[i]->nd_m1 > 0)) { - loop->meth = NOBUFFER_UFUNCLOOP; - break; - } - } - } - if (loop->meth == ONE_UFUNCLOOP) { - for(i = 0; i < self->nargs; i++) { - loop->bufptr[i] = mps[i]->data; - } - } - } - - loop->numiter = self->nargs; - - /* Fill in steps */ - if (loop->meth == SIGNATURE_NOBUFFER_UFUNCLOOP && loop->nd == 0) { - /* Use default core_strides */ - } - else if (loop->meth != ONE_UFUNCLOOP) { - int ldim; - intp minsum; - intp maxdim; - PyArrayIterObject *it; - intp stride_sum[NPY_MAXDIMS]; - int j; - - /* Fix iterators */ - - /* - * Optimize axis the iteration takes place over - * - * The first thought was to have the loop go - * over the largest dimension to minimize the number of loops - * - * However, on processors with slow memory bus and cache, - * the slowest loops occur when the memory access occurs for - * large strides. - * - * Thus, choose the axis for which strides of the last iterator is - * smallest but non-zero. 
- */ - - for(i = 0; i < loop->nd; i++) { - stride_sum[i] = 0; - for(j = 0; j < loop->numiter; j++) { - stride_sum[i] += loop->iters[j]->strides[i]; - } - } - - ldim = loop->nd - 1; - minsum = stride_sum[loop->nd-1]; - for(i = loop->nd - 2; i >= 0; i--) { - if (stride_sum[i] < minsum ) { - ldim = i; - minsum = stride_sum[i]; - } - } - - maxdim = loop->dimensions[ldim]; - loop->size /= maxdim; - loop->bufcnt = maxdim; - loop->lastdim = ldim; - - /* - * Fix the iterators so the inner loop occurs over the - * largest dimensions -- This can be done by - * setting the size to 1 in that dimension - * (just in the iterators) - */ - for(i = 0; i < loop->numiter; i++) { - it = loop->iters[i]; - it->contiguous = 0; - it->size /= (it->dims_m1[ldim]+1); - it->dims_m1[ldim] = 0; - it->backstrides[ldim] = 0; - - /* - * (won't fix factors because we - * don't use PyArray_ITER_GOTO1D - * so don't change them) - * - * Set the steps to the strides in that dimension - */ - loop->steps[i] = it->strides[ldim]; - } - - /* - * Set looping part of core_dim_sizes and core_strides. - */ - if (loop->meth == SIGNATURE_NOBUFFER_UFUNCLOOP) { - loop->core_dim_sizes[0] = maxdim; - for (i = 0; i < self->nargs; i++) { - loop->core_strides[i] = loop->steps[i]; - } - } - - /* - * fix up steps where we will be copying data to - * buffers and calculate the ninnerloops and leftover - * values -- if step size is already zero that is not changed... - */ - if (loop->meth == BUFFER_UFUNCLOOP) { - loop->leftover = maxdim % loop->bufsize; - loop->ninnerloops = (maxdim / loop->bufsize) + 1; - for(i = 0; i < self->nargs; i++) { - if (loop->needbuffer[i] && loop->steps[i]) { - loop->steps[i] = mps[i]->descr->elsize; - } - /* These are changed later if casting is needed */ - } - } - } - else if (loop->meth == ONE_UFUNCLOOP) { - /* uniformly-strided case */ - for(i = 0; i < self->nargs; i++) { - if (PyArray_SIZE(mps[i]) == 1) - loop->steps[i] = 0; - else - loop->steps[i] = mps[i]->strides[mps[i]->nd-1]; - } - } - - - /* Finally, create memory for buffers if we need them */ - - /* - * Buffers for scalars are specially made small -- scalars are - * not copied multiple times - */ - if (loop->meth == BUFFER_UFUNCLOOP) { - int cnt = 0, cntcast = 0; /* keeps track of bytes to allocate */ - int scnt = 0, scntcast = 0; - char *castptr; - char *bufptr; - int last_was_scalar=0; - int last_cast_was_scalar=0; - int oldbufsize=0; - int oldsize=0; - int scbufsize = 4*sizeof(double); - int memsize; - PyArray_Descr *descr; - - /* compute the element size */ - for(i = 0; i < self->nargs; i++) { - if (!loop->needbuffer[i]) { - continue; - } - if (arg_types[i] != mps[i]->descr->type_num) { - descr = PyArray_DescrFromType(arg_types[i]); - if (loop->steps[i]) { - cntcast += descr->elsize; - } - else { - scntcast += descr->elsize; - } - if (i < self->nin) { - loop->cast[i] = PyArray_GetCastFunc(mps[i]->descr, - arg_types[i]); - } - else { - loop->cast[i] = PyArray_GetCastFunc \ - (descr, mps[i]->descr->type_num); - } - Py_DECREF(descr); - if (!loop->cast[i]) { - return -1; - } - } - loop->swap[i] = !(PyArray_ISNOTSWAPPED(mps[i])); - if (loop->steps[i]) { - cnt += mps[i]->descr->elsize; - } - else { - scnt += mps[i]->descr->elsize; - } - } - memsize = loop->bufsize*(cnt+cntcast) + scbufsize*(scnt+scntcast); - loop->buffer[0] = PyDataMem_NEW(memsize); - - /* debug - * fprintf(stderr, "Allocated buffer at %p of size %d, cnt=%d, cntcast=%d\n", - * loop->buffer[0], loop->bufsize * (cnt + cntcast), cnt, cntcast); - */ - - if (loop->buffer[0] == NULL) { - 
PyErr_NoMemory(); - return -1; - } - if (loop->obj) { - memset(loop->buffer[0], 0, memsize); - } - castptr = loop->buffer[0] + loop->bufsize*cnt + scbufsize*scnt; - bufptr = loop->buffer[0]; - loop->objfunc = 0; - for(i = 0; i < self->nargs; i++) { - if (!loop->needbuffer[i]) { - continue; - } - loop->buffer[i] = bufptr + (last_was_scalar ? scbufsize : \ - loop->bufsize)*oldbufsize; - last_was_scalar = (loop->steps[i] == 0); - bufptr = loop->buffer[i]; - oldbufsize = mps[i]->descr->elsize; - /* fprintf(stderr, "buffer[%d] = %p\n", i, loop->buffer[i]); */ - if (loop->cast[i]) { - PyArray_Descr *descr; - loop->castbuf[i] = castptr + (last_cast_was_scalar ? scbufsize : \ - loop->bufsize)*oldsize; - last_cast_was_scalar = last_was_scalar; - /* fprintf(stderr, "castbuf[%d] = %p\n", i, loop->castbuf[i]); */ - descr = PyArray_DescrFromType(arg_types[i]); - oldsize = descr->elsize; - Py_DECREF(descr); - loop->bufptr[i] = loop->castbuf[i]; - castptr = loop->castbuf[i]; - if (loop->steps[i]) - loop->steps[i] = oldsize; - } - else { - loop->bufptr[i] = loop->buffer[i]; - } - if (!loop->objfunc && loop->obj) { - if (arg_types[i] == PyArray_OBJECT) { - loop->objfunc = 1; - } - } - } - } - return nargs; -} - -static void -ufuncreduce_dealloc(PyUFuncReduceObject *self) -{ - if (self->ufunc) { - Py_XDECREF(self->it); - Py_XDECREF(self->rit); - Py_XDECREF(self->ret); - Py_XDECREF(self->errobj); - Py_XDECREF(self->decref); - if (self->buffer) PyDataMem_FREE(self->buffer); - Py_DECREF(self->ufunc); - } - _pya_free(self); -} - -static void -ufuncloop_dealloc(PyUFuncLoopObject *self) -{ - int i; - - if (self->ufunc != NULL) { - if (self->core_dim_sizes) - _pya_free(self->core_dim_sizes); - if (self->core_strides) - _pya_free(self->core_strides); - for(i = 0; i < self->ufunc->nargs; i++) - Py_XDECREF(self->iters[i]); - if (self->buffer[0]) { - PyDataMem_FREE(self->buffer[0]); - } - Py_XDECREF(self->errobj); - Py_DECREF(self->ufunc); - } - _pya_free(self); -} - -static PyUFuncLoopObject * -construct_loop(PyUFuncObject *self, PyObject *args, PyObject *kwds, PyArrayObject **mps) -{ - PyUFuncLoopObject *loop; - int i; - PyObject *typetup = NULL; - PyObject *extobj = NULL; - char *name; - - if (self == NULL) { - PyErr_SetString(PyExc_ValueError, "function not supported"); - return NULL; - } - if ((loop = _pya_malloc(sizeof(PyUFuncLoopObject))) == NULL) { - PyErr_NoMemory(); - return loop; - } - - loop->index = 0; - loop->ufunc = self; - Py_INCREF(self); - loop->buffer[0] = NULL; - for(i = 0; i < self->nargs; i++) { - loop->iters[i] = NULL; - loop->cast[i] = NULL; - } - loop->errobj = NULL; - loop->notimplemented = 0; - loop->first = 1; - loop->core_dim_sizes = NULL; - loop->core_strides = NULL; - - if (self->core_enabled) { - int num_dim_ix = 1 + self->core_num_dim_ix; - int nstrides = self->nargs + self->core_offsets[self->nargs-1] - + self->core_num_dims[self->nargs-1]; - loop->core_dim_sizes = _pya_malloc(sizeof(npy_intp) * num_dim_ix); - loop->core_strides = _pya_malloc(sizeof(npy_intp) * nstrides); - if (loop->core_dim_sizes == NULL || loop->core_strides == NULL) { - PyErr_NoMemory(); - goto fail; - } - memset(loop->core_strides, 0, sizeof(npy_intp) * nstrides); - for (i = 0; i < num_dim_ix; i++) - loop->core_dim_sizes[i] = 1; - } - - name = self->name ? self->name : ""; - - /* - * Extract sig= keyword and extobj= keyword if present. 
- * Raise an error if anything else is present in the - * keyword dictionary - */ - if (kwds != NULL) { - PyObject *key, *value; - Py_ssize_t pos=0; - while (PyDict_Next(kwds, &pos, &key, &value)) { - char *keystring = PyString_AsString(key); - if (keystring == NULL) { - PyErr_Clear(); - PyErr_SetString(PyExc_TypeError, "invalid keyword"); - goto fail; - } - if (strncmp(keystring,"extobj",6) == 0) { - extobj = value; - } - else if (strncmp(keystring,"sig",3) == 0) { - typetup = value; - } - else { - char *format = "'%s' is an invalid keyword to %s"; - PyErr_Format(PyExc_TypeError,format,keystring, name); - goto fail; - } - } - } - - if (extobj == NULL) { - if (PyUFunc_GetPyValues(name, - &(loop->bufsize), &(loop->errormask), - &(loop->errobj)) < 0) { - goto fail; - } - } - else { - if (_extract_pyvals(extobj, name, - &(loop->bufsize), &(loop->errormask), - &(loop->errobj)) < 0) { - goto fail; - } - } - - /* Setup the arrays */ - if (construct_arrays(loop, args, mps, typetup) < 0) { - goto fail; - } - - PyUFunc_clearfperr(); - return loop; - -fail: - ufuncloop_dealloc(loop); - return NULL; -} - - -/* - static void - _printbytebuf(PyUFuncLoopObject *loop, int bufnum) - { - int i; - - fprintf(stderr, "Printing byte buffer %d\n", bufnum); - for(i=0; ibufcnt; i++) { - fprintf(stderr, " %d\n", *(((byte *)(loop->buffer[bufnum]))+i)); - } - } - - static void - _printlongbuf(PyUFuncLoopObject *loop, int bufnum) - { - int i; - - fprintf(stderr, "Printing long buffer %d\n", bufnum); - for(i=0; ibufcnt; i++) { - fprintf(stderr, " %ld\n", *(((long *)(loop->buffer[bufnum]))+i)); - } - } - - static void - _printlongbufptr(PyUFuncLoopObject *loop, int bufnum) - { - int i; - - fprintf(stderr, "Printing long buffer %d\n", bufnum); - for(i=0; ibufcnt; i++) { - fprintf(stderr, " %ld\n", *(((long *)(loop->bufptr[bufnum]))+i)); - } - } - - - - static void - _printcastbuf(PyUFuncLoopObject *loop, int bufnum) - { - int i; - - fprintf(stderr, "Printing long buffer %d\n", bufnum); - for(i=0; ibufcnt; i++) { - fprintf(stderr, " %ld\n", *(((long *)(loop->castbuf[bufnum]))+i)); - } - } - -*/ - - - - -/* - * currently generic ufuncs cannot be built for use on flexible arrays. - * - * The cast functions in the generic loop would need to be fixed to pass - * in something besides NULL, NULL. - * - * Also the underlying ufunc loops would not know the element-size unless - * that was passed in as data (which could be arranged). - * - */ - -/* - * This generic function is called with the ufunc object, the arguments to it, - * and an array of (pointers to) PyArrayObjects which are NULL. The - * arguments are parsed and placed in mps in construct_loop (construct_arrays) - */ - -/*UFUNC_API*/ -static int -PyUFunc_GenericFunction(PyUFuncObject *self, PyObject *args, PyObject *kwds, - PyArrayObject **mps) -{ - PyUFuncLoopObject *loop; - int i; - NPY_BEGIN_THREADS_DEF; - - if (!(loop = construct_loop(self, args, kwds, mps))) { - return -1; - } - if (loop->notimplemented) { - ufuncloop_dealloc(loop); - return -2; - } - if (self->core_enabled && loop->meth != SIGNATURE_NOBUFFER_UFUNCLOOP) { - PyErr_SetString(PyExc_RuntimeError, - "illegal loop method for ufunc with signature"); - goto fail; - } - - NPY_LOOP_BEGIN_THREADS; - switch(loop->meth) { - case ONE_UFUNCLOOP: - /* - * Everything is contiguous, notswapped, aligned, - * and of the right type. -- Fastest. - * Or if not contiguous, then a single-stride - * increment moves through the entire array. 
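/*
 * Sketch of the 1-D inner-loop calling convention this dispatch relies on:
 * args holds the data pointers, dimensions[0] the element count, and steps
 * the byte strides.  In the ONE_UFUNCLOOP case the whole operation is handed
 * over in a single call, loop->function(bufptr, &loop->size, loop->steps,
 * funcdata), with steps equal to the itemsizes.  The loop below is an
 * illustrative double-precision add, not one of the generated loops.
 */
static void
double_add_loop(char **args, intp *dimensions, intp *steps, void *func)
{
    intp i, n = dimensions[0];
    char *in1 = args[0], *in2 = args[1], *out = args[2];

    for (i = 0; i < n; i++, in1 += steps[0], in2 += steps[1], out += steps[2]) {
        *(double *)out = *(double *)in1 + *(double *)in2;
    }
}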
- */ - /*fprintf(stderr, "ONE...%d\n", loop->size);*/ - loop->function((char **)loop->bufptr, &(loop->size), - loop->steps, loop->funcdata); - UFUNC_CHECK_ERROR(loop); - break; - - case NOBUFFER_UFUNCLOOP: - /* - * Everything is notswapped, aligned and of the - * right type but not contiguous. -- Almost as fast. - */ - /*fprintf(stderr, "NOBUFFER...%d\n", loop->size);*/ - - while (loop->index < loop->size) { - for(i = 0; i < self->nargs; i++) { - loop->bufptr[i] = loop->iters[i]->dataptr; - } - loop->function((char **)loop->bufptr, &(loop->bufcnt), - loop->steps, loop->funcdata); - UFUNC_CHECK_ERROR(loop); - - /* Adjust loop pointers */ - for(i = 0; i < self->nargs; i++) { - PyArray_ITER_NEXT(loop->iters[i]); - } - loop->index++; - } - break; - - case SIGNATURE_NOBUFFER_UFUNCLOOP: - while (loop->index < loop->size) { - for(i = 0; i < self->nargs; i++) { - loop->bufptr[i] = loop->iters[i]->dataptr; - } - loop->function((char **)loop->bufptr, loop->core_dim_sizes, - loop->core_strides, loop->funcdata); - UFUNC_CHECK_ERROR(loop); - - /* Adjust loop pointers */ - for(i = 0; i < self->nargs; i++) { - PyArray_ITER_NEXT(loop->iters[i]); - } - loop->index++; - } - break; - - case BUFFER_UFUNCLOOP: { - PyArray_CopySwapNFunc *copyswapn[NPY_MAXARGS]; - PyArrayIterObject **iters=loop->iters; - int *swap=loop->swap; - char **dptr=loop->dptr; - int mpselsize[NPY_MAXARGS]; - intp laststrides[NPY_MAXARGS]; - int fastmemcpy[NPY_MAXARGS]; - int *needbuffer=loop->needbuffer; - intp index=loop->index, size=loop->size; - int bufsize; - intp bufcnt; - int copysizes[NPY_MAXARGS]; - char **bufptr = loop->bufptr; - char **buffer = loop->buffer; - char **castbuf = loop->castbuf; - intp *steps = loop->steps; - char *tptr[NPY_MAXARGS]; - int ninnerloops = loop->ninnerloops; - Bool pyobject[NPY_MAXARGS]; - int datasize[NPY_MAXARGS]; - int j, k, stopcondition; - char *myptr1, *myptr2; - - for(i = 0; i nargs; i++) { - copyswapn[i] = mps[i]->descr->f->copyswapn; - mpselsize[i] = mps[i]->descr->elsize; - pyobject[i] = (loop->obj && \ - (mps[i]->descr->type_num == PyArray_OBJECT)); - laststrides[i] = iters[i]->strides[loop->lastdim]; - if (steps[i] && laststrides[i] != mpselsize[i]) { - fastmemcpy[i] = 0; - } - else { - fastmemcpy[i] = 1; - } - } - /* Do generic buffered looping here (works for any kind of - * arrays -- some need buffers, some don't. - * - * - * New algorithm: N is the largest dimension. B is the buffer-size. - * quotient is loop->ninnerloops-1 - * remainder is loop->leftover - * - * Compute N = quotient * B + remainder. - * quotient = N / B # integer math - * (store quotient + 1) as the number of innerloops - * remainder = N % B # integer remainder - * - * On the inner-dimension we will have (quotient + 1) loops where - * the size of the inner function is B for all but the last when the niter size is - * remainder. - * - * So, the code looks very similar to NOBUFFER_LOOP except the inner-most loop is - * replaced with... - * - * for(i=0; isize, - * loop->ninnerloops, loop->leftover); - */ - /* - * for(i=0; inargs; i++) { - * fprintf(stderr, "iters[%d]->dataptr = %p, %p of size %d\n", i, - * iters[i], iters[i]->ao->data, PyArray_NBYTES(iters[i]->ao)); - * } - */ - stopcondition = ninnerloops; - if (loop->leftover == 0) stopcondition--; - while (index < size) { - bufsize=loop->bufsize; - for(i = 0; inargs; i++) { - tptr[i] = loop->iters[i]->dataptr; - if (needbuffer[i]) { - dptr[i] = bufptr[i]; - datasize[i] = (steps[i] ? 
bufsize : 1); - copysizes[i] = datasize[i] * mpselsize[i]; - } - else { - dptr[i] = tptr[i]; - } - } - - /* This is the inner function over the last dimension */ - for(k = 1; k<=stopcondition; k++) { - if (k == ninnerloops) { - bufsize = loop->leftover; - for(i=0; inargs;i++) { - if (!needbuffer[i]) { - continue; - } - datasize[i] = (steps[i] ? bufsize : 1); - copysizes[i] = datasize[i] * mpselsize[i]; - } - } - for(i = 0; i < self->nin; i++) { - if (!needbuffer[i]) { - continue; - } - if (fastmemcpy[i]) { - memcpy(buffer[i], tptr[i], copysizes[i]); - } - else { - myptr1 = buffer[i]; - myptr2 = tptr[i]; - for(j = 0; j < bufsize; j++) { - memcpy(myptr1, myptr2, mpselsize[i]); - myptr1 += mpselsize[i]; - myptr2 += laststrides[i]; - } - } - - /* swap the buffer if necessary */ - if (swap[i]) { - /* fprintf(stderr, "swapping...\n");*/ - copyswapn[i](buffer[i], mpselsize[i], NULL, -1, - (intp) datasize[i], 1, - mps[i]); - } - /* cast to the other buffer if necessary */ - if (loop->cast[i]) { - /* fprintf(stderr, "casting... %d, %p %p\n", i, buffer[i]); */ - loop->cast[i](buffer[i], castbuf[i], - (intp) datasize[i], - NULL, NULL); - } - } - - bufcnt = (intp) bufsize; - loop->function((char **)dptr, &bufcnt, steps, loop->funcdata); - UFUNC_CHECK_ERROR(loop); - - for(i=self->nin; inargs; i++) { - if (!needbuffer[i]) { - continue; - } - if (loop->cast[i]) { - /* fprintf(stderr, "casting back... %d, %p", i, castbuf[i]); */ - loop->cast[i](castbuf[i], - buffer[i], - (intp) datasize[i], - NULL, NULL); - } - if (swap[i]) { - copyswapn[i](buffer[i], mpselsize[i], NULL, -1, - (intp) datasize[i], 1, - mps[i]); - } - /* - * copy back to output arrays - * decref what's already there for object arrays - */ - if (pyobject[i]) { - myptr1 = tptr[i]; - for(j = 0; j < datasize[i]; j++) { - Py_XDECREF(*((PyObject **)myptr1)); - myptr1 += laststrides[i]; - } - } - if (fastmemcpy[i]) - memcpy(tptr[i], buffer[i], copysizes[i]); - else { - myptr2 = buffer[i]; - myptr1 = tptr[i]; - for(j = 0; j < bufsize; j++) { - memcpy(myptr1, myptr2, - mpselsize[i]); - myptr1 += laststrides[i]; - myptr2 += mpselsize[i]; - } - } - } - if (k == stopcondition) { - continue; - } - for(i = 0; i < self->nargs; i++) { - tptr[i] += bufsize * laststrides[i]; - if (!needbuffer[i]) { - dptr[i] = tptr[i]; - } - } - } - /* end inner function over last dimension */ - - if (loop->objfunc) { - /* - * DECREF castbuf when underlying function used - * object arrays and casting was needed to get - * to object arrays - */ - for(i = 0; i < self->nargs; i++) { - if (loop->cast[i]) { - if (steps[i] == 0) { - Py_XDECREF(*((PyObject **)castbuf[i])); - } - else { - int size = loop->bufsize; - - PyObject **objptr = (PyObject **)castbuf[i]; - /* - * size is loop->bufsize unless there - * was only one loop - */ - if (ninnerloops == 1) { - size = loop->leftover; - } - for(j = 0; j < size; j++) { - Py_XDECREF(*objptr); - *objptr = NULL; - objptr += 1; - } - } - } - } - - } - /* fixme -- probably not needed here*/ - UFUNC_CHECK_ERROR(loop); - - for(i=0; inargs; i++) { - PyArray_ITER_NEXT(loop->iters[i]); - } - index++; - } - } - } - - NPY_LOOP_END_THREADS; - ufuncloop_dealloc(loop); - return 0; - -fail: - NPY_LOOP_END_THREADS; - if (loop) ufuncloop_dealloc(loop); - return -1; -} - -static PyArrayObject * -_getidentity(PyUFuncObject *self, int otype, char *str) -{ - PyObject *obj, *arr; - PyArray_Descr *typecode; - - if (self->identity == PyUFunc_None) { - PyErr_Format(PyExc_ValueError, - "zero-size array to ufunc.%s " \ - "without identity", str); - return NULL; - 
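/*
 * Sketch of the chunking arithmetic used by the buffered loop above: a row of
 * N elements is processed in pieces of at most B = loop->bufsize, giving
 * N/B + 1 inner calls with the last one covering the N % B leftover elements.
 * The per-chunk work for each buffered input is copy -> byteswap -> cast ->
 * inner loop, and the reverse for buffered outputs.  Names are illustrative
 * and the buffer work is only indicated by comments.
 */
static intp
process_row_in_chunks(intp N, intp B)
{
    intp ninnerloops = N/B + 1;          /* mirrors loop->ninnerloops        */
    intp leftover = N % B;               /* mirrors loop->leftover           */
    intp k, chunk, done = 0;

    for (k = 1; k <= ninnerloops; k++) {
        chunk = (k == ninnerloops) ? leftover : B;
        if (chunk == 0) {
            break;                       /* N was an exact multiple of B     */
        }
        /* 1. copy `chunk` elements of each buffered input into its buffer  */
        /* 2. byteswap and/or cast those buffers if required                */
        /* 3. call the 1-D inner loop with a count of `chunk`               */
        /* 4. cast/swap back and copy buffered outputs to the arrays        */
        done += chunk;
    }
    return done;                         /* always equals N                  */
}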
} - if (self->identity == PyUFunc_One) { - obj = PyInt_FromLong((long) 1); - } else { - obj = PyInt_FromLong((long) 0); - } - - typecode = PyArray_DescrFromType(otype); - arr = PyArray_FromAny(obj, typecode, 0, 0, CARRAY, NULL); - Py_DECREF(obj); - return (PyArrayObject *)arr; -} - -static int -_create_reduce_copy(PyUFuncReduceObject *loop, PyArrayObject **arr, int rtype) -{ - intp maxsize; - PyObject *new; - PyArray_Descr *ntype; - - maxsize = PyArray_SIZE(*arr); - - if (maxsize < loop->bufsize) { - if (!(PyArray_ISBEHAVED_RO(*arr)) || - PyArray_TYPE(*arr) != rtype) { - ntype = PyArray_DescrFromType(rtype); - new = PyArray_FromAny((PyObject *)(*arr), - ntype, 0, 0, - FORCECAST | ALIGNED, NULL); - if (new == NULL) { - return -1; - } - *arr = (PyArrayObject *)new; - loop->decref = new; - } - } - - /* Don't decref *arr before re-assigning - because it was not going to be DECREF'd anyway. - - If a copy is made, then the copy will be removed - on deallocation of the loop structure by setting - loop->decref. - */ - - return 0; -} - -static PyUFuncReduceObject * -construct_reduce(PyUFuncObject *self, PyArrayObject **arr, PyArrayObject *out, - int axis, int otype, int operation, intp ind_size, char *str) -{ - PyUFuncReduceObject *loop; - PyArrayObject *idarr; - PyArrayObject *aar; - intp loop_i[MAX_DIMS], outsize=0; - int arg_types[3]; - PyArray_SCALARKIND scalars[3] = {PyArray_NOSCALAR, PyArray_NOSCALAR, - PyArray_NOSCALAR}; - int i, j, nd; - int flags; - /* Reduce type is the type requested of the input - during reduction */ - - if (self->core_enabled) { - PyErr_Format(PyExc_RuntimeError, - "construct_reduce not allowed on ufunc with signature"); - return NULL; - } - - nd = (*arr)->nd; - arg_types[0] = otype; - arg_types[1] = otype; - arg_types[2] = otype; - if ((loop = _pya_malloc(sizeof(PyUFuncReduceObject)))==NULL) { - PyErr_NoMemory(); return loop; - } - - loop->retbase=0; - loop->swap = 0; - loop->index = 0; - loop->ufunc = self; - Py_INCREF(self); - loop->cast = NULL; - loop->buffer = NULL; - loop->ret = NULL; - loop->it = NULL; - loop->rit = NULL; - loop->errobj = NULL; - loop->first = 1; - loop->decref=NULL; - loop->N = (*arr)->dimensions[axis]; - loop->instrides = (*arr)->strides[axis]; - - if (select_types(loop->ufunc, arg_types, &(loop->function), - &(loop->funcdata), scalars, NULL) == -1) goto fail; - - /* output type may change -- if it does - reduction is forced into that type - and we need to select the reduction function again - */ - if (otype != arg_types[2]) { - otype = arg_types[2]; - arg_types[0] = otype; - arg_types[1] = otype; - if (select_types(loop->ufunc, arg_types, &(loop->function), - &(loop->funcdata), scalars, NULL) == -1) - goto fail; - } - - /* get looping parameters from Python */ - if (PyUFunc_GetPyValues(str, &(loop->bufsize), &(loop->errormask), - &(loop->errobj)) < 0) goto fail; - - /* Make copy if misbehaved or not otype for small arrays */ - if (_create_reduce_copy(loop, arr, otype) < 0) goto fail; - aar = *arr; - - if (loop->N == 0) { - loop->meth = ZERO_EL_REDUCELOOP; - } - else if (PyArray_ISBEHAVED_RO(aar) && \ - otype == (aar)->descr->type_num) { - if (loop->N == 1) { - loop->meth = ONE_EL_REDUCELOOP; - } - else { - loop->meth = NOBUFFER_UFUNCLOOP; - loop->steps[1] = (aar)->strides[axis]; - loop->N -= 1; - } - } - else { - loop->meth = BUFFER_UFUNCLOOP; - loop->swap = !(PyArray_ISNOTSWAPPED(aar)); - } - - /* Determine if object arrays are involved */ - if (otype == PyArray_OBJECT || aar->descr->type_num == PyArray_OBJECT) - loop->obj = 1; - else - 
loop->obj = 0; - - if (loop->meth == ZERO_EL_REDUCELOOP) { - idarr = _getidentity(self, otype, str); - if (idarr == NULL) goto fail; - if (idarr->descr->elsize > UFUNC_MAXIDENTITY) { - PyErr_Format(PyExc_RuntimeError, - "UFUNC_MAXIDENTITY (%d)" \ - " is too small (needs to be at least %d)", - UFUNC_MAXIDENTITY, idarr->descr->elsize); - Py_DECREF(idarr); - goto fail; - } - memcpy(loop->idptr, idarr->data, idarr->descr->elsize); - Py_DECREF(idarr); - } - - /* Construct return array */ - flags = NPY_CARRAY | NPY_UPDATEIFCOPY | NPY_FORCECAST; - switch(operation) { - case UFUNC_REDUCE: - for(j=0, i=0; idimensions[i]; - - } - if (out == NULL) { - loop->ret = (PyArrayObject *) \ - PyArray_New(aar->ob_type, aar->nd-1, loop_i, - otype, NULL, NULL, 0, 0, - (PyObject *)aar); - } - else { - outsize = PyArray_MultiplyList(loop_i, aar->nd-1); - } - break; - case UFUNC_ACCUMULATE: - if (out == NULL) { - loop->ret = (PyArrayObject *) \ - PyArray_New(aar->ob_type, aar->nd, aar->dimensions, - otype, NULL, NULL, 0, 0, (PyObject *)aar); - } - else { - outsize = PyArray_MultiplyList(aar->dimensions, aar->nd); - } - break; - case UFUNC_REDUCEAT: - memcpy(loop_i, aar->dimensions, nd*sizeof(intp)); - /* Index is 1-d array */ - loop_i[axis] = ind_size; - if (out == NULL) { - loop->ret = (PyArrayObject *) \ - PyArray_New(aar->ob_type, aar->nd, loop_i, otype, - NULL, NULL, 0, 0, (PyObject *)aar); - } - else { - outsize = PyArray_MultiplyList(loop_i, aar->nd); - } - if (ind_size == 0) { - loop->meth = ZERO_EL_REDUCELOOP; - return loop; - } - if (loop->meth == ONE_EL_REDUCELOOP) - loop->meth = NOBUFFER_REDUCELOOP; - break; - } - if (out) { - if (PyArray_SIZE(out) != outsize) { - PyErr_SetString(PyExc_ValueError, - "wrong shape for output"); - goto fail; - } - loop->ret = (PyArrayObject *) \ - PyArray_FromArray(out, PyArray_DescrFromType(otype), - flags); - if (loop->ret && loop->ret != out) { - loop->retbase = 1; - } - } - if (loop->ret == NULL) goto fail; - loop->insize = aar->descr->elsize; - loop->outsize = loop->ret->descr->elsize; - loop->bufptr[0] = loop->ret->data; - - if (loop->meth == ZERO_EL_REDUCELOOP) { - loop->size = PyArray_SIZE(loop->ret); - return loop; - } - - loop->it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)aar); - if (loop->it == NULL) return NULL; - - if (loop->meth == ONE_EL_REDUCELOOP) { - loop->size = loop->it->size; - return loop; - } - - /* Fix iterator to loop over correct dimension */ - /* Set size in axis dimension to 1 */ - - loop->it->contiguous = 0; - loop->it->size /= (loop->it->dims_m1[axis]+1); - loop->it->dims_m1[axis] = 0; - loop->it->backstrides[axis] = 0; - - - loop->size = loop->it->size; - - if (operation == UFUNC_REDUCE) { - loop->steps[0] = 0; - } - else { - loop->rit = (PyArrayIterObject *) \ - PyArray_IterNew((PyObject *)(loop->ret)); - if (loop->rit == NULL) return NULL; - - /* Fix iterator to loop over correct dimension */ - /* Set size in axis dimension to 1 */ - - loop->rit->contiguous = 0; - loop->rit->size /= (loop->rit->dims_m1[axis]+1); - loop->rit->dims_m1[axis] = 0; - loop->rit->backstrides[axis] = 0; - - if (operation == UFUNC_ACCUMULATE) - loop->steps[0] = loop->ret->strides[axis]; - else - loop->steps[0] = 0; - } - loop->steps[2] = loop->steps[0]; - loop->bufptr[2] = loop->bufptr[0] + loop->steps[2]; - - - if (loop->meth == BUFFER_UFUNCLOOP) { - int _size; - loop->steps[1] = loop->outsize; - if (otype != aar->descr->type_num) { - _size=loop->bufsize*(loop->outsize + \ - aar->descr->elsize); - loop->buffer = PyDataMem_NEW(_size); - if (loop->buffer == 
NULL) goto fail; - if (loop->obj) memset(loop->buffer, 0, _size); - loop->castbuf = loop->buffer + \ - loop->bufsize*aar->descr->elsize; - loop->bufptr[1] = loop->castbuf; - loop->cast = PyArray_GetCastFunc(aar->descr, otype); - if (loop->cast == NULL) goto fail; - } - else { - _size = loop->bufsize * loop->outsize; - loop->buffer = PyDataMem_NEW(_size); - if (loop->buffer == NULL) goto fail; - if (loop->obj) memset(loop->buffer, 0, _size); - loop->bufptr[1] = loop->buffer; - } - } - - - PyUFunc_clearfperr(); - return loop; - - fail: - ufuncreduce_dealloc(loop); - return NULL; -} - - -/* We have two basic kinds of loops */ -/* One is used when arr is not-swapped and aligned and output type - is the same as input type. - and another using buffers when one of these is not satisfied. - - Zero-length and one-length axes-to-be-reduced are handled separately. -*/ - - static PyObject * -PyUFunc_Reduce(PyUFuncObject *self, PyArrayObject *arr, PyArrayObject *out, - int axis, int otype) -{ - PyArrayObject *ret=NULL; - PyUFuncReduceObject *loop; - intp i, n; - char *dptr; - NPY_BEGIN_THREADS_DEF; - - /* Construct loop object */ - loop = construct_reduce(self, &arr, out, axis, otype, UFUNC_REDUCE, 0, - "reduce"); - if (!loop) return NULL; - - NPY_LOOP_BEGIN_THREADS; - switch(loop->meth) { - case ZERO_EL_REDUCELOOP: - /* fprintf(stderr, "ZERO..%d\n", loop->size); */ - for(i=0; isize; i++) { - if (loop->obj) Py_INCREF(*((PyObject **)loop->idptr)); - memmove(loop->bufptr[0], loop->idptr, loop->outsize); - loop->bufptr[0] += loop->outsize; - } - break; - case ONE_EL_REDUCELOOP: - /*fprintf(stderr, "ONEDIM..%d\n", loop->size); */ - while(loop->index < loop->size) { - if (loop->obj) - Py_INCREF(*((PyObject **)loop->it->dataptr)); - memmove(loop->bufptr[0], loop->it->dataptr, - loop->outsize); - PyArray_ITER_NEXT(loop->it); - loop->bufptr[0] += loop->outsize; - loop->index++; - } - break; - case NOBUFFER_UFUNCLOOP: - /*fprintf(stderr, "NOBUFFER..%d\n", loop->size); */ - while(loop->index < loop->size) { - /* Copy first element to output */ - if (loop->obj) - Py_INCREF(*((PyObject **)loop->it->dataptr)); - memmove(loop->bufptr[0], loop->it->dataptr, - loop->outsize); - /* Adjust input pointer */ - loop->bufptr[1] = loop->it->dataptr+loop->steps[1]; - loop->function((char **)loop->bufptr, - &(loop->N), - loop->steps, loop->funcdata); - UFUNC_CHECK_ERROR(loop); - - PyArray_ITER_NEXT(loop->it) - loop->bufptr[0] += loop->outsize; - loop->bufptr[2] = loop->bufptr[0]; - loop->index++; - } - break; - case BUFFER_UFUNCLOOP: - /* use buffer for arr */ - /* - For each row to reduce - 1. copy first item over to output (casting if necessary) - 2. Fill inner buffer - 3. When buffer is filled or end of row - a. Cast input buffers if needed - b. Call inner function. - 4. Repeat 2 until row is done. 
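/*
 * Standalone sketch of the NOBUFFER reduce pattern used below for one row:
 * the first element is copied to the output slot, then the binary inner loop
 * runs over the remaining N-1 elements with the output step set to 0, so each
 * partial result accumulates in place.  Double/add only, illustrative names;
 * assumes n >= 1.
 */
static double
reduce_add_row(const char *row, intp n, intp byte_stride)
{
    double acc;
    const char *ip = row;
    intp i;

    acc = *(const double *)ip;               /* copy first element to output */
    for (i = 1; i < n; i++) {
        ip += byte_stride;
        acc = acc + *(const double *)ip;     /* output step 0: accumulate    */
    }
    return acc;
}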
- */ - /* fprintf(stderr, "BUFFERED..%d %d\n", loop->size, - loop->swap); */ - while(loop->index < loop->size) { - loop->inptr = loop->it->dataptr; - /* Copy (cast) First term over to output */ - if (loop->cast) { - /* A little tricky because we need to - cast it first */ - arr->descr->f->copyswap(loop->buffer, - loop->inptr, - loop->swap, - NULL); - loop->cast(loop->buffer, loop->castbuf, - 1, NULL, NULL); - if (loop->obj) { - Py_XINCREF(*((PyObject **)loop->castbuf)); - } - memcpy(loop->bufptr[0], loop->castbuf, - loop->outsize); - } - else { /* Simple copy */ - arr->descr->f->copyswap(loop->bufptr[0], - loop->inptr, - loop->swap, NULL); - } - loop->inptr += loop->instrides; - n = 1; - while(n < loop->N) { - /* Copy up to loop->bufsize elements to - buffer */ - dptr = loop->buffer; - for(i=0; ibufsize; i++, n++) { - if (n == loop->N) break; - arr->descr->f->copyswap(dptr, - loop->inptr, - loop->swap, - NULL); - loop->inptr += loop->instrides; - dptr += loop->insize; - } - if (loop->cast) - loop->cast(loop->buffer, - loop->castbuf, - i, NULL, NULL); - loop->function((char **)loop->bufptr, - &i, - loop->steps, loop->funcdata); - loop->bufptr[0] += loop->steps[0]*i; - loop->bufptr[2] += loop->steps[2]*i; - UFUNC_CHECK_ERROR(loop); - } - PyArray_ITER_NEXT(loop->it); - loop->bufptr[0] += loop->outsize; - loop->bufptr[2] = loop->bufptr[0]; - loop->index++; - } - } - - NPY_LOOP_END_THREADS; - - /* Hang on to this reference -- will be decref'd with loop */ - if (loop->retbase) ret = (PyArrayObject *)loop->ret->base; - else ret = loop->ret; - Py_INCREF(ret); - ufuncreduce_dealloc(loop); - return (PyObject *)ret; - -fail: - NPY_LOOP_END_THREADS; - - if (loop) ufuncreduce_dealloc(loop); - return NULL; -} - - -static PyObject * -PyUFunc_Accumulate(PyUFuncObject *self, PyArrayObject *arr, PyArrayObject *out, - int axis, int otype) -{ - PyArrayObject *ret=NULL; - PyUFuncReduceObject *loop; - intp i, n; - char *dptr; - NPY_BEGIN_THREADS_DEF; - - /* Construct loop object */ - loop = construct_reduce(self, &arr, out, axis, otype, UFUNC_ACCUMULATE, 0, - "accumulate"); - if (!loop) return NULL; - - NPY_LOOP_BEGIN_THREADS; - switch(loop->meth) { - case ZERO_EL_REDUCELOOP: /* Accumulate */ - /* fprintf(stderr, "ZERO..%d\n", loop->size); */ - for(i=0; isize; i++) { - if (loop->obj) - Py_INCREF(*((PyObject **)loop->idptr)); - memcpy(loop->bufptr[0], loop->idptr, loop->outsize); - loop->bufptr[0] += loop->outsize; - } - break; - case ONE_EL_REDUCELOOP: /* Accumulate */ - /* fprintf(stderr, "ONEDIM..%d\n", loop->size); */ - while(loop->index < loop->size) { - if (loop->obj) - Py_INCREF(*((PyObject **)loop->it->dataptr)); - memmove(loop->bufptr[0], loop->it->dataptr, - loop->outsize); - PyArray_ITER_NEXT(loop->it); - loop->bufptr[0] += loop->outsize; - loop->index++; - } - break; - case NOBUFFER_UFUNCLOOP: /* Accumulate */ - /* fprintf(stderr, "NOBUFFER..%d\n", loop->size); */ - while(loop->index < loop->size) { - /* Copy first element to output */ - if (loop->obj) - Py_INCREF(*((PyObject **)loop->it->dataptr)); - memmove(loop->bufptr[0], loop->it->dataptr, - loop->outsize); - /* Adjust input pointer */ - loop->bufptr[1] = loop->it->dataptr+loop->steps[1]; - loop->function((char **)loop->bufptr, - &(loop->N), - loop->steps, loop->funcdata); - UFUNC_CHECK_ERROR(loop); - - PyArray_ITER_NEXT(loop->it); - PyArray_ITER_NEXT(loop->rit); - loop->bufptr[0] = loop->rit->dataptr; - loop->bufptr[2] = loop->bufptr[0] + loop->steps[0]; - loop->index++; - } - break; - case BUFFER_UFUNCLOOP: /* Accumulate */ - /* use buffer for 
arr */ - /* - For each row to reduce - 1. copy identity over to output (casting if necessary) - 2. Fill inner buffer - 3. When buffer is filled or end of row - a. Cast input buffers if needed - b. Call inner function. - 4. Repeat 2 until row is done. - */ - /* fprintf(stderr, "BUFFERED..%d %p\n", loop->size, - loop->cast); */ - while(loop->index < loop->size) { - loop->inptr = loop->it->dataptr; - /* Copy (cast) First term over to output */ - if (loop->cast) { - /* A little tricky because we need to - cast it first */ - arr->descr->f->copyswap(loop->buffer, - loop->inptr, - loop->swap, - NULL); - loop->cast(loop->buffer, loop->castbuf, - 1, NULL, NULL); - if (loop->obj) { - Py_XINCREF(*((PyObject **)loop->castbuf)); - } - memcpy(loop->bufptr[0], loop->castbuf, - loop->outsize); - } - else { /* Simple copy */ - arr->descr->f->copyswap(loop->bufptr[0], - loop->inptr, - loop->swap, - NULL); - } - loop->inptr += loop->instrides; - n = 1; - while(n < loop->N) { - /* Copy up to loop->bufsize elements to - buffer */ - dptr = loop->buffer; - for(i=0; ibufsize; i++, n++) { - if (n == loop->N) break; - arr->descr->f->copyswap(dptr, - loop->inptr, - loop->swap, - NULL); - loop->inptr += loop->instrides; - dptr += loop->insize; - } - if (loop->cast) - loop->cast(loop->buffer, - loop->castbuf, - i, NULL, NULL); - loop->function((char **)loop->bufptr, - &i, - loop->steps, loop->funcdata); - loop->bufptr[0] += loop->steps[0]*i; - loop->bufptr[2] += loop->steps[2]*i; - UFUNC_CHECK_ERROR(loop); - } - PyArray_ITER_NEXT(loop->it); - PyArray_ITER_NEXT(loop->rit); - loop->bufptr[0] = loop->rit->dataptr; - loop->bufptr[2] = loop->bufptr[0] + loop->steps[0]; - loop->index++; - } - } - - NPY_LOOP_END_THREADS; - - /* Hang on to this reference -- will be decref'd with loop */ - if (loop->retbase) ret = (PyArrayObject *)loop->ret->base; - else ret = loop->ret; - Py_INCREF(ret); - ufuncreduce_dealloc(loop); - return (PyObject *)ret; - - fail: - NPY_LOOP_END_THREADS; - - if (loop) ufuncreduce_dealloc(loop); - return NULL; -} - -/* Reduceat performs a reduce over an axis using the indices as a guide - - op.reduceat(array,indices) computes - op.reduce(array[indices[i]:indices[i+1]] - for i=0..end with an implicit indices[i+1]=len(array) - assumed when i=end-1 - - if indices[i+1] <= indices[i]+1 - then the result is array[indices[i]] for that value - - op.accumulate(array) is the same as - op.reduceat(array,indices)[::2] - where indices is range(len(array)-1) with a zero placed in every other sample - indices = zeros(len(array)*2-1) - indices[1::2] = range(1,len(array)) - - output shape is based on the size of indices -*/ - -static PyObject * -PyUFunc_Reduceat(PyUFuncObject *self, PyArrayObject *arr, PyArrayObject *ind, - PyArrayObject *out, int axis, int otype) -{ - PyArrayObject *ret; - PyUFuncReduceObject *loop; - intp *ptr=(intp *)ind->data; - intp nn=ind->dimensions[0]; - intp mm=arr->dimensions[axis]-1; - intp n, i, j; - char *dptr; - NPY_BEGIN_THREADS_DEF; - - /* Check for out-of-bounds values in indices array */ - for(i=0; i mm)) { - PyErr_Format(PyExc_IndexError, - "index out-of-bounds (0, %d)", (int) mm); - return NULL; - } - ptr++; - } - - ptr = (intp *)ind->data; - /* Construct loop object */ - loop = construct_reduce(self, &arr, out, axis, otype, UFUNC_REDUCEAT, nn, - "reduceat"); - if (!loop) return NULL; - - NPY_LOOP_BEGIN_THREADS; - switch(loop->meth) { - /* zero-length index -- return array immediately */ - case ZERO_EL_REDUCELOOP: - /* fprintf(stderr, "ZERO..\n"); */ - break; - /* NOBUFFER -- behaved 
array and same type */ - case NOBUFFER_UFUNCLOOP: /* Reduceat */ - /* fprintf(stderr, "NOBUFFER..%d\n", loop->size); */ - while(loop->index < loop->size) { - ptr = (intp *)ind->data; - for(i=0; ibufptr[1] = loop->it->dataptr + \ - (*ptr)*loop->instrides; - if (loop->obj) { - Py_XINCREF(*((PyObject **)loop->bufptr[1])); - } - memcpy(loop->bufptr[0], loop->bufptr[1], - loop->outsize); - mm = (i==nn-1 ? arr->dimensions[axis]-*ptr : \ - *(ptr+1) - *ptr) - 1; - if (mm > 0) { - loop->bufptr[1] += loop->instrides; - loop->bufptr[2] = loop->bufptr[0]; - loop->function((char **)loop->bufptr, - &mm, loop->steps, - loop->funcdata); - UFUNC_CHECK_ERROR(loop); - } - loop->bufptr[0] += loop->ret->strides[axis]; - ptr++; - } - PyArray_ITER_NEXT(loop->it); - PyArray_ITER_NEXT(loop->rit); - loop->bufptr[0] = loop->rit->dataptr; - loop->index++; - } - break; - - /* BUFFER -- misbehaved array or different types */ - case BUFFER_UFUNCLOOP: /* Reduceat */ - /* fprintf(stderr, "BUFFERED..%d\n", loop->size); */ - while(loop->index < loop->size) { - ptr = (intp *)ind->data; - for(i=0; iobj) { - Py_XINCREF(*((PyObject **)loop->idptr)); - } - memcpy(loop->bufptr[0], loop->idptr, - loop->outsize); - n = 0; - mm = (i==nn-1 ? arr->dimensions[axis] - *ptr :\ - *(ptr+1) - *ptr); - if (mm < 1) mm = 1; - loop->inptr = loop->it->dataptr + \ - (*ptr)*loop->instrides; - while (n < mm) { - /* Copy up to loop->bufsize elements - to buffer */ - dptr = loop->buffer; - for(j=0; jbufsize; j++, n++) { - if (n == mm) break; - arr->descr->f->copyswap\ - (dptr, - loop->inptr, - loop->swap, NULL); - loop->inptr += loop->instrides; - dptr += loop->insize; - } - if (loop->cast) - loop->cast(loop->buffer, - loop->castbuf, - j, NULL, NULL); - loop->bufptr[2] = loop->bufptr[0]; - loop->function((char **)loop->bufptr, - &j, loop->steps, - loop->funcdata); - UFUNC_CHECK_ERROR(loop); - loop->bufptr[0] += j*loop->steps[0]; - } - loop->bufptr[0] += loop->ret->strides[axis]; - ptr++; - } - PyArray_ITER_NEXT(loop->it); - PyArray_ITER_NEXT(loop->rit); - loop->bufptr[0] = loop->rit->dataptr; - loop->index++; - } - break; - } - - NPY_LOOP_END_THREADS; - - /* Hang on to this reference -- will be decref'd with loop */ - if (loop->retbase) ret = (PyArrayObject *)loop->ret->base; - else ret = loop->ret; - Py_INCREF(ret); - ufuncreduce_dealloc(loop); - return (PyObject *)ret; - -fail: - NPY_LOOP_END_THREADS; - - if (loop) ufuncreduce_dealloc(loop); - return NULL; -} - - -/* This code handles reduce, reduceat, and accumulate - (accumulate and reduce are special cases of the more general reduceat - but they are handled separately for speed) -*/ - -static PyObject * -PyUFunc_GenericReduction(PyUFuncObject *self, PyObject *args, - PyObject *kwds, int operation) -{ - int axis=0; - PyArrayObject *mp, *ret = NULL; - PyObject *op, *res=NULL; - PyObject *obj_ind, *context; - PyArrayObject *indices = NULL; - PyArray_Descr *otype=NULL; - PyArrayObject *out=NULL; - static char *kwlist1[] = {"array", "axis", "dtype", "out", NULL}; - static char *kwlist2[] = {"array", "indices", "axis", "dtype", "out", NULL}; - static char *_reduce_type[] = {"reduce", "accumulate", \ - "reduceat", NULL}; - if (self == NULL) { - PyErr_SetString(PyExc_ValueError, "function not supported"); - return NULL; - } - - if (self->core_enabled) { - PyErr_Format(PyExc_RuntimeError, - "Reduction not defined on ufunc with signature"); - return NULL; - } - - if (self->nin != 2) { - PyErr_Format(PyExc_ValueError, - "%s only supported for binary functions", - _reduce_type[operation]); - return NULL; - 
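/*
 * Sketch of the reduceat indexing rule documented above, for a 1-D double
 * array with add: segment i runs from ind[i] up to ind[i+1] (or to the end of
 * the array for the last index), and an empty or reversed segment simply
 * yields a[ind[i]].  Illustrative helper; indices are assumed in range.
 */
static void
reduceat_add(const double *a, intp len, const intp *ind, intp nind, double *out)
{
    intp i, j, stop;

    for (i = 0; i < nind; i++) {
        stop = (i == nind - 1) ? len : ind[i + 1];
        out[i] = a[ind[i]];
        for (j = ind[i] + 1; j < stop; j++) {
            out[i] += a[j];
        }
    }
}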
} - if (self->nout != 1) { - PyErr_Format(PyExc_ValueError, - "%s only supported for functions " \ - "returning a single value", - _reduce_type[operation]); - return NULL; - } - - if (operation == UFUNC_REDUCEAT) { - PyArray_Descr *indtype; - indtype = PyArray_DescrFromType(PyArray_INTP); - if(!PyArg_ParseTupleAndKeywords(args, kwds, "OO|iO&O&", kwlist2, - &op, &obj_ind, &axis, - PyArray_DescrConverter2, - &otype, - PyArray_OutputConverter, - &out)) { - Py_XDECREF(otype); - return NULL; - } - indices = (PyArrayObject *)PyArray_FromAny(obj_ind, indtype, - 1, 1, CARRAY, NULL); - if (indices == NULL) {Py_XDECREF(otype); return NULL;} - } - else { - if(!PyArg_ParseTupleAndKeywords(args, kwds, "O|iO&O&", kwlist1, - &op, &axis, - PyArray_DescrConverter2, - &otype, - PyArray_OutputConverter, - &out)) { - Py_XDECREF(otype); - return NULL; - } - } - - /* Ensure input is an array */ - if (!PyArray_Check(op) && !PyArray_IsScalar(op, Generic)) { - context = Py_BuildValue("O(O)i", self, op, 0); - } - else { - context = NULL; - } - mp = (PyArrayObject *)PyArray_FromAny(op, NULL, 0, 0, 0, context); - Py_XDECREF(context); - if (mp == NULL) return NULL; - - /* Check to see if input is zero-dimensional */ - if (mp->nd == 0) { - PyErr_Format(PyExc_TypeError, "cannot %s on a scalar", - _reduce_type[operation]); - Py_XDECREF(otype); - Py_DECREF(mp); - return NULL; - } - - /* Check to see that type (and otype) is not FLEXIBLE */ - if (PyArray_ISFLEXIBLE(mp) || - (otype && PyTypeNum_ISFLEXIBLE(otype->type_num))) { - PyErr_Format(PyExc_TypeError, - "cannot perform %s with flexible type", - _reduce_type[operation]); - Py_XDECREF(otype); - Py_DECREF(mp); - return NULL; - } - - if (axis < 0) axis += mp->nd; - if (axis < 0 || axis >= mp->nd) { - PyErr_SetString(PyExc_ValueError, "axis not in array"); - Py_XDECREF(otype); - Py_DECREF(mp); - return NULL; - } - - /* If out is specified it determines otype unless otype - already specified. - */ - if (otype == NULL && out != NULL) { - otype = out->descr; - Py_INCREF(otype); - } - - if (otype == NULL) { - /* For integer types --- make sure at - least a long is used for add and multiply - reduction --- to avoid overflow */ - int typenum = PyArray_TYPE(mp); - if ((typenum < NPY_FLOAT) && \ - ((strcmp(self->name,"add")==0) || \ - (strcmp(self->name,"multiply")==0))) { - if (PyTypeNum_ISBOOL(typenum)) - typenum = PyArray_LONG; - else if (mp->descr->elsize < sizeof(long)) { - if (PyTypeNum_ISUNSIGNED(typenum)) - typenum = PyArray_ULONG; - else - typenum = PyArray_LONG; - } - } - otype = PyArray_DescrFromType(typenum); - } - - - switch(operation) { - case UFUNC_REDUCE: - ret = (PyArrayObject *)PyUFunc_Reduce(self, mp, out, axis, - otype->type_num); - break; - case UFUNC_ACCUMULATE: - ret = (PyArrayObject *)PyUFunc_Accumulate(self, mp, out, axis, - otype->type_num); - break; - case UFUNC_REDUCEAT: - ret = (PyArrayObject *)PyUFunc_Reduceat(self, mp, indices, out, - axis, otype->type_num); - Py_DECREF(indices); - break; - } - Py_DECREF(mp); - Py_DECREF(otype); - if (ret==NULL) return NULL; - if (op->ob_type != ret->ob_type) { - res = PyObject_CallMethod(op, "__array_wrap__", "O", ret); - if (res == NULL) PyErr_Clear(); - else if (res == Py_None) Py_DECREF(res); - else { - Py_DECREF(ret); - return res; - } - } - return PyArray_Return(ret); - -} - -/* This function analyzes the input arguments - and determines an appropriate __array_wrap__ function to call - for the outputs. 
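/*
 * Sketch of the default output-type rule coded above for add/multiply
 * reductions: when no dtype is given, booleans and integers narrower than a
 * C long are accumulated as long (unsigned long for unsigned inputs) to make
 * overflow less likely.  Plain flags stand in for the PyArray_* type-number
 * tests; the helper name is illustrative.
 */
static int   /* nonzero if the reduction dtype should be widened to (u)long */
widen_reduce_type(int is_bool, int is_integer, size_t elsize,
                  int is_add_or_multiply)
{
    if (!is_add_or_multiply) {
        return 0;
    }
    if (is_bool) {
        return 1;
    }
    return is_integer && elsize < sizeof(long);
}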
- - If an output argument is provided, then it is wrapped - with its own __array_wrap__ not with the one determined by - the input arguments. - - if the provided output argument is already an array, - the wrapping function is None (which means no wrapping will - be done --- not even PyArray_Return). - - A NULL is placed in output_wrap for outputs that - should just have PyArray_Return called. -*/ - -static void -_find_array_wrap(PyObject *args, PyObject **output_wrap, int nin, int nout) -{ - Py_ssize_t nargs; - int i; - int np = 0; - double priority, maxpriority; - PyObject *with_wrap[NPY_MAXARGS], *wraps[NPY_MAXARGS]; - PyObject *obj, *wrap = NULL; - - nargs = PyTuple_GET_SIZE(args); - for(i = 0; i < nin; i++) { - obj = PyTuple_GET_ITEM(args, i); - if (PyArray_CheckExact(obj) || \ - PyArray_IsAnyScalar(obj)) - continue; - wrap = PyObject_GetAttrString(obj, "__array_wrap__"); - if (wrap) { - if (PyCallable_Check(wrap)) { - with_wrap[np] = obj; - wraps[np] = wrap; - ++np; - } - else { - Py_DECREF(wrap); - wrap = NULL; - } - } - else { - PyErr_Clear(); - } - } - if (np >= 2) { - wrap = wraps[0]; - maxpriority = PyArray_GetPriority(with_wrap[0], - PyArray_SUBTYPE_PRIORITY); - for(i = 1; i < np; ++i) { - priority = \ - PyArray_GetPriority(with_wrap[i], - PyArray_SUBTYPE_PRIORITY); - if (priority > maxpriority) { - maxpriority = priority; - Py_DECREF(wrap); - wrap = wraps[i]; - } else { - Py_DECREF(wraps[i]); - } - } - } - - /* Here wrap is the wrapping function determined from the - input arrays (could be NULL). - - For all the output arrays decide what to do. - - 1) Use the wrap function determined from the input arrays - This is the default if the output array is not - passed in. - - 2) Use the __array_wrap__ method of the output object - passed in. -- this is special cased for - exact ndarray so that no PyArray_Return is - done in that case. - */ - - for(i=0; inargs; i++) { - mps[i] = NULL; - } - - errval = PyUFunc_GenericFunction(self, args, kwds, mps); - if (errval < 0) { - for(i = 0; i < self->nargs; i++) { - PyArray_XDECREF_ERR(mps[i]); - } - if (errval == -1) - return NULL; - else { - /* - * PyErr_SetString(PyExc_TypeError,""); - * return NULL; - */ - Py_INCREF(Py_NotImplemented); - return Py_NotImplemented; - } - } - - for(i = 0; i < self->nin; i++) { - Py_DECREF(mps[i]); - } - - - /* - * Use __array_wrap__ on all outputs - * if present on one of the input arguments. - * If present for multiple inputs: - * use __array_wrap__ of input object with largest - * __array_priority__ (default = 0.0) - * - * Exception: we should not wrap outputs for items already - * passed in as output-arguments. These items should either - * be left unwrapped or wrapped by calling their own __array_wrap__ - * routine. - * - * For each output argument, wrap will be either - * NULL --- call PyArray_Return() -- default if no output arguments given - * None --- array-object passed in don't call PyArray_Return - * method --- the __array_wrap__ method to call. 
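/*
 * Sketch of the "highest __array_priority__ wins" choice described above:
 * among inputs that define a callable __array_wrap__, the first one with the
 * largest priority supplies the wrap used for the outputs; the strict '>'
 * keeps the earliest entry on ties.  Illustrative helper over plain doubles.
 */
static int
pick_wrap_index(const double *priorities, int np)
{
    int i, best = 0;

    if (np <= 0) {
        return -1;
    }
    for (i = 1; i < np; i++) {
        if (priorities[i] > priorities[best]) {
            best = i;
        }
    }
    return best;
}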
- */ - _find_array_wrap(args, wraparr, self->nin, self->nout); - - /* wrap outputs */ - for(i = 0; i < self->nout; i++) { - int j=self->nin+i; - PyObject *wrap; - - /* - * check to see if any UPDATEIFCOPY flags are set - * which meant that a temporary output was generated - */ - if (mps[j]->flags & UPDATEIFCOPY) { - PyObject *old = mps[j]->base; - /* we want to hang on to this */ - Py_INCREF(old); - /* should trigger the copyback into old */ - Py_DECREF(mps[j]); - mps[j] = (PyArrayObject *)old; - } - wrap = wraparr[i]; - if (wrap != NULL) { - if (wrap == Py_None) { - Py_DECREF(wrap); - retobj[i] = (PyObject *)mps[j]; - continue; - } - res = PyObject_CallFunction(wrap, "O(OOi)", - mps[j], self, args, i); - if (res == NULL && \ - PyErr_ExceptionMatches(PyExc_TypeError)) { - PyErr_Clear(); - res = PyObject_CallFunctionObjArgs(wrap, - mps[j], - NULL); - } - Py_DECREF(wrap); - if (res == NULL) { - goto fail; - } - else if (res == Py_None) { - Py_DECREF(res); - } - else { - Py_DECREF(mps[j]); - retobj[i] = res; - continue; - } - } - /* default behavior */ - retobj[i] = PyArray_Return(mps[j]); - } - - if (self->nout == 1) { - return retobj[0]; - } else { - ret = (PyTupleObject *)PyTuple_New(self->nout); - for(i = 0; i < self->nout; i++) { - PyTuple_SET_ITEM(ret, i, retobj[i]); - } - return (PyObject *)ret; - } -fail: - for(i = self->nin; i < self->nargs; i++) { - Py_XDECREF(mps[i]); - } - return NULL; -} - -static PyObject * -ufunc_geterr(PyObject *NPY_UNUSED(dummy), PyObject *args) -{ - PyObject *thedict; - PyObject *res; - - if (!PyArg_ParseTuple(args, "")) return NULL; - - if (PyUFunc_PYVALS_NAME == NULL) { - PyUFunc_PYVALS_NAME = PyString_InternFromString(UFUNC_PYVALS_NAME); - } - thedict = PyThreadState_GetDict(); - if (thedict == NULL) { - thedict = PyEval_GetBuiltins(); - } - res = PyDict_GetItem(thedict, PyUFunc_PYVALS_NAME); - if (res != NULL) { - Py_INCREF(res); - return res; - } - /* Construct list of defaults */ - res = PyList_New(3); - if (res == NULL) return NULL; - PyList_SET_ITEM(res, 0, PyInt_FromLong(PyArray_BUFSIZE)); - PyList_SET_ITEM(res, 1, PyInt_FromLong(UFUNC_ERR_DEFAULT)); - PyList_SET_ITEM(res, 2, Py_None); Py_INCREF(Py_None); - return res; -} - -#if USE_USE_DEFAULTS==1 -/* - This is a strategy to buy a little speed up and avoid the dictionary - look-up in the default case. It should work in the presence of - threads. If it is deemed too complicated or it doesn't actually work - it could be taken out. 
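/*
 * Sketch of the strategy described above: a process-wide counter records how
 * many non-default error-state installations exist; while it stays at zero,
 * every ufunc call may use the compiled-in defaults and skip the thread-state
 * dictionary lookup.  Illustrative stand-ins for PyUFunc_NUM_NODEFAULTS and
 * the checks made in PyUFunc_GetPyValues.
 */
static int nondefault_count = 0;

static void
note_error_state(int is_default)          /* called from a seterr-like path  */
{
    if (!is_default) {
        nondefault_count += 1;
    }
    else if (nondefault_count > 0) {
        nondefault_count -= 1;
    }
}

static int
can_use_defaults(void)                    /* cheap test made on every call   */
{
    return nondefault_count == 0;
}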
-*/ -static int -ufunc_update_use_defaults(void) -{ - PyObject *errobj=NULL; - int errmask, bufsize; - int res; - - PyUFunc_NUM_NODEFAULTS += 1; - res = PyUFunc_GetPyValues("test", &bufsize, &errmask, - &errobj); - PyUFunc_NUM_NODEFAULTS -= 1; - - if (res < 0) {Py_XDECREF(errobj); return -1;} - - if ((errmask != UFUNC_ERR_DEFAULT) || \ - (bufsize != PyArray_BUFSIZE) || \ - (PyTuple_GET_ITEM(errobj, 1) != Py_None)) { - PyUFunc_NUM_NODEFAULTS += 1; - } - else if (PyUFunc_NUM_NODEFAULTS > 0) { - PyUFunc_NUM_NODEFAULTS -= 1; - } - Py_XDECREF(errobj); - return 0; -} -#endif - -static PyObject * -ufunc_seterr(PyObject *NPY_UNUSED(dummy), PyObject *args) -{ - PyObject *thedict; - int res; - PyObject *val; - static char *msg = "Error object must be a list of length 3"; - - if (!PyArg_ParseTuple(args, "O", &val)) return NULL; - - if (!PyList_CheckExact(val) || PyList_GET_SIZE(val) != 3) { - PyErr_SetString(PyExc_ValueError, msg); - return NULL; - } - if (PyUFunc_PYVALS_NAME == NULL) { - PyUFunc_PYVALS_NAME = PyString_InternFromString(UFUNC_PYVALS_NAME); - } - thedict = PyThreadState_GetDict(); - if (thedict == NULL) { - thedict = PyEval_GetBuiltins(); - } - res = PyDict_SetItem(thedict, PyUFunc_PYVALS_NAME, val); - if (res < 0) return NULL; -#if USE_USE_DEFAULTS==1 - if (ufunc_update_use_defaults() < 0) return NULL; -#endif - Py_INCREF(Py_None); - return Py_None; -} - - - -static PyUFuncGenericFunction pyfunc_functions[] = {PyUFunc_On_Om}; - -static char -doc_frompyfunc[] = "frompyfunc(func, nin, nout) take an arbitrary python function that takes nin objects as input and returns nout objects and return a universal function (ufunc). This ufunc always returns PyObject arrays"; - -static PyObject * -ufunc_frompyfunc(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *NPY_UNUSED(kwds)) { - /* Keywords are ignored for now */ - - PyObject *function, *pyname=NULL; - int nin, nout, i; - PyUFunc_PyFuncData *fdata; - PyUFuncObject *self; - char *fname, *str; - Py_ssize_t fname_len=-1; - int offset[2]; - - if (!PyArg_ParseTuple(args, "Oii", &function, &nin, &nout)) return NULL; - - if (!PyCallable_Check(function)) { - PyErr_SetString(PyExc_TypeError, "function must be callable"); - return NULL; - } - - self = _pya_malloc(sizeof(PyUFuncObject)); - if (self == NULL) return NULL; - PyObject_Init((PyObject *)self, &PyUFunc_Type); - - self->userloops = NULL; - self->nin = nin; - self->nout = nout; - self->nargs = nin+nout; - self->identity = PyUFunc_None; - self->functions = pyfunc_functions; - - self->ntypes = 1; - self->check_return = 0; - - /* generalized ufunc */ - self->core_enabled = 0; - self->core_num_dim_ix = 0; - self->core_num_dims = NULL; - self->core_dim_ixs = NULL; - self->core_offsets = NULL; - self->core_signature = NULL; - - pyname = PyObject_GetAttrString(function, "__name__"); - if (pyname) - (void) PyString_AsStringAndSize(pyname, &fname, &fname_len); - - if (PyErr_Occurred()) { - fname = "?"; - fname_len = 1; - PyErr_Clear(); - } - Py_XDECREF(pyname); - - - - /* self->ptr holds a pointer for enough memory for - self->data[0] (fdata) - self->data - self->name - self->types - - To be safest, all of these need their memory aligned on void * pointers - Therefore, we may need to allocate extra space. 
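/*
 * Sketch of the padding computed just below: each sub-block of the single
 * allocation is rounded up to a multiple of sizeof(void *) so that the
 * pointer tables stored after it stay aligned.  Illustrative helper only.
 */
static size_t
round_up_to_pointer(size_t n)
{
    size_t rem = n % sizeof(void *);
    return rem ? n + (sizeof(void *) - rem) : n;
}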
- */ - offset[0] = sizeof(PyUFunc_PyFuncData); - i = (sizeof(PyUFunc_PyFuncData) % sizeof(void *)); - if (i) offset[0] += (sizeof(void *) - i); - offset[1] = self->nargs; - i = (self->nargs % sizeof(void *)); - if (i) offset[1] += (sizeof(void *)-i); - - self->ptr = _pya_malloc(offset[0] + offset[1] + sizeof(void *) + \ - (fname_len+14)); - - if (self->ptr == NULL) return PyErr_NoMemory(); - Py_INCREF(function); - self->obj = function; - fdata = (PyUFunc_PyFuncData *)(self->ptr); - fdata->nin = nin; - fdata->nout = nout; - fdata->callable = function; - - self->data = (void **)(((char *)self->ptr) + offset[0]); - self->data[0] = (void *)fdata; - - self->types = (char *)self->data + sizeof(void *); - for(i=0; inargs; i++) self->types[i] = PyArray_OBJECT; - - str = self->types + offset[1]; - memcpy(str, fname, fname_len); - memcpy(str+fname_len, " (vectorized)", 14); - - self->name = str; - - /* Do a better job someday */ - self->doc = "dynamic ufunc based on a python function"; - - - return (PyObject *)self; -} - -/*UFUNC_API*/ -static int -PyUFunc_ReplaceLoopBySignature(PyUFuncObject *func, - PyUFuncGenericFunction newfunc, - int *signature, - PyUFuncGenericFunction *oldfunc) -{ - int i,j; - int res = -1; - /* Find the location of the matching signature */ - for(i=0; intypes; i++) { - for(j=0; jnargs; j++) { - if (signature[j] != func->types[i*func->nargs+j]) - break; - } - if (j < func->nargs) continue; - - if (oldfunc != NULL) { - *oldfunc = func->functions[i]; - } - func->functions[i] = newfunc; - res = 0; - break; - } - return res; -} - -/*UFUNC_API*/ -static PyObject * -PyUFunc_FromFuncAndData(PyUFuncGenericFunction *func, void **data, - char *types, int ntypes, - int nin, int nout, int identity, - char *name, char *doc, int check_return) -{ - return PyUFunc_FromFuncAndDataAndSignature(func, data, types, ntypes, - nin, nout, identity, name, doc, check_return, NULL); -} - -/*UFUNC_API*/ -static PyObject * -PyUFunc_FromFuncAndDataAndSignature(PyUFuncGenericFunction *func, void **data, - char *types, int ntypes, - int nin, int nout, int identity, - char *name, char *doc, - int check_return, const char *signature) -{ - PyUFuncObject *self; - - self = _pya_malloc(sizeof(PyUFuncObject)); - if (self == NULL) return NULL; - PyObject_Init((PyObject *)self, &PyUFunc_Type); - - self->nin = nin; - self->nout = nout; - self->nargs = nin+nout; - self->identity = identity; - - self->functions = func; - self->data = data; - self->types = types; - self->ntypes = ntypes; - self->check_return = check_return; - self->ptr = NULL; - self->obj = NULL; - self->userloops=NULL; - - if (name == NULL) self->name = "?"; - else self->name = name; - - if (doc == NULL) self->doc = "NULL"; - else self->doc = doc; - - /* generalized ufunc */ - self->core_enabled = 0; - self->core_num_dim_ix = 0; - self->core_num_dims = NULL; - self->core_dim_ixs = NULL; - self->core_offsets = NULL; - self->core_signature = NULL; - if (signature != NULL) { - if (_parse_signature(self, signature) != 0) - return NULL; - } - - return (PyObject *)self; -} - -/* This is the first-part of the CObject structure. - - I don't think this will change, but if it should, then - this needs to be fixed. The exposed C-API was insufficient - because I needed to replace the pointer and it wouldn't - let me with a destructor set (even though it works fine - with the destructor). 
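/*
 * Sketch of how a loop is registered through the constructor defined above.
 * The 1-D loop adds doubles; "myadd" and the surrounding names are
 * illustrative, module boilerplate is omitted, and the argument order is the
 * one used above: funcs, data, types, ntypes, nin, nout, identity, name,
 * doc, check_return.
 */
static void
myadd_loop(char **args, intp *dimensions, intp *steps, void *func)
{
    intp i, n = dimensions[0];
    char *in1 = args[0], *in2 = args[1], *out = args[2];

    for (i = 0; i < n; i++, in1 += steps[0], in2 += steps[1], out += steps[2]) {
        *(double *)out = *(double *)in1 + *(double *)in2;
    }
}

static PyUFuncGenericFunction myadd_funcs[] = {myadd_loop};
static void *myadd_data[] = {NULL};
static char myadd_types[] = {PyArray_DOUBLE, PyArray_DOUBLE, PyArray_DOUBLE};

static PyObject *
make_myadd_ufunc(void)
{
    return PyUFunc_FromFuncAndData(myadd_funcs, myadd_data, myadd_types,
                                   1, 2, 1, PyUFunc_Zero,
                                   "myadd", "add two arrays of doubles", 0);
}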
-*/ - -typedef struct { - PyObject_HEAD - void *c_obj; -} _simple_cobj; - -#define _SETCPTR(cobj, val) ((_simple_cobj *)(cobj))->c_obj = (val) - -/* return 1 if arg1 > arg2, 0 if arg1 == arg2, and -1 if arg1 < arg2 - */ -static int -cmp_arg_types(int *arg1, int *arg2, int n) -{ - for(;n>0; n--, arg1++, arg2++) { - if (PyArray_EquivTypenums(*arg1, *arg2)) continue; - if (PyArray_CanCastSafely(*arg1, *arg2)) - return -1; - return 1; - } - return 0; -} - -/* This frees the linked-list structure - when the CObject is destroyed (removed - from the internal dictionary) -*/ -static void -_loop1d_list_free(void *ptr) -{ - PyUFunc_Loop1d *funcdata; - if (ptr == NULL) return; - funcdata = (PyUFunc_Loop1d *)ptr; - if (funcdata == NULL) return; - _pya_free(funcdata->arg_types); - _loop1d_list_free(funcdata->next); - _pya_free(funcdata); -} - - -/*UFUNC_API*/ -static int -PyUFunc_RegisterLoopForType(PyUFuncObject *ufunc, - int usertype, - PyUFuncGenericFunction function, - int *arg_types, - void *data) -{ - PyArray_Descr *descr; - PyUFunc_Loop1d *funcdata; - PyObject *key, *cobj; - int i; - int *newtypes=NULL; - - descr=PyArray_DescrFromType(usertype); - if ((usertype < PyArray_USERDEF) || (descr==NULL)) { - PyErr_SetString(PyExc_TypeError, - "unknown user-defined type"); - return -1; - } - Py_DECREF(descr); - - if (ufunc->userloops == NULL) { - ufunc->userloops = PyDict_New(); - } - key = PyInt_FromLong((long) usertype); - if (key == NULL) return -1; - funcdata = _pya_malloc(sizeof(PyUFunc_Loop1d)); - if (funcdata == NULL) goto fail; - newtypes = _pya_malloc(sizeof(int)*ufunc->nargs); - if (newtypes == NULL) goto fail; - if (arg_types != NULL) { - for(i=0; inargs; i++) { - newtypes[i] = arg_types[i]; - } - } - else { - for(i=0; inargs; i++) { - newtypes[i] = usertype; - } - } - - funcdata->func = function; - funcdata->arg_types = newtypes; - funcdata->data = data; - funcdata->next = NULL; - - /* Get entry for this user-defined type*/ - cobj = PyDict_GetItem(ufunc->userloops, key); - - /* If it's not there, then make one and return. */ - if (cobj == NULL) { - cobj = PyCObject_FromVoidPtr((void *)funcdata, - _loop1d_list_free); - if (cobj == NULL) goto fail; - PyDict_SetItem(ufunc->userloops, key, cobj); - Py_DECREF(cobj); - Py_DECREF(key); - return 0; - } - else { - PyUFunc_Loop1d *current, *prev=NULL; - int cmp=1; - /* There is already at least 1 loop. Place this one in - lexicographic order. If the next one signature - is exactly like this one, then just replace. - Otherwise insert. - */ - current = (PyUFunc_Loop1d *)PyCObject_AsVoidPtr(cobj); - while (current != NULL) { - cmp = cmp_arg_types(current->arg_types, newtypes, - ufunc->nargs); - if (cmp >= 0) break; - prev = current; - current = current->next; - } - if (cmp == 0) { /* just replace it with new function */ - current->func = function; - current->data = data; - _pya_free(newtypes); - _pya_free(funcdata); - } - else { /* insert it before the current one - by hacking the internals of cobject to - replace the function pointer --- - can't use CObject API because destructor is set. 
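/*
 * Generic sketch of the ordered insertion used above for user-defined 1-d
 * loops (and completed just below): walk the singly-linked list until the
 * comparison is no longer "less than"; on an exact match the existing node is
 * updated in place (the real code overwrites func/data and frees the new
 * node), otherwise the new node is spliced in before the current one,
 * possibly becoming the new head.  Keys are plain ints; names illustrative.
 */
struct sorted_node {
    int key;
    struct sorted_node *next;
};

static struct sorted_node *
insert_sorted(struct sorted_node *head, struct sorted_node *item)
{
    struct sorted_node *cur = head, *prev = NULL;

    while (cur != NULL && cur->key < item->key) {
        prev = cur;
        cur = cur->next;
    }
    if (cur != NULL && cur->key == item->key) {
        return head;                      /* replace payload in place        */
    }
    item->next = cur;
    if (prev == NULL) {
        return item;                      /* new front of the list           */
    }
    prev->next = item;
    return head;
}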
- */ - funcdata->next = current; - if (prev == NULL) { /* place this at front */ - _SETCPTR(cobj, funcdata); - } - else { - prev->next = funcdata; - } - } - } - Py_DECREF(key); - return 0; - - - fail: - Py_DECREF(key); - _pya_free(funcdata); - _pya_free(newtypes); - if (!PyErr_Occurred()) PyErr_NoMemory(); - return -1; -} - -#undef _SETCPTR - - -static void -ufunc_dealloc(PyUFuncObject *self) -{ - if (self->core_num_dims) _pya_free(self->core_num_dims); - if (self->core_dim_ixs) _pya_free(self->core_dim_ixs); - if (self->core_offsets) _pya_free(self->core_offsets); - if (self->core_signature) _pya_free(self->core_signature); - if (self->ptr) _pya_free(self->ptr); - Py_XDECREF(self->userloops); - Py_XDECREF(self->obj); - _pya_free(self); -} - -static PyObject * -ufunc_repr(PyUFuncObject *self) -{ - char buf[100]; - - sprintf(buf, "", self->name); - - return PyString_FromString(buf); -} - - -/* -------------------------------------------------------- */ - -/* op.outer(a,b) is equivalent to op(a[:,NewAxis,NewAxis,etc.],b) - where a has b.ndim NewAxis terms appended. - - The result has dimensions a.ndim + b.ndim -*/ - -static PyObject * -ufunc_outer(PyUFuncObject *self, PyObject *args, PyObject *kwds) -{ - int i; - PyObject *ret; - PyArrayObject *ap1=NULL, *ap2=NULL, *ap_new=NULL; - PyObject *new_args, *tmp; - PyObject *shape1, *shape2, *newshape; - - if (self->core_enabled) { - PyErr_Format(PyExc_TypeError, - "method outer is not allowed in ufunc with non-trivial"\ - " signature"); - return NULL; - } - - if(self->nin != 2) { - PyErr_SetString(PyExc_ValueError, - "outer product only supported "\ - "for binary functions"); - return NULL; - } - - if (PySequence_Length(args) != 2) { - PyErr_SetString(PyExc_TypeError, - "exactly two arguments expected"); - return NULL; - } - - tmp = PySequence_GetItem(args, 0); - if (tmp == NULL) return NULL; - ap1 = (PyArrayObject *) \ - PyArray_FromObject(tmp, PyArray_NOTYPE, 0, 0); - Py_DECREF(tmp); - if (ap1 == NULL) return NULL; - - tmp = PySequence_GetItem(args, 1); - if (tmp == NULL) return NULL; - ap2 = (PyArrayObject *)PyArray_FromObject(tmp, PyArray_NOTYPE, 0, 0); - Py_DECREF(tmp); - if (ap2 == NULL) {Py_DECREF(ap1); return NULL;} - - /* Construct new shape tuple */ - shape1 = PyTuple_New(ap1->nd); - if (shape1 == NULL) goto fail; - for(i=0; ind; i++) - PyTuple_SET_ITEM(shape1, i, - PyLong_FromLongLong((longlong)ap1-> \ - dimensions[i])); - - shape2 = PyTuple_New(ap2->nd); - for(i=0; ind; i++) - PyTuple_SET_ITEM(shape2, i, PyInt_FromLong((long) 1)); - if (shape2 == NULL) {Py_DECREF(shape1); goto fail;} - newshape = PyNumber_Add(shape1, shape2); - Py_DECREF(shape1); - Py_DECREF(shape2); - if (newshape == NULL) goto fail; - - ap_new = (PyArrayObject *)PyArray_Reshape(ap1, newshape); - Py_DECREF(newshape); - if (ap_new == NULL) goto fail; - - new_args = Py_BuildValue("(OO)", ap_new, ap2); - Py_DECREF(ap1); - Py_DECREF(ap2); - Py_DECREF(ap_new); - ret = ufunc_generic_call(self, new_args, kwds); - Py_DECREF(new_args); - return ret; - - fail: - Py_XDECREF(ap1); - Py_XDECREF(ap2); - Py_XDECREF(ap_new); - return NULL; -} - - -static PyObject * -ufunc_reduce(PyUFuncObject *self, PyObject *args, PyObject *kwds) -{ - - return PyUFunc_GenericReduction(self, args, kwds, UFUNC_REDUCE); -} - -static PyObject * -ufunc_accumulate(PyUFuncObject *self, PyObject *args, PyObject *kwds) -{ - - return PyUFunc_GenericReduction(self, args, kwds, UFUNC_ACCUMULATE); -} - -static PyObject * -ufunc_reduceat(PyUFuncObject *self, PyObject *args, PyObject *kwds) -{ - return 
PyUFunc_GenericReduction(self, args, kwds, UFUNC_REDUCEAT); -} - - -static struct PyMethodDef ufunc_methods[] = { - {"reduce", (PyCFunction)ufunc_reduce, METH_VARARGS | METH_KEYWORDS, NULL }, - {"accumulate", (PyCFunction)ufunc_accumulate, - METH_VARARGS | METH_KEYWORDS, NULL }, - {"reduceat", (PyCFunction)ufunc_reduceat, - METH_VARARGS | METH_KEYWORDS, NULL }, - {"outer", (PyCFunction)ufunc_outer, METH_VARARGS | METH_KEYWORDS, NULL}, - {NULL, NULL, 0, NULL} /* sentinel */ -}; - - - -/* construct the string - y1,y2,...,yn -*/ -static PyObject * -_makeargs(int num, char *ltr, int null_if_none) -{ - PyObject *str; - int i; - switch (num) { - case 0: - if (null_if_none) return NULL; - return PyString_FromString(""); - case 1: - return PyString_FromString(ltr); - } - str = PyString_FromFormat("%s1, %s2", ltr, ltr); - for(i = 3; i <= num; ++i) { - PyString_ConcatAndDel(&str, PyString_FromFormat(", %s%d", ltr, i)); - } - return str; -} - -static char -_typecharfromnum(int num) { - PyArray_Descr *descr; - char ret; - - descr = PyArray_DescrFromType(num); - ret = descr->type; - Py_DECREF(descr); - return ret; -} - -static PyObject * -ufunc_get_doc(PyUFuncObject *self) -{ - /* Put docstring first or FindMethod finds it...*/ - /* could so some introspection on name and nin + nout */ - /* to automate the first part of it */ - /* the doc string shouldn't need the calling convention */ - /* construct - name(x1, x2, ...,[ out1, out2, ...]) - - __doc__ - */ - PyObject *outargs, *inargs, *doc; - outargs = _makeargs(self->nout, "out", 1); - inargs = _makeargs(self->nin, "x", 0); - if (outargs == NULL) { - doc = PyString_FromFormat("%s(%s)\n\n%s", - self->name, - PyString_AS_STRING(inargs), - self->doc); - } else { - doc = PyString_FromFormat("%s(%s[, %s])\n\n%s", - self->name, - PyString_AS_STRING(inargs), - PyString_AS_STRING(outargs), - self->doc); - Py_DECREF(outargs); - } - Py_DECREF(inargs); - return doc; -} - -static PyObject * -ufunc_get_nin(PyUFuncObject *self) -{ - return PyInt_FromLong(self->nin); -} - -static PyObject * -ufunc_get_nout(PyUFuncObject *self) -{ - return PyInt_FromLong(self->nout); -} - -static PyObject * -ufunc_get_nargs(PyUFuncObject *self) -{ - return PyInt_FromLong(self->nargs); -} - -static PyObject * -ufunc_get_ntypes(PyUFuncObject *self) -{ - return PyInt_FromLong(self->ntypes); -} - -static PyObject * -ufunc_get_types(PyUFuncObject *self) -{ - /* return a list with types grouped - input->output */ - PyObject *list; - PyObject *str; - int k, j, n, nt=self->ntypes; - int ni = self->nin; - int no = self->nout; - char *t; - list = PyList_New(nt); - if (list == NULL) return NULL; - t = _pya_malloc(no+ni+2); - n = 0; - for(k=0; ktypes[n]); - n++; - } - t[ni] = '-'; - t[ni+1] = '>'; - for(j=0; jtypes[n]); - n++; - } - str = PyString_FromStringAndSize(t, no+ni+2); - PyList_SET_ITEM(list, k, str); - } - _pya_free(t); - return list; -} - -static PyObject * -ufunc_get_name(PyUFuncObject *self) -{ - return PyString_FromString(self->name); -} - -static PyObject * -ufunc_get_identity(PyUFuncObject *self) -{ - switch(self->identity) { - case PyUFunc_One: - return PyInt_FromLong(1); - case PyUFunc_Zero: - return PyInt_FromLong(0); - } - return Py_None; -} - -static PyObject * -ufunc_get_signature(PyUFuncObject *self) -{ - if (!self->core_enabled) - Py_RETURN_NONE; - return PyString_FromString(self->core_signature); -} - -#undef _typecharfromnum - -/* Docstring is now set from python */ -/* static char *Ufunctype__doc__ = NULL; */ - -static PyGetSetDef ufunc_getset[] = { - {"__doc__", 
(getter)ufunc_get_doc, NULL, "documentation string", NULL}, - {"nin", (getter)ufunc_get_nin, NULL, "number of inputs", NULL}, - {"nout", (getter)ufunc_get_nout, NULL, "number of outputs", NULL}, - {"nargs", (getter)ufunc_get_nargs, NULL, "number of arguments", NULL}, - {"ntypes", (getter)ufunc_get_ntypes, NULL, "number of types", NULL}, - {"types", (getter)ufunc_get_types, NULL, "return a list with types grouped input->output", NULL}, - {"__name__", (getter)ufunc_get_name, NULL, "function name", NULL}, - {"identity", (getter)ufunc_get_identity, NULL, "identity value", NULL}, - {"signature",(getter)ufunc_get_signature,NULL, "signature"}, - {NULL, NULL, NULL, NULL, NULL}, /* Sentinel */ -}; - -static PyTypeObject PyUFunc_Type = { - PyObject_HEAD_INIT(0) - 0, /*ob_size*/ - "numpy.ufunc", /*tp_name*/ - sizeof(PyUFuncObject), /*tp_basicsize*/ - 0, /*tp_itemsize*/ - /* methods */ - (destructor)ufunc_dealloc, /*tp_dealloc*/ - (printfunc)0, /*tp_print*/ - (getattrfunc)0, /*tp_getattr*/ - (setattrfunc)0, /*tp_setattr*/ - (cmpfunc)0, /*tp_compare*/ - (reprfunc)ufunc_repr, /*tp_repr*/ - 0, /*tp_as_number*/ - 0, /*tp_as_sequence*/ - 0, /*tp_as_mapping*/ - (hashfunc)0, /*tp_hash*/ - (ternaryfunc)ufunc_generic_call, /*tp_call*/ - (reprfunc)ufunc_repr, /*tp_str*/ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT, /* tp_flags */ - NULL, /* tp_doc */ /* was Ufunctype__doc__ */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - ufunc_methods, /* tp_methods */ - 0, /* tp_members */ - ufunc_getset, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ - -#ifdef COUNT_ALLOCS - /* these must be last and never explicitly initialized */ - 0, /* tp_allocs */ - 0, /* tp_frees */ - 0, /* tp_maxalloc */ - 0, /* tp_prev */ - 0, /* *tp_next */ -#endif -}; - -/* End of code for ufunc objects */ -/* -------------------------------------------------------- */ Copied: trunk/numpy/core/src/umath_funcs_c99.inc.src (from rev 6087, trunk/numpy/core/src/math_c99.inc.src) =================================================================== --- trunk/numpy/core/src/math_c99.inc.src 2008-11-21 20:49:33 UTC (rev 6087) +++ trunk/numpy/core/src/umath_funcs_c99.inc.src 2008-11-22 01:28:52 UTC (rev 6089) @@ -0,0 +1,377 @@ +/* + * vim:syntax=c + * A small module to implement missing C99 math capabilities required by numpy + * + * Please keep this independant of python ! + * + * How to add a function to this section + * ------------------------------------- + * + * Say you want to add `foo`, these are the steps and the reasons for them. + * + * 1) Add foo to the appropriate list in the configuration system. The + * lists can be found in numpy/core/setup.py lines 63-105. Read the + * comments that come with them, they are very helpful. + * + * 2) The configuration system will define a macro HAVE_FOO if your function + * can be linked from the math library. The result can depend on the + * optimization flags as well as the compiler, so can't be known ahead of + * time. If the function can't be linked, then either it is absent, defined + * as a macro, or is an intrinsic (hardware) function. 
If it is linkable it + * may still be the case that no prototype is available. So to cover all the + * cases requires the following construction. + * + * i) Undefine any possible macros: + * + * #ifdef foo + * #undef foo + * #endif + * + * ii) Check if the function was in the library, If not, define the + * function with npy_ prepended to its name to avoid conflict with any + * intrinsic versions, then use a define so that the preprocessor will + * replace foo with npy_foo before the compilation pass. Make the + * function static to avoid poluting the module library. + * + * #ifdef foo + * #undef foo + * #endif + * #ifndef HAVE_FOO + * static double + * npy_foo(double x) + * { + * return x; + * } + * #define foo npy_foo + * + * iii) Finally, even if foo is in the library, add a prototype. Just being + * in the library doesn't guarantee a prototype in math.h, and in any case + * you want to make sure the prototype is what you think it is. Count on it, + * whatever can go wrong will go wrong. Think defensively! The result: + * + * #ifdef foo + * #undef foo + * #endif + * #ifndef HAVE_FOO + * static double + * npy_foo(double x) + * { + * return x; + * } + * #define foo npy_foo + * #else + * double foo(double x); + * #end + * + * And there you have it. + * + */ + +/* + ***************************************************************************** + ** DISTRO VOODOO ** + ***************************************************************************** + */ + + +/* + ***************************************************************************** + ** BASIC MATH FUNCTIONS ** + ***************************************************************************** + */ + +/* Original code by Konrad Hinsen. */ +#ifndef HAVE_EXPM1 +static double +npy_expm1(double x) +{ + double u = exp(x); + if (u == 1.0) { + return x; + } else if (u-1.0 == -1.0) { + return -1; + } else { + return (u-1.0) * x/log(u); + } +} +#define expm1 npy_expm1 +#else +double expm1(double x); +#endif + +#ifndef HAVE_LOG1P +static double +npy_log1p(double x) +{ + double u = 1. + x; + if (u == 1.0) { + return x; + } else { + return log(u) * x / (u - 1); + } +} +#define log1p npy_log1p +#else +double log1p(double x); +#endif + +#ifndef HAVE_HYPOT +static double +npy_hypot(double x, double y) +{ + double yx; + + x = fabs(x); + y = fabs(y); + if (x < y) { + double temp = x; + x = y; + y = temp; + } + if (x == 0.) 
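
The expm1 and log1p fallbacks above special-case arguments near zero because computing exp(x)-1 or log(1+x) naively cancels away almost all significant digits for tiny x. A quick numerical illustration in Python (standard library only, not part of this commit):

    import math

    x = 1e-12
    print(math.exp(x) - 1.0)   # roughly 1.0001e-12 -- only the leading digits survive
    print(math.expm1(x))       # roughly 1.0000000000005e-12 -- full precision
    print(math.log(1.0 + x))   # same cancellation problem
    print(math.log1p(x))       # roughly 9.999999999995e-13 -- full precision
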
+ return 0.; + else { + yx = y/x; + return x*sqrt(1.+yx*yx); + } +} +#define hypot npy_hypot +#else +double hypot(double x, double y); +#endif + +#ifndef HAVE_ACOSH +static double +npy_acosh(double x) +{ + return 2*log(sqrt((x+1.0)/2)+sqrt((x-1.0)/2)); +} +#define acosh npy_acosh +#else +double acosh(double x); +#endif + +#ifndef HAVE_ASINH +static double +npy_asinh(double xx) +{ + double x, d; + int sign; + if (xx < 0.0) { + sign = -1; + x = -xx; + } + else { + sign = 1; + x = xx; + } + if (x > 1e8) { + d = x; + } else { + d = sqrt(x*x + 1); + } + return sign*log1p(x*(1.0 + x/(d+1))); +} +#define asinh npy_asinh +#else +double asinh(double xx); +#endif + +#ifndef HAVE_ATANH +static double +npy_atanh(double x) +{ + return 0.5*log1p(2.0*x/(1.0-x)); +} +#define atanh npy_atanh +#else +double atanh(double x); +#endif + +#ifndef HAVE_RINT +static double +npy_rint(double x) +{ + double y, r; + + y = floor(x); + r = x - y; + + if (r > 0.5) goto rndup; + + /* Round to nearest even */ + if (r==0.5) { + r = y - 2.0*floor(0.5*y); + if (r==1.0) { + rndup: + y+=1.0; + } + } + return y; +} +#define rint npy_rint +#else +double rint(double x); +#endif + +#ifndef HAVE_TRUNC +static double +npy_trunc(double x) +{ + return x < 0 ? ceil(x) : floor(x); +} +#define trunc npy_trunc +#else +double trunc(double x); +#endif + +#ifndef HAVE_EXP2 +#define LOG2 0.69314718055994530943 +static double +npy_exp2(double x) +{ + return exp(LOG2*x); +} +#define exp2 npy_exp2 +#undef LOG2 +#else +double exp2(double x); +#endif + +#ifndef HAVE_LOG2 +#define INVLOG2 1.4426950408889634074 +static double +npy_log2(double x) +{ + return INVLOG2*log(x); +} +#define log2 npy_log2 +#undef INVLOG2 +#else +double log2(double x); +#endif + +/* + ***************************************************************************** + ** IEEE 754 FPU HANDLING ** + ***************************************************************************** + */ +#if !defined(HAVE_DECL_ISNAN) + # define isnan(x) ((x) != (x)) +#endif + +/* VS 2003 with /Ox optimizes (x)-(x) to 0, which is not IEEE compliant. So we + * force (x) + (-x), which seems to work. */ +#if !defined(HAVE_DECL_ISFINITE) + # define isfinite(x) !isnan((x) + (-x)) +#endif + +#if !defined(HAVE_DECL_ISINF) +#define isinf(x) (!isfinite(x) && !isnan(x)) +#endif + +#if !defined(HAVE_DECL_SIGNBIT) + #include "_signbit.c" + # define signbit(x) \ + (sizeof (x) == sizeof (long double) ? signbit_ld (x) \ + : sizeof (x) == sizeof (double) ? signbit_d (x) \ + : signbit_f (x)) + +static int signbit_f (float x) +{ + return signbit_d((double)x); +} + +static int signbit_ld (long double x) +{ + return signbit_d((double)x); +} +#endif + +/* + * if C99 extensions not available then define dummy functions that use the + * double versions for + * + * sin, cos, tan + * sinh, cosh, tanh, + * fabs, floor, ceil, rint, trunc + * sqrt, log10, log, exp, expm1 + * asin, acos, atan, + * asinh, acosh, atanh + * + * hypot, atan2, pow, fmod, modf + * + * We assume the above are always available in their double versions. + * + * NOTE: some facilities may be available as macro only instead of functions. + * For simplicity, we define our own functions and undef the macros. We could + * instead test for the macro, but I am lazy to do that for now. 
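
The fallback macros above lean on two IEEE 754 facts: NaN is the only value that compares unequal to itself (hence isnan(x) defined as (x) != (x)), and negative zero compares equal to zero, so its sign is only visible through the sign bit. A quick Python check of both properties (illustration only):

    import math
    import numpy as np

    nan = float('nan')
    print(nan != nan)                      # True -- the (x) != (x) trick
    print(-0.0 == 0.0, np.signbit(-0.0))   # True True -- equal, yet the sign bit differs
    print(math.copysign(1.0, -0.0))        # -1.0
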
+ */ + +/**begin repeat + * #type = longdouble, float# + * #TYPE = LONGDOUBLE, FLOAT# + * #c = l,f# + * #C = L,F# + */ + +/**begin repeat1 + * #kind = sin,cos,tan,sinh,cosh,tanh,fabs,floor,ceil,rint,trunc,sqrt,log10, + * log,exp,expm1,asin,acos,atan,asinh,acosh,atanh,log1p,exp2,log2# + * #KIND = SIN,COS,TAN,SINH,COSH,TANH,FABS,FLOOR,CEIL,RINT,TRUNC,SQRT,LOG10, + * LOG,EXP,EXPM1,ASIN,ACOS,ATAN,ASINH,ACOSH,ATANH,LOG1P,EXP2,LOG2# + */ + +#ifdef @kind@@c@ +#undef @kind@@c@ +#endif +#ifndef HAVE_ at KIND@@C@ +static @type@ +npy_ at kind@@c@(@type@ x) +{ + return (@type@) @kind@((double)x); +} +#define @kind@@c@ npy_ at kind@@c@ +#else + at type@ @kind@@c@(@type@ x); +#endif + +/**end repeat1**/ + +/**begin repeat1 + * #kind = atan2,hypot,pow,fmod# + * #KIND = ATAN2,HYPOT,POW,FMOD# + */ +#ifdef @kind@@c@ +#undef @kind@@c@ +#endif +#ifndef HAVE_ at KIND@@C@ +static @type@ +npy_ at kind@@c@(@type@ x, @type@ y) +{ + return (@type@) @kind@((double)x, (double) y); +} +#define @kind@@c@ npy_ at kind@@c@ +#else + at type@ @kind@@c@(@type@ x, @type@ y); +#endif +/**end repeat1**/ + +#ifdef modf at c@ +#undef modf at c@ +#endif +#ifndef HAVE_MODF at C@ +static @type@ +npy_modf at c@(@type@ x, @type@ *iptr) +{ + double niptr; + double y = modf((double)x, &niptr); + *iptr = (@type@) niptr; + return (@type@) y; +} +#define modf at c@ npy_modf at c@ +#else + at type@ modf at c@(@type@ x, @type@ *iptr); +#endif + +/**end repeat**/ Copied: trunk/numpy/core/src/umath_ufunc_object.inc (from rev 6087, trunk/numpy/core/src/ufuncobject.c) =================================================================== --- trunk/numpy/core/src/ufuncobject.c 2008-11-21 20:49:33 UTC (rev 6087) +++ trunk/numpy/core/src/umath_ufunc_object.inc 2008-11-22 01:28:52 UTC (rev 6089) @@ -0,0 +1,4134 @@ +/* + * Python Universal Functions Object -- Math for all types, plus fast + * arrays math + * + * Full description + * + * This supports mathematical (and Boolean) functions on arrays and other python + * objects. Math on large arrays of basic C types is rather efficient. + * + * Travis E. Oliphant 2005, 2006 oliphant at ee.byu.edu (oliphant.travis at ieee.org) + * Brigham Young University + * + * based on the + * + * Original Implementation: + * Copyright (c) 1995, 1996, 1997 Jim Hugunin, hugunin at mit.edu + * + * with inspiration and code from + * Numarray + * Space Science Telescope Institute + * J. Todd Miller + * Perry Greenfield + * Rick White + * + */ + + +#define USE_USE_DEFAULTS 1 + + + + +/* ---------------------------------------------------------------- */ + + +/* fpstatus is the ufunc_formatted hardware status + errmask is the handling mask specified by the user. + errobj is a Python object with (string, callable object or None) + or NULL +*/ + +/* + 2. for each of the flags + determine whether to ignore, warn, raise error, or call Python function. 
+ If ignore, do nothing + If warn, print a warning and continue + If raise return an error + If call, call a user-defined function with string +*/ + +static int +_error_handler(int method, PyObject *errobj, char *errtype, int retstatus, int *first) +{ + PyObject *pyfunc, *ret, *args; + char *name=PyString_AS_STRING(PyTuple_GET_ITEM(errobj,0)); + char msg[100]; + + ALLOW_C_API_DEF + + ALLOW_C_API + + switch(method) { + case UFUNC_ERR_WARN: + PyOS_snprintf(msg, sizeof(msg), + "%s encountered in %s", errtype, name); + if (PyErr_Warn(PyExc_RuntimeWarning, msg) < 0) goto fail; + break; + case UFUNC_ERR_RAISE: + PyErr_Format(PyExc_FloatingPointError, + "%s encountered in %s", + errtype, name); + goto fail; + case UFUNC_ERR_CALL: + pyfunc = PyTuple_GET_ITEM(errobj, 1); + + if (pyfunc == Py_None) { + PyErr_Format(PyExc_NameError, + "python callback specified for %s (in " \ + " %s) but no function found.", + errtype, name); + goto fail; + } + args = Py_BuildValue("NN", PyString_FromString(errtype), + PyInt_FromLong((long) retstatus)); + if (args == NULL) goto fail; + ret = PyObject_CallObject(pyfunc, args); + Py_DECREF(args); + if (ret == NULL) goto fail; + Py_DECREF(ret); + + break; + case UFUNC_ERR_PRINT: + if (*first) { + fprintf(stderr, "Warning: %s encountered in %s\n", errtype, name); + *first = 0; + } + break; + case UFUNC_ERR_LOG: + if (first) { + *first = 0; + pyfunc = PyTuple_GET_ITEM(errobj, 1); + if (pyfunc == Py_None) { + PyErr_Format(PyExc_NameError, + "log specified for %s (in %s) but no " \ + "object with write method found.", + errtype, name); + goto fail; + } + PyOS_snprintf(msg, sizeof(msg), + "Warning: %s encountered in %s\n", errtype, name); + ret = PyObject_CallMethod(pyfunc, "write", "s", msg); + if (ret == NULL) goto fail; + Py_DECREF(ret); + } + break; + } + DISABLE_C_API + return 0; + + fail: + DISABLE_C_API + return -1; +} + + +/*UFUNC_API*/ +static int +PyUFunc_getfperr(void) +{ + int retstatus; + UFUNC_CHECK_STATUS(retstatus); + return retstatus; +} + +#define HANDLEIT(NAME, str) {if (retstatus & UFUNC_FPE_##NAME) { \ + handle = errmask & UFUNC_MASK_##NAME; \ + if (handle && \ + _error_handler(handle >> UFUNC_SHIFT_##NAME, \ + errobj, str, retstatus, first) < 0) \ + return -1; \ + }} + +/*UFUNC_API*/ +static int +PyUFunc_handlefperr(int errmask, PyObject *errobj, int retstatus, int *first) +{ + int handle; + if (errmask && retstatus) { + HANDLEIT(DIVIDEBYZERO, "divide by zero"); + HANDLEIT(OVERFLOW, "overflow"); + HANDLEIT(UNDERFLOW, "underflow"); + HANDLEIT(INVALID, "invalid value"); + } + return 0; +} + +#undef HANDLEIT + + +/*UFUNC_API*/ +static int +PyUFunc_checkfperr(int errmask, PyObject *errobj, int *first) +{ + int retstatus; + + /* 1. 
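
_error_handler above implements the ignore / warn / raise / call / print / log branches selected by the user's error mask. From Python those branches are reached through np.seterr and np.seterrcall; a short sketch of the 'call' mode (public API, behaviour as generally documented, not verified against this exact revision):

    import numpy as np

    def handler(err_type, flag):
        # corresponds to the UFUNC_ERR_CALL branch above
        print("caught", err_type, "status flag", flag)

    old = np.seterr(divide='call')   # other modes: 'ignore', 'warn', 'raise', 'print', 'log'
    np.seterrcall(handler)
    np.array([1.0]) / 0.0            # invokes handler('divide by zero', ...)
    np.seterr(**old)
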
check hardware flag --- this is platform dependent code */ + retstatus = PyUFunc_getfperr(); + return PyUFunc_handlefperr(errmask, errobj, retstatus, first); +} + + +/* Checking the status flag clears it */ +/*UFUNC_API*/ +static void +PyUFunc_clearfperr() +{ + PyUFunc_getfperr(); +} + + +#define NO_UFUNCLOOP 0 +#define ZERO_EL_REDUCELOOP 0 +#define ONE_UFUNCLOOP 1 +#define ONE_EL_REDUCELOOP 1 +#define NOBUFFER_UFUNCLOOP 2 +#define NOBUFFER_REDUCELOOP 2 +#define BUFFER_UFUNCLOOP 3 +#define BUFFER_REDUCELOOP 3 +#define SIGNATURE_NOBUFFER_UFUNCLOOP 4 + + +static char +_lowest_type(char intype) +{ + switch(intype) { + /* case PyArray_BYTE */ + case PyArray_SHORT: + case PyArray_INT: + case PyArray_LONG: + case PyArray_LONGLONG: + return PyArray_BYTE; + /* case PyArray_UBYTE */ + case PyArray_USHORT: + case PyArray_UINT: + case PyArray_ULONG: + case PyArray_ULONGLONG: + return PyArray_UBYTE; + /* case PyArray_FLOAT:*/ + case PyArray_DOUBLE: + case PyArray_LONGDOUBLE: + return PyArray_FLOAT; + /* case PyArray_CFLOAT:*/ + case PyArray_CDOUBLE: + case PyArray_CLONGDOUBLE: + return PyArray_CFLOAT; + default: + return intype; + } +} + +static char *_types_msg = "function not supported for these types, " \ + "and can't coerce safely to supported types"; + +/* Called for non-NULL user-defined functions. + The object should be a CObject pointing to a linked-list of functions + storing the function, data, and signature of all user-defined functions. + There must be a match with the input argument types or an error + will occur. +*/ +static int +_find_matching_userloop(PyObject *obj, int *arg_types, + PyArray_SCALARKIND *scalars, + PyUFuncGenericFunction *function, void **data, + int nargs, int nin) +{ + PyUFunc_Loop1d *funcdata; + int i; + funcdata = (PyUFunc_Loop1d *)PyCObject_AsVoidPtr(obj); + while (funcdata != NULL) { + for(i=0; iarg_types[i], + scalars[i])) + break; + } + if (i==nin) { /* match found */ + *function = funcdata->func; + *data = funcdata->data; + /* Make sure actual arg_types supported + by the loop are used */ + for(i=0; iarg_types[i]; + } + return 0; + } + funcdata = funcdata->next; + } + return -1; +} + +/* if only one type is specified then it is the "first" output data-type + and the first signature matching this output data-type is returned. + + if a tuple of types is specified then an exact match to the signature + is searched and it much match exactly or an error occurs +*/ +static int +extract_specified_loop(PyUFuncObject *self, int *arg_types, + PyUFuncGenericFunction *function, void **data, + PyObject *type_tup, int userdef) +{ + Py_ssize_t n=1; + int *rtypenums; + static char msg[] = "loop written to specified type(s) not found"; + PyArray_Descr *dtype; + int nargs; + int i, j; + int strtype=0; + + nargs = self->nargs; + + if (PyTuple_Check(type_tup)) { + n = PyTuple_GET_SIZE(type_tup); + if (n != 1 && n != nargs) { + PyErr_Format(PyExc_ValueError, + "a type-tuple must be specified " \ + "of length 1 or %d for %s", nargs, + self->name ? self->name : "(unknown)"); + return -1; + } + } + else if PyString_Check(type_tup) { + Py_ssize_t slen; + char *thestr; + slen = PyString_GET_SIZE(type_tup); + thestr = PyString_AS_STRING(type_tup); + for(i=0; i < slen-2; i++) { + if (thestr[i] == '-' && thestr[i+1] == '>') + break; + } + if (i < slen-2) { + strtype = 1; + n = slen-2; + if (i != self->nin || + slen-2-i != self->nout) { + PyErr_Format(PyExc_ValueError, + "a type-string for %s, " \ + "requires %d typecode(s) before " \ + "and %d after the -> sign", + self->name ? 
self->name : "(unknown)", + self->nin, self->nout); + return -1; + } + } + } + rtypenums = (int *)_pya_malloc(n*sizeof(int)); + if (rtypenums==NULL) { + PyErr_NoMemory(); + return -1; + } + + if (strtype) { + char *ptr; + ptr = PyString_AS_STRING(type_tup); + i = 0; + while (i < n) { + if (*ptr == '-' || *ptr == '>') { + ptr++; + continue; + } + dtype = PyArray_DescrFromType((int) *ptr); + if (dtype == NULL) goto fail; + rtypenums[i] = dtype->type_num; + Py_DECREF(dtype); + ptr++; i++; + } + } + else if (PyTuple_Check(type_tup)) { + for(i=0; itype_num; + Py_DECREF(dtype); + } + } + else { + if (PyArray_DescrConverter(type_tup, &dtype) == NPY_FAIL) { + goto fail; + } + rtypenums[0] = dtype->type_num; + Py_DECREF(dtype); + } + + if (userdef > 0) { /* search in the user-defined functions */ + PyObject *key, *obj; + PyUFunc_Loop1d *funcdata; + obj = NULL; + key = PyInt_FromLong((long) userdef); + if (key == NULL) goto fail; + obj = PyDict_GetItem(self->userloops, key); + Py_DECREF(key); + if (obj == NULL) { + PyErr_SetString(PyExc_TypeError, + "user-defined type used in ufunc" \ + " with no registered loops"); + goto fail; + } + /* extract the correct function + data and argtypes + */ + funcdata = (PyUFunc_Loop1d *)PyCObject_AsVoidPtr(obj); + while (funcdata != NULL) { + if (n != 1) { + for(i=0; iarg_types[i]) + break; + } + } + else if (rtypenums[0] == funcdata->arg_types[self->nin]) { + i = nargs; + } + else i = -1; + if (i == nargs) { + *function = funcdata->func; + *data = funcdata->data; + for(i=0; iarg_types[i]; + } + Py_DECREF(obj); + goto finish; + } + funcdata = funcdata->next; + } + PyErr_SetString(PyExc_TypeError, msg); + goto fail; + } + + /* look for match in self->functions */ + + for(j=0; jntypes; j++) { + if (n != 1) { + for(i=0; itypes[j*nargs + i]) + break; + } + } + else if (rtypenums[0] == self->types[j*nargs+self->nin]) { + i = nargs; + } + else i = -1; + if (i == nargs) { + *function = self->functions[j]; + *data = self->data[j]; + for(i=0; itypes[j*nargs+i]; + } + goto finish; + } + } + PyErr_SetString(PyExc_TypeError, msg); + + + fail: + _pya_free(rtypenums); + return -1; + + finish: + _pya_free(rtypenums); + return 0; + +} + + +/* + * Called to determine coercion + * Can change arg_types. + */ + +static int +select_types(PyUFuncObject *self, int *arg_types, + PyUFuncGenericFunction *function, void **data, + PyArray_SCALARKIND *scalars, + PyObject *typetup) +{ + int i, j; + char start_type; + int userdef = -1; + int userdef_ind = -1; + + if (self->userloops) { + for(i = 0; i < self->nin; i++) { + if (PyTypeNum_ISUSERDEF(arg_types[i])) { + userdef = arg_types[i]; + userdef_ind = i; + break; + } + } + } + + if (typetup != NULL) + return extract_specified_loop(self, arg_types, function, data, + typetup, userdef); + + if (userdef > 0) { + PyObject *key, *obj; + int ret = -1; + obj = NULL; + + /* + * Look through all the registered loops for all the user-defined + * types to find a match. + */ + while (ret == -1) { + if (userdef_ind >= self->nin) { + break; + } + userdef = arg_types[userdef_ind++]; + if (!(PyTypeNum_ISUSERDEF(userdef))) { + continue; + } + key = PyInt_FromLong((long) userdef); + if (key == NULL) { + return -1; + } + obj = PyDict_GetItem(self->userloops, key); + Py_DECREF(key); + if (obj == NULL) { + continue; + } + /* + * extract the correct function + * data and argtypes for this user-defined type. 
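
extract_specified_loop above is what services the sig= keyword: a single dtype, a tuple of one dtype per argument, or a 'dd->d'-style string forces a particular inner loop instead of the automatic selection. A hedged Python illustration (the keyword is spelled sig= in the code of this era; later numpy spells it signature=):

    import numpy as np

    a = np.arange(3, dtype=np.int8)

    # force the double loop; the int8 inputs are cast to double first
    r = np.add(a, a, sig='dd->d')
    print(r.dtype)          # float64

    # equivalent tuple form: one entry per argument (nin + nout)
    r = np.add(a, a, sig=(np.float64, np.float64, np.float64))
    print(r.dtype)          # float64
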
+ */ + ret = _find_matching_userloop(obj, arg_types, scalars, + function, data, self->nargs, + self->nin); + } + if (ret == 0) { + return ret; + } + PyErr_SetString(PyExc_TypeError, _types_msg); + return ret; + } + + start_type = arg_types[0]; + /* + * If the first argument is a scalar we need to place + * the start type as the lowest type in the class + */ + if (scalars[0] != PyArray_NOSCALAR) { + start_type = _lowest_type(start_type); + } + + i = 0; + while (i < self->ntypes && start_type > self->types[i*self->nargs]) { + i++; + } + for (; i < self->ntypes; i++) { + for (j = 0; j < self->nin; j++) { + if (!PyArray_CanCoerceScalar(arg_types[j], + self->types[i*self->nargs + j], + scalars[j])) + break; + } + if (j == self->nin) { + break; + } + } + if (i >= self->ntypes) { + PyErr_SetString(PyExc_TypeError, _types_msg); + return -1; + } + for (j = 0; j < self->nargs; j++) { + arg_types[j] = self->types[i*self->nargs+j]; + } + if (self->data) { + *data = self->data[i]; + } + else { + *data = NULL; + } + *function = self->functions[i]; + + return 0; +} + +#if USE_USE_DEFAULTS==1 +static int PyUFunc_NUM_NODEFAULTS=0; +#endif +static PyObject *PyUFunc_PYVALS_NAME=NULL; + + +static int +_extract_pyvals(PyObject *ref, char *name, int *bufsize, + int *errmask, PyObject **errobj) +{ + PyObject *retval; + + *errobj = NULL; + if (!PyList_Check(ref) || (PyList_GET_SIZE(ref)!=3)) { + PyErr_Format(PyExc_TypeError, "%s must be a length 3 list.", + UFUNC_PYVALS_NAME); + return -1; + } + + *bufsize = PyInt_AsLong(PyList_GET_ITEM(ref, 0)); + if ((*bufsize == -1) && PyErr_Occurred()) { + return -1; + } + if ((*bufsize < PyArray_MIN_BUFSIZE) || + (*bufsize > PyArray_MAX_BUFSIZE) || + (*bufsize % 16 != 0)) { + PyErr_Format(PyExc_ValueError, + "buffer size (%d) is not in range " + "(%"INTP_FMT" - %"INTP_FMT") or not a multiple of 16", + *bufsize, (intp) PyArray_MIN_BUFSIZE, + (intp) PyArray_MAX_BUFSIZE); + return -1; + } + + *errmask = PyInt_AsLong(PyList_GET_ITEM(ref, 1)); + if (*errmask < 0) { + if (PyErr_Occurred()) { + return -1; + } + PyErr_Format(PyExc_ValueError, + "invalid error mask (%d)", + *errmask); + return -1; + } + + retval = PyList_GET_ITEM(ref, 2); + if (retval != Py_None && !PyCallable_Check(retval)) { + PyObject *temp; + temp = PyObject_GetAttrString(retval, "write"); + if (temp == NULL || !PyCallable_Check(temp)) { + PyErr_SetString(PyExc_TypeError, + "python object must be callable or have " \ + "a callable write method"); + Py_XDECREF(temp); + return -1; + } + Py_DECREF(temp); + } + + *errobj = Py_BuildValue("NO", + PyString_FromString(name), + retval); + if (*errobj == NULL) { + return -1; + } + + return 0; +} + + + +/*UFUNC_API*/ +static int +PyUFunc_GetPyValues(char *name, int *bufsize, int *errmask, PyObject **errobj) +{ + PyObject *thedict; + PyObject *ref = NULL; + +#if USE_USE_DEFAULTS==1 + if (PyUFunc_NUM_NODEFAULTS != 0) { +#endif + if (PyUFunc_PYVALS_NAME == NULL) { + PyUFunc_PYVALS_NAME = \ + PyString_InternFromString(UFUNC_PYVALS_NAME); + } + thedict = PyThreadState_GetDict(); + if (thedict == NULL) { + thedict = PyEval_GetBuiltins(); + } + ref = PyDict_GetItem(thedict, PyUFunc_PYVALS_NAME); +#if USE_USE_DEFAULTS==1 + } +#endif + if (ref == NULL) { + *errmask = UFUNC_ERR_DEFAULT; + *errobj = Py_BuildValue("NO", + PyString_FromString(name), + Py_None); + *bufsize = PyArray_BUFSIZE; + return 0; + } + return _extract_pyvals(ref, name, bufsize, errmask, errobj); +} + +/* Create copies for any arrays that are less than loop->bufsize + in total size (or core_enabled) and are 
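
_extract_pyvals and PyUFunc_GetPyValues above read the per-thread [bufsize, errmask, errobj-or-None] list that np.seterr and np.setbufsize maintain under UFUNC_PYVALS_NAME. The raw list has long been reachable from Python through np.geterrobj/np.seterrobj (sketch only, no specific default values claimed):

    import numpy as np

    print(np.geterrobj())     # [buffer size, packed error mask, error callback or None]

    np.setbufsize(16384)      # must be a multiple of 16 and inside the allowed range
    np.seterr(over='raise')   # folds into the packed error mask
    print(np.geterrobj())     # reflects both calls above
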
mis-behaved or in need + of casting. +*/ + +static int +_create_copies(PyUFuncLoopObject *loop, int *arg_types, PyArrayObject **mps) +{ + int nin = loop->ufunc->nin; + int i; + intp size; + PyObject *new; + PyArray_Descr *ntype; + PyArray_Descr *atype; + + for(i=0; idescr; + atype = PyArray_DescrFromType(arg_types[i]); + if (PyArray_EquivTypes(atype, ntype)) { + arg_types[i] = ntype->type_num; + } + Py_DECREF(atype); + } + if (size < loop->bufsize || loop->ufunc->core_enabled) { + if (!(PyArray_ISBEHAVED_RO(mps[i])) || \ + PyArray_TYPE(mps[i]) != arg_types[i]) { + ntype = PyArray_DescrFromType(arg_types[i]); + new = PyArray_FromAny((PyObject *)mps[i], + ntype, 0, 0, + FORCECAST | ALIGNED, NULL); + if (new == NULL) return -1; + Py_DECREF(mps[i]); + mps[i] = (PyArrayObject *)new; + } + } + } + + return 0; +} + +#define _GETATTR_(str, rstr) do {if (strcmp(name, #str) == 0) \ + return PyObject_HasAttrString(op, "__" #rstr "__");} while (0); + +static int +_has_reflected_op(PyObject *op, char *name) +{ + _GETATTR_(add, radd); + _GETATTR_(subtract, rsub); + _GETATTR_(multiply, rmul); + _GETATTR_(divide, rdiv); + _GETATTR_(true_divide, rtruediv); + _GETATTR_(floor_divide, rfloordiv); + _GETATTR_(remainder, rmod); + _GETATTR_(power, rpow); + _GETATTR_(left_shift, rlshift); + _GETATTR_(right_shift, rrshift); + _GETATTR_(bitwise_and, rand); + _GETATTR_(bitwise_xor, rxor); + _GETATTR_(bitwise_or, ror); + return 0; +} + +#undef _GETATTR_ + + +/* Return the position of next non-white-space char in the string +*/ +static int +_next_non_white_space(const char* str, int offset) +{ + int ret = offset; + while (str[ret] == ' ' || str[ret] == '\t') ret++; + return ret; +} + +static int +_is_alpha_underscore(char ch) +{ + return (ch >= 'A' && ch <= 'Z') || (ch >= 'a' && ch <= 'z') || ch == '_'; +} + +static int +_is_alnum_underscore(char ch) +{ + return _is_alpha_underscore(ch) || (ch >= '0' && ch <= '9'); +} + +/* Return the ending position of a variable name +*/ +static int +_get_end_of_name(const char* str, int offset) +{ + int ret = offset; + while (_is_alnum_underscore(str[ret])) ret++; + return ret; +} + +/* Returns 1 if the dimension names pointed by s1 and s2 are the same, + otherwise returns 0. +*/ +static int +_is_same_name(const char* s1, const char* s2) +{ + while (_is_alnum_underscore(*s1) && _is_alnum_underscore(*s2)) { + if (*s1 != *s2) return 0; + s1++; + s2++; + } + return !_is_alnum_underscore(*s1) && !_is_alnum_underscore(*s2); +} + +/* Sets core_num_dim_ix, core_num_dims, core_dim_ixs, core_offsets, + and core_signature in PyUFuncObject "self". Returns 0 unless an + error occured. 
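
_has_reflected_op above (together with the corresponding check in construct_arrays further down) is what lets a non-ndarray right operand that defines __array_priority__ and the matching __r*__ method take over: the ufunc returns NotImplemented and Python falls back to the reflected method. A rough illustration of the behaviour this enables (sketch; the class name is made up):

    import numpy as np

    class Frame(object):
        __array_priority__ = 10.0            # signals "I can handle ndarrays"
        def __radd__(self, other):
            return "Frame.__radd__ got %s" % type(other).__name__

    print(np.arange(3) + Frame())            # Frame.__radd__ got ndarray
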
+*/ +static int +_parse_signature(PyUFuncObject *self, const char *signature) +{ + size_t len; + char const **var_names; + int nd = 0; /* number of dimension of the current argument */ + int cur_arg = 0; /* index into core_num_dims&core_offsets */ + int cur_core_dim = 0; /* index into core_dim_ixs */ + int i = 0; + char *parse_error = NULL; + + if (signature == NULL) { + PyErr_SetString(PyExc_RuntimeError, + "_parse_signature with NULL signature"); + return -1; + } + + len = strlen(signature); + self->core_signature = _pya_malloc(sizeof(char) * (len+1)); + if (self->core_signature) + strcpy(self->core_signature, signature); + + /* Allocate sufficient memory to store pointers to all dimension names */ + var_names = _pya_malloc(sizeof(char const*) * len); + if (var_names == NULL) { + PyErr_NoMemory(); + return -1; + } + + self->core_enabled = 1; + self->core_num_dim_ix = 0; + self->core_num_dims = _pya_malloc(sizeof(int) * self->nargs); + self->core_dim_ixs = _pya_malloc(sizeof(int) * len); /* shrink this later */ + self->core_offsets = _pya_malloc(sizeof(int) * self->nargs); + if (self->core_num_dims == NULL || self->core_dim_ixs == NULL || + self->core_offsets == NULL) { + PyErr_NoMemory(); + goto fail; + } + + i = _next_non_white_space(signature, 0); + + while (signature[i] != '\0') { /* loop over input/output arguments */ + if (cur_arg == self->nin) { + /* expect "->" */ + if (signature[i] != '-' || signature[i+1] != '>') { + parse_error = "expect '->'"; + goto fail; + } + i = _next_non_white_space(signature, i+2); + } + + /* parse core dimensions of one argument, e.g. "()", "(i)", or + "(i,j)" */ + if (signature[i] != '(') { + parse_error = "expect '('"; + goto fail; + } + i = _next_non_white_space(signature, i+1); + while (signature[i] != ')') { /* loop over core dimensions */ + int j = 0; + if (!_is_alpha_underscore(signature[i])) { + parse_error = "expect dimension name"; + goto fail; + } + while (j < self->core_num_dim_ix) { + if (_is_same_name(signature+i, var_names[j])) break; + j++; + } + if (j >= self->core_num_dim_ix) { + var_names[j] = signature+i; + self->core_num_dim_ix++; + } + self->core_dim_ixs[cur_core_dim] = j; + cur_core_dim++; + nd++; + i = _get_end_of_name(signature, i); + i = _next_non_white_space(signature, i); + if (signature[i] != ',' && signature[i] != ')') { + parse_error = "expect ',' or ')'"; + goto fail; + } + if (signature[i] == ',') + { + i = _next_non_white_space(signature, i+1); + if (signature[i] == ')') { + parse_error = "',' must not be followed by ')'"; + goto fail; + } + } + } + self->core_num_dims[cur_arg] = nd; + self->core_offsets[cur_arg] = cur_core_dim-nd; + cur_arg++; + nd = 0; + i = _next_non_white_space(signature, i+1); + + if (cur_arg != self->nin && cur_arg != self->nargs) { + /* The list of input arguments (or output arguments) was + only read partially */ + if (signature[i] != ',') { + parse_error = "expect ','"; + goto fail; + } + i = _next_non_white_space(signature, i+1); + } + } + if (cur_arg != self->nargs) { + parse_error = "incomplete signature: not all arguments found"; + goto fail; + } + self->core_dim_ixs = _pya_realloc(self->core_dim_ixs, + sizeof(int) * cur_core_dim); + /* check for trivial core-signature, e.g. 
"(),()->()" */ + if (cur_core_dim == 0) + self->core_enabled = 0; + _pya_free((void*)var_names); + return 0; +fail: + _pya_free((void*)var_names); + if (parse_error) { + char *buf = _pya_malloc(sizeof(char) * (len + 200)); + if (buf) { + sprintf(buf, "%s at position %d in \"%s\"", + parse_error, i, signature); + PyErr_SetString(PyExc_ValueError, signature); + _pya_free(buf); + } + else { + PyErr_NoMemory(); + } + } + return -1; +} + +/* Concatenate the loop and core dimensions of + PyArrayMultiIterObject's iarg-th argument, to recover a full + dimension array (used for output arguments). +*/ +static npy_intp* +_compute_output_dims(PyUFuncLoopObject *loop, int iarg, + int *out_nd, npy_intp *tmp_dims) +{ + int i; + PyUFuncObject *ufunc = loop->ufunc; + if (ufunc->core_enabled == 0) { + /* case of ufunc with trivial core-signature */ + *out_nd = loop->nd; + return loop->dimensions; + } + + *out_nd = loop->nd + ufunc->core_num_dims[iarg]; + if (*out_nd > NPY_MAXARGS) { + PyErr_SetString(PyExc_ValueError, + "dimension of output variable exceeds limit"); + return NULL; + } + + /* copy loop dimensions */ + memcpy(tmp_dims, loop->dimensions, sizeof(npy_intp) * loop->nd); + + /* copy core dimension */ + for (i = 0; i < ufunc->core_num_dims[iarg]; i++) + tmp_dims[loop->nd + i] = loop->core_dim_sizes[1 + + ufunc->core_dim_ixs[ufunc->core_offsets[iarg]+i]]; + return tmp_dims; +} + +/* Check and set core_dim_sizes and core_strides for the i-th argument. +*/ +static int +_compute_dimension_size(PyUFuncLoopObject *loop, PyArrayObject **mps, int i) +{ + PyUFuncObject *ufunc = loop->ufunc; + int j = ufunc->core_offsets[i]; + int k = PyArray_NDIM(mps[i]) - ufunc->core_num_dims[i]; + int ind; + for (ind = 0; ind < ufunc->core_num_dims[i]; ind++, j++, k++) { + npy_intp dim = k<0 ? 1 : PyArray_DIM(mps[i], k); + /* First element of core_dim_sizes will be used for looping */ + int dim_ix = ufunc->core_dim_ixs[j] + 1; + if (loop->core_dim_sizes[dim_ix] == 1) { + /* broadcast core dimension */ + loop->core_dim_sizes[dim_ix] = dim; + } + else if (dim != 1 && dim != loop->core_dim_sizes[dim_ix]) { + PyErr_SetString(PyExc_ValueError, + "core dimensions mismatch"); + return -1; + } + /* First ufunc->nargs elements will be used for looping */ + loop->core_strides[ufunc->nargs + j] = + dim == 1 ? 0 : PyArray_STRIDE(mps[i], k); + } + return 0; +} + +/* Return a view of array "ap" with "core_nd" dimensions cut from tail. 
*/ +static PyArrayObject * +_trunc_coredim(PyArrayObject *ap, int core_nd) +{ + PyArrayObject *ret; + int nd = ap->nd - core_nd; + if (nd < 0) nd = 0; + + /* The following code is basically taken from PyArray_Transpose */ + Py_INCREF(ap->descr); /* NewFromDescr will steal this reference */ + ret = (PyArrayObject *) + PyArray_NewFromDescr(ap->ob_type, ap->descr, + nd, ap->dimensions, + ap->strides, ap->data, ap->flags, + (PyObject *)ap); + if (ret == NULL) return NULL; + + /* point at true owner of memory: */ + ret->base = (PyObject *)ap; + Py_INCREF(ap); + + PyArray_UpdateFlags(ret, CONTIGUOUS | FORTRAN); + + return ret; +} + +static Py_ssize_t +construct_arrays(PyUFuncLoopObject *loop, PyObject *args, PyArrayObject **mps, + PyObject *typetup) +{ + Py_ssize_t nargs; + int i; + int arg_types[NPY_MAXARGS]; + PyArray_SCALARKIND scalars[NPY_MAXARGS]; + PyArray_SCALARKIND maxarrkind, maxsckind, new; + PyUFuncObject *self = loop->ufunc; + Bool allscalars = TRUE; + PyTypeObject *subtype = &PyArray_Type; + PyObject *context = NULL; + PyObject *obj; + int flexible = 0; + int object = 0; + + npy_intp temp_dims[NPY_MAXDIMS]; + npy_intp *out_dims; + int out_nd; + + /* Check number of arguments */ + nargs = PyTuple_Size(args); + if ((nargs < self->nin) || (nargs > self->nargs)) { + PyErr_SetString(PyExc_ValueError, + "invalid number of arguments"); + return -1; + } + + /* Get each input argument */ + maxarrkind = PyArray_NOSCALAR; + maxsckind = PyArray_NOSCALAR; + for(i = 0; i < self->nin; i++) { + obj = PyTuple_GET_ITEM(args,i); + if (!PyArray_Check(obj) && !PyArray_IsScalar(obj, Generic)) { + context = Py_BuildValue("OOi", self, args, i); + } + else { + context = NULL; + } + mps[i] = (PyArrayObject *)PyArray_FromAny(obj, NULL, 0, 0, 0, context); + Py_XDECREF(context); + if (mps[i] == NULL) { + return -1; + } + arg_types[i] = PyArray_TYPE(mps[i]); + if (!flexible && PyTypeNum_ISFLEXIBLE(arg_types[i])) { + flexible = 1; + } + if (!object && PyTypeNum_ISOBJECT(arg_types[i])) { + object = 1; + } + /* debug + * fprintf(stderr, "array %d has reference %d\n", i, + * (mps[i])->ob_refcnt); + */ + + /* + * Scalars are 0-dimensional arrays at this point + */ + + /* + * We need to keep track of whether or not scalars + * are mixed with arrays of different kinds. + */ + + if (mps[i]->nd > 0) { + scalars[i] = PyArray_NOSCALAR; + allscalars = FALSE; + new = PyArray_ScalarKind(arg_types[i], NULL); + maxarrkind = NPY_MAX(new, maxarrkind); + } + else { + scalars[i] = PyArray_ScalarKind(arg_types[i], &(mps[i])); + maxsckind = NPY_MAX(scalars[i], maxsckind); + } + } + + /* We don't do strings */ + if (flexible && !object) { + loop->notimplemented = 1; + return nargs; + } + + /* + * If everything is a scalar, or scalars mixed with arrays of + * different kinds of lesser kinds then use normal coercion rules + */ + if (allscalars || (maxsckind > maxarrkind)) { + for(i = 0; i < self->nin; i++) { + scalars[i] = PyArray_NOSCALAR; + } + } + + /* Select an appropriate function for these argument types. */ + if (select_types(loop->ufunc, arg_types, &(loop->function), + &(loop->funcdata), scalars, typetup) == -1) + return -1; + + /* + * FAIL with NotImplemented if the other object has + * the __r__ method and has __array_priority__ as + * an attribute (signalling it can handle ndarray's) + * and is not already an ndarray or a subtype of the same type. 
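
The scalar-kind bookkeeping above implements the old kind-based promotion rule: scalars only influence the result type when their kind outranks every array kind (or when all operands are scalars). A quick look at the visible effect, assuming the pre-NEP 50 casting rules that this code implements:

    import numpy as np

    a = np.ones(3, dtype=np.int8)

    print((a + 5).dtype)                      # int8    -- a Python int scalar does not upcast
    print((a + 5.0).dtype)                    # float64 -- float kind outranks integer arrays
    print((a + np.ones(3, np.int64)).dtype)   # int64   -- array/array uses normal promotion
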
+ */ + if ((arg_types[1] == PyArray_OBJECT) && \ + (loop->ufunc->nin==2) && (loop->ufunc->nout == 1)) { + PyObject *_obj = PyTuple_GET_ITEM(args, 1); + if (!PyArray_CheckExact(_obj) && + /* If both are same subtype of object arrays, then proceed */ + !(_obj->ob_type == (PyTuple_GET_ITEM(args, 0))->ob_type) && \ + + PyObject_HasAttrString(_obj, "__array_priority__") && \ + _has_reflected_op(_obj, loop->ufunc->name)) { + loop->notimplemented = 1; + return nargs; + } + } + + /* + * Create copies for some of the arrays if they are small + * enough and not already contiguous + */ + if (_create_copies(loop, arg_types, mps) < 0) { + return -1; + } + + /* Only use loop dimensions when constructing Iterator: + * temporarily replace mps[i] (will be recovered below). + */ + if (self->core_enabled) { + for (i = 0; i < self->nin; i++) { + PyArrayObject *ao; + + if (_compute_dimension_size(loop, mps, i) < 0) + return -1; + + ao = _trunc_coredim(mps[i], self->core_num_dims[i]); + if (ao == NULL) + return -1; + mps[i] = ao; + } + } + + /* Create Iterators for the Inputs */ + for(i = 0; i < self->nin; i++) { + loop->iters[i] = (PyArrayIterObject *) \ + PyArray_IterNew((PyObject *)mps[i]); + if (loop->iters[i] == NULL) { + return -1; + } + } + + + /* Recover mps[i]. */ + if (self->core_enabled) { + for (i = 0; i < self->nin; i++) { + PyArrayObject *ao = mps[i]; + mps[i] = (PyArrayObject *)mps[i]->base; + Py_DECREF(ao); + } + } + + /* Broadcast the result */ + loop->numiter = self->nin; + if (PyArray_Broadcast((PyArrayMultiIterObject *)loop) < 0) { + return -1; + } + + /* Get any return arguments */ + for(i = self->nin; i < nargs; i++) { + mps[i] = (PyArrayObject *)PyTuple_GET_ITEM(args, i); + if (((PyObject *)mps[i])==Py_None) { + mps[i] = NULL; + continue; + } + Py_INCREF(mps[i]); + if (!PyArray_Check((PyObject *)mps[i])) { + PyObject *new; + if (PyArrayIter_Check(mps[i])) { + new = PyObject_CallMethod((PyObject *)mps[i], + "__array__", NULL); + Py_DECREF(mps[i]); + mps[i] = (PyArrayObject *)new; + } + else { + PyErr_SetString(PyExc_TypeError, + "return arrays must be "\ + "of ArrayType"); + Py_DECREF(mps[i]); + mps[i] = NULL; + return -1; + } + } + + + if (self->core_enabled) { + if (_compute_dimension_size(loop, mps, i) < 0) + return -1; + } + out_dims = _compute_output_dims(loop, i, &out_nd, temp_dims); + if (!out_dims) return -1; + + if (mps[i]->nd != out_nd || + !PyArray_CompareLists(mps[i]->dimensions, + out_dims, out_nd)) { + PyErr_SetString(PyExc_ValueError, + "invalid return array shape"); + Py_DECREF(mps[i]); + mps[i] = NULL; + return -1; + } + if (!PyArray_ISWRITEABLE(mps[i])) { + PyErr_SetString(PyExc_ValueError, + "return array is not writeable"); + Py_DECREF(mps[i]); + mps[i] = NULL; + return -1; + } + } + + /* construct any missing return arrays and make output iterators */ + for(i = self->nin; i < self->nargs; i++) { + PyArray_Descr *ntype; + + if (mps[i] == NULL) { + out_dims = _compute_output_dims(loop, i, &out_nd, temp_dims); + if (!out_dims) return -1; + + mps[i] = (PyArrayObject *)PyArray_New(subtype, + out_nd, + out_dims, + arg_types[i], + NULL, NULL, + 0, 0, NULL); + if (mps[i] == NULL) { + return -1; + } + } + + /* + * reset types for outputs that are equivalent + * -- no sense casting uselessly + */ + else { + if (mps[i]->descr->type_num != arg_types[i]) { + PyArray_Descr *atype; + ntype = mps[i]->descr; + atype = PyArray_DescrFromType(arg_types[i]); + if (PyArray_EquivTypes(atype, ntype)) { + arg_types[i] = ntype->type_num; + } + Py_DECREF(atype); + } + + /* still not the same -- 
or will we have to use buffers?*/ + if (mps[i]->descr->type_num != arg_types[i] || + !PyArray_ISBEHAVED_RO(mps[i])) { + if (loop->size < loop->bufsize || self->core_enabled) { + PyObject *new; + /* + * Copy the array to a temporary copy + * and set the UPDATEIFCOPY flag + */ + ntype = PyArray_DescrFromType(arg_types[i]); + new = PyArray_FromAny((PyObject *)mps[i], + ntype, 0, 0, + FORCECAST | ALIGNED | + UPDATEIFCOPY, NULL); + if (new == NULL) { + return -1; + } + Py_DECREF(mps[i]); + mps[i] = (PyArrayObject *)new; + } + } + } + + if (self->core_enabled) { + PyArrayObject *ao; + + /* computer for all output arguments, and set strides in "loop" */ + if (_compute_dimension_size(loop, mps, i) < 0) + return -1; + + ao = _trunc_coredim(mps[i], self->core_num_dims[i]); + if (ao == NULL) + return -1; + /* Temporarily modify mps[i] for constructing iterator. */ + mps[i] = ao; + } + + loop->iters[i] = (PyArrayIterObject *) \ + PyArray_IterNew((PyObject *)mps[i]); + if (loop->iters[i] == NULL) { + return -1; + } + + /* Recover mps[i]. */ + if (self->core_enabled) { + PyArrayObject *ao = mps[i]; + mps[i] = (PyArrayObject *)mps[i]->base; + Py_DECREF(ao); + } + + } + + /* + * If any of different type, or misaligned or swapped + * then must use buffers + */ + loop->bufcnt = 0; + loop->obj = 0; + + /* Determine looping method needed */ + loop->meth = NO_UFUNCLOOP; + + if (loop->size == 0) { + return nargs; + } + + if (self->core_enabled) { + loop->meth = SIGNATURE_NOBUFFER_UFUNCLOOP; + } + + for(i = 0; i < self->nargs; i++) { + loop->needbuffer[i] = 0; + if (arg_types[i] != mps[i]->descr->type_num || + !PyArray_ISBEHAVED_RO(mps[i])) { + if (self->core_enabled) { + PyErr_SetString(PyExc_RuntimeError, + "never reached; copy should have been made"); + return -1; + } + loop->meth = BUFFER_UFUNCLOOP; + loop->needbuffer[i] = 1; + } + if (!loop->obj && ((mps[i]->descr->type_num == PyArray_OBJECT) || + (arg_types[i] == PyArray_OBJECT))) { + loop->obj = 1; + } + } + + + if (self->core_enabled && loop->obj) { + PyErr_SetString(PyExc_TypeError, + "Object type not allowed in ufunc with signature"); + return -1; + } + + if (loop->meth == NO_UFUNCLOOP) { + loop->meth = ONE_UFUNCLOOP; + + /* All correct type and BEHAVED */ + /* Check for non-uniform stridedness */ + for(i = 0; i < self->nargs; i++) { + if (!(loop->iters[i]->contiguous)) { + /* + * May still have uniform stride + * if (broadcast result) <= 1-d + */ + if (mps[i]->nd != 0 && \ + (loop->iters[i]->nd_m1 > 0)) { + loop->meth = NOBUFFER_UFUNCLOOP; + break; + } + } + } + if (loop->meth == ONE_UFUNCLOOP) { + for(i = 0; i < self->nargs; i++) { + loop->bufptr[i] = mps[i]->data; + } + } + } + + loop->numiter = self->nargs; + + /* Fill in steps */ + if (loop->meth == SIGNATURE_NOBUFFER_UFUNCLOOP && loop->nd == 0) { + /* Use default core_strides */ + } + else if (loop->meth != ONE_UFUNCLOOP) { + int ldim; + intp minsum; + intp maxdim; + PyArrayIterObject *it; + intp stride_sum[NPY_MAXDIMS]; + int j; + + /* Fix iterators */ + + /* + * Optimize axis the iteration takes place over + * + * The first thought was to have the loop go + * over the largest dimension to minimize the number of loops + * + * However, on processors with slow memory bus and cache, + * the slowest loops occur when the memory access occurs for + * large strides. + * + * Thus, choose the axis for which strides of the last iterator is + * smallest but non-zero. 
+ */ + + for(i = 0; i < loop->nd; i++) { + stride_sum[i] = 0; + for(j = 0; j < loop->numiter; j++) { + stride_sum[i] += loop->iters[j]->strides[i]; + } + } + + ldim = loop->nd - 1; + minsum = stride_sum[loop->nd-1]; + for(i = loop->nd - 2; i >= 0; i--) { + if (stride_sum[i] < minsum ) { + ldim = i; + minsum = stride_sum[i]; + } + } + + maxdim = loop->dimensions[ldim]; + loop->size /= maxdim; + loop->bufcnt = maxdim; + loop->lastdim = ldim; + + /* + * Fix the iterators so the inner loop occurs over the + * largest dimensions -- This can be done by + * setting the size to 1 in that dimension + * (just in the iterators) + */ + for(i = 0; i < loop->numiter; i++) { + it = loop->iters[i]; + it->contiguous = 0; + it->size /= (it->dims_m1[ldim]+1); + it->dims_m1[ldim] = 0; + it->backstrides[ldim] = 0; + + /* + * (won't fix factors because we + * don't use PyArray_ITER_GOTO1D + * so don't change them) + * + * Set the steps to the strides in that dimension + */ + loop->steps[i] = it->strides[ldim]; + } + + /* + * Set looping part of core_dim_sizes and core_strides. + */ + if (loop->meth == SIGNATURE_NOBUFFER_UFUNCLOOP) { + loop->core_dim_sizes[0] = maxdim; + for (i = 0; i < self->nargs; i++) { + loop->core_strides[i] = loop->steps[i]; + } + } + + /* + * fix up steps where we will be copying data to + * buffers and calculate the ninnerloops and leftover + * values -- if step size is already zero that is not changed... + */ + if (loop->meth == BUFFER_UFUNCLOOP) { + loop->leftover = maxdim % loop->bufsize; + loop->ninnerloops = (maxdim / loop->bufsize) + 1; + for(i = 0; i < self->nargs; i++) { + if (loop->needbuffer[i] && loop->steps[i]) { + loop->steps[i] = mps[i]->descr->elsize; + } + /* These are changed later if casting is needed */ + } + } + } + else if (loop->meth == ONE_UFUNCLOOP) { + /* uniformly-strided case */ + for(i = 0; i < self->nargs; i++) { + if (PyArray_SIZE(mps[i]) == 1) + loop->steps[i] = 0; + else + loop->steps[i] = mps[i]->strides[mps[i]->nd-1]; + } + } + + + /* Finally, create memory for buffers if we need them */ + + /* + * Buffers for scalars are specially made small -- scalars are + * not copied multiple times + */ + if (loop->meth == BUFFER_UFUNCLOOP) { + int cnt = 0, cntcast = 0; /* keeps track of bytes to allocate */ + int scnt = 0, scntcast = 0; + char *castptr; + char *bufptr; + int last_was_scalar=0; + int last_cast_was_scalar=0; + int oldbufsize=0; + int oldsize=0; + int scbufsize = 4*sizeof(double); + int memsize; + PyArray_Descr *descr; + + /* compute the element size */ + for(i = 0; i < self->nargs; i++) { + if (!loop->needbuffer[i]) { + continue; + } + if (arg_types[i] != mps[i]->descr->type_num) { + descr = PyArray_DescrFromType(arg_types[i]); + if (loop->steps[i]) { + cntcast += descr->elsize; + } + else { + scntcast += descr->elsize; + } + if (i < self->nin) { + loop->cast[i] = PyArray_GetCastFunc(mps[i]->descr, + arg_types[i]); + } + else { + loop->cast[i] = PyArray_GetCastFunc \ + (descr, mps[i]->descr->type_num); + } + Py_DECREF(descr); + if (!loop->cast[i]) { + return -1; + } + } + loop->swap[i] = !(PyArray_ISNOTSWAPPED(mps[i])); + if (loop->steps[i]) { + cnt += mps[i]->descr->elsize; + } + else { + scnt += mps[i]->descr->elsize; + } + } + memsize = loop->bufsize*(cnt+cntcast) + scbufsize*(scnt+scntcast); + loop->buffer[0] = PyDataMem_NEW(memsize); + + /* debug + * fprintf(stderr, "Allocated buffer at %p of size %d, cnt=%d, cntcast=%d\n", + * loop->buffer[0], loop->bufsize * (cnt + cntcast), cnt, cntcast); + */ + + if (loop->buffer[0] == NULL) { + 
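
The stride_sum loop above picks the iteration axis ldim with the smallest summed stride, so that the innermost loop touches memory as locally as possible. A toy Python version of the same heuristic (illustration only; the names are made up):

    import numpy as np

    def pick_inner_axis(*arrays):
        """Mimic the stride-sum heuristic: smallest total stride wins, last axis on ties."""
        nd = arrays[0].ndim
        sums = [sum(abs(a.strides[ax]) for a in arrays) for ax in range(nd)]
        best = nd - 1                      # the C code scans backwards, keeping strict '<' winners
        for ax in range(nd - 2, -1, -1):
            if sums[ax] < sums[best]:
                best = ax
        return best

    a = np.zeros((1000, 1000))
    print(pick_inner_axis(a, a))       # 1: C-contiguous, last axis has the 8-byte stride
    print(pick_inner_axis(a.T, a.T))   # 0: transposed, axis 0 is now the small-stride axis
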
PyErr_NoMemory(); + return -1; + } + if (loop->obj) { + memset(loop->buffer[0], 0, memsize); + } + castptr = loop->buffer[0] + loop->bufsize*cnt + scbufsize*scnt; + bufptr = loop->buffer[0]; + loop->objfunc = 0; + for(i = 0; i < self->nargs; i++) { + if (!loop->needbuffer[i]) { + continue; + } + loop->buffer[i] = bufptr + (last_was_scalar ? scbufsize : \ + loop->bufsize)*oldbufsize; + last_was_scalar = (loop->steps[i] == 0); + bufptr = loop->buffer[i]; + oldbufsize = mps[i]->descr->elsize; + /* fprintf(stderr, "buffer[%d] = %p\n", i, loop->buffer[i]); */ + if (loop->cast[i]) { + PyArray_Descr *descr; + loop->castbuf[i] = castptr + (last_cast_was_scalar ? scbufsize : \ + loop->bufsize)*oldsize; + last_cast_was_scalar = last_was_scalar; + /* fprintf(stderr, "castbuf[%d] = %p\n", i, loop->castbuf[i]); */ + descr = PyArray_DescrFromType(arg_types[i]); + oldsize = descr->elsize; + Py_DECREF(descr); + loop->bufptr[i] = loop->castbuf[i]; + castptr = loop->castbuf[i]; + if (loop->steps[i]) + loop->steps[i] = oldsize; + } + else { + loop->bufptr[i] = loop->buffer[i]; + } + if (!loop->objfunc && loop->obj) { + if (arg_types[i] == PyArray_OBJECT) { + loop->objfunc = 1; + } + } + } + } + return nargs; +} + +static void +ufuncreduce_dealloc(PyUFuncReduceObject *self) +{ + if (self->ufunc) { + Py_XDECREF(self->it); + Py_XDECREF(self->rit); + Py_XDECREF(self->ret); + Py_XDECREF(self->errobj); + Py_XDECREF(self->decref); + if (self->buffer) PyDataMem_FREE(self->buffer); + Py_DECREF(self->ufunc); + } + _pya_free(self); +} + +static void +ufuncloop_dealloc(PyUFuncLoopObject *self) +{ + int i; + + if (self->ufunc != NULL) { + if (self->core_dim_sizes) + _pya_free(self->core_dim_sizes); + if (self->core_strides) + _pya_free(self->core_strides); + for(i = 0; i < self->ufunc->nargs; i++) + Py_XDECREF(self->iters[i]); + if (self->buffer[0]) { + PyDataMem_FREE(self->buffer[0]); + } + Py_XDECREF(self->errobj); + Py_DECREF(self->ufunc); + } + _pya_free(self); +} + +static PyUFuncLoopObject * +construct_loop(PyUFuncObject *self, PyObject *args, PyObject *kwds, PyArrayObject **mps) +{ + PyUFuncLoopObject *loop; + int i; + PyObject *typetup = NULL; + PyObject *extobj = NULL; + char *name; + + if (self == NULL) { + PyErr_SetString(PyExc_ValueError, "function not supported"); + return NULL; + } + if ((loop = _pya_malloc(sizeof(PyUFuncLoopObject))) == NULL) { + PyErr_NoMemory(); + return loop; + } + + loop->index = 0; + loop->ufunc = self; + Py_INCREF(self); + loop->buffer[0] = NULL; + for(i = 0; i < self->nargs; i++) { + loop->iters[i] = NULL; + loop->cast[i] = NULL; + } + loop->errobj = NULL; + loop->notimplemented = 0; + loop->first = 1; + loop->core_dim_sizes = NULL; + loop->core_strides = NULL; + + if (self->core_enabled) { + int num_dim_ix = 1 + self->core_num_dim_ix; + int nstrides = self->nargs + self->core_offsets[self->nargs-1] + + self->core_num_dims[self->nargs-1]; + loop->core_dim_sizes = _pya_malloc(sizeof(npy_intp) * num_dim_ix); + loop->core_strides = _pya_malloc(sizeof(npy_intp) * nstrides); + if (loop->core_dim_sizes == NULL || loop->core_strides == NULL) { + PyErr_NoMemory(); + goto fail; + } + memset(loop->core_strides, 0, sizeof(npy_intp) * nstrides); + for (i = 0; i < num_dim_ix; i++) + loop->core_dim_sizes[i] = 1; + } + + name = self->name ? self->name : ""; + + /* + * Extract sig= keyword and extobj= keyword if present. 
+ * Raise an error if anything else is present in the + * keyword dictionary + */ + if (kwds != NULL) { + PyObject *key, *value; + Py_ssize_t pos=0; + while (PyDict_Next(kwds, &pos, &key, &value)) { + char *keystring = PyString_AsString(key); + if (keystring == NULL) { + PyErr_Clear(); + PyErr_SetString(PyExc_TypeError, "invalid keyword"); + goto fail; + } + if (strncmp(keystring,"extobj",6) == 0) { + extobj = value; + } + else if (strncmp(keystring,"sig",3) == 0) { + typetup = value; + } + else { + char *format = "'%s' is an invalid keyword to %s"; + PyErr_Format(PyExc_TypeError,format,keystring, name); + goto fail; + } + } + } + + if (extobj == NULL) { + if (PyUFunc_GetPyValues(name, + &(loop->bufsize), &(loop->errormask), + &(loop->errobj)) < 0) { + goto fail; + } + } + else { + if (_extract_pyvals(extobj, name, + &(loop->bufsize), &(loop->errormask), + &(loop->errobj)) < 0) { + goto fail; + } + } + + /* Setup the arrays */ + if (construct_arrays(loop, args, mps, typetup) < 0) { + goto fail; + } + + PyUFunc_clearfperr(); + return loop; + +fail: + ufuncloop_dealloc(loop); + return NULL; +} + + +/* + static void + _printbytebuf(PyUFuncLoopObject *loop, int bufnum) + { + int i; + + fprintf(stderr, "Printing byte buffer %d\n", bufnum); + for(i=0; ibufcnt; i++) { + fprintf(stderr, " %d\n", *(((byte *)(loop->buffer[bufnum]))+i)); + } + } + + static void + _printlongbuf(PyUFuncLoopObject *loop, int bufnum) + { + int i; + + fprintf(stderr, "Printing long buffer %d\n", bufnum); + for(i=0; ibufcnt; i++) { + fprintf(stderr, " %ld\n", *(((long *)(loop->buffer[bufnum]))+i)); + } + } + + static void + _printlongbufptr(PyUFuncLoopObject *loop, int bufnum) + { + int i; + + fprintf(stderr, "Printing long buffer %d\n", bufnum); + for(i=0; ibufcnt; i++) { + fprintf(stderr, " %ld\n", *(((long *)(loop->bufptr[bufnum]))+i)); + } + } + + + + static void + _printcastbuf(PyUFuncLoopObject *loop, int bufnum) + { + int i; + + fprintf(stderr, "Printing long buffer %d\n", bufnum); + for(i=0; ibufcnt; i++) { + fprintf(stderr, " %ld\n", *(((long *)(loop->castbuf[bufnum]))+i)); + } + } + +*/ + + + + +/* + * currently generic ufuncs cannot be built for use on flexible arrays. + * + * The cast functions in the generic loop would need to be fixed to pass + * in something besides NULL, NULL. + * + * Also the underlying ufunc loops would not know the element-size unless + * that was passed in as data (which could be arranged). + * + */ + +/* + * This generic function is called with the ufunc object, the arguments to it, + * and an array of (pointers to) PyArrayObjects which are NULL. The + * arguments are parsed and placed in mps in construct_loop (construct_arrays) + */ + +/*UFUNC_API*/ +static int +PyUFunc_GenericFunction(PyUFuncObject *self, PyObject *args, PyObject *kwds, + PyArrayObject **mps) +{ + PyUFuncLoopObject *loop; + int i; + NPY_BEGIN_THREADS_DEF; + + if (!(loop = construct_loop(self, args, kwds, mps))) { + return -1; + } + if (loop->notimplemented) { + ufuncloop_dealloc(loop); + return -2; + } + if (self->core_enabled && loop->meth != SIGNATURE_NOBUFFER_UFUNCLOOP) { + PyErr_SetString(PyExc_RuntimeError, + "illegal loop method for ufunc with signature"); + goto fail; + } + + NPY_LOOP_BEGIN_THREADS; + switch(loop->meth) { + case ONE_UFUNCLOOP: + /* + * Everything is contiguous, notswapped, aligned, + * and of the right type. -- Fastest. + * Or if not contiguous, then a single-stride + * increment moves through the entire array. 
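
construct_loop above accepts exactly two keywords, sig= and extobj=; extobj replaces the thread-local error state with an explicit [bufsize, errmask, errobj] triple for that one call. A hedged sketch of the call-level form (keyword names taken from the parsing code above; semantics as generally documented for this era):

    import numpy as np

    a = np.array([1.0, 2.0])

    # per-call override: buffer size 8192, error mask 0 (ignore everything), no callback
    np.divide(a, 0.0, extobj=[8192, 0, None])

    # any other keyword hits the "'%s' is an invalid keyword to %s" branch above
    try:
        np.add(a, a, bogus=1)
    except TypeError as e:
        print(e)
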
+ */ + /*fprintf(stderr, "ONE...%d\n", loop->size);*/ + loop->function((char **)loop->bufptr, &(loop->size), + loop->steps, loop->funcdata); + UFUNC_CHECK_ERROR(loop); + break; + + case NOBUFFER_UFUNCLOOP: + /* + * Everything is notswapped, aligned and of the + * right type but not contiguous. -- Almost as fast. + */ + /*fprintf(stderr, "NOBUFFER...%d\n", loop->size);*/ + + while (loop->index < loop->size) { + for(i = 0; i < self->nargs; i++) { + loop->bufptr[i] = loop->iters[i]->dataptr; + } + loop->function((char **)loop->bufptr, &(loop->bufcnt), + loop->steps, loop->funcdata); + UFUNC_CHECK_ERROR(loop); + + /* Adjust loop pointers */ + for(i = 0; i < self->nargs; i++) { + PyArray_ITER_NEXT(loop->iters[i]); + } + loop->index++; + } + break; + + case SIGNATURE_NOBUFFER_UFUNCLOOP: + while (loop->index < loop->size) { + for(i = 0; i < self->nargs; i++) { + loop->bufptr[i] = loop->iters[i]->dataptr; + } + loop->function((char **)loop->bufptr, loop->core_dim_sizes, + loop->core_strides, loop->funcdata); + UFUNC_CHECK_ERROR(loop); + + /* Adjust loop pointers */ + for(i = 0; i < self->nargs; i++) { + PyArray_ITER_NEXT(loop->iters[i]); + } + loop->index++; + } + break; + + case BUFFER_UFUNCLOOP: { + PyArray_CopySwapNFunc *copyswapn[NPY_MAXARGS]; + PyArrayIterObject **iters=loop->iters; + int *swap=loop->swap; + char **dptr=loop->dptr; + int mpselsize[NPY_MAXARGS]; + intp laststrides[NPY_MAXARGS]; + int fastmemcpy[NPY_MAXARGS]; + int *needbuffer=loop->needbuffer; + intp index=loop->index, size=loop->size; + int bufsize; + intp bufcnt; + int copysizes[NPY_MAXARGS]; + char **bufptr = loop->bufptr; + char **buffer = loop->buffer; + char **castbuf = loop->castbuf; + intp *steps = loop->steps; + char *tptr[NPY_MAXARGS]; + int ninnerloops = loop->ninnerloops; + Bool pyobject[NPY_MAXARGS]; + int datasize[NPY_MAXARGS]; + int j, k, stopcondition; + char *myptr1, *myptr2; + + for(i = 0; i nargs; i++) { + copyswapn[i] = mps[i]->descr->f->copyswapn; + mpselsize[i] = mps[i]->descr->elsize; + pyobject[i] = (loop->obj && \ + (mps[i]->descr->type_num == PyArray_OBJECT)); + laststrides[i] = iters[i]->strides[loop->lastdim]; + if (steps[i] && laststrides[i] != mpselsize[i]) { + fastmemcpy[i] = 0; + } + else { + fastmemcpy[i] = 1; + } + } + /* Do generic buffered looping here (works for any kind of + * arrays -- some need buffers, some don't. + * + * + * New algorithm: N is the largest dimension. B is the buffer-size. + * quotient is loop->ninnerloops-1 + * remainder is loop->leftover + * + * Compute N = quotient * B + remainder. + * quotient = N / B # integer math + * (store quotient + 1) as the number of innerloops + * remainder = N % B # integer remainder + * + * On the inner-dimension we will have (quotient + 1) loops where + * the size of the inner function is B for all but the last when the niter size is + * remainder. + * + * So, the code looks very similar to NOBUFFER_LOOP except the inner-most loop is + * replaced with... + * + * for(i=0; isize, + * loop->ninnerloops, loop->leftover); + */ + /* + * for(i=0; inargs; i++) { + * fprintf(stderr, "iters[%d]->dataptr = %p, %p of size %d\n", i, + * iters[i], iters[i]->ao->data, PyArray_NBYTES(iters[i]->ao)); + * } + */ + stopcondition = ninnerloops; + if (loop->leftover == 0) stopcondition--; + while (index < size) { + bufsize=loop->bufsize; + for(i = 0; inargs; i++) { + tptr[i] = loop->iters[i]->dataptr; + if (needbuffer[i]) { + dptr[i] = bufptr[i]; + datasize[i] = (steps[i] ? 
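
The comment above spells out the chunking arithmetic for the buffered loop: the longest dimension N is processed in ninnerloops = N // bufsize + 1 passes, each of size bufsize except a final pass of size leftover = N % bufsize (and the last pass is skipped when leftover is zero). A two-line worked example, taking bufsize = 8192 purely as an example value:

    N, bufsize = 20000, 8192
    ninnerloops, leftover = N // bufsize + 1, N % bufsize
    print(ninnerloops, leftover)   # 3 passes: 8192 + 8192 + 3616 elements
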
bufsize : 1); + copysizes[i] = datasize[i] * mpselsize[i]; + } + else { + dptr[i] = tptr[i]; + } + } + + /* This is the inner function over the last dimension */ + for(k = 1; k<=stopcondition; k++) { + if (k == ninnerloops) { + bufsize = loop->leftover; + for(i=0; inargs;i++) { + if (!needbuffer[i]) { + continue; + } + datasize[i] = (steps[i] ? bufsize : 1); + copysizes[i] = datasize[i] * mpselsize[i]; + } + } + for(i = 0; i < self->nin; i++) { + if (!needbuffer[i]) { + continue; + } + if (fastmemcpy[i]) { + memcpy(buffer[i], tptr[i], copysizes[i]); + } + else { + myptr1 = buffer[i]; + myptr2 = tptr[i]; + for(j = 0; j < bufsize; j++) { + memcpy(myptr1, myptr2, mpselsize[i]); + myptr1 += mpselsize[i]; + myptr2 += laststrides[i]; + } + } + + /* swap the buffer if necessary */ + if (swap[i]) { + /* fprintf(stderr, "swapping...\n");*/ + copyswapn[i](buffer[i], mpselsize[i], NULL, -1, + (intp) datasize[i], 1, + mps[i]); + } + /* cast to the other buffer if necessary */ + if (loop->cast[i]) { + /* fprintf(stderr, "casting... %d, %p %p\n", i, buffer[i]); */ + loop->cast[i](buffer[i], castbuf[i], + (intp) datasize[i], + NULL, NULL); + } + } + + bufcnt = (intp) bufsize; + loop->function((char **)dptr, &bufcnt, steps, loop->funcdata); + UFUNC_CHECK_ERROR(loop); + + for(i=self->nin; inargs; i++) { + if (!needbuffer[i]) { + continue; + } + if (loop->cast[i]) { + /* fprintf(stderr, "casting back... %d, %p", i, castbuf[i]); */ + loop->cast[i](castbuf[i], + buffer[i], + (intp) datasize[i], + NULL, NULL); + } + if (swap[i]) { + copyswapn[i](buffer[i], mpselsize[i], NULL, -1, + (intp) datasize[i], 1, + mps[i]); + } + /* + * copy back to output arrays + * decref what's already there for object arrays + */ + if (pyobject[i]) { + myptr1 = tptr[i]; + for(j = 0; j < datasize[i]; j++) { + Py_XDECREF(*((PyObject **)myptr1)); + myptr1 += laststrides[i]; + } + } + if (fastmemcpy[i]) + memcpy(tptr[i], buffer[i], copysizes[i]); + else { + myptr2 = buffer[i]; + myptr1 = tptr[i]; + for(j = 0; j < bufsize; j++) { + memcpy(myptr1, myptr2, + mpselsize[i]); + myptr1 += laststrides[i]; + myptr2 += mpselsize[i]; + } + } + } + if (k == stopcondition) { + continue; + } + for(i = 0; i < self->nargs; i++) { + tptr[i] += bufsize * laststrides[i]; + if (!needbuffer[i]) { + dptr[i] = tptr[i]; + } + } + } + /* end inner function over last dimension */ + + if (loop->objfunc) { + /* + * DECREF castbuf when underlying function used + * object arrays and casting was needed to get + * to object arrays + */ + for(i = 0; i < self->nargs; i++) { + if (loop->cast[i]) { + if (steps[i] == 0) { + Py_XDECREF(*((PyObject **)castbuf[i])); + } + else { + int size = loop->bufsize; + + PyObject **objptr = (PyObject **)castbuf[i]; + /* + * size is loop->bufsize unless there + * was only one loop + */ + if (ninnerloops == 1) { + size = loop->leftover; + } + for(j = 0; j < size; j++) { + Py_XDECREF(*objptr); + *objptr = NULL; + objptr += 1; + } + } + } + } + + } + /* fixme -- probably not needed here*/ + UFUNC_CHECK_ERROR(loop); + + for(i=0; inargs; i++) { + PyArray_ITER_NEXT(loop->iters[i]); + } + index++; + } + } + } + + NPY_LOOP_END_THREADS; + ufuncloop_dealloc(loop); + return 0; + +fail: + NPY_LOOP_END_THREADS; + if (loop) ufuncloop_dealloc(loop); + return -1; +} + +static PyArrayObject * +_getidentity(PyUFuncObject *self, int otype, char *str) +{ + PyObject *obj, *arr; + PyArray_Descr *typecode; + + if (self->identity == PyUFunc_None) { + PyErr_Format(PyExc_ValueError, + "zero-size array to ufunc.%s " \ + "without identity", str); + return NULL; + 
} + if (self->identity == PyUFunc_One) { + obj = PyInt_FromLong((long) 1); + } else { + obj = PyInt_FromLong((long) 0); + } + + typecode = PyArray_DescrFromType(otype); + arr = PyArray_FromAny(obj, typecode, 0, 0, CARRAY, NULL); + Py_DECREF(obj); + return (PyArrayObject *)arr; +} + +static int +_create_reduce_copy(PyUFuncReduceObject *loop, PyArrayObject **arr, int rtype) +{ + intp maxsize; + PyObject *new; + PyArray_Descr *ntype; + + maxsize = PyArray_SIZE(*arr); + + if (maxsize < loop->bufsize) { + if (!(PyArray_ISBEHAVED_RO(*arr)) || + PyArray_TYPE(*arr) != rtype) { + ntype = PyArray_DescrFromType(rtype); + new = PyArray_FromAny((PyObject *)(*arr), + ntype, 0, 0, + FORCECAST | ALIGNED, NULL); + if (new == NULL) { + return -1; + } + *arr = (PyArrayObject *)new; + loop->decref = new; + } + } + + /* Don't decref *arr before re-assigning + because it was not going to be DECREF'd anyway. + + If a copy is made, then the copy will be removed + on deallocation of the loop structure by setting + loop->decref. + */ + + return 0; +} + +static PyUFuncReduceObject * +construct_reduce(PyUFuncObject *self, PyArrayObject **arr, PyArrayObject *out, + int axis, int otype, int operation, intp ind_size, char *str) +{ + PyUFuncReduceObject *loop; + PyArrayObject *idarr; + PyArrayObject *aar; + intp loop_i[MAX_DIMS], outsize=0; + int arg_types[3]; + PyArray_SCALARKIND scalars[3] = {PyArray_NOSCALAR, PyArray_NOSCALAR, + PyArray_NOSCALAR}; + int i, j, nd; + int flags; + /* Reduce type is the type requested of the input + during reduction */ + + if (self->core_enabled) { + PyErr_Format(PyExc_RuntimeError, + "construct_reduce not allowed on ufunc with signature"); + return NULL; + } + + nd = (*arr)->nd; + arg_types[0] = otype; + arg_types[1] = otype; + arg_types[2] = otype; + if ((loop = _pya_malloc(sizeof(PyUFuncReduceObject)))==NULL) { + PyErr_NoMemory(); return loop; + } + + loop->retbase=0; + loop->swap = 0; + loop->index = 0; + loop->ufunc = self; + Py_INCREF(self); + loop->cast = NULL; + loop->buffer = NULL; + loop->ret = NULL; + loop->it = NULL; + loop->rit = NULL; + loop->errobj = NULL; + loop->first = 1; + loop->decref=NULL; + loop->N = (*arr)->dimensions[axis]; + loop->instrides = (*arr)->strides[axis]; + + if (select_types(loop->ufunc, arg_types, &(loop->function), + &(loop->funcdata), scalars, NULL) == -1) goto fail; + + /* output type may change -- if it does + reduction is forced into that type + and we need to select the reduction function again + */ + if (otype != arg_types[2]) { + otype = arg_types[2]; + arg_types[0] = otype; + arg_types[1] = otype; + if (select_types(loop->ufunc, arg_types, &(loop->function), + &(loop->funcdata), scalars, NULL) == -1) + goto fail; + } + + /* get looping parameters from Python */ + if (PyUFunc_GetPyValues(str, &(loop->bufsize), &(loop->errormask), + &(loop->errobj)) < 0) goto fail; + + /* Make copy if misbehaved or not otype for small arrays */ + if (_create_reduce_copy(loop, arr, otype) < 0) goto fail; + aar = *arr; + + if (loop->N == 0) { + loop->meth = ZERO_EL_REDUCELOOP; + } + else if (PyArray_ISBEHAVED_RO(aar) && \ + otype == (aar)->descr->type_num) { + if (loop->N == 1) { + loop->meth = ONE_EL_REDUCELOOP; + } + else { + loop->meth = NOBUFFER_UFUNCLOOP; + loop->steps[1] = (aar)->strides[axis]; + loop->N -= 1; + } + } + else { + loop->meth = BUFFER_UFUNCLOOP; + loop->swap = !(PyArray_ISNOTSWAPPED(aar)); + } + + /* Determine if object arrays are involved */ + if (otype == PyArray_OBJECT || aar->descr->type_num == PyArray_OBJECT) + loop->obj = 1; + else + 
loop->obj = 0;
+
+    if (loop->meth == ZERO_EL_REDUCELOOP) {
+        idarr = _getidentity(self, otype, str);
+        if (idarr == NULL) goto fail;
+        if (idarr->descr->elsize > UFUNC_MAXIDENTITY) {
+            PyErr_Format(PyExc_RuntimeError,
+                         "UFUNC_MAXIDENTITY (%d) is too small"\
+                         " (needs to be at least %d)",
+                         UFUNC_MAXIDENTITY, idarr->descr->elsize);
+            Py_DECREF(idarr);
+            goto fail;
+        }
+        memcpy(loop->idptr, idarr->data, idarr->descr->elsize);
+        Py_DECREF(idarr);
+    }
+
+    /* Construct return array */
+    flags = NPY_CARRAY | NPY_UPDATEIFCOPY | NPY_FORCECAST;
+    switch(operation) {
+    case UFUNC_REDUCE:
+        for(j=0, i=0; i<nd; i++) {
+            if (i != axis) loop_i[j++] = aar->dimensions[i];
+        }
+        if (out == NULL) {
+            loop->ret = (PyArrayObject *) \
+                PyArray_New(aar->ob_type, aar->nd-1, loop_i,
+                            otype, NULL, NULL, 0, 0, (PyObject *)aar);
+        }
+        else {
+            outsize = PyArray_MultiplyList(loop_i, aar->nd-1);
+        }
+        break;
+    case UFUNC_ACCUMULATE:
+        if (out == NULL) {
+            loop->ret = (PyArrayObject *) \
+                PyArray_New(aar->ob_type, aar->nd, aar->dimensions,
+                            otype, NULL, NULL, 0, 0, (PyObject *)aar);
+        }
+        else {
+            outsize = PyArray_MultiplyList(aar->dimensions, aar->nd);
+        }
+        break;
+    case UFUNC_REDUCEAT:
+        memcpy(loop_i, aar->dimensions, nd*sizeof(intp));
+        /* Index is 1-d array */
+        loop_i[axis] = ind_size;
+        if (out == NULL) {
+            loop->ret = (PyArrayObject *) \
+                PyArray_New(aar->ob_type, aar->nd, loop_i, otype,
+                            NULL, NULL, 0, 0, (PyObject *)aar);
+        }
+        else {
+            outsize = PyArray_MultiplyList(loop_i, aar->nd);
+        }
+        if (ind_size == 0) {
+            loop->meth = ZERO_EL_REDUCELOOP;
+            return loop;
+        }
+        if (loop->meth == ONE_EL_REDUCELOOP)
+            loop->meth = NOBUFFER_REDUCELOOP;
+        break;
+    }
+    if (out) {
+        if (PyArray_SIZE(out) != outsize) {
+            PyErr_SetString(PyExc_ValueError, "wrong shape for output");
+            goto fail;
+        }
+        loop->ret = (PyArrayObject *) \
+            PyArray_FromArray(out, PyArray_DescrFromType(otype), flags);
+        if (loop->ret && loop->ret != out) {
+            loop->retbase = 1;
+        }
+    }
+    if (loop->ret == NULL) goto fail;
+    loop->insize = aar->descr->elsize;
+    loop->outsize = loop->ret->descr->elsize;
+    loop->bufptr[0] = loop->ret->data;
+
+    if (loop->meth == ZERO_EL_REDUCELOOP) {
+        loop->size = PyArray_SIZE(loop->ret);
+        return loop;
+    }
+
+    loop->it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)aar);
+    if (loop->it == NULL) return NULL;
+
+    if (loop->meth == ONE_EL_REDUCELOOP) {
+        loop->size = loop->it->size;
+        return loop;
+    }
+
+    /* Fix iterator to loop over correct dimension */
+    /* Set size in axis dimension to 1 */
+    loop->it->contiguous = 0;
+    loop->it->size /= (loop->it->dims_m1[axis]+1);
+    loop->it->dims_m1[axis] = 0;
+    loop->it->backstrides[axis] = 0;
+
+    loop->size = loop->it->size;
+
+    if (operation == UFUNC_REDUCE) {
+        loop->steps[0] = 0;
+    }
+    else {
+        loop->rit = (PyArrayIterObject *) \
+            PyArray_IterNew((PyObject *)(loop->ret));
+        if (loop->rit == NULL) return NULL;
+
+        /* Fix iterator to loop over correct dimension */
+        /* Set size in axis dimension to 1 */
+        loop->rit->contiguous = 0;
+        loop->rit->size /= (loop->rit->dims_m1[axis]+1);
+        loop->rit->dims_m1[axis] = 0;
+        loop->rit->backstrides[axis] = 0;
+
+        if (operation == UFUNC_ACCUMULATE)
+            loop->steps[0] = loop->ret->strides[axis];
+        else
+            loop->steps[0] = 0;
+    }
+    loop->steps[2] = loop->steps[0];
+    loop->bufptr[2] = loop->bufptr[0] + loop->steps[2];
+
+    if (loop->meth == BUFFER_UFUNCLOOP) {
+        int _size;
+        loop->steps[1] = loop->outsize;
+        if (otype != aar->descr->type_num) {
+            _size = loop->bufsize*(loop->outsize + aar->descr->elsize);
+            loop->buffer = PyDataMem_NEW(_size);
+            if (loop->buffer ==
NULL) goto fail; + if (loop->obj) memset(loop->buffer, 0, _size); + loop->castbuf = loop->buffer + \ + loop->bufsize*aar->descr->elsize; + loop->bufptr[1] = loop->castbuf; + loop->cast = PyArray_GetCastFunc(aar->descr, otype); + if (loop->cast == NULL) goto fail; + } + else { + _size = loop->bufsize * loop->outsize; + loop->buffer = PyDataMem_NEW(_size); + if (loop->buffer == NULL) goto fail; + if (loop->obj) memset(loop->buffer, 0, _size); + loop->bufptr[1] = loop->buffer; + } + } + + + PyUFunc_clearfperr(); + return loop; + + fail: + ufuncreduce_dealloc(loop); + return NULL; +} + + +/* We have two basic kinds of loops */ +/* One is used when arr is not-swapped and aligned and output type + is the same as input type. + and another using buffers when one of these is not satisfied. + + Zero-length and one-length axes-to-be-reduced are handled separately. +*/ + + static PyObject * +PyUFunc_Reduce(PyUFuncObject *self, PyArrayObject *arr, PyArrayObject *out, + int axis, int otype) +{ + PyArrayObject *ret=NULL; + PyUFuncReduceObject *loop; + intp i, n; + char *dptr; + NPY_BEGIN_THREADS_DEF; + + /* Construct loop object */ + loop = construct_reduce(self, &arr, out, axis, otype, UFUNC_REDUCE, 0, + "reduce"); + if (!loop) return NULL; + + NPY_LOOP_BEGIN_THREADS; + switch(loop->meth) { + case ZERO_EL_REDUCELOOP: + /* fprintf(stderr, "ZERO..%d\n", loop->size); */ + for(i=0; isize; i++) { + if (loop->obj) Py_INCREF(*((PyObject **)loop->idptr)); + memmove(loop->bufptr[0], loop->idptr, loop->outsize); + loop->bufptr[0] += loop->outsize; + } + break; + case ONE_EL_REDUCELOOP: + /*fprintf(stderr, "ONEDIM..%d\n", loop->size); */ + while(loop->index < loop->size) { + if (loop->obj) + Py_INCREF(*((PyObject **)loop->it->dataptr)); + memmove(loop->bufptr[0], loop->it->dataptr, + loop->outsize); + PyArray_ITER_NEXT(loop->it); + loop->bufptr[0] += loop->outsize; + loop->index++; + } + break; + case NOBUFFER_UFUNCLOOP: + /*fprintf(stderr, "NOBUFFER..%d\n", loop->size); */ + while(loop->index < loop->size) { + /* Copy first element to output */ + if (loop->obj) + Py_INCREF(*((PyObject **)loop->it->dataptr)); + memmove(loop->bufptr[0], loop->it->dataptr, + loop->outsize); + /* Adjust input pointer */ + loop->bufptr[1] = loop->it->dataptr+loop->steps[1]; + loop->function((char **)loop->bufptr, + &(loop->N), + loop->steps, loop->funcdata); + UFUNC_CHECK_ERROR(loop); + + PyArray_ITER_NEXT(loop->it) + loop->bufptr[0] += loop->outsize; + loop->bufptr[2] = loop->bufptr[0]; + loop->index++; + } + break; + case BUFFER_UFUNCLOOP: + /* use buffer for arr */ + /* + For each row to reduce + 1. copy first item over to output (casting if necessary) + 2. Fill inner buffer + 3. When buffer is filled or end of row + a. Cast input buffers if needed + b. Call inner function. + 4. Repeat 2 until row is done. 
+ */ + /* fprintf(stderr, "BUFFERED..%d %d\n", loop->size, + loop->swap); */ + while(loop->index < loop->size) { + loop->inptr = loop->it->dataptr; + /* Copy (cast) First term over to output */ + if (loop->cast) { + /* A little tricky because we need to + cast it first */ + arr->descr->f->copyswap(loop->buffer, + loop->inptr, + loop->swap, + NULL); + loop->cast(loop->buffer, loop->castbuf, + 1, NULL, NULL); + if (loop->obj) { + Py_XINCREF(*((PyObject **)loop->castbuf)); + } + memcpy(loop->bufptr[0], loop->castbuf, + loop->outsize); + } + else { /* Simple copy */ + arr->descr->f->copyswap(loop->bufptr[0], + loop->inptr, + loop->swap, NULL); + } + loop->inptr += loop->instrides; + n = 1; + while(n < loop->N) { + /* Copy up to loop->bufsize elements to + buffer */ + dptr = loop->buffer; + for(i=0; ibufsize; i++, n++) { + if (n == loop->N) break; + arr->descr->f->copyswap(dptr, + loop->inptr, + loop->swap, + NULL); + loop->inptr += loop->instrides; + dptr += loop->insize; + } + if (loop->cast) + loop->cast(loop->buffer, + loop->castbuf, + i, NULL, NULL); + loop->function((char **)loop->bufptr, + &i, + loop->steps, loop->funcdata); + loop->bufptr[0] += loop->steps[0]*i; + loop->bufptr[2] += loop->steps[2]*i; + UFUNC_CHECK_ERROR(loop); + } + PyArray_ITER_NEXT(loop->it); + loop->bufptr[0] += loop->outsize; + loop->bufptr[2] = loop->bufptr[0]; + loop->index++; + } + } + + NPY_LOOP_END_THREADS; + + /* Hang on to this reference -- will be decref'd with loop */ + if (loop->retbase) ret = (PyArrayObject *)loop->ret->base; + else ret = loop->ret; + Py_INCREF(ret); + ufuncreduce_dealloc(loop); + return (PyObject *)ret; + +fail: + NPY_LOOP_END_THREADS; + + if (loop) ufuncreduce_dealloc(loop); + return NULL; +} + + +static PyObject * +PyUFunc_Accumulate(PyUFuncObject *self, PyArrayObject *arr, PyArrayObject *out, + int axis, int otype) +{ + PyArrayObject *ret=NULL; + PyUFuncReduceObject *loop; + intp i, n; + char *dptr; + NPY_BEGIN_THREADS_DEF; + + /* Construct loop object */ + loop = construct_reduce(self, &arr, out, axis, otype, UFUNC_ACCUMULATE, 0, + "accumulate"); + if (!loop) return NULL; + + NPY_LOOP_BEGIN_THREADS; + switch(loop->meth) { + case ZERO_EL_REDUCELOOP: /* Accumulate */ + /* fprintf(stderr, "ZERO..%d\n", loop->size); */ + for(i=0; isize; i++) { + if (loop->obj) + Py_INCREF(*((PyObject **)loop->idptr)); + memcpy(loop->bufptr[0], loop->idptr, loop->outsize); + loop->bufptr[0] += loop->outsize; + } + break; + case ONE_EL_REDUCELOOP: /* Accumulate */ + /* fprintf(stderr, "ONEDIM..%d\n", loop->size); */ + while(loop->index < loop->size) { + if (loop->obj) + Py_INCREF(*((PyObject **)loop->it->dataptr)); + memmove(loop->bufptr[0], loop->it->dataptr, + loop->outsize); + PyArray_ITER_NEXT(loop->it); + loop->bufptr[0] += loop->outsize; + loop->index++; + } + break; + case NOBUFFER_UFUNCLOOP: /* Accumulate */ + /* fprintf(stderr, "NOBUFFER..%d\n", loop->size); */ + while(loop->index < loop->size) { + /* Copy first element to output */ + if (loop->obj) + Py_INCREF(*((PyObject **)loop->it->dataptr)); + memmove(loop->bufptr[0], loop->it->dataptr, + loop->outsize); + /* Adjust input pointer */ + loop->bufptr[1] = loop->it->dataptr+loop->steps[1]; + loop->function((char **)loop->bufptr, + &(loop->N), + loop->steps, loop->funcdata); + UFUNC_CHECK_ERROR(loop); + + PyArray_ITER_NEXT(loop->it); + PyArray_ITER_NEXT(loop->rit); + loop->bufptr[0] = loop->rit->dataptr; + loop->bufptr[2] = loop->bufptr[0] + loop->steps[0]; + loop->index++; + } + break; + case BUFFER_UFUNCLOOP: /* Accumulate */ + /* use buffer for 
arr */
+        /*
+           For each row to reduce
+           1. copy identity over to output (casting if necessary)
+           2. Fill inner buffer
+           3. When buffer is filled or end of row
+              a. Cast input buffers if needed
+              b. Call inner function.
+           4. Repeat 2 until row is done.
+        */
+        /* fprintf(stderr, "BUFFERED..%d %p\n", loop->size, loop->cast); */
+        while(loop->index < loop->size) {
+            loop->inptr = loop->it->dataptr;
+            /* Copy (cast) first term over to output */
+            if (loop->cast) {
+                /* A little tricky because we need to cast it first */
+                arr->descr->f->copyswap(loop->buffer, loop->inptr,
+                                        loop->swap, NULL);
+                loop->cast(loop->buffer, loop->castbuf, 1, NULL, NULL);
+                if (loop->obj) {
+                    Py_XINCREF(*((PyObject **)loop->castbuf));
+                }
+                memcpy(loop->bufptr[0], loop->castbuf, loop->outsize);
+            }
+            else { /* Simple copy */
+                arr->descr->f->copyswap(loop->bufptr[0], loop->inptr,
+                                        loop->swap, NULL);
+            }
+            loop->inptr += loop->instrides;
+            n = 1;
+            while(n < loop->N) {
+                /* Copy up to loop->bufsize elements to buffer */
+                dptr = loop->buffer;
+                for(i=0; i<loop->bufsize; i++, n++) {
+                    if (n == loop->N) break;
+                    arr->descr->f->copyswap(dptr, loop->inptr,
+                                            loop->swap, NULL);
+                    loop->inptr += loop->instrides;
+                    dptr += loop->insize;
+                }
+                if (loop->cast)
+                    loop->cast(loop->buffer, loop->castbuf, i, NULL, NULL);
+                loop->function((char **)loop->bufptr, &i,
+                               loop->steps, loop->funcdata);
+                loop->bufptr[0] += loop->steps[0]*i;
+                loop->bufptr[2] += loop->steps[2]*i;
+                UFUNC_CHECK_ERROR(loop);
+            }
+            PyArray_ITER_NEXT(loop->it);
+            PyArray_ITER_NEXT(loop->rit);
+            loop->bufptr[0] = loop->rit->dataptr;
+            loop->bufptr[2] = loop->bufptr[0] + loop->steps[0];
+            loop->index++;
+        }
+    }
+
+    NPY_LOOP_END_THREADS;
+
+    /* Hang on to this reference -- will be decref'd with loop */
+    if (loop->retbase) ret = (PyArrayObject *)loop->ret->base;
+    else ret = loop->ret;
+    Py_INCREF(ret);
+    ufuncreduce_dealloc(loop);
+    return (PyObject *)ret;
+
+ fail:
+    NPY_LOOP_END_THREADS;
+
+    if (loop) ufuncreduce_dealloc(loop);
+    return NULL;
+}
+
+/* Reduceat performs a reduce over an axis using the indices as a guide.
+
+   op.reduceat(array, indices) computes
+   op.reduce(array[indices[i]:indices[i+1]])
+   for i = 0..end, with an implicit indices[i+1] = len(array)
+   assumed when i = end-1.
+
+   If indices[i+1] <= indices[i]+1,
+   then the result is array[indices[i]] for that value.
+
+   op.accumulate(array) is the same as
+   op.reduceat(array, indices)[::2]
+   where indices is range(len(array)-1) with a zero placed
+   in every other element:
+       indices = zeros(len(array)*2 - 1)
+       indices[1::2] = range(1, len(array))
+
+   The output shape is based on the size of indices.
+*/
+
+static PyObject *
+PyUFunc_Reduceat(PyUFuncObject *self, PyArrayObject *arr, PyArrayObject *ind,
+                 PyArrayObject *out, int axis, int otype)
+{
+    PyArrayObject *ret;
+    PyUFuncReduceObject *loop;
+    intp *ptr = (intp *)ind->data;
+    intp nn = ind->dimensions[0];
+    intp mm = arr->dimensions[axis] - 1;
+    intp n, i, j;
+    char *dptr;
+    NPY_BEGIN_THREADS_DEF;
+
+    /* Check for out-of-bounds values in indices array */
+    for(i=0; i<nn; i++) {
+        if ((*ptr < 0) || (*ptr > mm)) {
+            PyErr_Format(PyExc_IndexError,
+                         "index out-of-bounds (0, %d)", (int) mm);
+            return NULL;
+        }
+        ptr++;
+    }
+
+    ptr = (intp *)ind->data;
+    /* Construct loop object */
+    loop = construct_reduce(self, &arr, out, axis, otype, UFUNC_REDUCEAT, nn,
+                            "reduceat");
+    if (!loop) return NULL;
+
+    NPY_LOOP_BEGIN_THREADS;
+    switch(loop->meth) {
+    /* zero-length index -- return array immediately */
+    case ZERO_EL_REDUCELOOP:
+        /* fprintf(stderr, "ZERO..\n"); */
+        break;
+    /* NOBUFFER -- behaved array and same type */
+    case NOBUFFER_UFUNCLOOP: /* Reduceat */
+        /* fprintf(stderr, "NOBUFFER..%d\n", loop->size); */
+        while(loop->index < loop->size) {
+            ptr = (intp *)ind->data;
+            for(i=0; i<nn; i++) {
+                loop->bufptr[1] = loop->it->dataptr + (*ptr)*loop->instrides;
+                if (loop->obj) {
+                    Py_XINCREF(*((PyObject **)loop->bufptr[1]));
+                }
+                memcpy(loop->bufptr[0], loop->bufptr[1], loop->outsize);
+                mm = (i==nn-1 ? arr->dimensions[axis]-*ptr : \
+                     *(ptr+1) - *ptr) - 1;
+                if (mm > 0) {
+                    loop->bufptr[1] += loop->instrides;
+                    loop->bufptr[2] = loop->bufptr[0];
+                    loop->function((char **)loop->bufptr, &mm,
+                                   loop->steps, loop->funcdata);
+                    UFUNC_CHECK_ERROR(loop);
+                }
+                loop->bufptr[0] += loop->ret->strides[axis];
+                ptr++;
+            }
+            PyArray_ITER_NEXT(loop->it);
+            PyArray_ITER_NEXT(loop->rit);
+            loop->bufptr[0] = loop->rit->dataptr;
+            loop->index++;
+        }
+        break;
+
+    /* BUFFER -- misbehaved array or different types */
+    case BUFFER_UFUNCLOOP: /* Reduceat */
+        /* fprintf(stderr, "BUFFERED..%d\n", loop->size); */
+        while(loop->index < loop->size) {
+            ptr = (intp *)ind->data;
+            for(i=0; i<nn; i++) {
+                if (loop->obj) {
+                    Py_XINCREF(*((PyObject **)loop->idptr));
+                }
+                memcpy(loop->bufptr[0], loop->idptr, loop->outsize);
+                n = 0;
+                mm = (i==nn-1 ? arr->dimensions[axis] - *ptr : \
+                     *(ptr+1) - *ptr);
+                if (mm < 1) mm = 1;
+                loop->inptr = loop->it->dataptr + (*ptr)*loop->instrides;
+                while (n < mm) {
+                    /* Copy up to loop->bufsize elements to buffer */
+                    dptr = loop->buffer;
+                    for(j=0; j<loop->bufsize; j++, n++) {
+                        if (n == mm) break;
+                        arr->descr->f->copyswap(dptr, loop->inptr,
+                                                loop->swap, NULL);
+                        loop->inptr += loop->instrides;
+                        dptr += loop->insize;
+                    }
+                    if (loop->cast)
+                        loop->cast(loop->buffer, loop->castbuf,
+                                   j, NULL, NULL);
+                    loop->bufptr[2] = loop->bufptr[0];
+                    loop->function((char **)loop->bufptr, &j,
+                                   loop->steps, loop->funcdata);
+                    UFUNC_CHECK_ERROR(loop);
+                    loop->bufptr[0] += j*loop->steps[0];
+                }
+                loop->bufptr[0] += loop->ret->strides[axis];
+                ptr++;
+            }
+            PyArray_ITER_NEXT(loop->it);
+            PyArray_ITER_NEXT(loop->rit);
+            loop->bufptr[0] = loop->rit->dataptr;
+            loop->index++;
+        }
+        break;
+    }
+
+    NPY_LOOP_END_THREADS;
+
+    /* Hang on to this reference -- will be decref'd with loop */
+    if (loop->retbase) ret = (PyArrayObject *)loop->ret->base;
+    else ret = loop->ret;
+    Py_INCREF(ret);
+    ufuncreduce_dealloc(loop);
+    return (PyObject *)ret;
+
+fail:
+    NPY_LOOP_END_THREADS;
+
+    if (loop) ufuncreduce_dealloc(loop);
+    return NULL;
+}
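A minimal, self-contained sketch of the reduceat rule spelled out in the comment above, restricted to a 1-d int array and the add operation; the helper name add_reduceat and the hard-coded example values are illustrative only and are not part of the NumPy sources:

    #include <stdio.h>

    /* Segment i runs from ind[i] to ind[i+1] (to len for the last segment);
     * a degenerate (empty or reversed) segment yields arr[ind[i]]. */
    static void
    add_reduceat(const int *arr, int len, const int *ind, int nind, int *out)
    {
        int i, j;

        for (i = 0; i < nind; i++) {
            int start = ind[i];
            int end = (i == nind - 1) ? len : ind[i + 1];

            if (end <= start) {
                out[i] = arr[start];      /* degenerate segment */
                continue;
            }
            out[i] = arr[start];
            for (j = start + 1; j < end; j++) {
                out[i] += arr[j];         /* reduce (add) over the segment */
            }
        }
    }

    int
    main(void)
    {
        int arr[8] = {0, 1, 2, 3, 4, 5, 6, 7};
        int ind[4] = {0, 4, 1, 5};
        int out[4];
        int i;

        add_reduceat(arr, 8, ind, 4, out);
        for (i = 0; i < 4; i++) {
            printf("%d ", out[i]);        /* prints: 6 4 10 18 */
        }
        printf("\n");
        return 0;
    }

For indices {0, 4, 1, 5} on 0..7 this prints 6 4 10 18; the reversed segment [4:1] collapses to arr[4], matching the rule that indices[i+1] <= indices[i]+1 yields array[indices[i]].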
+
+
+/* This code handles reduce, reduceat, and accumulate
+   (accumulate and reduce are special cases of the more general reduceat,
+   but they are handled separately for speed)
+*/
+static PyObject *
+PyUFunc_GenericReduction(PyUFuncObject *self, PyObject *args,
+                         PyObject *kwds, int operation)
+{
+    int axis = 0;
+    PyArrayObject *mp, *ret = NULL;
+    PyObject *op, *res = NULL;
+    PyObject *obj_ind, *context;
+    PyArrayObject *indices = NULL;
+    PyArray_Descr *otype = NULL;
+    PyArrayObject *out = NULL;
+    static char *kwlist1[] = {"array", "axis", "dtype", "out", NULL};
+    static char *kwlist2[] = {"array", "indices", "axis", "dtype", "out", NULL};
+    static char *_reduce_type[] = {"reduce", "accumulate", "reduceat", NULL};
+
+    if (self == NULL) {
+        PyErr_SetString(PyExc_ValueError, "function not supported");
+        return NULL;
+    }
+
+    if (self->core_enabled) {
+        PyErr_Format(PyExc_RuntimeError,
+                     "Reduction not defined on ufunc with signature");
+        return NULL;
+    }
+
+    if (self->nin != 2) {
+        PyErr_Format(PyExc_ValueError,
+                     "%s only supported for binary functions",
+                     _reduce_type[operation]);
+        return NULL;
+    }
+    if (self->nout != 1) {
+        PyErr_Format(PyExc_ValueError,
+                     "%s only supported for functions "\
+                     "returning a single value",
+                     _reduce_type[operation]);
+        return NULL;
+    }
+
+    if (operation == UFUNC_REDUCEAT) {
+        PyArray_Descr *indtype;
+        indtype = PyArray_DescrFromType(PyArray_INTP);
+        if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO|iO&O&", kwlist2,
+                                         &op, &obj_ind, &axis,
+                                         PyArray_DescrConverter2, &otype,
+                                         PyArray_OutputConverter, &out)) {
+            Py_XDECREF(otype);
+            return NULL;
+        }
+        indices = (PyArrayObject *)PyArray_FromAny(obj_ind, indtype,
+                                                   1, 1, CARRAY, NULL);
+        if (indices == NULL) {Py_XDECREF(otype); return NULL;}
+    }
+    else {
+        if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|iO&O&", kwlist1,
+                                         &op, &axis,
+                                         PyArray_DescrConverter2, &otype,
+                                         PyArray_OutputConverter, &out)) {
+            Py_XDECREF(otype);
+            return NULL;
+        }
+    }
+
+    /* Ensure input is an array */
+    if (!PyArray_Check(op) && !PyArray_IsScalar(op, Generic)) {
+        context = Py_BuildValue("O(O)i", self, op, 0);
+    }
+    else {
+        context = NULL;
+    }
+    mp = (PyArrayObject *)PyArray_FromAny(op, NULL, 0, 0, 0, context);
+    Py_XDECREF(context);
+    if (mp == NULL) return NULL;
+
+    /* Check to see if input is zero-dimensional */
+    if (mp->nd == 0) {
+        PyErr_Format(PyExc_TypeError, "cannot %s on a scalar",
+                     _reduce_type[operation]);
+        Py_XDECREF(otype);
+        Py_DECREF(mp);
+        return NULL;
+    }
+
+    /* Check to see that type (and otype) is not FLEXIBLE */
+    if (PyArray_ISFLEXIBLE(mp) ||
+        (otype && PyTypeNum_ISFLEXIBLE(otype->type_num))) {
+        PyErr_Format(PyExc_TypeError,
+                     "cannot perform %s with flexible type",
+                     _reduce_type[operation]);
+        Py_XDECREF(otype);
+        Py_DECREF(mp);
+        return NULL;
+    }
+
+    if (axis < 0) axis += mp->nd;
+    if (axis < 0 || axis >= mp->nd) {
+        PyErr_SetString(PyExc_ValueError, "axis not in array");
+        Py_XDECREF(otype);
+        Py_DECREF(mp);
+        return NULL;
+    }
+
+    /* If out is specified it determines otype
+       unless otype is already specified. */
+    if (otype == NULL && out != NULL) {
+        otype = out->descr;
+        Py_INCREF(otype);
+    }
+
+    if (otype == NULL) {
+        /* For integer types --- make sure at least a long is used
+           for add and multiply reduction --- to avoid overflow */
+        int typenum = PyArray_TYPE(mp);
+        if ((typenum < NPY_FLOAT) &&
+            ((strcmp(self->name, "add") == 0) ||
+             (strcmp(self->name, "multiply") == 0))) {
+            if (PyTypeNum_ISBOOL(typenum))
+                typenum = PyArray_LONG;
+            else if (mp->descr->elsize < sizeof(long)) {
+                if (PyTypeNum_ISUNSIGNED(typenum))
+                    typenum = PyArray_ULONG;
+                else
+                    typenum = PyArray_LONG;
+            }
+        }
+        otype = PyArray_DescrFromType(typenum);
+    }
+
+    switch(operation) {
+    case UFUNC_REDUCE:
+        ret = (PyArrayObject *)PyUFunc_Reduce(self, mp, out, axis,
+                                              otype->type_num);
+        break;
+    case UFUNC_ACCUMULATE:
+        ret = (PyArrayObject *)PyUFunc_Accumulate(self, mp, out, axis,
+                                                  otype->type_num);
+        break;
+    case UFUNC_REDUCEAT:
+        ret = (PyArrayObject *)PyUFunc_Reduceat(self, mp, indices, out,
+                                                axis, otype->type_num);
+        Py_DECREF(indices);
+        break;
+    }
+    Py_DECREF(mp);
+    Py_DECREF(otype);
+    if (ret == NULL) return NULL;
+    if (op->ob_type != ret->ob_type) {
+        res = PyObject_CallMethod(op, "__array_wrap__", "O", ret);
+        if (res == NULL) PyErr_Clear();
+        else if (res == Py_None) Py_DECREF(res);
+        else {
+            Py_DECREF(ret);
+            return res;
+        }
+    }
+    return PyArray_Return(ret);
+}
+
+/* This function analyzes the input arguments
+   and determines an appropriate __array_wrap__ function to call
+   for the outputs.
+ + If an output argument is provided, then it is wrapped + with its own __array_wrap__ not with the one determined by + the input arguments. + + if the provided output argument is already an array, + the wrapping function is None (which means no wrapping will + be done --- not even PyArray_Return). + + A NULL is placed in output_wrap for outputs that + should just have PyArray_Return called. +*/ + +static void +_find_array_wrap(PyObject *args, PyObject **output_wrap, int nin, int nout) +{ + Py_ssize_t nargs; + int i; + int np = 0; + double priority, maxpriority; + PyObject *with_wrap[NPY_MAXARGS], *wraps[NPY_MAXARGS]; + PyObject *obj, *wrap = NULL; + + nargs = PyTuple_GET_SIZE(args); + for(i = 0; i < nin; i++) { + obj = PyTuple_GET_ITEM(args, i); + if (PyArray_CheckExact(obj) || \ + PyArray_IsAnyScalar(obj)) + continue; + wrap = PyObject_GetAttrString(obj, "__array_wrap__"); + if (wrap) { + if (PyCallable_Check(wrap)) { + with_wrap[np] = obj; + wraps[np] = wrap; + ++np; + } + else { + Py_DECREF(wrap); + wrap = NULL; + } + } + else { + PyErr_Clear(); + } + } + if (np >= 2) { + wrap = wraps[0]; + maxpriority = PyArray_GetPriority(with_wrap[0], + PyArray_SUBTYPE_PRIORITY); + for(i = 1; i < np; ++i) { + priority = \ + PyArray_GetPriority(with_wrap[i], + PyArray_SUBTYPE_PRIORITY); + if (priority > maxpriority) { + maxpriority = priority; + Py_DECREF(wrap); + wrap = wraps[i]; + } else { + Py_DECREF(wraps[i]); + } + } + } + + /* Here wrap is the wrapping function determined from the + input arrays (could be NULL). + + For all the output arrays decide what to do. + + 1) Use the wrap function determined from the input arrays + This is the default if the output array is not + passed in. + + 2) Use the __array_wrap__ method of the output object + passed in. -- this is special cased for + exact ndarray so that no PyArray_Return is + done in that case. + */ + + for(i=0; inargs; i++) { + mps[i] = NULL; + } + + errval = PyUFunc_GenericFunction(self, args, kwds, mps); + if (errval < 0) { + for(i = 0; i < self->nargs; i++) { + PyArray_XDECREF_ERR(mps[i]); + } + if (errval == -1) + return NULL; + else { + /* + * PyErr_SetString(PyExc_TypeError,""); + * return NULL; + */ + Py_INCREF(Py_NotImplemented); + return Py_NotImplemented; + } + } + + for(i = 0; i < self->nin; i++) { + Py_DECREF(mps[i]); + } + + + /* + * Use __array_wrap__ on all outputs + * if present on one of the input arguments. + * If present for multiple inputs: + * use __array_wrap__ of input object with largest + * __array_priority__ (default = 0.0) + * + * Exception: we should not wrap outputs for items already + * passed in as output-arguments. These items should either + * be left unwrapped or wrapped by calling their own __array_wrap__ + * routine. + * + * For each output argument, wrap will be either + * NULL --- call PyArray_Return() -- default if no output arguments given + * None --- array-object passed in don't call PyArray_Return + * method --- the __array_wrap__ method to call. 
+ */ + _find_array_wrap(args, wraparr, self->nin, self->nout); + + /* wrap outputs */ + for(i = 0; i < self->nout; i++) { + int j=self->nin+i; + PyObject *wrap; + + /* + * check to see if any UPDATEIFCOPY flags are set + * which meant that a temporary output was generated + */ + if (mps[j]->flags & UPDATEIFCOPY) { + PyObject *old = mps[j]->base; + /* we want to hang on to this */ + Py_INCREF(old); + /* should trigger the copyback into old */ + Py_DECREF(mps[j]); + mps[j] = (PyArrayObject *)old; + } + wrap = wraparr[i]; + if (wrap != NULL) { + if (wrap == Py_None) { + Py_DECREF(wrap); + retobj[i] = (PyObject *)mps[j]; + continue; + } + res = PyObject_CallFunction(wrap, "O(OOi)", + mps[j], self, args, i); + if (res == NULL && \ + PyErr_ExceptionMatches(PyExc_TypeError)) { + PyErr_Clear(); + res = PyObject_CallFunctionObjArgs(wrap, + mps[j], + NULL); + } + Py_DECREF(wrap); + if (res == NULL) { + goto fail; + } + else if (res == Py_None) { + Py_DECREF(res); + } + else { + Py_DECREF(mps[j]); + retobj[i] = res; + continue; + } + } + /* default behavior */ + retobj[i] = PyArray_Return(mps[j]); + } + + if (self->nout == 1) { + return retobj[0]; + } else { + ret = (PyTupleObject *)PyTuple_New(self->nout); + for(i = 0; i < self->nout; i++) { + PyTuple_SET_ITEM(ret, i, retobj[i]); + } + return (PyObject *)ret; + } +fail: + for(i = self->nin; i < self->nargs; i++) { + Py_XDECREF(mps[i]); + } + return NULL; +} + +static PyObject * +ufunc_geterr(PyObject *NPY_UNUSED(dummy), PyObject *args) +{ + PyObject *thedict; + PyObject *res; + + if (!PyArg_ParseTuple(args, "")) return NULL; + + if (PyUFunc_PYVALS_NAME == NULL) { + PyUFunc_PYVALS_NAME = PyString_InternFromString(UFUNC_PYVALS_NAME); + } + thedict = PyThreadState_GetDict(); + if (thedict == NULL) { + thedict = PyEval_GetBuiltins(); + } + res = PyDict_GetItem(thedict, PyUFunc_PYVALS_NAME); + if (res != NULL) { + Py_INCREF(res); + return res; + } + /* Construct list of defaults */ + res = PyList_New(3); + if (res == NULL) return NULL; + PyList_SET_ITEM(res, 0, PyInt_FromLong(PyArray_BUFSIZE)); + PyList_SET_ITEM(res, 1, PyInt_FromLong(UFUNC_ERR_DEFAULT)); + PyList_SET_ITEM(res, 2, Py_None); Py_INCREF(Py_None); + return res; +} + +#if USE_USE_DEFAULTS==1 +/* + This is a strategy to buy a little speed up and avoid the dictionary + look-up in the default case. It should work in the presence of + threads. If it is deemed too complicated or it doesn't actually work + it could be taken out. 
+*/ +static int +ufunc_update_use_defaults(void) +{ + PyObject *errobj=NULL; + int errmask, bufsize; + int res; + + PyUFunc_NUM_NODEFAULTS += 1; + res = PyUFunc_GetPyValues("test", &bufsize, &errmask, + &errobj); + PyUFunc_NUM_NODEFAULTS -= 1; + + if (res < 0) {Py_XDECREF(errobj); return -1;} + + if ((errmask != UFUNC_ERR_DEFAULT) || \ + (bufsize != PyArray_BUFSIZE) || \ + (PyTuple_GET_ITEM(errobj, 1) != Py_None)) { + PyUFunc_NUM_NODEFAULTS += 1; + } + else if (PyUFunc_NUM_NODEFAULTS > 0) { + PyUFunc_NUM_NODEFAULTS -= 1; + } + Py_XDECREF(errobj); + return 0; +} +#endif + +static PyObject * +ufunc_seterr(PyObject *NPY_UNUSED(dummy), PyObject *args) +{ + PyObject *thedict; + int res; + PyObject *val; + static char *msg = "Error object must be a list of length 3"; + + if (!PyArg_ParseTuple(args, "O", &val)) return NULL; + + if (!PyList_CheckExact(val) || PyList_GET_SIZE(val) != 3) { + PyErr_SetString(PyExc_ValueError, msg); + return NULL; + } + if (PyUFunc_PYVALS_NAME == NULL) { + PyUFunc_PYVALS_NAME = PyString_InternFromString(UFUNC_PYVALS_NAME); + } + thedict = PyThreadState_GetDict(); + if (thedict == NULL) { + thedict = PyEval_GetBuiltins(); + } + res = PyDict_SetItem(thedict, PyUFunc_PYVALS_NAME, val); + if (res < 0) return NULL; +#if USE_USE_DEFAULTS==1 + if (ufunc_update_use_defaults() < 0) return NULL; +#endif + Py_INCREF(Py_None); + return Py_None; +} + + + +static PyUFuncGenericFunction pyfunc_functions[] = {PyUFunc_On_Om}; + +static char +doc_frompyfunc[] = "frompyfunc(func, nin, nout) take an arbitrary python function that takes nin objects as input and returns nout objects and return a universal function (ufunc). This ufunc always returns PyObject arrays"; + +static PyObject * +ufunc_frompyfunc(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *NPY_UNUSED(kwds)) { + /* Keywords are ignored for now */ + + PyObject *function, *pyname=NULL; + int nin, nout, i; + PyUFunc_PyFuncData *fdata; + PyUFuncObject *self; + char *fname, *str; + Py_ssize_t fname_len=-1; + int offset[2]; + + if (!PyArg_ParseTuple(args, "Oii", &function, &nin, &nout)) return NULL; + + if (!PyCallable_Check(function)) { + PyErr_SetString(PyExc_TypeError, "function must be callable"); + return NULL; + } + + self = _pya_malloc(sizeof(PyUFuncObject)); + if (self == NULL) return NULL; + PyObject_Init((PyObject *)self, &PyUFunc_Type); + + self->userloops = NULL; + self->nin = nin; + self->nout = nout; + self->nargs = nin+nout; + self->identity = PyUFunc_None; + self->functions = pyfunc_functions; + + self->ntypes = 1; + self->check_return = 0; + + /* generalized ufunc */ + self->core_enabled = 0; + self->core_num_dim_ix = 0; + self->core_num_dims = NULL; + self->core_dim_ixs = NULL; + self->core_offsets = NULL; + self->core_signature = NULL; + + pyname = PyObject_GetAttrString(function, "__name__"); + if (pyname) + (void) PyString_AsStringAndSize(pyname, &fname, &fname_len); + + if (PyErr_Occurred()) { + fname = "?"; + fname_len = 1; + PyErr_Clear(); + } + Py_XDECREF(pyname); + + + + /* self->ptr holds a pointer for enough memory for + self->data[0] (fdata) + self->data + self->name + self->types + + To be safest, all of these need their memory aligned on void * pointers + Therefore, we may need to allocate extra space. 
+ */ + offset[0] = sizeof(PyUFunc_PyFuncData); + i = (sizeof(PyUFunc_PyFuncData) % sizeof(void *)); + if (i) offset[0] += (sizeof(void *) - i); + offset[1] = self->nargs; + i = (self->nargs % sizeof(void *)); + if (i) offset[1] += (sizeof(void *)-i); + + self->ptr = _pya_malloc(offset[0] + offset[1] + sizeof(void *) + \ + (fname_len+14)); + + if (self->ptr == NULL) return PyErr_NoMemory(); + Py_INCREF(function); + self->obj = function; + fdata = (PyUFunc_PyFuncData *)(self->ptr); + fdata->nin = nin; + fdata->nout = nout; + fdata->callable = function; + + self->data = (void **)(((char *)self->ptr) + offset[0]); + self->data[0] = (void *)fdata; + + self->types = (char *)self->data + sizeof(void *); + for(i=0; inargs; i++) self->types[i] = PyArray_OBJECT; + + str = self->types + offset[1]; + memcpy(str, fname, fname_len); + memcpy(str+fname_len, " (vectorized)", 14); + + self->name = str; + + /* Do a better job someday */ + self->doc = "dynamic ufunc based on a python function"; + + + return (PyObject *)self; +} + +/*UFUNC_API*/ +static int +PyUFunc_ReplaceLoopBySignature(PyUFuncObject *func, + PyUFuncGenericFunction newfunc, + int *signature, + PyUFuncGenericFunction *oldfunc) +{ + int i,j; + int res = -1; + /* Find the location of the matching signature */ + for(i=0; intypes; i++) { + for(j=0; jnargs; j++) { + if (signature[j] != func->types[i*func->nargs+j]) + break; + } + if (j < func->nargs) continue; + + if (oldfunc != NULL) { + *oldfunc = func->functions[i]; + } + func->functions[i] = newfunc; + res = 0; + break; + } + return res; +} + +/*UFUNC_API*/ +static PyObject * +PyUFunc_FromFuncAndData(PyUFuncGenericFunction *func, void **data, + char *types, int ntypes, + int nin, int nout, int identity, + char *name, char *doc, int check_return) +{ + return PyUFunc_FromFuncAndDataAndSignature(func, data, types, ntypes, + nin, nout, identity, name, doc, check_return, NULL); +} + +/*UFUNC_API*/ +static PyObject * +PyUFunc_FromFuncAndDataAndSignature(PyUFuncGenericFunction *func, void **data, + char *types, int ntypes, + int nin, int nout, int identity, + char *name, char *doc, + int check_return, const char *signature) +{ + PyUFuncObject *self; + + self = _pya_malloc(sizeof(PyUFuncObject)); + if (self == NULL) return NULL; + PyObject_Init((PyObject *)self, &PyUFunc_Type); + + self->nin = nin; + self->nout = nout; + self->nargs = nin+nout; + self->identity = identity; + + self->functions = func; + self->data = data; + self->types = types; + self->ntypes = ntypes; + self->check_return = check_return; + self->ptr = NULL; + self->obj = NULL; + self->userloops=NULL; + + if (name == NULL) self->name = "?"; + else self->name = name; + + if (doc == NULL) self->doc = "NULL"; + else self->doc = doc; + + /* generalized ufunc */ + self->core_enabled = 0; + self->core_num_dim_ix = 0; + self->core_num_dims = NULL; + self->core_dim_ixs = NULL; + self->core_offsets = NULL; + self->core_signature = NULL; + if (signature != NULL) { + if (_parse_signature(self, signature) != 0) + return NULL; + } + + return (PyObject *)self; +} + +/* This is the first-part of the CObject structure. + + I don't think this will change, but if it should, then + this needs to be fixed. The exposed C-API was insufficient + because I needed to replace the pointer and it wouldn't + let me with a destructor set (even though it works fine + with the destructor). 
+*/ + +typedef struct { + PyObject_HEAD + void *c_obj; +} _simple_cobj; + +#define _SETCPTR(cobj, val) ((_simple_cobj *)(cobj))->c_obj = (val) + +/* return 1 if arg1 > arg2, 0 if arg1 == arg2, and -1 if arg1 < arg2 + */ +static int +cmp_arg_types(int *arg1, int *arg2, int n) +{ + for(;n>0; n--, arg1++, arg2++) { + if (PyArray_EquivTypenums(*arg1, *arg2)) continue; + if (PyArray_CanCastSafely(*arg1, *arg2)) + return -1; + return 1; + } + return 0; +} + +/* This frees the linked-list structure + when the CObject is destroyed (removed + from the internal dictionary) +*/ +static void +_loop1d_list_free(void *ptr) +{ + PyUFunc_Loop1d *funcdata; + if (ptr == NULL) return; + funcdata = (PyUFunc_Loop1d *)ptr; + if (funcdata == NULL) return; + _pya_free(funcdata->arg_types); + _loop1d_list_free(funcdata->next); + _pya_free(funcdata); +} + + +/*UFUNC_API*/ +static int +PyUFunc_RegisterLoopForType(PyUFuncObject *ufunc, + int usertype, + PyUFuncGenericFunction function, + int *arg_types, + void *data) +{ + PyArray_Descr *descr; + PyUFunc_Loop1d *funcdata; + PyObject *key, *cobj; + int i; + int *newtypes=NULL; + + descr=PyArray_DescrFromType(usertype); + if ((usertype < PyArray_USERDEF) || (descr==NULL)) { + PyErr_SetString(PyExc_TypeError, + "unknown user-defined type"); + return -1; + } + Py_DECREF(descr); + + if (ufunc->userloops == NULL) { + ufunc->userloops = PyDict_New(); + } + key = PyInt_FromLong((long) usertype); + if (key == NULL) return -1; + funcdata = _pya_malloc(sizeof(PyUFunc_Loop1d)); + if (funcdata == NULL) goto fail; + newtypes = _pya_malloc(sizeof(int)*ufunc->nargs); + if (newtypes == NULL) goto fail; + if (arg_types != NULL) { + for(i=0; inargs; i++) { + newtypes[i] = arg_types[i]; + } + } + else { + for(i=0; inargs; i++) { + newtypes[i] = usertype; + } + } + + funcdata->func = function; + funcdata->arg_types = newtypes; + funcdata->data = data; + funcdata->next = NULL; + + /* Get entry for this user-defined type*/ + cobj = PyDict_GetItem(ufunc->userloops, key); + + /* If it's not there, then make one and return. */ + if (cobj == NULL) { + cobj = PyCObject_FromVoidPtr((void *)funcdata, + _loop1d_list_free); + if (cobj == NULL) goto fail; + PyDict_SetItem(ufunc->userloops, key, cobj); + Py_DECREF(cobj); + Py_DECREF(key); + return 0; + } + else { + PyUFunc_Loop1d *current, *prev=NULL; + int cmp=1; + /* There is already at least 1 loop. Place this one in + lexicographic order. If the next one signature + is exactly like this one, then just replace. + Otherwise insert. + */ + current = (PyUFunc_Loop1d *)PyCObject_AsVoidPtr(cobj); + while (current != NULL) { + cmp = cmp_arg_types(current->arg_types, newtypes, + ufunc->nargs); + if (cmp >= 0) break; + prev = current; + current = current->next; + } + if (cmp == 0) { /* just replace it with new function */ + current->func = function; + current->data = data; + _pya_free(newtypes); + _pya_free(funcdata); + } + else { /* insert it before the current one + by hacking the internals of cobject to + replace the function pointer --- + can't use CObject API because destructor is set. 
+ */ + funcdata->next = current; + if (prev == NULL) { /* place this at front */ + _SETCPTR(cobj, funcdata); + } + else { + prev->next = funcdata; + } + } + } + Py_DECREF(key); + return 0; + + + fail: + Py_DECREF(key); + _pya_free(funcdata); + _pya_free(newtypes); + if (!PyErr_Occurred()) PyErr_NoMemory(); + return -1; +} + +#undef _SETCPTR + + +static void +ufunc_dealloc(PyUFuncObject *self) +{ + if (self->core_num_dims) _pya_free(self->core_num_dims); + if (self->core_dim_ixs) _pya_free(self->core_dim_ixs); + if (self->core_offsets) _pya_free(self->core_offsets); + if (self->core_signature) _pya_free(self->core_signature); + if (self->ptr) _pya_free(self->ptr); + Py_XDECREF(self->userloops); + Py_XDECREF(self->obj); + _pya_free(self); +} + +static PyObject * +ufunc_repr(PyUFuncObject *self) +{ + char buf[100]; + + sprintf(buf, "", self->name); + + return PyString_FromString(buf); +} + + +/* -------------------------------------------------------- */ + +/* op.outer(a,b) is equivalent to op(a[:,NewAxis,NewAxis,etc.],b) + where a has b.ndim NewAxis terms appended. + + The result has dimensions a.ndim + b.ndim +*/ + +static PyObject * +ufunc_outer(PyUFuncObject *self, PyObject *args, PyObject *kwds) +{ + int i; + PyObject *ret; + PyArrayObject *ap1=NULL, *ap2=NULL, *ap_new=NULL; + PyObject *new_args, *tmp; + PyObject *shape1, *shape2, *newshape; + + if (self->core_enabled) { + PyErr_Format(PyExc_TypeError, + "method outer is not allowed in ufunc with non-trivial"\ + " signature"); + return NULL; + } + + if(self->nin != 2) { + PyErr_SetString(PyExc_ValueError, + "outer product only supported "\ + "for binary functions"); + return NULL; + } + + if (PySequence_Length(args) != 2) { + PyErr_SetString(PyExc_TypeError, + "exactly two arguments expected"); + return NULL; + } + + tmp = PySequence_GetItem(args, 0); + if (tmp == NULL) return NULL; + ap1 = (PyArrayObject *) \ + PyArray_FromObject(tmp, PyArray_NOTYPE, 0, 0); + Py_DECREF(tmp); + if (ap1 == NULL) return NULL; + + tmp = PySequence_GetItem(args, 1); + if (tmp == NULL) return NULL; + ap2 = (PyArrayObject *)PyArray_FromObject(tmp, PyArray_NOTYPE, 0, 0); + Py_DECREF(tmp); + if (ap2 == NULL) {Py_DECREF(ap1); return NULL;} + + /* Construct new shape tuple */ + shape1 = PyTuple_New(ap1->nd); + if (shape1 == NULL) goto fail; + for(i=0; ind; i++) + PyTuple_SET_ITEM(shape1, i, + PyLong_FromLongLong((longlong)ap1-> \ + dimensions[i])); + + shape2 = PyTuple_New(ap2->nd); + for(i=0; ind; i++) + PyTuple_SET_ITEM(shape2, i, PyInt_FromLong((long) 1)); + if (shape2 == NULL) {Py_DECREF(shape1); goto fail;} + newshape = PyNumber_Add(shape1, shape2); + Py_DECREF(shape1); + Py_DECREF(shape2); + if (newshape == NULL) goto fail; + + ap_new = (PyArrayObject *)PyArray_Reshape(ap1, newshape); + Py_DECREF(newshape); + if (ap_new == NULL) goto fail; + + new_args = Py_BuildValue("(OO)", ap_new, ap2); + Py_DECREF(ap1); + Py_DECREF(ap2); + Py_DECREF(ap_new); + ret = ufunc_generic_call(self, new_args, kwds); + Py_DECREF(new_args); + return ret; + + fail: + Py_XDECREF(ap1); + Py_XDECREF(ap2); + Py_XDECREF(ap_new); + return NULL; +} + + +static PyObject * +ufunc_reduce(PyUFuncObject *self, PyObject *args, PyObject *kwds) +{ + + return PyUFunc_GenericReduction(self, args, kwds, UFUNC_REDUCE); +} + +static PyObject * +ufunc_accumulate(PyUFuncObject *self, PyObject *args, PyObject *kwds) +{ + + return PyUFunc_GenericReduction(self, args, kwds, UFUNC_ACCUMULATE); +} + +static PyObject * +ufunc_reduceat(PyUFuncObject *self, PyObject *args, PyObject *kwds) +{ + return 
PyUFunc_GenericReduction(self, args, kwds, UFUNC_REDUCEAT); +} + + +static struct PyMethodDef ufunc_methods[] = { + {"reduce", (PyCFunction)ufunc_reduce, METH_VARARGS | METH_KEYWORDS, NULL }, + {"accumulate", (PyCFunction)ufunc_accumulate, + METH_VARARGS | METH_KEYWORDS, NULL }, + {"reduceat", (PyCFunction)ufunc_reduceat, + METH_VARARGS | METH_KEYWORDS, NULL }, + {"outer", (PyCFunction)ufunc_outer, METH_VARARGS | METH_KEYWORDS, NULL}, + {NULL, NULL, 0, NULL} /* sentinel */ +}; + + + +/* construct the string + y1,y2,...,yn +*/ +static PyObject * +_makeargs(int num, char *ltr, int null_if_none) +{ + PyObject *str; + int i; + switch (num) { + case 0: + if (null_if_none) return NULL; + return PyString_FromString(""); + case 1: + return PyString_FromString(ltr); + } + str = PyString_FromFormat("%s1, %s2", ltr, ltr); + for(i = 3; i <= num; ++i) { + PyString_ConcatAndDel(&str, PyString_FromFormat(", %s%d", ltr, i)); + } + return str; +} + +static char +_typecharfromnum(int num) { + PyArray_Descr *descr; + char ret; + + descr = PyArray_DescrFromType(num); + ret = descr->type; + Py_DECREF(descr); + return ret; +} + +static PyObject * +ufunc_get_doc(PyUFuncObject *self) +{ + /* Put docstring first or FindMethod finds it...*/ + /* could so some introspection on name and nin + nout */ + /* to automate the first part of it */ + /* the doc string shouldn't need the calling convention */ + /* construct + name(x1, x2, ...,[ out1, out2, ...]) + + __doc__ + */ + PyObject *outargs, *inargs, *doc; + outargs = _makeargs(self->nout, "out", 1); + inargs = _makeargs(self->nin, "x", 0); + if (outargs == NULL) { + doc = PyString_FromFormat("%s(%s)\n\n%s", + self->name, + PyString_AS_STRING(inargs), + self->doc); + } else { + doc = PyString_FromFormat("%s(%s[, %s])\n\n%s", + self->name, + PyString_AS_STRING(inargs), + PyString_AS_STRING(outargs), + self->doc); + Py_DECREF(outargs); + } + Py_DECREF(inargs); + return doc; +} + +static PyObject * +ufunc_get_nin(PyUFuncObject *self) +{ + return PyInt_FromLong(self->nin); +} + +static PyObject * +ufunc_get_nout(PyUFuncObject *self) +{ + return PyInt_FromLong(self->nout); +} + +static PyObject * +ufunc_get_nargs(PyUFuncObject *self) +{ + return PyInt_FromLong(self->nargs); +} + +static PyObject * +ufunc_get_ntypes(PyUFuncObject *self) +{ + return PyInt_FromLong(self->ntypes); +} + +static PyObject * +ufunc_get_types(PyUFuncObject *self) +{ + /* return a list with types grouped + input->output */ + PyObject *list; + PyObject *str; + int k, j, n, nt=self->ntypes; + int ni = self->nin; + int no = self->nout; + char *t; + list = PyList_New(nt); + if (list == NULL) return NULL; + t = _pya_malloc(no+ni+2); + n = 0; + for(k=0; ktypes[n]); + n++; + } + t[ni] = '-'; + t[ni+1] = '>'; + for(j=0; jtypes[n]); + n++; + } + str = PyString_FromStringAndSize(t, no+ni+2); + PyList_SET_ITEM(list, k, str); + } + _pya_free(t); + return list; +} + +static PyObject * +ufunc_get_name(PyUFuncObject *self) +{ + return PyString_FromString(self->name); +} + +static PyObject * +ufunc_get_identity(PyUFuncObject *self) +{ + switch(self->identity) { + case PyUFunc_One: + return PyInt_FromLong(1); + case PyUFunc_Zero: + return PyInt_FromLong(0); + } + return Py_None; +} + +static PyObject * +ufunc_get_signature(PyUFuncObject *self) +{ + if (!self->core_enabled) + Py_RETURN_NONE; + return PyString_FromString(self->core_signature); +} + +#undef _typecharfromnum + +/* Docstring is now set from python */ +/* static char *Ufunctype__doc__ = NULL; */ + +static PyGetSetDef ufunc_getset[] = { + {"__doc__", 
(getter)ufunc_get_doc, NULL, "documentation string", NULL}, + {"nin", (getter)ufunc_get_nin, NULL, "number of inputs", NULL}, + {"nout", (getter)ufunc_get_nout, NULL, "number of outputs", NULL}, + {"nargs", (getter)ufunc_get_nargs, NULL, "number of arguments", NULL}, + {"ntypes", (getter)ufunc_get_ntypes, NULL, "number of types", NULL}, + {"types", (getter)ufunc_get_types, NULL, "return a list with types grouped input->output", NULL}, + {"__name__", (getter)ufunc_get_name, NULL, "function name", NULL}, + {"identity", (getter)ufunc_get_identity, NULL, "identity value", NULL}, + {"signature",(getter)ufunc_get_signature,NULL, "signature"}, + {NULL, NULL, NULL, NULL, NULL}, /* Sentinel */ +}; + +static PyTypeObject PyUFunc_Type = { + PyObject_HEAD_INIT(0) + 0, /*ob_size*/ + "numpy.ufunc", /*tp_name*/ + sizeof(PyUFuncObject), /*tp_basicsize*/ + 0, /*tp_itemsize*/ + /* methods */ + (destructor)ufunc_dealloc, /*tp_dealloc*/ + (printfunc)0, /*tp_print*/ + (getattrfunc)0, /*tp_getattr*/ + (setattrfunc)0, /*tp_setattr*/ + (cmpfunc)0, /*tp_compare*/ + (reprfunc)ufunc_repr, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + (hashfunc)0, /*tp_hash*/ + (ternaryfunc)ufunc_generic_call, /*tp_call*/ + (reprfunc)ufunc_repr, /*tp_str*/ + 0, /* tp_getattro */ + 0, /* tp_setattro */ + 0, /* tp_as_buffer */ + Py_TPFLAGS_DEFAULT, /* tp_flags */ + NULL, /* tp_doc */ /* was Ufunctype__doc__ */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + 0, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + 0, /* tp_iternext */ + ufunc_methods, /* tp_methods */ + 0, /* tp_members */ + ufunc_getset, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + 0, /* tp_init */ + 0, /* tp_alloc */ + 0, /* tp_new */ + 0, /* tp_free */ + 0, /* tp_is_gc */ + 0, /* tp_bases */ + 0, /* tp_mro */ + 0, /* tp_cache */ + 0, /* tp_subclasses */ + 0, /* tp_weaklist */ + 0, /* tp_del */ + +#ifdef COUNT_ALLOCS + /* these must be last and never explicitly initialized */ + 0, /* tp_allocs */ + 0, /* tp_frees */ + 0, /* tp_maxalloc */ + 0, /* tp_prev */ + 0, /* *tp_next */ +#endif +}; + +/* End of code for ufunc objects */ +/* -------------------------------------------------------- */ Modified: trunk/numpy/core/src/umathmodule.c.src =================================================================== --- trunk/numpy/core/src/umathmodule.c.src 2008-11-21 21:50:17 UTC (rev 6088) +++ trunk/numpy/core/src/umathmodule.c.src 2008-11-22 01:28:52 UTC (rev 6089) @@ -21,7 +21,7 @@ #define M_PI 3.14159265358979323846264338328 #endif -#include "math_c99.inc" +#include "umath_funcs_c99.inc" /* ***************************************************************************** @@ -182,9 +182,9 @@ /* * Don't pass structures between functions (only pointers) because how - * structures are passed is compiler dependent and could cause - * segfaults if ufuncobject.c is compiled with a different compiler - * than an extension that makes use of the UFUNC API + * structures are passed is compiler dependent and could cause segfaults if + * umath_ufunc_object.inc is compiled with a different compiler than an + * extension that makes use of the UFUNC API */ /**begin repeat @@ -1939,7 +1939,7 @@ */ #include "__umath_generated.c" -#include "ufuncobject.c" +#include "umath_ufunc_object.inc" #include "__ufunc_api.c" static PyUFuncGenericFunction frexp_functions[] = { From numpy-svn at scipy.org Fri Nov 21 23:25:32 2008 From: numpy-svn at scipy.org (numpy-svn at 
scipy.org) Date: Fri, 21 Nov 2008 22:25:32 -0600 (CST) Subject: [Numpy-svn] r6090 - in trunk/numpy/core: . code_generators src Message-ID: <20081122042532.4624C39C088@scipy.org> Author: charris Date: 2008-11-21 22:25:21 -0600 (Fri, 21 Nov 2008) New Revision: 6090 Added: trunk/numpy/core/src/umath_funcs.inc.src trunk/numpy/core/src/umath_loops.inc.src Modified: trunk/numpy/core/SConscript trunk/numpy/core/code_generators/genapi.py trunk/numpy/core/setup.py trunk/numpy/core/src/umathmodule.c.src Log: Merge branch 'ufunc' Modified: trunk/numpy/core/SConscript =================================================================== --- trunk/numpy/core/SConscript 2008-11-22 01:28:52 UTC (rev 6089) +++ trunk/numpy/core/SConscript 2008-11-22 04:25:21 UTC (rev 6090) @@ -137,9 +137,9 @@ mfuncs_defined = dict([(f, 0) for f in mfuncs]) # Check for mandatory funcs: we barf if a single one of those is not there -mandatory_funcs = ["sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs", -"floor", "ceil", "sqrt", "log10", "log", "exp", "asin", "acos", "atan", "fmod", -'modf', 'frexp', 'ldexp'] + mandatory_funcs = ["sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs", + "floor", "ceil", "sqrt", "log10", "log", "exp", "asin", + "acos", "atan", "fmod", 'modf', 'frexp', 'ldexp'] if not config.CheckFuncsAtOnce(mandatory_funcs): raise SystemError("One of the required function to build numpy is not" @@ -159,16 +159,17 @@ # XXX: we do not test for hypot because python checks for it (HAVE_HYPOT in # python.h... I wish they would clean their public headers someday) -optional_stdfuncs = ["expm1", "log1p", "acosh", "asinh", "atanh", - "rint", "trunc"] + optional_stdfuncs = ["expm1", "log1p", "acosh", "asinh", "atanh", + "rint", "trunc", "exp2", "log2"] check_funcs(optional_stdfuncs) # C99 functions: float and long double versions -c99_funcs = ["sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs", "floor", - "ceil", "rint", "trunc", "sqrt", "log10", "log", "exp", - "expm1", "asin", "acos", "atan", "asinh", "acosh", "atanh", - "hypot", "atan2", "pow", "fmod", "modf", 'frexp', 'ldexp'] + c99_funcs = ["sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs", "floor", + "ceil", "rint", "trunc", "sqrt", "log10", "log", "log1p", "exp", + "expm1", "asin", "acos", "atan", "asinh", "acosh", "atanh", + "hypot", "atan2", "pow", "fmod", "modf", 'frexp', 'ldexp', + "exp2", "log2"] for prec in ['l', 'f']: fns = [f + prec for f in c99_funcs] @@ -251,7 +252,9 @@ # Generate generated code #------------------------ scalartypes_src = env.GenerateFromTemplate(pjoin('src', 'scalartypes.inc.src')) -math_c99_src = env.GenerateFromTemplate(pjoin('src', 'umath_funcs_c99.inc.src')) +umath_funcs_c99_src = env.GenerateFromTemplate(pjoin('src', 'umath_funcs_c99.inc.src')) +umath_funcs_src = env.GenerateFromTemplate(pjoin('src', 'umath_funcs.inc.src')) +umath_loops_src = env.GenerateFromTemplate(pjoin('src', 'umath_loops.inc.src')) arraytypes_src = env.GenerateFromTemplate(pjoin('src', 'arraytypes.inc.src')) sortmodule_src = env.GenerateFromTemplate(pjoin('src', '_sortmodule.c.src')) umathmodule_src = env.GenerateFromTemplate(pjoin('src', 'umathmodule.c.src')) Modified: trunk/numpy/core/code_generators/genapi.py =================================================================== --- trunk/numpy/core/code_generators/genapi.py 2008-11-22 01:28:52 UTC (rev 6089) +++ trunk/numpy/core/code_generators/genapi.py 2008-11-22 04:25:21 UTC (rev 6090) @@ -18,6 +18,7 @@ 'multiarraymodule.c', 'scalartypes.inc.src', 'umath_ufunc_object.inc', + 'umath_funcs.inc.src', 
'umathmodule.c.src' ] THIS_DIR = os.path.dirname(__file__) Modified: trunk/numpy/core/setup.py =================================================================== --- trunk/numpy/core/setup.py 2008-11-22 01:28:52 UTC (rev 6089) +++ trunk/numpy/core/setup.py 2008-11-22 04:25:21 UTC (rev 6090) @@ -358,6 +358,8 @@ join('src','scalartypes.inc.src'), join('src','arraytypes.inc.src'), join('src','umath_funcs_c99.inc.src'), + join('src','umath_funcs.inc.src'), + join('src','umath_loops.inc.src'), ], depends = [join('src','umath_ufunc_object.inc'), generate_umath_py, Added: trunk/numpy/core/src/umath_funcs.inc.src =================================================================== --- trunk/numpy/core/src/umath_funcs.inc.src 2008-11-22 01:28:52 UTC (rev 6089) +++ trunk/numpy/core/src/umath_funcs.inc.src 2008-11-22 04:25:21 UTC (rev 6090) @@ -0,0 +1,570 @@ +/* + * This file is for the definitions of the non-c99 functions used in ufuncs. + * All the complex ufuncs are defined here along with a smattering of real and + * object functions. + */ + +#ifndef M_PI +#define M_PI 3.14159265358979323846264338328 +#endif + +#define M_LOG10_E 0.434294481903251827651128918916605082294397 + +/* Useful constants in three precisions.*/ + +/**begin repeat + * #c = f, ,l# + * #C = F, ,L# + */ +#define NPY_E at c@ 2.7182818284590452353602874713526625 at C@ /* e */ +#define NPY_LOG2E at c@ 1.4426950408889634073599246810018921 at C@ /* log_2 e */ +#define NPY_LOG10E at c@ 0.4342944819032518276511289189166051 at C@ /* log_10 e */ +#define NPY_LOGE2 at c@ 0.6931471805599453094172321214581766 at C@ /* log_e 2 */ +#define NPY_LOGE10 at c@ 2.3025850929940456840179914546843642 at C@ /* log_e 10 */ +#define NPY_PI at c@ 3.1415926535897932384626433832795029 at C@ /* pi */ +#define NPY_PI_2 at c@ 1.5707963267948966192313216916397514 at C@ /* pi/2 */ +#define NPY_PI_4 at c@ 0.7853981633974483096156608458198757 at C@ /* pi/4 */ +#define NPY_1_PI at c@ 0.3183098861837906715377675267450287 at C@ /* 1/pi */ +#define NPY_2_PI at c@ 0.6366197723675813430755350534900574 at C@ /* 2/pi */ +/**end repeat**/ + +/* + ****************************************************************************** + ** FLOAT FUNCTIONS ** + ****************************************************************************** + */ + +/**begin repeat + * #type = float, double, longdouble# + * #c = f, ,l# + * #C = F, ,L# + */ + +#define LOGE2 NPY_LOGE2 at c@ +#define LOG2E NPY_LOG2E at c@ +#define RAD2DEG (180.0 at c@/NPY_PI at c@) +#define DEG2RAD (NPY_PI at c@/180.0 at c@) + +static @type@ +rad2deg at c@(@type@ x) { + return x*RAD2DEG; +} + +static @type@ +deg2rad at c@(@type@ x) { + return x*DEG2RAD; +} + +static @type@ +log2_1p at c@(@type@ x) +{ + @type@ u = 1 + x; + if (u == 1) { + return LOG2E*x; + } else { + return log2 at c@(u) * x / (u - 1); + } +} + +static @type@ +exp2_1m at c@(@type@ x) +{ + @type@ u = exp at c@(x); + if (u == 1.0) { + return LOGE2*x; + } else if (u - 1 == -1) { + return -LOGE2; + } else { + return (u - 1) * x/log2 at c@(u); + } +} + +static @type@ +logaddexp at c@(@type@ x, @type@ y) +{ + const @type@ tmp = x - y; + if (tmp > 0) { + return x + log1p at c@(exp at c@(-tmp)); + } + else { + return y + log1p at c@(exp at c@(tmp)); + } +} + +static @type@ +logaddexp2 at c@(@type@ x, @type@ y) +{ + const @type@ tmp = x - y; + if (tmp > 0) { + return x + log2_1p at c@(exp2 at c@(-tmp)); + } + else { + return y + log2_1p at c@(exp2 at c@(tmp)); + } +} + +#define degrees at c@ rad2deg at c@ +#define radians at c@ deg2rad at c@ + +#undef LOGE2 
+#undef LOG2E +#undef RAD2DEG +#undef DEG2RAD + +/**end repeat**/ + +/* + ***************************************************************************** + ** PYTHON OBJECT FUNCTIONS ** + ***************************************************************************** + */ + +static PyObject * +Py_square(PyObject *o) +{ + return PyNumber_Multiply(o, o); +} + +static PyObject * +Py_get_one(PyObject *NPY_UNUSED(o)) +{ + return PyInt_FromLong(1); +} + +static PyObject * +Py_reciprocal(PyObject *o) +{ + PyObject *one = PyInt_FromLong(1); + PyObject *result; + + if (!one) { + return NULL; + } + result = PyNumber_Divide(one, o); + Py_DECREF(one); + return result; +} + +/* + * Define numpy version of PyNumber_Power as binary function. + */ +static PyObject * +npy_ObjectPower(PyObject *x, PyObject *y) +{ + return PyNumber_Power(x, y, Py_None); +} + +/**begin repeat + * #Kind = Max, Min# + * #OP = >=, <=# + */ +static PyObject * +npy_Object at Kind@(PyObject *i1, PyObject *i2) +{ + PyObject *result; + int cmp; + + if (PyObject_Cmp(i1, i2, &cmp) < 0) { + return NULL; + } + if (cmp @OP@ 0) { + result = i1; + } + else { + result = i2; + } + Py_INCREF(result); + return result; +} +/**end repeat**/ + + +/* + ***************************************************************************** + ** COMPLEX FUNCTIONS ** + ***************************************************************************** + */ + + +/* + * Don't pass structures between functions (only pointers) because how + * structures are passed is compiler dependent and could cause segfaults if + * umath_ufunc_object.inc is compiled with a different compiler than an + * extension that makes use of the UFUNC API + */ + +/**begin repeat + + #typ=float, double, longdouble# + #c=f,,l# +*/ + +/* constants */ +static c at typ@ nc_1 at c@ = {1., 0.}; +static c at typ@ nc_half at c@ = {0.5, 0.}; +static c at typ@ nc_i at c@ = {0., 1.}; +static c at typ@ nc_i2 at c@ = {0., 0.5}; +/* + * static c at typ@ nc_mi at c@ = {0., -1.}; + * static c at typ@ nc_pi2 at c@ = {M_PI/2., 0.}; + */ + + +static void +nc_sum at c@(c at typ@ *a, c at typ@ *b, c at typ@ *r) +{ + r->real = a->real + b->real; + r->imag = a->imag + b->imag; + return; +} + +static void +nc_diff at c@(c at typ@ *a, c at typ@ *b, c at typ@ *r) +{ + r->real = a->real - b->real; + r->imag = a->imag - b->imag; + return; +} + +static void +nc_neg at c@(c at typ@ *a, c at typ@ *r) +{ + r->real = -a->real; + r->imag = -a->imag; + return; +} + +static void +nc_prod at c@(c at typ@ *a, c at typ@ *b, c at typ@ *r) +{ + @typ@ ar=a->real, br=b->real, ai=a->imag, bi=b->imag; + r->real = ar*br - ai*bi; + r->imag = ar*bi + ai*br; + return; +} + +static void +nc_quot at c@(c at typ@ *a, c at typ@ *b, c at typ@ *r) +{ + + @typ@ ar=a->real, br=b->real, ai=a->imag, bi=b->imag; + @typ@ d = br*br + bi*bi; + r->real = (ar*br + ai*bi)/d; + r->imag = (ai*br - ar*bi)/d; + return; +} + +static void +nc_sqrt at c@(c at typ@ *x, c at typ@ *r) +{ + @typ@ s,d; + if (x->real == 0. && x->imag == 0.) 
+ *r = *x; + else { + s = sqrt at c@((fabs at c@(x->real) + hypot at c@(x->real,x->imag))/2); + d = x->imag/(2*s); + if (x->real > 0) { + r->real = s; + r->imag = d; + } + else if (x->imag >= 0) { + r->real = d; + r->imag = s; + } + else { + r->real = -d; + r->imag = -s; + } + } + return; +} + +static void +nc_rint at c@(c at typ@ *x, c at typ@ *r) +{ + r->real = rint at c@(x->real); + r->imag = rint at c@(x->imag); +} + +static void +nc_log at c@(c at typ@ *x, c at typ@ *r) +{ + @typ@ l = hypot at c@(x->real,x->imag); + r->imag = atan2 at c@(x->imag, x->real); + r->real = log at c@(l); + return; +} + +static void +nc_log1p at c@(c at typ@ *x, c at typ@ *r) +{ + @typ@ l = hypot at c@(x->real + 1,x->imag); + r->imag = atan2 at c@(x->imag, x->real + 1); + r->real = log at c@(l); + return; +} + +static void +nc_exp at c@(c at typ@ *x, c at typ@ *r) +{ + @typ@ a = exp at c@(x->real); + r->real = a*cos at c@(x->imag); + r->imag = a*sin at c@(x->imag); + return; +} + +static void +nc_expm1 at c@(c at typ@ *x, c at typ@ *r) +{ + @typ@ a = exp at c@(x->real); + r->real = a*cos at c@(x->imag) - 1; + r->imag = a*sin at c@(x->imag); + return; +} + +static void +nc_pow at c@(c at typ@ *a, c at typ@ *b, c at typ@ *r) +{ + intp n; + @typ@ ar=a->real, br=b->real, ai=a->imag, bi=b->imag; + + if (br == 0. && bi == 0.) { + r->real = 1.; + r->imag = 0.; + return; + } + if (ar == 0. && ai == 0.) { + r->real = 0.; + r->imag = 0.; + return; + } + if (bi == 0 && (n=(intp)br) == br) { + if (n > -100 && n < 100) { + c at typ@ p, aa; + intp mask = 1; + if (n < 0) n = -n; + aa = nc_1 at c@; + p.real = ar; p.imag = ai; + while (1) { + if (n & mask) + nc_prod at c@(&aa,&p,&aa); + mask <<= 1; + if (n < mask || mask <= 0) break; + nc_prod at c@(&p,&p,&p); + } + r->real = aa.real; r->imag = aa.imag; + if (br < 0) nc_quot at c@(&nc_1 at c@, r, r); + return; + } + } + /* + * complexobect.c uses an inline version of this formula + * investigate whether this had better performance or accuracy + */ + nc_log at c@(a, r); + nc_prod at c@(r, b, r); + nc_exp at c@(r, r); + return; +} + + +static void +nc_prodi at c@(c at typ@ *x, c at typ@ *r) +{ + @typ@ xr = x->real; + r->real = -x->imag; + r->imag = xr; + return; +} + + +static void +nc_acos at c@(c at typ@ *x, c at typ@ *r) +{ + /* + * return nc_neg(nc_prodi(nc_log(nc_sum(x,nc_prod(nc_i, + * nc_sqrt(nc_diff(nc_1,nc_prod(x,x)))))))); + */ + nc_prod at c@(x,x,r); + nc_diff at c@(&nc_1 at c@, r, r); + nc_sqrt at c@(r, r); + nc_prodi at c@(r, r); + nc_sum at c@(x, r, r); + nc_log at c@(r, r); + nc_prodi at c@(r, r); + nc_neg at c@(r, r); + return; +} + +static void +nc_acosh at c@(c at typ@ *x, c at typ@ *r) +{ + /* + * return nc_log(nc_sum(x, + * nc_prod(nc_sqrt(nc_sum(x,nc_1)), nc_sqrt(nc_diff(x,nc_1))))); + */ + c at typ@ t; + + nc_sum at c@(x, &nc_1 at c@, &t); + nc_sqrt at c@(&t, &t); + nc_diff at c@(x, &nc_1 at c@, r); + nc_sqrt at c@(r, r); + nc_prod at c@(&t, r, r); + nc_sum at c@(x, r, r); + nc_log at c@(r, r); + return; +} + +static void +nc_asin at c@(c at typ@ *x, c at typ@ *r) +{ + /* + * return nc_neg(nc_prodi(nc_log(nc_sum(nc_prod(nc_i,x), + * nc_sqrt(nc_diff(nc_1,nc_prod(x,x))))))); + */ + c at typ@ a, *pa=&a; + nc_prod at c@(x, x, r); + nc_diff at c@(&nc_1 at c@, r, r); + nc_sqrt at c@(r, r); + nc_prodi at c@(x, pa); + nc_sum at c@(pa, r, r); + nc_log at c@(r, r); + nc_prodi at c@(r, r); + nc_neg at c@(r, r); + return; +} + + +static void +nc_asinh at c@(c at typ@ *x, c at typ@ *r) +{ + /* + * return nc_log(nc_sum(nc_sqrt(nc_sum(nc_1,nc_prod(x,x))),x)); + */ + 
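Worth noting about the nc_pow helper a little further up: when the exponent has zero imaginary part and its integer value n satisfies |n| < 100, it skips the log/multiply/exp round trip and uses exponentiation by squaring, walking the bits of n, squaring the running power at each step and multiplying it into the accumulator only where a bit is set, then taking the reciprocal for negative exponents. Here is a real-valued sketch of the same technique; pow_by_squaring is an illustrative name, not something defined by the patch.

    /* Exponentiation by squaring for a double base and a small integer
     * exponent; mirrors the bit-walking loop in nc_pow without the complex
     * arithmetic.  Assumes n is small, as the real loop requires -100 < n < 100. */
    static double pow_by_squaring(double base, int n)
    {
        double acc = 1.0;              /* accumulated result */
        double p = base;               /* base^1, base^2, base^4, ... */
        int m = n < 0 ? -n : n;
        int mask = 1;

        while (mask > 0 && m >= mask) {
            if (m & mask) {
                acc *= p;              /* this exponent bit is set */
            }
            mask <<= 1;
            p *= p;
        }
        return n < 0 ? 1.0 / acc : acc;
    }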
nc_prod at c@(x, x, r); + nc_sum at c@(&nc_1 at c@, r, r); + nc_sqrt at c@(r, r); + nc_sum at c@(r, x, r); + nc_log at c@(r, r); + return; +} + +static void +nc_atan at c@(c at typ@ *x, c at typ@ *r) +{ + /* + * return nc_prod(nc_i2,nc_log(nc_quot(nc_sum(nc_i,x),nc_diff(nc_i,x)))); + */ + c at typ@ a, *pa=&a; + nc_diff at c@(&nc_i at c@, x, pa); + nc_sum at c@(&nc_i at c@, x, r); + nc_quot at c@(r, pa, r); + nc_log at c@(r,r); + nc_prod at c@(&nc_i2 at c@, r, r); + return; +} + +static void +nc_atanh at c@(c at typ@ *x, c at typ@ *r) +{ + /* + * return nc_prod(nc_half,nc_log(nc_quot(nc_sum(nc_1,x),nc_diff(nc_1,x)))); + */ + c at typ@ a, *pa=&a; + nc_diff at c@(&nc_1 at c@, x, r); + nc_sum at c@(&nc_1 at c@, x, pa); + nc_quot at c@(pa, r, r); + nc_log at c@(r, r); + nc_prod at c@(&nc_half at c@, r, r); + return; +} + +static void +nc_cos at c@(c at typ@ *x, c at typ@ *r) +{ + @typ@ xr=x->real, xi=x->imag; + r->real = cos at c@(xr)*cosh at c@(xi); + r->imag = -sin at c@(xr)*sinh at c@(xi); + return; +} + +static void +nc_cosh at c@(c at typ@ *x, c at typ@ *r) +{ + @typ@ xr=x->real, xi=x->imag; + r->real = cos at c@(xi)*cosh at c@(xr); + r->imag = sin at c@(xi)*sinh at c@(xr); + return; +} + +static void +nc_log10 at c@(c at typ@ *x, c at typ@ *r) +{ + nc_log at c@(x, r); + r->real *= (@typ@) M_LOG10_E; + r->imag *= (@typ@) M_LOG10_E; + return; +} + +static void +nc_sin at c@(c at typ@ *x, c at typ@ *r) +{ + @typ@ xr=x->real, xi=x->imag; + r->real = sin at c@(xr)*cosh at c@(xi); + r->imag = cos at c@(xr)*sinh at c@(xi); + return; +} + +static void +nc_sinh at c@(c at typ@ *x, c at typ@ *r) +{ + @typ@ xr=x->real, xi=x->imag; + r->real = cos at c@(xi)*sinh at c@(xr); + r->imag = sin at c@(xi)*cosh at c@(xr); + return; +} + +static void +nc_tan at c@(c at typ@ *x, c at typ@ *r) +{ + @typ@ sr,cr,shi,chi; + @typ@ rs,is,rc,ic; + @typ@ d; + @typ@ xr=x->real, xi=x->imag; + sr = sin at c@(xr); + cr = cos at c@(xr); + shi = sinh at c@(xi); + chi = cosh at c@(xi); + rs = sr*chi; + is = cr*shi; + rc = cr*chi; + ic = -sr*shi; + d = rc*rc + ic*ic; + r->real = (rs*rc+is*ic)/d; + r->imag = (is*rc-rs*ic)/d; + return; +} + +static void +nc_tanh at c@(c at typ@ *x, c at typ@ *r) +{ + @typ@ si,ci,shr,chr; + @typ@ rs,is,rc,ic; + @typ@ d; + @typ@ xr=x->real, xi=x->imag; + si = sin at c@(xi); + ci = cos at c@(xi); + shr = sinh at c@(xr); + chr = cosh at c@(xr); + rs = ci*shr; + is = si*chr; + rc = ci*chr; + ic = si*shr; + d = rc*rc + ic*ic; + r->real = (rs*rc+is*ic)/d; + r->imag = (is*rc-rs*ic)/d; + return; +} + +/**end repeat**/ + Copied: trunk/numpy/core/src/umath_loops.inc.src (from rev 6089, trunk/numpy/core/src/umathmodule.c.src) =================================================================== --- trunk/numpy/core/src/umathmodule.c.src 2008-11-22 01:28:52 UTC (rev 6089) +++ trunk/numpy/core/src/umath_loops.inc.src 2008-11-22 04:25:21 UTC (rev 6090) @@ -0,0 +1,1369 @@ +/* -*- c -*- */ + +/* + ***************************************************************************** + ** UFUNC LOOPS ** + ***************************************************************************** + */ + +#define OUTPUT_LOOP\ + char *op1 = args[1];\ + intp os1 = steps[1];\ + intp n = dimensions[0];\ + intp i;\ + for(i = 0; i < n; i++, op1 += os1) + +#define UNARY_LOOP\ + char *ip1 = args[0], *op1 = args[1];\ + intp is1 = steps[0], os1 = steps[1];\ + intp n = dimensions[0];\ + intp i;\ + for(i = 0; i < n; i++, ip1 += is1, op1 += os1) + +#define UNARY_LOOP_TWO_OUT\ + char *ip1 = args[0], *op1 = args[1], *op2 = args[2];\ + intp is1 = 
steps[0], os1 = steps[1], os2 = steps[2];\ + intp n = dimensions[0];\ + intp i;\ + for(i = 0; i < n; i++, ip1 += is1, op1 += os1, op2 += os2) + +#define BINARY_LOOP\ + char *ip1 = args[0], *ip2 = args[1], *op1 = args[2];\ + intp is1 = steps[0], is2 = steps[1], os1 = steps[2];\ + intp n = dimensions[0];\ + intp i;\ + for(i = 0; i < n; i++, ip1 += is1, ip2 += is2, op1 += os1) + +#define BINARY_LOOP_TWO_OUT\ + char *ip1 = args[0], *ip2 = args[1], *op1 = args[2], *op2 = args[3];\ + intp is1 = steps[0], is2 = steps[1], os1 = steps[2], os2 = steps[3];\ + intp n = dimensions[0];\ + intp i;\ + for(i = 0; i < n; i++, ip1 += is1, ip2 += is2, op1 += os1, op2 += os2) + +/****************************************************************************** + ** GENERIC FLOAT LOOPS ** + *****************************************************************************/ + + +typedef float floatUnaryFunc(float x); +typedef double doubleUnaryFunc(double x); +typedef longdouble longdoubleUnaryFunc(longdouble x); +typedef float floatBinaryFunc(float x, float y); +typedef double doubleBinaryFunc(double x, double y); +typedef longdouble longdoubleBinaryFunc(longdouble x, longdouble y); + + +/*UFUNC_API*/ +static void +PyUFunc_f_f(char **args, intp *dimensions, intp *steps, void *func) +{ + floatUnaryFunc *f = (floatUnaryFunc *)func; + UNARY_LOOP { + const float in1 = *(float *)ip1; + *(float *)op1 = f(in1); + } +} + +/*UFUNC_API*/ +static void +PyUFunc_f_f_As_d_d(char **args, intp *dimensions, intp *steps, void *func) +{ + doubleUnaryFunc *f = (doubleUnaryFunc *)func; + UNARY_LOOP { + const float in1 = *(float *)ip1; + *(float *)op1 = (float)f((double)in1); + } +} + +/*UFUNC_API*/ +static void +PyUFunc_ff_f(char **args, intp *dimensions, intp *steps, void *func) +{ + floatBinaryFunc *f = (floatBinaryFunc *)func; + BINARY_LOOP { + float in1 = *(float *)ip1; + float in2 = *(float *)ip2; + *(float *)op1 = f(in1, in2); + } +} + +/*UFUNC_API*/ +static void +PyUFunc_ff_f_As_dd_d(char **args, intp *dimensions, intp *steps, void *func) +{ + doubleBinaryFunc *f = (doubleBinaryFunc *)func; + BINARY_LOOP { + float in1 = *(float *)ip1; + float in2 = *(float *)ip2; + *(float *)op1 = (double)f((double)in1, (double)in2); + } +} + +/*UFUNC_API*/ +static void +PyUFunc_d_d(char **args, intp *dimensions, intp *steps, void *func) +{ + doubleUnaryFunc *f = (doubleUnaryFunc *)func; + UNARY_LOOP { + double in1 = *(double *)ip1; + *(double *)op1 = f(in1); + } +} + +/*UFUNC_API*/ +static void +PyUFunc_dd_d(char **args, intp *dimensions, intp *steps, void *func) +{ + doubleBinaryFunc *f = (doubleBinaryFunc *)func; + BINARY_LOOP { + double in1 = *(double *)ip1; + double in2 = *(double *)ip2; + *(double *)op1 = f(in1, in2); + } +} + +/*UFUNC_API*/ +static void +PyUFunc_g_g(char **args, intp *dimensions, intp *steps, void *func) +{ + longdoubleUnaryFunc *f = (longdoubleUnaryFunc *)func; + UNARY_LOOP { + longdouble in1 = *(longdouble *)ip1; + *(longdouble *)op1 = f(in1); + } +} + +/*UFUNC_API*/ +static void +PyUFunc_gg_g(char **args, intp *dimensions, intp *steps, void *func) +{ + longdoubleBinaryFunc *f = (longdoubleBinaryFunc *)func; + BINARY_LOOP { + longdouble in1 = *(longdouble *)ip1; + longdouble in2 = *(longdouble *)ip2; + *(longdouble *)op1 = f(in1, in2); + } +} + + + +/****************************************************************************** + ** GENERIC COMPLEX LOOPS ** + *****************************************************************************/ + + +typedef void cdoubleUnaryFunc(cdouble *x, cdouble *r); +typedef void 
cfloatUnaryFunc(cfloat *x, cfloat *r); +typedef void clongdoubleUnaryFunc(clongdouble *x, clongdouble *r); +typedef void cdoubleBinaryFunc(cdouble *x, cdouble *y, cdouble *r); +typedef void cfloatBinaryFunc(cfloat *x, cfloat *y, cfloat *r); +typedef void clongdoubleBinaryFunc(clongdouble *x, clongdouble *y, + clongdouble *r); + +/*UFUNC_API*/ +static void +PyUFunc_F_F(char **args, intp *dimensions, intp *steps, void *func) +{ + cfloatUnaryFunc *f = (cfloatUnaryFunc *)func; + UNARY_LOOP { + cfloat in1 = *(cfloat *)ip1; + cfloat *out = (cfloat *)op1; + f(&in1, out); + } +} + +/*UFUNC_API*/ +static void +PyUFunc_F_F_As_D_D(char **args, intp *dimensions, intp *steps, void *func) +{ + cdoubleUnaryFunc *f = (cdoubleUnaryFunc *)func; + UNARY_LOOP { + const float *in1 = (float *)ip1; + cdouble tmp = {(double)(in1[0]),(double)in1[1]}; + cdouble out; + f(&tmp, &out); + ((float *)op1)[0] = (float)out.real; + ((float *)op1)[1] = (float)out.imag; + } +} + +/*UFUNC_API*/ +static void +PyUFunc_FF_F(char **args, intp *dimensions, intp *steps, void *func) +{ + cfloatBinaryFunc *f = (cfloatBinaryFunc *)func; + BINARY_LOOP { + cfloat in1 = *(cfloat *)ip1; + cfloat in2 = *(cfloat *)ip2; + cfloat *out = (cfloat *)op1; + f(&in1, &in2, out); + } +} + +/*UFUNC_API*/ +static void +PyUFunc_FF_F_As_DD_D(char **args, intp *dimensions, intp *steps, void *func) +{ + cdoubleBinaryFunc *f = (cdoubleBinaryFunc *)func; + BINARY_LOOP { + const float *in1 = (float *)ip1; + const float *in2 = (float *)ip2; + cdouble tmp1 = {(double)(in1[0]),(double)in1[1]}; + cdouble tmp2 = {(double)(in2[0]),(double)in2[1]}; + cdouble out; + f(&tmp1, &tmp2, &out); + ((float *)op1)[0] = (float)out.real; + ((float *)op1)[1] = (float)out.imag; + } +} + +/*UFUNC_API*/ +static void +PyUFunc_D_D(char **args, intp *dimensions, intp *steps, void *func) +{ + cdoubleUnaryFunc *f = (cdoubleUnaryFunc *)func; + UNARY_LOOP { + cdouble in1 = *(cdouble *)ip1; + cdouble *out = (cdouble *)op1; + f(&in1, out); + } +} + +/*UFUNC_API*/ +static void +PyUFunc_DD_D(char **args, intp *dimensions, intp *steps, void *func) +{ + cdoubleBinaryFunc *f = (cdoubleBinaryFunc *)func; + BINARY_LOOP { + cdouble in1 = *(cdouble *)ip1; + cdouble in2 = *(cdouble *)ip2; + cdouble *out = (cdouble *)op1; + f(&in1, &in2, out); + } +} + +/*UFUNC_API*/ +static void +PyUFunc_G_G(char **args, intp *dimensions, intp *steps, void *func) +{ + clongdoubleUnaryFunc *f = (clongdoubleUnaryFunc *)func; + UNARY_LOOP { + clongdouble in1 = *(clongdouble *)ip1; + clongdouble *out = (clongdouble *)op1; + f(&in1, out); + } +} + +/*UFUNC_API*/ +static void +PyUFunc_GG_G(char **args, intp *dimensions, intp *steps, void *func) +{ + clongdoubleBinaryFunc *f = (clongdoubleBinaryFunc *)func; + BINARY_LOOP { + clongdouble in1 = *(clongdouble *)ip1; + clongdouble in2 = *(clongdouble *)ip2; + clongdouble *out = (clongdouble *)op1; + f(&in1, &in2, out); + } +} + + +/****************************************************************************** + ** GENERIC OBJECT lOOPS ** + *****************************************************************************/ + +/*UFUNC_API*/ +static void +PyUFunc_O_O(char **args, intp *dimensions, intp *steps, void *func) +{ + unaryfunc f = (unaryfunc)func; + UNARY_LOOP { + PyObject *in1 = *(PyObject **)ip1; + PyObject **out = (PyObject **)op1; + PyObject *ret = f(in1); + if ((ret == NULL) || PyErr_Occurred()) { + return; + } + Py_XDECREF(*out); + *out = ret; + } +} + +/*UFUNC_API*/ +static void +PyUFunc_O_O_method(char **args, intp *dimensions, intp *steps, void *func) +{ + char *meth 
= (char *)func; + UNARY_LOOP { + PyObject *in1 = *(PyObject **)ip1; + PyObject **out = (PyObject **)op1; + PyObject *ret = PyObject_CallMethod(in1, meth, NULL); + if (ret == NULL) { + return; + } + Py_XDECREF(*out); + *out = ret; + } +} + +/*UFUNC_API*/ +static void +PyUFunc_OO_O(char **args, intp *dimensions, intp *steps, void *func) +{ + binaryfunc f = (binaryfunc)func; + BINARY_LOOP { + PyObject *in1 = *(PyObject **)ip1; + PyObject *in2 = *(PyObject **)ip2; + PyObject **out = (PyObject **)op1; + PyObject *ret = f(in1, in2); + if (PyErr_Occurred()) { + return; + } + Py_XDECREF(*out); + *out = ret; + } +} + +/*UFUNC_API*/ +static void +PyUFunc_OO_O_method(char **args, intp *dimensions, intp *steps, void *func) +{ + char *meth = (char *)func; + BINARY_LOOP { + PyObject *in1 = *(PyObject **)ip1; + PyObject *in2 = *(PyObject **)ip2; + PyObject **out = (PyObject **)op1; + PyObject *ret = PyObject_CallMethod(in1, meth, "(O)", in2); + if (ret == NULL) { + return; + } + Py_XDECREF(*out); + *out = ret; + } +} + +/* + * A general-purpose ufunc that deals with general-purpose Python callable. + * func is a structure with nin, nout, and a Python callable function + */ + +/*UFUNC_API*/ +static void +PyUFunc_On_Om(char **args, intp *dimensions, intp *steps, void *func) +{ + intp n = dimensions[0]; + PyUFunc_PyFuncData *data = (PyUFunc_PyFuncData *)func; + int nin = data->nin; + int nout = data->nout; + PyObject *tocall = data->callable; + char *ptrs[NPY_MAXARGS]; + PyObject *arglist, *result; + PyObject *in, **op; + intp i, j, ntot; + + ntot = nin+nout; + + for(j = 0; j < ntot; j++) { + ptrs[j] = args[j]; + } + for(i = 0; i < n; i++) { + arglist = PyTuple_New(nin); + if (arglist == NULL) { + return; + } + for(j = 0; j < nin; j++) { + in = *((PyObject **)ptrs[j]); + if (in == NULL) { + Py_DECREF(arglist); + return; + } + PyTuple_SET_ITEM(arglist, j, in); + Py_INCREF(in); + } + result = PyEval_CallObject(tocall, arglist); + Py_DECREF(arglist); + if (result == NULL) { + return; + } + if PyTuple_Check(result) { + if (nout != PyTuple_Size(result)) { + Py_DECREF(result); + return; + } + for(j = 0; j < nout; j++) { + op = (PyObject **)ptrs[j+nin]; + Py_XDECREF(*op); + *op = PyTuple_GET_ITEM(result, j); + Py_INCREF(*op); + } + Py_DECREF(result); + } + else { + op = (PyObject **)ptrs[nin]; + Py_XDECREF(*op); + *op = result; + } + for(j = 0; j < ntot; j++) { + ptrs[j] += steps[j]; + } + } +} + +/* + ***************************************************************************** + ** BOOLEAN LOOPS ** + ***************************************************************************** + */ + +#define BOOL_invert BOOL_logical_not +#define BOOL_negative BOOL_logical_not +#define BOOL_add BOOL_logical_or +#define BOOL_bitwise_and BOOL_logical_and +#define BOOL_bitwise_or BOOL_logical_or +#define BOOL_bitwise_xor BOOL_logical_xor +#define BOOL_multiply BOOL_logical_and +#define BOOL_subtract BOOL_logical_xor +#define BOOL_fmax BOOL_maximum +#define BOOL_fmin BOOL_minimum + +/**begin repeat + * #kind = equal, not_equal, greater, greater_equal, less, less_equal, + * logical_and, logical_or# + * #OP = ==, !=, >, >=, <, <=, &&, ||# + **/ + +static void +BOOL_ at kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +{ + BINARY_LOOP { + Bool in1 = *((Bool *)ip1) != 0; + Bool in2 = *((Bool *)ip2) != 0; + *((Bool *)op1)= in1 @OP@ in2; + } +} +/**end repeat**/ + +static void +BOOL_logical_xor(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +{ + BINARY_LOOP { + Bool in1 = *((Bool *)ip1) != 0; 
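The begin/end repeat markers throughout this file are handled by numpy's .src template expander before compilation; every kind/OP pair listed in a block's header yields one concrete loop function. As a rough illustration, the equal/== pair of the boolean comparison block above should expand to something like the following, where BINARY_LOOP, Bool and intp are the definitions from earlier in this file.

    static void
    BOOL_equal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func))
    {
        BINARY_LOOP {
            /* normalise to 0/1 before comparing */
            Bool in1 = *((Bool *)ip1) != 0;
            Bool in2 = *((Bool *)ip2) != 0;
            *((Bool *)op1) = in1 == in2;
        }
    }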
+ Bool in2 = *((Bool *)ip2) != 0; + *((Bool *)op1)= (in1 && !in2) || (!in1 && in2); + } +} + +/**begin repeat + * #kind = maximum, minimum# + * #OP = >, <# + **/ +static void +BOOL_ at kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +{ + BINARY_LOOP { + Bool in1 = *((Bool *)ip1) != 0; + Bool in2 = *((Bool *)ip2) != 0; + *((Bool *)op1) = (in1 @OP@ in2) ? in1 : in2; + } +} +/**end repeat**/ + +/**begin repeat + * #kind = absolute, logical_not# + * #OP = !=, ==# + **/ +static void +BOOL_ at kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +{ + UNARY_LOOP { + Bool in1 = *(Bool *)ip1; + *((Bool *)op1) = in1 @OP@ 0; + } +} +/**end repeat**/ + +static void +BOOL_ones_like(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)) +{ + OUTPUT_LOOP { + *((Bool *)op1) = 1; + } +} + + +/* + ***************************************************************************** + ** INTEGER LOOPS + ***************************************************************************** + */ + +/**begin repeat + * #type = byte, short, int, long, longlong# + * #TYPE = BYTE, SHORT, INT, LONG, LONGLONG# + * #ftype = float, float, double, double, double# + */ + +/**begin repeat1 + * both signed and unsigned integer types + * #s = , u# + * #S = , U# + */ + +#define @S@@TYPE at _floor_divide @S@@TYPE at _divide +#define @S@@TYPE at _fmax @S@@TYPE at _maximum +#define @S@@TYPE at _fmin @S@@TYPE at _minimum + +static void + at S@@TYPE at _ones_like(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)) +{ + OUTPUT_LOOP { + *((@s@@type@ *)op1) = 1; + } +} + +static void + at S@@TYPE at _square(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)) +{ + UNARY_LOOP { + const @s@@type@ in1 = *(@s@@type@ *)ip1; + *((@s@@type@ *)op1) = in1*in1; + } +} + +static void + at S@@TYPE at _reciprocal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)) +{ + UNARY_LOOP { + const @s@@type@ in1 = *(@s@@type@ *)ip1; + *((@s@@type@ *)op1) = (@s@@type@)(1.0/in1); + } +} + +static void + at S@@TYPE at _conjugate(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +{ + UNARY_LOOP { + const @s@@type@ in1 = *(@s@@type@ *)ip1; + *((@s@@type@ *)op1) = in1; + } +} + +static void + at S@@TYPE at _negative(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +{ + UNARY_LOOP { + const @s@@type@ in1 = *(@s@@type@ *)ip1; + *((@s@@type@ *)op1) = (@s@@type@)(-(@type@)in1); + } +} + +static void + at S@@TYPE at _logical_not(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +{ + UNARY_LOOP { + const @s@@type@ in1 = *(@s@@type@ *)ip1; + *((Bool *)op1) = !in1; + } +} + +static void + at S@@TYPE at _invert(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +{ + UNARY_LOOP { + const @s@@type@ in1 = *(@s@@type@ *)ip1; + *((@s@@type@ *)op1) = ~in1; + } +} + +/**begin repeat2 + * Arithmetic + * #kind = add, subtract, multiply, bitwise_and, bitwise_or, bitwise_xor, + * left_shift, right_shift# + * #OP = +, -,*, &, |, ^, <<, >># + */ +static void + at S@@TYPE at _@kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +{ + BINARY_LOOP { + const @s@@type@ in1 = *(@s@@type@ *)ip1; + const @s@@type@ in2 = *(@s@@type@ *)ip2; + *((@s@@type@ *)op1) = in1 @OP@ in2; + } +} +/**end repeat2**/ + +/**begin repeat2 + * #kind = equal, not_equal, greater, greater_equal, less, less_equal, + * logical_and, logical_or# + * #OP = ==, !=, >, >=, <, <=, &&, ||# + */ +static void + at S@@TYPE at _@kind@(char 
**args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +{ + BINARY_LOOP { + const @s@@type@ in1 = *(@s@@type@ *)ip1; + const @s@@type@ in2 = *(@s@@type@ *)ip2; + *((Bool *)op1) = in1 @OP@ in2; + } +} +/**end repeat2**/ + +static void + at S@@TYPE at _logical_xor(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +{ + BINARY_LOOP { + const @s@@type@ in1 = *(@s@@type@ *)ip1; + const @s@@type@ in2 = *(@s@@type@ *)ip2; + *((Bool *)op1)= (in1 && !in2) || (!in1 && in2); + } +} + +/**begin repeat2 + * #kind = maximum, minimum# + * #OP = >, <# + **/ +static void + at S@@TYPE at _@kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +{ + BINARY_LOOP { + const @s@@type@ in1 = *(@s@@type@ *)ip1; + const @s@@type@ in2 = *(@s@@type@ *)ip2; + *((@s@@type@ *)op1) = (in1 @OP@ in2) ? in1 : in2; + } +} +/**end repeat2**/ + +static void + at S@@TYPE at _true_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +{ + BINARY_LOOP { + const @s@@type@ in1 = *(@s@@type@ *)ip1; + const @s@@type@ in2 = *(@s@@type@ *)ip2; + if (in2 == 0) { + generate_divbyzero_error(); + *((@ftype@ *)op1) = 0; + } + else { + *((@ftype@ *)op1) = (@ftype@)in1 / (@ftype@)in2; + } + } +} + +static void + at S@@TYPE at _power(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +{ + BINARY_LOOP { + const @ftype@ in1 = (@ftype@)*(@s@@type@ *)ip1; + const @ftype@ in2 = (@ftype@)*(@s@@type@ *)ip2; + *((@s@@type@ *)op1) = (@s@@type@) pow(in1, in2); + } +} + +static void + at S@@TYPE at _fmod(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +{ + BINARY_LOOP { + const @s@@type@ in1 = *(@s@@type@ *)ip1; + const @s@@type@ in2 = *(@s@@type@ *)ip2; + if (in2 == 0) { + generate_divbyzero_error(); + *((@s@@type@ *)op1) = 0; + } + else { + *((@s@@type@ *)op1)= in1 % in2; + } + + } +} + +/**end repeat1**/ + +static void +U at TYPE@_absolute(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +{ + UNARY_LOOP { + const u at type@ in1 = *(u at type@ *)ip1; + *((u at type@ *)op1) = in1; + } +} + +static void + at TYPE@_absolute(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +{ + UNARY_LOOP { + const @type@ in1 = *(@type@ *)ip1; + *((@type@ *)op1) = (in1 >= 0) ? in1 : -in1; + } +} + +static void +U at TYPE@_sign(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +{ + UNARY_LOOP { + const u at type@ in1 = *(u at type@ *)ip1; + *((u at type@ *)op1) = in1 > 0 ? 1 : 0; + } +} + +static void + at TYPE@_sign(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +{ + UNARY_LOOP { + const @type@ in1 = *(@type@ *)ip1; + *((@type@ *)op1) = in1 > 0 ? 1 : (in1 < 0 ? 
-1 : 0); + } +} + +static void + at TYPE@_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +{ + BINARY_LOOP { + const @type@ in1 = *(@type@ *)ip1; + const @type@ in2 = *(@type@ *)ip2; + if (in2 == 0) { + generate_divbyzero_error(); + *((@type@ *)op1) = 0; + } + else if (((in1 > 0) != (in2 > 0)) && (in1 % in2 != 0)) { + *((@type@ *)op1) = in1/in2 - 1; + } + else { + *((@type@ *)op1) = in1/in2; + } + } +} + +static void +U at TYPE@_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +{ + BINARY_LOOP { + const u at type@ in1 = *(u at type@ *)ip1; + const u at type@ in2 = *(u at type@ *)ip2; + if (in2 == 0) { + generate_divbyzero_error(); + *((u at type@ *)op1) = 0; + } + else { + *((u at type@ *)op1)= in1/in2; + } + } +} + +static void + at TYPE@_remainder(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +{ + BINARY_LOOP { + const @type@ in1 = *(@type@ *)ip1; + const @type@ in2 = *(@type@ *)ip2; + if (in2 == 0) { + generate_divbyzero_error(); + *((@type@ *)op1) = 0; + } + else { + /* handle mixed case the way Python does */ + const @type@ rem = in1 % in2; + if ((in1 > 0) == (in2 > 0) || rem == 0) { + *((@type@ *)op1) = rem; + } + else { + *((@type@ *)op1) = rem + in2; + } + } + } +} + +static void +U at TYPE@_remainder(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +{ + BINARY_LOOP { + const u at type@ in1 = *(u at type@ *)ip1; + const u at type@ in2 = *(u at type@ *)ip2; + if (in2 == 0) { + generate_divbyzero_error(); + *((@type@ *)op1) = 0; + } + else { + *((@type@ *)op1) = in1 % in2; + } + } +} + +/**end repeat**/ + +/* + ***************************************************************************** + ** FLOAT LOOPS ** + ***************************************************************************** + */ + + +/**begin repeat + * Float types + * #type = float, double, longdouble# + * #TYPE = FLOAT, DOUBLE, LONGDOUBLE# + * #c = f, , l# + * #C = F, , L# + */ + + +/**begin repeat1 + * Arithmetic + * # kind = add, subtract, multiply, divide# + * # OP = +, -, *, /# + */ +static void + at TYPE@_ at kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +{ + BINARY_LOOP { + const @type@ in1 = *(@type@ *)ip1; + const @type@ in2 = *(@type@ *)ip2; + *((@type@ *)op1) = in1 @OP@ in2; + } +} +/**end repeat1**/ + +/**begin repeat1 + * #kind = equal, not_equal, less, less_equal, greater, greater_equal, + * logical_and, logical_or# + * #OP = ==, !=, <, <=, >, >=, &&, ||# + */ +static void + at TYPE@_ at kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +{ + BINARY_LOOP { + const @type@ in1 = *(@type@ *)ip1; + const @type@ in2 = *(@type@ *)ip2; + *((Bool *)op1) = in1 @OP@ in2; + } +} +/**end repeat1**/ + +static void + at TYPE@_logical_xor(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +{ + BINARY_LOOP { + const @type@ in1 = *(@type@ *)ip1; + const @type@ in2 = *(@type@ *)ip2; + *((Bool *)op1)= (in1 && !in2) || (!in1 && in2); + } +} + +static void + at TYPE@_logical_not(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +{ + UNARY_LOOP { + const @type@ in1 = *(@type@ *)ip1; + *((Bool *)op1) = !in1; + } +} + +/**begin repeat1 + * #kind = isnan, isinf, isfinite, signbit# + * #func = isnan, isinf, isfinite, signbit# + **/ +static void + at TYPE@_ at kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +{ + UNARY_LOOP { + const @type@ in1 = *(@type@ *)ip1; + *((Bool *)op1) = @func@(in1) != 0; + } +} +/**end repeat1**/ + 
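The signed-integer divide and remainder loops above deliberately follow Python's floor-division semantics rather than C's truncate-toward-zero: when the operands differ in sign and the division is inexact, the quotient is dropped by one and the remainder is shifted by the divisor, so the remainder ends up with the divisor's sign. A standalone sketch for plain C ints follows; the helper names are illustrative only and a nonzero divisor is assumed, since the real loops raise the divide-by-zero error before reaching this point.

    /* C99 truncates toward zero: -7 / 3 == -2 and -7 % 3 == -1.
     * The adjustments below reproduce Python's -7 // 3 == -3 and
     * -7 % 3 == 2 instead. */
    static int floor_div_sketch(int a, int b)
    {
        int q = a / b;
        if (((a > 0) != (b > 0)) && (a % b != 0)) {
            q -= 1;                     /* round toward -infinity */
        }
        return q;
    }

    static int py_mod_sketch(int a, int b)
    {
        int r = a % b;
        if (r != 0 && ((a > 0) != (b > 0))) {
            r += b;                     /* take the divisor's sign */
        }
        return r;
    }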
+/**begin repeat1 + * #kind = maximum, minimum# + * #OP = >=, <=# + **/ +static void + at TYPE@_ at kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +{ + /* */ + BINARY_LOOP { + const @type@ in1 = *(@type@ *)ip1; + const @type@ in2 = *(@type@ *)ip2; + *((@type@ *)op1) = (in1 @OP@ in2 || isnan(in1)) ? in1 : in2; + } +} +/**end repeat1**/ + +/**begin repeat1 + * #kind = fmax, fmin# + * #OP = >=, <=# + **/ +static void + at TYPE@_ at kind@(char **args, intp *dimensions, intp *steps, void *func) +{ + /* */ + BINARY_LOOP { + const @type@ in1 = *(@type@ *)ip1; + const @type@ in2 = *(@type@ *)ip2; + *((@type@ *)op1) = (in1 @OP@ in2 || isnan(in2)) ? in1 : in2; + } +} +/**end repeat1**/ + +static void + at TYPE@_floor_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +{ + BINARY_LOOP { + const @type@ in1 = *(@type@ *)ip1; + const @type@ in2 = *(@type@ *)ip2; + *((@type@ *)op1) = floor at c@(in1/in2); + } +} + +static void + at TYPE@_remainder(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +{ + BINARY_LOOP { + const @type@ in1 = *(@type@ *)ip1; + const @type@ in2 = *(@type@ *)ip2; + const @type@ res = fmod at c@(in1,in2); + if (res && ((in2 < 0) != (res < 0))) { + *((@type@ *)op1) = res + in2; + } + else { + *((@type@ *)op1) = res; + } + } +} + +static void + at TYPE@_square(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)) +{ + UNARY_LOOP { + const @type@ in1 = *(@type@ *)ip1; + *((@type@ *)op1) = in1*in1; + } +} + +static void + at TYPE@_reciprocal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)) +{ + UNARY_LOOP { + const @type@ in1 = *(@type@ *)ip1; + *((@type@ *)op1) = 1/in1; + } +} + +static void + at TYPE@_ones_like(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)) +{ + OUTPUT_LOOP { + *((@type@ *)op1) = 1; + } +} + +static void + at TYPE@_conjugate(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +{ + UNARY_LOOP { + const @type@ in1 = *(@type@ *)ip1; + *((@type@ *)op1) = in1; + } +} + +static void + at TYPE@_absolute(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +{ + UNARY_LOOP { + const @type@ in1 = *(@type@ *)ip1; + const @type@ tmp = in1 > 0 ? in1 : -in1; + /* add 0 to clear -0.0 */ + *((@type@ *)op1) = tmp + 0; + } +} + +static void + at TYPE@_negative(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +{ + UNARY_LOOP { + const @type@ in1 = *(@type@ *)ip1; + *((@type@ *)op1) = -in1; + } +} + +static void + at TYPE@_sign(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +{ + /* Sign of nan is currently 0 */ + UNARY_LOOP { + const @type@ in1 = *(@type@ *)ip1; + *((@type@ *)op1) = in1 > 0 ? 1 : (in1 < 0 ? 
-1 : 0); + } +} + +static void + at TYPE@_modf(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +{ + UNARY_LOOP_TWO_OUT { + const @type@ in1 = *(@type@ *)ip1; + *((@type@ *)op1) = modf at c@(in1, (@type@ *)op2); + } +} + +#ifdef HAVE_FREXP at C@ +static void + at TYPE@_frexp(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +{ + UNARY_LOOP_TWO_OUT { + const @type@ in1 = *(@type@ *)ip1; + *((@type@ *)op1) = frexp at c@(in1, (int *)op2); + } +} +#endif + +#ifdef HAVE_LDEXP at C@ +static void + at TYPE@_ldexp(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +{ + BINARY_LOOP { + const @type@ in1 = *(@type@ *)ip1; + const int in2 = *(int *)ip2; + *((@type@ *)op1) = ldexp at c@(in1, in2); + } +} +#endif + +#define @TYPE at _true_divide @TYPE at _divide + +/**end repeat**/ + + +/* + ***************************************************************************** + ** COMPLEX LOOPS ** + ***************************************************************************** + */ + +#define CGE(xr,xi,yr,yi) (xr > yr || (xr == yr && xi >= yi)) +#define CLE(xr,xi,yr,yi) (xr < yr || (xr == yr && xi <= yi)) +#define CGT(xr,xi,yr,yi) (xr > yr || (xr == yr && xi > yi)) +#define CLT(xr,xi,yr,yi) (xr < yr || (xr == yr && xi < yi)) +#define CEQ(xr,xi,yr,yi) (xr == yr && xi == yi) +#define CNE(xr,xi,yr,yi) (xr != yr || xi != yi) + +/**begin repeat + * complex types + * #type = float, double, longdouble# + * #TYPE = FLOAT, DOUBLE, LONGDOUBLE# + * #c = f, , l# + */ + +/**begin repeat1 + * arithmetic + * #kind = add, subtract# + * #OP = +, -# + */ +static void +C at TYPE@_ at kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +{ + BINARY_LOOP { + const @type@ in1r = ((@type@ *)ip1)[0]; + const @type@ in1i = ((@type@ *)ip1)[1]; + const @type@ in2r = ((@type@ *)ip2)[0]; + const @type@ in2i = ((@type@ *)ip2)[1]; + ((@type@ *)op1)[0] = in1r @OP@ in2r; + ((@type@ *)op1)[1] = in1i @OP@ in2i; + } +} +/**end repeat1**/ + +static void +C at TYPE@_multiply(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +{ + BINARY_LOOP { + const @type@ in1r = ((@type@ *)ip1)[0]; + const @type@ in1i = ((@type@ *)ip1)[1]; + const @type@ in2r = ((@type@ *)ip2)[0]; + const @type@ in2i = ((@type@ *)ip2)[1]; + ((@type@ *)op1)[0] = in1r*in2r - in1i*in2i; + ((@type@ *)op1)[1] = in1r*in2i + in1i*in2r; + } +} + +static void +C at TYPE@_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +{ + BINARY_LOOP { + const @type@ in1r = ((@type@ *)ip1)[0]; + const @type@ in1i = ((@type@ *)ip1)[1]; + const @type@ in2r = ((@type@ *)ip2)[0]; + const @type@ in2i = ((@type@ *)ip2)[1]; + @type@ d = in2r*in2r + in2i*in2i; + ((@type@ *)op1)[0] = (in1r*in2r + in1i*in2i)/d; + ((@type@ *)op1)[1] = (in1i*in2r - in1r*in2i)/d; + } +} + +static void +C at TYPE@_floor_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +{ + BINARY_LOOP { + const @type@ in1r = ((@type@ *)ip1)[0]; + const @type@ in1i = ((@type@ *)ip1)[1]; + const @type@ in2r = ((@type@ *)ip2)[0]; + const @type@ in2i = ((@type@ *)ip2)[1]; + @type@ d = in2r*in2r + in2i*in2i; + ((@type@ *)op1)[0] = floor at c@((in1r*in2r + in1i*in2i)/d); + ((@type@ *)op1)[1] = 0; + } +} + +/**begin repeat1 + * #kind= greater, greater_equal, less, less_equal, equal, not_equal# + * #OP = CGT, CGE, CLT, CLE, CEQ, CNE# + */ +static void +C at TYPE@_ at kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +{ + BINARY_LOOP { + const @type@ in1r = ((@type@ *)ip1)[0]; + const @type@ 
in1i = ((@type@ *)ip1)[1]; + const @type@ in2r = ((@type@ *)ip2)[0]; + const @type@ in2i = ((@type@ *)ip2)[1]; + *((Bool *)op1) = @OP@(in1r,in1i,in2r,in2i); + } +} +/**end repeat1**/ + +/**begin repeat1 + #kind = logical_and, logical_or# + #OP1 = ||, ||# + #OP2 = &&, ||# +*/ +static void +C at TYPE@_ at kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +{ + BINARY_LOOP { + const @type@ in1r = ((@type@ *)ip1)[0]; + const @type@ in1i = ((@type@ *)ip1)[1]; + const @type@ in2r = ((@type@ *)ip2)[0]; + const @type@ in2i = ((@type@ *)ip2)[1]; + *((Bool *)op1) = (in1r @OP1@ in1i) @OP2@ (in2r @OP1@ in2i); + } +} +/**end repeat1**/ + +static void +C at TYPE@_logical_xor(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +{ + BINARY_LOOP { + const @type@ in1r = ((@type@ *)ip1)[0]; + const @type@ in1i = ((@type@ *)ip1)[1]; + const @type@ in2r = ((@type@ *)ip2)[0]; + const @type@ in2i = ((@type@ *)ip2)[1]; + const Bool tmp1 = (in1r || in1i); + const Bool tmp2 = (in2r || in2i); + *((Bool *)op1) = (tmp1 && !tmp2) || (!tmp1 && tmp2); + } +} + +static void +C at TYPE@_logical_not(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +{ + UNARY_LOOP { + const @type@ in1r = ((@type@ *)ip1)[0]; + const @type@ in1i = ((@type@ *)ip1)[1]; + *((Bool *)op1) = !(in1r || in1i); + } +} + +/**begin repeat1 + * #kind = isnan, isinf, isfinite# + * #func = isnan, isinf, isfinite# + * #OP = ||, ||, &&# + **/ +static void +C at TYPE@_ at kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +{ + UNARY_LOOP { + const @type@ in1r = ((@type@ *)ip1)[0]; + const @type@ in1i = ((@type@ *)ip1)[1]; + *((Bool *)op1) = @func@(in1r) @OP@ @func@(in1i); + } +} +/**end repeat1**/ + +static void +C at TYPE@_square(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)) +{ + UNARY_LOOP { + const @type@ in1r = ((@type@ *)ip1)[0]; + const @type@ in1i = ((@type@ *)ip1)[1]; + ((@type@ *)op1)[0] = in1r*in1r - in1i*in1i; + ((@type@ *)op1)[1] = in1r*in1i + in1i*in1r; + } +} + +static void +C at TYPE@_reciprocal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)) +{ + UNARY_LOOP { + const @type@ in1r = ((@type@ *)ip1)[0]; + const @type@ in1i = ((@type@ *)ip1)[1]; + if (fabs at c@(in1i) <= fabs at c@(in1r)) { + const @type@ r = in1i/in1r; + const @type@ d = in1r + in1i*r; + ((@type@ *)op1)[0] = 1/d; + ((@type@ *)op1)[1] = -r/d; + } else { + const @type@ r = in1r/in1i; + const @type@ d = in1r*r + in1i; + ((@type@ *)op1)[0] = r/d; + ((@type@ *)op1)[1] = -1/d; + } + } +} + +static void +C at TYPE@_ones_like(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)) +{ + OUTPUT_LOOP { + ((@type@ *)op1)[0] = 1; + ((@type@ *)op1)[1] = 0; + } +} + +static void +C at TYPE@_conjugate(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) { + UNARY_LOOP { + const @type@ in1r = ((@type@ *)ip1)[0]; + const @type@ in1i = ((@type@ *)ip1)[1]; + ((@type@ *)op1)[0] = in1r; + ((@type@ *)op1)[1] = -in1i; + } +} + +static void +C at TYPE@_absolute(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +{ + UNARY_LOOP { + const @type@ in1r = ((@type@ *)ip1)[0]; + const @type@ in1i = ((@type@ *)ip1)[1]; + *((@type@ *)op1) = sqrt at c@(in1r*in1r + in1i*in1i); + } +} + +static void +C at TYPE@_sign(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +{ + /* fixme: sign of nan is currently 0 */ + UNARY_LOOP { + const @type@ in1r = ((@type@ *)ip1)[0]; + const @type@ in1i = ((@type@ *)ip1)[1]; + ((@type@ *)op1)[0] = 
CGT(in1r, in1i, 0, 0) ? 1 : + (CLT(in1r, in1i, 0, 0) ? -1 : 0); + ((@type@ *)op1)[1] = 0; + } +} + +/**begin repeat1 + * #kind = maximum, minimum# + * #OP = CGE, CLE# + */ +static void +C at TYPE@_ at kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +{ + BINARY_LOOP { + const @type@ in1r = ((@type@ *)ip1)[0]; + const @type@ in1i = ((@type@ *)ip1)[1]; + const @type@ in2r = ((@type@ *)ip2)[0]; + const @type@ in2i = ((@type@ *)ip2)[1]; + if (@OP@(in1r, in1i, in2r, in2i) || isnan(in1r) || isnan(in1i)) { + ((@type@ *)op1)[0] = in1r; + ((@type@ *)op1)[1] = in1i; + } + else { + ((@type@ *)op1)[0] = in2r; + ((@type@ *)op1)[1] = in2i; + } + } +} +/**end repeat1**/ + +/**begin repeat1 + * #kind = fmax, fmin# + * #OP = CGE, CLE# + */ +static void +C at TYPE@_ at kind@(char **args, intp *dimensions, intp *steps, void *func) +{ + BINARY_LOOP { + const @type@ in1r = ((@type@ *)ip1)[0]; + const @type@ in1i = ((@type@ *)ip1)[1]; + const @type@ in2r = ((@type@ *)ip2)[0]; + const @type@ in2i = ((@type@ *)ip2)[1]; + if (@OP@(in1r, in1i, in2r, in2i) || isnan(in2r) || isnan(in2i)) { + ((@type@ *)op1)[0] = in1r; + ((@type@ *)op1)[1] = in1i; + } + else { + ((@type@ *)op1)[0] = in2r; + ((@type@ *)op1)[1] = in2i; + } + } +} +/**end repeat1**/ + +#define C at TYPE@_true_divide C at TYPE@_divide + +/**end repeat**/ + +#undef CGE +#undef CLE +#undef CGT +#undef CLT +#undef CEQ +#undef CNE + +/* + ***************************************************************************** + ** OBJECT LOOPS ** + ***************************************************************************** + */ + +/**begin repeat + * #kind = equal, not_equal, greater, greater_equal, less, less_equal# + * #OP = EQ, NE, GT, GE, LT, LE# + */ +static void +OBJECT_ at kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) { + BINARY_LOOP { + PyObject *in1 = *(PyObject **)ip1; + PyObject *in2 = *(PyObject **)ip2; + int ret = PyObject_RichCompareBool(in1, in2, Py_ at OP@); + if (ret == -1) { + return; + } + *((Bool *)op1) = (Bool)ret; + } +} +/**end repeat**/ + +static void +OBJECT_sign(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) +{ + PyObject *zero = PyInt_FromLong(0); + UNARY_LOOP { + PyObject *in1 = *(PyObject **)ip1; + PyObject **out = (PyObject **)op1; + PyObject *ret = PyInt_FromLong(PyObject_Compare(in1, zero)); + if (PyErr_Occurred()) { + return; + } + Py_XDECREF(*out); + *out = ret; + } + Py_DECREF(zero); +} + +/* + ***************************************************************************** + ** END LOOPS ** + ***************************************************************************** + */ + + Modified: trunk/numpy/core/src/umathmodule.c.src =================================================================== --- trunk/numpy/core/src/umathmodule.c.src 2008-11-22 01:28:52 UTC (rev 6089) +++ trunk/numpy/core/src/umathmodule.c.src 2008-11-22 04:25:21 UTC (rev 6090) @@ -9,1938 +9,34 @@ ** INCLUDES ** ***************************************************************************** */ +#define _UMATHMODULE /* needed in one of the numpy include files */ +#include #include "Python.h" #include "numpy/noprefix.h" -#define _UMATHMODULE #include "numpy/ufuncobject.h" #include "abstract.h" #include "config.h" -#include -#ifndef M_PI -#define M_PI 3.14159265358979323846264338328 -#endif - -#include "umath_funcs_c99.inc" - /* ***************************************************************************** - ** FLOAT FUNCTIONS ** + ** INCLUDE GENERATED CODE ** 
***************************************************************************** */ +#include "umath_funcs_c99.inc" +#include "umath_funcs.inc" +#include "umath_loops.inc" +#include "umath_ufunc_object.inc" +#include "__umath_generated.c" +#include "__ufunc_api.c" -/**begin repeat - * #type = float, double, longdouble# - * #c = f, ,l# - * #C = F, ,L# - */ -/* fixme: need more precision for LOG2 and INVLOG2 */ - -#define PI 3.14159265358979323846264338328 at c@ -#define LOG2 0.69314718055994530943 at c@ -#define INVLOG2 1.4426950408889634074 at c@ -#define degrees at c@ rad2deg at c@ -#define radians at c@ deg2rad at c@ - -static @type@ -rad2deg at c@(@type@ x) { - return x*(180.0 at c@/PI); -} - -static @type@ -deg2rad at c@(@type@ x) { - return x*(PI/180.0 at c@); -} - -static @type@ -log2_1p at c@(@type@ x) -{ - @type@ u = 1 + x; - if (u == 1) { - return INVLOG2*x; - } else { - return log2 at c@(u) * x / (u - 1); - } -} - -static @type@ -exp2_1m at c@(@type@ x) -{ - @type@ u = exp at c@(x); - if (u == 1.0) { - return LOG2*x; - } else if (u - 1 == -1) { - return -LOG2; - } else { - return (u - 1) * x/log2 at c@(u); - } -} - -static @type@ -logaddexp at c@(@type@ x, @type@ y) -{ - const @type@ tmp = x - y; - if (tmp > 0) { - return x + log1p at c@(exp at c@(-tmp)); - } - else { - return y + log1p at c@(exp at c@(tmp)); - } -} - -static @type@ -logaddexp2 at c@(@type@ x, @type@ y) -{ - const @type@ tmp = x - y; - if (tmp > 0) { - return x + log2_1p at c@(exp2 at c@(-tmp)); - } - else { - return y + log2_1p at c@(exp2 at c@(tmp)); - } -} - -#undef PI -#undef LOG2 -#undef INVLOG2 - -/**end repeat**/ - /* ***************************************************************************** - ** PYTHON OBJECT FUNCTIONS ** - ***************************************************************************** - */ - -static PyObject * -Py_square(PyObject *o) -{ - return PyNumber_Multiply(o, o); -} - -static PyObject * -Py_get_one(PyObject *NPY_UNUSED(o)) -{ - return PyInt_FromLong(1); -} - -static PyObject * -Py_reciprocal(PyObject *o) -{ - PyObject *one = PyInt_FromLong(1); - PyObject *result; - - if (!one) { - return NULL; - } - result = PyNumber_Divide(one, o); - Py_DECREF(one); - return result; -} - -/* - * Define numpy version of PyNumber_Power as binary function. 
- */ -static PyObject * -npy_ObjectPower(PyObject *x, PyObject *y) -{ - return PyNumber_Power(x, y, Py_None); -} - -/**begin repeat - * #Kind = Max, Min# - * #OP = >=, <=# - */ -static PyObject * -npy_Object at Kind@(PyObject *i1, PyObject *i2) -{ - PyObject *result; - int cmp; - - if (PyObject_Cmp(i1, i2, &cmp) < 0) { - return NULL; - } - if (cmp @OP@ 0) { - result = i1; - } - else { - result = i2; - } - Py_INCREF(result); - return result; -} -/**end repeat**/ - - -/* - ***************************************************************************** - ** COMPLEX FUNCTIONS ** - ***************************************************************************** - */ - - -/* - * Don't pass structures between functions (only pointers) because how - * structures are passed is compiler dependent and could cause segfaults if - * umath_ufunc_object.inc is compiled with a different compiler than an - * extension that makes use of the UFUNC API - */ - -/**begin repeat - - #typ=float, double, longdouble# - #c=f,,l# -*/ - -/* constants */ -static c at typ@ nc_1 at c@ = {1., 0.}; -static c at typ@ nc_half at c@ = {0.5, 0.}; -static c at typ@ nc_i at c@ = {0., 1.}; -static c at typ@ nc_i2 at c@ = {0., 0.5}; -/* - * static c at typ@ nc_mi at c@ = {0., -1.}; - * static c at typ@ nc_pi2 at c@ = {M_PI/2., 0.}; - */ - - -static void -nc_sum at c@(c at typ@ *a, c at typ@ *b, c at typ@ *r) -{ - r->real = a->real + b->real; - r->imag = a->imag + b->imag; - return; -} - -static void -nc_diff at c@(c at typ@ *a, c at typ@ *b, c at typ@ *r) -{ - r->real = a->real - b->real; - r->imag = a->imag - b->imag; - return; -} - -static void -nc_neg at c@(c at typ@ *a, c at typ@ *r) -{ - r->real = -a->real; - r->imag = -a->imag; - return; -} - -static void -nc_prod at c@(c at typ@ *a, c at typ@ *b, c at typ@ *r) -{ - @typ@ ar=a->real, br=b->real, ai=a->imag, bi=b->imag; - r->real = ar*br - ai*bi; - r->imag = ar*bi + ai*br; - return; -} - -static void -nc_quot at c@(c at typ@ *a, c at typ@ *b, c at typ@ *r) -{ - - @typ@ ar=a->real, br=b->real, ai=a->imag, bi=b->imag; - @typ@ d = br*br + bi*bi; - r->real = (ar*br + ai*bi)/d; - r->imag = (ai*br - ar*bi)/d; - return; -} - -static void -nc_sqrt at c@(c at typ@ *x, c at typ@ *r) -{ - @typ@ s,d; - if (x->real == 0. && x->imag == 0.) 
- *r = *x; - else { - s = sqrt at c@((fabs at c@(x->real) + hypot at c@(x->real,x->imag))/2); - d = x->imag/(2*s); - if (x->real > 0) { - r->real = s; - r->imag = d; - } - else if (x->imag >= 0) { - r->real = d; - r->imag = s; - } - else { - r->real = -d; - r->imag = -s; - } - } - return; -} - -static void -nc_rint at c@(c at typ@ *x, c at typ@ *r) -{ - r->real = rint at c@(x->real); - r->imag = rint at c@(x->imag); -} - -static void -nc_log at c@(c at typ@ *x, c at typ@ *r) -{ - @typ@ l = hypot at c@(x->real,x->imag); - r->imag = atan2 at c@(x->imag, x->real); - r->real = log at c@(l); - return; -} - -static void -nc_log1p at c@(c at typ@ *x, c at typ@ *r) -{ - @typ@ l = hypot at c@(x->real + 1,x->imag); - r->imag = atan2 at c@(x->imag, x->real + 1); - r->real = log at c@(l); - return; -} - -static void -nc_exp at c@(c at typ@ *x, c at typ@ *r) -{ - @typ@ a = exp at c@(x->real); - r->real = a*cos at c@(x->imag); - r->imag = a*sin at c@(x->imag); - return; -} - -static void -nc_expm1 at c@(c at typ@ *x, c at typ@ *r) -{ - @typ@ a = exp at c@(x->real); - r->real = a*cos at c@(x->imag) - 1; - r->imag = a*sin at c@(x->imag); - return; -} - -static void -nc_pow at c@(c at typ@ *a, c at typ@ *b, c at typ@ *r) -{ - intp n; - @typ@ ar=a->real, br=b->real, ai=a->imag, bi=b->imag; - - if (br == 0. && bi == 0.) { - r->real = 1.; - r->imag = 0.; - return; - } - if (ar == 0. && ai == 0.) { - r->real = 0.; - r->imag = 0.; - return; - } - if (bi == 0 && (n=(intp)br) == br) { - if (n > -100 && n < 100) { - c at typ@ p, aa; - intp mask = 1; - if (n < 0) n = -n; - aa = nc_1 at c@; - p.real = ar; p.imag = ai; - while (1) { - if (n & mask) - nc_prod at c@(&aa,&p,&aa); - mask <<= 1; - if (n < mask || mask <= 0) break; - nc_prod at c@(&p,&p,&p); - } - r->real = aa.real; r->imag = aa.imag; - if (br < 0) nc_quot at c@(&nc_1 at c@, r, r); - return; - } - } - /* - * complexobect.c uses an inline version of this formula - * investigate whether this had better performance or accuracy - */ - nc_log at c@(a, r); - nc_prod at c@(r, b, r); - nc_exp at c@(r, r); - return; -} - - -static void -nc_prodi at c@(c at typ@ *x, c at typ@ *r) -{ - @typ@ xr = x->real; - r->real = -x->imag; - r->imag = xr; - return; -} - - -static void -nc_acos at c@(c at typ@ *x, c at typ@ *r) -{ - /* - * return nc_neg(nc_prodi(nc_log(nc_sum(x,nc_prod(nc_i, - * nc_sqrt(nc_diff(nc_1,nc_prod(x,x)))))))); - */ - nc_prod at c@(x,x,r); - nc_diff at c@(&nc_1 at c@, r, r); - nc_sqrt at c@(r, r); - nc_prodi at c@(r, r); - nc_sum at c@(x, r, r); - nc_log at c@(r, r); - nc_prodi at c@(r, r); - nc_neg at c@(r, r); - return; -} - -static void -nc_acosh at c@(c at typ@ *x, c at typ@ *r) -{ - /* - * return nc_log(nc_sum(x, - * nc_prod(nc_sqrt(nc_sum(x,nc_1)), nc_sqrt(nc_diff(x,nc_1))))); - */ - c at typ@ t; - - nc_sum at c@(x, &nc_1 at c@, &t); - nc_sqrt at c@(&t, &t); - nc_diff at c@(x, &nc_1 at c@, r); - nc_sqrt at c@(r, r); - nc_prod at c@(&t, r, r); - nc_sum at c@(x, r, r); - nc_log at c@(r, r); - return; -} - -static void -nc_asin at c@(c at typ@ *x, c at typ@ *r) -{ - /* - * return nc_neg(nc_prodi(nc_log(nc_sum(nc_prod(nc_i,x), - * nc_sqrt(nc_diff(nc_1,nc_prod(x,x))))))); - */ - c at typ@ a, *pa=&a; - nc_prod at c@(x, x, r); - nc_diff at c@(&nc_1 at c@, r, r); - nc_sqrt at c@(r, r); - nc_prodi at c@(x, pa); - nc_sum at c@(pa, r, r); - nc_log at c@(r, r); - nc_prodi at c@(r, r); - nc_neg at c@(r, r); - return; -} - - -static void -nc_asinh at c@(c at typ@ *x, c at typ@ *r) -{ - /* - * return nc_log(nc_sum(nc_sqrt(nc_sum(nc_1,nc_prod(x,x))),x)); - */ - 
nc_prod at c@(x, x, r); - nc_sum at c@(&nc_1 at c@, r, r); - nc_sqrt at c@(r, r); - nc_sum at c@(r, x, r); - nc_log at c@(r, r); - return; -} - -static void -nc_atan at c@(c at typ@ *x, c at typ@ *r) -{ - /* - * return nc_prod(nc_i2,nc_log(nc_quot(nc_sum(nc_i,x),nc_diff(nc_i,x)))); - */ - c at typ@ a, *pa=&a; - nc_diff at c@(&nc_i at c@, x, pa); - nc_sum at c@(&nc_i at c@, x, r); - nc_quot at c@(r, pa, r); - nc_log at c@(r,r); - nc_prod at c@(&nc_i2 at c@, r, r); - return; -} - -static void -nc_atanh at c@(c at typ@ *x, c at typ@ *r) -{ - /* - * return nc_prod(nc_half,nc_log(nc_quot(nc_sum(nc_1,x),nc_diff(nc_1,x)))); - */ - c at typ@ a, *pa=&a; - nc_diff at c@(&nc_1 at c@, x, r); - nc_sum at c@(&nc_1 at c@, x, pa); - nc_quot at c@(pa, r, r); - nc_log at c@(r, r); - nc_prod at c@(&nc_half at c@, r, r); - return; -} - -static void -nc_cos at c@(c at typ@ *x, c at typ@ *r) -{ - @typ@ xr=x->real, xi=x->imag; - r->real = cos at c@(xr)*cosh at c@(xi); - r->imag = -sin at c@(xr)*sinh at c@(xi); - return; -} - -static void -nc_cosh at c@(c at typ@ *x, c at typ@ *r) -{ - @typ@ xr=x->real, xi=x->imag; - r->real = cos at c@(xi)*cosh at c@(xr); - r->imag = sin at c@(xi)*sinh at c@(xr); - return; -} - - -#define M_LOG10_E 0.434294481903251827651128918916605082294397 - -static void -nc_log10 at c@(c at typ@ *x, c at typ@ *r) -{ - nc_log at c@(x, r); - r->real *= (@typ@) M_LOG10_E; - r->imag *= (@typ@) M_LOG10_E; - return; -} - -static void -nc_sin at c@(c at typ@ *x, c at typ@ *r) -{ - @typ@ xr=x->real, xi=x->imag; - r->real = sin at c@(xr)*cosh at c@(xi); - r->imag = cos at c@(xr)*sinh at c@(xi); - return; -} - -static void -nc_sinh at c@(c at typ@ *x, c at typ@ *r) -{ - @typ@ xr=x->real, xi=x->imag; - r->real = cos at c@(xi)*sinh at c@(xr); - r->imag = sin at c@(xi)*cosh at c@(xr); - return; -} - -static void -nc_tan at c@(c at typ@ *x, c at typ@ *r) -{ - @typ@ sr,cr,shi,chi; - @typ@ rs,is,rc,ic; - @typ@ d; - @typ@ xr=x->real, xi=x->imag; - sr = sin at c@(xr); - cr = cos at c@(xr); - shi = sinh at c@(xi); - chi = cosh at c@(xi); - rs = sr*chi; - is = cr*shi; - rc = cr*chi; - ic = -sr*shi; - d = rc*rc + ic*ic; - r->real = (rs*rc+is*ic)/d; - r->imag = (is*rc-rs*ic)/d; - return; -} - -static void -nc_tanh at c@(c at typ@ *x, c at typ@ *r) -{ - @typ@ si,ci,shr,chr; - @typ@ rs,is,rc,ic; - @typ@ d; - @typ@ xr=x->real, xi=x->imag; - si = sin at c@(xi); - ci = cos at c@(xi); - shr = sinh at c@(xr); - chr = cosh at c@(xr); - rs = ci*shr; - is = si*chr; - rc = ci*chr; - ic = si*shr; - d = rc*rc + ic*ic; - r->real = (rs*rc+is*ic)/d; - r->imag = (is*rc-rs*ic)/d; - return; -} - -/**end repeat**/ - -/* - ***************************************************************************** - ** UFUNC LOOPS ** - ***************************************************************************** - */ - -#define OUTPUT_LOOP\ - char *op1 = args[1];\ - intp os1 = steps[1];\ - intp n = dimensions[0];\ - intp i;\ - for(i = 0; i < n; i++, op1 += os1) - -#define UNARY_LOOP\ - char *ip1 = args[0], *op1 = args[1];\ - intp is1 = steps[0], os1 = steps[1];\ - intp n = dimensions[0];\ - intp i;\ - for(i = 0; i < n; i++, ip1 += is1, op1 += os1) - -#define UNARY_LOOP_TWO_OUT\ - char *ip1 = args[0], *op1 = args[1], *op2 = args[2];\ - intp is1 = steps[0], os1 = steps[1], os2 = steps[2];\ - intp n = dimensions[0];\ - intp i;\ - for(i = 0; i < n; i++, ip1 += is1, op1 += os1, op2 += os2) - -#define BINARY_LOOP\ - char *ip1 = args[0], *ip2 = args[1], *op1 = args[2];\ - intp is1 = steps[0], is2 = steps[1], os1 = steps[2];\ - intp n = dimensions[0];\ - 
intp i;\ - for(i = 0; i < n; i++, ip1 += is1, ip2 += is2, op1 += os1) - -#define BINARY_LOOP_TWO_OUT\ - char *ip1 = args[0], *ip2 = args[1], *op1 = args[2], *op2 = args[3];\ - intp is1 = steps[0], is2 = steps[1], os1 = steps[2], os2 = steps[3];\ - intp n = dimensions[0];\ - intp i;\ - for(i = 0; i < n; i++, ip1 += is1, ip2 += is2, op1 += os1, op2 += os2) - -/****************************************************************************** - ** GENERIC FLOAT LOOPS ** - *****************************************************************************/ - - -typedef float floatUnaryFunc(float x); -typedef double doubleUnaryFunc(double x); -typedef longdouble longdoubleUnaryFunc(longdouble x); -typedef float floatBinaryFunc(float x, float y); -typedef double doubleBinaryFunc(double x, double y); -typedef longdouble longdoubleBinaryFunc(longdouble x, longdouble y); - - -/*UFUNC_API*/ -static void -PyUFunc_f_f(char **args, intp *dimensions, intp *steps, void *func) -{ - floatUnaryFunc *f = (floatUnaryFunc *)func; - UNARY_LOOP { - const float in1 = *(float *)ip1; - *(float *)op1 = f(in1); - } -} - -/*UFUNC_API*/ -static void -PyUFunc_f_f_As_d_d(char **args, intp *dimensions, intp *steps, void *func) -{ - doubleUnaryFunc *f = (doubleUnaryFunc *)func; - UNARY_LOOP { - const float in1 = *(float *)ip1; - *(float *)op1 = (float)f((double)in1); - } -} - -/*UFUNC_API*/ -static void -PyUFunc_ff_f(char **args, intp *dimensions, intp *steps, void *func) -{ - floatBinaryFunc *f = (floatBinaryFunc *)func; - BINARY_LOOP { - float in1 = *(float *)ip1; - float in2 = *(float *)ip2; - *(float *)op1 = f(in1, in2); - } -} - -/*UFUNC_API*/ -static void -PyUFunc_ff_f_As_dd_d(char **args, intp *dimensions, intp *steps, void *func) -{ - doubleBinaryFunc *f = (doubleBinaryFunc *)func; - BINARY_LOOP { - float in1 = *(float *)ip1; - float in2 = *(float *)ip2; - *(float *)op1 = (double)f((double)in1, (double)in2); - } -} - -/*UFUNC_API*/ -static void -PyUFunc_d_d(char **args, intp *dimensions, intp *steps, void *func) -{ - doubleUnaryFunc *f = (doubleUnaryFunc *)func; - UNARY_LOOP { - double in1 = *(double *)ip1; - *(double *)op1 = f(in1); - } -} - -/*UFUNC_API*/ -static void -PyUFunc_dd_d(char **args, intp *dimensions, intp *steps, void *func) -{ - doubleBinaryFunc *f = (doubleBinaryFunc *)func; - BINARY_LOOP { - double in1 = *(double *)ip1; - double in2 = *(double *)ip2; - *(double *)op1 = f(in1, in2); - } -} - -/*UFUNC_API*/ -static void -PyUFunc_g_g(char **args, intp *dimensions, intp *steps, void *func) -{ - longdoubleUnaryFunc *f = (longdoubleUnaryFunc *)func; - UNARY_LOOP { - longdouble in1 = *(longdouble *)ip1; - *(longdouble *)op1 = f(in1); - } -} - -/*UFUNC_API*/ -static void -PyUFunc_gg_g(char **args, intp *dimensions, intp *steps, void *func) -{ - longdoubleBinaryFunc *f = (longdoubleBinaryFunc *)func; - BINARY_LOOP { - longdouble in1 = *(longdouble *)ip1; - longdouble in2 = *(longdouble *)ip2; - *(longdouble *)op1 = f(in1, in2); - } -} - - - -/****************************************************************************** - ** GENERIC COMPLEX LOOPS ** - *****************************************************************************/ - - -typedef void cdoubleUnaryFunc(cdouble *x, cdouble *r); -typedef void cfloatUnaryFunc(cfloat *x, cfloat *r); -typedef void clongdoubleUnaryFunc(clongdouble *x, clongdouble *r); -typedef void cdoubleBinaryFunc(cdouble *x, cdouble *y, cdouble *r); -typedef void cfloatBinaryFunc(cfloat *x, cfloat *y, cfloat *r); -typedef void clongdoubleBinaryFunc(clongdouble *x, clongdouble *y, - 
clongdouble *r); - -/*UFUNC_API*/ -static void -PyUFunc_F_F(char **args, intp *dimensions, intp *steps, void *func) -{ - cfloatUnaryFunc *f = (cfloatUnaryFunc *)func; - UNARY_LOOP { - cfloat in1 = *(cfloat *)ip1; - cfloat *out = (cfloat *)op1; - f(&in1, out); - } -} - -/*UFUNC_API*/ -static void -PyUFunc_F_F_As_D_D(char **args, intp *dimensions, intp *steps, void *func) -{ - cdoubleUnaryFunc *f = (cdoubleUnaryFunc *)func; - UNARY_LOOP { - const float *in1 = (float *)ip1; - cdouble tmp = {(double)(in1[0]),(double)in1[1]}; - cdouble out; - f(&tmp, &out); - ((float *)op1)[0] = (float)out.real; - ((float *)op1)[1] = (float)out.imag; - } -} - -/*UFUNC_API*/ -static void -PyUFunc_FF_F(char **args, intp *dimensions, intp *steps, void *func) -{ - cfloatBinaryFunc *f = (cfloatBinaryFunc *)func; - BINARY_LOOP { - cfloat in1 = *(cfloat *)ip1; - cfloat in2 = *(cfloat *)ip2; - cfloat *out = (cfloat *)op1; - f(&in1, &in2, out); - } -} - -/*UFUNC_API*/ -static void -PyUFunc_FF_F_As_DD_D(char **args, intp *dimensions, intp *steps, void *func) -{ - cdoubleBinaryFunc *f = (cdoubleBinaryFunc *)func; - BINARY_LOOP { - const float *in1 = (float *)ip1; - const float *in2 = (float *)ip2; - cdouble tmp1 = {(double)(in1[0]),(double)in1[1]}; - cdouble tmp2 = {(double)(in2[0]),(double)in2[1]}; - cdouble out; - f(&tmp1, &tmp2, &out); - ((float *)op1)[0] = (float)out.real; - ((float *)op1)[1] = (float)out.imag; - } -} - -/*UFUNC_API*/ -static void -PyUFunc_D_D(char **args, intp *dimensions, intp *steps, void *func) -{ - cdoubleUnaryFunc *f = (cdoubleUnaryFunc *)func; - UNARY_LOOP { - cdouble in1 = *(cdouble *)ip1; - cdouble *out = (cdouble *)op1; - f(&in1, out); - } -} - -/*UFUNC_API*/ -static void -PyUFunc_DD_D(char **args, intp *dimensions, intp *steps, void *func) -{ - cdoubleBinaryFunc *f = (cdoubleBinaryFunc *)func; - BINARY_LOOP { - cdouble in1 = *(cdouble *)ip1; - cdouble in2 = *(cdouble *)ip2; - cdouble *out = (cdouble *)op1; - f(&in1, &in2, out); - } -} - -/*UFUNC_API*/ -static void -PyUFunc_G_G(char **args, intp *dimensions, intp *steps, void *func) -{ - clongdoubleUnaryFunc *f = (clongdoubleUnaryFunc *)func; - UNARY_LOOP { - clongdouble in1 = *(clongdouble *)ip1; - clongdouble *out = (clongdouble *)op1; - f(&in1, out); - } -} - -/*UFUNC_API*/ -static void -PyUFunc_GG_G(char **args, intp *dimensions, intp *steps, void *func) -{ - clongdoubleBinaryFunc *f = (clongdoubleBinaryFunc *)func; - BINARY_LOOP { - clongdouble in1 = *(clongdouble *)ip1; - clongdouble in2 = *(clongdouble *)ip2; - clongdouble *out = (clongdouble *)op1; - f(&in1, &in2, out); - } -} - - -/****************************************************************************** - ** GENERIC OBJECT lOOPS ** - *****************************************************************************/ - -/*UFUNC_API*/ -static void -PyUFunc_O_O(char **args, intp *dimensions, intp *steps, void *func) -{ - unaryfunc f = (unaryfunc)func; - UNARY_LOOP { - PyObject *in1 = *(PyObject **)ip1; - PyObject **out = (PyObject **)op1; - PyObject *ret = f(in1); - if ((ret == NULL) || PyErr_Occurred()) { - return; - } - Py_XDECREF(*out); - *out = ret; - } -} - -/*UFUNC_API*/ -static void -PyUFunc_O_O_method(char **args, intp *dimensions, intp *steps, void *func) -{ - char *meth = (char *)func; - UNARY_LOOP { - PyObject *in1 = *(PyObject **)ip1; - PyObject **out = (PyObject **)op1; - PyObject *ret = PyObject_CallMethod(in1, meth, NULL); - if (ret == NULL) { - return; - } - Py_XDECREF(*out); - *out = ret; - } -} - -/*UFUNC_API*/ -static void -PyUFunc_OO_O(char **args, intp 
*dimensions, intp *steps, void *func) -{ - binaryfunc f = (binaryfunc)func; - BINARY_LOOP { - PyObject *in1 = *(PyObject **)ip1; - PyObject *in2 = *(PyObject **)ip2; - PyObject **out = (PyObject **)op1; - PyObject *ret = f(in1, in2); - if (PyErr_Occurred()) { - return; - } - Py_XDECREF(*out); - *out = ret; - } -} - -/*UFUNC_API*/ -static void -PyUFunc_OO_O_method(char **args, intp *dimensions, intp *steps, void *func) -{ - char *meth = (char *)func; - BINARY_LOOP { - PyObject *in1 = *(PyObject **)ip1; - PyObject *in2 = *(PyObject **)ip2; - PyObject **out = (PyObject **)op1; - PyObject *ret = PyObject_CallMethod(in1, meth, "(O)", in2); - if (ret == NULL) { - return; - } - Py_XDECREF(*out); - *out = ret; - } -} - -/* - * A general-purpose ufunc that deals with general-purpose Python callable. - * func is a structure with nin, nout, and a Python callable function - */ - -/*UFUNC_API*/ -static void -PyUFunc_On_Om(char **args, intp *dimensions, intp *steps, void *func) -{ - intp n = dimensions[0]; - PyUFunc_PyFuncData *data = (PyUFunc_PyFuncData *)func; - int nin = data->nin; - int nout = data->nout; - PyObject *tocall = data->callable; - char *ptrs[NPY_MAXARGS]; - PyObject *arglist, *result; - PyObject *in, **op; - intp i, j, ntot; - - ntot = nin+nout; - - for(j = 0; j < ntot; j++) { - ptrs[j] = args[j]; - } - for(i = 0; i < n; i++) { - arglist = PyTuple_New(nin); - if (arglist == NULL) { - return; - } - for(j = 0; j < nin; j++) { - in = *((PyObject **)ptrs[j]); - if (in == NULL) { - Py_DECREF(arglist); - return; - } - PyTuple_SET_ITEM(arglist, j, in); - Py_INCREF(in); - } - result = PyEval_CallObject(tocall, arglist); - Py_DECREF(arglist); - if (result == NULL) { - return; - } - if PyTuple_Check(result) { - if (nout != PyTuple_Size(result)) { - Py_DECREF(result); - return; - } - for(j = 0; j < nout; j++) { - op = (PyObject **)ptrs[j+nin]; - Py_XDECREF(*op); - *op = PyTuple_GET_ITEM(result, j); - Py_INCREF(*op); - } - Py_DECREF(result); - } - else { - op = (PyObject **)ptrs[nin]; - Py_XDECREF(*op); - *op = result; - } - for(j = 0; j < ntot; j++) { - ptrs[j] += steps[j]; - } - } -} - -/* - ***************************************************************************** - ** BOOLEAN LOOPS ** - ***************************************************************************** - */ - -#define BOOL_invert BOOL_logical_not -#define BOOL_negative BOOL_logical_not -#define BOOL_add BOOL_logical_or -#define BOOL_bitwise_and BOOL_logical_and -#define BOOL_bitwise_or BOOL_logical_or -#define BOOL_bitwise_xor BOOL_logical_xor -#define BOOL_multiply BOOL_logical_and -#define BOOL_subtract BOOL_logical_xor -#define BOOL_fmax BOOL_maximum -#define BOOL_fmin BOOL_minimum - -/**begin repeat - * #kind = equal, not_equal, greater, greater_equal, less, less_equal, - * logical_and, logical_or# - * #OP = ==, !=, >, >=, <, <=, &&, ||# - **/ - -static void -BOOL_ at kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - Bool in1 = *((Bool *)ip1) != 0; - Bool in2 = *((Bool *)ip2) != 0; - *((Bool *)op1)= in1 @OP@ in2; - } -} -/**end repeat**/ - -static void -BOOL_logical_xor(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - Bool in1 = *((Bool *)ip1) != 0; - Bool in2 = *((Bool *)ip2) != 0; - *((Bool *)op1)= (in1 && !in2) || (!in1 && in2); - } -} - -/**begin repeat - * #kind = maximum, minimum# - * #OP = >, <# - **/ -static void -BOOL_ at kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - Bool in1 = *((Bool 
*)ip1) != 0; - Bool in2 = *((Bool *)ip2) != 0; - *((Bool *)op1) = (in1 @OP@ in2) ? in1 : in2; - } -} -/**end repeat**/ - -/**begin repeat - * #kind = absolute, logical_not# - * #OP = !=, ==# - **/ -static void -BOOL_ at kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - UNARY_LOOP { - Bool in1 = *(Bool *)ip1; - *((Bool *)op1) = in1 @OP@ 0; - } -} -/**end repeat**/ - -static void -BOOL_ones_like(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)) -{ - OUTPUT_LOOP { - *((Bool *)op1) = 1; - } -} - - -/* - ***************************************************************************** - ** INTEGER LOOPS - ***************************************************************************** - */ - -/**begin repeat - * #type = byte, short, int, long, longlong# - * #TYPE = BYTE, SHORT, INT, LONG, LONGLONG# - * #ftype = float, float, double, double, double# - */ - -/**begin repeat1 - * both signed and unsigned integer types - * #s = , u# - * #S = , U# - */ - -#define @S@@TYPE at _floor_divide @S@@TYPE at _divide -#define @S@@TYPE at _fmax @S@@TYPE at _maximum -#define @S@@TYPE at _fmin @S@@TYPE at _minimum - -static void - at S@@TYPE at _ones_like(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)) -{ - OUTPUT_LOOP { - *((@s@@type@ *)op1) = 1; - } -} - -static void - at S@@TYPE at _square(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)) -{ - UNARY_LOOP { - const @s@@type@ in1 = *(@s@@type@ *)ip1; - *((@s@@type@ *)op1) = in1*in1; - } -} - -static void - at S@@TYPE at _reciprocal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)) -{ - UNARY_LOOP { - const @s@@type@ in1 = *(@s@@type@ *)ip1; - *((@s@@type@ *)op1) = (@s@@type@)(1.0/in1); - } -} - -static void - at S@@TYPE at _conjugate(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - UNARY_LOOP { - const @s@@type@ in1 = *(@s@@type@ *)ip1; - *((@s@@type@ *)op1) = in1; - } -} - -static void - at S@@TYPE at _negative(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - UNARY_LOOP { - const @s@@type@ in1 = *(@s@@type@ *)ip1; - *((@s@@type@ *)op1) = (@s@@type@)(-(@type@)in1); - } -} - -static void - at S@@TYPE at _logical_not(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - UNARY_LOOP { - const @s@@type@ in1 = *(@s@@type@ *)ip1; - *((Bool *)op1) = !in1; - } -} - -static void - at S@@TYPE at _invert(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - UNARY_LOOP { - const @s@@type@ in1 = *(@s@@type@ *)ip1; - *((@s@@type@ *)op1) = ~in1; - } -} - -/**begin repeat2 - * Arithmetic - * #kind = add, subtract, multiply, bitwise_and, bitwise_or, bitwise_xor, - * left_shift, right_shift# - * #OP = +, -,*, &, |, ^, <<, >># - */ -static void - at S@@TYPE at _@kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const @s@@type@ in1 = *(@s@@type@ *)ip1; - const @s@@type@ in2 = *(@s@@type@ *)ip2; - *((@s@@type@ *)op1) = in1 @OP@ in2; - } -} -/**end repeat2**/ - -/**begin repeat2 - * #kind = equal, not_equal, greater, greater_equal, less, less_equal, - * logical_and, logical_or# - * #OP = ==, !=, >, >=, <, <=, &&, ||# - */ -static void - at S@@TYPE at _@kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const @s@@type@ in1 = *(@s@@type@ *)ip1; - const @s@@type@ in2 = *(@s@@type@ *)ip2; - *((Bool *)op1) = in1 @OP@ in2; - } -} -/**end repeat2**/ - -static void - at S@@TYPE at _logical_xor(char **args, intp *dimensions, 
intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const @s@@type@ in1 = *(@s@@type@ *)ip1; - const @s@@type@ in2 = *(@s@@type@ *)ip2; - *((Bool *)op1)= (in1 && !in2) || (!in1 && in2); - } -} - -/**begin repeat2 - * #kind = maximum, minimum# - * #OP = >, <# - **/ -static void - at S@@TYPE at _@kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const @s@@type@ in1 = *(@s@@type@ *)ip1; - const @s@@type@ in2 = *(@s@@type@ *)ip2; - *((@s@@type@ *)op1) = (in1 @OP@ in2) ? in1 : in2; - } -} -/**end repeat2**/ - -static void - at S@@TYPE at _true_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const @s@@type@ in1 = *(@s@@type@ *)ip1; - const @s@@type@ in2 = *(@s@@type@ *)ip2; - if (in2 == 0) { - generate_divbyzero_error(); - *((@ftype@ *)op1) = 0; - } - else { - *((@ftype@ *)op1) = (@ftype@)in1 / (@ftype@)in2; - } - } -} - -static void - at S@@TYPE at _power(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const @ftype@ in1 = (@ftype@)*(@s@@type@ *)ip1; - const @ftype@ in2 = (@ftype@)*(@s@@type@ *)ip2; - *((@s@@type@ *)op1) = (@s@@type@) pow(in1, in2); - } -} - -static void - at S@@TYPE at _fmod(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const @s@@type@ in1 = *(@s@@type@ *)ip1; - const @s@@type@ in2 = *(@s@@type@ *)ip2; - if (in2 == 0) { - generate_divbyzero_error(); - *((@s@@type@ *)op1) = 0; - } - else { - *((@s@@type@ *)op1)= in1 % in2; - } - - } -} - -/**end repeat1**/ - -static void -U at TYPE@_absolute(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - UNARY_LOOP { - const u at type@ in1 = *(u at type@ *)ip1; - *((u at type@ *)op1) = in1; - } -} - -static void - at TYPE@_absolute(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - UNARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - *((@type@ *)op1) = (in1 >= 0) ? in1 : -in1; - } -} - -static void -U at TYPE@_sign(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - UNARY_LOOP { - const u at type@ in1 = *(u at type@ *)ip1; - *((u at type@ *)op1) = in1 > 0 ? 1 : 0; - } -} - -static void - at TYPE@_sign(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - UNARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - *((@type@ *)op1) = in1 > 0 ? 1 : (in1 < 0 ? 
-1 : 0); - } -} - -static void - at TYPE@_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - const @type@ in2 = *(@type@ *)ip2; - if (in2 == 0) { - generate_divbyzero_error(); - *((@type@ *)op1) = 0; - } - else if (((in1 > 0) != (in2 > 0)) && (in1 % in2 != 0)) { - *((@type@ *)op1) = in1/in2 - 1; - } - else { - *((@type@ *)op1) = in1/in2; - } - } -} - -static void -U at TYPE@_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const u at type@ in1 = *(u at type@ *)ip1; - const u at type@ in2 = *(u at type@ *)ip2; - if (in2 == 0) { - generate_divbyzero_error(); - *((u at type@ *)op1) = 0; - } - else { - *((u at type@ *)op1)= in1/in2; - } - } -} - -static void - at TYPE@_remainder(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - const @type@ in2 = *(@type@ *)ip2; - if (in2 == 0) { - generate_divbyzero_error(); - *((@type@ *)op1) = 0; - } - else { - /* handle mixed case the way Python does */ - const @type@ rem = in1 % in2; - if ((in1 > 0) == (in2 > 0) || rem == 0) { - *((@type@ *)op1) = rem; - } - else { - *((@type@ *)op1) = rem + in2; - } - } - } -} - -static void -U at TYPE@_remainder(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const u at type@ in1 = *(u at type@ *)ip1; - const u at type@ in2 = *(u at type@ *)ip2; - if (in2 == 0) { - generate_divbyzero_error(); - *((@type@ *)op1) = 0; - } - else { - *((@type@ *)op1) = in1 % in2; - } - } -} - -/**end repeat**/ - -/* - ***************************************************************************** - ** FLOAT LOOPS ** - ***************************************************************************** - */ - - -/**begin repeat - * Float types - * #type = float, double, longdouble# - * #TYPE = FLOAT, DOUBLE, LONGDOUBLE# - * #c = f, , l# - * #C = F, , L# - */ - - -/**begin repeat1 - * Arithmetic - * # kind = add, subtract, multiply, divide# - * # OP = +, -, *, /# - */ -static void - at TYPE@_ at kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - const @type@ in2 = *(@type@ *)ip2; - *((@type@ *)op1) = in1 @OP@ in2; - } -} -/**end repeat1**/ - -/**begin repeat1 - * #kind = equal, not_equal, less, less_equal, greater, greater_equal, - * logical_and, logical_or# - * #OP = ==, !=, <, <=, >, >=, &&, ||# - */ -static void - at TYPE@_ at kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - const @type@ in2 = *(@type@ *)ip2; - *((Bool *)op1) = in1 @OP@ in2; - } -} -/**end repeat1**/ - -static void - at TYPE@_logical_xor(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - const @type@ in2 = *(@type@ *)ip2; - *((Bool *)op1)= (in1 && !in2) || (!in1 && in2); - } -} - -static void - at TYPE@_logical_not(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - UNARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - *((Bool *)op1) = !in1; - } -} - -/**begin repeat1 - * #kind = isnan, isinf, isfinite, signbit# - * #func = isnan, isinf, isfinite, signbit# - **/ -static void - at TYPE@_ at kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - UNARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - *((Bool *)op1) = @func@(in1) != 0; - } -} -/**end repeat1**/ - 
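The @TYPE@, @type@, @kind@ and @OP@ placeholders in the loops above are filled in by numpy's code generator before compilation. Purely as an illustration (not part of the commit), this is roughly what one expansion of the templated binary arithmetic loop would look like for the double-precision add case, reusing the BINARY_LOOP macro defined earlier in this diff:

    /* Illustrative expansion only: assumes the generator substitutes
     * @TYPE@ -> DOUBLE, @type@ -> double, @kind@ -> add and @OP@ -> +. */
    static void
    DOUBLE_add(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func))
    {
        BINARY_LOOP {
            const double in1 = *(double *)ip1;
            const double in2 = *(double *)ip2;
            *((double *)op1) = in1 + in2;
        }
    }

The macro supplies the strided pointers ip1, ip2 and op1, so each expansion only has to state the element type and the scalar operation.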
-/**begin repeat1 - * #kind = maximum, minimum# - * #OP = >=, <=# - **/ -static void - at TYPE@_ at kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - /* */ - BINARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - const @type@ in2 = *(@type@ *)ip2; - *((@type@ *)op1) = (in1 @OP@ in2 || isnan(in1)) ? in1 : in2; - } -} -/**end repeat1**/ - -/**begin repeat1 - * #kind = fmax, fmin# - * #OP = >=, <=# - **/ -static void - at TYPE@_ at kind@(char **args, intp *dimensions, intp *steps, void *func) -{ - /* */ - BINARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - const @type@ in2 = *(@type@ *)ip2; - *((@type@ *)op1) = (in1 @OP@ in2 || isnan(in2)) ? in1 : in2; - } -} -/**end repeat1**/ - -static void - at TYPE@_floor_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - const @type@ in2 = *(@type@ *)ip2; - *((@type@ *)op1) = floor at c@(in1/in2); - } -} - -static void - at TYPE@_remainder(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - const @type@ in2 = *(@type@ *)ip2; - const @type@ res = fmod at c@(in1,in2); - if (res && ((in2 < 0) != (res < 0))) { - *((@type@ *)op1) = res + in2; - } - else { - *((@type@ *)op1) = res; - } - } -} - -static void - at TYPE@_square(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)) -{ - UNARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - *((@type@ *)op1) = in1*in1; - } -} - -static void - at TYPE@_reciprocal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)) -{ - UNARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - *((@type@ *)op1) = 1/in1; - } -} - -static void - at TYPE@_ones_like(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)) -{ - OUTPUT_LOOP { - *((@type@ *)op1) = 1; - } -} - -static void - at TYPE@_conjugate(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - UNARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - *((@type@ *)op1) = in1; - } -} - -static void - at TYPE@_absolute(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - UNARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - const @type@ tmp = in1 > 0 ? in1 : -in1; - /* add 0 to clear -0.0 */ - *((@type@ *)op1) = tmp + 0; - } -} - -static void - at TYPE@_negative(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - UNARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - *((@type@ *)op1) = -in1; - } -} - -static void - at TYPE@_sign(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - /* Sign of nan is currently 0 */ - UNARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - *((@type@ *)op1) = in1 > 0 ? 1 : (in1 < 0 ? 
-1 : 0); - } -} - -static void - at TYPE@_modf(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - UNARY_LOOP_TWO_OUT { - const @type@ in1 = *(@type@ *)ip1; - *((@type@ *)op1) = modf at c@(in1, (@type@ *)op2); - } -} - -#ifdef HAVE_FREXP at C@ -static void - at TYPE@_frexp(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - UNARY_LOOP_TWO_OUT { - const @type@ in1 = *(@type@ *)ip1; - *((@type@ *)op1) = frexp at c@(in1, (int *)op2); - } -} -#endif - -#ifdef HAVE_LDEXP at C@ -static void - at TYPE@_ldexp(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - const int in2 = *(int *)ip2; - *((@type@ *)op1) = ldexp at c@(in1, in2); - } -} -#endif - -#define @TYPE at _true_divide @TYPE at _divide - -/**end repeat**/ - - -/* - ***************************************************************************** - ** COMPLEX LOOPS ** - ***************************************************************************** - */ - -#define CGE(xr,xi,yr,yi) (xr > yr || (xr == yr && xi >= yi)) -#define CLE(xr,xi,yr,yi) (xr < yr || (xr == yr && xi <= yi)) -#define CGT(xr,xi,yr,yi) (xr > yr || (xr == yr && xi > yi)) -#define CLT(xr,xi,yr,yi) (xr < yr || (xr == yr && xi < yi)) -#define CEQ(xr,xi,yr,yi) (xr == yr && xi == yi) -#define CNE(xr,xi,yr,yi) (xr != yr || xi != yi) - -/**begin repeat - * complex types - * #type = float, double, longdouble# - * #TYPE = FLOAT, DOUBLE, LONGDOUBLE# - * #c = f, , l# - */ - -/**begin repeat1 - * arithmetic - * #kind = add, subtract# - * #OP = +, -# - */ -static void -C at TYPE@_ at kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const @type@ in1r = ((@type@ *)ip1)[0]; - const @type@ in1i = ((@type@ *)ip1)[1]; - const @type@ in2r = ((@type@ *)ip2)[0]; - const @type@ in2i = ((@type@ *)ip2)[1]; - ((@type@ *)op1)[0] = in1r @OP@ in2r; - ((@type@ *)op1)[1] = in1i @OP@ in2i; - } -} -/**end repeat1**/ - -static void -C at TYPE@_multiply(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const @type@ in1r = ((@type@ *)ip1)[0]; - const @type@ in1i = ((@type@ *)ip1)[1]; - const @type@ in2r = ((@type@ *)ip2)[0]; - const @type@ in2i = ((@type@ *)ip2)[1]; - ((@type@ *)op1)[0] = in1r*in2r - in1i*in2i; - ((@type@ *)op1)[1] = in1r*in2i + in1i*in2r; - } -} - -static void -C at TYPE@_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const @type@ in1r = ((@type@ *)ip1)[0]; - const @type@ in1i = ((@type@ *)ip1)[1]; - const @type@ in2r = ((@type@ *)ip2)[0]; - const @type@ in2i = ((@type@ *)ip2)[1]; - @type@ d = in2r*in2r + in2i*in2i; - ((@type@ *)op1)[0] = (in1r*in2r + in1i*in2i)/d; - ((@type@ *)op1)[1] = (in1i*in2r - in1r*in2i)/d; - } -} - -static void -C at TYPE@_floor_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const @type@ in1r = ((@type@ *)ip1)[0]; - const @type@ in1i = ((@type@ *)ip1)[1]; - const @type@ in2r = ((@type@ *)ip2)[0]; - const @type@ in2i = ((@type@ *)ip2)[1]; - @type@ d = in2r*in2r + in2i*in2i; - ((@type@ *)op1)[0] = floor at c@((in1r*in2r + in1i*in2i)/d); - ((@type@ *)op1)[1] = 0; - } -} - -/**begin repeat1 - * #kind= greater, greater_equal, less, less_equal, equal, not_equal# - * #OP = CGT, CGE, CLT, CLE, CEQ, CNE# - */ -static void -C at TYPE@_ at kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const @type@ in1r = ((@type@ *)ip1)[0]; - const @type@ 
in1i = ((@type@ *)ip1)[1]; - const @type@ in2r = ((@type@ *)ip2)[0]; - const @type@ in2i = ((@type@ *)ip2)[1]; - *((Bool *)op1) = @OP@(in1r,in1i,in2r,in2i); - } -} -/**end repeat1**/ - -/**begin repeat1 - #kind = logical_and, logical_or# - #OP1 = ||, ||# - #OP2 = &&, ||# -*/ -static void -C at TYPE@_ at kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const @type@ in1r = ((@type@ *)ip1)[0]; - const @type@ in1i = ((@type@ *)ip1)[1]; - const @type@ in2r = ((@type@ *)ip2)[0]; - const @type@ in2i = ((@type@ *)ip2)[1]; - *((Bool *)op1) = (in1r @OP1@ in1i) @OP2@ (in2r @OP1@ in2i); - } -} -/**end repeat1**/ - -static void -C at TYPE@_logical_xor(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const @type@ in1r = ((@type@ *)ip1)[0]; - const @type@ in1i = ((@type@ *)ip1)[1]; - const @type@ in2r = ((@type@ *)ip2)[0]; - const @type@ in2i = ((@type@ *)ip2)[1]; - const Bool tmp1 = (in1r || in1i); - const Bool tmp2 = (in2r || in2i); - *((Bool *)op1) = (tmp1 && !tmp2) || (!tmp1 && tmp2); - } -} - -static void -C at TYPE@_logical_not(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - UNARY_LOOP { - const @type@ in1r = ((@type@ *)ip1)[0]; - const @type@ in1i = ((@type@ *)ip1)[1]; - *((Bool *)op1) = !(in1r || in1i); - } -} - -/**begin repeat1 - * #kind = isnan, isinf, isfinite# - * #func = isnan, isinf, isfinite# - * #OP = ||, ||, &&# - **/ -static void -C at TYPE@_ at kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - UNARY_LOOP { - const @type@ in1r = ((@type@ *)ip1)[0]; - const @type@ in1i = ((@type@ *)ip1)[1]; - *((Bool *)op1) = @func@(in1r) @OP@ @func@(in1i); - } -} -/**end repeat1**/ - -static void -C at TYPE@_square(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)) -{ - UNARY_LOOP { - const @type@ in1r = ((@type@ *)ip1)[0]; - const @type@ in1i = ((@type@ *)ip1)[1]; - ((@type@ *)op1)[0] = in1r*in1r - in1i*in1i; - ((@type@ *)op1)[1] = in1r*in1i + in1i*in1r; - } -} - -static void -C at TYPE@_reciprocal(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)) -{ - UNARY_LOOP { - const @type@ in1r = ((@type@ *)ip1)[0]; - const @type@ in1i = ((@type@ *)ip1)[1]; - if (fabs at c@(in1i) <= fabs at c@(in1r)) { - const @type@ r = in1i/in1r; - const @type@ d = in1r + in1i*r; - ((@type@ *)op1)[0] = 1/d; - ((@type@ *)op1)[1] = -r/d; - } else { - const @type@ r = in1r/in1i; - const @type@ d = in1r*r + in1i; - ((@type@ *)op1)[0] = r/d; - ((@type@ *)op1)[1] = -1/d; - } - } -} - -static void -C at TYPE@_ones_like(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(data)) -{ - OUTPUT_LOOP { - ((@type@ *)op1)[0] = 1; - ((@type@ *)op1)[1] = 0; - } -} - -static void -C at TYPE@_conjugate(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) { - UNARY_LOOP { - const @type@ in1r = ((@type@ *)ip1)[0]; - const @type@ in1i = ((@type@ *)ip1)[1]; - ((@type@ *)op1)[0] = in1r; - ((@type@ *)op1)[1] = -in1i; - } -} - -static void -C at TYPE@_absolute(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - UNARY_LOOP { - const @type@ in1r = ((@type@ *)ip1)[0]; - const @type@ in1i = ((@type@ *)ip1)[1]; - *((@type@ *)op1) = sqrt at c@(in1r*in1r + in1i*in1i); - } -} - -static void -C at TYPE@_sign(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - /* fixme: sign of nan is currently 0 */ - UNARY_LOOP { - const @type@ in1r = ((@type@ *)ip1)[0]; - const @type@ in1i = ((@type@ *)ip1)[1]; - ((@type@ *)op1)[0] = 
CGT(in1r, in1i, 0, 0) ? 1 : - (CLT(in1r, in1i, 0, 0) ? -1 : 0); - ((@type@ *)op1)[1] = 0; - } -} - -/**begin repeat1 - * #kind = maximum, minimum# - * #OP = CGE, CLE# - */ -static void -C at TYPE@_ at kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - BINARY_LOOP { - const @type@ in1r = ((@type@ *)ip1)[0]; - const @type@ in1i = ((@type@ *)ip1)[1]; - const @type@ in2r = ((@type@ *)ip2)[0]; - const @type@ in2i = ((@type@ *)ip2)[1]; - if (@OP@(in1r, in1i, in2r, in2i) || isnan(in1r) || isnan(in1i)) { - ((@type@ *)op1)[0] = in1r; - ((@type@ *)op1)[1] = in1i; - } - else { - ((@type@ *)op1)[0] = in2r; - ((@type@ *)op1)[1] = in2i; - } - } -} -/**end repeat1**/ - -/**begin repeat1 - * #kind = fmax, fmin# - * #OP = CGE, CLE# - */ -static void -C at TYPE@_ at kind@(char **args, intp *dimensions, intp *steps, void *func) -{ - BINARY_LOOP { - const @type@ in1r = ((@type@ *)ip1)[0]; - const @type@ in1i = ((@type@ *)ip1)[1]; - const @type@ in2r = ((@type@ *)ip2)[0]; - const @type@ in2i = ((@type@ *)ip2)[1]; - if (@OP@(in1r, in1i, in2r, in2i) || isnan(in2r) || isnan(in2i)) { - ((@type@ *)op1)[0] = in1r; - ((@type@ *)op1)[1] = in1i; - } - else { - ((@type@ *)op1)[0] = in2r; - ((@type@ *)op1)[1] = in2i; - } - } -} -/**end repeat1**/ - -#define C at TYPE@_true_divide C at TYPE@_divide - -/**end repeat**/ - -#undef CGE -#undef CLE -#undef CGT -#undef CLT -#undef CEQ -#undef CNE - -/* - ***************************************************************************** - ** OBJECT LOOPS ** - ***************************************************************************** - */ - -/**begin repeat - * #kind = equal, not_equal, greater, greater_equal, less, less_equal# - * #OP = EQ, NE, GT, GE, LT, LE# - */ -static void -OBJECT_ at kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) { - BINARY_LOOP { - PyObject *in1 = *(PyObject **)ip1; - PyObject *in2 = *(PyObject **)ip2; - int ret = PyObject_RichCompareBool(in1, in2, Py_ at OP@); - if (ret == -1) { - return; - } - *((Bool *)op1) = (Bool)ret; - } -} -/**end repeat**/ - -static void -OBJECT_sign(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) -{ - PyObject *zero = PyInt_FromLong(0); - UNARY_LOOP { - PyObject *in1 = *(PyObject **)ip1; - PyObject **out = (PyObject **)op1; - PyObject *ret = PyInt_FromLong(PyObject_Compare(in1, zero)); - if (PyErr_Occurred()) { - return; - } - Py_XDECREF(*out); - *out = ret; - } - Py_DECREF(zero); -} - -/* - ***************************************************************************** - ** END LOOPS ** - ***************************************************************************** - */ - - -/* - ***************************************************************************** ** SETUP UFUNCS ** ***************************************************************************** */ -#include "__umath_generated.c" -#include "umath_ufunc_object.inc" -#include "__ufunc_api.c" +/* Less automated additions to the ufuncs */ static PyUFuncGenericFunction frexp_functions[] = { #ifdef HAVE_FREXPF @@ -1983,41 +79,6 @@ #endif }; - -static double -pinf_init(void) -{ - double mul = 1e10; - double tmp = 0.0; - double pinf; - - pinf = mul; - for (;;) { - pinf *= mul; - if (pinf == tmp) break; - tmp = pinf; - } - return pinf; -} - -static double -pzero_init(void) -{ - double div = 1e10; - double tmp = 0.0; - double pinf; - - pinf = div; - for (;;) { - pinf /= div; - if (pinf == tmp) break; - tmp = pinf; - } - return pinf; -} - -/* Less automated additions to the ufuncs */ - static void 
InitOtherOperators(PyObject *dictionary) { PyObject *f; @@ -2052,6 +113,42 @@ return; } +/* Setup +inf and +0 */ + +static double +pinf_init(void) +{ + double mul = 1e10; + double tmp = 0.0; + double pinf; + + pinf = mul; + for (;;) { + pinf *= mul; + if (pinf == tmp) break; + tmp = pinf; + } + return pinf; +} + +static double +pzero_init(void) +{ + double div = 1e10; + double tmp = 0.0; + double pinf; + + pinf = div; + for (;;) { + pinf /= div; + if (pinf == tmp) break; + tmp = pinf; + } + return pinf; +} + +/* Setup the umath module */ + static struct PyMethodDef methods[] = { {"frompyfunc", (PyCFunction) ufunc_frompyfunc, METH_VARARGS | METH_KEYWORDS, doc_frompyfunc}, From numpy-svn at scipy.org Sat Nov 22 01:32:50 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 22 Nov 2008 00:32:50 -0600 (CST) Subject: [Numpy-svn] r6091 - trunk/numpy/core/src Message-ID: <20081122063250.DEBA539C088@scipy.org> Author: charris Date: 2008-11-22 00:32:40 -0600 (Sat, 22 Nov 2008) New Revision: 6091 Modified: trunk/numpy/core/src/umathmodule.c.src Log: Change order of includes to see if it matters on SPARC Etch buildbot. Modified: trunk/numpy/core/src/umathmodule.c.src =================================================================== --- trunk/numpy/core/src/umathmodule.c.src 2008-11-22 04:25:21 UTC (rev 6090) +++ trunk/numpy/core/src/umathmodule.c.src 2008-11-22 06:32:40 UTC (rev 6091) @@ -9,13 +9,13 @@ ** INCLUDES ** ***************************************************************************** */ -#define _UMATHMODULE /* needed in one of the numpy include files */ -#include #include "Python.h" #include "numpy/noprefix.h" +#define _UMATHMODULE /* needed in one of the numpy include files */ #include "numpy/ufuncobject.h" #include "abstract.h" #include "config.h" +#include /* ***************************************************************************** From numpy-svn at scipy.org Sat Nov 22 02:54:53 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 22 Nov 2008 01:54:53 -0600 (CST) Subject: [Numpy-svn] r6092 - trunk/numpy/core/src Message-ID: <20081122075453.3ECE539C088@scipy.org> Author: charris Date: 2008-11-22 01:54:48 -0600 (Sat, 22 Nov 2008) New Revision: 6092 Modified: trunk/numpy/core/src/umathmodule.c.src Log: Add some cautionary comments. Modified: trunk/numpy/core/src/umathmodule.c.src =================================================================== --- trunk/numpy/core/src/umathmodule.c.src 2008-11-22 06:32:40 UTC (rev 6091) +++ trunk/numpy/core/src/umathmodule.c.src 2008-11-22 07:54:48 UTC (rev 6092) @@ -9,12 +9,24 @@ ** INCLUDES ** ***************************************************************************** */ + +/* + * _UMATHMODULE IS needed in __ufunc_api.h, included from numpy/ufuncobject.h. + * This is a mess and it would be nice to fix it. It has nothing to do with + * __ufunc_api.c + */ +#define _UMATHMODULE + #include "Python.h" #include "numpy/noprefix.h" -#define _UMATHMODULE /* needed in one of the numpy include files */ #include "numpy/ufuncobject.h" #include "abstract.h" #include "config.h" + +/* + * Looks like some versions of Python.h do naughty things, so math.h needs + * to come after. 
+ */ #include /* @@ -113,7 +125,7 @@ return; } -/* Setup +inf and +0 */ +/* Setup +inf and +0 */ static double pinf_init(void) @@ -148,6 +160,7 @@ } /* Setup the umath module */ +static PyTypeObject PyUFunc_Type; static struct PyMethodDef methods[] = { {"frompyfunc", (PyCFunction) ufunc_frompyfunc, From numpy-svn at scipy.org Sat Nov 22 03:01:34 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 22 Nov 2008 02:01:34 -0600 (CST) Subject: [Numpy-svn] r6093 - trunk/numpy/core/src Message-ID: <20081122080134.D889839C088@scipy.org> Author: charris Date: 2008-11-22 02:01:26 -0600 (Sat, 22 Nov 2008) New Revision: 6093 Modified: trunk/numpy/core/src/umathmodule.c.src Log: Comment out redundant variable declaration for time being. Modified: trunk/numpy/core/src/umathmodule.c.src =================================================================== --- trunk/numpy/core/src/umathmodule.c.src 2008-11-22 07:54:48 UTC (rev 6092) +++ trunk/numpy/core/src/umathmodule.c.src 2008-11-22 08:01:26 UTC (rev 6093) @@ -160,7 +160,8 @@ } /* Setup the umath module */ -static PyTypeObject PyUFunc_Type; +/* Remove for time being, it is declared in __ufunc_api.h */ +/*static PyTypeObject PyUFunc_Type;*/ static struct PyMethodDef methods[] = { {"frompyfunc", (PyCFunction) ufunc_frompyfunc, From numpy-svn at scipy.org Sat Nov 22 19:04:31 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 22 Nov 2008 18:04:31 -0600 (CST) Subject: [Numpy-svn] r6094 - in trunk/numpy/ma: . tests Message-ID: <20081123000431.5A8C839C088@scipy.org> Author: pierregm Date: 2008-11-22 18:04:29 -0600 (Sat, 22 Nov 2008) New Revision: 6094 Modified: trunk/numpy/ma/core.py trunk/numpy/ma/tests/test_core.py Log: Added mod to the ufuncs Modified: trunk/numpy/ma/core.py =================================================================== --- trunk/numpy/ma/core.py 2008-11-22 08:01:26 UTC (rev 6093) +++ trunk/numpy/ma/core.py 2008-11-23 00:04:29 UTC (rev 6094) @@ -46,7 +46,7 @@ 'masked_object','masked_outside', 'masked_print_option', 'masked_singleton','masked_values', 'masked_where', 'max', 'maximum', 'maximum_fill_value', 'mean', 'min', 'minimum', 'minimum_fill_value', - 'multiply', + 'mod', 'multiply', 'negative', 'nomask', 'nonzero', 'not_equal', 'ones', 'outer', 'outerproduct', 'power', 'product', 'ptp', 'put', 'putmask', @@ -768,6 +768,7 @@ remainder = _DomainedBinaryOperation(umath.remainder, _DomainSafeDivide(), 0, 1) fmod = _DomainedBinaryOperation(umath.fmod, _DomainSafeDivide(), 0, 1) +mod = _DomainedBinaryOperation(umath.mod, _DomainSafeDivide(), 0, 1) #####-------------------------------------------------------------------------- Modified: trunk/numpy/ma/tests/test_core.py =================================================================== --- trunk/numpy/ma/tests/test_core.py 2008-11-22 08:01:26 UTC (rev 6093) +++ trunk/numpy/ma/tests/test_core.py 2008-11-23 00:04:29 UTC (rev 6094) @@ -752,9 +752,19 @@ assert_equal(np.sum(x,1), sum(x,1)) assert_equal(np.product(x,1), product(x,1)) + def test_mod(self): + "Tests mod" + (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + assert_equal(mod(x, y), mod(xm, ym)) + test = mod(ym, xm) + assert_equal(test, np.mod(ym, xm)) + assert_equal(test.mask, mask_or(xm.mask, ym.mask)) + test = mod(xm, ym) + assert_equal(test, np.mod(xm, ym)) + assert_equal(test.mask, mask_or(mask_or(xm.mask, ym.mask), (ym == 0))) + - def test_TakeTransposeInnerOuter(self): "Test of take, transpose, inner, outer products" x = arange(24) From numpy-svn at scipy.org Sat Nov 22 19:13:51 2008 From: 
numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 22 Nov 2008 18:13:51 -0600 (CST) Subject: [Numpy-svn] r6095 - branches/1.2.x/numpy/ma Message-ID: <20081123001351.EA80839C088@scipy.org> Author: pierregm Date: 2008-11-22 18:13:48 -0600 (Sat, 22 Nov 2008) New Revision: 6095 Modified: branches/1.2.x/numpy/ma/core.py Log: Added mod to the ufuncs Modified: branches/1.2.x/numpy/ma/core.py =================================================================== --- branches/1.2.x/numpy/ma/core.py 2008-11-23 00:04:29 UTC (rev 6094) +++ branches/1.2.x/numpy/ma/core.py 2008-11-23 00:13:48 UTC (rev 6095) @@ -45,7 +45,7 @@ 'masked_object','masked_outside', 'masked_print_option', 'masked_singleton','masked_values', 'masked_where', 'max', 'maximum', 'maximum_fill_value', 'mean', 'min', 'minimum', 'minimum_fill_value', - 'multiply', + 'mod', 'multiply', 'negative', 'nomask', 'nonzero', 'not_equal', 'ones', 'outer', 'outerproduct', 'power', 'product', 'ptp', 'put', 'putmask', @@ -291,9 +291,12 @@ #####-------------------------------------------------------------------------- def filled(a, fill_value = None): - """Return a as an array with masked data replaced by value. If - value is None, get_fill_value(a) is used instead. If a is already - a ndarray, a itself is returned. + """ + Return `a` as an array where masked data have been replaced by `value`. + + If `a` is not a MaskedArray, `a` itself is returned. + If `a` is a MaskedArray and `fill_value` is None, `fill_value` is set to + `a.fill_value`. Parameters ---------- @@ -764,6 +767,7 @@ remainder = _DomainedBinaryOperation(umath.remainder, _DomainSafeDivide(), 0, 1) fmod = _DomainedBinaryOperation(umath.fmod, _DomainSafeDivide(), 0, 1) +mod = _DomainedBinaryOperation(umath.mod, _DomainSafeDivide(), 0, 1) #####-------------------------------------------------------------------------- @@ -875,6 +879,7 @@ else: return result + def make_mask_none(newshape, dtype=None): """ Return a mask of shape s, filled with False. @@ -884,7 +889,8 @@ news : tuple A tuple indicating the shape of the final mask. dtype: {None, dtype}, optional - A dtype. + If None, use MaskType. Otherwise, use a new datatype with the same fields + as `dtype` with boolean type. """ if dtype is None: From numpy-svn at scipy.org Sun Nov 23 05:39:23 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 23 Nov 2008 04:39:23 -0600 (CST) Subject: [Numpy-svn] r6096 - numpy-docs/trunk trunk/doc Message-ID: <20081123103923.E82AB39C088@scipy.org> Author: ptvirtan Date: 2008-11-23 04:39:05 -0600 (Sun, 23 Nov 2008) New Revision: 6096 Added: trunk/doc/Makefile trunk/doc/README.txt trunk/doc/postprocess.py trunk/doc/source/ trunk/doc/summarize.py Removed: numpy-docs/trunk/Makefile numpy-docs/trunk/README.txt numpy-docs/trunk/postprocess.py numpy-docs/trunk/source/ numpy-docs/trunk/summarize.py Log: Moved numpy-docs under doc/ in the main Numpy trunk. Deleted: numpy-docs/trunk/Makefile =================================================================== --- numpy-docs/trunk/Makefile 2008-11-23 00:13:48 UTC (rev 6095) +++ numpy-docs/trunk/Makefile 2008-11-23 10:39:05 UTC (rev 6096) @@ -1,98 +0,0 @@ -# Makefile for Sphinx documentation -# - -# You can set these variables from the command line. -SPHINXOPTS = -SPHINXBUILD = LANG=C sphinx-build -PAPER = - -# Internal variables. 
-PAPEROPT_a4 = -D latex_paper_size=a4 -PAPEROPT_letter = -D latex_paper_size=letter -ALLSPHINXOPTS = -d build/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source - -.PHONY: help clean html web pickle htmlhelp latex changes linkcheck - -help: - @echo "Please use \`make ' where is one of" - @echo " dist to make a distribution-ready tree" - @echo " html to make standalone HTML files" - @echo " pickle to make pickle files (usable by e.g. sphinx-web)" - @echo " htmlhelp to make HTML files and a HTML help project" - @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" - @echo " changes to make an overview over all changed/added/deprecated items" - @echo " linkcheck to check all external links for integrity" - -clean: - -rm -rf build/* source/reference/generated - -dist: html - test -d build/latex || make latex - make -C build/latex all-pdf - -rm -rf build/dist - cp -r build/html build/dist - perl -pi -e 's#^\s*(
NumPy.*?Manual.*?» )# Numpy and Scipy Documentation »
  • #;' build/dist/*.html build/dist/*/*.html build/dist/*/*/*.html - cd build/html && zip -9r ../dist/numpy-html.zip . - cp build/latex/*.pdf build/dist - cd build/dist && tar czf ../dist.tar.gz * - -generate: build/generate-stamp -build/generate-stamp: $(wildcard source/reference/*.rst) ext - mkdir -p build - ./ext/autosummary_generate.py source/reference/*.rst \ - -p dump.xml -o source/reference/generated - touch build/generate-stamp - -ext: - svn co http://sphinx.googlecode.com/svn/contrib/trunk/numpyext ext - -html: generate - mkdir -p build/html build/doctrees - $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) build/html - python postprocess.py html build/html/*.html - @echo - @echo "Build finished. The HTML pages are in build/html." - -pickle: generate - mkdir -p build/pickle build/doctrees - $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) build/pickle - @echo - @echo "Build finished; now you can process the pickle files or run" - @echo " sphinx-web build/pickle" - @echo "to start the sphinx-web server." - -web: pickle - -htmlhelp: generate - mkdir -p build/htmlhelp build/doctrees - $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) build/htmlhelp - @echo - @echo "Build finished; now you can run HTML Help Workshop with the" \ - ".hhp project file in build/htmlhelp." - -latex: generate - mkdir -p build/latex build/doctrees - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) build/latex - python postprocess.py tex build/latex/*.tex - @echo - @echo "Build finished; the LaTeX files are in build/latex." - @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ - "run these through (pdf)latex." - -coverage: build - mkdir -p build/coverage build/doctrees - $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) build/coverage - @echo "Coverage finished; see c.txt and python.txt in build/coverage" - -changes: generate - mkdir -p build/changes build/doctrees - $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) build/changes - @echo - @echo "The overview file is in build/changes." - -linkcheck: generate - mkdir -p build/linkcheck build/doctrees - $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) build/linkcheck - @echo - @echo "Link check complete; look for any errors in the above output " \ - "or in build/linkcheck/output.txt." Deleted: numpy-docs/trunk/README.txt =================================================================== --- numpy-docs/trunk/README.txt 2008-11-23 00:13:48 UTC (rev 6095) +++ numpy-docs/trunk/README.txt 2008-11-23 10:39:05 UTC (rev 6096) @@ -1,40 +0,0 @@ -NumPy Reference Guide -===================== - -Instructions ------------- -1. Optionally download an XML dump of the newest docstrings from the doc wiki - at ``/pydocweb/dump`` and save it as ``dump.xml``. -2. Run ``make html`` or ``make dist`` - -You can also run ``summarize.py`` to see which parts of the Numpy -namespace are documented. - - -TODO ----- - -* Numberless [*] footnotes cause LaTeX errors. - -* ``See also`` sections are still somehow broken even if some work. - The problem is that Sphinx searches like this:: - - 'name' - 'active_module.name' - 'active_module.active_class.name'. - - Whereas, we would like to have this: - - 'name' - 'active_module.name' - 'parent_of_active_module.name' - 'parent_of_parent_of_active_module.name' - ... - 'numpy.name' - - We can get one step upwards by always using 'numpy' as the active module. - It seems difficult to beat Sphinx to do what we want. - Do we need to change our docstring standard slightly, ie. allow only - leaving the 'numpy.' prefix away? - -* Link resolution doesn't work as intended... eg. 
`doc.ufunc`_ Deleted: numpy-docs/trunk/postprocess.py =================================================================== --- numpy-docs/trunk/postprocess.py 2008-11-23 00:13:48 UTC (rev 6095) +++ numpy-docs/trunk/postprocess.py 2008-11-23 10:39:05 UTC (rev 6096) @@ -1,59 +0,0 @@ -#!/usr/bin/env python -""" -%prog MODE FILES... - -Post-processes HTML and Latex files output by Sphinx. -MODE is either 'html' or 'tex'. - -""" -import re, optparse - -def main(): - p = optparse.OptionParser(__doc__) - options, args = p.parse_args() - - if len(args) < 1: - p.error('no mode given') - - mode = args.pop(0) - - if mode not in ('html', 'tex'): - p.error('unknown mode %s' % mode) - - for fn in args: - f = open(fn, 'r') - try: - if mode == 'html': - lines = process_html(fn, f.readlines()) - elif mode == 'tex': - lines = process_tex(f.readlines()) - finally: - f.close() - - f = open(fn, 'w') - f.write("".join(lines)) - f.close() - -def process_html(fn, lines): - return lines - -def process_tex(lines): - """ - Remove unnecessary section titles from the LaTeX file. - - """ - new_lines = [] - for line in lines: - if (line.startswith(r'\section{numpy.') - or line.startswith(r'\subsection{numpy.') - or line.startswith(r'\subsubsection{numpy.') - or line.startswith(r'\paragraph{numpy.') - or line.startswith(r'\subparagraph{numpy.') - ): - pass # skip! - else: - new_lines.append(line) - return new_lines - -if __name__ == "__main__": - main() Deleted: numpy-docs/trunk/summarize.py =================================================================== --- numpy-docs/trunk/summarize.py 2008-11-23 00:13:48 UTC (rev 6095) +++ numpy-docs/trunk/summarize.py 2008-11-23 10:39:05 UTC (rev 6096) @@ -1,167 +0,0 @@ -#!/usr/bin/env python -""" -summarize.py - -Show a summary about which Numpy functions are documented and which are not. 
- -""" - -import os, glob, re, sys, inspect, optparse -sys.path.append(os.path.join(os.path.dirname(__file__), 'ext')) -from ext.phantom_import import import_phantom_module - -from ext.autosummary_generate import get_documented - -CUR_DIR = os.path.dirname(__file__) -SOURCE_DIR = os.path.join(CUR_DIR, 'source', 'reference') - -SKIP_LIST = """ -# --- aliases: -alltrue sometrue bitwise_not cumproduct -row_stack column_stack product rank - -# -- skipped: -core lib f2py dual doc emath ma rec char distutils oldnumeric numarray -testing version matlib - -add_docstring add_newdoc add_newdocs fastCopyAndTranspose pkgload -conjugate disp - -int0 object0 unicode0 uint0 string_ string0 void0 - -flagsobj - -setup setupscons PackageLoader - -lib.scimath.arccos lib.scimath.arcsin lib.scimath.arccosh lib.scimath.arcsinh -lib.scimath.arctanh lib.scimath.log lib.scimath.log2 lib.scimath.log10 -lib.scimath.logn lib.scimath.power lib.scimath.sqrt - -# --- numpy.random: -random random.info random.mtrand random.ranf random.sample random.random - -# --- numpy.fft: -fft fft.Tester fft.bench fft.fftpack fft.fftpack_lite fft.helper -fft.refft fft.refft2 fft.refftn fft.irefft fft.irefft2 fft.irefftn -fft.info fft.test - -# --- numpy.linalg: -linalg linalg.Tester -linalg.bench linalg.info linalg.lapack_lite linalg.linalg linalg.test - -# --- numpy.ctypeslib: -ctypeslib ctypeslib.test - -""".split() - -def main(): - p = optparse.OptionParser(__doc__) - options, args = p.parse_args() - - if len(args) != 0: - p.error('Wrong number of arguments') - - # prepare - fn = os.path.join(CUR_DIR, 'dump.xml') - if os.path.isfile(fn): - import_phantom_module(fn) - - # check - documented, undocumented = check_numpy() - - # report - in_sections = {} - for name, locations in documented.iteritems(): - for (filename, section, keyword, toctree) in locations: - in_sections.setdefault((filename, section, keyword), []).append(name) - - print "Documented" - print "==========\n" - - last_filename = None - for (filename, section, keyword), names in sorted(in_sections.items()): - if filename != last_filename: - print "--- %s\n" % filename - last_filename = filename - print " ** ", section - print format_in_columns(sorted(names)) - print "\n" - - print "" - print "Undocumented" - print "============\n" - print format_in_columns(sorted(undocumented.keys())) - -def check_numpy(): - documented = get_documented(glob.glob(SOURCE_DIR + '/*.rst')) - undocumented = {} - - import numpy, numpy.fft, numpy.linalg, numpy.random - for mod in [numpy, numpy.fft, numpy.linalg, numpy.random, - numpy.ctypeslib, numpy.emath, numpy.ma]: - undocumented.update(get_undocumented(documented, mod, skip=SKIP_LIST)) - - for d in (documented, undocumented): - for k in d.keys(): - if k.startswith('numpy.'): - d[k[6:]] = d[k] - del d[k] - - return documented, undocumented - -def get_undocumented(documented, module, module_name=None, skip=[]): - """ - Find out which items in Numpy are not documented. - - Returns - ------- - undocumented : dict of bool - Dictionary containing True for each documented item name - and False for each undocumented one. 
- - """ - undocumented = {} - - if module_name is None: - module_name = module.__name__ - - for name in dir(module): - obj = getattr(module, name) - if name.startswith('_'): continue - - full_name = '.'.join([module_name, name]) - - if full_name in skip: continue - if full_name.startswith('numpy.') and full_name[6:] in skip: continue - if not (inspect.ismodule(obj) or callable(obj) or inspect.isclass(obj)): - continue - - if full_name not in documented: - undocumented[full_name] = True - - return undocumented - -def format_in_columns(lst): - """ - Format a list containing strings to a string containing the items - in columns. - """ - lst = map(str, lst) - col_len = max(map(len, lst)) + 2 - ncols = 80//col_len - if ncols == 0: - ncols = 1 - - if len(lst) % ncols == 0: - nrows = len(lst)//ncols - else: - nrows = 1 + len(lst)//ncols - - fmt = ' %%-%ds ' % (col_len-2) - - lines = [] - for n in range(nrows): - lines.append("".join([fmt % x for x in lst[n::nrows]])) - return "\n".join(lines) - -if __name__ == "__main__": main() Copied: trunk/doc/Makefile (from rev 6095, numpy-docs/trunk/Makefile) Copied: trunk/doc/README.txt (from rev 6095, numpy-docs/trunk/README.txt) Copied: trunk/doc/postprocess.py (from rev 6095, numpy-docs/trunk/postprocess.py) Copied: trunk/doc/source (from rev 6095, numpy-docs/trunk/source) Copied: trunk/doc/summarize.py (from rev 6095, numpy-docs/trunk/summarize.py) From numpy-svn at scipy.org Sun Nov 23 05:41:39 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 23 Nov 2008 04:41:39 -0600 (CST) Subject: [Numpy-svn] r6097 - / Message-ID: <20081123104139.DC5CF39C088@scipy.org> Author: ptvirtan Date: 2008-11-23 04:41:31 -0600 (Sun, 23 Nov 2008) New Revision: 6097 Removed: numpy-docs/ Log: Remove numpy-docs, now that it lives under numpy trunk. From numpy-svn at scipy.org Sun Nov 23 05:46:26 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 23 Nov 2008 04:46:26 -0600 (CST) Subject: [Numpy-svn] r6098 - trunk/doc Message-ID: <20081123104626.BD21939C088@scipy.org> Author: ptvirtan Date: 2008-11-23 04:46:17 -0600 (Sun, 23 Nov 2008) New Revision: 6098 Modified: trunk/doc/ Log: Set svn:ignore properly under doc/ Property changes on: trunk/doc ___________________________________________________________________ Name: svn:ignore + *.pyc build From numpy-svn at scipy.org Sun Nov 23 19:55:15 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 23 Nov 2008 18:55:15 -0600 (CST) Subject: [Numpy-svn] r6099 - trunk/numpy/core/code_generators Message-ID: <20081124005515.157B639C088@scipy.org> Author: charris Date: 2008-11-23 18:55:10 -0600 (Sun, 23 Nov 2008) New Revision: 6099 Modified: trunk/numpy/core/code_generators/genapi.py Log: Fix missing generic loop declarations. Modified: trunk/numpy/core/code_generators/genapi.py =================================================================== --- trunk/numpy/core/code_generators/genapi.py 2008-11-23 10:46:17 UTC (rev 6098) +++ trunk/numpy/core/code_generators/genapi.py 2008-11-24 00:55:10 UTC (rev 6099) @@ -18,8 +18,7 @@ 'multiarraymodule.c', 'scalartypes.inc.src', 'umath_ufunc_object.inc', - 'umath_funcs.inc.src', - 'umathmodule.c.src' + 'umath_loops.inc.src' ] THIS_DIR = os.path.dirname(__file__) API_FILES = [os.path.join(THIS_DIR, '..', 'src', a) for a in API_FILES] From numpy-svn at scipy.org Mon Nov 24 14:12:10 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 24 Nov 2008 13:12:10 -0600 (CST) Subject: [Numpy-svn] r6100 - in trunk/numpy/core: . 
tests Message-ID: <20081124191210.6502B39C0F1@scipy.org> Author: pierregm Date: 2008-11-24 13:12:05 -0600 (Mon, 24 Nov 2008) New Revision: 6100 Modified: trunk/numpy/core/defmatrix.py trunk/numpy/core/tests/test_defmatrix.py Log: * added ddof to np.matrix.var (bug fix #861) Modified: trunk/numpy/core/defmatrix.py =================================================================== --- trunk/numpy/core/defmatrix.py 2008-11-24 00:55:10 UTC (rev 6099) +++ trunk/numpy/core/defmatrix.py 2008-11-24 19:12:05 UTC (rev 6100) @@ -480,7 +480,7 @@ is taken before squaring, so that the result is always real and nonnegative. """ - return N.ndarray.var(self, axis, dtype, out)._align(axis) + return N.ndarray.var(self, axis, dtype, out, ddof)._align(axis) def prod(self, axis=None, dtype=None, out=None): return N.ndarray.prod(self, axis, dtype, out)._align(axis) Modified: trunk/numpy/core/tests/test_defmatrix.py =================================================================== --- trunk/numpy/core/tests/test_defmatrix.py 2008-11-24 00:55:10 UTC (rev 6099) +++ trunk/numpy/core/tests/test_defmatrix.py 2008-11-24 19:12:05 UTC (rev 6100) @@ -92,6 +92,12 @@ assert all(x.ptp(0) == array([2, 2])) assert all(x.ptp(1) == array([1, 1])) + def test_var(self): + x = np.arange(9).reshape((3,3)) + mx = x.view(np.matrix) + assert_equal(x.var(ddof=0), mx.var(ddof=0)) + assert_equal(x.var(ddof=1), mx.var(ddof=1)) + def test_basic(self): import numpy.linalg as linalg From numpy-svn at scipy.org Tue Nov 25 08:19:56 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 25 Nov 2008 07:19:56 -0600 (CST) Subject: [Numpy-svn] r6101 - branches Message-ID: <20081125131956.AEA9FC7C00B@scipy.org> Author: cdavid Date: 2008-11-25 07:19:51 -0600 (Tue, 25 Nov 2008) New Revision: 6101 Added: branches/dynamic_cpu_configuration/ Log: Start a branch to add "dynamic" cpu configuration (that is set up when compiled against numpy headers instead of harcoding it at build time). 
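Put differently, the goal of the branch is to decide CPU-specific settings from compiler-predefined macros at the moment a client extension is compiled, rather than recording the answer of the machine that built numpy. A minimal sketch of that idea, using only common gcc/MSVC predefined macros (the MYEXT_* names are hypothetical and not taken from the branch):

    /* Build-time detection hardcodes something like
     *     #define WORDS_BIGENDIAN 1
     * into a generated config header when numpy itself is built.
     * Include-time detection asks the compiler that is running now: */
    #if defined(__ppc__) || defined(__powerpc__) || defined(__sparc__)
        #define MYEXT_BIG_ENDIAN 1     /* hypothetical consumer macro */
    #elif defined(__i386__) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
        #define MYEXT_LITTLE_ENDIAN 1  /* hypothetical consumer macro */
    #else
        #error Unknown target CPU, extend the check for your platform
    #endif

The headers added in the following commits (cpuarch.h and npy_endian.h) apply this pattern to numpy's public includes.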
Copied: branches/dynamic_cpu_configuration (from rev 6100, trunk) From numpy-svn at scipy.org Tue Nov 25 08:20:48 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 25 Nov 2008 07:20:48 -0600 (CST) Subject: [Numpy-svn] r6102 - trunk Message-ID: <20081125132048.C899439C088@scipy.org> Author: cdavid Date: 2008-11-25 07:20:45 -0600 (Tue, 25 Nov 2008) New Revision: 6102 Modified: trunk/ Log: Initialized merge tracking via "svnmerge" with revisions "1-6101" from http://svn.scipy.org/svn/numpy/branches/dynamic_cpu_configuration Property changes on: trunk ___________________________________________________________________ Name: svnmerge-integrated - /branches/distutils-revamp:1-2752 /branches/multicore:1-3687 /branches/visualstudio_manifest:1-6077 /trunk:1-2871 + /branches/distutils-revamp:1-2752 /branches/dynamic_cpu_configuration:1-6101 /branches/multicore:1-3687 /branches/visualstudio_manifest:1-6077 /trunk:1-2871 From numpy-svn at scipy.org Tue Nov 25 08:23:14 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 25 Nov 2008 07:23:14 -0600 (CST) Subject: [Numpy-svn] r6103 - branches/dynamic_cpu_configuration Message-ID: <20081125132314.2A8B139C088@scipy.org> Author: cdavid Date: 2008-11-25 07:23:11 -0600 (Tue, 25 Nov 2008) New Revision: 6103 Modified: branches/dynamic_cpu_configuration/ Log: Initialized merge tracking via "svnmerge" with revisions "1-6100" from http://svn.scipy.org/svn/numpy/trunk Property changes on: branches/dynamic_cpu_configuration ___________________________________________________________________ Name: svnmerge-integrated - /branches/distutils-revamp:1-2752 /branches/multicore:1-3687 /branches/visualstudio_manifest:1-6077 /trunk:1-2871 + /branches/distutils-revamp:1-2752 /branches/multicore:1-3687 /branches/visualstudio_manifest:1-6077 /trunk:1-6100 From numpy-svn at scipy.org Tue Nov 25 08:56:53 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 25 Nov 2008 07:56:53 -0600 (CST) Subject: [Numpy-svn] r6104 - branches/dynamic_cpu_configuration/numpy/core/include/numpy Message-ID: <20081125135653.F1E8D39C088@scipy.org> Author: cdavid Date: 2008-11-25 07:56:49 -0600 (Tue, 25 Nov 2008) New Revision: 6104 Added: branches/dynamic_cpu_configuration/numpy/core/include/numpy/cpuarch.h Log: Add a (public) header to detect common CPU archs. Added: branches/dynamic_cpu_configuration/numpy/core/include/numpy/cpuarch.h =================================================================== --- branches/dynamic_cpu_configuration/numpy/core/include/numpy/cpuarch.h 2008-11-25 13:23:11 UTC (rev 6103) +++ branches/dynamic_cpu_configuration/numpy/core/include/numpy/cpuarch.h 2008-11-25 13:56:49 UTC (rev 6104) @@ -0,0 +1,32 @@ +/* + * This set (target) cpu specific macros: + * - NPY_TARGET_CPU: target CPU type + */ +#ifndef _NPY_CPUARCH_H_ +#define _NPY_CPUARCH_H_ + +#if defined ( _i386_ ) || defined( __i386__ ) + /* __i386__ is defined by gcc and Intel compiler on Linux, _i386_ by + VS compiler */ + #define NPY_TARGET_CPU NPY_X86 +#elif defined(__x86_64__) || defined(__amd64__) + /* both __x86_64__ and __amd64__ are defined by gcc */ + #define NPY_TARGET_CPU NPY_AMD64 +#elif defined(__ppc__) || defined(__powerpc__) + /* __ppc__ is defined by gcc, I remember having seen __powerpc__ once, + * but can't find it ATM */ + #define NPY_TARGET_CPU NPY_PPC +#elif defined(__sparc__) || defined(__sparc) + /* __sparc__ is defined by gcc and Forte (e.g. 
Sun) compilers */ + #define NPY_TARGET_CPU NPY_SPARC +#elif defined(__s390__) + #define NPY_TARGET_CPU NPY_S390 +#elif defined(__parisc__) + /* XXX: Not sure about this one... */ + #define NPY_TARGET_CPU NPY_PA_RISC +#else + #error Unknown CPU, please report this to numpy maintainers with \ + information about your platform +#endif + +#endif From numpy-svn at scipy.org Tue Nov 25 08:57:14 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 25 Nov 2008 07:57:14 -0600 (CST) Subject: [Numpy-svn] r6105 - branches/dynamic_cpu_configuration/numpy/core/include/numpy Message-ID: <20081125135714.E98D339C088@scipy.org> Author: cdavid Date: 2008-11-25 07:57:06 -0600 (Tue, 25 Nov 2008) New Revision: 6105 Modified: branches/dynamic_cpu_configuration/numpy/core/include/numpy/cpuarch.h Log: Document the possible values for NPY_TARGET_CPU macro. Modified: branches/dynamic_cpu_configuration/numpy/core/include/numpy/cpuarch.h =================================================================== --- branches/dynamic_cpu_configuration/numpy/core/include/numpy/cpuarch.h 2008-11-25 13:56:49 UTC (rev 6104) +++ branches/dynamic_cpu_configuration/numpy/core/include/numpy/cpuarch.h 2008-11-25 13:57:06 UTC (rev 6105) @@ -1,6 +1,12 @@ /* * This set (target) cpu specific macros: - * - NPY_TARGET_CPU: target CPU type + * - NPY_TARGET_CPU: target CPU type. Possible values: + * NPY_X86 + * NPY_AMD64 + * NPY_PPC + * NPY_SPARC + * NPY_S390 + * NPY_PA_RISC */ #ifndef _NPY_CPUARCH_H_ #define _NPY_CPUARCH_H_ From numpy-svn at scipy.org Tue Nov 25 08:57:35 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 25 Nov 2008 07:57:35 -0600 (CST) Subject: [Numpy-svn] r6106 - branches/dynamic_cpu_configuration/numpy/core/include/numpy Message-ID: <20081125135735.EC0A539C088@scipy.org> Author: cdavid Date: 2008-11-25 07:57:30 -0600 (Tue, 25 Nov 2008) New Revision: 6106 Added: branches/dynamic_cpu_configuration/numpy/core/include/numpy/npy_endian.h Log: Add a (public) header to set cpu endianness when numpy headers are included instead of setting them at build time. Added: branches/dynamic_cpu_configuration/numpy/core/include/numpy/npy_endian.h =================================================================== --- branches/dynamic_cpu_configuration/numpy/core/include/numpy/npy_endian.h 2008-11-25 13:57:06 UTC (rev 6105) +++ branches/dynamic_cpu_configuration/numpy/core/include/numpy/npy_endian.h 2008-11-25 13:57:30 UTC (rev 6106) @@ -0,0 +1,32 @@ +#ifndef _NPY_ENDIAN_H_ +#define _NPY_ENDIAN_H_ + +/* NPY_BYTE_ORDER is set to the same value as BYTE_ORDER set by glibc in + * endian.h */ + +#ifdef NPY_HAVE_ENDIAN_H + /* Use endian.h if available */ + #include + #define NPY_BYTE_ODER __BYTE_ORDER + #if (__BYTE_ORDER == __LITTLE_ENDIAN) + #define NPY_LITTLE_ENDIAN + #elif (__BYTE_ORDER == __BIG_ENDIAN) + #define NPY_BYTE_ODER __BYTE_ORDER + #else + #error Unknown machine endianness detected. 
+ #endif +#else + /* Set endianness info using target CPU */ + #include "cpuarch.h" + + #if defined(NPY_X86) || defined(NPY_AMD64) + #define NPY_LITTLE_ENDIAN + #define NPY_BYTE_ORDER 1234 + #elif defined(NPY_PPC) || defined(NPY_SPARC) || defined(NPY_S390) || \ + defined(NPY_PA_RISC) + #define NPY_BIG_ENDIAN + #define NPY_BYTE_ORDER 4321 + #endif +#endif + +#endif From numpy-svn at scipy.org Tue Nov 25 08:57:57 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 25 Nov 2008 07:57:57 -0600 (CST) Subject: [Numpy-svn] r6107 - branches/dynamic_cpu_configuration/numpy/core/include/numpy Message-ID: <20081125135757.4F2ED39C088@scipy.org> Author: cdavid Date: 2008-11-25 07:57:49 -0600 (Tue, 25 Nov 2008) New Revision: 6107 Modified: branches/dynamic_cpu_configuration/numpy/core/include/numpy/ndarrayobject.h Log: Use npy_endian.h to detect endianness of CPU. We avoid using WORDS_ENDIAN (as set by python headers themselves) because its value is set at python build time, and hence does not work for situations like fat builds on Mac OS X: if the fat binary is built on PPC, WORDS_ENDIAN is defined to 1 in python headers, even on Intel. Instead, we use npy_endian.h, which is not set at numpy build time, but set everytime npy_endian.h is included (using cpu-specific macro). Modified: branches/dynamic_cpu_configuration/numpy/core/include/numpy/ndarrayobject.h =================================================================== --- branches/dynamic_cpu_configuration/numpy/core/include/numpy/ndarrayobject.h 2008-11-25 13:57:30 UTC (rev 6106) +++ branches/dynamic_cpu_configuration/numpy/core/include/numpy/ndarrayobject.h 2008-11-25 13:57:49 UTC (rev 6107) @@ -16,6 +16,8 @@ /* This is auto-generated by the installer */ #include "numpyconfig.h" +#include "npy_endian.h" + /* Only use thread if configured in config and python supports it */ #if defined WITH_THREAD && !NPY_NO_SMP #define NPY_ALLOW_THREADS 1 @@ -1793,7 +1795,7 @@ #define NPY_SWAP 's' #define NPY_IGNORE '|' -#ifdef WORDS_BIGENDIAN +#ifdef NPY_BIG_ENDIAN #define NPY_NATBYTE NPY_BIG #define NPY_OPPBYTE NPY_LITTLE #else From numpy-svn at scipy.org Tue Nov 25 21:14:00 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 25 Nov 2008 20:14:00 -0600 (CST) Subject: [Numpy-svn] r6108 - in trunk/numpy/ma: . 
tests Message-ID: <20081126021400.4272939C088@scipy.org> Author: pierregm Date: 2008-11-25 20:13:57 -0600 (Tue, 25 Nov 2008) New Revision: 6108 Modified: trunk/numpy/ma/core.py trunk/numpy/ma/tests/test_core.py Log: * added ma.diag * added copy, cumprod, cumsum, harden_mask, prod, round, soften_mask, squeeze to the namespace * TEMPORARILY fixed a pb of compatibility with python 2.6 (involvingin(np.nan)) Modified: trunk/numpy/ma/core.py =================================================================== --- trunk/numpy/ma/core.py 2008-11-25 13:57:49 UTC (rev 6107) +++ trunk/numpy/ma/core.py 2008-11-26 02:13:57 UTC (rev 6108) @@ -28,13 +28,14 @@ 'array', 'asarray','asanyarray', 'bitwise_and', 'bitwise_or', 'bitwise_xor', 'ceil', 'choose', 'clip', 'common_fill_value', 'compress', - 'compressed', 'concatenate', 'conjugate', 'cos', 'cosh', 'count', - 'default_fill_value', 'diagonal', 'divide', 'dump', 'dumps', + 'compressed', 'concatenate', 'conjugate', 'copy', 'cos', 'cosh', + 'count', 'cumprod', 'cumsum', + 'default_fill_value', 'diag', 'diagonal', 'divide', 'dump', 'dumps', 'empty', 'empty_like', 'equal', 'exp', 'fabs', 'fmod', 'filled', 'floor', 'floor_divide','fix_invalid', 'frombuffer', 'fromfunction', 'getdata','getmask', 'getmaskarray', 'greater', 'greater_equal', - 'hypot', + 'harden_mask', 'hypot', 'identity', 'ids', 'indices', 'inner', 'innerproduct', 'isMA', 'isMaskedArray', 'is_mask', 'is_masked', 'isarray', 'left_shift', 'less', 'less_equal', 'load', 'loads', 'log', 'log10', @@ -49,12 +50,13 @@ 'mod', 'multiply', 'negative', 'nomask', 'nonzero', 'not_equal', 'ones', 'outer', 'outerproduct', - 'power', 'product', 'ptp', 'put', 'putmask', + 'power', 'prod', 'product', 'ptp', 'put', 'putmask', 'rank', 'ravel', 'remainder', 'repeat', 'reshape', 'resize', - 'right_shift', 'round_', - 'set_fill_value', 'shape', 'sin', 'sinh', 'size', 'sometrue', 'sort', - 'sqrt', 'std', 'subtract', 'sum', 'swapaxes', - 'take', 'tan', 'tanh', 'transpose', 'true_divide', + 'right_shift', 'round_', 'round', + 'set_fill_value', 'shape', 'sin', 'sinh', 'size', 'sometrue', + 'sort', 'soften_mask', 'sqrt', 'squeeze', 'std', 'subtract', 'sum', + 'swapaxes', + 'take', 'tan', 'tanh', 'trace', 'transpose', 'true_divide', 'var', 'where', 'zeros'] @@ -2668,8 +2670,8 @@ have the same shape and buffer length as the expected output but the type will be cast if necessary. - Warning - ------- + Warnings + -------- The mask is lost if out is not a valid :class:`MaskedArray` ! Returns @@ -2678,8 +2680,8 @@ A new array holding the result is returned unless ``out`` is specified, in which case a reference to ``out`` is returned. 
- Example - ------- + Examples + -------- >>> marr = np.ma.array(np.arange(10), mask=[0,0,0,1,1,1,0,0,0,0]) >>> print marr.cumsum() [0 1 3 -- -- -- 9 16 24 33] @@ -3189,7 +3191,11 @@ outmask = out._mask = make_mask_none(out.shape) outmask.flat = newmask else: - np.putmask(out, newmask, np.nan) + if out.dtype < np.dtype(float): + filler = -9999 + else: + filler = np.nan + np.putmask(out, newmask, filler) return out def mini(self, axis=None): @@ -3251,7 +3257,11 @@ outmask = out._mask = make_mask_none(out.shape) outmask.flat = newmask else: - np.putmask(out, newmask, np.nan) + if out.dtype < np.dtype(float): + filler = -9999 + else: + filler = np.nan + np.putmask(out, newmask, filler) return out def ptp(self, axis=None, out=None, fill_value=None): @@ -3657,25 +3667,32 @@ all = _frommethod('all') anomalies = anom = _frommethod('anom') any = _frommethod('any') +compress = _frommethod('compress') conjugate = _frommethod('conjugate') +cumprod = _frommethod('cumprod') +cumsum = _frommethod('cumsum') +copy = _frommethod('copy') +diagonal = _frommethod('diagonal') +harden_mask = _frommethod('harden_mask') ids = _frommethod('ids') -nonzero = _frommethod('nonzero') -diagonal = _frommethod('diagonal') maximum = _maximum_operation() mean = _frommethod('mean') minimum = _minimum_operation () +nonzero = _frommethod('nonzero') +prod = _frommethod('prod') product = _frommethod('prod') ptp = _frommethod('ptp') ravel = _frommethod('ravel') repeat = _frommethod('repeat') round = _frommethod('round') +shrink_mask = _frommethod('shrink_mask') +soften_mask = _frommethod('soften_mask') std = _frommethod('std') sum = _frommethod('sum') swapaxes = _frommethod('swapaxes') take = _frommethod('take') trace = _frommethod('trace') var = _frommethod('var') -compress = _frommethod('compress') #.............................................................................. def power(a, b, third=None): @@ -3825,6 +3842,40 @@ count.__doc__ = MaskedArray.count.__doc__ +def diag(v, k=0): + """ + Extract a diagonal or construct a diagonal array. + + Parameters + ---------- + v : array_like + If `v` is a 2-dimensional array, return a copy of + its `k`-th diagonal. If `v` is a 1-dimensional array, + return a 2-dimensional array with `v` on the `k`-th diagonal. + k : int, optional + Diagonal in question. The defaults is 0. 
+ + Examples + -------- + >>> x = np.arange(9).reshape((3,3)) + >>> x + array([[0, 1, 2], + [3, 4, 5], + [6, 7, 8]]) + >>> np.diag(x) + array([0, 4, 8]) + >>> np.diag(np.diag(x)) + array([[0, 0, 0], + [0, 4, 0], + [0, 0, 8]]) + + """ + output = np.diag(v, k).view(MaskedArray) + if getmask(v) is not nomask: + output._mask = np.diag(v._mask, k) + return output + + def expand_dims(x, axis): """ Expand the shape of the array by including a new axis before @@ -4348,5 +4399,6 @@ indices = np.indices ones = _convert2ma('ones') zeros = _convert2ma('zeros') +squeeze = np.squeeze ############################################################################### Modified: trunk/numpy/ma/tests/test_core.py =================================================================== --- trunk/numpy/ma/tests/test_core.py 2008-11-25 13:57:49 UTC (rev 6107) +++ trunk/numpy/ma/tests/test_core.py 2008-11-26 02:13:57 UTC (rev 6108) @@ -1952,6 +1952,22 @@ _ = method(out=nout, ddof=1) self.failUnless(np.isnan(nout)) + + def test_diag(self): + "Test diag" + x = arange(9).reshape((3,3)) + x[1,1] = masked + out = np.diag(x) + assert_equal(out, [0, 4, 8]) + out = diag(x) + assert_equal(out, [0, 4, 8]) + assert_equal(out.mask, [0, 1, 0]) + out = diag(out) + control = array([[0, 0, 0], [0, 4, 0], [0, 0, 8]], + mask = [[0, 0, 0], [0, 1, 0], [0, 0, 0]]) + assert_equal(out, control) + + #------------------------------------------------------------------------------ class TestMaskedArrayMathMethodsComplex(TestCase): From numpy-svn at scipy.org Tue Nov 25 21:17:08 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 25 Nov 2008 20:17:08 -0600 (CST) Subject: [Numpy-svn] r6109 - in branches/1.2.x/numpy/ma: . tests Message-ID: <20081126021708.4B48F39C088@scipy.org> Author: pierregm Date: 2008-11-25 20:17:06 -0600 (Tue, 25 Nov 2008) New Revision: 6109 Modified: branches/1.2.x/numpy/ma/core.py branches/1.2.x/numpy/ma/tests/test_core.py Log: * added ma.diag * added copy, cumprod, cumsum, harden_mask, prod, round, soften_mask, squeeze to the namespace Modified: branches/1.2.x/numpy/ma/core.py =================================================================== --- branches/1.2.x/numpy/ma/core.py 2008-11-26 02:13:57 UTC (rev 6108) +++ branches/1.2.x/numpy/ma/core.py 2008-11-26 02:17:06 UTC (rev 6109) @@ -27,13 +27,14 @@ 'array', 'asarray','asanyarray', 'bitwise_and', 'bitwise_or', 'bitwise_xor', 'ceil', 'choose', 'clip', 'common_fill_value', 'compress', - 'compressed', 'concatenate', 'conjugate', 'cos', 'cosh', 'count', - 'default_fill_value', 'diagonal', 'divide', 'dump', 'dumps', + 'compressed', 'concatenate', 'conjugate', 'copy', 'cos', 'cosh', + 'count', 'cumprod', 'cumsum', + 'default_fill_value', 'diag', 'diagonal', 'divide', 'dump', 'dumps', 'empty', 'empty_like', 'equal', 'exp', 'fabs', 'fmod', 'filled', 'floor', 'floor_divide','fix_invalid', 'frombuffer', 'fromfunction', 'getdata','getmask', 'getmaskarray', 'greater', 'greater_equal', - 'hypot', + 'harden_mask', 'hypot', 'identity', 'ids', 'indices', 'inner', 'innerproduct', 'isMA', 'isMaskedArray', 'is_mask', 'is_masked', 'isarray', 'left_shift', 'less', 'less_equal', 'load', 'loads', 'log', 'log10', @@ -48,12 +49,13 @@ 'mod', 'multiply', 'negative', 'nomask', 'nonzero', 'not_equal', 'ones', 'outer', 'outerproduct', - 'power', 'product', 'ptp', 'put', 'putmask', + 'power', 'prod', 'product', 'ptp', 'put', 'putmask', 'rank', 'ravel', 'remainder', 'repeat', 'reshape', 'resize', - 'right_shift', 'round_', - 'set_fill_value', 'shape', 'sin', 'sinh', 'size', 'sometrue', 
'sort', - 'sqrt', 'std', 'subtract', 'sum', 'swapaxes', - 'take', 'tan', 'tanh', 'transpose', 'true_divide', + 'right_shift', 'round_', 'round', + 'set_fill_value', 'shape', 'sin', 'sinh', 'size', 'sometrue', + 'sort', 'soften_mask', 'sqrt', 'squeeze', 'std', 'subtract', 'sum', + 'swapaxes', + 'take', 'tan', 'tanh', 'trace', 'transpose', 'true_divide', 'var', 'where', 'zeros'] @@ -2667,8 +2669,8 @@ have the same shape and buffer length as the expected output but the type will be cast if necessary. - Warning - ------- + Warnings + -------- The mask is lost if out is not a valid :class:`MaskedArray` ! Returns @@ -2677,8 +2679,8 @@ A new array holding the result is returned unless ``out`` is specified, in which case a reference to ``out`` is returned. - Example - ------- + Examples + -------- >>> marr = np.ma.array(np.arange(10), mask=[0,0,0,1,1,1,0,0,0,0]) >>> print marr.cumsum() [0 1 3 -- -- -- 9 16 24 33] @@ -3656,25 +3658,32 @@ all = _frommethod('all') anomalies = anom = _frommethod('anom') any = _frommethod('any') +compress = _frommethod('compress') conjugate = _frommethod('conjugate') +cumprod = _frommethod('cumprod') +cumsum = _frommethod('cumsum') +copy = _frommethod('copy') +diagonal = _frommethod('diagonal') +harden_mask = _frommethod('harden_mask') ids = _frommethod('ids') -nonzero = _frommethod('nonzero') -diagonal = _frommethod('diagonal') maximum = _maximum_operation() mean = _frommethod('mean') minimum = _minimum_operation () +nonzero = _frommethod('nonzero') +prod = _frommethod('prod') product = _frommethod('prod') ptp = _frommethod('ptp') ravel = _frommethod('ravel') repeat = _frommethod('repeat') round = _frommethod('round') +shrink_mask = _frommethod('shrink_mask') +soften_mask = _frommethod('soften_mask') std = _frommethod('std') sum = _frommethod('sum') swapaxes = _frommethod('swapaxes') take = _frommethod('take') trace = _frommethod('trace') var = _frommethod('var') -compress = _frommethod('compress') #.............................................................................. def power(a, b, third=None): @@ -3824,6 +3833,40 @@ count.__doc__ = MaskedArray.count.__doc__ +def diag(v, k=0): + """ + Extract a diagonal or construct a diagonal array. + + Parameters + ---------- + v : array_like + If `v` is a 2-dimensional array, return a copy of + its `k`-th diagonal. If `v` is a 1-dimensional array, + return a 2-dimensional array with `v` on the `k`-th diagonal. + k : int, optional + Diagonal in question. The defaults is 0. 
+ + Examples + -------- + >>> x = np.arange(9).reshape((3,3)) + >>> x + array([[0, 1, 2], + [3, 4, 5], + [6, 7, 8]]) + >>> np.diag(x) + array([0, 4, 8]) + >>> np.diag(np.diag(x)) + array([[0, 0, 0], + [0, 4, 0], + [0, 0, 8]]) + + """ + output = np.diag(v, k).view(MaskedArray) + if getmask(v) is not nomask: + output._mask = np.diag(v._mask, k) + return output + + def expand_dims(x, axis): """ Expand the shape of the array by including a new axis before @@ -4347,5 +4390,6 @@ indices = np.indices ones = _convert2ma('ones') zeros = _convert2ma('zeros') +squeeze = np.squeeze ############################################################################### Modified: branches/1.2.x/numpy/ma/tests/test_core.py =================================================================== --- branches/1.2.x/numpy/ma/tests/test_core.py 2008-11-26 02:13:57 UTC (rev 6108) +++ branches/1.2.x/numpy/ma/tests/test_core.py 2008-11-26 02:17:06 UTC (rev 6109) @@ -752,9 +752,19 @@ assert_equal(np.sum(x,1), sum(x,1)) assert_equal(np.product(x,1), product(x,1)) + def test_mod(self): + "Tests mod" + (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + assert_equal(mod(x, y), mod(xm, ym)) + test = mod(ym, xm) + assert_equal(test, np.mod(ym, xm)) + assert_equal(test.mask, mask_or(xm.mask, ym.mask)) + test = mod(xm, ym) + assert_equal(test, np.mod(xm, ym)) + assert_equal(test.mask, mask_or(mask_or(xm.mask, ym.mask), (ym == 0))) + - def test_TakeTransposeInnerOuter(self): "Test of take, transpose, inner, outer products" x = arange(24) @@ -1942,6 +1952,22 @@ _ = method(out=nout, ddof=1) self.failUnless(np.isnan(nout)) + + def test_diag(self): + "Test diag" + x = arange(9).reshape((3,3)) + x[1,1] = masked + out = np.diag(x) + assert_equal(out, [0, 4, 8]) + out = diag(x) + assert_equal(out, [0, 4, 8]) + assert_equal(out.mask, [0, 1, 0]) + out = diag(out) + control = array([[0, 0, 0], [0, 4, 0], [0, 0, 8]], + mask = [[0, 0, 0], [0, 1, 0], [0, 0, 0]]) + assert_equal(out, control) + + #------------------------------------------------------------------------------ class TestMaskedArrayMathMethodsComplex(TestCase): From numpy-svn at scipy.org Wed Nov 26 23:29:50 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 26 Nov 2008 22:29:50 -0600 (CST) Subject: [Numpy-svn] r6110 - trunk/numpy/ma Message-ID: <20081127042950.7E4BA39C088@scipy.org> Author: pierregm Date: 2008-11-26 22:29:43 -0600 (Wed, 26 Nov 2008) New Revision: 6110 Modified: trunk/numpy/ma/core.py trunk/numpy/ma/extras.py Log: * Added get_object_signature to fix missing signatures * Fixed .getdoc from _arraymethod, _frommethod, _convert2ma, _fromnxfunction * Fixed the docstrings of .trace, .mean, .argsort, .sort * Suppressed duplicated conjugate, ptp, round, expand_dims, apply_along_axis, compress_rowcols, mask_rowcols, vander, polyfit Modified: trunk/numpy/ma/core.py =================================================================== --- trunk/numpy/ma/core.py 2008-11-26 02:17:06 UTC (rev 6109) +++ trunk/numpy/ma/core.py 2008-11-27 04:29:43 UTC (rev 6110) @@ -31,7 +31,7 @@ 'compressed', 'concatenate', 'conjugate', 'copy', 'cos', 'cosh', 'count', 'cumprod', 'cumsum', 'default_fill_value', 'diag', 'diagonal', 'divide', 'dump', 'dumps', - 'empty', 'empty_like', 'equal', 'exp', + 'empty', 'empty_like', 'equal', 'exp', 'expand_dims', 'fabs', 'fmod', 'filled', 'floor', 'floor_divide','fix_invalid', 'frombuffer', 'fromfunction', 'getdata','getmask', 'getmaskarray', 'greater', 'greater_equal', @@ -97,6 +97,22 @@ """ return newdoc % (initialdoc, note) +def 
get_object_signature(obj): + """ + Get the signature from obj + """ + import inspect + try: + sig = inspect.formatargspec(*inspect.getargspec(obj)) + except TypeError, errmsg: + msg = "Unable to retrieve the signature of %s '%s'\n"\ + "(Initial error message: %s)" +# warnings.warn(msg % (type(obj), +# getattr(obj, '__name__', '???'), +# errmsg)) + sig = '' + return sig + #####-------------------------------------------------------------------------- #---- --- Exceptions --- #####-------------------------------------------------------------------------- @@ -1210,8 +1226,8 @@ # def getdoc(self): "Return the doc of the function (from the doc of the method)." - methdoc = getattr(ndarray, self.__name__, None) - methdoc = getattr(np, self.__name__, methdoc) + methdoc = getattr(ndarray, self.__name__, None) or \ + getattr(np, self.__name__, None) if methdoc is not None: return methdoc.__doc__ # @@ -2562,9 +2578,7 @@ def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None): """ - Return the sum along the offset diagonal of the array's - indicated `axis1` and `axis2`. - + (this docstring should be overwritten) """ #!!!: implement out + test! m = self._mask @@ -2575,8 +2589,8 @@ else: D = self.diagonal(offset=offset, axis1=axis1, axis2=axis2) return D.astype(dtype).filled(0).sum(axis=None, out=out) + trace.__doc__ = ndarray.trace.__doc__ - def sum(self, axis=None, dtype=None, out=None): """ Return the sum of the array elements over the given axis. @@ -2826,7 +2840,14 @@ def mean(self, axis=None, dtype=None, out=None): - "" + """ + Returns the average of the array elements along given axis. + Refer to `numpy.mean` for full documentation. + + See Also + -------- + numpy.mean : equivalent function' + """ if self._mask is nomask: result = super(MaskedArray, self).mean(axis=axis, dtype=dtype) else: @@ -2842,7 +2863,6 @@ outmask.flat = getattr(result, '_mask', nomask) return out return result - mean.__doc__ = ndarray.mean.__doc__ def anom(self, axis=None, dtype=None): """ @@ -2939,48 +2959,37 @@ #............................................ def argsort(self, axis=None, fill_value=None, kind='quicksort', order=None): - """Return an ndarray of indices that sort the array along the - specified axis. Masked values are filled beforehand to - fill_value. + """ + Return an ndarray of indices that sort the array along the + specified axis. Masked values are filled beforehand to + fill_value. - Parameters - ---------- - axis : int, optional - Axis to be indirectly sorted. - If not given, uses a flatten version of the array. - fill_value : {var} - Value used to fill in the masked values. - If not given, self.fill_value is used instead. - kind : {string} - Sorting algorithm (default 'quicksort') - Possible values: 'quicksort', 'mergesort', or 'heapsort' + Parameters + ---------- + axis : int, optional + Axis along which to sort. If not given, the flattened array is used. + kind : {'quicksort', 'mergesort', 'heapsort'}, optional + Sorting algorithm. + order : list, optional + When `a` is an array with fields defined, this argument specifies + which fields to compare first, second, etc. Not all fields need be + specified. + Returns + ------- + index_array : ndarray, int + Array of indices that sort `a` along the specified axis. + In other words, ``a[index_array]`` yields a sorted `a`. + + See Also + -------- + sort : Describes sorting algorithms used. + lexsort : Indirect stable sort with multiple keys. + ndarray.sort : Inplace sort. 
- Notes - ----- - This method executes an indirect sort along the given axis - using the algorithm specified by the kind keyword. It returns - an array of indices of the same shape as 'a' that index data - along the given axis in sorted order. + Notes + ----- + See `sort` for notes on the different sorting algorithms. - The various sorts are characterized by average speed, worst - case performance need for work space, and whether they are - stable. A stable sort keeps items with the same key in the - same relative order. The three available algorithms have the - following properties: - - |------------------------------------------------------| - | kind | speed | worst case | work space | stable| - |------------------------------------------------------| - |'quicksort'| 1 | O(n^2) | 0 | no | - |'mergesort'| 2 | O(n*log(n)) | ~n/2 | yes | - |'heapsort' | 3 | O(n*log(n)) | 0 | no | - |------------------------------------------------------| - - All the sort algorithms make temporary copies of the data when - the sort is not along the last axis. Consequently, sorts along - the last axis are faster and use less space than sorts along - other axis. - """ if fill_value is None: fill_value = default_fill_value(self) @@ -3071,19 +3080,21 @@ def sort(self, axis=-1, kind='quicksort', order=None, endwith=True, fill_value=None): """ - Sort along the given axis. + Return a sorted copy of an array. Parameters ---------- - axis : {int}, optional - Axis to be indirectly sorted. - kind : {'quicksort', 'mergesort', or 'heapsort'}, optional - Sorting algorithm (default 'quicksort') - Possible values: 'quicksort', 'mergesort', or 'heapsort'. - order : {None, var} - If a has fields defined, then the order keyword can be the field name - to sort on or a list (or tuple) of field names to indicate the order - that fields should be used to define the sort. + a : array_like + Array to be sorted. + axis : int or None, optional + Axis along which to sort. If None, the array is flattened before + sorting. The default is -1, which sorts along the last axis. + kind : {'quicksort', 'mergesort', 'heapsort'}, optional + Sorting algorithm. Default is 'quicksort'. + order : list, optional + When `a` is a structured array, this argument specifies which fields + to compare first, second, and so on. This list does not need to + include all of the fields. endwith : {True, False}, optional Whether missing values (if any) should be forced in the upper indices (at the end of the array) (True) or lower indices (at the beginning). @@ -3093,30 +3104,68 @@ Returns ------- - - When used as method, returns None. - - When used as a function, returns an array. + sorted_array : ndarray + Array of the same type and shape as `a`. + See Also + -------- + ndarray.sort : Method to sort an array in-place. + argsort : Indirect sort. + lexsort : Indirect stable sort on multiple keys. + searchsorted : Find elements in a sorted array. + Notes ----- - This method sorts 'a' in place along the given axis using - the algorithm specified by the kind keyword. + The various sorting algorithms are characterized by their average speed, + worst case performance, work space size, and whether they are stable. A + stable sort keeps items with the same key in the same relative + order. The three available algorithms have the following + properties: - The various sorts may characterized by average speed, - worst case performance need for work space, and whether - they are stable. 
A stable sort keeps items with the same - key in the same relative order and is most useful when - used w/ argsort where the key might differ from the items - being sorted. The three available algorithms have the - following properties: + =========== ======= ============= ============ ======= + kind speed worst case work space stable + =========== ======= ============= ============ ======= + 'quicksort' 1 O(n^2) 0 no + 'mergesort' 2 O(n*log(n)) ~n/2 yes + 'heapsort' 3 O(n*log(n)) 0 no + =========== ======= ============= ============ ======= - |------------------------------------------------------| - | kind | speed | worst case | work space | stable| - |------------------------------------------------------| - |'quicksort'| 1 | O(n^2) | 0 | no | - |'mergesort'| 2 | O(n*log(n)) | ~n/2 | yes | - |'heapsort' | 3 | O(n*log(n)) | 0 | no | - |------------------------------------------------------| + All the sort algorithms make temporary copies of the data when + sorting along any but the last axis. Consequently, sorting along + the last axis is faster and uses less space than sorting along + any other axis. + Examples + -------- + >>> a = np.array([[1,4],[3,1]]) + >>> np.sort(a) # sort along the last axis + array([[1, 4], + [1, 3]]) + >>> np.sort(a, axis=None) # sort the flattened array + array([1, 1, 3, 4]) + >>> np.sort(a, axis=0) # sort along the first axis + array([[1, 1], + [3, 4]]) + + Use the `order` keyword to specify a field to use when sorting a + structured array: + + >>> dtype = [('name', 'S10'), ('height', float), ('age', int)] + >>> values = [('Arthur', 1.8, 41), ('Lancelot', 1.9, 38), + ... ('Galahad', 1.7, 38)] + >>> a = np.array(values, dtype=dtype) # create a structured array + >>> np.sort(a, order='height') # doctest: +SKIP + array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41), + ('Lancelot', 1.8999999999999999, 38)], + dtype=[('name', '|S10'), ('height', '>> np.sort(a, order=['age', 'height']) # doctest: +SKIP + array([('Galahad', 1.7, 38), ('Lancelot', 1.8999999999999999, 38), + ('Arthur', 1.8, 41)], + dtype=[('name', '|S10'), ('height', '>> import warnings - >>> warnings.simplefilter('ignore',np.RankWarning) - - - See Also - -------- - polyval : computes polynomial values. - - Notes - ----- - If X is a the Vandermonde Matrix computed from x (see - http://mathworld.wolfram.com/VandermondeMatrix.html), then the - polynomial least squares solution is given by the 'p' in - - X*p = y - - where X.shape is a matrix of dimensions (len(x), deg + 1), p is a vector of - dimensions (deg + 1, 1), and y is a vector of dimensions (len(x), 1). - - This equation can be solved as - - p = (XT*X)^-1 * XT * y - - where XT is the transpose of X and -1 denotes the inverse. However, this - method is susceptible to rounding errors and generally the singular value - decomposition of the matrix X is preferred and that is what is done here. - The singular value method takes a paramenter, 'rcond', which sets a limit on - the relative size of the smallest singular value to be used in solving the - equation. This may result in lowering the rank of the Vandermonde matrix, in - which case a RankWarning is issued. If polyfit issues a RankWarning, try a - fit of lower degree or replace x by x - x.mean(), both of which will - generally improve the condition number. The routine already normalizes the - vector x by its maximum absolute value to help in this regard. The rcond - parameter can be set to a value smaller than its default, but the resulting - fit may be spurious. 
The current default value of rcond is len(x)*eps, where - eps is the relative precision of the floating type being used, generally - around 1e-7 and 2e-16 for IEEE single and double precision respectively. - This value of rcond is fairly conservative but works pretty well when x - - x.mean() is used in place of x. - - - DISCLAIMER: Power series fits are full of pitfalls for the unwary once the - degree of the fit becomes large or the interval of sample points is badly - centered. The problem is that the powers x**n are generally a poor basis for - the polynomial functions on the sample interval, resulting in a Vandermonde - matrix is ill conditioned and coefficients sensitive to rounding erros. The - computation of the polynomial values will also sensitive to rounding errors. - Consequently, the quality of the polynomial fit should be checked against - the data whenever the condition number is large. The quality of polynomial - fits *can not* be taken for granted. If all you want to do is draw a smooth - curve through the y values and polyfit is not doing the job, try centering - the sample range or look into scipy.interpolate, which includes some nice - spline fitting functions that may be of use. - - For more info, see - http://mathworld.wolfram.com/LeastSquaresFittingPolynomial.html, - but note that the k's and n's in the superscripts and subscripts - on that page. The linear algebra is correct, however. - - - - Notes - ----- - Any masked values in x is propagated in y, and vice-versa. - + Any masked values in x is propagated in y, and vice-versa. """ order = int(deg) + 1 x = asarray(x) @@ -1159,5 +1047,6 @@ return c, resids, rank, s, rcond else : return c +polyfit.__doc__ = ma.doc_note(np.polyfit.__doc__, polyfit.__doc__) ################################################################################ From numpy-svn at scipy.org Wed Nov 26 23:29:53 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 26 Nov 2008 22:29:53 -0600 (CST) Subject: [Numpy-svn] r6111 - branches/1.2.x/numpy/ma Message-ID: <20081127042953.D157739C088@scipy.org> Author: pierregm Date: 2008-11-26 22:29:51 -0600 (Wed, 26 Nov 2008) New Revision: 6111 Modified: branches/1.2.x/numpy/ma/core.py branches/1.2.x/numpy/ma/extras.py Log: * Added get_object_signature to fix missing signatures * Fixed .getdoc from _arraymethod, _frommethod, _convert2ma, _fromnxfunction * Fixed the docstrings of .trace, .mean, .argsort, .sort * Suppressed duplicated conjugate, ptp, round, expand_dims, apply_along_axis, compress_rowcols, mask_rowcols, vander, polyfit Modified: branches/1.2.x/numpy/ma/core.py =================================================================== --- branches/1.2.x/numpy/ma/core.py 2008-11-27 04:29:43 UTC (rev 6110) +++ branches/1.2.x/numpy/ma/core.py 2008-11-27 04:29:51 UTC (rev 6111) @@ -30,7 +30,7 @@ 'compressed', 'concatenate', 'conjugate', 'copy', 'cos', 'cosh', 'count', 'cumprod', 'cumsum', 'default_fill_value', 'diag', 'diagonal', 'divide', 'dump', 'dumps', - 'empty', 'empty_like', 'equal', 'exp', + 'empty', 'empty_like', 'equal', 'exp', 'expand_dims', 'fabs', 'fmod', 'filled', 'floor', 'floor_divide','fix_invalid', 'frombuffer', 'fromfunction', 'getdata','getmask', 'getmaskarray', 'greater', 'greater_equal', @@ -96,6 +96,22 @@ """ return newdoc % (initialdoc, note) +def get_object_signature(obj): + """ + Get the signature from obj + """ + import inspect + try: + sig = inspect.formatargspec(*inspect.getargspec(obj)) + except TypeError, errmsg: + msg = "Unable to retrieve the signature of %s 
'%s'\n"\ + "(Initial error message: %s)" +# warnings.warn(msg % (type(obj), +# getattr(obj, '__name__', '???'), +# errmsg)) + sig = '' + return sig + #####-------------------------------------------------------------------------- #---- --- Exceptions --- #####-------------------------------------------------------------------------- @@ -1209,8 +1225,8 @@ # def getdoc(self): "Return the doc of the function (from the doc of the method)." - methdoc = getattr(ndarray, self.__name__, None) - methdoc = getattr(np, self.__name__, methdoc) + methdoc = getattr(ndarray, self.__name__, None) or \ + getattr(np, self.__name__, None) if methdoc is not None: return methdoc.__doc__ # @@ -2561,9 +2577,7 @@ def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None): """ - Return the sum along the offset diagonal of the array's - indicated `axis1` and `axis2`. - + (this docstring should be overwritten) """ #!!!: implement out + test! m = self._mask @@ -2574,8 +2588,8 @@ else: D = self.diagonal(offset=offset, axis1=axis1, axis2=axis2) return D.astype(dtype).filled(0).sum(axis=None, out=out) + trace.__doc__ = ndarray.trace.__doc__ - def sum(self, axis=None, dtype=None, out=None): """ Return the sum of the array elements over the given axis. @@ -2825,7 +2839,14 @@ def mean(self, axis=None, dtype=None, out=None): - "" + """ + Returns the average of the array elements along given axis. + Refer to `numpy.mean` for full documentation. + + See Also + -------- + numpy.mean : equivalent function' + """ if self._mask is nomask: result = super(MaskedArray, self).mean(axis=axis, dtype=dtype) else: @@ -2841,7 +2862,6 @@ outmask.flat = getattr(result, '_mask', nomask) return out return result - mean.__doc__ = ndarray.mean.__doc__ def anom(self, axis=None, dtype=None): """ @@ -2938,48 +2958,37 @@ #............................................ def argsort(self, axis=None, fill_value=None, kind='quicksort', order=None): - """Return an ndarray of indices that sort the array along the - specified axis. Masked values are filled beforehand to - fill_value. + """ + Return an ndarray of indices that sort the array along the + specified axis. Masked values are filled beforehand to + fill_value. - Parameters - ---------- - axis : int, optional - Axis to be indirectly sorted. - If not given, uses a flatten version of the array. - fill_value : {var} - Value used to fill in the masked values. - If not given, self.fill_value is used instead. - kind : {string} - Sorting algorithm (default 'quicksort') - Possible values: 'quicksort', 'mergesort', or 'heapsort' + Parameters + ---------- + axis : int, optional + Axis along which to sort. If not given, the flattened array is used. + kind : {'quicksort', 'mergesort', 'heapsort'}, optional + Sorting algorithm. + order : list, optional + When `a` is an array with fields defined, this argument specifies + which fields to compare first, second, etc. Not all fields need be + specified. + Returns + ------- + index_array : ndarray, int + Array of indices that sort `a` along the specified axis. + In other words, ``a[index_array]`` yields a sorted `a`. + + See Also + -------- + sort : Describes sorting algorithms used. + lexsort : Indirect stable sort with multiple keys. + ndarray.sort : Inplace sort. - Notes - ----- - This method executes an indirect sort along the given axis - using the algorithm specified by the kind keyword. It returns - an array of indices of the same shape as 'a' that index data - along the given axis in sorted order. 
+ Notes + ----- + See `sort` for notes on the different sorting algorithms. - The various sorts are characterized by average speed, worst - case performance need for work space, and whether they are - stable. A stable sort keeps items with the same key in the - same relative order. The three available algorithms have the - following properties: - - |------------------------------------------------------| - | kind | speed | worst case | work space | stable| - |------------------------------------------------------| - |'quicksort'| 1 | O(n^2) | 0 | no | - |'mergesort'| 2 | O(n*log(n)) | ~n/2 | yes | - |'heapsort' | 3 | O(n*log(n)) | 0 | no | - |------------------------------------------------------| - - All the sort algorithms make temporary copies of the data when - the sort is not along the last axis. Consequently, sorts along - the last axis are faster and use less space than sorts along - other axis. - """ if fill_value is None: fill_value = default_fill_value(self) @@ -3070,19 +3079,21 @@ def sort(self, axis=-1, kind='quicksort', order=None, endwith=True, fill_value=None): """ - Sort along the given axis. + Return a sorted copy of an array. Parameters ---------- - axis : {int}, optional - Axis to be indirectly sorted. - kind : {'quicksort', 'mergesort', or 'heapsort'}, optional - Sorting algorithm (default 'quicksort') - Possible values: 'quicksort', 'mergesort', or 'heapsort'. - order : {None, var} - If a has fields defined, then the order keyword can be the field name - to sort on or a list (or tuple) of field names to indicate the order - that fields should be used to define the sort. + a : array_like + Array to be sorted. + axis : int or None, optional + Axis along which to sort. If None, the array is flattened before + sorting. The default is -1, which sorts along the last axis. + kind : {'quicksort', 'mergesort', 'heapsort'}, optional + Sorting algorithm. Default is 'quicksort'. + order : list, optional + When `a` is a structured array, this argument specifies which fields + to compare first, second, and so on. This list does not need to + include all of the fields. endwith : {True, False}, optional Whether missing values (if any) should be forced in the upper indices (at the end of the array) (True) or lower indices (at the beginning). @@ -3092,30 +3103,68 @@ Returns ------- - - When used as method, returns None. - - When used as a function, returns an array. + sorted_array : ndarray + Array of the same type and shape as `a`. + See Also + -------- + ndarray.sort : Method to sort an array in-place. + argsort : Indirect sort. + lexsort : Indirect stable sort on multiple keys. + searchsorted : Find elements in a sorted array. + Notes ----- - This method sorts 'a' in place along the given axis using - the algorithm specified by the kind keyword. + The various sorting algorithms are characterized by their average speed, + worst case performance, work space size, and whether they are stable. A + stable sort keeps items with the same key in the same relative + order. The three available algorithms have the following + properties: - The various sorts may characterized by average speed, - worst case performance need for work space, and whether - they are stable. A stable sort keeps items with the same - key in the same relative order and is most useful when - used w/ argsort where the key might differ from the items - being sorted. 
The three available algorithms have the - following properties: + =========== ======= ============= ============ ======= + kind speed worst case work space stable + =========== ======= ============= ============ ======= + 'quicksort' 1 O(n^2) 0 no + 'mergesort' 2 O(n*log(n)) ~n/2 yes + 'heapsort' 3 O(n*log(n)) 0 no + =========== ======= ============= ============ ======= - |------------------------------------------------------| - | kind | speed | worst case | work space | stable| - |------------------------------------------------------| - |'quicksort'| 1 | O(n^2) | 0 | no | - |'mergesort'| 2 | O(n*log(n)) | ~n/2 | yes | - |'heapsort' | 3 | O(n*log(n)) | 0 | no | - |------------------------------------------------------| + All the sort algorithms make temporary copies of the data when + sorting along any but the last axis. Consequently, sorting along + the last axis is faster and uses less space than sorting along + any other axis. + Examples + -------- + >>> a = np.array([[1,4],[3,1]]) + >>> np.sort(a) # sort along the last axis + array([[1, 4], + [1, 3]]) + >>> np.sort(a, axis=None) # sort the flattened array + array([1, 1, 3, 4]) + >>> np.sort(a, axis=0) # sort along the first axis + array([[1, 1], + [3, 4]]) + + Use the `order` keyword to specify a field to use when sorting a + structured array: + + >>> dtype = [('name', 'S10'), ('height', float), ('age', int)] + >>> values = [('Arthur', 1.8, 41), ('Lancelot', 1.9, 38), + ... ('Galahad', 1.7, 38)] + >>> a = np.array(values, dtype=dtype) # create a structured array + >>> np.sort(a, order='height') # doctest: +SKIP + array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41), + ('Lancelot', 1.8999999999999999, 38)], + dtype=[('name', '|S10'), ('height', '>> np.sort(a, order=['age', 'height']) # doctest: +SKIP + array([('Galahad', 1.7, 38), ('Lancelot', 1.8999999999999999, 38), + ('Arthur', 1.8, 41)], + dtype=[('name', '|S10'), ('height', '>> import warnings - >>> warnings.simplefilter('ignore',np.RankWarning) - - - See Also - -------- - polyval : computes polynomial values. - - Notes - ----- - If X is a the Vandermonde Matrix computed from x (see - http://mathworld.wolfram.com/VandermondeMatrix.html), then the - polynomial least squares solution is given by the 'p' in - - X*p = y - - where X.shape is a matrix of dimensions (len(x), deg + 1), p is a vector of - dimensions (deg + 1, 1), and y is a vector of dimensions (len(x), 1). - - This equation can be solved as - - p = (XT*X)^-1 * XT * y - - where XT is the transpose of X and -1 denotes the inverse. However, this - method is susceptible to rounding errors and generally the singular value - decomposition of the matrix X is preferred and that is what is done here. - The singular value method takes a paramenter, 'rcond', which sets a limit on - the relative size of the smallest singular value to be used in solving the - equation. This may result in lowering the rank of the Vandermonde matrix, in - which case a RankWarning is issued. If polyfit issues a RankWarning, try a - fit of lower degree or replace x by x - x.mean(), both of which will - generally improve the condition number. The routine already normalizes the - vector x by its maximum absolute value to help in this regard. The rcond - parameter can be set to a value smaller than its default, but the resulting - fit may be spurious. The current default value of rcond is len(x)*eps, where - eps is the relative precision of the floating type being used, generally - around 1e-7 and 2e-16 for IEEE single and double precision respectively. 
- This value of rcond is fairly conservative but works pretty well when x - - x.mean() is used in place of x. - - - DISCLAIMER: Power series fits are full of pitfalls for the unwary once the - degree of the fit becomes large or the interval of sample points is badly - centered. The problem is that the powers x**n are generally a poor basis for - the polynomial functions on the sample interval, resulting in a Vandermonde - matrix is ill conditioned and coefficients sensitive to rounding erros. The - computation of the polynomial values will also sensitive to rounding errors. - Consequently, the quality of the polynomial fit should be checked against - the data whenever the condition number is large. The quality of polynomial - fits *can not* be taken for granted. If all you want to do is draw a smooth - curve through the y values and polyfit is not doing the job, try centering - the sample range or look into scipy.interpolate, which includes some nice - spline fitting functions that may be of use. - - For more info, see - http://mathworld.wolfram.com/LeastSquaresFittingPolynomial.html, - but note that the k's and n's in the superscripts and subscripts - on that page. The linear algebra is correct, however. - - - - Notes - ----- - Any masked values in x is propagated in y, and vice-versa. - + Any masked values in x is propagated in y, and vice-versa. """ order = int(deg) + 1 x = asarray(x) @@ -1159,5 +1047,6 @@ return c, resids, rank, s, rcond else : return c +polyfit.__doc__ = ma.doc_note(np.polyfit.__doc__, polyfit.__doc__) ################################################################################ From numpy-svn at scipy.org Thu Nov 27 01:56:14 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 27 Nov 2008 00:56:14 -0600 (CST) Subject: [Numpy-svn] r6112 - in trunk: doc/source/reference numpy/ma Message-ID: <20081127065614.3417B39C088@scipy.org> Author: pierregm Date: 2008-11-27 00:56:12 -0600 (Thu, 27 Nov 2008) New Revision: 6112 Modified: trunk/doc/source/reference/routines.ma.rst trunk/numpy/ma/core.py Log: Doc update Modified: trunk/doc/source/reference/routines.ma.rst =================================================================== --- trunk/doc/source/reference/routines.ma.rst 2008-11-27 04:29:51 UTC (rev 6111) +++ trunk/doc/source/reference/routines.ma.rst 2008-11-27 06:56:12 UTC (rev 6112) @@ -5,48 +5,403 @@ .. currentmodule:: numpy + +Constants +========= + +.. autosummary:: + :toctree: generated/ + + ma.masked + ma.nomask + + ma.MaskType + + Creation --------- +======== +From existing data +~~~~~~~~~~~~~~~~~~ + .. autosummary:: :toctree: generated/ ma.masked_array + ma.array + ma.copy + ma.frombuffer + ma.fromfunction -Converting to ndarray ---------------------- + ma.MaskedArray.copy + +Ones and zeros +~~~~~~~~~~~~~~ + .. autosummary:: :toctree: generated/ + + ma.empty + ma.empty_like + ma.masked_all + ma.masked_all_like + ma.ones + ma.zeros - ma.filled - ma.common_fill_value - ma.default_fill_value - ma.masked_array.get_fill_value - ma.maximum_fill_value - ma.minimum_fill_value +_____ + Inspecting the array --------------------- +==================== .. 
autosummary:: :toctree: generated/ + ma.all + ma.any + ma.count + ma.count_masked ma.getmask ma.getmaskarray ma.getdata - ma.count_masked + ma.nonzero + ma.shape + ma.size + + ma.MaskedArray.data + ma.MaskedArray.mask + ma.MaskedArray.recordmask + + ma.MaskedArray.all + ma.MaskedArray.any + ma.MaskedArray.count + ma.MaskedArray.nonzero + ma.shape + ma.size -Modifying the mask ------------------- +_____ + +Manipulating a MaskedArray +========================== + +Changing the shape +~~~~~~~~~~~~~~~~~~ + .. autosummary:: :toctree: generated/ + + ma.ravel + ma.reshape + ma.resize + ma.MaskedArray.flatten + ma.MaskedArray.ravel + ma.MaskedArray.reshape + ma.MaskedArray.resize + + +Modifying axes +~~~~~~~~~~~~~~ +.. autosummary:: + :toctree: generated/ + + ma.swapaxes + ma.transpose + + ma.MaskedArray.swapaxes + ma.MaskedArray.transpose + + +Changing the number of dimensions +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +.. autosummary:: + :toctree: generated/ + + ma.atleast_1d + ma.atleast_2d + ma.atleast_3d + ma.expand_dims + ma.squeeze + + ma.MaskedArray.squeeze + + ma.column_stack + ma.concatenate + ma.dstack + ma.hstack + ma.hsplit + ma.mr_ + ma.row_stack + ma.vstack + + +Joining arrays +~~~~~~~~~~~~~~ +.. autosummary:: + :toctree: generated/ + + ma.column_stack + ma.concatenate + ma.dstack + ma.hstack + ma.vstack + + +_____ + +Operations on masks +=================== + +Creating a mask +~~~~~~~~~~~~~~~ +.. autosummary:: + :toctree: generated/ + ma.make_mask + ma.make_mask_none + ma.mask_or + ma.make_mask_descr + + +Accessing a mask +~~~~~~~~~~~~~~~~ +.. autosummary:: + :toctree: generated/ + + ma.getmask + ma.getmaskarray + ma.masked_array.mask + + +Finding masked data +~~~~~~~~~~~~~~~~~~~ +.. autosummary:: + :toctree: generated/ + + ma.flatnotmasked_contiguous + ma.flatnotmasked_edges + ma.notmasked_contiguous + ma.notmasked_edges + + +Modifying a mask +~~~~~~~~~~~~~~~~ +.. autosummary:: + :toctree: generated/ + ma.mask_cols ma.mask_or ma.mask_rowcols ma.mask_rows ma.harden_mask - ma.ids + ma.soften_mask + + ma.MaskedArray.harden_mask + ma.MaskedArray.soften_mask + ma.MaskedArray.shrink_mask + ma.MaskedArray.unshare_mask + + +_____ + +Conversion operations +====================== + +> to a masked array +~~~~~~~~~~~~~~~~~~~ +.. autosummary:: + :toctree: generated/ + + ma.asarray + ma.asanyarray + ma.fix_invalid + ma.masked_equal + ma.masked_greater + ma.masked_greater_equal + ma.masked_inside + ma.masked_invalid + ma.masked_less + ma.masked_less_equal + ma.masked_not_equal + ma.masked_object + ma.masked_outside + ma.masked_values + ma.masked_where + + +> to a ndarray +~~~~~~~~~~~~~~ +.. autosummary:: + :toctree: generated/ + + ma.compress_cols + ma.compress_rowcols + ma.compress_rows + ma.compressed + ma.filled + + ma.MaskedArray.compressed + ma.MaskedArray.filled + + +> to another object +~~~~~~~~~~~~~~~~~~~ +.. autosummary:: + :toctree: generated/ + + ma.MaskedArray.tofile + ma.MaskedArray.tolist + ma.MaskedArray.torecords + ma.MaskedArray.tostring + + +Pickling and unpickling +~~~~~~~~~~~~~~~~~~~~~~~ +.. autosummary:: + :toctree: generated/ + + ma.dump + ma.dumps + ma.load + ma.loads + + +Filling a masked array +~~~~~~~~~~~~~~~~~~~~~~ +.. autosummary:: + :toctree: generated/ + + ma.common_fill_value + ma.default_fill_value + ma.maximum_fill_value + ma.maximum_fill_value + ma.set_fill_value + + ma.MaskedArray.get_fill_value + ma.MaskedArray.set_fill_value + ma.MaskedArray.fill_value + + +_____ + +Masked arrays arithmetics +========================= + +Arithmetics +~~~~~~~~~~~ +.. 
autosummary:: + :toctree: generated/ + + ma.anom + ma.anomalies + ma.average + ma.conjugate + ma.corrcoef + ma.cov + ma.cumsum + ma.cumprod + ma.mean + ma.median + ma.power + ma.prod + ma.std + ma.sum + ma.var + + ma.MaskedArray.anom + ma.MaskedArray.cumprod + ma.MaskedArray.cumsum + ma.MaskedArray.mean + ma.MaskedArray.prod + ma.MaskedArray.std + ma.MaskedArray.sum + ma.MaskedArray.var + + +Minimum/maximum +~~~~~~~~~~~~~~~ +.. autosummary:: + :toctree: generated/ + + ma.argmax + ma.argmin + ma.max + ma.min + ma.ptp + + ma.MaskedArray.argmax + ma.MaskedArray.argmin + ma.MaskedArray.max + ma.MaskedArray.min + ma.MaskedArray.ptp + + +Sorting +~~~~~~~ +.. autosummary:: + :toctree: generated/ + + ma.argsort + ma.sort + ma.MaskedArray.argsort + ma.MaskedArray.sort + + +Algebra +~~~~~~~ +.. autosummary:: + :toctree: generated/ + + ma.diag + ma.dot + ma.identity + ma.inner + ma.innerproduct + ma.outer + ma.outerproduct + ma.trace + ma.transpose + + ma.MaskedArray.trace + ma.MaskedArray.transpose + + +Polynomial fit +~~~~~~~~~~~~~~ +.. autosummary:: + :toctree: generated/ + + ma.vander + ma.polyfit + + +Clipping and rounding +~~~~~~~~~~~~~~~~~~~~~ +.. autosummary:: + :toctree: generated/ + + ma.around + ma.clip + ma.round + + ma.MaskedArray.clip + ma.MaskedArray.round + + +Miscellanea +~~~~~~~~~~~ +.. autosummary:: + :toctree: generated/ + + ma.allequal + ma.allclose + ma.apply_along_axis + ma.arange + ma.choose + ma.ediff1d + ma.indices + ma.where + + Modified: trunk/numpy/ma/core.py =================================================================== --- trunk/numpy/ma/core.py 2008-11-27 04:29:51 UTC (rev 6111) +++ trunk/numpy/ma/core.py 2008-11-27 06:56:12 UTC (rev 6112) @@ -4219,6 +4219,17 @@ round = round_ def inner(a, b): + """ + Returns the inner product of a and b for arrays of floating point types. + + Like the generic NumPy equivalent the product sum is over the last dimension + of a and b. + + Notes + ----- + The first argument is not conjugated. + + """ fa = filled(a, 0) fb = filled(b, 0) if len(fa.shape) == 0: @@ -4269,7 +4280,7 @@ def allclose (a, b, masked_equal=True, rtol=1.e-5, atol=1.e-8, fill_value=None): """ - Returns True if two arrays are element-wise equal within a tolerance. + Returns True if two arrays are element-wise equal within a tolerance. The tolerance values are positive, typically very small numbers. The relative difference (`rtol` * `b`) and the absolute difference (`atol`) From numpy-svn at scipy.org Thu Nov 27 05:58:53 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 27 Nov 2008 04:58:53 -0600 (CST) Subject: [Numpy-svn] r6113 - trunk/doc/release Message-ID: <20081127105853.3734DC7C006@scipy.org> Author: jarrod.millman Date: 2008-11-27 04:58:51 -0600 (Thu, 27 Nov 2008) New Revision: 6113 Added: trunk/doc/release/1.3.0-notes.rst Log: add release notes for 1.3 Added: trunk/doc/release/1.3.0-notes.rst =================================================================== --- trunk/doc/release/1.3.0-notes.rst 2008-11-27 06:56:12 UTC (rev 6112) +++ trunk/doc/release/1.3.0-notes.rst 2008-11-27 10:58:51 UTC (rev 6113) @@ -0,0 +1,41 @@ +========================= +NumPy 1.3.0 Release Notes +========================= + +This minor release comes almost four months after the 1.1.0 release. 
+ +Changes +------- + +Generalized ufuncs +~~~~~~~~~~~~~~~~~~ + +http://projects.scipy.org/scipy/numpy/ticket/887 + +Refactoring numpy.core math configuration +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +http://projects.scipy.org/scipy/numpy/browser/trunk/doc/neps/math_config_clean.txt + +Improvements to build warnings +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +http://projects.scipy.org/scipy/numpy/browser/trunk/doc/neps/warnfix.txt + +Python 2.6 support +~~~~~~~~~~~~~~~~~~ + +http://www.python.org/dev/peps/pep-0361/ + +Histogram +~~~~~~~~~ + +The semantics of histogram has been modified to fix long-standing issues +with outliers handling. The main changes concern + +#. the definition of the bin edges, now including the rightmost edge, and +#. the handling of upper outliers, now ignored rather than tallied in the + rightmost bin. + +The previous behavior is still accessible using `new=False`, but is scheduled +to be deprecated in the next release (1.3). From numpy-svn at scipy.org Thu Nov 27 15:26:17 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 27 Nov 2008 14:26:17 -0600 (CST) Subject: [Numpy-svn] r6114 - in trunk/doc/source: . _templates Message-ID: <20081127202617.E49F939C249@scipy.org> Author: ptvirtan Date: 2008-11-27 14:26:04 -0600 (Thu, 27 Nov 2008) New Revision: 6114 Added: trunk/doc/source/release.rst Modified: trunk/doc/source/_templates/indexcontent.html trunk/doc/source/contents.rst Log: doc: include release notes to Sphinx build Modified: trunk/doc/source/_templates/indexcontent.html =================================================================== --- trunk/doc/source/_templates/indexcontent.html 2008-11-27 10:58:51 UTC (rev 6113) +++ trunk/doc/source/_templates/indexcontent.html 2008-11-27 20:26:04 UTC (rev 6114) @@ -33,6 +33,7 @@ + Modified: trunk/doc/source/contents.rst =================================================================== --- trunk/doc/source/contents.rst 2008-11-27 10:58:51 UTC (rev 6113) +++ trunk/doc/source/contents.rst 2008-11-27 20:26:04 UTC (rev 6114) @@ -6,6 +6,7 @@ user/index reference/index + release about bugs license Added: trunk/doc/source/release.rst =================================================================== --- trunk/doc/source/release.rst 2008-11-27 10:58:51 UTC (rev 6113) +++ trunk/doc/source/release.rst 2008-11-27 20:26:04 UTC (rev 6114) @@ -0,0 +1,5 @@ +************* +Release Notes +************* + +.. include:: ../release/1.3.0-notes.rst From numpy-svn at scipy.org Thu Nov 27 22:52:25 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 27 Nov 2008 21:52:25 -0600 (CST) Subject: [Numpy-svn] r6115 - trunk/numpy/core/src Message-ID: <20081128035225.E02FC39C27D@scipy.org> Author: charris Date: 2008-11-27 21:52:16 -0600 (Thu, 27 Nov 2008) New Revision: 6115 Modified: trunk/numpy/core/src/umath_funcs_c99.inc.src Log: Make numpy version of atanh more robust. Numpy log1p still needs a major overhaul. 
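The diff below branches on the sign of x. Both branches compute the same function, since atanh(x) = 0.5*log((1+x)/(1-x)) = 0.5*log1p(2x/(1-x)) = -0.5*log1p(-2x/(1+x)), and selecting the branch by sign keeps the log1p argument inside (-1, 0] for every x in (-1, 1), which is presumably the robustness the log message refers to. A small Python sketch of the same idea, illustrative only and not part of the commit; it uses the standard-library math.log1p and math.atanh rather than the C routine being patched, and atanh_sketch is a name introduced here for the example.

import math

def atanh_sketch(x):
    # Mirrors the branching added to npy_atanh: for x in (-1, 1) the
    # argument passed to log1p always lies in (-1, 0].
    if x > 0:
        return -0.5 * math.log1p(-2.0 * x / (1.0 + x))
    else:
        return 0.5 * math.log1p(2.0 * x / (1.0 - x))

for x in (-0.999999, -0.5, 0.0, 0.5, 0.999999):
    assert abs(atanh_sketch(x) - math.atanh(x)) < 1e-12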
Modified: trunk/numpy/core/src/umath_funcs_c99.inc.src =================================================================== --- trunk/numpy/core/src/umath_funcs_c99.inc.src 2008-11-27 20:26:04 UTC (rev 6114) +++ trunk/numpy/core/src/umath_funcs_c99.inc.src 2008-11-28 03:52:16 UTC (rev 6115) @@ -181,7 +181,12 @@ static double npy_atanh(double x) { - return 0.5*log1p(2.0*x/(1.0-x)); + if (x > 0) { + return -0.5*log1p(-2.0*x/(1.0 + x)); + } + else { + return 0.5*log1p(2.0*x/(1.0 - x)); + } } #define atanh npy_atanh #else From numpy-svn at scipy.org Fri Nov 28 00:34:41 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 27 Nov 2008 23:34:41 -0600 (CST) Subject: [Numpy-svn] r6116 - in trunk: doc/source/reference numpy/core/code_generators Message-ID: <20081128053441.7727E39C2C6@scipy.org> Author: charris Date: 2008-11-27 23:34:33 -0600 (Thu, 27 Nov 2008) New Revision: 6116 Modified: trunk/doc/source/reference/ufuncs.rst trunk/numpy/core/code_generators/docstrings.py trunk/numpy/core/code_generators/generate_umath.py Log: Add preliminary docstrings for: log2, exp2, logaddexp, logaddexp2, rad2deg, deg2rad. The complete docstring for fmin and fmax are on the web but haven't yet been merged. Modified: trunk/doc/source/reference/ufuncs.rst =================================================================== --- trunk/doc/source/reference/ufuncs.rst 2008-11-28 03:52:16 UTC (rev 6115) +++ trunk/doc/source/reference/ufuncs.rst 2008-11-28 05:34:33 UTC (rev 6116) @@ -77,20 +77,20 @@ with a dimension of length 1 to satisfy property 2. .. admonition:: Example - + If ``a.shape`` is (5,1), ``b.shape`` is (1,6), ``c.shape`` is (6,) and d.shape is ``()`` so that d is a scalar, then *a*, *b*, *c*, and *d* are all broadcastable to dimension (5,6); and - + - *a* acts like a (5,6) array where ``a[:,0]`` is broadcast to the other columns, - + - *b* acts like a (5,6) array where ``b[0,:]`` is broadcast to the other rows, - + - *c* acts like a (1,6) array and therefore like a (5,6) array where ``c[:]` is broadcast to every row, and finally, - + - *d* acts like a (5,6) array where the single value is repeated. @@ -205,8 +205,8 @@ .. admonition:: Figure - Code segment showing the can cast safely table for a 32-bit system. - + Code segment showing the can cast safely table for a 32-bit system. + >>> def print_table(ntypes): ... print 'X', ... for char in ntypes: print char, @@ -245,7 +245,7 @@ You should note that, while included in the table for completeness, the 'S', 'U', and 'V' types cannot be operated on by ufuncs. Also, note that on a 64-bit system the integer types may have different -sizes resulting in a slightly altered table. +sizes resulting in a slightly altered table. Mixed scalar-array operations use a different set of casting rules that ensure that a scalar cannot upcast an array unless the scalar is @@ -264,7 +264,7 @@ -------------------------- All ufuncs take optional keyword arguments. These represent rather -advanced usage and will likely not be used by most users. +advanced usage and will likely not be used by most users. .. index:: pair: ufunc; keyword arguments @@ -296,7 +296,7 @@ ---------- There are some informational attributes that universal functions -possess. None of the attributes can be set. +possess. None of the attributes can be set. .. index:: pair: ufunc; attributes @@ -316,7 +316,7 @@ ufunc.nin ufunc.nout - ufunc.nargs + ufunc.nargs ufunc.ntypes ufunc.types ufunc.identity @@ -386,7 +386,7 @@ .. 
note:: The ufunc still returns its output(s) even if you use the optional - output argument(s). + output argument(s). Math operations --------------- @@ -398,6 +398,7 @@ multiply divide logaddexp + logaddexp2 true_divide floor_divide negative @@ -410,10 +411,12 @@ sign conj exp + exp2 log + log2 + log10 expm1 log1p - log10 sqrt square reciprocal @@ -433,7 +436,7 @@ Trigonometric functions ----------------------- All trigonometric functions use radians when an angle is called for. -The ratio of degrees to radians is :math:`180^{\circ}/\pi.` +The ratio of degrees to radians is :math:`180^{\circ}/\pi.` .. autosummary:: @@ -458,7 +461,7 @@ ----------------------- These function all need integer arguments and they maniuplate the bit- -pattern of those arguments. +pattern of those arguments. .. autosummary:: @@ -501,7 +504,7 @@ element-by-element array comparisons. Be sure to understand the operator precedence: (a>2) & (a<5) is the proper syntax because a>2 & a<5 will result in an error due to the fact that 2 & a is evaluated - first. + first. .. autosummary:: @@ -514,7 +517,7 @@ method of the maximum ufunc is much faster. Also, the max() method will not give answers you might expect for arrays with greater than one dimension. The reduce method of minimum also allows you to compute - a total minimum over an array. + a total minimum over an array. .. autosummary:: @@ -528,7 +531,7 @@ two arrays is larger. In contrast, max(a,b) treats the objects a and b as a whole, looks at the (total) truth value of a>b and uses it to return either a or b (as a whole). A similar difference exists between - minimum(a,b) and min(a,b). + minimum(a,b) and min(a,b). Floating functions @@ -536,7 +539,7 @@ Recall that all of these functions work element-by-element over an array, returning an array output. The description details only a -single operation. +single operation. .. autosummary:: Modified: trunk/numpy/core/code_generators/docstrings.py =================================================================== --- trunk/numpy/core/code_generators/docstrings.py 2008-11-28 03:52:16 UTC (rev 6115) +++ trunk/numpy/core/code_generators/docstrings.py 2008-11-28 05:34:33 UTC (rev 6116) @@ -691,8 +691,9 @@ """ Return the truncated value of the input, element-wise. - The truncated value of the scalar `x` is the nearest integer `i`, such - that i is not larger than x amplitude + The truncated value of the scalar `x` is the nearest integer `i` which + is closer to zero than `x` is. In short, the fractional part of the + signed number `x` is discarded. Parameters ---------- @@ -789,7 +790,9 @@ add_newdoc('numpy.core.umath', 'degrees', """ - Convert angles from radians to degrees. + Convert angles from radians to degrees. This is the same + function as rad2deg but the latter is preferred because of + the more descriptive name. Parameters ---------- @@ -804,6 +807,8 @@ See Also -------- + rad2deg : Convert angles from radians to degrees. + deg2rad : Convert angles from degrees to radians. radians : Convert angles from degrees to radians. unwrap : Remove large jumps in angle by wrapping. @@ -818,6 +823,41 @@ """) +add_newdoc('numpy.core.umath', 'rad2deg', + """ + Convert angles from radians to degrees. This is the same + function as degrees but is preferred because its more + descriptive name. + + Parameters + ---------- + x : array_like + Angle in radians. + + Returns + ------- + y : ndarray + The corresponding angle in degrees. + + + See Also + -------- + degrees : Convert angles from radians to degrees. 
+ deg2rad : Convert angles from degrees to radians. + radians : Convert angles from degrees to radians. + unwrap : Remove large jumps in angle by wrapping. + + Notes + ----- + rad2deg(x) is ``180 * x / pi``. + + Examples + -------- + >>> np.rad2deg(np.pi/2) + 90.0 + + """) + add_newdoc('numpy.core.umath', 'divide', """ Divide arguments element-wise. @@ -962,6 +1002,22 @@ """) +add_newdoc('numpy.core.umath', 'exp2', + """ + Calculate `2**p` for all `p` in the input array. + + Parameters + ---------- + x : array_like + Input values. + + Returns + ------- + out : ndarray + Element-wise 2 to the power `x`. + + """) + add_newdoc('numpy.core.umath', 'expm1', """ Return the exponential of the elements in the array minus one. @@ -1661,6 +1717,82 @@ """) +add_newdoc('numpy.core.umath', 'log2', + """ + Base-2 logarithm of `x`. + + Parameters + ---------- + x : array_like + Input values. + + Returns + ------- + y : ndarray + Base-2 logarithm of `x`. + + See Also + -------- + log, log10, log1p + + """) + +add_newdoc('numpy.core.umath', 'logaddexp', + """ + Logarithm of `exp(x) + exp(y)`. + + This function is useful in statistics where the calculated probabilities of + events may be so small as to excede the range of normal floating point + numbers. In such cases the logarithm of the calculated probability is + stored. This function allows adding probabilities stored in such a fashion. + + Parameters + ---------- + x : array_like + Input values. + y : array_like + Input values. + + + Returns + ------- + result : ndarray + Logarithm of `exp(x) + exp(y)`. + + See Also + -------- + logaddexp2 + + """) + +add_newdoc('numpy.core.umath', 'logaddexp2', + """ + Base-2 Logarithm of `2**x + 2**y`. + + This function is useful in machine learning when the calculated probabilities of + events may be so small as to excede the range of normal floating point + numbers. In such cases the base-2 logarithm of the calculated probability + can be used instead. This function allows adding probabilities stored in such a fashion. + + Parameters + ---------- + x : array_like + Input values. + y : array_like + Input values. + + + Returns + ------- + result : ndarray + Base-2 logarithm of `2**x + 2**y`. + + See Also + -------- + logaddexp + + """) + add_newdoc('numpy.core.umath', 'log1p', """ `log(1 + x)` in base `e`, elementwise. @@ -1921,6 +2053,16 @@ """) +add_newdoc('numpy.core.umath', 'fmax', + """ + + """) + +add_newdoc('numpy.core.umath', 'fmin', + """ + + """) + add_newdoc('numpy.core.umath', 'modf', """ Return the fractional and integral part of a number. @@ -2102,7 +2244,8 @@ add_newdoc('numpy.core.umath', 'radians', """ - Convert angles from degrees to radians. + Convert angles from degrees to radians. This function is + the same as deg2rad, which is more descriptive.. Parameters ---------- @@ -2116,6 +2259,8 @@ See Also -------- + deg2rad : Convert angles from degrees to radians. + rad2deg : Convert angles from radians to degrees. degrees : Convert angles from radians to degrees. unwrap : Remove large jumps in angle by wrapping. @@ -2130,6 +2275,39 @@ """) +add_newdoc('numpy.core.umath', 'deg2rad', + """ + Convert angles from degrees to radians. This is the same + function as radians, but deg2rad is a more descriptive name. + + Parameters + ---------- + x : array_like + Angles in degrees. + + Returns + ------- + y : ndarray + The corresponding angle in radians. + + See Also + -------- + radians : Convert angles from degrees to radians. + rad2deg : Convert angles from radians to degrees. 
+ degrees : Convert angles from radians to degrees. + unwrap : Remove large jumps in angle by wrapping. + + Notes + ----- + ``deg2rad(x)`` is ``x * pi / 180``. + + Examples + -------- + >>> np.deg2rad(180) + 3.1415926535897931 + + """) + add_newdoc('numpy.core.umath', 'reciprocal', """ Return element-wise reciprocal. Modified: trunk/numpy/core/code_generators/generate_umath.py =================================================================== --- trunk/numpy/core/code_generators/generate_umath.py 2008-11-28 03:52:16 UTC (rev 6115) +++ trunk/numpy/core/code_generators/generate_umath.py 2008-11-28 05:34:33 UTC (rev 6116) @@ -330,12 +330,12 @@ ), 'logaddexp' : Ufunc(2, 1, None, - "", + docstrings.get('numpy.core.umath.logaddexp'), TD(flts, f="logaddexp") ), 'logaddexp2' : Ufunc(2, 1, None, - "", + docstrings.get('numpy.core.umath.logaddexp2'), TD(flts, f="logaddexp2") ), 'bitwise_and' : @@ -381,7 +381,7 @@ ), 'rad2deg' : Ufunc(1, 1, None, - '', + docstrings.get('numpy.core.umath.rad2deg'), TD(fltsM, f='rad2deg'), ), 'radians' : @@ -391,7 +391,7 @@ ), 'deg2rad' : Ufunc(1, 1, None, - '', + docstrings.get('numpy.core.umath.deg2rad'), TD(fltsM, f='deg2rad'), ), 'arccos' : @@ -474,7 +474,7 @@ ), 'exp2' : Ufunc(1, 1, None, - '', + docstrings.get('numpy.core.umath.exp2'), TD(flts, f='exp2'), TD(M, f='exp2'), ), @@ -492,7 +492,7 @@ ), 'log2' : Ufunc(1, 1, None, - '', + docstrings.get('numpy.core.umath.log2'), TD(flts, f='log2'), TD(M, f='log2'), ), @@ -522,7 +522,7 @@ ), 'trunc' : Ufunc(1, 1, None, - '', + docstrings.get('numpy.core.umath.trunc'), TD(flts, f='trunc'), TD(M, f='trunc'), ), From numpy-svn at scipy.org Fri Nov 28 11:47:40 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 28 Nov 2008 10:47:40 -0600 (CST) Subject: [Numpy-svn] r6117 - trunk/numpy/core Message-ID: <20081128164740.4485A39C088@scipy.org> Author: cdavid Date: 2008-11-28 10:47:34 -0600 (Fri, 28 Nov 2008) New Revision: 6117 Modified: trunk/numpy/core/SConscript Log: Fix typo in core scons script. Modified: trunk/numpy/core/SConscript =================================================================== --- trunk/numpy/core/SConscript 2008-11-28 05:34:33 UTC (rev 6116) +++ trunk/numpy/core/SConscript 2008-11-28 16:47:34 UTC (rev 6117) @@ -1,4 +1,4 @@ -# Last Change: Fri Oct 03 04:00 PM 2008 J +# Last Change: Sat Nov 29 01:00 AM 2008 J # vim:syntax=python import os import sys @@ -159,17 +159,17 @@ # XXX: we do not test for hypot because python checks for it (HAVE_HYPOT in # python.h... 
I wish they would clean their public headers someday) - optional_stdfuncs = ["expm1", "log1p", "acosh", "asinh", "atanh", - "rint", "trunc", "exp2", "log2"] +optional_stdfuncs = ["expm1", "log1p", "acosh", "asinh", "atanh", + "rint", "trunc", "exp2", "log2"] check_funcs(optional_stdfuncs) # C99 functions: float and long double versions - c99_funcs = ["sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs", "floor", - "ceil", "rint", "trunc", "sqrt", "log10", "log", "log1p", "exp", - "expm1", "asin", "acos", "atan", "asinh", "acosh", "atanh", - "hypot", "atan2", "pow", "fmod", "modf", 'frexp', 'ldexp', - "exp2", "log2"] +c99_funcs = ["sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs", "floor", + "ceil", "rint", "trunc", "sqrt", "log10", "log", "log1p", "exp", + "expm1", "asin", "acos", "atan", "asinh", "acosh", "atanh", + "hypot", "atan2", "pow", "fmod", "modf", 'frexp', 'ldexp', + "exp2", "log2"] for prec in ['l', 'f']: fns = [f + prec for f in c99_funcs] From numpy-svn at scipy.org Fri Nov 28 11:50:13 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 28 Nov 2008 10:50:13 -0600 (CST) Subject: [Numpy-svn] r6118 - trunk/numpy/core Message-ID: <20081128165013.013C939C088@scipy.org> Author: cdavid Date: 2008-11-28 10:50:08 -0600 (Fri, 28 Nov 2008) New Revision: 6118 Modified: trunk/numpy/core/SConscript Log: Anoter typo on core scons script. Modified: trunk/numpy/core/SConscript =================================================================== --- trunk/numpy/core/SConscript 2008-11-28 16:47:34 UTC (rev 6117) +++ trunk/numpy/core/SConscript 2008-11-28 16:50:08 UTC (rev 6118) @@ -137,9 +137,9 @@ mfuncs_defined = dict([(f, 0) for f in mfuncs]) # Check for mandatory funcs: we barf if a single one of those is not there - mandatory_funcs = ["sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs", - "floor", "ceil", "sqrt", "log10", "log", "exp", "asin", - "acos", "atan", "fmod", 'modf', 'frexp', 'ldexp'] +mandatory_funcs = ["sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs", + "floor", "ceil", "sqrt", "log10", "log", "exp", "asin", + "acos", "atan", "fmod", 'modf', 'frexp', 'ldexp'] if not config.CheckFuncsAtOnce(mandatory_funcs): raise SystemError("One of the required function to build numpy is not" From numpy-svn at scipy.org Sat Nov 29 07:07:21 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 29 Nov 2008 06:07:21 -0600 (CST) Subject: [Numpy-svn] r6119 - trunk/numpy/lib Message-ID: <20081129120721.7B34239C089@scipy.org> Author: stefan Date: 2008-11-29 06:07:07 -0600 (Sat, 29 Nov 2008) New Revision: 6119 Modified: trunk/numpy/lib/io.py Log: Add memory map support to `load` [patch by Gael Varoquaux]. Closes #954. Modified: trunk/numpy/lib/io.py =================================================================== --- trunk/numpy/lib/io.py 2008-11-28 16:50:08 UTC (rev 6118) +++ trunk/numpy/lib/io.py 2008-11-29 12:07:07 UTC (rev 6119) @@ -79,7 +79,7 @@ else: raise KeyError, "%s is not a file in the archive" % key -def load(file, memmap=False): +def load(file, mmap_mode=None): """ Load a pickled, ``.npy``, or ``.npz`` binary file. @@ -87,10 +87,15 @@ ---------- file : file-like object or string The file to read. It must support ``seek()`` and ``read()`` methods. - memmap : bool - If True, then memory-map the ``.npy`` file (or unzip the ``.npz`` file - into a temporary directory and memory-map each component). This has - no effect for a pickled file. 
+ mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional + If not None, then memory-map the file, using the given mode + (see `numpy.memmap`). The mode has no effect for pickled or + zipped files. + A memory-mapped array is stored on disk, and not directly loaded + into memory. However, it can be accessed and sliced like any + ndarray. Memory mapping is especially useful for accessing + small fragments of large files without reading the entire file + into memory. Returns ------- @@ -104,28 +109,35 @@ Notes ----- - - If file contains pickle data, then whatever is stored in the + - If the file contains pickle data, then whatever is stored in the pickle is returned. - If the file is a ``.npy`` file, then an array is returned. - If the file is a ``.npz`` file, then a dictionary-like object is - returned, containing {filename: array} key-value pairs, one for - every file in the archive. + returned, containing ``{filename: array}`` key-value pairs, one for + each file in the archive. Examples -------- - >>> np.save('/tmp/123', np.array([1, 2, 3]) + Store data to disk, and load it again: + + >>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]])) >>> np.load('/tmp/123.npy') - array([1, 2, 3]) + array([[1, 2, 3], + [4, 5, 6]]) + Mem-map the stored array, and then access the second row + directly from disk: + + >>> X = np.load('/tmp/123.npy', mmap_mode='r') + >>> X[1, :] + memmap([4, 5, 6]) + """ if isinstance(file, basestring): fid = _file(file,"rb") else: fid = file - if memmap: - raise NotImplementedError - # Code to distinguish from NumPy binary files and pickles. _ZIP_PREFIX = 'PK\x03\x04' N = len(format.MAGIC_PREFIX) @@ -134,7 +146,10 @@ if magic.startswith(_ZIP_PREFIX): # zip-file (assume .npz) return NpzFile(fid) elif magic == format.MAGIC_PREFIX: # .npy file - return format.read_array(fid) + if mmap_mode: + return format.open_memmap(file, mode=mmap_mode) + else: + return format.read_array(fid) else: # Try a pickle try: return _cload(fid) From numpy-svn at scipy.org Sat Nov 29 07:08:06 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 29 Nov 2008 06:08:06 -0600 (CST) Subject: [Numpy-svn] r6120 - trunk/numpy/lib/tests Message-ID: <20081129120806.BB20B39C089@scipy.org> Author: stefan Date: 2008-11-29 06:07:54 -0600 (Sat, 29 Nov 2008) New Revision: 6120 Modified: trunk/numpy/lib/tests/test_io.py Log: Add test for load's mmap_mode. Modified: trunk/numpy/lib/tests/test_io.py =================================================================== --- trunk/numpy/lib/tests/test_io.py 2008-11-29 12:07:07 UTC (rev 6119) +++ trunk/numpy/lib/tests/test_io.py 2008-11-29 12:07:54 UTC (rev 6120) @@ -2,51 +2,86 @@ import numpy as np import StringIO +from tempfile import NamedTemporaryFile class RoundtripTest: + def roundtrip(self, save_func, *args, **kwargs): + """ + save_func : callable + Function used to save arrays to file. + file_on_disk : bool + If true, store the file on disk, instead of in a + string buffer. + save_kwds : dict + Parameters passed to `save_func`. + load_kwds : dict + Parameters passed to `numpy.load`. + args : tuple of arrays + Arrays stored to file. 
+ + """ + save_kwds = kwargs.get('save_kwds', {}) + load_kwds = kwargs.get('load_kwds', {}) + file_on_disk = kwargs.get('file_on_disk', False) + + if file_on_disk: + target_file = NamedTemporaryFile() + load_file = target_file.name + else: + target_file = StringIO.StringIO() + load_file = target_file + + arr = args + + save_func(target_file, *arr, **save_kwds) + target_file.flush() + target_file.seek(0) + + arr_reloaded = np.load(load_file, **load_kwds) + + self.arr = arr + self.arr_reloaded = arr_reloaded + def test_array(self): - a = np.array( [[1,2],[3,4]], float) - self.do(a) + a = np.array([[1, 2], [3, 4]], float) + self.roundtrip(a) - a = np.array( [[1,2],[3,4]], int) - self.do(a) + a = np.array([[1, 2], [3, 4]], int) + self.roundtrip(a) - a = np.array( [[1+5j,2+6j],[3+7j,4+8j]], dtype=np.csingle) - self.do(a) + a = np.array([[1+5j, 2+6j], [3+7j, 4+8j]], dtype=np.csingle) + self.roundtrip(a) - a = np.array( [[1+5j,2+6j],[3+7j,4+8j]], dtype=np.cdouble) - self.do(a) + a = np.array([[1+5j, 2+6j], [3+7j, 4+8j]], dtype=np.cdouble) + self.roundtrip(a) def test_1D(self): - a = np.array([1,2,3,4], int) - self.do(a) + a = np.array([1, 2, 3, 4], int) + self.roundtrip(a) + def test_mmap(self): + a = np.array([[1, 2.5], [4, 7.3]]) + self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'}) + def test_record(self): a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) - self.do(a) + self.roundtrip(a) class TestSaveLoad(RoundtripTest, TestCase): - def do(self, a): - c = StringIO.StringIO() - np.save(c, a) - c.seek(0) - a_reloaded = np.load(c) - assert_equal(a, a_reloaded) + def roundtrip(self, *args, **kwargs): + RoundtripTest.roundtrip(self, np.save, *args, **kwargs) + assert_equal(self.arr[0], self.arr_reloaded) - class TestSavezLoad(RoundtripTest, TestCase): - def do(self, *arrays): - c = StringIO.StringIO() - np.savez(c, *arrays) - c.seek(0) - l = np.load(c) - for n, a in enumerate(arrays): - assert_equal(a, l['arr_%d' % n]) + def roundtrip(self, *args, **kwargs): + RoundtripTest.roundtrip(self, np.savez, *args, **kwargs) + for n, arr in enumerate(self.arr): + assert_equal(arr, self.arr_reloaded['arr_%d' % n]) def test_multiple_arrays(self): a = np.array( [[1,2],[3,4]], float) b = np.array( [[1+2j,2+7j],[3-6j,4+12j]], complex) - self.do(a,b) + self.roundtrip(a,b) def test_named_arrays(self): a = np.array( [[1,2],[3,4]], float) From numpy-svn at scipy.org Sat Nov 29 07:08:42 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 29 Nov 2008 06:08:42 -0600 (CST) Subject: [Numpy-svn] r6121 - trunk/numpy/lib Message-ID: <20081129120842.4B08239C089@scipy.org> Author: stefan Date: 2008-11-29 06:08:29 -0600 (Sat, 29 Nov 2008) New Revision: 6121 Modified: trunk/numpy/lib/format.py Log: Opening a memmap requires a filename. Raise an error otherwise. Modified: trunk/numpy/lib/format.py =================================================================== --- trunk/numpy/lib/format.py 2008-11-29 12:07:54 UTC (rev 6120) +++ trunk/numpy/lib/format.py 2008-11-29 12:08:29 UTC (rev 6121) @@ -381,7 +381,7 @@ def open_memmap(filename, mode='r+', dtype=None, shape=None, - fortran_order=False, version=(1,0)): + fortran_order=False, version=(1,0)): """ Open a .npy file as a memory-mapped array. @@ -390,7 +390,7 @@ Parameters ---------- filename : str - The name of the file on disk. This may not be a filelike object. + The name of the file on disk. This may not be a file-like object. mode : str, optional The mode to open the file with. 
In addition to the standard file modes, 'c' is also accepted to mean "copy on write". See `numpy.memmap` for @@ -425,6 +425,10 @@ numpy.memmap """ + if not isinstance(filename, basestring): + raise ValueError("Filename must be a string. Memmap cannot use" \ + " existing file handles.") + if 'w' in mode: # We are creating the file, not reading it. # Check if we ought to create the file. From numpy-svn at scipy.org Sat Nov 29 07:09:20 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 29 Nov 2008 06:09:20 -0600 (CST) Subject: [Numpy-svn] r6122 - trunk/numpy/lib/tests Message-ID: <20081129120920.E491239C089@scipy.org> Author: stefan Date: 2008-11-29 06:09:07 -0600 (Sat, 29 Nov 2008) New Revision: 6122 Modified: trunk/numpy/lib/tests/test_io.py Log: Reformat spacing in io tests. Modified: trunk/numpy/lib/tests/test_io.py =================================================================== --- trunk/numpy/lib/tests/test_io.py 2008-11-29 12:08:29 UTC (rev 6121) +++ trunk/numpy/lib/tests/test_io.py 2008-11-29 12:09:07 UTC (rev 6122) @@ -49,10 +49,10 @@ a = np.array([[1, 2], [3, 4]], int) self.roundtrip(a) - a = np.array([[1+5j, 2+6j], [3+7j, 4+8j]], dtype=np.csingle) + a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.csingle) self.roundtrip(a) - a = np.array([[1+5j, 2+6j], [3+7j, 4+8j]], dtype=np.cdouble) + a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.cdouble) self.roundtrip(a) def test_1D(self): @@ -79,13 +79,13 @@ assert_equal(arr, self.arr_reloaded['arr_%d' % n]) def test_multiple_arrays(self): - a = np.array( [[1,2],[3,4]], float) - b = np.array( [[1+2j,2+7j],[3-6j,4+12j]], complex) + a = np.array([[1, 2], [3, 4]], float) + b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex) self.roundtrip(a,b) def test_named_arrays(self): - a = np.array( [[1,2],[3,4]], float) - b = np.array( [[1+2j,2+7j],[3-6j,4+12j]], complex) + a = np.array([[1, 2], [3, 4]], float) + b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex) c = StringIO.StringIO() np.savez(c, file_a=a, file_b=b) c.seek(0) @@ -96,7 +96,7 @@ class TestSaveTxt(TestCase): def test_array(self): - a =np.array( [[1,2],[3,4]], float) + a =np.array([[1, 2], [3, 4]], float) c = StringIO.StringIO() np.savetxt(c, a) c.seek(0) @@ -104,14 +104,14 @@ ['1.000000000000000000e+00 2.000000000000000000e+00\n', '3.000000000000000000e+00 4.000000000000000000e+00\n']) - a =np.array( [[1,2],[3,4]], int) + a =np.array([[1, 2], [3, 4]], int) c = StringIO.StringIO() np.savetxt(c, a, fmt='%d') c.seek(0) assert_equal(c.readlines(), ['1 2\n', '3 4\n']) def test_1D(self): - a = np.array([1,2,3,4], int) + a = np.array([1, 2, 3, 4], int) c = StringIO.StringIO() np.savetxt(c, a, fmt='%d') c.seek(0) @@ -181,12 +181,12 @@ c.seek(0) x = np.loadtxt(c, dtype=int) - a = np.array([[1,2],[3,4]], int) + a = np.array([[1, 2], [3, 4]], int) assert_array_equal(x, a) c.seek(0) x = np.loadtxt(c, dtype=float) - a = np.array([[1,2],[3,4]], float) + a = np.array([[1, 2], [3, 4]], float) assert_array_equal(x, a) def test_1D(self): @@ -194,14 +194,14 @@ c.write('1\n2\n3\n4\n') c.seek(0) x = np.loadtxt(c, dtype=int) - a = np.array([1,2,3,4], int) + a = np.array([1, 2, 3, 4], int) assert_array_equal(x, a) c = StringIO.StringIO() c.write('1,2,3,4\n') c.seek(0) x = np.loadtxt(c, dtype=int, delimiter=',') - a = np.array([1,2,3,4], int) + a = np.array([1, 2, 3, 4], int) assert_array_equal(x, a) def test_missing(self): @@ -210,7 +210,7 @@ c.seek(0) x = np.loadtxt(c, dtype=int, delimiter=',', \ converters={3:lambda s: int(s or -999)}) - a = 
np.array([1,2,3,-999,5], int) + a = np.array([1, 2, 3, -999, 5], int) assert_array_equal(x, a) def test_converters_with_usecols(self): @@ -219,8 +219,8 @@ c.seek(0) x = np.loadtxt(c, dtype=int, delimiter=',', \ converters={3:lambda s: int(s or -999)}, \ - usecols=(1, 3, )) - a = np.array([[2, -999],[7, 9]], int) + usecols=(1, 3,)) + a = np.array([[2, -999], [7, 9]], int) assert_array_equal(x, a) def test_comments(self): @@ -229,7 +229,7 @@ c.seek(0) x = np.loadtxt(c, dtype=int, delimiter=',', \ comments='#') - a = np.array([1,2,3,5], int) + a = np.array([1, 2, 3, 5], int) assert_array_equal(x, a) def test_skiprows(self): @@ -238,7 +238,7 @@ c.seek(0) x = np.loadtxt(c, dtype=int, delimiter=',', \ skiprows=1) - a = np.array([1,2,3,5], int) + a = np.array([1, 2, 3, 5], int) assert_array_equal(x, a) c = StringIO.StringIO() @@ -246,28 +246,28 @@ c.seek(0) x = np.loadtxt(c, dtype=int, delimiter=',', \ skiprows=1) - a = np.array([1,2,3,5], int) + a = np.array([1, 2, 3, 5], int) assert_array_equal(x, a) def test_usecols(self): - a =np.array( [[1,2],[3,4]], float) + a = np.array([[1, 2], [3, 4]], float) c = StringIO.StringIO() np.savetxt(c, a) c.seek(0) x = np.loadtxt(c, dtype=float, usecols=(1,)) assert_array_equal(x, a[:,1]) - a =np.array( [[1,2,3],[3,4,5]], float) + a =np.array([[1, 2, 3], [3, 4, 5]], float) c = StringIO.StringIO() np.savetxt(c, a) c.seek(0) - x = np.loadtxt(c, dtype=float, usecols=(1,2)) - assert_array_equal(x, a[:,1:]) + x = np.loadtxt(c, dtype=float, usecols=(1, 2)) + assert_array_equal(x, a[:, 1:]) # Testing with arrays instead of tuples. c.seek(0) - x = np.loadtxt(c, dtype=float, usecols=np.array([1,2])) - assert_array_equal(x, a[:,1:]) + x = np.loadtxt(c, dtype=float, usecols=np.array([1, 2])) + assert_array_equal(x, a[:, 1:]) # Checking with dtypes defined converters. 
data = '''JOE 70.1 25.3 @@ -276,9 +276,9 @@ c = StringIO.StringIO(data) names = ['stid', 'temp'] dtypes = ['S4', 'f8'] - arr = np.loadtxt(c, usecols=(0,2),dtype=zip(names,dtypes)) - assert_equal(arr['stid'], ["JOE", "BOB"]) - assert_equal(arr['temp'], [25.3, 27.9]) + arr = np.loadtxt(c, usecols=(0, 2), dtype=zip(names, dtypes)) + assert_equal(arr['stid'], ["JOE", "BOB"]) + assert_equal(arr['temp'], [25.3, 27.9]) def test_fancy_dtype(self): c = StringIO.StringIO() @@ -286,7 +286,7 @@ c.seek(0) dt = np.dtype([('x', int), ('y', [('t', int), ('s', float)])]) x = np.loadtxt(c, dtype=dt, delimiter=',') - a = np.array([(1,(2,3.0)),(4,(5,6.0))], dt) + a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dt) assert_array_equal(x, a) def test_empty_file(self): @@ -297,11 +297,13 @@ c = StringIO.StringIO() c.writelines(['1 21\n', '3 42\n']) c.seek(0) - data = np.loadtxt(c, usecols=(1,), converters={0: lambda s: int(s, 16)}) + data = np.loadtxt(c, usecols=(1,), + converters={0: lambda s: int(s, 16)}) assert_array_equal(data, [21, 42]) c.seek(0) - data = np.loadtxt(c, usecols=(1,), converters={1: lambda s: int(s, 16)}) + data = np.loadtxt(c, usecols=(1,), + converters={1: lambda s: int(s, 16)}) assert_array_equal(data, [33, 66]) class Testfromregex(TestCase): @@ -312,7 +314,8 @@ dt = [('num', np.float64), ('val', 'S3')] x = np.fromregex(c, r"([0-9.]+)\s+(...)", dt) - a = np.array([(1.312, 'foo'), (1.534, 'bar'), (4.444, 'qux')], dtype=dt) + a = np.array([(1.312, 'foo'), (1.534, 'bar'), (4.444, 'qux')], + dtype=dt) assert_array_equal(x, a) def test_record_2(self): @@ -323,7 +326,8 @@ dt = [('num', np.int32), ('val', 'S3')] x = np.fromregex(c, r"(\d+)\s+(...)", dt) - a = np.array([(1312, 'foo'), (1534, 'bar'), (4444, 'qux')], dtype=dt) + a = np.array([(1312, 'foo'), (1534, 'bar'), (4444, 'qux')], + dtype=dt) assert_array_equal(x, a) def test_record_3(self): From numpy-svn at scipy.org Sat Nov 29 09:53:57 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 29 Nov 2008 08:53:57 -0600 (CST) Subject: [Numpy-svn] r6123 - trunk/numpy/lib Message-ID: <20081129145357.DA08639C089@scipy.org> Author: stefan Date: 2008-11-29 08:53:44 -0600 (Sat, 29 Nov 2008) New Revision: 6123 Modified: trunk/numpy/lib/io.py Log: Identify file object using 'readline', rather than 'seek'. Modified: trunk/numpy/lib/io.py =================================================================== --- trunk/numpy/lib/io.py 2008-11-29 12:09:07 UTC (rev 6122) +++ trunk/numpy/lib/io.py 2008-11-29 14:53:44 UTC (rev 6123) @@ -348,7 +348,7 @@ fh = gzip.open(fname) else: fh = file(fname) - elif hasattr(fname, 'seek'): + elif hasattr(fname, 'readline'): fh = fname else: raise ValueError('fname must be a string or file handle') From numpy-svn at scipy.org Sat Nov 29 09:54:49 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 29 Nov 2008 08:54:49 -0600 (CST) Subject: [Numpy-svn] r6124 - trunk/numpy/lib Message-ID: <20081129145449.A9A9A39C089@scipy.org> Author: stefan Date: 2008-11-29 08:54:29 -0600 (Sat, 29 Nov 2008) New Revision: 6124 Modified: trunk/numpy/lib/io.py Log: Add bz2 support to loadtxt [patch by Ryan May]. Modified: trunk/numpy/lib/io.py =================================================================== --- trunk/numpy/lib/io.py 2008-11-29 14:53:44 UTC (rev 6123) +++ trunk/numpy/lib/io.py 2008-11-29 14:54:29 UTC (rev 6124) @@ -279,8 +279,8 @@ Parameters ---------- fname : file or string - File or filename to read. If the filename extension is ``.gz``, - the file is first decompressed. + File or filename to read. 
If the filename extension is ``.gz`` or + ``.bz2``, the file is first decompressed. dtype : data-type Data type of the resulting array. If this is a record data-type, the resulting array will be 1-dimensional, and each row will be @@ -346,6 +346,9 @@ if fname.endswith('.gz'): import gzip fh = gzip.open(fname) + elif fname.endswith('.bz2'): + import bz2 + fh = bz2.BZ2File(fname) else: fh = file(fname) elif hasattr(fname, 'readline'): From numpy-svn at scipy.org Sun Nov 30 09:45:00 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 30 Nov 2008 08:45:00 -0600 (CST) Subject: [Numpy-svn] r6125 - in trunk/doc: . source sphinxext sphinxext/tests Message-ID: <20081130144500.8787839C30B@scipy.org> Author: ptvirtan Date: 2008-11-30 08:44:38 -0600 (Sun, 30 Nov 2008) New Revision: 6125 Added: trunk/doc/sphinxext/ trunk/doc/sphinxext/LICENSE.txt trunk/doc/sphinxext/__init__.py trunk/doc/sphinxext/autosummary.py trunk/doc/sphinxext/autosummary_generate.py trunk/doc/sphinxext/comment_eater.py trunk/doc/sphinxext/compiler_unparse.py trunk/doc/sphinxext/docscrape.py trunk/doc/sphinxext/docscrape_sphinx.py trunk/doc/sphinxext/numpydoc.py trunk/doc/sphinxext/only_directives.py trunk/doc/sphinxext/phantom_import.py trunk/doc/sphinxext/plot_directive.py trunk/doc/sphinxext/tests/ trunk/doc/sphinxext/tests/test_docscrape.py trunk/doc/sphinxext/traitsdoc.py Modified: trunk/doc/Makefile trunk/doc/source/conf.py Log: Move Sphinx extensions under Numpy's SVN trunk Modified: trunk/doc/Makefile =================================================================== --- trunk/doc/Makefile 2008-11-29 14:54:29 UTC (rev 6124) +++ trunk/doc/Makefile 2008-11-30 14:44:38 UTC (rev 6125) @@ -37,15 +37,12 @@ cd build/dist && tar czf ../dist.tar.gz * generate: build/generate-stamp -build/generate-stamp: $(wildcard source/reference/*.rst) ext +build/generate-stamp: $(wildcard source/reference/*.rst) mkdir -p build - ./ext/autosummary_generate.py source/reference/*.rst \ + ./sphinxext/autosummary_generate.py source/reference/*.rst \ -p dump.xml -o source/reference/generated touch build/generate-stamp -ext: - svn co http://sphinx.googlecode.com/svn/contrib/trunk/numpyext ext - html: generate mkdir -p build/html build/doctrees $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) build/html Modified: trunk/doc/source/conf.py =================================================================== --- trunk/doc/source/conf.py 2008-11-29 14:54:29 UTC (rev 6124) +++ trunk/doc/source/conf.py 2008-11-30 14:44:38 UTC (rev 6125) @@ -5,7 +5,7 @@ # If your extensions are in another directory, add it here. If the directory # is relative to the documentation root, use os.path.abspath to make it # absolute, like shown here. 
-sys.path.append(os.path.abspath('../ext')) +sys.path.append(os.path.abspath('../sphinxext')) # Check Sphinx version import sphinx Added: trunk/doc/sphinxext/LICENSE.txt =================================================================== --- trunk/doc/sphinxext/LICENSE.txt 2008-11-29 14:54:29 UTC (rev 6124) +++ trunk/doc/sphinxext/LICENSE.txt 2008-11-30 14:44:38 UTC (rev 6125) @@ -0,0 +1,97 @@ +------------------------------------------------------------------------------- + The files + - numpydoc.py + - autosummary.py + - autosummary_generate.py + - docscrape.py + - docscrape_sphinx.py + - phantom_import.py + have the following license: + +Copyright (C) 2008 Stefan van der Walt , Pauli Virtanen + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + +THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, +INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING +IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +------------------------------------------------------------------------------- + The files + - compiler_unparse.py + - comment_eater.py + - traitsdoc.py + have the following license: + +This software is OSI Certified Open Source Software. +OSI Certified is a certification mark of the Open Source Initiative. + +Copyright (c) 2006, Enthought, Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of Enthought, Inc. nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +------------------------------------------------------------------------------- + The files + - only_directives.py + - plot_directive.py + originate from Matplotlib (http://matplotlib.sf.net/) which has + the following license: + +Copyright (c) 2002-2008 John D. Hunter; All Rights Reserved. + +1. This LICENSE AGREEMENT is between John D. Hunter (?JDH?), and the Individual or Organization (?Licensee?) accessing and otherwise using matplotlib software in source or binary form and its associated documentation. + +2. Subject to the terms and conditions of this License Agreement, JDH hereby grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, analyze, test, perform and/or display publicly, prepare derivative works, distribute, and otherwise use matplotlib 0.98.3 alone or in any derivative version, provided, however, that JDH?s License Agreement and JDH?s notice of copyright, i.e., ?Copyright (c) 2002-2008 John D. Hunter; All Rights Reserved? are retained in matplotlib 0.98.3 alone or in any derivative version prepared by Licensee. + +3. In the event Licensee prepares a derivative work that is based on or incorporates matplotlib 0.98.3 or any part thereof, and wants to make the derivative work available to others as provided herein, then Licensee hereby agrees to include in any such work a brief summary of the changes made to matplotlib 0.98.3. + +4. JDH is making matplotlib 0.98.3 available to Licensee on an ?AS IS? basis. JDH MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, JDH MAKES NO AND DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF MATPLOTLIB 0.98.3 WILL NOT INFRINGE ANY THIRD PARTY RIGHTS. + +5. JDH SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF MATPLOTLIB 0.98.3 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING MATPLOTLIB 0.98.3, OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +6. This License Agreement will automatically terminate upon a material breach of its terms and conditions. + +7. Nothing in this License Agreement shall be deemed to create any relationship of agency, partnership, or joint venture between JDH and Licensee. This License Agreement does not grant permission to use JDH trademarks or trade name in a trademark sense to endorse or promote products or services of Licensee, or any third party. + +8. By copying, installing or otherwise using matplotlib 0.98.3, Licensee agrees to be bound by the terms and conditions of this License Agreement. 
+ Added: trunk/doc/sphinxext/__init__.py =================================================================== Added: trunk/doc/sphinxext/autosummary.py =================================================================== --- trunk/doc/sphinxext/autosummary.py 2008-11-29 14:54:29 UTC (rev 6124) +++ trunk/doc/sphinxext/autosummary.py 2008-11-30 14:44:38 UTC (rev 6125) @@ -0,0 +1,334 @@ +""" +=========== +autosummary +=========== + +Sphinx extension that adds an autosummary:: directive, which can be +used to generate function/method/attribute/etc. summary lists, similar +to those output eg. by Epydoc and other API doc generation tools. + +An :autolink: role is also provided. + +autosummary directive +--------------------- + +The autosummary directive has the form:: + + .. autosummary:: + :nosignatures: + :toctree: generated/ + + module.function_1 + module.function_2 + ... + +and it generates an output table (containing signatures, optionally) + + ======================== ============================================= + module.function_1(args) Summary line from the docstring of function_1 + module.function_2(args) Summary line from the docstring + ... + ======================== ============================================= + +If the :toctree: option is specified, files matching the function names +are inserted to the toctree with the given prefix: + + generated/module.function_1 + generated/module.function_2 + ... + +Note: The file names contain the module:: or currentmodule:: prefixes. + +.. seealso:: autosummary_generate.py + + +autolink role +------------- + +The autolink role functions as ``:obj:`` when the name referred can be +resolved to a Python object, and otherwise it becomes simple emphasis. +This can be used as the default role to make links 'smart'. + +""" +import sys, os, posixpath, re + +from docutils.parsers.rst import directives +from docutils.statemachine import ViewList +from docutils import nodes + +import sphinx.addnodes, sphinx.roles, sphinx.builder +from sphinx.util import patfilter + +from docscrape_sphinx import get_doc_object + + +def setup(app): + app.add_directive('autosummary', autosummary_directive, True, (0, 0, False), + toctree=directives.unchanged, + nosignatures=directives.flag) + app.add_role('autolink', autolink_role) + + app.add_node(autosummary_toc, + html=(autosummary_toc_visit_html, autosummary_toc_depart_noop), + latex=(autosummary_toc_visit_latex, autosummary_toc_depart_noop)) + app.connect('doctree-read', process_autosummary_toc) + +#------------------------------------------------------------------------------ +# autosummary_toc node +#------------------------------------------------------------------------------ + +class autosummary_toc(nodes.comment): + pass + +def process_autosummary_toc(app, doctree): + """ + Insert items described in autosummary:: to the TOC tree, but do + not generate the toctree:: list. 
+ + """ + env = app.builder.env + crawled = {} + def crawl_toc(node, depth=1): + crawled[node] = True + for j, subnode in enumerate(node): + try: + if (isinstance(subnode, autosummary_toc) + and isinstance(subnode[0], sphinx.addnodes.toctree)): + env.note_toctree(env.docname, subnode[0]) + continue + except IndexError: + continue + if not isinstance(subnode, nodes.section): + continue + if subnode not in crawled: + crawl_toc(subnode, depth+1) + crawl_toc(doctree) + +def autosummary_toc_visit_html(self, node): + """Hide autosummary toctree list in HTML output""" + raise nodes.SkipNode + +def autosummary_toc_visit_latex(self, node): + """Show autosummary toctree (= put the referenced pages here) in Latex""" + pass + +def autosummary_toc_depart_noop(self, node): + pass + +#------------------------------------------------------------------------------ +# .. autosummary:: +#------------------------------------------------------------------------------ + +def autosummary_directive(dirname, arguments, options, content, lineno, + content_offset, block_text, state, state_machine): + """ + Pretty table containing short signatures and summaries of functions etc. + + autosummary also generates a (hidden) toctree:: node. + + """ + + names = [] + names += [x.strip() for x in content if x.strip()] + + table, warnings, real_names = get_autosummary(names, state, + 'nosignatures' in options) + node = table + + env = state.document.settings.env + suffix = env.config.source_suffix + all_docnames = env.found_docs.copy() + dirname = posixpath.dirname(env.docname) + + if 'toctree' in options: + tree_prefix = options['toctree'].strip() + docnames = [] + for name in names: + name = real_names.get(name, name) + + docname = tree_prefix + name + if docname.endswith(suffix): + docname = docname[:-len(suffix)] + docname = posixpath.normpath(posixpath.join(dirname, docname)) + if docname not in env.found_docs: + warnings.append(state.document.reporter.warning( + 'toctree references unknown document %r' % docname, + line=lineno)) + docnames.append(docname) + + tocnode = sphinx.addnodes.toctree() + tocnode['includefiles'] = docnames + tocnode['maxdepth'] = -1 + tocnode['glob'] = None + + tocnode = autosummary_toc('', '', tocnode) + return warnings + [node] + [tocnode] + else: + return warnings + [node] + +def get_autosummary(names, state, no_signatures=False): + """ + Generate a proper table node for autosummary:: directive. + + Parameters + ---------- + names : list of str + Names of Python objects to be imported and added to the table. 
+ document : document + Docutils document object + + """ + document = state.document + + real_names = {} + warnings = [] + + prefixes = [''] + prefixes.insert(0, document.settings.env.currmodule) + + table = nodes.table('') + group = nodes.tgroup('', cols=2) + table.append(group) + group.append(nodes.colspec('', colwidth=30)) + group.append(nodes.colspec('', colwidth=70)) + body = nodes.tbody('') + group.append(body) + + def append_row(*column_texts): + row = nodes.row('') + for text in column_texts: + node = nodes.paragraph('') + vl = ViewList() + vl.append(text, '') + state.nested_parse(vl, 0, node) + row.append(nodes.entry('', node)) + body.append(row) + + for name in names: + try: + obj, real_name = import_by_name(name, prefixes=prefixes) + except ImportError: + warnings.append(document.reporter.warning( + 'failed to import %s' % name)) + append_row(":obj:`%s`" % name, "") + continue + + real_names[name] = real_name + + doc = get_doc_object(obj) + + if doc['Summary']: + title = " ".join(doc['Summary']) + else: + title = "" + + col1 = ":obj:`%s <%s>`" % (name, real_name) + if doc['Signature']: + sig = re.sub('^[a-zA-Z_0-9.-]*', '', doc['Signature']) + if '=' in sig: + # abbreviate optional arguments + sig = re.sub(r', ([a-zA-Z0-9_]+)=', r'[, \1=', sig, count=1) + sig = re.sub(r'\(([a-zA-Z0-9_]+)=', r'([\1=', sig, count=1) + sig = re.sub(r'=[^,)]+,', ',', sig) + sig = re.sub(r'=[^,)]+\)$', '])', sig) + # shorten long strings + sig = re.sub(r'(\[.{16,16}[^,)]*?),.*?\]\)', r'\1, ...])', sig) + else: + sig = re.sub(r'(\(.{16,16}[^,)]*?),.*?\)', r'\1, ...)', sig) + col1 += " " + sig + col2 = title + append_row(col1, col2) + + return table, warnings, real_names + +def import_by_name(name, prefixes=[None]): + """ + Import a Python object that has the given name, under one of the prefixes. + + Parameters + ---------- + name : str + Name of a Python object, eg. 'numpy.ndarray.view' + prefixes : list of (str or None), optional + Prefixes to prepend to the name (None implies no prefix). + The first prefixed name that results to successful import is used. + + Returns + ------- + obj + The imported object + name + Name of the imported object (useful if `prefixes` was used) + + """ + for prefix in prefixes: + try: + if prefix: + prefixed_name = '.'.join([prefix, name]) + else: + prefixed_name = name + return _import_by_name(prefixed_name), prefixed_name + except ImportError: + pass + raise ImportError + +def _import_by_name(name): + """Import a Python object given its full name""" + try: + # try first interpret `name` as MODNAME.OBJ + name_parts = name.split('.') + try: + modname = '.'.join(name_parts[:-1]) + __import__(modname) + return getattr(sys.modules[modname], name_parts[-1]) + except (ImportError, IndexError, AttributeError): + pass + + # ... then as MODNAME, MODNAME.OBJ1, MODNAME.OBJ1.OBJ2, ... 
+ last_j = 0 + modname = None + for j in reversed(range(1, len(name_parts)+1)): + last_j = j + modname = '.'.join(name_parts[:j]) + try: + __import__(modname) + except ImportError: + continue + if modname in sys.modules: + break + + if last_j < len(name_parts): + obj = sys.modules[modname] + for obj_name in name_parts[last_j:]: + obj = getattr(obj, obj_name) + return obj + else: + return sys.modules[modname] + except (ValueError, ImportError, AttributeError, KeyError), e: + raise ImportError(e) + +#------------------------------------------------------------------------------ +# :autolink: (smart default role) +#------------------------------------------------------------------------------ + +def autolink_role(typ, rawtext, etext, lineno, inliner, + options={}, content=[]): + """ + Smart linking role. + + Expands to ":obj:`text`" if `text` is an object that can be imported; + otherwise expands to "*text*". + """ + r = sphinx.roles.xfileref_role('obj', rawtext, etext, lineno, inliner, + options, content) + pnode = r[0][0] + + prefixes = [None] + #prefixes.insert(0, inliner.document.settings.env.currmodule) + try: + obj, name = import_by_name(pnode['reftarget'], prefixes) + except ImportError: + content = pnode[0] + r[0][0] = nodes.emphasis(rawtext, content[0].astext(), + classes=content['classes']) + return r Added: trunk/doc/sphinxext/autosummary_generate.py =================================================================== --- trunk/doc/sphinxext/autosummary_generate.py 2008-11-29 14:54:29 UTC (rev 6124) +++ trunk/doc/sphinxext/autosummary_generate.py 2008-11-30 14:44:38 UTC (rev 6125) @@ -0,0 +1,189 @@ +#!/usr/bin/env python +r""" +autosummary_generate.py OPTIONS FILES + +Generate automatic RST source files for items referred to in +autosummary:: directives. + +Each generated RST file contains a single auto*:: directive which +extracts the docstring of the referred item. 
+ +Example Makefile rule:: + + generate: + ./ext/autosummary_generate.py -o source/generated source/*.rst + +""" +import glob, re, inspect, os, optparse +from autosummary import import_by_name + +try: + from phantom_import import import_phantom_module +except ImportError: + import_phantom_module = lambda x: x + +def main(): + p = optparse.OptionParser(__doc__.strip()) + p.add_option("-p", "--phantom", action="store", type="string", + dest="phantom", default=None, + help="Phantom import modules from a file") + p.add_option("-o", "--output-dir", action="store", type="string", + dest="output_dir", default=None, + help=("Write all output files to the given directory (instead " + "of writing them as specified in the autosummary:: " + "directives)")) + options, args = p.parse_args() + + if len(args) == 0: + p.error("wrong number of arguments") + + if options.phantom and os.path.isfile(options.phantom): + import_phantom_module(options.phantom) + + # read + names = {} + for name, loc in get_documented(args).items(): + for (filename, sec_title, keyword, toctree) in loc: + if toctree is not None: + path = os.path.join(os.path.dirname(filename), toctree) + names[name] = os.path.abspath(path) + + # write + for name, path in sorted(names.items()): + if options.output_dir is not None: + path = options.output_dir + + if not os.path.isdir(path): + os.makedirs(path) + + try: + obj, name = import_by_name(name) + except ImportError, e: + print "Failed to import '%s': %s" % (name, e) + continue + + fn = os.path.join(path, '%s.rst' % name) + + if os.path.exists(fn): + # skip + continue + + f = open(fn, 'w') + + try: + f.write('%s\n%s\n\n' % (name, '='*len(name))) + + if inspect.isclass(obj): + if issubclass(obj, Exception): + f.write(format_modulemember(name, 'autoexception')) + else: + f.write(format_modulemember(name, 'autoclass')) + elif inspect.ismodule(obj): + f.write(format_modulemember(name, 'automodule')) + elif inspect.ismethod(obj) or inspect.ismethoddescriptor(obj): + f.write(format_classmember(name, 'automethod')) + elif callable(obj): + f.write(format_modulemember(name, 'autofunction')) + elif hasattr(obj, '__get__'): + f.write(format_classmember(name, 'autoattribute')) + else: + f.write(format_modulemember(name, 'autofunction')) + finally: + f.close() + +def format_modulemember(name, directive): + parts = name.split('.') + mod, name = '.'.join(parts[:-1]), parts[-1] + return ".. currentmodule:: %s\n\n.. %s:: %s\n" % (mod, directive, name) + +def format_classmember(name, directive): + parts = name.split('.') + mod, name = '.'.join(parts[:-2]), '.'.join(parts[-2:]) + return ".. currentmodule:: %s\n\n.. %s:: %s\n" % (mod, directive, name) + +def get_documented(filenames): + """ + Find out what items are documented in source/*.rst + + Returns + ------- + documented : dict of list of (filename, title, keyword, toctree) + Dictionary whose keys are documented names of objects. + The value is a list of locations where the object was documented. + Each location is a tuple of filename, the current section title, + the name of the directive, and the value of the :toctree: argument + (if present) of the directive. + + """ + title_underline_re = re.compile("^[-=*_^#]{3,}\s*$") + autodoc_re = re.compile(".. 
auto(function|method|attribute|class|exception|module)::\s*([A-Za-z0-9_.]+)\s*$") + autosummary_re = re.compile(r'^\.\.\s+autosummary::\s*') + module_re = re.compile(r'^\.\.\s+(current)?module::\s*([a-zA-Z0-9_.]+)\s*$') + autosummary_item_re = re.compile(r'^\s+([_a-zA-Z][a-zA-Z0-9_.]*)\s*') + toctree_arg_re = re.compile(r'^\s+:toctree:\s*(.*?)\s*$') + + documented = {} + + for filename in filenames: + current_title = [] + last_line = None + toctree = None + current_module = None + in_autosummary = False + + f = open(filename, 'r') + for line in f: + try: + if in_autosummary: + m = toctree_arg_re.match(line) + if m: + toctree = m.group(1) + continue + + if line.strip().startswith(':'): + continue # skip options + + m = autosummary_item_re.match(line) + if m: + name = m.group(1).strip() + if current_module and not name.startswith(current_module + '.'): + name = "%s.%s" % (current_module, name) + documented.setdefault(name, []).append( + (filename, current_title, 'autosummary', toctree)) + continue + if line.strip() == '': + continue + in_autosummary = False + + m = autosummary_re.match(line) + if m: + in_autosummary = True + continue + + m = autodoc_re.search(line) + if m: + name = m.group(2).strip() + if current_module and not name.startswith(current_module + '.'): + name = "%s.%s" % (current_module, name) + if m.group(1) == "module": + current_module = name + documented.setdefault(name, []).append( + (filename, current_title, "auto" + m.group(1), None)) + continue + + m = title_underline_re.match(line) + if m and last_line: + current_title = last_line.strip() + continue + + m = module_re.match(line) + if m: + current_module = m.group(2) + continue + finally: + last_line = line + + return documented + +if __name__ == "__main__": + main() Property changes on: trunk/doc/sphinxext/autosummary_generate.py ___________________________________________________________________ Name: svn:executable + * Added: trunk/doc/sphinxext/comment_eater.py =================================================================== --- trunk/doc/sphinxext/comment_eater.py 2008-11-29 14:54:29 UTC (rev 6124) +++ trunk/doc/sphinxext/comment_eater.py 2008-11-30 14:44:38 UTC (rev 6125) @@ -0,0 +1,158 @@ +from cStringIO import StringIO +import compiler +import inspect +import textwrap +import tokenize + +from compiler_unparse import unparse + + +class Comment(object): + """ A comment block. + """ + is_comment = True + def __init__(self, start_lineno, end_lineno, text): + # int : The first line number in the block. 1-indexed. + self.start_lineno = start_lineno + # int : The last line number. Inclusive! + self.end_lineno = end_lineno + # str : The text block including '#' character but not any leading spaces. + self.text = text + + def add(self, string, start, end, line): + """ Add a new comment line. + """ + self.start_lineno = min(self.start_lineno, start[0]) + self.end_lineno = max(self.end_lineno, end[0]) + self.text += string + + def __repr__(self): + return '%s(%r, %r, %r)' % (self.__class__.__name__, self.start_lineno, + self.end_lineno, self.text) + + +class NonComment(object): + """ A non-comment block of code. + """ + is_comment = False + def __init__(self, start_lineno, end_lineno): + self.start_lineno = start_lineno + self.end_lineno = end_lineno + + def add(self, string, start, end, line): + """ Add lines to the block. + """ + if string.strip(): + # Only add if not entirely whitespace. 
+ self.start_lineno = min(self.start_lineno, start[0]) + self.end_lineno = max(self.end_lineno, end[0]) + + def __repr__(self): + return '%s(%r, %r)' % (self.__class__.__name__, self.start_lineno, + self.end_lineno) + + +class CommentBlocker(object): + """ Pull out contiguous comment blocks. + """ + def __init__(self): + # Start with a dummy. + self.current_block = NonComment(0, 0) + + # All of the blocks seen so far. + self.blocks = [] + + # The index mapping lines of code to their associated comment blocks. + self.index = {} + + def process_file(self, file): + """ Process a file object. + """ + for token in tokenize.generate_tokens(file.next): + self.process_token(*token) + self.make_index() + + def process_token(self, kind, string, start, end, line): + """ Process a single token. + """ + if self.current_block.is_comment: + if kind == tokenize.COMMENT: + self.current_block.add(string, start, end, line) + else: + self.new_noncomment(start[0], end[0]) + else: + if kind == tokenize.COMMENT: + self.new_comment(string, start, end, line) + else: + self.current_block.add(string, start, end, line) + + def new_noncomment(self, start_lineno, end_lineno): + """ We are transitioning from a noncomment to a comment. + """ + block = NonComment(start_lineno, end_lineno) + self.blocks.append(block) + self.current_block = block + + def new_comment(self, string, start, end, line): + """ Possibly add a new comment. + + Only adds a new comment if this comment is the only thing on the line. + Otherwise, it extends the noncomment block. + """ + prefix = line[:start[1]] + if prefix.strip(): + # Oops! Trailing comment, not a comment block. + self.current_block.add(string, start, end, line) + else: + # A comment block. + block = Comment(start[0], end[0], string) + self.blocks.append(block) + self.current_block = block + + def make_index(self): + """ Make the index mapping lines of actual code to their associated + prefix comments. + """ + for prev, block in zip(self.blocks[:-1], self.blocks[1:]): + if not block.is_comment: + self.index[block.start_lineno] = prev + + def search_for_comment(self, lineno, default=None): + """ Find the comment block just before the given line number. + + Returns None (or the specified default) if there is no such block. + """ + if not self.index: + self.make_index() + block = self.index.get(lineno, None) + text = getattr(block, 'text', default) + return text + + +def strip_comment_marker(text): + """ Strip # markers at the front of a block of comment text. + """ + lines = [] + for line in text.splitlines(): + lines.append(line.lstrip('#')) + text = textwrap.dedent('\n'.join(lines)) + return text + + +def get_class_traits(klass): + """ Yield all of the documentation for trait definitions on a class object. + """ + # FIXME: gracefully handle errors here or in the caller? + source = inspect.getsource(klass) + cb = CommentBlocker() + cb.process_file(StringIO(source)) + mod_ast = compiler.parse(source) + class_ast = mod_ast.node.nodes[0] + for node in class_ast.code.nodes: + # FIXME: handle other kinds of assignments? 
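
``strip_comment_marker`` above only strips the leading ``#`` characters and then dedents what is left, so relative indentation inside a comment block survives. A small sketch of the intended behaviour, again assuming ``doc/sphinxext`` is importable::

    >>> from comment_eater import strip_comment_marker
    >>> strip_comment_marker('# The answer.\n#     An indented continuation.')
    'The answer.\n    An indented continuation.'
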
+ if isinstance(node, compiler.ast.Assign): + name = node.nodes[0].name + rhs = unparse(node.expr).strip() + doc = strip_comment_marker(cb.search_for_comment(node.lineno, default='')) + yield name, rhs, doc + Added: trunk/doc/sphinxext/compiler_unparse.py =================================================================== --- trunk/doc/sphinxext/compiler_unparse.py 2008-11-29 14:54:29 UTC (rev 6124) +++ trunk/doc/sphinxext/compiler_unparse.py 2008-11-30 14:44:38 UTC (rev 6125) @@ -0,0 +1,860 @@ +""" Turn compiler.ast structures back into executable python code. + + The unparse method takes a compiler.ast tree and transforms it back into + valid python code. It is incomplete and currently only works for + import statements, function calls, function definitions, assignments, and + basic expressions. + + Inspired by python-2.5-svn/Demo/parser/unparse.py + + fixme: We may want to move to using _ast trees because the compiler for + them is about 6 times faster than compiler.compile. +""" + +import sys +import cStringIO +from compiler.ast import Const, Name, Tuple, Div, Mul, Sub, Add + +def unparse(ast, single_line_functions=False): + s = cStringIO.StringIO() + UnparseCompilerAst(ast, s, single_line_functions) + return s.getvalue().lstrip() + +op_precedence = { 'compiler.ast.Power':3, 'compiler.ast.Mul':2, 'compiler.ast.Div':2, + 'compiler.ast.Add':1, 'compiler.ast.Sub':1 } + +class UnparseCompilerAst: + """ Methods in this class recursively traverse an AST and + output source code for the abstract syntax; original formatting + is disregarged. + """ + + ######################################################################### + # object interface. + ######################################################################### + + def __init__(self, tree, file = sys.stdout, single_line_functions=False): + """ Unparser(tree, file=sys.stdout) -> None. + + Print the source for tree to file. + """ + self.f = file + self._single_func = single_line_functions + self._do_indent = True + self._indent = 0 + self._dispatch(tree) + self._write("\n") + self.f.flush() + + ######################################################################### + # Unparser private interface. + ######################################################################### + + ### format, output, and dispatch methods ################################ + + def _fill(self, text = ""): + "Indent a piece of text, according to the current indentation level" + if self._do_indent: + self._write("\n"+" "*self._indent + text) + else: + self._write(text) + + def _write(self, text): + "Append a piece of text to the current line." + self.f.write(text) + + def _enter(self): + "Print ':', and increase the indentation." + self._write(": ") + self._indent += 1 + + def _leave(self): + "Decrease the indentation level." + self._indent -= 1 + + def _dispatch(self, tree): + "_dispatcher function, _dispatching tree type T to method _T." + if isinstance(tree, list): + for t in tree: + self._dispatch(t) + return + meth = getattr(self, "_"+tree.__class__.__name__) + if tree.__class__.__name__ == 'NoneType' and not self._do_indent: + return + meth(tree) + + + ######################################################################### + # compiler.ast unparsing methods. + # + # There should be one method per concrete grammar type. They are + # organized in alphabetical order. 
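
As the module docstring notes, only a subset of the grammar is handled, but the supported constructs come back as equivalent source text. A minimal sketch (Python 2 only, since the ``compiler`` package it relies on was removed in Python 3)::

    >>> from compiler import parse
    >>> from compiler_unparse import unparse
    >>> unparse(parse("a = b.f(1, x=2)"))
    'a = b.f(1, x=2)\n'
    >>> unparse(parse("from os import path as p"))
    'from os import path as p\n'
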
+ ######################################################################### + + def _Add(self, t): + self.__binary_op(t, '+') + + def _And(self, t): + self._write(" (") + for i, node in enumerate(t.nodes): + self._dispatch(node) + if i != len(t.nodes)-1: + self._write(") and (") + self._write(")") + + def _AssAttr(self, t): + """ Handle assigning an attribute of an object + """ + self._dispatch(t.expr) + self._write('.'+t.attrname) + + def _Assign(self, t): + """ Expression Assignment such as "a = 1". + + This only handles assignment in expressions. Keyword assignment + is handled separately. + """ + self._fill() + for target in t.nodes: + self._dispatch(target) + self._write(" = ") + self._dispatch(t.expr) + if not self._do_indent: + self._write('; ') + + def _AssName(self, t): + """ Name on left hand side of expression. + + Treat just like a name on the right side of an expression. + """ + self._Name(t) + + def _AssTuple(self, t): + """ Tuple on left hand side of an expression. + """ + + # _write each elements, separated by a comma. + for element in t.nodes[:-1]: + self._dispatch(element) + self._write(", ") + + # Handle the last one without writing comma + last_element = t.nodes[-1] + self._dispatch(last_element) + + def _AugAssign(self, t): + """ +=,-=,*=,/=,**=, etc. operations + """ + + self._fill() + self._dispatch(t.node) + self._write(' '+t.op+' ') + self._dispatch(t.expr) + if not self._do_indent: + self._write(';') + + def _Bitand(self, t): + """ Bit and operation. + """ + + for i, node in enumerate(t.nodes): + self._write("(") + self._dispatch(node) + self._write(")") + if i != len(t.nodes)-1: + self._write(" & ") + + def _Bitor(self, t): + """ Bit or operation + """ + + for i, node in enumerate(t.nodes): + self._write("(") + self._dispatch(node) + self._write(")") + if i != len(t.nodes)-1: + self._write(" | ") + + def _CallFunc(self, t): + """ Function call. + """ + self._dispatch(t.node) + self._write("(") + comma = False + for e in t.args: + if comma: self._write(", ") + else: comma = True + self._dispatch(e) + if t.star_args: + if comma: self._write(", ") + else: comma = True + self._write("*") + self._dispatch(t.star_args) + if t.dstar_args: + if comma: self._write(", ") + else: comma = True + self._write("**") + self._dispatch(t.dstar_args) + self._write(")") + + def _Compare(self, t): + self._dispatch(t.expr) + for op, expr in t.ops: + self._write(" " + op + " ") + self._dispatch(expr) + + def _Const(self, t): + """ A constant value such as an integer value, 3, or a string, "hello". + """ + self._dispatch(t.value) + + def _Decorators(self, t): + """ Handle function decorators (eg. @has_units) + """ + for node in t.nodes: + self._dispatch(node) + + def _Dict(self, t): + self._write("{") + for i, (k, v) in enumerate(t.items): + self._dispatch(k) + self._write(": ") + self._dispatch(v) + if i < len(t.items)-1: + self._write(", ") + self._write("}") + + def _Discard(self, t): + """ Node for when return value is ignored such as in "foo(a)". + """ + self._fill() + self._dispatch(t.expr) + + def _Div(self, t): + self.__binary_op(t, '/') + + def _Ellipsis(self, t): + self._write("...") + + def _From(self, t): + """ Handle "from xyz import foo, bar as baz". + """ + # fixme: Are From and ImportFrom handled differently? 
+ self._fill("from ") + self._write(t.modname) + self._write(" import ") + for i, (name,asname) in enumerate(t.names): + if i != 0: + self._write(", ") + self._write(name) + if asname is not None: + self._write(" as "+asname) + + def _Function(self, t): + """ Handle function definitions + """ + if t.decorators is not None: + self._fill("@") + self._dispatch(t.decorators) + self._fill("def "+t.name + "(") + defaults = [None] * (len(t.argnames) - len(t.defaults)) + list(t.defaults) + for i, arg in enumerate(zip(t.argnames, defaults)): + self._write(arg[0]) + if arg[1] is not None: + self._write('=') + self._dispatch(arg[1]) + if i < len(t.argnames)-1: + self._write(', ') + self._write(")") + if self._single_func: + self._do_indent = False + self._enter() + self._dispatch(t.code) + self._leave() + self._do_indent = True + + def _Getattr(self, t): + """ Handle getting an attribute of an object + """ + if isinstance(t.expr, (Div, Mul, Sub, Add)): + self._write('(') + self._dispatch(t.expr) + self._write(')') + else: + self._dispatch(t.expr) + + self._write('.'+t.attrname) + + def _If(self, t): + self._fill() + + for i, (compare,code) in enumerate(t.tests): + if i == 0: + self._write("if ") + else: + self._write("elif ") + self._dispatch(compare) + self._enter() + self._fill() + self._dispatch(code) + self._leave() + self._write("\n") + + if t.else_ is not None: + self._write("else") + self._enter() + self._fill() + self._dispatch(t.else_) + self._leave() + self._write("\n") + + def _IfExp(self, t): + self._dispatch(t.then) + self._write(" if ") + self._dispatch(t.test) + + if t.else_ is not None: + self._write(" else (") + self._dispatch(t.else_) + self._write(")") + + def _Import(self, t): + """ Handle "import xyz.foo". + """ + self._fill("import ") + + for i, (name,asname) in enumerate(t.names): + if i != 0: + self._write(", ") + self._write(name) + if asname is not None: + self._write(" as "+asname) + + def _Keyword(self, t): + """ Keyword value assignment within function calls and definitions. 
+ """ + self._write(t.name) + self._write("=") + self._dispatch(t.expr) + + def _List(self, t): + self._write("[") + for i,node in enumerate(t.nodes): + self._dispatch(node) + if i < len(t.nodes)-1: + self._write(", ") + self._write("]") + + def _Module(self, t): + if t.doc is not None: + self._dispatch(t.doc) + self._dispatch(t.node) + + def _Mul(self, t): + self.__binary_op(t, '*') + + def _Name(self, t): + self._write(t.name) + + def _NoneType(self, t): + self._write("None") + + def _Not(self, t): + self._write('not (') + self._dispatch(t.expr) + self._write(')') + + def _Or(self, t): + self._write(" (") + for i, node in enumerate(t.nodes): + self._dispatch(node) + if i != len(t.nodes)-1: + self._write(") or (") + self._write(")") + + def _Pass(self, t): + self._write("pass\n") + + def _Printnl(self, t): + self._fill("print ") + if t.dest: + self._write(">> ") + self._dispatch(t.dest) + self._write(", ") + comma = False + for node in t.nodes: + if comma: self._write(', ') + else: comma = True + self._dispatch(node) + + def _Power(self, t): + self.__binary_op(t, '**') + + def _Return(self, t): + self._fill("return ") + if t.value: + if isinstance(t.value, Tuple): + text = ', '.join([ name.name for name in t.value.asList() ]) + self._write(text) + else: + self._dispatch(t.value) + if not self._do_indent: + self._write('; ') + + def _Slice(self, t): + self._dispatch(t.expr) + self._write("[") + if t.lower: + self._dispatch(t.lower) + self._write(":") + if t.upper: + self._dispatch(t.upper) + #if t.step: + # self._write(":") + # self._dispatch(t.step) + self._write("]") + + def _Sliceobj(self, t): + for i, node in enumerate(t.nodes): + if i != 0: + self._write(":") + if not (isinstance(node, Const) and node.value is None): + self._dispatch(node) + + def _Stmt(self, tree): + for node in tree.nodes: + self._dispatch(node) + + def _Sub(self, t): + self.__binary_op(t, '-') + + def _Subscript(self, t): + self._dispatch(t.expr) + self._write("[") + for i, value in enumerate(t.subs): + if i != 0: + self._write(",") + self._dispatch(value) + self._write("]") + + def _TryExcept(self, t): + self._fill("try") + self._enter() + self._dispatch(t.body) + self._leave() + + for handler in t.handlers: + self._fill('except ') + self._dispatch(handler[0]) + if handler[1] is not None: + self._write(', ') + self._dispatch(handler[1]) + self._enter() + self._dispatch(handler[2]) + self._leave() + + if t.else_: + self._fill("else") + self._enter() + self._dispatch(t.else_) + self._leave() + + def _Tuple(self, t): + + if not t.nodes: + # Empty tuple. + self._write("()") + else: + self._write("(") + + # _write each elements, separated by a comma. 
+ for element in t.nodes[:-1]: + self._dispatch(element) + self._write(", ") + + # Handle the last one without writing comma + last_element = t.nodes[-1] + self._dispatch(last_element) + + self._write(")") + + def _UnaryAdd(self, t): + self._write("+") + self._dispatch(t.expr) + + def _UnarySub(self, t): + self._write("-") + self._dispatch(t.expr) + + def _With(self, t): + self._fill('with ') + self._dispatch(t.expr) + if t.vars: + self._write(' as ') + self._dispatch(t.vars.name) + self._enter() + self._dispatch(t.body) + self._leave() + self._write('\n') + + def _int(self, t): + self._write(repr(t)) + + def __binary_op(self, t, symbol): + # Check if parenthesis are needed on left side and then dispatch + has_paren = False + left_class = str(t.left.__class__) + if (left_class in op_precedence.keys() and + op_precedence[left_class] < op_precedence[str(t.__class__)]): + has_paren = True + if has_paren: + self._write('(') + self._dispatch(t.left) + if has_paren: + self._write(')') + # Write the appropriate symbol for operator + self._write(symbol) + # Check if parenthesis are needed on the right side and then dispatch + has_paren = False + right_class = str(t.right.__class__) + if (right_class in op_precedence.keys() and + op_precedence[right_class] < op_precedence[str(t.__class__)]): + has_paren = True + if has_paren: + self._write('(') + self._dispatch(t.right) + if has_paren: + self._write(')') + + def _float(self, t): + # if t is 0.1, str(t)->'0.1' while repr(t)->'0.1000000000001' + # We prefer str here. + self._write(str(t)) + + def _str(self, t): + self._write(repr(t)) + + def _tuple(self, t): + self._write(str(t)) + + ######################################################################### + # These are the methods from the _ast modules unparse. + # + # As our needs to handle more advanced code increase, we may want to + # modify some of the methods below so that they work for compiler.ast. + ######################################################################### + +# # stmt +# def _Expr(self, tree): +# self._fill() +# self._dispatch(tree.value) +# +# def _Import(self, t): +# self._fill("import ") +# first = True +# for a in t.names: +# if first: +# first = False +# else: +# self._write(", ") +# self._write(a.name) +# if a.asname: +# self._write(" as "+a.asname) +# +## def _ImportFrom(self, t): +## self._fill("from ") +## self._write(t.module) +## self._write(" import ") +## for i, a in enumerate(t.names): +## if i == 0: +## self._write(", ") +## self._write(a.name) +## if a.asname: +## self._write(" as "+a.asname) +## # XXX(jpe) what is level for? 
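
The ``op_precedence`` table drives ``__binary_op`` above: a subexpression is parenthesised only when its operator binds less tightly than the operator it is nested under, so parentheses that the AST has already discarded are not reinvented. A short sketch (Python 2 only)::

    >>> from compiler import parse
    >>> from compiler_unparse import unparse
    >>> unparse(parse("1*(2+3)"))
    '1*(2+3)\n'
    >>> unparse(parse("(1*2)+3"))
    '1*2+3\n'
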
+## +# +# def _Break(self, t): +# self._fill("break") +# +# def _Continue(self, t): +# self._fill("continue") +# +# def _Delete(self, t): +# self._fill("del ") +# self._dispatch(t.targets) +# +# def _Assert(self, t): +# self._fill("assert ") +# self._dispatch(t.test) +# if t.msg: +# self._write(", ") +# self._dispatch(t.msg) +# +# def _Exec(self, t): +# self._fill("exec ") +# self._dispatch(t.body) +# if t.globals: +# self._write(" in ") +# self._dispatch(t.globals) +# if t.locals: +# self._write(", ") +# self._dispatch(t.locals) +# +# def _Print(self, t): +# self._fill("print ") +# do_comma = False +# if t.dest: +# self._write(">>") +# self._dispatch(t.dest) +# do_comma = True +# for e in t.values: +# if do_comma:self._write(", ") +# else:do_comma=True +# self._dispatch(e) +# if not t.nl: +# self._write(",") +# +# def _Global(self, t): +# self._fill("global") +# for i, n in enumerate(t.names): +# if i != 0: +# self._write(",") +# self._write(" " + n) +# +# def _Yield(self, t): +# self._fill("yield") +# if t.value: +# self._write(" (") +# self._dispatch(t.value) +# self._write(")") +# +# def _Raise(self, t): +# self._fill('raise ') +# if t.type: +# self._dispatch(t.type) +# if t.inst: +# self._write(", ") +# self._dispatch(t.inst) +# if t.tback: +# self._write(", ") +# self._dispatch(t.tback) +# +# +# def _TryFinally(self, t): +# self._fill("try") +# self._enter() +# self._dispatch(t.body) +# self._leave() +# +# self._fill("finally") +# self._enter() +# self._dispatch(t.finalbody) +# self._leave() +# +# def _excepthandler(self, t): +# self._fill("except ") +# if t.type: +# self._dispatch(t.type) +# if t.name: +# self._write(", ") +# self._dispatch(t.name) +# self._enter() +# self._dispatch(t.body) +# self._leave() +# +# def _ClassDef(self, t): +# self._write("\n") +# self._fill("class "+t.name) +# if t.bases: +# self._write("(") +# for a in t.bases: +# self._dispatch(a) +# self._write(", ") +# self._write(")") +# self._enter() +# self._dispatch(t.body) +# self._leave() +# +# def _FunctionDef(self, t): +# self._write("\n") +# for deco in t.decorators: +# self._fill("@") +# self._dispatch(deco) +# self._fill("def "+t.name + "(") +# self._dispatch(t.args) +# self._write(")") +# self._enter() +# self._dispatch(t.body) +# self._leave() +# +# def _For(self, t): +# self._fill("for ") +# self._dispatch(t.target) +# self._write(" in ") +# self._dispatch(t.iter) +# self._enter() +# self._dispatch(t.body) +# self._leave() +# if t.orelse: +# self._fill("else") +# self._enter() +# self._dispatch(t.orelse) +# self._leave +# +# def _While(self, t): +# self._fill("while ") +# self._dispatch(t.test) +# self._enter() +# self._dispatch(t.body) +# self._leave() +# if t.orelse: +# self._fill("else") +# self._enter() +# self._dispatch(t.orelse) +# self._leave +# +# # expr +# def _Str(self, tree): +# self._write(repr(tree.s)) +## +# def _Repr(self, t): +# self._write("`") +# self._dispatch(t.value) +# self._write("`") +# +# def _Num(self, t): +# self._write(repr(t.n)) +# +# def _ListComp(self, t): +# self._write("[") +# self._dispatch(t.elt) +# for gen in t.generators: +# self._dispatch(gen) +# self._write("]") +# +# def _GeneratorExp(self, t): +# self._write("(") +# self._dispatch(t.elt) +# for gen in t.generators: +# self._dispatch(gen) +# self._write(")") +# +# def _comprehension(self, t): +# self._write(" for ") +# self._dispatch(t.target) +# self._write(" in ") +# self._dispatch(t.iter) +# for if_clause in t.ifs: +# self._write(" if ") +# self._dispatch(if_clause) +# +# def _IfExp(self, t): +# 
self._dispatch(t.body) +# self._write(" if ") +# self._dispatch(t.test) +# if t.orelse: +# self._write(" else ") +# self._dispatch(t.orelse) +# +# unop = {"Invert":"~", "Not": "not", "UAdd":"+", "USub":"-"} +# def _UnaryOp(self, t): +# self._write(self.unop[t.op.__class__.__name__]) +# self._write("(") +# self._dispatch(t.operand) +# self._write(")") +# +# binop = { "Add":"+", "Sub":"-", "Mult":"*", "Div":"/", "Mod":"%", +# "LShift":">>", "RShift":"<<", "BitOr":"|", "BitXor":"^", "BitAnd":"&", +# "FloorDiv":"//", "Pow": "**"} +# def _BinOp(self, t): +# self._write("(") +# self._dispatch(t.left) +# self._write(")" + self.binop[t.op.__class__.__name__] + "(") +# self._dispatch(t.right) +# self._write(")") +# +# boolops = {_ast.And: 'and', _ast.Or: 'or'} +# def _BoolOp(self, t): +# self._write("(") +# self._dispatch(t.values[0]) +# for v in t.values[1:]: +# self._write(" %s " % self.boolops[t.op.__class__]) +# self._dispatch(v) +# self._write(")") +# +# def _Attribute(self,t): +# self._dispatch(t.value) +# self._write(".") +# self._write(t.attr) +# +## def _Call(self, t): +## self._dispatch(t.func) +## self._write("(") +## comma = False +## for e in t.args: +## if comma: self._write(", ") +## else: comma = True +## self._dispatch(e) +## for e in t.keywords: +## if comma: self._write(", ") +## else: comma = True +## self._dispatch(e) +## if t.starargs: +## if comma: self._write(", ") +## else: comma = True +## self._write("*") +## self._dispatch(t.starargs) +## if t.kwargs: +## if comma: self._write(", ") +## else: comma = True +## self._write("**") +## self._dispatch(t.kwargs) +## self._write(")") +# +# # slice +# def _Index(self, t): +# self._dispatch(t.value) +# +# def _ExtSlice(self, t): +# for i, d in enumerate(t.dims): +# if i != 0: +# self._write(': ') +# self._dispatch(d) +# +# # others +# def _arguments(self, t): +# first = True +# nonDef = len(t.args)-len(t.defaults) +# for a in t.args[0:nonDef]: +# if first:first = False +# else: self._write(", ") +# self._dispatch(a) +# for a,d in zip(t.args[nonDef:], t.defaults): +# if first:first = False +# else: self._write(", ") +# self._dispatch(a), +# self._write("=") +# self._dispatch(d) +# if t.vararg: +# if first:first = False +# else: self._write(", ") +# self._write("*"+t.vararg) +# if t.kwarg: +# if first:first = False +# else: self._write(", ") +# self._write("**"+t.kwarg) +# +## def _keyword(self, t): +## self._write(t.arg) +## self._write("=") +## self._dispatch(t.value) +# +# def _Lambda(self, t): +# self._write("lambda ") +# self._dispatch(t.args) +# self._write(": ") +# self._dispatch(t.body) + + + Added: trunk/doc/sphinxext/docscrape.py =================================================================== --- trunk/doc/sphinxext/docscrape.py 2008-11-29 14:54:29 UTC (rev 6124) +++ trunk/doc/sphinxext/docscrape.py 2008-11-30 14:44:38 UTC (rev 6125) @@ -0,0 +1,492 @@ +"""Extract reference documentation from the NumPy source tree. + +""" + +import inspect +import textwrap +import re +import pydoc +from StringIO import StringIO +from warnings import warn +4 +class Reader(object): + """A line-based string reader. + + """ + def __init__(self, data): + """ + Parameters + ---------- + data : str + String with lines separated by '\n'. 
+ + """ + if isinstance(data,list): + self._str = data + else: + self._str = data.split('\n') # store string as list of lines + + self.reset() + + def __getitem__(self, n): + return self._str[n] + + def reset(self): + self._l = 0 # current line nr + + def read(self): + if not self.eof(): + out = self[self._l] + self._l += 1 + return out + else: + return '' + + def seek_next_non_empty_line(self): + for l in self[self._l:]: + if l.strip(): + break + else: + self._l += 1 + + def eof(self): + return self._l >= len(self._str) + + def read_to_condition(self, condition_func): + start = self._l + for line in self[start:]: + if condition_func(line): + return self[start:self._l] + self._l += 1 + if self.eof(): + return self[start:self._l+1] + return [] + + def read_to_next_empty_line(self): + self.seek_next_non_empty_line() + def is_empty(line): + return not line.strip() + return self.read_to_condition(is_empty) + + def read_to_next_unindented_line(self): + def is_unindented(line): + return (line.strip() and (len(line.lstrip()) == len(line))) + return self.read_to_condition(is_unindented) + + def peek(self,n=0): + if self._l + n < len(self._str): + return self[self._l + n] + else: + return '' + + def is_empty(self): + return not ''.join(self._str).strip() + + +class NumpyDocString(object): + def __init__(self,docstring): + docstring = textwrap.dedent(docstring).split('\n') + + self._doc = Reader(docstring) + self._parsed_data = { + 'Signature': '', + 'Summary': [''], + 'Extended Summary': [], + 'Parameters': [], + 'Returns': [], + 'Raises': [], + 'Warns': [], + 'Other Parameters': [], + 'Attributes': [], + 'Methods': [], + 'See Also': [], + 'Notes': [], + 'Warnings': [], + 'References': '', + 'Examples': '', + 'index': {} + } + + self._parse() + + def __getitem__(self,key): + return self._parsed_data[key] + + def __setitem__(self,key,val): + if not self._parsed_data.has_key(key): + warn("Unknown section %s" % key) + else: + self._parsed_data[key] = val + + def _is_at_section(self): + self._doc.seek_next_non_empty_line() + + if self._doc.eof(): + return False + + l1 = self._doc.peek().strip() # e.g. Parameters + + if l1.startswith('.. 
index::'): + return True + + l2 = self._doc.peek(1).strip() # ---------- or ========== + return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1)) + + def _strip(self,doc): + i = 0 + j = 0 + for i,line in enumerate(doc): + if line.strip(): break + + for j,line in enumerate(doc[::-1]): + if line.strip(): break + + return doc[i:len(doc)-j] + + def _read_to_next_section(self): + section = self._doc.read_to_next_empty_line() + + while not self._is_at_section() and not self._doc.eof(): + if not self._doc.peek(-1).strip(): # previous line was empty + section += [''] + + section += self._doc.read_to_next_empty_line() + + return section + + def _read_sections(self): + while not self._doc.eof(): + data = self._read_to_next_section() + name = data[0].strip() + + if name.startswith('..'): # index section + yield name, data[1:] + elif len(data) < 2: + yield StopIteration + else: + yield name, self._strip(data[2:]) + + def _parse_param_list(self,content): + r = Reader(content) + params = [] + while not r.eof(): + header = r.read().strip() + if ' : ' in header: + arg_name, arg_type = header.split(' : ')[:2] + else: + arg_name, arg_type = header, '' + + desc = r.read_to_next_unindented_line() + desc = dedent_lines(desc) + + params.append((arg_name,arg_type,desc)) + + return params + + + _name_rgx = re.compile(r"^\s*(:(?P\w+):`(?P[a-zA-Z0-9_.-]+)`|" + r" (?P[a-zA-Z0-9_.-]+))\s*", re.X) + def _parse_see_also(self, content): + """ + func_name : Descriptive text + continued text + another_func_name : Descriptive text + func_name1, func_name2, :meth:`func_name`, func_name3 + + """ + items = [] + + def parse_item_name(text): + """Match ':role:`name`' or 'name'""" + m = self._name_rgx.match(text) + if m: + g = m.groups() + if g[1] is None: + return g[3], None + else: + return g[2], g[1] + raise ValueError("%s is not a item name" % text) + + def push_item(name, rest): + if not name: + return + name, role = parse_item_name(name) + items.append((name, list(rest), role)) + del rest[:] + + current_func = None + rest = [] + + for line in content: + if not line.strip(): continue + + m = self._name_rgx.match(line) + if m and line[m.end():].strip().startswith(':'): + push_item(current_func, rest) + current_func, line = line[:m.end()], line[m.end():] + rest = [line.split(':', 1)[1].strip()] + if not rest[0]: + rest = [] + elif not line.startswith(' '): + push_item(current_func, rest) + current_func = None + if ',' in line: + for func in line.split(','): + push_item(func, []) + elif line.strip(): + current_func = line + elif current_func is not None: + rest.append(line.strip()) + push_item(current_func, rest) + return items + + def _parse_index(self, section, content): + """ + .. 
index: default + :refguide: something, else, and more + + """ + def strip_each_in(lst): + return [s.strip() for s in lst] + + out = {} + section = section.split('::') + if len(section) > 1: + out['default'] = strip_each_in(section[1].split(','))[0] + for line in content: + line = line.split(':') + if len(line) > 2: + out[line[1]] = strip_each_in(line[2].split(',')) + return out + + def _parse_summary(self): + """Grab signature (if given) and summary""" + if self._is_at_section(): + return + + summary = self._doc.read_to_next_empty_line() + summary_str = " ".join([s.strip() for s in summary]).strip() + if re.compile('^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str): + self['Signature'] = summary_str + if not self._is_at_section(): + self['Summary'] = self._doc.read_to_next_empty_line() + else: + self['Summary'] = summary + + if not self._is_at_section(): + self['Extended Summary'] = self._read_to_next_section() + + def _parse(self): + self._doc.reset() + self._parse_summary() + + for (section,content) in self._read_sections(): + if not section.startswith('..'): + section = ' '.join([s.capitalize() for s in section.split(' ')]) + if section in ('Parameters', 'Attributes', 'Methods', + 'Returns', 'Raises', 'Warns'): + self[section] = self._parse_param_list(content) + elif section.startswith('.. index::'): + self['index'] = self._parse_index(section, content) + elif section == 'See Also': + self['See Also'] = self._parse_see_also(content) + else: + self[section] = content + + # string conversion routines + + def _str_header(self, name, symbol='-'): + return [name, len(name)*symbol] + + def _str_indent(self, doc, indent=4): + out = [] + for line in doc: + out += [' '*indent + line] + return out + + def _str_signature(self): + if self['Signature']: + return [self['Signature'].replace('*','\*')] + [''] + else: + return [''] + + def _str_summary(self): + if self['Summary']: + return self['Summary'] + [''] + else: + return [] + + def _str_extended_summary(self): + if self['Extended Summary']: + return self['Extended Summary'] + [''] + else: + return [] + + def _str_param_list(self, name): + out = [] + if self[name]: + out += self._str_header(name) + for param,param_type,desc in self[name]: + out += ['%s : %s' % (param, param_type)] + out += self._str_indent(desc) + out += [''] + return out + + def _str_section(self, name): + out = [] + if self[name]: + out += self._str_header(name) + out += self[name] + out += [''] + return out + + def _str_see_also(self, func_role): + if not self['See Also']: return [] + out = [] + out += self._str_header("See Also") + last_had_desc = True + for func, desc, role in self['See Also']: + if role: + link = ':%s:`%s`' % (role, func) + elif func_role: + link = ':%s:`%s`' % (func_role, func) + else: + link = "`%s`_" % func + if desc or last_had_desc: + out += [''] + out += [link] + else: + out[-1] += ", %s" % link + if desc: + out += self._str_indent([' '.join(desc)]) + last_had_desc = True + else: + last_had_desc = False + out += [''] + return out + + def _str_index(self): + idx = self['index'] + out = [] + out += ['.. 
index:: %s' % idx.get('default','')] + for section, references in idx.iteritems(): + if section == 'default': + continue + out += [' :%s: %s' % (section, ', '.join(references))] + return out + + def __str__(self, func_role=''): + out = [] + out += self._str_signature() + out += self._str_summary() + out += self._str_extended_summary() + for param_list in ('Parameters','Returns','Raises'): + out += self._str_param_list(param_list) + out += self._str_section('Warnings') + out += self._str_see_also(func_role) + for s in ('Notes','References','Examples'): + out += self._str_section(s) + out += self._str_index() + return '\n'.join(out) + + +def indent(str,indent=4): + indent_str = ' '*indent + if str is None: + return indent_str + lines = str.split('\n') + return '\n'.join(indent_str + l for l in lines) + +def dedent_lines(lines): + """Deindent a list of lines maximally""" + return textwrap.dedent("\n".join(lines)).split("\n") + +def header(text, style='-'): + return text + '\n' + style*len(text) + '\n' + + +class FunctionDoc(NumpyDocString): + def __init__(self, func, role='func'): + self._f = func + self._role = role # e.g. "func" or "meth" + try: + NumpyDocString.__init__(self,inspect.getdoc(func) or '') + except ValueError, e: + print '*'*78 + print "ERROR: '%s' while parsing `%s`" % (e, self._f) + print '*'*78 + #print "Docstring follows:" + #print doclines + #print '='*78 + + if not self['Signature']: + func, func_name = self.get_func() + try: + # try to read signature + argspec = inspect.getargspec(func) + argspec = inspect.formatargspec(*argspec) + argspec = argspec.replace('*','\*') + signature = '%s%s' % (func_name, argspec) + except TypeError, e: + signature = '%s()' % func_name + self['Signature'] = signature + + def get_func(self): + func_name = getattr(self._f, '__name__', self.__class__.__name__) + if inspect.isclass(self._f): + func = getattr(self._f, '__call__', self._f.__init__) + else: + func = self._f + return func, func_name + + def __str__(self): + out = '' + + func, func_name = self.get_func() + signature = self['Signature'].replace('*', '\*') + + roles = {'func': 'function', + 'meth': 'method'} + + if self._role: + if not roles.has_key(self._role): + print "Warning: invalid role %s" % self._role + out += '.. %s:: %s\n \n\n' % (roles.get(self._role,''), + func_name) + + out += super(FunctionDoc, self).__str__(func_role=self._role) + return out + + +class ClassDoc(NumpyDocString): + def __init__(self,cls,modulename='',func_doc=FunctionDoc): + if not inspect.isclass(cls): + raise ValueError("Initialise using a class. Got %r" % cls) + self._cls = cls + + if modulename and not modulename.endswith('.'): + modulename += '.' + self._mod = modulename + self._name = cls.__name__ + self._func_doc = func_doc + + NumpyDocString.__init__(self, pydoc.getdoc(cls)) + + @property + def methods(self): + return [name for name,func in inspect.getmembers(self._cls) + if not name.startswith('_') and callable(func)] + + def __str__(self): + out = '' + out += super(ClassDoc, self).__str__() + out += "\n\n" + + #for m in self.methods: + # print "Parsing `%s`" % m + # out += str(self._func_doc(getattr(self._cls,m), 'meth')) + '\n\n' + # out += '.. 
index::\n single: %s; %s\n\n' % (self._name, m) + + return out + + Added: trunk/doc/sphinxext/docscrape_sphinx.py =================================================================== --- trunk/doc/sphinxext/docscrape_sphinx.py 2008-11-29 14:54:29 UTC (rev 6124) +++ trunk/doc/sphinxext/docscrape_sphinx.py 2008-11-30 14:44:38 UTC (rev 6125) @@ -0,0 +1,133 @@ +import re, inspect, textwrap, pydoc +from docscrape import NumpyDocString, FunctionDoc, ClassDoc + +class SphinxDocString(NumpyDocString): + # string conversion routines + def _str_header(self, name, symbol='`'): + return ['.. rubric:: ' + name, ''] + + def _str_field_list(self, name): + return [':' + name + ':'] + + def _str_indent(self, doc, indent=4): + out = [] + for line in doc: + out += [' '*indent + line] + return out + + def _str_signature(self): + return [''] + if self['Signature']: + return ['``%s``' % self['Signature']] + [''] + else: + return [''] + + def _str_summary(self): + return self['Summary'] + [''] + + def _str_extended_summary(self): + return self['Extended Summary'] + [''] + + def _str_param_list(self, name): + out = [] + if self[name]: + out += self._str_field_list(name) + out += [''] + for param,param_type,desc in self[name]: + out += self._str_indent(['**%s** : %s' % (param.strip(), + param_type)]) + out += [''] + out += self._str_indent(desc,8) + out += [''] + return out + + def _str_section(self, name): + out = [] + if self[name]: + out += self._str_header(name) + out += [''] + content = textwrap.dedent("\n".join(self[name])).split("\n") + out += content + out += [''] + return out + + def _str_see_also(self, func_role): + out = [] + if self['See Also']: + see_also = super(SphinxDocString, self)._str_see_also(func_role) + out = ['.. seealso::', ''] + out += self._str_indent(see_also[2:]) + return out + + def _str_warnings(self): + out = [] + if self['Warnings']: + out = ['.. warning::', ''] + out += self._str_indent(self['Warnings']) + return out + + def _str_index(self): + idx = self['index'] + out = [] + if len(idx) == 0: + return out + + out += ['.. 
index:: %s' % idx.get('default','')] + for section, references in idx.iteritems(): + if section == 'default': + continue + elif section == 'refguide': + out += [' single: %s' % (', '.join(references))] + else: + out += [' %s: %s' % (section, ','.join(references))] + return out + + def _str_references(self): + out = [] + if self['References']: + out += self._str_header('References') + if isinstance(self['References'], str): + self['References'] = [self['References']] + out.extend(self['References']) + out += [''] + return out + + def __str__(self, indent=0, func_role="obj"): + out = [] + out += self._str_signature() + out += self._str_index() + [''] + out += self._str_summary() + out += self._str_extended_summary() + for param_list in ('Parameters', 'Attributes', 'Methods', + 'Returns','Raises'): + out += self._str_param_list(param_list) + out += self._str_warnings() + out += self._str_see_also(func_role) + out += self._str_section('Notes') + out += self._str_references() + out += self._str_section('Examples') + out = self._str_indent(out,indent) + return '\n'.join(out) + +class SphinxFunctionDoc(SphinxDocString, FunctionDoc): + pass + +class SphinxClassDoc(SphinxDocString, ClassDoc): + pass + +def get_doc_object(obj, what=None): + if what is None: + if inspect.isclass(obj): + what = 'class' + elif inspect.ismodule(obj): + what = 'module' + elif callable(obj): + what = 'function' + else: + what = 'object' + if what == 'class': + return SphinxClassDoc(obj, '', func_doc=SphinxFunctionDoc) + elif what in ('function', 'method'): + return SphinxFunctionDoc(obj, '') + else: + return SphinxDocString(pydoc.getdoc(obj)) Added: trunk/doc/sphinxext/numpydoc.py =================================================================== --- trunk/doc/sphinxext/numpydoc.py 2008-11-29 14:54:29 UTC (rev 6124) +++ trunk/doc/sphinxext/numpydoc.py 2008-11-30 14:44:38 UTC (rev 6125) @@ -0,0 +1,111 @@ +""" +======== +numpydoc +======== + +Sphinx extension that handles docstrings in the Numpy standard format. [1] + +It will: + +- Convert Parameters etc. sections to field lists. +- Convert See Also section to a See also entry. +- Renumber references. +- Extract the signature from the docstring, if it can't be determined otherwise. + +.. [1] http://projects.scipy.org/scipy/numpy/wiki/CodingStyleGuidelines#docstring-standard + +""" + +import os, re, pydoc +from docscrape_sphinx import get_doc_object, SphinxDocString +import inspect + +def mangle_docstrings(app, what, name, obj, options, lines, + reference_offset=[0]): + if what == 'module': + # Strip top title + title_re = re.compile(r'^\s*[#*=]{4,}\n[a-z0-9 -]+\n[#*=]{4,}\s*', + re.I|re.S) + lines[:] = title_re.sub('', "\n".join(lines)).split("\n") + else: + doc = get_doc_object(obj, what) + lines[:] = str(doc).split("\n") + + if app.config.numpydoc_edit_link and hasattr(obj, '__name__') and \ + obj.__name__: + v = dict(full_name=obj.__name__) + lines += [''] + (app.config.numpydoc_edit_link % v).split("\n") + + # replace reference numbers so that there are no duplicates + references = [] + for l in lines: + l = l.strip() + if l.startswith('.. ['): + try: + references.append(int(l[len('.. ['):l.index(']')])) + except ValueError: + print "WARNING: invalid reference in %s docstring" % name + + # Start renaming from the biggest number, otherwise we may + # overwrite references. 
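
The comment above is the crux of the renumbering step: once an offset is applied, a low reference number can collide with a higher one that has not been renamed yet, which is why the comment prescribes starting from the biggest number. A worked sketch of the safe order, on a hypothetical docstring line::

    line = 'See [1]_ and [2]_.'
    offset = 1
    for r in sorted([1, 2], reverse=True):      # largest first
        line = line.replace('[%d]_' % r, '[%d]_' % (r + offset))
    # line == 'See [2]_ and [3]_.'
    # Renaming [1]_ first would instead yield 'See [3]_ and [3]_.',
    # merging two distinct citations.
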
+ references.sort() + if references: + for i, line in enumerate(lines): + for r in references: + new_r = reference_offset[0] + r + lines[i] = lines[i].replace('[%d]_' % r, + '[%d]_' % new_r) + lines[i] = lines[i].replace('.. [%d]' % r, + '.. [%d]' % new_r) + + reference_offset[0] += len(references) + +def mangle_signature(app, what, name, obj, options, sig, retann): + # Do not try to inspect classes that don't define `__init__` + if (inspect.isclass(obj) and + 'initializes x; see ' in pydoc.getdoc(obj.__init__)): + return '', '' + + if not (callable(obj) or hasattr(obj, '__argspec_is_invalid_')): return + if not hasattr(obj, '__doc__'): return + + doc = SphinxDocString(pydoc.getdoc(obj)) + if doc['Signature']: + sig = re.sub("^[^(]*", "", doc['Signature']) + return sig, '' + +def initialize(app): + try: + app.connect('autodoc-process-signature', mangle_signature) + except: + monkeypatch_sphinx_ext_autodoc() + +def setup(app, get_doc_object_=get_doc_object): + global get_doc_object + get_doc_object = get_doc_object_ + + app.connect('autodoc-process-docstring', mangle_docstrings) + app.connect('builder-inited', initialize) + app.add_config_value('numpydoc_edit_link', None, True) + +#------------------------------------------------------------------------------ +# Monkeypatch sphinx.ext.autodoc to accept argspecless autodocs (Sphinx < 0.5) +#------------------------------------------------------------------------------ + +def monkeypatch_sphinx_ext_autodoc(): + global _original_format_signature + import sphinx.ext.autodoc + + if sphinx.ext.autodoc.format_signature is our_format_signature: + return + + print "[numpydoc] Monkeypatching sphinx.ext.autodoc ..." + _original_format_signature = sphinx.ext.autodoc.format_signature + sphinx.ext.autodoc.format_signature = our_format_signature + +def our_format_signature(what, obj): + r = mangle_signature(None, what, None, obj, None, None, None) + if r is not None: + return r[0] + else: + return _original_format_signature(what, obj) Added: trunk/doc/sphinxext/only_directives.py =================================================================== --- trunk/doc/sphinxext/only_directives.py 2008-11-29 14:54:29 UTC (rev 6124) +++ trunk/doc/sphinxext/only_directives.py 2008-11-30 14:44:38 UTC (rev 6125) @@ -0,0 +1,87 @@ +# +# A pair of directives for inserting content that will only appear in +# either html or latex. 
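
Like ``numpydoc`` above, this module is activated by listing it in a Sphinx project's ``conf.py``. A minimal sketch; the ``sys.path`` entry and the edit-link target are illustrative, not part of this commit::

    # conf.py (sketch)
    import sys, os
    sys.path.insert(0, os.path.abspath('sphinxext'))   # make these modules importable

    extensions = ['numpydoc', 'only_directives']

    # Optional: mangle_docstrings() appends this fragment to each docstring,
    # filling in %(full_name)s with the documented object's __name__.
    numpydoc_edit_link = '`Edit %(full_name)s <http://example.com/edit/%(full_name)s>`__'
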
+# + +from docutils.nodes import Body, Element +from docutils.writers.html4css1 import HTMLTranslator +from sphinx.latexwriter import LaTeXTranslator +from docutils.parsers.rst import directives + +class html_only(Body, Element): + pass + +class latex_only(Body, Element): + pass + +def run(content, node_class, state, content_offset): + text = '\n'.join(content) + node = node_class(text) + state.nested_parse(content, content_offset, node) + return [node] + +try: + from docutils.parsers.rst import Directive +except ImportError: + from docutils.parsers.rst.directives import _directives + + def html_only_directive(name, arguments, options, content, lineno, + content_offset, block_text, state, state_machine): + return run(content, html_only, state, content_offset) + + def latex_only_directive(name, arguments, options, content, lineno, + content_offset, block_text, state, state_machine): + return run(content, latex_only, state, content_offset) + + for func in (html_only_directive, latex_only_directive): + func.content = 1 + func.options = {} + func.arguments = None + + _directives['htmlonly'] = html_only_directive + _directives['latexonly'] = latex_only_directive +else: + class OnlyDirective(Directive): + has_content = True + required_arguments = 0 + optional_arguments = 0 + final_argument_whitespace = True + option_spec = {} + + def run(self): + self.assert_has_content() + return run(self.content, self.node_class, + self.state, self.content_offset) + + class HtmlOnlyDirective(OnlyDirective): + node_class = html_only + + class LatexOnlyDirective(OnlyDirective): + node_class = latex_only + + directives.register_directive('htmlonly', HtmlOnlyDirective) + directives.register_directive('latexonly', LatexOnlyDirective) + +def setup(app): + app.add_node(html_only) + app.add_node(latex_only) + + # Add visit/depart methods to HTML-Translator: + def visit_perform(self, node): + pass + def depart_perform(self, node): + pass + def visit_ignore(self, node): + node.children = [] + def depart_ignore(self, node): + node.children = [] + + HTMLTranslator.visit_html_only = visit_perform + HTMLTranslator.depart_html_only = depart_perform + HTMLTranslator.visit_latex_only = visit_ignore + HTMLTranslator.depart_latex_only = depart_ignore + + LaTeXTranslator.visit_html_only = visit_ignore + LaTeXTranslator.depart_html_only = depart_ignore + LaTeXTranslator.visit_latex_only = visit_perform + LaTeXTranslator.depart_latex_only = depart_perform Added: trunk/doc/sphinxext/phantom_import.py =================================================================== --- trunk/doc/sphinxext/phantom_import.py 2008-11-29 14:54:29 UTC (rev 6124) +++ trunk/doc/sphinxext/phantom_import.py 2008-11-30 14:44:38 UTC (rev 6125) @@ -0,0 +1,162 @@ +""" +============== +phantom_import +============== + +Sphinx extension to make directives from ``sphinx.ext.autodoc`` and similar +extensions to use docstrings loaded from an XML file. + +This extension loads an XML file in the Pydocweb format [1] and +creates a dummy module that contains the specified docstrings. This +can be used to get the current docstrings from a Pydocweb instance +without needing to rebuild the documented module. + +.. 
[1] http://code.google.com/p/pydocweb + +""" +import imp, sys, compiler, types, os, inspect, re + +def setup(app): + app.connect('builder-inited', initialize) + app.add_config_value('phantom_import_file', None, True) + +def initialize(app): + fn = app.config.phantom_import_file + if (fn and os.path.isfile(fn)): + print "[numpydoc] Phantom importing modules from", fn, "..." + import_phantom_module(fn) + +#------------------------------------------------------------------------------ +# Creating 'phantom' modules from an XML description +#------------------------------------------------------------------------------ +def import_phantom_module(xml_file): + """ + Insert a fake Python module to sys.modules, based on a XML file. + + The XML file is expected to conform to Pydocweb DTD. The fake + module will contain dummy objects, which guarantee the following: + + - Docstrings are correct. + - Class inheritance relationships are correct (if present in XML). + - Function argspec is *NOT* correct (even if present in XML). + Instead, the function signature is prepended to the function docstring. + - Class attributes are *NOT* correct; instead, they are dummy objects. + + Parameters + ---------- + xml_file : str + Name of an XML file to read + + """ + import lxml.etree as etree + + object_cache = {} + + tree = etree.parse(xml_file) + root = tree.getroot() + + # Sort items so that + # - Base classes come before classes inherited from them + # - Modules come before their contents + all_nodes = dict([(n.attrib['id'], n) for n in root]) + + def _get_bases(node, recurse=False): + bases = [x.attrib['ref'] for x in node.findall('base')] + if recurse: + j = 0 + while True: + try: + b = bases[j] + except IndexError: break + if b in all_nodes: + bases.extend(_get_bases(all_nodes[b])) + j += 1 + return bases + + type_index = ['module', 'class', 'callable', 'object'] + + def base_cmp(a, b): + x = cmp(type_index.index(a.tag), type_index.index(b.tag)) + if x != 0: return x + + if a.tag == 'class' and b.tag == 'class': + a_bases = _get_bases(a, recurse=True) + b_bases = _get_bases(b, recurse=True) + x = cmp(len(a_bases), len(b_bases)) + if x != 0: return x + if a.attrib['id'] in b_bases: return -1 + if b.attrib['id'] in a_bases: return 1 + + return cmp(a.attrib['id'].count('.'), b.attrib['id'].count('.')) + + nodes = root.getchildren() + nodes.sort(base_cmp) + + # Create phantom items + for node in nodes: + name = node.attrib['id'] + doc = (node.text or '').decode('string-escape') + "\n" + if doc == "\n": doc = "" + + # create parent, if missing + parent = name + while True: + parent = '.'.join(parent.split('.')[:-1]) + if not parent: break + if parent in object_cache: break + obj = imp.new_module(parent) + object_cache[parent] = obj + sys.modules[parent] = obj + + # create object + if node.tag == 'module': + obj = imp.new_module(name) + obj.__doc__ = doc + sys.modules[name] = obj + elif node.tag == 'class': + bases = [object_cache[b] for b in _get_bases(node) + if b in object_cache] + bases.append(object) + init = lambda self: None + init.__doc__ = doc + obj = type(name, tuple(bases), {'__doc__': doc, '__init__': init}) + obj.__name__ = name.split('.')[-1] + elif node.tag == 'callable': + funcname = node.attrib['id'].split('.')[-1] + argspec = node.attrib.get('argspec') + if argspec: + argspec = re.sub('^[^(]*', '', argspec) + doc = "%s%s\n\n%s" % (funcname, argspec, doc) + obj = lambda: 0 + obj.__argspec_is_invalid_ = True + obj.func_name = funcname + obj.__name__ = name + obj.__doc__ = doc + if 
inspect.isclass(object_cache[parent]): + obj.__objclass__ = object_cache[parent] + else: + class Dummy(object): pass + obj = Dummy() + obj.__name__ = name + obj.__doc__ = doc + if inspect.isclass(object_cache[parent]): + obj.__get__ = lambda: None + object_cache[name] = obj + + if parent: + if inspect.ismodule(object_cache[parent]): + obj.__module__ = parent + setattr(object_cache[parent], name.split('.')[-1], obj) + + # Populate items + for node in root: + obj = object_cache.get(node.attrib['id']) + if obj is None: continue + for ref in node.findall('ref'): + if node.tag == 'class': + if ref.attrib['ref'].startswith(node.attrib['id'] + '.'): + setattr(obj, ref.attrib['name'], + object_cache.get(ref.attrib['ref'])) + else: + setattr(obj, ref.attrib['name'], + object_cache.get(ref.attrib['ref'])) Added: trunk/doc/sphinxext/plot_directive.py =================================================================== --- trunk/doc/sphinxext/plot_directive.py 2008-11-29 14:54:29 UTC (rev 6124) +++ trunk/doc/sphinxext/plot_directive.py 2008-11-30 14:44:38 UTC (rev 6125) @@ -0,0 +1,295 @@ +# plot_directive.py from matplotlib.sf.net +"""A special directive for including a matplotlib plot. + +Given a path to a .py file, it includes the source code inline, then: + +- On HTML, will include a .png with a link to a high-res .png. + +- On LaTeX, will include a .pdf + +This directive supports all of the options of the `image` directive, +except for `target` (since plot will add its own target). + +Additionally, if the :include-source: option is provided, the literal +source will be included inline, as well as a link to the source. + +.. warning:: + + This is a hacked version of plot_directive.py from Matplotlib. + It's very much subject to change! + +""" + +import sys, os, glob, shutil, imp, warnings, cStringIO, re +from docutils.parsers.rst import directives +try: + # docutils 0.4 + from docutils.parsers.rst.directives.images import align +except ImportError: + # docutils 0.5 + from docutils.parsers.rst.directives.images import Image + align = Image.align + +import matplotlib +import matplotlib.cbook as cbook +matplotlib.use('Agg') +import matplotlib.pyplot as plt +import matplotlib.image as image +from matplotlib import _pylab_helpers + +def runfile(fullpath, is_doctest=False): + # Change the working directory to the directory of the example, so + # it can get at its data files, if any. + pwd = os.getcwd() + path, fname = os.path.split(fullpath) + os.chdir(path) + stdout = sys.stdout + sys.stdout = cStringIO.StringIO() + try: + code = "" + if is_doctest: + fd = cStringIO.StringIO() + for line in open(fname): + m = re.match(r'^\s*(>>>|...) (.*)$', line) + if m: + code += m.group(2) + "\n" + else: + code = open(fname).read() + + ns = {} + exec setup.config.plot_pre_code in ns + exec code in ns + finally: + os.chdir(pwd) + sys.stdout = stdout + return ns + +options = {'alt': directives.unchanged, + 'height': directives.length_or_unitless, + 'width': directives.length_or_percentage_or_unitless, + 'scale': directives.nonnegative_int, + 'align': align, + 'class': directives.class_option, + 'include-source': directives.flag, + 'doctest-format': directives.flag + } + +template = """ +.. htmlonly:: + + [`source code <%(linkdir)s/%(sourcename)s>`__, + `png <%(linkdir)s/%(outname)s.hires.png>`__, + `pdf <%(linkdir)s/%(outname)s.pdf>`__] + + .. image:: %(linkdir)s/%(outname)s.png +%(options)s + +.. latexonly:: + .. image:: %(linkdir)s/%(outname)s.pdf +%(options)s + +""" + +exception_template = """ +.. 
htmlonly:: + + [`source code <%(linkdir)s/%(sourcename)s>`__] + +Exception occurred rendering plot. + +""" + + +def out_of_date(original, derived): + """ + Returns True if derivative is out-of-date wrt original, + both of which are full file paths. + """ + return (not os.path.exists(derived) + or os.stat(derived).st_mtime < os.stat(original).st_mtime) + +def makefig(fullpath, outdir, is_doctest=False): + """ + run a pyplot script and save the low and high res PNGs and a PDF in _static + + """ + + fullpath = str(fullpath) # todo, why is unicode breaking this + + print ' makefig: fullpath=%s, outdir=%s'%( fullpath, outdir) + formats = [('png', 80), + ('hires.png', 200), + ('pdf', 50), + ] + + basedir, fname = os.path.split(fullpath) + basename, ext = os.path.splitext(fname) + if ext != '.py': + basename = fname + sourcename = fname + all_exists = True + + if basedir != outdir: + shutil.copyfile(fullpath, os.path.join(outdir, fname)) + + # Look for single-figure output files first + for format, dpi in formats: + outname = os.path.join(outdir, '%s.%s' % (basename, format)) + if out_of_date(fullpath, outname): + all_exists = False + break + + if all_exists: + print ' already have %s'%fullpath + return 1 + + # Then look for multi-figure output files, assuming + # if we have some we have all... + i = 0 + while True: + all_exists = True + for format, dpi in formats: + outname = os.path.join(outdir, '%s_%02d.%s' % (basename, i, format)) + if out_of_date(fullpath, outname): + all_exists = False + break + if all_exists: + i += 1 + else: + break + + if i != 0: + print ' already have %d figures for %s' % (i, fullpath) + return i + + # We didn't find the files, so build them + + print ' building %s'%fullpath + plt.close('all') # we need to clear between runs + matplotlib.rcdefaults() + # Set a figure size that doesn't overflow typical browser windows + matplotlib.rcParams['figure.figsize'] = (5.5, 4.5) + + try: + runfile(fullpath, is_doctest=is_doctest) + except: + s = cbook.exception_to_str("Exception running plot %s" % fullpath) + warnings.warn(s) + return 0 + + fig_managers = _pylab_helpers.Gcf.get_all_fig_managers() + for i, figman in enumerate(fig_managers): + for format, dpi in formats: + if len(fig_managers) == 1: + outname = basename + else: + outname = "%s_%02d" % (basename, i) + outpath = os.path.join(outdir, '%s.%s' % (outname, format)) + try: + figman.canvas.figure.savefig(outpath, dpi=dpi) + except: + s = cbook.exception_to_str("Exception running plot %s" % fullpath) + warnings.warn(s) + return 0 + + return len(fig_managers) + +def run(arguments, options, state_machine, lineno): + reference = directives.uri(arguments[0]) + basedir, fname = os.path.split(reference) + basename, ext = os.path.splitext(fname) + if ext != '.py': + basename = fname + sourcename = fname + #print 'plotdir', reference, basename, ext + + # get the directory of the rst file + rstdir, rstfile = os.path.split(state_machine.document.attributes['source']) + reldir = rstdir[len(setup.confdir)+1:] + relparts = [p for p in os.path.split(reldir) if p.strip()] + nparts = len(relparts) + #print ' rstdir=%s, reldir=%s, relparts=%s, nparts=%d'%(rstdir, reldir, relparts, nparts) + #print 'RUN', rstdir, reldir + outdir = os.path.join(setup.confdir, setup.config.plot_output_dir, basedir) + if not os.path.exists(outdir): + cbook.mkdirs(outdir) + + linkdir = ('../' * nparts) + setup.config.plot_output_dir.replace(os.path.sep, '/') + '/' + basedir + #linkdir = os.path.join('..', outdir) + num_figs = makefig(reference, outdir, + 
is_doctest=('doctest-format' in options)) + #print ' reference="%s", basedir="%s", linkdir="%s", outdir="%s"'%(reference, basedir, linkdir, outdir) + + if options.has_key('include-source'): + contents = open(reference, 'r').read() + if 'doctest-format' in options: + lines = [''] + else: + lines = ['.. code-block:: python', ''] + lines += [' %s'%row.rstrip() for row in contents.split('\n')] + del options['include-source'] + else: + lines = [] + + if 'doctest-format' in options: + del options['doctest-format'] + + if num_figs > 0: + options = [' :%s: %s' % (key, val) for key, val in + options.items()] + options = "\n".join(options) + + for i in range(num_figs): + if num_figs == 1: + outname = basename + else: + outname = "%s_%02d" % (basename, i) + lines.extend((template % locals()).split('\n')) + else: + lines.extend((exception_template % locals()).split('\n')) + + if len(lines): + state_machine.insert_input( + lines, state_machine.input_lines.source(0)) + return [] + + + +try: + from docutils.parsers.rst import Directive +except ImportError: + from docutils.parsers.rst.directives import _directives + + def plot_directive(name, arguments, options, content, lineno, + content_offset, block_text, state, state_machine): + return run(arguments, options, state_machine, lineno) + plot_directive.__doc__ = __doc__ + plot_directive.arguments = (1, 0, 1) + plot_directive.options = options + + _directives['plot'] = plot_directive +else: + class plot_directive(Directive): + required_arguments = 1 + optional_arguments = 0 + final_argument_whitespace = True + option_spec = options + def run(self): + return run(self.arguments, self.options, + self.state_machine, self.lineno) + plot_directive.__doc__ = __doc__ + + directives.register_directive('plot', plot_directive) + +def setup(app): + setup.app = app + setup.config = app.config + setup.confdir = app.confdir + + app.add_config_value('plot_output_dir', '_static', True) + app.add_config_value('plot_pre_code', '', True) + +plot_directive.__doc__ = __doc__ + +directives.register_directive('plot', plot_directive) + Added: trunk/doc/sphinxext/tests/test_docscrape.py =================================================================== --- trunk/doc/sphinxext/tests/test_docscrape.py 2008-11-29 14:54:29 UTC (rev 6124) +++ trunk/doc/sphinxext/tests/test_docscrape.py 2008-11-30 14:44:38 UTC (rev 6125) @@ -0,0 +1,490 @@ +# -*- encoding:utf-8 -*- + +import sys, os +sys.path.append(os.path.join(os.path.dirname(__file__), '..')) + +from docscrape import NumpyDocString, FunctionDoc +from docscrape_sphinx import SphinxDocString +from nose.tools import * + +doc_txt = '''\ + numpy.multivariate_normal(mean, cov, shape=None) + + Draw values from a multivariate normal distribution with specified + mean and covariance. + + The multivariate normal or Gaussian distribution is a generalisation + of the one-dimensional normal distribution to higher dimensions. + + Parameters + ---------- + mean : (N,) ndarray + Mean of the N-dimensional distribution. + + .. math:: + + (1+2+3)/3 + + cov : (N,N) ndarray + Covariance matrix of the distribution. + shape : tuple of ints + Given a shape of, for example, (m,n,k), m*n*k samples are + generated, and packed in an m-by-n-by-k arrangement. Because + each sample is N-dimensional, the output shape is (m,n,k,N). + + Returns + ------- + out : ndarray + The drawn samples, arranged according to `shape`. If the + shape given is (m,n,...), then the shape of `out` is is + (m,n,...,N). 
+ + In other words, each entry ``out[i,j,...,:]`` is an N-dimensional + value drawn from the distribution. + + Warnings + -------- + Certain warnings apply. + + Notes + ----- + + Instead of specifying the full covariance matrix, popular + approximations include: + + - Spherical covariance (`cov` is a multiple of the identity matrix) + - Diagonal covariance (`cov` has non-negative elements only on the diagonal) + + This geometrical property can be seen in two dimensions by plotting + generated data-points: + + >>> mean = [0,0] + >>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis + + >>> x,y = multivariate_normal(mean,cov,5000).T + >>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show() + + Note that the covariance matrix must be symmetric and non-negative + definite. + + References + ---------- + .. [1] A. Papoulis, "Probability, Random Variables, and Stochastic + Processes," 3rd ed., McGraw-Hill Companies, 1991 + .. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification," + 2nd ed., Wiley, 2001. + + See Also + -------- + some, other, funcs + otherfunc : relationship + + Examples + -------- + >>> mean = (1,2) + >>> cov = [[1,0],[1,0]] + >>> x = multivariate_normal(mean,cov,(3,3)) + >>> print x.shape + (3, 3, 2) + + The following is probably true, given that 0.6 is roughly twice the + standard deviation: + + >>> print list( (x[0,0,:] - mean) < 0.6 ) + [True, True] + + .. index:: random + :refguide: random;distributions, random;gauss + + ''' +doc = NumpyDocString(doc_txt) + + +def test_signature(): + assert doc['Signature'].startswith('numpy.multivariate_normal(') + assert doc['Signature'].endswith('shape=None)') + +def test_summary(): + assert doc['Summary'][0].startswith('Draw values') + assert doc['Summary'][-1].endswith('covariance.') + +def test_extended_summary(): + assert doc['Extended Summary'][0].startswith('The multivariate normal') + +def test_parameters(): + assert_equal(len(doc['Parameters']), 3) + assert_equal([n for n,_,_ in doc['Parameters']], ['mean','cov','shape']) + + arg, arg_type, desc = doc['Parameters'][1] + assert_equal(arg_type, '(N,N) ndarray') + assert desc[0].startswith('Covariance matrix') + assert doc['Parameters'][0][-1][-2] == ' (1+2+3)/3' + +def test_returns(): + assert_equal(len(doc['Returns']), 1) + arg, arg_type, desc = doc['Returns'][0] + assert_equal(arg, 'out') + assert_equal(arg_type, 'ndarray') + assert desc[0].startswith('The drawn samples') + assert desc[-1].endswith('distribution.') + +def test_notes(): + assert doc['Notes'][0].startswith('Instead') + assert doc['Notes'][-1].endswith('definite.') + assert_equal(len(doc['Notes']), 17) + +def test_references(): + assert doc['References'][0].startswith('..') + assert doc['References'][-1].endswith('2001.') + +def test_examples(): + assert doc['Examples'][0].startswith('>>>') + assert doc['Examples'][-1].endswith('True]') + +def test_index(): + assert_equal(doc['index']['default'], 'random') + print doc['index'] + assert_equal(len(doc['index']), 2) + assert_equal(len(doc['index']['refguide']), 2) + +def non_blank_line_by_line_compare(a,b): + a = [l for l in a.split('\n') if l.strip()] + b = [l for l in b.split('\n') if l.strip()] + for n,line in enumerate(a): + if not line == b[n]: + raise AssertionError("Lines %s of a and b differ: " + "\n>>> %s\n<<< %s\n" % + (n,line,b[n])) +def test_str(): + non_blank_line_by_line_compare(str(doc), +"""numpy.multivariate_normal(mean, cov, shape=None) + +Draw values from a multivariate normal distribution with specified +mean and 
covariance. + +The multivariate normal or Gaussian distribution is a generalisation +of the one-dimensional normal distribution to higher dimensions. + +Parameters +---------- +mean : (N,) ndarray + Mean of the N-dimensional distribution. + + .. math:: + + (1+2+3)/3 + +cov : (N,N) ndarray + Covariance matrix of the distribution. +shape : tuple of ints + Given a shape of, for example, (m,n,k), m*n*k samples are + generated, and packed in an m-by-n-by-k arrangement. Because + each sample is N-dimensional, the output shape is (m,n,k,N). + +Returns +------- +out : ndarray + The drawn samples, arranged according to `shape`. If the + shape given is (m,n,...), then the shape of `out` is is + (m,n,...,N). + + In other words, each entry ``out[i,j,...,:]`` is an N-dimensional + value drawn from the distribution. + +Warnings +-------- +Certain warnings apply. + +See Also +-------- +`some`_, `other`_, `funcs`_ + +`otherfunc`_ + relationship + +Notes +----- +Instead of specifying the full covariance matrix, popular +approximations include: + + - Spherical covariance (`cov` is a multiple of the identity matrix) + - Diagonal covariance (`cov` has non-negative elements only on the diagonal) + +This geometrical property can be seen in two dimensions by plotting +generated data-points: + +>>> mean = [0,0] +>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis + +>>> x,y = multivariate_normal(mean,cov,5000).T +>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show() + +Note that the covariance matrix must be symmetric and non-negative +definite. + +References +---------- +.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic + Processes," 3rd ed., McGraw-Hill Companies, 1991 +.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification," + 2nd ed., Wiley, 2001. + +Examples +-------- +>>> mean = (1,2) +>>> cov = [[1,0],[1,0]] +>>> x = multivariate_normal(mean,cov,(3,3)) +>>> print x.shape +(3, 3, 2) + +The following is probably true, given that 0.6 is roughly twice the +standard deviation: + +>>> print list( (x[0,0,:] - mean) < 0.6 ) +[True, True] + +.. index:: random + :refguide: random;distributions, random;gauss""") + + +def test_sphinx_str(): + sphinx_doc = SphinxDocString(doc_txt) + non_blank_line_by_line_compare(str(sphinx_doc), +""" +.. index:: random + single: random;distributions, random;gauss + +Draw values from a multivariate normal distribution with specified +mean and covariance. + +The multivariate normal or Gaussian distribution is a generalisation +of the one-dimensional normal distribution to higher dimensions. + +:Parameters: + + **mean** : (N,) ndarray + + Mean of the N-dimensional distribution. + + .. math:: + + (1+2+3)/3 + + **cov** : (N,N) ndarray + + Covariance matrix of the distribution. + + **shape** : tuple of ints + + Given a shape of, for example, (m,n,k), m*n*k samples are + generated, and packed in an m-by-n-by-k arrangement. Because + each sample is N-dimensional, the output shape is (m,n,k,N). + +:Returns: + + **out** : ndarray + + The drawn samples, arranged according to `shape`. If the + shape given is (m,n,...), then the shape of `out` is is + (m,n,...,N). + + In other words, each entry ``out[i,j,...,:]`` is an N-dimensional + value drawn from the distribution. + +.. warning:: + + Certain warnings apply. + +.. seealso:: + + :obj:`some`, :obj:`other`, :obj:`funcs` + + :obj:`otherfunc` + relationship + +.. 
rubric:: Notes + +Instead of specifying the full covariance matrix, popular +approximations include: + + - Spherical covariance (`cov` is a multiple of the identity matrix) + - Diagonal covariance (`cov` has non-negative elements only on the diagonal) + +This geometrical property can be seen in two dimensions by plotting +generated data-points: + +>>> mean = [0,0] +>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis + +>>> x,y = multivariate_normal(mean,cov,5000).T +>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show() + +Note that the covariance matrix must be symmetric and non-negative +definite. + +.. rubric:: References + +.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic + Processes," 3rd ed., McGraw-Hill Companies, 1991 +.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification," + 2nd ed., Wiley, 2001. + +.. rubric:: Examples + +>>> mean = (1,2) +>>> cov = [[1,0],[1,0]] +>>> x = multivariate_normal(mean,cov,(3,3)) +>>> print x.shape +(3, 3, 2) + +The following is probably true, given that 0.6 is roughly twice the +standard deviation: + +>>> print list( (x[0,0,:] - mean) < 0.6 ) +[True, True] +""") + + +doc2 = NumpyDocString(""" + Returns array of indices of the maximum values of along the given axis. + + Parameters + ---------- + a : {array_like} + Array to look in. + axis : {None, integer} + If None, the index is into the flattened array, otherwise along + the specified axis""") + +def test_parameters_without_extended_description(): + assert_equal(len(doc2['Parameters']), 2) + +doc3 = NumpyDocString(""" + my_signature(*params, **kwds) + + Return this and that. + """) + +def test_escape_stars(): + signature = str(doc3).split('\n')[0] + assert_equal(signature, 'my_signature(\*params, \*\*kwds)') + +doc4 = NumpyDocString( + """a.conj() + + Return an array with all complex-valued elements conjugated.""") + +def test_empty_extended_summary(): + assert_equal(doc4['Extended Summary'], []) + +doc5 = NumpyDocString( + """ + a.something() + + Raises + ------ + LinAlgException + If array is singular. 
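The tests above exercise the section dictionary exposed by docscrape.NumpyDocString. A small usage sketch, assuming docscrape is importable the same way this test module imports it; the docstring being parsed is invented for illustration:

    from docscrape import NumpyDocString

    example_doc = """
    foo(x)

    Compute foo of x.

    Parameters
    ----------
    x : array_like
        Input values.
    """

    nd = NumpyDocString(example_doc)
    print nd['Signature']   # 'foo(x)'
    print nd['Summary']     # e.g. ['Compute foo of x.']
    print nd['Parameters']  # e.g. [('x', 'array_like', ['Input values.'])]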
+ + """) + +def test_raises(): + assert_equal(len(doc5['Raises']), 1) + name,_,desc = doc5['Raises'][0] + assert_equal(name,'LinAlgException') + assert_equal(desc,['If array is singular.']) + +def test_see_also(): + doc6 = NumpyDocString( + """ + z(x,theta) + + See Also + -------- + func_a, func_b, func_c + func_d : some equivalent func + foo.func_e : some other func over + multiple lines + func_f, func_g, :meth:`func_h`, func_j, + func_k + :obj:`baz.obj_q` + :class:`class_j`: fubar + foobar + """) + + assert len(doc6['See Also']) == 12 + for func, desc, role in doc6['See Also']: + if func in ('func_a', 'func_b', 'func_c', 'func_f', + 'func_g', 'func_h', 'func_j', 'func_k', 'baz.obj_q'): + assert(not desc) + else: + assert(desc) + + if func == 'func_h': + assert role == 'meth' + elif func == 'baz.obj_q': + assert role == 'obj' + elif func == 'class_j': + assert role == 'class' + else: + assert role is None + + if func == 'func_d': + assert desc == ['some equivalent func'] + elif func == 'foo.func_e': + assert desc == ['some other func over', 'multiple lines'] + elif func == 'class_j': + assert desc == ['fubar', 'foobar'] + +def test_see_also_print(): + class Dummy(object): + """ + See Also + -------- + func_a, func_b + func_c : some relationship + goes here + func_d + """ + pass + + obj = Dummy() + s = str(FunctionDoc(obj, role='func')) + assert(':func:`func_a`, :func:`func_b`' in s) + assert(' some relationship' in s) + assert(':func:`func_d`' in s) + +doc7 = NumpyDocString(""" + + Doc starts on second line. + + """) + +def test_empty_first_line(): + assert doc7['Summary'][0].startswith('Doc starts') + + +def test_no_summary(): + str(SphinxDocString(""" + Parameters + ----------""")) + + +def test_unicode(): + doc = SphinxDocString(""" + ????????????? + + ???????????? + + Parameters + ---------- + ??? : ??? + ??? + + Returns + ------- + ??? : ??? + ??? + + """) + assert doc['Summary'][0] == u'?????????????'.encode('utf-8') Added: trunk/doc/sphinxext/traitsdoc.py =================================================================== --- trunk/doc/sphinxext/traitsdoc.py 2008-11-29 14:54:29 UTC (rev 6124) +++ trunk/doc/sphinxext/traitsdoc.py 2008-11-30 14:44:38 UTC (rev 6125) @@ -0,0 +1,140 @@ +""" +========= +traitsdoc +========= + +Sphinx extension that handles docstrings in the Numpy standard format, [1] +and support Traits [2]. + +This extension can be used as a replacement for ``numpydoc`` when support +for Traits is required. + +.. [1] http://projects.scipy.org/scipy/numpy/wiki/CodingStyleGuidelines#docstring-standard +.. [2] http://code.enthought.com/projects/traits/ + +""" + +import inspect +import os +import pydoc + +import docscrape +import docscrape_sphinx +from docscrape_sphinx import SphinxClassDoc, SphinxFunctionDoc, SphinxDocString + +import numpydoc + +import comment_eater + +class SphinxTraitsDoc(SphinxClassDoc): + def __init__(self, cls, modulename='', func_doc=SphinxFunctionDoc): + if not inspect.isclass(cls): + raise ValueError("Initialise using a class. Got %r" % cls) + self._cls = cls + + if modulename and not modulename.endswith('.'): + modulename += '.' 
+ self._mod = modulename + self._name = cls.__name__ + self._func_doc = func_doc + + docstring = pydoc.getdoc(cls) + docstring = docstring.split('\n') + + # De-indent paragraph + try: + indent = min(len(s) - len(s.lstrip()) for s in docstring + if s.strip()) + except ValueError: + indent = 0 + + for n,line in enumerate(docstring): + docstring[n] = docstring[n][indent:] + + self._doc = docscrape.Reader(docstring) + self._parsed_data = { + 'Signature': '', + 'Summary': '', + 'Description': [], + 'Extended Summary': [], + 'Parameters': [], + 'Returns': [], + 'Raises': [], + 'Warns': [], + 'Other Parameters': [], + 'Traits': [], + 'Methods': [], + 'See Also': [], + 'Notes': [], + 'References': '', + 'Example': '', + 'Examples': '', + 'index': {} + } + + self._parse() + + def _str_summary(self): + return self['Summary'] + [''] + + def _str_extended_summary(self): + return self['Description'] + self['Extended Summary'] + [''] + + def __str__(self, indent=0, func_role="func"): + out = [] + out += self._str_signature() + out += self._str_index() + [''] + out += self._str_summary() + out += self._str_extended_summary() + for param_list in ('Parameters', 'Traits', 'Methods', + 'Returns','Raises'): + out += self._str_param_list(param_list) + out += self._str_see_also("obj") + out += self._str_section('Notes') + out += self._str_references() + out += self._str_section('Example') + out += self._str_section('Examples') + out = self._str_indent(out,indent) + return '\n'.join(out) + +def looks_like_issubclass(obj, classname): + """ Return True if the object has a class or superclass with the given class + name. + + Ignores old-style classes. + """ + t = obj + if t.__name__ == classname: + return True + for klass in t.__mro__: + if klass.__name__ == classname: + return True + return False + +def get_doc_object(obj, what=None): + if what is None: + if inspect.isclass(obj): + what = 'class' + elif inspect.ismodule(obj): + what = 'module' + elif callable(obj): + what = 'function' + else: + what = 'object' + if what == 'class': + doc = SphinxTraitsDoc(obj, '', func_doc=SphinxFunctionDoc) + if looks_like_issubclass(obj, 'HasTraits'): + for name, trait, comment in comment_eater.get_class_traits(obj): + # Exclude private traits. 
+ if not name.startswith('_'): + doc['Traits'].append((name, trait, comment.splitlines())) + return doc + elif what in ('function', 'method'): + return SphinxFunctionDoc(obj, '') + else: + return SphinxDocString(pydoc.getdoc(obj)) + +def setup(app): + # init numpydoc + numpydoc.setup(app, get_doc_object) + From numpy-svn at scipy.org Sun Nov 30 10:08:57 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 30 Nov 2008 09:08:57 -0600 (CST) Subject: [Numpy-svn] r6126 - trunk/numpy/core/code_generators Message-ID: <20081130150857.EA1EA39C30B@scipy.org> Author: ptvirtan Date: 2008-11-30 09:08:38 -0600 (Sun, 30 Nov 2008) New Revision: 6126 Added: trunk/numpy/core/code_generators/ufunc_docstrings.py Removed: trunk/numpy/core/code_generators/docstrings.py Modified: trunk/numpy/core/code_generators/generate_umath.py Log: Rename core/.../docstrings.py to ufunc_docstrings.py Deleted: trunk/numpy/core/code_generators/docstrings.py =================================================================== --- trunk/numpy/core/code_generators/docstrings.py 2008-11-30 14:44:38 UTC (rev 6125) +++ trunk/numpy/core/code_generators/docstrings.py 2008-11-30 15:08:38 UTC (rev 6126) @@ -1,2718 +0,0 @@ -# Docstrings for generated ufuncs - -docdict = {} - -def get(name): - return docdict.get(name) - -def add_newdoc(place, name, doc): - docdict['.'.join((place, name))] = doc - - -add_newdoc('numpy.core.umath', 'absolute', - """ - Calculate the absolute value element-wise. - - Parameters - ---------- - x : array_like - Input array. - - Returns - ------- - res : ndarray - An ndarray containing the absolute value of - each element in `x`. For complex input, ``a + ib``, the - absolute value is :math:`\\sqrt{ a^2 + b^2 }`. - - Examples - -------- - >>> x = np.array([-1.2, 1.2]) - >>> np.absolute(x) - array([ 1.2, 1.2]) - >>> np.absolute(1.2 + 1j) - 1.5620499351813308 - - Plot the function over ``[-10, 10]``: - - >>> import matplotlib.pyplot as plt - - >>> x = np.linspace(-10, 10, 101) - >>> plt.plot(x, np.absolute(x)) - >>> plt.show() - - Plot the function over the complex plane: - - >>> xx = x + 1j * x[:, np.newaxis] - >>> plt.imshow(np.abs(xx), extent=[-10, 10, -10, 10]) - >>> plt.show() - - """) - -add_newdoc('numpy.core.umath', 'add', - """ - Add arguments element-wise. - - Parameters - ---------- - x1, x2 : array_like - The arrays to be added. - - Returns - ------- - y : {ndarray, scalar} - The sum of `x1` and `x2`, element-wise. Returns scalar if - both `x1` and `x2` are scalars. - - Notes - ----- - Equivalent to `x1` + `x2` in terms of array broadcasting. - - Examples - -------- - >>> np.add(1.0, 4.0) - 5.0 - >>> x1 = np.arange(9.0).reshape((3, 3)) - >>> x2 = np.arange(3.0) - >>> np.add(x1, x2) - array([[ 0., 2., 4.], - [ 3., 5., 7.], - [ 6., 8., 10.]]) - - """) - -add_newdoc('numpy.core.umath', 'arccos', - """ - Trigonometric inverse cosine, element-wise. - - The inverse of `cos` so that, if ``y = cos(x)``, then ``x = arccos(y)``. - - Parameters - ---------- - x : array_like - `x`-coordinate on the unit circle. - For real arguments, the domain is [-1, 1]. - - Returns - ------- - angle : ndarray - The angle of the ray intersecting the unit circle at the given - `x`-coordinate in radians [0, pi]. If `x` is a scalar then a - scalar is returned, otherwise an array of the same shape as `x` - is returned. - - See Also - -------- - cos, arctan, arcsin - - Notes - ----- - `arccos` is a multivalued function: for each `x` there are infinitely - many numbers `z` such that `cos(z) = x`. 
The convention is to return the - angle `z` whose real part lies in `[0, pi]`. - - For real-valued input data types, `arccos` always returns real output. - For each value that cannot be expressed as a real number or infinity, it - yields ``nan`` and sets the `invalid` floating point error flag. - - For complex-valued input, `arccos` is a complex analytical function that - has branch cuts `[-inf, -1]` and `[1, inf]` and is continuous from above - on the former and from below on the latter. - - The inverse `cos` is also known as `acos` or cos^-1. - - References - ---------- - .. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions", - 10th printing, 1964, pp. 79. http://www.math.sfu.ca/~cbm/aands/ - .. [2] Wikipedia, "Inverse trigonometric function", - http://en.wikipedia.org/wiki/Arccos - - Examples - -------- - We expect the arccos of 1 to be 0, and of -1 to be pi: - - >>> np.arccos([1, -1]) - array([ 0. , 3.14159265]) - - Plot arccos: - - >>> import matplotlib.pyplot as plt - >>> x = np.linspace(-1, 1, num=100) - >>> plt.plot(x, np.arccos(x)) - >>> plt.axis('tight') - >>> plt.show() - - """) - -add_newdoc('numpy.core.umath', 'arccosh', - """ - Inverse hyperbolic cosine, elementwise. - - Parameters - ---------- - x : array_like - Input array. - - Returns - ------- - out : ndarray - Array of the same shape and dtype as `x`. - - Notes - ----- - `arccosh` is a multivalued function: for each `x` there are infinitely - many numbers `z` such that `cosh(z) = x`. The convention is to return the - `z` whose imaginary part lies in `[-pi, pi]` and the real part in - ``[0, inf]``. - - For real-valued input data types, `arccosh` always returns real output. - For each value that cannot be expressed as a real number or infinity, it - yields ``nan`` and sets the `invalid` floating point error flag. - - For complex-valued input, `arccosh` is a complex analytical function that - has a branch cut `[-inf, 1]` and is continuous from above on it. - - References - ---------- - .. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions", - 10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/ - .. [2] Wikipedia, "Inverse hyperbolic function", - http://en.wikipedia.org/wiki/Arccosh - - Examples - -------- - >>> np.arccosh([np.e, 10.0]) - array([ 1.65745445, 2.99322285]) - - """) - -add_newdoc('numpy.core.umath', 'arcsin', - """ - Inverse sine elementwise. - - Parameters - ---------- - x : array_like - `y`-coordinate on the unit circle. - - Returns - ------- - angle : ndarray - The angle of the ray intersecting the unit circle at the given - `y`-coordinate in radians ``[-pi, pi]``. If `x` is a scalar then - a scalar is returned, otherwise an array is returned. - - See Also - -------- - sin, arctan, arctan2 - - Notes - ----- - `arcsin` is a multivalued function: for each `x` there are infinitely - many numbers `z` such that `sin(z) = x`. The convention is to return the - angle `z` whose real part lies in `[-pi/2, pi/2]`. - - For real-valued input data types, `arcsin` always returns real output. - For each value that cannot be expressed as a real number or infinity, it - yields ``nan`` and sets the `invalid` floating point error flag. - - For complex-valued input, `arcsin` is a complex analytical function that - has branch cuts `[-inf, -1]` and `[1, inf]` and is continuous from above - on the former and from below on the latter. - - The inverse sine is also known as `asin` or ``sin^-1``. - - References - ---------- - .. [1] M. Abramowitz and I.A. 
Stegun, "Handbook of Mathematical Functions", - 10th printing, 1964, pp. 79. http://www.math.sfu.ca/~cbm/aands/ - .. [2] Wikipedia, "Inverse trigonometric function", - http://en.wikipedia.org/wiki/Arcsin - - Examples - -------- - >>> np.arcsin(1) # pi/2 - 1.5707963267948966 - >>> np.arcsin(-1) # -pi/2 - -1.5707963267948966 - >>> np.arcsin(0) - 0.0 - - """) - -add_newdoc('numpy.core.umath', 'arcsinh', - """ - Inverse hyperbolic sine elementwise. - - Parameters - ---------- - x : array_like - Input array. - - Returns - ------- - out : ndarray - Array of of the same shape as `x`. - - Notes - ----- - `arcsinh` is a multivalued function: for each `x` there are infinitely - many numbers `z` such that `sinh(z) = x`. The convention is to return the - `z` whose imaginary part lies in `[-pi/2, pi/2]`. - - For real-valued input data types, `arcsinh` always returns real output. - For each value that cannot be expressed as a real number or infinity, it - returns ``nan`` and sets the `invalid` floating point error flag. - - For complex-valued input, `arccos` is a complex analytical function that - has branch cuts `[1j, infj]` and `[-1j, -infj]` and is continuous from - the right on the former and from the left on the latter. - - The inverse hyperbolic sine is also known as `asinh` or ``sinh^-1``. - - References - ---------- - .. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions", - 10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/ - .. [2] Wikipedia, "Inverse hyperbolic function", - http://en.wikipedia.org/wiki/Arcsinh - - Examples - -------- - >>> np.arcsinh(np.array([np.e, 10.0])) - array([ 1.72538256, 2.99822295]) - - """) - -add_newdoc('numpy.core.umath', 'arctan', - """ - Trigonometric inverse tangent, element-wise. - - The inverse of tan, so that if ``y = tan(x)`` then - ``x = arctan(y)``. - - Parameters - ---------- - x : array_like - Input values. `arctan` is applied to each element of `x`. - - Returns - ------- - out : ndarray - Out has the same shape as `x`. Its real part is - in ``[-pi/2, pi/2]``. It is a scalar if `x` is a scalar. - - See Also - -------- - arctan2 : Calculate the arctan of y/x. - - Notes - ----- - `arctan` is a multivalued function: for each `x` there are infinitely - many numbers `z` such that `tan(z) = x`. The convention is to return the - angle `z` whose real part lies in `[-pi/2, pi/2]`. - - For real-valued input data types, `arctan` always returns real output. - For each value that cannot be expressed as a real number or infinity, it - yields ``nan`` and sets the `invalid` floating point error flag. - - For complex-valued input, `arctan` is a complex analytical function that - has branch cuts `[1j, infj]` and `[-1j, -infj]` and is continuous from the - left on the former and from the right on the latter. - - The inverse tangent is also known as `atan` or ``tan^-1``. - - References - ---------- - .. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions", - 10th printing, 1964, pp. 79. http://www.math.sfu.ca/~cbm/aands/ - .. [2] Wikipedia, "Inverse trigonometric function", - http://en.wikipedia.org/wiki/Arctan - - Examples - -------- - We expect the arctan of 0 to be 0, and of 1 to be :math:`\\pi/4`: - - >>> np.arctan([0, 1]) - array([ 0. 
, 0.78539816]) - - >>> np.pi/4 - 0.78539816339744828 - - Plot arctan: - - >>> import matplotlib.pyplot as plt - >>> x = np.linspace(-10, 10) - >>> plt.plot(x, np.arctan(x)) - >>> plt.axis('tight') - >>> plt.show() - - """) - -add_newdoc('numpy.core.umath', 'arctan2', - """ - Elementwise arc tangent of ``x1/x2`` choosing the quadrant correctly. - - The quadrant (ie. branch) is chosen so that ``arctan2(x1, x2)`` - is the signed angle in radians between the line segments - ``(0,0) - (1,0)`` and ``(0,0) - (x2,x1)``. This function is defined - also for `x2` = 0. - - `arctan2` is not defined for complex-valued arguments. - - Parameters - ---------- - x1 : array_like, real-valued - y-coordinates. - x2 : array_like, real-valued - x-coordinates. `x2` must be broadcastable to match the shape of `x1`, - or vice versa. - - Returns - ------- - angle : ndarray - Array of angles in radians, in the range ``[-pi, pi]``. - - See Also - -------- - arctan, tan - - Notes - ----- - `arctan2` is identical to the `atan2` function of the underlying - C library. The following special values are defined in the C standard [2]: - - ====== ====== ================ - `x1` `x2` `arctan2(x1,x2)` - ====== ====== ================ - +/- 0 +0 +/- 0 - +/- 0 -0 +/- pi - > 0 +/-inf +0 / +pi - < 0 +/-inf -0 / -pi - +/-inf +inf +/- (pi/4) - +/-inf -inf +/- (3*pi/4) - ====== ====== ================ - - Note that +0 and -0 are distinct floating point numbers. - - References - ---------- - .. [1] Wikipedia, "atan2", - http://en.wikipedia.org/wiki/Atan2 - .. [2] ISO/IEC standard 9899:1999, "Programming language C", 1999. - - Examples - -------- - Consider four points in different quadrants: - - >>> x = np.array([-1, +1, +1, -1]) - >>> y = np.array([-1, -1, +1, +1]) - >>> np.arctan2(y, x) * 180 / np.pi - array([-135., -45., 45., 135.]) - - Note the order of the parameters. `arctan2` is defined also when `x2` = 0 - and at several other special points, obtaining values in - the range ``[-pi, pi]``: - - >>> np.arctan2([1., -1.], [0., 0.]) - array([ 1.57079633, -1.57079633]) - >>> np.arctan2([0., 0., np.inf], [+0., -0., np.inf]) - array([ 0. , 3.14159265, 0.78539816]) - - """) - -add_newdoc('numpy.core.umath', 'arctanh', - """ - Inverse hyperbolic tangent elementwise. - - Parameters - ---------- - x : array_like - Input array. - - Returns - ------- - out : ndarray - Array of the same shape as `x`. - - Notes - ----- - `arctanh` is a multivalued function: for each `x` there are infinitely - many numbers `z` such that `tanh(z) = x`. The convention is to return the - `z` whose imaginary part lies in `[-pi/2, pi/2]`. - - For real-valued input data types, `arctanh` always returns real output. - For each value that cannot be expressed as a real number or infinity, it - yields ``nan`` and sets the `invalid` floating point error flag. - - For complex-valued input, `arctanh` is a complex analytical function that - has branch cuts `[-1, -inf]` and `[1, inf]` and is continuous from - above on the former and from below on the latter. - - The inverse hyperbolic tangent is also known as `atanh` or ``tanh^-1``. - - References - ---------- - .. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions", - 10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/ - .. [2] Wikipedia, "Inverse hyperbolic function", - http://en.wikipedia.org/wiki/Arctanh - - Examples - -------- - >>> np.arctanh([0, -0.5]) - array([ 0. 
, -0.54930614]) - - """) - -add_newdoc('numpy.core.umath', 'bitwise_and', - """ - Compute bit-wise AND of two arrays, element-wise. - - When calculating the bit-wise AND between two elements, ``x`` and ``y``, - each element is first converted to its binary representation (which works - just like the decimal system, only now we're using 2 instead of 10): - - .. math:: x = \\sum_{i=0}^{W-1} a_i \\cdot 2^i\\\\ - y = \\sum_{i=0}^{W-1} b_i \\cdot 2^i, - - where ``W`` is the bit-width of the type (i.e., 8 for a byte or uint8), - and each :math:`a_i` and :math:`b_j` is either 0 or 1. For example, 13 - is represented as ``00001101``, which translates to - :math:`2^4 + 2^3 + 2`. - - The bit-wise operator is the result of - - .. math:: z = \\sum_{i=0}^{i=W-1} (a_i \\wedge b_i) \\cdot 2^i, - - where :math:`\\wedge` is the AND operator, which yields one whenever - both :math:`a_i` and :math:`b_i` are 1. - - Parameters - ---------- - x1, x2 : array_like - Only integer types are handled (including booleans). - - Returns - ------- - out : array_like - Result. - - See Also - -------- - bitwise_or, bitwise_xor - logical_and - binary_repr : - Return the binary representation of the input number as a string. - - Examples - -------- - We've seen that 13 is represented by ``00001101``. Similary, 17 is - represented by ``00010001``. The bit-wise AND of 13 and 17 is - therefore ``000000001``, or 1: - - >>> np.bitwise_and(13, 17) - 1 - - >>> np.bitwise_and(14, 13) - 12 - >>> np.binary_repr(12) - '1100' - >>> np.bitwise_and([14,3], 13) - array([12, 1]) - - >>> np.bitwise_and([11,7], [4,25]) - array([0, 1]) - >>> np.bitwise_and(np.array([2,5,255]), np.array([3,14,16])) - array([ 2, 4, 16]) - >>> np.bitwise_and([True, True], [False, True]) - array([False, True], dtype=bool) - - """) - -add_newdoc('numpy.core.umath', 'bitwise_or', - """ - Compute bit-wise OR of two arrays, element-wise. - - When calculating the bit-wise OR between two elements, ``x`` and ``y``, - each element is first converted to its binary representation (which works - just like the decimal system, only now we're using 2 instead of 10): - - .. math:: x = \\sum_{i=0}^{W-1} a_i \\cdot 2^i\\\\ - y = \\sum_{i=0}^{W-1} b_i \\cdot 2^i, - - where ``W`` is the bit-width of the type (i.e., 8 for a byte or uint8), - and each :math:`a_i` and :math:`b_j` is either 0 or 1. For example, 13 - is represented as ``00001101``, which translates to - :math:`2^4 + 2^3 + 2`. - - The bit-wise operator is the result of - - .. math:: z = \\sum_{i=0}^{i=W-1} (a_i \\vee b_i) \\cdot 2^i, - - where :math:`\\vee` is the OR operator, which yields one whenever - either :math:`a_i` or :math:`b_i` is 1. - - Parameters - ---------- - x1, x2 : array_like - Only integer types are handled (including booleans). - - Returns - ------- - out : array_like - Result. - - See Also - -------- - bitwise_and, bitwise_xor - logical_or - binary_repr : - Return the binary representation of the input number as a string. - - Examples - -------- - We've seen that 13 is represented by ``00001101``. Similary, 16 is - represented by ``00010000``. The bit-wise OR of 13 and 16 is - therefore ``000111011``, or 29: - - >>> np.bitwise_or(13, 16) - 29 - >>> np.binary_repr(29) - '11101' - - >>> np.bitwise_or(32, 2) - 34 - >>> np.bitwise_or([33, 4], 1) - array([33, 5]) - >>> np.bitwise_or([33, 4], [1, 2]) - array([33, 6]) - - >>> np.bitwise_or(np.array([2, 5, 255]), np.array([4, 4, 4])) - array([ 6, 5, 255]) - >>> np.bitwise_or(np.array([2, 5, 255, 2147483647L], dtype=np.int32), - ... 
np.array([4, 4, 4, 2147483647L], dtype=np.int32)) - array([ 6, 5, 255, 2147483647]) - >>> np.bitwise_or([True, True], [False, True]) - array([ True, True], dtype=bool) - - """) - -add_newdoc('numpy.core.umath', 'bitwise_xor', - """ - Compute bit-wise XOR of two arrays, element-wise. - - When calculating the bit-wise XOR between two elements, ``x`` and ``y``, - each element is first converted to its binary representation (which works - just like the decimal system, only now we're using 2 instead of 10): - - .. math:: x = \\sum_{i=0}^{W-1} a_i \\cdot 2^i\\\\ - y = \\sum_{i=0}^{W-1} b_i \\cdot 2^i, - - where ``W`` is the bit-width of the type (i.e., 8 for a byte or uint8), - and each :math:`a_i` and :math:`b_j` is either 0 or 1. For example, 13 - is represented as ``00001101``, which translates to - :math:`2^4 + 2^3 + 2`. - - The bit-wise operator is the result of - - .. math:: z = \\sum_{i=0}^{i=W-1} (a_i \\oplus b_i) \\cdot 2^i, - - where :math:`\\oplus` is the XOR operator, which yields one whenever - either :math:`a_i` or :math:`b_i` is 1, but not both. - - Parameters - ---------- - x1, x2 : array_like - Only integer types are handled (including booleans). - - Returns - ------- - out : ndarray - Result. - - See Also - -------- - bitwise_and, bitwise_or - logical_xor - binary_repr : - Return the binary representation of the input number as a string. - - Examples - -------- - We've seen that 13 is represented by ``00001101``. Similary, 17 is - represented by ``00010001``. The bit-wise XOR of 13 and 17 is - therefore ``00011100``, or 28: - - >>> np.bitwise_xor(13, 17) - 28 - >>> np.binary_repr(28) - '11100' - - >>> np.bitwise_xor(31, 5) - 26 - >>> np.bitwise_xor([31,3], 5) - array([26, 6]) - - >>> np.bitwise_xor([31,3], [5,6]) - array([26, 5]) - >>> np.bitwise_xor([True, True], [False, True]) - array([ True, False], dtype=bool) - - """) - -add_newdoc('numpy.core.umath', 'ceil', - """ - Return the ceiling of the input, element-wise. - - The ceil of the scalar `x` is the smallest integer `i`, such that - `i >= x`. It is often denoted as :math:`\\lceil x \\rceil`. - - Parameters - ---------- - x : array_like - Input data. - - Returns - ------- - y : {ndarray, scalar} - The ceiling of each element in `x`. - - Examples - -------- - >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) - >>> np.ceil(a) - array([-1., -1., -0., 1., 2., 2., 2.]) - - """) - -add_newdoc('numpy.core.umath', 'trunc', - """ - Return the truncated value of the input, element-wise. - - The truncated value of the scalar `x` is the nearest integer `i` which - is closer to zero than `x` is. In short, the fractional part of the - signed number `x` is discarded. - - Parameters - ---------- - x : array_like - Input data. - - Returns - ------- - y : {ndarray, scalar} - The truncated value of each element in `x`. - - Examples - -------- - >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) - >>> np.ceil(a) - array([-1., -1., -0., 0., 1., 1., 2.]) - - """) - -add_newdoc('numpy.core.umath', 'conjugate', - """ - Return the complex conjugate, element-wise. - - The complex conjugate of a complex number is obtained by changing the - sign of its imaginary part. - - Parameters - ---------- - x : array_like - Input value. - - Returns - ------- - y : ndarray - The complex conjugate of `x`, with same dtype as `y`. - - Examples - -------- - >>> np.conjugate(1+2j) - (1-2j) - - """) - -add_newdoc('numpy.core.umath', 'cos', - """ - Cosine elementwise. - - Parameters - ---------- - x : array_like - Input array in radians. 
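One slip in the entries above: the `trunc` docstring reuses ``np.ceil(a)`` in its Examples block even though the output it shows is the truncated array. A corrected doctest for that example would read:

    >>> import numpy as np
    >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
    >>> np.trunc(a)
    array([-1., -1., -0.,  0.,  1.,  1.,  2.])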
- - Returns - ------- - out : ndarray - Output array of same shape as `x`. - - Examples - -------- - >>> np.cos(np.array([0, np.pi/2, np.pi])) - array([ 1.00000000e+00, 6.12303177e-17, -1.00000000e+00]) - - """) - -add_newdoc('numpy.core.umath', 'cosh', - """ - Hyperbolic cosine, element-wise. - - Equivalent to ``1/2 * (np.exp(x) + np.exp(-x))`` and ``np.cos(1j*x)``. - - Parameters - ---------- - x : array_like - Input array. - - Returns - ------- - out : ndarray - Output array of same shape as `x`. - - Examples - -------- - >>> np.cosh(0) - 1.0 - - The hyperbolic cosine describes the shape of a hanging cable: - - >>> import matplotlib.pyplot as plt - >>> x = np.linspace(-4, 4, 1000) - >>> plt.plot(x, np.cosh(x)) - >>> plt.show() - - """) - -add_newdoc('numpy.core.umath', 'degrees', - """ - Convert angles from radians to degrees. This is the same - function as rad2deg but the latter is preferred because of - the more descriptive name. - - Parameters - ---------- - x : array_like - Angle in radians. - - Returns - ------- - y : ndarray - The corresponding angle in degrees. - - - See Also - -------- - rad2deg : Convert angles from radians to degrees. - deg2rad : Convert angles from degrees to radians. - radians : Convert angles from degrees to radians. - unwrap : Remove large jumps in angle by wrapping. - - Notes - ----- - degrees(x) is ``180 * x / pi``. - - Examples - -------- - >>> np.degrees(np.pi/2) - 90.0 - - """) - -add_newdoc('numpy.core.umath', 'rad2deg', - """ - Convert angles from radians to degrees. This is the same - function as degrees but is preferred because its more - descriptive name. - - Parameters - ---------- - x : array_like - Angle in radians. - - Returns - ------- - y : ndarray - The corresponding angle in degrees. - - - See Also - -------- - degrees : Convert angles from radians to degrees. - deg2rad : Convert angles from degrees to radians. - radians : Convert angles from degrees to radians. - unwrap : Remove large jumps in angle by wrapping. - - Notes - ----- - rad2deg(x) is ``180 * x / pi``. - - Examples - -------- - >>> np.rad2deg(np.pi/2) - 90.0 - - """) - -add_newdoc('numpy.core.umath', 'divide', - """ - Divide arguments element-wise. - - Parameters - ---------- - x1 : array_like - Dividend array. - x2 : array_like - Divisor array. - - Returns - ------- - y : {ndarray, scalar} - The quotient `x1/x2`, element-wise. Returns a scalar if - both `x1` and `x2` are scalars. - - See Also - -------- - seterr : Set whether to raise or warn on overflow, underflow and division - by zero. - - Notes - ----- - Equivalent to `x1` / `x2` in terms of array-broadcasting. - - Behavior on division by zero can be changed using `seterr`. - - When both `x1` and `x2` are of an integer type, `divide` will return - integers and throw away the fractional part. Moreover, division by zero - always yields zero in integer arithmetic. - - Examples - -------- - >>> np.divide(2.0, 4.0) - 0.5 - >>> x1 = np.arange(9.0).reshape((3, 3)) - >>> x2 = np.arange(3.0) - >>> np.divide(x1, x2) - array([[ NaN, 1. , 1. ], - [ Inf, 4. , 2.5], - [ Inf, 7. , 4. ]]) - - Note the behavior with integer types: - - >>> np.divide(2, 4) - 0 - >>> np.divide(2, 4.) 
- 0.5 - - Division by zero always yields zero in integer arithmetic, and does not - raise an exception or a warning: - - >>> np.divide(np.array([0, 1], dtype=int), np.array([0, 0], dtype=int)) - array([0, 0]) - - Division by zero can, however, be caught using `seterr`: - - >>> old_err_state = np.seterr(divide='raise') - >>> np.divide(1, 0) - Traceback (most recent call last): - File "", line 1, in - FloatingPointError: divide by zero encountered in divide - - >>> ignored_states = np.seterr(**old_err_state) - >>> np.divide(1, 0) - 0 - - """) - -add_newdoc('numpy.core.umath', 'equal', - """ - Returns elementwise x1 == x2 in a bool array. - - Parameters - ---------- - x1, x2 : array_like - Input arrays of the same shape. - - Returns - ------- - out : boolean - The elementwise test `x1` == `x2`. - - """) - -add_newdoc('numpy.core.umath', 'exp', - """ - Calculate the exponential of the elements in the input array. - - Parameters - ---------- - x : array_like - Input values. - - Returns - ------- - out : ndarray - Element-wise exponential of `x`. - - Notes - ----- - The irrational number ``e`` is also known as Euler's number. It is - approximately 2.718281, and is the base of the natural logarithm, - ``ln`` (this means that, if :math:`x = \\ln y = \\log_e y`, - then :math:`e^x = y`. For real input, ``exp(x)`` is always positive. - - For complex arguments, ``x = a + ib``, we can write - :math:`e^x = e^a e^{ib}`. The first term, :math:`e^a`, is already - known (it is the real argument, described above). The second term, - :math:`e^{ib}`, is :math:`\\cos b + i \\sin b`, a function with magnitude - 1 and a periodic phase. - - References - ---------- - .. [1] Wikipedia, "Exponential function", - http://en.wikipedia.org/wiki/Exponential_function - .. [2] M. Abramovitz and I. A. Stegun, "Handbook of Mathematical Functions - with Formulas, Graphs, and Mathematical Tables," Dover, 1964, p. 69, - http://www.math.sfu.ca/~cbm/aands/page_69.htm - - Examples - -------- - Plot the magnitude and phase of ``exp(x)`` in the complex plane: - - >>> import matplotlib.pyplot as plt - - >>> x = np.linspace(-2*np.pi, 2*np.pi, 100) - >>> xx = x + 1j * x[:, np.newaxis] # a + ib over complex plane - >>> out = np.exp(xx) - - >>> plt.subplot(121) - >>> plt.imshow(np.abs(out), - ... extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi]) - >>> plt.title('Magnitude of exp(x)') - - >>> plt.subplot(122) - >>> plt.imshow(np.angle(out), - ... extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi]) - >>> plt.title('Phase (angle) of exp(x)') - >>> plt.show() - - """) - -add_newdoc('numpy.core.umath', 'exp2', - """ - Calculate `2**p` for all `p` in the input array. - - Parameters - ---------- - x : array_like - Input values. - - Returns - ------- - out : ndarray - Element-wise 2 to the power `x`. - - """) - -add_newdoc('numpy.core.umath', 'expm1', - """ - Return the exponential of the elements in the array minus one. - - Parameters - ---------- - x : array_like - Input values. - - Returns - ------- - out : ndarray - Element-wise exponential minus one: ``out=exp(x)-1``. - - See Also - -------- - log1p : ``log(1+x)``, the inverse of expm1. - - - Notes - ----- - This function provides greater precision than using ``exp(x)-1`` - for small values of `x`. - - Examples - -------- - Since the series expansion of ``e**x = 1 + x + x**2/2! + x**3/3! 
+ ...``, - for very small `x` we expect that ``e**x -1 ~ x + x**2/2``: - - >>> np.expm1(1e-10) - 1.00000000005e-10 - >>> np.exp(1e-10) - 1 - 1.000000082740371e-10 - - """) - -add_newdoc('numpy.core.umath', 'fabs', - """ - Compute the absolute values elementwise. - - This function returns the absolute values (positive magnitude) of the data - in `x`. Complex values are not handled, use `absolute` to find the - absolute values of complex data. - - Parameters - ---------- - x : array_like - The array of numbers for which the absolute values are required. If - `x` is a scalar, the result `y` will also be a scalar. - - Returns - ------- - y : {ndarray, scalar} - The absolute values of `x`, the returned values are always floats. - - See Also - -------- - absolute : Absolute values including `complex` types. - - Examples - -------- - >>> np.fabs(-1) - 1.0 - >>> np.fabs([-1.2, 1.2]) - array([ 1.2, 1.2]) - - """) - -add_newdoc('numpy.core.umath', 'floor', - """ - Return the floor of the input, element-wise. - - The floor of the scalar `x` is the largest integer `i`, such that - `i <= x`. It is often denoted as :math:`\\lfloor x \\rfloor`. - - Parameters - ---------- - x : array_like - Input data. - - Returns - ------- - y : {ndarray, scalar} - The floor of each element in `x`. - - Notes - ----- - Some spreadsheet programs calculate the "floor-towards-zero", in other - words ``floor(-2.5) == -2``. NumPy, however, uses the a definition of - `floor` such that `floor(-2.5) == -3``. - - Examples - -------- - >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) - >>> np.floor(a) - array([-2., -2., -1., 0., 1., 1., 2.]) - - """) - -add_newdoc('numpy.core.umath', 'floor_divide', - """ - Return the largest integer smaller or equal to the division of the inputs. - - Parameters - ---------- - x1 : array_like - Numerator. - x2 : array_like - Denominator. - - Returns - ------- - y : ndarray - y = floor(`x1`/`x2`) - - - See Also - -------- - divide : Standard division. - floor : Round a number to the nearest integer toward minus infinity. - ceil : Round a number to the nearest integer toward infinity. - - Examples - -------- - >>> np.floor_divide(7,3) - 2 - >>> np.floor_divide([1., 2., 3., 4.], 2.5) - array([ 0., 0., 1., 1.]) - - """) - -add_newdoc('numpy.core.umath', 'fmod', - """ - Return the remainder of division. - - This is the NumPy implementation of the C modulo operator `%`. - - Parameters - ---------- - x1 : array_like - Dividend. - x2 : array_like - Divisor. - - Returns - ------- - y : array_like - The remainder of the division of `x1` by `x2`. - - See Also - -------- - mod : Modulo operation where the quotient is `floor(x1,x2)`. - - Notes - ----- - The result of the modulo operation for negative dividend and divisors is - bound by conventions. In `fmod`, the sign of the remainder is the sign of - the dividend, and the sign of the divisor has no influence on the results. - - Examples - -------- - >>> np.fmod([-3, -2, -1, 1, 2, 3], 2) - array([-1, 0, -1, 1, 0, 1]) - - >>> np.mod([-3, -2, -1, 1, 2, 3], 2) - array([1, 0, 1, 1, 0, 1]) - - """) - -add_newdoc('numpy.core.umath', 'greater', - """ - Return (x1 > x2) element-wise. - - Parameters - ---------- - x1, x2 : array_like - Input arrays. - - Returns - ------- - Out : {ndarray, bool} - Output array of bools, or a single bool if `x1` and `x2` are scalars. - - See Also - -------- - greater_equal - - Examples - -------- - >>> np.greater([4,2],[2,2]) - array([ True, False], dtype=bool) - - If the inputs are ndarrays, then np.greater is equivalent to '>'. 
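Unlike most entries here, the `equal` and `exp2` docstrings above carry no Examples section; an illustrative session (output written in the array repr of this era of numpy) could look like:

    >>> import numpy as np
    >>> np.exp2([1., 2., 3.])
    array([ 2.,  4.,  8.])
    >>> np.equal([0, 1, 3], np.arange(3))
    array([ True,  True, False], dtype=bool)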
- - >>> a = np.array([4,2]) - >>> b = np.array([2,2]) - >>> a > b - array([ True, False], dtype=bool) - - """) - -add_newdoc('numpy.core.umath', 'greater_equal', - """ - Element-wise True if first array is greater or equal than second array. - - Parameters - ---------- - x1, x2 : array_like - Input arrays. - - Returns - ------- - out : ndarray, bool - Output array. - - See Also - -------- - greater, less, less_equal, equal - - Examples - -------- - >>> np.greater_equal([4,2],[2,2]) - array([ True, True], dtype=bool) - - """) - -add_newdoc('numpy.core.umath', 'hypot', - """ - Given two sides of a right triangle, return its hypotenuse. - - Parameters - ---------- - x : array_like - Base of the triangle. - y : array_like - Height of the triangle. - - Returns - ------- - z : ndarray - Hypotenuse of the triangle: sqrt(x**2 + y**2) - - Examples - -------- - >>> np.hypot(3,4) - 5.0 - - """) - -add_newdoc('numpy.core.umath', 'invert', - """ - Compute bit-wise inversion, or bit-wise NOT, element-wise. - - When calculating the bit-wise NOT of an element ``x``, each element is - first converted to its binary representation (which works - just like the decimal system, only now we're using 2 instead of 10): - - .. math:: x = \\sum_{i=0}^{W-1} a_i \\cdot 2^i - - where ``W`` is the bit-width of the type (i.e., 8 for a byte or uint8), - and each :math:`a_i` is either 0 or 1. For example, 13 is represented - as ``00001101``, which translates to :math:`2^4 + 2^3 + 2`. - - The bit-wise operator is the result of - - .. math:: z = \\sum_{i=0}^{i=W-1} (\\lnot a_i) \\cdot 2^i, - - where :math:`\\lnot` is the NOT operator, which yields 1 whenever - :math:`a_i` is 0 and yields 0 whenever :math:`a_i` is 1. - - For signed integer inputs, the two's complement is returned. - In a two's-complement system negative numbers are represented by the two's - complement of the absolute value. This is the most common method of - representing signed integers on computers [1]_. A N-bit two's-complement - system can represent every integer in the range - :math:`-2^{N-1}` to :math:`+2^{N-1}-1`. - - Parameters - ---------- - x1 : ndarray - Only integer types are handled (including booleans). - - Returns - ------- - out : ndarray - Result. - - See Also - -------- - bitwise_and, bitwise_or, bitwise_xor - logical_not - binary_repr : - Return the binary representation of the input number as a string. - - Notes - ----- - `bitwise_not` is an alias for `invert`: - - >>> np.bitwise_not is np.invert - True - - References - ---------- - .. [1] Wikipedia, "Two's complement", - http://en.wikipedia.org/wiki/Two's_complement - - Examples - -------- - We've seen that 13 is represented by ``00001101``. 
- The invert or bit-wise NOT of 13 is then: - - >>> np.invert(np.array([13], dtype=uint8)) - array([242], dtype=uint8) - >>> np.binary_repr(x, width=8) - '00001101' - >>> np.binary_repr(242, width=8) - '11110010' - - The result depends on the bit-width: - - >>> np.invert(np.array([13], dtype=uint16)) - array([65522], dtype=uint16) - >>> np.binary_repr(x, width=16) - '0000000000001101' - >>> np.binary_repr(65522, width=16) - '1111111111110010' - - When using signed integer types the result is the two's complement of - the result for the unsigned type: - - >>> np.invert(np.array([13], dtype=int8)) - array([-14], dtype=int8) - >>> np.binary_repr(-14, width=8) - '11110010' - - Booleans are accepted as well: - - >>> np.invert(array([True, False])) - array([False, True], dtype=bool) - - """) - -add_newdoc('numpy.core.umath', 'isfinite', - """ - Returns True for each element that is a finite number. - - Shows which elements of the input are finite (not infinity or not - Not a Number). - - Parameters - ---------- - x : array_like - Input values. - y : array_like, optional - A boolean array with the same shape and type as `x` to store the result. - - Returns - ------- - y : ndarray, bool - For scalar input data, the result is a new numpy boolean with value True - if the input data is finite; otherwise the value is False (input is - either positive infinity, negative infinity or Not a Number). - - For array input data, the result is an numpy boolean array with the same - dimensions as the input and the values are True if the corresponding - element of the input is finite; otherwise the values are False (element - is either positive infinity, negative infinity or Not a Number). If the - second argument is supplied then an numpy integer array is returned with - values 0 or 1 corresponding to False and True, respectively. - - See Also - -------- - isinf : Shows which elements are negative or negative infinity. - isneginf : Shows which elements are negative infinity. - isposinf : Shows which elements are positive infinity. - isnan : Shows which elements are Not a Number (NaN). - - - Notes - ----- - Not a Number, positive infinity and negative infinity are considered - to be non-finite. - - Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic - (IEEE 754). This means that Not a Number is not equivalent to infinity. - Also that positive infinity is not equivalent to negative infinity. But - infinity is equivalent to positive infinity. - - Errors result if second argument is also supplied with scalar input or - if first and second arguments have different shapes. - - Examples - -------- - >>> np.isfinite(1) - True - >>> np.isfinite(0) - True - >>> np.isfinite(np.nan) - False - >>> np.isfinite(np.inf) - False - >>> np.isfinite(np.NINF) - False - >>> np.isfinite([np.log(-1.),1.,np.log(0)]) - array([False, True, False], dtype=bool) - >>> x=np.array([-np.inf, 0., np.inf]) - >>> y=np.array([2,2,2]) - >>> np.isfinite(x,y) - array([0, 1, 0]) - >>> y - array([0, 1, 0]) - - """) - -add_newdoc('numpy.core.umath', 'isinf', - """ - Shows which elements of the input are positive or negative infinity. - Returns a numpy boolean scalar or array resulting from an element-wise test - for positive or negative infinity. - - Parameters - ---------- - x : array_like - input values - y : array_like, optional - An array with the same shape as `x` to store the result. 
- - Returns - ------- - y : {ndarray, bool} - For scalar input data, the result is a new numpy boolean with value True - if the input data is positive or negative infinity; otherwise the value - is False. - - For array input data, the result is an numpy boolean array with the same - dimensions as the input and the values are True if the corresponding - element of the input is positive or negative infinity; otherwise the - values are False. If the second argument is supplied then an numpy - integer array is returned with values 0 or 1 corresponding to False and - True, respectively. - - See Also - -------- - isneginf : Shows which elements are negative infinity. - isposinf : Shows which elements are positive infinity. - isnan : Shows which elements are Not a Number (NaN). - isfinite: Shows which elements are not: Not a number, positive and - negative infinity - - Notes - ----- - Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic - (IEEE 754). This means that Not a Number is not equivalent to infinity. - Also that positive infinity is not equivalent to negative infinity. But - infinity is equivalent to positive infinity. - - Errors result if second argument is also supplied with scalar input or - if first and second arguments have different shapes. - - Numpy's definitions for positive infinity (PINF) and negative infinity - (NINF) may be change in the future versions. - - Examples - -------- - >>> np.isinf(np.inf) - True - >>> np.isinf(np.nan) - False - >>> np.isinf(np.NINF) - True - >>> np.isinf([np.inf, -np.inf, 1.0, np.nan]) - array([ True, True, False, False], dtype=bool) - >>> x=np.array([-np.inf, 0., np.inf]) - >>> y=np.array([2,2,2]) - >>> np.isinf(x,y) - array([1, 0, 1]) - >>> y - array([1, 0, 1]) - - """) - -add_newdoc('numpy.core.umath', 'isnan', - """ - Returns a numpy boolean scalar or array resulting from an element-wise test - for Not a Number (NaN). - - Parameters - ---------- - x : array_like - input data. - - Returns - ------- - y : {ndarray, bool} - For scalar input data, the result is a new numpy boolean with value True - if the input data is NaN; otherwise the value is False. - - For array input data, the result is an numpy boolean array with the same - dimensions as the input and the values are True if the corresponding - element of the input is Not a Number; otherwise the values are False. - - See Also - -------- - isinf : Tests for infinity. - isneginf : Tests for negative infinity. - isposinf : Tests for positive infinity. - isfinite : Shows which elements are not: Not a number, positive infinity - and negative infinity - - Notes - ----- - Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic - (IEEE 754). This means that Not a Number is not equivalent to infinity. - - Examples - -------- - >>> np.isnan(np.nan) - True - >>> np.isnan(np.inf) - False - >>> np.isnan([np.log(-1.),1.,np.log(0)]) - array([ True, False, False], dtype=bool) - - """) - -add_newdoc('numpy.core.umath', 'left_shift', - """ - Shift the bits of an integer to the left. - - Bits are shifted to the left by appending `x2` 0s at the right of `x1`. - Since the internal representation of numbers is in binary format, this - operation is equivalent to multiplying `x1` by ``2**x2``. - - Parameters - ---------- - x1 : array_like of integer type - Input values. - x2 : array_like of integer type - Number of zeros to append to `x1`. - - Returns - ------- - out : array of integer type - Return `x1` with bits shifted `x2` times to the left. 
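The `left_shift` entry above states that shifting left by `x2` bits is the same as multiplying by ``2**x2``; a quick check of that equivalence, with `right_shift` undoing it:

    >>> import numpy as np
    >>> np.left_shift(5, 2)
    20
    >>> 5 * 2**2
    20
    >>> np.right_shift(20, 2)
    5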
- - See Also - -------- - right_shift : Shift the bits of an integer to the right. - binary_repr : Return the binary representation of the input number - as a string. - - Examples - -------- - >>> np.left_shift(5, [1,2,3]) - array([10, 20, 40]) - - """) - -add_newdoc('numpy.core.umath', 'less', - """ - Returns (x1 < x2) element-wise. - - Parameters - ---------- - x1, x2 : array_like - Input arrays. - - Returns - ------- - Out : {ndarray, bool} - Output array of bools, or a single bool if `x1` and `x2` are scalars. - - See Also - -------- - less_equal - - Examples - -------- - >>> np.less([1,2],[2,2]) - array([ True, False], dtype=bool) - - """) - -add_newdoc('numpy.core.umath', 'less_equal', - """ - Returns (x1 <= x2) element-wise. - - Parameters - ---------- - x1, x2 : array_like - Input arrays. - - Returns - ------- - Out : {ndarray, bool} - Output array of bools, or a single bool if `x1` and `x2` are scalars. - - See Also - -------- - less - - Examples - -------- - >>> np.less_equal([1,2,3],[2,2,2]) - array([ True, True, False], dtype=bool) - - """) - -add_newdoc('numpy.core.umath', 'log', - """ - Natural logarithm, element-wise. - - The natural logarithm `log` is the inverse of the exponential function, - so that `log(exp(x)) = x`. The natural logarithm is logarithm in base `e`. - - Parameters - ---------- - x : array_like - Input value. - - Returns - ------- - y : ndarray - The natural logarithm of `x`, element-wise. - - See Also - -------- - log10, log2, log1p - - Notes - ----- - Logarithm is a multivalued function: for each `x` there is an infinite - number of `z` such that `exp(z) = x`. The convention is to return the `z` - whose imaginary part lies in `[-pi, pi]`. - - For real-valued input data types, `log` always returns real output. For - each value that cannot be expressed as a real number or infinity, it - yields ``nan`` and sets the `invalid` floating point error flag. - - For complex-valued input, `log` is a complex analytical function that - has a branch cut `[-inf, 0]` and is continuous from above on it. `log` - handles the floating-point negative zero as an infinitesimal negative - number, conforming to the C99 standard. - - References - ---------- - .. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions", - 10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/ - .. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm - - Examples - -------- - >>> np.log([1, np.e, np.e**2, 0]) - array([ 0., 1., 2., -Inf]) - - """) - -add_newdoc('numpy.core.umath', 'log10', - """ - Compute the logarithm in base 10 element-wise. - - Parameters - ---------- - x : array_like - Input values. - - Returns - ------- - y : ndarray - Base-10 logarithm of `x`. - - Notes - ----- - Logarithm is a multivalued function: for each `x` there is an infinite - number of `z` such that `10**z = x`. The convention is to return the `z` - whose imaginary part lies in `[-pi, pi]`. - - For real-valued input data types, `log10` always returns real output. For - each value that cannot be expressed as a real number or infinity, it - yields ``nan`` and sets the `invalid` floating point error flag. - - For complex-valued input, `log10` is a complex analytical function that - has a branch cut `[-inf, 0]` and is continuous from above on it. `log10` - handles the floating-point negative zero as an infinitesimal negative - number, conforming to the C99 standard. - - References - ---------- - .. [1] M. Abramowitz and I.A. 
Stegun, "Handbook of Mathematical Functions", - 10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/ - .. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm - - Examples - -------- - >>> np.log10([1.e-15,-3.]) - array([-15., NaN]) - - """) - -add_newdoc('numpy.core.umath', 'log2', - """ - Base-2 logarithm of `x`. - - Parameters - ---------- - x : array_like - Input values. - - Returns - ------- - y : ndarray - Base-2 logarithm of `x`. - - See Also - -------- - log, log10, log1p - - """) - -add_newdoc('numpy.core.umath', 'logaddexp', - """ - Logarithm of `exp(x) + exp(y)`. - - This function is useful in statistics where the calculated probabilities of - events may be so small as to excede the range of normal floating point - numbers. In such cases the logarithm of the calculated probability is - stored. This function allows adding probabilities stored in such a fashion. - - Parameters - ---------- - x : array_like - Input values. - y : array_like - Input values. - - - Returns - ------- - result : ndarray - Logarithm of `exp(x) + exp(y)`. - - See Also - -------- - logaddexp2 - - """) - -add_newdoc('numpy.core.umath', 'logaddexp2', - """ - Base-2 Logarithm of `2**x + 2**y`. - - This function is useful in machine learning when the calculated probabilities of - events may be so small as to excede the range of normal floating point - numbers. In such cases the base-2 logarithm of the calculated probability - can be used instead. This function allows adding probabilities stored in such a fashion. - - Parameters - ---------- - x : array_like - Input values. - y : array_like - Input values. - - - Returns - ------- - result : ndarray - Base-2 logarithm of `2**x + 2**y`. - - See Also - -------- - logaddexp - - """) - -add_newdoc('numpy.core.umath', 'log1p', - """ - `log(1 + x)` in base `e`, elementwise. - - Parameters - ---------- - x : array_like - Input values. - - Returns - ------- - y : ndarray - Natural logarithm of `1 + x`, elementwise. - - Notes - ----- - For real-valued input, `log1p` is accurate also for `x` so small - that `1 + x == 1` in floating-point accuracy. - - Logarithm is a multivalued function: for each `x` there is an infinite - number of `z` such that `exp(z) = 1 + x`. The convention is to return - the `z` whose imaginary part lies in `[-pi, pi]`. - - For real-valued input data types, `log1p` always returns real output. For - each value that cannot be expressed as a real number or infinity, it - yields ``nan`` and sets the `invalid` floating point error flag. - - For complex-valued input, `log1p` is a complex analytical function that - has a branch cut `[-inf, -1]` and is continuous from above on it. `log1p` - handles the floating-point negative zero as an infinitesimal negative - number, conforming to the C99 standard. - - References - ---------- - .. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions", - 10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/ - .. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm - - Examples - -------- - >>> np.log1p(1e-99) - 1e-99 - >>> np.log(1 + 1e-99) - 0.0 - - """) - -add_newdoc('numpy.core.umath', 'logical_and', - """ - Compute the truth value of x1 AND x2 elementwise. - - Parameters - ---------- - x1, x2 : array_like - Logical AND is applied to the elements of `x1` and `x2`. - They have to be of the same shape. 
- - - Returns - ------- - y : {ndarray, bool} - Boolean result with the same shape as `x1` and `x2` of the logical - AND operation on elements of `x1` and `x2`. - - See Also - -------- - logical_or, logical_not, logical_xor - bitwise_and - - Examples - -------- - >>> np.logical_and(True, False) - False - >>> np.logical_and([True, False], [False, False]) - array([False, False], dtype=bool) - - >>> x = np.arange(5) - >>> np.logical_and(x>1, x<4) - array([False, False, True, True, False], dtype=bool) - - """) - -add_newdoc('numpy.core.umath', 'logical_not', - """ - Compute the truth value of NOT x elementwise. - - Parameters - ---------- - x : array_like - Logical NOT is applied to the elements of `x`. - - Returns - ------- - y : {ndarray, bool} - Boolean result with the same shape as `x` of the NOT operation - on elements of `x`. - - See Also - -------- - logical_and, logical_or, logical_xor - - Examples - -------- - >>> np.logical_not(3) - False - >>> np.logical_not([True, False, 0, 1]) - array([False, True, True, False], dtype=bool) - - >>> x = np.arange(5) - >>> np.logical_not(x<3) - array([False, False, False, True, True], dtype=bool) - - """) - -add_newdoc('numpy.core.umath', 'logical_or', - """ - Compute the truth value of x1 OR x2 elementwise. - - Parameters - ---------- - x1, x2 : array_like - Logical OR is applied to the elements of `x1` and `x2`. - They have to be of the same shape. - - Returns - ------- - y : {ndarray, bool} - Boolean result with the same shape as `x1` and `x2` of the logical - OR operation on elements of `x1` and `x2`. - - See Also - -------- - logical_and, logical_not, logical_xor - bitwise_or - - Examples - -------- - >>> np.logical_or(True, False) - True - >>> np.logical_or([True, False], [False, False]) - array([ True, False], dtype=bool) - - >>> x = np.arange(5) - >>> np.logical_or(x < 1, x > 3) - array([ True, False, False, False, True], dtype=bool) - - """) - -add_newdoc('numpy.core.umath', 'logical_xor', - """ - Compute the truth value of x1 XOR x2 elementwise. - - Parameters - ---------- - x1, x2 : array_like - Logical XOR is applied to the elements of `x1` and `x2`. - They have to be of the same shape. - - Returns - ------- - y : {ndarray, bool} - Boolean result with the same shape as `x1` and `x2` of the logical - XOR operation on elements of `x1` and `x2`. - - See Also - -------- - logical_and, logical_or, logical_not - bitwise_xor - - Examples - -------- - >>> np.logical_xor(True, False) - True - >>> np.logical_xor([True, True, False, False], [True, False, True, False]) - array([False, True, True, False], dtype=bool) - - >>> x = np.arange(5) - >>> np.logical_xor(x < 1, x > 3) - array([ True, False, False, False, True], dtype=bool) - - """) - -add_newdoc('numpy.core.umath', 'maximum', - """ - Element-wise maximum of array elements. - - Compare two arrays and returns a new array containing - the element-wise maxima. - - Parameters - ---------- - x1, x2 : array_like - The arrays holding the elements to be compared. - - Returns - ------- - y : {ndarray, scalar} - The maximum of `x1` and `x2`, element-wise. Returns scalar if - both `x1` and `x2` are scalars. - - See Also - -------- - minimum : - element-wise minimum - - Notes - ----- - Equivalent to ``np.where(x1 > x2, x1, x2)`` but faster and does proper - broadcasting. - - Examples - -------- - >>> np.maximum([2, 3, 4], [1, 5, 2]) - array([2, 5, 4]) - - >>> np.maximum(np.eye(2), [0.5, 2]) - array([[ 1. , 2. ], - [ 0.5, 2. 
]]) - - """) - -add_newdoc('numpy.core.umath', 'minimum', - """ - Element-wise minimum of array elements. - - Compare two arrays and returns a new array containing - the element-wise minima. - - Parameters - ---------- - x1, x2 : array_like - The arrays holding the elements to be compared. - - Returns - ------- - y : {ndarray, scalar} - The minimum of `x1` and `x2`, element-wise. Returns scalar if - both `x1` and `x2` are scalars. - - See Also - -------- - maximum : - element-wise maximum - - Notes - ----- - Equivalent to ``np.where(x1 < x2, x1, x2)`` but faster and does proper - broadcasting. - - Examples - -------- - >>> np.minimum([2, 3, 4], [1, 5, 2]) - array([1, 3, 2]) - - >>> np.minimum(np.eye(2), [0.5, 2]) - array([[ 0.5, 0. ], - [ 0. , 1. ]]) - - """) - -add_newdoc('numpy.core.umath', 'fmax', - """ - - """) - -add_newdoc('numpy.core.umath', 'fmin', - """ - - """) - -add_newdoc('numpy.core.umath', 'modf', - """ - Return the fractional and integral part of a number. - - The fractional and integral parts are negative if the given number is - negative. - - Parameters - ---------- - x : array_like - Input number. - - Returns - ------- - y1 : ndarray - Fractional part of `x`. - y2 : ndarray - Integral part of `x`. - - Examples - -------- - >>> np.modf(2.5) - (0.5, 2.0) - >>> np.modf(-.4) - (-0.40000000000000002, -0.0) - - """) - -add_newdoc('numpy.core.umath', 'multiply', - """ - Multiply arguments elementwise. - - Parameters - ---------- - x1, x2 : array_like - The arrays to be multiplied. - - Returns - ------- - y : ndarray - The product of `x1` and `x2`, elementwise. Returns a scalar if - both `x1` and `x2` are scalars. - - Notes - ----- - Equivalent to `x1` * `x2` in terms of array-broadcasting. - - Examples - -------- - >>> np.multiply(2.0, 4.0) - 8.0 - - >>> x1 = np.arange(9.0).reshape((3, 3)) - >>> x2 = np.arange(3.0) - >>> np.multiply(x1, x2) - array([[ 0., 1., 4.], - [ 0., 4., 10.], - [ 0., 7., 16.]]) - - """) - -add_newdoc('numpy.core.umath', 'negative', - """ - Returns an array with the negative of each element of the original array. - - Parameters - ---------- - x : {array_like, scalar} - Input array. - - Returns - ------- - y : {ndarray, scalar} - Returned array or scalar `y=-x`. - - Examples - -------- - >>> np.negative([1.,-1.]) - array([-1., 1.]) - - """) - -add_newdoc('numpy.core.umath', 'not_equal', - """ - Return (x1 != x2) element-wise. - - Parameters - ---------- - x1, x2 : array_like - Input arrays. - out : ndarray, optional - A placeholder the same shape as `x1` to store the result. - - Returns - ------- - not_equal : ndarray bool, scalar bool - For each element in `x1, x2`, return True if `x1` is not equal - to `x2` and False otherwise. - - - See Also - -------- - equal, greater, greater_equal, less, less_equal - - Examples - -------- - >>> np.not_equal([1.,2.], [1., 3.]) - array([False, True], dtype=bool) - - """) - -add_newdoc('numpy.core.umath', 'ones_like', - """ - Returns an array of ones with the same shape and type as a given array. - - Equivalent to ``a.copy().fill(1)``. - - Please refer to the documentation for `zeros_like`. - - See Also - -------- - zeros_like - - Examples - -------- - >>> a = np.array([[1, 2, 3], [4, 5, 6]]) - >>> np.ones_like(a) - array([[1, 1, 1], - [1, 1, 1]]) - - """) - -add_newdoc('numpy.core.umath', 'power', - """ - Returns element-wise base array raised to power from second array. - - Raise each base in `x1` to the power of the exponents in `x2`. This - requires that `x1` and `x2` must be broadcastable to the same shape. 
- - Parameters - ---------- - x1 : array_like - The bases. - x2 : array_like - The exponents. - - Returns - ------- - y : ndarray - The bases in `x1` raised to the exponents in `x2`. - - Examples - -------- - Cube each element in a list. - - >>> x1 = range(6) - >>> x1 - [0, 1, 2, 3, 4, 5] - >>> np.power(x1, 3) - array([ 0, 1, 8, 27, 64, 125]) - - Raise the bases to different exponents. - - >>> x2 = [1.0, 2.0, 3.0, 3.0, 2.0, 1.0] - >>> np.power(x1, x2) - array([ 0., 1., 8., 27., 16., 5.]) - - The effect of broadcasting. - - >>> x2 = np.array([[1, 2, 3, 3, 2, 1], [1, 2, 3, 3, 2, 1]]) - >>> x2 - array([[1, 2, 3, 3, 2, 1], - [1, 2, 3, 3, 2, 1]]) - >>> np.power(x1, x2) - array([[ 0, 1, 8, 27, 16, 5], - [ 0, 1, 8, 27, 16, 5]]) - - """) - -add_newdoc('numpy.core.umath', 'radians', - """ - Convert angles from degrees to radians. This function is - the same as deg2rad, which is more descriptive.. - - Parameters - ---------- - x : array_like - Angles in degrees. - - Returns - ------- - y : ndarray - The corresponding angle in radians. - - See Also - -------- - deg2rad : Convert angles from degrees to radians. - rad2deg : Convert angles from radians to degrees. - degrees : Convert angles from radians to degrees. - unwrap : Remove large jumps in angle by wrapping. - - Notes - ----- - ``radians(x)`` is ``x * pi / 180``. - - Examples - -------- - >>> np.radians(180) - 3.1415926535897931 - - """) - -add_newdoc('numpy.core.umath', 'deg2rad', - """ - Convert angles from degrees to radians. This is the same - function as radians, but deg2rad is a more descriptive name. - - Parameters - ---------- - x : array_like - Angles in degrees. - - Returns - ------- - y : ndarray - The corresponding angle in radians. - - See Also - -------- - radians : Convert angles from degrees to radians. - rad2deg : Convert angles from radians to degrees. - degrees : Convert angles from radians to degrees. - unwrap : Remove large jumps in angle by wrapping. - - Notes - ----- - ``deg2rad(x)`` is ``x * pi / 180``. - - Examples - -------- - >>> np.deg2rad(180) - 3.1415926535897931 - - """) - -add_newdoc('numpy.core.umath', 'reciprocal', - """ - Return element-wise reciprocal. - - Parameters - ---------- - x : array_like - Input value. - - Returns - ------- - y : ndarray - Return value. - - Examples - -------- - >>> reciprocal(2.) - 0.5 - >>> reciprocal([1, 2., 3.33]) - array([ 1. , 0.5 , 0.3003003]) - - """) - -add_newdoc('numpy.core.umath', 'remainder', - """ - Returns element-wise remainder of division. - - Computes `x1 - floor(x1/x2)*x2`. - - Parameters - ---------- - x1 : array_like - Dividend array. - x2 : array_like - Divisor array. - - Returns - ------- - y : ndarray - The remainder of the quotient `x1/x2`, element-wise. Returns a scalar - if both `x1` and `x2` are scalars. - - See Also - -------- - divide - floor - - Notes - ----- - Returns 0 when `x2` is 0. - - Examples - -------- - >>> np.remainder([4,7],[2,3]) - array([0, 1]) - - """) - -add_newdoc('numpy.core.umath', 'right_shift', - """ - Shift the bits of an integer to the right. - - Bits are shifted to the right by removing `x2` bits at the right of `x1`. - Since the internal representation of numbers is in binary format, this - operation is equivalent to dividing `x1` by ``2**x2``. - - Parameters - ---------- - x1 : array_like, int - Input values. - x2 : array_like, int - Number of bits to remove at the right of `x1`. - - Returns - ------- - out : ndarray, int - Return `x1` with bits shifted `x2` times to the right. 
- - See Also - -------- - left_shift : Shift the bits of an integer to the left. - binary_repr : Return the binary representation of the input number - as a string. - - Examples - -------- - >>> np.right_shift(10, [1,2,3]) - array([5, 2, 1]) - - """) - -add_newdoc('numpy.core.umath', 'rint', - """ - Round elements of the array to the nearest integer. - - Parameters - ---------- - x : array_like - Input array. - - Returns - ------- - out : ndarray - Output array is same shape and type as `x`. - - Examples - -------- - >>> a = [-4.1, -3.6, -2.5, 0.1, 2.5, 3.1, 3.9] - >>> np.rint(a) - array([-4., -4., -2., 0., 2., 3., 4.]) - - """) - -add_newdoc('numpy.core.umath', 'sign', - """ - Returns an element-wise indication of the sign of a number. - - The `sign` function returns ``-1 if x < 0, 0 if x==0, 1 if x > 0``. - - Parameters - ---------- - x : array_like - Input values. - - Returns - ------- - y : ndarray - The sign of `x`. - - Examples - -------- - >>> np.sign([-5., 4.5]) - array([-1., 1.]) - >>> np.sign(0) - 0 - - """) - -add_newdoc('numpy.core.umath', 'signbit', - """ - Returns element-wise True where signbit is set (less than zero). - - Parameters - ---------- - x: array_like - The input value(s). - - Returns - ------- - out : array_like, bool - Output. - - Examples - -------- - >>> np.signbit(-1.2) - True - >>> np.signbit(np.array([1, -2.3, 2.1])) - array([False, True, False], dtype=bool) - - """) - -add_newdoc('numpy.core.umath', 'sin', - """ - Trigonometric sine, element-wise. - - Parameters - ---------- - x : array_like - Angle, in radians (:math:`2 \\pi` rad equals 360 degrees). - - Returns - ------- - y : array_like - The sine of each element of x. - - See Also - -------- - arcsin, sinh, cos - - Notes - ----- - The sine is one of the fundamental functions of trigonometry - (the mathematical study of triangles). Consider a circle of radius - 1 centered on the origin. A ray comes in from the :math:`+x` axis, - makes an angle at the origin (measured counter-clockwise from that - axis), and departs from the origin. The :math:`y` coordinate of - the outgoing ray's intersection with the unit circle is the sine - of that angle. It ranges from -1 for :math:`x=3\\pi / 2` to - +1 for :math:`\\pi / 2.` The function has zeroes where the angle is - a multiple of :math:`\\pi`. Sines of angles between :math:`\\pi` and - :math:`2\\pi` are negative. The numerous properties of the sine and - related functions are included in any standard trigonometry text. - - Examples - -------- - Print sine of one angle: - - >>> np.sin(np.pi/2.) - 1.0 - - Print sines of an array of angles given in degrees: - - >>> np.sin(np.array((0., 30., 45., 60., 90.)) * np.pi / 180. ) - array([ 0. , 0.5 , 0.70710678, 0.8660254 , 1. ]) - - Plot the sine function: - - >>> import matplotlib.pylab as plt - >>> x = np.linspace(-np.pi, np.pi, 201) - >>> plt.plot(x, np.sin(x)) - >>> plt.xlabel('Angle [rad]') - >>> plt.ylabel('sin(x)') - >>> plt.axis('tight') - >>> plt.show() - - """) - -add_newdoc('numpy.core.umath', 'sinh', - """ - Hyperbolic sine, element-wise. - - Equivalent to ``1/2 * (np.exp(x) - np.exp(-x))`` or - ``-1j * np.sin(1j*x)``. - - Parameters - ---------- - x : array_like - Input array. - - Returns - ------- - out : ndarray - Output array of same shape as `x`. - - """) - -add_newdoc('numpy.core.umath', 'sqrt', - """ - Return the positive square-root of an array, element-wise. - - Parameters - ---------- - x : array_like - The square root of each element in this array is calculated. 
- - Returns - ------- - y : ndarray - An array of the same shape as `x`, containing the square-root of - each element in `x`. If any element in `x` - is complex, a complex array is returned. If all of the elements - of `x` are real, negative elements return numpy.nan elements. - - See Also - -------- - numpy.lib.scimath.sqrt - A version which returns complex numbers when given negative reals. - - Notes - ----- - `sqrt` has a branch cut ``[-inf, 0)`` and is continuous from above on it. - - Examples - -------- - >>> np.sqrt([1,4,9]) - array([ 1., 2., 3.]) - - >>> np.sqrt([4, -1, -3+4J]) - array([ 2.+0.j, 0.+1.j, 1.+2.j]) - - >>> np.sqrt([4, -1, numpy.inf]) - array([ 2., NaN, Inf]) - - """) - -add_newdoc('numpy.core.umath', 'square', - """ - Return the element-wise square of the input. - - Parameters - ---------- - x : array_like - Input data. - - Returns - ------- - out : ndarray - Element-wise `x*x`, of the same shape and dtype as `x`. - Returns scalar if `x` is a scalar. - - See Also - -------- - numpy.linalg.matrix_power - sqrt - power - - Examples - -------- - >>> np.square([-1j, 1]) - array([-1.-0.j, 1.+0.j]) - - """) - -add_newdoc('numpy.core.umath', 'subtract', - """ - Subtract arguments element-wise. - - Parameters - ---------- - x1, x2 : array_like - The arrays to be subtracted from each other. If type is 'array_like' - the `x1` and `x2` shapes must be identical. - - Returns - ------- - y : ndarray - The difference of `x1` and `x2`, element-wise. Returns a scalar if - both `x1` and `x2` are scalars. - - Notes - ----- - Equivalent to `x1` - `x2` in terms of array-broadcasting. - - Examples - -------- - >>> np.subtract(1.0, 4.0) - -3.0 - - >>> x1 = np.arange(9.0).reshape((3, 3)) - >>> x2 = np.arange(3.0) - >>> np.subtract(x1, x2) - array([[ 0., 0., 0.], - [ 3., 3., 3.], - [ 6., 6., 6.]]) - - """) - -add_newdoc('numpy.core.umath', 'tan', - """ - Compute tangent element-wise. - - Parameters - ---------- - x : array_like - Angles in radians. - - Returns - ------- - y : ndarray - The corresponding tangent values. - - - Examples - -------- - >>> from math import pi - >>> np.tan(np.array([-pi,pi/2,pi])) - array([ 1.22460635e-16, 1.63317787e+16, -1.22460635e-16]) - - """) - -add_newdoc('numpy.core.umath', 'tanh', - """ - Hyperbolic tangent element-wise. - - Parameters - ---------- - x : array_like - Input array. - - Returns - ------- - y : ndarray - The corresponding hyperbolic tangent values. - - """) - -add_newdoc('numpy.core.umath', 'true_divide', - """ - Returns an element-wise, true division of the inputs. - - Instead of the Python traditional 'floor division', this returns a true - division. True division adjusts the output type to present the best - answer, regardless of input types. - - Parameters - ---------- - x1 : array_like - Dividend - x2 : array_like - Divisor - - Returns - ------- - out : ndarray - Result is scalar if both inputs are scalar, ndarray otherwise. - - Notes - ----- - The floor division operator ('//') was added in Python 2.2 making '//' - and '/' equivalent operators. The default floor division operation of - '/' can be replaced by true division with - 'from __future__ import division'. - - In Python 3.0, '//' will be the floor division operator and '/' will be - the true division operator. The 'true_divide(`x1`, `x2`)' function is - equivalent to true division in Python. 
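As a small illustrative sketch of the floor-vs-true division behaviour described above (assuming NumPy is imported as ``np``; the variable names are only for illustration):

    import numpy as np

    # true_divide returns the mathematical quotient regardless of input dtype;
    # floor_divide rounds the quotient toward minus infinity (floor).
    a = np.array([1, 2, 3, 4])
    true_q = np.true_divide(a, 4)    # 0.25, 0.5, 0.75, 1.0
    floor_q = np.floor_divide(a, 4)  # 0, 0, 0, 1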
- - """) Modified: trunk/numpy/core/code_generators/generate_umath.py =================================================================== --- trunk/numpy/core/code_generators/generate_umath.py 2008-11-30 14:44:38 UTC (rev 6125) +++ trunk/numpy/core/code_generators/generate_umath.py 2008-11-30 15:08:38 UTC (rev 6126) @@ -1,7 +1,7 @@ import re, textwrap import sys, os sys.path.insert(0, os.path.dirname(__file__)) -import docstrings +import ufunc_docstrings as docstrings sys.path.pop(0) Zero = "PyUFunc_Zero" Copied: trunk/numpy/core/code_generators/ufunc_docstrings.py (from rev 6125, trunk/numpy/core/code_generators/docstrings.py) =================================================================== --- trunk/numpy/core/code_generators/docstrings.py 2008-11-30 14:44:38 UTC (rev 6125) +++ trunk/numpy/core/code_generators/ufunc_docstrings.py 2008-11-30 15:08:38 UTC (rev 6126) @@ -0,0 +1,2718 @@ +# Docstrings for generated ufuncs + +docdict = {} + +def get(name): + return docdict.get(name) + +def add_newdoc(place, name, doc): + docdict['.'.join((place, name))] = doc + + +add_newdoc('numpy.core.umath', 'absolute', + """ + Calculate the absolute value element-wise. + + Parameters + ---------- + x : array_like + Input array. + + Returns + ------- + res : ndarray + An ndarray containing the absolute value of + each element in `x`. For complex input, ``a + ib``, the + absolute value is :math:`\\sqrt{ a^2 + b^2 }`. + + Examples + -------- + >>> x = np.array([-1.2, 1.2]) + >>> np.absolute(x) + array([ 1.2, 1.2]) + >>> np.absolute(1.2 + 1j) + 1.5620499351813308 + + Plot the function over ``[-10, 10]``: + + >>> import matplotlib.pyplot as plt + + >>> x = np.linspace(-10, 10, 101) + >>> plt.plot(x, np.absolute(x)) + >>> plt.show() + + Plot the function over the complex plane: + + >>> xx = x + 1j * x[:, np.newaxis] + >>> plt.imshow(np.abs(xx), extent=[-10, 10, -10, 10]) + >>> plt.show() + + """) + +add_newdoc('numpy.core.umath', 'add', + """ + Add arguments element-wise. + + Parameters + ---------- + x1, x2 : array_like + The arrays to be added. + + Returns + ------- + y : {ndarray, scalar} + The sum of `x1` and `x2`, element-wise. Returns scalar if + both `x1` and `x2` are scalars. + + Notes + ----- + Equivalent to `x1` + `x2` in terms of array broadcasting. + + Examples + -------- + >>> np.add(1.0, 4.0) + 5.0 + >>> x1 = np.arange(9.0).reshape((3, 3)) + >>> x2 = np.arange(3.0) + >>> np.add(x1, x2) + array([[ 0., 2., 4.], + [ 3., 5., 7.], + [ 6., 8., 10.]]) + + """) + +add_newdoc('numpy.core.umath', 'arccos', + """ + Trigonometric inverse cosine, element-wise. + + The inverse of `cos` so that, if ``y = cos(x)``, then ``x = arccos(y)``. + + Parameters + ---------- + x : array_like + `x`-coordinate on the unit circle. + For real arguments, the domain is [-1, 1]. + + Returns + ------- + angle : ndarray + The angle of the ray intersecting the unit circle at the given + `x`-coordinate in radians [0, pi]. If `x` is a scalar then a + scalar is returned, otherwise an array of the same shape as `x` + is returned. + + See Also + -------- + cos, arctan, arcsin + + Notes + ----- + `arccos` is a multivalued function: for each `x` there are infinitely + many numbers `z` such that `cos(z) = x`. The convention is to return the + angle `z` whose real part lies in `[0, pi]`. + + For real-valued input data types, `arccos` always returns real output. + For each value that cannot be expressed as a real number or infinity, it + yields ``nan`` and sets the `invalid` floating point error flag. 
+ + For complex-valued input, `arccos` is a complex analytical function that + has branch cuts `[-inf, -1]` and `[1, inf]` and is continuous from above + on the former and from below on the latter. + + The inverse `cos` is also known as `acos` or cos^-1. + + References + ---------- + .. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions", + 10th printing, 1964, pp. 79. http://www.math.sfu.ca/~cbm/aands/ + .. [2] Wikipedia, "Inverse trigonometric function", + http://en.wikipedia.org/wiki/Arccos + + Examples + -------- + We expect the arccos of 1 to be 0, and of -1 to be pi: + + >>> np.arccos([1, -1]) + array([ 0. , 3.14159265]) + + Plot arccos: + + >>> import matplotlib.pyplot as plt + >>> x = np.linspace(-1, 1, num=100) + >>> plt.plot(x, np.arccos(x)) + >>> plt.axis('tight') + >>> plt.show() + + """) + +add_newdoc('numpy.core.umath', 'arccosh', + """ + Inverse hyperbolic cosine, elementwise. + + Parameters + ---------- + x : array_like + Input array. + + Returns + ------- + out : ndarray + Array of the same shape and dtype as `x`. + + Notes + ----- + `arccosh` is a multivalued function: for each `x` there are infinitely + many numbers `z` such that `cosh(z) = x`. The convention is to return the + `z` whose imaginary part lies in `[-pi, pi]` and the real part in + ``[0, inf]``. + + For real-valued input data types, `arccosh` always returns real output. + For each value that cannot be expressed as a real number or infinity, it + yields ``nan`` and sets the `invalid` floating point error flag. + + For complex-valued input, `arccosh` is a complex analytical function that + has a branch cut `[-inf, 1]` and is continuous from above on it. + + References + ---------- + .. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions", + 10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/ + .. [2] Wikipedia, "Inverse hyperbolic function", + http://en.wikipedia.org/wiki/Arccosh + + Examples + -------- + >>> np.arccosh([np.e, 10.0]) + array([ 1.65745445, 2.99322285]) + + """) + +add_newdoc('numpy.core.umath', 'arcsin', + """ + Inverse sine elementwise. + + Parameters + ---------- + x : array_like + `y`-coordinate on the unit circle. + + Returns + ------- + angle : ndarray + The angle of the ray intersecting the unit circle at the given + `y`-coordinate in radians ``[-pi, pi]``. If `x` is a scalar then + a scalar is returned, otherwise an array is returned. + + See Also + -------- + sin, arctan, arctan2 + + Notes + ----- + `arcsin` is a multivalued function: for each `x` there are infinitely + many numbers `z` such that `sin(z) = x`. The convention is to return the + angle `z` whose real part lies in `[-pi/2, pi/2]`. + + For real-valued input data types, `arcsin` always returns real output. + For each value that cannot be expressed as a real number or infinity, it + yields ``nan`` and sets the `invalid` floating point error flag. + + For complex-valued input, `arcsin` is a complex analytical function that + has branch cuts `[-inf, -1]` and `[1, inf]` and is continuous from above + on the former and from below on the latter. + + The inverse sine is also known as `asin` or ``sin^-1``. + + References + ---------- + .. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions", + 10th printing, 1964, pp. 79. http://www.math.sfu.ca/~cbm/aands/ + .. 
[2] Wikipedia, "Inverse trigonometric function", + http://en.wikipedia.org/wiki/Arcsin + + Examples + -------- + >>> np.arcsin(1) # pi/2 + 1.5707963267948966 + >>> np.arcsin(-1) # -pi/2 + -1.5707963267948966 + >>> np.arcsin(0) + 0.0 + + """) + +add_newdoc('numpy.core.umath', 'arcsinh', + """ + Inverse hyperbolic sine elementwise. + + Parameters + ---------- + x : array_like + Input array. + + Returns + ------- + out : ndarray + Array of of the same shape as `x`. + + Notes + ----- + `arcsinh` is a multivalued function: for each `x` there are infinitely + many numbers `z` such that `sinh(z) = x`. The convention is to return the + `z` whose imaginary part lies in `[-pi/2, pi/2]`. + + For real-valued input data types, `arcsinh` always returns real output. + For each value that cannot be expressed as a real number or infinity, it + returns ``nan`` and sets the `invalid` floating point error flag. + + For complex-valued input, `arccos` is a complex analytical function that + has branch cuts `[1j, infj]` and `[-1j, -infj]` and is continuous from + the right on the former and from the left on the latter. + + The inverse hyperbolic sine is also known as `asinh` or ``sinh^-1``. + + References + ---------- + .. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions", + 10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/ + .. [2] Wikipedia, "Inverse hyperbolic function", + http://en.wikipedia.org/wiki/Arcsinh + + Examples + -------- + >>> np.arcsinh(np.array([np.e, 10.0])) + array([ 1.72538256, 2.99822295]) + + """) + +add_newdoc('numpy.core.umath', 'arctan', + """ + Trigonometric inverse tangent, element-wise. + + The inverse of tan, so that if ``y = tan(x)`` then + ``x = arctan(y)``. + + Parameters + ---------- + x : array_like + Input values. `arctan` is applied to each element of `x`. + + Returns + ------- + out : ndarray + Out has the same shape as `x`. Its real part is + in ``[-pi/2, pi/2]``. It is a scalar if `x` is a scalar. + + See Also + -------- + arctan2 : Calculate the arctan of y/x. + + Notes + ----- + `arctan` is a multivalued function: for each `x` there are infinitely + many numbers `z` such that `tan(z) = x`. The convention is to return the + angle `z` whose real part lies in `[-pi/2, pi/2]`. + + For real-valued input data types, `arctan` always returns real output. + For each value that cannot be expressed as a real number or infinity, it + yields ``nan`` and sets the `invalid` floating point error flag. + + For complex-valued input, `arctan` is a complex analytical function that + has branch cuts `[1j, infj]` and `[-1j, -infj]` and is continuous from the + left on the former and from the right on the latter. + + The inverse tangent is also known as `atan` or ``tan^-1``. + + References + ---------- + .. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions", + 10th printing, 1964, pp. 79. http://www.math.sfu.ca/~cbm/aands/ + .. [2] Wikipedia, "Inverse trigonometric function", + http://en.wikipedia.org/wiki/Arctan + + Examples + -------- + We expect the arctan of 0 to be 0, and of 1 to be :math:`\\pi/4`: + + >>> np.arctan([0, 1]) + array([ 0. , 0.78539816]) + + >>> np.pi/4 + 0.78539816339744828 + + Plot arctan: + + >>> import matplotlib.pyplot as plt + >>> x = np.linspace(-10, 10) + >>> plt.plot(x, np.arctan(x)) + >>> plt.axis('tight') + >>> plt.show() + + """) + +add_newdoc('numpy.core.umath', 'arctan2', + """ + Elementwise arc tangent of ``x1/x2`` choosing the quadrant correctly. + + The quadrant (ie. 
branch) is chosen so that ``arctan2(x1, x2)`` + is the signed angle in radians between the line segments + ``(0,0) - (1,0)`` and ``(0,0) - (x2,x1)``. This function is defined + also for `x2` = 0. + + `arctan2` is not defined for complex-valued arguments. + + Parameters + ---------- + x1 : array_like, real-valued + y-coordinates. + x2 : array_like, real-valued + x-coordinates. `x2` must be broadcastable to match the shape of `x1`, + or vice versa. + + Returns + ------- + angle : ndarray + Array of angles in radians, in the range ``[-pi, pi]``. + + See Also + -------- + arctan, tan + + Notes + ----- + `arctan2` is identical to the `atan2` function of the underlying + C library. The following special values are defined in the C standard [2]: + + ====== ====== ================ + `x1` `x2` `arctan2(x1,x2)` + ====== ====== ================ + +/- 0 +0 +/- 0 + +/- 0 -0 +/- pi + > 0 +/-inf +0 / +pi + < 0 +/-inf -0 / -pi + +/-inf +inf +/- (pi/4) + +/-inf -inf +/- (3*pi/4) + ====== ====== ================ + + Note that +0 and -0 are distinct floating point numbers. + + References + ---------- + .. [1] Wikipedia, "atan2", + http://en.wikipedia.org/wiki/Atan2 + .. [2] ISO/IEC standard 9899:1999, "Programming language C", 1999. + + Examples + -------- + Consider four points in different quadrants: + + >>> x = np.array([-1, +1, +1, -1]) + >>> y = np.array([-1, -1, +1, +1]) + >>> np.arctan2(y, x) * 180 / np.pi + array([-135., -45., 45., 135.]) + + Note the order of the parameters. `arctan2` is defined also when `x2` = 0 + and at several other special points, obtaining values in + the range ``[-pi, pi]``: + + >>> np.arctan2([1., -1.], [0., 0.]) + array([ 1.57079633, -1.57079633]) + >>> np.arctan2([0., 0., np.inf], [+0., -0., np.inf]) + array([ 0. , 3.14159265, 0.78539816]) + + """) + +add_newdoc('numpy.core.umath', 'arctanh', + """ + Inverse hyperbolic tangent elementwise. + + Parameters + ---------- + x : array_like + Input array. + + Returns + ------- + out : ndarray + Array of the same shape as `x`. + + Notes + ----- + `arctanh` is a multivalued function: for each `x` there are infinitely + many numbers `z` such that `tanh(z) = x`. The convention is to return the + `z` whose imaginary part lies in `[-pi/2, pi/2]`. + + For real-valued input data types, `arctanh` always returns real output. + For each value that cannot be expressed as a real number or infinity, it + yields ``nan`` and sets the `invalid` floating point error flag. + + For complex-valued input, `arctanh` is a complex analytical function that + has branch cuts `[-1, -inf]` and `[1, inf]` and is continuous from + above on the former and from below on the latter. + + The inverse hyperbolic tangent is also known as `atanh` or ``tanh^-1``. + + References + ---------- + .. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions", + 10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/ + .. [2] Wikipedia, "Inverse hyperbolic function", + http://en.wikipedia.org/wiki/Arctanh + + Examples + -------- + >>> np.arctanh([0, -0.5]) + array([ 0. , -0.54930614]) + + """) + +add_newdoc('numpy.core.umath', 'bitwise_and', + """ + Compute bit-wise AND of two arrays, element-wise. + + When calculating the bit-wise AND between two elements, ``x`` and ``y``, + each element is first converted to its binary representation (which works + just like the decimal system, only now we're using 2 instead of 10): + + .. 
math:: x = \\sum_{i=0}^{W-1} a_i \\cdot 2^i\\\\ + y = \\sum_{i=0}^{W-1} b_i \\cdot 2^i, + + where ``W`` is the bit-width of the type (i.e., 8 for a byte or uint8), + and each :math:`a_i` and :math:`b_i` is either 0 or 1. For example, 13 + is represented as ``00001101``, which translates to + :math:`2^3 + 2^2 + 2^0`. + + The bit-wise operator is the result of + + .. math:: z = \\sum_{i=0}^{i=W-1} (a_i \\wedge b_i) \\cdot 2^i, + + where :math:`\\wedge` is the AND operator, which yields one whenever + both :math:`a_i` and :math:`b_i` are 1. + + Parameters + ---------- + x1, x2 : array_like + Only integer types are handled (including booleans). + + Returns + ------- + out : array_like + Result. + + See Also + -------- + bitwise_or, bitwise_xor + logical_and + binary_repr : + Return the binary representation of the input number as a string. + + Examples + -------- + We've seen that 13 is represented by ``00001101``. Similarly, 17 is + represented by ``00010001``. The bit-wise AND of 13 and 17 is + therefore ``00000001``, or 1: + + >>> np.bitwise_and(13, 17) + 1 + + >>> np.bitwise_and(14, 13) + 12 + >>> np.binary_repr(12) + '1100' + >>> np.bitwise_and([14,3], 13) + array([12, 1]) + + >>> np.bitwise_and([11,7], [4,25]) + array([0, 1]) + >>> np.bitwise_and(np.array([2,5,255]), np.array([3,14,16])) + array([ 2, 4, 16]) + >>> np.bitwise_and([True, True], [False, True]) + array([False, True], dtype=bool) + + """) + +add_newdoc('numpy.core.umath', 'bitwise_or', + """ + Compute bit-wise OR of two arrays, element-wise. + + When calculating the bit-wise OR between two elements, ``x`` and ``y``, + each element is first converted to its binary representation (which works + just like the decimal system, only now we're using 2 instead of 10): + + .. math:: x = \\sum_{i=0}^{W-1} a_i \\cdot 2^i\\\\ + y = \\sum_{i=0}^{W-1} b_i \\cdot 2^i, + + where ``W`` is the bit-width of the type (i.e., 8 for a byte or uint8), + and each :math:`a_i` and :math:`b_i` is either 0 or 1. For example, 13 + is represented as ``00001101``, which translates to + :math:`2^3 + 2^2 + 2^0`. + + The bit-wise operator is the result of + + .. math:: z = \\sum_{i=0}^{i=W-1} (a_i \\vee b_i) \\cdot 2^i, + + where :math:`\\vee` is the OR operator, which yields one whenever + either :math:`a_i` or :math:`b_i` is 1. + + Parameters + ---------- + x1, x2 : array_like + Only integer types are handled (including booleans). + + Returns + ------- + out : array_like + Result. + + See Also + -------- + bitwise_and, bitwise_xor + logical_or + binary_repr : + Return the binary representation of the input number as a string. + + Examples + -------- + We've seen that 13 is represented by ``00001101``. Similarly, 16 is + represented by ``00010000``. The bit-wise OR of 13 and 16 is + therefore ``00011101``, or 29: + + >>> np.bitwise_or(13, 16) + 29 + >>> np.binary_repr(29) + '11101' + + >>> np.bitwise_or(32, 2) + 34 + >>> np.bitwise_or([33, 4], 1) + array([33, 5]) + >>> np.bitwise_or([33, 4], [1, 2]) + array([33, 6]) + + >>> np.bitwise_or(np.array([2, 5, 255]), np.array([4, 4, 4])) + array([ 6, 5, 255]) + >>> np.bitwise_or(np.array([2, 5, 255, 2147483647L], dtype=np.int32), + ... np.array([4, 4, 4, 2147483647L], dtype=np.int32)) + array([ 6, 5, 255, 2147483647]) + >>> np.bitwise_or([True, True], [False, True]) + array([ True, True], dtype=bool) + + """) + +add_newdoc('numpy.core.umath', 'bitwise_xor', + """ + Compute bit-wise XOR of two arrays, element-wise.
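As a minimal sketch of the bit-decomposition formula described above (assuming NumPy is imported as ``np``; ``w``, ``a_bits`` and ``b_bits`` are illustrative helper names, not part of the API):

    import numpy as np

    # Write 13 and 17 as sums a_i*2**i and b_i*2**i (least significant bit first),
    # then apply the AND formula z = sum((a_i & b_i) * 2**i).
    w = 8
    a_bits = [(13 >> i) & 1 for i in range(w)]
    b_bits = [(17 >> i) & 1 for i in range(w)]
    z = sum((a & b) * 2**i for i, (a, b) in enumerate(zip(a_bits, b_bits)))
    assert z == np.bitwise_and(13, 17) == 1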
+ + When calculating the bit-wise XOR between two elements, ``x`` and ``y``, + each element is first converted to its binary representation (which works + just like the decimal system, only now we're using 2 instead of 10): + + .. math:: x = \\sum_{i=0}^{W-1} a_i \\cdot 2^i\\\\ + y = \\sum_{i=0}^{W-1} b_i \\cdot 2^i, + + where ``W`` is the bit-width of the type (i.e., 8 for a byte or uint8), + and each :math:`a_i` and :math:`b_i` is either 0 or 1. For example, 13 + is represented as ``00001101``, which translates to + :math:`2^3 + 2^2 + 2^0`. + + The bit-wise operator is the result of + + .. math:: z = \\sum_{i=0}^{i=W-1} (a_i \\oplus b_i) \\cdot 2^i, + + where :math:`\\oplus` is the XOR operator, which yields one whenever + either :math:`a_i` or :math:`b_i` is 1, but not both. + + Parameters + ---------- + x1, x2 : array_like + Only integer types are handled (including booleans). + + Returns + ------- + out : ndarray + Result. + + See Also + -------- + bitwise_and, bitwise_or + logical_xor + binary_repr : + Return the binary representation of the input number as a string. + + Examples + -------- + We've seen that 13 is represented by ``00001101``. Similarly, 17 is + represented by ``00010001``. The bit-wise XOR of 13 and 17 is + therefore ``00011100``, or 28: + + >>> np.bitwise_xor(13, 17) + 28 + >>> np.binary_repr(28) + '11100' + + >>> np.bitwise_xor(31, 5) + 26 + >>> np.bitwise_xor([31,3], 5) + array([26, 6]) + + >>> np.bitwise_xor([31,3], [5,6]) + array([26, 5]) + >>> np.bitwise_xor([True, True], [False, True]) + array([ True, False], dtype=bool) + + """) + +add_newdoc('numpy.core.umath', 'ceil', + """ + Return the ceiling of the input, element-wise. + + The ceil of the scalar `x` is the smallest integer `i`, such that + `i >= x`. It is often denoted as :math:`\\lceil x \\rceil`. + + Parameters + ---------- + x : array_like + Input data. + + Returns + ------- + y : {ndarray, scalar} + The ceiling of each element in `x`. + + Examples + -------- + >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) + >>> np.ceil(a) + array([-1., -1., -0., 1., 2., 2., 2.]) + + """) + +add_newdoc('numpy.core.umath', 'trunc', + """ + Return the truncated value of the input, element-wise. + + The truncated value of the scalar `x` is the nearest integer `i` which + is closer to zero than `x` is. In short, the fractional part of the + signed number `x` is discarded. + + Parameters + ---------- + x : array_like + Input data. + + Returns + ------- + y : {ndarray, scalar} + The truncated value of each element in `x`. + + Examples + -------- + >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) + >>> np.trunc(a) + array([-1., -1., -0., 0., 1., 1., 2.]) + + """) + +add_newdoc('numpy.core.umath', 'conjugate', + """ + Return the complex conjugate, element-wise. + + The complex conjugate of a complex number is obtained by changing the + sign of its imaginary part. + + Parameters + ---------- + x : array_like + Input value. + + Returns + ------- + y : ndarray + The complex conjugate of `x`, with the same dtype as `x`. + + Examples + -------- + >>> np.conjugate(1+2j) + (1-2j) + + """) + +add_newdoc('numpy.core.umath', 'cos', + """ + Cosine elementwise. + + Parameters + ---------- + x : array_like + Input array in radians. + + Returns + ------- + out : ndarray + Output array of same shape as `x`. + + Examples + -------- + >>> np.cos(np.array([0, np.pi/2, np.pi])) + array([ 1.00000000e+00, 6.12303177e-17, -1.00000000e+00]) + + """) + +add_newdoc('numpy.core.umath', 'cosh', + """ + Hyperbolic cosine, element-wise.
+ + Equivalent to ``1/2 * (np.exp(x) + np.exp(-x))`` and ``np.cos(1j*x)``. + + Parameters + ---------- + x : array_like + Input array. + + Returns + ------- + out : ndarray + Output array of same shape as `x`. + + Examples + -------- + >>> np.cosh(0) + 1.0 + + The hyperbolic cosine describes the shape of a hanging cable: + + >>> import matplotlib.pyplot as plt + >>> x = np.linspace(-4, 4, 1000) + >>> plt.plot(x, np.cosh(x)) + >>> plt.show() + + """) + +add_newdoc('numpy.core.umath', 'degrees', + """ + Convert angles from radians to degrees. This is the same + function as rad2deg but the latter is preferred because of + the more descriptive name. + + Parameters + ---------- + x : array_like + Angle in radians. + + Returns + ------- + y : ndarray + The corresponding angle in degrees. + + + See Also + -------- + rad2deg : Convert angles from radians to degrees. + deg2rad : Convert angles from degrees to radians. + radians : Convert angles from degrees to radians. + unwrap : Remove large jumps in angle by wrapping. + + Notes + ----- + degrees(x) is ``180 * x / pi``. + + Examples + -------- + >>> np.degrees(np.pi/2) + 90.0 + + """) + +add_newdoc('numpy.core.umath', 'rad2deg', + """ + Convert angles from radians to degrees. This is the same + function as degrees but is preferred because its more + descriptive name. + + Parameters + ---------- + x : array_like + Angle in radians. + + Returns + ------- + y : ndarray + The corresponding angle in degrees. + + + See Also + -------- + degrees : Convert angles from radians to degrees. + deg2rad : Convert angles from degrees to radians. + radians : Convert angles from degrees to radians. + unwrap : Remove large jumps in angle by wrapping. + + Notes + ----- + rad2deg(x) is ``180 * x / pi``. + + Examples + -------- + >>> np.rad2deg(np.pi/2) + 90.0 + + """) + +add_newdoc('numpy.core.umath', 'divide', + """ + Divide arguments element-wise. + + Parameters + ---------- + x1 : array_like + Dividend array. + x2 : array_like + Divisor array. + + Returns + ------- + y : {ndarray, scalar} + The quotient `x1/x2`, element-wise. Returns a scalar if + both `x1` and `x2` are scalars. + + See Also + -------- + seterr : Set whether to raise or warn on overflow, underflow and division + by zero. + + Notes + ----- + Equivalent to `x1` / `x2` in terms of array-broadcasting. + + Behavior on division by zero can be changed using `seterr`. + + When both `x1` and `x2` are of an integer type, `divide` will return + integers and throw away the fractional part. Moreover, division by zero + always yields zero in integer arithmetic. + + Examples + -------- + >>> np.divide(2.0, 4.0) + 0.5 + >>> x1 = np.arange(9.0).reshape((3, 3)) + >>> x2 = np.arange(3.0) + >>> np.divide(x1, x2) + array([[ NaN, 1. , 1. ], + [ Inf, 4. , 2.5], + [ Inf, 7. , 4. ]]) + + Note the behavior with integer types: + + >>> np.divide(2, 4) + 0 + >>> np.divide(2, 4.) + 0.5 + + Division by zero always yields zero in integer arithmetic, and does not + raise an exception or a warning: + + >>> np.divide(np.array([0, 1], dtype=int), np.array([0, 0], dtype=int)) + array([0, 0]) + + Division by zero can, however, be caught using `seterr`: + + >>> old_err_state = np.seterr(divide='raise') + >>> np.divide(1, 0) + Traceback (most recent call last): + File "", line 1, in + FloatingPointError: divide by zero encountered in divide + + >>> ignored_states = np.seterr(**old_err_state) + >>> np.divide(1, 0) + 0 + + """) + +add_newdoc('numpy.core.umath', 'equal', + """ + Returns elementwise x1 == x2 in a bool array. 
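For illustration only (assuming NumPy is imported as ``np``; the variable names are made up), `equal` behaves like the ``==`` operator with broadcasting:

    import numpy as np

    # Element-wise comparison of two sequences, and of an array against a scalar.
    pair_check = np.equal([1, 2, 3], [1, 2, 4])   # True, True, False
    scalar_check = np.equal(np.arange(3), 1)      # False, True, False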
+ + Parameters + ---------- + x1, x2 : array_like + Input arrays of the same shape. + + Returns + ------- + out : boolean + The elementwise test `x1` == `x2`. + + """) + +add_newdoc('numpy.core.umath', 'exp', + """ + Calculate the exponential of the elements in the input array. + + Parameters + ---------- + x : array_like + Input values. + + Returns + ------- + out : ndarray + Element-wise exponential of `x`. + + Notes + ----- + The irrational number ``e`` is also known as Euler's number. It is + approximately 2.718281, and is the base of the natural logarithm, + ``ln`` (this means that, if :math:`x = \\ln y = \\log_e y`, + then :math:`e^x = y`. For real input, ``exp(x)`` is always positive. + + For complex arguments, ``x = a + ib``, we can write + :math:`e^x = e^a e^{ib}`. The first term, :math:`e^a`, is already + known (it is the real argument, described above). The second term, + :math:`e^{ib}`, is :math:`\\cos b + i \\sin b`, a function with magnitude + 1 and a periodic phase. + + References + ---------- + .. [1] Wikipedia, "Exponential function", + http://en.wikipedia.org/wiki/Exponential_function + .. [2] M. Abramovitz and I. A. Stegun, "Handbook of Mathematical Functions + with Formulas, Graphs, and Mathematical Tables," Dover, 1964, p. 69, + http://www.math.sfu.ca/~cbm/aands/page_69.htm + + Examples + -------- + Plot the magnitude and phase of ``exp(x)`` in the complex plane: + + >>> import matplotlib.pyplot as plt + + >>> x = np.linspace(-2*np.pi, 2*np.pi, 100) + >>> xx = x + 1j * x[:, np.newaxis] # a + ib over complex plane + >>> out = np.exp(xx) + + >>> plt.subplot(121) + >>> plt.imshow(np.abs(out), + ... extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi]) + >>> plt.title('Magnitude of exp(x)') + + >>> plt.subplot(122) + >>> plt.imshow(np.angle(out), + ... extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi]) + >>> plt.title('Phase (angle) of exp(x)') + >>> plt.show() + + """) + +add_newdoc('numpy.core.umath', 'exp2', + """ + Calculate `2**p` for all `p` in the input array. + + Parameters + ---------- + x : array_like + Input values. + + Returns + ------- + out : ndarray + Element-wise 2 to the power `x`. + + """) + +add_newdoc('numpy.core.umath', 'expm1', + """ + Return the exponential of the elements in the array minus one. + + Parameters + ---------- + x : array_like + Input values. + + Returns + ------- + out : ndarray + Element-wise exponential minus one: ``out=exp(x)-1``. + + See Also + -------- + log1p : ``log(1+x)``, the inverse of expm1. + + + Notes + ----- + This function provides greater precision than using ``exp(x)-1`` + for small values of `x`. + + Examples + -------- + Since the series expansion of ``e**x = 1 + x + x**2/2! + x**3/3! + ...``, + for very small `x` we expect that ``e**x -1 ~ x + x**2/2``: + + >>> np.expm1(1e-10) + 1.00000000005e-10 + >>> np.exp(1e-10) - 1 + 1.000000082740371e-10 + + """) + +add_newdoc('numpy.core.umath', 'fabs', + """ + Compute the absolute values elementwise. + + This function returns the absolute values (positive magnitude) of the data + in `x`. Complex values are not handled, use `absolute` to find the + absolute values of complex data. + + Parameters + ---------- + x : array_like + The array of numbers for which the absolute values are required. If + `x` is a scalar, the result `y` will also be a scalar. + + Returns + ------- + y : {ndarray, scalar} + The absolute values of `x`, the returned values are always floats. + + See Also + -------- + absolute : Absolute values including `complex` types. 
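A short sketch of the fabs/absolute distinction described above (assuming NumPy is imported as ``np``; the variable names are illustrative):

    import numpy as np

    # fabs returns floating point values even for integer input and does not
    # accept complex input; absolute also handles complex data.
    ints = np.array([-2, 3])
    as_floats = np.fabs(ints)         # 2.0, 3.0
    magnitude = np.absolute(-3 - 4j)  # 5.0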
+ + Examples + -------- + >>> np.fabs(-1) + 1.0 + >>> np.fabs([-1.2, 1.2]) + array([ 1.2, 1.2]) + + """) + +add_newdoc('numpy.core.umath', 'floor', + """ + Return the floor of the input, element-wise. + + The floor of the scalar `x` is the largest integer `i`, such that + `i <= x`. It is often denoted as :math:`\\lfloor x \\rfloor`. + + Parameters + ---------- + x : array_like + Input data. + + Returns + ------- + y : {ndarray, scalar} + The floor of each element in `x`. + + Notes + ----- + Some spreadsheet programs calculate the "floor-towards-zero", in other + words ``floor(-2.5) == -2``. NumPy, however, uses a definition of + `floor` such that ``floor(-2.5) == -3``. + + Examples + -------- + >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) + >>> np.floor(a) + array([-2., -2., -1., 0., 1., 1., 2.]) + + """) + +add_newdoc('numpy.core.umath', 'floor_divide', + """ + Return the largest integer smaller or equal to the division of the inputs. + + Parameters + ---------- + x1 : array_like + Numerator. + x2 : array_like + Denominator. + + Returns + ------- + y : ndarray + y = floor(`x1`/`x2`) + + + See Also + -------- + divide : Standard division. + floor : Round a number to the nearest integer toward minus infinity. + ceil : Round a number to the nearest integer toward infinity. + + Examples + -------- + >>> np.floor_divide(7,3) + 2 + >>> np.floor_divide([1., 2., 3., 4.], 2.5) + array([ 0., 0., 1., 1.]) + + """) + +add_newdoc('numpy.core.umath', 'fmod', + """ + Return the remainder of division. + + This is the NumPy implementation of the C modulo operator `%`. + + Parameters + ---------- + x1 : array_like + Dividend. + x2 : array_like + Divisor. + + Returns + ------- + y : array_like + The remainder of the division of `x1` by `x2`. + + See Also + -------- + mod : Modulo operation where the quotient is `floor(x1/x2)`. + + Notes + ----- + The result of the modulo operation for negative dividend and divisors is + bound by conventions. In `fmod`, the sign of the remainder is the sign of + the dividend, and the sign of the divisor has no influence on the results. + + Examples + -------- + >>> np.fmod([-3, -2, -1, 1, 2, 3], 2) + array([-1, 0, -1, 1, 0, 1]) + + >>> np.mod([-3, -2, -1, 1, 2, 3], 2) + array([1, 0, 1, 1, 0, 1]) + + """) + +add_newdoc('numpy.core.umath', 'greater', + """ + Return (x1 > x2) element-wise. + + Parameters + ---------- + x1, x2 : array_like + Input arrays. + + Returns + ------- + Out : {ndarray, bool} + Output array of bools, or a single bool if `x1` and `x2` are scalars. + + See Also + -------- + greater_equal + + Examples + -------- + >>> np.greater([4,2],[2,2]) + array([ True, False], dtype=bool) + + If the inputs are ndarrays, then np.greater is equivalent to '>'. + + >>> a = np.array([4,2]) + >>> b = np.array([2,2]) + >>> a > b + array([ True, False], dtype=bool) + + """) + +add_newdoc('numpy.core.umath', 'greater_equal', + """ + Element-wise True if first array is greater than or equal to second array. + + Parameters + ---------- + x1, x2 : array_like + Input arrays. + + Returns + ------- + out : ndarray, bool + Output array. + + See Also + -------- + greater, less, less_equal, equal + + Examples + -------- + >>> np.greater_equal([4,2],[2,2]) + array([ True, True], dtype=bool) + + """) + +add_newdoc('numpy.core.umath', 'hypot', + """ + Given two sides of a right triangle, return its hypotenuse. + + Parameters + ---------- + x : array_like + Base of the triangle. + y : array_like + Height of the triangle.
+ + Returns + ------- + z : ndarray + Hypotenuse of the triangle: sqrt(x**2 + y**2) + + Examples + -------- + >>> np.hypot(3,4) + 5.0 + + """) + +add_newdoc('numpy.core.umath', 'invert', + """ + Compute bit-wise inversion, or bit-wise NOT, element-wise. + + When calculating the bit-wise NOT of an element ``x``, each element is + first converted to its binary representation (which works + just like the decimal system, only now we're using 2 instead of 10): + + .. math:: x = \\sum_{i=0}^{W-1} a_i \\cdot 2^i + + where ``W`` is the bit-width of the type (i.e., 8 for a byte or uint8), + and each :math:`a_i` is either 0 or 1. For example, 13 is represented + as ``00001101``, which translates to :math:`2^3 + 2^2 + 2^0`. + + The bit-wise operator is the result of + + .. math:: z = \\sum_{i=0}^{i=W-1} (\\lnot a_i) \\cdot 2^i, + + where :math:`\\lnot` is the NOT operator, which yields 1 whenever + :math:`a_i` is 0 and yields 0 whenever :math:`a_i` is 1. + + For signed integer inputs, the two's complement is returned. + In a two's-complement system negative numbers are represented by the two's + complement of the absolute value. This is the most common method of + representing signed integers on computers [1]_. An N-bit two's-complement + system can represent every integer in the range + :math:`-2^{N-1}` to :math:`+2^{N-1}-1`. + + Parameters + ---------- + x1 : ndarray + Only integer types are handled (including booleans). + + Returns + ------- + out : ndarray + Result. + + See Also + -------- + bitwise_and, bitwise_or, bitwise_xor + logical_not + binary_repr : + Return the binary representation of the input number as a string. + + Notes + ----- + `bitwise_not` is an alias for `invert`: + + >>> np.bitwise_not is np.invert + True + + References + ---------- + .. [1] Wikipedia, "Two's complement", + http://en.wikipedia.org/wiki/Two's_complement + + Examples + -------- + We've seen that 13 is represented by ``00001101``. + The invert or bit-wise NOT of 13 is then: + + >>> np.invert(np.array([13], dtype=np.uint8)) + array([242], dtype=uint8) + >>> np.binary_repr(13, width=8) + '00001101' + >>> np.binary_repr(242, width=8) + '11110010' + + The result depends on the bit-width: + + >>> np.invert(np.array([13], dtype=np.uint16)) + array([65522], dtype=uint16) + >>> np.binary_repr(13, width=16) + '0000000000001101' + >>> np.binary_repr(65522, width=16) + '1111111111110010' + + When using signed integer types the result is the two's complement of + the result for the unsigned type: + + >>> np.invert(np.array([13], dtype=np.int8)) + array([-14], dtype=int8) + >>> np.binary_repr(-14, width=8) + '11110010' + + Booleans are accepted as well: + + >>> np.invert(np.array([True, False])) + array([False, True], dtype=bool) + + """) + +add_newdoc('numpy.core.umath', 'isfinite', + """ + Returns True for each element that is a finite number. + + Shows which elements of the input are finite (not infinity and not + Not a Number). + + Parameters + ---------- + x : array_like + Input values. + y : array_like, optional + A boolean array with the same shape and type as `x` to store the result. + + Returns + ------- + y : ndarray, bool + For scalar input data, the result is a new numpy boolean with value True + if the input data is finite; otherwise the value is False (input is + either positive infinity, negative infinity or Not a Number).
+ + For array input data, the result is a numpy boolean array with the same + dimensions as the input and the values are True if the corresponding + element of the input is finite; otherwise the values are False (element + is either positive infinity, negative infinity or Not a Number). If the + second argument is supplied then a numpy integer array is returned with + values 0 or 1 corresponding to False and True, respectively. + + See Also + -------- + isinf : Shows which elements are positive or negative infinity. + isneginf : Shows which elements are negative infinity. + isposinf : Shows which elements are positive infinity. + isnan : Shows which elements are Not a Number (NaN). + + + Notes + ----- + Not a Number, positive infinity and negative infinity are considered + to be non-finite. + + Numpy uses the IEEE Standard for Binary Floating-Point Arithmetic + (IEEE 754). This means that Not a Number is not equivalent to infinity. + Also, positive infinity is not equivalent to negative infinity, but + infinity is equivalent to positive infinity. + + Errors result if the second argument is also supplied with scalar input or + if the first and second arguments have different shapes. + + Examples + -------- + >>> np.isfinite(1) + True + >>> np.isfinite(0) + True + >>> np.isfinite(np.nan) + False + >>> np.isfinite(np.inf) + False + >>> np.isfinite(np.NINF) + False + >>> np.isfinite([np.log(-1.),1.,np.log(0)]) + array([False, True, False], dtype=bool) + >>> x=np.array([-np.inf, 0., np.inf]) + >>> y=np.array([2,2,2]) + >>> np.isfinite(x,y) + array([0, 1, 0]) + >>> y + array([0, 1, 0]) + + """) + +add_newdoc('numpy.core.umath', 'isinf', + """ + Shows which elements of the input are positive or negative infinity. + Returns a numpy boolean scalar or array resulting from an element-wise test + for positive or negative infinity. + + Parameters + ---------- + x : array_like + Input values. + y : array_like, optional + An array with the same shape as `x` to store the result. + + Returns + ------- + y : {ndarray, bool} + For scalar input data, the result is a new numpy boolean with value True + if the input data is positive or negative infinity; otherwise the value + is False. + + For array input data, the result is a numpy boolean array with the same + dimensions as the input and the values are True if the corresponding + element of the input is positive or negative infinity; otherwise the + values are False. If the second argument is supplied then a numpy + integer array is returned with values 0 or 1 corresponding to False and + True, respectively. + + See Also + -------- + isneginf : Shows which elements are negative infinity. + isposinf : Shows which elements are positive infinity. + isnan : Shows which elements are Not a Number (NaN). + isfinite : Shows which elements are finite (not NaN, not infinity). + + Notes + ----- + Numpy uses the IEEE Standard for Binary Floating-Point Arithmetic + (IEEE 754). This means that Not a Number is not equivalent to infinity. + Also, positive infinity is not equivalent to negative infinity, but + infinity is equivalent to positive infinity. + + Errors result if the second argument is also supplied with scalar input or + if the first and second arguments have different shapes. + + Numpy's definitions for positive infinity (PINF) and negative infinity + (NINF) may change in future versions.
+
+    Examples
+    --------
+    >>> np.isinf(np.inf)
+    True
+    >>> np.isinf(np.nan)
+    False
+    >>> np.isinf(np.NINF)
+    True
+    >>> np.isinf([np.inf, -np.inf, 1.0, np.nan])
+    array([ True,  True, False, False], dtype=bool)
+    >>> x = np.array([-np.inf, 0., np.inf])
+    >>> y = np.array([2, 2, 2])
+    >>> np.isinf(x, y)
+    array([1, 0, 1])
+    >>> y
+    array([1, 0, 1])
+
+    """)
+
+add_newdoc('numpy.core.umath', 'isnan',
+    """
+    Returns a numpy boolean scalar or array resulting from an element-wise test
+    for Not a Number (NaN).
+
+    Parameters
+    ----------
+    x : array_like
+        Input data.
+
+    Returns
+    -------
+    y : {ndarray, bool}
+        For scalar input data, the result is a new numpy boolean with value True
+        if the input data is NaN; otherwise the value is False.
+
+        For array input data, the result is a numpy boolean array with the same
+        dimensions as the input and the values are True if the corresponding
+        element of the input is Not a Number; otherwise the values are False.
+
+    See Also
+    --------
+    isinf : Tests for infinity.
+    isneginf : Tests for negative infinity.
+    isposinf : Tests for positive infinity.
+    isfinite : Shows which elements are finite (neither NaN nor infinity).
+
+    Notes
+    -----
+    Numpy uses the IEEE Standard for Binary Floating-Point Arithmetic
+    (IEEE 754). This means that Not a Number is not equivalent to infinity.
+
+    Examples
+    --------
+    >>> np.isnan(np.nan)
+    True
+    >>> np.isnan(np.inf)
+    False
+    >>> np.isnan([np.log(-1.),1.,np.log(0)])
+    array([ True, False, False], dtype=bool)
+
+    """)
+
+add_newdoc('numpy.core.umath', 'left_shift',
+    """
+    Shift the bits of an integer to the left.
+
+    Bits are shifted to the left by appending `x2` 0s at the right of `x1`.
+    Since the internal representation of numbers is in binary format, this
+    operation is equivalent to multiplying `x1` by ``2**x2``.
+
+    Parameters
+    ----------
+    x1 : array_like of integer type
+        Input values.
+    x2 : array_like of integer type
+        Number of zeros to append to `x1`.
+
+    Returns
+    -------
+    out : array of integer type
+        Return `x1` with bits shifted `x2` times to the left.
+
+    See Also
+    --------
+    right_shift : Shift the bits of an integer to the right.
+    binary_repr : Return the binary representation of the input number
+        as a string.
+
+    Examples
+    --------
+    >>> np.left_shift(5, [1,2,3])
+    array([10, 20, 40])
+
+    """)
+
+add_newdoc('numpy.core.umath', 'less',
+    """
+    Returns (x1 < x2) element-wise.
+
+    Parameters
+    ----------
+    x1, x2 : array_like
+        Input arrays.
+
+    Returns
+    -------
+    out : {ndarray, bool}
+        Output array of bools, or a single bool if `x1` and `x2` are scalars.
+
+    See Also
+    --------
+    less_equal
+
+    Examples
+    --------
+    >>> np.less([1,2],[2,2])
+    array([ True, False], dtype=bool)
+
+    """)
+
+add_newdoc('numpy.core.umath', 'less_equal',
+    """
+    Returns (x1 <= x2) element-wise.
+
+    Parameters
+    ----------
+    x1, x2 : array_like
+        Input arrays.
+
+    Returns
+    -------
+    out : {ndarray, bool}
+        Output array of bools, or a single bool if `x1` and `x2` are scalars.
+
+    See Also
+    --------
+    less
+
+    Examples
+    --------
+    >>> np.less_equal([1,2,3],[2,2,2])
+    array([ True,  True, False], dtype=bool)
+
+    """)
+
+add_newdoc('numpy.core.umath', 'log',
+    """
+    Natural logarithm, element-wise.
+
+    The natural logarithm `log` is the inverse of the exponential function,
+    so that `log(exp(x)) = x`. The natural logarithm is the logarithm in
+    base `e`.
+
+    Parameters
+    ----------
+    x : array_like
+        Input value.
+
+    Returns
+    -------
+    y : ndarray
+        The natural logarithm of `x`, element-wise.
+
+    See Also
+    --------
+    log10, log2, log1p
+
+    Notes
+    -----
+    Logarithm is a multivalued function: for each `x` there is an infinite
+    number of `z` such that `exp(z) = x`. The convention is to return the `z`
+    whose imaginary part lies in `[-pi, pi]`.
+
+    For real-valued input data types, `log` always returns real output. For
+    each value that cannot be expressed as a real number or infinity, it
+    yields ``nan`` and sets the `invalid` floating point error flag.
+
+    For complex-valued input, `log` is a complex analytical function that
+    has a branch cut `[-inf, 0]` and is continuous from above on it. `log`
+    handles the floating-point negative zero as an infinitesimal negative
+    number, conforming to the C99 standard.
+
+    References
+    ----------
+    .. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
+           10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
+    .. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
+
+    Examples
+    --------
+    >>> np.log([1, np.e, np.e**2, 0])
+    array([  0.,   1.,   2., -Inf])
+
+    """)
+
+add_newdoc('numpy.core.umath', 'log10',
+    """
+    Compute the logarithm in base 10 element-wise.
+
+    Parameters
+    ----------
+    x : array_like
+        Input values.
+
+    Returns
+    -------
+    y : ndarray
+        Base-10 logarithm of `x`.
+
+    Notes
+    -----
+    Logarithm is a multivalued function: for each `x` there is an infinite
+    number of `z` such that `10**z = x`. The convention is to return the `z`
+    whose imaginary part lies in `[-pi, pi]`.
+
+    For real-valued input data types, `log10` always returns real output. For
+    each value that cannot be expressed as a real number or infinity, it
+    yields ``nan`` and sets the `invalid` floating point error flag.
+
+    For complex-valued input, `log10` is a complex analytical function that
+    has a branch cut `[-inf, 0]` and is continuous from above on it. `log10`
+    handles the floating-point negative zero as an infinitesimal negative
+    number, conforming to the C99 standard.
+
+    References
+    ----------
+    .. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
+           10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
+    .. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
+
+    Examples
+    --------
+    >>> np.log10([1.e-15,-3.])
+    array([-15.,  NaN])
+
+    """)
+
+add_newdoc('numpy.core.umath', 'log2',
+    """
+    Base-2 logarithm of `x`.
+
+    Parameters
+    ----------
+    x : array_like
+        Input values.
+
+    Returns
+    -------
+    y : ndarray
+        Base-2 logarithm of `x`.
+
+    See Also
+    --------
+    log, log10, log1p
+
+    """)
+
+add_newdoc('numpy.core.umath', 'logaddexp',
+    """
+    Logarithm of `exp(x) + exp(y)`.
+
+    This function is useful in statistics where the calculated probabilities of
+    events may be so small as to exceed the range of normal floating point
+    numbers. In such cases the logarithm of the calculated probability is
+    stored. This function allows adding probabilities stored in such a fashion.
+
+    Parameters
+    ----------
+    x : array_like
+        Input values.
+    y : array_like
+        Input values.
+
+    Returns
+    -------
+    result : ndarray
+        Logarithm of `exp(x) + exp(y)`.
+
+    See Also
+    --------
+    logaddexp2
+
+    """)
+
+add_newdoc('numpy.core.umath', 'logaddexp2',
+    """
+    Base-2 logarithm of `2**x + 2**y`.
+
+    This function is useful in machine learning when the calculated
+    probabilities of events may be so small as to exceed the range of normal
+    floating point numbers. In such cases the base-2 logarithm of the
+    calculated probability can be used instead.
+    This function allows adding probabilities stored in such a fashion.
+
+    Parameters
+    ----------
+    x : array_like
+        Input values.
+    y : array_like
+        Input values.
+
+    Returns
+    -------
+    result : ndarray
+        Base-2 logarithm of `2**x + 2**y`.
+
+    See Also
+    --------
+    logaddexp
+
+    """)
+
+add_newdoc('numpy.core.umath', 'log1p',
+    """
+    `log(1 + x)` in base `e`, elementwise.
+
+    Parameters
+    ----------
+    x : array_like
+        Input values.
+
+    Returns
+    -------
+    y : ndarray
+        Natural logarithm of `1 + x`, elementwise.
+
+    Notes
+    -----
+    For real-valued input, `log1p` is accurate also for `x` so small
+    that `1 + x == 1` in floating-point accuracy.
+
+    Logarithm is a multivalued function: for each `x` there is an infinite
+    number of `z` such that `exp(z) = 1 + x`. The convention is to return
+    the `z` whose imaginary part lies in `[-pi, pi]`.
+
+    For real-valued input data types, `log1p` always returns real output. For
+    each value that cannot be expressed as a real number or infinity, it
+    yields ``nan`` and sets the `invalid` floating point error flag.
+
+    For complex-valued input, `log1p` is a complex analytical function that
+    has a branch cut `[-inf, -1]` and is continuous from above on it. `log1p`
+    handles the floating-point negative zero as an infinitesimal negative
+    number, conforming to the C99 standard.
+
+    References
+    ----------
+    .. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
+           10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
+    .. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
+
+    Examples
+    --------
+    >>> np.log1p(1e-99)
+    1e-99
+    >>> np.log(1 + 1e-99)
+    0.0
+
+    """)
+
+add_newdoc('numpy.core.umath', 'logical_and',
+    """
+    Compute the truth value of x1 AND x2 elementwise.
+
+    Parameters
+    ----------
+    x1, x2 : array_like
+        Logical AND is applied to the elements of `x1` and `x2`.
+        They have to be of the same shape.
+
+    Returns
+    -------
+    y : {ndarray, bool}
+        Boolean result with the same shape as `x1` and `x2` of the logical
+        AND operation on elements of `x1` and `x2`.
+
+    See Also
+    --------
+    logical_or, logical_not, logical_xor
+    bitwise_and
+
+    Examples
+    --------
+    >>> np.logical_and(True, False)
+    False
+    >>> np.logical_and([True, False], [False, False])
+    array([False, False], dtype=bool)
+
+    >>> x = np.arange(5)
+    >>> np.logical_and(x>1, x<4)
+    array([False, False,  True,  True, False], dtype=bool)
+
+    """)
+
+add_newdoc('numpy.core.umath', 'logical_not',
+    """
+    Compute the truth value of NOT x elementwise.
+
+    Parameters
+    ----------
+    x : array_like
+        Logical NOT is applied to the elements of `x`.
+
+    Returns
+    -------
+    y : {ndarray, bool}
+        Boolean result with the same shape as `x` of the NOT operation
+        on elements of `x`.
+
+    See Also
+    --------
+    logical_and, logical_or, logical_xor
+
+    Examples
+    --------
+    >>> np.logical_not(3)
+    False
+    >>> np.logical_not([True, False, 0, 1])
+    array([False,  True,  True, False], dtype=bool)
+
+    >>> x = np.arange(5)
+    >>> np.logical_not(x<3)
+    array([False, False, False,  True,  True], dtype=bool)
+
+    """)
+
+add_newdoc('numpy.core.umath', 'logical_or',
+    """
+    Compute the truth value of x1 OR x2 elementwise.
+
+    Parameters
+    ----------
+    x1, x2 : array_like
+        Logical OR is applied to the elements of `x1` and `x2`.
+        They have to be of the same shape.
+
+    Returns
+    -------
+    y : {ndarray, bool}
+        Boolean result with the same shape as `x1` and `x2` of the logical
+        OR operation on elements of `x1` and `x2`.
+
+    See Also
+    --------
+    logical_and, logical_not, logical_xor
+    bitwise_or
+
+    Examples
+    --------
+    >>> np.logical_or(True, False)
+    True
+    >>> np.logical_or([True, False], [False, False])
+    array([ True, False], dtype=bool)
+
+    >>> x = np.arange(5)
+    >>> np.logical_or(x < 1, x > 3)
+    array([ True, False, False, False,  True], dtype=bool)
+
+    """)
+
+add_newdoc('numpy.core.umath', 'logical_xor',
+    """
+    Compute the truth value of x1 XOR x2 elementwise.
+
+    Parameters
+    ----------
+    x1, x2 : array_like
+        Logical XOR is applied to the elements of `x1` and `x2`.
+        They have to be of the same shape.
+
+    Returns
+    -------
+    y : {ndarray, bool}
+        Boolean result with the same shape as `x1` and `x2` of the logical
+        XOR operation on elements of `x1` and `x2`.
+
+    See Also
+    --------
+    logical_and, logical_or, logical_not
+    bitwise_xor
+
+    Examples
+    --------
+    >>> np.logical_xor(True, False)
+    True
+    >>> np.logical_xor([True, True, False, False], [True, False, True, False])
+    array([False,  True,  True, False], dtype=bool)
+
+    >>> x = np.arange(5)
+    >>> np.logical_xor(x < 1, x > 3)
+    array([ True, False, False, False,  True], dtype=bool)
+
+    """)
+
+add_newdoc('numpy.core.umath', 'maximum',
+    """
+    Element-wise maximum of array elements.
+
+    Compares two arrays and returns a new array containing
+    the element-wise maxima.
+
+    Parameters
+    ----------
+    x1, x2 : array_like
+        The arrays holding the elements to be compared.
+
+    Returns
+    -------
+    y : {ndarray, scalar}
+        The maximum of `x1` and `x2`, element-wise. Returns scalar if
+        both `x1` and `x2` are scalars.
+
+    See Also
+    --------
+    minimum :
+        element-wise minimum
+
+    Notes
+    -----
+    Equivalent to ``np.where(x1 > x2, x1, x2)`` but faster and does proper
+    broadcasting.
+
+    Examples
+    --------
+    >>> np.maximum([2, 3, 4], [1, 5, 2])
+    array([2, 5, 4])
+
+    >>> np.maximum(np.eye(2), [0.5, 2])
+    array([[ 1. ,  2. ],
+           [ 0.5,  2. ]])
+
+    """)
+
+add_newdoc('numpy.core.umath', 'minimum',
+    """
+    Element-wise minimum of array elements.
+
+    Compares two arrays and returns a new array containing
+    the element-wise minima.
+
+    Parameters
+    ----------
+    x1, x2 : array_like
+        The arrays holding the elements to be compared.
+
+    Returns
+    -------
+    y : {ndarray, scalar}
+        The minimum of `x1` and `x2`, element-wise. Returns scalar if
+        both `x1` and `x2` are scalars.
+
+    See Also
+    --------
+    maximum :
+        element-wise maximum
+
+    Notes
+    -----
+    Equivalent to ``np.where(x1 < x2, x1, x2)`` but faster and does proper
+    broadcasting.
+
+    Examples
+    --------
+    >>> np.minimum([2, 3, 4], [1, 5, 2])
+    array([1, 3, 2])
+
+    >>> np.minimum(np.eye(2), [0.5, 2])
+    array([[ 0.5,  0. ],
+           [ 0. ,  1. ]])
+
+    """)
+
+add_newdoc('numpy.core.umath', 'fmax',
+    """
+
+    """)
+
+add_newdoc('numpy.core.umath', 'fmin',
+    """
+
+    """)
+
+add_newdoc('numpy.core.umath', 'modf',
+    """
+    Return the fractional and integral part of a number.
+
+    The fractional and integral parts are negative if the given number is
+    negative.
+
+    Parameters
+    ----------
+    x : array_like
+        Input number.
+
+    Returns
+    -------
+    y1 : ndarray
+        Fractional part of `x`.
+    y2 : ndarray
+        Integral part of `x`.
+
+    Examples
+    --------
+    >>> np.modf(2.5)
+    (0.5, 2.0)
+    >>> np.modf(-.4)
+    (-0.40000000000000002, -0.0)
+
+    """)
+
+add_newdoc('numpy.core.umath', 'multiply',
+    """
+    Multiply arguments elementwise.
+
+    Parameters
+    ----------
+    x1, x2 : array_like
+        The arrays to be multiplied.
+
+    Returns
+    -------
+    y : ndarray
+        The product of `x1` and `x2`, elementwise. Returns a scalar if
+        both `x1` and `x2` are scalars.
+
+    Notes
+    -----
+    Equivalent to `x1` * `x2` in terms of array-broadcasting.
+
+    Examples
+    --------
+    >>> np.multiply(2.0, 4.0)
+    8.0
+
+    >>> x1 = np.arange(9.0).reshape((3, 3))
+    >>> x2 = np.arange(3.0)
+    >>> np.multiply(x1, x2)
+    array([[  0.,   1.,   4.],
+           [  0.,   4.,  10.],
+           [  0.,   7.,  16.]])
+
+    """)
+
+add_newdoc('numpy.core.umath', 'negative',
+    """
+    Returns an array with the negative of each element of the original array.
+
+    Parameters
+    ----------
+    x : {array_like, scalar}
+        Input array.
+
+    Returns
+    -------
+    y : {ndarray, scalar}
+        Returned array or scalar: `y = -x`.
+
+    Examples
+    --------
+    >>> np.negative([1.,-1.])
+    array([-1.,  1.])
+
+    """)
+
+add_newdoc('numpy.core.umath', 'not_equal',
+    """
+    Return (x1 != x2) element-wise.
+
+    Parameters
+    ----------
+    x1, x2 : array_like
+        Input arrays.
+    out : ndarray, optional
+        A placeholder the same shape as `x1` to store the result.
+
+    Returns
+    -------
+    not_equal : ndarray bool, scalar bool
+        For each element in `x1, x2`, return True if `x1` is not equal
+        to `x2` and False otherwise.
+
+    See Also
+    --------
+    equal, greater, greater_equal, less, less_equal
+
+    Examples
+    --------
+    >>> np.not_equal([1.,2.], [1., 3.])
+    array([False,  True], dtype=bool)
+
+    """)
+
+add_newdoc('numpy.core.umath', 'ones_like',
+    """
+    Returns an array of ones with the same shape and type as a given array.
+
+    Equivalent to ``a.copy().fill(1)``.
+
+    Please refer to the documentation for `zeros_like`.
+
+    See Also
+    --------
+    zeros_like
+
+    Examples
+    --------
+    >>> a = np.array([[1, 2, 3], [4, 5, 6]])
+    >>> np.ones_like(a)
+    array([[1, 1, 1],
+           [1, 1, 1]])
+
+    """)
+
+add_newdoc('numpy.core.umath', 'power',
+    """
+    First array elements raised to powers from the second array, element-wise.
+
+    Raise each base in `x1` to the power of the exponents in `x2`. `x1` and
+    `x2` must be broadcastable to the same shape.
+
+    Parameters
+    ----------
+    x1 : array_like
+        The bases.
+    x2 : array_like
+        The exponents.
+
+    Returns
+    -------
+    y : ndarray
+        The bases in `x1` raised to the exponents in `x2`.
+
+    Examples
+    --------
+    Cube each element in a list.
+
+    >>> x1 = range(6)
+    >>> x1
+    [0, 1, 2, 3, 4, 5]
+    >>> np.power(x1, 3)
+    array([  0,   1,   8,  27,  64, 125])
+
+    Raise the bases to different exponents.
+
+    >>> x2 = [1.0, 2.0, 3.0, 3.0, 2.0, 1.0]
+    >>> np.power(x1, x2)
+    array([  0.,   1.,   8.,  27.,  16.,   5.])
+
+    The effect of broadcasting.
+
+    >>> x2 = np.array([[1, 2, 3, 3, 2, 1], [1, 2, 3, 3, 2, 1]])
+    >>> x2
+    array([[1, 2, 3, 3, 2, 1],
+           [1, 2, 3, 3, 2, 1]])
+    >>> np.power(x1, x2)
+    array([[ 0,  1,  8, 27, 16,  5],
+           [ 0,  1,  8, 27, 16,  5]])
+
+    """)
+
+add_newdoc('numpy.core.umath', 'radians',
+    """
+    Convert angles from degrees to radians. This function is
+    the same as deg2rad, which is a more descriptive name.
+
+    Parameters
+    ----------
+    x : array_like
+        Angles in degrees.
+
+    Returns
+    -------
+    y : ndarray
+        The corresponding angle in radians.
+
+    See Also
+    --------
+    deg2rad : Convert angles from degrees to radians.
+    rad2deg : Convert angles from radians to degrees.
+    degrees : Convert angles from radians to degrees.
+    unwrap : Remove large jumps in angle by wrapping.
+
+    Notes
+    -----
+    ``radians(x)`` is ``x * pi / 180``.
+
+    Examples
+    --------
+    >>> np.radians(180)
+    3.1415926535897931
+
+    """)
+
+add_newdoc('numpy.core.umath', 'deg2rad',
+    """
+    Convert angles from degrees to radians. This is the same
+    function as radians, but deg2rad is a more descriptive name.
+
+    Parameters
+    ----------
+    x : array_like
+        Angles in degrees.
+
+    Returns
+    -------
+    y : ndarray
+        The corresponding angle in radians.
+
+    See Also
+    --------
+    radians : Convert angles from degrees to radians.
+    rad2deg : Convert angles from radians to degrees.
+    degrees : Convert angles from radians to degrees.
+    unwrap : Remove large jumps in angle by wrapping.
+
+    Notes
+    -----
+    ``deg2rad(x)`` is ``x * pi / 180``.
+
+    Examples
+    --------
+    >>> np.deg2rad(180)
+    3.1415926535897931
+
+    """)
+
+add_newdoc('numpy.core.umath', 'reciprocal',
+    """
+    Return element-wise reciprocal.
+
+    Parameters
+    ----------
+    x : array_like
+        Input value.
+
+    Returns
+    -------
+    y : ndarray
+        The reciprocal of `x`, element-wise.
+
+    Examples
+    --------
+    >>> np.reciprocal(2.)
+    0.5
+    >>> np.reciprocal([1, 2., 3.33])
+    array([ 1.       ,  0.5      ,  0.3003003])
+
+    """)
+
+add_newdoc('numpy.core.umath', 'remainder',
+    """
+    Returns element-wise remainder of division.
+
+    Computes `x1 - floor(x1/x2)*x2`.
+
+    Parameters
+    ----------
+    x1 : array_like
+        Dividend array.
+    x2 : array_like
+        Divisor array.
+
+    Returns
+    -------
+    y : ndarray
+        The remainder of the quotient `x1/x2`, element-wise. Returns a scalar
+        if both `x1` and `x2` are scalars.
+
+    See Also
+    --------
+    divide
+    floor
+
+    Notes
+    -----
+    Returns 0 when `x2` is 0.
+
+    Examples
+    --------
+    >>> np.remainder([4,7],[2,3])
+    array([0, 1])
+
+    """)
+
+add_newdoc('numpy.core.umath', 'right_shift',
+    """
+    Shift the bits of an integer to the right.
+
+    Bits are shifted to the right by removing `x2` bits at the right of `x1`.
+    Since the internal representation of numbers is in binary format, this
+    operation is equivalent to dividing `x1` by ``2**x2``.
+
+    Parameters
+    ----------
+    x1 : array_like, int
+        Input values.
+    x2 : array_like, int
+        Number of bits to remove at the right of `x1`.
+
+    Returns
+    -------
+    out : ndarray, int
+        Return `x1` with bits shifted `x2` times to the right.
+
+    See Also
+    --------
+    left_shift : Shift the bits of an integer to the left.
+    binary_repr : Return the binary representation of the input number
+        as a string.
+
+    Examples
+    --------
+    >>> np.right_shift(10, [1,2,3])
+    array([5, 2, 1])
+
+    """)
+
+add_newdoc('numpy.core.umath', 'rint',
+    """
+    Round elements of the array to the nearest integer.
+
+    Parameters
+    ----------
+    x : array_like
+        Input array.
+
+    Returns
+    -------
+    out : ndarray
+        Output array is the same shape and type as `x`.
+
+    Examples
+    --------
+    >>> a = [-4.1, -3.6, -2.5, 0.1, 2.5, 3.1, 3.9]
+    >>> np.rint(a)
+    array([-4., -4., -2.,  0.,  2.,  3.,  4.])
+
+    """)
+
+add_newdoc('numpy.core.umath', 'sign',
+    """
+    Returns an element-wise indication of the sign of a number.
+
+    The `sign` function returns ``-1 if x < 0, 0 if x==0, 1 if x > 0``.
+
+    Parameters
+    ----------
+    x : array_like
+        Input values.
+
+    Returns
+    -------
+    y : ndarray
+        The sign of `x`.
+
+    Examples
+    --------
+    >>> np.sign([-5., 4.5])
+    array([-1.,  1.])
+    >>> np.sign(0)
+    0
+
+    """)
+
+add_newdoc('numpy.core.umath', 'signbit',
+    """
+    Returns element-wise True where signbit is set (less than zero).
+
+    Parameters
+    ----------
+    x : array_like
+        The input value(s).
+
+    Returns
+    -------
+    out : array_like, bool
+        Output.
+
+    Examples
+    --------
+    >>> np.signbit(-1.2)
+    True
+    >>> np.signbit(np.array([1, -2.3, 2.1]))
+    array([False,  True, False], dtype=bool)
+
+    """)
+
+add_newdoc('numpy.core.umath', 'sin',
+    """
+    Trigonometric sine, element-wise.
+
+    Parameters
+    ----------
+    x : array_like
+        Angle, in radians (:math:`2 \\pi` rad equals 360 degrees).
+
+    Returns
+    -------
+    y : array_like
+        The sine of each element of x.
+
+    See Also
+    --------
+    arcsin, sinh, cos
+
+    Notes
+    -----
+    The sine is one of the fundamental functions of trigonometry
+    (the mathematical study of triangles). Consider a circle of radius
+    1 centered on the origin. A ray comes in from the :math:`+x` axis,
+    makes an angle at the origin (measured counter-clockwise from that
+    axis), and departs from the origin. The :math:`y` coordinate of
+    the outgoing ray's intersection with the unit circle is the sine
+    of that angle. It ranges from -1 for :math:`x=3\\pi / 2` to
+    +1 for :math:`x=\\pi / 2`. The function has zeroes where the angle is
+    a multiple of :math:`\\pi`. Sines of angles between :math:`\\pi` and
+    :math:`2\\pi` are negative. The numerous properties of the sine and
+    related functions are included in any standard trigonometry text.
+
+    Examples
+    --------
+    Print sine of one angle:
+
+    >>> np.sin(np.pi/2.)
+    1.0
+
+    Print sines of an array of angles given in degrees:
+
+    >>> np.sin(np.array((0., 30., 45., 60., 90.)) * np.pi / 180. )
+    array([ 0.        ,  0.5       ,  0.70710678,  0.8660254 ,  1.        ])
+
+    Plot the sine function:
+
+    >>> import matplotlib.pylab as plt
+    >>> x = np.linspace(-np.pi, np.pi, 201)
+    >>> plt.plot(x, np.sin(x))
+    >>> plt.xlabel('Angle [rad]')
+    >>> plt.ylabel('sin(x)')
+    >>> plt.axis('tight')
+    >>> plt.show()
+
+    """)
+
+add_newdoc('numpy.core.umath', 'sinh',
+    """
+    Hyperbolic sine, element-wise.
+
+    Equivalent to ``1/2 * (np.exp(x) - np.exp(-x))`` or
+    ``-1j * np.sin(1j*x)``.
+
+    Parameters
+    ----------
+    x : array_like
+        Input array.
+
+    Returns
+    -------
+    out : ndarray
+        Output array of same shape as `x`.
+
+    """)
+
+add_newdoc('numpy.core.umath', 'sqrt',
+    """
+    Return the positive square-root of an array, element-wise.
+
+    Parameters
+    ----------
+    x : array_like
+        The square root of each element in this array is calculated.
+
+    Returns
+    -------
+    y : ndarray
+        An array of the same shape as `x`, containing the square-root of
+        each element in `x`. If any element in `x` is complex, a complex
+        array is returned. If all of the elements of `x` are real, negative
+        elements return ``nan``.
+
+    See Also
+    --------
+    numpy.lib.scimath.sqrt
+        A version which returns complex numbers when given negative reals.
+
+    Notes
+    -----
+    `sqrt` has a branch cut ``[-inf, 0)`` and is continuous from above on it.
+
+    Examples
+    --------
+    >>> np.sqrt([1,4,9])
+    array([ 1.,  2.,  3.])
+
+    >>> np.sqrt([4, -1, -3+4J])
+    array([ 2.+0.j,  0.+1.j,  1.+2.j])
+
+    >>> np.sqrt([4, -1, np.inf])
+    array([  2.,  NaN,  Inf])
+
+    """)
+
+add_newdoc('numpy.core.umath', 'square',
+    """
+    Return the element-wise square of the input.
+
+    Parameters
+    ----------
+    x : array_like
+        Input data.
+
+    Returns
+    -------
+    out : ndarray
+        Element-wise `x*x`, of the same shape and dtype as `x`.
+        Returns scalar if `x` is a scalar.
+
+    See Also
+    --------
+    numpy.linalg.matrix_power
+    sqrt
+    power
+
+    Examples
+    --------
+    >>> np.square([-1j, 1])
+    array([-1.-0.j,  1.+0.j])
+
+    """)
+
+add_newdoc('numpy.core.umath', 'subtract',
+    """
+    Subtract arguments element-wise.
+
+    Parameters
+    ----------
+    x1, x2 : array_like
+        The arrays to be subtracted from each other. If the shapes of `x1`
+        and `x2` differ, they must be broadcastable to a common shape.
+
+    Returns
+    -------
+    y : ndarray
+        The difference of `x1` and `x2`, element-wise. Returns a scalar if
+        both `x1` and `x2` are scalars.
+
+    Notes
+    -----
+    Equivalent to `x1` - `x2` in terms of array-broadcasting.
+
+    Examples
+    --------
+    >>> np.subtract(1.0, 4.0)
+    -3.0
+
+    >>> x1 = np.arange(9.0).reshape((3, 3))
+    >>> x2 = np.arange(3.0)
+    >>> np.subtract(x1, x2)
+    array([[ 0.,  0.,  0.],
+           [ 3.,  3.,  3.],
+           [ 6.,  6.,  6.]])
+
+    """)
+
+add_newdoc('numpy.core.umath', 'tan',
+    """
+    Compute tangent element-wise.
+
+    Parameters
+    ----------
+    x : array_like
+        Angles in radians.
+
+    Returns
+    -------
+    y : ndarray
+        The corresponding tangent values.
+
+    Examples
+    --------
+    >>> from math import pi
+    >>> np.tan(np.array([-pi,pi/2,pi]))
+    array([  1.22460635e-16,   1.63317787e+16,  -1.22460635e-16])
+
+    """)
+
+add_newdoc('numpy.core.umath', 'tanh',
+    """
+    Hyperbolic tangent element-wise.
+
+    Parameters
+    ----------
+    x : array_like
+        Input array.
+
+    Returns
+    -------
+    y : ndarray
+        The corresponding hyperbolic tangent values.
+
+    """)
+
+add_newdoc('numpy.core.umath', 'true_divide',
+    """
+    Returns a true division of the inputs, element-wise.
+
+    Instead of the traditional Python 'floor division' of integer operands,
+    this returns a true division. True division adjusts the output type to
+    present the best answer, regardless of input types.
+
+    Parameters
+    ----------
+    x1 : array_like
+        Dividend array.
+    x2 : array_like
+        Divisor array.
+
+    Returns
+    -------
+    out : ndarray
+        Result is scalar if both inputs are scalar, ndarray otherwise.
+
+    Notes
+    -----
+    The floor division operator ``//`` was added in Python 2.2; for integer
+    arguments it gives the same result as the default ``/`` operator. The
+    default floor-division behaviour of ``/`` on integers can be replaced by
+    true division with ``from __future__ import division``.
+
+    In Python 3.0, ``//`` will be the floor division operator and ``/`` will
+    be the true division operator. The ``true_divide(x1, x2)`` function is
+    equivalent to true division in Python.
+
+    """)
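[Editorial aside, not part of the patch above: the logaddexp/logaddexp2 docstrings describe the underflow motivation but ship without an Examples section, and the invert docstring walks through the two's-complement arithmetic. The short Python sketch below illustrates both points; it is purely illustrative, the variable names are arbitrary, and it assumes nothing beyond numpy being importable.]

import numpy as np

# logaddexp: log(exp(x) + exp(y)) evaluated without leaving log space.
# For x, y near -1000, exp() underflows to 0.0 and the naive formula
# returns -inf, while the shifted form max(x, y) + log1p(exp(-|x - y|))
# (the usual log-sum-exp trick) keeps the result finite.
x, y = -1000.0, -1000.1
with np.errstate(divide='ignore'):
    naive = np.log(np.exp(x) + np.exp(y))          # -inf: exp() underflowed
shifted = max(x, y) + np.log1p(np.exp(-abs(x - y)))
print(naive, shifted, np.logaddexp(x, y))          # -inf -999.355... -999.355...

# invert: for signed integers, bit-wise NOT is the two's-complement
# identity ~x == -x - 1; for an unsigned W-bit type it is (2**W - 1) - x.
a = np.int8(13)
b = np.uint8(13)
print(np.invert(a), -int(a) - 1)                   # -14 -14
print(np.invert(b), (2**8 - 1) - int(b))           # 242 242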