[pypy-commit] pypy missing-ndarray-attributes: hack around seemingly nonfunctional @staticmethod

mattip noreply at buildbot.pypy.org
Mon Feb 4 11:16:05 CET 2013


Author: Matti Picus <matti.picus at gmail.com>
Branch: missing-ndarray-attributes
Changeset: r60862:72bf13db5cf2
Date: 2013-02-04 12:11 +0200
http://bitbucket.org/pypy/pypy/changeset/72bf13db5cf2/

Log:	hack around seemingly nonfunctional @staticmethod
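
The hack: instead of calling itemtype.for_computation(), a @staticmethod that the RPython annotator does not seem to pick up from inside the generated Repr class, make_sort_function now takes a comp_type tag ('int', 'float' or 'complex') and branches on it directly. A minimal sketch of the pattern in plain Python 2 (make_repr and its for_computation method are illustrative stand-ins, not the real sort.py code):

    # Sketch only: the factory closes over a comp_type string and the
    # generated class branches on it, instead of calling a @staticmethod
    # on the item type.
    def make_repr(comp_type):
        class Repr(object):
            def for_computation(self, v):
                if comp_type == 'int':
                    return int(v)
                elif comp_type == 'float':
                    return float(v)
                elif comp_type == 'complex':
                    return (float(v[0]), float(v[1]))
                raise NotImplementedError('cannot reach')
        return Repr

    FloatRepr = make_repr('float')
    print FloatRepr().for_computation('1.5')   # prints 1.5

Since comp_type is a fixed string for each class the factory builds, the branch is effectively a per-class constant rather than a runtime dispatch through the item type.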

diff --git a/pypy/module/micronumpy/arrayimpl/sort.py b/pypy/module/micronumpy/arrayimpl/sort.py
--- a/pypy/module/micronumpy/arrayimpl/sort.py
+++ b/pypy/module/micronumpy/arrayimpl/sort.py
@@ -16,9 +16,10 @@
 
 INT_SIZE = rffi.sizeof(lltype.Signed)
 
-def make_sort_function(space, itemtype, count=1):
+def make_sort_function(space, itemtype, comp_type, count=1):
     TP = itemtype.T
     step = rffi.sizeof(TP)
+    print itemtype
     
     class Repr(object):
         def __init__(self, index_stride_size, stride_size, size, values,
@@ -35,14 +36,21 @@
             if count < 2:
                 v = raw_storage_getitem(TP, self.values, item * self.stride_size
                                     + self.start)
-                v = itemtype.for_computation(v)
+                if comp_type=='int':
+                    v = int(v)
+                elif comp_type=='float':
+                    v = float(v)
+                elif comp_type=='complex':
+                    v = float(v[0]),float(v[1])
+                else:
+                    raise NotImplementedError('cannot reach')
             else:
                 v = []
                 for i in range(count):
                     _v = raw_storage_getitem(TP, self.values, item * self.stride_size
                                     + self.start + step * i)
                     v.append(_v)
-                v = itemtype.for_computation(v)
+                v = for_computation(v)
             return (v, raw_storage_getitem(lltype.Signed, self.indexes,
                                            item * self.index_stride_size +
                                            self.index_start))
@@ -145,7 +153,7 @@
     cache = space.fromcache(SortCache) # that populates SortClasses
     itemtype = arr.dtype.itemtype
     for tp in all_types:
-        if isinstance(itemtype, tp):
+        if isinstance(itemtype, tp[0]):
             return cache._lookup(tp)(arr, space, w_axis,
                                      itemtype.get_element_size())
     # XXX this should probably be changed
@@ -153,9 +161,10 @@
            space.wrap("sorting of non-numeric types " + \
                   "'%s' is not implemented" % arr.dtype.get_name(), ))
 
-all_types = (types.all_int_types + types.all_complex_types +
-             types.all_float_types)
-all_types = [i for i in all_types if not '_mixin_' in i.__dict__]
+all_types = (types.all_float_types) # + types.all_complex_types +
+            # types.all_int_types)
+all_types = [i for i in all_types if not '_mixin_' in i[0].__dict__]
+all_types = [i for i in all_types if not 'NonNative' in str(i[0])]
 all_types = unrolling_iterable(all_types)
 
 class SortCache(object):
@@ -166,10 +175,10 @@
             return
         self.built = True
         cache = {}
-        for cls in all_types._items:
+        for cls, it in all_types._items:
             if cls in types.all_complex_types:
-                cache[cls] = make_sort_function(space, cls, 2)
+                cache[cls] = make_sort_function(space, cls, it, 2)
             else:
-                cache[cls] = make_sort_function(space, cls)
+                cache[cls] = make_sort_function(space, cls, it)
         self.cache = cache
-        self._lookup = specialize.memo()(lambda tp : cache[tp])
+        self._lookup = specialize.memo()(lambda tp : cache[tp[0]])
diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py
--- a/pypy/module/micronumpy/types.py
+++ b/pypy/module/micronumpy/types.py
@@ -31,10 +31,11 @@
     specialize.argtype(1)(func)
     @functools.wraps(func)
     def dispatcher(self, v):
+        raw = self.unbox(v)
         return self.box(
             func(
                 self,
-                self.for_computation(self.unbox(v))
+                self.for_computation(raw)
             )
         )
     return dispatcher
@@ -142,6 +143,7 @@
         #XXX this is the place to display a warning
         return self.box(real)
 
+    @specialize.argtype(1)
     def unbox(self, box):
         assert isinstance(box, self.BoxType)
         return box.value
@@ -954,53 +956,6 @@
         swapped_value = byteswap(rffi.cast(self.T, value))
         raw_storage_setitem(storage, i + offset, swapped_value)
 
-class BaseFloat16(Float):
-    _mixin_ = True
-
-    _attrs_ = ()
-    _STORAGE_T = rffi.USHORT
-    T = rffi.SHORT
-
-    BoxType = interp_boxes.W_Float16Box
-
-    @specialize.argtype(1)
-    def box(self, value):
-        return self.BoxType(rffi.cast(rffi.DOUBLE, value))
-
-    def runpack_str(self, s):
-        assert len(s) == 2
-        fval = unpack_float(s, native_is_bigendian)
-        return self.box(fval)
-
-    def default_fromstring(self, space):
-        return self.box(-1.0)
-
-    def byteswap(self, w_v):
-        value = self.unbox(w_v)
-        hbits = float_pack(value,2)
-        swapped = byteswap(rffi.cast(self._STORAGE_T, hbits))
-        return self.box(float_unpack(r_ulonglong(swapped), 2))
-
-class Float16(BaseType, BaseFloat16):
-    def _read(self, storage, i, offset):
-        hbits = raw_storage_getitem(self._STORAGE_T, storage, i + offset)
-        return float_unpack(r_ulonglong(hbits), 2)
-
-    def _write(self, storage, i, offset, value):
-        hbits = float_pack(value,2)
-        raw_storage_setitem(storage, i + offset,
-                rffi.cast(self._STORAGE_T, hbits))    
-
-class NonNativeFloat16(BaseType, BaseFloat16):
-    def _read(self, storage, i, offset):
-        hbits = raw_storage_getitem(self._STORAGE_T, storage, i + offset)
-        return float_unpack(r_ulonglong(byteswap(hbits)), 2)
-
-    def _write(self, storage, i, offset, value):
-        hbits = float_pack(value,2)
-        raw_storage_setitem(storage, i + offset,
-                byteswap(rffi.cast(self._STORAGE_T, hbits)))
-
 
 class Float32(BaseType, Float):
     _attrs_ = ()
@@ -1755,16 +1710,74 @@
 all_int_types = []
 all_complex_types = []
 
+def for_int_computation(v):
+    return widen(v)
+
+def for_float_computation(v):
+    return float(v)
+
+def for_complex_computation(v):
+    return float(v[0]), float(v[1])
+
 def _setup():
     # compute alignment
     for tp in globals().values():
         if isinstance(tp, type) and hasattr(tp, 'T'):
             tp.alignment = clibffi.cast_type_to_ffitype(tp.T).c_alignment
             if issubclass(tp, Float):
-                all_float_types.append(tp)
+                all_float_types.append((tp, 'float'))
             if issubclass(tp, Integer):
-                all_int_types.append(tp)
+                all_int_types.append((tp, 'int'))
             if issubclass(tp, ComplexFloating):
-                all_complex_types.append(tp)
+                all_complex_types.append((tp, 'complex'))
 _setup()
 del _setup
+
+class BaseFloat16(Float):
+    _mixin_ = True
+
+    _attrs_ = ()
+    _STORAGE_T = rffi.USHORT
+    T = rffi.SHORT
+
+    BoxType = interp_boxes.W_Float16Box
+
+    @specialize.argtype(1)
+    def box(self, value):
+        return self.BoxType(rffi.cast(rffi.DOUBLE, value))
+
+    def runpack_str(self, s):
+        assert len(s) == 2
+        fval = unpack_float(s, native_is_bigendian)
+        return self.box(fval)
+
+    def default_fromstring(self, space):
+        return self.box(-1.0)
+
+    def byteswap(self, w_v):
+        value = self.unbox(w_v)
+        hbits = float_pack(value,2)
+        swapped = byteswap(rffi.cast(self._STORAGE_T, hbits))
+        return self.box(float_unpack(r_ulonglong(swapped), 2))
+
+class Float16(BaseType, BaseFloat16):
+    def _read(self, storage, i, offset):
+        hbits = raw_storage_getitem(self._STORAGE_T, storage, i + offset)
+        return float_unpack(r_ulonglong(hbits), 2)
+
+    def _write(self, storage, i, offset, value):
+        hbits = float_pack(value,2)
+        raw_storage_setitem(storage, i + offset,
+                rffi.cast(self._STORAGE_T, hbits))    
+
+class NonNativeFloat16(BaseType, BaseFloat16):
+    def _read(self, storage, i, offset):
+        hbits = raw_storage_getitem(self._STORAGE_T, storage, i + offset)
+        return float_unpack(r_ulonglong(byteswap(hbits)), 2)
+
+    def _write(self, storage, i, offset, value):
+        hbits = float_pack(value,2)
+        raw_storage_setitem(storage, i + offset,
+                byteswap(rffi.cast(self._STORAGE_T, hbits)))
+
+
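
On the cache side, each entry registered by _setup() is now a (itemtype class, tag) pair, so the isinstance() dispatch and the memoized _lookup peel off element [0] while the tag is forwarded to make_sort_function. A rough stand-alone illustration of that shape (FakeFloat64 and build_sorter are hypothetical stand-ins for the real itemtype classes and make_sort_function; the real code iterates an unrolling_iterable and uses specialize.memo()):

    class FakeFloat64(object):
        pass

    def build_sorter(cls, tag, count=1):
        # stands in for make_sort_function(space, cls, tag, count)
        return '%s sorter (comp_type=%r, count=%d)' % (cls.__name__, tag, count)

    all_types = [(FakeFloat64, 'float')]

    cache = {}
    for cls, tag in all_types:
        cache[cls] = build_sorter(cls, tag)

    def lookup(pair):
        # mirrors the new cache[tp[0]] lookup in SortCache
        return cache[pair[0]]

    itemtype = FakeFloat64()
    for pair in all_types:
        if isinstance(itemtype, pair[0]):
            print lookup(pair)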

