[pypy-svn] r58270 - pypy/branch/gc-experiments/pypy/rpython/memory/gc

fijal at codespeak.net
Sat Sep 20 15:39:02 CEST 2008


Author: fijal
Date: Sat Sep 20 15:39:01 2008
New Revision: 58270

Modified:
   pypy/branch/gc-experiments/pypy/rpython/memory/gc/base.py
   pypy/branch/gc-experiments/pypy/rpython/memory/gc/markcompact.py
   pypy/branch/gc-experiments/pypy/rpython/memory/gc/semispace.py
Log:
Hack until test_direct works. Some code (especially regarding finalizers)
still makes no sense whatsoever; work in progress.
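
Summary of the refactoring below: the finalizer, weakref and id bookkeeping
that previously lived in SemiSpaceGC moves up into MovingGCBase, so that
MarkCompactGC can share it. Subclasses then follow the initialization
contract sketched here (SomeMovingGC is a hypothetical example; the chained
calls are taken from the diff):

    from pypy.rpython.memory.gc.base import MovingGCBase
    from pypy.rpython.memory.support import DEFAULT_CHUNK_SIZE

    class SomeMovingGC(MovingGCBase):     # hypothetical example subclass
        def __init__(self, chunk_size=DEFAULT_CHUNK_SIZE):
            # chaining sets up gcheaderbuilder, the AddressStack/Deque/Dict
            # factories, finalizer_lock_count and the id free list
            MovingGCBase.__init__(self, chunk_size)
            # ... subclass-specific fields ...

        def setup(self):
            # ... allocate the subclass's arena(s) ...
            # chaining creates objects_with_finalizers, run_finalizers,
            # objects_with_weakrefs and objects_with_id
            MovingGCBase.setup(self)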


Modified: pypy/branch/gc-experiments/pypy/rpython/memory/gc/base.py
==============================================================================
--- pypy/branch/gc-experiments/pypy/rpython/memory/gc/base.py	(original)
+++ pypy/branch/gc-experiments/pypy/rpython/memory/gc/base.py	Sat Sep 20 15:39:01 2008
@@ -1,5 +1,9 @@
 from pypy.rpython.lltypesystem import lltype, llmemory, llarena
 from pypy.rlib.debug import ll_assert
+from pypy.rpython.memory.gcheader import GCHeaderBuilder
+from pypy.rpython.memory.support import DEFAULT_CHUNK_SIZE
+from pypy.rpython.memory.support import get_address_stack, get_address_deque
+from pypy.rpython.memory.support import AddressDict
 
 class GCBase(object):
     _alloc_flavor_ = "raw"
@@ -194,6 +198,22 @@
     moving_gc = True
     first_unused_gcflag = first_gcflag << 3
 
+    def __init__(self, chunk_size=DEFAULT_CHUNK_SIZE):
+        GCBase.__init__(self)
+        self.gcheaderbuilder = GCHeaderBuilder(self.HDR)
+        self.AddressStack = get_address_stack(chunk_size)
+        self.AddressDeque = get_address_deque(chunk_size)
+        self.AddressDict = AddressDict
+        self.finalizer_lock_count = 0
+        self.id_free_list = self.AddressStack()
+        self.next_free_id = 1
+
+    def setup(self):
+        self.objects_with_finalizers = self.AddressDeque()
+        self.run_finalizers = self.AddressDeque()
+        self.objects_with_weakrefs = self.AddressStack()
+        self.objects_with_id = self.AddressDict()
+
     def can_move(self, addr):
         return True
 
@@ -231,6 +251,231 @@
             size = llarena.round_up_for_allocation(size)
         return size
 
+
+    def deal_with_objects_with_finalizers(self, scan):
+        # walk over the list of objects with finalizers;
+        # if one is not copied, add it to the list of to-be-called finalizers
+        # and copy it, to make the finalizer runnable
+        # We try to run the finalizers in a "reasonable" order, like
+        # CPython does.  The details of this algorithm are in
+        # pypy/doc/discussion/finalizer-order.txt.
+        new_with_finalizer = self.AddressDeque()
+        marked = self.AddressDeque()
+        pending = self.AddressStack()
+        self.tmpstack = self.AddressStack()
+        while self.objects_with_finalizers.non_empty():
+            x = self.objects_with_finalizers.popleft()
+            ll_assert(self._finalization_state(x) != 1, 
+                      "bad finalization state 1")
+            if self.surviving(x):
+                new_with_finalizer.append(self.get_forwarding_address(x))
+                continue
+            marked.append(x)
+            pending.append(x)
+            while pending.non_empty():
+                y = pending.pop()
+                state = self._finalization_state(y)
+                if state == 0:
+                    self._bump_finalization_state_from_0_to_1(y)
+                    self.trace(y, self._append_if_nonnull, pending)
+                elif state == 2:
+                    self._recursively_bump_finalization_state_from_2_to_3(y)
+            scan = self._recursively_bump_finalization_state_from_1_to_2(
+                       x, scan)
+
+        while marked.non_empty():
+            x = marked.popleft()
+            state = self._finalization_state(x)
+            ll_assert(state >= 2, "unexpected finalization state < 2")
+            newx = self.get_forwarding_address(x)
+            if state == 2:
+                self.run_finalizers.append(newx)
+                # we must also fix the state from 2 to 3 here, otherwise
+                # we leave the GCFLAG_FINALIZATION_ORDERING bit behind
+                # which will confuse the next collection
+                self._recursively_bump_finalization_state_from_2_to_3(x)
+            else:
+                new_with_finalizer.append(newx)
+
+        self.tmpstack.delete()
+        pending.delete()
+        marked.delete()
+        self.objects_with_finalizers.delete()
+        self.objects_with_finalizers = new_with_finalizer
+        return scan
+
+
+    def _append_if_nonnull(pointer, stack):
+        if pointer.address[0] != NULL:
+            stack.append(pointer.address[0])
+    _append_if_nonnull = staticmethod(_append_if_nonnull)
+
+    def _finalization_state(self, obj):
+        if self.surviving(obj):
+            newobj = self.get_forwarding_address(obj)
+            hdr = self.header(newobj)
+            if hdr.tid & GCFLAG_FINALIZATION_ORDERING:
+                return 2
+            else:
+                return 3
+        else:
+            hdr = self.header(obj)
+            if hdr.tid & GCFLAG_FINALIZATION_ORDERING:
+                return 1
+            else:
+                return 0
+
+    def _bump_finalization_state_from_0_to_1(self, obj):
+        ll_assert(self._finalization_state(obj) == 0,
+                  "unexpected finalization state != 0")
+        hdr = self.header(obj)
+        hdr.tid |= GCFLAG_FINALIZATION_ORDERING
+
+    def _recursively_bump_finalization_state_from_2_to_3(self, obj):
+        ll_assert(self._finalization_state(obj) == 2,
+                  "unexpected finalization state != 2")
+        newobj = self.get_forwarding_address(obj)
+        pending = self.tmpstack
+        ll_assert(not pending.non_empty(), "tmpstack not empty")
+        pending.append(newobj)
+        while pending.non_empty():
+            y = pending.pop()
+            hdr = self.header(y)
+            if hdr.tid & GCFLAG_FINALIZATION_ORDERING:     # state 2 ?
+                hdr.tid &= ~GCFLAG_FINALIZATION_ORDERING   # change to state 3
+                self.trace(y, self._append_if_nonnull, pending)
+
+    def _recursively_bump_finalization_state_from_1_to_2(self, obj, scan):
+        # recursively convert objects from state 1 to state 2.
+        # Note that copy() copies all bits, including the
+        # GCFLAG_FINALIZATION_ORDERING.  The mapping between
+        # state numbers and the presence of this bit was designed
+        # for the following to work :-)
+        self.copy(obj)
+        return self.scan_copied(scan)
+
+    def invalidate_weakrefs(self):
+        # walk over list of objects that contain weakrefs
+        # if the object it references survives then update the weakref
+        # otherwise invalidate the weakref
+        new_with_weakref = self.AddressStack()
+        while self.objects_with_weakrefs.non_empty():
+            obj = self.objects_with_weakrefs.pop()
+            if not self.surviving(obj):
+                continue # weakref itself dies
+            obj = self.get_forwarding_address(obj)
+            offset = self.weakpointer_offset(self.get_type_id(obj))
+            pointing_to = (obj + offset).address[0]
+            # XXX I think that pointing_to cannot be NULL here
+            if pointing_to:
+                if self.surviving(pointing_to):
+                    (obj + offset).address[0] = self.get_forwarding_address(
+                        pointing_to)
+                    new_with_weakref.append(obj)
+                else:
+                    (obj + offset).address[0] = NULL
+        self.objects_with_weakrefs.delete()
+        self.objects_with_weakrefs = new_with_weakref
+
+    def update_run_finalizers(self):
+        # we are in an inner collection, caused by a finalizer
+        # the run_finalizers objects need to be copied
+        new_run_finalizer = self.AddressDeque()
+        while self.run_finalizers.non_empty():
+            obj = self.run_finalizers.popleft()
+            new_run_finalizer.append(self.copy(obj))
+        self.run_finalizers.delete()
+        self.run_finalizers = new_run_finalizer
+
+    def execute_finalizers(self):
+        self.finalizer_lock_count += 1
+        try:
+            while self.run_finalizers.non_empty():
+                #print "finalizer"
+                if self.finalizer_lock_count > 1:
+                    # the outer invocation of execute_finalizers() will do it
+                    break
+                obj = self.run_finalizers.popleft()
+                finalizer = self.getfinalizer(self.get_type_id(obj))
+                finalizer(obj)
+        finally:
+            self.finalizer_lock_count -= 1
+
+    def id(self, ptr):
+        obj = llmemory.cast_ptr_to_adr(ptr)
+        if self.header(obj).tid & GCFLAG_EXTERNAL:
+            result = self._compute_id_for_external(obj)
+        else:
+            result = self._compute_id(obj)
+        return llmemory.cast_adr_to_int(result)
+
+    def _next_id(self):
+        # return an id not currently in use (as an address instead of an int)
+        if self.id_free_list.non_empty():
+            result = self.id_free_list.pop()    # reuse a dead id
+        else:
+            # make up a fresh id number
+            result = llmemory.cast_int_to_adr(self.next_free_id)
+            self.next_free_id += 2    # only odd numbers, to make lltype
+                                      # and llmemory happy and to avoid
+                                      # clashes with real addresses
+        return result
+
+    def _compute_id(self, obj):
+        # check whether the object is already listed in objects_with_id
+        result = self.objects_with_id.get(obj)
+        if not result:
+            result = self._next_id()
+            self.objects_with_id.setitem(obj, result)
+        return result
+
+    def _compute_id_for_external(self, obj):
+        # For prebuilt objects, we can simply return their address.
+        # This method is overridden by the HybridGC.
+        return obj
+
+    def update_objects_with_id(self):
+        old = self.objects_with_id
+        new_objects_with_id = self.AddressDict(old.length())
+        old.foreach(self._update_object_id_FAST, new_objects_with_id)
+        old.delete()
+        self.objects_with_id = new_objects_with_id
+
+    def _update_object_id(self, obj, id, new_objects_with_id):
+        # safe version (used by subclasses)
+        if self.surviving(obj):
+            newobj = self.get_forwarding_address(obj)
+            new_objects_with_id.setitem(newobj, id)
+        else:
+            self.id_free_list.append(id)
+
+    def _update_object_id_FAST(self, obj, id, new_objects_with_id):
+        # unsafe version; assumes that new_objects_with_id is large enough
+        if self.surviving(obj):
+            newobj = self.get_forwarding_address(obj)
+            new_objects_with_id.insertclean(newobj, id)
+        else:
+            self.id_free_list.append(id)
+
+    def debug_check_object(self, obj):
+        """Check the invariants about 'obj' that should be true
+        between collections."""
+        tid = self.header(obj).tid
+        if tid & GCFLAG_EXTERNAL:
+            ll_assert(tid & GCFLAG_FORWARDED, "bug: external+!forwarded")
+            ll_assert(not (self.tospace <= obj < self.free),
+                      "external flag but object inside the semispaces")
+        else:
+            ll_assert(not (tid & GCFLAG_FORWARDED), "bug: !external+forwarded")
+            ll_assert(self.tospace <= obj < self.free,
+                      "!external flag but object outside the semispaces")
+        ll_assert(not (tid & GCFLAG_FINALIZATION_ORDERING),
+                  "unexpected GCFLAG_FINALIZATION_ORDERING")
+
+    def debug_check_can_copy(self, obj):
+        ll_assert(not (self.tospace <= obj < self.free),
+                  "copy() on already-copied object")
+
 def choose_gc_from_config(config):
     """Return a (GCClass, GC_PARAMS) from the given config object.
     """

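The finalizer-ordering code moved into base.py above classifies each object
with a finalizer into one of four states, encoded by two observable facts:
whether the object survives the collection and whether
GCFLAG_FINALIZATION_ORDERING is set in its header. A minimal pure-Python
sketch of that encoding (illustrative only, not part of the commit;
'surviving' and 'ordering_bit_set' stand in for self.surviving(obj) and the
header-flag test):

    def finalization_state(surviving, ordering_bit_set):
        # mirrors _finalization_state() in the diff above:
        #   0: dead, not yet visited by the ordering walk
        #   1: dead, visited (GCFLAG_FINALIZATION_ORDERING set)
        #   2: surviving, bit still set  -> finalizer gets run now
        #   3: surviving, bit cleared    -> finalizer deferred
        if surviving:
            if ordering_bit_set:
                return 2
            return 3
        else:
            if ordering_bit_set:
                return 1
            return 0

This is why deal_with_objects_with_finalizers() bumps state-2 objects to 3
right after queueing them on run_finalizers: leaving the ordering bit set
would confuse the next collection, as the in-code comment notes.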
Modified: pypy/branch/gc-experiments/pypy/rpython/memory/gc/markcompact.py
==============================================================================
--- pypy/branch/gc-experiments/pypy/rpython/memory/gc/markcompact.py	(original)
+++ pypy/branch/gc-experiments/pypy/rpython/memory/gc/markcompact.py	Sat Sep 20 15:39:01 2008
@@ -1,7 +1,7 @@
 
 from pypy.rpython.lltypesystem import lltype, llmemory, llarena
-from pypy.rpython.memory.gc.base import MovingGCBase
-from pypy.rpython.memory.gcheader import GCHeaderBuilder
+from pypy.rpython.memory.gc.base import MovingGCBase, GCFLAG_FORWARDED,\
+     TYPEID_MASK
 from pypy.rlib.debug import ll_assert
 from pypy.rpython.memory.support import DEFAULT_CHUNK_SIZE
 from pypy.rpython.memory.support import get_address_stack, get_address_deque
@@ -15,23 +15,22 @@
 memoryError = MemoryError()
 
 class MarkCompactGC(MovingGCBase):
-    HDR = lltype.Struct('header', ('tid', lltype.Signed))
+    HDR = lltype.Struct('header', ('tid', lltype.Signed),
+                        ('forward_ptr', llmemory.Address))
 
     def __init__(self, chunk_size=DEFAULT_CHUNK_SIZE, space_size=16*(1024**2)):
         # space_size should be maximal available virtual memory.
         # this way we'll never need to copy anything nor implement
         # paging on our own
         self.space_size = space_size
-        self.gcheaderbuilder = GCHeaderBuilder(self.HDR)
-        self.AddressStack = get_address_stack(chunk_size)
-        self.AddressDeque = get_address_deque(chunk_size)
-        self.AddressDict = AddressDict
+        MovingGCBase.__init__(self, chunk_size)
         self.counter = 0
 
     def setup(self):
         self.space = llarena.arena_malloc(self.space_size, True)
         ll_assert(bool(self.space), "couldn't allocate arena")
         self.spaceptr = self.space
+        MovingGCBase.setup(self)
 
     def init_gc_object(self, addr, typeid, flags=0):
         hdr = llmemory.cast_adr_to_ptr(addr, lltype.Ptr(self.HDR))
@@ -47,6 +46,10 @@
         llarena.arena_reserve(result, totalsize)
         self.init_gc_object(result, typeid)
         self.spaceptr += totalsize
+        if has_finalizer:
+            self.objects_with_finalizers.append(result + size_gc_header)
+        if contains_weakptr:
+            self.objects_with_weakrefs.append(result + size_gc_header)
         return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF)
     
     def malloc_varsize_clear(self, typeid, length, size, itemsize,
@@ -66,7 +69,8 @@
         self.init_gc_object(result, typeid)
         (result + size_gc_header + offset_to_length).signed[0] = length
         self.spaceptr = result + llarena.round_up_for_allocation(totalsize)
-        # XXX has_finalizer etc.
+        if has_finalizer:
+            self.objects_with_finalizers.append(result + size_gc_header)
         return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF)
 
     def eventually_collect(self):
@@ -80,14 +84,44 @@
     def collect(self):
         self.debug_check_consistency()
         self.mark()
-        self.compact()
+        if self.run_finalizers.non_empty():
+            self.update_run_finalizers()
+        if self.objects_with_finalizers.non_empty():
+            self.deal_with_objects_with_finalizers(self.space)
+        if self.objects_with_weakrefs.non_empty():
+            self.invalidate_weakrefs()
+        self.debug_check_consistency()
+        toaddr = llarena.arena_new_view(self.space)
+        self.create_forward_pointers(toaddr)
+        self.debug_check_consistency()
         self.update_forward_refs()
+        self.compact(toaddr)
+        self.space = toaddr
         self.debug_check_consistency()
 
-    def compact(self):
-        self.forwardrefs = self.AddressDict()
+    def create_forward_pointers(self, toaddr):
+        fromaddr = self.space
+        size_gc_header = self.gcheaderbuilder.size_gc_header
+        while fromaddr < self.spaceptr:
+            hdr = llmemory.cast_adr_to_ptr(fromaddr, lltype.Ptr(self.HDR))
+            obj = fromaddr + size_gc_header
+            objsize = self.get_size(obj)
+            totalsize = size_gc_header + objsize
+            if hdr.tid & GCFLAG_MARKBIT:
+                # this object survives; compute its forwarding address
+                if fromaddr.offset != toaddr.offset:
+                    # this object is forwarded, set forward bit and address
+                    hdr.tid |= GCFLAG_FORWARDED
+                    llarena.arena_reserve(toaddr, totalsize)
+                    hdr.forward_ptr = toaddr + size_gc_header
+                toaddr += size_gc_header + objsize
+            fromaddr += size_gc_header + objsize
+
+    def get_type_id(self, addr):
+        return self.header(addr).tid & TYPEID_MASK
+
+    def compact(self, toaddr):
         fromaddr = self.space
-        toaddr = self.space
         size_gc_header = self.gcheaderbuilder.size_gc_header
         while fromaddr < self.spaceptr:
             hdr = llmemory.cast_adr_to_ptr(fromaddr, lltype.Ptr(self.HDR))
@@ -98,31 +132,37 @@
                 # this object dies, clear arena                
                 llarena.arena_reset(fromaddr, totalsize, True)
             else:
-                # this objects survives, clear MARKBIT
-                hdr.tid &= ~GCFLAG_MARKBIT
-                if fromaddr != toaddr:
+                if hdr.tid & GCFLAG_FORWARDED:
                     # this object needs to be copied somewhere
-
                     # first approach - copy object if possible, otherwise
                     # copy it somewhere else and keep track of that
-                    self.forwardrefs.setitem(fromaddr, toaddr)
                     if toaddr + totalsize > fromaddr:
                         # this is the worst possible scenario: object does
                         # not fit inside space to copy
                         xxx
                     else:
                         # object fits there: copy
-                        llop.debug_print(lltype.Void, fromaddr, "copied to", toaddr,
-                                         "tid", self.header(obj).tid,
-                                         "size", totalsize)
-                        llarena.arena_reserve(toaddr, totalsize)
+                        hdr.tid &= ~(GCFLAG_MARKBIT|GCFLAG_FORWARDED)
+                        #llop.debug_print(lltype.Void, fromaddr, "copied to", toaddr,
+                        #                 "tid", self.header(obj).tid,
+                        #                 "size", totalsize)
                         llmemory.raw_memcopy(obj - size_gc_header, toaddr, totalsize)
                         llarena.arena_reset(fromaddr, totalsize, True)
+                else:
+                    hdr.tid &= ~(GCFLAG_MARKBIT|GCFLAG_FORWARDED)
+                    # XXX this is here only to make llarena happier; it makes
+                    #     no sense otherwise and must be disabled when translated
+                    llarena.arena_reserve(toaddr, totalsize)
+                    llmemory.raw_memcopy(obj - size_gc_header, toaddr, totalsize)
                 toaddr += size_gc_header + objsize
             fromaddr += size_gc_header + objsize
         self.spaceptr = toaddr
 
     def update_forward_refs(self):
+        self.root_walker.walk_roots(
+            MarkCompactGC._trace_copy,  # stack roots
+            MarkCompactGC._trace_copy,  # static in prebuilt non-gc structures
+            MarkCompactGC._trace_copy)  # static in prebuilt gc objects
         ptr = self.space
         size_gc_header = self.gcheaderbuilder.size_gc_header
         while ptr < self.spaceptr:
@@ -131,12 +171,15 @@
             totalsize = size_gc_header + objsize
             self.trace(obj, self._trace_copy, None)
             ptr += totalsize
-        self.forwardrefs = None
 
-    def _trace_copy(self, pointer, ignored):
+    def _trace_copy(self, pointer, ignored=None):
         addr = pointer.address[0]
+        size_gc_header = self.gcheaderbuilder.size_gc_header
         if addr != NULL:
-            pointer.address[0] = self.forwardrefs.get(addr, addr)
+            hdr = llmemory.cast_adr_to_ptr(addr - size_gc_header,
+                                            lltype.Ptr(self.HDR))
+            if hdr.tid & GCFLAG_FORWARDED:
+                pointer.address[0] = hdr.forward_ptr
 
     def mark(self):
         self.root_walker.walk_roots(
@@ -149,3 +192,7 @@
         if obj != NULL:
             self.header(obj).tid |= GCFLAG_MARKBIT
             self.trace(obj, self._mark_object, None)
+
+    def debug_check_object(self, obj):
+        # XXX write it down
+        pass

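The new compaction scheme in markcompact.py works in passes: mark() sets
GCFLAG_MARKBIT, create_forward_pointers() walks the heap once to compute
where each surviving object will slide to (recording it in the new
forward_ptr header field and setting GCFLAG_FORWARDED only for objects that
actually move), update_forward_refs() rewrites all pointers, and compact()
performs the moves. The address computation can be sketched over a toy heap
of (size, marked) pairs (illustrative only; the real code works on arena
addresses and headers):

    def compute_forwarding(objects):
        # objects: list of (totalsize, marked) pairs in heap order.
        # Returns {index: new_offset} for objects that move, mimicking
        # create_forward_pointers() above.
        forward = {}
        fromaddr = toaddr = 0
        for i, (totalsize, marked) in enumerate(objects):
            if marked:
                if fromaddr != toaddr:
                    forward[i] = toaddr      # survivor slides down
                toaddr += totalsize          # next free slot in the new view
            fromaddr += totalsize
        return forward

    # compute_forwarding([(8, True), (16, False), (8, True)]) == {2: 8}:
    # the dead 16-byte object leaves a gap that the third object moves into.

Because the forwarding address now lives in the header itself, the old
self.forwardrefs AddressDict (and the dictionary lookups it implied) can go
away.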
Modified: pypy/branch/gc-experiments/pypy/rpython/memory/gc/semispace.py
==============================================================================
--- pypy/branch/gc-experiments/pypy/rpython/memory/gc/semispace.py	(original)
+++ pypy/branch/gc-experiments/pypy/rpython/memory/gc/semispace.py	Sat Sep 20 15:39:01 2008
@@ -4,7 +4,6 @@
 from pypy.rpython.memory.support import DEFAULT_CHUNK_SIZE
 from pypy.rpython.memory.support import get_address_stack, get_address_deque
 from pypy.rpython.memory.support import AddressDict
-from pypy.rpython.memory.gcheader import GCHeaderBuilder
 from pypy.rpython.lltypesystem import lltype, llmemory, llarena
 from pypy.rlib.objectmodel import free_non_gc_object
 from pypy.rlib.debug import ll_assert
@@ -39,17 +38,10 @@
 
     def __init__(self, chunk_size=DEFAULT_CHUNK_SIZE, space_size=4096,
                  max_space_size=sys.maxint//2+1):
-        MovingGCBase.__init__(self)
+        MovingGCBase.__init__(self, chunk_size)
         self.space_size = space_size
         self.max_space_size = max_space_size
-        self.gcheaderbuilder = GCHeaderBuilder(self.HDR)
-        self.AddressStack = get_address_stack(chunk_size)
-        self.AddressDeque = get_address_deque(chunk_size)
-        self.AddressDict = AddressDict
-        self.finalizer_lock_count = 0
         self.red_zone = 0
-        self.id_free_list = self.AddressStack()
-        self.next_free_id = 1
 
     def setup(self):
         if DEBUG_PRINT:
@@ -61,10 +53,7 @@
         self.fromspace = llarena.arena_malloc(self.space_size, True)
         ll_assert(bool(self.fromspace), "couldn't allocate fromspace")
         self.free = self.tospace
-        self.objects_with_finalizers = self.AddressDeque()
-        self.run_finalizers = self.AddressDeque()
-        self.objects_with_weakrefs = self.AddressStack()
-        self.objects_with_id = self.AddressDict()
+        MovingGCBase.setup(self)
 
     # This class only defines the malloc_{fixed,var}size_clear() methods
     # because the spaces are filled with zeroes in advance.
@@ -382,228 +371,5 @@
         stub = llmemory.cast_adr_to_ptr(obj, self.FORWARDSTUBPTR)
         stub.forw = newobj
 
-    def deal_with_objects_with_finalizers(self, scan):
-        # walk over list of objects with finalizers
-        # if it is not copied, add it to the list of to-be-called finalizers
-        # and copy it, to me make the finalizer runnable
-        # We try to run the finalizers in a "reasonable" order, like
-        # CPython does.  The details of this algorithm are in
-        # pypy/doc/discussion/finalizer-order.txt.
-        new_with_finalizer = self.AddressDeque()
-        marked = self.AddressDeque()
-        pending = self.AddressStack()
-        self.tmpstack = self.AddressStack()
-        while self.objects_with_finalizers.non_empty():
-            x = self.objects_with_finalizers.popleft()
-            ll_assert(self._finalization_state(x) != 1, 
-                      "bad finalization state 1")
-            if self.surviving(x):
-                new_with_finalizer.append(self.get_forwarding_address(x))
-                continue
-            marked.append(x)
-            pending.append(x)
-            while pending.non_empty():
-                y = pending.pop()
-                state = self._finalization_state(y)
-                if state == 0:
-                    self._bump_finalization_state_from_0_to_1(y)
-                    self.trace(y, self._append_if_nonnull, pending)
-                elif state == 2:
-                    self._recursively_bump_finalization_state_from_2_to_3(y)
-            scan = self._recursively_bump_finalization_state_from_1_to_2(
-                       x, scan)
-
-        while marked.non_empty():
-            x = marked.popleft()
-            state = self._finalization_state(x)
-            ll_assert(state >= 2, "unexpected finalization state < 2")
-            newx = self.get_forwarding_address(x)
-            if state == 2:
-                self.run_finalizers.append(newx)
-                # we must also fix the state from 2 to 3 here, otherwise
-                # we leave the GCFLAG_FINALIZATION_ORDERING bit behind
-                # which will confuse the next collection
-                self._recursively_bump_finalization_state_from_2_to_3(x)
-            else:
-                new_with_finalizer.append(newx)
-
-        self.tmpstack.delete()
-        pending.delete()
-        marked.delete()
-        self.objects_with_finalizers.delete()
-        self.objects_with_finalizers = new_with_finalizer
-        return scan
-
-    def _append_if_nonnull(pointer, stack):
-        if pointer.address[0] != NULL:
-            stack.append(pointer.address[0])
-    _append_if_nonnull = staticmethod(_append_if_nonnull)
-
-    def _finalization_state(self, obj):
-        if self.surviving(obj):
-            newobj = self.get_forwarding_address(obj)
-            hdr = self.header(newobj)
-            if hdr.tid & GCFLAG_FINALIZATION_ORDERING:
-                return 2
-            else:
-                return 3
-        else:
-            hdr = self.header(obj)
-            if hdr.tid & GCFLAG_FINALIZATION_ORDERING:
-                return 1
-            else:
-                return 0
-
-    def _bump_finalization_state_from_0_to_1(self, obj):
-        ll_assert(self._finalization_state(obj) == 0,
-                  "unexpected finalization state != 0")
-        hdr = self.header(obj)
-        hdr.tid |= GCFLAG_FINALIZATION_ORDERING
-
-    def _recursively_bump_finalization_state_from_2_to_3(self, obj):
-        ll_assert(self._finalization_state(obj) == 2,
-                  "unexpected finalization state != 2")
-        newobj = self.get_forwarding_address(obj)
-        pending = self.tmpstack
-        ll_assert(not pending.non_empty(), "tmpstack not empty")
-        pending.append(newobj)
-        while pending.non_empty():
-            y = pending.pop()
-            hdr = self.header(y)
-            if hdr.tid & GCFLAG_FINALIZATION_ORDERING:     # state 2 ?
-                hdr.tid &= ~GCFLAG_FINALIZATION_ORDERING   # change to state 3
-                self.trace(y, self._append_if_nonnull, pending)
-
-    def _recursively_bump_finalization_state_from_1_to_2(self, obj, scan):
-        # recursively convert objects from state 1 to state 2.
-        # Note that copy() copies all bits, including the
-        # GCFLAG_FINALIZATION_ORDERING.  The mapping between
-        # state numbers and the presence of this bit was designed
-        # for the following to work :-)
-        self.copy(obj)
-        return self.scan_copied(scan)
-
-    def invalidate_weakrefs(self):
-        # walk over list of objects that contain weakrefs
-        # if the object it references survives then update the weakref
-        # otherwise invalidate the weakref
-        new_with_weakref = self.AddressStack()
-        while self.objects_with_weakrefs.non_empty():
-            obj = self.objects_with_weakrefs.pop()
-            if not self.surviving(obj):
-                continue # weakref itself dies
-            obj = self.get_forwarding_address(obj)
-            offset = self.weakpointer_offset(self.get_type_id(obj))
-            pointing_to = (obj + offset).address[0]
-            # XXX I think that pointing_to cannot be NULL here
-            if pointing_to:
-                if self.surviving(pointing_to):
-                    (obj + offset).address[0] = self.get_forwarding_address(
-                        pointing_to)
-                    new_with_weakref.append(obj)
-                else:
-                    (obj + offset).address[0] = NULL
-        self.objects_with_weakrefs.delete()
-        self.objects_with_weakrefs = new_with_weakref
-
-    def update_run_finalizers(self):
-        # we are in an inner collection, caused by a finalizer
-        # the run_finalizers objects need to be copied
-        new_run_finalizer = self.AddressDeque()
-        while self.run_finalizers.non_empty():
-            obj = self.run_finalizers.popleft()
-            new_run_finalizer.append(self.copy(obj))
-        self.run_finalizers.delete()
-        self.run_finalizers = new_run_finalizer
-
-    def execute_finalizers(self):
-        self.finalizer_lock_count += 1
-        try:
-            while self.run_finalizers.non_empty():
-                #print "finalizer"
-                if self.finalizer_lock_count > 1:
-                    # the outer invocation of execute_finalizers() will do it
-                    break
-                obj = self.run_finalizers.popleft()
-                finalizer = self.getfinalizer(self.get_type_id(obj))
-                finalizer(obj)
-        finally:
-            self.finalizer_lock_count -= 1
-
-    def id(self, ptr):
-        obj = llmemory.cast_ptr_to_adr(ptr)
-        if self.header(obj).tid & GCFLAG_EXTERNAL:
-            result = self._compute_id_for_external(obj)
-        else:
-            result = self._compute_id(obj)
-        return llmemory.cast_adr_to_int(result)
-
-    def _next_id(self):
-        # return an id not currently in use (as an address instead of an int)
-        if self.id_free_list.non_empty():
-            result = self.id_free_list.pop()    # reuse a dead id
-        else:
-            # make up a fresh id number
-            result = llmemory.cast_int_to_adr(self.next_free_id)
-            self.next_free_id += 2    # only odd numbers, to make lltype
-                                      # and llmemory happy and to avoid
-                                      # clashes with real addresses
-        return result
-
-    def _compute_id(self, obj):
-        # look if the object is listed in objects_with_id
-        result = self.objects_with_id.get(obj)
-        if not result:
-            result = self._next_id()
-            self.objects_with_id.setitem(obj, result)
-        return result
-
-    def _compute_id_for_external(self, obj):
-        # For prebuilt objects, we can simply return their address.
-        # This method is overriden by the HybridGC.
-        return obj
-
-    def update_objects_with_id(self):
-        old = self.objects_with_id
-        new_objects_with_id = self.AddressDict(old.length())
-        old.foreach(self._update_object_id_FAST, new_objects_with_id)
-        old.delete()
-        self.objects_with_id = new_objects_with_id
-
-    def _update_object_id(self, obj, id, new_objects_with_id):
-        # safe version (used by subclasses)
-        if self.surviving(obj):
-            newobj = self.get_forwarding_address(obj)
-            new_objects_with_id.setitem(newobj, id)
-        else:
-            self.id_free_list.append(id)
-
-    def _update_object_id_FAST(self, obj, id, new_objects_with_id):
-        # unsafe version, assumes that the new_objects_with_id is large enough
-        if self.surviving(obj):
-            newobj = self.get_forwarding_address(obj)
-            new_objects_with_id.insertclean(newobj, id)
-        else:
-            self.id_free_list.append(id)
-
-    def debug_check_object(self, obj):
-        """Check the invariants about 'obj' that should be true
-        between collections."""
-        tid = self.header(obj).tid
-        if tid & GCFLAG_EXTERNAL:
-            ll_assert(tid & GCFLAG_FORWARDED, "bug: external+!forwarded")
-            ll_assert(not (self.tospace <= obj < self.free),
-                      "external flag but object inside the semispaces")
-        else:
-            ll_assert(not (tid & GCFLAG_FORWARDED), "bug: !external+forwarded")
-            ll_assert(self.tospace <= obj < self.free,
-                      "!external flag but object outside the semispaces")
-        ll_assert(not (tid & GCFLAG_FINALIZATION_ORDERING),
-                  "unexpected GCFLAG_FINALIZATION_ORDERING")
-
-    def debug_check_can_copy(self, obj):
-        ll_assert(not (self.tospace <= obj < self.free),
-                  "copy() on already-copied object")
-
     STATISTICS_NUMBERS = 0
 

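Note that the code removed from semispace.py above is essentially the code
added to base.py; SemiSpaceGC keeps providing the low-level hooks that the
shared machinery calls. Summarized as an informal interface (a sketch
gathered from the calls in the shared code; MovingGCBase does not actually
declare these as abstract methods, and MovingGCHooks is a hypothetical name):

    class MovingGCHooks(object):     # hypothetical, for illustration only
        def surviving(self, obj):
            "True if 'obj' is kept alive by the current collection."
        def get_forwarding_address(self, obj):
            "New address of a surviving 'obj'."
        def copy(self, obj):
            "Copy 'obj', returning its new address."
        def scan_copied(self, scan):
            "Trace objects copied since 'scan'; return the new scan point."
        def header(self, obj):
            "Pointer to obj's GC header (tid, plus forward_ptr in markcompact)."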