[pypy-svn] r77046 - in pypy/branch/gen2-gc/pypy/rpython/memory/gc: . test
arigo at codespeak.net
Mon Sep 13 19:12:05 CEST 2010
Author: arigo
Date: Mon Sep 13 19:12:03 2010
New Revision: 77046
Modified:
pypy/branch/gen2-gc/pypy/rpython/memory/gc/gen2.py
pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_gen2.py
Log:
Change Arenas to no longer be a class, but just a Struct.
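(Background on the idiom used in the diff below: a raw-flavored class
becomes an lltype.Struct, and because the struct holds pointers to its
own type, the declaration goes through lltype.ForwardReference.  A
minimal sketch of the pattern, using a hypothetical NODE type rather
than this commit's ARENA:

    from pypy.rpython.lltypesystem import lltype

    # Declare a pointer to a struct type that is not defined yet.
    NODE_PTR = lltype.Ptr(lltype.ForwardReference())
    NODE = lltype.Struct('Node',
                         ('value', lltype.Signed),
                         ('next', NODE_PTR))     # self-referential field
    NODE_PTR.TO.become(NODE)          # resolve the forward reference
    NODE_NULL = lltype.nullptr(NODE)  # null sentinel instead of None

The same three steps -- forward reference, Struct definition, become() --
appear below for ARENA, with ARENA_NULL taking over the role of 'None'.)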
Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/gen2.py
==============================================================================
--- pypy/branch/gen2-gc/pypy/rpython/memory/gc/gen2.py (original)
+++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/gen2.py Mon Sep 13 19:12:03 2010
@@ -58,36 +58,22 @@
# ____________________________________________________________
-# Terminology: Arenas are collection of pages; both are fixed-size.
+# Terminology: "Arenas" are collections of "pages"; both are fixed-size.
# A page contains a number of allocated objects, called "blocks".
-class Arena(object):
- _alloc_flavor_ = "raw"
-
- def __init__(self, arena_size, page_size):
- self.page_size = page_size
- self.arena_size = arena_size
- # 'arena_base' points to the start of malloced memory; it might not
- # be a page-aligned address
- self.arena_base = llarena.arena_malloc(self.arena_size, False)
- if not self.arena_base:
- raise MemoryError("couldn't allocate the next arena")
- # 'freepage' points to the first unused page
- # 'nfreepages' is the number of unused pages
- self.freepage = start_of_page(self.arena_base + page_size - 1,
- page_size)
- arena_end = self.arena_base + self.arena_size
- self.nfreepages = (arena_end - self.freepage) // page_size
- self.nuninitializedpages = self.nfreepages
- #
- # The arenas containing at least one free page are linked in a
- # doubly-linked list. We keep this chained list in order: it
- # starts with the arenas with the most number of allocated
- # pages, so that the least allocated arenas near the end of the
- # list have a chance to become completely empty and be freed.
- self.nextarena = None
- self.prevarena = None
+ARENA_PTR = lltype.Ptr(lltype.ForwardReference())
+ARENA = lltype.Struct('Arena',
+ ('arena_base', llmemory.Address), # see allocate_arena() for a description
+ ('freepage', llmemory.Address),
+ ('nfreepages', lltype.Signed),
+ ('nuninitializedpages', lltype.Signed),
+ ('nextarena', ARENA_PTR),
+ ('prevarena', ARENA_PTR),
+ ('arena_index', lltype.Signed),
+ )
+ARENA_PTR.TO.become(ARENA)
+ARENA_NULL = lltype.nullptr(ARENA)
# Each initialized page in the arena starts with a PAGE_HEADER. The
@@ -98,16 +84,62 @@
# pointer to the next free block, forming a chained list.
PAGE_PTR = lltype.Ptr(lltype.ForwardReference())
-PAGE_HEADER = lltype.Struct('page_header',
+PAGE_HEADER = lltype.Struct('PageHeader',
+ ('nextpage', PAGE_PTR), # chained list of pages with the same size class
+    ('prevpage', PAGE_PTR), # same, but not initialized for the head of list!
('nfree', lltype.Signed), # number of free blocks in this page
('nuninitialized', lltype.Signed), # num. uninitialized blocks (<= nfree)
('freeblock', llmemory.Address), # first free block, chained list
- ('prevpage', PAGE_PTR), # chained list of pages with the same size class
+ ('arena_index', lltype.Signed), # index of the arena in 'all_arenas'
)
PAGE_PTR.TO.become(PAGE_HEADER)
PAGE_NULL = lltype.nullptr(PAGE_HEADER)
+def allocate_arena(arena_size, page_size):
+ # 'arena_base' points to the start of malloced memory; it might not
+ # be a page-aligned address
+ arena_base = llarena.arena_malloc(arena_size, False)
+ if not arena_base:
+ raise MemoryError("couldn't allocate the next arena")
+ #
+ # 'freepage' points to the first unused page
+ freepage = start_of_page(arena_base + page_size - 1, page_size)
+ #
+ # we stick the ARENA structure either at the start or at the end
+ # of the big arena, depending on alignment of the malloc'ed memory
+ arena_end = arena_base + arena_size
+ struct_size = llmemory.raw_malloc_usage(llmemory.sizeof(ARENA))
+ if freepage - arena_base >= struct_size:
+ arena_addr = arena_base
+ else:
+ arena_end -= struct_size
+ arena_addr = arena_end
+ #
+ llarena.arena_reserve(arena_addr, llmemory.sizeof(ARENA), False)
+ arena = llmemory.cast_adr_to_ptr(arena_addr, ARENA_PTR)
+ #
+ arena.arena_base = arena_base
+ arena.freepage = freepage
+ # 'nfreepages' is the number of unused pages
+ arena.nfreepages = (arena_end - freepage) // page_size
+ arena.nuninitializedpages = arena.nfreepages
+ #
+ # The arenas containing at least one free page are linked in a
+ # doubly-linked list. We keep this chained list in order: it
+ # starts with the arenas with the most number of allocated
+ # pages, so that the least allocated arenas near the end of the
+ # list have a chance to become completely empty and be freed.
+ arena.nextarena = ARENA_NULL
+ arena.prevarena = ARENA_NULL
+ return arena
+
+def free_arena(arena):
+ llarena.arena_free(arena.arena_base)
+
+# ____________________________________________________________
+
+
class ArenaCollection(object):
_alloc_flavor_ = "raw"
@@ -123,8 +155,19 @@
length = small_request_threshold / WORD + 1
self.page_for_size = lltype.malloc(rffi.CArray(PAGE_PTR), length,
flavor='raw', zero=True)
- self.arenas_start = None # the most allocated (but not full) arena
- self.arenas_end = None # the least allocated (but not empty) arena
+ self.nblocks_for_size = lltype.malloc(rffi.CArray(lltype.Signed),
+ length, flavor='raw')
+ hdrsize = llmemory.raw_malloc_usage(llmemory.sizeof(PAGE_HEADER))
+ for i in range(1, length):
+ self.nblocks_for_size[i] = (page_size - hdrsize) // (WORD * i)
+ #
+    self.arenas_start = ARENA_NULL  # the most allocated (but not full) arena
+    self.arenas_end = ARENA_NULL    # the least allocated (but not empty) arena
+ #
+ self.all_arenas = lltype.malloc(rffi.CArray(ARENA_PTR), 0,
+ flavor='raw')
+ self.all_arenas_size = 0
+ self.all_arenas_next = 0
def malloc(self, size):
@@ -147,7 +190,7 @@
#
# This was the last free block, so unlink the page from the
# chained list.
- self.page_for_size[size_class] = page.prevpage
+ self.page_for_size[size_class] = page.nextpage
#
else:
# This was not the last free block, so update 'page.freeblock'
@@ -162,7 +205,11 @@
else:
# The 'result' was part of the chained list; read the next.
page.freeblock = result.address[0]
+ llarena.arena_reset(result,
+ llmemory.sizeof(llmemory.Address),
+ False)
#
+ llarena.arena_reserve(result, _dummy_size(size), False)
return result
@@ -171,12 +218,11 @@
#
# Get the arena with the highest number of pages already allocated
arena = self.arenas_start
- if arena is None:
+ if arena == ARENA_NULL:
# No arenas. Get a fresh new arena.
- ll_assert(self.arenas_end is None, "!arenas_start && arenas_end")
- arena = Arena(self.arena_size, self.page_size)
- self.arenas_start = arena
- self.arenas_end = arena
+ ll_assert(self.arenas_end == ARENA_NULL,
+ "!arenas_start && arenas_end")
+ arena = self.allocate_new_arena()
#
# Get the page from there (same logic as in malloc() except on
# pages instead of on blocks)
@@ -187,10 +233,10 @@
# This was the last free page, so unlink the arena from the
# chained list.
self.arenas_start = arena.nextarena
- if self.arenas_start is None:
- self.arenas_end = None
+ if self.arenas_start == ARENA_NULL:
+ self.arenas_end = ARENA_NULL
else:
- self.arenas_start.prevarena = None
+ self.arenas_start.prevarena = ARENA_NULL
#
else:
# This was not the last free page, so update 'arena.freepage'
@@ -201,7 +247,7 @@
arena.freepage = result + self.page_size
arena.nuninitializedpages -= 1
ll_assert(arena.nfreepages == arena.nuninitializedpages,
- "bad value of page.nuninitialized")
+ "bad value of arena.nuninitializedpages")
else:
# The 'result' was part of the chained list; read the next.
arena.freepage = result.address[0]
@@ -214,16 +260,104 @@
page = llmemory.cast_adr_to_ptr(result, PAGE_PTR)
#
hdrsize = llmemory.raw_malloc_usage(llmemory.sizeof(PAGE_HEADER))
- page.nfree = ((self.page_size - hdrsize) / WORD) // size_class
- #
+ page.nfree = self.nblocks_for_size[size_class]
page.nuninitialized = page.nfree
page.freeblock = result + hdrsize
- page.prevpage = PAGE_NULL
+ page.nextpage = PAGE_NULL
ll_assert(self.page_for_size[size_class] == PAGE_NULL,
"allocate_new_page() called but a page is already waiting")
self.page_for_size[size_class] = page
return page
+
+ def allocate_new_arena(self):
+ arena = allocate_arena(self.arena_size, self.page_size)
+ self.arenas_start = arena
+ self.arenas_end = arena
+ #
+ # Search the next free entry in the 'all_arenas' array
+ i = self.all_arenas_next
+ size = self.all_arenas_size
+ count = size
+ while count > 0:
+ if self.all_arenas[i] == ARENA_NULL:
+ break # 'i' is the free entry
+ count -= 1
+ i += 1
+ if i == size:
+ i = 0
+ else:
+ #
+ # No more free entry. Resize the array to get some space.
+ newsize = (size + 3) * 2
+ copy = lltype.malloc(rffi.CArray(ARENA_PTR), newsize,
+ flavor='raw', zero=True)
+ i = 0
+ while i < size:
+ copy[i] = self.all_arenas[i]
+ i += 1
+ # 'i' is equal to the old 'size', so it's now a free entry
+ lltype.free(self.all_arenas, flavor='raw')
+ self.all_arenas = copy
+ self.all_arenas_size = newsize
+ #
+ self.all_arenas_next = i
+ arena.arena_index = i
+ self.all_arenas[i] = arena
+ return arena
+
+
+ def free(self, obj, size):
+ """Free a previously malloc'ed block."""
+ ll_assert(size > 0, "free: size is null or negative")
+ ll_assert(size <= self.small_request_threshold, "free: size too big")
+ ll_assert((size & (WORD-1)) == 0, "free: size is not aligned")
+ #
+ llarena.arena_reset(obj, _dummy_size(size), False)
+ pageaddr = start_of_page(obj, self.page_size)
+ if not we_are_translated():
+ hdrsize = llmemory.raw_malloc_usage(llmemory.sizeof(PAGE_HEADER))
+ assert obj - pageaddr >= hdrsize
+ assert (obj - pageaddr - hdrsize) % size == 0
+ page = llmemory.cast_adr_to_ptr(pageaddr, PAGE_PTR)
+ size_class = size / WORD
+ #
+ # Increment the number of known free objects
+ nfree = page.nfree + 1
+ if nfree < self.nblocks_for_size[size_class]:
+ #
+ # Not all objects in this page are freed yet.
+ # Add the free block to the chained list.
+ page.nfree = nfree
+ llarena.arena_reserve(obj, llmemory.sizeof(llmemory.Address),
+ False)
+ obj.address[0] = page.freeblock
+ page.freeblock = obj
+ #
+ # If the page was full, then it now has space and should be
+ # linked back in the page_for_size[] linked list.
+ if nfree == 1:
+ page.nextpage = self.page_for_size[size_class]
+ if page.nextpage != PAGE_NULL:
+ page.nextpage.prevpage = page
+ self.page_for_size[size_class] = page
+ #
+ else:
+ # The page becomes completely free. Remove it from
+ # the page_for_size[] linked list.
+ if page == self.page_for_size[size_class]:
+ self.page_for_size[size_class] = page.nextpage
+ else:
+ prev = page.prevpage
+ next = page.nextpage
+ prev.nextpage = next
+ next.prevpage = prev
+ #
+ # Free the page, putting it back in the chained list of the arena
+ # where it belongs
+ xxx#...
+
+
# ____________________________________________________________
# Helpers to go from a pointer to the start of its page
@@ -241,6 +375,13 @@
ofs = ((addr.offset - shift) // page_size) * page_size + shift
return llarena.fakearenaaddress(addr.arena, ofs)
+def _dummy_size(size):
+ if we_are_translated():
+ return size
+ if isinstance(size, int):
+ size = llmemory.sizeof(lltype.Char) * size
+ return size
+
# ____________________________________________________________
def nursery_size_from_env():
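(The 'all_arenas' bookkeeping added above turns a raw array into a slot
table: allocate_new_arena() scans circularly, starting where the last
search stopped, for a NULL entry, and grows the array to (size + 3) * 2
by copying when every slot is taken.  A plain-Python model of that
logic, with a list standing in for the raw CArray(ARENA_PTR):

    class SlotTable(object):
        """Plain-Python model of the 'all_arenas' slot array."""
        def __init__(self):
            self.slots = []   # stands in for the raw CArray(ARENA_PTR)
            self.next = 0     # position where the last search stopped

        def acquire(self, item):
            # Scan circularly from 'next' for a free (None) slot.
            i = self.next
            size = len(self.slots)
            count = size
            while count > 0:
                if self.slots[i] is None:
                    break                # 'i' is the free entry
                count -= 1
                i += 1
                if i == size:
                    i = 0
            else:
                # No free entry: grow the table.  The old 'size' is
                # then the first free index.
                newsize = (size + 3) * 2
                self.slots.extend([None] * (newsize - size))
                i = size
            self.next = i
            self.slots[i] = item
            return i                     # stable index, cf. arena_index

Once the free() path is finished, releasing an arena would presumably
just reset its slot to NULL so the index can be reused.)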
Modified: pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_gen2.py
==============================================================================
--- pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_gen2.py (original)
+++ pypy/branch/gen2-gc/pypy/rpython/memory/gc/test/test_gen2.py Mon Sep 13 19:12:03 2010
@@ -1,25 +1,27 @@
from pypy.rpython.memory.gc import gen2
from pypy.rpython.memory.gc.gen2 import WORD, PAGE_NULL, PAGE_HEADER, PAGE_PTR
+from pypy.rpython.memory.gc.gen2 import ARENA, ARENA_NULL
from pypy.rpython.lltypesystem import lltype, llmemory, llarena
SHIFT = 4
hdrsize = llmemory.raw_malloc_usage(llmemory.sizeof(PAGE_HEADER))
+arenasize = llmemory.raw_malloc_usage(llmemory.sizeof(ARENA))
def test_allocate_arena():
- a = gen2.Arena(SHIFT + 8*20, 8)
+ a = gen2.allocate_arena(SHIFT + 8*20 + arenasize, 8)
assert a.freepage == a.arena_base + SHIFT
assert a.nfreepages == 20
assert a.nuninitializedpages == 20
- assert a.prevarena is None
- assert a.nextarena is None
+ assert a.prevarena == ARENA_NULL
+ assert a.nextarena == ARENA_NULL
#
- a = gen2.Arena(SHIFT + 8*20 + 7, 8)
+ a = gen2.allocate_arena(SHIFT + 8*20 + 7 + arenasize, 8)
assert a.freepage == a.arena_base + SHIFT
assert a.nfreepages == 20
assert a.nuninitializedpages == 20
- assert a.prevarena is None
- assert a.nextarena is None
+ assert a.prevarena == ARENA_NULL
+ assert a.nextarena == ARENA_NULL
def test_allocate_new_page():
@@ -32,30 +34,30 @@
assert page.nuninitialized == page.nfree
page2 = page.freeblock - hdrsize
assert llmemory.cast_ptr_to_adr(page) == page2
- assert page.prevpage == PAGE_NULL
+ assert page.nextpage == PAGE_NULL
#
ac = gen2.ArenaCollection(arenasize, pagesize, 99)
- assert ac.arenas_start is ac.arenas_end is None
+ assert ac.arenas_start == ac.arenas_end == ARENA_NULL
#
page = ac.allocate_new_page(5)
checknewpage(page, 5)
a = ac.arenas_start
- assert a is not None
- assert a is ac.arenas_end
+ assert a != ARENA_NULL
+ assert a == ac.arenas_end
assert a.nfreepages == 2
assert a.freepage == a.arena_base + SHIFT + pagesize
assert ac.page_for_size[5] == page
#
page = ac.allocate_new_page(3)
checknewpage(page, 3)
- assert a is ac.arenas_start is ac.arenas_end
+ assert a == ac.arenas_start == ac.arenas_end
assert a.nfreepages == 1
assert a.freepage == a.arena_base + SHIFT + 2*pagesize
assert ac.page_for_size[3] == page
#
page = ac.allocate_new_page(4)
checknewpage(page, 4)
- assert ac.arenas_start is ac.arenas_end is None # has been unlinked
+ assert ac.arenas_start == ac.arenas_end == ARENA_NULL # has been unlinked
assert ac.page_for_size[4] == page
@@ -70,14 +72,16 @@
page.nfree = nblocks - nusedblocks
page.nuninitialized = page.nfree
page.freeblock = pageaddr + hdrsize + nusedblocks * size_block
- page.prevpage = ac.page_for_size[size_class]
+ page.nextpage = ac.page_for_size[size_class]
ac.page_for_size[size_class] = page
+ if page.nextpage:
+ page.nextpage.prevpage = page
#
alist = []
for layout in pagelayouts:
assert len(layout) == nb_pages
assert " " not in layout.rstrip(" ")
- a = gen2.Arena(arenasize, pagesize)
+ a = gen2.allocate_arena(arenasize, pagesize)
alist.append(a)
assert lltype.typeOf(a.freepage) == llmemory.Address
startpageaddr = a.freepage
@@ -119,12 +123,12 @@
def getarena(ac, num, total=None):
if total is not None:
a = getarena(ac, total-1)
- assert a is ac.arenas_end
- assert a.nextarena is None
- prev = None
+ assert a == ac.arenas_end
+ assert a.nextarena == ARENA_NULL
+ prev = ARENA_NULL
a = ac.arenas_start
for i in range(num):
- assert a.prevarena is prev
+ assert a.prevarena == prev
prev = a
a = a.nextarena
return a
@@ -147,18 +151,18 @@
a1 = getarena(ac, 1, total=2)
page = ac.allocate_new_page(1); checkpage(ac, page, a0, 2)
page = ac.allocate_new_page(2); checkpage(ac, page, a0, 3)
- assert getarena(ac, 0, total=2) is a0
+ assert getarena(ac, 0, total=2) == a0
page = ac.allocate_new_page(3); checkpage(ac, page, a0, 4)
- assert getarena(ac, 0, total=1) is a1
+ assert getarena(ac, 0, total=1) == a1
page = ac.allocate_new_page(4); checkpage(ac, page, a1, 0)
page = ac.allocate_new_page(5); checkpage(ac, page, a1, 2)
page = ac.allocate_new_page(6); checkpage(ac, page, a1, 3)
page = ac.allocate_new_page(7); checkpage(ac, page, a1, 4)
- assert ac.arenas_start is ac.arenas_end is None
+ assert ac.arenas_start == ac.arenas_end == ARENA_NULL
-def checkobj(arena, num_page, pos_obj, obj):
- pageaddr = arena.arena_base + SHIFT + num_page * arena.page_size
+def ckob(ac, arena, num_page, pos_obj, obj):
+ pageaddr = arena.arena_base + SHIFT + num_page * ac.page_size
assert obj == pageaddr + hdrsize + pos_obj
@@ -166,48 +170,48 @@
pagesize = hdrsize + 7*WORD
ac = arena_collection_for_test(pagesize, "#23..2 ")
a0 = getarena(ac, 0, total=1)
- obj = ac.malloc(2*WORD); checkobj(a0, 5, 4*WORD, obj)
- obj = ac.malloc(2*WORD); checkobj(a0, 1, 4*WORD, obj)
- obj = ac.malloc(2*WORD); checkobj(a0, 3, 0*WORD, obj)
- obj = ac.malloc(2*WORD); checkobj(a0, 3, 2*WORD, obj)
- obj = ac.malloc(2*WORD); checkobj(a0, 3, 4*WORD, obj)
- obj = ac.malloc(2*WORD); checkobj(a0, 4, 0*WORD, obj)
- obj = ac.malloc(2*WORD); checkobj(a0, 4, 2*WORD, obj)
- obj = ac.malloc(2*WORD); checkobj(a0, 4, 4*WORD, obj)
- obj = ac.malloc(2*WORD); checkobj(a0, 6, 0*WORD, obj)
- obj = ac.malloc(2*WORD); checkobj(a0, 6, 2*WORD, obj)
- obj = ac.malloc(2*WORD); checkobj(a0, 6, 4*WORD, obj)
+ obj = ac.malloc(2*WORD); ckob(ac, a0, 5, 4*WORD, obj)
+ obj = ac.malloc(2*WORD); ckob(ac, a0, 1, 4*WORD, obj)
+ obj = ac.malloc(2*WORD); ckob(ac, a0, 3, 0*WORD, obj)
+ obj = ac.malloc(2*WORD); ckob(ac, a0, 3, 2*WORD, obj)
+ obj = ac.malloc(2*WORD); ckob(ac, a0, 3, 4*WORD, obj)
+ obj = ac.malloc(2*WORD); ckob(ac, a0, 4, 0*WORD, obj)
+ obj = ac.malloc(2*WORD); ckob(ac, a0, 4, 2*WORD, obj)
+ obj = ac.malloc(2*WORD); ckob(ac, a0, 4, 4*WORD, obj)
+ obj = ac.malloc(2*WORD); ckob(ac, a0, 6, 0*WORD, obj)
+ obj = ac.malloc(2*WORD); ckob(ac, a0, 6, 2*WORD, obj)
+ obj = ac.malloc(2*WORD); ckob(ac, a0, 6, 4*WORD, obj)
def test_malloc_mixed_sizes():
pagesize = hdrsize + 7*WORD
ac = arena_collection_for_test(pagesize, "#23..2 ")
a0 = getarena(ac, 0, total=1)
- obj = ac.malloc(2*WORD); checkobj(a0, 5, 4*WORD, obj)
- obj = ac.malloc(3*WORD); checkobj(a0, 2, 3*WORD, obj)
- obj = ac.malloc(2*WORD); checkobj(a0, 1, 4*WORD, obj)
- obj = ac.malloc(3*WORD); checkobj(a0, 3, 0*WORD, obj) # 3rd page -> size 3
- obj = ac.malloc(2*WORD); checkobj(a0, 4, 0*WORD, obj) # 4th page -> size 2
- obj = ac.malloc(3*WORD); checkobj(a0, 3, 3*WORD, obj)
- obj = ac.malloc(2*WORD); checkobj(a0, 4, 2*WORD, obj)
- obj = ac.malloc(3*WORD); checkobj(a0, 6, 0*WORD, obj) # 6th page -> size 3
- obj = ac.malloc(2*WORD); checkobj(a0, 4, 4*WORD, obj)
- obj = ac.malloc(3*WORD); checkobj(a0, 6, 3*WORD, obj)
+ obj = ac.malloc(2*WORD); ckob(ac, a0, 5, 4*WORD, obj)
+ obj = ac.malloc(3*WORD); ckob(ac, a0, 2, 3*WORD, obj)
+ obj = ac.malloc(2*WORD); ckob(ac, a0, 1, 4*WORD, obj)
+ obj = ac.malloc(3*WORD); ckob(ac, a0, 3, 0*WORD, obj) # 3rd page -> size 3
+ obj = ac.malloc(2*WORD); ckob(ac, a0, 4, 0*WORD, obj) # 4th page -> size 2
+ obj = ac.malloc(3*WORD); ckob(ac, a0, 3, 3*WORD, obj)
+ obj = ac.malloc(2*WORD); ckob(ac, a0, 4, 2*WORD, obj)
+ obj = ac.malloc(3*WORD); ckob(ac, a0, 6, 0*WORD, obj) # 6th page -> size 3
+ obj = ac.malloc(2*WORD); ckob(ac, a0, 4, 4*WORD, obj)
+ obj = ac.malloc(3*WORD); ckob(ac, a0, 6, 3*WORD, obj)
def test_malloc_new_arena():
pagesize = hdrsize + 7*WORD
ac = arena_collection_for_test(pagesize, "#23..2 ")
a0 = getarena(ac, 0, total=1)
- obj = ac.malloc(5*WORD); checkobj(a0, 3, 0*WORD, obj) # 3rd page -> size 5
- obj = ac.malloc(4*WORD); checkobj(a0, 4, 0*WORD, obj) # 4th page -> size 4
- obj = ac.malloc(1*WORD); checkobj(a0, 6, 0*WORD, obj) # 6th page -> size 1
- assert ac.arenas_start is ac.arenas_end is None # no more free page
- obj = ac.malloc(1*WORD); checkobj(a0, 6, 1*WORD, obj)
+ obj = ac.malloc(5*WORD); ckob(ac, a0, 3, 0*WORD, obj) # 3rd page -> size 5
+ obj = ac.malloc(4*WORD); ckob(ac, a0, 4, 0*WORD, obj) # 4th page -> size 4
+ obj = ac.malloc(1*WORD); ckob(ac, a0, 6, 0*WORD, obj) # 6th page -> size 1
+ assert ac.arenas_start == ac.arenas_end == ARENA_NULL # no more free page
+ obj = ac.malloc(1*WORD); ckob(ac, a0, 6, 1*WORD, obj)
obj = ac.malloc(5*WORD)
a1 = getarena(ac, 0, total=1)
- pass; checkobj(a1, 0, 0*WORD, obj) # a1/0 -> size 5
- obj = ac.malloc(1*WORD); checkobj(a0, 6, 2*WORD, obj)
- obj = ac.malloc(5*WORD); checkobj(a1, 1, 0*WORD, obj) # a1/1 -> size 5
- obj = ac.malloc(1*WORD); checkobj(a0, 6, 3*WORD, obj)
- obj = ac.malloc(2*WORD); checkobj(a0, 5, 4*WORD, obj)
- obj = ac.malloc(2*WORD); checkobj(a0, 1, 4*WORD, obj)
- obj = ac.malloc(2*WORD); checkobj(a1, 2, 0*WORD, obj) # a1/2 -> size 2
+ pass; ckob(ac, a1, 0, 0*WORD, obj) # a1/0 -> size 5
+ obj = ac.malloc(1*WORD); ckob(ac, a0, 6, 2*WORD, obj)
+ obj = ac.malloc(5*WORD); ckob(ac, a1, 1, 0*WORD, obj) # a1/1 -> size 5
+ obj = ac.malloc(1*WORD); ckob(ac, a0, 6, 3*WORD, obj)
+ obj = ac.malloc(2*WORD); ckob(ac, a0, 5, 4*WORD, obj)
+ obj = ac.malloc(2*WORD); ckob(ac, a0, 1, 4*WORD, obj)
+ obj = ac.malloc(2*WORD); ckob(ac, a1, 2, 0*WORD, obj) # a1/2 -> size 2
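(The ckob() helper above checks placement by plain address arithmetic:
page number 'num_page' starts at arena_base + SHIFT + num_page *
page_size, and a block lives hdrsize bytes past the page start plus its
offset within the page.  A standalone sketch of that computation, with
made-up sizes -- the real hdrsize is sizeof(PAGE_HEADER):

    SHIFT = 4     # page-alignment offset used by these tests

    def block_addr(arena_base, num_page, pos_obj, page_size, hdrsize):
        # Mirrors ckob(): pages start SHIFT bytes past arena_base,
        # and blocks begin right after the PAGE_HEADER.
        pageaddr = arena_base + SHIFT + num_page * page_size
        return pageaddr + hdrsize + pos_obj

    WORD = 8                        # assumed word size
    hdrsize = 6 * WORD              # assumed header size, for illustration
    pagesize = hdrsize + 7 * WORD   # same shape as the tests' pagesize
    assert block_addr(0, 3, 2 * WORD, pagesize, hdrsize) == 380

With these values the 2-word block at offset 2*WORD in page 3 lands at
4 + 3*104 + 48 + 16 == 380.)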