[pypy-commit] stmgc c5: Finally, make largemalloc.c work on an externally-provided arena of
arigo
noreply at buildbot.pypy.org
Fri Dec 20 16:58:44 CET 2013
Author: Armin Rigo <arigo at tunes.org>
Branch: c5
Changeset: r576:2925f0ed8eed
Date: 2013-12-20 16:58 +0100
http://bitbucket.org/pypy/stmgc/changeset/2925f0ed8eed/
Log: Finally, make largemalloc.c work on an externally-provided arena of
memory, which can grow or shrink at the end.
diff --git a/c5/largemalloc.c b/c5/largemalloc.c
--- a/c5/largemalloc.c
+++ b/c5/largemalloc.c
@@ -84,37 +84,9 @@
fragments of space between bigger allocations.
*/
-static dlist_t largebins[N_BINS] = {
+static dlist_t largebins[N_BINS];
+static mchunk_t *first_chunk, *last_chunk;
-#define INIT(num) { largebins + num, largebins + num }
- INIT(0), INIT(1), INIT(2), INIT(3), INIT(4),
- INIT(5), INIT(6), INIT(7), INIT(8), INIT(9),
- INIT(10), INIT(11), INIT(12), INIT(13), INIT(14),
- INIT(15), INIT(16), INIT(17), INIT(18), INIT(19),
- INIT(20), INIT(21), INIT(22), INIT(23), INIT(24),
- INIT(25), INIT(26), INIT(27), INIT(28), INIT(29),
- INIT(30), INIT(31), INIT(32), INIT(33), INIT(34),
- INIT(35), INIT(36), INIT(37), INIT(38), INIT(39),
- INIT(40), INIT(41), INIT(42), INIT(43), INIT(44),
- INIT(45), INIT(46), INIT(47), INIT(48), INIT(49),
- INIT(50), INIT(51), INIT(52), INIT(53), INIT(54),
- INIT(55), INIT(56), INIT(57), INIT(58), INIT(59),
- INIT(60), INIT(61), INIT(62), INIT(63), INIT(64),
- INIT(65), INIT(66), INIT(67), INIT(68), INIT(69),
- INIT(70), INIT(71), INIT(72), INIT(73), INIT(74),
- INIT(75), INIT(76), INIT(77), INIT(78), INIT(79),
- INIT(80), INIT(81), INIT(82), INIT(83) };
-#undef INIT
-
-void _stm_large_reset(void)
-{
- int i;
- for (i = 0; i < N_BINS; i++)
- largebins[i].prev = largebins[i].next = &largebins[i];
-}
-
-
-static char *allocate_more(size_t request_size);
static void insert_unsorted(mchunk_t *new)
{
@@ -229,8 +201,8 @@
}
}
- /* not enough free memory. We need to allocate more. */
- return allocate_more(request_size);
+ /* not enough memory. */
+ return NULL;
found:
assert(mscan->size & FLAG_SORTED);
@@ -262,31 +234,6 @@
return (char *)&mscan->d;
}
-static char *allocate_more(size_t request_size)
-{
- assert(request_size < MMAP_LIMIT);//XXX
-
- size_t big_size = MMAP_LIMIT * 8 - 48;
- mchunk_t *big_chunk = (mchunk_t *)malloc(big_size);
- if (!big_chunk) {
- fprintf(stderr, "out of memory!\n");
- abort();
- }
- fprintf(stderr, "allocate_more: %p\n", &big_chunk->d);
-
- big_chunk->prev_size = THIS_CHUNK_FREE;
- big_chunk->size = big_size - CHUNK_HEADER_SIZE * 2;
-
- assert((char *)&next_chunk_u(big_chunk)->prev_size ==
- ((char *)big_chunk) + big_size - CHUNK_HEADER_SIZE);
- next_chunk_u(big_chunk)->prev_size = big_chunk->size;
- next_chunk_u(big_chunk)->size = END_MARKER;
-
- insert_unsorted(big_chunk);
-
- return stm_large_malloc(request_size);
-}
-
void stm_large_free(char *data)
{
mchunk_t *chunk = data2chunk(data);
@@ -349,8 +296,9 @@
}
-void _stm_large_dump(char *data)
+void _stm_large_dump(void)
{
+ char *data = ((char *)first_chunk) + 16;
size_t prev_size_if_free = 0;
while (1) {
fprintf(stderr, "[ %p: %zu\n", data - 16, *(size_t*)(data - 16));
@@ -366,12 +314,88 @@
}
if (*(size_t*)(data - 8) == END_MARKER)
break;
- fprintf(stderr, " %p: %zu]%s\n", data - 8, *(size_t*)(data - 8),
+ fprintf(stderr, " %p: %zu ]%s\n", data - 8, *(size_t*)(data - 8),
prev_size_if_free ? " (free)" : "");
if (!prev_size_if_free)
assert(!((*(size_t*)(data - 8)) & FLAG_SORTED));
+ assert(*(ssize_t*)(data - 8) > 0);
data += (*(size_t*)(data - 8)) & ~FLAG_SORTED;
data += 16;
}
fprintf(stderr, " %p: end. ]\n\n", data - 8);
+ assert(data - 16 == (char *)last_chunk);
}
+
+void stm_largemalloc_init(char *data_start, size_t data_size)
+{
+    /* Set up the allocator on the externally-provided arena
+       [data_start, data_start + data_size).  'data_size' must be a
+       multiple of 32 and big enough for two chunks (asserted below). */
+    int i;
+    /* each bin starts out as an empty, self-linked doubly-linked list */
+    for (i = 0; i < N_BINS; i++)
+        largebins[i].prev = largebins[i].next = &largebins[i];
+
+    assert(data_size >= 2 * sizeof(struct malloc_chunk));
+    assert((data_size & 31) == 0);
+    /* the arena becomes one big free chunk covering everything up to
+       an END_MARKER pseudo-chunk placed at the very end */
+    first_chunk = (mchunk_t *)data_start;
+    first_chunk->prev_size = THIS_CHUNK_FREE;
+    first_chunk->size = data_size - 2 * CHUNK_HEADER_SIZE;
+    last_chunk = chunk_at_offset(first_chunk, data_size - CHUNK_HEADER_SIZE);
+    last_chunk->prev_size = first_chunk->size;
+    last_chunk->size = END_MARKER;
+    assert(last_chunk == next_chunk_u(first_chunk));
+
+    insert_unsorted(first_chunk);
+}
+
+int stm_largemalloc_resize_arena(size_t new_size)
+{
+    /* Grow or shrink the arena given to stm_largemalloc_init(), keeping
+       the same start address.  Returns 1 on success, or 0 if shrinking
+       is impossible because the tail of the arena is in use.
+       'new_size' must be a multiple of 32 and big enough for two
+       chunks (asserted below). */
+    assert(new_size >= 2 * sizeof(struct malloc_chunk));
+    assert((new_size & 31) == 0);
+
+    new_size -= CHUNK_HEADER_SIZE;
+    mchunk_t *new_last_chunk = chunk_at_offset(first_chunk, new_size);
+    mchunk_t *old_last_chunk = last_chunk;
+    size_t old_size = ((char *)old_last_chunk) - (char *)first_chunk;
+
+    if (new_size < old_size) {
+        /* check if there is enough free space at the end to allow
+           such a reduction */
+        size_t lsize = last_chunk->prev_size;
+        assert(lsize != THIS_CHUNK_FREE);
+        if (lsize == BOTH_CHUNKS_USED)
+            return 0;          /* the final chunk is in use */
+        lsize += CHUNK_HEADER_SIZE;
+        mchunk_t *prev_chunk = chunk_at_offset(last_chunk, -lsize);
+        if (((char *)new_last_chunk) < ((char *)prev_chunk) +
+                                       sizeof(struct malloc_chunk))
+            return 0;          /* the free tail is not large enough */
+
+        /* unlink the prev_chunk from the doubly-linked list */
+        prev_chunk->d.next->prev = prev_chunk->d.prev;
+        prev_chunk->d.prev->next = prev_chunk->d.next;
+
+        /* reduce the prev_chunk */
+        assert((prev_chunk->size & ~FLAG_SORTED) == last_chunk->prev_size);
+        prev_chunk->size = ((char*)new_last_chunk) - (char *)prev_chunk
+                           - CHUNK_HEADER_SIZE;
+
+        /* make a fresh-new last chunk */
+        new_last_chunk->prev_size = prev_chunk->size;
+        new_last_chunk->size = END_MARKER;
+        last_chunk = new_last_chunk;
+        assert(last_chunk == next_chunk_u(prev_chunk));
+
+        insert_unsorted(prev_chunk);
+    }
+    else if (new_size > old_size) {
+        /* turn the old end marker ('old_last_chunk', still equal to
+           'last_chunk' here) into a used chunk covering exactly the
+           extra space, and write a new end marker after it */
+        old_last_chunk->size = (new_size - old_size) - CHUNK_HEADER_SIZE;
+        new_last_chunk->prev_size = BOTH_CHUNKS_USED;
+        new_last_chunk->size = END_MARKER;
+        last_chunk = new_last_chunk;
+        assert(last_chunk == next_chunk_u(old_last_chunk));
+
+        /* then free the old_last_chunk (turn it from "used" to "free") */
+        stm_large_free((char *)&old_last_chunk->d);
+    }
+    return 1;
+}
diff --git a/c5/largemalloc.h b/c5/largemalloc.h
--- a/c5/largemalloc.h
+++ b/c5/largemalloc.h
@@ -1,6 +1,9 @@
#include <stdlib.h>
+void stm_largemalloc_init(char *data_start, size_t data_size);
+int stm_largemalloc_resize_arena(size_t new_size);
+
char *stm_large_malloc(size_t request_size);
void stm_large_free(char *data);
-void _stm_large_dump(char *data);
-void _stm_large_reset(void);
+
+void _stm_large_dump(void);
diff --git a/c5/test/support.py b/c5/test/support.py
--- a/c5/test/support.py
+++ b/c5/test/support.py
@@ -42,10 +42,11 @@
void _stm_teardown(void);
void _stm_teardown_process(void);
+void stm_largemalloc_init(char *data_start, size_t data_size);
+int stm_largemalloc_resize_arena(size_t new_size);
char *stm_large_malloc(size_t request_size);
void stm_large_free(char *data);
-void _stm_large_dump(char *data);
-void _stm_large_reset(void);
+void _stm_large_dump(void);
void *memset(void *s, int c, size_t n);
""")
diff --git a/c5/test/test_largemalloc.py b/c5/test/test_largemalloc.py
--- a/c5/test/test_largemalloc.py
+++ b/c5/test/test_largemalloc.py
@@ -1,11 +1,15 @@
from support import *
-import random
+import sys, random
class TestLargeMalloc(object):
def setup_method(self, meth):
- lib._stm_large_reset()
+ size = 1024 * 1024 # 1MB
+ self.rawmem = ffi.new("char[]", size)
+ self.size = size
+ lib.memset(self.rawmem, 0xcd, size)
+ lib.stm_largemalloc_init(self.rawmem, size)
def test_simple(self):
d1 = lib.stm_large_malloc(7000)
@@ -33,10 +37,51 @@
assert d7 == d6 + 616
d8 = lib.stm_large_malloc(600)
assert d8 == d4
+ #
+ lib._stm_large_dump()
+
+ def test_overflow_1(self):
+ d = lib.stm_large_malloc(self.size - 32)
+ assert d == self.rawmem + 16
+ lib._stm_large_dump()
+
+ def test_overflow_2(self):
+ d = lib.stm_large_malloc(self.size - 16)
+ assert d == ffi.NULL
+ lib._stm_large_dump()
+
+ def test_overflow_3(self):
+ d = lib.stm_large_malloc(sys.maxint & ~7)
+ assert d == ffi.NULL
+ lib._stm_large_dump()
+
+ def test_resize_arena_reduce_1(self):
+ r = lib.stm_largemalloc_resize_arena(self.size - 32)
+ assert r == 1
+ d = lib.stm_large_malloc(self.size - 32)
+ assert d == ffi.NULL
+ lib._stm_large_dump()
+
+ def test_resize_arena_reduce_2(self):
+ lib.stm_large_malloc(self.size // 2 - 64)
+ r = lib.stm_largemalloc_resize_arena(self.size // 2)
+ assert r == 1
+ lib._stm_large_dump()
+
+ def test_resize_arena_cannot_reduce_1(self):
+ lib.stm_large_malloc(self.size // 2)
+ r = lib.stm_largemalloc_resize_arena(self.size // 2)
+ assert r == 0
+ lib._stm_large_dump()
+
+ def test_resize_arena_cannot_reduce_2(self):
+ lib.stm_large_malloc(self.size // 2 - 56)
+ r = lib.stm_largemalloc_resize_arena(self.size // 2)
+ assert r == 0
+ lib._stm_large_dump()
def test_random(self):
r = random.Random(1007)
- first = None
p = []
for i in range(100000):
if len(p) != 0 and (len(p) > 100 or r.randrange(0, 5) < 2):
@@ -50,12 +95,11 @@
sz = r.randrange(8, 160) * 8
d = lib.stm_large_malloc(sz)
print 'alloc %5d (%s)' % (sz, d)
- if first is None:
- first = d
+ assert d != ffi.NULL
lib.memset(d, 0xdd, sz)
content1 = chr(r.randrange(0, 256))
content2 = chr(r.randrange(0, 256))
d[0] = content1
d[sz - 1] = content2
p.append((d, sz, content1, content2))
- lib._stm_large_dump(first)
+ lib._stm_large_dump()
More information about the pypy-commit
mailing list