author     H. Peter Anvin <hpa@zytor.com>   2001-10-13 10:46:30 +0000
committer  H. Peter Anvin <hpa@zytor.com>   2001-10-13 10:46:30 +0000
commit     c4fb104d3393cf51c81a630e18ec32b2e8abb184 (patch)
tree       2231c38318546fdf39ffbf1a3b8c6a015c95a46d
parent     d3d34ebb008dac8842f4903fdedf5a04588fada7 (diff)
Working allocation and freeing; still need reverse list pointer
to return completely freed slab page to the buddy system.
-rw-r--r--  alloc.c      | 197
-rw-r--r--  lpsm.h       |   1
-rw-r--r--  testalloc.c  |  25
3 files changed, 171 insertions(+), 52 deletions(-)
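
The "reverse list pointer" mentioned in the commit message refers to the linear scan in objstore_free_slab() below: when a slab page becomes completely free, it is unlinked from its slab_info free list by walking the whole list. A minimal sketch of the kind of back-link that would make the unlink O(1) follows; the prevp field and slab_unlink() helper are hypothetical, not part of this commit:

    #include <stdint.h>

    struct slab_header {
        uint32_t magic;
        uint16_t free_count;
        uint16_t slab_info_index;
        struct slab_header *next;
        struct slab_header **prevp;  /* hypothetical back-link: address of the pointer that points at us */
    };

    /* O(1) unlink, replacing the list scan in objstore_free_slab() */
    static void slab_unlink(struct slab_header *sh)
    {
        *sh->prevp = sh->next;
        if (sh->next)
            sh->next->prevp = sh->prevp;
    }

Note that an extra pointer pushes the header past the "<= 16 bytes" mentioned in the struct comment on 64-bit targets, so the eventual fix may well take a different shape.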
diff --git a/alloc.c b/alloc.c
index fb05df5..775d766 100644
--- a/alloc.c
+++ b/alloc.c
@@ -48,11 +48,19 @@ struct slab_info {
/* This is <= 16 bytes on 32- and 64-bit architectures */
struct slab_header {
uint32_t magic;
- uint32_t free_count;
+ uint16_t free_count;
+ uint16_t slab_info_index;
struct slab_header *next;
};
-#define SLAB_INFO_COUNT 24
+#define SLAB_INFO_COUNT 32
+
+/* Change these if BUDDY_ORDER_MIN or MINIMUM_ALIGN is changed */
+/* These MUST be listed in decreasing order of size; 0 marks the end of the list. */
+static int slabsizes[] =
+{
+ 2032, 1344, 1016, 800, 576, 448, 256, 192, 128, 96, 64, 48, 32, 16, 8, 4, 2, 1, 0
+};
struct arena_header {
unsigned char arena_magic[16];
@@ -61,7 +69,8 @@ struct arena_header {
uint16_t arena_ptrsize;
void *arena_base; /* Base of allocation */
void *root_data_ptr; /* Root data pointer - for the user */
- void *bitmap_end_ptr; /* End of the allocation bitmap */
+ void *free_bitmap_end; /* End of the free bitmap */
+ void *alloc_bitmap_end; /* End of the allocated (buddy) bitmap */
void *data_start; /* Beginning of data area */
int buddy_order_min; /* Minimum size of buddy allocation */
int arena_size_lg2; /* Current arena allocation size */
@@ -72,10 +81,6 @@ struct arena_header {
#define BUDDY_ORDER_MIN 12
#define BUDDY_SIZE_MIN (1U << BUDDY_ORDER_MIN)
-/* This is the size of the alloc bitmask. It will usually be sparse. */
-/* This is two bits per unit of the lowest order */
-#define ALLOC_BITMAP_SIZE ((ARENA_LIMIT >> ORDER_MIN)*2/CHAR_BIT)
-
/* Align allocations at least this large (2^n) to this boundary */
#define MINIMUM_ALIGN_LG2 4
#define MINIMUM_ALIGN (1 << MINIMUM_ALIGN_LG2)
@@ -83,7 +88,7 @@ struct arena_header {
/* This is the size (2^n) allocated for a brand new arena.
This allocation will be sparse, so don't freak out too much! */
-#define ARENA_START_SIZE_LG2 (2*BUDDY_ORDER_MIN+2) /* 64 MB */
+#define ARENA_START_SIZE_LG2 (2*BUDDY_ORDER_MIN+1) /* 32 MB */
#define ARENA_START_SIZE (1UL << ARENA_START_SIZE_LG2)
#define HEADER_SIZE BUDDY_SIZE_MIN /* Default size of the header */
@@ -172,7 +177,7 @@ static int find_set_bit(const void *ptr, int start, int count, int err)
if ( val & mask ) {
end = start | BITSCAN_MASK;
mask = BITSCAN_1 << (start & BITSCAN_MASK);
- for ( i = start ; i < end ; i++ ) {
+ for ( i = start ; i <= end ; i++ ) {
if ( val & mask )
return i;
mask <<= 1;
@@ -188,7 +193,7 @@ static int find_set_bit(const void *ptr, int start, int count, int err)
if ( val ) {
mask = BITSCAN_1;
end = start | BITSCAN_MASK;
- for ( i = start ; i < end ; i++ ) {
+ for ( i = start ; i <= end ; i++ ) {
if ( val & mask )
return i;
mask <<= 1;
@@ -222,7 +227,7 @@ void *objstore_arena_init(void)
if ( memcmp(ah->arena_magic, objstore_arena_magic, 16) != 0 ) {
if ( memcmp(ah->arena_magic, objstore_arena_zero, 16) == 0 ) {
- size_t total_size;
+ size_t total_size, bitmap_size;
int i;
/* Uninitialized arena */
@@ -235,33 +240,20 @@ void *objstore_arena_init(void)
ah->buddy_order_min = BUDDY_ORDER_MIN;
ah->arena_size_lg2 = ARENA_START_SIZE_LG2;
- total_size = HEADER_SIZE + ARENA_START_SIZE +
- (1UL << (ARENA_START_SIZE_LG2-BUDDY_ORDER_MIN-2));
- ah->bitmap_end_ptr = (char *)ah + total_size;
+ bitmap_size = 1UL << (ARENA_START_SIZE_LG2-BUDDY_ORDER_MIN-2);
+ total_size = HEADER_SIZE + ARENA_START_SIZE + (bitmap_size << 1);
+ ah->free_bitmap_end = (char *)ah + total_size;
+ ah->alloc_bitmap_end = (char *)ah->free_bitmap_end - bitmap_size;
if ( objstore_extend(total_size) < 0 )
return NULL; /* Failed to establish initial arena */
- /* Change these if BUDDY_ORDER_MIN or MINIMUM_ALIGN is changed */
- /* These MUST be ordered in order of decreasing size */
- ah->slab[0].size = 2032;
- ah->slab[1].size = 1008;
- ah->slab[2].size = 496;
- ah->slab[3].size = 240;
- ah->slab[4].size = 112;
- ah->slab[5].size = 48;
- ah->slab[6].size = 32;
- ah->slab[7].size = 16;
- ah->slab[8].size = 8;
- ah->slab[9].size = 4;
- ah->slab[10].size = 2;
- ah->slab[11].size = 1;
-
- for ( i = 0 ; i <= 11 ; i++ ) {
+ /* Compute SLAB definitions */
+ for ( i = 0 ; slabsizes[i] ; i++ ) {
int header_size =
((sizeof(struct slab_header)+MINIMUM_ALIGN-1) & ~MINIMUM_ALIGN_MASK);
int bytes = BUDDY_SIZE_MIN - header_size;
- int size = ah->slab[i].size;
+ int size = slabsizes[i];
int count = bytes/size;
int bitmap_size;
@@ -276,12 +268,13 @@ void *objstore_arena_init(void)
count--;
}
+ ah->slab[i].size = size;
ah->slab[i].count = count;
ah->slab[i].data_offset = header_size + bitmap_size;
}
- /* The buddy itmask contains zeroes already; set the bit on the topmost order. */
- set_bit(ah->bitmap_end_ptr, -1);
+ /* The bitmasks already contain zeroes; set the free bit on the topmost order. */
+ set_bit(ah->free_bitmap_end, -1);
}
}
@@ -304,7 +297,7 @@ static int objstore_find_free_chunk(int rorder)
rs = 1 << rorder;
obit = 1-(rs+rs);
- xbit = find_set_bit(ah->bitmap_end_ptr, obit, rs, 0);
+ xbit = find_set_bit(ah->free_bitmap_end, obit, rs, 0);
if ( !xbit ) {
if ( rorder == 0 )
@@ -314,10 +307,10 @@ static int objstore_find_free_chunk(int rorder)
return 0; /* All out of those, sir */
printf("buddy: splitting rorder %2d %8d -> %d %d\n", rorder-1, xbit, (xbit << 1)-1, (xbit << 1));
xbit <<= 1; /* Convert to address of fragments */
- set_bit(ah->bitmap_end_ptr, xbit); /* Upper buddy is available */
+ set_bit(ah->free_bitmap_end, xbit); /* Upper buddy is available */
xbit--; /* Return the lower buddy */
} else {
- clr_bit(ah->bitmap_end_ptr, xbit); /* No longer available */
+ clr_bit(ah->free_bitmap_end, xbit); /* No longer available */
}
printf("buddy: allocated rorder %2d %8d\n", rorder, xbit);
return xbit;
@@ -351,6 +344,9 @@ static void *objstore_malloc_buddy(size_t size)
return NULL; /* Not implemented yet */
}
+ /* Mark this as a proper allocation (used to find the size on free()) */
+ set_bit(ah->alloc_bitmap_end, xbit);
+
p = (void *)((char *)ah->data_start +
((uintptr_t)(xbit-obit) << order));
@@ -358,7 +354,7 @@ static void *objstore_malloc_buddy(size_t size)
return p;
}
-static struct slab_header *objstore_make_new_slab(struct slab_info *si)
+static struct slab_header *objstore_make_new_slab(struct slab_info *si, int index)
{
struct slab_header *sh;
unsigned char *slab_bitmap;
@@ -372,9 +368,10 @@ static struct slab_header *objstore_make_new_slab(struct slab_info *si)
printf("slab: allocating new page for size %d at %p\n", si->size, sh);
- sh->magic = SLAB_MAGIC;
- sh->free_count = si->count;
- sh->next = si->list;
+ sh->magic = SLAB_MAGIC;
+ sh->free_count = si->count;
+ sh->slab_info_index = index;
+ sh->next = si->list;
slab_bitmap = (unsigned char *)sh +
((sizeof(struct slab_header)+MINIMUM_ALIGN-1) & ~MINIMUM_ALIGN_MASK);
@@ -388,7 +385,7 @@ static struct slab_header *objstore_make_new_slab(struct slab_info *si)
static void *objstore_malloc_slab(size_t size)
{
- int i;
+ int i, index;
int which;
struct slab_info *si;
struct slab_header *sh;
@@ -398,11 +395,13 @@ static void *objstore_malloc_slab(size_t size)
/* POSIX sayeth: thou shalt return unique addresses for malloc(0); */
if ( size == 0 ) size = 1;
+ index = 0;
si = &ah->slab[0];
for ( i = 1 ; i < SLAB_INFO_COUNT ; i++ ) {
- if ( size <= si[1].size )
+ if ( size <= si[1].size ) {
si++;
- else
+ index++;
+ } else
break;
}
@@ -414,7 +413,7 @@ static void *objstore_malloc_slab(size_t size)
sh = si->list;
if ( !sh ) {
/* Empty free list, need a new page */
- if ( !(sh = objstore_make_new_slab(si)) )
+ if ( !(sh = objstore_make_new_slab(si,index)) )
return NULL; /* Unable to allocate a new slab */
}
@@ -432,8 +431,8 @@ static void *objstore_malloc_slab(size_t size)
}
p = (void *)((char *)sh + si->data_offset + which*si->size);
- printf("slab: allocated %d/%d bytes at %p, %d slabs left\n",
- size, si->size, p, sh->free_count);
+ printf("slab: allocated %d/%d bytes at %p, %d/%d slabs in use\n",
+ size, si->size, p, si->count-sh->free_count, si->count);
return p;
}
@@ -444,3 +443,109 @@ void *objstore_malloc(size_t size)
else
return objstore_malloc_slab(size);
}
+
+static void objstore_free_chunk(int xbit, int rorder)
+{
+ int bxbit = -(-xbit^1); /* Buddy xbit */
+
+ if ( rorder > 0 ) {
+ if ( test_bit(ah->free_bitmap_end, bxbit) ) {
+ /* Buddy is free. */
+ clr_bit(ah->free_bitmap_end, bxbit);
+ printf("buddy: coalescing rorder %2d %d %d -> %2d %d\n",
+ rorder, xbit, bxbit, rorder-1, -(-xbit >> 1));
+ objstore_free_chunk(-(-xbit >> 1), rorder-1);
+ return;
+ }
+ }
+ set_bit(ah->free_bitmap_end, xbit);
+}
+
+static void objstore_free_buddy(void *ptr)
+{
+ uintptr_t offset = (uintptr_t)ptr - (uintptr_t)ah->data_start;
+ int order, rorder;
+ int xbit, obit;
+
+ printf("buddy: freeing allocation at %p\n", ptr);
+
+ assert(offset < (1UL << ah->arena_size_lg2)); /* Not inside the arena?? */
+
+ for ( rorder = 0, order = ah->arena_size_lg2 ;
+ order >= BUDDY_ORDER_MIN ;
+ rorder++, order-- ) {
+ if ( offset & ((1UL << order)-1) )
+ continue; /* Can't be this order */
+
+ obit = 1-(2 << rorder);
+ xbit = obit + (offset >> order);
+
+ if ( test_bit(ah->alloc_bitmap_end, xbit) ) {
+ /* We're this order... */
+ printf("buddy: freeing rorder %2d %8d\n", rorder, xbit);
+ clr_bit(ah->alloc_bitmap_end, xbit);
+ objstore_free_chunk(xbit, rorder);
+ return;
+ }
+ }
+
+ assert(0); /* Freeing bogus memory? */
+}
+
+static void objstore_free_slab(void *ptr)
+{
+ struct slab_header *sh;
+ struct slab_info *si;
+ int offset, which, free_count;
+ unsigned char *slab_bitmap;
+
+ /* Slab header should be at the start of the page */
+ sh = (struct slab_header *)((uintptr_t)ptr & ~(BUDDY_SIZE_MIN-1));
+ assert(sh->magic == SLAB_MAGIC);
+ si = &ah->slab[sh->slab_info_index];
+ slab_bitmap = (unsigned char *)sh +
+ ((sizeof(struct slab_header)+MINIMUM_ALIGN-1) & ~MINIMUM_ALIGN_MASK);
+
+ printf("slab: Freeing size %d slab at %p, %d/%d allocated\n",
+ si->size, ptr, si->count-sh->free_count, si->count);
+
+ offset = ((uintptr_t)ptr & (BUDDY_SIZE_MIN-1)) - si->data_offset;
+ assert(offset >= 0);
+ which = offset/si->size;
+ assert(offset == which*si->size);
+
+ free_count = sh->free_count;
+
+ if ( free_count == si->count-1 ) {
+ struct slab_header **shp;
+ /* Deallocated the entire page.. SHOULD GIVE IT BACK TO THE BUDDY SYSTEM */
+ /**** Need reverse link pointer here -- scanning the whole list like this
+ is NOT ACCEPTABLE ****/
+ printf("slab: Returning page %p back to the buddy system\n", sh);
+ shp = &(si->list);
+ while ( *shp && *shp != sh )
+ shp = &((*shp)->next);
+ assert(*shp);
+ *shp = sh->next;
+ objstore_free_buddy(sh);
+ } else {
+ set_bit(slab_bitmap, which);
+ sh->free_count = free_count+1;
+ if ( free_count == 0 ) {
+ /* Page with newly available slabs */
+ printf("slab: Adding page %p back onto free list for slab size %d\n",
+ sh, si->size);
+ sh->next = si->list;
+ si->list = sh;
+ }
+ }
+}
+
+void objstore_free(void *ptr)
+{
+ /* Buddy allocations are ALWAYS page-aligned, SLAB allocations never */
+ if ( (uintptr_t)ptr & (BUDDY_SIZE_MIN-1) )
+ objstore_free_slab(ptr);
+ else
+ objstore_free_buddy(ptr);
+}
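
For readers following the bitmap arithmetic in objstore_free_buddy() and objstore_free_chunk() above: free/alloc bits are indexed backwards from the *_bitmap_end pointers, with the chunks of relative order rorder occupying bits obit .. obit + 2^rorder - 1, where obit = 1-(2<<rorder). A minimal sketch (not part of the commit) that just checks the index arithmetic for rorder 2:

    #include <assert.h>

    int main(void)
    {
        int rorder = 2;                 /* four chunks, each 1/4 of the arena */
        int obit   = 1 - (2 << rorder); /* -7: bit index of the first rorder-2 chunk */
        int xbit   = obit;              /* chunk 0 */
        int bxbit  = -(-xbit ^ 1);      /* its buddy, chunk 1: bit -6 */
        int parent = -(-xbit >> 1);     /* the rorder-1 chunk covering both: bit -3 */

        assert(obit == -7 && bxbit == -6 && parent == -3);
        return 0;
    }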
diff --git a/lpsm.h b/lpsm.h
index e1761a6..cc7c725 100644
--- a/lpsm.h
+++ b/lpsm.h
@@ -34,5 +34,6 @@ int objstore_extend(size_t new_size);
/* Alloc routines */
void *objstore_arena_init(void);
void *objstore_malloc(size_t size);
+void objstore_free(void *ptr);
#endif
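
Taken together with testalloc.c below, the intended calling sequence looks roughly like the following. This is a sketch only: objstore_init(), objstore_checkpoint() and the "objstore.h" header are assumed from the test program, not declared in this diff.

    #include "objstore.h"   /* objstore_init, objstore_checkpoint (as used by testalloc.c) */
    #include "lpsm.h"       /* objstore_arena_init, objstore_malloc, objstore_free */

    int main(void)
    {
        int arena_len = 16384;
        void *buf = objstore_init("arena.dat", "arena.log", &arena_len);
        (void)buf;                         /* base mapping of the persistent arena */

        if (!objstore_arena_init())        /* set up (or recover) allocator state */
            return 1;

        void *p = objstore_malloc(100);    /* small request: served from a slab */
        void *q = objstore_malloc(8192);   /* larger than the biggest slab (2032): buddy allocation */
        objstore_free(p);                  /* slab free (pointer not page-aligned) */
        objstore_free(q);                  /* buddy free (pointer page-aligned) */

        objstore_checkpoint(0.0);          /* flush, as testalloc.c does around its loop */
        return 0;
    }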
diff --git a/testalloc.c b/testalloc.c
index 455aac7..1fcd29d 100644
--- a/testalloc.c
+++ b/testalloc.c
@@ -20,6 +20,7 @@
#include "objstore.h"
#define COUNT 8192
+#define SLOTS 256
#define MAXLOG 26
int main(int argc, char *argv[])
@@ -28,12 +29,15 @@ int main(int argc, char *argv[])
int sizes[COUNT];
void *buf;
int arena_len = 16384;
- int i;
+ int i, slot;
double rnd;
unlink("arena.dat");
unlink("arena.log");
+ memset(areas, 0, sizeof(areas));
+ memset(sizes, 0, sizeof(sizes));
+
buf = objstore_init("arena.dat", "arena.log", &arena_len);
if ( !objstore_arena_init() ) {
fprintf(stderr, "%s: objstore_arena_init() failed\n", argv[0]);
@@ -43,11 +47,20 @@ int main(int argc, char *argv[])
objstore_checkpoint(0.0);
for ( i = 0 ; i < COUNT ; i++ ) {
- rnd = (double)rand()/RAND_MAX;
- sizes[i] = (int)pow(2.0, MAXLOG*rnd);
- areas[i] = objstore_malloc(sizes[i]);
- printf("Alloc: %8d (0x%08x) bytes at %p\n",
- sizes[i], sizes[i], areas[i]);
+ slot = rand() % SLOTS;
+
+ if ( areas[slot] ) {
+ printf("Free: %8d (0x%08x) bytes at %p\n",
+ sizes[slot], sizes[slot], areas[slot]);
+ objstore_free(areas[slot]);
+ areas[slot] = NULL;
+ } else {
+ rnd = (double)rand()/RAND_MAX;
+ sizes[slot] = (int)pow(2.0, MAXLOG*pow(rnd,5.0));
+ areas[slot] = objstore_malloc(sizes[slot]);
+ printf("Alloc: %8d (0x%08x) bytes at %p\n",
+ sizes[slot], sizes[slot], areas[slot]);
+ }
}
objstore_checkpoint(0.0);