aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorH. Peter Anvin <hpa@zytor.com>2001-10-13 18:45:00 +0000
committerH. Peter Anvin <hpa@zytor.com>2001-10-13 18:45:00 +0000
commitfd2cb9815b27ca8a45d820c3e45f2c39ca58b8ea (patch)
tree25aa7bdd7d8b108d4e48ae34686a1b7d3e06fd88
parentc4fb104d3393cf51c81a630e18ec32b2e8abb184 (diff)
downloadlpsm-fd2cb9815b27ca8a45d820c3e45f2c39ca58b8ea.tar.gz
lpsm-fd2cb9815b27ca8a45d820c3e45f2c39ca58b8ea.tar.xz
lpsm-fd2cb9815b27ca8a45d820c3e45f2c39ca58b8ea.zip
Add back pointers so we can reclaim SLAB pages more easily; make this work
better on 64-bit systems. Still needed: realloc(), arena extension.
-rw-r--r--alloc.c49
1 files changed, 25 insertions, 24 deletions
diff --git a/alloc.c b/alloc.c
index 775d766..773a093 100644
--- a/alloc.c
+++ b/alloc.c
@@ -45,18 +45,21 @@ struct slab_info {
struct slab_header *list;
};
-/* This is <= 16 bytes on 32- and 64-bit architectures */
+/* This is 16 bytes on 32-bit and 24 bytes on 64-bit architectures */
+/* This header must end aligned to a bitscan_t datum */
struct slab_header {
uint32_t magic;
uint16_t free_count;
uint16_t slab_info_index;
- struct slab_header *next;
+ struct slab_header *next, **rev;
};
#define SLAB_INFO_COUNT 32
/* Change these if BUDDY_ORDER_MIN or MINIMUM_ALIGN is changed */
/* These MUST be ordered in order of decreasing size. 0 is end of list. */
+/* This particular set of slab sizes should work well for both a
+ 16- and 24-byte header size. */
static int slabsizes[] =
{
2032, 1344, 1016, 800, 576, 448, 256, 192, 128, 96, 64, 48, 32, 16, 8, 4, 2, 1, 0
@@ -250,19 +253,17 @@ void *objstore_arena_init(void)
/* Compute SLAB definitions */
for ( i = 0 ; slabsizes[i] ; i++ ) {
- int header_size =
- ((sizeof(struct slab_header)+MINIMUM_ALIGN-1) & ~MINIMUM_ALIGN_MASK);
+ int header_size = sizeof(struct slab_header);
int bytes = BUDDY_SIZE_MIN - header_size;
int size = slabsizes[i];
int count = bytes/size;
- int bitmap_size;
+ int data_offset;
while ( 1 ) {
- /* Compute bitmap size in bytes; rounded up to MINIMUM_ALIGN */
- bitmap_size = (count+7) >> 3;
- bitmap_size = (bitmap_size+MINIMUM_ALIGN-1) & ~MINIMUM_ALIGN_MASK;
-
- if ( bitmap_size+count*size <= bytes )
+ /* Compute header+bitmap size in bytes; rounded up to MINIMUM_ALIGN */
+ data_offset = header_size + ((count+7) >> 3);
+ data_offset = ((data_offset+MINIMUM_ALIGN-1) & ~MINIMUM_ALIGN_MASK);
+ if ( data_offset+count*size <= BUDDY_SIZE_MIN )
break;
count--;
@@ -270,7 +271,7 @@ void *objstore_arena_init(void)
ah->slab[i].size = size;
ah->slab[i].count = count;
- ah->slab[i].data_offset = header_size + bitmap_size;
+ ah->slab[i].data_offset = data_offset;
}
/* The bitmasks contains zeroes already; set the free bit on the topmost order. */
@@ -372,13 +373,16 @@ static struct slab_header *objstore_make_new_slab(struct slab_info *si, int inde
sh->free_count = si->count;
sh->slab_info_index = index;
sh->next = si->list;
+ sh->rev = &(si->list);
- slab_bitmap = (unsigned char *)sh +
- ((sizeof(struct slab_header)+MINIMUM_ALIGN-1) & ~MINIMUM_ALIGN_MASK);
+ slab_bitmap = (unsigned char *)(sh+1);
/* Mark all SLABs as available */
memset(slab_bitmap, ~0, bitmap_all_set_bytes);
slab_bitmap[bitmap_all_set_bytes] = (1 << bitmap_bits_left)-1;
+
+ if ( si->list )
+ si->list->rev = &(sh->next);
return (si->list = sh);
}
@@ -417,8 +421,7 @@ static void *objstore_malloc_slab(size_t size)
return NULL; /* Unavailable to allocate new slab */
}
- slab_bitmap = (char *)sh +
- ((sizeof(struct slab_header)+MINIMUM_ALIGN-1) & ~MINIMUM_ALIGN_MASK);
+ slab_bitmap = (unsigned char *)(sh+1);
which = find_set_bit(slab_bitmap, 0, si->count, -1);
assert(which >= 0); /* Otherwise something is bad... */
@@ -428,6 +431,8 @@ static void *objstore_malloc_slab(size_t size)
if ( --(sh->free_count) == 0 ) {
/* We just allocated the last slab, take off the free list */
si->list = sh->next;
+ if ( si->list )
+ si->list->rev = &(si->list);
}
p = (void *)((char *)sh + si->data_offset + which*si->size);
@@ -517,16 +522,9 @@ static void objstore_free_slab(void *ptr)
free_count = sh->free_count;
if ( free_count == si->count-1 ) {
- struct slab_header **shp;
- /* Deallocated the entire page.. SHOULD GIVE IT BACK TO THE BUDDY SYSTEM */
- /**** Need reverse link pointer here -- scanning the whole list like this
- is NOT ACCEPTABLE ****/
+ /* Deallocated the entire page; give back to buddy system */
printf("slab: Returning page %p back to the buddy system\n", sh);
- shp = &(si->list);
- while ( *shp && *shp != sh )
- shp = &((*shp)->next);
- assert(*shp);
- *shp = sh->next;
+ *(sh->rev) = sh->next; /* Remove from free list */
objstore_free_buddy(sh);
} else {
set_bit(slab_bitmap, which);
@@ -535,7 +533,10 @@ static void objstore_free_slab(void *ptr)
/* Page with newly available slabs */
printf("slab: Adding page %p back onto free list for slab size %d\n",
sh, si->size);
+ sh->rev = &(si->list);
sh->next = si->list;
+ if ( si->list )
+ si->list->rev = &(sh->next);
si->list = sh;
}
}