author | H. Peter Anvin <hpa@zytor.com> | 2001-10-13 22:38:07 +0000 |
---|---|---|
committer | H. Peter Anvin <hpa@zytor.com> | 2001-10-13 22:38:07 +0000 |
commit | 353840e8c642322c63ce54f9d386bcdf86dc06a4 (patch) | |
tree | 938f8a7d3289af01893be0c985960dfba9f78f69 | |
parent | a0c984c1b205b01cfe5849080b116022a0cdebe6 (diff) | |
download | lpsm-353840e8c642322c63ce54f9d386bcdf86dc06a4.tar.gz lpsm-353840e8c642322c63ce54f9d386bcdf86dc06a4.tar.xz lpsm-353840e8c642322c63ce54f9d386bcdf86dc06a4.zip |
Now we have functioning extension of the arena.
-rw-r--r-- | alloc.c | 174 |
1 files changed, 147 insertions, 27 deletions
```diff
@@ -57,7 +57,7 @@ struct slab_info {
 struct slab_header {
   uint32_t magic;
   uint16_t free_count;
-  uint16_t slab_info_index;
+  uint16_t index;       /* Index into slab_info array */
   struct slab_header *next, **rev;
 };
 
@@ -98,7 +98,7 @@ struct arena_header {
 
 /* This is the size (2^n) allocated for a brand new arena.  This
    allocation will be sparse, so don't freak out too much! */
-#define ARENA_START_SIZE_LG2   (2*BUDDY_ORDER_MIN+1)   /* 32 MB */
+#define ARENA_START_SIZE_LG2   (2*BUDDY_ORDER_MIN)     /* 16 MB */
 #define ARENA_START_SIZE       (1UL << ARENA_START_SIZE_LG2)
 
 #define HEADER_SIZE            BUDDY_SIZE_MIN  /* Default size of the header */
@@ -258,7 +258,7 @@ void *objstore_arena_init(void)
   ah->buddy_order_min = BUDDY_ORDER_MIN;
   ah->arena_size_lg2  = ARENA_START_SIZE_LG2;
 
-  bitmap_size = 1UL << (ARENA_START_SIZE_LG2-BUDDY_ORDER_MIN-2);
+  bitmap_size = 1UL << (ARENA_START_SIZE_LG2-BUDDY_ORDER_MIN-1);
   total_size  = HEADER_SIZE + ARENA_START_SIZE + (bitmap_size << 1);
   ah->free_bitmap  = (char *)ah + HEADER_SIZE + ARENA_START_SIZE;
   ah->alloc_bitmap = (char *)ah->free_bitmap + bitmap_size;
@@ -305,6 +305,103 @@ void *objstore_arena_init(void)
   return ah->data_start;
 }
 
+/*
+ * This function expands a bitmap from oorder orders to norder orders.
+ * This will not operate properly on a bitmap (old or new) that covers
+ * less than 3 orders.  This assumes that the nbitmap area is already
+ * initalized to zero.
+ *
+ * Note that "orders" here refers to orders of bitmap, which is usually
+ * arena_size_lg2 - BUDDY_ORDER_MIN + 1.
+ */
+static void objstore_expand_bitmap(void *nbitmap, const void *obitmap, int norder, int oorder)
+{
+  int deltao, o, i, obit, nbit;
+  int obyte, nbyte;
+
+  assert(norder > oorder);
+  deltao = norder - oorder;
+
+  /* First, expand the orders which have fractional bytes */
+  obit = 1;  nbit = 1 << deltao;
+  for ( o = 0 ; o < 3 ; o++ ) {
+    for ( i = 0 ; i < obit ; i++ ) {
+      if ( test_bit(obitmap, obit+i) )
+        set_bit(nbitmap, nbit+i);
+    }
+    obit <<= 1;  nbit <<= 1;
+  }
+
+  /* Then expand the orders which have complete bytes */
+  obyte = 1;  nbyte = 1 << deltao;
+  for ( o = 3 ; o < oorder ; o++ ) {
+    memcpy((unsigned char *)nbitmap + nbyte,
+           (unsigned char *)obitmap + obyte,
+           obyte);
+    obyte <<= 1;  nbyte <<= 1;
+  }
+}
+
+/*
+ * This function expands the arena to the specified log size.
+ */
+static int objstore_extend_arena(int logsize)
+{
+  size_t new_arena_size = (size_t)1 << logsize;
+  size_t total_size;
+  int ologsize = ah->arena_size_lg2;
+  int bitmap_size;
+  void *newfreemap, *newallocmap;
+  int obp;
+
+  assert(logsize > ologsize);
+
+  DPRINTF(("arena: extending arena to order %d: ", logsize));
+
+  bitmap_size = 1 << (logsize-BUDDY_ORDER_MIN-1);
+  total_size  = HEADER_SIZE + new_arena_size + (bitmap_size << 1);
+  newfreemap  = (char *)ah + HEADER_SIZE + new_arena_size;
+  newallocmap = (char *)newfreemap + bitmap_size;
+
+  if ( objstore_extend(total_size) < 0 ) {
+    DPRINTF(("failed\n"));
+    return -1;                  /* Extension failed */
+  }
+
+  /* Otherwise, we just extended the area; reconstruct the bitmaps */
+  objstore_expand_bitmap(newfreemap, ah->free_bitmap,
+                         logsize-BUDDY_ORDER_MIN+1, ologsize-BUDDY_ORDER_MIN+1);
+  objstore_expand_bitmap(newallocmap, ah->alloc_bitmap,
+                         logsize-BUDDY_ORDER_MIN+1, ologsize-BUDDY_ORDER_MIN+1);
+  ah->free_bitmap  = newfreemap;
+  ah->alloc_bitmap = newallocmap;
+  ah->arena_size_lg2 = logsize;
+
+  /* Mark the new allocations available */
+  obp = 1 << (logsize-ologsize);
+  if ( test_bit(newfreemap, obp) ) {
+    /* The arena is completely unallocated... */
+    clr_bit(newfreemap, obp);
+    set_bit(newfreemap, 1);
+  } else {
+    /* March up the hierarchy and set the new buddies to available */
+    while ( obp > 1 ) {
+      set_bit(newfreemap, obp^1);
+      obp >>= 1;
+    }
+  }
+
+  DPRINTF(("succeeded.\n"));
+  DPRINTF(("arena: new bitmaps start at %p\n", newfreemap));
+  return 0;
+}
+
+/*
+ * This function finds a free buddy-system allocation of the specified rorder,
+ * and returns its xbit value.  It does mark it unavailable in the free bitmap,
+ * but does not mark it as an "allocation" in the allocation bitmap (the latter
+ * is used to determine what size free() we need to do.
+ */
 static int objstore_find_free_chunk(int rorder)
 {
   int obit, xbit;
@@ -336,26 +433,29 @@ static void *objstore_malloc_buddy(size_t size)
   int xbit, obit;
   void *p;
 
-  while ( size > (1UL << order) )
+  while ( size > ((size_t)1 << order) )
     order++;
 
   /* Now we know the order */
   if ( order > ah->arena_size_lg2 ) {
-    /* Need to extend immediately */
-    return NULL;                /* Not implemented yet */
+    if ( objstore_extend_arena(order+1) )
+      return NULL;              /* Unable to extend arena */
   }
 
   /* Convert to order relative to the arena size order */
-  rorder = ah->arena_size_lg2 - order;
-  obit = 1 << rorder;
+  do {
+    rorder = ah->arena_size_lg2 - order;
+    obit = 1 << rorder;
 
-  DPRINTF(("buddy: trying to allocate %d bytes, rorder = %d\n", size, rorder));
+    DPRINTF(("buddy: trying to allocate %d bytes, rorder = %d\n", size, rorder));
 
-  xbit = objstore_find_free_chunk(rorder);
-  if ( !xbit ) {
-    /* Need to extend */
-    return NULL;                /* Not implemented yet */
-  }
+    xbit = objstore_find_free_chunk(rorder);
+    if ( !xbit ) {
+      /* Need to extend */
+      if ( objstore_extend_arena(ah->arena_size_lg2 + 1) )
+        return NULL;            /* Failed to extend */
+    }
+  } while ( !xbit );
 
   /* Mark this as a proper allocation (used to find the size on free()) */
   set_bit(ah->alloc_bitmap, xbit);
@@ -379,12 +479,13 @@ static struct slab_header *objstore_make_new_slab(struct slab_info *si, int inde
     return NULL;
 
   DPRINTF(("slab: allocating new page for size %d at %p\n", si->size, sh));
+  assert(index == si - &ah->slab[0]);
 
-  sh->magic           = SLAB_MAGIC;
-  sh->free_count      = si->count;
-  sh->slab_info_index = index;
-  sh->next            = si->list;
-  sh->rev             = &(si->list);
+  sh->magic      = SLAB_MAGIC;
+  sh->free_count = si->count;
+  sh->index      = index;
+  sh->next       = si->list;
+  sh->rev        = &(si->list);
 
   slab_bitmap = (unsigned char *)(sh+1);
 
@@ -392,8 +493,10 @@ static struct slab_header *objstore_make_new_slab(struct slab_info *si, int inde
   memset(slab_bitmap, ~0, bitmap_all_set_bytes);
   slab_bitmap[bitmap_all_set_bytes] = (1 << bitmap_bits_left)-1;
 
-  if ( si->list )
-    si->list->rev = &(sh->next);
+  if ( sh->next ) {
+    sh->next->rev = &(sh->next);
+    assert(sh->next->index == index);
+  }
 
   return (si->list = sh);
 }
@@ -430,6 +533,10 @@ static void *objstore_malloc_slab(size_t size)
     if ( !(sh = objstore_make_new_slab(si,index)) )
       return NULL;              /* Unavailable to allocate new slab */
   }
+
+  /* It *better* be the same kind of slab... */
+  assert(sh->magic == SLAB_MAGIC);
+  assert(sh->index == index);
 
   slab_bitmap = (unsigned char *)(sh+1);
 
@@ -441,8 +548,10 @@
   if ( --(sh->free_count) == 0 ) {
     /* We just allocated the last slab, take off the free list */
     si->list = sh->next;
-    if ( si->list )
-      si->list->rev = &(si->list);
+    if ( sh->next ) {
+      sh->next->rev = &(si->list);
+      assert(sh->next->index == sh->index);
+    }
   }
 
   p = (void *)((char *)sh + si->data_offset + which*si->size);
@@ -516,7 +625,7 @@ static void objstore_free_slab(void *ptr)
   /* Slab header should be at the start of the page */
   sh = (struct slab_header *)((uintptr_t)ptr & ~(BUDDY_SIZE_MIN-1));
   assert(sh->magic == SLAB_MAGIC);
-  si = &ah->slab[sh->slab_info_index];
+  si = &ah->slab[sh->index];
 
   slab_bitmap = (unsigned char *)sh +
     ((sizeof(struct slab_header)+MINIMUM_ALIGN-1) & ~MINIMUM_ALIGN_MASK);
@@ -534,18 +643,29 @@
     /* Deallocated the entire page; give back to buddy system */
     DPRINTF(("slab: Returning page %p back to the buddy system\n", sh));
     *(sh->rev) = sh->next;      /* Remove from free list */
+    if ( sh->next ) {
+      sh->next->rev = sh->rev;
+      assert(sh->next->index == sh->index);
+    }
+#ifndef NDEBUG
+    /* This unfortunately makes the page dirty */
+    memset(sh, 0, BUDDY_SIZE_MIN);
+#endif
     objstore_free_buddy(sh);
   } else {
     set_bit(slab_bitmap, which);
     sh->free_count = free_count+1;
     if ( free_count == 0 ) {
-      /* Page with newly available slabs */
+      /* Page is no longer full, add to free list */
      DPRINTF(("slab: Adding page %p back onto free list for slab size %d\n",
               sh, si->size));
+      assert(sh->index == si - &ah->slab[0]);
      sh->rev  = &(si->list);
      sh->next = si->list;
-     if ( si->list )
-       si->list->rev = &(sh->next);
+     if ( sh->next ) {
+       sh->next->rev = &(sh->next);
+       assert(sh->next->index == sh->index);
+     }
      si->list = sh;
    }
  }
```
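
The new objstore_expand_bitmap() rests on one index mapping: the free/alloc bitmaps are heap-ordered, with bit 1 covering the whole arena and bit b having the two half-sized buddies 2b and 2b+1, so growing the arena by deltao doublings moves the old node at bit (1 << o) + i (depth o, offset i) to bit (1 << (o + deltao)) + i. The following is a minimal standalone sketch of that mapping, written bit-by-bit for clarity; it assumes a plain byte-array bitmap where bit n lives in byte n >> 3, which may not match the actual test_bit()/set_bit() helpers in lpsm.

```c
#include <assert.h>
#include <stdio.h>

/* Hypothetical helpers for a plain byte-array bitmap (bit n = byte n>>3,
   bit n&7); the real lpsm test_bit()/set_bit() may be defined differently. */
static int  test_bit(const unsigned char *map, int n) { return (map[n >> 3] >> (n & 7)) & 1; }
static void set_bit(unsigned char *map, int n)        { map[n >> 3] |= (unsigned char)(1 << (n & 7)); }

/* Bit-by-bit version of the expansion: the node at depth o, offset i
   (old bit (1<<o)+i) lands at depth o+deltao (new bit (1<<(o+deltao))+i).
   objstore_expand_bitmap() does the same thing, but switches to memcpy()
   once an entire depth occupies whole bytes (o >= 3). */
static void expand_bitmap(unsigned char *nmap, const unsigned char *omap,
                          int norder, int oorder)
{
  int deltao = norder - oorder;
  int o, i;

  assert(deltao > 0);
  for ( o = 0 ; o < oorder ; o++ )
    for ( i = 0 ; i < (1 << o) ; i++ )
      if ( test_bit(omap, (1 << o) + i) )
        set_bit(nmap, (1 << (o + deltao)) + i);
}

int main(void)
{
  /* Old map: 3 levels (bit 1 = whole arena, bits 2-3 = halves, 4-7 = quarters);
     new map: 4 levels after one doubling (deltao = 1). */
  unsigned char omap[1] = { 0 }, nmap[2] = { 0, 0 };

  set_bit(omap, 3);                 /* right half of the old arena is free */
  expand_bitmap(nmap, omap, 4, 3);

  assert(test_bit(nmap, 5));        /* ...which is now the second quarter */
  printf("old bit 3 -> new bit 5\n");
  return 0;
}
```

With deltao = 1 the old root (bit 1) lands on bit 2, i.e., the old arena becomes the lower half of the doubled arena, which is exactly the node objstore_extend_arena() inspects afterwards as obp.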
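
After the bitmaps are rebuilt, objstore_extend_arena() still has to mark the newly created space free. The old root lands on bit obp = 1 << (logsize - ologsize); if that bit is set, the whole old arena was free and the entire new arena can be represented by setting bit 1 alone, otherwise every buddy obp^1 on the path from obp up to the root is space that did not exist before and is marked available. A small sketch of that walk, using a hypothetical growth from 2^24 to 2^26 bytes:

```c
#include <stdio.h>

/* Print the buddy-bitmap nodes that objstore_extend_arena() marks free when
   the old arena was not entirely free.  The old root now sits at node
   obp = 1 << (logsize - ologsize); each sibling obp^1 on the way up to the
   root is newly created space. */
static void show_new_buddies(int logsize, int ologsize)
{
  int obp = 1 << (logsize - ologsize);

  printf("old arena root is now node %d\n", obp);
  while ( obp > 1 ) {
    printf("  set_bit(free_bitmap, %d)   /* buddy of node %d */\n", obp ^ 1, obp);
    obp >>= 1;
  }
}

int main(void)
{
  /* Hypothetical: grow a 2^24-byte arena to 2^26 bytes (two doublings).
     The old arena becomes node 4, so node 5 (the other 2^24 quarter) and
     node 3 (the new upper 2^25 half) are marked free. */
  show_new_buddies(26, 24);
  return 0;
}
```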