path: root/malloc.c
author    H. Peter Anvin <hpa@zytor.com>  2001-10-20 02:32:11 +0000
committer H. Peter Anvin <hpa@zytor.com>  2001-10-20 02:32:11 +0000
commit    be9d8ab8ec452c46777b2eb4ae925843d7361c7b (patch)
tree      aad187010f9d0f268698c53273a86dc392207eb1 /malloc.c
parent    50cedb0ff19233273ef33cb5b5e8725025a35024 (diff)
Add calloc(), break alloc.c into free/malloc/mgmt files, add
automatic dependency generation.
Diffstat (limited to 'malloc.c')
-rw-r--r--  malloc.c  307
1 files changed, 307 insertions, 0 deletions
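
The calloc() mentioned in the commit message lives outside malloc.c, so it does not
appear in the diff below. A minimal sketch of how such an entry point could be layered
on the lpsm_malloc() defined here, assuming it is named lpsm_calloc() (the name and the
overflow check are illustrative, not taken from this commit):

    #include <string.h>
    #include "lpsm.h"

    void *lpsm_calloc(size_t nmemb, size_t size)
    {
      size_t total = nmemb * size;
      void *p;

      /* Illustrative guard against overflow in nmemb * size */
      if ( size && total / size != nmemb )
        return NULL;

      p = lpsm_malloc(total);   /* dispatches to the slab or buddy allocator */
      if ( p )
        memset(p, 0, total);    /* calloc() semantics: zero the memory */
      return p;
    }
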
diff --git a/malloc.c b/malloc.c
new file mode 100644
index 0000000..e2ae112
--- /dev/null
+++ b/malloc.c
@@ -0,0 +1,307 @@
+#ident "$Id$"
+/* ----------------------------------------------------------------------- *
+ *
+ * Copyright 2000-2001 H. Peter Anvin - All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation, Inc.,
+ * 59 Temple Place Ste 330, Boston MA 02111-1307, USA, version 2.1,
+ * incorporated herein by reference.
+ *
+ * ----------------------------------------------------------------------- */
+
+/*
+ * malloc.c
+ *
+ * Provide malloc() for the persistent memory allocator.
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <inttypes.h>
+#include <limits.h>
+#include <string.h>
+
+#include "lpsm.h"
+#include "internals.h"
+#include "bitops.h"
+
+/*
+ * This function expands a bitmap from oorder orders to norder orders.
+ * It will not operate properly on a bitmap (old or new) that covers
+ * fewer than 3 orders, and it assumes that the nbitmap area has
+ * already been initialized to zero.
+ *
+ * Note that "orders" here refers to orders of bitmap, which is usually
+ * arena_size_lg2 - BUDDY_ORDER_MIN + 1.
+ */
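+/*
+ * Worked example: expanding from oorder = 4 to norder = 6 gives
+ * deltao = 2.  Each old bit 2^o + i (order o, offset i) moves to new
+ * bit 2^(o+deltao) + i, so old bit 1 -> new bit 4 and old bits 2..3 ->
+ * new bits 8..9; for o >= 3 whole bytes are copied, e.g. old byte 1
+ * (order 3) -> new byte 4 (order 5).
+ */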
+static void lpsm_expand_bitmap(void *nbitmap, const void *obitmap,
+                               int norder, int oorder)
+{
+  int deltao, o, i, obit, nbit;
+  int obyte, nbyte;
+
+  assert(norder > oorder);
+  deltao = norder - oorder;
+
+  /* First, expand the orders which have fractional bytes */
+  obit = 1;  nbit = 1 << deltao;
+  for ( o = 0 ; o < 3 ; o++ ) {
+    for ( i = 0 ; i < obit ; i++ ) {
+      if ( test_bit(obitmap, obit+i) )
+        set_bit(nbitmap, nbit+i);
+    }
+    obit <<= 1;  nbit <<= 1;
+  }
+
+  /* Then expand the orders which have complete bytes */
+  obyte = 1;  nbyte = 1 << deltao;
+  for ( o = 3 ; o < oorder ; o++ ) {
+    memcpy((unsigned char *)nbitmap + nbyte,
+           (unsigned char *)obitmap + obyte,
+           obyte);
+    obyte <<= 1;  nbyte <<= 1;
+  }
+}
+
+/*
+ * This function expands the arena to the specified log size.
+ */
+int lpsm_extend_arena(int logsize)
+{
+  size_t new_arena_size = (size_t)1 << logsize;
+  size_t total_size;
+  int ologsize = AH->arena_size_lg2;
+  int bitmap_size;
+  void *newfreemap, *newallocmap;
+  int obp;
+
+  assert(logsize > ologsize);
+
+  DPRINTF(("arena: extending arena to order %d: ", logsize));
+
+  bitmap_size = 1 << (logsize-BUDDY_ORDER_MIN-1);
+  total_size  = AH->header_size + new_arena_size + (bitmap_size << 1);
+  newfreemap  = (char *)AH->data_start + new_arena_size;
+  newallocmap = (char *)newfreemap + bitmap_size;
+
+  if ( lpsm_extend(total_size) < 0 ) {
+    DPRINTF(("failed\n"));
+    return -1;                  /* Extension failed */
+  }
+
+  /* Otherwise, we just extended the area; reconstruct the bitmaps */
+  lpsm_expand_bitmap(newfreemap, AH->free_bitmap,
+                     logsize-BUDDY_ORDER_MIN+1, ologsize-BUDDY_ORDER_MIN+1);
+  lpsm_expand_bitmap(newallocmap, AH->alloc_bitmap,
+                     logsize-BUDDY_ORDER_MIN+1, ologsize-BUDDY_ORDER_MIN+1);
+  AH->free_bitmap    = newfreemap;
+  AH->alloc_bitmap   = newallocmap;
+  AH->arena_size_lg2 = logsize;
+
+  /* Mark the new allocations available */
+  obp = 1 << (logsize-ologsize);
+  if ( test_bit(newfreemap, obp) ) {
+    /* The arena is completely unallocated... */
+    clr_bit(newfreemap, obp);
+    set_bit(newfreemap, 1);
+  } else {
+    /* March up the hierarchy and set the new buddies to available */
+    while ( obp > 1 ) {
+      set_bit(newfreemap, obp^1);
+      obp >>= 1;
+    }
+  }
+
+  DPRINTF(("succeeded.\n"));
+  DPRINTF(("arena: new bitmaps start at %p\n", newfreemap));
+  return 0;
+}
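+
+/*
+ * Worked example for the free-bit update in lpsm_extend_arena() above:
+ * growing from ologsize = 20 to logsize = 22 gives obp = 1 << 2 = 4,
+ * the bit in the new free bitmap that covers exactly the old arena.
+ * If that bit is set, the whole old arena was free, so it is cleared
+ * and bit 1 (the whole new arena) is set free instead.  Otherwise the
+ * newly added regions -- bits 5 and 3, the buddies along the path from
+ * bit 4 up to the root -- are marked free.
+ */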
+
+/*
+ * This function finds a free buddy-system allocation of the specified
+ * rorder and returns its xbit value.  It marks the chunk unavailable in
+ * the free bitmap, but does not mark it as an "allocation" in the
+ * allocation bitmap (the latter is used to determine what size of
+ * free() we need to do).
+ */
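+/*
+ * Note on xbit numbering: the buddy bitmaps are indexed like a binary
+ * heap.  Bit 1 covers the whole arena, bits 2 and 3 its two halves,
+ * bits 4..7 the quarters, and so on; the children of bit x are bits
+ * 2x and 2x+1.  Splitting the free chunk at xbit therefore yields the
+ * fragments 2*xbit and 2*xbit + 1, which is what the recursion below
+ * relies on.
+ */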
+static int lpsm_find_free_chunk(int rorder)
+{
+  int obit, xbit;
+
+  obit = 1 << rorder;
+  xbit = lpsm_find_set_bit(AH->free_bitmap, obit, obit, 0);
+
+  if ( !xbit ) {
+    if ( rorder == 0 )
+      return 0;                 /* We're out */
+    xbit = lpsm_find_free_chunk(rorder-1);
+    if ( !xbit )
+      return 0;                 /* All out of those, sir */
+    DPRINTF(("buddy: splitting rorder %2d %8d -> %d %d\n",
+             rorder-1, xbit, xbit << 1, (xbit << 1)+1));
+    xbit <<= 1;                 /* Convert to address of fragments */
+    set_bit(AH->free_bitmap, xbit+1); /* Upper buddy is available */
+    /* Return the lower buddy */
+  } else {
+    clr_bit(AH->free_bitmap, xbit); /* No longer available */
+  }
+  DPRINTF(("buddy: allocated rorder %2d %8d\n", rorder, xbit));
+  return xbit;
+}
+
+int lpsm_malloc_buddy_xbit(int order)
+{
+  int rorder;
+  int xbit, obit;
+
+  if ( order > AH->arena_size_lg2 ) {
+    if ( lpsm_extend_arena(order+1) )
+      return 0;                 /* Unable to extend arena */
+  }
+
+  /* Convert to order relative to the arena size order */
+  do {
+    rorder = AH->arena_size_lg2 - order;
+    obit = 1 << rorder;
+
+    DPRINTF(("buddy: trying to allocate order %d, rorder = %d\n",
+             order, rorder));
+
+    xbit = lpsm_find_free_chunk(rorder);
+    if ( !xbit ) {
+      /* Need to extend */
+      lpsm_print_statistics();  /* DEBUG */
+      if ( lpsm_extend_arena(AH->arena_size_lg2 + 1) )
+        return 0;               /* Failed to extend */
+    }
+  } while ( !xbit );
+
+  /* Mark this as a proper allocation (used to find the size on free()) */
+  set_bit(AH->alloc_bitmap, xbit);
+
+  return xbit;
+}
+
+void *lpsm_malloc_buddy(size_t size)
+{
+  int order = BUDDY_ORDER_MIN;
+  int xbit, obit;
+  void *p;
+
+  while ( size > ((size_t)1 << order) )
+    order++;
+
+  xbit = lpsm_malloc_buddy_xbit(order);
+  if ( !xbit )
+    return NULL;
+
+  obit = 1 << (AH->arena_size_lg2 - order);
+
+  p = (void *)((char *)AH->data_start + ((uintptr_t)(xbit-obit) << order));
+
+  DPRINTF(("buddy: allocated %u/%u bytes at %p\n",
+           (unsigned int)size, 1U << order, p));
+  return p;
+}
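+
+/*
+ * Worked example for the address computation in lpsm_malloc_buddy()
+ * above: obit = 1 << (arena_size_lg2 - order) is the first xbit of the
+ * requested order, so xbit - obit is the chunk's ordinal within that
+ * order and its byte offset is (xbit - obit) << order.  With
+ * arena_size_lg2 = 22 and order = 12, obit = 1024; xbit = 1027 names
+ * the chunk starting 3 * 4096 bytes into the arena.
+ */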
+
+static struct slab_header *lpsm_make_new_slab(struct slab_info *si, int index)
+{
+  struct slab_header *sh;
+  unsigned char *slab_bitmap;
+  int bitmap_all_set_bytes = si->count >> 3;
+  int bitmap_bits_left = si->count & 7;
+
+  /* Get a page from the buddy system allocator */
+  sh = lpsm_malloc_buddy(BUDDY_SIZE_MIN);
+  if ( !sh )
+    return NULL;
+
+  DPRINTF(("slab: allocating new page for size %d at %p\n", si->size, sh));
+  assert(index == si - &AH->slab[0]);
+
+  sh->magic      = SLAB_MAGIC;
+  sh->free_count = si->count;
+  sh->index      = index;
+  sh->next       = si->list;
+  sh->rev        = &(si->list);
+
+  slab_bitmap = (unsigned char *)(sh+1);
+
+  /* Mark all SLABs as available */
+  memset(slab_bitmap, ~0, bitmap_all_set_bytes);
+  slab_bitmap[bitmap_all_set_bytes] = (1 << bitmap_bits_left)-1;
+
+  if ( sh->next ) {
+    sh->next->rev = &(sh->next);
+    assert(sh->next->index == index);
+  }
+
+  si->total_pages++;
+
+  return (si->list = sh);
+}
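+
+/*
+ * Note on the slab page layout used above and below: each page handed
+ * back by the buddy allocator starts with a struct slab_header,
+ * immediately followed by the availability bitmap (one bit per object,
+ * set = free); the objects themselves begin si->data_offset bytes from
+ * the start of the page.
+ */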
+
+void *lpsm_malloc_slab(size_t size)
+{
+  int i, index;
+  int which;
+  struct slab_info *si;
+  struct slab_header *sh;
+  void *slab_bitmap;
+  void *p;
+
+  /* POSIX sayeth: thou shalt return unique addresses for malloc(0); */
+  if ( size == 0 )
+    size = 1;
+
+  index = 0;
+  si = &AH->slab[0];
+  for ( i = 1 ; i < SLAB_INFO_COUNT ; i++ ) {
+    if ( size <= si[1].size ) {
+      si++;
+      index++;
+    } else
+      break;
+  }
+
+  /* Now si points to the slab_info header we want to allocate from */
+  DPRINTF(("slab: trying to allocate %d bytes, slab size %d bytes\n",
+           (int)size, si->size));
+
+  sh = si->list;
+  if ( !sh ) {
+    /* Empty free list, need a new page */
+    if ( !(sh = lpsm_make_new_slab(si,index)) )
+      return NULL;              /* Unable to allocate a new slab page */
+  }
+
+  /* It *better* be the same kind of slab... */
+  assert(sh->magic == SLAB_MAGIC);
+  assert(sh->index == index);
+
+  slab_bitmap = (unsigned char *)(sh+1);
+
+  which = lpsm_find_set_bit(slab_bitmap, 0, si->count, -1);
+  assert(which >= 0);           /* Otherwise something is bad... */
+
+  clr_bit(slab_bitmap, which);  /* No longer available */
+  si->alloc_slabs++;
+
+  if ( --(sh->free_count) == 0 ) {
+    /* We just allocated the last slab, take it off the free list */
+    si->list = sh->next;
+    if ( sh->next ) {
+      sh->next->rev = &(si->list);
+      assert(sh->next->index == sh->index);
+    }
+  }
+
+  p = (void *)((char *)sh + si->data_offset + which*si->size);
+  DPRINTF(("slab: allocated %d/%d bytes at %p, %d/%d slabs in use\n",
+           (int)size, si->size, p, si->count-sh->free_count, si->count));
+  return p;
+}
+
+void *lpsm_malloc(size_t size)
+{
+  if ( size > AH->slab[0].size )
+    return lpsm_malloc_buddy(size);
+  else
+    return lpsm_malloc_slab(size);
+}
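
For reference, a minimal usage sketch of the allocator defined above. It assumes the
persistent arena has already been mapped and initialized elsewhere by the lpsm library;
only lpsm_malloc() from this file is exercised, and the request sizes are illustrative:

    #include "lpsm.h"

    void example_allocations(void)
    {
      /* A small request is expected to be served from a slab page
         (assuming it does not exceed the largest slab size). */
      void *small = lpsm_malloc(24);

      /* A request larger than the largest slab size falls through to
         the buddy allocator. */
      void *large = lpsm_malloc(64*1024);

      (void)small;
      (void)large;
    }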