Home | History | Annotate | Line # | Download | only in stdlib
malloc.c revision 1.53
      1 /*	$NetBSD: malloc.c,v 1.53 2011/05/13 23:11:00 christos Exp $	*/
      2 
      3 /*
      4  * ----------------------------------------------------------------------------
      5  * "THE BEER-WARE LICENSE" (Revision 42):
      6  * <phk (at) FreeBSD.ORG> wrote this file.  As long as you retain this notice you
      7  * can do whatever you want with this stuff. If we meet some day, and you think
      8  * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
      9  * ----------------------------------------------------------------------------
     10  *
     11  * From FreeBSD: malloc.c,v 1.91 2006/01/12 07:28:20 jasone
     12  *
     13  */
     14 
     15 /*
     16  * Defining MALLOC_EXTRA_SANITY will enable extra checks which are related
     17  * to internal conditions and consistency in malloc.c. This has a
     18  * noticeable runtime performance hit, and generally will not do you
     19  * any good unless you fiddle with the internals of malloc or want
     20  * to catch random pointer corruption as early as possible.
     21  */
     22 #ifndef MALLOC_EXTRA_SANITY
     23 #undef MALLOC_EXTRA_SANITY
     24 #endif
     25 
     26 /*
     27  * What to use for Junk.  This is the byte value we use to fill with
     28  * when the 'J' option is enabled.
     29  */
     30 #define SOME_JUNK	0xd0		/* as in "Duh" :-) */
     31 
     32 /*
     33  * The basic parameters you can tweak.
     34  *
     35  * malloc_minsize	minimum size of an allocation in bytes.
     36  *			If this is too small it's too much work
     37  *			to manage them.  This is also the smallest
     38  *			unit of alignment used for the storage
     39  *			returned by malloc/realloc.
     40  *
     41  */
     42 
     43 #include "namespace.h"
     44 #if defined(__FreeBSD__)
     45 #   if defined(__i386__)
     46 #       define malloc_minsize		16U
     47 #   endif
     48 #   if defined(__ia64__)
     49 #	define malloc_pageshift		13U
     50 #	define malloc_minsize		16U
     51 #   endif
     52 #   if defined(__alpha__)
     53 #       define malloc_pageshift		13U
     54 #       define malloc_minsize		16U
     55 #   endif
     56 #   if defined(__sparc64__)
     57 #       define malloc_pageshift		13U
     58 #       define malloc_minsize		16U
     59 #   endif
     60 #   if defined(__amd64__)
     61 #       define malloc_pageshift		12U
     62 #       define malloc_minsize		16U
     63 #   endif
     64 #   if defined(__arm__)
     65 #       define malloc_pageshift         12U
     66 #       define malloc_minsize           16U
     67 #   endif
     68 #   define HAS_UTRACE
     69 #   define UTRACE_LABEL
     70 
     71 #include <sys/cdefs.h>
     72 void utrace(struct ut *, int);
     73 
     74     /*
     75      * Make malloc/free/realloc thread-safe in libc for use with
     76      * kernel threads.
     77      */
     78 #   include "libc_private.h"
     79 #   include "spinlock.h"
     80     static spinlock_t thread_lock	= _SPINLOCK_INITIALIZER;
     81 #   define _MALLOC_LOCK()		if (__isthreaded) _SPINLOCK(&thread_lock);
     82 #   define _MALLOC_UNLOCK()		if (__isthreaded) _SPINUNLOCK(&thread_lock);
     83 #endif /* __FreeBSD__ */
     84 
     85 #include <sys/types.h>
     86 #if defined(__NetBSD__)
     87 #   define malloc_minsize               16U
     88 #   define HAS_UTRACE
     89 #   define UTRACE_LABEL "malloc",
     90 #include <sys/cdefs.h>
     91 #include "extern.h"
     92 #if defined(LIBC_SCCS) && !defined(lint)
     93 __RCSID("$NetBSD: malloc.c,v 1.53 2011/05/13 23:11:00 christos Exp $");
     94 #endif /* LIBC_SCCS and not lint */
     95 int utrace(const char *, void *, size_t);
     96 
     97 #include <reentrant.h>
     98 extern int __isthreaded;
     99 static mutex_t thread_lock = MUTEX_INITIALIZER;
    100 #define _MALLOC_LOCK()	if (__isthreaded) mutex_lock(&thread_lock);
    101 #define _MALLOC_UNLOCK()	if (__isthreaded) mutex_unlock(&thread_lock);
    102 #endif /* __NetBSD__ */
    103 
    104 #if defined(__sparc__) && defined(sun)
    105 #   define malloc_minsize		16U
    106 #   define MAP_ANON			(0)
    107     static int fdzero;
    108 #   define MMAP_FD	fdzero
    109 #   define INIT_MMAP() \
    110 	{ if ((fdzero = open(_PATH_DEVZERO, O_RDWR, 0000)) == -1) \
    111 	    wrterror("open of /dev/zero"); }
    112 #endif /* __sparc__ */
    113 
    114 /* Insert your combination here... */
    115 #if defined(__FOOCPU__) && defined(__BAROS__)
    116 #   define malloc_minsize		16U
    117 #endif /* __FOOCPU__ && __BAROS__ */
    118 
    119 #ifndef ZEROSIZEPTR
    120 #define ZEROSIZEPTR	((void *)(uintptr_t)(1UL << (malloc_pageshift - 1)))
    121 #endif
    122 
    123 /*
    124  * No user serviceable parts behind this point.
    125  */
    126 #include <sys/types.h>
    127 #include <sys/mman.h>
    128 #include <errno.h>
    129 #include <fcntl.h>
    130 #include <paths.h>
    131 #include <stddef.h>
    132 #include <stdio.h>
    133 #include <stdlib.h>
    134 #include <string.h>
    135 #include <unistd.h>
    136 
/*
 * This structure describes a page worth of chunks (sub-page
 * allocations).  A set bit in bits[] means the corresponding chunk
 * is free; bits are cleared on allocation and set again on free.
 */

struct pginfo {
    struct pginfo	*next;	/* next on the free list */
    void		*page;	/* Pointer to the page */
    u_short		size;	/* size of this page's chunks */
    u_short		shift;	/* How far to shift for this size chunks */
    u_short		free;	/* How many free chunks */
    u_short		total;	/* How many chunks in total */
    u_int		bits[1]; /* Which chunks are free (set bit = free) */
};
    150 
/*
 * This structure describes a run of contiguous free pages.  The runs
 * are kept on a doubly-linked list, sorted by address and coalesced
 * with their neighbours in free_pages().
 */

struct pgfree {
    struct pgfree	*next;	/* next run of free pages */
    struct pgfree	*prev;	/* prev run of free pages */
    void		*page;	/* pointer to free pages */
    void		*end;	/* pointer to end of free pages */
    size_t		size;	/* number of bytes free */
};
    162 
    163 /*
    164  * How many bits per u_int in the bitmap.
    165  * Change only if not 8 bits/byte
    166  */
    167 #define	MALLOC_BITS	((int)(8*sizeof(u_int)))
    168 
    169 /*
    170  * Magic values to put in the page_directory
    171  */
    172 #define MALLOC_NOT_MINE	((struct pginfo*) 0)
    173 #define MALLOC_FREE 	((struct pginfo*) 1)
    174 #define MALLOC_FIRST	((struct pginfo*) 2)
    175 #define MALLOC_FOLLOW	((struct pginfo*) 3)
    176 #define MALLOC_MAGIC	((struct pginfo*) 4)
    177 
    178 /*
    179  * Page size related parameters, computed at run-time.
    180  */
    181 static size_t malloc_pagesize;
    182 static size_t malloc_pageshift;
    183 static size_t malloc_pagemask;
    184 
    185 #ifndef malloc_minsize
    186 #define malloc_minsize			16U
    187 #endif
    188 
    189 #ifndef malloc_maxsize
    190 #define malloc_maxsize			((malloc_pagesize)>>1)
    191 #endif
    192 
    193 #define pageround(foo) (((foo) + (malloc_pagemask))&(~(malloc_pagemask)))
    194 #define ptr2idx(foo) \
    195     (((size_t)(uintptr_t)(foo) >> malloc_pageshift)-malloc_origo)
    196 
    197 #ifndef _MALLOC_LOCK
    198 #define _MALLOC_LOCK()
    199 #endif
    200 
    201 #ifndef _MALLOC_UNLOCK
    202 #define _MALLOC_UNLOCK()
    203 #endif
    204 
    205 #ifndef MMAP_FD
    206 #define MMAP_FD (-1)
    207 #endif
    208 
    209 #ifndef INIT_MMAP
    210 #define INIT_MMAP()
    211 #endif
    212 
    213 #ifndef MADV_FREE
    214 #define MADV_FREE MADV_DONTNEED
    215 #endif
    216 
    217 /* Number of free pages we cache */
    218 static size_t malloc_cache = 16;
    219 
    220 /* The offset from pagenumber to index into the page directory */
    221 static size_t malloc_origo;
    222 
    223 /* The last index in the page directory we care about */
    224 static size_t last_idx;
    225 
    226 /* Pointer to page directory. Allocated "as if with" malloc */
    227 static struct	pginfo **page_dir;
    228 
    229 /* How many slots in the page directory */
    230 static size_t	malloc_ninfo;
    231 
    232 /* Free pages line up here */
    233 static struct pgfree free_list;
    234 
    235 /* Abort(), user doesn't handle problems.  */
    236 static int malloc_abort;
    237 
    238 /* Are we trying to die ?  */
    239 static int suicide;
    240 
    241 /* always realloc ?  */
    242 static int malloc_realloc;
    243 
    244 /* pass the kernel a hint on free pages ?  */
    245 #if defined(MADV_FREE)
    246 static int malloc_hint = 0;
    247 #endif
    248 
    249 /* xmalloc behaviour ?  */
    250 static int malloc_xmalloc;
    251 
    252 /* sysv behaviour for malloc(0) ?  */
    253 static int malloc_sysv;
    254 
    255 /* zero fill ?  */
    256 static int malloc_zero;
    257 
    258 /* junk fill ?  */
    259 static int malloc_junk;
    260 
    261 #ifdef HAS_UTRACE
    262 
    263 /* utrace ?  */
    264 static int malloc_utrace;
    265 
    266 struct ut { void *p; size_t s; void *r; };
    267 
    268 #define UTRACE(a, b, c) \
    269 	if (malloc_utrace) {			\
    270 		struct ut u;			\
    271 		u.p=a; u.s = b; u.r=c;		\
    272 		utrace(UTRACE_LABEL (void *) &u, sizeof u);	\
    273 	}
    274 #else /* !HAS_UTRACE */
    275 #define UTRACE(a,b,c)
    276 #endif /* HAS_UTRACE */
    277 
    278 /* my last break. */
    279 static void *malloc_brk;
    280 
    281 /* one location cache for free-list holders */
    282 static struct pgfree *px;
    283 
    284 /* compile-time options */
    285 const char *_malloc_options;
    286 
    287 /* Name of the current public function */
    288 static const char *malloc_func;
    289 
/*
 * Macro for mmap: allocate `size' bytes of anonymous, private,
 * read/write memory.  Evaluates to the mmap(2) return value
 * (MAP_FAILED on error).  No trailing semicolon: the previous
 * definition ended in `;', which produced a stray empty statement at
 * every call site and would break use inside expressions or
 * unbraced if/else bodies.
 */
#define MMAP(size) \
	mmap(NULL, (size), PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, \
	    MMAP_FD, (off_t)0)
    295 /*
    296  * Necessary function declarations
    297  */
    298 static int extend_pgdir(size_t idx);
    299 static void *imalloc(size_t size);
    300 static void ifree(void *ptr);
    301 static void *irealloc(void *ptr, size_t size);
    302 
/*
 * Default diagnostic writer: emit the four message fragments, in
 * order, straight to standard error (no stdio buffering involved).
 */
static void
wrtmessage(const char *p1, const char *p2, const char *p3, const char *p4)
{
    const char *frag[4];
    int n;

    frag[0] = p1;
    frag[1] = p2;
    frag[2] = p3;
    frag[3] = p4;
    for (n = 0; n < 4; n++)
	write(STDERR_FILENO, frag[n], strlen(frag[n]));
}
    312 
/* Exported diagnostic hook; defaults to the raw-stderr writer above. */
void (*_malloc_message)(const char *p1, const char *p2, const char *p3,
	    const char *p4) = wrtmessage;
/*
 * Report a fatal malloc error and terminate the process.
 * `suicide' is set first so that any malloc/free re-entered while
 * dying (e.g. via abort handlers) aborts immediately instead of
 * touching corrupted state.
 */
static void
wrterror(const char *p)
{

    suicide = 1;
    _malloc_message(getprogname(), malloc_func, " error: ", p);
    abort();
}
    323 
/*
 * Report a non-fatal inconsistency.  Escalated to a fatal error when
 * the 'A' (abort) option is set or the process is "sensitive".
 */
static void
wrtwarning(const char *p)
{

    /*
     * Sensitive processes, somewhat arbitrarily defined here as setuid,
     * setgid, root and wheel cannot afford to have malloc mistakes.
     */
    if (malloc_abort || issetugid() || getuid() == 0 || getgid() == 0)
	wrterror(p);
}
    335 
/*
 * Allocate a number of pages from the OS via sbrk(2).
 * Returns a page-aligned pointer to `pages' fresh pages, or NULL with
 * errno set on failure.  On success malloc_brk and last_idx are
 * updated and the page directory is grown if the new high page does
 * not fit in it.
 */
static void *
map_pages(size_t pages)
{
    caddr_t result, rresult, tail;
    intptr_t bytes = pages << malloc_pageshift;

    /* Reject requests where the byte count overflowed the shift. */
    if (bytes < 0 || (size_t)bytes < pages) {
	errno = ENOMEM;
	return NULL;
    }

    if ((result = sbrk(bytes)) == (void *)-1)
	return NULL;

    /*
     * Round to a page, in case sbrk(2) did not do this for us
     */
    rresult = (caddr_t)pageround((size_t)(uintptr_t)result);
    if (result < rresult) {
	/* make sure we have enough space to fit bytes */
	if (sbrk((intptr_t)(rresult - result)) == (void *) -1) {
	    /* we failed, put everything back */
	    if (brk(result)) {
		wrterror("brk(2) failed [internal error]\n");
	    }
	}
    }
    tail = rresult + (size_t)bytes;

    last_idx = ptr2idx(tail) - 1;
    malloc_brk = tail;

    /* Grow the page directory if the new pages fall outside it. */
    if ((last_idx+1) >= malloc_ninfo && !extend_pgdir(last_idx)) {
	malloc_brk = result;
	last_idx = ptr2idx(malloc_brk) - 1;
	/* Put back break point since we failed. */
	if (brk(malloc_brk))
	    wrterror("brk(2) failed [internal error]\n");
	return 0;
    }

    return rresult;
}
    382 
/*
 * Extend the page directory so it can describe page index `idx'.
 * Allocates a fresh, larger anonymous mapping, copies the old
 * directory into it and unmaps the old one.  Returns 1 on success,
 * 0 on failure (errno = ENOMEM for the overflow case).
 */
static int
extend_pgdir(size_t idx)
{
    struct  pginfo **new, **old;
    size_t newlen, oldlen;

    /* check for overflow */
    if ((((~(1UL << ((sizeof(size_t) * NBBY) - 1)) / sizeof(*page_dir)) + 1)
	+ (malloc_pagesize / sizeof *page_dir)) < idx) {
	errno = ENOMEM;
	return 0;
    }

    /* Make it this many pages */
    newlen = pageround(idx * sizeof *page_dir) + malloc_pagesize;

    /* remember the old mapping size */
    oldlen = malloc_ninfo * sizeof *page_dir;

    /*
     * NOTE: we allocate new pages and copy the directory rather than tempt
     * fate by trying to "grow" the region.. There is nothing to prevent
     * us from accidentally re-mapping space that's been allocated by our caller
     * via dlopen() or other mmap().
     *
     * The copy problem is not too bad, as there is 4K of page index per
     * 4MB of malloc arena.
     *
     * We can totally avoid the copy if we open a file descriptor to associate
     * the anon mappings with.  Then, when we remap the pages at the new
     * address, the old pages will be "magically" remapped..  But this means
     * keeping open a "secret" file descriptor.....
     */

    /* Get new pages */
    new = MMAP(newlen);
    if (new == MAP_FAILED)
	return 0;

    /* Copy the old stuff */
    memcpy(new, page_dir, oldlen);

    /* register the new size */
    malloc_ninfo = newlen / sizeof *page_dir;

    /* swap the pointers */
    old = page_dir;
    page_dir = new;

    /* Now free the old stuff */
    munmap(old, oldlen);
    return 1;
}
    439 
/*
 * Initialize the world: compute the page-size parameters, parse the
 * option flags from /etc/malloc.conf, MALLOC_OPTIONS and
 * _malloc_options (in that order, later sources overriding earlier
 * ones), allocate the initial page directory and prime the free-list
 * holder cache.  Called lazily before the first allocation.
 */
static void
malloc_init(void)
{
    const char *p;
    char b[64];
    size_t i;
    ssize_t j;
    int save_errno = errno;

    /*
     * Compute page-size related variables.
     */
    malloc_pagesize = (size_t)sysconf(_SC_PAGESIZE);
    malloc_pagemask = malloc_pagesize - 1;
    for (malloc_pageshift = 0;
	 (1UL << malloc_pageshift) != malloc_pagesize;
	 malloc_pageshift++)
	/* nothing */ ;

    INIT_MMAP();

#ifdef MALLOC_EXTRA_SANITY
    malloc_junk = 1;
#endif /* MALLOC_EXTRA_SANITY */

    /* i == 0: /etc/malloc.conf, i == 1: environment, i == 2: compiled-in */
    for (i = 0; i < 3; i++) {
	if (i == 0) {
	    int serrno = errno;
	    j = readlink("/etc/malloc.conf", b, sizeof b - 1);
	    if (j == -1) {
		errno = serrno;
		continue;
	    }
	    b[j] = '\0';
	    p = b;
	} else if (i == 1 && issetugid() == 0) {
	    /* Never trust the environment of set-id programs. */
	    p = getenv("MALLOC_OPTIONS");
	} else if (i == 1) {
	    continue;
	} else {
	    p = _malloc_options;
	}
	for (; p != NULL && *p != '\0'; p++) {
	    switch (*p) {
		case '>': malloc_cache   <<= 1; break;
		case '<': malloc_cache   >>= 1; break;
		case 'a': malloc_abort   = 0; break;
		case 'A': malloc_abort   = 1; break;
		case 'h': malloc_hint    = 0; break;
		case 'H': malloc_hint    = 1; break;
		case 'r': malloc_realloc = 0; break;
		case 'R': malloc_realloc = 1; break;
		case 'j': malloc_junk    = 0; break;
		case 'J': malloc_junk    = 1; break;
#ifdef HAS_UTRACE
		case 'u': malloc_utrace  = 0; break;
		case 'U': malloc_utrace  = 1; break;
#endif
		case 'v': malloc_sysv    = 0; break;
		case 'V': malloc_sysv    = 1; break;
		case 'x': malloc_xmalloc = 0; break;
		case 'X': malloc_xmalloc = 1; break;
		case 'z': malloc_zero    = 0; break;
		case 'Z': malloc_zero    = 1; break;
		default:
		    _malloc_message(getprogname(), malloc_func,
			 " warning: ", "unknown char in MALLOC_OPTIONS\n");
		    break;
	    }
	}
    }

    UTRACE(0, 0, 0);

    /*
     * We want junk in the entire allocation, and zero only in the part
     * the user asked for.
     */
    if (malloc_zero)
	malloc_junk = 1;

    /* Allocate one page for the page directory */
    page_dir = MMAP(malloc_pagesize);

    if (page_dir == MAP_FAILED)
	wrterror("mmap(2) failed, check limits.\n");

    /*
     * We need a maximum of malloc_pageshift buckets, steal these from the
     * front of the page_directory.
     */
    malloc_origo = pageround((size_t)(uintptr_t)sbrk((intptr_t)0))
	>> malloc_pageshift;
    malloc_origo -= malloc_pageshift;

    malloc_ninfo = malloc_pagesize / sizeof *page_dir;

    /* Recalculate the cache size in bytes, and make sure it's nonzero */

    if (!malloc_cache)
	malloc_cache++;

    malloc_cache <<= malloc_pageshift;

    /*
     * This is a nice hack from Kaleb Keithly (kaleb (at) x.org).
     * We can sbrk(2) further back when we keep this on a low address.
     */
    px = imalloc(sizeof *px);

    errno = save_errno;
}
    555 
/*
 * Allocate a number of complete pages.
 * The request is rounded up to whole pages; the cached free-page runs
 * are searched first and new pages are mapped from the OS only when no
 * cached run is large enough.  Returns NULL with errno set on failure.
 */
static void *
malloc_pages(size_t size)
{
    void *p, *delay_free = NULL;
    size_t i;
    struct pgfree *pf;
    size_t idx;

    idx = pageround(size);
    if (idx < size) {
	/* pageround() wrapped around: request is too large. */
	errno = ENOMEM;
	return NULL;
    } else
	size = idx;

    p = NULL;

    /* Look for free pages before asking for more */
    for(pf = free_list.next; pf; pf = pf->next) {

#ifdef MALLOC_EXTRA_SANITY
	if (pf->size & malloc_pagemask)
	    wrterror("(ES): junk length entry on free_list.\n");
	if (!pf->size)
	    wrterror("(ES): zero length entry on free_list.\n");
	if (pf->page == pf->end)
	    wrterror("(ES): zero entry on free_list.\n");
	if (pf->page > pf->end)
	    wrterror("(ES): sick entry on free_list.\n");
	if ((void*)pf->page >= (void*)sbrk(0))
	    wrterror("(ES): entry on free_list past brk.\n");
	if (page_dir[ptr2idx(pf->page)] != MALLOC_FREE)
	    wrterror("(ES): non-free first page on free-list.\n");
	if (page_dir[ptr2idx(pf->end)-1] != MALLOC_FREE)
	    wrterror("(ES): non-free last page on free-list.\n");
#endif /* MALLOC_EXTRA_SANITY */

	if (pf->size < size)
	    continue;

	if (pf->size == size) {
	    /* Exact fit: unlink the entry, dispose of its holder below. */
	    p = pf->page;
	    if (pf->next != NULL)
		    pf->next->prev = pf->prev;
	    pf->prev->next = pf->next;
	    delay_free = pf;
	    break;
	}

	/* Larger run: carve our pages off the front of it. */
	p = pf->page;
	pf->page = (char *)pf->page + size;
	pf->size -= size;
	break;
    }

#ifdef MALLOC_EXTRA_SANITY
    if (p != NULL && page_dir[ptr2idx(p)] != MALLOC_FREE)
	wrterror("(ES): allocated non-free page on free-list.\n");
#endif /* MALLOC_EXTRA_SANITY */

    size >>= malloc_pageshift;

    /* Map new pages */
    if (p == NULL)
	p = map_pages(size);

    if (p != NULL) {

	/* Mark the run in the page directory: FIRST, then FOLLOWs. */
	idx = ptr2idx(p);
	page_dir[idx] = MALLOC_FIRST;
	for (i=1;i<size;i++)
	    page_dir[idx+i] = MALLOC_FOLLOW;

	if (malloc_junk)
	    memset(p, SOME_JUNK, size << malloc_pageshift);
    }

    if (delay_free) {
	/* Free the unlinked holder now; cache it in px if px is empty. */
	if (px == NULL)
	    px = delay_free;
	else
	    ifree(delay_free);
    }

    return p;
}
    645 
/*
 * Allocate a page of fragments (chunks of size 1 << bits) and hook it
 * onto the bucket list for that size.  The pginfo header lives at the
 * start of the page itself when the chunks are large enough to make
 * that cheap, otherwise it is allocated separately.  Returns 1 on
 * success, 0 on failure.
 */

static inline int
malloc_make_chunks(int bits)
{
    struct  pginfo *bp;
    void *pp;
    int i, k;
    long l;

    /* Allocate a new bucket */
    pp = malloc_pages(malloc_pagesize);
    if (pp == NULL)
	return 0;

    /* Find length of admin structure */
    l = (long)offsetof(struct pginfo, bits[0]);
    l += (long)sizeof bp->bits[0] *
	(((malloc_pagesize >> bits)+MALLOC_BITS-1) / MALLOC_BITS);

    /* Don't waste more than two chunks on this */
    if ((1<<(bits)) <= l+l) {
	bp = (struct  pginfo *)pp;
    } else {
	bp = imalloc((size_t)l);
	if (bp == NULL) {
	    ifree(pp);
	    return 0;
	}
    }

    bp->size = (1<<bits);
    bp->shift = bits;
    bp->total = bp->free = (u_short)(malloc_pagesize >> bits);
    bp->page = pp;

    /* set all valid bits in the bitmap (set bit = free chunk) */
    k = bp->total;
    i = 0;

    /* Do a bunch at a time */
    for(;k-i >= MALLOC_BITS; i += MALLOC_BITS)
	bp->bits[i / MALLOC_BITS] = ~0U;

    for(; i < k; i++)
        bp->bits[i/MALLOC_BITS] |= 1<<(i%MALLOC_BITS);

    if (bp == bp->page) {
	/* Mark the ones we stole for ourselves */
	for(i = 0; l > 0; i++) {
	    bp->bits[i / MALLOC_BITS] &= ~(1 << (i % MALLOC_BITS));
	    bp->free--;
	    bp->total--;
	    l -= (long)(1 << bits);
	}
    }

    /* MALLOC_LOCK */

    page_dir[ptr2idx(pp)] = bp;

    /* The first malloc_pageshift slots of page_dir serve as bucket heads. */
    bp->next = page_dir[bits];
    page_dir[bits] = bp;

    /* MALLOC_UNLOCK */

    return 1;
}
    716 
/*
 * Allocate a fragment: a sub-page chunk whose size is the smallest
 * power of two >= `size' (and >= malloc_minsize).  Takes the first
 * free chunk from the first page of the matching bucket, creating a
 * new chunk page when the bucket is empty.  Returns NULL on failure.
 */
static void *
malloc_bytes(size_t size)
{
    size_t i;
    int j;
    u_int u;
    struct  pginfo *bp;
    size_t k;
    u_int *lp;

    /* Don't bother with anything less than this */
    if (size < malloc_minsize)
	size = malloc_minsize;


    /* Find the right bucket: j = ceil(log2(size)) */
    j = 1;
    i = size-1;
    while (i >>= 1)
	j++;

    /* If it's empty, make a page more of that size chunks */
    if (page_dir[j] == NULL && !malloc_make_chunks(j))
	return NULL;

    bp = page_dir[j];

    /* Find first word of bitmap which isn't empty */
    for (lp = bp->bits; !*lp; lp++)
	;

    /* Find that bit, and tweak it */
    u = 1;
    k = 0;
    while (!(*lp & u)) {
	u += u;
	k++;
    }
    *lp ^= u;	/* clear the free bit: this chunk is now in use */

    /* If there are no more free, remove from free-list */
    if (!--bp->free) {
	page_dir[j] = bp->next;
	bp->next = NULL;
    }

    /* Adjust to the real offset of that chunk */
    k += (lp-bp->bits)*MALLOC_BITS;
    k <<= bp->shift;

    if (malloc_junk)
	memset((u_char*)bp->page + k, SOME_JUNK, (size_t)bp->size);

    return (u_char *)bp->page + k;
}
    775 
/*
 * Allocate a piece of memory.
 * Dispatches to the chunk allocator for requests up to malloc_maxsize
 * (half a page) and to the page allocator beyond that.  Honours the
 * 'X' (xmalloc) and 'Z' (zero-fill) options.
 */
static void *
imalloc(size_t size)
{
    void *result;

    if (suicide)
	abort();

    if ((size + malloc_pagesize) < size)	/* Check for overflow */
	result = NULL;
    else if ((size + malloc_pagesize) >= (uintptr_t)page_dir)
	/* NOTE(review): presumably a sanity bound keeping requests below
	 * the page-directory mapping's address — confirm intent. */
	result = NULL;
    else if (size <= malloc_maxsize)
	result = malloc_bytes(size);
    else
	result = malloc_pages(size);

    if (malloc_abort && result == NULL)
	wrterror("allocation failed.\n");

    if (malloc_zero && result != NULL)
	memset(result, 0, size);

    return result;
}
    804 
/*
 * Change the size of an allocation.
 * If the existing allocation already fits well (and the 'R' option is
 * off) the original pointer is returned unchanged; otherwise a new
 * block is allocated, the data copied, and the old block freed.
 * Returns NULL (with a warning) for pointers malloc never issued.
 */
static void *
irealloc(void *ptr, size_t size)
{
    void *p;
    size_t osize, idx;
    struct pginfo **mp;
    size_t i;

    if (suicide)
	abort();

    idx = ptr2idx(ptr);

    if (idx < malloc_pageshift) {
	wrtwarning("junk pointer, too low to make sense.\n");
	return 0;
    }

    if (idx > last_idx) {
	wrtwarning("junk pointer, too high to make sense.\n");
	return 0;
    }

    mp = &page_dir[idx];

    if (*mp == MALLOC_FIRST) {			/* Page allocation */

	/* Check the pointer */
	if ((size_t)(uintptr_t)ptr & malloc_pagemask) {
	    wrtwarning("modified (page-) pointer.\n");
	    return NULL;
	}

	/* Find the size in bytes by counting the MALLOC_FOLLOW pages */
	for (osize = malloc_pagesize; *++mp == MALLOC_FOLLOW;)
	    osize += malloc_pagesize;

        if (!malloc_realloc && 			/* unless we have to, */
	  size <= osize && 			/* .. or are too small, */
	  size > (osize - malloc_pagesize)) {	/* .. or can free a page, */
	    if (malloc_junk)
		memset((u_char *)ptr + size, SOME_JUNK, osize-size);
	    return ptr;				/* don't do anything. */
	}

    } else if (*mp >= MALLOC_MAGIC) {		/* Chunk allocation */

	/* Check the pointer for sane values */
	if (((size_t)(uintptr_t)ptr & ((*mp)->size-1))) {
	    wrtwarning("modified (chunk-) pointer.\n");
	    return NULL;
	}

	/* Find the chunk index in the page */
	i = ((size_t)(uintptr_t)ptr & malloc_pagemask) >> (*mp)->shift;

	/* Verify that it isn't a free chunk already */
        if ((*mp)->bits[i/MALLOC_BITS] & (1UL << (i % MALLOC_BITS))) {
	    wrtwarning("chunk is already free.\n");
	    return NULL;
	}

	osize = (*mp)->size;

	if (!malloc_realloc &&		/* Unless we have to, */
	  size <= osize && 		/* ..or are too small, */
	  (size > osize / 2 ||	 	/* ..or could use a smaller size, */
	  osize == malloc_minsize)) {	/* ..(if there is one) */
	    if (malloc_junk)
		memset((u_char *)ptr + size, SOME_JUNK, osize-size);
	    return ptr;			/* ..Don't do anything */
	}

    } else {
	wrtwarning("pointer to wrong page.\n");
	return NULL;
    }

    p = imalloc(size);

    if (p != NULL) {
	/* copy the lesser of the two sizes, and free the old one */
	if (!size || !osize)
	    ;
	else if (osize < size)
	    memcpy(p, ptr, osize);
	else
	    memcpy(p, ptr, size);
	ifree(ptr);
    }
    return p;
}
    900 
    901 /*
    902  * Free a sequence of pages
    903  */
    904 
    905 static inline void
    906 free_pages(void *ptr, size_t idx, struct pginfo *info)
    907 {
    908     size_t i;
    909     struct pgfree *pf, *pt=NULL;
    910     size_t l;
    911     void *tail;
    912 
    913     if (info == MALLOC_FREE) {
    914 	wrtwarning("page is already free.\n");
    915 	return;
    916     }
    917 
    918     if (info != MALLOC_FIRST) {
    919 	wrtwarning("pointer to wrong page.\n");
    920 	return;
    921     }
    922 
    923     if ((size_t)(uintptr_t)ptr & malloc_pagemask) {
    924 	wrtwarning("modified (page-) pointer.\n");
    925 	return;
    926     }
    927 
    928     /* Count how many pages and mark them free at the same time */
    929     page_dir[idx] = MALLOC_FREE;
    930     for (i = 1; page_dir[idx+i] == MALLOC_FOLLOW; i++)
    931 	page_dir[idx + i] = MALLOC_FREE;
    932 
    933     l = i << malloc_pageshift;
    934 
    935     if (malloc_junk)
    936 	memset(ptr, SOME_JUNK, l);
    937 
    938     if (malloc_hint)
    939 	madvise(ptr, l, MADV_FREE);
    940 
    941     tail = (char *)ptr+l;
    942 
    943     /* add to free-list */
    944     if (px == NULL)
    945 	px = imalloc(sizeof *px);	/* This cannot fail... */
    946     px->page = ptr;
    947     px->end =  tail;
    948     px->size = l;
    949     if (free_list.next == NULL) {
    950 
    951 	/* Nothing on free list, put this at head */
    952 	px->next = free_list.next;
    953 	px->prev = &free_list;
    954 	free_list.next = px;
    955 	pf = px;
    956 	px = NULL;
    957 
    958     } else {
    959 
    960 	/* Find the right spot, leave pf pointing to the modified entry. */
    961 	tail = (char *)ptr+l;
    962 
    963 	for(pf = free_list.next; pf->end < ptr && pf->next != NULL;
    964 	    pf = pf->next)
    965 	    ; /* Race ahead here */
    966 
    967 	if (pf->page > tail) {
    968 	    /* Insert before entry */
    969 	    px->next = pf;
    970 	    px->prev = pf->prev;
    971 	    pf->prev = px;
    972 	    px->prev->next = px;
    973 	    pf = px;
    974 	    px = NULL;
    975 	} else if (pf->end == ptr ) {
    976 	    /* Append to the previous entry */
    977 	    pf->end = (char *)pf->end + l;
    978 	    pf->size += l;
    979 	    if (pf->next != NULL && pf->end == pf->next->page ) {
    980 		/* And collapse the next too. */
    981 		pt = pf->next;
    982 		pf->end = pt->end;
    983 		pf->size += pt->size;
    984 		pf->next = pt->next;
    985 		if (pf->next != NULL)
    986 		    pf->next->prev = pf;
    987 	    }
    988 	} else if (pf->page == tail) {
    989 	    /* Prepend to entry */
    990 	    pf->size += l;
    991 	    pf->page = ptr;
    992 	} else if (pf->next == NULL) {
    993 	    /* Append at tail of chain */
    994 	    px->next = NULL;
    995 	    px->prev = pf;
    996 	    pf->next = px;
    997 	    pf = px;
    998 	    px = NULL;
    999 	} else {
   1000 	    wrterror("freelist is destroyed.\n");
   1001 	}
   1002     }
   1003 
   1004     /* Return something to OS ? */
   1005     if (pf->next == NULL &&			/* If we're the last one, */
   1006       pf->size > malloc_cache &&		/* ..and the cache is full, */
   1007       pf->end == malloc_brk &&			/* ..and none behind us, */
   1008       malloc_brk == sbrk((intptr_t)0)) {	/* ..and it's OK to do... */
   1009 
   1010 	/*
   1011 	 * Keep the cache intact.  Notice that the '>' above guarantees that
   1012 	 * the pf will always have at least one page afterwards.
   1013 	 */
   1014 	pf->end = (char *)pf->page + malloc_cache;
   1015 	pf->size = malloc_cache;
   1016 
   1017 	brk(pf->end);
   1018 	malloc_brk = pf->end;
   1019 
   1020 	idx = ptr2idx(pf->end);
   1021 
   1022 	for(i=idx;i <= last_idx;)
   1023 	    page_dir[i++] = MALLOC_NOT_MINE;
   1024 
   1025 	last_idx = idx - 1;
   1026 
   1027 	/* XXX: We could realloc/shrink the pagedir here I guess. */
   1028     }
   1029     if (pt != NULL)
   1030 	ifree(pt);
   1031 }
   1032 
   1033 /*
   1034  * Free a chunk, and possibly the page it's on, if the page becomes empty.
   1035  */
   1036 
   1037 static inline void
   1038 free_bytes(void *ptr, size_t idx, struct pginfo *info)
   1039 {
   1040     size_t i;
   1041     struct pginfo **mp;
   1042     void *vp;
   1043 
   1044     /* Find the chunk number on the page */
   1045     i = ((size_t)(uintptr_t)ptr & malloc_pagemask) >> info->shift;
   1046 
   1047     if (((size_t)(uintptr_t)ptr & (info->size-1))) {
   1048 	wrtwarning("modified (chunk-) pointer.\n");
   1049 	return;
   1050     }
   1051 
   1052     if (info->bits[i/MALLOC_BITS] & (1UL << (i % MALLOC_BITS))) {
   1053 	wrtwarning("chunk is already free.\n");
   1054 	return;
   1055     }
   1056 
   1057     if (malloc_junk)
   1058 	memset(ptr, SOME_JUNK, (size_t)info->size);
   1059 
   1060     info->bits[i/MALLOC_BITS] |= (u_int)(1UL << (i % MALLOC_BITS));
   1061     info->free++;
   1062 
   1063     mp = page_dir + info->shift;
   1064 
   1065     if (info->free == 1) {
   1066 
   1067 	/* Page became non-full */
   1068 
   1069 	mp = page_dir + info->shift;
   1070 	/* Insert in address order */
   1071 	while (*mp && (*mp)->next && (*mp)->next->page < info->page)
   1072 	    mp = &(*mp)->next;
   1073 	info->next = *mp;
   1074 	*mp = info;
   1075 	return;
   1076     }
   1077 
   1078     if (info->free != info->total)
   1079 	return;
   1080 
   1081     /* Find & remove this page in the queue */
   1082     while (*mp != info) {
   1083 	mp = &((*mp)->next);
   1084 #ifdef MALLOC_EXTRA_SANITY
   1085 	if (!*mp)
   1086 		wrterror("(ES): Not on queue.\n");
   1087 #endif /* MALLOC_EXTRA_SANITY */
   1088     }
   1089     *mp = info->next;
   1090 
   1091     /* Free the page & the info structure if need be */
   1092     page_dir[idx] = MALLOC_FIRST;
   1093     vp = info->page;		/* Order is important ! */
   1094     if(vp != (void*)info)
   1095 	ifree(info);
   1096     ifree(vp);
   1097 }
   1098 
   1099 static void
   1100 ifree(void *ptr)
   1101 {
   1102     struct pginfo *info;
   1103     size_t idx;
   1104 
   1105     /* This is legal */
   1106     if (ptr == NULL)
   1107 	return;
   1108 
   1109     /* If we're already sinking, don't make matters any worse. */
   1110     if (suicide)
   1111 	return;
   1112 
   1113     idx = ptr2idx(ptr);
   1114 
   1115     if (idx < malloc_pageshift) {
   1116 	wrtwarning("junk pointer, too low to make sense.\n");
   1117 	return;
   1118     }
   1119 
   1120     if (idx > last_idx) {
   1121 	wrtwarning("junk pointer, too high to make sense.\n");
   1122 	return;
   1123     }
   1124 
   1125     info = page_dir[idx];
   1126 
   1127     if (info < MALLOC_MAGIC)
   1128         free_pages(ptr, idx, info);
   1129     else
   1130 	free_bytes(ptr, idx, info);
   1131     return;
   1132 }
   1133 
static int malloc_active; /* Recursion flag for public interface. */
static unsigned malloc_started; /* Set when initialization has been done */
   1136 
/*
 * Common serialized entry point for the public malloc/realloc/free
 * family.  Takes the malloc lock, detects recursive entry (e.g. from a
 * signal handler interrupting an allocation), performs one-time
 * initialization, applies the zero-size / ZEROSIZEPTR conventions, and
 * dispatches to imalloc()/irealloc()/ifree().
 *
 * ptr  - NULL for a fresh allocation, else a pointer to resize/free.
 * size - requested size; 0 means "free" (result depends on SysV mode).
 * func - tag such as " in malloc():" used in diagnostics.
 *
 * Returns the (re)allocated pointer, or NULL with errno set to EINVAL
 * (recursion / misuse) or ENOMEM (allocation failure).
 */
static void *
pubrealloc(void *ptr, size_t size, const char *func)
{
    void *r;
    int err = 0;

    /*
     * If a thread is inside our code with a functional lock held, and then
     * catches a signal which calls us again, we would get a deadlock if the
     * lock is not of a recursive type.
     */
    _MALLOC_LOCK();
    malloc_func = func;
    if (malloc_active > 0) {
	/* Warn only on the first recursive entry. */
	if (malloc_active == 1) {
	    wrtwarning("recursive call\n");
	    malloc_active = 2;
	}
        _MALLOC_UNLOCK();
	errno = EINVAL;
	return (NULL);
    }
    malloc_active = 1;

    if (!malloc_started) {
	/* Freeing/resizing before any allocation is a caller bug. */
        if (ptr != NULL) {
	    wrtwarning("malloc() has never been called\n");
	    malloc_active = 0;
            _MALLOC_UNLOCK();
	    errno = EINVAL;
	    return (NULL);
	}
	malloc_init();
	malloc_started = 1;
    }

    /* ZEROSIZEPTR is the distinguished token handed out for size 0. */
    if (ptr == ZEROSIZEPTR)
	ptr = NULL;
    if (malloc_sysv && !size) {
	/* SysV semantics: size 0 frees and yields NULL. */
	if (ptr != NULL)
	    ifree(ptr);
	r = NULL;
    } else if (!size) {
	/* Default semantics: size 0 yields the zero-size token. */
	if (ptr != NULL)
	    ifree(ptr);
	r = ZEROSIZEPTR;
    } else if (ptr == NULL) {
	r = imalloc(size);
	err = (r == NULL);
    } else {
        r = irealloc(ptr, size);
	err = (r == NULL);
    }
    UTRACE(ptr, size, r);
    malloc_active = 0;
    _MALLOC_UNLOCK();
    /* 'X' option: abort instead of returning NULL on failure. */
    if (malloc_xmalloc && err)
	wrterror("out of memory\n");
    if (err)
	errno = ENOMEM;
    return (r);
}
   1199 
   1200 /*
   1201  * These are the public exported interface routines.
   1202  */
   1203 
/*
 * Standard malloc(3): all the work happens in pubrealloc().
 */
void *
malloc(size_t size)
{
    void *p;

    p = pubrealloc(NULL, size, " in malloc():");
    return p;
}
   1210 
   1211 int
   1212 posix_memalign(void **memptr, size_t alignment, size_t size)
   1213 {
   1214     int err;
   1215     void *result;
   1216 
   1217     if (!malloc_started) {
   1218 	    malloc_init();
   1219 	    malloc_started = 1;
   1220     }
   1221     /* Make sure that alignment is a large enough power of 2. */
   1222     if (((alignment - 1) & alignment) != 0 || alignment < sizeof(void *) ||
   1223 	alignment > malloc_pagesize)
   1224 	    return EINVAL;
   1225 
   1226     /*
   1227      * (size | alignment) is enough to assure the requested alignment, since
   1228      * the allocator always allocates power-of-two blocks.
   1229      */
   1230     err = errno; /* Protect errno against changes in pubrealloc(). */
   1231     result = pubrealloc(NULL, (size | alignment), " in posix_memalign()");
   1232     errno = err;
   1233 
   1234     if (result == NULL)
   1235 	return ENOMEM;
   1236 
   1237     *memptr = result;
   1238     return 0;
   1239 }
   1240 
/*
 * Standard calloc(3): allocate num*size bytes, zero-filled.  Fails
 * with ENOMEM if the product overflows size_t.
 */
void *
calloc(size_t num, size_t size)
{
    void *p;
    size_t total;

    total = num * size;

    /* Overflow check: the product must divide back to num. */
    if (size != 0 && total / size != num) {
	errno = ENOMEM;
	return (NULL);
    }

    p = pubrealloc(NULL, total, " in calloc():");
    if (p != NULL)
	memset(p, 0, total);

    return p;
}
   1259 
/*
 * Standard free(3): implemented as a zero-size pubrealloc().
 */
void
free(void *ptr)
{
    (void)pubrealloc(ptr, 0, " in free():");
}
   1266 
/*
 * Standard realloc(3): thin wrapper over pubrealloc().
 */
void *
realloc(void *ptr, size_t size)
{
    void *p;

    p = pubrealloc(ptr, size, " in realloc():");
    return p;
}
   1273 
   1274 /*
   1275  * Begin library-private functions, used by threading libraries for protection
   1276  * of malloc during fork().  These functions are only called if the program is
   1277  * running in threaded mode, so there is no need to check whether the program
   1278  * is threaded here.
   1279  */
   1280 
/*
 * Take the malloc lock before fork() so the child does not inherit the
 * lock held mid-allocation by another thread.
 */
void
_malloc_prefork(void)
{

	_MALLOC_LOCK();
}
   1287 
/*
 * Release the lock taken by _malloc_prefork() once fork() completes.
 */
void
_malloc_postfork(void)
{

	_MALLOC_UNLOCK();
}
   1294