Home | History | Annotate | Line # | Download | only in stdlib
malloc.c revision 1.54.10.1
      1 /*	$NetBSD: malloc.c,v 1.54.10.1 2013/02/25 00:27:53 tls Exp $	*/
      2 
      3 /*
      4  * ----------------------------------------------------------------------------
      5  * "THE BEER-WARE LICENSE" (Revision 42):
      6  * <phk (at) FreeBSD.ORG> wrote this file.  As long as you retain this notice you
      7  * can do whatever you want with this stuff. If we meet some day, and you think
      8  * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
      9  * ----------------------------------------------------------------------------
     10  *
     11  * From FreeBSD: malloc.c,v 1.91 2006/01/12 07:28:20 jasone
     12  *
     13  */
     14 
     15 /*
     16  * Defining MALLOC_EXTRA_SANITY will enable extra checks which are related
     17  * to internal conditions and consistency in malloc.c. This has a
     18  * noticeable runtime performance hit, and generally will not do you
     19  * any good unless you fiddle with the internals of malloc or want
     20  * to catch random pointer corruption as early as possible.
     21  */
     22 #ifndef MALLOC_EXTRA_SANITY
     23 #undef MALLOC_EXTRA_SANITY
     24 #endif
     25 
     26 /*
     27  * What to use for Junk.  This is the byte value we use to fill with
     28  * when the 'J' option is enabled.
     29  */
     30 #define SOME_JUNK	0xd0		/* as in "Duh" :-) */
     31 
     32 /*
     33  * The basic parameters you can tweak.
     34  *
     35  * malloc_minsize	minimum size of an allocation in bytes.
     36  *			If this is too small it's too much work
     37  *			to manage them.  This is also the smallest
     38  *			unit of alignment used for the storage
     39  *			returned by malloc/realloc.
     40  *
     41  */
     42 
     43 #include "namespace.h"
     44 #if defined(__FreeBSD__)
     45 #   if defined(__i386__)
     46 #       define malloc_minsize		16U
     47 #   endif
     48 #   if defined(__ia64__)
     49 #	define malloc_pageshift		13U
     50 #	define malloc_minsize		16U
     51 #   endif
     52 #   if defined(__alpha__)
     53 #       define malloc_pageshift		13U
     54 #       define malloc_minsize		16U
     55 #   endif
     56 #   if defined(__sparc64__)
     57 #       define malloc_pageshift		13U
     58 #       define malloc_minsize		16U
     59 #   endif
     60 #   if defined(__amd64__)
     61 #       define malloc_pageshift		12U
     62 #       define malloc_minsize		16U
     63 #   endif
     64 #   if defined(__arm__)
     65 #       define malloc_pageshift         12U
     66 #       define malloc_minsize           16U
     67 #   endif
     68 #   define HAS_UTRACE
     69 #   define UTRACE_LABEL
     70 
     71 #include <sys/cdefs.h>
     72 void utrace(struct ut *, int);
     73 
     74     /*
     75      * Make malloc/free/realloc thread-safe in libc for use with
     76      * kernel threads.
     77      */
     78 #   include "libc_private.h"
     79 #   include "spinlock.h"
     80     static spinlock_t thread_lock	= _SPINLOCK_INITIALIZER;
     81 #   define _MALLOC_LOCK()		if (__isthreaded) _SPINLOCK(&thread_lock);
     82 #   define _MALLOC_UNLOCK()		if (__isthreaded) _SPINUNLOCK(&thread_lock);
     83 #endif /* __FreeBSD__ */
     84 
     85 #include <sys/types.h>
     86 #if defined(__NetBSD__)
     87 #   define malloc_minsize               16U
     88 #   define HAS_UTRACE
     89 #   define UTRACE_LABEL "malloc",
     90 #include <sys/cdefs.h>
     91 #include "extern.h"
     92 #if defined(LIBC_SCCS) && !defined(lint)
     93 __RCSID("$NetBSD: malloc.c,v 1.54.10.1 2013/02/25 00:27:53 tls Exp $");
     94 #endif /* LIBC_SCCS and not lint */
     95 int utrace(const char *, void *, size_t);
     96 
     97 #include <reentrant.h>
     98 extern int __isthreaded;
     99 static mutex_t thread_lock = MUTEX_INITIALIZER;
    100 #define _MALLOC_LOCK()	if (__isthreaded) mutex_lock(&thread_lock);
    101 #define _MALLOC_UNLOCK()	if (__isthreaded) mutex_unlock(&thread_lock);
    102 #endif /* __NetBSD__ */
    103 
    104 #if defined(__sparc__) && defined(sun)
    105 #   define malloc_minsize		16U
    106 #   define MAP_ANON			(0)
    107     static int fdzero;
    108 #   define MMAP_FD	fdzero
    109 #   define INIT_MMAP() \
    110 	{ if ((fdzero = open(_PATH_DEVZERO, O_RDWR, 0000)) == -1) \
    111 	    wrterror("open of /dev/zero"); }
    112 #endif /* __sparc__ */
    113 
    114 /* Insert your combination here... */
    115 #if defined(__FOOCPU__) && defined(__BAROS__)
    116 #   define malloc_minsize		16U
    117 #endif /* __FOOCPU__ && __BAROS__ */
    118 
    119 #ifndef ZEROSIZEPTR
    120 #define ZEROSIZEPTR	((void *)(uintptr_t)(1UL << (malloc_pageshift - 1)))
    121 #endif
    122 
    123 /*
    124  * No user serviceable parts behind this point.
    125  */
    126 #include <sys/types.h>
    127 #include <sys/mman.h>
    128 #include <errno.h>
    129 #include <fcntl.h>
    130 #include <paths.h>
    131 #include <stddef.h>
    132 #include <stdio.h>
    133 #include <stdlib.h>
    134 #include <string.h>
    135 #include <unistd.h>
    136 
    137 /*
    138  * This structure describes a page worth of chunks.
    139  */
    140 
struct pginfo {
    struct pginfo	*next;	/* next page holding chunks of the same size */
    void		*page;	/* pointer to the page the chunks live on */
    u_short		size;	/* size in bytes of this page's chunks */
    u_short		shift;	/* log2(size): shift converting index<->offset */
    u_short		free;	/* how many chunks are currently free */
    u_short		total;	/* how many chunks the page holds in total */
    u_int		bits[1]; /* free-chunk bitmap, set bit = free; grows past [1] */
};
    150 
    151 /*
    152  * This structure describes a number of free pages.
    153  */
    154 
struct pgfree {
    struct pgfree	*next;	/* next run of free pages (kept in address order) */
    struct pgfree	*prev;	/* previous run of free pages */
    void		*page;	/* first byte of the free run */
    void		*end;	/* one past the last byte of the free run */
    size_t		size;	/* number of bytes free (end - page) */
};
    162 
    163 /*
    164  * How many bits per u_int in the bitmap.
    165  * Change only if not 8 bits/byte
    166  */
    167 #define	MALLOC_BITS	((int)(8*sizeof(u_int)))
    168 
    169 /*
    170  * Magic values to put in the page_directory
    171  */
    172 #define MALLOC_NOT_MINE	((struct pginfo*) 0)
    173 #define MALLOC_FREE 	((struct pginfo*) 1)
    174 #define MALLOC_FIRST	((struct pginfo*) 2)
    175 #define MALLOC_FOLLOW	((struct pginfo*) 3)
    176 #define MALLOC_MAGIC	((struct pginfo*) 4)
    177 
    178 /*
    179  * Page size related parameters, computed at run-time.
    180  */
    181 static size_t malloc_pagesize;
    182 static size_t malloc_pageshift;
    183 static size_t malloc_pagemask;
    184 
    185 #ifndef malloc_minsize
    186 #define malloc_minsize			16U
    187 #endif
    188 
    189 #ifndef malloc_maxsize
    190 #define malloc_maxsize			((malloc_pagesize)>>1)
    191 #endif
    192 
    193 #define pageround(foo) (((foo) + (malloc_pagemask))&(~(malloc_pagemask)))
    194 #define ptr2idx(foo) \
    195     (((size_t)(uintptr_t)(foo) >> malloc_pageshift)-malloc_origo)
    196 
    197 #ifndef _MALLOC_LOCK
    198 #define _MALLOC_LOCK()
    199 #endif
    200 
    201 #ifndef _MALLOC_UNLOCK
    202 #define _MALLOC_UNLOCK()
    203 #endif
    204 
    205 #ifndef MMAP_FD
    206 #define MMAP_FD (-1)
    207 #endif
    208 
    209 #ifndef INIT_MMAP
    210 #define INIT_MMAP()
    211 #endif
    212 
    213 #ifndef MADV_FREE
    214 #define MADV_FREE MADV_DONTNEED
    215 #endif
    216 
    217 /* Number of free pages we cache */
    218 static size_t malloc_cache = 16;
    219 
    220 /* The offset from pagenumber to index into the page directory */
    221 static size_t malloc_origo;
    222 
    223 /* The last index in the page directory we care about */
    224 static size_t last_idx;
    225 
    226 /* Pointer to page directory. Allocated "as if with" malloc */
    227 static struct	pginfo **page_dir;
    228 
    229 /* How many slots in the page directory */
    230 static size_t	malloc_ninfo;
    231 
    232 /* Free pages line up here */
    233 static struct pgfree free_list;
    234 
    235 /* Abort(), user doesn't handle problems.  */
    236 static int malloc_abort;
    237 
    238 /* Are we trying to die ?  */
    239 static int suicide;
    240 
    241 /* always realloc ?  */
    242 static int malloc_realloc;
    243 
    244 /* pass the kernel a hint on free pages ?  */
    245 #if defined(MADV_FREE)
    246 static int malloc_hint = 0;
    247 #endif
    248 
    249 /* xmalloc behaviour ?  */
    250 static int malloc_xmalloc;
    251 
    252 /* sysv behaviour for malloc(0) ?  */
    253 static int malloc_sysv;
    254 
    255 /* zero fill ?  */
    256 static int malloc_zero;
    257 
    258 /* junk fill ?  */
    259 static int malloc_junk;
    260 
    261 #ifdef HAS_UTRACE
    262 
    263 /* utrace ?  */
    264 static int malloc_utrace;
    265 
    266 struct ut { void *p; size_t s; void *r; };
    267 
    268 #define UTRACE(a, b, c) \
    269 	if (malloc_utrace) {			\
    270 		struct ut u;			\
    271 		u.p=a; u.s = b; u.r=c;		\
    272 		utrace(UTRACE_LABEL (void *) &u, sizeof u);	\
    273 	}
    274 #else /* !HAS_UTRACE */
    275 #define UTRACE(a,b,c)
    276 #endif /* HAS_UTRACE */
    277 
    278 /* my last break. */
    279 static void *malloc_brk;
    280 
    281 /* one location cache for free-list holders */
    282 static struct pgfree *px;
    283 
    284 /* compile-time options */
    285 const char *_malloc_options;
    286 
    287 /* Name of the current public function */
    288 static const char *malloc_func;
    289 
    290 /* Macro for mmap */
    291 #define MMAP(size) \
    292 	mmap(NULL, (size), PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, \
    293 	    MMAP_FD, (off_t)0);
    294 
    295 /*
    296  * Necessary function declarations
    297  */
    298 static int extend_pgdir(size_t idx);
    299 static void *imalloc(size_t size);
    300 static void ifree(void *ptr);
    301 static void *irealloc(void *ptr, size_t size);
    302 
/*
 * Default diagnostic output routine: emit the four message fragments,
 * in order, straight to standard error with no buffering.
 */
static void
wrtmessage(const char *p1, const char *p2, const char *p3, const char *p4)
{
    const char *frag[4];
    int i;

    frag[0] = p1;
    frag[1] = p2;
    frag[2] = p3;
    frag[3] = p4;
    for (i = 0; i < 4; i++)
	write(STDERR_FILENO, frag[i], strlen(frag[i]));
}

/* Hook applications may replace to redirect allocator diagnostics. */
void (*_malloc_message)(const char *p1, const char *p2, const char *p3,
	    const char *p4) = wrtmessage;
/*
 * Report a fatal allocator error and abort the process.  Sets 'suicide'
 * first so that any allocator calls made while dying give up immediately
 * instead of recursing into the broken state.
 */
static void
wrterror(const char *p)
{

    suicide = 1;	/* flag checked by imalloc()/irealloc() */
    _malloc_message(getprogname(), malloc_func, " error: ", p);
    abort();
}
    323 
/*
 * Report a non-fatal allocator problem.  Escalates to a fatal error
 * when the 'A' option (malloc_abort) is set, or for "sensitive"
 * processes as defined below.
 */
static void
wrtwarning(const char *p)
{

    /*
     * Sensitive processes, somewhat arbitrarily defined here as setuid,
     * setgid, root and wheel cannot afford to have malloc mistakes.
     */
    if (malloc_abort || issetugid() || getuid() == 0 || getgid() == 0)
	wrterror(p);
}
    335 
    336 /*
    337  * Allocate a number of pages from the OS
    338  */
/*
 * Allocate 'pages' pages from the OS via sbrk(2), page-align the result,
 * and extend the page directory to cover them.  Returns a page-aligned
 * pointer, or NULL on failure (errno set on overflow).
 */
static void *
map_pages(size_t pages)
{
    caddr_t result, rresult, tail;
    intptr_t bytes = pages << malloc_pageshift;

    /* Reject requests whose byte count overflows the signed sbrk argument. */
    if (bytes < 0 || (size_t)bytes < pages) {
	errno = ENOMEM;
	return NULL;
    }

    if ((result = sbrk(bytes)) == (void *)-1)
	return NULL;

    /*
     * Round to a page, in case sbrk(2) did not do this for us
     */
    rresult = (caddr_t)pageround((size_t)(uintptr_t)result);
    if (result < rresult) {
	/* make sure we have enough space to fit bytes */
	if (sbrk((intptr_t)(rresult - result)) == (void *) -1) {
	    /* we failed, put everything back */
	    if (brk(result)) {
		wrterror("brk(2) failed [internal error]\n");
	    }
	}
    }
    tail = rresult + (size_t)bytes;

    last_idx = ptr2idx(tail) - 1;
    malloc_brk = tail;

    /* Grow the page directory if the new pages fall past its current end. */
    if ((last_idx+1) >= malloc_ninfo && !extend_pgdir(last_idx)) {
	malloc_brk = result;
	last_idx = ptr2idx(malloc_brk) - 1;
	/* Put back break point since we failed. */
	if (brk(malloc_brk))
	    wrterror("brk(2) failed [internal error]\n");
	return 0;
    }

    return rresult;
}
    382 
    383 /*
    384  * Extend page directory
    385  */
/*
 * Extend the page directory so it can hold entries up to and including
 * page index 'idx'.  A fresh, larger anonymous mapping is allocated,
 * the old directory is copied into it, and the old one is unmapped.
 * Returns 1 on success, 0 on failure (errno = ENOMEM on overflow).
 */
static int
extend_pgdir(size_t idx)
{
    struct  pginfo **new, **old;
    size_t newlen, oldlen;

    /* check for overflow */
    if ((((~(1UL << ((sizeof(size_t) * NBBY) - 1)) / sizeof(*page_dir)) + 1)
	+ (malloc_pagesize / sizeof *page_dir)) < idx) {
	errno = ENOMEM;
	return 0;
    }

    /* Make it this many pages */
    newlen = pageround(idx * sizeof *page_dir) + malloc_pagesize;

    /* remember the old mapping size */
    oldlen = malloc_ninfo * sizeof *page_dir;

    /*
     * NOTE: we allocate new pages and copy the directory rather than tempt
     * fate by trying to "grow" the region.. There is nothing to prevent
     * us from accidentally re-mapping space that's been allocated by our caller
     * via dlopen() or other mmap().
     *
     * The copy problem is not too bad, as there is 4K of page index per
     * 4MB of malloc arena.
     *
     * We can totally avoid the copy if we open a file descriptor to associate
     * the anon mappings with.  Then, when we remap the pages at the new
     * address, the old pages will be "magically" remapped..  But this means
     * keeping open a "secret" file descriptor.....
     */

    /* Get new pages */
    new = MMAP(newlen);
    if (new == MAP_FAILED)
	return 0;

    /* Copy the old stuff */
    memcpy(new, page_dir, oldlen);

    /* register the new size */
    malloc_ninfo = newlen / sizeof *page_dir;

    /* swap the pointers */
    old = page_dir;
    page_dir = new;

    /* Now free the old stuff */
    munmap(old, oldlen);
    return 1;
}
    439 
    440 /*
    441  * Initialize the world
    442  */
/*
 * Initialize the world: compute page-size parameters, parse the option
 * string(s), allocate the page directory, and prime the free-list holder
 * cache.  Called once before the first allocation; preserves errno.
 */
static void
malloc_init(void)
{
    const char *p;
    char b[64];
    size_t i;
    ssize_t j;
    int serrno = errno;	/* save caller's errno across initialization */

    /*
     * Compute page-size related variables.
     */
    malloc_pagesize = (size_t)sysconf(_SC_PAGESIZE);
    malloc_pagemask = malloc_pagesize - 1;
    /* Find log2(pagesize); loop assumes the page size is a power of two. */
    for (malloc_pageshift = 0;
	 (1UL << malloc_pageshift) != malloc_pagesize;
	 malloc_pageshift++)
	/* nothing */ ;

    INIT_MMAP();

#ifdef MALLOC_EXTRA_SANITY
    malloc_junk = 1;
#endif /* MALLOC_EXTRA_SANITY */

    /*
     * Read options from, in order: the /etc/malloc.conf symlink target,
     * the MALLOC_OPTIONS environment variable (skipped for set-id
     * processes), and the compiled-in _malloc_options string.  Later
     * sources override earlier ones, character by character.
     */
    for (i = 0; i < 3; i++) {
	if (i == 0) {
	    j = readlink("/etc/malloc.conf", b, sizeof b - 1);
	    if (j == -1)
		continue;
	    b[j] = '\0';
	    p = b;
	} else if (i == 1 && issetugid() == 0) {
	    p = getenv("MALLOC_OPTIONS");
	} else if (i == 1) {
	    continue;
	} else {
	    p = _malloc_options;
	}
	for (; p != NULL && *p != '\0'; p++) {
	    switch (*p) {
		case '>': malloc_cache   <<= 1; break;
		case '<': malloc_cache   >>= 1; break;
		case 'a': malloc_abort   = 0; break;
		case 'A': malloc_abort   = 1; break;
		case 'h': malloc_hint    = 0; break;
		case 'H': malloc_hint    = 1; break;
		case 'r': malloc_realloc = 0; break;
		case 'R': malloc_realloc = 1; break;
		case 'j': malloc_junk    = 0; break;
		case 'J': malloc_junk    = 1; break;
#ifdef HAS_UTRACE
		case 'u': malloc_utrace  = 0; break;
		case 'U': malloc_utrace  = 1; break;
#endif
		case 'v': malloc_sysv    = 0; break;
		case 'V': malloc_sysv    = 1; break;
		case 'x': malloc_xmalloc = 0; break;
		case 'X': malloc_xmalloc = 1; break;
		default:
		    _malloc_message(getprogname(), malloc_func,
			 " warning: ", "unknown char in MALLOC_OPTIONS\n");
		    break;
	    }
	}
    }

    UTRACE(0, 0, 0);

    /*
     * We want junk in the entire allocation, and zero only in the part
     * the user asked for.
     */
    if (malloc_zero)
	malloc_junk = 1;

    /* Allocate one page for the page directory */
    page_dir = MMAP(malloc_pagesize);

    if (page_dir == MAP_FAILED)
	wrterror("mmap(2) failed, check limits.\n");

    /*
     * We need a maximum of malloc_pageshift buckets, steal these from the
     * front of the page_directory;
     */
    malloc_origo = pageround((size_t)(uintptr_t)sbrk((intptr_t)0))
	>> malloc_pageshift;
    malloc_origo -= malloc_pageshift;

    malloc_ninfo = malloc_pagesize / sizeof *page_dir;

    /* Recalculate the cache size in bytes, and make sure it's nonzero */

    if (!malloc_cache)
	malloc_cache++;

    malloc_cache <<= malloc_pageshift;

    /*
     * This is a nice hack from Kaleb Keithly (kaleb (at) x.org).
     * We can sbrk(2) further back when we keep this on a low address.
     */
    px = imalloc(sizeof *px);

    errno = serrno;
}
    552 
    553 /*
    554  * Allocate a number of complete pages
    555  */
/*
 * Allocate a run of complete pages: first try to carve the request out
 * of the cached free-page list, otherwise map fresh pages from the OS.
 * On success the pages are marked MALLOC_FIRST/MALLOC_FOLLOW in the
 * page directory.  Returns NULL on failure (errno set on overflow).
 */
static void *
malloc_pages(size_t size)
{
    void *p, *delay_free = NULL;
    size_t i;
    struct pgfree *pf;
    size_t idx;

    /* Round up to whole pages, watching for wrap-around. */
    idx = pageround(size);
    if (idx < size) {
	errno = ENOMEM;
	return NULL;
    } else
	size = idx;

    p = NULL;

    /* Look for free pages before asking for more */
    for(pf = free_list.next; pf; pf = pf->next) {

#ifdef MALLOC_EXTRA_SANITY
	if (pf->size & malloc_pagemask)
	    wrterror("(ES): junk length entry on free_list.\n");
	if (!pf->size)
	    wrterror("(ES): zero length entry on free_list.\n");
	if (pf->page == pf->end)
	    wrterror("(ES): zero entry on free_list.\n");
	if (pf->page > pf->end)
	    wrterror("(ES): sick entry on free_list.\n");
	if ((void*)pf->page >= (void*)sbrk(0))
	    wrterror("(ES): entry on free_list past brk.\n");
	if (page_dir[ptr2idx(pf->page)] != MALLOC_FREE)
	    wrterror("(ES): non-free first page on free-list.\n");
	if (page_dir[ptr2idx(pf->end)-1] != MALLOC_FREE)
	    wrterror("(ES): non-free last page on free-list.\n");
#endif /* MALLOC_EXTRA_SANITY */

	if (pf->size < size)
	    continue;

	/* Exact fit: unlink the whole entry and free its holder later. */
	if (pf->size == size) {
	    p = pf->page;
	    if (pf->next != NULL)
		    pf->next->prev = pf->prev;
	    pf->prev->next = pf->next;
	    delay_free = pf;
	    break;
	}

	/* Bigger than needed: take the front, shrink the entry in place. */
	p = pf->page;
	pf->page = (char *)pf->page + size;
	pf->size -= size;
	break;
    }

#ifdef MALLOC_EXTRA_SANITY
    if (p != NULL && page_dir[ptr2idx(p)] != MALLOC_FREE)
	wrterror("(ES): allocated non-free page on free-list.\n");
#endif /* MALLOC_EXTRA_SANITY */

    size >>= malloc_pageshift;	/* from here on, 'size' is a page count */

    /* Map new pages */
    if (p == NULL)
	p = map_pages(size);

    if (p != NULL) {

	idx = ptr2idx(p);
	page_dir[idx] = MALLOC_FIRST;
	for (i=1;i<size;i++)
	    page_dir[idx+i] = MALLOC_FOLLOW;

	if (malloc_junk)
	    memset(p, SOME_JUNK, size << malloc_pageshift);
    }

    /*
     * Free the unlinked holder now that list surgery is done; recycle it
     * into the one-slot holder cache (px) if that is empty.
     */
    if (delay_free) {
	if (px == NULL)
	    px = delay_free;
	else
	    ifree(delay_free);
    }

    return p;
}
    642 
    643 /*
    644  * Allocate a page of fragments
    645  */
    646 
/*
 * Carve a fresh page into 2^bits-byte chunks and put it at the head of
 * the bucket list for that size (bucket list heads live in the low,
 * stolen slots of page_dir).  The pginfo admin structure either shares
 * the page itself (for large chunks) or is allocated separately.
 * Returns 1 on success, 0 if no page could be obtained.
 */
static inline int
malloc_make_chunks(int bits)
{
    struct  pginfo *bp;
    void *pp;
    int i, k;
    long l;

    /* Allocate a new bucket */
    pp = malloc_pages(malloc_pagesize);
    if (pp == NULL)
	return 0;

    /* Find length of admin structure */
    l = (long)offsetof(struct pginfo, bits[0]);
    l += (long)sizeof bp->bits[0] *
	(((malloc_pagesize >> bits)+MALLOC_BITS-1) / MALLOC_BITS);

    /* Don't waste more than two chunks on this */
    if ((1<<(bits)) <= l+l) {
	bp = (struct  pginfo *)pp;	/* admin data lives on the page itself */
    } else {
	bp = imalloc((size_t)l);
	if (bp == NULL) {
	    ifree(pp);
	    return 0;
	}
    }

    bp->size = (1<<bits);
    bp->shift = bits;
    bp->total = bp->free = (u_short)(malloc_pagesize >> bits);
    bp->page = pp;

    /* set all valid bits in the bitmap */
    k = bp->total;
    i = 0;

    /* Do a bunch at a time */
    for(;k-i >= MALLOC_BITS; i += MALLOC_BITS)
	bp->bits[i / MALLOC_BITS] = ~0U;

    for(; i < k; i++)
        bp->bits[i/MALLOC_BITS] |= 1<<(i%MALLOC_BITS);

    if (bp == bp->page) {
	/* Mark the ones we stole for ourselves */
	for(i = 0; l > 0; i++) {
	    bp->bits[i / MALLOC_BITS] &= ~(1 << (i % MALLOC_BITS));
	    bp->free--;
	    bp->total--;
	    l -= (long)(1 << bits);
	}
    }

    /* MALLOC_LOCK */

    page_dir[ptr2idx(pp)] = bp;

    /* Push onto the head of this size's bucket list. */
    bp->next = page_dir[bits];
    page_dir[bits] = bp;

    /* MALLOC_UNLOCK */

    return 1;
}
    713 
    714 /*
    715  * Allocate a fragment
    716  */
/*
 * Allocate a sub-page fragment: round the request up to the next power
 * of two (at least malloc_minsize), then hand out the first free chunk
 * from the first page in that size's bucket list.  Returns NULL if no
 * page of that size could be created.
 */
static void *
malloc_bytes(size_t size)
{
    size_t i;
    int j;
    u_int u;
    struct  pginfo *bp;
    size_t k;
    u_int *lp;

    /* Don't bother with anything less than this */
    if (size < malloc_minsize)
	size = malloc_minsize;


    /* Find the right bucket: j = ceil(log2(size)) */
    j = 1;
    i = size-1;
    while (i >>= 1)
	j++;

    /* If it's empty, make a page more of that size chunks */
    if (page_dir[j] == NULL && !malloc_make_chunks(j))
	return NULL;

    bp = page_dir[j];

    /* Find first word of bitmap which isn't empty */
    for (lp = bp->bits; !*lp; lp++)
	;

    /* Find that bit, and tweak it (clear it: chunk is now in use) */
    u = 1;
    k = 0;
    while (!(*lp & u)) {
	u += u;
	k++;
    }
    *lp ^= u;

    /* If there are no more free, remove from free-list */
    if (!--bp->free) {
	page_dir[j] = bp->next;
	bp->next = NULL;
    }

    /* Adjust to the real offset of that chunk */
    k += (lp-bp->bits)*MALLOC_BITS;
    k <<= bp->shift;

    if (malloc_junk)
	memset((u_char*)bp->page + k, SOME_JUNK, (size_t)bp->size);

    return (u_char *)bp->page + k;
}
    772 
    773 /*
    774  * Allocate a piece of memory
    775  */
    776 static void *
    777 imalloc(size_t size)
    778 {
    779     void *result;
    780 
    781     if (suicide)
    782 	abort();
    783 
    784     if ((size + malloc_pagesize) < size)	/* Check for overflow */
    785 	result = NULL;
    786     else if ((size + malloc_pagesize) >= (uintptr_t)page_dir)
    787 	result = NULL;
    788     else if (size <= malloc_maxsize)
    789 	result = malloc_bytes(size);
    790     else
    791 	result = malloc_pages(size);
    792 
    793     if (malloc_abort && result == NULL)
    794 	wrterror("allocation failed.\n");
    795 
    796     if (malloc_zero && result != NULL)
    797 	memset(result, 0, size);
    798 
    799     return result;
    800 }
    801 
    802 /*
    803  * Change the size of an allocation.
    804  */
/*
 * Change the size of an allocation.  Validates the pointer against the
 * page directory, keeps the allocation in place when the new size still
 * fits sensibly (unless the 'R' option forces a move), otherwise
 * allocates fresh storage, copies the lesser of the two sizes, and
 * frees the old block.  Returns NULL (or 0) on bad pointers or failure.
 */
static void *
irealloc(void *ptr, size_t size)
{
    void *p;
    size_t osize, idx;
    struct pginfo **mp;
    size_t i;

    if (suicide)
	abort();

    idx = ptr2idx(ptr);

    /* Indices below malloc_pageshift are the stolen bucket-head slots. */
    if (idx < malloc_pageshift) {
	wrtwarning("junk pointer, too low to make sense.\n");
	return 0;
    }

    if (idx > last_idx) {
	wrtwarning("junk pointer, too high to make sense.\n");
	return 0;
    }

    mp = &page_dir[idx];

    if (*mp == MALLOC_FIRST) {			/* Page allocation */

	/* Check the pointer */
	if ((size_t)(uintptr_t)ptr & malloc_pagemask) {
	    wrtwarning("modified (page-) pointer.\n");
	    return NULL;
	}

	/* Find the size in bytes by walking the MALLOC_FOLLOW run */
	for (osize = malloc_pagesize; *++mp == MALLOC_FOLLOW;)
	    osize += malloc_pagesize;

        if (!malloc_realloc && 			/* unless we have to, */
	  size <= osize && 			/* .. or are too small, */
	  size > (osize - malloc_pagesize)) {	/* .. or can free a page, */
	    if (malloc_junk)
		memset((u_char *)ptr + size, SOME_JUNK, osize-size);
	    return ptr;				/* don't do anything. */
	}

    } else if (*mp >= MALLOC_MAGIC) {		/* Chunk allocation */

	/* Check the pointer for sane values (must be chunk-aligned) */
	if (((size_t)(uintptr_t)ptr & ((*mp)->size-1))) {
	    wrtwarning("modified (chunk-) pointer.\n");
	    return NULL;
	}

	/* Find the chunk index in the page */
	i = ((size_t)(uintptr_t)ptr & malloc_pagemask) >> (*mp)->shift;

	/* Verify that it isn't a free chunk already */
        if ((*mp)->bits[i/MALLOC_BITS] & (1UL << (i % MALLOC_BITS))) {
	    wrtwarning("chunk is already free.\n");
	    return NULL;
	}

	osize = (*mp)->size;

	if (!malloc_realloc &&		/* Unless we have to, */
	  size <= osize && 		/* ..or are too small, */
	  (size > osize / 2 ||	 	/* ..or could use a smaller size, */
	  osize == malloc_minsize)) {	/* ..(if there is one) */
	    if (malloc_junk)
		memset((u_char *)ptr + size, SOME_JUNK, osize-size);
	    return ptr;			/* ..Don't do anything */
	}

    } else {
	wrtwarning("pointer to wrong page.\n");
	return NULL;
    }

    p = imalloc(size);

    if (p != NULL) {
	/* copy the lesser of the two sizes, and free the old one */
	if (!size || !osize)
	    ;
	else if (osize < size)
	    memcpy(p, ptr, osize);
	else
	    memcpy(p, ptr, size);
	ifree(ptr);
    }
    return p;
}
    897 
    898 /*
    899  * Free a sequence of pages
    900  */
    901 
/*
 * Free a run of pages starting at 'ptr' (page directory index 'idx',
 * whose entry is 'info').  Marks the pages MALLOC_FREE, inserts/merges
 * the run into the address-ordered free list, and returns memory to the
 * OS via brk(2) when the tail of the list exceeds the cache size and
 * abuts the current break.
 */
static inline void
free_pages(void *ptr, size_t idx, struct pginfo *info)
{
    size_t i;
    struct pgfree *pf, *pt=NULL;
    size_t l;
    void *tail;

    if (info == MALLOC_FREE) {
	wrtwarning("page is already free.\n");
	return;
    }

    if (info != MALLOC_FIRST) {
	wrtwarning("pointer to wrong page.\n");
	return;
    }

    if ((size_t)(uintptr_t)ptr & malloc_pagemask) {
	wrtwarning("modified (page-) pointer.\n");
	return;
    }

    /* Count how many pages and mark them free at the same time */
    page_dir[idx] = MALLOC_FREE;
    for (i = 1; page_dir[idx+i] == MALLOC_FOLLOW; i++)
	page_dir[idx + i] = MALLOC_FREE;

    l = i << malloc_pageshift;	/* size of the run in bytes */

    if (malloc_junk)
	memset(ptr, SOME_JUNK, l);

    if (malloc_hint)
	madvise(ptr, l, MADV_FREE);

    tail = (char *)ptr+l;

    /* add to free-list */
    if (px == NULL)
	/*
	 * This cannot fail...
	 * NOTE(review): imalloc() can in fact return NULL here, which
	 * would crash below — presumably considered unrecoverable OOM;
	 * verify against upstream history.
	 */
	px = imalloc(sizeof *px);
    px->page = ptr;
    px->end =  tail;
    px->size = l;
    if (free_list.next == NULL) {

	/* Nothing on free list, put this at head */
	px->next = free_list.next;
	px->prev = &free_list;
	free_list.next = px;
	pf = px;
	px = NULL;

    } else {

	/* Find the right spot, leave pf pointing to the modified entry. */
	tail = (char *)ptr+l;

	for(pf = free_list.next; pf->end < ptr && pf->next != NULL;
	    pf = pf->next)
	    ; /* Race ahead here */

	if (pf->page > tail) {
	    /* Insert before entry */
	    px->next = pf;
	    px->prev = pf->prev;
	    pf->prev = px;
	    px->prev->next = px;
	    pf = px;
	    px = NULL;
	} else if (pf->end == ptr ) {
	    /* Append to the previous entry */
	    pf->end = (char *)pf->end + l;
	    pf->size += l;
	    if (pf->next != NULL && pf->end == pf->next->page ) {
		/* And collapse the next too. */
		pt = pf->next;
		pf->end = pt->end;
		pf->size += pt->size;
		pf->next = pt->next;
		if (pf->next != NULL)
		    pf->next->prev = pf;
	    }
	} else if (pf->page == tail) {
	    /* Prepend to entry */
	    pf->size += l;
	    pf->page = ptr;
	} else if (pf->next == NULL) {
	    /* Append at tail of chain */
	    px->next = NULL;
	    px->prev = pf;
	    pf->next = px;
	    pf = px;
	    px = NULL;
	} else {
	    wrterror("freelist is destroyed.\n");
	}
    }

    /* Return something to OS ? */
    if (pf->next == NULL &&			/* If we're the last one, */
      pf->size > malloc_cache &&		/* ..and the cache is full, */
      pf->end == malloc_brk &&			/* ..and none behind us, */
      malloc_brk == sbrk((intptr_t)0)) {	/* ..and it's OK to do... */

	/*
	 * Keep the cache intact.  Notice that the '>' above guarantees that
	 * the pf will always have at least one page afterwards.
	 */
	pf->end = (char *)pf->page + malloc_cache;
	pf->size = malloc_cache;

	brk(pf->end);
	malloc_brk = pf->end;

	idx = ptr2idx(pf->end);

	/* Pages past the new break are no longer ours. */
	for(i=idx;i <= last_idx;)
	    page_dir[i++] = MALLOC_NOT_MINE;

	last_idx = idx - 1;

	/* XXX: We could realloc/shrink the pagedir here I guess. */
    }
    /* Free the holder of a fully-merged neighbour, if we unlinked one. */
    if (pt != NULL)
	ifree(pt);
}
   1029 
   1030 /*
   1031  * Free a chunk, and possibly the page it's on, if the page becomes empty.
   1032  */
   1033 
/*
 * Free one chunk on a page of small (sub-page) allocations.  'ptr' is the
 * chunk, 'idx' the page's index in page_dir, and 'info' the pginfo
 * descriptor for that page.  If the page becomes completely free, the page
 * itself (and the info structure, when it lives outside the page) is
 * released via ifree().
 */
static inline void
free_bytes(void *ptr, size_t idx, struct pginfo *info)
{
    size_t i;
    struct pginfo **mp;
    void *vp;

    /* Find the chunk number on the page */
    i = ((size_t)(uintptr_t)ptr & malloc_pagemask) >> info->shift;

    /* The pointer must be aligned to this page's chunk size. */
    if (((size_t)(uintptr_t)ptr & (info->size-1))) {
	wrtwarning("modified (chunk-) pointer.\n");
	return;
    }

    /* A set bit in the bitmap means the chunk is already free. */
    if (info->bits[i/MALLOC_BITS] & (1UL << (i % MALLOC_BITS))) {
	wrtwarning("chunk is already free.\n");
	return;
    }

    /* Fill freed chunk with junk when the 'J' option is enabled. */
    if (malloc_junk)
	memset(ptr, SOME_JUNK, (size_t)info->size);

    /* Mark the chunk free and bump the page's free count. */
    info->bits[i/MALLOC_BITS] |= (u_int)(1UL << (i % MALLOC_BITS));
    info->free++;

    /*
     * Low page_dir slots (index < malloc_pageshift, rejected by ifree()
     * as "junk, too low") double as list heads for pages of each chunk
     * size, keyed by the size shift — NOTE(review): presumed from the
     * usage here; confirm against the allocation path.
     */
    mp = page_dir + info->shift;

    if (info->free == 1) {

	/* Page became non-full */

	mp = page_dir + info->shift;	/* NOTE(review): redundant, mp already set above */
	/* Insert in address order */
	while (*mp && (*mp)->next && (*mp)->next->page < info->page)
	    mp = &(*mp)->next;
	info->next = *mp;
	*mp = info;
	return;
    }

    /* Page still has chunks in use; nothing more to do. */
    if (info->free != info->total)
	return;

    /* Find & remove this page in the queue */
    while (*mp != info) {
	mp = &((*mp)->next);
#ifdef MALLOC_EXTRA_SANITY
	if (!*mp)
		wrterror("(ES): Not on queue.\n");
#endif /* MALLOC_EXTRA_SANITY */
    }
    *mp = info->next;

    /* Free the page & the info structure if need be */
    page_dir[idx] = MALLOC_FIRST;
    vp = info->page;		/* Order is important ! */
    if(vp != (void*)info)	/* info stored outside the page itself? */
	ifree(info);
    ifree(vp);
}
   1095 
   1096 static void
   1097 ifree(void *ptr)
   1098 {
   1099     struct pginfo *info;
   1100     size_t idx;
   1101 
   1102     /* This is legal */
   1103     if (ptr == NULL)
   1104 	return;
   1105 
   1106     /* If we're already sinking, don't make matters any worse. */
   1107     if (suicide)
   1108 	return;
   1109 
   1110     idx = ptr2idx(ptr);
   1111 
   1112     if (idx < malloc_pageshift) {
   1113 	wrtwarning("junk pointer, too low to make sense.\n");
   1114 	return;
   1115     }
   1116 
   1117     if (idx > last_idx) {
   1118 	wrtwarning("junk pointer, too high to make sense.\n");
   1119 	return;
   1120     }
   1121 
   1122     info = page_dir[idx];
   1123 
   1124     if (info < MALLOC_MAGIC)
   1125         free_pages(ptr, idx, info);
   1126     else
   1127 	free_bytes(ptr, idx, info);
   1128     return;
   1129 }
   1130 
static int malloc_active; /* Recursion flag: 1 inside pubrealloc(), 2 after a recursion warning. */
static unsigned malloc_started; /* Nonzero once malloc_init() has been run */
   1133 
/*
 * Common entry point behind malloc/calloc/realloc/free.  Takes the malloc
 * lock, guards against recursion (e.g. from a signal handler), performs
 * lazy initialization, then dispatches on (ptr, size):
 *   size == 0            -> free ptr; return NULL (SysV mode) or ZEROSIZEPTR
 *   ptr == NULL          -> imalloc(size)
 *   otherwise            -> irealloc(ptr, size)
 * On allocation failure returns NULL with errno = ENOMEM (or aborts when
 * the 'X' / xmalloc option is set).  'func' names the caller for warnings.
 */
static void *
pubrealloc(void *ptr, size_t size, const char *func)
{
    void *r;
    int err = 0;

    /*
     * If a thread is inside our code with a functional lock held, and then
     * catches a signal which calls us again, we would get a deadlock if the
     * lock is not of a recursive type.
     */
    _MALLOC_LOCK();
    malloc_func = func;
    if (malloc_active > 0) {
	if (malloc_active == 1) {
	    /* Warn only once; malloc_active == 2 suppresses repeats. */
	    wrtwarning("recursive call\n");
	    malloc_active = 2;
	}
        _MALLOC_UNLOCK();
	errno = EINVAL;
	return (NULL);
    }
    malloc_active = 1;

    if (!malloc_started) {
        if (ptr != NULL) {
	    /* A non-NULL ptr can't be ours if we never initialized. */
	    wrtwarning("malloc() has never been called\n");
	    malloc_active = 0;
            _MALLOC_UNLOCK();
	    errno = EINVAL;
	    return (NULL);
	}
	malloc_init();
	malloc_started = 1;
    }

    /* ZEROSIZEPTR is the sentinel we hand out for zero-size requests. */
    if (ptr == ZEROSIZEPTR)
	ptr = NULL;
    if (malloc_sysv && !size) {
	/* SysV semantics: size 0 frees and returns NULL. */
	if (ptr != NULL)
	    ifree(ptr);
	r = NULL;
    } else if (!size) {
	/* Default semantics: size 0 yields the distinguishable sentinel. */
	if (ptr != NULL)
	    ifree(ptr);
	r = ZEROSIZEPTR;
    } else if (ptr == NULL) {
	r = imalloc(size);
	err = (r == NULL);
    } else {
        r = irealloc(ptr, size);
	err = (r == NULL);
    }
    UTRACE(ptr, size, r);
    malloc_active = 0;
    _MALLOC_UNLOCK();
    if (malloc_xmalloc && err)
	wrterror("out of memory\n");
    if (err)
	errno = ENOMEM;
    return (r);
}
   1196 
   1197 /*
   1198  * These are the public exported interface routines.
   1199  */
   1200 
   1201 void *
   1202 malloc(size_t size)
   1203 {
   1204 
   1205     return pubrealloc(NULL, size, " in malloc():");
   1206 }
   1207 
   1208 int
   1209 posix_memalign(void **memptr, size_t alignment, size_t size)
   1210 {
   1211     int err;
   1212     void *result;
   1213 
   1214     if (!malloc_started) {
   1215 	    malloc_init();
   1216 	    malloc_started = 1;
   1217     }
   1218     /* Make sure that alignment is a large enough power of 2. */
   1219     if (((alignment - 1) & alignment) != 0 || alignment < sizeof(void *) ||
   1220 	alignment > malloc_pagesize)
   1221 	    return EINVAL;
   1222 
   1223     /*
   1224      * (size | alignment) is enough to assure the requested alignment, since
   1225      * the allocator always allocates power-of-two blocks.
   1226      */
   1227     err = errno; /* Protect errno against changes in pubrealloc(). */
   1228     result = pubrealloc(NULL, (size | alignment), " in posix_memalign()");
   1229     errno = err;
   1230 
   1231     if (result == NULL)
   1232 	return ENOMEM;
   1233 
   1234     *memptr = result;
   1235     return 0;
   1236 }
   1237 
/*
 * calloc(3): allocate num*size bytes, zeroed, guarding against
 * multiplication overflow (which would otherwise silently under-allocate).
 */
void *
calloc(size_t num, size_t size)
{
    size_t total;
    void *p;

    total = num * size;
    if (size != 0 && total / size != num) {
	/* size_t overflow. */
	errno = ENOMEM;
	return (NULL);
    }

    p = pubrealloc(NULL, total, " in calloc():");

    if (p != NULL)
	memset(p, 0, total);

    return p;
}
   1256 
/*
 * free(3): implemented as a size-zero realloc; result intentionally
 * discarded.
 */
void
free(void *ptr)
{

    (void)pubrealloc(ptr, 0, " in free():");
}
   1263 
   1264 void *
   1265 realloc(void *ptr, size_t size)
   1266 {
   1267 
   1268     return pubrealloc(ptr, size, " in realloc():");
   1269 }
   1270 
   1271 /*
   1272  * Begin library-private functions, used by threading libraries for protection
   1273  * of malloc during fork().  These functions are only called if the program is
   1274  * running in threaded mode, so there is no need to check whether the program
   1275  * is threaded here.
   1276  */
   1277 
/*
 * Acquire the malloc lock before fork() so the child does not inherit
 * a lock held mid-operation by another thread.
 */
void
_malloc_prefork(void)
{

	_MALLOC_LOCK();
}
   1284 
/*
 * Release the malloc lock after fork(), in both parent and child.
 */
void
_malloc_postfork(void)
{

	_MALLOC_UNLOCK();
}
   1291