Home | History | Annotate | Line # | Download | only in stdlib
malloc.c revision 1.16
      1  1.16    kleink /*	$NetBSD: malloc.c,v 1.16 1999/01/29 08:11:36 kleink Exp $	*/
      2   1.5   thorpej 
      3   1.1       cgd /*
      4  1.11     perry  * Copyright (c) 1983, 1993
      5  1.11     perry  *	The Regents of the University of California.  All rights reserved.
      6   1.1       cgd  *
      7   1.1       cgd  * Redistribution and use in source and binary forms, with or without
      8   1.1       cgd  * modification, are permitted provided that the following conditions
      9   1.1       cgd  * are met:
     10   1.1       cgd  * 1. Redistributions of source code must retain the above copyright
     11   1.1       cgd  *    notice, this list of conditions and the following disclaimer.
     12   1.1       cgd  * 2. Redistributions in binary form must reproduce the above copyright
     13   1.1       cgd  *    notice, this list of conditions and the following disclaimer in the
     14   1.1       cgd  *    documentation and/or other materials provided with the distribution.
     15   1.1       cgd  * 3. All advertising materials mentioning features or use of this software
     16   1.1       cgd  *    must display the following acknowledgement:
     17   1.1       cgd  *	This product includes software developed by the University of
     18   1.1       cgd  *	California, Berkeley and its contributors.
     19   1.1       cgd  * 4. Neither the name of the University nor the names of its contributors
     20   1.1       cgd  *    may be used to endorse or promote products derived from this software
     21   1.1       cgd  *    without specific prior written permission.
     22   1.1       cgd  *
     23   1.1       cgd  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     24   1.1       cgd  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     25   1.1       cgd  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     26   1.1       cgd  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     27   1.1       cgd  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     28   1.1       cgd  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     29   1.1       cgd  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     30   1.1       cgd  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     31   1.1       cgd  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     32   1.1       cgd  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     33   1.1       cgd  * SUCH DAMAGE.
     34   1.1       cgd  */
     35   1.1       cgd 
     36   1.9  christos #include <sys/cdefs.h>
     37   1.1       cgd #if defined(LIBC_SCCS) && !defined(lint)
     38   1.5   thorpej #if 0
     39  1.11     perry static char sccsid[] = "@(#)malloc.c	8.1 (Berkeley) 6/4/93";
     40   1.5   thorpej #else
     41  1.16    kleink __RCSID("$NetBSD: malloc.c,v 1.16 1999/01/29 08:11:36 kleink Exp $");
     42   1.5   thorpej #endif
     43   1.1       cgd #endif /* LIBC_SCCS and not lint */
     44   1.1       cgd 
     45   1.1       cgd /*
     46   1.1       cgd  * malloc.c (Caltech) 2/21/82
     47   1.1       cgd  * Chris Kingsley, kingsley@cit-20.
     48   1.1       cgd  *
     49   1.1       cgd  * This is a very fast storage allocator.  It allocates blocks of a small
     50   1.1       cgd  * number of different sizes, and keeps free lists of each size.  Blocks that
     51   1.1       cgd  * don't exactly fit are passed up to the next larger size.  In this
     52   1.1       cgd  * implementation, the available sizes are 2^n-4 (or 2^n-10) bytes long.
     53   1.1       cgd  * This is designed for use in a virtual memory environment.
     54   1.1       cgd  */
     55   1.1       cgd 
     56  1.10       jtc #include "namespace.h"
     57  1.16    kleink #include <sys/types.h>
     58  1.16    kleink #if defined(DEBUG) || defined(RCHECK)
     59  1.16    kleink #include <sys/uio.h>
     60  1.16    kleink #endif
     61  1.16    kleink #if defined(RCHECK) || defined(MSTATS)
     62   1.9  christos #include <stdio.h>
     63   1.9  christos #endif
     64   1.1       cgd #include <stdlib.h>
     65   1.1       cgd #include <string.h>
     66   1.1       cgd #include <unistd.h>
     67  1.16    kleink #include "reentrant.h"
     68   1.1       cgd 
     69   1.1       cgd 
     70   1.1       cgd /*
     71   1.1       cgd  * The overhead on a block is at least 4 bytes.  When free, this space
     72   1.1       cgd  * contains a pointer to the next free block, and the bottom two bits must
     73   1.1       cgd  * be zero.  When in use, the first byte is set to MAGIC, and the second
     74   1.1       cgd  * byte is the size index.  The remaining bytes are for alignment.
     75   1.1       cgd  * If range checking is enabled then a second word holds the size of the
     76   1.1       cgd  * requested block, less 1, rounded up to a multiple of sizeof(RMAGIC).
     77   1.1       cgd  * The order of elements is critical: ov_magic must overlay the low order
     78   1.1       cgd  * bits of ov_next, and ov_magic can not be a valid ov_next bit pattern.
     79   1.1       cgd  */
/*
 * Per-block bookkeeping header; lives immediately before the pointer
 * handed to the user.  Layout is critical (see the comment above):
 * ov_magic must overlay the low-order bits of ov_next, and MAGIC can
 * never be a valid low byte of a real ov_next pointer.
 */
union	overhead {
	union	overhead *ov_next;	/* when free */
	struct {
		u_char	ovu_magic;	/* magic number */
		u_char	ovu_index;	/* bucket # */
#ifdef RCHECK
		u_short	ovu_rmagic;	/* range magic number */
		u_long	ovu_size;	/* actual block size */
#endif
	} ovu;
#define	ov_magic	ovu.ovu_magic
#define	ov_index	ovu.ovu_index
#define	ov_rmagic	ovu.ovu_rmagic
#define	ov_size		ovu.ovu_size
};
     95   1.1       cgd 
#define	MAGIC		0xef		/* magic # on accounting info */
#ifdef RCHECK
#define RMAGIC		0x5555		/* magic # on range info */
#endif

#ifdef RCHECK
/* slop reserved at the end of each block for the trailing range magic */
#define	RSLOP		sizeof (u_short)
#else
#define	RSLOP		0
#endif

/*
 * nextf[i] is the pointer to the next free block of size 2^(i+3).  The
 * smallest allocatable block is 8 bytes.  The overhead information
 * precedes the data area returned to the user.
 */
#define	NBUCKETS 30
static	union overhead *nextf[NBUCKETS];

static	long pagesz;			/* page size; 0 until first malloc() */
static	int pagebucket;			/* page size bucket */

#ifdef MSTATS
/*
 * nmalloc[i] is the difference between the number of mallocs and frees
 * for a given block size.
 */
static	u_int nmalloc[NBUCKETS];
#endif

#ifdef _REENT
/* serializes all access to nextf[]/nmalloc[] in threaded programs */
static	mutex_t malloc_mutex = MUTEX_INITIALIZER;
#endif

static void morecore __P((int));
static int findbucket __P((union overhead *, int));
#ifdef MSTATS
void mstats __P((const char *));
#endif
    135   1.9  christos 
#if defined(DEBUG) || defined(RCHECK)
#define	ASSERT(p)   if (!(p)) botch(__STRING(p))

static void botch __P((const char *));

/*
 * Report a failed assertion and abort.
 *
 * NOTE: since this may be called while malloc_mutex is locked, stdio must not
 *       be used in this function; output goes through writev() directly.
 */
static void
botch(s)
	const char *s;
{
	struct iovec iov[3];

	iov[0].iov_base	= "\nassertion botched: ";
	iov[0].iov_len	= 20;		/* strlen of the literal above */
	iov[1].iov_base	= (void *)s;
	iov[1].iov_len	= strlen(s);
	iov[2].iov_base	= "\n";
	iov[2].iov_len	= 1;

	/*
	 * This place deserves a word of warning: a cancellation point will
	 * occur when executing writev(), and we might be still owning
	 * malloc_mutex.  At this point we need to disable cancellation
	 * until `after' abort() because i) establishing a cancellation handler
	 * might, depending on the implementation, result in another malloc()
	 * to be executed, and ii) it is really not desirable to let execution
	 * continue.  `Fix me.'
	 *
	 * Note that holding mutex_lock during abort() is safe.
	 */

	(void)writev(STDERR_FILENO, iov, 3);
	abort();
}
#else
#define	ASSERT(p)
#endif
    176   1.1       cgd 
    177   1.1       cgd void *
    178   1.1       cgd malloc(nbytes)
    179   1.1       cgd 	size_t nbytes;
    180   1.1       cgd {
    181  1.12     perry   	union overhead *op;
    182  1.12     perry 	int bucket;
    183  1.12     perry   	long n;
    184  1.12     perry 	unsigned amt;
    185   1.1       cgd 
    186  1.16    kleink 	mutex_lock(&malloc_mutex);
    187  1.16    kleink 
    188   1.1       cgd 	/*
    189   1.1       cgd 	 * First time malloc is called, setup page size and
    190   1.1       cgd 	 * align break pointer so all data will be page aligned.
    191   1.1       cgd 	 */
    192   1.1       cgd 	if (pagesz == 0) {
    193   1.1       cgd 		pagesz = n = getpagesize();
    194  1.16    kleink 		ASSERT(pagesz > 0);
    195  1.15  christos 		op = (union overhead *)(void *)sbrk(0);
    196   1.4       cgd   		n = n - sizeof (*op) - ((long)op & (n - 1));
    197   1.1       cgd 		if (n < 0)
    198   1.1       cgd 			n += pagesz;
    199  1.16    kleink 		if (n) {
    200  1.16    kleink 			if (sbrk((int)n) == (void *)-1) {
    201  1.16    kleink 				mutex_unlock(&malloc_mutex);
    202   1.1       cgd 				return (NULL);
    203  1.16    kleink 			}
    204   1.1       cgd 		}
    205   1.1       cgd 		bucket = 0;
    206   1.1       cgd 		amt = 8;
    207   1.1       cgd 		while (pagesz > amt) {
    208   1.1       cgd 			amt <<= 1;
    209   1.1       cgd 			bucket++;
    210   1.1       cgd 		}
    211   1.1       cgd 		pagebucket = bucket;
    212   1.1       cgd 	}
    213   1.1       cgd 	/*
    214   1.1       cgd 	 * Convert amount of memory requested into closest block size
    215   1.1       cgd 	 * stored in hash buckets which satisfies request.
    216   1.1       cgd 	 * Account for space used per block for accounting.
    217   1.1       cgd 	 */
    218   1.1       cgd 	if (nbytes <= (n = pagesz - sizeof (*op) - RSLOP)) {
    219   1.1       cgd #ifndef RCHECK
    220   1.1       cgd 		amt = 8;	/* size of first bucket */
    221   1.1       cgd 		bucket = 0;
    222   1.1       cgd #else
    223   1.1       cgd 		amt = 16;	/* size of first bucket */
    224   1.1       cgd 		bucket = 1;
    225   1.1       cgd #endif
    226   1.4       cgd 		n = -((long)sizeof (*op) + RSLOP);
    227   1.1       cgd 	} else {
    228  1.15  christos 		amt = (unsigned)pagesz;
    229   1.1       cgd 		bucket = pagebucket;
    230   1.1       cgd 	}
    231   1.1       cgd 	while (nbytes > amt + n) {
    232   1.1       cgd 		amt <<= 1;
    233   1.1       cgd 		if (amt == 0)
    234   1.1       cgd 			return (NULL);
    235   1.1       cgd 		bucket++;
    236   1.1       cgd 	}
    237   1.1       cgd 	/*
    238   1.1       cgd 	 * If nothing in hash bucket right now,
    239   1.1       cgd 	 * request more memory from the system.
    240   1.1       cgd 	 */
    241   1.1       cgd   	if ((op = nextf[bucket]) == NULL) {
    242   1.1       cgd   		morecore(bucket);
    243  1.16    kleink   		if ((op = nextf[bucket]) == NULL) {
    244  1.16    kleink 			mutex_unlock(&malloc_mutex);
    245   1.1       cgd   			return (NULL);
    246  1.16    kleink 		}
    247   1.1       cgd 	}
    248   1.1       cgd 	/* remove from linked list */
    249   1.1       cgd   	nextf[bucket] = op->ov_next;
    250   1.1       cgd 	op->ov_magic = MAGIC;
    251   1.1       cgd 	op->ov_index = bucket;
    252   1.1       cgd #ifdef MSTATS
    253   1.1       cgd   	nmalloc[bucket]++;
    254   1.1       cgd #endif
    255  1.16    kleink 	mutex_unlock(&malloc_mutex);
    256   1.1       cgd #ifdef RCHECK
    257   1.1       cgd 	/*
    258   1.1       cgd 	 * Record allocated size of block and
    259   1.1       cgd 	 * bound space with magic numbers.
    260   1.1       cgd 	 */
    261   1.1       cgd 	op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1);
    262   1.1       cgd 	op->ov_rmagic = RMAGIC;
    263   1.1       cgd   	*(u_short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC;
    264   1.1       cgd #endif
    265  1.16    kleink   	return ((void *)(op + 1));
    266   1.1       cgd }
    267   1.1       cgd 
    268   1.1       cgd /*
    269   1.1       cgd  * Allocate more memory to the indicated bucket.
    270   1.1       cgd  */
    271   1.1       cgd static void
    272   1.1       cgd morecore(bucket)
    273   1.1       cgd 	int bucket;
    274   1.1       cgd {
    275  1.12     perry   	union overhead *op;
    276  1.12     perry 	long sz;		/* size of desired block */
    277   1.4       cgd   	long amt;			/* amount to allocate */
    278  1.15  christos   	long nblks;			/* how many blocks we get */
    279   1.1       cgd 
    280   1.1       cgd 	/*
    281   1.1       cgd 	 * sbrk_size <= 0 only for big, FLUFFY, requests (about
    282   1.1       cgd 	 * 2^30 bytes on a VAX, I think) or for a negative arg.
    283   1.1       cgd 	 */
    284   1.1       cgd 	sz = 1 << (bucket + 3);
    285   1.1       cgd #ifdef DEBUG
    286   1.1       cgd 	ASSERT(sz > 0);
    287   1.1       cgd #else
    288   1.1       cgd 	if (sz <= 0)
    289   1.1       cgd 		return;
    290   1.1       cgd #endif
    291   1.1       cgd 	if (sz < pagesz) {
    292   1.1       cgd 		amt = pagesz;
    293   1.1       cgd   		nblks = amt / sz;
    294   1.1       cgd 	} else {
    295   1.1       cgd 		amt = sz + pagesz;
    296   1.1       cgd 		nblks = 1;
    297   1.1       cgd 	}
    298  1.15  christos 	op = (union overhead *)(void *)sbrk((int)amt);
    299   1.1       cgd 	/* no more room! */
    300   1.4       cgd   	if ((long)op == -1)
    301   1.1       cgd   		return;
    302   1.1       cgd 	/*
    303   1.1       cgd 	 * Add new memory allocated to that on
    304   1.1       cgd 	 * free list for this hash bucket.
    305   1.1       cgd 	 */
    306   1.1       cgd   	nextf[bucket] = op;
    307   1.1       cgd   	while (--nblks > 0) {
    308  1.15  christos 		op->ov_next =
    309  1.15  christos 		    (union overhead *)(void *)((caddr_t)(void *)op+(size_t)sz);
    310  1.15  christos 		op = op->ov_next;
    311   1.1       cgd   	}
    312   1.1       cgd }
    313   1.1       cgd 
    314   1.1       cgd void
    315   1.1       cgd free(cp)
    316   1.1       cgd 	void *cp;
    317   1.1       cgd {
    318  1.16    kleink 	long size;
    319  1.12     perry 	union overhead *op;
    320   1.1       cgd 
    321   1.1       cgd   	if (cp == NULL)
    322   1.1       cgd   		return;
    323  1.15  christos 	op = (union overhead *)(void *)((caddr_t)cp - sizeof (union overhead));
    324   1.1       cgd #ifdef DEBUG
    325   1.1       cgd   	ASSERT(op->ov_magic == MAGIC);		/* make sure it was in use */
    326   1.1       cgd #else
    327   1.1       cgd 	if (op->ov_magic != MAGIC)
    328   1.1       cgd 		return;				/* sanity */
    329   1.1       cgd #endif
    330   1.1       cgd #ifdef RCHECK
    331   1.1       cgd   	ASSERT(op->ov_rmagic == RMAGIC);
    332   1.1       cgd 	ASSERT(*(u_short *)((caddr_t)(op + 1) + op->ov_size) == RMAGIC);
    333   1.1       cgd #endif
    334   1.1       cgd   	size = op->ov_index;
    335   1.1       cgd   	ASSERT(size < NBUCKETS);
    336  1.16    kleink 	mutex_lock(&malloc_mutex);
    337  1.16    kleink 	op->ov_next = nextf[(unsigned int)size];/* also clobbers ov_magic */
    338  1.16    kleink   	nextf[(unsigned int)size] = op;
    339   1.1       cgd #ifdef MSTATS
    340  1.15  christos   	nmalloc[(size_t)size]--;
    341   1.1       cgd #endif
    342  1.16    kleink 	mutex_unlock(&malloc_mutex);
    343   1.1       cgd }
    344   1.1       cgd 
/*
 * When a program attempts "storage compaction" as mentioned in the
 * old malloc man page, it realloc's an already freed block.  Usually
 * this is the last block it freed; occasionally it might be farther
 * back.  We have to search all the free lists for the block in order
 * to determine its bucket: 1st we make one pass thru the lists
 * checking only the first block in each; if that fails we search
 * ``__realloc_srchlen'' blocks in each list for a match (the variable
 * is extern so the caller can modify it).  If that fails we just copy
 * however many bytes was given to realloc() and hope it's not huge.
 */
int __realloc_srchlen = 4;	/* 4 should be plenty, -1 =>'s whole list */
    357   1.1       cgd 
/*
 * Resize the block at cp to nbytes.
 *
 * realloc(NULL, n) behaves as malloc(n); realloc(cp, 0) frees cp and
 * returns NULL.  NOTE(review): if the new allocation fails, cp ends up
 * freed on both the _REENT and non-_REENT paths -- stricter than ISO C
 * realloc(), which leaves the old block intact on failure.  This
 * appears deliberate (both paths agree); confirm before changing.
 */
void *
realloc(cp, nbytes)
	void *cp;
	size_t nbytes;
{
  	u_long onb;
	long i;
	union overhead *op;
	char *res;
	int was_alloced = 0;	/* header carried a valid MAGIC */

  	if (cp == NULL)
  		return (malloc(nbytes));
	if (nbytes == 0) {
		free (cp);
		return (NULL);
	}
	op = (union overhead *)(void *)((caddr_t)cp - sizeof (union overhead));
	mutex_lock(&malloc_mutex);
	if (op->ov_magic == MAGIC) {
		was_alloced++;
		i = op->ov_index;
	} else {
		/*
		 * Already free, doing "compaction".
		 *
		 * Search for the old block of memory on the
		 * free list.  First, check the most common
		 * case (last element free'd), then (this failing)
		 * the last ``__realloc_srchlen'' items free'd.
		 * If all lookups fail, then assume the size of
		 * the memory block being realloc'd is the
		 * largest possible (so that all "nbytes" of new
		 * memory are copied into).  Note that this could cause
		 * a memory fault if the old area was tiny, and the moon
		 * is gibbous.  However, that is very unlikely.
		 */
		if ((i = findbucket(op, 1)) < 0 &&
		    (i = findbucket(op, __realloc_srchlen)) < 0)
			i = NBUCKETS;
	}
	/* onb = usable bytes in the old block's bucket (upper bound) */
	onb = (u_long)1 << (u_long)(i + 3);
	if (onb < pagesz)
		onb -= sizeof (*op) + RSLOP;
	else
		onb += pagesz - sizeof (*op) - RSLOP;
	/* avoid the copy if same size block */
	if (was_alloced) {
		if (i) {
			/* i = usable bytes in the next-smaller bucket */
			i = (long)1 << (long)(i + 2);
			if (i < pagesz)
				i -= sizeof (*op) + RSLOP;
			else
				i += pagesz - sizeof (*op) - RSLOP;
		}
		/* new size still fits this bucket but not a smaller one */
		if (nbytes <= onb && nbytes > i) {
#ifdef RCHECK
			op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1);
			*(u_short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC;
#endif
			mutex_unlock(&malloc_mutex);
			return (cp);

		}
#ifndef _REENT
		/*
		 * Single-threaded: free first so malloc() below may
		 * reuse the block; safe only because nothing else can
		 * grab it between the free and the copy.
		 */
		else
			free(cp);
#endif
	}
	mutex_unlock(&malloc_mutex);
	if ((res = malloc(nbytes)) == NULL) {
#ifdef _REENT
		free(cp);
#endif
		return (NULL);
	}
#ifndef _REENT
	if (cp != res)		/* common optimization if "compacting" */
		(void)memmove(res, cp, (size_t)((nbytes < onb) ? nbytes : onb));
#else
	/* threaded: copy out of the still-live block, then release it */
	(void)memmove(res, cp, (size_t)((nbytes < onb) ? nbytes : onb));
	free(cp);
#endif
  	return (res);
}
    443   1.1       cgd 
    444   1.1       cgd /*
    445   1.1       cgd  * Search ``srchlen'' elements of each free list for a block whose
    446   1.1       cgd  * header starts at ``freep''.  If srchlen is -1 search the whole list.
    447   1.1       cgd  * Return bucket number, or -1 if not found.
    448   1.1       cgd  */
    449   1.9  christos static int
    450   1.1       cgd findbucket(freep, srchlen)
    451   1.1       cgd 	union overhead *freep;
    452   1.1       cgd 	int srchlen;
    453   1.1       cgd {
    454  1.12     perry 	union overhead *p;
    455  1.12     perry 	int i, j;
    456   1.1       cgd 
    457   1.1       cgd 	for (i = 0; i < NBUCKETS; i++) {
    458   1.1       cgd 		j = 0;
    459   1.1       cgd 		for (p = nextf[i]; p && j != srchlen; p = p->ov_next) {
    460   1.1       cgd 			if (p == freep)
    461   1.1       cgd 				return (i);
    462   1.1       cgd 			j++;
    463   1.1       cgd 		}
    464   1.1       cgd 	}
    465   1.1       cgd 	return (-1);
    466   1.1       cgd }
    467   1.1       cgd 
#ifdef MSTATS
/*
 * mstats - print out statistics about malloc
 *
 * Prints two lines of numbers, one showing the length of the free list
 * for each size category, the second showing the number of mallocs -
 * frees for each size category.
 *
 * FIX: parameter is now const char * to match the forward declaration
 * above (the old definition said plain char *, a conflicting type).
 */
void
mstats(s)
	const char *s;
{
  	int i, j;
  	union overhead *p;
  	int totfree = 0,
  	totused = 0;

  	fprintf(stderr, "Memory allocation statistics %s\nfree:\t", s);
  	for (i = 0; i < NBUCKETS; i++) {
		/* j = length of the free list for bucket i */
  		for (j = 0, p = nextf[i]; p; p = p->ov_next, j++)
  			;
  		fprintf(stderr, " %d", j);
  		totfree += j * (1 << (i + 3));
  	}
  	fprintf(stderr, "\nused:\t");
  	for (i = 0; i < NBUCKETS; i++) {
  		fprintf(stderr, " %d", nmalloc[i]);
  		totused += nmalloc[i] * (1 << (i + 3));
  	}
  	fprintf(stderr, "\n\tTotal in use: %d, total free: %d\n",
	    totused, totfree);
}
#endif
    501