/*	$NetBSD: kern_malloc.c,v 1.27 1998/01/21 22:24:33 thorpej Exp $	*/

/*
 * Copyright 1996 Christopher G. Demetriou.  All rights reserved.
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/map.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>

#include "opt_kmemstats.h"
#include "opt_malloclog.h"

struct kmembuckets bucket[MINBUCKET + 16];
struct kmemstats kmemstats[M_LAST];
struct kmemusage *kmemusage;
char *kmembase, *kmemlimit;
const char *memname[] = INITKMEMNAMES;

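/*
 * bucket[] holds one power-of-two freelist per allocation size, and
 * kmemusage[] records per-cluster bookkeeping for the kmem_map pages
 * between kmembase and kmemlimit (all of which is set up in kmeminit()
 * below).  For illustration only, assuming the usual MINBUCKET of 4
 * from <sys/malloc.h>: a 100-byte request has BUCKETINDX(100) == 7 and
 * is carved from the 128-byte freelist headed at bucket[7].
 */
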
#ifdef MALLOCLOG
#ifndef MALLOCLOGSIZE
#define	MALLOCLOGSIZE	100000
#endif

struct malloclog {
	void *addr;
	long size;
	int type;
	int action;
	const char *file;
	long line;
} malloclog[MALLOCLOGSIZE];

long	malloclogptr;

static void domlog __P((void *a, long size, int type, int action,
	const char *file, long line));
static void hitmlog __P((void *a));

/*
 * Record an allocation (action 1) or a free (action 2) in the
 * circular malloc log.
 */
static void
domlog(a, size, type, action, file, line)
	void *a;
	long size;
	int type;
	int action;
	const char *file;
	long line;
{

	malloclog[malloclogptr].addr = a;
	malloclog[malloclogptr].size = size;
	malloclog[malloclogptr].type = type;
	malloclog[malloclogptr].action = action;
	malloclog[malloclogptr].file = file;
	malloclog[malloclogptr].line = line;
	malloclogptr++;
	if (malloclogptr >= MALLOCLOGSIZE)
		malloclogptr = 0;
}

/*
 * Print every logged event that refers to the address a.
 */
static void
hitmlog(a)
	void *a;
{
	struct malloclog *lp;
	long l;

#define	PRT \
	if (malloclog[l].addr == a && malloclog[l].action) { \
		lp = &malloclog[l]; \
		printf("malloc log entry %ld:\n", l); \
		printf("\taddr = %p\n", lp->addr); \
		printf("\tsize = %ld\n", lp->size); \
		printf("\ttype = %s\n", memname[lp->type]); \
		printf("\taction = %s\n", lp->action == 1 ? "alloc" : "free"); \
		printf("\tfile = %s\n", lp->file); \
		printf("\tline = %ld\n", lp->line); \
	}

	for (l = malloclogptr; l < MALLOCLOGSIZE; l++)
		PRT

	for (l = 0; l < malloclogptr; l++)
		PRT
}
#endif /* MALLOCLOG */

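/*
 * With "options MALLOCLOG" the public malloc()/free() names are expected
 * to be wrapped by <sys/malloc.h> so that every call site reaches the
 * _malloc()/_free() variants below with its file and line.  A rough
 * sketch of such wrappers (illustrative only; the real macros live in
 * <sys/malloc.h>):
 *
 *	#define	malloc(size, type, flags) \
 *		_malloc((size), (type), (flags), __FILE__, __LINE__)
 *	#define	free(addr, type) \
 *		_free((addr), (type), __FILE__, __LINE__)
 */
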
#ifdef DIAGNOSTIC
/*
 * This array provides a set of masks to catch unaligned frees.
 */
long addrmask[] = { 0,
	0x00000001, 0x00000003, 0x00000007, 0x0000000f,
	0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
	0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
	0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
};

/*
 * The WEIRD_ADDR is used as known text to copy into free objects so
 * that modifications after frees can be detected.
 */
#define WEIRD_ADDR	((unsigned) 0xdeadbeef)
#define MAX_COPY	32

/*
 * Normally the freelist structure is used only to hold the list pointer
 * for free objects.  However, when running with diagnostics, the first
 * 8 bytes of the structure are unused except for diagnostic information,
 * and the free list pointer is at offset 8 in the structure.  Since the
 * first 8 bytes are the portion of the structure most often modified, this
 * helps to detect memory reuse problems and avoid free list corruption.
 */
struct freelist {
	int32_t	spare0;
	int16_t	type;
	int16_t	spare1;
	caddr_t	next;
};
#else /* !DIAGNOSTIC */
struct freelist {
	caddr_t	next;
};
#endif /* DIAGNOSTIC */

/*
 * Allocate a block of memory
 */
#ifdef MALLOCLOG
void *
_malloc(size, type, flags, file, line)
	unsigned long size;
	int type, flags;
	const char *file;
	long line;
#else
void *
malloc(size, type, flags)
	unsigned long size;
	int type, flags;
#endif /* MALLOCLOG */
{
	register struct kmembuckets *kbp;
	register struct kmemusage *kup;
	register struct freelist *freep;
	long indx, npg, allocsize;
	int s;
	caddr_t va, cp, savedlist;
#ifdef DIAGNOSTIC
	int32_t *end, *lp;
	int copysize;
	const char *savedtype;
#endif
#ifdef KMEMSTATS
	register struct kmemstats *ksp = &kmemstats[type];

	if (((unsigned long)type) >= M_LAST)
		panic("malloc - bogus type");
#endif
	indx = BUCKETINDX(size);
	kbp = &bucket[indx];
	s = splimp();
#ifdef KMEMSTATS
	while (ksp->ks_memuse >= ksp->ks_limit) {
		if (flags & M_NOWAIT) {
			splx(s);
			return ((void *) NULL);
		}
		if (ksp->ks_limblocks < 65535)
			ksp->ks_limblocks++;
		tsleep((caddr_t)ksp, PSWP+2, memname[type], 0);
	}
	ksp->ks_size |= 1 << indx;
#endif
#ifdef DIAGNOSTIC
	copysize = 1 << indx < MAX_COPY ? 1 << indx : MAX_COPY;
#endif
	if (kbp->kb_next == NULL) {
		kbp->kb_last = NULL;
		if (size > MAXALLOCSAVE)
			allocsize = roundup(size, CLBYTES);
		else
			allocsize = 1 << indx;
		npg = clrnd(btoc(allocsize));
		va = (caddr_t) kmem_malloc(kmem_map, (vm_size_t)ctob(npg),
					   !(flags & M_NOWAIT));
		if (va == NULL) {
			/*
			 * Kmem_malloc() can return NULL, even if it can
			 * wait, if there is no map space available, because
			 * it can't fix that problem.  Neither can we,
			 * right now.  (We should release pages which
			 * are completely free and which are in buckets
			 * with too many free elements.)
			 */
			if ((flags & M_NOWAIT) == 0)
				panic("malloc: out of space in kmem_map");
			splx(s);
			return ((void *) NULL);
		}
#ifdef KMEMSTATS
		kbp->kb_total += kbp->kb_elmpercl;
#endif
		kup = btokup(va);
		kup->ku_indx = indx;
		if (allocsize > MAXALLOCSAVE) {
			if (npg > 65535)
				panic("malloc: allocation too large");
			kup->ku_pagecnt = npg;
#ifdef KMEMSTATS
			ksp->ks_memuse += allocsize;
#endif
			goto out;
		}
#ifdef KMEMSTATS
		kup->ku_freecnt = kbp->kb_elmpercl;
		kbp->kb_totalfree += kbp->kb_elmpercl;
#endif
		/*
		 * Just in case we blocked while allocating memory,
		 * and someone else also allocated memory for this
		 * bucket, don't assume the list is still empty.
		 */
		savedlist = kbp->kb_next;
		kbp->kb_next = cp = va + (npg * NBPG) - allocsize;
		for (;;) {
			freep = (struct freelist *)cp;
#ifdef DIAGNOSTIC
			/*
			 * Copy in known text to detect modification
			 * after freeing.
			 */
			end = (int32_t *)&cp[copysize];
			for (lp = (int32_t *)cp; lp < end; lp++)
				*lp = WEIRD_ADDR;
			freep->type = M_FREE;
#endif /* DIAGNOSTIC */
			if (cp <= va)
				break;
			cp -= allocsize;
			freep->next = cp;
		}
		freep->next = savedlist;
		if (kbp->kb_last == NULL)
			kbp->kb_last = (caddr_t)freep;
	}
	va = kbp->kb_next;
	kbp->kb_next = ((struct freelist *)va)->next;
#ifdef DIAGNOSTIC
	freep = (struct freelist *)va;
	savedtype = (unsigned)freep->type < M_LAST ?
		memname[freep->type] : "???";
	if (kbp->kb_next &&
	    !kernacc(kbp->kb_next, sizeof(struct freelist), 0)) {
		printf(
		    "%s %ld of object %p size %ld %s %s (invalid addr %p)\n",
		    "Data modified on freelist: word",
		    (long)((int32_t *)&kbp->kb_next - (int32_t *)kbp),
		    va, size, "previous type", savedtype, kbp->kb_next);
#ifdef MALLOCLOG
		hitmlog(va);
#endif
		kbp->kb_next = NULL;
	}

	/* Fill the fields that we've used with WEIRD_ADDR */
#if BYTE_ORDER == BIG_ENDIAN
	freep->type = WEIRD_ADDR >> 16;
#endif
#if BYTE_ORDER == LITTLE_ENDIAN
	freep->type = (short)WEIRD_ADDR;
#endif
	end = (int32_t *)&freep->next +
	    (sizeof(freep->next) / sizeof(int32_t));
	for (lp = (int32_t *)&freep->next; lp < end; lp++)
		*lp = WEIRD_ADDR;

	/* and check that the data hasn't been modified. */
	end = (int32_t *)&va[copysize];
	for (lp = (int32_t *)va; lp < end; lp++) {
		if (*lp == WEIRD_ADDR)
			continue;
		printf("%s %ld of object %p size %ld %s %s (0x%x != 0x%x)\n",
		    "Data modified on freelist: word",
		    (long)(lp - (int32_t *)va), va, size, "previous type",
		    savedtype, *lp, WEIRD_ADDR);
#ifdef MALLOCLOG
		hitmlog(va);
#endif
		break;
	}

	freep->spare0 = 0;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup = btokup(va);
	if (kup->ku_indx != indx)
		panic("malloc: wrong bucket");
	if (kup->ku_freecnt == 0)
		panic("malloc: lost data");
	kup->ku_freecnt--;
	kbp->kb_totalfree--;
	ksp->ks_memuse += 1 << indx;
out:
	kbp->kb_calls++;
	ksp->ks_inuse++;
	ksp->ks_calls++;
	if (ksp->ks_memuse > ksp->ks_maxused)
		ksp->ks_maxused = ksp->ks_memuse;
#else
out:
#endif
#ifdef MALLOCLOG
	domlog(va, size, type, 1, file, line);
#endif
	splx(s);
	return ((void *) va);
}

/*
 * Free a block of memory allocated by malloc.
 */
#ifdef MALLOCLOG
void
_free(addr, type, file, line)
	void *addr;
	int type;
	const char *file;
	long line;
#else
void
free(addr, type)
	void *addr;
	int type;
#endif /* MALLOCLOG */
{
	register struct kmembuckets *kbp;
	register struct kmemusage *kup;
	register struct freelist *freep;
	long size;
	int s;
#ifdef DIAGNOSTIC
	caddr_t cp;
	int32_t *end, *lp;
	long alloc, copysize;
#endif
#ifdef KMEMSTATS
	register struct kmemstats *ksp = &kmemstats[type];
#endif

	kup = btokup(addr);
	size = 1 << kup->ku_indx;
	kbp = &bucket[kup->ku_indx];
	s = splimp();
#ifdef MALLOCLOG
	domlog(addr, 0, type, 2, file, line);
#endif
#ifdef DIAGNOSTIC
	/*
	 * Check for returns of data that do not point to the
	 * beginning of the allocation.
	 */
	if (size > NBPG * CLSIZE)
		alloc = addrmask[BUCKETINDX(NBPG * CLSIZE)];
	else
		alloc = addrmask[kup->ku_indx];
	if (((u_long)addr & alloc) != 0)
		panic("free: unaligned addr %p, size %ld, type %s, mask %ld\n",
			addr, size, memname[type], alloc);
#endif /* DIAGNOSTIC */
	if (size > MAXALLOCSAVE) {
		kmem_free(kmem_map, (vm_offset_t)addr, ctob(kup->ku_pagecnt));
#ifdef KMEMSTATS
		size = kup->ku_pagecnt << PGSHIFT;
		ksp->ks_memuse -= size;
		kup->ku_indx = 0;
		kup->ku_pagecnt = 0;
		if (ksp->ks_memuse + size >= ksp->ks_limit &&
		    ksp->ks_memuse < ksp->ks_limit)
			wakeup((caddr_t)ksp);
		ksp->ks_inuse--;
		kbp->kb_total -= 1;
#endif
		splx(s);
		return;
	}
	freep = (struct freelist *)addr;
#ifdef DIAGNOSTIC
	/*
	 * Check for multiple frees. Use a quick check to see if
	 * it looks free before laboriously searching the freelist.
	 */
	if (freep->spare0 == WEIRD_ADDR) {
		for (cp = kbp->kb_next; cp;
		    cp = ((struct freelist *)cp)->next) {
			if (addr != cp)
				continue;
			printf("multiply freed item %p\n", addr);
#ifdef MALLOCLOG
			hitmlog(addr);
#endif
			panic("free: duplicated free");
		}
	}
	/*
	 * Copy in known text to detect modification after freeing
	 * and to make it look free. Also, save the type being freed
	 * so we can list the likely culprit if modification is detected
	 * when the object is reallocated.
	 */
	copysize = size < MAX_COPY ? size : MAX_COPY;
	end = (int32_t *)&((caddr_t)addr)[copysize];
	for (lp = (int32_t *)addr; lp < end; lp++)
		*lp = WEIRD_ADDR;
	freep->type = type;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup->ku_freecnt++;
	if (kup->ku_freecnt >= kbp->kb_elmpercl)
		if (kup->ku_freecnt > kbp->kb_elmpercl)
			panic("free: multiple frees");
		else if (kbp->kb_totalfree > kbp->kb_highwat)
			kbp->kb_couldfree++;
	kbp->kb_totalfree++;
	ksp->ks_memuse -= size;
	if (ksp->ks_memuse + size >= ksp->ks_limit &&
	    ksp->ks_memuse < ksp->ks_limit)
		wakeup((caddr_t)ksp);
	ksp->ks_inuse--;
#endif
	if (kbp->kb_next == NULL)
		kbp->kb_next = addr;
	else
		((struct freelist *)kbp->kb_last)->next = addr;
	freep->next = NULL;
	kbp->kb_last = addr;
	splx(s);
}

/*
 * Change the size of a block of memory.
 */
void *
realloc(curaddr, newsize, type, flags)
	void *curaddr;
	unsigned long newsize;
	int type, flags;
{
	register struct kmemusage *kup;
	long cursize;
	void *newaddr;
#ifdef DIAGNOSTIC
	long alloc;
#endif

	/*
	 * Realloc() with a NULL pointer is the same as malloc().
	 */
	if (curaddr == NULL)
		return (malloc(newsize, type, flags));

	/*
	 * Realloc() with zero size is the same as free().
	 */
	if (newsize == 0) {
		free(curaddr, type);
		return (NULL);
	}

	/*
	 * Find out how large the old allocation was (and do some
	 * sanity checking).
	 */
	kup = btokup(curaddr);
	cursize = 1 << kup->ku_indx;

#ifdef DIAGNOSTIC
	/*
	 * Check for returns of data that do not point to the
	 * beginning of the allocation.
	 */
	if (cursize > NBPG * CLSIZE)
		alloc = addrmask[BUCKETINDX(NBPG * CLSIZE)];
	else
		alloc = addrmask[kup->ku_indx];
	if (((u_long)curaddr & alloc) != 0)
		panic("realloc: unaligned addr %p, size %ld, type %s, mask %ld\n",
			curaddr, cursize, memname[type], alloc);
#endif /* DIAGNOSTIC */

	if (cursize > MAXALLOCSAVE)
		cursize = ctob(kup->ku_pagecnt);

	/*
	 * If we already actually have as much as they want, we're done.
	 */
	if (newsize <= cursize)
		return (curaddr);

	/*
	 * Can't satisfy the allocation with the existing block.
	 * Allocate a new one and copy the data.
	 */
	newaddr = malloc(newsize, type, flags);
	if (newaddr == NULL) {
		/*
		 * Malloc() failed, because flags included M_NOWAIT.
		 * Return NULL to indicate that failure.  The old
		 * pointer is still valid.
		 */
		return NULL;
	}
	bcopy(curaddr, newaddr, cursize);

	/*
	 * We were successful: free the old allocation and return
	 * the new one.
	 */
	free(curaddr, type);
	return (newaddr);
}

/*
 * Initialize the kernel memory allocator
 */
void
kmeminit()
{
#ifdef KMEMSTATS
	register long indx;
#endif
	int npg;

#if	((MAXALLOCSAVE & (MAXALLOCSAVE - 1)) != 0)
		ERROR!_kmeminit:_MAXALLOCSAVE_not_power_of_2
#endif
#if	(MAXALLOCSAVE > MINALLOCSIZE * 32768)
		ERROR!_kmeminit:_MAXALLOCSAVE_too_big
#endif
#if	(MAXALLOCSAVE < CLBYTES)
		ERROR!_kmeminit:_MAXALLOCSAVE_too_small
#endif

	if (sizeof(struct freelist) > (1 << MINBUCKET))
		panic("minbucket too small/struct freelist too big");

	npg = VM_KMEM_SIZE / NBPG;
	kmemusage = (struct kmemusage *) kmem_alloc(kernel_map,
		(vm_size_t)(npg * sizeof(struct kmemusage)));
	kmem_map = kmem_suballoc(kernel_map, (vm_offset_t *)&kmembase,
		(vm_offset_t *)&kmemlimit, (vm_size_t)(npg * NBPG), FALSE);
#ifdef KMEMSTATS
	for (indx = 0; indx < MINBUCKET + 16; indx++) {
		if (1 << indx >= CLBYTES)
			bucket[indx].kb_elmpercl = 1;
		else
			bucket[indx].kb_elmpercl = CLBYTES / (1 << indx);
		bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
	}
	for (indx = 0; indx < M_LAST; indx++)
		kmemstats[indx].ks_limit = npg * NBPG * 6 / 10;
#endif
}
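
/*
 * A minimal sketch of how a kernel subsystem might call the allocator
 * defined above.  Illustrative only: example_softc and example_attach()
 * are hypothetical, while M_DEVBUF, M_WAITOK and M_NOWAIT come from
 * <sys/malloc.h>.
 */
#if 0	/* example only */
struct example_softc {
	int	unit;
	char	*buf;
};

int
example_attach(unit)
	int unit;
{
	struct example_softc *sc;

	/* M_WAITOK callers may sleep, but never see a NULL return. */
	sc = (struct example_softc *)
	    malloc(sizeof(struct example_softc), M_DEVBUF, M_WAITOK);

	/* M_NOWAIT callers must be prepared for failure. */
	sc->buf = (char *) malloc(1024, M_DEVBUF, M_NOWAIT);
	if (sc->buf == NULL) {
		free(sc, M_DEVBUF);
		return (ENOMEM);
	}
	sc->unit = unit;
	return (0);
}
#endif	/* example only */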
    602