      1 /*	$NetBSD: kern_malloc.c,v 1.32 1998/03/01 02:22:29 fvdl Exp $	*/
      2 
      3 /*
      4  * Copyright 1996 Christopher G. Demetriou.  All rights reserved.
      5  * Copyright (c) 1987, 1991, 1993
      6  *	The Regents of the University of California.  All rights reserved.
      7  *
      8  * Redistribution and use in source and binary forms, with or without
      9  * modification, are permitted provided that the following conditions
     10  * are met:
     11  * 1. Redistributions of source code must retain the above copyright
     12  *    notice, this list of conditions and the following disclaimer.
     13  * 2. Redistributions in binary form must reproduce the above copyright
     14  *    notice, this list of conditions and the following disclaimer in the
     15  *    documentation and/or other materials provided with the distribution.
     16  * 3. All advertising materials mentioning features or use of this software
     17  *    must display the following acknowledgement:
     18  *	This product includes software developed by the University of
     19  *	California, Berkeley and its contributors.
     20  * 4. Neither the name of the University nor the names of its contributors
     21  *    may be used to endorse or promote products derived from this software
     22  *    without specific prior written permission.
     23  *
     24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     34  * SUCH DAMAGE.
     35  *
     36  *	@(#)kern_malloc.c	8.4 (Berkeley) 5/20/95
     37  */
     38 
     39 #include "opt_uvm.h"
     40 
     41 #include <sys/param.h>
     42 #include <sys/proc.h>
     43 #include <sys/map.h>
     44 #include <sys/kernel.h>
     45 #include <sys/malloc.h>
     46 #include <sys/systm.h>
     47 
     48 #include <vm/vm.h>
     49 #include <vm/vm_kern.h>
     50 
     51 #if defined(UVM)
     52 #include <uvm/uvm_extern.h>
     53 
     54 static struct vm_map kmem_map_store;
     55 vm_map_t kmem_map = NULL;
     56 #endif
     57 
     58 #include "opt_kmemstats.h"
     59 #include "opt_malloclog.h"
     60 
     61 struct kmembuckets bucket[MINBUCKET + 16];
     62 struct kmemstats kmemstats[M_LAST];
     63 struct kmemusage *kmemusage;
     64 char *kmembase, *kmemlimit;
     65 const char *memname[] = INITKMEMNAMES;
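         /*
          * bucket[] holds one freelist per power-of-two object size from
          * 2^MINBUCKET up to 2^(MINBUCKET+15).  kmemusage[] has one entry per
          * page cluster of the kmem arena and remembers which bucket the
          * cluster was carved for (or, for large allocations, how many pages
          * they span).  memname[] maps malloc types to the names used by the
          * statistics and diagnostics below.
          */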
     66 
     67 #ifdef MALLOCLOG
     68 #ifndef MALLOCLOGSIZE
     69 #define	MALLOCLOGSIZE	100000
     70 #endif
     71 
     72 struct malloclog {
     73 	void *addr;
     74 	long size;
     75 	int type;
     76 	int action;
     77 	const char *file;
     78 	long line;
     79 } malloclog[MALLOCLOGSIZE];
     80 
     81 long	malloclogptr;
     82 
     83 static void domlog __P((void *a, long size, int type, int action,
     84 	const char *file, long line));
     85 static void hitmlog __P((void *a));
     86 
     87 static void
     88 domlog(a, size, type, action, file, line)
     89 	void *a;
     90 	long size;
     91 	int type;
     92 	int action;
     93 	const char *file;
     94 	long line;
     95 {
     96 
     97 	malloclog[malloclogptr].addr = a;
     98 	malloclog[malloclogptr].size = size;
     99 	malloclog[malloclogptr].type = type;
    100 	malloclog[malloclogptr].action = action;
    101 	malloclog[malloclogptr].file = file;
    102 	malloclog[malloclogptr].line = line;
    103 	malloclogptr++;
    104 	if (malloclogptr >= MALLOCLOGSIZE)
    105 		malloclogptr = 0;
    106 }
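         /*
          * The log is a fixed-size ring: malloclogptr wraps at MALLOCLOGSIZE,
          * so only the most recent MALLOCLOGSIZE malloc (action 1) and free
          * (action 2) events are kept.  hitmlog() below walks the ring from
          * oldest to newest and prints every entry that touched a given
          * address.
          */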
    107 
    108 static void
    109 hitmlog(a)
    110 	void *a;
    111 {
    112 	struct malloclog *lp;
    113 	long l;
    114 
    115 #define	PRT \
    116 	if (malloclog[l].addr == a && malloclog[l].action) { \
    117 		lp = &malloclog[l]; \
    118 		printf("malloc log entry %ld:\n", l); \
    119 		printf("\taddr = %p\n", lp->addr); \
    120 		printf("\tsize = %ld\n", lp->size); \
    121 		printf("\ttype = %s\n", memname[lp->type]); \
    122 		printf("\taction = %s\n", lp->action == 1 ? "alloc" : "free"); \
    123 		printf("\tfile = %s\n", lp->file); \
    124 		printf("\tline = %ld\n", lp->line); \
    125 	}
    126 
    127 	for (l = malloclogptr; l < MALLOCLOGSIZE; l++)
    128 		PRT
    129 
    130 	for (l = 0; l < malloclogptr; l++)
    131 		PRT
    132 }
    133 #endif /* MALLOCLOG */
    134 
    135 #ifdef DIAGNOSTIC
    136 /*
    137  * This structure provides a set of masks to catch unaligned frees.
    138  */
    139 long addrmask[] = { 0,
    140 	0x00000001, 0x00000003, 0x00000007, 0x0000000f,
    141 	0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
    142 	0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
    143 	0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
    144 };
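         /*
          * addrmask[i] == (1 << i) - 1.  free() and realloc() AND a returned
          * pointer with the mask for its bucket (capped at the page-cluster
          * size): objects are always carved on boundaries of their
          * power-of-two size, so any bits left under the mask mean the
          * pointer does not point at the start of an allocation.
          */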
    145 
    146 /*
    147  * The WEIRD_ADDR is used as known text to copy into free objects so
    148  * that modifications after frees can be detected.
    149  */
    150 #define WEIRD_ADDR	((unsigned) 0xdeadbeef)
    151 #define MAX_COPY	32
    152 
    153 /*
    154  * Normally the freelist structure is used only to hold the list pointer
    155  * for free objects.  However, when running with diagnostics, the first
     156  * 8 bytes of the structure are unused except for diagnostic information,
     157  * and the free list pointer is at offset 8 in the structure.  Since the
     158  * first 8 bytes are the portion of the structure most often modified, this
     159  * helps to detect memory reuse problems and avoid free list corruption.
    160  */
    161 struct freelist {
    162 	int32_t	spare0;
    163 	int16_t	type;
    164 	int16_t	spare1;
    165 	caddr_t	next;
    166 };
    167 #else /* !DIAGNOSTIC */
    168 struct freelist {
    169 	caddr_t	next;
    170 };
    171 #endif /* DIAGNOSTIC */
    172 
    173 /*
    174  * Allocate a block of memory
    175  */
    176 #ifdef MALLOCLOG
    177 void *
    178 _malloc(size, type, flags, file, line)
    179 	unsigned long size;
    180 	int type, flags;
    181 	const char *file;
    182 	long line;
    183 #else
    184 void *
    185 malloc(size, type, flags)
    186 	unsigned long size;
    187 	int type, flags;
    188 #endif /* MALLOCLOG */
    189 {
    190 	register struct kmembuckets *kbp;
    191 	register struct kmemusage *kup;
    192 	register struct freelist *freep;
    193 	long indx, npg, allocsize;
    194 	int s;
    195 	caddr_t va, cp, savedlist;
    196 #ifdef DIAGNOSTIC
    197 	int32_t *end, *lp;
    198 	int copysize;
    199 	const char *savedtype;
    200 #endif
    201 #ifdef LOCKDEBUG
    202 	extern int simplelockrecurse;
    203 #endif
    204 #ifdef KMEMSTATS
    205 	register struct kmemstats *ksp = &kmemstats[type];
    206 
    207 	if (((unsigned long)type) > M_LAST)
    208 		panic("malloc - bogus type");
    209 #endif
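         	/*
         	 * BUCKETINDX(size) picks the smallest power-of-two bucket that
         	 * holds the request; e.g. with the usual MINBUCKET of 4 a
         	 * 100-byte request lands in the 128-byte bucket (indx 7).
         	 */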
    210 	indx = BUCKETINDX(size);
    211 	kbp = &bucket[indx];
    212 	s = splimp();
    213 #ifdef KMEMSTATS
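         	/*
         	 * Enforce the per-type limit (ks_limit, set to 60% of the kmem
         	 * arena in kmeminit()).  M_NOWAIT callers fail immediately;
         	 * everyone else sleeps until a free() of the same type drops
         	 * usage below the limit and issues the matching wakeup().
         	 */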
    214 	while (ksp->ks_memuse >= ksp->ks_limit) {
    215 		if (flags & M_NOWAIT) {
    216 			splx(s);
    217 			return ((void *) NULL);
    218 		}
    219 		if (ksp->ks_limblocks < 65535)
    220 			ksp->ks_limblocks++;
    221 		tsleep((caddr_t)ksp, PSWP+2, memname[type], 0);
    222 	}
    223 	ksp->ks_size |= 1 << indx;
    224 #endif
    225 #ifdef DIAGNOSTIC
    226 	copysize = 1 << indx < MAX_COPY ? 1 << indx : MAX_COPY;
    227 #endif
    228 #ifdef LOCKDEBUG
    229 	if (flags & M_NOWAIT)
    230 		simplelockrecurse++;
    231 #endif
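         	/*
         	 * The bucket's freelist is empty, so take fresh pages from
         	 * kmem_map.  Requests larger than MAXALLOCSAVE get whole pages
         	 * of their own and are never cached on a freelist; smaller
         	 * requests are carved into 2^indx-byte objects below.
         	 */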
    232 	if (kbp->kb_next == NULL) {
    233 		kbp->kb_last = NULL;
    234 		if (size > MAXALLOCSAVE)
    235 			allocsize = roundup(size, CLBYTES);
    236 		else
    237 			allocsize = 1 << indx;
    238 		npg = clrnd(btoc(allocsize));
    239 #if defined(UVM)
    240 		va = (caddr_t) uvm_km_kmemalloc(kmem_map, uvmexp.kmem_object,
    241 				(vm_size_t)ctob(npg),
    242 				(flags & M_NOWAIT) ? UVM_KMF_NOWAIT : 0);
    243 #else
    244 		va = (caddr_t) kmem_malloc(kmem_map, (vm_size_t)ctob(npg),
    245 					   !(flags & M_NOWAIT));
    246 #endif
    247 		if (va == NULL) {
    248 			/*
    249 			 * Kmem_malloc() can return NULL, even if it can
     250 			 * wait, if there is no map space available, because
    251 			 * it can't fix that problem.  Neither can we,
    252 			 * right now.  (We should release pages which
    253 			 * are completely free and which are in buckets
    254 			 * with too many free elements.)
    255 			 */
    256 			if ((flags & M_NOWAIT) == 0)
    257 				panic("malloc: out of space in kmem_map");
    258 #ifdef LOCKDEBUG
    259 			simplelockrecurse--;
    260 #endif
    261 			splx(s);
    262 			return ((void *) NULL);
    263 		}
    264 #ifdef KMEMSTATS
    265 		kbp->kb_total += kbp->kb_elmpercl;
    266 #endif
    267 		kup = btokup(va);
    268 		kup->ku_indx = indx;
    269 		if (allocsize > MAXALLOCSAVE) {
    270 			if (npg > 65535)
    271 				panic("malloc: allocation too large");
    272 			kup->ku_pagecnt = npg;
    273 #ifdef KMEMSTATS
    274 			ksp->ks_memuse += allocsize;
    275 #endif
    276 			goto out;
    277 		}
    278 #ifdef KMEMSTATS
    279 		kup->ku_freecnt = kbp->kb_elmpercl;
    280 		kbp->kb_totalfree += kbp->kb_elmpercl;
    281 #endif
    282 		/*
    283 		 * Just in case we blocked while allocating memory,
    284 		 * and someone else also allocated memory for this
    285 		 * bucket, don't assume the list is still empty.
    286 		 */
    287 		savedlist = kbp->kb_next;
    288 		kbp->kb_next = cp = va + (npg * NBPG) - allocsize;
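         		/*
         		 * Carve the new pages into objects from the top of the
         		 * region down to its base, threading each object onto the
         		 * freelist; the list saved above is reattached at the tail
         		 * after the loop.
         		 */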
    289 		for (;;) {
    290 			freep = (struct freelist *)cp;
    291 #ifdef DIAGNOSTIC
    292 			/*
    293 			 * Copy in known text to detect modification
    294 			 * after freeing.
    295 			 */
    296 			end = (int32_t *)&cp[copysize];
    297 			for (lp = (int32_t *)cp; lp < end; lp++)
    298 				*lp = WEIRD_ADDR;
    299 			freep->type = M_FREE;
    300 #endif /* DIAGNOSTIC */
    301 			if (cp <= va)
    302 				break;
    303 			cp -= allocsize;
    304 			freep->next = cp;
    305 		}
    306 		freep->next = savedlist;
    307 		if (kbp->kb_last == NULL)
    308 			kbp->kb_last = (caddr_t)freep;
    309 	}
    310 	va = kbp->kb_next;
    311 	kbp->kb_next = ((struct freelist *)va)->next;
    312 #ifdef DIAGNOSTIC
    313 	freep = (struct freelist *)va;
    314 	savedtype = (unsigned)freep->type < M_LAST ?
    315 		memname[freep->type] : "???";
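         	/*
         	 * Before following the next pointer taken from this object,
         	 * check that it points at writable kmem_map memory.  A garbage
         	 * pointer means the object was scribbled on after it was freed,
         	 * so report the damage and truncate the freelist rather than
         	 * chase it.
         	 */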
    316 #if defined(UVM)
    317 	if (kbp->kb_next) {
    318 		int rv;
    319 		vm_offset_t addr = (vm_offset_t)kbp->kb_next;
    320 
    321 		vm_map_lock_read(kmem_map);
    322 		rv = uvm_map_checkprot(kmem_map, addr,
    323 				       addr + sizeof(struct freelist),
    324 				       VM_PROT_WRITE);
    325 		vm_map_unlock_read(kmem_map);
    326 
    327 		if (!rv)
    328 #else
    329 	if (kbp->kb_next &&
    330 	    !kernacc(kbp->kb_next, sizeof(struct freelist), 0))
    331 #endif
    332 								{
    333 		printf(
    334 		    "%s %ld of object %p size %ld %s %s (invalid addr %p)\n",
    335 		    "Data modified on freelist: word",
    336 		    (long)((int32_t *)&kbp->kb_next - (int32_t *)kbp),
    337 		    va, size, "previous type", savedtype, kbp->kb_next);
    338 #ifdef MALLOCLOG
    339 		hitmlog(va);
    340 #endif
    341 		kbp->kb_next = NULL;
    342 #if defined(UVM)
    343 		}
    344 #endif
    345 	}
    346 
    347 	/* Fill the fields that we've used with WEIRD_ADDR */
    348 #if BYTE_ORDER == BIG_ENDIAN
    349 	freep->type = WEIRD_ADDR >> 16;
    350 #endif
    351 #if BYTE_ORDER == LITTLE_ENDIAN
    352 	freep->type = (short)WEIRD_ADDR;
    353 #endif
    354 	end = (int32_t *)&freep->next +
    355 	    (sizeof(freep->next) / sizeof(int32_t));
    356 	for (lp = (int32_t *)&freep->next; lp < end; lp++)
    357 		*lp = WEIRD_ADDR;
    358 
    359 	/* and check that the data hasn't been modified. */
    360 	end = (int32_t *)&va[copysize];
    361 	for (lp = (int32_t *)va; lp < end; lp++) {
    362 		if (*lp == WEIRD_ADDR)
    363 			continue;
    364 		printf("%s %ld of object %p size %ld %s %s (0x%x != 0x%x)\n",
    365 		    "Data modified on freelist: word",
    366 		    (long)(lp - (int32_t *)va), va, size, "previous type",
    367 		    savedtype, *lp, WEIRD_ADDR);
    368 #ifdef MALLOCLOG
    369 		hitmlog(va);
    370 #endif
    371 		break;
    372 	}
    373 
    374 	freep->spare0 = 0;
    375 #endif /* DIAGNOSTIC */
    376 #ifdef KMEMSTATS
    377 	kup = btokup(va);
    378 	if (kup->ku_indx != indx)
    379 		panic("malloc: wrong bucket");
    380 	if (kup->ku_freecnt == 0)
    381 		panic("malloc: lost data");
    382 	kup->ku_freecnt--;
    383 	kbp->kb_totalfree--;
    384 	ksp->ks_memuse += 1 << indx;
    385 out:
    386 	kbp->kb_calls++;
    387 	ksp->ks_inuse++;
    388 	ksp->ks_calls++;
    389 	if (ksp->ks_memuse > ksp->ks_maxused)
    390 		ksp->ks_maxused = ksp->ks_memuse;
    391 #else
    392 out:
    393 #endif
    394 #ifdef MALLOCLOG
    395 	domlog(va, size, type, 1, file, line);
    396 #endif
    397 	splx(s);
    398 #ifdef LOCKDEBUG
    399 	if (flags & M_NOWAIT)
    400 		simplelockrecurse--;
    401 #endif
    402 	return ((void *) va);
    403 }
    404 
    405 /*
    406  * Free a block of memory allocated by malloc.
    407  */
    408 #ifdef MALLOCLOG
    409 void
    410 _free(addr, type, file, line)
    411 	void *addr;
    412 	int type;
    413 	const char *file;
    414 	long line;
    415 #else
    416 void
    417 free(addr, type)
    418 	void *addr;
    419 	int type;
    420 #endif /* MALLOCLOG */
    421 {
    422 	register struct kmembuckets *kbp;
    423 	register struct kmemusage *kup;
    424 	register struct freelist *freep;
    425 	long size;
    426 	int s;
    427 #ifdef DIAGNOSTIC
    428 	caddr_t cp;
    429 	int32_t *end, *lp;
    430 	long alloc, copysize;
    431 #endif
    432 #ifdef KMEMSTATS
    433 	register struct kmemstats *ksp = &kmemstats[type];
    434 #endif
    435 
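         	/*
         	 * btokup() maps the address to the kmemusage entry for its page
         	 * cluster, which records the bucket index chosen at allocation
         	 * time and, for allocations larger than MAXALLOCSAVE, the page
         	 * count used to return the pages directly below.
         	 */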
    436 	kup = btokup(addr);
    437 	size = 1 << kup->ku_indx;
    438 	kbp = &bucket[kup->ku_indx];
    439 	s = splimp();
    440 #ifdef MALLOCLOG
    441 	domlog(addr, 0, type, 2, file, line);
    442 #endif
    443 #ifdef DIAGNOSTIC
    444 	/*
    445 	 * Check for returns of data that do not point to the
    446 	 * beginning of the allocation.
    447 	 */
    448 	if (size > NBPG * CLSIZE)
    449 		alloc = addrmask[BUCKETINDX(NBPG * CLSIZE)];
    450 	else
    451 		alloc = addrmask[kup->ku_indx];
    452 	if (((u_long)addr & alloc) != 0)
    453 		panic("free: unaligned addr %p, size %ld, type %s, mask %ld\n",
    454 			addr, size, memname[type], alloc);
    455 #endif /* DIAGNOSTIC */
    456 	if (size > MAXALLOCSAVE) {
    457 #if defined(UVM)
    458 		uvm_km_free(kmem_map, (vm_offset_t)addr, ctob(kup->ku_pagecnt));
    459 #else
    460 		kmem_free(kmem_map, (vm_offset_t)addr, ctob(kup->ku_pagecnt));
    461 #endif
    462 #ifdef KMEMSTATS
    463 		size = kup->ku_pagecnt << PGSHIFT;
    464 		ksp->ks_memuse -= size;
    465 		kup->ku_indx = 0;
    466 		kup->ku_pagecnt = 0;
    467 		if (ksp->ks_memuse + size >= ksp->ks_limit &&
    468 		    ksp->ks_memuse < ksp->ks_limit)
    469 			wakeup((caddr_t)ksp);
    470 		ksp->ks_inuse--;
    471 		kbp->kb_total -= 1;
    472 #endif
    473 		splx(s);
    474 		return;
    475 	}
    476 	freep = (struct freelist *)addr;
    477 #ifdef DIAGNOSTIC
    478 	/*
    479 	 * Check for multiple frees. Use a quick check to see if
    480 	 * it looks free before laboriously searching the freelist.
    481 	 */
    482 	if (freep->spare0 == WEIRD_ADDR) {
    483 		for (cp = kbp->kb_next; cp;
    484 		    cp = ((struct freelist *)cp)->next) {
    485 			if (addr != cp)
    486 				continue;
    487 			printf("multiply freed item %p\n", addr);
    488 #ifdef MALLOCLOG
    489 			hitmlog(addr);
    490 #endif
    491 			panic("free: duplicated free");
    492 		}
    493 	}
    494 	/*
    495 	 * Copy in known text to detect modification after freeing
    496 	 * and to make it look free. Also, save the type being freed
     497 	 * so we can list the likely culprit if modification is detected
    498 	 * when the object is reallocated.
    499 	 */
    500 	copysize = size < MAX_COPY ? size : MAX_COPY;
    501 	end = (int32_t *)&((caddr_t)addr)[copysize];
    502 	for (lp = (int32_t *)addr; lp < end; lp++)
    503 		*lp = WEIRD_ADDR;
    504 	freep->type = type;
    505 #endif /* DIAGNOSTIC */
    506 #ifdef KMEMSTATS
    507 	kup->ku_freecnt++;
    508 	if (kup->ku_freecnt >= kbp->kb_elmpercl)
    509 		if (kup->ku_freecnt > kbp->kb_elmpercl)
    510 			panic("free: multiple frees");
    511 		else if (kbp->kb_totalfree > kbp->kb_highwat)
    512 			kbp->kb_couldfree++;
    513 	kbp->kb_totalfree++;
    514 	ksp->ks_memuse -= size;
    515 	if (ksp->ks_memuse + size >= ksp->ks_limit &&
    516 	    ksp->ks_memuse < ksp->ks_limit)
    517 		wakeup((caddr_t)ksp);
    518 	ksp->ks_inuse--;
    519 #endif
    520 	if (kbp->kb_next == NULL)
    521 		kbp->kb_next = addr;
    522 	else
    523 		((struct freelist *)kbp->kb_last)->next = addr;
    524 	freep->next = NULL;
    525 	kbp->kb_last = addr;
    526 	splx(s);
    527 }
    528 
    529 /*
    530  * Change the size of a block of memory.
    531  */
    532 void *
    533 realloc(curaddr, newsize, type, flags)
    534 	void *curaddr;
    535 	unsigned long newsize;
    536 	int type, flags;
    537 {
    538 	register struct kmemusage *kup;
    539 	long cursize;
    540 	void *newaddr;
    541 #ifdef DIAGNOSTIC
    542 	long alloc;
    543 #endif
    544 
    545 	/*
    546 	 * Realloc() with a NULL pointer is the same as malloc().
    547 	 */
    548 	if (curaddr == NULL)
    549 		return (malloc(newsize, type, flags));
    550 
    551 	/*
    552 	 * Realloc() with zero size is the same as free().
    553 	 */
    554 	if (newsize == 0) {
    555 		free(curaddr, type);
    556 		return (NULL);
    557 	}
    558 
    559 	/*
    560 	 * Find out how large the old allocation was (and do some
    561 	 * sanity checking).
    562 	 */
    563 	kup = btokup(curaddr);
    564 	cursize = 1 << kup->ku_indx;
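         	/*
         	 * Only the rounded-up bucket size is recorded, not the number of
         	 * bytes originally requested, so the comparison and copy below
         	 * work on the bucket size (fixed up to the page count further
         	 * down for large allocations); the copy may move more bytes than
         	 * were asked for, but never more than the old allocation holds.
         	 */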
    565 
    566 #ifdef DIAGNOSTIC
    567 	/*
    568 	 * Check for returns of data that do not point to the
    569 	 * beginning of the allocation.
    570 	 */
    571 	if (cursize > NBPG * CLSIZE)
    572 		alloc = addrmask[BUCKETINDX(NBPG * CLSIZE)];
    573 	else
    574 		alloc = addrmask[kup->ku_indx];
    575 	if (((u_long)curaddr & alloc) != 0)
    576 		panic("realloc: unaligned addr %p, size %ld, type %s, mask %ld\n",
    577 			curaddr, cursize, memname[type], alloc);
    578 #endif /* DIAGNOSTIC */
    579 
    580 	if (cursize > MAXALLOCSAVE)
    581 		cursize = ctob(kup->ku_pagecnt);
    582 
    583 	/*
    584 	 * If we already actually have as much as they want, we're done.
    585 	 */
    586 	if (newsize <= cursize)
    587 		return (curaddr);
    588 
    589 	/*
    590 	 * Can't satisfy the allocation with the existing block.
    591 	 * Allocate a new one and copy the data.
    592 	 */
    593 	newaddr = malloc(newsize, type, flags);
    594 	if (newaddr == NULL) {
    595 		/*
    596 		 * Malloc() failed, because flags included M_NOWAIT.
    597 		 * Return NULL to indicate that failure.  The old
    598 		 * pointer is still valid.
    599 		 */
    600 		return NULL;
    601 	}
    602 	bcopy(curaddr, newaddr, cursize);
    603 
    604 	/*
    605 	 * We were successful: free the old allocation and return
    606 	 * the new one.
    607 	 */
    608 	free(curaddr, type);
    609 	return (newaddr);
    610 }
    611 
    612 /*
    613  * Initialize the kernel memory allocator
    614  */
    615 void
    616 kmeminit()
    617 {
    618 #ifdef KMEMSTATS
    619 	register long indx;
    620 #endif
    621 	int npg;
    622 
    623 #if	((MAXALLOCSAVE & (MAXALLOCSAVE - 1)) != 0)
    624 		ERROR!_kmeminit:_MAXALLOCSAVE_not_power_of_2
    625 #endif
    626 #if	(MAXALLOCSAVE > MINALLOCSIZE * 32768)
    627 		ERROR!_kmeminit:_MAXALLOCSAVE_too_big
    628 #endif
    629 #if	(MAXALLOCSAVE < CLBYTES)
    630 		ERROR!_kmeminit:_MAXALLOCSAVE_too_small
    631 #endif
    632 
    633 	if (sizeof(struct freelist) > (1 << MINBUCKET))
    634 		panic("minbucket too small/struct freelist too big");
    635 
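         	/*
         	 * Size the arena: VM_KMEM_SIZE bytes are reserved as a submap of
         	 * kernel_map, with one kmemusage record per page to track how
         	 * each page is used.
         	 */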
    636 	npg = VM_KMEM_SIZE/ NBPG;
    637 #if defined(UVM)
    638 	kmemusage = (struct kmemusage *) uvm_km_zalloc(kernel_map,
    639 		(vm_size_t)(npg * sizeof(struct kmemusage)));
    640 	kmem_map = uvm_km_suballoc(kernel_map, (vm_offset_t *)&kmembase,
    641 		(vm_offset_t *)&kmemlimit, (vm_size_t)(npg * NBPG),
    642 			FALSE, FALSE, &kmem_map_store);
    643 #else
    644 	kmemusage = (struct kmemusage *) kmem_alloc(kernel_map,
    645 		(vm_size_t)(npg * sizeof(struct kmemusage)));
    646 	kmem_map = kmem_suballoc(kernel_map, (vm_offset_t *)&kmembase,
    647 		(vm_offset_t *)&kmemlimit, (vm_size_t)(npg * NBPG), FALSE);
    648 #endif
    649 #ifdef KMEMSTATS
    650 	for (indx = 0; indx < MINBUCKET + 16; indx++) {
    651 		if (1 << indx >= CLBYTES)
    652 			bucket[indx].kb_elmpercl = 1;
    653 		else
    654 			bucket[indx].kb_elmpercl = CLBYTES / (1 << indx);
    655 		bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
    656 	}
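         	/*
         	 * Each type may consume at most 60% of the kmem submap; free()
         	 * wakes any sleepers of that type once its usage drops back
         	 * below this limit.
         	 */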
    657 	for (indx = 0; indx < M_LAST; indx++)
    658 		kmemstats[indx].ks_limit = npg * NBPG * 6 / 10;
    659 #endif
    660 }
    661