kern_malloc.c revision 1.78
      1  1.78        pk /*	$NetBSD: kern_malloc.c,v 1.78 2003/02/14 21:51:36 pk Exp $	*/
      2   1.9       cgd 
      3   1.1       cgd /*
      4  1.37  christos  * Copyright (c) 1996 Christopher G. Demetriou.  All rights reserved.
      5   1.8       cgd  * Copyright (c) 1987, 1991, 1993
      6   1.8       cgd  *	The Regents of the University of California.  All rights reserved.
      7   1.1       cgd  *
      8   1.1       cgd  * Redistribution and use in source and binary forms, with or without
      9   1.1       cgd  * modification, are permitted provided that the following conditions
     10   1.1       cgd  * are met:
     11   1.1       cgd  * 1. Redistributions of source code must retain the above copyright
     12   1.1       cgd  *    notice, this list of conditions and the following disclaimer.
     13   1.1       cgd  * 2. Redistributions in binary form must reproduce the above copyright
     14   1.1       cgd  *    notice, this list of conditions and the following disclaimer in the
     15   1.1       cgd  *    documentation and/or other materials provided with the distribution.
     16   1.1       cgd  * 3. All advertising materials mentioning features or use of this software
     17   1.1       cgd  *    must display the following acknowledgement:
     18   1.1       cgd  *	This product includes software developed by the University of
     19   1.1       cgd  *	California, Berkeley and its contributors.
     20   1.1       cgd  * 4. Neither the name of the University nor the names of its contributors
     21   1.1       cgd  *    may be used to endorse or promote products derived from this software
     22   1.1       cgd  *    without specific prior written permission.
     23   1.1       cgd  *
     24   1.1       cgd  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     25   1.1       cgd  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     26   1.1       cgd  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     27   1.1       cgd  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     28   1.1       cgd  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     29   1.1       cgd  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     30   1.1       cgd  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     31   1.1       cgd  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     32   1.1       cgd  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     33   1.1       cgd  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     34   1.1       cgd  * SUCH DAMAGE.
     35   1.1       cgd  *
     36  1.32      fvdl  *	@(#)kern_malloc.c	8.4 (Berkeley) 5/20/95
     37   1.1       cgd  */
     38  1.64     lukem 
     39  1.64     lukem #include <sys/cdefs.h>
     40  1.78        pk __KERNEL_RCSID(0, "$NetBSD: kern_malloc.c,v 1.78 2003/02/14 21:51:36 pk Exp $");
     41  1.31       mrg 
     42  1.33   thorpej #include "opt_lockdebug.h"
     43   1.1       cgd 
     44   1.7   mycroft #include <sys/param.h>
     45   1.7   mycroft #include <sys/proc.h>
     46   1.7   mycroft #include <sys/kernel.h>
     47   1.7   mycroft #include <sys/malloc.h>
     48  1.12  christos #include <sys/systm.h>
     49  1.24   thorpej 
     50  1.28       mrg #include <uvm/uvm_extern.h>
     51  1.28       mrg 
     52  1.61   thorpej static struct vm_map kmem_map_store;
     53  1.58       chs struct vm_map *kmem_map = NULL;
     54  1.28       mrg 
     55  1.49   thorpej #include "opt_kmempages.h"
     56  1.49   thorpej 
     57  1.49   thorpej #ifdef NKMEMCLUSTERS
     58  1.52  sommerfe #error NKMEMCLUSTERS is obsolete; remove it from your kernel config file and use NKMEMPAGES instead or let the kernel auto-size
     59  1.49   thorpej #endif
     60  1.49   thorpej 
     61  1.49   thorpej /*
     62  1.49   thorpej  * Default number of pages in kmem_map.  We attempt to calculate this
     63  1.49   thorpej  * at run-time, but allow it to be either patched or set in the kernel
     64  1.49   thorpej  * config file.
     65  1.49   thorpej  */
     66  1.49   thorpej #ifndef NKMEMPAGES
     67  1.49   thorpej #define	NKMEMPAGES	0
     68  1.49   thorpej #endif
     69  1.49   thorpej int	nkmempages = NKMEMPAGES;
     70  1.49   thorpej 
     71  1.49   thorpej /*
      72  1.49   thorpej  * Defaults for the lower and upper bounds on the kmem_map page count.
     73  1.49   thorpej  * Can be overridden by kernel config options.
     74  1.49   thorpej  */
     75  1.49   thorpej #ifndef	NKMEMPAGES_MIN
     76  1.49   thorpej #define	NKMEMPAGES_MIN	NKMEMPAGES_MIN_DEFAULT
     77  1.49   thorpej #endif
     78  1.49   thorpej 
     79  1.49   thorpej #ifndef NKMEMPAGES_MAX
     80  1.49   thorpej #define	NKMEMPAGES_MAX	NKMEMPAGES_MAX_DEFAULT
     81  1.49   thorpej #endif
     82  1.49   thorpej 
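/*
 * Editor's note -- illustrative sketch, not part of the original file.
 * The auto-sizing of kmem_map can be overridden from the kernel config
 * file via the options that generate opt_kmempages.h (included above);
 * the values shown are examples only, each port picks its own defaults:
 *
 *	options 	NKMEMPAGES=16384	# 64MB kmem_map with 4KB pages
 *	options 	NKMEMPAGES_MIN=2048	# never smaller than 8MB
 *	options 	NKMEMPAGES_MAX=32768	# never larger than 128MB
 */
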
     83  1.24   thorpej #include "opt_kmemstats.h"
     84  1.27   thorpej #include "opt_malloclog.h"
     85  1.71      fvdl #include "opt_malloc_debug.h"
     86  1.12  christos 
     87   1.1       cgd struct kmembuckets bucket[MINBUCKET + 16];
     88   1.1       cgd struct kmemusage *kmemusage;
     89   1.1       cgd char *kmembase, *kmemlimit;
     90  1.77   thorpej 
     91  1.77   thorpej struct malloc_type *kmemstatistics;
     92   1.1       cgd 
     93  1.27   thorpej #ifdef MALLOCLOG
     94  1.27   thorpej #ifndef MALLOCLOGSIZE
     95  1.27   thorpej #define	MALLOCLOGSIZE	100000
     96  1.27   thorpej #endif
     97  1.27   thorpej 
     98  1.27   thorpej struct malloclog {
     99  1.27   thorpej 	void *addr;
    100  1.27   thorpej 	long size;
    101  1.77   thorpej 	struct malloc_type *type;
    102  1.27   thorpej 	int action;
    103  1.27   thorpej 	const char *file;
    104  1.27   thorpej 	long line;
    105  1.27   thorpej } malloclog[MALLOCLOGSIZE];
    106  1.27   thorpej 
    107  1.27   thorpej long	malloclogptr;
    108  1.27   thorpej 
    109  1.27   thorpej static void
    110  1.77   thorpej domlog(void *a, long size, struct malloc_type *type, int action,
    111  1.77   thorpej     const char *file, long line)
    112  1.27   thorpej {
    113  1.27   thorpej 
    114  1.27   thorpej 	malloclog[malloclogptr].addr = a;
    115  1.27   thorpej 	malloclog[malloclogptr].size = size;
    116  1.27   thorpej 	malloclog[malloclogptr].type = type;
    117  1.27   thorpej 	malloclog[malloclogptr].action = action;
    118  1.27   thorpej 	malloclog[malloclogptr].file = file;
    119  1.27   thorpej 	malloclog[malloclogptr].line = line;
    120  1.27   thorpej 	malloclogptr++;
    121  1.27   thorpej 	if (malloclogptr >= MALLOCLOGSIZE)
    122  1.27   thorpej 		malloclogptr = 0;
    123  1.27   thorpej }
    124  1.27   thorpej 
    125  1.27   thorpej static void
    126  1.69     enami hitmlog(void *a)
    127  1.27   thorpej {
    128  1.27   thorpej 	struct malloclog *lp;
    129  1.27   thorpej 	long l;
    130  1.27   thorpej 
    131  1.69     enami #define	PRT do { \
    132  1.27   thorpej 	if (malloclog[l].addr == a && malloclog[l].action) { \
    133  1.27   thorpej 		lp = &malloclog[l]; \
    134  1.27   thorpej 		printf("malloc log entry %ld:\n", l); \
    135  1.27   thorpej 		printf("\taddr = %p\n", lp->addr); \
    136  1.27   thorpej 		printf("\tsize = %ld\n", lp->size); \
    137  1.77   thorpej 		printf("\ttype = %s\n", lp->type->ks_shortdesc); \
    138  1.27   thorpej 		printf("\taction = %s\n", lp->action == 1 ? "alloc" : "free"); \
    139  1.27   thorpej 		printf("\tfile = %s\n", lp->file); \
    140  1.27   thorpej 		printf("\tline = %ld\n", lp->line); \
    141  1.69     enami 	} \
    142  1.69     enami } while (/* CONSTCOND */0)
    143  1.27   thorpej 
    144  1.27   thorpej 	for (l = malloclogptr; l < MALLOCLOGSIZE; l++)
    145  1.69     enami 		PRT;
    146  1.27   thorpej 
    147  1.27   thorpej 	for (l = 0; l < malloclogptr; l++)
    148  1.69     enami 		PRT;
    149  1.27   thorpej }
    150  1.27   thorpej #endif /* MALLOCLOG */
    151  1.27   thorpej 
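/*
 * Editor's note -- assumption, not part of the original file.  The log
 * above is compiled in only when the kernel is configured with
 *
 *	options 	MALLOCLOG
 *	options 	MALLOCLOGSIZE=200000	# optional; overrides the default
 *
 * (opt_malloclog.h, included above, carries these defines).  Under
 * MALLOCLOG, <sys/malloc.h> is assumed to route malloc()/free() to the
 * _malloc()/_free() variants below so that __FILE__ and __LINE__ are
 * recorded, and hitmlog() dumps every logged operation on a suspect
 * address when freelist corruption or a duplicated free is detected.
 */
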
    152   1.8       cgd #ifdef DIAGNOSTIC
    153   1.8       cgd /*
    154   1.8       cgd  * This structure provides a set of masks to catch unaligned frees.
    155   1.8       cgd  */
    156  1.57  jdolecek const long addrmask[] = { 0,
    157   1.8       cgd 	0x00000001, 0x00000003, 0x00000007, 0x0000000f,
    158   1.8       cgd 	0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
    159   1.8       cgd 	0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
    160   1.8       cgd 	0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
    161   1.8       cgd };
    162   1.8       cgd 
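/*
 * Editor's sketch (not part of the original file): how the masks above
 * are applied in free() below.  Objects in the bucket for 2^indx-byte
 * allocations always start at a multiple of 2^indx within their page,
 * so addrmask[indx] == (1 << indx) - 1 isolates the bits that must be
 * zero.  For a 16-byte bucket (indx == 4, addresses illustrative):
 *
 *	alloc = addrmask[4];		-> 0x0000000f
 *	(0xc8010 & alloc) == 0		-> plausible object start
 *	(0xc8018 & alloc) == 0x8	-> unaligned; free() panics
 */
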
    163   1.8       cgd /*
    164   1.8       cgd  * The WEIRD_ADDR is used as known text to copy into free objects so
    165   1.8       cgd  * that modifications after frees can be detected.
    166   1.8       cgd  */
    167  1.76   thorpej #define	WEIRD_ADDR	((uint32_t) 0xdeadbeef)
    168  1.55       chs #ifdef DEBUG
    169  1.69     enami #define	MAX_COPY	PAGE_SIZE
    170  1.55       chs #else
    171  1.69     enami #define	MAX_COPY	32
    172  1.55       chs #endif
    173   1.8       cgd 
    174   1.8       cgd /*
    175  1.11       cgd  * Normally the freelist structure is used only to hold the list pointer
    176  1.11       cgd  * for free objects.  However, when running with diagnostics, the first
     177  1.77   thorpej  * 8/16 bytes of the structure are unused except for diagnostic information,
     178  1.77   thorpej  * and the free list pointer is at offset 8/16 in the structure.  Since the
     179  1.11       cgd  * first 8/16 bytes are the portion of the structure most often modified, this
    180  1.11       cgd  * helps to detect memory reuse problems and avoid free list corruption.
    181   1.8       cgd  */
    182   1.8       cgd struct freelist {
    183  1.76   thorpej 	uint32_t spare0;
    184  1.77   thorpej #ifdef _LP64
    185  1.77   thorpej 	uint32_t spare1;		/* explicit padding */
    186  1.77   thorpej #endif
    187  1.77   thorpej 	struct malloc_type *type;
    188   1.8       cgd 	caddr_t	next;
    189   1.8       cgd };
    190   1.8       cgd #else /* !DIAGNOSTIC */
    191   1.8       cgd struct freelist {
    192   1.8       cgd 	caddr_t	next;
    193   1.8       cgd };
    194   1.8       cgd #endif /* DIAGNOSTIC */
    195   1.8       cgd 
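/*
 * Editor's sketch (not part of the original file): what a freed object
 * looks like while it sits on the free list with DIAGNOSTIC, LP64 case,
 * 32-byte bucket (offsets illustrative):
 *
 *	offset  0:	0xdeadbeef 0xdeadbeef	spare0/spare1 (WEIRD_ADDR)
 *	offset  8:	struct malloc_type *	type it was freed as
 *	offset 16:	caddr_t next		free list linkage
 *	offset 24:	0xdeadbeef 0xdeadbeef	remainder of the fill
 *
 * When the object is handed out again, malloc() below overwrites the
 * type and next fields with WEIRD_ADDR as well and then checks that the
 * whole filled region still reads back as the pattern; any mismatch
 * means the object was written to after it was freed, and the saved
 * type names the likely culprit.
 */
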
    196   1.1       cgd /*
     197  1.77   thorpej  * The following are standard, built-in malloc types that are not
    198  1.77   thorpej  * specific to any one subsystem.
    199  1.77   thorpej  */
    200  1.77   thorpej MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
    201  1.77   thorpej MALLOC_DEFINE(M_DMAMAP, "DMA map", "bus_dma(9) structures");
    202  1.77   thorpej MALLOC_DEFINE(M_FREE, "free", "should be on free list");
    203  1.77   thorpej MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");
    204  1.77   thorpej MALLOC_DEFINE(M_SOFTINTR, "softintr", "Softinterrupt structures");
    205  1.77   thorpej MALLOC_DEFINE(M_TEMP, "temp", "misc. temporary data buffers");
    206  1.77   thorpej 
    207  1.77   thorpej /* XXX These should all be elsewhere. */
    208  1.77   thorpej MALLOC_DEFINE(M_RTABLE, "routetbl", "routing tables");
    209  1.77   thorpej MALLOC_DEFINE(M_FTABLE, "fragtbl", "fragment reassembly header");
    210  1.77   thorpej MALLOC_DEFINE(M_UFSMNT, "UFS mount", "UFS mount structure");
    211  1.77   thorpej MALLOC_DEFINE(M_NETADDR, "Export Host", "Export host address structure");
    212  1.77   thorpej MALLOC_DEFINE(M_IPMOPTS, "ip_moptions", "internet multicast options");
    213  1.77   thorpej MALLOC_DEFINE(M_IPMADDR, "in_multi", "internet multicast address");
    214  1.77   thorpej MALLOC_DEFINE(M_MRTABLE, "mrt", "multicast routing tables");
    215  1.77   thorpej MALLOC_DEFINE(M_1394DATA, "1394data", "IEEE 1394 data buffers");
    216  1.77   thorpej 
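/*
 * Editor's sketch (not part of the original file): how a subsystem
 * defines and uses its own malloc type.  M_FROBDATA, struct frob and
 * frob_attach() are hypothetical names used only for illustration.
 *
 *	MALLOC_DEFINE(M_FROBDATA, "frobdata", "frob driver state");
 *
 *	void
 *	frob_attach(void)
 *	{
 *		struct frob *f;
 *
 *		f = malloc(sizeof(*f), M_FROBDATA, M_WAITOK | M_ZERO);
 *		...
 *		free(f, M_FROBDATA);
 *	}
 *
 * Types defined this way in statically-linked code are collected into
 * the "malloc_types" link set and attached automatically by kmeminit()
 * below; a header shared between files would carry the matching
 * MALLOC_DECLARE() (assumption: as provided by <sys/malloc.h>).
 */
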
    217  1.78        pk struct simplelock malloc_slock = SIMPLELOCK_INITIALIZER;
    218  1.78        pk 
    219  1.77   thorpej /*
    220   1.1       cgd  * Allocate a block of memory
    221   1.1       cgd  */
    222  1.27   thorpej #ifdef MALLOCLOG
    223  1.27   thorpej void *
    224  1.77   thorpej _malloc(unsigned long size, struct malloc_type *ksp, int flags,
    225  1.77   thorpej     const char *file, long line)
    226  1.27   thorpej #else
    227   1.1       cgd void *
    228  1.77   thorpej malloc(unsigned long size, struct malloc_type *ksp, int flags)
    229  1.27   thorpej #endif /* MALLOCLOG */
    230   1.1       cgd {
    231  1.50  augustss 	struct kmembuckets *kbp;
    232  1.50  augustss 	struct kmemusage *kup;
    233  1.50  augustss 	struct freelist *freep;
    234   1.5    andrew 	long indx, npg, allocsize;
    235   1.1       cgd 	int s;
    236   1.1       cgd 	caddr_t va, cp, savedlist;
    237   1.8       cgd #ifdef DIAGNOSTIC
    238  1.76   thorpej 	uint32_t *end, *lp;
    239   1.8       cgd 	int copysize;
    240  1.26   mycroft 	const char *savedtype;
    241   1.8       cgd #endif
    242   1.1       cgd 
    243  1.59   thorpej #ifdef LOCKDEBUG
    244  1.59   thorpej 	if ((flags & M_NOWAIT) == 0)
    245  1.59   thorpej 		simple_lock_only_held(NULL, "malloc");
    246  1.59   thorpej #endif
    247  1.62   thorpej #ifdef MALLOC_DEBUG
    248  1.77   thorpej 	if (debug_malloc(size, ksp, flags, (void **) &va))
    249  1.62   thorpej 		return ((void *) va);
    250  1.62   thorpej #endif
    251   1.1       cgd 	indx = BUCKETINDX(size);
    252   1.1       cgd 	kbp = &bucket[indx];
    253  1.56   thorpej 	s = splvm();
    254  1.78        pk 	simple_lock(&malloc_slock);
    255   1.1       cgd #ifdef KMEMSTATS
    256   1.1       cgd 	while (ksp->ks_memuse >= ksp->ks_limit) {
    257   1.1       cgd 		if (flags & M_NOWAIT) {
    258  1.78        pk 			simple_unlock(&malloc_slock);
    259   1.1       cgd 			splx(s);
    260   1.1       cgd 			return ((void *) NULL);
    261   1.1       cgd 		}
    262   1.1       cgd 		if (ksp->ks_limblocks < 65535)
    263   1.1       cgd 			ksp->ks_limblocks++;
    264  1.78        pk 		ltsleep((caddr_t)ksp, PSWP+2, ksp->ks_shortdesc, 0,
    265  1.78        pk 			&malloc_slock);
    266   1.1       cgd 	}
    267   1.8       cgd 	ksp->ks_size |= 1 << indx;
    268   1.8       cgd #endif
    269   1.8       cgd #ifdef DIAGNOSTIC
    270   1.8       cgd 	copysize = 1 << indx < MAX_COPY ? 1 << indx : MAX_COPY;
    271   1.1       cgd #endif
    272   1.1       cgd 	if (kbp->kb_next == NULL) {
    273   1.8       cgd 		kbp->kb_last = NULL;
    274   1.1       cgd 		if (size > MAXALLOCSAVE)
    275  1.66     enami 			allocsize = round_page(size);
    276   1.1       cgd 		else
    277   1.1       cgd 			allocsize = 1 << indx;
    278  1.47     ragge 		npg = btoc(allocsize);
    279  1.78        pk 		simple_unlock(&malloc_slock);
    280  1.63       chs 		va = (caddr_t) uvm_km_kmemalloc(kmem_map, NULL,
    281  1.69     enami 		    (vsize_t)ctob(npg),
    282  1.73       chs 		    ((flags & M_NOWAIT) ? UVM_KMF_NOWAIT : 0) |
    283  1.73       chs 		    ((flags & M_CANFAIL) ? UVM_KMF_CANFAIL : 0));
    284  1.51   thorpej 		if (__predict_false(va == NULL)) {
    285  1.17       cgd 			/*
     286  1.17       cgd 			 * uvm_km_kmemalloc() can return NULL, even if it can
     287  1.17       cgd 			 * wait, if there is no map space available, because
    288  1.17       cgd 			 * it can't fix that problem.  Neither can we,
    289  1.17       cgd 			 * right now.  (We should release pages which
    290  1.17       cgd 			 * are completely free and which are in buckets
    291  1.17       cgd 			 * with too many free elements.)
    292  1.17       cgd 			 */
    293  1.68  jdolecek 			if ((flags & (M_NOWAIT|M_CANFAIL)) == 0)
    294  1.17       cgd 				panic("malloc: out of space in kmem_map");
    295   1.6       cgd 			splx(s);
    296  1.73       chs 			return (NULL);
    297   1.1       cgd 		}
    298  1.78        pk 		simple_lock(&malloc_slock);
    299   1.1       cgd #ifdef KMEMSTATS
    300   1.1       cgd 		kbp->kb_total += kbp->kb_elmpercl;
    301   1.1       cgd #endif
    302   1.1       cgd 		kup = btokup(va);
    303   1.1       cgd 		kup->ku_indx = indx;
    304   1.1       cgd 		if (allocsize > MAXALLOCSAVE) {
    305   1.1       cgd 			if (npg > 65535)
    306   1.1       cgd 				panic("malloc: allocation too large");
    307   1.1       cgd 			kup->ku_pagecnt = npg;
    308   1.1       cgd #ifdef KMEMSTATS
    309   1.1       cgd 			ksp->ks_memuse += allocsize;
    310   1.1       cgd #endif
    311   1.1       cgd 			goto out;
    312   1.1       cgd 		}
    313   1.1       cgd #ifdef KMEMSTATS
    314   1.1       cgd 		kup->ku_freecnt = kbp->kb_elmpercl;
    315   1.1       cgd 		kbp->kb_totalfree += kbp->kb_elmpercl;
    316   1.1       cgd #endif
    317   1.1       cgd 		/*
    318   1.1       cgd 		 * Just in case we blocked while allocating memory,
    319   1.1       cgd 		 * and someone else also allocated memory for this
    320   1.1       cgd 		 * bucket, don't assume the list is still empty.
    321   1.1       cgd 		 */
    322   1.1       cgd 		savedlist = kbp->kb_next;
    323  1.49   thorpej 		kbp->kb_next = cp = va + (npg << PAGE_SHIFT) - allocsize;
    324   1.8       cgd 		for (;;) {
    325   1.8       cgd 			freep = (struct freelist *)cp;
    326   1.8       cgd #ifdef DIAGNOSTIC
    327   1.8       cgd 			/*
    328   1.8       cgd 			 * Copy in known text to detect modification
    329   1.8       cgd 			 * after freeing.
    330   1.8       cgd 			 */
     331  1.11       cgd 			end = (uint32_t *)&cp[copysize];
     332  1.11       cgd 			for (lp = (uint32_t *)cp; lp < end; lp++)
    333   1.8       cgd 				*lp = WEIRD_ADDR;
    334   1.8       cgd 			freep->type = M_FREE;
    335   1.8       cgd #endif /* DIAGNOSTIC */
    336   1.8       cgd 			if (cp <= va)
    337   1.8       cgd 				break;
    338   1.8       cgd 			cp -= allocsize;
    339   1.8       cgd 			freep->next = cp;
    340   1.8       cgd 		}
    341   1.8       cgd 		freep->next = savedlist;
    342   1.8       cgd 		if (kbp->kb_last == NULL)
    343   1.8       cgd 			kbp->kb_last = (caddr_t)freep;
    344   1.1       cgd 	}
    345   1.1       cgd 	va = kbp->kb_next;
    346   1.8       cgd 	kbp->kb_next = ((struct freelist *)va)->next;
    347   1.8       cgd #ifdef DIAGNOSTIC
    348   1.8       cgd 	freep = (struct freelist *)va;
    349  1.77   thorpej 	/* XXX potential to get garbage pointer here. */
    350  1.77   thorpej 	savedtype = freep->type->ks_shortdesc;
    351  1.29       chs 	if (kbp->kb_next) {
    352  1.29       chs 		int rv;
    353  1.35       eeh 		vaddr_t addr = (vaddr_t)kbp->kb_next;
    354  1.29       chs 
    355  1.43   thorpej 		vm_map_lock(kmem_map);
    356  1.29       chs 		rv = uvm_map_checkprot(kmem_map, addr,
    357  1.69     enami 		    addr + sizeof(struct freelist), VM_PROT_WRITE);
    358  1.43   thorpej 		vm_map_unlock(kmem_map);
    359  1.29       chs 
    360  1.51   thorpej 		if (__predict_false(rv == 0)) {
    361  1.69     enami 			printf("Data modified on freelist: "
    362  1.69     enami 			    "word %ld of object %p size %ld previous type %s "
    363  1.69     enami 			    "(invalid addr %p)\n",
    364  1.41       mrg 			    (long)((int32_t *)&kbp->kb_next - (int32_t *)kbp),
    365  1.69     enami 			    va, size, savedtype, kbp->kb_next);
    366  1.27   thorpej #ifdef MALLOCLOG
    367  1.41       mrg 			hitmlog(va);
    368  1.27   thorpej #endif
    369  1.41       mrg 			kbp->kb_next = NULL;
    370  1.29       chs 		}
    371   1.8       cgd 	}
    372  1.11       cgd 
    373  1.11       cgd 	/* Fill the fields that we've used with WEIRD_ADDR */
    374  1.77   thorpej #ifdef _LP64
    375  1.77   thorpej 	freep->type = (struct malloc_type *)
    376  1.77   thorpej 	    (WEIRD_ADDR | (((u_long) WEIRD_ADDR) << 32));
    377  1.77   thorpej #else
    378  1.77   thorpej 	freep->type = (struct malloc_type *) WEIRD_ADDR;
    379   1.8       cgd #endif
     380  1.11       cgd 	end = (uint32_t *)&freep->next +
     381  1.11       cgd 	    (sizeof(freep->next) / sizeof(uint32_t));
     382  1.11       cgd 	for (lp = (uint32_t *)&freep->next; lp < end; lp++)
    383  1.11       cgd 		*lp = WEIRD_ADDR;
    384  1.11       cgd 
    385  1.11       cgd 	/* and check that the data hasn't been modified. */
    386  1.76   thorpej 	end = (uint32_t *)&va[copysize];
     387  1.11       cgd 	for (lp = (uint32_t *)va; lp < end; lp++) {
    388  1.51   thorpej 		if (__predict_true(*lp == WEIRD_ADDR))
    389   1.8       cgd 			continue;
    390  1.69     enami 		printf("Data modified on freelist: "
    391  1.69     enami 		    "word %ld of object %p size %ld previous type %s "
    392  1.69     enami 		    "(0x%x != 0x%x)\n",
    393  1.76   thorpej 		    (long)(lp - (uint32_t *)va), va, size,
    394  1.21  christos 		    savedtype, *lp, WEIRD_ADDR);
    395  1.27   thorpej #ifdef MALLOCLOG
    396  1.27   thorpej 		hitmlog(va);
    397  1.27   thorpej #endif
    398   1.8       cgd 		break;
    399   1.8       cgd 	}
    400  1.11       cgd 
    401   1.8       cgd 	freep->spare0 = 0;
    402   1.8       cgd #endif /* DIAGNOSTIC */
    403   1.1       cgd #ifdef KMEMSTATS
    404   1.1       cgd 	kup = btokup(va);
    405   1.1       cgd 	if (kup->ku_indx != indx)
    406   1.1       cgd 		panic("malloc: wrong bucket");
    407   1.1       cgd 	if (kup->ku_freecnt == 0)
    408   1.1       cgd 		panic("malloc: lost data");
    409   1.1       cgd 	kup->ku_freecnt--;
    410   1.1       cgd 	kbp->kb_totalfree--;
    411   1.1       cgd 	ksp->ks_memuse += 1 << indx;
    412   1.1       cgd out:
    413   1.1       cgd 	kbp->kb_calls++;
    414   1.1       cgd 	ksp->ks_inuse++;
    415   1.1       cgd 	ksp->ks_calls++;
    416   1.1       cgd 	if (ksp->ks_memuse > ksp->ks_maxused)
    417   1.1       cgd 		ksp->ks_maxused = ksp->ks_memuse;
    418   1.1       cgd #else
    419   1.1       cgd out:
    420   1.1       cgd #endif
    421  1.27   thorpej #ifdef MALLOCLOG
     422  1.27   thorpej 	domlog(va, size, ksp, 1, file, line);
    423  1.27   thorpej #endif
    424  1.78        pk 	simple_unlock(&malloc_slock);
    425   1.1       cgd 	splx(s);
    426  1.67     enami 	if ((flags & M_ZERO) != 0)
    427  1.65     lukem 		memset(va, 0, size);
    428   1.1       cgd 	return ((void *) va);
    429   1.1       cgd }
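
/*
 * Editor's summary of the caller-side contract implemented above
 * (hypothetical type M_FROBDATA; see the sketch near the
 * MALLOC_DEFINE()s):
 *
 *	p = malloc(size, M_FROBDATA, M_WAITOK);
 *		May sleep until memory is available; if kmem_map itself
 *		is exhausted the code above panics.
 *
 *	p = malloc(size, M_FROBDATA, M_WAITOK | M_CANFAIL);
 *	if (p == NULL)
 *		return (ENOMEM);
 *		May sleep, but returns NULL instead of panicking when
 *		kmem_map is exhausted.
 *
 *	p = malloc(size, M_FROBDATA, M_NOWAIT);
 *		Never sleeps; may return NULL, which must be checked.
 *
 *	Adding M_ZERO to any of the above returns zeroed memory.
 */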
    430   1.1       cgd 
    431   1.1       cgd /*
    432   1.1       cgd  * Free a block of memory allocated by malloc.
    433   1.1       cgd  */
    434  1.27   thorpej #ifdef MALLOCLOG
    435  1.27   thorpej void
     436  1.77   thorpej _free(void *addr, struct malloc_type *ksp, const char *file, long line)
    437  1.27   thorpej #else
    438   1.1       cgd void
    439  1.77   thorpej free(void *addr, struct malloc_type *ksp)
    440  1.27   thorpej #endif /* MALLOCLOG */
    441   1.1       cgd {
    442  1.50  augustss 	struct kmembuckets *kbp;
    443  1.50  augustss 	struct kmemusage *kup;
    444  1.50  augustss 	struct freelist *freep;
    445   1.8       cgd 	long size;
    446   1.8       cgd 	int s;
    447   1.5    andrew #ifdef DIAGNOSTIC
    448   1.8       cgd 	caddr_t cp;
    449  1.11       cgd 	int32_t *end, *lp;
    450  1.11       cgd 	long alloc, copysize;
    451   1.5    andrew #endif
    452  1.48   thorpej 
    453  1.62   thorpej #ifdef MALLOC_DEBUG
    454  1.77   thorpej 	if (debug_free(addr, ksp))
    455  1.62   thorpej 		return;
    456  1.62   thorpej #endif
    457  1.62   thorpej 
    458  1.48   thorpej #ifdef DIAGNOSTIC
    459  1.48   thorpej 	/*
    460  1.48   thorpej 	 * Ensure that we're free'ing something that we could
    461  1.48   thorpej 	 * have allocated in the first place.  That is, check
    462  1.48   thorpej 	 * to see that the address is within kmem_map.
    463  1.48   thorpej 	 */
    464  1.51   thorpej 	if (__predict_false((vaddr_t)addr < kmem_map->header.start ||
    465  1.69     enami 	    (vaddr_t)addr >= kmem_map->header.end))
    466  1.48   thorpej 		panic("free: addr %p not within kmem_map", addr);
    467   1.1       cgd #endif
    468   1.1       cgd 
    469   1.1       cgd 	kup = btokup(addr);
    470   1.1       cgd 	size = 1 << kup->ku_indx;
    471   1.8       cgd 	kbp = &bucket[kup->ku_indx];
    472  1.56   thorpej 	s = splvm();
    473  1.78        pk 	simple_lock(&malloc_slock);
    474  1.27   thorpej #ifdef MALLOCLOG
     475  1.27   thorpej 	domlog(addr, 0, ksp, 2, file, line);
    476  1.27   thorpej #endif
    477   1.1       cgd #ifdef DIAGNOSTIC
    478   1.8       cgd 	/*
    479   1.8       cgd 	 * Check for returns of data that do not point to the
    480   1.8       cgd 	 * beginning of the allocation.
    481   1.8       cgd 	 */
    482  1.49   thorpej 	if (size > PAGE_SIZE)
    483  1.49   thorpej 		alloc = addrmask[BUCKETINDX(PAGE_SIZE)];
    484   1.1       cgd 	else
    485   1.1       cgd 		alloc = addrmask[kup->ku_indx];
    486   1.8       cgd 	if (((u_long)addr & alloc) != 0)
    487  1.75    provos 		panic("free: unaligned addr %p, size %ld, type %s, mask %ld",
    488  1.77   thorpej 		    addr, size, ksp->ks_shortdesc, alloc);
    489   1.1       cgd #endif /* DIAGNOSTIC */
    490   1.1       cgd 	if (size > MAXALLOCSAVE) {
    491  1.35       eeh 		uvm_km_free(kmem_map, (vaddr_t)addr, ctob(kup->ku_pagecnt));
    492   1.1       cgd #ifdef KMEMSTATS
    493   1.1       cgd 		size = kup->ku_pagecnt << PGSHIFT;
    494   1.1       cgd 		ksp->ks_memuse -= size;
    495   1.1       cgd 		kup->ku_indx = 0;
    496   1.1       cgd 		kup->ku_pagecnt = 0;
    497   1.1       cgd 		if (ksp->ks_memuse + size >= ksp->ks_limit &&
    498   1.1       cgd 		    ksp->ks_memuse < ksp->ks_limit)
    499   1.1       cgd 			wakeup((caddr_t)ksp);
    500   1.1       cgd 		ksp->ks_inuse--;
    501   1.1       cgd 		kbp->kb_total -= 1;
    502   1.1       cgd #endif
    503  1.78        pk 		simple_unlock(&malloc_slock);
    504   1.1       cgd 		splx(s);
    505   1.1       cgd 		return;
    506   1.1       cgd 	}
    507   1.8       cgd 	freep = (struct freelist *)addr;
    508   1.8       cgd #ifdef DIAGNOSTIC
    509   1.8       cgd 	/*
    510   1.8       cgd 	 * Check for multiple frees. Use a quick check to see if
    511   1.8       cgd 	 * it looks free before laboriously searching the freelist.
    512   1.8       cgd 	 */
    513  1.51   thorpej 	if (__predict_false(freep->spare0 == WEIRD_ADDR)) {
    514  1.16       cgd 		for (cp = kbp->kb_next; cp;
    515  1.16       cgd 		    cp = ((struct freelist *)cp)->next) {
    516   1.8       cgd 			if (addr != cp)
    517   1.8       cgd 				continue;
    518  1.22  christos 			printf("multiply freed item %p\n", addr);
    519  1.27   thorpej #ifdef MALLOCLOG
    520  1.27   thorpej 			hitmlog(addr);
    521  1.27   thorpej #endif
    522   1.8       cgd 			panic("free: duplicated free");
    523   1.8       cgd 		}
    524   1.8       cgd 	}
    525  1.38       chs #ifdef LOCKDEBUG
    526  1.38       chs 	/*
    527  1.38       chs 	 * Check if we're freeing a locked simple lock.
    528  1.38       chs 	 */
    529  1.40       chs 	simple_lock_freecheck(addr, (char *)addr + size);
    530  1.38       chs #endif
    531   1.8       cgd 	/*
    532   1.8       cgd 	 * Copy in known text to detect modification after freeing
    533   1.8       cgd 	 * and to make it look free. Also, save the type being freed
    534   1.8       cgd 	 * so we can list likely culprit if modification is detected
    535   1.8       cgd 	 * when the object is reallocated.
    536   1.8       cgd 	 */
    537   1.8       cgd 	copysize = size < MAX_COPY ? size : MAX_COPY;
    538  1.11       cgd 	end = (int32_t *)&((caddr_t)addr)[copysize];
    539  1.11       cgd 	for (lp = (int32_t *)addr; lp < end; lp++)
    540   1.8       cgd 		*lp = WEIRD_ADDR;
    541  1.77   thorpej 	freep->type = ksp;
    542   1.8       cgd #endif /* DIAGNOSTIC */
    543   1.1       cgd #ifdef KMEMSTATS
    544   1.1       cgd 	kup->ku_freecnt++;
    545  1.36   thorpej 	if (kup->ku_freecnt >= kbp->kb_elmpercl) {
    546   1.1       cgd 		if (kup->ku_freecnt > kbp->kb_elmpercl)
    547   1.1       cgd 			panic("free: multiple frees");
    548   1.1       cgd 		else if (kbp->kb_totalfree > kbp->kb_highwat)
    549   1.1       cgd 			kbp->kb_couldfree++;
    550  1.36   thorpej 	}
    551   1.1       cgd 	kbp->kb_totalfree++;
    552   1.1       cgd 	ksp->ks_memuse -= size;
    553   1.1       cgd 	if (ksp->ks_memuse + size >= ksp->ks_limit &&
    554   1.1       cgd 	    ksp->ks_memuse < ksp->ks_limit)
    555   1.1       cgd 		wakeup((caddr_t)ksp);
    556   1.1       cgd 	ksp->ks_inuse--;
    557   1.1       cgd #endif
    558   1.8       cgd 	if (kbp->kb_next == NULL)
    559   1.8       cgd 		kbp->kb_next = addr;
    560   1.8       cgd 	else
    561   1.8       cgd 		((struct freelist *)kbp->kb_last)->next = addr;
    562   1.8       cgd 	freep->next = NULL;
    563   1.8       cgd 	kbp->kb_last = addr;
    564  1.78        pk 	simple_unlock(&malloc_slock);
    565   1.1       cgd 	splx(s);
    566  1.20       cgd }
    567  1.20       cgd 
    568  1.20       cgd /*
    569  1.20       cgd  * Change the size of a block of memory.
    570  1.20       cgd  */
    571  1.20       cgd void *
    572  1.77   thorpej realloc(void *curaddr, unsigned long newsize, struct malloc_type *ksp,
    573  1.77   thorpej     int flags)
    574  1.20       cgd {
    575  1.50  augustss 	struct kmemusage *kup;
    576  1.72   thorpej 	unsigned long cursize;
    577  1.20       cgd 	void *newaddr;
    578  1.20       cgd #ifdef DIAGNOSTIC
    579  1.20       cgd 	long alloc;
    580  1.20       cgd #endif
    581  1.20       cgd 
    582  1.20       cgd 	/*
    583  1.69     enami 	 * realloc() with a NULL pointer is the same as malloc().
    584  1.20       cgd 	 */
    585  1.20       cgd 	if (curaddr == NULL)
    586  1.77   thorpej 		return (malloc(newsize, ksp, flags));
    587  1.20       cgd 
    588  1.20       cgd 	/*
    589  1.69     enami 	 * realloc() with zero size is the same as free().
    590  1.20       cgd 	 */
    591  1.20       cgd 	if (newsize == 0) {
    592  1.77   thorpej 		free(curaddr, ksp);
    593  1.20       cgd 		return (NULL);
    594  1.20       cgd 	}
    595  1.59   thorpej 
    596  1.59   thorpej #ifdef LOCKDEBUG
    597  1.59   thorpej 	if ((flags & M_NOWAIT) == 0)
    598  1.59   thorpej 		simple_lock_only_held(NULL, "realloc");
    599  1.59   thorpej #endif
    600  1.20       cgd 
    601  1.20       cgd 	/*
    602  1.20       cgd 	 * Find out how large the old allocation was (and do some
    603  1.20       cgd 	 * sanity checking).
    604  1.20       cgd 	 */
    605  1.20       cgd 	kup = btokup(curaddr);
    606  1.20       cgd 	cursize = 1 << kup->ku_indx;
    607  1.20       cgd 
    608  1.20       cgd #ifdef DIAGNOSTIC
    609  1.20       cgd 	/*
    610  1.20       cgd 	 * Check for returns of data that do not point to the
    611  1.20       cgd 	 * beginning of the allocation.
    612  1.20       cgd 	 */
    613  1.49   thorpej 	if (cursize > PAGE_SIZE)
    614  1.49   thorpej 		alloc = addrmask[BUCKETINDX(PAGE_SIZE)];
    615  1.20       cgd 	else
    616  1.20       cgd 		alloc = addrmask[kup->ku_indx];
    617  1.20       cgd 	if (((u_long)curaddr & alloc) != 0)
    618  1.69     enami 		panic("realloc: "
    619  1.69     enami 		    "unaligned addr %p, size %ld, type %s, mask %ld\n",
    620  1.77   thorpej 		    curaddr, cursize, ksp->ks_shortdesc, alloc);
    621  1.20       cgd #endif /* DIAGNOSTIC */
    622  1.20       cgd 
    623  1.20       cgd 	if (cursize > MAXALLOCSAVE)
    624  1.20       cgd 		cursize = ctob(kup->ku_pagecnt);
    625  1.20       cgd 
    626  1.20       cgd 	/*
    627  1.20       cgd 	 * If we already actually have as much as they want, we're done.
    628  1.20       cgd 	 */
    629  1.20       cgd 	if (newsize <= cursize)
    630  1.20       cgd 		return (curaddr);
    631  1.20       cgd 
    632  1.20       cgd 	/*
    633  1.20       cgd 	 * Can't satisfy the allocation with the existing block.
    634  1.20       cgd 	 * Allocate a new one and copy the data.
    635  1.20       cgd 	 */
    636  1.77   thorpej 	newaddr = malloc(newsize, ksp, flags);
    637  1.51   thorpej 	if (__predict_false(newaddr == NULL)) {
    638  1.20       cgd 		/*
    639  1.69     enami 		 * malloc() failed, because flags included M_NOWAIT.
    640  1.20       cgd 		 * Return NULL to indicate that failure.  The old
    641  1.20       cgd 		 * pointer is still valid.
    642  1.20       cgd 		 */
    643  1.69     enami 		return (NULL);
    644  1.20       cgd 	}
    645  1.34     perry 	memcpy(newaddr, curaddr, cursize);
    646  1.20       cgd 
    647  1.20       cgd 	/*
    648  1.20       cgd 	 * We were successful: free the old allocation and return
    649  1.20       cgd 	 * the new one.
    650  1.20       cgd 	 */
    651  1.77   thorpej 	free(curaddr, ksp);
    652  1.20       cgd 	return (newaddr);
    653  1.70     enami }
    654  1.70     enami 
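/*
 * Editor's sketch (hypothetical names): growing a buffer with the
 * kernel realloc(), relying on the rules above -- a NULL old pointer
 * acts like malloc(), a zero size acts like free(), and when an
 * M_NOWAIT reallocation fails the old block remains valid.
 *
 *	new = realloc(buf, newsize, M_FROBDATA, M_NOWAIT);
 *	if (new == NULL) {
 *		... allocation failed; "buf" is untouched and still usable ...
 *	} else
 *		buf = new;
 */
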
    655  1.70     enami /*
    656  1.70     enami  * Roundup size to the actual allocation size.
    657  1.70     enami  */
    658  1.70     enami unsigned long
    659  1.70     enami malloc_roundup(unsigned long size)
    660  1.70     enami {
    661  1.70     enami 
    662  1.70     enami 	if (size > MAXALLOCSAVE)
    663  1.70     enami 		return (roundup(size, PAGE_SIZE));
    664  1.70     enami 	else
    665  1.70     enami 		return (1 << BUCKETINDX(size));
    666   1.1       cgd }
    667   1.1       cgd 
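/*
 * Editor's example (assuming 4KB pages, MINBUCKET == 4 and
 * MAXALLOCSAVE == 2 * NBPG): small requests round up to a power-of-two
 * bucket, larger ones to whole pages, so
 *
 *	malloc_roundup(20)   == 32
 *	malloc_roundup(3000) == 4096
 *	malloc_roundup(9000) == 12288	(three pages)
 *
 * A caller sizing a table can therefore ask for the rounded size and
 * use the slack instead of wasting it:
 *
 *	size = malloc_roundup(nitems * sizeof(*tab));
 *	tab = malloc(size, M_FROBDATA, M_WAITOK);
 *	nitems = size / sizeof(*tab);
 */
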
    668   1.1       cgd /*
    669  1.77   thorpej  * Add a malloc type to the system.
    670  1.77   thorpej  */
    671  1.77   thorpej void
    672  1.77   thorpej malloc_type_attach(struct malloc_type *type)
    673  1.77   thorpej {
    674  1.77   thorpej 
    675  1.77   thorpej 	if (nkmempages == 0)
    676  1.77   thorpej 		panic("malloc_type_attach: nkmempages == 0");
    677  1.77   thorpej 
    678  1.77   thorpej 	if (type->ks_magic != M_MAGIC)
    679  1.77   thorpej 		panic("malloc_type_attach: bad magic");
    680  1.77   thorpej 
    681  1.77   thorpej #ifdef DIAGNOSTIC
    682  1.77   thorpej 	{
    683  1.77   thorpej 		struct malloc_type *ksp;
    684  1.77   thorpej 		for (ksp = kmemstatistics; ksp != NULL; ksp = ksp->ks_next) {
    685  1.77   thorpej 			if (ksp == type)
    686  1.77   thorpej 				panic("malloc_type_attach: already on list");
    687  1.77   thorpej 		}
    688  1.77   thorpej 	}
    689  1.77   thorpej #endif
    690  1.77   thorpej 
    691  1.77   thorpej #ifdef KMEMSTATS
    692  1.77   thorpej 	if (type->ks_limit == 0)
    693  1.77   thorpej 		type->ks_limit = ((u_long)nkmempages << PAGE_SHIFT) * 6U / 10U;
    694  1.77   thorpej #else
    695  1.77   thorpej 	type->ks_limit = 0;
    696  1.77   thorpej #endif
    697  1.77   thorpej 
    698  1.77   thorpej 	type->ks_next = kmemstatistics;
    699  1.77   thorpej 	kmemstatistics = type;
    700  1.77   thorpej }
    701  1.77   thorpej 
    702  1.77   thorpej /*
     703  1.77   thorpej  * Remove a malloc type from the system.
    704  1.77   thorpej  */
    705  1.77   thorpej void
    706  1.77   thorpej malloc_type_detach(struct malloc_type *type)
    707  1.77   thorpej {
    708  1.77   thorpej 	struct malloc_type *ksp;
    709  1.77   thorpej 
    710  1.77   thorpej #ifdef DIAGNOSTIC
    711  1.77   thorpej 	if (type->ks_magic != M_MAGIC)
    712  1.77   thorpej 		panic("malloc_type_detach: bad magic");
    713  1.77   thorpej #endif
    714  1.77   thorpej 
    715  1.77   thorpej 	if (type == kmemstatistics)
    716  1.77   thorpej 		kmemstatistics = type->ks_next;
    717  1.77   thorpej 	else {
    718  1.77   thorpej 		for (ksp = kmemstatistics; ksp->ks_next != NULL;
    719  1.77   thorpej 		     ksp = ksp->ks_next) {
    720  1.77   thorpej 			if (ksp->ks_next == type) {
    721  1.77   thorpej 				ksp->ks_next = type->ks_next;
    722  1.77   thorpej 				break;
    723  1.77   thorpej 			}
    724  1.77   thorpej 		}
    725  1.77   thorpej #ifdef DIAGNOSTIC
    726  1.77   thorpej 		if (ksp->ks_next == NULL)
    727  1.77   thorpej 			panic("malloc_type_detach: not on list");
    728  1.77   thorpej #endif
    729  1.77   thorpej 	}
    730  1.77   thorpej 	type->ks_next = NULL;
    731  1.77   thorpej }
    732  1.77   thorpej 
    733  1.77   thorpej /*
    734  1.77   thorpej  * Set the limit on a malloc type.
    735  1.77   thorpej  */
    736  1.77   thorpej void
    737  1.77   thorpej malloc_type_setlimit(struct malloc_type *type, u_long limit)
    738  1.77   thorpej {
    739  1.77   thorpej #ifdef KMEMSTATS
    740  1.77   thorpej 	int s;
    741  1.77   thorpej 
    742  1.77   thorpej 	s = splvm();
    743  1.77   thorpej 	type->ks_limit = limit;
    744  1.77   thorpej 	splx(s);
    745  1.77   thorpej #endif
    746  1.77   thorpej }
    747  1.77   thorpej 
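/*
 * Editor's sketch (hypothetical names): code that is not part of the
 * statically-linked kernel -- a loadable module, say -- registers and
 * unregisters its type by hand rather than relying on the link set,
 * and may raise the default limit (60% of kmem_map, set above):
 *
 *	MALLOC_DEFINE(M_FROBDATA, "frobdata", "frob driver state");
 *
 *	malloc_type_attach(M_FROBDATA);
 *	malloc_type_setlimit(M_FROBDATA, 4 * 1024 * 1024);
 *	...
 *	malloc_type_detach(M_FROBDATA);
 */
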
    748  1.77   thorpej /*
    749  1.49   thorpej  * Compute the number of pages that kmem_map will map, that is,
    750  1.49   thorpej  * the size of the kernel malloc arena.
    751  1.49   thorpej  */
    752  1.49   thorpej void
    753  1.69     enami kmeminit_nkmempages(void)
    754  1.49   thorpej {
    755  1.49   thorpej 	int npages;
    756  1.49   thorpej 
    757  1.49   thorpej 	if (nkmempages != 0) {
    758  1.49   thorpej 		/*
    759  1.49   thorpej 		 * It's already been set (by us being here before, or
    760  1.49   thorpej 		 * by patching or kernel config options), bail out now.
    761  1.49   thorpej 		 */
    762  1.49   thorpej 		return;
    763  1.49   thorpej 	}
    764  1.49   thorpej 
    765  1.49   thorpej 	/*
    766  1.49   thorpej 	 * We use the following (simple) formula:
    767  1.49   thorpej 	 *
    768  1.49   thorpej 	 *	- Starting point is physical memory / 4.
    769  1.49   thorpej 	 *
    770  1.49   thorpej 	 *	- Clamp it down to NKMEMPAGES_MAX.
    771  1.49   thorpej 	 *
    772  1.49   thorpej 	 *	- Round it up to NKMEMPAGES_MIN.
    773  1.49   thorpej 	 */
    774  1.49   thorpej 	npages = physmem / 4;
    775  1.49   thorpej 
    776  1.49   thorpej 	if (npages > NKMEMPAGES_MAX)
    777  1.49   thorpej 		npages = NKMEMPAGES_MAX;
    778  1.49   thorpej 
    779  1.49   thorpej 	if (npages < NKMEMPAGES_MIN)
    780  1.49   thorpej 		npages = NKMEMPAGES_MIN;
    781  1.49   thorpej 
    782  1.49   thorpej 	nkmempages = npages;
    783  1.49   thorpej }
    784  1.49   thorpej 
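/*
 * Editor's worked example of the formula above, assuming 4KB pages,
 * 256MB of RAM (physmem == 65536) and illustrative per-port defaults
 * of NKMEMPAGES_MIN == 2048 (8MB) and NKMEMPAGES_MAX == 32768 (128MB):
 *
 *	physmem / 4			-> 16384 pages (64MB)
 *	clamp to NKMEMPAGES_MAX (32768)	-> 16384 (unchanged)
 *	raise to NKMEMPAGES_MIN (2048)	-> 16384 (unchanged)
 *
 * so nkmempages == 16384 and kmem_map will map 64MB.
 */
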
    785  1.49   thorpej /*
    786   1.1       cgd  * Initialize the kernel memory allocator
    787   1.1       cgd  */
    788  1.12  christos void
    789  1.69     enami kmeminit(void)
    790   1.1       cgd {
    791  1.77   thorpej 	__link_set_decl(malloc_types, struct malloc_type);
    792  1.77   thorpej 	struct malloc_type * const *ksp;
    793  1.23       tls #ifdef KMEMSTATS
    794  1.50  augustss 	long indx;
    795  1.23       tls #endif
    796   1.1       cgd 
    797   1.1       cgd #if	((MAXALLOCSAVE & (MAXALLOCSAVE - 1)) != 0)
    798   1.1       cgd 		ERROR!_kmeminit:_MAXALLOCSAVE_not_power_of_2
    799   1.1       cgd #endif
    800   1.1       cgd #if	(MAXALLOCSAVE > MINALLOCSIZE * 32768)
    801   1.1       cgd 		ERROR!_kmeminit:_MAXALLOCSAVE_too_big
    802   1.1       cgd #endif
    803  1.47     ragge #if	(MAXALLOCSAVE < NBPG)
    804   1.1       cgd 		ERROR!_kmeminit:_MAXALLOCSAVE_too_small
    805   1.1       cgd #endif
    806  1.11       cgd 
    807  1.11       cgd 	if (sizeof(struct freelist) > (1 << MINBUCKET))
    808  1.11       cgd 		panic("minbucket too small/struct freelist too big");
    809  1.11       cgd 
    810  1.49   thorpej 	/*
    811  1.49   thorpej 	 * Compute the number of kmem_map pages, if we have not
    812  1.49   thorpej 	 * done so already.
    813  1.49   thorpej 	 */
    814  1.49   thorpej 	kmeminit_nkmempages();
    815  1.49   thorpej 
    816  1.28       mrg 	kmemusage = (struct kmemusage *) uvm_km_zalloc(kernel_map,
    817  1.69     enami 	    (vsize_t)(nkmempages * sizeof(struct kmemusage)));
    818  1.76   thorpej 	kmem_map = uvm_km_suballoc(kernel_map, (void *)&kmembase,
    819  1.76   thorpej 	    (void *)&kmemlimit, (vsize_t)(nkmempages << PAGE_SHIFT),
    820  1.69     enami 	    VM_MAP_INTRSAFE, FALSE, &kmem_map_store);
    821   1.1       cgd #ifdef KMEMSTATS
    822   1.1       cgd 	for (indx = 0; indx < MINBUCKET + 16; indx++) {
    823  1.49   thorpej 		if (1 << indx >= PAGE_SIZE)
    824   1.1       cgd 			bucket[indx].kb_elmpercl = 1;
    825   1.1       cgd 		else
    826  1.49   thorpej 			bucket[indx].kb_elmpercl = PAGE_SIZE / (1 << indx);
    827   1.1       cgd 		bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
    828   1.1       cgd 	}
    829  1.62   thorpej #endif
    830  1.77   thorpej 
    831  1.77   thorpej 	/* Attach all of the statically-linked malloc types. */
    832  1.77   thorpej 	__link_set_foreach(ksp, malloc_types)
    833  1.77   thorpej 		malloc_type_attach(*ksp);
    834  1.77   thorpej 
    835  1.62   thorpej #ifdef MALLOC_DEBUG
    836  1.62   thorpej 	debug_malloc_init();
    837   1.1       cgd #endif
    838   1.1       cgd }
    839  1.39   thorpej 
    840  1.39   thorpej #ifdef DDB
    841  1.39   thorpej #include <ddb/db_output.h>
    842  1.39   thorpej 
    843  1.39   thorpej /*
    844  1.39   thorpej  * Dump kmem statistics from ddb.
    845  1.39   thorpej  *
    846  1.39   thorpej  * usage: call dump_kmemstats
    847  1.39   thorpej  */
    848  1.69     enami void	dump_kmemstats(void);
    849  1.39   thorpej 
    850  1.39   thorpej void
    851  1.69     enami dump_kmemstats(void)
    852  1.39   thorpej {
    853  1.39   thorpej #ifdef KMEMSTATS
    854  1.77   thorpej 	struct malloc_type *ksp;
    855  1.39   thorpej 
    856  1.77   thorpej 	for (ksp = kmemstatistics; ksp != NULL; ksp = ksp->ks_next) {
    857  1.77   thorpej 		if (ksp->ks_memuse == 0)
    858  1.77   thorpej 			continue;
    859  1.77   thorpej 		db_printf("%s%.*s %ld\n", ksp->ks_shortdesc,
    860  1.77   thorpej 		    (int)(20 - strlen(ksp->ks_shortdesc)),
    861  1.77   thorpej 		    "                    ",
    862  1.77   thorpej 		    ksp->ks_memuse);
    863  1.39   thorpej 	}
    864  1.39   thorpej #else
    865  1.39   thorpej 	db_printf("Kmem stats are not being collected.\n");
    866  1.39   thorpej #endif /* KMEMSTATS */
    867  1.39   thorpej }
    868  1.39   thorpej #endif /* DDB */
    869