kern_malloc.c revision 1.116.2.1
      1  1.116.2.1      yamt /*	$NetBSD: kern_malloc.c,v 1.116.2.1 2007/12/10 12:56:09 yamt Exp $	*/
      2        1.9       cgd 
      3        1.1       cgd /*
      4        1.8       cgd  * Copyright (c) 1987, 1991, 1993
      5        1.8       cgd  *	The Regents of the University of California.  All rights reserved.
      6        1.1       cgd  *
      7        1.1       cgd  * Redistribution and use in source and binary forms, with or without
      8        1.1       cgd  * modification, are permitted provided that the following conditions
      9        1.1       cgd  * are met:
     10        1.1       cgd  * 1. Redistributions of source code must retain the above copyright
     11        1.1       cgd  *    notice, this list of conditions and the following disclaimer.
     12        1.1       cgd  * 2. Redistributions in binary form must reproduce the above copyright
     13        1.1       cgd  *    notice, this list of conditions and the following disclaimer in the
     14        1.1       cgd  *    documentation and/or other materials provided with the distribution.
     15       1.81       agc  * 3. Neither the name of the University nor the names of its contributors
     16       1.81       agc  *    may be used to endorse or promote products derived from this software
     17       1.81       agc  *    without specific prior written permission.
     18       1.81       agc  *
     19       1.81       agc  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     20       1.81       agc  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     21       1.81       agc  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     22       1.81       agc  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     23       1.81       agc  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     24       1.81       agc  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     25       1.81       agc  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     26       1.81       agc  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     27       1.81       agc  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     28       1.81       agc  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     29       1.81       agc  * SUCH DAMAGE.
     30       1.81       agc  *
     31       1.81       agc  *	@(#)kern_malloc.c	8.4 (Berkeley) 5/20/95
     32       1.81       agc  */
     33       1.81       agc 
     34       1.81       agc /*
     35       1.81       agc  * Copyright (c) 1996 Christopher G. Demetriou.  All rights reserved.
     36       1.81       agc  *
     37       1.81       agc  * Redistribution and use in source and binary forms, with or without
     38       1.81       agc  * modification, are permitted provided that the following conditions
     39       1.81       agc  * are met:
     40       1.81       agc  * 1. Redistributions of source code must retain the above copyright
     41       1.81       agc  *    notice, this list of conditions and the following disclaimer.
     42       1.81       agc  * 2. Redistributions in binary form must reproduce the above copyright
     43       1.81       agc  *    notice, this list of conditions and the following disclaimer in the
     44       1.81       agc  *    documentation and/or other materials provided with the distribution.
     45        1.1       cgd  * 3. All advertising materials mentioning features or use of this software
     46        1.1       cgd  *    must display the following acknowledgement:
     47        1.1       cgd  *	This product includes software developed by the University of
     48        1.1       cgd  *	California, Berkeley and its contributors.
     49        1.1       cgd  * 4. Neither the name of the University nor the names of its contributors
     50        1.1       cgd  *    may be used to endorse or promote products derived from this software
     51        1.1       cgd  *    without specific prior written permission.
     52        1.1       cgd  *
     53        1.1       cgd  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     54        1.1       cgd  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     55        1.1       cgd  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     56        1.1       cgd  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     57        1.1       cgd  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     58        1.1       cgd  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     59        1.1       cgd  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     60        1.1       cgd  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     61        1.1       cgd  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     62        1.1       cgd  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     63        1.1       cgd  * SUCH DAMAGE.
     64        1.1       cgd  *
     65       1.32      fvdl  *	@(#)kern_malloc.c	8.4 (Berkeley) 5/20/95
     66        1.1       cgd  */
     67       1.64     lukem 
     68       1.64     lukem #include <sys/cdefs.h>
     69  1.116.2.1      yamt __KERNEL_RCSID(0, "$NetBSD: kern_malloc.c,v 1.116.2.1 2007/12/10 12:56:09 yamt Exp $");
     70        1.1       cgd 
     71        1.7   mycroft #include <sys/param.h>
     72        1.7   mycroft #include <sys/proc.h>
     73        1.7   mycroft #include <sys/kernel.h>
     74        1.7   mycroft #include <sys/malloc.h>
     75       1.12  christos #include <sys/systm.h>
     76      1.106        ad #include <sys/debug.h>
     77      1.109        ad #include <sys/mutex.h>
     78      1.113        ad #include <sys/lockdebug.h>
     79  1.116.2.1      yamt #include <sys/kmem.h>
     80       1.24   thorpej 
     81       1.28       mrg #include <uvm/uvm_extern.h>
     82       1.28       mrg 
     83  1.116.2.1      yamt #if 0
     84       1.92      yamt static struct vm_map_kernel kmem_map_store;
     85       1.58       chs struct vm_map *kmem_map = NULL;
     86  1.116.2.1      yamt #endif
     87       1.28       mrg 
     88       1.49   thorpej #include "opt_kmempages.h"
     89       1.49   thorpej 
     90       1.49   thorpej #ifdef NKMEMCLUSTERS
     91       1.52  sommerfe #error NKMEMCLUSTERS is obsolete; remove it from your kernel config file and use NKMEMPAGES instead or let the kernel auto-size
     92       1.49   thorpej #endif
     93       1.49   thorpej 
     94       1.49   thorpej /*
     95       1.49   thorpej  * Default number of pages in kmem_map.  We attempt to calculate this
     96       1.49   thorpej  * at run-time, but allow it to be either patched or set in the kernel
     97       1.49   thorpej  * config file.
     98       1.49   thorpej  */
     99       1.49   thorpej #ifndef NKMEMPAGES
    100       1.49   thorpej #define	NKMEMPAGES	0
    101       1.49   thorpej #endif
    102       1.49   thorpej int	nkmempages = NKMEMPAGES;
    103       1.49   thorpej 
    104       1.49   thorpej /*
    105       1.49   thorpej  * Defaults for lower- and upper-bounds for the kmem_map page count.
    106       1.49   thorpej  * Can be overridden by kernel config options.
    107       1.49   thorpej  */
    108       1.49   thorpej #ifndef	NKMEMPAGES_MIN
    109       1.49   thorpej #define	NKMEMPAGES_MIN	NKMEMPAGES_MIN_DEFAULT
    110       1.49   thorpej #endif
    111       1.49   thorpej 
    112       1.49   thorpej #ifndef NKMEMPAGES_MAX
    113       1.49   thorpej #define	NKMEMPAGES_MAX	NKMEMPAGES_MAX_DEFAULT
    114       1.49   thorpej #endif
    115       1.49   thorpej 
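/*
 * Example only (not part of the original file): NKMEMPAGES and these
 * bounds are page counts and can be pinned from the kernel config file
 * instead of being auto-sized, e.g.:
 *
 *	options 	NKMEMPAGES=16384	# 64 MB of kmem_map with 4 kB pages
 *	options 	NKMEMPAGES_MAX=32768
 *
 * The numbers above are arbitrary illustrations, not recommendations.
 */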
    116       1.24   thorpej #include "opt_kmemstats.h"
    117       1.27   thorpej #include "opt_malloclog.h"
    118       1.71      fvdl #include "opt_malloc_debug.h"
    119       1.12  christos 
    120      1.103       chs #define	MINALLOCSIZE	(1 << MINBUCKET)
    121      1.103       chs #define	BUCKETINDX(size) \
    122      1.103       chs 	((size) <= (MINALLOCSIZE * 128) \
    123      1.103       chs 		? (size) <= (MINALLOCSIZE * 8) \
    124      1.103       chs 			? (size) <= (MINALLOCSIZE * 2) \
    125      1.103       chs 				? (size) <= (MINALLOCSIZE * 1) \
    126      1.103       chs 					? (MINBUCKET + 0) \
    127      1.103       chs 					: (MINBUCKET + 1) \
    128      1.103       chs 				: (size) <= (MINALLOCSIZE * 4) \
    129      1.103       chs 					? (MINBUCKET + 2) \
    130      1.103       chs 					: (MINBUCKET + 3) \
     131      1.103       chs 			: (size) <= (MINALLOCSIZE * 32) \
    132      1.103       chs 				? (size) <= (MINALLOCSIZE * 16) \
    133      1.103       chs 					? (MINBUCKET + 4) \
    134      1.103       chs 					: (MINBUCKET + 5) \
    135      1.103       chs 				: (size) <= (MINALLOCSIZE * 64) \
    136      1.103       chs 					? (MINBUCKET + 6) \
    137      1.103       chs 					: (MINBUCKET + 7) \
    138      1.103       chs 		: (size) <= (MINALLOCSIZE * 2048) \
    139      1.103       chs 			? (size) <= (MINALLOCSIZE * 512) \
    140      1.103       chs 				? (size) <= (MINALLOCSIZE * 256) \
    141      1.103       chs 					? (MINBUCKET + 8) \
    142      1.103       chs 					: (MINBUCKET + 9) \
    143      1.103       chs 				: (size) <= (MINALLOCSIZE * 1024) \
    144      1.103       chs 					? (MINBUCKET + 10) \
    145      1.103       chs 					: (MINBUCKET + 11) \
    146      1.103       chs 			: (size) <= (MINALLOCSIZE * 8192) \
    147      1.103       chs 				? (size) <= (MINALLOCSIZE * 4096) \
    148      1.103       chs 					? (MINBUCKET + 12) \
    149      1.103       chs 					: (MINBUCKET + 13) \
    150      1.103       chs 				: (size) <= (MINALLOCSIZE * 16384) \
    151      1.103       chs 					? (MINBUCKET + 14) \
    152      1.103       chs 					: (MINBUCKET + 15))
    153      1.103       chs 
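/*
 * Illustrative sketch (not part of the original source): BUCKETINDX()
 * picks the smallest power-of-two bucket able to hold a request.  The
 * concrete sizes in the comments assume MINBUCKET == 4, i.e.
 * MINALLOCSIZE == 16, which is the usual configuration.
 */
#if 0	/* example only */
	KASSERT(BUCKETINDX(MINALLOCSIZE) == MINBUCKET);	/* 16-byte bucket */
	KASSERT(BUCKETINDX(100) == MINBUCKET + 3);	/* 128-byte bucket */
	KASSERT(BUCKETINDX(4096) == MINBUCKET + 8);	/* 4096-byte bucket */
#endif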
    154      1.103       chs /*
    155      1.103       chs  * Array of descriptors that describe the contents of each page
    156      1.103       chs  */
    157      1.103       chs struct kmemusage {
    158      1.103       chs 	short ku_indx;		/* bucket index */
    159      1.103       chs 	union {
    160      1.103       chs 		u_short freecnt;/* for small allocations, free pieces in page */
    161      1.103       chs 		u_short pagecnt;/* for large allocations, pages alloced */
    162      1.103       chs 	} ku_un;
    163      1.103       chs };
    164      1.103       chs #define	ku_freecnt ku_un.freecnt
    165      1.103       chs #define	ku_pagecnt ku_un.pagecnt
    166      1.103       chs 
    167       1.99       chs struct kmembuckets kmembuckets[MINBUCKET + 16];
    168        1.1       cgd struct kmemusage *kmemusage;
    169        1.1       cgd char *kmembase, *kmemlimit;
    170       1.77   thorpej 
    171  1.116.2.1      yamt #if 0
    172      1.106        ad #ifdef DEBUG
    173      1.106        ad static void *malloc_freecheck;
    174      1.106        ad #endif
    175  1.116.2.1      yamt #endif
    176      1.106        ad 
    177      1.103       chs /*
    178      1.103       chs  * Turn virtual addresses into kmem map indicies
     179      1.103       chs  * Turn virtual addresses into kmem map indices
    180      1.108  christos #define	btokup(addr)	(&kmemusage[((char *)(addr) - kmembase) >> PGSHIFT])
    181      1.103       chs 
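/*
 * Illustrative sketch (not part of the original source): btokup() maps an
 * address handed out by the classic allocator to its per-page descriptor,
 * from which the block size can be recovered much as free() does below.
 * "addr" is a hypothetical pointer previously returned by malloc().
 */
#if 0	/* example only */
	struct kmemusage *kup = btokup(addr);
	unsigned long blksize = 1UL << kup->ku_indx;	/* bucket size */

	if (blksize > MAXALLOCSAVE)
		blksize = ctob(kup->ku_pagecnt);	/* whole pages */
#endif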
    182       1.77   thorpej struct malloc_type *kmemstatistics;
    183        1.1       cgd 
    184       1.27   thorpej #ifdef MALLOCLOG
    185       1.27   thorpej #ifndef MALLOCLOGSIZE
    186       1.27   thorpej #define	MALLOCLOGSIZE	100000
    187       1.27   thorpej #endif
    188       1.27   thorpej 
    189       1.27   thorpej struct malloclog {
    190       1.27   thorpej 	void *addr;
    191       1.27   thorpej 	long size;
    192       1.77   thorpej 	struct malloc_type *type;
    193       1.27   thorpej 	int action;
    194       1.27   thorpej 	const char *file;
    195       1.27   thorpej 	long line;
    196       1.27   thorpej } malloclog[MALLOCLOGSIZE];
    197       1.27   thorpej 
    198       1.27   thorpej long	malloclogptr;
    199       1.27   thorpej 
    200       1.27   thorpej static void
    201       1.77   thorpej domlog(void *a, long size, struct malloc_type *type, int action,
    202       1.77   thorpej     const char *file, long line)
    203       1.27   thorpej {
    204       1.27   thorpej 
    205       1.27   thorpej 	malloclog[malloclogptr].addr = a;
    206       1.27   thorpej 	malloclog[malloclogptr].size = size;
    207       1.27   thorpej 	malloclog[malloclogptr].type = type;
    208       1.27   thorpej 	malloclog[malloclogptr].action = action;
    209       1.27   thorpej 	malloclog[malloclogptr].file = file;
    210       1.27   thorpej 	malloclog[malloclogptr].line = line;
    211       1.27   thorpej 	malloclogptr++;
    212       1.27   thorpej 	if (malloclogptr >= MALLOCLOGSIZE)
    213       1.27   thorpej 		malloclogptr = 0;
    214       1.27   thorpej }
    215       1.27   thorpej 
    216       1.27   thorpej static void
    217       1.69     enami hitmlog(void *a)
    218       1.27   thorpej {
    219       1.27   thorpej 	struct malloclog *lp;
    220       1.27   thorpej 	long l;
    221       1.27   thorpej 
    222       1.69     enami #define	PRT do { \
    223       1.88   mycroft 	lp = &malloclog[l]; \
    224       1.88   mycroft 	if (lp->addr == a && lp->action) { \
    225       1.27   thorpej 		printf("malloc log entry %ld:\n", l); \
    226       1.27   thorpej 		printf("\taddr = %p\n", lp->addr); \
    227       1.27   thorpej 		printf("\tsize = %ld\n", lp->size); \
    228       1.77   thorpej 		printf("\ttype = %s\n", lp->type->ks_shortdesc); \
    229       1.27   thorpej 		printf("\taction = %s\n", lp->action == 1 ? "alloc" : "free"); \
    230       1.27   thorpej 		printf("\tfile = %s\n", lp->file); \
    231       1.27   thorpej 		printf("\tline = %ld\n", lp->line); \
    232       1.69     enami 	} \
    233       1.69     enami } while (/* CONSTCOND */0)
    234       1.27   thorpej 
    235       1.27   thorpej 	for (l = malloclogptr; l < MALLOCLOGSIZE; l++)
    236       1.69     enami 		PRT;
    237       1.27   thorpej 
    238       1.27   thorpej 	for (l = 0; l < malloclogptr; l++)
    239       1.69     enami 		PRT;
    240       1.88   mycroft #undef PRT
    241       1.27   thorpej }
    242       1.27   thorpej #endif /* MALLOCLOG */
    243       1.27   thorpej 
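/*
 * Example only (not part of the original file): the log above is compiled
 * in by enabling the MALLOCLOG kernel option in the config file:
 *
 *	options 	MALLOCLOG
 *
 * MALLOCLOGSIZE defaults to the 100000 entries defined above.
 */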
    244        1.8       cgd #ifdef DIAGNOSTIC
    245        1.8       cgd /*
    246        1.8       cgd  * This structure provides a set of masks to catch unaligned frees.
    247        1.8       cgd  */
    248       1.57  jdolecek const long addrmask[] = { 0,
    249        1.8       cgd 	0x00000001, 0x00000003, 0x00000007, 0x0000000f,
    250        1.8       cgd 	0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
    251        1.8       cgd 	0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
    252        1.8       cgd 	0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
    253        1.8       cgd };
    254        1.8       cgd 
    255        1.8       cgd /*
    256        1.8       cgd  * The WEIRD_ADDR is used as known text to copy into free objects so
    257        1.8       cgd  * that modifications after frees can be detected.
    258        1.8       cgd  */
    259       1.76   thorpej #define	WEIRD_ADDR	((uint32_t) 0xdeadbeef)
    260       1.55       chs #ifdef DEBUG
    261       1.69     enami #define	MAX_COPY	PAGE_SIZE
    262       1.55       chs #else
    263       1.69     enami #define	MAX_COPY	32
    264       1.55       chs #endif
    265        1.8       cgd 
    266        1.8       cgd /*
    267       1.11       cgd  * Normally the freelist structure is used only to hold the list pointer
    268       1.11       cgd  * for free objects.  However, when running with diagnostics, the first
     269       1.77   thorpej  * 8/16 bytes of the structure are unused except for diagnostic information,
     270       1.77   thorpej  * and the free list pointer is at offset 8/16 in the structure.  Since the
     271       1.11       cgd  * first 8 bytes are the portion of the structure most often modified, this
    272       1.11       cgd  * helps to detect memory reuse problems and avoid free list corruption.
    273        1.8       cgd  */
    274        1.8       cgd struct freelist {
    275       1.76   thorpej 	uint32_t spare0;
    276       1.77   thorpej #ifdef _LP64
    277       1.77   thorpej 	uint32_t spare1;		/* explicit padding */
    278       1.77   thorpej #endif
    279       1.77   thorpej 	struct malloc_type *type;
    280      1.108  christos 	void *	next;
    281        1.8       cgd };
    282        1.8       cgd #else /* !DIAGNOSTIC */
    283        1.8       cgd struct freelist {
    284      1.108  christos 	void *	next;
    285        1.8       cgd };
    286        1.8       cgd #endif /* DIAGNOSTIC */
    287        1.8       cgd 
    288        1.1       cgd /*
    289      1.100      jmmv  * The following are standard, built-in malloc types and are not
    290      1.100      jmmv  * specific to any subsystem.
    291       1.77   thorpej  */
    292       1.77   thorpej MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
    293       1.77   thorpej MALLOC_DEFINE(M_DMAMAP, "DMA map", "bus_dma(9) structures");
    294       1.77   thorpej MALLOC_DEFINE(M_FREE, "free", "should be on free list");
    295       1.77   thorpej MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");
    296       1.77   thorpej MALLOC_DEFINE(M_SOFTINTR, "softintr", "Softinterrupt structures");
    297       1.77   thorpej MALLOC_DEFINE(M_TEMP, "temp", "misc. temporary data buffers");
    298       1.77   thorpej 
    299       1.77   thorpej /* XXX These should all be elsewhere. */
    300       1.77   thorpej MALLOC_DEFINE(M_RTABLE, "routetbl", "routing tables");
    301       1.77   thorpej MALLOC_DEFINE(M_FTABLE, "fragtbl", "fragment reassembly header");
    302       1.77   thorpej MALLOC_DEFINE(M_UFSMNT, "UFS mount", "UFS mount structure");
    303       1.77   thorpej MALLOC_DEFINE(M_NETADDR, "Export Host", "Export host address structure");
    304       1.77   thorpej MALLOC_DEFINE(M_IPMOPTS, "ip_moptions", "internet multicast options");
    305       1.77   thorpej MALLOC_DEFINE(M_IPMADDR, "in_multi", "internet multicast address");
    306       1.77   thorpej MALLOC_DEFINE(M_MRTABLE, "mrt", "multicast routing tables");
    307       1.90      manu MALLOC_DEFINE(M_BWMETER, "bwmeter", "multicast upcall bw meters");
    308       1.77   thorpej MALLOC_DEFINE(M_1394DATA, "1394data", "IEEE 1394 data buffers");
    309       1.77   thorpej 
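/*
 * Illustrative sketch (not part of the original source): a subsystem
 * declares its own statistics type with MALLOC_DEFINE() and passes it to
 * malloc()/free().  M_EXAMPLE, struct example_softc and the two functions
 * are hypothetical names.
 */
#if 0	/* example only */
MALLOC_DEFINE(M_EXAMPLE, "example", "example subsystem structures");

static struct example_softc *
example_create(void)
{
	struct example_softc *sc;

	/* M_ZERO zeroes the memory; M_WAITOK allows sleeping for it. */
	sc = malloc(sizeof(*sc), M_EXAMPLE, M_WAITOK | M_ZERO);
	return sc;
}

static void
example_destroy(struct example_softc *sc)
{

	free(sc, M_EXAMPLE);
}
#endif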
    310      1.109        ad kmutex_t malloc_lock;
    311       1.78        pk 
    312  1.116.2.1      yamt struct malloc_header {
    313  1.116.2.1      yamt 	size_t mh_size;
    314  1.116.2.1      yamt };
    315  1.116.2.1      yamt 
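/*
 * Sketch of the layout used by the kmem(9)-backed malloc() below (drawn
 * from the code, not part of the original comments): the caller's block is
 * preceded by a malloc_header recording the total length, which free()
 * needs in order to call kmem_free().
 *
 *	kmem_alloc() -> +------------------+
 *	                |     mh_size      |  struct malloc_header
 *	caller's ptr -> +------------------+
 *	                |  size bytes ...  |
 *	                +------------------+
 */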
    316       1.77   thorpej /*
    317        1.1       cgd  * Allocate a block of memory
    318        1.1       cgd  */
    319       1.27   thorpej #ifdef MALLOCLOG
    320       1.27   thorpej void *
    321      1.105      yamt _malloc(unsigned long size, struct malloc_type *ksp, int flags,
    322       1.77   thorpej     const char *file, long line)
    323       1.27   thorpej #else
    324        1.1       cgd void *
    325      1.105      yamt malloc(unsigned long size, struct malloc_type *ksp, int flags)
    326       1.27   thorpej #endif /* MALLOCLOG */
    327        1.1       cgd {
    328  1.116.2.1      yamt 	struct malloc_header *mh;
    329  1.116.2.1      yamt 	int kmflags = (flags & M_NOWAIT) != 0 ? KM_NOSLEEP : KM_SLEEP;
    330  1.116.2.1      yamt 	size_t allocsize = sizeof(struct malloc_header) + size;
    331  1.116.2.1      yamt 	void *p;
    332  1.116.2.1      yamt 
    333  1.116.2.1      yamt 	if ((flags & M_ZERO) != 0) {
    334  1.116.2.1      yamt 		p = kmem_zalloc(allocsize, kmflags);
    335  1.116.2.1      yamt 	} else {
    336  1.116.2.1      yamt 		p = kmem_alloc(allocsize, kmflags);
    337  1.116.2.1      yamt 	}
    338  1.116.2.1      yamt 	if (p == NULL) {
    339  1.116.2.1      yamt 		return NULL;
    340  1.116.2.1      yamt 	}
    341  1.116.2.1      yamt 	mh = (void *)p;
    342  1.116.2.1      yamt 	mh->mh_size = allocsize;
    343  1.116.2.1      yamt 
    344  1.116.2.1      yamt 	return mh + 1;
    345  1.116.2.1      yamt #if 0
    346       1.50  augustss 	struct kmembuckets *kbp;
    347       1.50  augustss 	struct kmemusage *kup;
    348       1.50  augustss 	struct freelist *freep;
    349        1.5    andrew 	long indx, npg, allocsize;
    350      1.108  christos 	char *va, *cp, *savedlist;
    351        1.8       cgd #ifdef DIAGNOSTIC
    352       1.76   thorpej 	uint32_t *end, *lp;
    353        1.8       cgd 	int copysize;
    354        1.8       cgd #endif
    355        1.1       cgd 
    356       1.59   thorpej #ifdef LOCKDEBUG
    357       1.59   thorpej 	if ((flags & M_NOWAIT) == 0)
    358      1.102      yamt 		ASSERT_SLEEPABLE(NULL, "malloc");
    359       1.59   thorpej #endif
    360       1.62   thorpej #ifdef MALLOC_DEBUG
    361      1.106        ad 	if (debug_malloc(size, ksp, flags, (void *) &va)) {
    362      1.106        ad 		if (va != 0)
    363      1.106        ad 			FREECHECK_OUT(&malloc_freecheck, (void *)va);
    364       1.62   thorpej 		return ((void *) va);
    365      1.106        ad 	}
    366       1.62   thorpej #endif
    367        1.1       cgd 	indx = BUCKETINDX(size);
    368       1.99       chs 	kbp = &kmembuckets[indx];
    369      1.113        ad 	mutex_spin_enter(&malloc_lock);
    370        1.1       cgd #ifdef KMEMSTATS
    371        1.1       cgd 	while (ksp->ks_memuse >= ksp->ks_limit) {
    372        1.1       cgd 		if (flags & M_NOWAIT) {
    373      1.113        ad 			mutex_spin_exit(&malloc_lock);
    374        1.1       cgd 			return ((void *) NULL);
    375        1.1       cgd 		}
    376        1.1       cgd 		if (ksp->ks_limblocks < 65535)
    377        1.1       cgd 			ksp->ks_limblocks++;
    378      1.109        ad 		mtsleep((void *)ksp, PSWP+2, ksp->ks_shortdesc, 0,
    379      1.109        ad 			&malloc_lock);
    380        1.1       cgd 	}
    381        1.8       cgd 	ksp->ks_size |= 1 << indx;
    382        1.8       cgd #endif
    383        1.8       cgd #ifdef DIAGNOSTIC
    384        1.8       cgd 	copysize = 1 << indx < MAX_COPY ? 1 << indx : MAX_COPY;
    385        1.1       cgd #endif
    386        1.1       cgd 	if (kbp->kb_next == NULL) {
    387      1.111      yamt 		int s;
    388        1.8       cgd 		kbp->kb_last = NULL;
    389        1.1       cgd 		if (size > MAXALLOCSAVE)
    390       1.66     enami 			allocsize = round_page(size);
    391        1.1       cgd 		else
    392        1.1       cgd 			allocsize = 1 << indx;
    393       1.47     ragge 		npg = btoc(allocsize);
    394      1.113        ad 		mutex_spin_exit(&malloc_lock);
    395      1.111      yamt 		s = splvm();
    396      1.108  christos 		va = (void *) uvm_km_alloc(kmem_map,
    397       1.97      yamt 		    (vsize_t)ctob(npg), 0,
    398       1.73       chs 		    ((flags & M_NOWAIT) ? UVM_KMF_NOWAIT : 0) |
    399       1.97      yamt 		    ((flags & M_CANFAIL) ? UVM_KMF_CANFAIL : 0) |
    400       1.97      yamt 		    UVM_KMF_WIRED);
    401      1.111      yamt 		splx(s);
    402       1.51   thorpej 		if (__predict_false(va == NULL)) {
    403       1.17       cgd 			/*
    404       1.17       cgd 			 * uvm_km_alloc() can return NULL, even if it can
    405       1.91    simonb 			 * wait, if there is no map space available, because
    406       1.17       cgd 			 * it can't fix that problem.  Neither can we,
    407       1.17       cgd 			 * right now.  (We should release pages which
    408       1.99       chs 			 * are completely free and which are in kmembuckets
    409       1.17       cgd 			 * with too many free elements.)
    410       1.17       cgd 			 */
    411       1.68  jdolecek 			if ((flags & (M_NOWAIT|M_CANFAIL)) == 0)
    412       1.17       cgd 				panic("malloc: out of space in kmem_map");
    413       1.73       chs 			return (NULL);
    414        1.1       cgd 		}
    415      1.113        ad 		mutex_spin_enter(&malloc_lock);
    416        1.1       cgd #ifdef KMEMSTATS
    417        1.1       cgd 		kbp->kb_total += kbp->kb_elmpercl;
    418        1.1       cgd #endif
    419        1.1       cgd 		kup = btokup(va);
    420        1.1       cgd 		kup->ku_indx = indx;
    421        1.1       cgd 		if (allocsize > MAXALLOCSAVE) {
    422        1.1       cgd 			if (npg > 65535)
    423        1.1       cgd 				panic("malloc: allocation too large");
    424        1.1       cgd 			kup->ku_pagecnt = npg;
    425        1.1       cgd #ifdef KMEMSTATS
    426        1.1       cgd 			ksp->ks_memuse += allocsize;
    427        1.1       cgd #endif
    428        1.1       cgd 			goto out;
    429        1.1       cgd 		}
    430        1.1       cgd #ifdef KMEMSTATS
    431        1.1       cgd 		kup->ku_freecnt = kbp->kb_elmpercl;
    432        1.1       cgd 		kbp->kb_totalfree += kbp->kb_elmpercl;
    433        1.1       cgd #endif
    434        1.1       cgd 		/*
    435        1.1       cgd 		 * Just in case we blocked while allocating memory,
    436        1.1       cgd 		 * and someone else also allocated memory for this
    437       1.99       chs 		 * kmembucket, don't assume the list is still empty.
    438        1.1       cgd 		 */
    439        1.1       cgd 		savedlist = kbp->kb_next;
    440       1.49   thorpej 		kbp->kb_next = cp = va + (npg << PAGE_SHIFT) - allocsize;
    441        1.8       cgd 		for (;;) {
    442        1.8       cgd 			freep = (struct freelist *)cp;
    443        1.8       cgd #ifdef DIAGNOSTIC
    444        1.8       cgd 			/*
    445        1.8       cgd 			 * Copy in known text to detect modification
    446        1.8       cgd 			 * after freeing.
    447        1.8       cgd 			 */
    448       1.86     ragge 			end = (uint32_t *)&cp[copysize];
    449       1.86     ragge 			for (lp = (uint32_t *)cp; lp < end; lp++)
    450        1.8       cgd 				*lp = WEIRD_ADDR;
    451        1.8       cgd 			freep->type = M_FREE;
    452        1.8       cgd #endif /* DIAGNOSTIC */
    453        1.8       cgd 			if (cp <= va)
    454        1.8       cgd 				break;
    455        1.8       cgd 			cp -= allocsize;
    456        1.8       cgd 			freep->next = cp;
    457        1.8       cgd 		}
    458        1.8       cgd 		freep->next = savedlist;
    459        1.8       cgd 		if (kbp->kb_last == NULL)
    460      1.108  christos 			kbp->kb_last = (void *)freep;
    461        1.1       cgd 	}
    462        1.1       cgd 	va = kbp->kb_next;
    463        1.8       cgd 	kbp->kb_next = ((struct freelist *)va)->next;
    464        1.8       cgd #ifdef DIAGNOSTIC
    465        1.8       cgd 	freep = (struct freelist *)va;
    466       1.77   thorpej 	/* XXX potential to get garbage pointer here. */
    467       1.29       chs 	if (kbp->kb_next) {
    468       1.29       chs 		int rv;
    469       1.35       eeh 		vaddr_t addr = (vaddr_t)kbp->kb_next;
    470       1.29       chs 
    471       1.43   thorpej 		vm_map_lock(kmem_map);
    472       1.29       chs 		rv = uvm_map_checkprot(kmem_map, addr,
    473       1.69     enami 		    addr + sizeof(struct freelist), VM_PROT_WRITE);
    474       1.43   thorpej 		vm_map_unlock(kmem_map);
    475       1.29       chs 
    476       1.51   thorpej 		if (__predict_false(rv == 0)) {
    477       1.69     enami 			printf("Data modified on freelist: "
    478       1.69     enami 			    "word %ld of object %p size %ld previous type %s "
    479       1.69     enami 			    "(invalid addr %p)\n",
    480       1.41       mrg 			    (long)((int32_t *)&kbp->kb_next - (int32_t *)kbp),
    481       1.80      manu 			    va, size, "foo", kbp->kb_next);
    482       1.27   thorpej #ifdef MALLOCLOG
    483       1.41       mrg 			hitmlog(va);
    484       1.27   thorpej #endif
    485       1.41       mrg 			kbp->kb_next = NULL;
    486       1.29       chs 		}
    487        1.8       cgd 	}
    488       1.11       cgd 
    489       1.11       cgd 	/* Fill the fields that we've used with WEIRD_ADDR */
    490       1.77   thorpej #ifdef _LP64
    491       1.77   thorpej 	freep->type = (struct malloc_type *)
    492       1.77   thorpej 	    (WEIRD_ADDR | (((u_long) WEIRD_ADDR) << 32));
    493       1.77   thorpej #else
    494       1.77   thorpej 	freep->type = (struct malloc_type *) WEIRD_ADDR;
    495        1.8       cgd #endif
    496       1.86     ragge 	end = (uint32_t *)&freep->next +
    497       1.11       cgd 	    (sizeof(freep->next) / sizeof(int32_t));
    498       1.86     ragge 	for (lp = (uint32_t *)&freep->next; lp < end; lp++)
    499       1.11       cgd 		*lp = WEIRD_ADDR;
    500       1.11       cgd 
    501       1.11       cgd 	/* and check that the data hasn't been modified. */
    502       1.76   thorpej 	end = (uint32_t *)&va[copysize];
    503       1.86     ragge 	for (lp = (uint32_t *)va; lp < end; lp++) {
    504       1.51   thorpej 		if (__predict_true(*lp == WEIRD_ADDR))
    505        1.8       cgd 			continue;
    506       1.69     enami 		printf("Data modified on freelist: "
    507       1.69     enami 		    "word %ld of object %p size %ld previous type %s "
    508       1.69     enami 		    "(0x%x != 0x%x)\n",
    509       1.76   thorpej 		    (long)(lp - (uint32_t *)va), va, size,
    510       1.80      manu 		    "bar", *lp, WEIRD_ADDR);
    511       1.27   thorpej #ifdef MALLOCLOG
    512       1.27   thorpej 		hitmlog(va);
    513       1.27   thorpej #endif
    514        1.8       cgd 		break;
    515        1.8       cgd 	}
    516       1.11       cgd 
    517        1.8       cgd 	freep->spare0 = 0;
    518        1.8       cgd #endif /* DIAGNOSTIC */
    519        1.1       cgd #ifdef KMEMSTATS
    520        1.1       cgd 	kup = btokup(va);
    521        1.1       cgd 	if (kup->ku_indx != indx)
    522        1.1       cgd 		panic("malloc: wrong bucket");
    523        1.1       cgd 	if (kup->ku_freecnt == 0)
    524        1.1       cgd 		panic("malloc: lost data");
    525        1.1       cgd 	kup->ku_freecnt--;
    526        1.1       cgd 	kbp->kb_totalfree--;
    527        1.1       cgd 	ksp->ks_memuse += 1 << indx;
    528        1.1       cgd out:
    529        1.1       cgd 	kbp->kb_calls++;
    530        1.1       cgd 	ksp->ks_inuse++;
    531        1.1       cgd 	ksp->ks_calls++;
    532        1.1       cgd 	if (ksp->ks_memuse > ksp->ks_maxused)
    533        1.1       cgd 		ksp->ks_maxused = ksp->ks_memuse;
    534        1.1       cgd #else
    535        1.1       cgd out:
    536        1.1       cgd #endif
    537       1.27   thorpej #ifdef MALLOCLOG
    538       1.80      manu 	domlog(va, size, ksp, 1, file, line);
    539       1.27   thorpej #endif
    540      1.113        ad 	mutex_spin_exit(&malloc_lock);
    541       1.67     enami 	if ((flags & M_ZERO) != 0)
    542       1.65     lukem 		memset(va, 0, size);
    543      1.106        ad 	FREECHECK_OUT(&malloc_freecheck, (void *)va);
    544        1.1       cgd 	return ((void *) va);
    545  1.116.2.1      yamt #endif
    546        1.1       cgd }
    547        1.1       cgd 
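/*
 * Illustrative only (not part of the original source): callers that cannot
 * sleep pass M_NOWAIT and must handle a NULL return.  "len" and "error"
 * are hypothetical.
 */
#if 0	/* example only */
	char *p = malloc(len, M_TEMP, M_NOWAIT);

	if (p == NULL)
		error = ENOMEM;	/* nothing available without sleeping */
#endif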
    548        1.1       cgd /*
    549        1.1       cgd  * Free a block of memory allocated by malloc.
    550        1.1       cgd  */
    551       1.27   thorpej #ifdef MALLOCLOG
    552       1.27   thorpej void
    553      1.105      yamt _free(void *addr, struct malloc_type *ksp, const char *file, long line)
    554       1.27   thorpej #else
    555        1.1       cgd void
    556      1.105      yamt free(void *addr, struct malloc_type *ksp)
    557       1.27   thorpej #endif /* MALLOCLOG */
    558        1.1       cgd {
    559  1.116.2.1      yamt 	struct malloc_header *mh;
    560  1.116.2.1      yamt 
    561  1.116.2.1      yamt 	mh = addr;
    562  1.116.2.1      yamt 	mh--;
    563  1.116.2.1      yamt 	kmem_free(mh, mh->mh_size);
    564  1.116.2.1      yamt #if 0
    565       1.50  augustss 	struct kmembuckets *kbp;
    566       1.50  augustss 	struct kmemusage *kup;
    567       1.50  augustss 	struct freelist *freep;
    568        1.8       cgd 	long size;
    569        1.5    andrew #ifdef DIAGNOSTIC
    570      1.108  christos 	void *cp;
    571       1.11       cgd 	int32_t *end, *lp;
    572       1.11       cgd 	long alloc, copysize;
    573        1.5    andrew #endif
    574       1.48   thorpej 
    575      1.106        ad 	FREECHECK_IN(&malloc_freecheck, addr);
    576       1.62   thorpej #ifdef MALLOC_DEBUG
    577       1.77   thorpej 	if (debug_free(addr, ksp))
    578       1.62   thorpej 		return;
    579       1.62   thorpej #endif
    580       1.62   thorpej 
    581       1.48   thorpej #ifdef DIAGNOSTIC
    582       1.48   thorpej 	/*
    583       1.48   thorpej 	 * Ensure that we're free'ing something that we could
    584       1.48   thorpej 	 * have allocated in the first place.  That is, check
    585       1.48   thorpej 	 * to see that the address is within kmem_map.
    586       1.48   thorpej 	 */
    587       1.83     enami 	if (__predict_false((vaddr_t)addr < vm_map_min(kmem_map) ||
    588       1.83     enami 	    (vaddr_t)addr >= vm_map_max(kmem_map)))
    589       1.48   thorpej 		panic("free: addr %p not within kmem_map", addr);
    590        1.1       cgd #endif
    591        1.1       cgd 
    592        1.1       cgd 	kup = btokup(addr);
    593        1.1       cgd 	size = 1 << kup->ku_indx;
    594       1.99       chs 	kbp = &kmembuckets[kup->ku_indx];
    595      1.113        ad 
    596      1.115      yamt 	LOCKDEBUG_MEM_CHECK(addr,
    597      1.115      yamt 	    size <= MAXALLOCSAVE ? size : ctob(kup->ku_pagecnt));
    598      1.113        ad 
    599      1.113        ad 	mutex_spin_enter(&malloc_lock);
    600       1.27   thorpej #ifdef MALLOCLOG
    601       1.80      manu 	domlog(addr, 0, ksp, 2, file, line);
    602       1.27   thorpej #endif
    603        1.1       cgd #ifdef DIAGNOSTIC
    604        1.8       cgd 	/*
    605        1.8       cgd 	 * Check for returns of data that do not point to the
    606        1.8       cgd 	 * beginning of the allocation.
    607        1.8       cgd 	 */
    608       1.49   thorpej 	if (size > PAGE_SIZE)
    609       1.49   thorpej 		alloc = addrmask[BUCKETINDX(PAGE_SIZE)];
    610        1.1       cgd 	else
    611        1.1       cgd 		alloc = addrmask[kup->ku_indx];
    612        1.8       cgd 	if (((u_long)addr & alloc) != 0)
    613       1.75    provos 		panic("free: unaligned addr %p, size %ld, type %s, mask %ld",
    614       1.77   thorpej 		    addr, size, ksp->ks_shortdesc, alloc);
    615        1.1       cgd #endif /* DIAGNOSTIC */
    616        1.1       cgd 	if (size > MAXALLOCSAVE) {
    617       1.97      yamt 		uvm_km_free(kmem_map, (vaddr_t)addr, ctob(kup->ku_pagecnt),
    618       1.97      yamt 		    UVM_KMF_WIRED);
    619        1.1       cgd #ifdef KMEMSTATS
    620        1.1       cgd 		size = kup->ku_pagecnt << PGSHIFT;
    621        1.1       cgd 		ksp->ks_memuse -= size;
    622        1.1       cgd 		kup->ku_indx = 0;
    623        1.1       cgd 		kup->ku_pagecnt = 0;
    624        1.1       cgd 		if (ksp->ks_memuse + size >= ksp->ks_limit &&
    625        1.1       cgd 		    ksp->ks_memuse < ksp->ks_limit)
    626      1.108  christos 			wakeup((void *)ksp);
    627       1.79      fvdl #ifdef DIAGNOSTIC
    628       1.79      fvdl 		if (ksp->ks_inuse == 0)
    629       1.79      fvdl 			panic("free 1: inuse 0, probable double free");
    630       1.79      fvdl #endif
    631        1.1       cgd 		ksp->ks_inuse--;
    632        1.1       cgd 		kbp->kb_total -= 1;
    633        1.1       cgd #endif
    634      1.113        ad 		mutex_spin_exit(&malloc_lock);
    635        1.1       cgd 		return;
    636        1.1       cgd 	}
    637        1.8       cgd 	freep = (struct freelist *)addr;
    638        1.8       cgd #ifdef DIAGNOSTIC
    639        1.8       cgd 	/*
    640        1.8       cgd 	 * Check for multiple frees. Use a quick check to see if
    641        1.8       cgd 	 * it looks free before laboriously searching the freelist.
    642        1.8       cgd 	 */
    643       1.51   thorpej 	if (__predict_false(freep->spare0 == WEIRD_ADDR)) {
    644       1.16       cgd 		for (cp = kbp->kb_next; cp;
    645       1.16       cgd 		    cp = ((struct freelist *)cp)->next) {
    646        1.8       cgd 			if (addr != cp)
    647        1.8       cgd 				continue;
    648       1.22  christos 			printf("multiply freed item %p\n", addr);
    649       1.27   thorpej #ifdef MALLOCLOG
    650       1.27   thorpej 			hitmlog(addr);
    651       1.27   thorpej #endif
    652        1.8       cgd 			panic("free: duplicated free");
    653        1.8       cgd 		}
    654        1.8       cgd 	}
    655      1.112        ad 
    656        1.8       cgd 	/*
    657        1.8       cgd 	 * Copy in known text to detect modification after freeing
    658        1.8       cgd 	 * and to make it look free. Also, save the type being freed
    659        1.8       cgd 	 * so we can list likely culprit if modification is detected
    660        1.8       cgd 	 * when the object is reallocated.
    661        1.8       cgd 	 */
    662        1.8       cgd 	copysize = size < MAX_COPY ? size : MAX_COPY;
    663      1.108  christos 	end = (int32_t *)&((char *)addr)[copysize];
    664       1.11       cgd 	for (lp = (int32_t *)addr; lp < end; lp++)
    665        1.8       cgd 		*lp = WEIRD_ADDR;
    666       1.77   thorpej 	freep->type = ksp;
    667        1.8       cgd #endif /* DIAGNOSTIC */
    668        1.1       cgd #ifdef KMEMSTATS
    669        1.1       cgd 	kup->ku_freecnt++;
    670       1.36   thorpej 	if (kup->ku_freecnt >= kbp->kb_elmpercl) {
    671        1.1       cgd 		if (kup->ku_freecnt > kbp->kb_elmpercl)
    672        1.1       cgd 			panic("free: multiple frees");
    673        1.1       cgd 		else if (kbp->kb_totalfree > kbp->kb_highwat)
    674        1.1       cgd 			kbp->kb_couldfree++;
    675       1.36   thorpej 	}
    676        1.1       cgd 	kbp->kb_totalfree++;
    677        1.1       cgd 	ksp->ks_memuse -= size;
    678        1.1       cgd 	if (ksp->ks_memuse + size >= ksp->ks_limit &&
    679        1.1       cgd 	    ksp->ks_memuse < ksp->ks_limit)
    680      1.108  christos 		wakeup((void *)ksp);
    681       1.79      fvdl #ifdef DIAGNOSTIC
    682       1.79      fvdl 	if (ksp->ks_inuse == 0)
    683       1.79      fvdl 		panic("free 2: inuse 0, probable double free");
    684       1.79      fvdl #endif
    685        1.1       cgd 	ksp->ks_inuse--;
    686        1.1       cgd #endif
    687        1.8       cgd 	if (kbp->kb_next == NULL)
    688        1.8       cgd 		kbp->kb_next = addr;
    689        1.8       cgd 	else
    690        1.8       cgd 		((struct freelist *)kbp->kb_last)->next = addr;
    691        1.8       cgd 	freep->next = NULL;
    692        1.8       cgd 	kbp->kb_last = addr;
    693      1.113        ad 	mutex_spin_exit(&malloc_lock);
    694  1.116.2.1      yamt #endif
    695       1.20       cgd }
    696       1.20       cgd 
    697       1.20       cgd /*
    698       1.20       cgd  * Change the size of a block of memory.
    699       1.20       cgd  */
    700       1.20       cgd void *
    701       1.77   thorpej realloc(void *curaddr, unsigned long newsize, struct malloc_type *ksp,
    702       1.77   thorpej     int flags)
    703       1.20       cgd {
    704       1.50  augustss 	struct kmemusage *kup;
    705       1.72   thorpej 	unsigned long cursize;
    706       1.20       cgd 	void *newaddr;
    707       1.20       cgd #ifdef DIAGNOSTIC
    708       1.20       cgd 	long alloc;
    709       1.20       cgd #endif
    710       1.20       cgd 
    711       1.20       cgd 	/*
    712       1.69     enami 	 * realloc() with a NULL pointer is the same as malloc().
    713       1.20       cgd 	 */
    714       1.20       cgd 	if (curaddr == NULL)
    715       1.77   thorpej 		return (malloc(newsize, ksp, flags));
    716       1.20       cgd 
    717       1.20       cgd 	/*
    718       1.69     enami 	 * realloc() with zero size is the same as free().
    719       1.20       cgd 	 */
    720       1.20       cgd 	if (newsize == 0) {
    721       1.77   thorpej 		free(curaddr, ksp);
    722       1.20       cgd 		return (NULL);
    723       1.20       cgd 	}
    724       1.59   thorpej 
    725       1.59   thorpej #ifdef LOCKDEBUG
    726       1.59   thorpej 	if ((flags & M_NOWAIT) == 0)
    727      1.102      yamt 		ASSERT_SLEEPABLE(NULL, "realloc");
    728       1.59   thorpej #endif
    729       1.20       cgd 
    730       1.20       cgd 	/*
    731       1.20       cgd 	 * Find out how large the old allocation was (and do some
    732       1.20       cgd 	 * sanity checking).
    733       1.20       cgd 	 */
    734       1.20       cgd 	kup = btokup(curaddr);
    735       1.20       cgd 	cursize = 1 << kup->ku_indx;
    736       1.20       cgd 
    737       1.20       cgd #ifdef DIAGNOSTIC
    738       1.20       cgd 	/*
    739       1.20       cgd 	 * Check for returns of data that do not point to the
    740       1.20       cgd 	 * beginning of the allocation.
    741       1.20       cgd 	 */
    742       1.49   thorpej 	if (cursize > PAGE_SIZE)
    743       1.49   thorpej 		alloc = addrmask[BUCKETINDX(PAGE_SIZE)];
    744       1.20       cgd 	else
    745       1.20       cgd 		alloc = addrmask[kup->ku_indx];
    746       1.20       cgd 	if (((u_long)curaddr & alloc) != 0)
    747       1.69     enami 		panic("realloc: "
    748       1.69     enami 		    "unaligned addr %p, size %ld, type %s, mask %ld\n",
    749       1.77   thorpej 		    curaddr, cursize, ksp->ks_shortdesc, alloc);
    750       1.20       cgd #endif /* DIAGNOSTIC */
    751       1.20       cgd 
    752       1.20       cgd 	if (cursize > MAXALLOCSAVE)
    753       1.20       cgd 		cursize = ctob(kup->ku_pagecnt);
    754       1.20       cgd 
    755       1.20       cgd 	/*
    756       1.20       cgd 	 * If we already actually have as much as they want, we're done.
    757       1.20       cgd 	 */
    758       1.20       cgd 	if (newsize <= cursize)
    759       1.20       cgd 		return (curaddr);
    760       1.20       cgd 
    761       1.20       cgd 	/*
    762       1.20       cgd 	 * Can't satisfy the allocation with the existing block.
    763       1.20       cgd 	 * Allocate a new one and copy the data.
    764       1.20       cgd 	 */
    765       1.77   thorpej 	newaddr = malloc(newsize, ksp, flags);
    766       1.51   thorpej 	if (__predict_false(newaddr == NULL)) {
    767       1.20       cgd 		/*
    768       1.69     enami 		 * malloc() failed, because flags included M_NOWAIT.
    769       1.20       cgd 		 * Return NULL to indicate that failure.  The old
    770       1.20       cgd 		 * pointer is still valid.
    771       1.20       cgd 		 */
    772       1.69     enami 		return (NULL);
    773       1.20       cgd 	}
    774       1.34     perry 	memcpy(newaddr, curaddr, cursize);
    775       1.20       cgd 
    776       1.20       cgd 	/*
    777       1.20       cgd 	 * We were successful: free the old allocation and return
    778       1.20       cgd 	 * the new one.
    779       1.20       cgd 	 */
    780       1.77   thorpej 	free(curaddr, ksp);
    781       1.20       cgd 	return (newaddr);
    782       1.70     enami }
    783       1.70     enami 
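/*
 * Illustrative only (not part of the original source): when realloc()
 * fails, the old block is left intact, so the caller must hold on to the
 * original pointer until the call succeeds.  "buf", "buflen" and "error"
 * are hypothetical.
 */
#if 0	/* example only */
	void *nbuf = realloc(buf, buflen * 2, M_TEMP, M_NOWAIT);

	if (nbuf == NULL) {
		error = ENOMEM;		/* buf is still valid here */
	} else {
		buf = nbuf;
		buflen *= 2;
	}
#endif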
    784       1.70     enami /*
    785       1.70     enami  * Round up size to the actual allocation size.
    786       1.70     enami  */
    787       1.70     enami unsigned long
    788       1.70     enami malloc_roundup(unsigned long size)
    789       1.70     enami {
    790       1.70     enami 
    791       1.70     enami 	if (size > MAXALLOCSAVE)
    792       1.70     enami 		return (roundup(size, PAGE_SIZE));
    793       1.70     enami 	else
    794       1.70     enami 		return (1 << BUCKETINDX(size));
    795        1.1       cgd }
    796        1.1       cgd 
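/*
 * Worked example (assuming MINBUCKET == 4, PAGE_SIZE == 4096 and
 * MAXALLOCSAVE == 2 * PAGE_SIZE, the common configuration): small requests
 * round up to the next power-of-two bucket, large ones to whole pages.
 *
 *	malloc_roundup(100)   == 128
 *	malloc_roundup(4000)  == 4096
 *	malloc_roundup(10000) == 12288	(3 pages)
 */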
    797        1.1       cgd /*
    798       1.77   thorpej  * Add a malloc type to the system.
    799       1.77   thorpej  */
    800       1.77   thorpej void
    801       1.77   thorpej malloc_type_attach(struct malloc_type *type)
    802       1.77   thorpej {
    803       1.77   thorpej 
    804  1.116.2.1      yamt #if 0
    805       1.77   thorpej 	if (nkmempages == 0)
    806       1.77   thorpej 		panic("malloc_type_attach: nkmempages == 0");
    807  1.116.2.1      yamt #endif
    808       1.77   thorpej 
    809       1.77   thorpej 	if (type->ks_magic != M_MAGIC)
    810       1.77   thorpej 		panic("malloc_type_attach: bad magic");
    811       1.77   thorpej 
    812       1.77   thorpej #ifdef DIAGNOSTIC
    813       1.77   thorpej 	{
    814       1.77   thorpej 		struct malloc_type *ksp;
    815       1.77   thorpej 		for (ksp = kmemstatistics; ksp != NULL; ksp = ksp->ks_next) {
    816       1.77   thorpej 			if (ksp == type)
    817       1.77   thorpej 				panic("malloc_type_attach: already on list");
    818       1.77   thorpej 		}
    819       1.77   thorpej 	}
    820       1.77   thorpej #endif
    821       1.77   thorpej 
    822       1.77   thorpej #ifdef KMEMSTATS
    823       1.77   thorpej 	if (type->ks_limit == 0)
    824       1.77   thorpej 		type->ks_limit = ((u_long)nkmempages << PAGE_SHIFT) * 6U / 10U;
    825       1.77   thorpej #else
    826       1.77   thorpej 	type->ks_limit = 0;
    827       1.77   thorpej #endif
    828       1.77   thorpej 
    829       1.77   thorpej 	type->ks_next = kmemstatistics;
    830       1.77   thorpej 	kmemstatistics = type;
    831       1.77   thorpej }
    832       1.77   thorpej 
    833       1.77   thorpej /*
    834       1.77   thorpej  * Remove a malloc type from the system.
    835       1.77   thorpej  */
    836       1.77   thorpej void
    837       1.77   thorpej malloc_type_detach(struct malloc_type *type)
    838       1.77   thorpej {
    839       1.77   thorpej 	struct malloc_type *ksp;
    840       1.77   thorpej 
    841       1.77   thorpej #ifdef DIAGNOSTIC
    842       1.77   thorpej 	if (type->ks_magic != M_MAGIC)
    843       1.77   thorpej 		panic("malloc_type_detach: bad magic");
    844       1.77   thorpej #endif
    845       1.77   thorpej 
    846       1.77   thorpej 	if (type == kmemstatistics)
    847       1.77   thorpej 		kmemstatistics = type->ks_next;
    848       1.77   thorpej 	else {
    849       1.77   thorpej 		for (ksp = kmemstatistics; ksp->ks_next != NULL;
    850       1.77   thorpej 		     ksp = ksp->ks_next) {
    851       1.77   thorpej 			if (ksp->ks_next == type) {
    852       1.77   thorpej 				ksp->ks_next = type->ks_next;
    853       1.77   thorpej 				break;
    854       1.77   thorpej 			}
    855       1.77   thorpej 		}
    856       1.77   thorpej #ifdef DIAGNOSTIC
    857       1.77   thorpej 		if (ksp->ks_next == NULL)
    858       1.77   thorpej 			panic("malloc_type_detach: not on list");
    859       1.77   thorpej #endif
    860       1.77   thorpej 	}
    861       1.77   thorpej 	type->ks_next = NULL;
    862       1.77   thorpej }
    863       1.77   thorpej 
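/*
 * Illustrative only (not part of the original source): code loaded at run
 * time attaches its malloc type before first use and detaches it again on
 * unload.  M_EXMOD is a hypothetical type defined with MALLOC_DEFINE().
 */
#if 0	/* example only */
	malloc_type_attach(M_EXMOD);	/* at module load */
	/* ... allocations with malloc(..., M_EXMOD, ...) ... */
	malloc_type_detach(M_EXMOD);	/* at module unload */
#endif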
    864       1.77   thorpej /*
    865       1.77   thorpej  * Set the limit on a malloc type.
    866       1.77   thorpej  */
    867       1.77   thorpej void
    868      1.105      yamt malloc_type_setlimit(struct malloc_type *type, u_long limit)
    869       1.77   thorpej {
    870       1.77   thorpej #ifdef KMEMSTATS
    871      1.113        ad 	mutex_spin_enter(&malloc_lock);
    872       1.77   thorpej 	type->ks_limit = limit;
    873      1.113        ad 	mutex_spin_exit(&malloc_lock);
    874       1.77   thorpej #endif
    875       1.77   thorpej }
    876       1.77   thorpej 
    877       1.77   thorpej /*
    878       1.49   thorpej  * Compute the number of pages that kmem_map will map, that is,
    879       1.49   thorpej  * the size of the kernel malloc arena.
    880       1.49   thorpej  */
    881       1.49   thorpej void
    882       1.69     enami kmeminit_nkmempages(void)
    883       1.49   thorpej {
    884       1.49   thorpej 	int npages;
    885       1.49   thorpej 
    886       1.49   thorpej 	if (nkmempages != 0) {
    887       1.49   thorpej 		/*
    888       1.49   thorpej 		 * It's already been set (by us being here before, or
    889       1.49   thorpej 		 * by patching or kernel config options), bail out now.
    890       1.49   thorpej 		 */
    891       1.49   thorpej 		return;
    892       1.49   thorpej 	}
    893       1.49   thorpej 
    894       1.94      yamt 	npages = physmem;
    895       1.49   thorpej 
    896       1.49   thorpej 	if (npages > NKMEMPAGES_MAX)
    897       1.49   thorpej 		npages = NKMEMPAGES_MAX;
    898       1.49   thorpej 
    899       1.49   thorpej 	if (npages < NKMEMPAGES_MIN)
    900       1.49   thorpej 		npages = NKMEMPAGES_MIN;
    901       1.49   thorpej 
    902       1.49   thorpej 	nkmempages = npages;
    903       1.49   thorpej }
    904       1.49   thorpej 
    905       1.49   thorpej /*
    906        1.1       cgd  * Initialize the kernel memory allocator
    907        1.1       cgd  */
    908       1.12  christos void
    909       1.69     enami kmeminit(void)
    910        1.1       cgd {
    911  1.116.2.1      yamt #if 0
    912       1.77   thorpej 	__link_set_decl(malloc_types, struct malloc_type);
    913       1.77   thorpej 	struct malloc_type * const *ksp;
    914       1.84     ragge 	vaddr_t kmb, kml;
    915       1.23       tls #ifdef KMEMSTATS
    916       1.50  augustss 	long indx;
    917       1.23       tls #endif
    918        1.1       cgd 
    919        1.1       cgd #if	((MAXALLOCSAVE & (MAXALLOCSAVE - 1)) != 0)
    920        1.1       cgd 		ERROR!_kmeminit:_MAXALLOCSAVE_not_power_of_2
    921        1.1       cgd #endif
    922        1.1       cgd #if	(MAXALLOCSAVE > MINALLOCSIZE * 32768)
    923        1.1       cgd 		ERROR!_kmeminit:_MAXALLOCSAVE_too_big
    924        1.1       cgd #endif
    925       1.47     ragge #if	(MAXALLOCSAVE < NBPG)
    926        1.1       cgd 		ERROR!_kmeminit:_MAXALLOCSAVE_too_small
    927        1.1       cgd #endif
    928       1.11       cgd 
    929       1.11       cgd 	if (sizeof(struct freelist) > (1 << MINBUCKET))
    930       1.11       cgd 		panic("minbucket too small/struct freelist too big");
    931       1.11       cgd 
    932      1.116        ad 	mutex_init(&malloc_lock, MUTEX_DEFAULT, IPL_VM);
    933      1.109        ad 
    934       1.49   thorpej 	/*
    935       1.49   thorpej 	 * Compute the number of kmem_map pages, if we have not
    936       1.49   thorpej 	 * done so already.
    937       1.49   thorpej 	 */
    938       1.49   thorpej 	kmeminit_nkmempages();
    939       1.49   thorpej 
    940       1.97      yamt 	kmemusage = (struct kmemusage *) uvm_km_alloc(kernel_map,
    941       1.97      yamt 	    (vsize_t)(nkmempages * sizeof(struct kmemusage)), 0,
    942       1.97      yamt 	    UVM_KMF_WIRED|UVM_KMF_ZERO);
    943       1.85      fvdl 	kmb = 0;
    944       1.84     ragge 	kmem_map = uvm_km_suballoc(kernel_map, &kmb,
    945       1.96     perry 	    &kml, ((vsize_t)nkmempages << PAGE_SHIFT),
    946      1.107   thorpej 	    VM_MAP_INTRSAFE, false, &kmem_map_store);
    947       1.93      yamt 	uvm_km_vacache_init(kmem_map, "kvakmem", 0);
    948       1.84     ragge 	kmembase = (char *)kmb;
    949       1.84     ragge 	kmemlimit = (char *)kml;
    950        1.1       cgd #ifdef KMEMSTATS
    951        1.1       cgd 	for (indx = 0; indx < MINBUCKET + 16; indx++) {
    952       1.49   thorpej 		if (1 << indx >= PAGE_SIZE)
    953       1.99       chs 			kmembuckets[indx].kb_elmpercl = 1;
    954        1.1       cgd 		else
    955       1.99       chs 			kmembuckets[indx].kb_elmpercl = PAGE_SIZE / (1 << indx);
    956       1.99       chs 		kmembuckets[indx].kb_highwat =
    957       1.99       chs 			5 * kmembuckets[indx].kb_elmpercl;
    958        1.1       cgd 	}
    959       1.62   thorpej #endif
    960       1.77   thorpej 
    961       1.77   thorpej 	/* Attach all of the statically-linked malloc types. */
    962       1.77   thorpej 	__link_set_foreach(ksp, malloc_types)
    963       1.77   thorpej 		malloc_type_attach(*ksp);
    964  1.116.2.1      yamt #endif
    965        1.1       cgd }
    966       1.39   thorpej 
    967       1.39   thorpej #ifdef DDB
    968       1.39   thorpej #include <ddb/db_output.h>
    969       1.39   thorpej 
    970       1.39   thorpej /*
    971       1.39   thorpej  * Dump kmem statistics from ddb.
    972       1.39   thorpej  *
    973       1.39   thorpej  * usage: call dump_kmemstats
    974       1.39   thorpej  */
    975       1.69     enami void	dump_kmemstats(void);
    976       1.39   thorpej 
    977       1.39   thorpej void
    978       1.69     enami dump_kmemstats(void)
    979       1.39   thorpej {
    980       1.39   thorpej #ifdef KMEMSTATS
    981       1.77   thorpej 	struct malloc_type *ksp;
    982       1.39   thorpej 
    983       1.77   thorpej 	for (ksp = kmemstatistics; ksp != NULL; ksp = ksp->ks_next) {
    984       1.77   thorpej 		if (ksp->ks_memuse == 0)
    985       1.77   thorpej 			continue;
    986       1.77   thorpej 		db_printf("%s%.*s %ld\n", ksp->ks_shortdesc,
    987       1.77   thorpej 		    (int)(20 - strlen(ksp->ks_shortdesc)),
    988       1.77   thorpej 		    "                    ",
    989       1.77   thorpej 		    ksp->ks_memuse);
    990       1.39   thorpej 	}
    991       1.39   thorpej #else
    992       1.39   thorpej 	db_printf("Kmem stats are not being collected.\n");
    993       1.39   thorpej #endif /* KMEMSTATS */
    994       1.39   thorpej }
    995       1.39   thorpej #endif /* DDB */
    996       1.82      manu 
    997       1.82      manu 
    998       1.82      manu #if 0
    999       1.96     perry /*
    1000       1.82      manu  * Diagnostic messages about "Data modified on
    1001       1.82      manu  * freelist" indicate memory corruption, but
    1002       1.82      manu  * they do not help track it down.
    1003       1.96     perry  * This function can be called at various places
    1004       1.82      manu  * to sanity-check malloc's freelist and discover
    1005       1.82      manu  * where the corruption takes place.
   1006       1.82      manu  */
   1007       1.82      manu int
   1008       1.82      manu freelist_sanitycheck(void) {
   1009       1.82      manu 	int i,j;
   1010       1.82      manu 	struct kmembuckets *kbp;
   1011       1.82      manu 	struct freelist *freep;
   1012       1.82      manu 	int rv = 0;
   1013       1.96     perry 
   1014       1.82      manu 	for (i = MINBUCKET; i <= MINBUCKET + 15; i++) {
   1015       1.99       chs 		kbp = &kmembuckets[i];
   1016       1.82      manu 		freep = (struct freelist *)kbp->kb_next;
   1017       1.82      manu 		j = 0;
   1018       1.82      manu 		while(freep) {
   1019       1.82      manu 			vm_map_lock(kmem_map);
   1020       1.82      manu 			rv = uvm_map_checkprot(kmem_map, (vaddr_t)freep,
   1021       1.96     perry 			    (vaddr_t)freep + sizeof(struct freelist),
   1022       1.82      manu 			    VM_PROT_WRITE);
   1023       1.82      manu 			vm_map_unlock(kmem_map);
   1024       1.82      manu 
   1025       1.82      manu 			if ((rv == 0) || (*(int *)freep != WEIRD_ADDR)) {
  1026       1.82      manu 				printf("bucket %i, chunk %d at %p modified\n",
   1027       1.82      manu 				    i, j, freep);
   1028       1.82      manu 				return 1;
   1029       1.82      manu 			}
   1030       1.82      manu 			freep = (struct freelist *)freep->next;
   1031       1.82      manu 			j++;
   1032       1.82      manu 		}
   1033       1.82      manu 	}
   1034       1.82      manu 
   1035       1.82      manu 	return 0;
   1036       1.82      manu }
   1037       1.82      manu #endif