/*	$NetBSD: kern_malloc.c,v 1.111 2007/04/19 11:03:44 yamt Exp $	*/

/*
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.4 (Berkeley) 5/20/95
 */

/*
 * Copyright (c) 1996 Christopher G. Demetriou.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.4 (Berkeley) 5/20/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_malloc.c,v 1.111 2007/04/19 11:03:44 yamt Exp $");

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/debug.h>
#include <sys/mutex.h>

#include <uvm/uvm_extern.h>

static struct vm_map_kernel kmem_map_store;
struct vm_map *kmem_map = NULL;

#include "opt_kmempages.h"

#ifdef NKMEMCLUSTERS
#error NKMEMCLUSTERS is obsolete; remove it from your kernel config file and use NKMEMPAGES instead or let the kernel auto-size
#endif

/*
 * Default number of pages in kmem_map.  We attempt to calculate this
 * at run-time, but allow it to be either patched or set in the kernel
 * config file.
 */
#ifndef NKMEMPAGES
#define	NKMEMPAGES	0
#endif
int	nkmempages = NKMEMPAGES;

/*
 * Defaults for lower- and upper-bounds for the kmem_map page count.
 * Can be overridden by kernel config options.
 */
#ifndef	NKMEMPAGES_MIN
#define	NKMEMPAGES_MIN	NKMEMPAGES_MIN_DEFAULT
#endif

#ifndef NKMEMPAGES_MAX
#define	NKMEMPAGES_MAX	NKMEMPAGES_MAX_DEFAULT
#endif

#include "opt_kmemstats.h"
#include "opt_malloclog.h"
#include "opt_malloc_debug.h"

#define	MINALLOCSIZE	(1 << MINBUCKET)
#define	BUCKETINDX(size) \
	((size) <= (MINALLOCSIZE * 128) \
		? (size) <= (MINALLOCSIZE * 8) \
			? (size) <= (MINALLOCSIZE * 2) \
				? (size) <= (MINALLOCSIZE * 1) \
					? (MINBUCKET + 0) \
					: (MINBUCKET + 1) \
				: (size) <= (MINALLOCSIZE * 4) \
					? (MINBUCKET + 2) \
					: (MINBUCKET + 3) \
			: (size) <= (MINALLOCSIZE * 32) \
				? (size) <= (MINALLOCSIZE * 16) \
					? (MINBUCKET + 4) \
					: (MINBUCKET + 5) \
				: (size) <= (MINALLOCSIZE * 64) \
					? (MINBUCKET + 6) \
					: (MINBUCKET + 7) \
		: (size) <= (MINALLOCSIZE * 2048) \
			? (size) <= (MINALLOCSIZE * 512) \
				? (size) <= (MINALLOCSIZE * 256) \
					? (MINBUCKET + 8) \
					: (MINBUCKET + 9) \
				: (size) <= (MINALLOCSIZE * 1024) \
					? (MINBUCKET + 10) \
					: (MINBUCKET + 11) \
			: (size) <= (MINALLOCSIZE * 8192) \
				? (size) <= (MINALLOCSIZE * 4096) \
					? (MINBUCKET + 12) \
					: (MINBUCKET + 13) \
				: (size) <= (MINALLOCSIZE * 16384) \
					? (MINBUCKET + 14) \
					: (MINBUCKET + 15))
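
/*
 * Illustrative examples, assuming the usual MINBUCKET of 4 (i.e. a
 * 16-byte minimum allocation):
 *
 *	BUCKETINDX(1)   == 4	-> 16-byte bucket
 *	BUCKETINDX(16)  == 4	-> 16-byte bucket
 *	BUCKETINDX(17)  == 5	-> 32-byte bucket
 *	BUCKETINDX(100) == 7	-> 128-byte bucket
 *
 * The macro is a hand-unrolled binary search over the power-of-two
 * buckets, so the index of a compile-time-constant size folds to a
 * constant.
 */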

/*
 * Array of descriptors that describe the contents of each page
 */
struct kmemusage {
	short ku_indx;		/* bucket index */
	union {
		u_short freecnt;/* for small allocations, free pieces in page */
		u_short pagecnt;/* for large allocations, pages alloced */
	} ku_un;
};
#define	ku_freecnt ku_un.freecnt
#define	ku_pagecnt ku_un.pagecnt

struct kmembuckets kmembuckets[MINBUCKET + 16];
struct kmemusage *kmemusage;
char *kmembase, *kmemlimit;

#ifdef DEBUG
static void *malloc_freecheck;
#endif

/*
 * Turn virtual addresses into kmem map indices
 */
#define	btokup(addr)	(&kmemusage[((char *)(addr) - kmembase) >> PGSHIFT])
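
/*
 * Every page in kmem_map has exactly one kmemusage slot, so btokup()
 * recovers the bucket (or page count) of an allocation from its
 * address alone; no per-object header is needed.  A sketch of the
 * idiom used by free() and realloc() below:
 *
 *	kup = btokup(addr);
 *	size = 1 << kup->ku_indx;	(small allocations)
 *	pages = kup->ku_pagecnt;	(large allocations)
 */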

struct malloc_type *kmemstatistics;

#ifdef MALLOCLOG
#ifndef MALLOCLOGSIZE
#define	MALLOCLOGSIZE	100000
#endif

struct malloclog {
	void *addr;
	long size;
	struct malloc_type *type;
	int action;
	const char *file;
	long line;
} malloclog[MALLOCLOGSIZE];

long	malloclogptr;

static void
domlog(void *a, long size, struct malloc_type *type, int action,
    const char *file, long line)
{

	malloclog[malloclogptr].addr = a;
	malloclog[malloclogptr].size = size;
	malloclog[malloclogptr].type = type;
	malloclog[malloclogptr].action = action;
	malloclog[malloclogptr].file = file;
	malloclog[malloclogptr].line = line;
	malloclogptr++;
	if (malloclogptr >= MALLOCLOGSIZE)
		malloclogptr = 0;
}

static void
hitmlog(void *a)
{
	struct malloclog *lp;
	long l;

#define	PRT do { \
	lp = &malloclog[l]; \
	if (lp->addr == a && lp->action) { \
		printf("malloc log entry %ld:\n", l); \
		printf("\taddr = %p\n", lp->addr); \
		printf("\tsize = %ld\n", lp->size); \
		printf("\ttype = %s\n", lp->type->ks_shortdesc); \
		printf("\taction = %s\n", lp->action == 1 ? "alloc" : "free"); \
		printf("\tfile = %s\n", lp->file); \
		printf("\tline = %ld\n", lp->line); \
	} \
} while (/* CONSTCOND */0)

	for (l = malloclogptr; l < MALLOCLOGSIZE; l++)
		PRT;

	for (l = 0; l < malloclogptr; l++)
		PRT;
#undef PRT
}
#endif /* MALLOCLOG */

#ifdef DIAGNOSTIC
/*
 * This structure provides a set of masks to catch unaligned frees.
 */
const long addrmask[] = { 0,
	0x00000001, 0x00000003, 0x00000007, 0x0000000f,
	0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
	0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
	0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
};

/*
 * The WEIRD_ADDR is used as known text to copy into free objects so
 * that modifications after frees can be detected.
 */
#define	WEIRD_ADDR	((uint32_t) 0xdeadbeef)
#ifdef DEBUG
#define	MAX_COPY	PAGE_SIZE
#else
#define	MAX_COPY	32
#endif

/*
 * Normally the freelist structure is used only to hold the list pointer
 * for free objects.  However, when running with diagnostics, the first
 * 8/16 bytes of the structure are unused except for diagnostic information,
 * and the free list pointer is at offset 8/16 in the structure.  Since the
 * first 8 bytes are the portion of the structure most often modified, this
 * helps to detect memory reuse problems and avoid free list corruption.
 */
struct freelist {
	uint32_t spare0;
#ifdef _LP64
	uint32_t spare1;		/* explicit padding */
#endif
	struct malloc_type *type;
	void *	next;
};
#else /* !DIAGNOSTIC */
struct freelist {
	void *	next;
};
#endif /* DIAGNOSTIC */
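
/*
 * Resulting layout on an LP64 platform with DIAGNOSTIC (a sketch
 * derived from the definition above):
 *
 *	offset  0: spare0	(WEIRD_ADDR pattern while free)
 *	offset  4: spare1	(padding)
 *	offset  8: type		(type of the last owner)
 *	offset 16: next		(free-list linkage)
 *
 * Keeping "next" out of the first bytes means a use-after-free write
 * is likely to hit the sentinel words and be detected before the
 * free-list linkage itself is corrupted.
 */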

/*
 * The following are standard, built-in malloc types and are not
 * specific to any subsystem.
 */
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_DMAMAP, "DMA map", "bus_dma(9) structures");
MALLOC_DEFINE(M_FREE, "free", "should be on free list");
MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");
MALLOC_DEFINE(M_SOFTINTR, "softintr", "Softinterrupt structures");
MALLOC_DEFINE(M_TEMP, "temp", "misc. temporary data buffers");

/* XXX These should all be elsewhere. */
MALLOC_DEFINE(M_RTABLE, "routetbl", "routing tables");
MALLOC_DEFINE(M_FTABLE, "fragtbl", "fragment reassembly header");
MALLOC_DEFINE(M_UFSMNT, "UFS mount", "UFS mount structure");
MALLOC_DEFINE(M_NETADDR, "Export Host", "Export host address structure");
MALLOC_DEFINE(M_IPMOPTS, "ip_moptions", "internet multicast options");
MALLOC_DEFINE(M_IPMADDR, "in_multi", "internet multicast address");
MALLOC_DEFINE(M_MRTABLE, "mrt", "multicast routing tables");
MALLOC_DEFINE(M_BWMETER, "bwmeter", "multicast upcall bw meters");
MALLOC_DEFINE(M_1394DATA, "1394data", "IEEE 1394 data buffers");
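
/*
 * Subsystems define additional types the same way; a hypothetical
 * driver, for example (M_FOODEV is an invented name, shown only to
 * illustrate the pattern):
 *
 *	MALLOC_DEFINE(M_FOODEV, "foodev", "foo device structures");
 *	...
 *	sc = malloc(sizeof(*sc), M_FOODEV, M_WAITOK | M_ZERO);
 *	...
 *	free(sc, M_FOODEV);
 *
 * Passing the same type to malloc() and free() is what keeps the
 * per-type statistics below consistent.
 */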

kmutex_t malloc_lock;

/*
 * Allocate a block of memory
 */
#ifdef MALLOCLOG
void *
_malloc(unsigned long size, struct malloc_type *ksp, int flags,
    const char *file, long line)
#else
void *
malloc(unsigned long size, struct malloc_type *ksp, int flags)
#endif /* MALLOCLOG */
{
	struct kmembuckets *kbp;
	struct kmemusage *kup;
	struct freelist *freep;
	long indx, npg, allocsize;
	char *va, *cp, *savedlist;
#ifdef DIAGNOSTIC
	uint32_t *end, *lp;
	int copysize;
#endif

#ifdef LOCKDEBUG
	if ((flags & M_NOWAIT) == 0)
		ASSERT_SLEEPABLE(NULL, "malloc");
#endif
#ifdef MALLOC_DEBUG
	if (debug_malloc(size, ksp, flags, (void *) &va)) {
		if (va != 0)
			FREECHECK_OUT(&malloc_freecheck, (void *)va);
		return ((void *) va);
	}
#endif
	indx = BUCKETINDX(size);
	kbp = &kmembuckets[indx];
	mutex_enter(&malloc_lock);
#ifdef KMEMSTATS
	while (ksp->ks_memuse >= ksp->ks_limit) {
		if (flags & M_NOWAIT) {
			mutex_exit(&malloc_lock);
			return ((void *) NULL);
		}
		if (ksp->ks_limblocks < 65535)
			ksp->ks_limblocks++;
		mtsleep((void *)ksp, PSWP+2, ksp->ks_shortdesc, 0,
		    &malloc_lock);
	}
	ksp->ks_size |= 1 << indx;
#endif
#ifdef DIAGNOSTIC
	copysize = 1 << indx < MAX_COPY ? 1 << indx : MAX_COPY;
#endif
	if (kbp->kb_next == NULL) {
		int s;
		kbp->kb_last = NULL;
		if (size > MAXALLOCSAVE)
			allocsize = round_page(size);
		else
			allocsize = 1 << indx;
		npg = btoc(allocsize);
		mutex_exit(&malloc_lock);
		s = splvm();
		va = (void *) uvm_km_alloc(kmem_map,
		    (vsize_t)ctob(npg), 0,
		    ((flags & M_NOWAIT) ? UVM_KMF_NOWAIT : 0) |
		    ((flags & M_CANFAIL) ? UVM_KMF_CANFAIL : 0) |
		    UVM_KMF_WIRED);
		splx(s);
		if (__predict_false(va == NULL)) {
			/*
			 * Kmem_malloc() can return NULL, even if it can
			 * wait, if there is no map space available, because
			 * it can't fix that problem.  Neither can we,
			 * right now.  (We should release pages which
			 * are completely free and which are in kmembuckets
			 * with too many free elements.)
			 */
			if ((flags & (M_NOWAIT|M_CANFAIL)) == 0)
				panic("malloc: out of space in kmem_map");
			return (NULL);
		}
		mutex_enter(&malloc_lock);
#ifdef KMEMSTATS
		kbp->kb_total += kbp->kb_elmpercl;
#endif
		kup = btokup(va);
		kup->ku_indx = indx;
		if (allocsize > MAXALLOCSAVE) {
			if (npg > 65535)
				panic("malloc: allocation too large");
			kup->ku_pagecnt = npg;
#ifdef KMEMSTATS
			ksp->ks_memuse += allocsize;
#endif
			goto out;
		}
#ifdef KMEMSTATS
		kup->ku_freecnt = kbp->kb_elmpercl;
		kbp->kb_totalfree += kbp->kb_elmpercl;
#endif
		/*
		 * Just in case we blocked while allocating memory,
		 * and someone else also allocated memory for this
		 * kmembucket, don't assume the list is still empty.
		 */
		savedlist = kbp->kb_next;
		kbp->kb_next = cp = va + (npg << PAGE_SHIFT) - allocsize;
		for (;;) {
			freep = (struct freelist *)cp;
#ifdef DIAGNOSTIC
			/*
			 * Copy in known text to detect modification
			 * after freeing.
			 */
			end = (uint32_t *)&cp[copysize];
			for (lp = (uint32_t *)cp; lp < end; lp++)
				*lp = WEIRD_ADDR;
			freep->type = M_FREE;
#endif /* DIAGNOSTIC */
			if (cp <= va)
				break;
			cp -= allocsize;
			freep->next = cp;
		}
		freep->next = savedlist;
		if (kbp->kb_last == NULL)
			kbp->kb_last = (void *)freep;
	}
	va = kbp->kb_next;
	kbp->kb_next = ((struct freelist *)va)->next;
#ifdef DIAGNOSTIC
	freep = (struct freelist *)va;
	/* XXX potential to get garbage pointer here. */
	if (kbp->kb_next) {
		int rv;
		vaddr_t addr = (vaddr_t)kbp->kb_next;

		vm_map_lock(kmem_map);
		rv = uvm_map_checkprot(kmem_map, addr,
		    addr + sizeof(struct freelist), VM_PROT_WRITE);
		vm_map_unlock(kmem_map);

		if (__predict_false(rv == 0)) {
			printf("Data modified on freelist: "
			    "word %ld of object %p size %ld previous type %s "
			    "(invalid addr %p)\n",
			    (long)((int32_t *)&kbp->kb_next - (int32_t *)kbp),
			    va, size, "foo", kbp->kb_next);
#ifdef MALLOCLOG
			hitmlog(va);
#endif
			kbp->kb_next = NULL;
		}
	}

	/* Fill the fields that we've used with WEIRD_ADDR */
#ifdef _LP64
	freep->type = (struct malloc_type *)
	    (WEIRD_ADDR | (((u_long) WEIRD_ADDR) << 32));
#else
	freep->type = (struct malloc_type *) WEIRD_ADDR;
#endif
	end = (uint32_t *)&freep->next +
	    (sizeof(freep->next) / sizeof(int32_t));
	for (lp = (uint32_t *)&freep->next; lp < end; lp++)
		*lp = WEIRD_ADDR;

	/* and check that the data hasn't been modified. */
	end = (uint32_t *)&va[copysize];
	for (lp = (uint32_t *)va; lp < end; lp++) {
		if (__predict_true(*lp == WEIRD_ADDR))
			continue;
		printf("Data modified on freelist: "
		    "word %ld of object %p size %ld previous type %s "
		    "(0x%x != 0x%x)\n",
		    (long)(lp - (uint32_t *)va), va, size,
		    "bar", *lp, WEIRD_ADDR);
#ifdef MALLOCLOG
		hitmlog(va);
#endif
		break;
	}

	freep->spare0 = 0;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup = btokup(va);
	if (kup->ku_indx != indx)
		panic("malloc: wrong bucket");
	if (kup->ku_freecnt == 0)
		panic("malloc: lost data");
	kup->ku_freecnt--;
	kbp->kb_totalfree--;
	ksp->ks_memuse += 1 << indx;
out:
	kbp->kb_calls++;
	ksp->ks_inuse++;
	ksp->ks_calls++;
	if (ksp->ks_memuse > ksp->ks_maxused)
		ksp->ks_maxused = ksp->ks_memuse;
#else
out:
#endif
#ifdef MALLOCLOG
	domlog(va, size, ksp, 1, file, line);
#endif
	mutex_exit(&malloc_lock);
	if ((flags & M_ZERO) != 0)
		memset(va, 0, size);
	FREECHECK_OUT(&malloc_freecheck, (void *)va);
	return ((void *) va);
}
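
/*
 * Summary of the contract implemented above: with M_NOWAIT or
 * M_CANFAIL set, malloc() may return NULL and the caller must check
 * for it; a plain M_WAITOK request either succeeds or panics when
 * kmem_map is exhausted.  A typical guarded call (illustrative):
 *
 *	p = malloc(len, M_TEMP, M_NOWAIT);
 *	if (p == NULL)
 *		return ENOMEM;
 */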

/*
 * Free a block of memory allocated by malloc.
 */
#ifdef MALLOCLOG
void
_free(void *addr, struct malloc_type *ksp, const char *file, long line)
#else
void
free(void *addr, struct malloc_type *ksp)
#endif /* MALLOCLOG */
{
	struct kmembuckets *kbp;
	struct kmemusage *kup;
	struct freelist *freep;
	long size;
#ifdef DIAGNOSTIC
	void *cp;
	int32_t *end, *lp;
	long alloc, copysize;
#endif

	FREECHECK_IN(&malloc_freecheck, addr);

#ifdef MALLOC_DEBUG
	if (debug_free(addr, ksp))
		return;
#endif

#ifdef DIAGNOSTIC
	/*
	 * Ensure that we're freeing something that we could
	 * have allocated in the first place.  That is, check
	 * to see that the address is within kmem_map.
	 */
	if (__predict_false((vaddr_t)addr < vm_map_min(kmem_map) ||
	    (vaddr_t)addr >= vm_map_max(kmem_map)))
		panic("free: addr %p not within kmem_map", addr);
#endif

	kup = btokup(addr);
	size = 1 << kup->ku_indx;
	kbp = &kmembuckets[kup->ku_indx];
	mutex_enter(&malloc_lock);
#ifdef MALLOCLOG
	domlog(addr, 0, ksp, 2, file, line);
#endif
#ifdef DIAGNOSTIC
	/*
	 * Check for returns of data that do not point to the
	 * beginning of the allocation.
	 */
	if (size > PAGE_SIZE)
		alloc = addrmask[BUCKETINDX(PAGE_SIZE)];
	else
		alloc = addrmask[kup->ku_indx];
	if (((u_long)addr & alloc) != 0)
		panic("free: unaligned addr %p, size %ld, type %s, mask %ld",
		    addr, size, ksp->ks_shortdesc, alloc);
#endif /* DIAGNOSTIC */
	if (size > MAXALLOCSAVE) {
		uvm_km_free(kmem_map, (vaddr_t)addr, ctob(kup->ku_pagecnt),
		    UVM_KMF_WIRED);
#ifdef KMEMSTATS
		size = kup->ku_pagecnt << PGSHIFT;
		ksp->ks_memuse -= size;
		kup->ku_indx = 0;
		kup->ku_pagecnt = 0;
		if (ksp->ks_memuse + size >= ksp->ks_limit &&
		    ksp->ks_memuse < ksp->ks_limit)
			wakeup((void *)ksp);
#ifdef DIAGNOSTIC
		if (ksp->ks_inuse == 0)
			panic("free 1: inuse 0, probable double free");
#endif
		ksp->ks_inuse--;
		kbp->kb_total -= 1;
#endif
		mutex_exit(&malloc_lock);
		return;
	}
	freep = (struct freelist *)addr;
#ifdef DIAGNOSTIC
	/*
	 * Check for multiple frees.  Use a quick check to see if
	 * it looks free before laboriously searching the freelist.
	 */
	if (__predict_false(freep->spare0 == WEIRD_ADDR)) {
		for (cp = kbp->kb_next; cp;
		    cp = ((struct freelist *)cp)->next) {
			if (addr != cp)
				continue;
			printf("multiply freed item %p\n", addr);
#ifdef MALLOCLOG
			hitmlog(addr);
#endif
			panic("free: duplicated free");
		}
	}
#ifdef LOCKDEBUG
	/*
	 * Check if we're freeing a locked simple lock.
	 */
	simple_lock_freecheck(addr, (char *)addr + size);
#endif
	/*
	 * Copy in known text to detect modification after freeing
	 * and to make it look free.  Also, save the type being freed
	 * so we can list likely culprit if modification is detected
	 * when the object is reallocated.
	 */
	copysize = size < MAX_COPY ? size : MAX_COPY;
	end = (int32_t *)&((char *)addr)[copysize];
	for (lp = (int32_t *)addr; lp < end; lp++)
		*lp = WEIRD_ADDR;
	freep->type = ksp;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup->ku_freecnt++;
	if (kup->ku_freecnt >= kbp->kb_elmpercl) {
		if (kup->ku_freecnt > kbp->kb_elmpercl)
			panic("free: multiple frees");
		else if (kbp->kb_totalfree > kbp->kb_highwat)
			kbp->kb_couldfree++;
	}
	kbp->kb_totalfree++;
	ksp->ks_memuse -= size;
	if (ksp->ks_memuse + size >= ksp->ks_limit &&
	    ksp->ks_memuse < ksp->ks_limit)
		wakeup((void *)ksp);
#ifdef DIAGNOSTIC
	if (ksp->ks_inuse == 0)
		panic("free 2: inuse 0, probable double free");
#endif
	ksp->ks_inuse--;
#endif
	if (kbp->kb_next == NULL)
		kbp->kb_next = addr;
	else
		((struct freelist *)kbp->kb_last)->next = addr;
	freep->next = NULL;
	kbp->kb_last = addr;
	mutex_exit(&malloc_lock);
}
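
/*
 * Note that freed chunks are appended at kb_last rather than pushed
 * at kb_next, so each bucket recycles its free list FIFO; under
 * DIAGNOSTIC this maximizes the time the WEIRD_ADDR pattern sits in
 * memory before reuse, improving the odds that a stale write to a
 * freed object is caught.
 */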

/*
 * Change the size of a block of memory.
 */
void *
realloc(void *curaddr, unsigned long newsize, struct malloc_type *ksp,
    int flags)
{
	struct kmemusage *kup;
	unsigned long cursize;
	void *newaddr;
#ifdef DIAGNOSTIC
	long alloc;
#endif

	/*
	 * realloc() with a NULL pointer is the same as malloc().
	 */
	if (curaddr == NULL)
		return (malloc(newsize, ksp, flags));

	/*
	 * realloc() with zero size is the same as free().
	 */
	if (newsize == 0) {
		free(curaddr, ksp);
		return (NULL);
	}

#ifdef LOCKDEBUG
	if ((flags & M_NOWAIT) == 0)
		ASSERT_SLEEPABLE(NULL, "realloc");
#endif

	/*
	 * Find out how large the old allocation was (and do some
	 * sanity checking).
	 */
	kup = btokup(curaddr);
	cursize = 1 << kup->ku_indx;

#ifdef DIAGNOSTIC
	/*
	 * Check for returns of data that do not point to the
	 * beginning of the allocation.
	 */
	if (cursize > PAGE_SIZE)
		alloc = addrmask[BUCKETINDX(PAGE_SIZE)];
	else
		alloc = addrmask[kup->ku_indx];
	if (((u_long)curaddr & alloc) != 0)
		panic("realloc: "
		    "unaligned addr %p, size %ld, type %s, mask %ld\n",
		    curaddr, cursize, ksp->ks_shortdesc, alloc);
#endif /* DIAGNOSTIC */

	if (cursize > MAXALLOCSAVE)
		cursize = ctob(kup->ku_pagecnt);

	/*
	 * If we already actually have as much as they want, we're done.
	 */
	if (newsize <= cursize)
		return (curaddr);

	/*
	 * Can't satisfy the allocation with the existing block.
	 * Allocate a new one and copy the data.
	 */
	newaddr = malloc(newsize, ksp, flags);
	if (__predict_false(newaddr == NULL)) {
		/*
		 * malloc() failed, because flags included M_NOWAIT.
		 * Return NULL to indicate that failure.  The old
		 * pointer is still valid.
		 */
		return (NULL);
	}
	memcpy(newaddr, curaddr, cursize);

	/*
	 * We were successful: free the old allocation and return
	 * the new one.
	 */
	free(curaddr, ksp);
	return (newaddr);
}

/*
 * Round up size to the actual allocation size.
 */
unsigned long
malloc_roundup(unsigned long size)
{

	if (size > MAXALLOCSAVE)
		return (roundup(size, PAGE_SIZE));
	else
		return (1 << BUCKETINDX(size));
}
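
/*
 * For example, with 16-byte minimum buckets, 4 KB pages, and the
 * usual MAXALLOCSAVE of two pages:
 *
 *	malloc_roundup(100)   == 128	(rounds up to the 128-byte bucket)
 *	malloc_roundup(40000) == 40960	(> MAXALLOCSAVE, rounds to pages)
 *
 * Callers can use this to size buffers to the space malloc() would
 * set aside anyway.
 */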

/*
 * Add a malloc type to the system.
 */
void
malloc_type_attach(struct malloc_type *type)
{

	if (nkmempages == 0)
		panic("malloc_type_attach: nkmempages == 0");

	if (type->ks_magic != M_MAGIC)
		panic("malloc_type_attach: bad magic");

#ifdef DIAGNOSTIC
	{
		struct malloc_type *ksp;
		for (ksp = kmemstatistics; ksp != NULL; ksp = ksp->ks_next) {
			if (ksp == type)
				panic("malloc_type_attach: already on list");
		}
	}
#endif

#ifdef KMEMSTATS
	if (type->ks_limit == 0)
		type->ks_limit = ((u_long)nkmempages << PAGE_SHIFT) * 6U / 10U;
#else
	type->ks_limit = 0;
#endif

	type->ks_next = kmemstatistics;
	kmemstatistics = type;
}

/*
 * Remove a malloc type from the system.
 */
void
malloc_type_detach(struct malloc_type *type)
{
	struct malloc_type *ksp;

#ifdef DIAGNOSTIC
	if (type->ks_magic != M_MAGIC)
		panic("malloc_type_detach: bad magic");
#endif

	if (type == kmemstatistics)
		kmemstatistics = type->ks_next;
	else {
		for (ksp = kmemstatistics; ksp->ks_next != NULL;
		     ksp = ksp->ks_next) {
			if (ksp->ks_next == type) {
				ksp->ks_next = type->ks_next;
				break;
			}
		}
#ifdef DIAGNOSTIC
		if (ksp->ks_next == NULL)
			panic("malloc_type_detach: not on list");
#endif
	}
	type->ks_next = NULL;
}
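
/*
 * Code that is not part of the static malloc_types link set (for
 * example, a dynamically loaded module) brackets the lifetime of its
 * allocations with these two functions; M_EXAMPLE is an invented name
 * shown only to illustrate the pattern:
 *
 *	malloc_type_attach(M_EXAMPLE);		(before the first malloc)
 *	...
 *	malloc_type_detach(M_EXAMPLE);		(after the last free)
 */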

/*
 * Set the limit on a malloc type.
 */
void
malloc_type_setlimit(struct malloc_type *type, u_long limit)
{
#ifdef KMEMSTATS
	mutex_enter(&malloc_lock);
	type->ks_limit = limit;
	mutex_exit(&malloc_lock);
#endif
}

/*
 * Compute the number of pages that kmem_map will map, that is,
 * the size of the kernel malloc arena.
 */
void
kmeminit_nkmempages(void)
{
	int npages;

	if (nkmempages != 0) {
		/*
		 * It's already been set (by us being here before, or
		 * by patching or kernel config options), bail out now.
		 */
		return;
	}

	npages = physmem;

	if (npages > NKMEMPAGES_MAX)
		npages = NKMEMPAGES_MAX;

	if (npages < NKMEMPAGES_MIN)
		npages = NKMEMPAGES_MIN;

	nkmempages = npages;
}
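
/*
 * Worked example: on a machine with 128 MB of RAM and 4 KB pages,
 * physmem is 32768, so the initial estimate is 32768 pages, which is
 * then clamped to the port's [NKMEMPAGES_MIN, NKMEMPAGES_MAX] range.
 * Setting NKMEMPAGES in the kernel config skips the calculation
 * entirely.
 */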

/*
 * Initialize the kernel memory allocator
 */
void
kmeminit(void)
{
	__link_set_decl(malloc_types, struct malloc_type);
	struct malloc_type * const *ksp;
	vaddr_t kmb, kml;
#ifdef KMEMSTATS
	long indx;
#endif

#if ((MAXALLOCSAVE & (MAXALLOCSAVE - 1)) != 0)
		ERROR!_kmeminit:_MAXALLOCSAVE_not_power_of_2
#endif
#if (MAXALLOCSAVE > MINALLOCSIZE * 32768)
		ERROR!_kmeminit:_MAXALLOCSAVE_too_big
#endif
#if (MAXALLOCSAVE < NBPG)
		ERROR!_kmeminit:_MAXALLOCSAVE_too_small
#endif

	if (sizeof(struct freelist) > (1 << MINBUCKET))
		panic("minbucket too small/struct freelist too big");

	mutex_init(&malloc_lock, MUTEX_DRIVER, IPL_VM);

	/*
	 * Compute the number of kmem_map pages, if we have not
	 * done so already.
	 */
	kmeminit_nkmempages();

	kmemusage = (struct kmemusage *) uvm_km_alloc(kernel_map,
	    (vsize_t)(nkmempages * sizeof(struct kmemusage)), 0,
	    UVM_KMF_WIRED|UVM_KMF_ZERO);
	kmb = 0;
	kmem_map = uvm_km_suballoc(kernel_map, &kmb,
	    &kml, ((vsize_t)nkmempages << PAGE_SHIFT),
	    VM_MAP_INTRSAFE, false, &kmem_map_store);
	uvm_km_vacache_init(kmem_map, "kvakmem", 0);
	kmembase = (char *)kmb;
	kmemlimit = (char *)kml;
#ifdef KMEMSTATS
	for (indx = 0; indx < MINBUCKET + 16; indx++) {
		if (1 << indx >= PAGE_SIZE)
			kmembuckets[indx].kb_elmpercl = 1;
		else
			kmembuckets[indx].kb_elmpercl = PAGE_SIZE / (1 << indx);
		kmembuckets[indx].kb_highwat =
		    5 * kmembuckets[indx].kb_elmpercl;
	}
#endif

	/* Attach all of the statically-linked malloc types. */
	__link_set_foreach(ksp, malloc_types)
		malloc_type_attach(*ksp);

#ifdef MALLOC_DEBUG
	debug_malloc_init();
#endif
}

#ifdef DDB
#include <ddb/db_output.h>

/*
 * Dump kmem statistics from ddb.
 *
 * usage: call dump_kmemstats
 */
void	dump_kmemstats(void);

void
dump_kmemstats(void)
{
#ifdef KMEMSTATS
	struct malloc_type *ksp;

	for (ksp = kmemstatistics; ksp != NULL; ksp = ksp->ks_next) {
		if (ksp->ks_memuse == 0)
			continue;
		db_printf("%s%.*s %ld\n", ksp->ks_shortdesc,
		    (int)(20 - strlen(ksp->ks_shortdesc)),
		    "                    ",
		    ksp->ks_memuse);
	}
#else
	db_printf("Kmem stats are not being collected.\n");
#endif /* KMEMSTATS */
}
#endif /* DDB */


#if 0
/*
 * Diagnostic messages about "Data modified on
 * freelist" indicate memory corruption, but
 * they do not help in tracking it down.
 * This function can be called at various places
 * to sanity check malloc's freelist and discover
 * where the corruption takes place.
 */
int
freelist_sanitycheck(void) {
	int i, j;
	struct kmembuckets *kbp;
	struct freelist *freep;
	int rv = 0;

	for (i = MINBUCKET; i <= MINBUCKET + 15; i++) {
		kbp = &kmembuckets[i];
		freep = (struct freelist *)kbp->kb_next;
		j = 0;
		while (freep) {
			vm_map_lock(kmem_map);
			rv = uvm_map_checkprot(kmem_map, (vaddr_t)freep,
			    (vaddr_t)freep + sizeof(struct freelist),
			    VM_PROT_WRITE);
			vm_map_unlock(kmem_map);

			if ((rv == 0) || (*(int *)freep != WEIRD_ADDR)) {
				printf("bucket %i, chunk %d at %p modified\n",
				    i, j, freep);
				return 1;
			}
			freep = (struct freelist *)freep->next;
			j++;
		}
	}

	return 0;
}
#endif