/*	$NetBSD: kern_malloc.c,v 1.77 2003/02/01 06:23:43 thorpej Exp $	*/

/*
 * Copyright (c) 1996 Christopher G. Demetriou.  All rights reserved.
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.4 (Berkeley) 5/20/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_malloc.c,v 1.77 2003/02/01 06:23:43 thorpej Exp $");

#include "opt_lockdebug.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/systm.h>

#include <uvm/uvm_extern.h>

static struct vm_map kmem_map_store;
struct vm_map *kmem_map = NULL;

#include "opt_kmempages.h"

#ifdef NKMEMCLUSTERS
#error NKMEMCLUSTERS is obsolete; remove it from your kernel config file and use NKMEMPAGES instead or let the kernel auto-size
#endif

/*
 * Default number of pages in kmem_map.  We attempt to calculate this
 * at run-time, but allow it to be either patched or set in the kernel
 * config file.
 */
#ifndef NKMEMPAGES
#define	NKMEMPAGES	0
#endif
int	nkmempages = NKMEMPAGES;

/*
 * Defaults for lower- and upper-bounds for the kmem_map page count.
 * Can be overridden by kernel config options.
 */
#ifndef NKMEMPAGES_MIN
#define	NKMEMPAGES_MIN	NKMEMPAGES_MIN_DEFAULT
#endif

#ifndef NKMEMPAGES_MAX
#define	NKMEMPAGES_MAX	NKMEMPAGES_MAX_DEFAULT
#endif
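
/*
 * Illustrative config(1) lines that would override the defaults above;
 * the option names are real, the values below are examples only:
 *
 *	options NKMEMPAGES=16384
 *	options NKMEMPAGES_MIN=4096
 *	options NKMEMPAGES_MAX=65536
 */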

#include "opt_kmemstats.h"
#include "opt_malloclog.h"
#include "opt_malloc_debug.h"

struct kmembuckets bucket[MINBUCKET + 16];
struct kmemusage *kmemusage;
char *kmembase, *kmemlimit;

struct malloc_type *kmemstatistics;

#ifdef MALLOCLOG
#ifndef MALLOCLOGSIZE
#define	MALLOCLOGSIZE	100000
#endif

struct malloclog {
	void *addr;
	long size;
	struct malloc_type *type;
	int action;
	const char *file;
	long line;
} malloclog[MALLOCLOGSIZE];

long	malloclogptr;

static void
domlog(void *a, long size, struct malloc_type *type, int action,
    const char *file, long line)
{

	malloclog[malloclogptr].addr = a;
	malloclog[malloclogptr].size = size;
	malloclog[malloclogptr].type = type;
	malloclog[malloclogptr].action = action;
	malloclog[malloclogptr].file = file;
	malloclog[malloclogptr].line = line;
	malloclogptr++;
	if (malloclogptr >= MALLOCLOGSIZE)
		malloclogptr = 0;
}

static void
hitmlog(void *a)
{
	struct malloclog *lp;
	long l;

#define	PRT do { \
	if (malloclog[l].addr == a && malloclog[l].action) { \
		lp = &malloclog[l]; \
		printf("malloc log entry %ld:\n", l); \
		printf("\taddr = %p\n", lp->addr); \
		printf("\tsize = %ld\n", lp->size); \
		printf("\ttype = %s\n", lp->type->ks_shortdesc); \
		printf("\taction = %s\n", lp->action == 1 ? "alloc" : "free"); \
		printf("\tfile = %s\n", lp->file); \
		printf("\tline = %ld\n", lp->line); \
	} \
} while (/* CONSTCOND */0)

	for (l = malloclogptr; l < MALLOCLOGSIZE; l++)
		PRT;

	for (l = 0; l < malloclogptr; l++)
		PRT;
}
#endif /* MALLOCLOG */

#ifdef DIAGNOSTIC
/*
 * This structure provides a set of masks to catch unaligned frees.
 */
const long addrmask[] = { 0,
	0x00000001, 0x00000003, 0x00000007, 0x0000000f,
	0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
	0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
	0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
};

/*
 * The WEIRD_ADDR is used as known text to copy into free objects so
 * that modifications after frees can be detected.
 */
#define	WEIRD_ADDR	((uint32_t) 0xdeadbeef)
#ifdef DEBUG
#define	MAX_COPY	PAGE_SIZE
#else
#define	MAX_COPY	32
#endif

/*
 * Normally the freelist structure is used only to hold the list pointer
 * for free objects.  However, when running with diagnostics, the first
 * 8/16 bytes of the structure are unused except for diagnostic information,
 * and the free list pointer is at offset 8/16 in the structure.  Since the
 * first 8 bytes are the portion of the structure most often modified, this
 * helps to detect memory reuse problems and avoid free list corruption.
 */
struct freelist {
	uint32_t spare0;
#ifdef _LP64
	uint32_t spare1;		/* explicit padding */
#endif
	struct malloc_type *type;
	caddr_t	next;
};
#else /* !DIAGNOSTIC */
struct freelist {
	caddr_t	next;
};
#endif /* DIAGNOSTIC */
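
/*
 * Diagnostic freelist layout, for reference (offsets assume the usual
 * ILP32/LP64 models; exact values depend on the port's alignment rules):
 *
 *	ILP32:	spare0 at 0, type at 4, next at 8
 *	LP64:	spare0 at 0, spare1 at 4, type at 8, next at 16
 *
 * Keeping `next' out of the first 8 bytes is what lets a stray write to
 * the start of a freed object be caught before it corrupts the list.
 */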

/*
 * The following are standard, built-in malloc types and are not
 * specific to any one subsystem.
 */
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_DMAMAP, "DMA map", "bus_dma(9) structures");
MALLOC_DEFINE(M_FREE, "free", "should be on free list");
MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");
MALLOC_DEFINE(M_SOFTINTR, "softintr", "Softinterrupt structures");
MALLOC_DEFINE(M_TEMP, "temp", "misc. temporary data buffers");

/* XXX These should all be elsewhere. */
MALLOC_DEFINE(M_RTABLE, "routetbl", "routing tables");
MALLOC_DEFINE(M_FTABLE, "fragtbl", "fragment reassembly header");
MALLOC_DEFINE(M_UFSMNT, "UFS mount", "UFS mount structure");
MALLOC_DEFINE(M_NETADDR, "Export Host", "Export host address structure");
MALLOC_DEFINE(M_IPMOPTS, "ip_moptions", "internet multicast options");
MALLOC_DEFINE(M_IPMADDR, "in_multi", "internet multicast address");
MALLOC_DEFINE(M_MRTABLE, "mrt", "multicast routing tables");
MALLOC_DEFINE(M_1394DATA, "1394data", "IEEE 1394 data buffers");
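
/*
 * A subsystem declares its own type the same way; a minimal sketch
 * (M_FROBBUF and its strings are hypothetical):
 *
 *	MALLOC_DEFINE(M_FROBBUF, "frobbuf", "frobnicator buffers");
 *
 * Statically-linked definitions are collected via the malloc_types
 * link set and attached in kmeminit() below; dynamically loaded code
 * instead calls malloc_type_attach()/malloc_type_detach() itself.
 */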

/*
 * Allocate a block of memory
 */
#ifdef MALLOCLOG
void *
_malloc(unsigned long size, struct malloc_type *ksp, int flags,
    const char *file, long line)
#else
void *
malloc(unsigned long size, struct malloc_type *ksp, int flags)
#endif /* MALLOCLOG */
{
	struct kmembuckets *kbp;
	struct kmemusage *kup;
	struct freelist *freep;
	long indx, npg, allocsize;
	int s;
	caddr_t va, cp, savedlist;
#ifdef DIAGNOSTIC
	uint32_t *end, *lp;
	int copysize;
	const char *savedtype;
#endif

#ifdef LOCKDEBUG
	if ((flags & M_NOWAIT) == 0)
		simple_lock_only_held(NULL, "malloc");
#endif
#ifdef MALLOC_DEBUG
	if (debug_malloc(size, ksp, flags, (void **) &va))
		return ((void *) va);
#endif
	indx = BUCKETINDX(size);
	kbp = &bucket[indx];
	s = splvm();
#ifdef KMEMSTATS
	while (ksp->ks_memuse >= ksp->ks_limit) {
		if (flags & M_NOWAIT) {
			splx(s);
			return ((void *) NULL);
		}
		if (ksp->ks_limblocks < 65535)
			ksp->ks_limblocks++;
		tsleep((caddr_t)ksp, PSWP+2, ksp->ks_shortdesc, 0);
	}
	ksp->ks_size |= 1 << indx;
#endif
#ifdef DIAGNOSTIC
	copysize = 1 << indx < MAX_COPY ? 1 << indx : MAX_COPY;
#endif
	if (kbp->kb_next == NULL) {
		kbp->kb_last = NULL;
		if (size > MAXALLOCSAVE)
			allocsize = round_page(size);
		else
			allocsize = 1 << indx;
		npg = btoc(allocsize);
		va = (caddr_t) uvm_km_kmemalloc(kmem_map, NULL,
		    (vsize_t)ctob(npg),
		    ((flags & M_NOWAIT) ? UVM_KMF_NOWAIT : 0) |
		    ((flags & M_CANFAIL) ? UVM_KMF_CANFAIL : 0));
		if (__predict_false(va == NULL)) {
			/*
			 * Kmem_malloc() can return NULL, even if it can
			 * wait, if there is no map space available, because
			 * it can't fix that problem.  Neither can we,
			 * right now.  (We should release pages which
			 * are completely free and which are in buckets
			 * with too many free elements.)
			 */
			if ((flags & (M_NOWAIT|M_CANFAIL)) == 0)
				panic("malloc: out of space in kmem_map");
			splx(s);
			return (NULL);
		}
#ifdef KMEMSTATS
		kbp->kb_total += kbp->kb_elmpercl;
#endif
		kup = btokup(va);
		kup->ku_indx = indx;
		if (allocsize > MAXALLOCSAVE) {
			if (npg > 65535)
				panic("malloc: allocation too large");
			kup->ku_pagecnt = npg;
#ifdef KMEMSTATS
			ksp->ks_memuse += allocsize;
#endif
			goto out;
		}
#ifdef KMEMSTATS
		kup->ku_freecnt = kbp->kb_elmpercl;
		kbp->kb_totalfree += kbp->kb_elmpercl;
#endif
		/*
		 * Just in case we blocked while allocating memory,
		 * and someone else also allocated memory for this
		 * bucket, don't assume the list is still empty.
		 */
		savedlist = kbp->kb_next;
		kbp->kb_next = cp = va + (npg << PAGE_SHIFT) - allocsize;
		for (;;) {
			freep = (struct freelist *)cp;
#ifdef DIAGNOSTIC
			/*
			 * Copy in known text to detect modification
			 * after freeing.
			 */
			end = (uint32_t *)&cp[copysize];
			for (lp = (uint32_t *)cp; lp < end; lp++)
				*lp = WEIRD_ADDR;
			freep->type = M_FREE;
#endif /* DIAGNOSTIC */
			if (cp <= va)
				break;
			cp -= allocsize;
			freep->next = cp;
		}
		freep->next = savedlist;
		if (kbp->kb_last == NULL)
			kbp->kb_last = (caddr_t)freep;
	}
	va = kbp->kb_next;
	kbp->kb_next = ((struct freelist *)va)->next;
#ifdef DIAGNOSTIC
	freep = (struct freelist *)va;
	/* XXX potential to get garbage pointer here. */
	savedtype = freep->type->ks_shortdesc;
	if (kbp->kb_next) {
		int rv;
		vaddr_t addr = (vaddr_t)kbp->kb_next;

		vm_map_lock(kmem_map);
		rv = uvm_map_checkprot(kmem_map, addr,
		    addr + sizeof(struct freelist), VM_PROT_WRITE);
		vm_map_unlock(kmem_map);

		if (__predict_false(rv == 0)) {
			printf("Data modified on freelist: "
			    "word %ld of object %p size %ld previous type %s "
			    "(invalid addr %p)\n",
			    (long)((int32_t *)&kbp->kb_next - (int32_t *)kbp),
			    va, size, savedtype, kbp->kb_next);
#ifdef MALLOCLOG
			hitmlog(va);
#endif
			kbp->kb_next = NULL;
		}
	}

	/* Fill the fields that we've used with WEIRD_ADDR */
#ifdef _LP64
	freep->type = (struct malloc_type *)
	    (WEIRD_ADDR | (((u_long) WEIRD_ADDR) << 32));
#else
	freep->type = (struct malloc_type *) WEIRD_ADDR;
#endif
	end = (uint32_t *)&freep->next +
	    (sizeof(freep->next) / sizeof(uint32_t));
	for (lp = (uint32_t *)&freep->next; lp < end; lp++)
		*lp = WEIRD_ADDR;

	/* and check that the data hasn't been modified. */
	end = (uint32_t *)&va[copysize];
	for (lp = (uint32_t *)va; lp < end; lp++) {
		if (__predict_true(*lp == WEIRD_ADDR))
			continue;
		printf("Data modified on freelist: "
		    "word %ld of object %p size %ld previous type %s "
		    "(0x%x != 0x%x)\n",
		    (long)(lp - (uint32_t *)va), va, size,
		    savedtype, *lp, WEIRD_ADDR);
#ifdef MALLOCLOG
		hitmlog(va);
#endif
		break;
	}

	freep->spare0 = 0;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup = btokup(va);
	if (kup->ku_indx != indx)
		panic("malloc: wrong bucket");
	if (kup->ku_freecnt == 0)
		panic("malloc: lost data");
	kup->ku_freecnt--;
	kbp->kb_totalfree--;
	ksp->ks_memuse += 1 << indx;
out:
	kbp->kb_calls++;
	ksp->ks_inuse++;
	ksp->ks_calls++;
	if (ksp->ks_memuse > ksp->ks_maxused)
		ksp->ks_maxused = ksp->ks_memuse;
#else
out:
#endif
#ifdef MALLOCLOG
	domlog(va, size, ksp, 1, file, line);
#endif
	splx(s);
	if ((flags & M_ZERO) != 0)
		memset(va, 0, size);
	return ((void *) va);
}
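
/*
 * Minimal usage sketch (the caller and `sc' are hypothetical; the flags
 * are the real M_* values from <sys/malloc.h>):
 *
 *	struct softc *sc;
 *
 *	sc = malloc(sizeof(*sc), M_DEVBUF, M_NOWAIT | M_ZERO);
 *	if (sc == NULL)
 *		return (ENOMEM);
 *	...
 *	free(sc, M_DEVBUF);
 *
 * M_WAITOK callers may sleep here and need not check for NULL unless
 * they also pass M_CANFAIL.
 */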

/*
 * Free a block of memory allocated by malloc.
 */
#ifdef MALLOCLOG
void
_free(void *addr, struct malloc_type *ksp, const char *file, long line)
#else
void
free(void *addr, struct malloc_type *ksp)
#endif /* MALLOCLOG */
{
	struct kmembuckets *kbp;
	struct kmemusage *kup;
	struct freelist *freep;
	long size;
	int s;
#ifdef DIAGNOSTIC
	caddr_t cp;
	uint32_t *end, *lp;
	long alloc, copysize;
#endif

#ifdef MALLOC_DEBUG
	if (debug_free(addr, ksp))
		return;
#endif

#ifdef DIAGNOSTIC
	/*
	 * Ensure that we're free'ing something that we could
	 * have allocated in the first place.  That is, check
	 * to see that the address is within kmem_map.
	 */
	if (__predict_false((vaddr_t)addr < kmem_map->header.start ||
	    (vaddr_t)addr >= kmem_map->header.end))
		panic("free: addr %p not within kmem_map", addr);
#endif

	kup = btokup(addr);
	size = 1 << kup->ku_indx;
	kbp = &bucket[kup->ku_indx];
	s = splvm();
#ifdef MALLOCLOG
	domlog(addr, 0, ksp, 2, file, line);
#endif
#ifdef DIAGNOSTIC
	/*
	 * Check for returns of data that do not point to the
	 * beginning of the allocation.
	 */
	if (size > PAGE_SIZE)
		alloc = addrmask[BUCKETINDX(PAGE_SIZE)];
	else
		alloc = addrmask[kup->ku_indx];
	if (((u_long)addr & alloc) != 0)
		panic("free: unaligned addr %p, size %ld, type %s, mask %ld",
		    addr, size, ksp->ks_shortdesc, alloc);
#endif /* DIAGNOSTIC */
	if (size > MAXALLOCSAVE) {
		uvm_km_free(kmem_map, (vaddr_t)addr, ctob(kup->ku_pagecnt));
#ifdef KMEMSTATS
		size = kup->ku_pagecnt << PGSHIFT;
		ksp->ks_memuse -= size;
		kup->ku_indx = 0;
		kup->ku_pagecnt = 0;
		if (ksp->ks_memuse + size >= ksp->ks_limit &&
		    ksp->ks_memuse < ksp->ks_limit)
			wakeup((caddr_t)ksp);
		ksp->ks_inuse--;
		kbp->kb_total -= 1;
#endif
		splx(s);
		return;
	}
	freep = (struct freelist *)addr;
#ifdef DIAGNOSTIC
	/*
	 * Check for multiple frees.  Use a quick check to see if
	 * it looks free before laboriously searching the freelist.
	 */
	if (__predict_false(freep->spare0 == WEIRD_ADDR)) {
		for (cp = kbp->kb_next; cp;
		    cp = ((struct freelist *)cp)->next) {
			if (addr != cp)
				continue;
			printf("multiply freed item %p\n", addr);
#ifdef MALLOCLOG
			hitmlog(addr);
#endif
			panic("free: duplicated free");
		}
	}
#ifdef LOCKDEBUG
	/*
	 * Check if we're freeing a locked simple lock.
	 */
	simple_lock_freecheck(addr, (char *)addr + size);
#endif
	/*
	 * Copy in known text to detect modification after freeing
	 * and to make it look free.  Also, save the type being freed
	 * so we can list likely culprit if modification is detected
	 * when the object is reallocated.
	 */
	copysize = size < MAX_COPY ? size : MAX_COPY;
	end = (uint32_t *)&((caddr_t)addr)[copysize];
	for (lp = (uint32_t *)addr; lp < end; lp++)
		*lp = WEIRD_ADDR;
	freep->type = ksp;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup->ku_freecnt++;
	if (kup->ku_freecnt >= kbp->kb_elmpercl) {
		if (kup->ku_freecnt > kbp->kb_elmpercl)
			panic("free: multiple frees");
		else if (kbp->kb_totalfree > kbp->kb_highwat)
			kbp->kb_couldfree++;
	}
	kbp->kb_totalfree++;
	ksp->ks_memuse -= size;
	if (ksp->ks_memuse + size >= ksp->ks_limit &&
	    ksp->ks_memuse < ksp->ks_limit)
		wakeup((caddr_t)ksp);
	ksp->ks_inuse--;
#endif
	if (kbp->kb_next == NULL)
		kbp->kb_next = addr;
	else
		((struct freelist *)kbp->kb_last)->next = addr;
	freep->next = NULL;
	kbp->kb_last = addr;
	splx(s);
}

/*
 * Change the size of a block of memory.
 */
void *
realloc(void *curaddr, unsigned long newsize, struct malloc_type *ksp,
    int flags)
{
	struct kmemusage *kup;
	unsigned long cursize;
	void *newaddr;
#ifdef DIAGNOSTIC
	long alloc;
#endif

	/*
	 * realloc() with a NULL pointer is the same as malloc().
	 */
	if (curaddr == NULL)
		return (malloc(newsize, ksp, flags));

	/*
	 * realloc() with zero size is the same as free().
	 */
	if (newsize == 0) {
		free(curaddr, ksp);
		return (NULL);
	}

#ifdef LOCKDEBUG
	if ((flags & M_NOWAIT) == 0)
		simple_lock_only_held(NULL, "realloc");
#endif

	/*
	 * Find out how large the old allocation was (and do some
	 * sanity checking).
	 */
	kup = btokup(curaddr);
	cursize = 1 << kup->ku_indx;

#ifdef DIAGNOSTIC
	/*
	 * Check for returns of data that do not point to the
	 * beginning of the allocation.
	 */
	if (cursize > PAGE_SIZE)
		alloc = addrmask[BUCKETINDX(PAGE_SIZE)];
	else
		alloc = addrmask[kup->ku_indx];
	if (((u_long)curaddr & alloc) != 0)
		panic("realloc: "
		    "unaligned addr %p, size %ld, type %s, mask %ld\n",
		    curaddr, cursize, ksp->ks_shortdesc, alloc);
#endif /* DIAGNOSTIC */

	if (cursize > MAXALLOCSAVE)
		cursize = ctob(kup->ku_pagecnt);

	/*
	 * If we already actually have as much as they want, we're done.
	 */
	if (newsize <= cursize)
		return (curaddr);

	/*
	 * Can't satisfy the allocation with the existing block.
	 * Allocate a new one and copy the data.
	 */
	newaddr = malloc(newsize, ksp, flags);
	if (__predict_false(newaddr == NULL)) {
		/*
		 * malloc() failed, because flags included M_NOWAIT.
		 * Return NULL to indicate that failure.  The old
		 * pointer is still valid.
		 */
		return (NULL);
	}
	memcpy(newaddr, curaddr, cursize);

	/*
	 * We were successful: free the old allocation and return
	 * the new one.
	 */
	free(curaddr, ksp);
	return (newaddr);
}
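
/*
 * Illustrative growth pattern (`buf', `new', and `nitems' are
 * hypothetical caller state).  With M_NOWAIT the old pointer remains
 * valid on failure, so assign through a temporary:
 *
 *	new = realloc(buf, nitems * sizeof(*buf), M_TEMP, M_NOWAIT);
 *	if (new == NULL)
 *		return (ENOMEM);
 *	buf = new;
 */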

/*
 * Roundup size to the actual allocation size.
 */
unsigned long
malloc_roundup(unsigned long size)
{

	if (size > MAXALLOCSAVE)
		return (roundup(size, PAGE_SIZE));
	else
		return (1 << BUCKETINDX(size));
}
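
/*
 * For example (assuming 4 KB pages, a MINBUCKET of 4, and a
 * MAXALLOCSAVE of two pages; all three vary by port),
 * malloc_roundup(100) returns 128, the power-of-two bucket size, while
 * malloc_roundup(9000) exceeds MAXALLOCSAVE and returns the
 * page-rounded 12288.
 */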

/*
 * Add a malloc type to the system.
 */
void
malloc_type_attach(struct malloc_type *type)
{

	if (nkmempages == 0)
		panic("malloc_type_attach: nkmempages == 0");

	if (type->ks_magic != M_MAGIC)
		panic("malloc_type_attach: bad magic");

#ifdef DIAGNOSTIC
	{
		struct malloc_type *ksp;
		for (ksp = kmemstatistics; ksp != NULL; ksp = ksp->ks_next) {
			if (ksp == type)
				panic("malloc_type_attach: already on list");
		}
	}
#endif

#ifdef KMEMSTATS
	if (type->ks_limit == 0)
		type->ks_limit = ((u_long)nkmempages << PAGE_SHIFT) * 6U / 10U;
#else
	type->ks_limit = 0;
#endif

	type->ks_next = kmemstatistics;
	kmemstatistics = type;
}

/*
 * Remove a malloc type from the system.
 */
void
malloc_type_detach(struct malloc_type *type)
{
	struct malloc_type *ksp;

#ifdef DIAGNOSTIC
	if (type->ks_magic != M_MAGIC)
		panic("malloc_type_detach: bad magic");
#endif

	if (type == kmemstatistics)
		kmemstatistics = type->ks_next;
	else {
		for (ksp = kmemstatistics; ksp->ks_next != NULL;
		    ksp = ksp->ks_next) {
			if (ksp->ks_next == type) {
				ksp->ks_next = type->ks_next;
				break;
			}
		}
#ifdef DIAGNOSTIC
		if (ksp->ks_next == NULL)
			panic("malloc_type_detach: not on list");
#endif
	}
	type->ks_next = NULL;
}

/*
 * Set the limit on a malloc type.
 */
void
malloc_type_setlimit(struct malloc_type *type, u_long limit)
{
#ifdef KMEMSTATS
	int s;

	s = splvm();
	type->ks_limit = limit;
	splx(s);
#endif
}

/*
 * Compute the number of pages that kmem_map will map, that is,
 * the size of the kernel malloc arena.
 */
void
kmeminit_nkmempages(void)
{
	int npages;

	if (nkmempages != 0) {
		/*
		 * It's already been set (by us being here before, or
		 * by patching or kernel config options), bail out now.
		 */
		return;
	}

	/*
	 * We use the following (simple) formula:
	 *
	 *	- Starting point is physical memory / 4.
	 *
	 *	- Clamp it down to NKMEMPAGES_MAX.
	 *
	 *	- Round it up to NKMEMPAGES_MIN.
	 */
	npages = physmem / 4;

	if (npages > NKMEMPAGES_MAX)
		npages = NKMEMPAGES_MAX;

	if (npages < NKMEMPAGES_MIN)
		npages = NKMEMPAGES_MIN;

	nkmempages = npages;
}
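
/*
 * Worked example (illustrative numbers only): with 4 KB pages and
 * 128 MB of RAM, physmem is 32768 pages, so the starting point is
 * 8192 pages (a 32 MB kmem_map), which is then clamped to the port's
 * NKMEMPAGES_MIN/NKMEMPAGES_MAX bounds.
 */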

/*
 * Initialize the kernel memory allocator
 */
void
kmeminit(void)
{
	__link_set_decl(malloc_types, struct malloc_type);
	struct malloc_type * const *ksp;
#ifdef KMEMSTATS
	long indx;
#endif

#if	((MAXALLOCSAVE & (MAXALLOCSAVE - 1)) != 0)
		ERROR!_kmeminit:_MAXALLOCSAVE_not_power_of_2
#endif
#if	(MAXALLOCSAVE > MINALLOCSIZE * 32768)
		ERROR!_kmeminit:_MAXALLOCSAVE_too_big
#endif
#if	(MAXALLOCSAVE < NBPG)
		ERROR!_kmeminit:_MAXALLOCSAVE_too_small
#endif

	if (sizeof(struct freelist) > (1 << MINBUCKET))
		panic("minbucket too small/struct freelist too big");

	/*
	 * Compute the number of kmem_map pages, if we have not
	 * done so already.
	 */
	kmeminit_nkmempages();

	kmemusage = (struct kmemusage *) uvm_km_zalloc(kernel_map,
	    (vsize_t)(nkmempages * sizeof(struct kmemusage)));
	kmem_map = uvm_km_suballoc(kernel_map, (void *)&kmembase,
	    (void *)&kmemlimit, (vsize_t)(nkmempages << PAGE_SHIFT),
	    VM_MAP_INTRSAFE, FALSE, &kmem_map_store);
#ifdef KMEMSTATS
	for (indx = 0; indx < MINBUCKET + 16; indx++) {
		if (1 << indx >= PAGE_SIZE)
			bucket[indx].kb_elmpercl = 1;
		else
			bucket[indx].kb_elmpercl = PAGE_SIZE / (1 << indx);
		bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
	}
#endif

	/* Attach all of the statically-linked malloc types. */
	__link_set_foreach(ksp, malloc_types)
		malloc_type_attach(*ksp);

#ifdef MALLOC_DEBUG
	debug_malloc_init();
#endif
}

#ifdef DDB
#include <ddb/db_output.h>

/*
 * Dump kmem statistics from ddb.
 *
 * usage: call dump_kmemstats
 */
void	dump_kmemstats(void);

void
dump_kmemstats(void)
{
#ifdef KMEMSTATS
	struct malloc_type *ksp;

	for (ksp = kmemstatistics; ksp != NULL; ksp = ksp->ks_next) {
		if (ksp->ks_memuse == 0)
			continue;
		db_printf("%s%.*s %ld\n", ksp->ks_shortdesc,
		    (int)(20 - strlen(ksp->ks_shortdesc)),
850 " ",
		    ksp->ks_memuse);
	}
#else
	db_printf("Kmem stats are not being collected.\n");
#endif /* KMEMSTATS */
}
#endif /* DDB */