/*	$NetBSD: kern_malloc.c,v 1.53 2000/06/26 14:21:14 mrg Exp $	*/

/*
 * Copyright (c) 1996 Christopher G. Demetriou.  All rights reserved.
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.4 (Berkeley) 5/20/95
 */

#include "opt_lockdebug.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/map.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/systm.h>

#include <vm/vm.h>

#include <uvm/uvm_extern.h>

static struct vm_map_intrsafe kmem_map_store;
vm_map_t kmem_map = NULL;

#include "opt_kmempages.h"

#ifdef NKMEMCLUSTERS
#error NKMEMCLUSTERS is obsolete; remove it from your kernel config file and use NKMEMPAGES instead or let the kernel auto-size
#endif

/*
 * Default number of pages in kmem_map.  We attempt to calculate this
 * at run-time, but allow it to be either patched or set in the kernel
 * config file.
 */
#ifndef NKMEMPAGES
#define	NKMEMPAGES	0
#endif
int	nkmempages = NKMEMPAGES;

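/*
 * Example override in a kernel config file (the value is hypothetical):
 *
 *	options NKMEMPAGES=16384	# 16384 4kB pages = 64MB arena
 */
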
/*
 * Defaults for lower- and upper-bounds for the kmem_map page count.
 * Can be overridden by kernel config options.
 */
#ifndef	NKMEMPAGES_MIN
#define	NKMEMPAGES_MIN	NKMEMPAGES_MIN_DEFAULT
#endif

#ifndef NKMEMPAGES_MAX
#define	NKMEMPAGES_MAX	NKMEMPAGES_MAX_DEFAULT
#endif

#include "opt_kmemstats.h"
#include "opt_malloclog.h"

struct kmembuckets bucket[MINBUCKET + 16];
struct kmemstats kmemstats[M_LAST];
struct kmemusage *kmemusage;
char *kmembase, *kmemlimit;
const char *memname[] = INITKMEMNAMES;

#ifdef MALLOCLOG
#ifndef MALLOCLOGSIZE
#define	MALLOCLOGSIZE	100000
#endif

struct malloclog {
	void *addr;
	long size;
	int type;
	int action;
	const char *file;
	long line;
} malloclog[MALLOCLOGSIZE];

long	malloclogptr;

static void domlog __P((void *a, long size, int type, int action,
	const char *file, long line));
static void hitmlog __P((void *a));

static void
domlog(a, size, type, action, file, line)
	void *a;
	long size;
	int type;
	int action;
	const char *file;
	long line;
{

	malloclog[malloclogptr].addr = a;
	malloclog[malloclogptr].size = size;
	malloclog[malloclogptr].type = type;
	malloclog[malloclogptr].action = action;
	malloclog[malloclogptr].file = file;
	malloclog[malloclogptr].line = line;
	malloclogptr++;
	if (malloclogptr >= MALLOCLOGSIZE)
		malloclogptr = 0;
}

static void
hitmlog(a)
	void *a;
{
	struct malloclog *lp;
	long l;

#define	PRT \
	if (malloclog[l].addr == a && malloclog[l].action) { \
		lp = &malloclog[l]; \
		printf("malloc log entry %ld:\n", l); \
		printf("\taddr = %p\n", lp->addr); \
		printf("\tsize = %ld\n", lp->size); \
		printf("\ttype = %s\n", memname[lp->type]); \
		printf("\taction = %s\n", lp->action == 1 ? "alloc" : "free"); \
		printf("\tfile = %s\n", lp->file); \
		printf("\tline = %ld\n", lp->line); \
	}

	for (l = malloclogptr; l < MALLOCLOGSIZE; l++)
		PRT

	for (l = 0; l < malloclogptr; l++)
		PRT
}
#endif /* MALLOCLOG */

#ifdef DIAGNOSTIC
/*
 * This structure provides a set of masks to catch unaligned frees.
 */
long addrmask[] = { 0,
	0x00000001, 0x00000003, 0x00000007, 0x0000000f,
	0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
	0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
	0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
};

/*
 * The WEIRD_ADDR is used as known text to copy into free objects so
 * that modifications after frees can be detected.
 */
#define	WEIRD_ADDR	((unsigned) 0xdeadbeef)
#define	MAX_COPY	32
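
/*
 * For example: while an object sits on a free list, its first
 * min(size, MAX_COPY) bytes are filled with WEIRD_ADDR; when the
 * object is handed out again, malloc() verifies that the pattern
 * is intact and reports "Data modified on freelist" if it is not.
 */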

/*
 * Normally the freelist structure is used only to hold the list pointer
 * for free objects.  However, when running with diagnostics, the first
 * 8 bytes of the structure are unused except for diagnostic information,
 * and the free list pointer is at offset 8 in the structure.  Since the
 * first 8 bytes are the portion of the structure most often modified, this
 * helps to detect memory reuse problems and avoid free list corruption.
 */
struct freelist {
	int32_t	spare0;
	int16_t	type;
	int16_t	spare1;
	caddr_t	next;
};
#else /* !DIAGNOSTIC */
struct freelist {
	caddr_t	next;
};
#endif /* DIAGNOSTIC */

/*
 * Allocate a block of memory
 */
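/*
 * A minimal usage sketch (hypothetical caller; struct foo and the
 * M_TEMP type are only illustrative):
 *
 *	void *p = malloc(sizeof(struct foo), M_TEMP, M_NOWAIT);
 *	if (p == NULL)
 *		return;			(M_NOWAIT allocations may fail)
 *	...
 *	free(p, M_TEMP);		(pass the same type to free)
 *
 * Without M_NOWAIT the call may sleep, and it never returns NULL:
 * if kmem_map itself is exhausted it panics instead (see below).
 */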
#ifdef MALLOCLOG
void *
_malloc(size, type, flags, file, line)
	unsigned long size;
	int type, flags;
	const char *file;
	long line;
#else
void *
malloc(size, type, flags)
	unsigned long size;
	int type, flags;
#endif /* MALLOCLOG */
{
	struct kmembuckets *kbp;
	struct kmemusage *kup;
	struct freelist *freep;
	long indx, npg, allocsize;
	int s;
	caddr_t va, cp, savedlist;
#ifdef DIAGNOSTIC
	int32_t *end, *lp;
	int copysize;
	const char *savedtype;
#endif
#ifdef KMEMSTATS
	struct kmemstats *ksp = &kmemstats[type];

	if (__predict_false(((unsigned long)type) >= M_LAST))
		panic("malloc - bogus type");
#endif
	indx = BUCKETINDX(size);
	kbp = &bucket[indx];
	s = splmem();
#ifdef KMEMSTATS
	while (ksp->ks_memuse >= ksp->ks_limit) {
		if (flags & M_NOWAIT) {
			splx(s);
			return ((void *) NULL);
		}
		if (ksp->ks_limblocks < 65535)
			ksp->ks_limblocks++;
		tsleep((caddr_t)ksp, PSWP+2, memname[type], 0);
	}
	ksp->ks_size |= 1 << indx;
#endif
#ifdef DIAGNOSTIC
	copysize = 1 << indx < MAX_COPY ? 1 << indx : MAX_COPY;
#endif
	if (kbp->kb_next == NULL) {
		kbp->kb_last = NULL;
		if (size > MAXALLOCSAVE)
			allocsize = roundup(size, PAGE_SIZE);
		else
			allocsize = 1 << indx;
		npg = btoc(allocsize);
		va = (caddr_t) uvm_km_kmemalloc(kmem_map, uvmexp.kmem_object,
		    (vsize_t)ctob(npg),
		    (flags & M_NOWAIT) ? UVM_KMF_NOWAIT : 0);
		if (__predict_false(va == NULL)) {
			/*
			 * uvm_km_kmemalloc() can return NULL, even if it
			 * can wait, if there is no map space available,
			 * because it can't fix that problem.  Neither can
			 * we, right now.  (We should release pages which
			 * are completely free and which are in buckets
			 * with too many free elements.)
			 */
			if ((flags & M_NOWAIT) == 0)
				panic("malloc: out of space in kmem_map");
			splx(s);
			return ((void *) NULL);
		}
#ifdef KMEMSTATS
		kbp->kb_total += kbp->kb_elmpercl;
#endif
		kup = btokup(va);
		kup->ku_indx = indx;
		if (allocsize > MAXALLOCSAVE) {
			if (npg > 65535)
				panic("malloc: allocation too large");
			kup->ku_pagecnt = npg;
#ifdef KMEMSTATS
			ksp->ks_memuse += allocsize;
#endif
			goto out;
		}
#ifdef KMEMSTATS
		kup->ku_freecnt = kbp->kb_elmpercl;
		kbp->kb_totalfree += kbp->kb_elmpercl;
#endif
		/*
		 * Just in case we blocked while allocating memory,
		 * and someone else also allocated memory for this
		 * bucket, don't assume the list is still empty.
		 */
		savedlist = kbp->kb_next;
		kbp->kb_next = cp = va + (npg << PAGE_SHIFT) - allocsize;
		for (;;) {
			freep = (struct freelist *)cp;
#ifdef DIAGNOSTIC
			/*
			 * Copy in known text to detect modification
			 * after freeing.
			 */
			end = (int32_t *)&cp[copysize];
			for (lp = (int32_t *)cp; lp < end; lp++)
				*lp = WEIRD_ADDR;
			freep->type = M_FREE;
#endif /* DIAGNOSTIC */
			if (cp <= va)
				break;
			cp -= allocsize;
			freep->next = cp;
		}
		freep->next = savedlist;
		if (kbp->kb_last == NULL)
			kbp->kb_last = (caddr_t)freep;
	}
	va = kbp->kb_next;
	kbp->kb_next = ((struct freelist *)va)->next;
#ifdef DIAGNOSTIC
	freep = (struct freelist *)va;
	savedtype = (unsigned)freep->type < M_LAST ?
	    memname[freep->type] : "???";
	if (kbp->kb_next) {
		int rv;
		vaddr_t addr = (vaddr_t)kbp->kb_next;

		vm_map_lock(kmem_map);
		rv = uvm_map_checkprot(kmem_map, addr,
		    addr + sizeof(struct freelist), VM_PROT_WRITE);
		vm_map_unlock(kmem_map);

		if (__predict_false(rv == 0)) {
			printf(
			    "%s %ld of object %p size %ld %s %s (invalid addr %p)\n",
			    "Data modified on freelist: word",
			    (long)((int32_t *)&kbp->kb_next - (int32_t *)kbp),
			    va, size, "previous type", savedtype, kbp->kb_next);
#ifdef MALLOCLOG
			hitmlog(va);
#endif
			kbp->kb_next = NULL;
		}
	}

	/* Fill the fields that we've used with WEIRD_ADDR */
#if BYTE_ORDER == BIG_ENDIAN
	freep->type = WEIRD_ADDR >> 16;
#endif
#if BYTE_ORDER == LITTLE_ENDIAN
	freep->type = (short)WEIRD_ADDR;
#endif
	end = (int32_t *)&freep->next +
	    (sizeof(freep->next) / sizeof(int32_t));
	for (lp = (int32_t *)&freep->next; lp < end; lp++)
		*lp = WEIRD_ADDR;

	/* and check that the data hasn't been modified. */
	end = (int32_t *)&va[copysize];
	for (lp = (int32_t *)va; lp < end; lp++) {
		if (__predict_true(*lp == WEIRD_ADDR))
			continue;
		printf("%s %ld of object %p size %ld %s %s (0x%x != 0x%x)\n",
		    "Data modified on freelist: word",
		    (long)(lp - (int32_t *)va), va, size, "previous type",
		    savedtype, *lp, WEIRD_ADDR);
#ifdef MALLOCLOG
		hitmlog(va);
#endif
		break;
	}

	freep->spare0 = 0;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup = btokup(va);
	if (kup->ku_indx != indx)
		panic("malloc: wrong bucket");
	if (kup->ku_freecnt == 0)
		panic("malloc: lost data");
	kup->ku_freecnt--;
	kbp->kb_totalfree--;
	ksp->ks_memuse += 1 << indx;
out:
	kbp->kb_calls++;
	ksp->ks_inuse++;
	ksp->ks_calls++;
	if (ksp->ks_memuse > ksp->ks_maxused)
		ksp->ks_maxused = ksp->ks_memuse;
#else
out:
#endif
#ifdef MALLOCLOG
	domlog(va, size, type, 1, file, line);
#endif
	splx(s);
	return ((void *) va);
}

/*
 * Free a block of memory allocated by malloc.
 */
#ifdef MALLOCLOG
void
_free(addr, type, file, line)
	void *addr;
	int type;
	const char *file;
	long line;
#else
void
free(addr, type)
	void *addr;
	int type;
#endif /* MALLOCLOG */
{
	struct kmembuckets *kbp;
	struct kmemusage *kup;
	struct freelist *freep;
	long size;
	int s;
#ifdef DIAGNOSTIC
	caddr_t cp;
	int32_t *end, *lp;
	long alloc, copysize;
#endif
#ifdef KMEMSTATS
	struct kmemstats *ksp = &kmemstats[type];
#endif

#ifdef DIAGNOSTIC
	/*
	 * Ensure that we're freeing something that we could
	 * have allocated in the first place.  That is, check
	 * to see that the address is within kmem_map.
	 */
	if (__predict_false((vaddr_t)addr < kmem_map->header.start ||
	    (vaddr_t)addr >= kmem_map->header.end))
		panic("free: addr %p not within kmem_map", addr);
#endif

	kup = btokup(addr);
	size = 1 << kup->ku_indx;
	kbp = &bucket[kup->ku_indx];
	s = splmem();
#ifdef MALLOCLOG
	domlog(addr, 0, type, 2, file, line);
#endif
#ifdef DIAGNOSTIC
	/*
	 * Check for returns of data that do not point to the
	 * beginning of the allocation.
	 */
	if (size > PAGE_SIZE)
		alloc = addrmask[BUCKETINDX(PAGE_SIZE)];
	else
		alloc = addrmask[kup->ku_indx];
	if (((u_long)addr & alloc) != 0)
		panic("free: unaligned addr %p, size %ld, type %s, mask %ld",
		    addr, size, memname[type], alloc);
#endif /* DIAGNOSTIC */
	if (size > MAXALLOCSAVE) {
		uvm_km_free(kmem_map, (vaddr_t)addr, ctob(kup->ku_pagecnt));
#ifdef KMEMSTATS
		size = kup->ku_pagecnt << PGSHIFT;
		ksp->ks_memuse -= size;
		kup->ku_indx = 0;
		kup->ku_pagecnt = 0;
		if (ksp->ks_memuse + size >= ksp->ks_limit &&
		    ksp->ks_memuse < ksp->ks_limit)
			wakeup((caddr_t)ksp);
		ksp->ks_inuse--;
		kbp->kb_total -= 1;
#endif
		splx(s);
		return;
	}
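	/*
	 * Smaller allocations go back onto their bucket's free list:
	 * poisoned first (under DIAGNOSTIC), then appended at the
	 * tail below.
	 */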
	freep = (struct freelist *)addr;
#ifdef DIAGNOSTIC
	/*
	 * Check for multiple frees.  Use a quick check to see if
	 * it looks free before laboriously searching the freelist.
	 */
	if (__predict_false(freep->spare0 == WEIRD_ADDR)) {
		for (cp = kbp->kb_next; cp;
		    cp = ((struct freelist *)cp)->next) {
			if (addr != cp)
				continue;
			printf("multiply freed item %p\n", addr);
#ifdef MALLOCLOG
			hitmlog(addr);
#endif
			panic("free: duplicated free");
		}
	}
#ifdef LOCKDEBUG
	/*
	 * Check if we're freeing a locked simple lock.
	 */
	simple_lock_freecheck(addr, (char *)addr + size);
#endif
	/*
	 * Copy in known text to detect modification after freeing
	 * and to make it look free.  Also, save the type being freed
	 * so we can list likely culprit if modification is detected
	 * when the object is reallocated.
	 */
	copysize = size < MAX_COPY ? size : MAX_COPY;
	end = (int32_t *)&((caddr_t)addr)[copysize];
	for (lp = (int32_t *)addr; lp < end; lp++)
		*lp = WEIRD_ADDR;
	freep->type = type;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup->ku_freecnt++;
	if (kup->ku_freecnt >= kbp->kb_elmpercl) {
		if (kup->ku_freecnt > kbp->kb_elmpercl)
			panic("free: multiple frees");
		else if (kbp->kb_totalfree > kbp->kb_highwat)
			kbp->kb_couldfree++;
	}
	kbp->kb_totalfree++;
	ksp->ks_memuse -= size;
	if (ksp->ks_memuse + size >= ksp->ks_limit &&
	    ksp->ks_memuse < ksp->ks_limit)
		wakeup((caddr_t)ksp);
	ksp->ks_inuse--;
#endif
	if (kbp->kb_next == NULL)
		kbp->kb_next = addr;
	else
		((struct freelist *)kbp->kb_last)->next = addr;
	freep->next = NULL;
	kbp->kb_last = addr;
	splx(s);
}

/*
 * Change the size of a block of memory.
 */
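/*
 * A hedged usage sketch (hypothetical caller; M_TEMP is only
 * illustrative).  Because the old pointer remains valid when
 * realloc() fails, keep it until the call succeeds:
 *
 *	void *np = realloc(p, newsize, M_TEMP, M_NOWAIT);
 *	if (np != NULL)
 *		p = np;			(otherwise p is still valid)
 *
 * realloc(NULL, size, ...) behaves like malloc(), and
 * realloc(p, 0, ...) behaves like free() and returns NULL.
 */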
void *
realloc(curaddr, newsize, type, flags)
	void *curaddr;
	unsigned long newsize;
	int type, flags;
{
	struct kmemusage *kup;
	long cursize;
	void *newaddr;
#ifdef DIAGNOSTIC
	long alloc;
#endif

	/*
	 * Realloc() with a NULL pointer is the same as malloc().
	 */
	if (curaddr == NULL)
		return (malloc(newsize, type, flags));

	/*
	 * Realloc() with zero size is the same as free().
	 */
	if (newsize == 0) {
		free(curaddr, type);
		return (NULL);
	}

	/*
	 * Find out how large the old allocation was (and do some
	 * sanity checking).
	 */
	kup = btokup(curaddr);
	cursize = 1 << kup->ku_indx;

#ifdef DIAGNOSTIC
	/*
	 * Check for returns of data that do not point to the
	 * beginning of the allocation.
	 */
	if (cursize > PAGE_SIZE)
		alloc = addrmask[BUCKETINDX(PAGE_SIZE)];
	else
		alloc = addrmask[kup->ku_indx];
	if (((u_long)curaddr & alloc) != 0)
		panic("realloc: unaligned addr %p, size %ld, type %s, mask %ld",
		    curaddr, cursize, memname[type], alloc);
#endif /* DIAGNOSTIC */

	if (cursize > MAXALLOCSAVE)
		cursize = ctob(kup->ku_pagecnt);

	/*
	 * If we already actually have as much as they want, we're done.
	 */
	if (newsize <= cursize)
		return (curaddr);

	/*
	 * Can't satisfy the allocation with the existing block.
	 * Allocate a new one and copy the data.
	 */
	newaddr = malloc(newsize, type, flags);
	if (__predict_false(newaddr == NULL)) {
		/*
		 * Malloc() failed, because flags included M_NOWAIT.
		 * Return NULL to indicate that failure.  The old
		 * pointer is still valid.
		 */
		return (NULL);
	}
	memcpy(newaddr, curaddr, cursize);

	/*
	 * We were successful: free the old allocation and return
	 * the new one.
	 */
	free(curaddr, type);
	return (newaddr);
}

/*
 * Compute the number of pages that kmem_map will map, that is,
 * the size of the kernel malloc arena.
 */
void
kmeminit_nkmempages()
{
	int npages;

	if (nkmempages != 0) {
		/*
		 * It's already been set (by us being here before, or
		 * by patching or kernel config options), bail out now.
		 */
		return;
	}

	/*
	 * We use the following (simple) formula:
	 *
	 * - Starting point is physical memory / 4.
	 *
	 * - Clamp it down to NKMEMPAGES_MAX.
	 *
	 * - Round it up to NKMEMPAGES_MIN.
	 */
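	/*
	 * Worked example (hypothetical machine with 4kB pages and
	 * 64MB of RAM): physmem is 16384 pages, so the starting
	 * point is 4096 pages (a 16MB arena), which is then clamped
	 * to the machine-dependent NKMEMPAGES_MIN/NKMEMPAGES_MAX
	 * bounds.
	 */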
	npages = physmem / 4;

	if (npages > NKMEMPAGES_MAX)
		npages = NKMEMPAGES_MAX;

	if (npages < NKMEMPAGES_MIN)
		npages = NKMEMPAGES_MIN;

	nkmempages = npages;
}

/*
 * Initialize the kernel memory allocator
 */
void
kmeminit()
{
#ifdef KMEMSTATS
	long indx;
#endif

#if ((MAXALLOCSAVE & (MAXALLOCSAVE - 1)) != 0)
		ERROR!_kmeminit:_MAXALLOCSAVE_not_power_of_2
#endif
#if (MAXALLOCSAVE > MINALLOCSIZE * 32768)
		ERROR!_kmeminit:_MAXALLOCSAVE_too_big
#endif
#if (MAXALLOCSAVE < NBPG)
		ERROR!_kmeminit:_MAXALLOCSAVE_too_small
#endif
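
	/*
	 * The bare ERROR!... identifiers above are intentional: if
	 * one of the #if conditions fires, the undeclared identifier
	 * forces a compile-time error with a readable message.
	 */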

	if (sizeof(struct freelist) > (1 << MINBUCKET))
		panic("minbucket too small/struct freelist too big");

	/*
	 * Compute the number of kmem_map pages, if we have not
	 * done so already.
	 */
	kmeminit_nkmempages();

	kmemusage = (struct kmemusage *) uvm_km_zalloc(kernel_map,
	    (vsize_t)(nkmempages * sizeof(struct kmemusage)));
	kmem_map = uvm_km_suballoc(kernel_map, (vaddr_t *)&kmembase,
	    (vaddr_t *)&kmemlimit, (vsize_t)(nkmempages << PAGE_SHIFT),
	    VM_MAP_INTRSAFE, FALSE, &kmem_map_store.vmi_map);
#ifdef KMEMSTATS
	for (indx = 0; indx < MINBUCKET + 16; indx++) {
		if (1 << indx >= PAGE_SIZE)
			bucket[indx].kb_elmpercl = 1;
		else
			bucket[indx].kb_elmpercl = PAGE_SIZE / (1 << indx);
		bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
	}
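	/*
	 * Allow each malloc type to consume at most 60% of the
	 * arena; malloc() above puts callers to sleep (or fails
	 * them, for M_NOWAIT) once a type reaches its ks_limit.
	 */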
	for (indx = 0; indx < M_LAST; indx++)
		kmemstats[indx].ks_limit = (nkmempages << PAGE_SHIFT) * 6 / 10;
#endif
}

#ifdef DDB
#include <ddb/db_output.h>

/*
 * Dump kmem statistics from ddb.
 *
 * usage: call dump_kmemstats
 */
void	dump_kmemstats __P((void));

void
dump_kmemstats()
{
#ifdef KMEMSTATS
	const char *name;
	int i;

	for (i = 0; i < M_LAST; i++) {
		name = memname[i] ? memname[i] : "";

		db_printf("%2d %s%.*s %ld\n", i, name,
		    (int)(20 - strlen(name)), "                    ",
		    kmemstats[i].ks_memuse);
	}
#else
	db_printf("Kmem stats are not being collected.\n");
#endif /* KMEMSTATS */
}
#endif /* DDB */