/*	$NetBSD: kern_malloc.c,v 1.99.2.7 2007/12/07 17:32:45 yamt Exp $	*/

/*
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.4 (Berkeley) 5/20/95
 */

/*
 * Copyright (c) 1996 Christopher G. Demetriou.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.4 (Berkeley) 5/20/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_malloc.c,v 1.99.2.7 2007/12/07 17:32:45 yamt Exp $");

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/debug.h>
#include <sys/mutex.h>
#include <sys/lockdebug.h>

#include <uvm/uvm_extern.h>

static struct vm_map_kernel kmem_map_store;
struct vm_map *kmem_map = NULL;

#include "opt_kmempages.h"

#ifdef NKMEMCLUSTERS
#error NKMEMCLUSTERS is obsolete; remove it from your kernel config file and use NKMEMPAGES instead or let the kernel auto-size
#endif

/*
 * Default number of pages in kmem_map.  We attempt to calculate this
 * at run-time, but allow it to be either patched or set in the kernel
 * config file.
 */
#ifndef NKMEMPAGES
#define	NKMEMPAGES	0
#endif
int	nkmempages = NKMEMPAGES;

/*
 * Defaults for lower- and upper-bounds for the kmem_map page count.
 * Can be overridden by kernel config options.
 */
#ifndef NKMEMPAGES_MIN
#define	NKMEMPAGES_MIN	NKMEMPAGES_MIN_DEFAULT
#endif

#ifndef NKMEMPAGES_MAX
#define	NKMEMPAGES_MAX	NKMEMPAGES_MAX_DEFAULT
#endif
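
/*
 * Illustrative kernel config usage (the numbers are hypothetical,
 * not recommendations); any of these end up in opt_kmempages.h:
 *
 *	options NKMEMPAGES=16384	# pin the arena at 16384 pages
 *	options NKMEMPAGES_MIN=2048	# lower bound for auto-sizing
 *	options NKMEMPAGES_MAX=32768	# upper bound for auto-sizing
 */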

#include "opt_kmemstats.h"
#include "opt_malloclog.h"
#include "opt_malloc_debug.h"

#define	MINALLOCSIZE	(1 << MINBUCKET)
#define	BUCKETINDX(size) \
	((size) <= (MINALLOCSIZE * 128) \
		? (size) <= (MINALLOCSIZE * 8) \
			? (size) <= (MINALLOCSIZE * 2) \
				? (size) <= (MINALLOCSIZE * 1) \
					? (MINBUCKET + 0) \
					: (MINBUCKET + 1) \
				: (size) <= (MINALLOCSIZE * 4) \
					? (MINBUCKET + 2) \
					: (MINBUCKET + 3) \
			: (size) <= (MINALLOCSIZE * 32) \
				? (size) <= (MINALLOCSIZE * 16) \
					? (MINBUCKET + 4) \
					: (MINBUCKET + 5) \
				: (size) <= (MINALLOCSIZE * 64) \
					? (MINBUCKET + 6) \
					: (MINBUCKET + 7) \
		: (size) <= (MINALLOCSIZE * 2048) \
			? (size) <= (MINALLOCSIZE * 512) \
				? (size) <= (MINALLOCSIZE * 256) \
					? (MINBUCKET + 8) \
					: (MINBUCKET + 9) \
				: (size) <= (MINALLOCSIZE * 1024) \
					? (MINBUCKET + 10) \
					: (MINBUCKET + 11) \
			: (size) <= (MINALLOCSIZE * 8192) \
				? (size) <= (MINALLOCSIZE * 4096) \
					? (MINBUCKET + 12) \
					: (MINBUCKET + 13) \
				: (size) <= (MINALLOCSIZE * 16384) \
					? (MINBUCKET + 14) \
					: (MINBUCKET + 15))
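
/*
 * A minimal sketch (hypothetical, not compiled in) of what
 * BUCKETINDX() computes: the log2 of the smallest power-of-two
 * chunk that can hold the request.  With MINBUCKET == 4, e.g.,
 * a 24-byte request lands in the 32-byte bucket (index 5).
 */
#if 0
static void
bucketindx_demo(void)
{

	KASSERT(BUCKETINDX(MINALLOCSIZE) == MINBUCKET);		 /* exact fit */
	KASSERT(BUCKETINDX(MINALLOCSIZE + 1) == MINBUCKET + 1); /* round up */
	KASSERT(BUCKETINDX(PAGE_SIZE) == PGSHIFT);	    /* one whole page */
}
#endif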

/*
 * Array of descriptors that describe the contents of each page
 */
struct kmemusage {
	short ku_indx;		/* bucket index */
	union {
		u_short freecnt;/* for small allocations, free pieces in page */
		u_short pagecnt;/* for large allocations, pages alloced */
	} ku_un;
};
#define	ku_freecnt ku_un.freecnt
#define	ku_pagecnt ku_un.pagecnt

struct kmembuckets kmembuckets[MINBUCKET + 16];
struct kmemusage *kmemusage;
char *kmembase, *kmemlimit;

#ifdef DEBUG
static void *malloc_freecheck;
#endif

/*
 * Turn virtual addresses into kmem map indices
 */
#define	btokup(addr)	(&kmemusage[((char *)(addr) - kmembase) >> PGSHIFT])

struct malloc_type *kmemstatistics;

#ifdef MALLOCLOG
#ifndef MALLOCLOGSIZE
#define	MALLOCLOGSIZE	100000
#endif

struct malloclog {
	void *addr;
	long size;
	struct malloc_type *type;
	int action;
	const char *file;
	long line;
} malloclog[MALLOCLOGSIZE];

long	malloclogptr;

static void
domlog(void *a, long size, struct malloc_type *type, int action,
    const char *file, long line)
{

	malloclog[malloclogptr].addr = a;
	malloclog[malloclogptr].size = size;
	malloclog[malloclogptr].type = type;
	malloclog[malloclogptr].action = action;
	malloclog[malloclogptr].file = file;
	malloclog[malloclogptr].line = line;
	malloclogptr++;
	if (malloclogptr >= MALLOCLOGSIZE)
		malloclogptr = 0;
}

static void
hitmlog(void *a)
{
	struct malloclog *lp;
	long l;

#define	PRT do { \
	lp = &malloclog[l]; \
	if (lp->addr == a && lp->action) { \
		printf("malloc log entry %ld:\n", l); \
		printf("\taddr = %p\n", lp->addr); \
		printf("\tsize = %ld\n", lp->size); \
		printf("\ttype = %s\n", lp->type->ks_shortdesc); \
		printf("\taction = %s\n", lp->action == 1 ? "alloc" : "free"); \
		printf("\tfile = %s\n", lp->file); \
		printf("\tline = %ld\n", lp->line); \
	} \
} while (/* CONSTCOND */0)

	for (l = malloclogptr; l < MALLOCLOGSIZE; l++)
		PRT;

	for (l = 0; l < malloclogptr; l++)
		PRT;
#undef PRT
}
#endif /* MALLOCLOG */

#ifdef DIAGNOSTIC
/*
 * This structure provides a set of masks to catch unaligned frees.
 */
const long addrmask[] = { 0,
	0x00000001, 0x00000003, 0x00000007, 0x0000000f,
	0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
	0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
	0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
};

/*
 * The WEIRD_ADDR is used as known text to copy into free objects so
 * that modifications after frees can be detected.
 */
#define	WEIRD_ADDR	((uint32_t) 0xdeadbeef)
#ifdef DEBUG
#define	MAX_COPY	PAGE_SIZE
#else
#define	MAX_COPY	32
#endif

/*
 * Normally the freelist structure is used only to hold the list pointer
 * for free objects.  However, when running with diagnostics, the first
 * 8/16 bytes of the structure is unused except for diagnostic information,
 * and the free list pointer is at offset 8/16 in the structure.  Since the
 * first 8 bytes is the portion of the structure most often modified, this
 * helps to detect memory reuse problems and avoid free list corruption.
 */
struct freelist {
	uint32_t spare0;
#ifdef _LP64
	uint32_t spare1;		/* explicit padding */
#endif
	struct malloc_type *type;
	void *	next;
};
#else /* !DIAGNOSTIC */
struct freelist {
	void *	next;
};
#endif /* DIAGNOSTIC */
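
/*
 * A minimal sketch (hypothetical helper, not used by the allocator)
 * of the quick "looks free" heuristic that free() relies on below:
 * freed chunks are filled with WEIRD_ADDR, so a chunk whose first
 * word still holds that pattern is probably on a freelist already.
 */
#if 0
static int
looks_free(const void *p)
{

	return *(const uint32_t *)p == WEIRD_ADDR;
}
#endif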

/*
 * The following are standard, built-in malloc types and are not
 * specific to any subsystem.
 */
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_DMAMAP, "DMA map", "bus_dma(9) structures");
MALLOC_DEFINE(M_FREE, "free", "should be on free list");
MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");
MALLOC_DEFINE(M_SOFTINTR, "softintr", "Softinterrupt structures");
MALLOC_DEFINE(M_TEMP, "temp", "misc. temporary data buffers");

/* XXX These should all be elsewhere. */
MALLOC_DEFINE(M_RTABLE, "routetbl", "routing tables");
MALLOC_DEFINE(M_FTABLE, "fragtbl", "fragment reassembly header");
MALLOC_DEFINE(M_UFSMNT, "UFS mount", "UFS mount structure");
MALLOC_DEFINE(M_NETADDR, "Export Host", "Export host address structure");
MALLOC_DEFINE(M_IPMOPTS, "ip_moptions", "internet multicast options");
MALLOC_DEFINE(M_IPMADDR, "in_multi", "internet multicast address");
MALLOC_DEFINE(M_MRTABLE, "mrt", "multicast routing tables");
MALLOC_DEFINE(M_BWMETER, "bwmeter", "multicast upcall bw meters");
MALLOC_DEFINE(M_1394DATA, "1394data", "IEEE 1394 data buffers");
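
/*
 * Sketch of how a subsystem declares and uses a type of its own;
 * M_EXAMPLE and example_init() are hypothetical and not part of
 * this file.
 */
#if 0
MALLOC_DEFINE(M_EXAMPLE, "example", "example subsystem buffers");

static void
example_init(void)
{
	char *buf;

	buf = malloc(128, M_EXAMPLE, M_WAITOK | M_ZERO);
	/* ... use buf ... */
	free(buf, M_EXAMPLE);
}
#endif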

kmutex_t malloc_lock;

/*
 * Allocate a block of memory
 */
#ifdef MALLOCLOG
void *
_malloc(unsigned long size, struct malloc_type *ksp, int flags,
    const char *file, long line)
#else
void *
malloc(unsigned long size, struct malloc_type *ksp, int flags)
#endif /* MALLOCLOG */
{
	struct kmembuckets *kbp;
	struct kmemusage *kup;
	struct freelist *freep;
	long indx, npg, allocsize;
	char *va, *cp, *savedlist;
#ifdef DIAGNOSTIC
	uint32_t *end, *lp;
	int copysize;
#endif

#ifdef LOCKDEBUG
	if ((flags & M_NOWAIT) == 0)
		ASSERT_SLEEPABLE(NULL, "malloc");
#endif
#ifdef MALLOC_DEBUG
	if (debug_malloc(size, ksp, flags, (void *) &va)) {
		if (va != 0)
			FREECHECK_OUT(&malloc_freecheck, (void *)va);
		return ((void *) va);
	}
#endif
	indx = BUCKETINDX(size);
	kbp = &kmembuckets[indx];
	mutex_spin_enter(&malloc_lock);
#ifdef KMEMSTATS
	while (ksp->ks_memuse >= ksp->ks_limit) {
		if (flags & M_NOWAIT) {
			mutex_spin_exit(&malloc_lock);
			return ((void *) NULL);
		}
		if (ksp->ks_limblocks < 65535)
			ksp->ks_limblocks++;
		mtsleep((void *)ksp, PSWP+2, ksp->ks_shortdesc, 0,
		    &malloc_lock);
	}
	ksp->ks_size |= 1 << indx;
#endif
#ifdef DIAGNOSTIC
	copysize = 1 << indx < MAX_COPY ? 1 << indx : MAX_COPY;
#endif
	if (kbp->kb_next == NULL) {
		int s;
		kbp->kb_last = NULL;
		if (size > MAXALLOCSAVE)
			allocsize = round_page(size);
		else
			allocsize = 1 << indx;
		npg = btoc(allocsize);
		mutex_spin_exit(&malloc_lock);
		s = splvm();
		va = (void *) uvm_km_alloc(kmem_map,
		    (vsize_t)ctob(npg), 0,
		    ((flags & M_NOWAIT) ? UVM_KMF_NOWAIT : 0) |
		    ((flags & M_CANFAIL) ? UVM_KMF_CANFAIL : 0) |
		    UVM_KMF_WIRED);
		splx(s);
		if (__predict_false(va == NULL)) {
			/*
			 * Kmem_malloc() can return NULL, even if it can
			 * wait, if there is no map space available, because
			 * it can't fix that problem.  Neither can we,
			 * right now.  (We should release pages which
			 * are completely free and which are in kmembuckets
			 * with too many free elements.)
			 */
			if ((flags & (M_NOWAIT|M_CANFAIL)) == 0)
				panic("malloc: out of space in kmem_map");
			return (NULL);
		}
		mutex_spin_enter(&malloc_lock);
#ifdef KMEMSTATS
		kbp->kb_total += kbp->kb_elmpercl;
#endif
		kup = btokup(va);
		kup->ku_indx = indx;
		if (allocsize > MAXALLOCSAVE) {
			if (npg > 65535)
				panic("malloc: allocation too large");
			kup->ku_pagecnt = npg;
#ifdef KMEMSTATS
			ksp->ks_memuse += allocsize;
#endif
			goto out;
		}
#ifdef KMEMSTATS
		kup->ku_freecnt = kbp->kb_elmpercl;
		kbp->kb_totalfree += kbp->kb_elmpercl;
#endif
		/*
		 * Just in case we blocked while allocating memory,
		 * and someone else also allocated memory for this
		 * kmembucket, don't assume the list is still empty.
		 */
		savedlist = kbp->kb_next;
		kbp->kb_next = cp = va + (npg << PAGE_SHIFT) - allocsize;
		for (;;) {
			freep = (struct freelist *)cp;
#ifdef DIAGNOSTIC
			/*
			 * Copy in known text to detect modification
			 * after freeing.
			 */
			end = (uint32_t *)&cp[copysize];
			for (lp = (uint32_t *)cp; lp < end; lp++)
				*lp = WEIRD_ADDR;
			freep->type = M_FREE;
#endif /* DIAGNOSTIC */
			if (cp <= va)
				break;
			cp -= allocsize;
			freep->next = cp;
		}
		freep->next = savedlist;
		if (kbp->kb_last == NULL)
			kbp->kb_last = (void *)freep;
	}
	va = kbp->kb_next;
	kbp->kb_next = ((struct freelist *)va)->next;
#ifdef DIAGNOSTIC
	freep = (struct freelist *)va;
	/* XXX potential to get garbage pointer here. */
	if (kbp->kb_next) {
		int rv;
		vaddr_t addr = (vaddr_t)kbp->kb_next;

		vm_map_lock(kmem_map);
		rv = uvm_map_checkprot(kmem_map, addr,
		    addr + sizeof(struct freelist), VM_PROT_WRITE);
		vm_map_unlock(kmem_map);

		if (__predict_false(rv == 0)) {
			printf("Data modified on freelist: "
			    "word %ld of object %p size %ld previous type %s "
			    "(invalid addr %p)\n",
			    (long)((int32_t *)&kbp->kb_next - (int32_t *)kbp),
			    va, size, "foo", kbp->kb_next);
#ifdef MALLOCLOG
			hitmlog(va);
#endif
			kbp->kb_next = NULL;
		}
	}

	/* Fill the fields that we've used with WEIRD_ADDR */
#ifdef _LP64
	freep->type = (struct malloc_type *)
	    (WEIRD_ADDR | (((u_long) WEIRD_ADDR) << 32));
#else
	freep->type = (struct malloc_type *) WEIRD_ADDR;
#endif
	end = (uint32_t *)&freep->next +
	    (sizeof(freep->next) / sizeof(int32_t));
	for (lp = (uint32_t *)&freep->next; lp < end; lp++)
		*lp = WEIRD_ADDR;

	/* and check that the data hasn't been modified. */
	end = (uint32_t *)&va[copysize];
	for (lp = (uint32_t *)va; lp < end; lp++) {
		if (__predict_true(*lp == WEIRD_ADDR))
			continue;
		printf("Data modified on freelist: "
		    "word %ld of object %p size %ld previous type %s "
		    "(0x%x != 0x%x)\n",
		    (long)(lp - (uint32_t *)va), va, size,
		    "bar", *lp, WEIRD_ADDR);
#ifdef MALLOCLOG
		hitmlog(va);
#endif
		break;
	}

	freep->spare0 = 0;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup = btokup(va);
	if (kup->ku_indx != indx)
		panic("malloc: wrong bucket");
	if (kup->ku_freecnt == 0)
		panic("malloc: lost data");
	kup->ku_freecnt--;
	kbp->kb_totalfree--;
	ksp->ks_memuse += 1 << indx;
out:
	kbp->kb_calls++;
	ksp->ks_inuse++;
	ksp->ks_calls++;
	if (ksp->ks_memuse > ksp->ks_maxused)
		ksp->ks_maxused = ksp->ks_memuse;
#else
out:
#endif
#ifdef MALLOCLOG
	domlog(va, size, ksp, 1, file, line);
#endif
	mutex_spin_exit(&malloc_lock);
	if ((flags & M_ZERO) != 0)
		memset(va, 0, size);
	FREECHECK_OUT(&malloc_freecheck, (void *)va);
	return ((void *) va);
}
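
/*
 * Usage sketch (hypothetical caller): with M_WAITOK the caller may
 * sleep and, unless M_CANFAIL is also set, does not see failure
 * (kmem_map exhaustion panics instead); with M_NOWAIT the caller
 * must be prepared for NULL.
 */
#if 0
static int
malloc_usage_demo(void)
{
	void *p, *q;

	p = malloc(64, M_TEMP, M_WAITOK | M_ZERO);	/* may sleep; zeroed */
	q = malloc(64, M_TEMP, M_NOWAIT);		/* may return NULL */
	if (q == NULL) {
		free(p, M_TEMP);
		return ENOMEM;
	}
	free(q, M_TEMP);
	free(p, M_TEMP);
	return 0;
}
#endif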

/*
 * Free a block of memory allocated by malloc.
 */
#ifdef MALLOCLOG
void
_free(void *addr, struct malloc_type *ksp, const char *file, long line)
#else
void
free(void *addr, struct malloc_type *ksp)
#endif /* MALLOCLOG */
{
	struct kmembuckets *kbp;
	struct kmemusage *kup;
	struct freelist *freep;
	long size;
#ifdef DIAGNOSTIC
	void *cp;
	int32_t *end, *lp;
	long alloc, copysize;
#endif

	FREECHECK_IN(&malloc_freecheck, addr);
#ifdef MALLOC_DEBUG
	if (debug_free(addr, ksp))
		return;
#endif

#ifdef DIAGNOSTIC
	/*
	 * Ensure that we're free'ing something that we could
	 * have allocated in the first place.  That is, check
	 * to see that the address is within kmem_map.
	 */
	if (__predict_false((vaddr_t)addr < vm_map_min(kmem_map) ||
	    (vaddr_t)addr >= vm_map_max(kmem_map)))
		panic("free: addr %p not within kmem_map", addr);
#endif

	kup = btokup(addr);
	size = 1 << kup->ku_indx;
	kbp = &kmembuckets[kup->ku_indx];

	LOCKDEBUG_MEM_CHECK(addr,
	    size <= MAXALLOCSAVE ? size : ctob(kup->ku_pagecnt));

	mutex_spin_enter(&malloc_lock);
#ifdef MALLOCLOG
	domlog(addr, 0, ksp, 2, file, line);
#endif
#ifdef DIAGNOSTIC
	/*
	 * Check for returns of data that do not point to the
	 * beginning of the allocation.
	 */
	if (size > PAGE_SIZE)
		alloc = addrmask[BUCKETINDX(PAGE_SIZE)];
	else
		alloc = addrmask[kup->ku_indx];
	if (((u_long)addr & alloc) != 0)
		panic("free: unaligned addr %p, size %ld, type %s, mask %ld",
		    addr, size, ksp->ks_shortdesc, alloc);
#endif /* DIAGNOSTIC */
	if (size > MAXALLOCSAVE) {
		uvm_km_free(kmem_map, (vaddr_t)addr, ctob(kup->ku_pagecnt),
		    UVM_KMF_WIRED);
#ifdef KMEMSTATS
		size = kup->ku_pagecnt << PGSHIFT;
		ksp->ks_memuse -= size;
		kup->ku_indx = 0;
		kup->ku_pagecnt = 0;
		if (ksp->ks_memuse + size >= ksp->ks_limit &&
		    ksp->ks_memuse < ksp->ks_limit)
			wakeup((void *)ksp);
#ifdef DIAGNOSTIC
		if (ksp->ks_inuse == 0)
			panic("free 1: inuse 0, probable double free");
#endif
		ksp->ks_inuse--;
		kbp->kb_total -= 1;
#endif
		mutex_spin_exit(&malloc_lock);
		return;
	}
	freep = (struct freelist *)addr;
#ifdef DIAGNOSTIC
	/*
	 * Check for multiple frees.  Use a quick check to see if
	 * it looks free before laboriously searching the freelist.
	 */
	if (__predict_false(freep->spare0 == WEIRD_ADDR)) {
		for (cp = kbp->kb_next; cp;
		    cp = ((struct freelist *)cp)->next) {
			if (addr != cp)
				continue;
			printf("multiply freed item %p\n", addr);
#ifdef MALLOCLOG
			hitmlog(addr);
#endif
			panic("free: duplicated free");
		}
	}

	/*
	 * Copy in known text to detect modification after freeing
	 * and to make it look free.  Also, save the type being freed
	 * so we can list likely culprit if modification is detected
	 * when the object is reallocated.
	 */
	copysize = size < MAX_COPY ? size : MAX_COPY;
	end = (int32_t *)&((char *)addr)[copysize];
	for (lp = (int32_t *)addr; lp < end; lp++)
		*lp = WEIRD_ADDR;
	freep->type = ksp;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup->ku_freecnt++;
	if (kup->ku_freecnt >= kbp->kb_elmpercl) {
		if (kup->ku_freecnt > kbp->kb_elmpercl)
			panic("free: multiple frees");
		else if (kbp->kb_totalfree > kbp->kb_highwat)
			kbp->kb_couldfree++;
	}
	kbp->kb_totalfree++;
	ksp->ks_memuse -= size;
	if (ksp->ks_memuse + size >= ksp->ks_limit &&
	    ksp->ks_memuse < ksp->ks_limit)
		wakeup((void *)ksp);
#ifdef DIAGNOSTIC
	if (ksp->ks_inuse == 0)
		panic("free 2: inuse 0, probable double free");
#endif
	ksp->ks_inuse--;
#endif
	if (kbp->kb_next == NULL)
		kbp->kb_next = addr;
	else
		((struct freelist *)kbp->kb_last)->next = addr;
	freep->next = NULL;
	kbp->kb_last = addr;
	mutex_spin_exit(&malloc_lock);
}

/*
 * Change the size of a block of memory.
 */
void *
realloc(void *curaddr, unsigned long newsize, struct malloc_type *ksp,
    int flags)
{
	struct kmemusage *kup;
	unsigned long cursize;
	void *newaddr;
#ifdef DIAGNOSTIC
	long alloc;
#endif

	/*
	 * realloc() with a NULL pointer is the same as malloc().
	 */
	if (curaddr == NULL)
		return (malloc(newsize, ksp, flags));

	/*
	 * realloc() with zero size is the same as free().
	 */
	if (newsize == 0) {
		free(curaddr, ksp);
		return (NULL);
	}

#ifdef LOCKDEBUG
	if ((flags & M_NOWAIT) == 0)
		ASSERT_SLEEPABLE(NULL, "realloc");
#endif

	/*
	 * Find out how large the old allocation was (and do some
	 * sanity checking).
	 */
	kup = btokup(curaddr);
	cursize = 1 << kup->ku_indx;

#ifdef DIAGNOSTIC
	/*
	 * Check for returns of data that do not point to the
	 * beginning of the allocation.
	 */
	if (cursize > PAGE_SIZE)
		alloc = addrmask[BUCKETINDX(PAGE_SIZE)];
	else
		alloc = addrmask[kup->ku_indx];
	if (((u_long)curaddr & alloc) != 0)
		panic("realloc: "
		    "unaligned addr %p, size %ld, type %s, mask %ld\n",
		    curaddr, cursize, ksp->ks_shortdesc, alloc);
#endif /* DIAGNOSTIC */

	if (cursize > MAXALLOCSAVE)
		cursize = ctob(kup->ku_pagecnt);

	/*
	 * If we already actually have as much as they want, we're done.
	 */
	if (newsize <= cursize)
		return (curaddr);

	/*
	 * Can't satisfy the allocation with the existing block.
	 * Allocate a new one and copy the data.
	 */
	newaddr = malloc(newsize, ksp, flags);
	if (__predict_false(newaddr == NULL)) {
		/*
		 * malloc() failed, because flags included M_NOWAIT.
		 * Return NULL to indicate that failure.  The old
		 * pointer is still valid.
		 */
		return (NULL);
	}
	memcpy(newaddr, curaddr, cursize);

	/*
	 * We were successful: free the old allocation and return
	 * the new one.
	 */
	free(curaddr, ksp);
	return (newaddr);
}
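
/*
 * Usage sketch (grow_buffer() is hypothetical): on a failed M_NOWAIT
 * grow, realloc() returns NULL and the old pointer stays valid, so
 * the caller must keep it.
 */
#if 0
static int
grow_buffer(char **bufp, unsigned long newsize)
{
	char *nbuf;

	nbuf = realloc(*bufp, newsize, M_TEMP, M_NOWAIT);
	if (nbuf == NULL)
		return ENOMEM;	/* *bufp is still valid and still ours */
	*bufp = nbuf;
	return 0;
}
#endif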

/*
 * Round up size to the actual allocation size.
 */
unsigned long
malloc_roundup(unsigned long size)
{

	if (size > MAXALLOCSAVE)
		return (roundup(size, PAGE_SIZE));
	else
		return (1 << BUCKETINDX(size));
}
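
/*
 * Usage sketch (hypothetical): the allocator rounds up internally,
 * so a caller sizing a variable-length buffer can request the
 * rounded size and use the slack at no extra cost.
 */
#if 0
static void *
alloc_full_chunk(unsigned long want, unsigned long *gotp)
{

	*gotp = malloc_roundup(want);	/* e.g. 3000 -> 4096 with 4KB pages */
	return malloc(*gotp, M_TEMP, M_WAITOK);
}
#endif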

/*
 * Add a malloc type to the system.
 */
void
malloc_type_attach(struct malloc_type *type)
{

	if (nkmempages == 0)
		panic("malloc_type_attach: nkmempages == 0");

	if (type->ks_magic != M_MAGIC)
		panic("malloc_type_attach: bad magic");

#ifdef DIAGNOSTIC
	{
		struct malloc_type *ksp;
		for (ksp = kmemstatistics; ksp != NULL; ksp = ksp->ks_next) {
			if (ksp == type)
				panic("malloc_type_attach: already on list");
		}
	}
#endif

#ifdef KMEMSTATS
	if (type->ks_limit == 0)
		type->ks_limit = ((u_long)nkmempages << PAGE_SHIFT) * 6U / 10U;
#else
	type->ks_limit = 0;
#endif

	type->ks_next = kmemstatistics;
	kmemstatistics = type;
}

/*
 * Remove a malloc type from the system.
 */
void
malloc_type_detach(struct malloc_type *type)
{
	struct malloc_type *ksp;

#ifdef DIAGNOSTIC
	if (type->ks_magic != M_MAGIC)
		panic("malloc_type_detach: bad magic");
#endif

	if (type == kmemstatistics)
		kmemstatistics = type->ks_next;
	else {
		for (ksp = kmemstatistics; ksp->ks_next != NULL;
		    ksp = ksp->ks_next) {
			if (ksp->ks_next == type) {
				ksp->ks_next = type->ks_next;
				break;
			}
		}
#ifdef DIAGNOSTIC
		if (ksp->ks_next == NULL)
			panic("malloc_type_detach: not on list");
#endif
	}
	type->ks_next = NULL;
}
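
/*
 * Sketch of dynamic attachment, e.g. from a loadable module; M_MYMOD
 * and the load/unload hooks are hypothetical.  A dynamically created
 * type is defined outside the malloc_types link set (assuming
 * MALLOC_JUSTDEFINE(), cf. sys/malloc.h) and attached by hand.
 */
#if 0
MALLOC_JUSTDEFINE(M_MYMOD, "mymod", "my module data");

static void
mymod_load(void)
{

	malloc_type_attach(M_MYMOD);
}

static void
mymod_unload(void)
{

	malloc_type_detach(M_MYMOD);
}
#endif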

/*
 * Set the limit on a malloc type.
 */
void
malloc_type_setlimit(struct malloc_type *type, u_long limit)
{
#ifdef KMEMSTATS
	mutex_spin_enter(&malloc_lock);
	type->ks_limit = limit;
	mutex_spin_exit(&malloc_lock);
#endif
}

/*
 * Compute the number of pages that kmem_map will map, that is,
 * the size of the kernel malloc arena.
 */
void
kmeminit_nkmempages(void)
{
	int npages;

	if (nkmempages != 0) {
		/*
		 * It's already been set (by us being here before, or
		 * by patching or kernel config options), bail out now.
		 */
		return;
	}

	npages = physmem;

	if (npages > NKMEMPAGES_MAX)
		npages = NKMEMPAGES_MAX;

	if (npages < NKMEMPAGES_MIN)
		npages = NKMEMPAGES_MIN;

	nkmempages = npages;
}

/*
 * Initialize the kernel memory allocator
 */
void
kmeminit(void)
{
	__link_set_decl(malloc_types, struct malloc_type);
	struct malloc_type * const *ksp;
	vaddr_t kmb, kml;
#ifdef KMEMSTATS
	long indx;
#endif

#if ((MAXALLOCSAVE & (MAXALLOCSAVE - 1)) != 0)
		ERROR!_kmeminit:_MAXALLOCSAVE_not_power_of_2
#endif
#if (MAXALLOCSAVE > MINALLOCSIZE * 32768)
		ERROR!_kmeminit:_MAXALLOCSAVE_too_big
#endif
#if (MAXALLOCSAVE < NBPG)
		ERROR!_kmeminit:_MAXALLOCSAVE_too_small
#endif

	if (sizeof(struct freelist) > (1 << MINBUCKET))
		panic("minbucket too small/struct freelist too big");

	mutex_init(&malloc_lock, MUTEX_DEFAULT, IPL_VM);

	/*
	 * Compute the number of kmem_map pages, if we have not
	 * done so already.
	 */
	kmeminit_nkmempages();

	kmemusage = (struct kmemusage *) uvm_km_alloc(kernel_map,
	    (vsize_t)(nkmempages * sizeof(struct kmemusage)), 0,
	    UVM_KMF_WIRED|UVM_KMF_ZERO);
	kmb = 0;
	kmem_map = uvm_km_suballoc(kernel_map, &kmb,
	    &kml, ((vsize_t)nkmempages << PAGE_SHIFT),
	    VM_MAP_INTRSAFE, false, &kmem_map_store);
	uvm_km_vacache_init(kmem_map, "kvakmem", 0);
	kmembase = (char *)kmb;
	kmemlimit = (char *)kml;
#ifdef KMEMSTATS
	for (indx = 0; indx < MINBUCKET + 16; indx++) {
		if (1 << indx >= PAGE_SIZE)
			kmembuckets[indx].kb_elmpercl = 1;
		else
			kmembuckets[indx].kb_elmpercl = PAGE_SIZE / (1 << indx);
		kmembuckets[indx].kb_highwat =
		    5 * kmembuckets[indx].kb_elmpercl;
	}
#endif

	/* Attach all of the statically-linked malloc types. */
	__link_set_foreach(ksp, malloc_types)
		malloc_type_attach(*ksp);
}

#ifdef DDB
#include <ddb/db_output.h>

/*
 * Dump kmem statistics from ddb.
 *
 * usage: call dump_kmemstats
 */
void	dump_kmemstats(void);

void
dump_kmemstats(void)
{
#ifdef KMEMSTATS
	struct malloc_type *ksp;

	for (ksp = kmemstatistics; ksp != NULL; ksp = ksp->ks_next) {
		if (ksp->ks_memuse == 0)
			continue;
		db_printf("%s%.*s %ld\n", ksp->ks_shortdesc,
		    (int)(20 - strlen(ksp->ks_shortdesc)),
		    "                    ",
		    ksp->ks_memuse);
	}
#else
	db_printf("Kmem stats are not being collected.\n");
#endif /* KMEMSTATS */
}
#endif /* DDB */


#if 0
/*
 * Diagnostic messages about "Data modified on
 * freelist" indicate a memory corruption, but
 * they do not help tracking it down.
 * This function can be called at various places
 * to sanity check malloc's freelist and discover
 * where the corruption takes place.
 */
int
freelist_sanitycheck(void)
{
	int i, j;
	struct kmembuckets *kbp;
	struct freelist *freep;
	int rv = 0;

	for (i = MINBUCKET; i <= MINBUCKET + 15; i++) {
		kbp = &kmembuckets[i];
		freep = (struct freelist *)kbp->kb_next;
		j = 0;
		while (freep) {
			vm_map_lock(kmem_map);
			rv = uvm_map_checkprot(kmem_map, (vaddr_t)freep,
			    (vaddr_t)freep + sizeof(struct freelist),
			    VM_PROT_WRITE);
			vm_map_unlock(kmem_map);

			if ((rv == 0) || (*(int *)freep != WEIRD_ADDR)) {
				printf("bucket %i, chunk %d at %p modified\n",
				    i, j, freep);
				return 1;
			}
			freep = (struct freelist *)freep->next;
			j++;
		}
	}

	return 0;
}
#endif