/*	$NetBSD: kern_malloc.c,v 1.31 1998/02/10 14:09:34 mrg Exp $	*/
2
3 /*
4 * Copyright 1996 Christopher G. Demetriou. All rights reserved.
5 * Copyright (c) 1987, 1991, 1993
6 * The Regents of the University of California. All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the University of
19 * California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * @(#)kern_malloc.c 8.3 (Berkeley) 1/4/94
37 */
38
39 #include "opt_uvm.h"
40
41 #include <sys/param.h>
42 #include <sys/proc.h>
43 #include <sys/map.h>
44 #include <sys/kernel.h>
45 #include <sys/malloc.h>
46 #include <sys/systm.h>
47
48 #include <vm/vm.h>
49 #include <vm/vm_kern.h>
50
#if defined(UVM)
#include <uvm/uvm_extern.h>

/*
 * Under UVM the kmem submap's vm_map structure is allocated statically
 * here and handed to uvm_km_suballoc() in kmeminit(); kmem_map stays
 * NULL until kmeminit() runs.
 */
static struct vm_map kmem_map_store;
vm_map_t kmem_map = NULL;
#endif
57
58 #include "opt_kmemstats.h"
59 #include "opt_malloclog.h"
60
/*
 * Free-list buckets, one per power-of-two size class; malloc() and
 * free() select a bucket via BUCKETINDX()/ku_indx.
 */
struct kmembuckets bucket[MINBUCKET + 16];
/* Per-type allocation statistics (used under KMEMSTATS). */
struct kmemstats kmemstats[M_LAST];
/* Per-page usage records covering the kmem submap; see btokup(). */
struct kmemusage *kmemusage;
/* Bounds of the kmem submap, filled in by kmeminit(). */
char *kmembase, *kmemlimit;
/* Printable names for the malloc types, indexed by type. */
const char *memname[] = INITKMEMNAMES;

#ifdef MALLOCLOG
#ifndef MALLOCLOGSIZE
#define	MALLOCLOGSIZE	100000
#endif

/*
 * Circular log of recent malloc/free operations (action 1 = alloc,
 * 2 = free); malloclogptr is the next slot to be overwritten.
 */
struct malloclog {
	void *addr;
	long size;
	int type;
	int action;
	const char *file;
	long line;
} malloclog[MALLOCLOGSIZE];

long	malloclogptr;

static void domlog __P((void *a, long size, int type, int action,
	const char *file, long line));
static void hitmlog __P((void *a));
86
87 static void
88 domlog(a, size, type, action, file, line)
89 void *a;
90 long size;
91 int type;
92 int action;
93 const char *file;
94 long line;
95 {
96
97 malloclog[malloclogptr].addr = a;
98 malloclog[malloclogptr].size = size;
99 malloclog[malloclogptr].type = type;
100 malloclog[malloclogptr].action = action;
101 malloclog[malloclogptr].file = file;
102 malloclog[malloclogptr].line = line;
103 malloclogptr++;
104 if (malloclogptr >= MALLOCLOGSIZE)
105 malloclogptr = 0;
106 }
107
108 static void
109 hitmlog(a)
110 void *a;
111 {
112 struct malloclog *lp;
113 long l;
114
115 #define PRT \
116 if (malloclog[l].addr == a && malloclog[l].action) { \
117 lp = &malloclog[l]; \
118 printf("malloc log entry %ld:\n", l); \
119 printf("\taddr = %p\n", lp->addr); \
120 printf("\tsize = %ld\n", lp->size); \
121 printf("\ttype = %s\n", memname[lp->type]); \
122 printf("\taction = %s\n", lp->action == 1 ? "alloc" : "free"); \
123 printf("\tfile = %s\n", lp->file); \
124 printf("\tline = %ld\n", lp->line); \
125 }
126
127 for (l = malloclogptr; l < MALLOCLOGSIZE; l++)
128 PRT
129
130 for (l = 0; l < malloclogptr; l++)
131 PRT
132 }
133 #endif /* MALLOCLOG */
134
#ifdef DIAGNOSTIC
/*
 * This structure provides a set of masks to catch unaligned frees.
 * Entry i is the mask for a (1 << i)-byte allocation; free() and
 * realloc() panic when an address has any of these low bits set.
 */
long addrmask[] = { 0,
	0x00000001, 0x00000003, 0x00000007, 0x0000000f,
	0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
	0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
	0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
};

/*
 * The WEIRD_ADDR is used as known text to copy into free objects so
 * that modifications after frees can be detected.
 */
#define	WEIRD_ADDR	((unsigned) 0xdeadbeef)
/* At most this many leading bytes of a free object carry the pattern. */
#define	MAX_COPY	32

/*
 * Normally the freelist structure is used only to hold the list pointer
 * for free objects.  However, when running with diagnostics, the first
 * 8 bytes of the structure is unused except for diagnostic information,
 * and the free list pointer is at offset 8 in the structure.  Since the
 * first 8 bytes is the portion of the structure most often modified, this
 * helps to detect memory reuse problems and avoid free list corruption.
 */
struct freelist {
	int32_t	spare0;
	int16_t	type;		/* malloc type of the last owner */
	int16_t	spare1;
	caddr_t	next;
};
#else /* !DIAGNOSTIC */
struct freelist {
	caddr_t	next;
};
#endif /* DIAGNOSTIC */
172
/*
 * Allocate a block of memory.
 *
 * With M_NOWAIT in "flags" this returns NULL instead of sleeping when
 * the type's limit is reached or the kmem submap is exhausted; without
 * it the caller may block (tsleep) or panic on map exhaustion.  The
 * bucket lists are protected by raising the ipl with splimp().
 *
 * Under MALLOCLOG the function is built as _malloc() taking file/line;
 * presumably malloc() is then a macro supplying __FILE__/__LINE__ —
 * see sys/malloc.h to confirm.
 */
#ifdef MALLOCLOG
void *
_malloc(size, type, flags, file, line)
	unsigned long size;
	int type, flags;
	const char *file;
	long line;
#else
void *
malloc(size, type, flags)
	unsigned long size;
	int type, flags;
#endif /* MALLOCLOG */
{
	register struct kmembuckets *kbp;
	register struct kmemusage *kup;
	register struct freelist *freep;
	long indx, npg, allocsize;
	int s;
	caddr_t va, cp, savedlist;
#ifdef DIAGNOSTIC
	int32_t *end, *lp;
	int copysize;
	const char *savedtype;
#endif
#ifdef KMEMSTATS
	register struct kmemstats *ksp = &kmemstats[type];

	if (((unsigned long)type) > M_LAST)
		panic("malloc - bogus type");
#endif
	indx = BUCKETINDX(size);
	kbp = &bucket[indx];
	s = splimp();
#ifdef KMEMSTATS
	/* Enforce the per-type memory limit; sleepers are woken by free(). */
	while (ksp->ks_memuse >= ksp->ks_limit) {
		if (flags & M_NOWAIT) {
			splx(s);
			return ((void *) NULL);
		}
		if (ksp->ks_limblocks < 65535)
			ksp->ks_limblocks++;
		tsleep((caddr_t)ksp, PSWP+2, memname[type], 0);
	}
	ksp->ks_size |= 1 << indx;
#endif
#ifdef DIAGNOSTIC
	/* Only the first copysize bytes of a free object carry the pattern. */
	copysize = 1 << indx < MAX_COPY ? 1 << indx : MAX_COPY;
#endif
	/*
	 * Bucket empty: get more pages from the kmem submap and, for
	 * small sizes, carve them into a chain of free elements.
	 */
	if (kbp->kb_next == NULL) {
		kbp->kb_last = NULL;
		if (size > MAXALLOCSAVE)
			allocsize = roundup(size, CLBYTES);
		else
			allocsize = 1 << indx;
		npg = clrnd(btoc(allocsize));
#if defined(UVM)
		va = (caddr_t) uvm_km_kmemalloc(kmem_map, uvmexp.kmem_object,
				(vm_size_t)ctob(npg),
				(flags & M_NOWAIT) ? UVM_KMF_NOWAIT : 0);
#else
		va = (caddr_t) kmem_malloc(kmem_map, (vm_size_t)ctob(npg),
					   !(flags & M_NOWAIT));
#endif
		if (va == NULL) {
			/*
			 * Kmem_malloc() can return NULL, even if it can
			 * wait, if there is no map space available, because
			 * it can't fix that problem.  Neither can we,
			 * right now.  (We should release pages which
			 * are completely free and which are in buckets
			 * with too many free elements.)
			 */
			if ((flags & M_NOWAIT) == 0)
				panic("malloc: out of space in kmem_map");
			splx(s);
			return ((void *) NULL);
		}
#ifdef KMEMSTATS
		kbp->kb_total += kbp->kb_elmpercl;
#endif
		kup = btokup(va);
		kup->ku_indx = indx;
		/* Oversize allocations bypass the free lists entirely. */
		if (allocsize > MAXALLOCSAVE) {
			if (npg > 65535)
				panic("malloc: allocation too large");
			kup->ku_pagecnt = npg;
#ifdef KMEMSTATS
			ksp->ks_memuse += allocsize;
#endif
			goto out;
		}
#ifdef KMEMSTATS
		kup->ku_freecnt = kbp->kb_elmpercl;
		kbp->kb_totalfree += kbp->kb_elmpercl;
#endif
		/*
		 * Just in case we blocked while allocating memory,
		 * and someone else also allocated memory for this
		 * bucket, don't assume the list is still empty.
		 */
		savedlist = kbp->kb_next;
		/* Chain the new elements from the end of the pages backward. */
		kbp->kb_next = cp = va + (npg * NBPG) - allocsize;
		for (;;) {
			freep = (struct freelist *)cp;
#ifdef DIAGNOSTIC
			/*
			 * Copy in known text to detect modification
			 * after freeing.
			 */
			end = (int32_t *)&cp[copysize];
			for (lp = (int32_t *)cp; lp < end; lp++)
				*lp = WEIRD_ADDR;
			freep->type = M_FREE;
#endif /* DIAGNOSTIC */
			if (cp <= va)
				break;
			cp -= allocsize;
			freep->next = cp;
		}
		freep->next = savedlist;
		if (kbp->kb_last == NULL)
			kbp->kb_last = (caddr_t)freep;
	}
	/* Take the first element off the bucket's free list. */
	va = kbp->kb_next;
	kbp->kb_next = ((struct freelist *)va)->next;
#ifdef DIAGNOSTIC
	freep = (struct freelist *)va;
	savedtype = (unsigned)freep->type < M_LAST ?
		memname[freep->type] : "???";
	/*
	 * Sanity-check the new head of the free list: if it does not
	 * point at writable kernel memory the list has been corrupted,
	 * so report it and truncate the list.
	 */
#if defined(UVM)
	if (kbp->kb_next) {
		int rv;
		vm_offset_t addr = (vm_offset_t)kbp->kb_next;

		vm_map_lock_read(kmem_map);
		rv = uvm_map_checkprot(kmem_map, addr,
		    addr + sizeof(struct freelist),
		    VM_PROT_WRITE);
		vm_map_unlock_read(kmem_map);

		if (!rv)
#else
	if (kbp->kb_next &&
	    !kernacc(kbp->kb_next, sizeof(struct freelist), 0))
#endif
	{
		printf(
		   "%s %ld of object %p size %ld %s %s (invalid addr %p)\n",
		    "Data modified on freelist: word",
		    (long)((int32_t *)&kbp->kb_next - (int32_t *)kbp),
		    va, size, "previous type", savedtype, kbp->kb_next);
#ifdef MALLOCLOG
		hitmlog(va);
#endif
		kbp->kb_next = NULL;
#if defined(UVM)
	}
#endif
	}

	/* Fill the fields that we've used with WEIRD_ADDR */
#if BYTE_ORDER == BIG_ENDIAN
	freep->type = WEIRD_ADDR >> 16;
#endif
#if BYTE_ORDER == LITTLE_ENDIAN
	freep->type = (short)WEIRD_ADDR;
#endif
	end = (int32_t *)&freep->next +
	    (sizeof(freep->next) / sizeof(int32_t));
	for (lp = (int32_t *)&freep->next; lp < end; lp++)
		*lp = WEIRD_ADDR;

	/* and check that the data hasn't been modified. */
	end = (int32_t *)&va[copysize];
	for (lp = (int32_t *)va; lp < end; lp++) {
		if (*lp == WEIRD_ADDR)
			continue;
		printf("%s %ld of object %p size %ld %s %s (0x%x != 0x%x)\n",
		    "Data modified on freelist: word",
		    (long)(lp - (int32_t *)va), va, size, "previous type",
		    savedtype, *lp, WEIRD_ADDR);
#ifdef MALLOCLOG
		hitmlog(va);
#endif
		break;
	}

	freep->spare0 = 0;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup = btokup(va);
	if (kup->ku_indx != indx)
		panic("malloc: wrong bucket");
	if (kup->ku_freecnt == 0)
		panic("malloc: lost data");
	kup->ku_freecnt--;
	kbp->kb_totalfree--;
	ksp->ks_memuse += 1 << indx;
out:
	kbp->kb_calls++;
	ksp->ks_inuse++;
	ksp->ks_calls++;
	if (ksp->ks_memuse > ksp->ks_maxused)
		ksp->ks_maxused = ksp->ks_memuse;
#else
out:
#endif
#ifdef MALLOCLOG
	domlog(va, size, type, 1, file, line);
#endif
	splx(s);
	return ((void *) va);
}
390
/*
 * Free a block of memory allocated by malloc.
 *
 * Small blocks are appended to their bucket's free list; allocations
 * larger than MAXALLOCSAVE are returned to the kmem submap directly.
 * Runs with the ipl raised via splimp(), matching malloc().
 */
#ifdef MALLOCLOG
void
_free(addr, type, file, line)
	void *addr;
	int type;
	const char *file;
	long line;
#else
void
free(addr, type)
	void *addr;
	int type;
#endif /* MALLOCLOG */
{
	register struct kmembuckets *kbp;
	register struct kmemusage *kup;
	register struct freelist *freep;
	long size;
	int s;
#ifdef DIAGNOSTIC
	caddr_t cp;
	int32_t *end, *lp;
	long alloc, copysize;
#endif
#ifdef KMEMSTATS
	register struct kmemstats *ksp = &kmemstats[type];
#endif

	/* Recover the size class from the page's kmemusage record. */
	kup = btokup(addr);
	size = 1 << kup->ku_indx;
	kbp = &bucket[kup->ku_indx];
	s = splimp();
#ifdef MALLOCLOG
	domlog(addr, 0, type, 2, file, line);
#endif
#ifdef DIAGNOSTIC
	/*
	 * Check for returns of data that do not point to the
	 * beginning of the allocation.
	 */
	if (size > NBPG * CLSIZE)
		alloc = addrmask[BUCKETINDX(NBPG * CLSIZE)];
	else
		alloc = addrmask[kup->ku_indx];
	if (((u_long)addr & alloc) != 0)
		panic("free: unaligned addr %p, size %ld, type %s, mask %ld\n",
			addr, size, memname[type], alloc);
#endif /* DIAGNOSTIC */
	/* Oversize allocation: give the pages straight back to the map. */
	if (size > MAXALLOCSAVE) {
#if defined(UVM)
		uvm_km_free(kmem_map, (vm_offset_t)addr, ctob(kup->ku_pagecnt));
#else
		kmem_free(kmem_map, (vm_offset_t)addr, ctob(kup->ku_pagecnt));
#endif
#ifdef KMEMSTATS
		size = kup->ku_pagecnt << PGSHIFT;
		ksp->ks_memuse -= size;
		kup->ku_indx = 0;
		kup->ku_pagecnt = 0;
		/* Wake malloc() callers sleeping on this type's limit. */
		if (ksp->ks_memuse + size >= ksp->ks_limit &&
		    ksp->ks_memuse < ksp->ks_limit)
			wakeup((caddr_t)ksp);
		ksp->ks_inuse--;
		kbp->kb_total -= 1;
#endif
		splx(s);
		return;
	}
	freep = (struct freelist *)addr;
#ifdef DIAGNOSTIC
	/*
	 * Check for multiple frees. Use a quick check to see if
	 * it looks free before laboriously searching the freelist.
	 */
	if (freep->spare0 == WEIRD_ADDR) {
		for (cp = kbp->kb_next; cp;
		    cp = ((struct freelist *)cp)->next) {
			if (addr != cp)
				continue;
			printf("multiply freed item %p\n", addr);
#ifdef MALLOCLOG
			hitmlog(addr);
#endif
			panic("free: duplicated free");
		}
	}
	/*
	 * Copy in known text to detect modification after freeing
	 * and to make it look free. Also, save the type being freed
	 * so we can list likely culprit if modification is detected
	 * when the object is reallocated.
	 */
	copysize = size < MAX_COPY ? size : MAX_COPY;
	end = (int32_t *)&((caddr_t)addr)[copysize];
	for (lp = (int32_t *)addr; lp < end; lp++)
		*lp = WEIRD_ADDR;
	freep->type = type;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup->ku_freecnt++;
	/* NB: the "else" binds to the inner "if" — intentional here. */
	if (kup->ku_freecnt >= kbp->kb_elmpercl)
		if (kup->ku_freecnt > kbp->kb_elmpercl)
			panic("free: multiple frees");
		else if (kbp->kb_totalfree > kbp->kb_highwat)
			kbp->kb_couldfree++;
	kbp->kb_totalfree++;
	ksp->ks_memuse -= size;
	/* Wake malloc() callers sleeping on this type's limit. */
	if (ksp->ks_memuse + size >= ksp->ks_limit &&
	    ksp->ks_memuse < ksp->ks_limit)
		wakeup((caddr_t)ksp);
	ksp->ks_inuse--;
#endif
	/* Append to the tail of the free list; malloc() takes from the head. */
	if (kbp->kb_next == NULL)
		kbp->kb_next = addr;
	else
		((struct freelist *)kbp->kb_last)->next = addr;
	freep->next = NULL;
	kbp->kb_last = addr;
	splx(s);
}
514
515 /*
516 * Change the size of a block of memory.
517 */
518 void *
519 realloc(curaddr, newsize, type, flags)
520 void *curaddr;
521 unsigned long newsize;
522 int type, flags;
523 {
524 register struct kmemusage *kup;
525 long cursize;
526 void *newaddr;
527 #ifdef DIAGNOSTIC
528 long alloc;
529 #endif
530
531 /*
532 * Realloc() with a NULL pointer is the same as malloc().
533 */
534 if (curaddr == NULL)
535 return (malloc(newsize, type, flags));
536
537 /*
538 * Realloc() with zero size is the same as free().
539 */
540 if (newsize == 0) {
541 free(curaddr, type);
542 return (NULL);
543 }
544
545 /*
546 * Find out how large the old allocation was (and do some
547 * sanity checking).
548 */
549 kup = btokup(curaddr);
550 cursize = 1 << kup->ku_indx;
551
552 #ifdef DIAGNOSTIC
553 /*
554 * Check for returns of data that do not point to the
555 * beginning of the allocation.
556 */
557 if (cursize > NBPG * CLSIZE)
558 alloc = addrmask[BUCKETINDX(NBPG * CLSIZE)];
559 else
560 alloc = addrmask[kup->ku_indx];
561 if (((u_long)curaddr & alloc) != 0)
562 panic("realloc: unaligned addr %p, size %ld, type %s, mask %ld\n",
563 curaddr, cursize, memname[type], alloc);
564 #endif /* DIAGNOSTIC */
565
566 if (cursize > MAXALLOCSAVE)
567 cursize = ctob(kup->ku_pagecnt);
568
569 /*
570 * If we already actually have as much as they want, we're done.
571 */
572 if (newsize <= cursize)
573 return (curaddr);
574
575 /*
576 * Can't satisfy the allocation with the existing block.
577 * Allocate a new one and copy the data.
578 */
579 newaddr = malloc(newsize, type, flags);
580 if (newaddr == NULL) {
581 /*
582 * Malloc() failed, because flags included M_NOWAIT.
583 * Return NULL to indicate that failure. The old
584 * pointer is still valid.
585 */
586 return NULL;
587 }
588 bcopy(curaddr, newaddr, cursize);
589
590 /*
591 * We were successful: free the old allocation and return
592 * the new one.
593 */
594 free(curaddr, type);
595 return (newaddr);
596 }
597
/*
 * Initialize the kernel memory allocator:  carve the kmem submap out
 * of kernel_map, allocate the kmemusage array covering it, and (under
 * KMEMSTATS) set up per-bucket element counts and per-type limits.
 */
void
kmeminit()
{
#ifdef KMEMSTATS
	register long indx;
#endif
	int npg;

	/*
	 * The ERROR! lines are deliberately not valid C: if any of the
	 * #if conditions holds, compilation fails on a line naming the
	 * bad MAXALLOCSAVE configuration.
	 */
#if	((MAXALLOCSAVE & (MAXALLOCSAVE - 1)) != 0)
		ERROR!_kmeminit:_MAXALLOCSAVE_not_power_of_2
#endif
#if	(MAXALLOCSAVE > MINALLOCSIZE * 32768)
		ERROR!_kmeminit:_MAXALLOCSAVE_too_big
#endif
#if	(MAXALLOCSAVE < CLBYTES)
		ERROR!_kmeminit:_MAXALLOCSAVE_too_small
#endif

	/* Every bucket element must be able to hold a freelist header. */
	if (sizeof(struct freelist) > (1 << MINBUCKET))
		panic("minbucket too small/struct freelist too big");

	npg = VM_KMEM_SIZE/ NBPG;
#if defined(UVM)
	kmemusage = (struct kmemusage *) uvm_km_zalloc(kernel_map,
		(vm_size_t)(npg * sizeof(struct kmemusage)));
	kmem_map = uvm_km_suballoc(kernel_map, (vm_offset_t *)&kmembase,
		(vm_offset_t *)&kmemlimit, (vm_size_t)(npg * NBPG),
			FALSE, FALSE, &kmem_map_store);
#else
	kmemusage = (struct kmemusage *) kmem_alloc(kernel_map,
		(vm_size_t)(npg * sizeof(struct kmemusage)));
	kmem_map = kmem_suballoc(kernel_map, (vm_offset_t *)&kmembase,
		(vm_offset_t *)&kmemlimit, (vm_size_t)(npg * NBPG), FALSE);
#endif
#ifdef KMEMSTATS
	/* Elements per "cluster" (CLBYTES) and the free high-water mark. */
	for (indx = 0; indx < MINBUCKET + 16; indx++) {
		if (1 << indx >= CLBYTES)
			bucket[indx].kb_elmpercl = 1;
		else
			bucket[indx].kb_elmpercl = CLBYTES / (1 << indx);
		bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
	}
	/* Each type may use at most 60% of the kmem submap. */
	for (indx = 0; indx < M_LAST; indx++)
		kmemstats[indx].ks_limit = npg * NBPG * 6 / 10;
#endif
}
647