/*	$NetBSD: kern_malloc.c,v 1.24 1997/02/02 21:22:16 thorpej Exp $	*/

/*
 * Copyright 1996 Christopher G. Demetriou.  All rights reserved.
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
 */
38
39 #include <sys/param.h>
40 #include <sys/proc.h>
41 #include <sys/map.h>
42 #include <sys/kernel.h>
43 #include <sys/malloc.h>
44 #include <sys/systm.h>
45
46 #include <vm/vm.h>
47 #include <vm/vm_kern.h>
48
49 #include "opt_kmemstats.h"
50
/*
 * Allocator state: one free-list bucket per power-of-two size class,
 * per-type usage statistics, per-page usage records, and the bounds
 * of the submap this allocator carves its memory from.
 */
struct kmembuckets bucket[MINBUCKET + 16];	/* free lists, indexed by BUCKETINDX(size) */
struct kmemstats kmemstats[M_LAST];		/* per-type (M_*) usage statistics */
struct kmemusage *kmemusage;			/* per-page records; see btokup() */
char *kmembase, *kmemlimit;			/* bounds of the kmem_map arena */
char *memname[] = INITKMEMNAMES;		/* printable names for the M_* types */
56
#ifdef DIAGNOSTIC
/*
 * This structure provides a set of masks to catch unaligned frees.
 * addrmask[i] has the low i bits set; a correctly aligned pointer
 * into bucket i has none of those bits set.
 */
long addrmask[] = { 0,
	0x00000001, 0x00000003, 0x00000007, 0x0000000f,
	0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
	0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
	0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
};

/*
 * The WEIRD_ADDR is used as known text to copy into free objects so
 * that modifications after frees can be detected.
 */
#define WEIRD_ADDR	((unsigned) 0xdeadbeef)
#define MAX_COPY	32

/*
 * Normally the freelist structure is used only to hold the list pointer
 * for free objects.  However, when running with diagnostics, the first
 * 8 bytes of the structure is unused except for diagnostic information,
 * and the free list pointer is at offset 8 in the structure.  Since the
 * first 8 bytes is the portion of the structure most often modified, this
 * helps to detect memory reuse problems and avoid free list corruption.
 */
struct freelist {
	int32_t	spare0;		/* scribble area; WEIRD_ADDR when free */
	int16_t	type;		/* M_* type of the last owner */
	int16_t	spare1;
	caddr_t	next;		/* next free object in the bucket */
};
#else /* !DIAGNOSTIC */
struct freelist {
	caddr_t	next;		/* next free object in the bucket */
};
#endif /* DIAGNOSTIC */
94
95 /*
96 * Allocate a block of memory
97 */
98 void *
99 malloc(size, type, flags)
100 unsigned long size;
101 int type, flags;
102 {
103 register struct kmembuckets *kbp;
104 register struct kmemusage *kup;
105 register struct freelist *freep;
106 long indx, npg, allocsize;
107 int s;
108 caddr_t va, cp, savedlist;
109 #ifdef DIAGNOSTIC
110 int32_t *end, *lp;
111 int copysize;
112 char *savedtype;
113 #endif
114 #ifdef KMEMSTATS
115 register struct kmemstats *ksp = &kmemstats[type];
116
117 if (((unsigned long)type) > M_LAST)
118 panic("malloc - bogus type");
119 #endif
120 indx = BUCKETINDX(size);
121 kbp = &bucket[indx];
122 s = splimp();
123 #ifdef KMEMSTATS
124 while (ksp->ks_memuse >= ksp->ks_limit) {
125 if (flags & M_NOWAIT) {
126 splx(s);
127 return ((void *) NULL);
128 }
129 if (ksp->ks_limblocks < 65535)
130 ksp->ks_limblocks++;
131 tsleep((caddr_t)ksp, PSWP+2, memname[type], 0);
132 }
133 ksp->ks_size |= 1 << indx;
134 #endif
135 #ifdef DIAGNOSTIC
136 copysize = 1 << indx < MAX_COPY ? 1 << indx : MAX_COPY;
137 #endif
138 if (kbp->kb_next == NULL) {
139 kbp->kb_last = NULL;
140 if (size > MAXALLOCSAVE)
141 allocsize = roundup(size, CLBYTES);
142 else
143 allocsize = 1 << indx;
144 npg = clrnd(btoc(allocsize));
145 va = (caddr_t) kmem_malloc(kmem_map, (vm_size_t)ctob(npg),
146 !(flags & M_NOWAIT));
147 if (va == NULL) {
148 /*
149 * Kmem_malloc() can return NULL, even if it can
150 * wait, if there is no map space avaiable, because
151 * it can't fix that problem. Neither can we,
152 * right now. (We should release pages which
153 * are completely free and which are in buckets
154 * with too many free elements.)
155 */
156 if ((flags & M_NOWAIT) == 0)
157 panic("malloc: out of space in kmem_map");
158 splx(s);
159 return ((void *) NULL);
160 }
161 #ifdef KMEMSTATS
162 kbp->kb_total += kbp->kb_elmpercl;
163 #endif
164 kup = btokup(va);
165 kup->ku_indx = indx;
166 if (allocsize > MAXALLOCSAVE) {
167 if (npg > 65535)
168 panic("malloc: allocation too large");
169 kup->ku_pagecnt = npg;
170 #ifdef KMEMSTATS
171 ksp->ks_memuse += allocsize;
172 #endif
173 goto out;
174 }
175 #ifdef KMEMSTATS
176 kup->ku_freecnt = kbp->kb_elmpercl;
177 kbp->kb_totalfree += kbp->kb_elmpercl;
178 #endif
179 /*
180 * Just in case we blocked while allocating memory,
181 * and someone else also allocated memory for this
182 * bucket, don't assume the list is still empty.
183 */
184 savedlist = kbp->kb_next;
185 kbp->kb_next = cp = va + (npg * NBPG) - allocsize;
186 for (;;) {
187 freep = (struct freelist *)cp;
188 #ifdef DIAGNOSTIC
189 /*
190 * Copy in known text to detect modification
191 * after freeing.
192 */
193 end = (int32_t *)&cp[copysize];
194 for (lp = (int32_t *)cp; lp < end; lp++)
195 *lp = WEIRD_ADDR;
196 freep->type = M_FREE;
197 #endif /* DIAGNOSTIC */
198 if (cp <= va)
199 break;
200 cp -= allocsize;
201 freep->next = cp;
202 }
203 freep->next = savedlist;
204 if (kbp->kb_last == NULL)
205 kbp->kb_last = (caddr_t)freep;
206 }
207 va = kbp->kb_next;
208 kbp->kb_next = ((struct freelist *)va)->next;
209 #ifdef DIAGNOSTIC
210 freep = (struct freelist *)va;
211 savedtype = (unsigned)freep->type < M_LAST ?
212 memname[freep->type] : "???";
213 if (kbp->kb_next &&
214 !kernacc(kbp->kb_next, sizeof(struct freelist), 0)) {
215 printf(
216 "%s %ld of object %p size %ld %s %s (invalid addr %p)\n",
217 "Data modified on freelist: word",
218 (long)((int32_t *)&kbp->kb_next - (int32_t *)kbp),
219 va, size, "previous type", savedtype, kbp->kb_next);
220 kbp->kb_next = NULL;
221 }
222
223 /* Fill the fields that we've used with WEIRD_ADDR */
224 #if BYTE_ORDER == BIG_ENDIAN
225 freep->type = WEIRD_ADDR >> 16;
226 #endif
227 #if BYTE_ORDER == LITTLE_ENDIAN
228 freep->type = (short)WEIRD_ADDR;
229 #endif
230 end = (int32_t *)&freep->next +
231 (sizeof(freep->next) / sizeof(int32_t));
232 for (lp = (int32_t *)&freep->next; lp < end; lp++)
233 *lp = WEIRD_ADDR;
234
235 /* and check that the data hasn't been modified. */
236 end = (int32_t *)&va[copysize];
237 for (lp = (int32_t *)va; lp < end; lp++) {
238 if (*lp == WEIRD_ADDR)
239 continue;
240 printf("%s %ld of object %p size %ld %s %s (0x%x != 0x%x)\n",
241 "Data modified on freelist: word",
242 (long)(lp - (int32_t *)va), va, size, "previous type",
243 savedtype, *lp, WEIRD_ADDR);
244 break;
245 }
246
247 freep->spare0 = 0;
248 #endif /* DIAGNOSTIC */
249 #ifdef KMEMSTATS
250 kup = btokup(va);
251 if (kup->ku_indx != indx)
252 panic("malloc: wrong bucket");
253 if (kup->ku_freecnt == 0)
254 panic("malloc: lost data");
255 kup->ku_freecnt--;
256 kbp->kb_totalfree--;
257 ksp->ks_memuse += 1 << indx;
258 out:
259 kbp->kb_calls++;
260 ksp->ks_inuse++;
261 ksp->ks_calls++;
262 if (ksp->ks_memuse > ksp->ks_maxused)
263 ksp->ks_maxused = ksp->ks_memuse;
264 #else
265 out:
266 #endif
267 splx(s);
268 return ((void *) va);
269 }
270
/*
 * Free a block of memory allocated by malloc.
 *
 * addr must be the exact pointer malloc() returned, and type the M_*
 * tag it was allocated with.  Small blocks are appended to the tail of
 * their bucket's free list; oversize blocks go straight back to the VM.
 * Runs at splimp() to exclude interrupt-level malloc/free.
 */
void
free(addr, type)
	void *addr;
	int type;
{
	register struct kmembuckets *kbp;
	register struct kmemusage *kup;
	register struct freelist *freep;
	long size;
	int s;
#ifdef DIAGNOSTIC
	caddr_t cp;
	int32_t *end, *lp;
	long alloc, copysize;
#endif
#ifdef KMEMSTATS
	register struct kmemstats *ksp = &kmemstats[type];
#endif

	/* Recover the size class from the per-page usage record. */
	kup = btokup(addr);
	size = 1 << kup->ku_indx;
	kbp = &bucket[kup->ku_indx];
	s = splimp();
#ifdef DIAGNOSTIC
	/*
	 * Check for returns of data that do not point to the
	 * beginning of the allocation.
	 */
	if (size > NBPG * CLSIZE)
		alloc = addrmask[BUCKETINDX(NBPG * CLSIZE)];
	else
		alloc = addrmask[kup->ku_indx];
	if (((u_long)addr & alloc) != 0)
		panic("free: unaligned addr %p, size %ld, type %s, mask %ld\n",
		    addr, size, memname[type], alloc);
#endif /* DIAGNOSTIC */
	if (size > MAXALLOCSAVE) {
		/* Oversize block: its true length is a page count. */
		kmem_free(kmem_map, (vm_offset_t)addr, ctob(kup->ku_pagecnt));
#ifdef KMEMSTATS
		size = kup->ku_pagecnt << PGSHIFT;
		ksp->ks_memuse -= size;
		kup->ku_indx = 0;
		kup->ku_pagecnt = 0;
		/* Wake limit-sleepers only if this free crossed the limit. */
		if (ksp->ks_memuse + size >= ksp->ks_limit &&
		    ksp->ks_memuse < ksp->ks_limit)
			wakeup((caddr_t)ksp);
		ksp->ks_inuse--;
		kbp->kb_total -= 1;
#endif
		splx(s);
		return;
	}
	freep = (struct freelist *)addr;
#ifdef DIAGNOSTIC
	/*
	 * Check for multiple frees. Use a quick check to see if
	 * it looks free before laboriously searching the freelist.
	 */
	if (freep->spare0 == WEIRD_ADDR) {
		for (cp = kbp->kb_next; cp;
		    cp = ((struct freelist *)cp)->next) {
			if (addr != cp)
				continue;
			printf("multiply freed item %p\n", addr);
			panic("free: duplicated free");
		}
	}
	/*
	 * Copy in known text to detect modification after freeing
	 * and to make it look free. Also, save the type being freed
	 * so we can list likely culprit if modification is detected
	 * when the object is reallocated.
	 */
	copysize = size < MAX_COPY ? size : MAX_COPY;
	end = (int32_t *)&((caddr_t)addr)[copysize];
	for (lp = (int32_t *)addr; lp < end; lp++)
		*lp = WEIRD_ADDR;
	freep->type = type;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup->ku_freecnt++;
	/*
	 * NOTE: the "else" below binds to the INNER "if" (dangling
	 * else, intentional): a page whose free count exceeds the
	 * per-cluster element count was freed twice; one that exactly
	 * reaches it is all-free and, if the bucket is over its
	 * high-water mark, counts as releasable.
	 */
	if (kup->ku_freecnt >= kbp->kb_elmpercl)
		if (kup->ku_freecnt > kbp->kb_elmpercl)
			panic("free: multiple frees");
		else if (kbp->kb_totalfree > kbp->kb_highwat)
			kbp->kb_couldfree++;
	kbp->kb_totalfree++;
	ksp->ks_memuse -= size;
	/* Wake limit-sleepers only if this free crossed the limit. */
	if (ksp->ks_memuse + size >= ksp->ks_limit &&
	    ksp->ks_memuse < ksp->ks_limit)
		wakeup((caddr_t)ksp);
	ksp->ks_inuse--;
#endif
	/* Append to the tail of the bucket's free list (FIFO reuse). */
	if (kbp->kb_next == NULL)
		kbp->kb_next = addr;
	else
		((struct freelist *)kbp->kb_last)->next = addr;
	freep->next = NULL;
	kbp->kb_last = addr;
	splx(s);
}
375
376 /*
377 * Change the size of a block of memory.
378 */
379 void *
380 realloc(curaddr, newsize, type, flags)
381 void *curaddr;
382 unsigned long newsize;
383 int type, flags;
384 {
385 register struct kmemusage *kup;
386 long cursize;
387 void *newaddr;
388 #ifdef DIAGNOSTIC
389 long alloc;
390 #endif
391
392 /*
393 * Realloc() with a NULL pointer is the same as malloc().
394 */
395 if (curaddr == NULL)
396 return (malloc(newsize, type, flags));
397
398 /*
399 * Realloc() with zero size is the same as free().
400 */
401 if (newsize == 0) {
402 free(curaddr, type);
403 return (NULL);
404 }
405
406 /*
407 * Find out how large the old allocation was (and do some
408 * sanity checking).
409 */
410 kup = btokup(curaddr);
411 cursize = 1 << kup->ku_indx;
412
413 #ifdef DIAGNOSTIC
414 /*
415 * Check for returns of data that do not point to the
416 * beginning of the allocation.
417 */
418 if (cursize > NBPG * CLSIZE)
419 alloc = addrmask[BUCKETINDX(NBPG * CLSIZE)];
420 else
421 alloc = addrmask[kup->ku_indx];
422 if (((u_long)curaddr & alloc) != 0)
423 panic("realloc: unaligned addr %p, size %ld, type %s, mask %ld\n",
424 curaddr, cursize, memname[type], alloc);
425 #endif /* DIAGNOSTIC */
426
427 if (cursize > MAXALLOCSAVE)
428 cursize = ctob(kup->ku_pagecnt);
429
430 /*
431 * If we already actually have as much as they want, we're done.
432 */
433 if (newsize <= cursize)
434 return (curaddr);
435
436 /*
437 * Can't satisfy the allocation with the existing block.
438 * Allocate a new one and copy the data.
439 */
440 newaddr = malloc(newsize, type, flags);
441 if (newaddr == NULL) {
442 /*
443 * Malloc() failed, because flags included M_NOWAIT.
444 * Return NULL to indicate that failure. The old
445 * pointer is still valid.
446 */
447 return NULL;
448 }
449 bcopy(curaddr, newaddr, cursize);
450
451 /*
452 * We were successful: free the old allocation and return
453 * the new one.
454 */
455 free(curaddr, type);
456 return (newaddr);
457 }
458
/*
 * Initialize the kernel memory allocator
 *
 * Sizes the kmemusage array, carves the kmem_map submap out of
 * kernel_map (which also sets kmembase/kmemlimit), and, with
 * KMEMSTATS, seeds per-bucket element counts, high-water marks,
 * and the default per-type usage limit.
 */
void
kmeminit()
{
#ifdef KMEMSTATS
	register long indx;
#endif
	int npg;	/* size of the kmem arena, in pages */

	/*
	 * Compile-time sanity checks: each bad MAXALLOCSAVE
	 * configuration expands to a bare identifier, forcing
	 * a compiler error with a readable name.
	 */
#if ((MAXALLOCSAVE & (MAXALLOCSAVE - 1)) != 0)
		ERROR!_kmeminit:_MAXALLOCSAVE_not_power_of_2
#endif
#if (MAXALLOCSAVE > MINALLOCSIZE * 32768)
		ERROR!_kmeminit:_MAXALLOCSAVE_too_big
#endif
#if (MAXALLOCSAVE < CLBYTES)
		ERROR!_kmeminit:_MAXALLOCSAVE_too_small
#endif

	/* The smallest bucket must still hold a freelist header. */
	if (sizeof(struct freelist) > (1 << MINBUCKET))
		panic("minbucket too small/struct freelist too big");

	npg = VM_KMEM_SIZE/ NBPG;
	/* One kmemusage record per page of the arena (used by btokup()). */
	kmemusage = (struct kmemusage *) kmem_alloc(kernel_map,
		(vm_size_t)(npg * sizeof(struct kmemusage)));
	kmem_map = kmem_suballoc(kernel_map, (vm_offset_t *)&kmembase,
		(vm_offset_t *)&kmemlimit, (vm_size_t)(npg * NBPG), FALSE);
#ifdef KMEMSTATS
	for (indx = 0; indx < MINBUCKET + 16; indx++) {
		/* Elements per cluster: at least one per CLBYTES chunk. */
		if (1 << indx >= CLBYTES)
			bucket[indx].kb_elmpercl = 1;
		else
			bucket[indx].kb_elmpercl = CLBYTES / (1 << indx);
		bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
	}
	/* Default per-type limit: 60% of the whole kmem arena. */
	for (indx = 0; indx < M_LAST; indx++)
		kmemstats[indx].ks_limit = npg * NBPG * 6 / 10;
#endif
}
500