kern_malloc.c revision 1.64

1 1.64 lukem	/*	$NetBSD: kern_malloc.c,v 1.64 2001/11/12 15:25:12 lukem Exp $	*/
2 1.9 cgd
3 1.1 cgd /*
4 1.37 christos * Copyright (c) 1996 Christopher G. Demetriou. All rights reserved.
5 1.8 cgd * Copyright (c) 1987, 1991, 1993
6 1.8 cgd * The Regents of the University of California. All rights reserved.
7 1.1 cgd *
8 1.1 cgd * Redistribution and use in source and binary forms, with or without
9 1.1 cgd * modification, are permitted provided that the following conditions
10 1.1 cgd * are met:
11 1.1 cgd * 1. Redistributions of source code must retain the above copyright
12 1.1 cgd * notice, this list of conditions and the following disclaimer.
13 1.1 cgd * 2. Redistributions in binary form must reproduce the above copyright
14 1.1 cgd * notice, this list of conditions and the following disclaimer in the
15 1.1 cgd * documentation and/or other materials provided with the distribution.
16 1.1 cgd * 3. All advertising materials mentioning features or use of this software
17 1.1 cgd * must display the following acknowledgement:
18 1.1 cgd * This product includes software developed by the University of
19 1.1 cgd * California, Berkeley and its contributors.
20 1.1 cgd * 4. Neither the name of the University nor the names of its contributors
21 1.1 cgd * may be used to endorse or promote products derived from this software
22 1.1 cgd * without specific prior written permission.
23 1.1 cgd *
24 1.1 cgd * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 1.1 cgd * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 1.1 cgd * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 1.1 cgd * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 1.1 cgd * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 1.1 cgd * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 1.1 cgd * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 1.1 cgd * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 1.1 cgd * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 1.1 cgd * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 1.1 cgd * SUCH DAMAGE.
35 1.1 cgd *
36 1.32 fvdl * @(#)kern_malloc.c 8.4 (Berkeley) 5/20/95
37 1.1 cgd */
38 1.64 lukem
39 1.64 lukem #include <sys/cdefs.h>
40 1.64 lukem __KERNEL_RCSID(0, "$NetBSD: kern_malloc.c,v 1.64 2001/11/12 15:25:12 lukem Exp $");
41 1.31 mrg
42 1.33 thorpej #include "opt_lockdebug.h"
43 1.1 cgd
44 1.7 mycroft #include <sys/param.h>
45 1.7 mycroft #include <sys/proc.h>
46 1.8 cgd #include <sys/map.h>
47 1.7 mycroft #include <sys/kernel.h>
48 1.7 mycroft #include <sys/malloc.h>
49 1.12 christos #include <sys/systm.h>
50 1.24 thorpej
51 1.28 mrg #include <uvm/uvm_extern.h>
52 1.28 mrg
53 1.61 thorpej static struct vm_map kmem_map_store;
54 1.58 chs struct vm_map *kmem_map = NULL;
55 1.28 mrg
56 1.49 thorpej #include "opt_kmempages.h"
57 1.49 thorpej
58 1.49 thorpej #ifdef NKMEMCLUSTERS
59 1.52 sommerfe #error NKMEMCLUSTERS is obsolete; remove it from your kernel config file and use NKMEMPAGES instead or let the kernel auto-size
60 1.49 thorpej #endif
61 1.49 thorpej
62 1.49 thorpej /*
63 1.49 thorpej * Default number of pages in kmem_map. We attempt to calculate this
64 1.49 thorpej * at run-time, but allow it to be either patched or set in the kernel
65 1.49 thorpej * config file.
66 1.49 thorpej */
67 1.49 thorpej #ifndef NKMEMPAGES
68 1.49 thorpej #define NKMEMPAGES 0
69 1.49 thorpej #endif
70 1.49 thorpej int nkmempages = NKMEMPAGES;
71 1.49 thorpej
72 1.49 thorpej /*
73 1.49 thorpej * Defaults for lower- and upper-bounds for the kmem_map page count.
74 1.49 thorpej * Can be overridden by kernel config options.
75 1.49 thorpej */
76 1.49 thorpej #ifndef NKMEMPAGES_MIN
77 1.49 thorpej #define NKMEMPAGES_MIN NKMEMPAGES_MIN_DEFAULT
78 1.49 thorpej #endif
79 1.49 thorpej
80 1.49 thorpej #ifndef NKMEMPAGES_MAX
81 1.49 thorpej #define NKMEMPAGES_MAX NKMEMPAGES_MAX_DEFAULT
82 1.49 thorpej #endif
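/*
 * Illustrative example (not part of the original file): the page count is
 * normally auto-sized (NKMEMPAGES defaults to 0 above), but it can be fixed
 * from a kernel configuration file, e.g. on a port with 4 KB pages an
 * explicit 32 MB kmem_map would be requested with:
 *
 *	options NKMEMPAGES=8192
 *
 * The lower and upper clamps above can likewise be overridden with
 * NKMEMPAGES_MIN and NKMEMPAGES_MAX options.
 */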
83 1.49 thorpej
84 1.24 thorpej #include "opt_kmemstats.h"
85 1.27 thorpej #include "opt_malloclog.h"
86 1.12 christos
87 1.1 cgd struct kmembuckets bucket[MINBUCKET + 16];
88 1.8 cgd struct kmemstats kmemstats[M_LAST];
89 1.1 cgd struct kmemusage *kmemusage;
90 1.1 cgd char *kmembase, *kmemlimit;
91 1.57 jdolecek const char * const memname[] = INITKMEMNAMES;
92 1.1 cgd
93 1.27 thorpej #ifdef MALLOCLOG
94 1.27 thorpej #ifndef MALLOCLOGSIZE
95 1.27 thorpej #define MALLOCLOGSIZE 100000
96 1.27 thorpej #endif
97 1.27 thorpej
98 1.27 thorpej struct malloclog {
99 1.27 thorpej void *addr;
100 1.27 thorpej long size;
101 1.27 thorpej int type;
102 1.27 thorpej int action;
103 1.27 thorpej const char *file;
104 1.27 thorpej long line;
105 1.27 thorpej } malloclog[MALLOCLOGSIZE];
106 1.27 thorpej
107 1.27 thorpej long malloclogptr;
108 1.27 thorpej
109 1.27 thorpej static void domlog __P((void *a, long size, int type, int action,
110 1.27 thorpej const char *file, long line));
111 1.27 thorpej static void hitmlog __P((void *a));
112 1.27 thorpej
113 1.27 thorpej static void
114 1.27 thorpej domlog(a, size, type, action, file, line)
115 1.27 thorpej void *a;
116 1.27 thorpej long size;
117 1.27 thorpej int type;
118 1.27 thorpej int action;
119 1.27 thorpej const char *file;
120 1.27 thorpej long line;
121 1.27 thorpej {
122 1.27 thorpej
123 1.27 thorpej malloclog[malloclogptr].addr = a;
124 1.27 thorpej malloclog[malloclogptr].size = size;
125 1.27 thorpej malloclog[malloclogptr].type = type;
126 1.27 thorpej malloclog[malloclogptr].action = action;
127 1.27 thorpej malloclog[malloclogptr].file = file;
128 1.27 thorpej malloclog[malloclogptr].line = line;
129 1.27 thorpej malloclogptr++;
130 1.27 thorpej if (malloclogptr >= MALLOCLOGSIZE)
131 1.27 thorpej malloclogptr = 0;
132 1.27 thorpej }
133 1.27 thorpej
134 1.27 thorpej static void
135 1.27 thorpej hitmlog(a)
136 1.27 thorpej void *a;
137 1.27 thorpej {
138 1.27 thorpej struct malloclog *lp;
139 1.27 thorpej long l;
140 1.27 thorpej
141 1.27 thorpej #define PRT \
142 1.27 thorpej if (malloclog[l].addr == a && malloclog[l].action) { \
143 1.27 thorpej lp = &malloclog[l]; \
144 1.27 thorpej printf("malloc log entry %ld:\n", l); \
145 1.27 thorpej printf("\taddr = %p\n", lp->addr); \
146 1.27 thorpej printf("\tsize = %ld\n", lp->size); \
147 1.27 thorpej printf("\ttype = %s\n", memname[lp->type]); \
148 1.27 thorpej printf("\taction = %s\n", lp->action == 1 ? "alloc" : "free"); \
149 1.27 thorpej printf("\tfile = %s\n", lp->file); \
150 1.27 thorpej printf("\tline = %ld\n", lp->line); \
151 1.27 thorpej }
152 1.27 thorpej
153 1.27 thorpej for (l = malloclogptr; l < MALLOCLOGSIZE; l++)
154 1.27 thorpej PRT
155 1.27 thorpej
156 1.27 thorpej for (l = 0; l < malloclogptr; l++)
157 1.27 thorpej PRT
158 1.27 thorpej }
159 1.27 thorpej #endif /* MALLOCLOG */
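/*
 * Illustrative note (not part of the original file): the logging code above
 * is only compiled in when the kernel configuration enables it, e.g.
 *
 *	options MALLOCLOG
 *
 * (MALLOCLOGSIZE may also be overridden at build time if the port's option
 * files expose it; otherwise the 100000-entry default above applies.)
 * Each malloc()/free() then records its address, size, type, action and the
 * caller's file/line into the circular malloclog[] array, and hitmlog()
 * dumps the entries that mention a suspect address.
 */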
160 1.27 thorpej
161 1.8 cgd #ifdef DIAGNOSTIC
162 1.8 cgd /*
163 1.8 cgd  * This array provides a set of masks to catch unaligned frees.
164 1.8 cgd */
165 1.57 jdolecek const long addrmask[] = { 0,
166 1.8 cgd 0x00000001, 0x00000003, 0x00000007, 0x0000000f,
167 1.8 cgd 0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
168 1.8 cgd 0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
169 1.8 cgd 0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
170 1.8 cgd };
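/*
 * A small worked example of how these masks are used (illustrative, not
 * part of the original file): free() looks up the object's bucket index and
 * checks the address against addrmask[indx], which is (1 << indx) - 1.
 * For a 64-byte bucket (indx 6) the mask is 0x3f, so freeing a pointer
 * 16 bytes into the object leaves (addr & 0x3f) == 16 and triggers the
 * "free: unaligned addr" panic in free() below.
 */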
171 1.8 cgd
172 1.8 cgd /*
173 1.8 cgd * The WEIRD_ADDR is used as known text to copy into free objects so
174 1.8 cgd * that modifications after frees can be detected.
175 1.8 cgd */
176 1.12 christos #define WEIRD_ADDR ((unsigned) 0xdeadbeef)
177 1.55 chs #ifdef DEBUG
178 1.55 chs #define MAX_COPY PAGE_SIZE
179 1.55 chs #else
180 1.8 cgd #define MAX_COPY 32
181 1.55 chs #endif
182 1.8 cgd
183 1.8 cgd /*
184 1.11 cgd * Normally the freelist structure is used only to hold the list pointer
185 1.11 cgd * for free objects. However, when running with diagnostics, the first
186 1.11 cgd  * 8 bytes of the structure are unused except for diagnostic information,
187 1.11 cgd  * and the free list pointer is at offset 8 in the structure. Since the
188 1.11 cgd  * first 8 bytes are the portion of the structure most often modified, this
189 1.11 cgd * helps to detect memory reuse problems and avoid free list corruption.
190 1.8 cgd */
191 1.8 cgd struct freelist {
192 1.11 cgd int32_t spare0;
193 1.11 cgd int16_t type;
194 1.11 cgd int16_t spare1;
195 1.8 cgd caddr_t next;
196 1.8 cgd };
197 1.8 cgd #else /* !DIAGNOSTIC */
198 1.8 cgd struct freelist {
199 1.8 cgd caddr_t next;
200 1.8 cgd };
201 1.8 cgd #endif /* DIAGNOSTIC */
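/*
 * Resulting layout of a free object under DIAGNOSTIC (illustrative sketch,
 * assuming no compiler padding before the link pointer):
 *
 *	offset 0:  spare0  (filled with WEIRD_ADDR on free; used as the
 *	                    quick double-free check)
 *	offset 4:  type    (the malloc type the object was last freed as)
 *	offset 6:  spare1
 *	offset 8:  next    (the actual free-list link)
 *
 * Without DIAGNOSTIC only the link pointer, at offset 0, is present.
 */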
202 1.8 cgd
203 1.1 cgd /*
204 1.1 cgd * Allocate a block of memory
205 1.1 cgd */
206 1.27 thorpej #ifdef MALLOCLOG
207 1.27 thorpej void *
208 1.27 thorpej _malloc(size, type, flags, file, line)
209 1.27 thorpej unsigned long size;
210 1.27 thorpej int type, flags;
211 1.27 thorpej const char *file;
212 1.27 thorpej long line;
213 1.27 thorpej #else
214 1.1 cgd void *
215 1.1 cgd malloc(size, type, flags)
216 1.1 cgd unsigned long size;
217 1.1 cgd int type, flags;
218 1.27 thorpej #endif /* MALLOCLOG */
219 1.1 cgd {
220 1.50 augustss struct kmembuckets *kbp;
221 1.50 augustss struct kmemusage *kup;
222 1.50 augustss struct freelist *freep;
223 1.5 andrew long indx, npg, allocsize;
224 1.1 cgd int s;
225 1.1 cgd caddr_t va, cp, savedlist;
226 1.8 cgd #ifdef DIAGNOSTIC
227 1.11 cgd int32_t *end, *lp;
228 1.8 cgd int copysize;
229 1.26 mycroft const char *savedtype;
230 1.8 cgd #endif
231 1.1 cgd #ifdef KMEMSTATS
232 1.50 augustss struct kmemstats *ksp = &kmemstats[type];
233 1.1 cgd
234 1.51 thorpej if (__predict_false(((unsigned long)type) > M_LAST))
235 1.1 cgd panic("malloc - bogus type");
236 1.1 cgd #endif
237 1.59 thorpej #ifdef LOCKDEBUG
238 1.59 thorpej if ((flags & M_NOWAIT) == 0)
239 1.59 thorpej simple_lock_only_held(NULL, "malloc");
240 1.59 thorpej #endif
241 1.62 thorpej #ifdef MALLOC_DEBUG
242 1.62 thorpej if (debug_malloc(size, type, flags, (void **) &va))
243 1.62 thorpej return ((void *) va);
244 1.62 thorpej #endif
245 1.1 cgd indx = BUCKETINDX(size);
246 1.1 cgd kbp = &bucket[indx];
247 1.56 thorpej s = splvm();
248 1.1 cgd #ifdef KMEMSTATS
249 1.1 cgd while (ksp->ks_memuse >= ksp->ks_limit) {
250 1.1 cgd if (flags & M_NOWAIT) {
251 1.1 cgd splx(s);
252 1.1 cgd return ((void *) NULL);
253 1.1 cgd }
254 1.1 cgd if (ksp->ks_limblocks < 65535)
255 1.1 cgd ksp->ks_limblocks++;
256 1.1 cgd tsleep((caddr_t)ksp, PSWP+2, memname[type], 0);
257 1.1 cgd }
258 1.8 cgd ksp->ks_size |= 1 << indx;
259 1.8 cgd #endif
260 1.8 cgd #ifdef DIAGNOSTIC
261 1.8 cgd copysize = 1 << indx < MAX_COPY ? 1 << indx : MAX_COPY;
262 1.1 cgd #endif
263 1.1 cgd if (kbp->kb_next == NULL) {
264 1.8 cgd kbp->kb_last = NULL;
265 1.1 cgd if (size > MAXALLOCSAVE)
266 1.49 thorpej allocsize = roundup(size, PAGE_SIZE);
267 1.1 cgd else
268 1.1 cgd allocsize = 1 << indx;
269 1.47 ragge npg = btoc(allocsize);
270 1.63 chs va = (caddr_t) uvm_km_kmemalloc(kmem_map, NULL,
271 1.63 chs (vsize_t)ctob(npg),
272 1.28 mrg (flags & M_NOWAIT) ? UVM_KMF_NOWAIT : 0);
273 1.51 thorpej if (__predict_false(va == NULL)) {
274 1.17 cgd /*
275 1.17 cgd 			 * uvm_km_kmemalloc() can return NULL, even if it can
276 1.17 cgd 			 * wait, if there is no map space available, because
277 1.17 cgd * it can't fix that problem. Neither can we,
278 1.17 cgd * right now. (We should release pages which
279 1.17 cgd * are completely free and which are in buckets
280 1.17 cgd * with too many free elements.)
281 1.17 cgd */
282 1.17 cgd if ((flags & M_NOWAIT) == 0)
283 1.17 cgd panic("malloc: out of space in kmem_map");
284 1.6 cgd splx(s);
285 1.6 cgd return ((void *) NULL);
286 1.1 cgd }
287 1.1 cgd #ifdef KMEMSTATS
288 1.1 cgd kbp->kb_total += kbp->kb_elmpercl;
289 1.1 cgd #endif
290 1.1 cgd kup = btokup(va);
291 1.1 cgd kup->ku_indx = indx;
292 1.1 cgd if (allocsize > MAXALLOCSAVE) {
293 1.1 cgd if (npg > 65535)
294 1.1 cgd panic("malloc: allocation too large");
295 1.1 cgd kup->ku_pagecnt = npg;
296 1.1 cgd #ifdef KMEMSTATS
297 1.1 cgd ksp->ks_memuse += allocsize;
298 1.1 cgd #endif
299 1.1 cgd goto out;
300 1.1 cgd }
301 1.1 cgd #ifdef KMEMSTATS
302 1.1 cgd kup->ku_freecnt = kbp->kb_elmpercl;
303 1.1 cgd kbp->kb_totalfree += kbp->kb_elmpercl;
304 1.1 cgd #endif
305 1.1 cgd /*
306 1.1 cgd * Just in case we blocked while allocating memory,
307 1.1 cgd * and someone else also allocated memory for this
308 1.1 cgd * bucket, don't assume the list is still empty.
309 1.1 cgd */
310 1.1 cgd savedlist = kbp->kb_next;
311 1.49 thorpej kbp->kb_next = cp = va + (npg << PAGE_SHIFT) - allocsize;
312 1.8 cgd for (;;) {
313 1.8 cgd freep = (struct freelist *)cp;
314 1.8 cgd #ifdef DIAGNOSTIC
315 1.8 cgd /*
316 1.8 cgd * Copy in known text to detect modification
317 1.8 cgd * after freeing.
318 1.8 cgd */
319 1.11 cgd end = (int32_t *)&cp[copysize];
320 1.11 cgd for (lp = (int32_t *)cp; lp < end; lp++)
321 1.8 cgd *lp = WEIRD_ADDR;
322 1.8 cgd freep->type = M_FREE;
323 1.8 cgd #endif /* DIAGNOSTIC */
324 1.8 cgd if (cp <= va)
325 1.8 cgd break;
326 1.8 cgd cp -= allocsize;
327 1.8 cgd freep->next = cp;
328 1.8 cgd }
329 1.8 cgd freep->next = savedlist;
330 1.8 cgd if (kbp->kb_last == NULL)
331 1.8 cgd kbp->kb_last = (caddr_t)freep;
332 1.1 cgd }
333 1.1 cgd va = kbp->kb_next;
334 1.8 cgd kbp->kb_next = ((struct freelist *)va)->next;
335 1.8 cgd #ifdef DIAGNOSTIC
336 1.8 cgd freep = (struct freelist *)va;
337 1.8 cgd savedtype = (unsigned)freep->type < M_LAST ?
338 1.8 cgd memname[freep->type] : "???";
339 1.29 chs if (kbp->kb_next) {
340 1.29 chs int rv;
341 1.35 eeh vaddr_t addr = (vaddr_t)kbp->kb_next;
342 1.29 chs
343 1.43 thorpej vm_map_lock(kmem_map);
344 1.29 chs rv = uvm_map_checkprot(kmem_map, addr,
345 1.29 chs addr + sizeof(struct freelist),
346 1.29 chs VM_PROT_WRITE);
347 1.43 thorpej vm_map_unlock(kmem_map);
348 1.29 chs
349 1.51 thorpej if (__predict_false(rv == 0)) {
350 1.41 mrg printf(
351 1.21 christos "%s %ld of object %p size %ld %s %s (invalid addr %p)\n",
352 1.41 mrg "Data modified on freelist: word",
353 1.41 mrg (long)((int32_t *)&kbp->kb_next - (int32_t *)kbp),
354 1.41 mrg va, size, "previous type", savedtype, kbp->kb_next);
355 1.27 thorpej #ifdef MALLOCLOG
356 1.41 mrg hitmlog(va);
357 1.27 thorpej #endif
358 1.41 mrg kbp->kb_next = NULL;
359 1.29 chs }
360 1.8 cgd }
361 1.11 cgd
362 1.11 cgd /* Fill the fields that we've used with WEIRD_ADDR */
363 1.8 cgd #if BYTE_ORDER == BIG_ENDIAN
364 1.8 cgd freep->type = WEIRD_ADDR >> 16;
365 1.8 cgd #endif
366 1.8 cgd #if BYTE_ORDER == LITTLE_ENDIAN
367 1.8 cgd freep->type = (short)WEIRD_ADDR;
368 1.8 cgd #endif
369 1.11 cgd end = (int32_t *)&freep->next +
370 1.11 cgd (sizeof(freep->next) / sizeof(int32_t));
371 1.11 cgd for (lp = (int32_t *)&freep->next; lp < end; lp++)
372 1.11 cgd *lp = WEIRD_ADDR;
373 1.11 cgd
374 1.11 cgd /* and check that the data hasn't been modified. */
375 1.11 cgd end = (int32_t *)&va[copysize];
376 1.11 cgd for (lp = (int32_t *)va; lp < end; lp++) {
377 1.51 thorpej if (__predict_true(*lp == WEIRD_ADDR))
378 1.8 cgd continue;
379 1.22 christos printf("%s %ld of object %p size %ld %s %s (0x%x != 0x%x)\n",
380 1.21 christos "Data modified on freelist: word",
381 1.21 christos (long)(lp - (int32_t *)va), va, size, "previous type",
382 1.21 christos savedtype, *lp, WEIRD_ADDR);
383 1.27 thorpej #ifdef MALLOCLOG
384 1.27 thorpej hitmlog(va);
385 1.27 thorpej #endif
386 1.8 cgd break;
387 1.8 cgd }
388 1.11 cgd
389 1.8 cgd freep->spare0 = 0;
390 1.8 cgd #endif /* DIAGNOSTIC */
391 1.1 cgd #ifdef KMEMSTATS
392 1.1 cgd kup = btokup(va);
393 1.1 cgd if (kup->ku_indx != indx)
394 1.1 cgd panic("malloc: wrong bucket");
395 1.1 cgd if (kup->ku_freecnt == 0)
396 1.1 cgd panic("malloc: lost data");
397 1.1 cgd kup->ku_freecnt--;
398 1.1 cgd kbp->kb_totalfree--;
399 1.1 cgd ksp->ks_memuse += 1 << indx;
400 1.1 cgd out:
401 1.1 cgd kbp->kb_calls++;
402 1.1 cgd ksp->ks_inuse++;
403 1.1 cgd ksp->ks_calls++;
404 1.1 cgd if (ksp->ks_memuse > ksp->ks_maxused)
405 1.1 cgd ksp->ks_maxused = ksp->ks_memuse;
406 1.1 cgd #else
407 1.1 cgd out:
408 1.1 cgd #endif
409 1.27 thorpej #ifdef MALLOCLOG
410 1.27 thorpej domlog(va, size, type, 1, file, line);
411 1.27 thorpej #endif
412 1.1 cgd splx(s);
413 1.1 cgd return ((void *) va);
414 1.1 cgd }
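/*
 * Illustrative usage sketch (not part of the original file; struct foo and
 * the error handling are made up): callers pass a malloc type and flags,
 * and must free to the same type, e.g.
 *
 *	struct foo *fp;
 *
 *	fp = malloc(sizeof(*fp), M_TEMP, M_NOWAIT);
 *	if (fp == NULL)
 *		return (ENOMEM);
 *	...
 *	free(fp, M_TEMP);
 *
 * With M_WAITOK the allocation sleeps on the type's limit (or panics if
 * kmem_map itself is exhausted) rather than returning NULL, so the NULL
 * check matters mainly for M_NOWAIT callers.
 */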
415 1.1 cgd
416 1.1 cgd /*
417 1.1 cgd * Free a block of memory allocated by malloc.
418 1.1 cgd */
419 1.27 thorpej #ifdef MALLOCLOG
420 1.27 thorpej void
421 1.27 thorpej _free(addr, type, file, line)
422 1.27 thorpej void *addr;
423 1.27 thorpej int type;
424 1.27 thorpej const char *file;
425 1.27 thorpej long line;
426 1.27 thorpej #else
427 1.1 cgd void
428 1.1 cgd free(addr, type)
429 1.1 cgd void *addr;
430 1.1 cgd int type;
431 1.27 thorpej #endif /* MALLOCLOG */
432 1.1 cgd {
433 1.50 augustss struct kmembuckets *kbp;
434 1.50 augustss struct kmemusage *kup;
435 1.50 augustss struct freelist *freep;
436 1.8 cgd long size;
437 1.8 cgd int s;
438 1.5 andrew #ifdef DIAGNOSTIC
439 1.8 cgd caddr_t cp;
440 1.11 cgd int32_t *end, *lp;
441 1.11 cgd long alloc, copysize;
442 1.5 andrew #endif
443 1.1 cgd #ifdef KMEMSTATS
444 1.50 augustss struct kmemstats *ksp = &kmemstats[type];
445 1.48 thorpej #endif
446 1.48 thorpej
447 1.62 thorpej #ifdef MALLOC_DEBUG
448 1.62 thorpej if (debug_free(addr, type))
449 1.62 thorpej return;
450 1.62 thorpej #endif
451 1.62 thorpej
452 1.48 thorpej #ifdef DIAGNOSTIC
453 1.48 thorpej /*
454 1.48 thorpej * Ensure that we're free'ing something that we could
455 1.48 thorpej * have allocated in the first place. That is, check
456 1.48 thorpej * to see that the address is within kmem_map.
457 1.48 thorpej */
458 1.51 thorpej if (__predict_false((vaddr_t)addr < kmem_map->header.start ||
459 1.51 thorpej (vaddr_t)addr >= kmem_map->header.end))
460 1.48 thorpej panic("free: addr %p not within kmem_map", addr);
461 1.1 cgd #endif
462 1.1 cgd
463 1.1 cgd kup = btokup(addr);
464 1.1 cgd size = 1 << kup->ku_indx;
465 1.8 cgd kbp = &bucket[kup->ku_indx];
466 1.56 thorpej s = splvm();
467 1.27 thorpej #ifdef MALLOCLOG
468 1.27 thorpej domlog(addr, 0, type, 2, file, line);
469 1.27 thorpej #endif
470 1.1 cgd #ifdef DIAGNOSTIC
471 1.8 cgd /*
472 1.8 cgd * Check for returns of data that do not point to the
473 1.8 cgd * beginning of the allocation.
474 1.8 cgd */
475 1.49 thorpej if (size > PAGE_SIZE)
476 1.49 thorpej alloc = addrmask[BUCKETINDX(PAGE_SIZE)];
477 1.1 cgd else
478 1.1 cgd alloc = addrmask[kup->ku_indx];
479 1.8 cgd if (((u_long)addr & alloc) != 0)
480 1.15 christos panic("free: unaligned addr %p, size %ld, type %s, mask %ld\n",
481 1.8 cgd addr, size, memname[type], alloc);
482 1.1 cgd #endif /* DIAGNOSTIC */
483 1.1 cgd if (size > MAXALLOCSAVE) {
484 1.35 eeh uvm_km_free(kmem_map, (vaddr_t)addr, ctob(kup->ku_pagecnt));
485 1.1 cgd #ifdef KMEMSTATS
486 1.1 cgd size = kup->ku_pagecnt << PGSHIFT;
487 1.1 cgd ksp->ks_memuse -= size;
488 1.1 cgd kup->ku_indx = 0;
489 1.1 cgd kup->ku_pagecnt = 0;
490 1.1 cgd if (ksp->ks_memuse + size >= ksp->ks_limit &&
491 1.1 cgd ksp->ks_memuse < ksp->ks_limit)
492 1.1 cgd wakeup((caddr_t)ksp);
493 1.1 cgd ksp->ks_inuse--;
494 1.1 cgd kbp->kb_total -= 1;
495 1.1 cgd #endif
496 1.1 cgd splx(s);
497 1.1 cgd return;
498 1.1 cgd }
499 1.8 cgd freep = (struct freelist *)addr;
500 1.8 cgd #ifdef DIAGNOSTIC
501 1.8 cgd /*
502 1.8 cgd * Check for multiple frees. Use a quick check to see if
503 1.8 cgd * it looks free before laboriously searching the freelist.
504 1.8 cgd */
505 1.51 thorpej if (__predict_false(freep->spare0 == WEIRD_ADDR)) {
506 1.16 cgd for (cp = kbp->kb_next; cp;
507 1.16 cgd cp = ((struct freelist *)cp)->next) {
508 1.8 cgd if (addr != cp)
509 1.8 cgd continue;
510 1.22 christos printf("multiply freed item %p\n", addr);
511 1.27 thorpej #ifdef MALLOCLOG
512 1.27 thorpej hitmlog(addr);
513 1.27 thorpej #endif
514 1.8 cgd panic("free: duplicated free");
515 1.8 cgd }
516 1.8 cgd }
517 1.38 chs #ifdef LOCKDEBUG
518 1.38 chs /*
519 1.38 chs * Check if we're freeing a locked simple lock.
520 1.38 chs */
521 1.40 chs simple_lock_freecheck(addr, (char *)addr + size);
522 1.38 chs #endif
523 1.8 cgd /*
524 1.8 cgd * Copy in known text to detect modification after freeing
525 1.8 cgd * and to make it look free. Also, save the type being freed
526 1.8 cgd 	 * so we can list the likely culprit if modification is detected
527 1.8 cgd * when the object is reallocated.
528 1.8 cgd */
529 1.8 cgd copysize = size < MAX_COPY ? size : MAX_COPY;
530 1.11 cgd end = (int32_t *)&((caddr_t)addr)[copysize];
531 1.11 cgd for (lp = (int32_t *)addr; lp < end; lp++)
532 1.8 cgd *lp = WEIRD_ADDR;
533 1.8 cgd freep->type = type;
534 1.8 cgd #endif /* DIAGNOSTIC */
535 1.1 cgd #ifdef KMEMSTATS
536 1.1 cgd kup->ku_freecnt++;
537 1.36 thorpej if (kup->ku_freecnt >= kbp->kb_elmpercl) {
538 1.1 cgd if (kup->ku_freecnt > kbp->kb_elmpercl)
539 1.1 cgd panic("free: multiple frees");
540 1.1 cgd else if (kbp->kb_totalfree > kbp->kb_highwat)
541 1.1 cgd kbp->kb_couldfree++;
542 1.36 thorpej }
543 1.1 cgd kbp->kb_totalfree++;
544 1.1 cgd ksp->ks_memuse -= size;
545 1.1 cgd if (ksp->ks_memuse + size >= ksp->ks_limit &&
546 1.1 cgd ksp->ks_memuse < ksp->ks_limit)
547 1.1 cgd wakeup((caddr_t)ksp);
548 1.1 cgd ksp->ks_inuse--;
549 1.1 cgd #endif
550 1.8 cgd if (kbp->kb_next == NULL)
551 1.8 cgd kbp->kb_next = addr;
552 1.8 cgd else
553 1.8 cgd ((struct freelist *)kbp->kb_last)->next = addr;
554 1.8 cgd freep->next = NULL;
555 1.8 cgd kbp->kb_last = addr;
556 1.1 cgd splx(s);
557 1.20 cgd }
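/*
 * Illustrative note (not part of the original file): with DIAGNOSTIC
 * enabled, a write through a stale pointer after free() lands on the
 * WEIRD_ADDR (0xdeadbeef) fill installed above. The corruption is not
 * caught here but at the next malloc() that hands out the same chunk,
 * which prints "Data modified on freelist: ..." along with the previous
 * type saved in freep->type, and, with MALLOCLOG, dumps the matching log
 * entries via hitmlog().
 */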
558 1.20 cgd
559 1.20 cgd /*
560 1.20 cgd * Change the size of a block of memory.
561 1.20 cgd */
562 1.20 cgd void *
563 1.20 cgd realloc(curaddr, newsize, type, flags)
564 1.20 cgd void *curaddr;
565 1.20 cgd unsigned long newsize;
566 1.20 cgd int type, flags;
567 1.20 cgd {
568 1.50 augustss struct kmemusage *kup;
569 1.20 cgd long cursize;
570 1.20 cgd void *newaddr;
571 1.20 cgd #ifdef DIAGNOSTIC
572 1.20 cgd long alloc;
573 1.20 cgd #endif
574 1.20 cgd
575 1.20 cgd /*
576 1.20 cgd * Realloc() with a NULL pointer is the same as malloc().
577 1.20 cgd */
578 1.20 cgd if (curaddr == NULL)
579 1.20 cgd return (malloc(newsize, type, flags));
580 1.20 cgd
581 1.20 cgd /*
582 1.20 cgd * Realloc() with zero size is the same as free().
583 1.20 cgd */
584 1.20 cgd if (newsize == 0) {
585 1.20 cgd free(curaddr, type);
586 1.20 cgd return (NULL);
587 1.20 cgd }
588 1.59 thorpej
589 1.59 thorpej #ifdef LOCKDEBUG
590 1.59 thorpej if ((flags & M_NOWAIT) == 0)
591 1.59 thorpej simple_lock_only_held(NULL, "realloc");
592 1.59 thorpej #endif
593 1.20 cgd
594 1.20 cgd /*
595 1.20 cgd * Find out how large the old allocation was (and do some
596 1.20 cgd * sanity checking).
597 1.20 cgd */
598 1.20 cgd kup = btokup(curaddr);
599 1.20 cgd cursize = 1 << kup->ku_indx;
600 1.20 cgd
601 1.20 cgd #ifdef DIAGNOSTIC
602 1.20 cgd /*
603 1.20 cgd * Check for returns of data that do not point to the
604 1.20 cgd * beginning of the allocation.
605 1.20 cgd */
606 1.49 thorpej if (cursize > PAGE_SIZE)
607 1.49 thorpej alloc = addrmask[BUCKETINDX(PAGE_SIZE)];
608 1.20 cgd else
609 1.20 cgd alloc = addrmask[kup->ku_indx];
610 1.20 cgd if (((u_long)curaddr & alloc) != 0)
611 1.20 cgd panic("realloc: unaligned addr %p, size %ld, type %s, mask %ld\n",
612 1.20 cgd curaddr, cursize, memname[type], alloc);
613 1.20 cgd #endif /* DIAGNOSTIC */
614 1.20 cgd
615 1.20 cgd if (cursize > MAXALLOCSAVE)
616 1.20 cgd cursize = ctob(kup->ku_pagecnt);
617 1.20 cgd
618 1.20 cgd /*
619 1.20 cgd * If we already actually have as much as they want, we're done.
620 1.20 cgd */
621 1.20 cgd if (newsize <= cursize)
622 1.20 cgd return (curaddr);
623 1.20 cgd
624 1.20 cgd /*
625 1.20 cgd * Can't satisfy the allocation with the existing block.
626 1.20 cgd * Allocate a new one and copy the data.
627 1.20 cgd */
628 1.20 cgd newaddr = malloc(newsize, type, flags);
629 1.51 thorpej if (__predict_false(newaddr == NULL)) {
630 1.20 cgd /*
631 1.20 cgd * Malloc() failed, because flags included M_NOWAIT.
632 1.20 cgd * Return NULL to indicate that failure. The old
633 1.20 cgd * pointer is still valid.
634 1.20 cgd */
635 1.20 cgd return NULL;
636 1.20 cgd }
637 1.34 perry memcpy(newaddr, curaddr, cursize);
638 1.20 cgd
639 1.20 cgd /*
640 1.20 cgd * We were successful: free the old allocation and return
641 1.20 cgd * the new one.
642 1.20 cgd */
643 1.20 cgd free(curaddr, type);
644 1.20 cgd return (newaddr);
645 1.1 cgd }
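/*
 * Illustrative usage sketch (not part of the original file; the names are
 * examples only): because a failed realloc() leaves the old block intact,
 * M_NOWAIT callers should assign to a temporary first, e.g.
 *
 *	void *np;
 *
 *	np = realloc(p, newsize, M_TEMP, M_NOWAIT);
 *	if (np == NULL)
 *		return (ENOMEM);	(p is still valid and still owned)
 *	p = np;
 */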
646 1.1 cgd
647 1.1 cgd /*
648 1.49 thorpej * Compute the number of pages that kmem_map will map, that is,
649 1.49 thorpej * the size of the kernel malloc arena.
650 1.49 thorpej */
651 1.49 thorpej void
652 1.49 thorpej kmeminit_nkmempages()
653 1.49 thorpej {
654 1.49 thorpej int npages;
655 1.49 thorpej
656 1.49 thorpej if (nkmempages != 0) {
657 1.49 thorpej /*
658 1.49 thorpej * It's already been set (by us being here before, or
659 1.49 thorpej * by patching or kernel config options), bail out now.
660 1.49 thorpej */
661 1.49 thorpej return;
662 1.49 thorpej }
663 1.49 thorpej
664 1.49 thorpej /*
665 1.49 thorpej * We use the following (simple) formula:
666 1.49 thorpej *
667 1.49 thorpej * - Starting point is physical memory / 4.
668 1.49 thorpej *
669 1.49 thorpej * - Clamp it down to NKMEMPAGES_MAX.
670 1.49 thorpej *
671 1.49 thorpej * - Round it up to NKMEMPAGES_MIN.
672 1.49 thorpej */
673 1.49 thorpej npages = physmem / 4;
674 1.49 thorpej
675 1.49 thorpej if (npages > NKMEMPAGES_MAX)
676 1.49 thorpej npages = NKMEMPAGES_MAX;
677 1.49 thorpej
678 1.49 thorpej if (npages < NKMEMPAGES_MIN)
679 1.49 thorpej npages = NKMEMPAGES_MIN;
680 1.49 thorpej
681 1.49 thorpej nkmempages = npages;
682 1.49 thorpej }
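/*
 * Worked example of the formula above (illustrative, with made-up numbers):
 * on a machine with 128 MB of RAM and 4 KB pages, physmem is 32768 pages,
 * so the starting point is 32768 / 4 = 8192 pages, i.e. a 32 MB kmem_map.
 * That value is then clamped down to NKMEMPAGES_MAX and raised to at least
 * NKMEMPAGES_MIN, both of which are machine-dependent defaults unless
 * overridden in the kernel config file.
 */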
683 1.49 thorpej
684 1.49 thorpej /*
685 1.1 cgd * Initialize the kernel memory allocator
686 1.1 cgd */
687 1.12 christos void
688 1.1 cgd kmeminit()
689 1.1 cgd {
690 1.23 tls #ifdef KMEMSTATS
691 1.50 augustss long indx;
692 1.23 tls #endif
693 1.1 cgd
694 1.1 cgd #if ((MAXALLOCSAVE & (MAXALLOCSAVE - 1)) != 0)
695 1.1 cgd ERROR!_kmeminit:_MAXALLOCSAVE_not_power_of_2
696 1.1 cgd #endif
697 1.1 cgd #if (MAXALLOCSAVE > MINALLOCSIZE * 32768)
698 1.1 cgd ERROR!_kmeminit:_MAXALLOCSAVE_too_big
699 1.1 cgd #endif
700 1.47 ragge #if (MAXALLOCSAVE < NBPG)
701 1.1 cgd ERROR!_kmeminit:_MAXALLOCSAVE_too_small
702 1.1 cgd #endif
703 1.11 cgd
704 1.11 cgd if (sizeof(struct freelist) > (1 << MINBUCKET))
705 1.11 cgd panic("minbucket too small/struct freelist too big");
706 1.11 cgd
707 1.49 thorpej /*
708 1.49 thorpej * Compute the number of kmem_map pages, if we have not
709 1.49 thorpej * done so already.
710 1.49 thorpej */
711 1.49 thorpej kmeminit_nkmempages();
712 1.49 thorpej
713 1.28 mrg kmemusage = (struct kmemusage *) uvm_km_zalloc(kernel_map,
714 1.49 thorpej (vsize_t)(nkmempages * sizeof(struct kmemusage)));
715 1.35 eeh kmem_map = uvm_km_suballoc(kernel_map, (vaddr_t *)&kmembase,
716 1.49 thorpej (vaddr_t *)&kmemlimit, (vsize_t)(nkmempages << PAGE_SHIFT),
717 1.61 thorpej VM_MAP_INTRSAFE, FALSE, &kmem_map_store);
718 1.1 cgd #ifdef KMEMSTATS
719 1.1 cgd for (indx = 0; indx < MINBUCKET + 16; indx++) {
720 1.49 thorpej if (1 << indx >= PAGE_SIZE)
721 1.1 cgd bucket[indx].kb_elmpercl = 1;
722 1.1 cgd else
723 1.49 thorpej bucket[indx].kb_elmpercl = PAGE_SIZE / (1 << indx);
724 1.1 cgd bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
725 1.1 cgd }
726 1.8 cgd for (indx = 0; indx < M_LAST; indx++)
727 1.60 thorpej kmemstats[indx].ks_limit =
728 1.60 thorpej ((u_long)nkmempages << PAGE_SHIFT) * 6U / 10U;
729 1.62 thorpej #endif
730 1.62 thorpej #ifdef MALLOC_DEBUG
731 1.62 thorpej debug_malloc_init();
732 1.1 cgd #endif
733 1.1 cgd }
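/*
 * Illustrative note (not part of the original file): when KMEMSTATS is
 * compiled in, every malloc type starts with the same ks_limit of six
 * tenths of the kmem_map size. Continuing the 8192-page example above,
 * that is roughly 19.2 MB per type before M_WAITOK callers start sleeping
 * in malloc().
 */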
734 1.39 thorpej
735 1.39 thorpej #ifdef DDB
736 1.39 thorpej #include <ddb/db_output.h>
737 1.39 thorpej
738 1.39 thorpej /*
739 1.39 thorpej * Dump kmem statistics from ddb.
740 1.39 thorpej *
741 1.39 thorpej * usage: call dump_kmemstats
742 1.39 thorpej */
743 1.39 thorpej void dump_kmemstats __P((void));
744 1.39 thorpej
745 1.39 thorpej void
746 1.39 thorpej dump_kmemstats()
747 1.39 thorpej {
748 1.39 thorpej #ifdef KMEMSTATS
749 1.39 thorpej const char *name;
750 1.39 thorpej int i;
751 1.39 thorpej
752 1.39 thorpej for (i = 0; i < M_LAST; i++) {
753 1.39 thorpej name = memname[i] ? memname[i] : "";
754 1.39 thorpej
755 1.39 thorpej db_printf("%2d %s%.*s %ld\n", i, name,
756 1.39 thorpej (int)(20 - strlen(name)), " ",
757 1.39 thorpej kmemstats[i].ks_memuse);
758 1.39 thorpej }
759 1.39 thorpej #else
760 1.39 thorpej db_printf("Kmem stats are not being collected.\n");
761 1.39 thorpej #endif /* KMEMSTATS */
762 1.39 thorpej }
763 1.39 thorpej #endif /* DDB */