kern_malloc.c revision 1.45.2.1 1 1.45.2.1 bouyer /* $NetBSD: kern_malloc.c,v 1.45.2.1 2000/11/20 18:09:01 bouyer Exp $ */
2 1.9 cgd
3 1.1 cgd /*
4 1.37 christos * Copyright (c) 1996 Christopher G. Demetriou. All rights reserved.
5 1.8 cgd * Copyright (c) 1987, 1991, 1993
6 1.8 cgd * The Regents of the University of California. All rights reserved.
7 1.1 cgd *
8 1.1 cgd * Redistribution and use in source and binary forms, with or without
9 1.1 cgd * modification, are permitted provided that the following conditions
10 1.1 cgd * are met:
11 1.1 cgd * 1. Redistributions of source code must retain the above copyright
12 1.1 cgd * notice, this list of conditions and the following disclaimer.
13 1.1 cgd * 2. Redistributions in binary form must reproduce the above copyright
14 1.1 cgd * notice, this list of conditions and the following disclaimer in the
15 1.1 cgd * documentation and/or other materials provided with the distribution.
16 1.1 cgd * 3. All advertising materials mentioning features or use of this software
17 1.1 cgd * must display the following acknowledgement:
18 1.1 cgd * This product includes software developed by the University of
19 1.1 cgd * California, Berkeley and its contributors.
20 1.1 cgd * 4. Neither the name of the University nor the names of its contributors
21 1.1 cgd * may be used to endorse or promote products derived from this software
22 1.1 cgd * without specific prior written permission.
23 1.1 cgd *
24 1.1 cgd * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 1.1 cgd * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 1.1 cgd * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 1.1 cgd * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 1.1 cgd * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 1.1 cgd * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 1.1 cgd * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 1.1 cgd * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 1.1 cgd * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 1.1 cgd * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 1.1 cgd * SUCH DAMAGE.
35 1.1 cgd *
36 1.32 fvdl * @(#)kern_malloc.c 8.4 (Berkeley) 5/20/95
37 1.1 cgd */
38 1.31 mrg
39 1.33 thorpej #include "opt_lockdebug.h"
40 1.1 cgd
41 1.7 mycroft #include <sys/param.h>
42 1.7 mycroft #include <sys/proc.h>
43 1.8 cgd #include <sys/map.h>
44 1.7 mycroft #include <sys/kernel.h>
45 1.7 mycroft #include <sys/malloc.h>
46 1.12 christos #include <sys/systm.h>
47 1.7 mycroft
48 1.28 mrg #include <uvm/uvm_extern.h>
49 1.28 mrg
50 1.44 thorpej static struct vm_map_intrsafe kmem_map_store;
51 1.28 mrg vm_map_t kmem_map = NULL;
52 1.28 mrg
53 1.45.2.1 bouyer #include "opt_kmempages.h"
54 1.45.2.1 bouyer
55 1.45.2.1 bouyer #ifdef NKMEMCLUSTERS
56 1.45.2.1 bouyer #error NKMEMCLUSTERS is obsolete; remove it from your kernel config file and use NKMEMPAGES instead or let the kernel auto-size
57 1.45.2.1 bouyer #endif
58 1.45.2.1 bouyer
59 1.45.2.1 bouyer /*
60 1.45.2.1 bouyer * Default number of pages in kmem_map. We attempt to calculate this
61 1.45.2.1 bouyer * at run-time, but allow it to be either patched or set in the kernel
62 1.45.2.1 bouyer * config file.
63 1.45.2.1 bouyer */
64 1.45.2.1 bouyer #ifndef NKMEMPAGES
65 1.45.2.1 bouyer #define NKMEMPAGES 0
66 1.45.2.1 bouyer #endif
67 1.45.2.1 bouyer int nkmempages = NKMEMPAGES;
68 1.45.2.1 bouyer
69 1.45.2.1 bouyer /*
70 1.45.2.1 bouyer * Defaults for lower- and upper-bounds for the kmem_map page count.
71 1.45.2.1 bouyer * Can be overridden by kernel config options.
72 1.45.2.1 bouyer */
73 1.45.2.1 bouyer #ifndef NKMEMPAGES_MIN
74 1.45.2.1 bouyer #define NKMEMPAGES_MIN NKMEMPAGES_MIN_DEFAULT
75 1.45.2.1 bouyer #endif
76 1.45.2.1 bouyer
77 1.45.2.1 bouyer #ifndef NKMEMPAGES_MAX
78 1.45.2.1 bouyer #define NKMEMPAGES_MAX NKMEMPAGES_MAX_DEFAULT
79 1.45.2.1 bouyer #endif
80 1.45.2.1 bouyer
81 1.24 thorpej #include "opt_kmemstats.h"
82 1.27 thorpej #include "opt_malloclog.h"
83 1.12 christos
84 1.1 cgd struct kmembuckets bucket[MINBUCKET + 16];
85 1.8 cgd struct kmemstats kmemstats[M_LAST];
86 1.1 cgd struct kmemusage *kmemusage;
87 1.1 cgd char *kmembase, *kmemlimit;
88 1.25 mycroft const char *memname[] = INITKMEMNAMES;
89 1.1 cgd
90 1.27 thorpej #ifdef MALLOCLOG
91 1.27 thorpej #ifndef MALLOCLOGSIZE
92 1.27 thorpej #define MALLOCLOGSIZE 100000
93 1.27 thorpej #endif
94 1.27 thorpej
95 1.27 thorpej struct malloclog {
96 1.27 thorpej void *addr;
97 1.27 thorpej long size;
98 1.27 thorpej int type;
99 1.27 thorpej int action;
100 1.27 thorpej const char *file;
101 1.27 thorpej long line;
102 1.27 thorpej } malloclog[MALLOCLOGSIZE];
103 1.27 thorpej
104 1.27 thorpej long malloclogptr;
105 1.27 thorpej
106 1.27 thorpej static void domlog __P((void *a, long size, int type, int action,
107 1.27 thorpej const char *file, long line));
108 1.27 thorpej static void hitmlog __P((void *a));
109 1.27 thorpej
/*
 * domlog:
 *
 *	Record one malloc/free event in the circular malloc log.
 *	"action" is 1 for an allocation and 2 for a free (see the
 *	domlog() calls in the allocator below); "file"/"line"
 *	identify the call site.  The log wraps at MALLOCLOGSIZE.
 */
110 1.27 thorpej static void
111 1.27 thorpej domlog(a, size, type, action, file, line)
112 1.27 thorpej void *a;
113 1.27 thorpej long size;
114 1.27 thorpej int type;
115 1.27 thorpej int action;
116 1.27 thorpej const char *file;
117 1.27 thorpej long line;
118 1.27 thorpej {
119 1.27 thorpej 
120 1.27 thorpej malloclog[malloclogptr].addr = a;
121 1.27 thorpej malloclog[malloclogptr].size = size;
122 1.27 thorpej malloclog[malloclogptr].type = type;
123 1.27 thorpej malloclog[malloclogptr].action = action;
124 1.27 thorpej malloclog[malloclogptr].file = file;
125 1.27 thorpej malloclog[malloclogptr].line = line;
126 1.27 thorpej malloclogptr++;
127 1.27 thorpej if (malloclogptr >= MALLOCLOGSIZE)
128 1.27 thorpej malloclogptr = 0;
129 1.27 thorpej }
130 1.27 thorpej
/*
 * hitmlog:
 *
 *	Print every log entry whose address matches "a" and whose
 *	action is non-zero.  The two loops scan the circular log in
 *	chronological order: oldest entries (from malloclogptr up to
 *	the wrap point) first, then the newer entries before
 *	malloclogptr.
 */
131 1.27 thorpej static void
132 1.27 thorpej hitmlog(a)
133 1.27 thorpej void *a;
134 1.27 thorpej {
135 1.27 thorpej struct malloclog *lp;
136 1.27 thorpej long l;
137 1.27 thorpej 
138 1.27 thorpej #define PRT \
139 1.27 thorpej if (malloclog[l].addr == a && malloclog[l].action) { \
140 1.27 thorpej lp = &malloclog[l]; \
141 1.27 thorpej printf("malloc log entry %ld:\n", l); \
142 1.27 thorpej printf("\taddr = %p\n", lp->addr); \
143 1.27 thorpej printf("\tsize = %ld\n", lp->size); \
144 1.27 thorpej printf("\ttype = %s\n", memname[lp->type]); \
145 1.27 thorpej printf("\taction = %s\n", lp->action == 1 ? "alloc" : "free"); \
146 1.27 thorpej printf("\tfile = %s\n", lp->file); \
147 1.27 thorpej printf("\tline = %ld\n", lp->line); \
148 1.27 thorpej }
149 1.27 thorpej 
150 1.27 thorpej for (l = malloclogptr; l < MALLOCLOGSIZE; l++)
151 1.27 thorpej PRT
152 1.27 thorpej 
153 1.27 thorpej for (l = 0; l < malloclogptr; l++)
154 1.27 thorpej PRT
155 1.27 thorpej }
156 1.27 thorpej #endif /* MALLOCLOG */
157 1.27 thorpej
158 1.8 cgd #ifdef DIAGNOSTIC
159 1.8 cgd /*
160 1.8 cgd * This structure provides a set of masks to catch unaligned frees.
161 1.8 cgd */
162 1.8 cgd long addrmask[] = { 0,
163 1.8 cgd 0x00000001, 0x00000003, 0x00000007, 0x0000000f,
164 1.8 cgd 0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
165 1.8 cgd 0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
166 1.8 cgd 0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
167 1.8 cgd };
168 1.8 cgd 
169 1.8 cgd /*
170 1.8 cgd * The WEIRD_ADDR is used as known text to copy into free objects so
171 1.8 cgd * that modifications after frees can be detected.
172 1.8 cgd */
173 1.12 christos #define WEIRD_ADDR ((unsigned) 0xdeadbeef)
174 1.8 cgd #define MAX_COPY 32
175 1.8 cgd 
176 1.8 cgd /*
177 1.11 cgd * Normally the freelist structure is used only to hold the list pointer
178 1.11 cgd * for free objects. However, when running with diagnostics, the first
179 1.11 cgd * 8 bytes of the structure is unused except for diagnostic information,
180 1.11 cgd * and the free list pointer is at offset 8 in the structure. Since the
181 1.11 cgd * first 8 bytes is the portion of the structure most often modified, this
182 1.11 cgd * helps to detect memory reuse problems and avoid free list corruption.
183 1.8 cgd */
/*
 * Note: free() poisons the whole object (including spare0) with
 * WEIRD_ADDR, so "spare0 == WEIRD_ADDR" later serves as a quick
 * "possibly already free" hint; malloc() clears spare0 on allocation.
 */
184 1.8 cgd struct freelist {
185 1.11 cgd int32_t spare0;
186 1.11 cgd int16_t type;
187 1.11 cgd int16_t spare1;
188 1.8 cgd caddr_t next;
189 1.8 cgd };
190 1.8 cgd #else /* !DIAGNOSTIC */
191 1.8 cgd struct freelist {
192 1.8 cgd caddr_t next;
193 1.8 cgd };
194 1.8 cgd #endif /* DIAGNOSTIC */
195 1.8 cgd
196 1.1 cgd /*
197 1.1 cgd * Allocate a block of memory
198 1.1 cgd */
/*
 * malloc:
 *
 *	Allocate "size" bytes of kernel memory of the given "type".
 *	If M_NOWAIT is set in "flags", fail with NULL instead of
 *	sleeping; otherwise (KMEMSTATS kernels) the caller may
 *	tsleep() waiting for the per-type limit, and the function
 *	panics if kmem_map itself has no space left.  Runs at
 *	splmem() to protect the buckets against interrupt-level
 *	allocations.  With MALLOCLOG, the call site is logged.
 */
199 1.27 thorpej #ifdef MALLOCLOG
200 1.27 thorpej void *
201 1.27 thorpej _malloc(size, type, flags, file, line)
202 1.27 thorpej unsigned long size;
203 1.27 thorpej int type, flags;
204 1.27 thorpej const char *file;
205 1.27 thorpej long line;
206 1.27 thorpej #else
207 1.1 cgd void *
208 1.1 cgd malloc(size, type, flags)
209 1.1 cgd unsigned long size;
210 1.1 cgd int type, flags;
211 1.27 thorpej #endif /* MALLOCLOG */
212 1.1 cgd {
213 1.45.2.1 bouyer struct kmembuckets *kbp;
214 1.45.2.1 bouyer struct kmemusage *kup;
215 1.45.2.1 bouyer struct freelist *freep;
216 1.5 andrew long indx, npg, allocsize;
217 1.1 cgd int s;
218 1.1 cgd caddr_t va, cp, savedlist;
219 1.8 cgd #ifdef DIAGNOSTIC
220 1.11 cgd int32_t *end, *lp;
221 1.8 cgd int copysize;
222 1.26 mycroft const char *savedtype;
223 1.8 cgd #endif
224 1.1 cgd #ifdef KMEMSTATS
225 1.45.2.1 bouyer struct kmemstats *ksp = &kmemstats[type];
226 1.1 cgd 
/*
 * NOTE(review): this check admits type == M_LAST, which indexes one
 * past the last kmemstats[] slot -- looks like it should be
 * ">= M_LAST"; confirm against sys/malloc.h.
 */
227 1.45.2.1 bouyer if (__predict_false(((unsigned long)type) > M_LAST))
228 1.1 cgd panic("malloc - bogus type");
229 1.1 cgd #endif
230 1.1 cgd indx = BUCKETINDX(size);
231 1.1 cgd kbp = &bucket[indx];
232 1.45.2.1 bouyer s = splmem();
233 1.1 cgd #ifdef KMEMSTATS
234 1.1 cgd while (ksp->ks_memuse >= ksp->ks_limit) {
235 1.1 cgd if (flags & M_NOWAIT) {
236 1.1 cgd splx(s);
237 1.1 cgd return ((void *) NULL);
238 1.1 cgd }
239 1.1 cgd if (ksp->ks_limblocks < 65535)
240 1.1 cgd ksp->ks_limblocks++;
241 1.1 cgd tsleep((caddr_t)ksp, PSWP+2, memname[type], 0);
242 1.1 cgd }
243 1.8 cgd ksp->ks_size |= 1 << indx;
244 1.8 cgd #endif
245 1.8 cgd #ifdef DIAGNOSTIC
246 1.8 cgd copysize = 1 << indx < MAX_COPY ? 1 << indx : MAX_COPY;
247 1.1 cgd #endif
/* Bucket empty: get more pages from kmem_map and carve them up. */
248 1.1 cgd if (kbp->kb_next == NULL) {
249 1.8 cgd kbp->kb_last = NULL;
250 1.1 cgd if (size > MAXALLOCSAVE)
251 1.45.2.1 bouyer allocsize = roundup(size, PAGE_SIZE);
252 1.1 cgd else
253 1.1 cgd allocsize = 1 << indx;
254 1.45.2.1 bouyer npg = btoc(allocsize);
255 1.28 mrg va = (caddr_t) uvm_km_kmemalloc(kmem_map, uvmexp.kmem_object,
256 1.35 eeh (vsize_t)ctob(npg),
257 1.28 mrg (flags & M_NOWAIT) ? UVM_KMF_NOWAIT : 0);
258 1.45.2.1 bouyer if (__predict_false(va == NULL)) {
259 1.17 cgd /*
260 1.17 cgd * Kmem_malloc() can return NULL, even if it can
261 1.17 cgd * wait, if there is no map space available, because
262 1.17 cgd * it can't fix that problem. Neither can we,
263 1.17 cgd * right now. (We should release pages which
264 1.17 cgd * are completely free and which are in buckets
265 1.17 cgd * with too many free elements.)
266 1.17 cgd */
267 1.17 cgd if ((flags & M_NOWAIT) == 0)
268 1.17 cgd panic("malloc: out of space in kmem_map")
269 1.6 cgd splx(s);
270 1.6 cgd return ((void *) NULL);
271 1.1 cgd }
272 1.1 cgd #ifdef KMEMSTATS
273 1.1 cgd kbp->kb_total += kbp->kb_elmpercl;
274 1.1 cgd #endif
275 1.1 cgd kup = btokup(va);
276 1.1 cgd kup->ku_indx = indx;
277 1.1 cgd if (allocsize > MAXALLOCSAVE) {
278 1.1 cgd if (npg > 65535)
279 1.1 cgd panic("malloc: allocation too large");
280 1.1 cgd kup->ku_pagecnt = npg;
281 1.1 cgd #ifdef KMEMSTATS
282 1.1 cgd ksp->ks_memuse += allocsize;
283 1.1 cgd #endif
284 1.1 cgd goto out;
285 1.1 cgd }
286 1.1 cgd #ifdef KMEMSTATS
287 1.1 cgd kup->ku_freecnt = kbp->kb_elmpercl;
288 1.1 cgd kbp->kb_totalfree += kbp->kb_elmpercl;
289 1.1 cgd #endif
290 1.1 cgd /*
291 1.1 cgd * Just in case we blocked while allocating memory,
292 1.1 cgd * and someone else also allocated memory for this
293 1.1 cgd * bucket, don't assume the list is still empty.
294 1.1 cgd */
295 1.1 cgd savedlist = kbp->kb_next;
/* Build the bucket's free list back-to-front within the new pages. */
296 1.45.2.1 bouyer kbp->kb_next = cp = va + (npg << PAGE_SHIFT) - allocsize;
297 1.8 cgd for (;;) {
298 1.8 cgd freep = (struct freelist *)cp;
299 1.8 cgd #ifdef DIAGNOSTIC
300 1.8 cgd /*
301 1.8 cgd * Copy in known text to detect modification
302 1.8 cgd * after freeing.
303 1.8 cgd */
304 1.11 cgd end = (int32_t *)&cp[copysize];
305 1.11 cgd for (lp = (int32_t *)cp; lp < end; lp++)
306 1.8 cgd *lp = WEIRD_ADDR;
307 1.8 cgd freep->type = M_FREE;
308 1.8 cgd #endif /* DIAGNOSTIC */
309 1.8 cgd if (cp <= va)
310 1.8 cgd break;
311 1.8 cgd cp -= allocsize;
312 1.8 cgd freep->next = cp;
313 1.8 cgd }
314 1.8 cgd freep->next = savedlist;
315 1.8 cgd if (kbp->kb_last == NULL)
316 1.8 cgd kbp->kb_last = (caddr_t)freep;
317 1.1 cgd }
/* Take the first element off the bucket's free list. */
318 1.1 cgd va = kbp->kb_next;
319 1.8 cgd kbp->kb_next = ((struct freelist *)va)->next;
320 1.8 cgd #ifdef DIAGNOSTIC
321 1.8 cgd freep = (struct freelist *)va;
322 1.8 cgd savedtype = (unsigned)freep->type < M_LAST ?
323 1.8 cgd memname[freep->type] : "???";
/* Sanity-check that the next free-list pointer still points at
 * writable kmem_map memory; if not, the free list was corrupted. */
324 1.29 chs if (kbp->kb_next) {
325 1.29 chs int rv;
326 1.35 eeh vaddr_t addr = (vaddr_t)kbp->kb_next;
327 1.29 chs 
328 1.43 thorpej vm_map_lock(kmem_map);
329 1.29 chs rv = uvm_map_checkprot(kmem_map, addr,
330 1.29 chs addr + sizeof(struct freelist),
331 1.29 chs VM_PROT_WRITE);
332 1.43 thorpej vm_map_unlock(kmem_map);
333 1.29 chs 
334 1.45.2.1 bouyer if (__predict_false(rv == 0)) {
335 1.41 mrg printf(
336 1.21 christos "%s %ld of object %p size %ld %s %s (invalid addr %p)\n",
337 1.41 mrg "Data modified on freelist: word",
338 1.41 mrg (long)((int32_t *)&kbp->kb_next - (int32_t *)kbp),
339 1.41 mrg va, size, "previous type", savedtype, kbp->kb_next);
340 1.27 thorpej #ifdef MALLOCLOG
341 1.41 mrg hitmlog(va);
342 1.27 thorpej #endif
343 1.41 mrg kbp->kb_next = NULL;
344 1.29 chs }
345 1.8 cgd }
346 1.11 cgd 
347 1.11 cgd /* Fill the fields that we've used with WEIRD_ADDR */
348 1.8 cgd #if BYTE_ORDER == BIG_ENDIAN
349 1.8 cgd freep->type = WEIRD_ADDR >> 16;
350 1.8 cgd #endif
351 1.8 cgd #if BYTE_ORDER == LITTLE_ENDIAN
352 1.8 cgd freep->type = (short)WEIRD_ADDR;
353 1.8 cgd #endif
354 1.11 cgd end = (int32_t *)&freep->next +
355 1.11 cgd (sizeof(freep->next) / sizeof(int32_t));
356 1.11 cgd for (lp = (int32_t *)&freep->next; lp < end; lp++)
357 1.11 cgd *lp = WEIRD_ADDR;
358 1.11 cgd 
359 1.11 cgd /* and check that the data hasn't been modified. */
360 1.11 cgd end = (int32_t *)&va[copysize];
361 1.11 cgd for (lp = (int32_t *)va; lp < end; lp++) {
362 1.45.2.1 bouyer if (__predict_true(*lp == WEIRD_ADDR))
363 1.8 cgd continue;
364 1.22 christos printf("%s %ld of object %p size %ld %s %s (0x%x != 0x%x)\n",
365 1.21 christos "Data modified on freelist: word",
366 1.21 christos (long)(lp - (int32_t *)va), va, size, "previous type",
367 1.21 christos savedtype, *lp, WEIRD_ADDR);
368 1.27 thorpej #ifdef MALLOCLOG
369 1.27 thorpej hitmlog(va);
370 1.27 thorpej #endif
371 1.8 cgd break;
372 1.8 cgd }
373 1.11 cgd 
374 1.8 cgd freep->spare0 = 0;
375 1.8 cgd #endif /* DIAGNOSTIC */
376 1.1 cgd #ifdef KMEMSTATS
377 1.1 cgd kup = btokup(va);
378 1.1 cgd if (kup->ku_indx != indx)
379 1.1 cgd panic("malloc: wrong bucket");
380 1.1 cgd if (kup->ku_freecnt == 0)
381 1.1 cgd panic("malloc: lost data");
382 1.1 cgd kup->ku_freecnt--;
383 1.1 cgd kbp->kb_totalfree--;
384 1.1 cgd ksp->ks_memuse += 1 << indx;
385 1.1 cgd out:
386 1.1 cgd kbp->kb_calls++;
387 1.1 cgd ksp->ks_inuse++;
388 1.1 cgd ksp->ks_calls++;
389 1.1 cgd if (ksp->ks_memuse > ksp->ks_maxused)
390 1.1 cgd ksp->ks_maxused = ksp->ks_memuse;
391 1.1 cgd #else
392 1.1 cgd out:
393 1.1 cgd #endif
394 1.27 thorpej #ifdef MALLOCLOG
395 1.27 thorpej domlog(va, size, type, 1, file, line);
396 1.27 thorpej #endif
397 1.1 cgd splx(s);
398 1.1 cgd return ((void *) va);
399 1.1 cgd }
400 1.1 cgd
401 1.1 cgd /*
402 1.1 cgd * Free a block of memory allocated by malloc.
403 1.1 cgd */
/*
 * free:
 *
 *	Release a block previously obtained from malloc().  "type"
 *	should match the type used at allocation (cross-checked under
 *	DIAGNOSTIC/KMEMSTATS).  Runs at splmem().  Blocks larger than
 *	MAXALLOCSAVE go straight back to kmem_map; smaller ones are
 *	poisoned with WEIRD_ADDR (DIAGNOSTIC) and appended to the
 *	tail of their bucket's free list.
 */
404 1.27 thorpej #ifdef MALLOCLOG
405 1.27 thorpej void
406 1.27 thorpej _free(addr, type, file, line)
407 1.27 thorpej void *addr;
408 1.27 thorpej int type;
409 1.27 thorpej const char *file;
410 1.27 thorpej long line;
411 1.27 thorpej #else
412 1.1 cgd void
413 1.1 cgd free(addr, type)
414 1.1 cgd void *addr;
415 1.1 cgd int type;
416 1.27 thorpej #endif /* MALLOCLOG */
417 1.1 cgd {
418 1.45.2.1 bouyer struct kmembuckets *kbp;
419 1.45.2.1 bouyer struct kmemusage *kup;
420 1.45.2.1 bouyer struct freelist *freep;
421 1.8 cgd long size;
422 1.8 cgd int s;
423 1.5 andrew #ifdef DIAGNOSTIC
424 1.8 cgd caddr_t cp;
425 1.11 cgd int32_t *end, *lp;
426 1.11 cgd long alloc, copysize;
427 1.5 andrew #endif
428 1.1 cgd #ifdef KMEMSTATS
429 1.45.2.1 bouyer struct kmemstats *ksp = &kmemstats[type];
430 1.45.2.1 bouyer #endif
431 1.45.2.1 bouyer 
432 1.45.2.1 bouyer #ifdef DIAGNOSTIC
433 1.45.2.1 bouyer /*
434 1.45.2.1 bouyer * Ensure that we're free'ing something that we could
435 1.45.2.1 bouyer * have allocated in the first place. That is, check
436 1.45.2.1 bouyer * to see that the address is within kmem_map.
437 1.45.2.1 bouyer */
438 1.45.2.1 bouyer if (__predict_false((vaddr_t)addr < kmem_map->header.start ||
439 1.45.2.1 bouyer (vaddr_t)addr >= kmem_map->header.end))
440 1.45.2.1 bouyer panic("free: addr %p not within kmem_map", addr);
441 1.1 cgd #endif
442 1.1 cgd 
443 1.1 cgd kup = btokup(addr);
444 1.1 cgd size = 1 << kup->ku_indx;
445 1.8 cgd kbp = &bucket[kup->ku_indx];
446 1.45.2.1 bouyer s = splmem();
447 1.27 thorpej #ifdef MALLOCLOG
448 1.27 thorpej domlog(addr, 0, type, 2, file, line);
449 1.27 thorpej #endif
450 1.1 cgd #ifdef DIAGNOSTIC
451 1.8 cgd /*
452 1.8 cgd * Check for returns of data that do not point to the
453 1.8 cgd * beginning of the allocation.
454 1.8 cgd */
455 1.45.2.1 bouyer if (size > PAGE_SIZE)
456 1.45.2.1 bouyer alloc = addrmask[BUCKETINDX(PAGE_SIZE)];
457 1.1 cgd else
458 1.1 cgd alloc = addrmask[kup->ku_indx];
459 1.8 cgd if (((u_long)addr & alloc) != 0)
460 1.15 christos panic("free: unaligned addr %p, size %ld, type %s, mask %ld\n",
461 1.8 cgd addr, size, memname[type], alloc);
462 1.1 cgd #endif /* DIAGNOSTIC */
/* Large allocations are returned directly to kmem_map. */
463 1.1 cgd if (size > MAXALLOCSAVE) {
464 1.35 eeh uvm_km_free(kmem_map, (vaddr_t)addr, ctob(kup->ku_pagecnt));
465 1.1 cgd #ifdef KMEMSTATS
466 1.1 cgd size = kup->ku_pagecnt << PGSHIFT;
467 1.1 cgd ksp->ks_memuse -= size;
468 1.1 cgd kup->ku_indx = 0;
469 1.1 cgd kup->ku_pagecnt = 0;
470 1.1 cgd if (ksp->ks_memuse + size >= ksp->ks_limit &&
471 1.1 cgd ksp->ks_memuse < ksp->ks_limit)
472 1.1 cgd wakeup((caddr_t)ksp);
473 1.1 cgd ksp->ks_inuse--;
474 1.1 cgd kbp->kb_total -= 1;
475 1.1 cgd #endif
476 1.1 cgd splx(s);
477 1.1 cgd return;
478 1.1 cgd }
479 1.8 cgd freep = (struct freelist *)addr;
480 1.8 cgd #ifdef DIAGNOSTIC
481 1.8 cgd /*
482 1.8 cgd * Check for multiple frees. Use a quick check to see if
483 1.8 cgd * it looks free before laboriously searching the freelist.
484 1.8 cgd */
485 1.45.2.1 bouyer if (__predict_false(freep->spare0 == WEIRD_ADDR)) {
486 1.16 cgd for (cp = kbp->kb_next; cp;
487 1.16 cgd cp = ((struct freelist *)cp)->next) {
488 1.8 cgd if (addr != cp)
489 1.8 cgd continue;
490 1.22 christos printf("multiply freed item %p\n", addr);
491 1.27 thorpej #ifdef MALLOCLOG
492 1.27 thorpej hitmlog(addr);
493 1.27 thorpej #endif
494 1.8 cgd panic("free: duplicated free");
495 1.8 cgd }
496 1.8 cgd }
497 1.38 chs #ifdef LOCKDEBUG
498 1.38 chs /*
499 1.38 chs * Check if we're freeing a locked simple lock.
500 1.38 chs */
501 1.40 chs simple_lock_freecheck(addr, (char *)addr + size);
502 1.38 chs #endif
503 1.8 cgd /*
504 1.8 cgd * Copy in known text to detect modification after freeing
505 1.8 cgd * and to make it look free. Also, save the type being freed
506 1.8 cgd * so we can list likely culprit if modification is detected
507 1.8 cgd * when the object is reallocated.
508 1.8 cgd */
509 1.8 cgd copysize = size < MAX_COPY ? size : MAX_COPY;
510 1.11 cgd end = (int32_t *)&((caddr_t)addr)[copysize];
511 1.11 cgd for (lp = (int32_t *)addr; lp < end; lp++)
512 1.8 cgd *lp = WEIRD_ADDR;
513 1.8 cgd freep->type = type;
514 1.8 cgd #endif /* DIAGNOSTIC */
515 1.1 cgd #ifdef KMEMSTATS
516 1.1 cgd kup->ku_freecnt++;
517 1.36 thorpej if (kup->ku_freecnt >= kbp->kb_elmpercl) {
518 1.1 cgd if (kup->ku_freecnt > kbp->kb_elmpercl)
519 1.1 cgd panic("free: multiple frees");
520 1.1 cgd else if (kbp->kb_totalfree > kbp->kb_highwat)
521 1.1 cgd kbp->kb_couldfree++;
522 1.36 thorpej }
523 1.1 cgd kbp->kb_totalfree++;
524 1.1 cgd ksp->ks_memuse -= size;
/* Wake anyone who was sleeping in malloc() on this type's limit. */
525 1.1 cgd if (ksp->ks_memuse + size >= ksp->ks_limit &&
526 1.1 cgd ksp->ks_memuse < ksp->ks_limit)
527 1.1 cgd wakeup((caddr_t)ksp);
528 1.1 cgd ksp->ks_inuse--;
529 1.1 cgd #endif
/* Append the block to the tail of its bucket's free list. */
530 1.8 cgd if (kbp->kb_next == NULL)
531 1.8 cgd kbp->kb_next = addr;
532 1.8 cgd else
533 1.8 cgd ((struct freelist *)kbp->kb_last)->next = addr;
534 1.8 cgd freep->next = NULL;
535 1.8 cgd kbp->kb_last = addr;
536 1.1 cgd splx(s);
537 1.20 cgd }
538 1.20 cgd
539 1.20 cgd /*
540 1.20 cgd * Change the size of a block of memory.
541 1.20 cgd */
/*
 * realloc:
 *
 *	Resize the allocation at "curaddr" to "newsize" bytes.
 *	A NULL curaddr behaves as malloc(); newsize == 0 behaves as
 *	free() and returns NULL.  If the request fits in the existing
 *	bucket, the original pointer is returned unchanged.  On
 *	allocation failure (M_NOWAIT) returns NULL and leaves the
 *	old block intact.
 */
542 1.20 cgd void *
543 1.20 cgd realloc(curaddr, newsize, type, flags)
544 1.20 cgd void *curaddr;
545 1.20 cgd unsigned long newsize;
546 1.20 cgd int type, flags;
547 1.20 cgd {
548 1.45.2.1 bouyer struct kmemusage *kup;
549 1.20 cgd long cursize;
550 1.20 cgd void *newaddr;
551 1.20 cgd #ifdef DIAGNOSTIC
552 1.20 cgd long alloc;
553 1.20 cgd #endif
554 1.20 cgd 
555 1.20 cgd /*
556 1.20 cgd * Realloc() with a NULL pointer is the same as malloc().
557 1.20 cgd */
558 1.20 cgd if (curaddr == NULL)
559 1.20 cgd return (malloc(newsize, type, flags));
560 1.20 cgd 
561 1.20 cgd /*
562 1.20 cgd * Realloc() with zero size is the same as free().
563 1.20 cgd */
564 1.20 cgd if (newsize == 0) {
565 1.20 cgd free(curaddr, type);
566 1.20 cgd return (NULL);
567 1.20 cgd }
568 1.20 cgd 
569 1.20 cgd /*
570 1.20 cgd * Find out how large the old allocation was (and do some
571 1.20 cgd * sanity checking).
572 1.20 cgd */
573 1.20 cgd kup = btokup(curaddr);
574 1.20 cgd cursize = 1 << kup->ku_indx;
575 1.20 cgd 
576 1.20 cgd #ifdef DIAGNOSTIC
577 1.20 cgd /*
578 1.20 cgd * Check for returns of data that do not point to the
579 1.20 cgd * beginning of the allocation.
580 1.20 cgd */
581 1.45.2.1 bouyer if (cursize > PAGE_SIZE)
582 1.45.2.1 bouyer alloc = addrmask[BUCKETINDX(PAGE_SIZE)];
583 1.20 cgd else
584 1.20 cgd alloc = addrmask[kup->ku_indx];
585 1.20 cgd if (((u_long)curaddr & alloc) != 0)
586 1.20 cgd panic("realloc: unaligned addr %p, size %ld, type %s, mask %ld\n",
587 1.20 cgd curaddr, cursize, memname[type], alloc);
588 1.20 cgd #endif /* DIAGNOSTIC */
589 1.20 cgd 
590 1.20 cgd if (cursize > MAXALLOCSAVE)
591 1.20 cgd cursize = ctob(kup->ku_pagecnt);
592 1.20 cgd 
593 1.20 cgd /*
594 1.20 cgd * If we already actually have as much as they want, we're done.
595 1.20 cgd */
596 1.20 cgd if (newsize <= cursize)
597 1.20 cgd return (curaddr);
598 1.20 cgd 
599 1.20 cgd /*
600 1.20 cgd * Can't satisfy the allocation with the existing block.
601 1.20 cgd * Allocate a new one and copy the data.
602 1.20 cgd */
603 1.20 cgd newaddr = malloc(newsize, type, flags);
604 1.45.2.1 bouyer if (__predict_false(newaddr == NULL)) {
605 1.20 cgd /*
606 1.20 cgd * Malloc() failed, because flags included M_NOWAIT.
607 1.20 cgd * Return NULL to indicate that failure. The old
608 1.20 cgd * pointer is still valid.
609 1.20 cgd */
610 1.20 cgd return NULL;
611 1.20 cgd }
612 1.34 perry memcpy(newaddr, curaddr, cursize);
613 1.20 cgd 
614 1.20 cgd /*
615 1.20 cgd * We were successful: free the old allocation and return
616 1.20 cgd * the new one.
617 1.20 cgd */
618 1.20 cgd free(curaddr, type);
619 1.20 cgd return (newaddr);
620 1.1 cgd }
621 1.1 cgd
622 1.1 cgd /*
623 1.45.2.1 bouyer * Compute the number of pages that kmem_map will map, that is,
624 1.45.2.1 bouyer * the size of the kernel malloc arena.
625 1.45.2.1 bouyer */
626 1.45.2.1 bouyer void
627 1.45.2.1 bouyer kmeminit_nkmempages()
628 1.45.2.1 bouyer {
629 1.45.2.1 bouyer int npages;
630 1.45.2.1 bouyer 
631 1.45.2.1 bouyer if (nkmempages != 0) {
632 1.45.2.1 bouyer /*
633 1.45.2.1 bouyer * It's already been set (by us being here before, or
634 1.45.2.1 bouyer * by patching or kernel config options), bail out now.
635 1.45.2.1 bouyer */
636 1.45.2.1 bouyer return;
637 1.45.2.1 bouyer }
638 1.45.2.1 bouyer 
639 1.45.2.1 bouyer /*
640 1.45.2.1 bouyer * We use the following (simple) formula:
641 1.45.2.1 bouyer *
642 1.45.2.1 bouyer * - Starting point is physical memory / 4.
643 1.45.2.1 bouyer *
644 1.45.2.1 bouyer * - Clamp it down to NKMEMPAGES_MAX.
645 1.45.2.1 bouyer *
646 1.45.2.1 bouyer * - Round it up to NKMEMPAGES_MIN.
647 1.45.2.1 bouyer */
	/*
	 * NOTE(review): assumes physmem is expressed in pages
	 * (set by MD startup code) -- confirm for this port.
	 */
648 1.45.2.1 bouyer npages = physmem / 4;
649 1.45.2.1 bouyer 
650 1.45.2.1 bouyer if (npages > NKMEMPAGES_MAX)
651 1.45.2.1 bouyer npages = NKMEMPAGES_MAX;
652 1.45.2.1 bouyer 
653 1.45.2.1 bouyer if (npages < NKMEMPAGES_MIN)
654 1.45.2.1 bouyer npages = NKMEMPAGES_MIN;
655 1.45.2.1 bouyer 
656 1.45.2.1 bouyer nkmempages = npages;
657 1.45.2.1 bouyer }
658 1.45.2.1 bouyer
659 1.45.2.1 bouyer /*
660 1.1 cgd * Initialize the kernel memory allocator
661 1.1 cgd */
/*
 * kmeminit:
 *
 *	One-time allocator initialization: size the arena, allocate
 *	the kmemusage[] array, carve the intrsafe kmem_map submap out
 *	of kernel_map, and (KMEMSTATS) initialize the per-bucket and
 *	per-type statistics.
 */
662 1.12 christos void
663 1.1 cgd kmeminit()
664 1.1 cgd {
665 1.23 tls #ifdef KMEMSTATS
666 1.45.2.1 bouyer long indx;
667 1.23 tls #endif
668 1.1 cgd 
/*
 * Compile-time sanity checks: each deliberately-invalid token below
 * breaks the build if the corresponding MAXALLOCSAVE constraint fails.
 */
669 1.1 cgd #if ((MAXALLOCSAVE & (MAXALLOCSAVE - 1)) != 0)
670 1.1 cgd ERROR!_kmeminit:_MAXALLOCSAVE_not_power_of_2
671 1.1 cgd #endif
672 1.1 cgd #if (MAXALLOCSAVE > MINALLOCSIZE * 32768)
673 1.1 cgd ERROR!_kmeminit:_MAXALLOCSAVE_too_big
674 1.1 cgd #endif
675 1.45.2.1 bouyer #if (MAXALLOCSAVE < NBPG)
676 1.1 cgd ERROR!_kmeminit:_MAXALLOCSAVE_too_small
677 1.1 cgd #endif
678 1.11 cgd 
679 1.11 cgd if (sizeof(struct freelist) > (1 << MINBUCKET))
680 1.11 cgd panic("minbucket too small/struct freelist too big");
681 1.11 cgd 
682 1.45.2.1 bouyer /*
683 1.45.2.1 bouyer * Compute the number of kmem_map pages, if we have not
684 1.45.2.1 bouyer * done so already.
685 1.45.2.1 bouyer */
686 1.45.2.1 bouyer kmeminit_nkmempages();
687 1.45.2.1 bouyer 
/* Allocate the usage array and the interrupt-safe kmem_map submap. */
688 1.28 mrg kmemusage = (struct kmemusage *) uvm_km_zalloc(kernel_map,
689 1.45.2.1 bouyer (vsize_t)(nkmempages * sizeof(struct kmemusage)));
690 1.35 eeh kmem_map = uvm_km_suballoc(kernel_map, (vaddr_t *)&kmembase,
691 1.45.2.1 bouyer (vaddr_t *)&kmemlimit, (vsize_t)(nkmempages << PAGE_SHIFT),
692 1.44 thorpej VM_MAP_INTRSAFE, FALSE, &kmem_map_store.vmi_map);
693 1.1 cgd #ifdef KMEMSTATS
694 1.1 cgd for (indx = 0; indx < MINBUCKET + 16; indx++) {
695 1.45.2.1 bouyer if (1 << indx >= PAGE_SIZE)
696 1.1 cgd bucket[indx].kb_elmpercl = 1;
697 1.1 cgd else
698 1.45.2.1 bouyer bucket[indx].kb_elmpercl = PAGE_SIZE / (1 << indx);
699 1.1 cgd bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
700 1.1 cgd }
/* Each type may consume at most 60% of the arena. */
701 1.8 cgd for (indx = 0; indx < M_LAST; indx++)
702 1.45.2.1 bouyer kmemstats[indx].ks_limit = (nkmempages << PAGE_SHIFT) * 6 / 10;
703 1.1 cgd #endif
704 1.1 cgd }
705 1.39 thorpej
706 1.39 thorpej #ifdef DDB
707 1.39 thorpej #include <ddb/db_output.h>
708 1.39 thorpej
709 1.39 thorpej /*
710 1.39 thorpej * Dump kmem statistics from ddb.
711 1.39 thorpej *
712 1.39 thorpej * usage: call dump_kmemstats
713 1.39 thorpej */
714 1.39 thorpej void dump_kmemstats __P((void));
715 1.39 thorpej
/*
 * dump_kmemstats:
 *
 *	DDB helper: print per-type in-use byte counts (ks_memuse).
 *	Only meaningful on KMEMSTATS kernels; otherwise prints a
 *	notice that no statistics are collected.
 */
716 1.39 thorpej void
717 1.39 thorpej dump_kmemstats()
718 1.39 thorpej {
719 1.39 thorpej #ifdef KMEMSTATS
720 1.39 thorpej const char *name;
721 1.39 thorpej int i;
722 1.39 thorpej 
723 1.39 thorpej for (i = 0; i < M_LAST; i++) {
724 1.39 thorpej name = memname[i] ? memname[i] : "";
725 1.39 thorpej 
/* Pad the name column to 20 characters with the %.*s trick. */
726 1.39 thorpej db_printf("%2d %s%.*s %ld\n", i, name,
727 1.39 thorpej (int)(20 - strlen(name)), " ",
728 1.39 thorpej kmemstats[i].ks_memuse);
729 1.39 thorpej }
730 1.39 thorpej #else
731 1.39 thorpej db_printf("Kmem stats are not being collected.\n");
732 1.39 thorpej #endif /* KMEMSTATS */
733 1.39 thorpej }
734 1.39 thorpej #endif /* DDB */
735