/*	$NetBSD: kern_malloc.c,v 1.129 2010/04/05 07:16:13 he Exp $	*/

/*
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.4 (Berkeley) 5/20/95
 */

/*
 * Copyright (c) 1996 Christopher G. Demetriou.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.4 (Berkeley) 5/20/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_malloc.c,v 1.129 2010/04/05 07:16:13 he Exp $");

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/debug.h>
#include <sys/mutex.h>
#include <sys/lockdebug.h>

#include <uvm/uvm_extern.h>

static struct vm_map_kernel kmem_map_store;
struct vm_map *kmem_map = NULL;

#include "opt_kmempages.h"

#ifdef NKMEMCLUSTERS
#error NKMEMCLUSTERS is obsolete; remove it from your kernel config file and use NKMEMPAGES instead or let the kernel auto-size
#endif

/*
 * Default number of pages in kmem_map.  We attempt to calculate this
 * at run-time, but allow it to be either patched or set in the kernel
 * config file.
 */
#ifndef NKMEMPAGES
#define	NKMEMPAGES	0
#endif
int	nkmempages = NKMEMPAGES;

/*
 * Defaults for lower- and upper-bounds for the kmem_map page count.
 * Can be overridden by kernel config options.
 */
#ifndef NKMEMPAGES_MIN
#define	NKMEMPAGES_MIN	NKMEMPAGES_MIN_DEFAULT
#endif

#ifndef NKMEMPAGES_MAX
#define	NKMEMPAGES_MAX	NKMEMPAGES_MAX_DEFAULT
#endif
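
/*
 * For illustration only (the values here are hypothetical examples):
 * these bounds can be overridden in a kernel config file with lines
 * such as
 *
 *	options NKMEMPAGES_MIN=1024
 *	options NKMEMPAGES_MAX=16384
 *
 * or the map can be sized outright with "options NKMEMPAGES=8192".
 */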

#include "opt_kmemstats.h"
#include "opt_malloclog.h"
#include "opt_malloc_debug.h"

#define	MINALLOCSIZE	(1 << MINBUCKET)
#define	BUCKETINDX(size) \
	((size) <= (MINALLOCSIZE * 128) \
		? (size) <= (MINALLOCSIZE * 8) \
			? (size) <= (MINALLOCSIZE * 2) \
				? (size) <= (MINALLOCSIZE * 1) \
					? (MINBUCKET + 0) \
					: (MINBUCKET + 1) \
				: (size) <= (MINALLOCSIZE * 4) \
					? (MINBUCKET + 2) \
					: (MINBUCKET + 3) \
			: (size) <= (MINALLOCSIZE * 32) \
				? (size) <= (MINALLOCSIZE * 16) \
					? (MINBUCKET + 4) \
					: (MINBUCKET + 5) \
				: (size) <= (MINALLOCSIZE * 64) \
					? (MINBUCKET + 6) \
					: (MINBUCKET + 7) \
		: (size) <= (MINALLOCSIZE * 2048) \
			? (size) <= (MINALLOCSIZE * 512) \
				? (size) <= (MINALLOCSIZE * 256) \
					? (MINBUCKET + 8) \
					: (MINBUCKET + 9) \
				: (size) <= (MINALLOCSIZE * 1024) \
					? (MINBUCKET + 10) \
					: (MINBUCKET + 11) \
			: (size) <= (MINALLOCSIZE * 8192) \
				? (size) <= (MINALLOCSIZE * 4096) \
					? (MINBUCKET + 12) \
					: (MINBUCKET + 13) \
				: (size) <= (MINALLOCSIZE * 16384) \
					? (MINBUCKET + 14) \
					: (MINBUCKET + 15))
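
/*
 * Worked example (illustrative, assuming the usual MINBUCKET of 4,
 * i.e. MINALLOCSIZE == 16): BUCKETINDX(100) evaluates to MINBUCKET + 3
 * == 7, since 100 <= 16 * 8 but 100 > 16 * 4, so the request is served
 * from the 1 << 7 == 128 byte bucket.
 */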

/*
 * Array of descriptors that describe the contents of each page
 */
struct kmemusage {
	short ku_indx;		/* bucket index */
	union {
		u_short freecnt;/* for small allocations, free pieces in page */
		u_short pagecnt;/* for large allocations, pages alloced */
	} ku_un;
};
#define	ku_freecnt ku_un.freecnt
#define	ku_pagecnt ku_un.pagecnt

struct kmembuckets kmembuckets[MINBUCKET + 16];
struct kmemusage *kmemusage;
char *kmembase, *kmemlimit;

#ifdef DEBUG
static void *malloc_freecheck;
#endif

/*
 * Turn virtual addresses into kmem map indices
 */
#define	btokup(addr)	(&kmemusage[((char *)(addr) - kmembase) >> PGSHIFT])
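
/*
 * For example (illustrative): with 4 KB pages (PGSHIFT == 12), an
 * address lying 3 pages above kmembase resolves to &kmemusage[3],
 * the usage descriptor for the page containing that address.
 */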

struct malloc_type *kmemstatistics;

#ifdef MALLOCLOG
#ifndef MALLOCLOGSIZE
#define	MALLOCLOGSIZE	100000
#endif

struct malloclog {
	void *addr;
	long size;
	struct malloc_type *type;
	int action;
	const char *file;
	long line;
} malloclog[MALLOCLOGSIZE];

long	malloclogptr;

/*
 * Fuzz factor for neighbour address match.  This must be a mask of
 * the lower bits we wish to ignore when comparing addresses.
 */
__uintptr_t malloclog_fuzz = 0x7FL;
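
/*
 * Illustrative example: with the default mask of 0x7F the low 7 bits
 * are ignored, so, roughly, a logged allocation whose end address
 * falls within about 128 bytes below a corrupted address is reported
 * as its "neighbour".
 */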

static void
domlog(void *a, long size, struct malloc_type *type, int action,
    const char *file, long line)
{

	malloclog[malloclogptr].addr = a;
	malloclog[malloclogptr].size = size;
	malloclog[malloclogptr].type = type;
	malloclog[malloclogptr].action = action;
	malloclog[malloclogptr].file = file;
	malloclog[malloclogptr].line = line;
	malloclogptr++;
	if (malloclogptr >= MALLOCLOGSIZE)
		malloclogptr = 0;
}

#ifdef DIAGNOSTIC
static void
hitmlog(void *a)
{
	struct malloclog *lp;
	long l;

#define	PRT do { \
	lp = &malloclog[l]; \
	if (lp->addr == a && lp->action) { \
		printf("malloc log entry %ld:\n", l); \
		printf("\taddr = %p\n", lp->addr); \
		printf("\tsize = %ld\n", lp->size); \
		printf("\ttype = %s\n", lp->type->ks_shortdesc); \
		printf("\taction = %s\n", lp->action == 1 ? "alloc" : "free"); \
		printf("\tfile = %s\n", lp->file); \
		printf("\tline = %ld\n", lp->line); \
	} \
} while (/* CONSTCOND */0)

/*
 * Print fuzzy matched "neighbour" - look for the memory block that has
 * been allocated below the address we are interested in.  We look for a
 * base address + size that is within malloclog_fuzz of our target
 * address.  If the base address and target address are the same then it
 * is likely we have found a free (size is 0 in this case), so we won't
 * report those; they will get reported by PRT anyway.
 */
#define	NPRT do { \
	__uintptr_t fuzz_mask = ~(malloclog_fuzz); \
	lp = &malloclog[l]; \
	if ((__uintptr_t)lp->addr != (__uintptr_t)a && \
	    (((__uintptr_t)lp->addr + lp->size + malloclog_fuzz) & fuzz_mask) \
	    == ((__uintptr_t)a & fuzz_mask) && lp->action) { \
		printf("neighbour malloc log entry %ld:\n", l); \
		printf("\taddr = %p\n", lp->addr); \
		printf("\tsize = %ld\n", lp->size); \
		printf("\ttype = %s\n", lp->type->ks_shortdesc); \
		printf("\taction = %s\n", lp->action == 1 ? "alloc" : "free"); \
		printf("\tfile = %s\n", lp->file); \
		printf("\tline = %ld\n", lp->line); \
	} \
} while (/* CONSTCOND */0)

	for (l = malloclogptr; l < MALLOCLOGSIZE; l++) {
		PRT;
		NPRT;
	}

	for (l = 0; l < malloclogptr; l++) {
		PRT;
		NPRT;
	}

#undef PRT
#undef NPRT
}
#endif /* DIAGNOSTIC */
#endif /* MALLOCLOG */

#ifdef DIAGNOSTIC
/*
 * This structure provides a set of masks to catch unaligned frees.
 */
const long addrmask[] = { 0,
	0x00000001, 0x00000003, 0x00000007, 0x0000000f,
	0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
	0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
	0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
};

/*
 * The WEIRD_ADDR is used as known text to copy into free objects so
 * that modifications after frees can be detected.
 */
#define	WEIRD_ADDR	((uint32_t) 0xdeadbeef)
#ifdef DEBUG
#define	MAX_COPY	PAGE_SIZE
#else
#define	MAX_COPY	32
#endif

/*
 * Normally the freelist structure is used only to hold the list pointer
 * for free objects.  However, when running with diagnostics, the first
 * 8/16 bytes of the structure are unused except for diagnostic information,
 * and the free list pointer is at offset 8/16 in the structure.  Since the
 * first 8 bytes are the portion of the structure most often modified, this
 * helps to detect memory reuse problems and avoid free list corruption.
 */
struct freelist {
	uint32_t spare0;
#ifdef _LP64
	uint32_t spare1;		/* explicit padding */
#endif
	struct malloc_type *type;
	void *	next;
};
#else /* !DIAGNOSTIC */
struct freelist {
	void *	next;
};
#endif /* DIAGNOSTIC */
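
/*
 * Layout note (illustrative): under DIAGNOSTIC on an LP64 platform,
 * spare0/spare1 occupy the first 8 bytes and the type pointer the next
 * 8, leaving the free-list pointer at offset 16; on 32-bit platforms
 * spare0 and the type pointer fill the first 8 bytes and the pointer
 * sits at offset 8; hence the "8/16" in the comment above.
 */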

kmutex_t malloc_lock;

/*
 * Allocate a block of memory
 */
#ifdef MALLOCLOG
void *
_kern_malloc(unsigned long size, struct malloc_type *ksp, int flags,
    const char *file, long line)
#else
void *
kern_malloc(unsigned long size, struct malloc_type *ksp, int flags)
#endif /* MALLOCLOG */
{
	struct kmembuckets *kbp;
	struct kmemusage *kup;
	struct freelist *freep;
	long indx, npg, allocsize;
	char *va, *cp, *savedlist;
#ifdef DIAGNOSTIC
	uint32_t *end, *lp;
	int copysize;
#endif

#ifdef LOCKDEBUG
	if ((flags & M_NOWAIT) == 0) {
		ASSERT_SLEEPABLE();
	}
#endif
#ifdef MALLOC_DEBUG
	if (debug_malloc(size, ksp, flags, (void *) &va)) {
		if (va != 0) {
			FREECHECK_OUT(&malloc_freecheck, (void *)va);
		}
		return ((void *) va);
	}
#endif
	indx = BUCKETINDX(size);
	kbp = &kmembuckets[indx];
	mutex_spin_enter(&malloc_lock);
#ifdef KMEMSTATS
	while (ksp->ks_memuse >= ksp->ks_limit) {
		if (flags & M_NOWAIT) {
			mutex_spin_exit(&malloc_lock);
			return ((void *) NULL);
		}
		if (ksp->ks_limblocks < 65535)
			ksp->ks_limblocks++;
		mtsleep((void *)ksp, PSWP+2, ksp->ks_shortdesc, 0,
		    &malloc_lock);
	}
	ksp->ks_size |= 1 << indx;
	ksp->ks_active[indx]++;
#endif
#ifdef DIAGNOSTIC
	copysize = 1 << indx < MAX_COPY ? 1 << indx : MAX_COPY;
#endif
	if (kbp->kb_next == NULL) {
		int s;
		kbp->kb_last = NULL;
		if (size > MAXALLOCSAVE)
			allocsize = round_page(size);
		else
			allocsize = 1 << indx;
		npg = btoc(allocsize);
		mutex_spin_exit(&malloc_lock);
		s = splvm();
		va = (void *) uvm_km_alloc(kmem_map,
		    (vsize_t)ctob(npg), 0,
		    ((flags & M_NOWAIT) ? UVM_KMF_NOWAIT : 0) |
		    ((flags & M_CANFAIL) ? UVM_KMF_CANFAIL : 0) |
		    UVM_KMF_WIRED);
		splx(s);
		if (__predict_false(va == NULL)) {
			/*
			 * Kmem_malloc() can return NULL, even if it can
			 * wait, if there is no map space available, because
			 * it can't fix that problem.  Neither can we,
			 * right now.  (We should release pages which
			 * are completely free and which are in kmembuckets
			 * with too many free elements.)
			 */
			if ((flags & (M_NOWAIT|M_CANFAIL)) == 0)
				panic("malloc: out of space in kmem_map");
			return (NULL);
		}
		mutex_spin_enter(&malloc_lock);
#ifdef KMEMSTATS
		kbp->kb_total += kbp->kb_elmpercl;
#endif
		kup = btokup(va);
		kup->ku_indx = indx;
		if (allocsize > MAXALLOCSAVE) {
			if (npg > 65535)
				panic("malloc: allocation too large");
			kup->ku_pagecnt = npg;
#ifdef KMEMSTATS
			ksp->ks_memuse += allocsize;
#endif
			goto out;
		}
#ifdef KMEMSTATS
		kup->ku_freecnt = kbp->kb_elmpercl;
		kbp->kb_totalfree += kbp->kb_elmpercl;
#endif
		/*
		 * Just in case we blocked while allocating memory,
		 * and someone else also allocated memory for this
		 * kmembucket, don't assume the list is still empty.
		 */
		savedlist = kbp->kb_next;
		kbp->kb_next = cp = va + (npg << PAGE_SHIFT) - allocsize;
		for (;;) {
			freep = (struct freelist *)cp;
#ifdef DIAGNOSTIC
			/*
			 * Copy in known text to detect modification
			 * after freeing.
			 */
			end = (uint32_t *)&cp[copysize];
			for (lp = (uint32_t *)cp; lp < end; lp++)
				*lp = WEIRD_ADDR;
			freep->type = M_FREE;
#endif /* DIAGNOSTIC */
			if (cp <= va)
				break;
			cp -= allocsize;
			freep->next = cp;
		}
		freep->next = savedlist;
		if (savedlist == NULL)
			kbp->kb_last = (void *)freep;
	}
	va = kbp->kb_next;
	kbp->kb_next = ((struct freelist *)va)->next;
#ifdef DIAGNOSTIC
	freep = (struct freelist *)va;
	/* XXX potential to get garbage pointer here. */
	if (kbp->kb_next) {
		int rv;
		vaddr_t addr = (vaddr_t)kbp->kb_next;

		vm_map_lock(kmem_map);
		rv = uvm_map_checkprot(kmem_map, addr,
		    addr + sizeof(struct freelist), VM_PROT_WRITE);
		vm_map_unlock(kmem_map);

		if (__predict_false(rv == 0)) {
			printf("Data modified on freelist: "
			    "word %ld of object %p size %ld previous type %s "
			    "(invalid addr %p)\n",
			    (long)((int32_t *)&kbp->kb_next - (int32_t *)kbp),
			    va, size, "foo", kbp->kb_next);
#ifdef MALLOCLOG
			hitmlog(va);
#endif
			kbp->kb_next = NULL;
		}
	}

	/* Fill the fields that we've used with WEIRD_ADDR */
#ifdef _LP64
	freep->type = (struct malloc_type *)
	    (WEIRD_ADDR | (((u_long) WEIRD_ADDR) << 32));
#else
	freep->type = (struct malloc_type *) WEIRD_ADDR;
#endif
	end = (uint32_t *)&freep->next +
	    (sizeof(freep->next) / sizeof(int32_t));
	for (lp = (uint32_t *)&freep->next; lp < end; lp++)
		*lp = WEIRD_ADDR;

	/* and check that the data hasn't been modified. */
	end = (uint32_t *)&va[copysize];
	for (lp = (uint32_t *)va; lp < end; lp++) {
		if (__predict_true(*lp == WEIRD_ADDR))
			continue;
		printf("Data modified on freelist: "
		    "word %ld of object %p size %ld previous type %s "
		    "(0x%x != 0x%x)\n",
		    (long)(lp - (uint32_t *)va), va, size,
		    "bar", *lp, WEIRD_ADDR);
#ifdef MALLOCLOG
		hitmlog(va);
#endif
		break;
	}

	freep->spare0 = 0;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup = btokup(va);
	if (kup->ku_indx != indx)
		panic("malloc: wrong bucket");
	if (kup->ku_freecnt == 0)
		panic("malloc: lost data");
	kup->ku_freecnt--;
	kbp->kb_totalfree--;
	ksp->ks_memuse += 1 << indx;
out:
	kbp->kb_calls++;
	ksp->ks_inuse++;
	ksp->ks_calls++;
	if (ksp->ks_memuse > ksp->ks_maxused)
		ksp->ks_maxused = ksp->ks_memuse;
#else
out:
#endif
#ifdef MALLOCLOG
	domlog(va, size, ksp, 1, file, line);
#endif
	mutex_spin_exit(&malloc_lock);
	if ((flags & M_ZERO) != 0)
		memset(va, 0, size);
	FREECHECK_OUT(&malloc_freecheck, (void *)va);
	return ((void *) va);
}

/*
 * Free a block of memory allocated by malloc.
 */
#ifdef MALLOCLOG
void
_kern_free(void *addr, struct malloc_type *ksp, const char *file, long line)
#else
void
kern_free(void *addr, struct malloc_type *ksp)
#endif /* MALLOCLOG */
{
	struct kmembuckets *kbp;
	struct kmemusage *kup;
	struct freelist *freep;
	long size;
#ifdef DIAGNOSTIC
	void *cp;
	int32_t *end, *lp;
	long alloc, copysize;
#endif

	FREECHECK_IN(&malloc_freecheck, addr);
#ifdef MALLOC_DEBUG
	if (debug_free(addr, ksp))
		return;
#endif

#ifdef DIAGNOSTIC
	/*
	 * Ensure that we're free'ing something that we could
	 * have allocated in the first place.  That is, check
	 * to see that the address is within kmem_map.
	 */
	if (__predict_false((vaddr_t)addr < vm_map_min(kmem_map) ||
	    (vaddr_t)addr >= vm_map_max(kmem_map)))
		panic("free: addr %p not within kmem_map", addr);
#endif

	kup = btokup(addr);
	size = 1 << kup->ku_indx;
	kbp = &kmembuckets[kup->ku_indx];

	LOCKDEBUG_MEM_CHECK(addr,
	    size <= MAXALLOCSAVE ? size : ctob(kup->ku_pagecnt));

	mutex_spin_enter(&malloc_lock);
#ifdef MALLOCLOG
	domlog(addr, 0, ksp, 2, file, line);
#endif
#ifdef DIAGNOSTIC
	/*
	 * Check for returns of data that do not point to the
	 * beginning of the allocation.
	 */
	if (size > PAGE_SIZE)
		alloc = addrmask[BUCKETINDX(PAGE_SIZE)];
	else
		alloc = addrmask[kup->ku_indx];
	if (((u_long)addr & alloc) != 0)
		panic("free: unaligned addr %p, size %ld, type %s, mask %ld",
		    addr, size, ksp->ks_shortdesc, alloc);
#endif /* DIAGNOSTIC */
	if (size > MAXALLOCSAVE) {
		uvm_km_free(kmem_map, (vaddr_t)addr, ctob(kup->ku_pagecnt),
		    UVM_KMF_WIRED);
#ifdef KMEMSTATS
		size = kup->ku_pagecnt << PGSHIFT;
		ksp->ks_memuse -= size;
		ksp->ks_active[kup->ku_indx]--;
		kup->ku_indx = 0;
		kup->ku_pagecnt = 0;
		if (ksp->ks_memuse + size >= ksp->ks_limit &&
		    ksp->ks_memuse < ksp->ks_limit)
			wakeup((void *)ksp);
#ifdef DIAGNOSTIC
		if (ksp->ks_inuse == 0)
			panic("free 1: inuse 0, probable double free");
#endif
		ksp->ks_inuse--;
		kbp->kb_total -= 1;
#endif
		mutex_spin_exit(&malloc_lock);
		return;
	}
	freep = (struct freelist *)addr;
#ifdef DIAGNOSTIC
	/*
	 * Check for multiple frees.  Use a quick check to see if
	 * it looks free before laboriously searching the freelist.
	 */
	if (__predict_false(freep->spare0 == WEIRD_ADDR)) {
		for (cp = kbp->kb_next; cp;
		    cp = ((struct freelist *)cp)->next) {
			if (addr != cp)
				continue;
			printf("multiply freed item %p\n", addr);
#ifdef MALLOCLOG
			hitmlog(addr);
#endif
			panic("free: duplicated free");
		}
	}

	/*
	 * Copy in known text to detect modification after freeing
	 * and to make it look free.  Also, save the type being freed
	 * so we can list likely culprit if modification is detected
	 * when the object is reallocated.
	 */
	copysize = size < MAX_COPY ? size : MAX_COPY;
	end = (int32_t *)&((char *)addr)[copysize];
	for (lp = (int32_t *)addr; lp < end; lp++)
		*lp = WEIRD_ADDR;
	freep->type = ksp;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup->ku_freecnt++;
	if (kup->ku_freecnt >= kbp->kb_elmpercl) {
		if (kup->ku_freecnt > kbp->kb_elmpercl)
			panic("free: multiple frees");
		else if (kbp->kb_totalfree > kbp->kb_highwat)
			kbp->kb_couldfree++;
	}
	kbp->kb_totalfree++;
	ksp->ks_memuse -= size;
	ksp->ks_active[kup->ku_indx]--;
	if (ksp->ks_memuse + size >= ksp->ks_limit &&
	    ksp->ks_memuse < ksp->ks_limit)
		wakeup((void *)ksp);
#ifdef DIAGNOSTIC
	if (ksp->ks_inuse == 0)
		panic("free 2: inuse 0, probable double free");
#endif
	ksp->ks_inuse--;
#endif
	if (kbp->kb_next == NULL)
		kbp->kb_next = addr;
	else
		((struct freelist *)kbp->kb_last)->next = addr;
	freep->next = NULL;
	kbp->kb_last = addr;
	mutex_spin_exit(&malloc_lock);
}

/*
 * Change the size of a block of memory.
 */
void *
kern_realloc(void *curaddr, unsigned long newsize, struct malloc_type *ksp,
    int flags)
{
	struct kmemusage *kup;
	unsigned long cursize;
	void *newaddr;
#ifdef DIAGNOSTIC
	long alloc;
#endif

	/*
	 * realloc() with a NULL pointer is the same as malloc().
	 */
	if (curaddr == NULL)
		return (malloc(newsize, ksp, flags));

	/*
	 * realloc() with zero size is the same as free().
	 */
	if (newsize == 0) {
		free(curaddr, ksp);
		return (NULL);
	}

#ifdef LOCKDEBUG
	if ((flags & M_NOWAIT) == 0) {
		ASSERT_SLEEPABLE();
	}
#endif

	/*
	 * Find out how large the old allocation was (and do some
	 * sanity checking).
	 */
	kup = btokup(curaddr);
	cursize = 1 << kup->ku_indx;

#ifdef DIAGNOSTIC
	/*
	 * Check for returns of data that do not point to the
	 * beginning of the allocation.
	 */
	if (cursize > PAGE_SIZE)
		alloc = addrmask[BUCKETINDX(PAGE_SIZE)];
	else
		alloc = addrmask[kup->ku_indx];
	if (((u_long)curaddr & alloc) != 0)
		panic("realloc: "
		    "unaligned addr %p, size %ld, type %s, mask %ld\n",
		    curaddr, cursize, ksp->ks_shortdesc, alloc);
#endif /* DIAGNOSTIC */

	if (cursize > MAXALLOCSAVE)
		cursize = ctob(kup->ku_pagecnt);

	/*
	 * If we already actually have as much as they want, we're done.
	 */
	if (newsize <= cursize)
		return (curaddr);

	/*
	 * Can't satisfy the allocation with the existing block.
	 * Allocate a new one and copy the data.
	 */
	newaddr = malloc(newsize, ksp, flags);
	if (__predict_false(newaddr == NULL)) {
		/*
		 * malloc() failed, because flags included M_NOWAIT.
		 * Return NULL to indicate that failure.  The old
		 * pointer is still valid.
		 */
		return (NULL);
	}
	memcpy(newaddr, curaddr, cursize);

	/*
	 * We were successful: free the old allocation and return
	 * the new one.
	 */
	free(curaddr, ksp);
	return (newaddr);
}

/*
 * Roundup size to the actual allocation size.
 */
unsigned long
malloc_roundup(unsigned long size)
{

	if (size > MAXALLOCSAVE)
		return (roundup(size, PAGE_SIZE));
	else
		return (1 << BUCKETINDX(size));
}
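
/*
 * Usage example (illustrative, again assuming MINBUCKET == 4):
 * malloc_roundup(100) returns 128, the size of the bucket the request
 * would be served from, while a request larger than MAXALLOCSAVE is
 * simply rounded up to a whole number of pages.
 */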

/*
 * Add a malloc type to the system.
 */
void
malloc_type_attach(struct malloc_type *type)
{

	if (nkmempages == 0)
		panic("malloc_type_attach: nkmempages == 0");

	if (type->ks_magic != M_MAGIC)
		panic("malloc_type_attach: bad magic");

#ifdef DIAGNOSTIC
	{
		struct malloc_type *ksp;
		for (ksp = kmemstatistics; ksp != NULL; ksp = ksp->ks_next) {
			if (ksp == type)
				panic("malloc_type_attach: already on list");
		}
	}
#endif

#ifdef KMEMSTATS
	if (type->ks_limit == 0)
		type->ks_limit = ((u_long)nkmempages << PAGE_SHIFT) * 6U / 10U;
#else
	type->ks_limit = 0;
#endif

	type->ks_next = kmemstatistics;
	kmemstatistics = type;
}

/*
 * Remove a malloc type from the system.
 */
void
malloc_type_detach(struct malloc_type *type)
{
	struct malloc_type *ksp;

#ifdef DIAGNOSTIC
	if (type->ks_magic != M_MAGIC)
		panic("malloc_type_detach: bad magic");
#endif

	if (type == kmemstatistics)
		kmemstatistics = type->ks_next;
	else {
		for (ksp = kmemstatistics; ksp->ks_next != NULL;
		    ksp = ksp->ks_next) {
			if (ksp->ks_next == type) {
				ksp->ks_next = type->ks_next;
				break;
			}
		}
#ifdef DIAGNOSTIC
		if (ksp->ks_next == NULL)
			panic("malloc_type_detach: not on list");
#endif
	}
	type->ks_next = NULL;
}

/*
 * Set the limit on a malloc type.
 */
void
malloc_type_setlimit(struct malloc_type *type, u_long limit)
{
#ifdef KMEMSTATS
	mutex_spin_enter(&malloc_lock);
	type->ks_limit = limit;
	mutex_spin_exit(&malloc_lock);
#endif
}

/*
 * Compute the number of pages that kmem_map will map, that is,
 * the size of the kernel malloc arena.
 */
void
kmeminit_nkmempages(void)
{
	int npages;

	if (nkmempages != 0) {
		/*
		 * It's already been set (by us being here before, or
		 * by patching or kernel config options), bail out now.
		 */
		return;
	}

	npages = physmem;
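
	/*
	 * Illustrative sizing example: physmem counts physical pages,
	 * so on a machine with 512 MB of RAM and 4 KB pages it is
	 * 131072; that figure is then clamped to the machine-dependent
	 * NKMEMPAGES_MAX/NKMEMPAGES_MIN bounds below.
	 */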

	if (npages > NKMEMPAGES_MAX)
		npages = NKMEMPAGES_MAX;

	if (npages < NKMEMPAGES_MIN)
		npages = NKMEMPAGES_MIN;

	nkmempages = npages;
}

/*
 * Initialize the kernel memory allocator
 */
void
kmeminit(void)
{
	__link_set_decl(malloc_types, struct malloc_type);
	struct malloc_type * const *ksp;
	vaddr_t kmb, kml;
#ifdef KMEMSTATS
	long indx;
#endif

#if ((MAXALLOCSAVE & (MAXALLOCSAVE - 1)) != 0)
		ERROR!_kmeminit:_MAXALLOCSAVE_not_power_of_2
#endif
#if (MAXALLOCSAVE > MINALLOCSIZE * 32768)
		ERROR!_kmeminit:_MAXALLOCSAVE_too_big
#endif
#if (MAXALLOCSAVE < NBPG)
		ERROR!_kmeminit:_MAXALLOCSAVE_too_small
#endif

	if (sizeof(struct freelist) > (1 << MINBUCKET))
		panic("minbucket too small/struct freelist too big");

	mutex_init(&malloc_lock, MUTEX_DEFAULT, IPL_VM);

	/*
	 * Compute the number of kmem_map pages, if we have not
	 * done so already.
	 */
	kmeminit_nkmempages();

	kmemusage = (struct kmemusage *) uvm_km_alloc(kernel_map,
	    (vsize_t)(nkmempages * sizeof(struct kmemusage)), 0,
	    UVM_KMF_WIRED|UVM_KMF_ZERO);
	kmb = 0;
	kmem_map = uvm_km_suballoc(kernel_map, &kmb,
	    &kml, ((vsize_t)nkmempages << PAGE_SHIFT),
	    VM_MAP_INTRSAFE, false, &kmem_map_store);
	uvm_km_vacache_init(kmem_map, "kvakmem", 0);
	kmembase = (char *)kmb;
	kmemlimit = (char *)kml;
#ifdef KMEMSTATS
	for (indx = 0; indx < MINBUCKET + 16; indx++) {
		if (1 << indx >= PAGE_SIZE)
			kmembuckets[indx].kb_elmpercl = 1;
		else
			kmembuckets[indx].kb_elmpercl = PAGE_SIZE / (1 << indx);
		kmembuckets[indx].kb_highwat =
		    5 * kmembuckets[indx].kb_elmpercl;
	}
#endif

	/* Attach all of the statically-linked malloc types. */
	__link_set_foreach(ksp, malloc_types)
		malloc_type_attach(*ksp);

#ifdef MALLOC_DEBUG
	debug_malloc_init();
#endif
}

#ifdef DDB
#include <ddb/db_output.h>

/*
 * Dump kmem statistics from ddb.
 *
 * usage: call dump_kmemstats
 */
void	dump_kmemstats(void);

void
dump_kmemstats(void)
{
#ifdef KMEMSTATS
	struct malloc_type *ksp;

	for (ksp = kmemstatistics; ksp != NULL; ksp = ksp->ks_next) {
		if (ksp->ks_memuse == 0)
			continue;
		db_printf("%s%.*s %ld\n", ksp->ks_shortdesc,
		    (int)(20 - strlen(ksp->ks_shortdesc)),
		    "                    ",
		    ksp->ks_memuse);
	}
#else
	db_printf("Kmem stats are not being collected.\n");
#endif /* KMEMSTATS */
}
#endif /* DDB */

#if 0
/*
 * Diagnostic messages about "Data modified on
 * freelist" indicate a memory corruption, but
 * they do not help tracking it down.
 * This function can be called at various places
 * to sanity check malloc's freelist and discover
 * where the corruption takes place.
 */
int
freelist_sanitycheck(void) {
	int i,j;
	struct kmembuckets *kbp;
	struct freelist *freep;
	int rv = 0;

	for (i = MINBUCKET; i <= MINBUCKET + 15; i++) {
		kbp = &kmembuckets[i];
		freep = (struct freelist *)kbp->kb_next;
		j = 0;
		while(freep) {
			vm_map_lock(kmem_map);
			rv = uvm_map_checkprot(kmem_map, (vaddr_t)freep,
			    (vaddr_t)freep + sizeof(struct freelist),
			    VM_PROT_WRITE);
			vm_map_unlock(kmem_map);

			if ((rv == 0) || (*(int *)freep != WEIRD_ADDR)) {
				printf("bucket %i, chunk %d at %p modified\n",
				    i, j, freep);
				return 1;
			}
			freep = (struct freelist *)freep->next;
			j++;
		}
	}

	return 0;
}
#endif