/* $NetBSD: kern_malloc.c,v 1.34 1998/08/04 04:03:13 perry Exp $ */

/*
 * Copyright 1996 Christopher G. Demetriou.  All rights reserved.
 * Copyright (c) 1987, 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)kern_malloc.c       8.4 (Berkeley) 5/20/95
 */

#include "opt_lockdebug.h"
#include "opt_uvm.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/map.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>

#if defined(UVM)
#include <uvm/uvm_extern.h>

static struct vm_map kmem_map_store;
vm_map_t kmem_map = NULL;
#endif

#include "opt_kmemstats.h"
#include "opt_malloclog.h"

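/*
 * Allocator state: "bucket" holds a free list per power-of-two size
 * class, "kmemstats" accumulates per-type usage statistics, "kmemusage"
 * keeps per-page bookkeeping for the pages backing kmem_map, and
 * "memname" maps malloc types to printable names for diagnostics.
 */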
struct kmembuckets bucket[MINBUCKET + 16];
struct kmemstats kmemstats[M_LAST];
struct kmemusage *kmemusage;
char *kmembase, *kmemlimit;
const char *memname[] = INITKMEMNAMES;

#ifdef MALLOCLOG
#ifndef MALLOCLOGSIZE
#define MALLOCLOGSIZE   100000
#endif

struct malloclog {
        void *addr;
        long size;
        int type;
        int action;
        const char *file;
        long line;
} malloclog[MALLOCLOGSIZE];

long    malloclogptr;

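/*
 * The malloc log is a fixed-size circular buffer: domlog() records one
 * entry per call and wraps malloclogptr back to zero when it reaches
 * MALLOCLOGSIZE, and hitmlog() walks the buffer oldest-entry-first,
 * printing every entry that matches a given address.  The "action"
 * field is 1 for an allocation and 2 for a free.
 */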
static void domlog __P((void *a, long size, int type, int action,
        const char *file, long line));
static void hitmlog __P((void *a));

static void
domlog(a, size, type, action, file, line)
        void *a;
        long size;
        int type;
        int action;
        const char *file;
        long line;
{

        malloclog[malloclogptr].addr = a;
        malloclog[malloclogptr].size = size;
        malloclog[malloclogptr].type = type;
        malloclog[malloclogptr].action = action;
        malloclog[malloclogptr].file = file;
        malloclog[malloclogptr].line = line;
        malloclogptr++;
        if (malloclogptr >= MALLOCLOGSIZE)
                malloclogptr = 0;
}

static void
hitmlog(a)
        void *a;
{
        struct malloclog *lp;
        long l;

#define PRT \
        if (malloclog[l].addr == a && malloclog[l].action) { \
                lp = &malloclog[l]; \
                printf("malloc log entry %ld:\n", l); \
                printf("\taddr = %p\n", lp->addr); \
                printf("\tsize = %ld\n", lp->size); \
                printf("\ttype = %s\n", memname[lp->type]); \
                printf("\taction = %s\n", lp->action == 1 ? "alloc" : "free"); \
                printf("\tfile = %s\n", lp->file); \
                printf("\tline = %ld\n", lp->line); \
        }

        for (l = malloclogptr; l < MALLOCLOGSIZE; l++)
                PRT

        for (l = 0; l < malloclogptr; l++)
                PRT
}
#endif /* MALLOCLOG */
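
/*
 * Note: when MALLOCLOG is defined, the malloc() and free() entry points
 * below are compiled as _malloc() and _free(); the malloc()/free() names
 * are presumably provided as macros in <sys/malloc.h> that supply
 * __FILE__ and __LINE__, so each call site is recorded in the log.
 */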

#ifdef DIAGNOSTIC
/*
 * This structure provides a set of masks to catch unaligned frees.
 */
long addrmask[] = { 0,
        0x00000001, 0x00000003, 0x00000007, 0x0000000f,
        0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
        0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
        0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
};
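/*
 * For example, a chunk from the 128-byte bucket (index 7) must begin on
 * a 128-byte boundary, so free() checks the address against
 * addrmask[7] == 0x7f and panics if any of those low bits are set.
 */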

/*
 * The WEIRD_ADDR is used as known text to copy into free objects so
 * that modifications after frees can be detected.
 */
#define WEIRD_ADDR      ((unsigned) 0xdeadbeef)
#define MAX_COPY        32

/*
 * Normally the freelist structure is used only to hold the list pointer
 * for free objects.  However, when running with diagnostics, the first
 * 8 bytes of the structure are unused except for diagnostic information,
 * and the free list pointer is at offset 8 in the structure.  Since the
 * first 8 bytes are the portion of the structure most often modified, this
 * helps to detect memory reuse problems and avoid free list corruption.
 */
struct freelist {
        int32_t spare0;
        int16_t type;
        int16_t spare1;
        caddr_t next;
};
#else /* !DIAGNOSTIC */
struct freelist {
        caddr_t next;
};
#endif /* DIAGNOSTIC */
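
/*
 * Either way, struct freelist must fit inside the smallest object the
 * allocator hands out: kmeminit() panics if sizeof(struct freelist)
 * exceeds 1 << MINBUCKET.
 */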

/*
 * Allocate a block of memory
 */
#ifdef MALLOCLOG
void *
_malloc(size, type, flags, file, line)
        unsigned long size;
        int type, flags;
        const char *file;
        long line;
#else
void *
malloc(size, type, flags)
        unsigned long size;
        int type, flags;
#endif /* MALLOCLOG */
{
        register struct kmembuckets *kbp;
        register struct kmemusage *kup;
        register struct freelist *freep;
        long indx, npg, allocsize;
        int s;
        caddr_t va, cp, savedlist;
#ifdef DIAGNOSTIC
        int32_t *end, *lp;
        int copysize;
        const char *savedtype;
#endif
#ifdef LOCKDEBUG
        extern int simplelockrecurse;
#endif
#ifdef KMEMSTATS
        register struct kmemstats *ksp = &kmemstats[type];

        if (((unsigned long)type) > M_LAST)
                panic("malloc - bogus type");
#endif
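        /*
         * BUCKETINDX() rounds the request up to the bucket whose chunk
         * size (1 << indx) is the smallest power of two that will hold
         * it; e.g. a 100-byte request draws a 128-byte chunk from
         * bucket[7].
         */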
        indx = BUCKETINDX(size);
        kbp = &bucket[indx];
        s = splimp();
#ifdef KMEMSTATS
        while (ksp->ks_memuse >= ksp->ks_limit) {
                if (flags & M_NOWAIT) {
                        splx(s);
                        return ((void *) NULL);
                }
                if (ksp->ks_limblocks < 65535)
                        ksp->ks_limblocks++;
                tsleep((caddr_t)ksp, PSWP+2, memname[type], 0);
        }
        ksp->ks_size |= 1 << indx;
#endif
#ifdef DIAGNOSTIC
        copysize = 1 << indx < MAX_COPY ? 1 << indx : MAX_COPY;
#endif
#ifdef LOCKDEBUG
        if (flags & M_NOWAIT)
                simplelockrecurse++;
#endif
        if (kbp->kb_next == NULL) {
                kbp->kb_last = NULL;
                if (size > MAXALLOCSAVE)
                        allocsize = roundup(size, CLBYTES);
                else
                        allocsize = 1 << indx;
                npg = clrnd(btoc(allocsize));
#if defined(UVM)
                va = (caddr_t) uvm_km_kmemalloc(kmem_map, uvmexp.kmem_object,
                    (vm_size_t)ctob(npg),
                    (flags & M_NOWAIT) ? UVM_KMF_NOWAIT : 0);
#else
                va = (caddr_t) kmem_malloc(kmem_map, (vm_size_t)ctob(npg),
                    !(flags & M_NOWAIT));
#endif
                if (va == NULL) {
                        /*
                         * Kmem_malloc() can return NULL, even if it can
                         * wait, if there is no map space available, because
                         * it can't fix that problem.  Neither can we,
                         * right now.  (We should release pages which
                         * are completely free and which are in buckets
                         * with too many free elements.)
                         */
                        if ((flags & M_NOWAIT) == 0)
                                panic("malloc: out of space in kmem_map");
#ifdef LOCKDEBUG
                        simplelockrecurse--;
#endif
                        splx(s);
                        return ((void *) NULL);
                }
#ifdef KMEMSTATS
                kbp->kb_total += kbp->kb_elmpercl;
#endif
                kup = btokup(va);
                kup->ku_indx = indx;
                if (allocsize > MAXALLOCSAVE) {
                        if (npg > 65535)
                                panic("malloc: allocation too large");
                        kup->ku_pagecnt = npg;
#ifdef KMEMSTATS
                        ksp->ks_memuse += allocsize;
#endif
                        goto out;
                }
#ifdef KMEMSTATS
                kup->ku_freecnt = kbp->kb_elmpercl;
                kbp->kb_totalfree += kbp->kb_elmpercl;
#endif
                /*
                 * Just in case we blocked while allocating memory,
                 * and someone else also allocated memory for this
                 * bucket, don't assume the list is still empty.
                 */
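                /*
                 * The loop below carves the new pages into allocsize-byte
                 * chunks from the top of the region down to va, threading
                 * them onto the bucket's free list (and, under DIAGNOSTIC,
                 * poisoning the first copysize bytes of each chunk with
                 * WEIRD_ADDR).
                 */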
                savedlist = kbp->kb_next;
                kbp->kb_next = cp = va + (npg * NBPG) - allocsize;
                for (;;) {
                        freep = (struct freelist *)cp;
#ifdef DIAGNOSTIC
                        /*
                         * Copy in known text to detect modification
                         * after freeing.
                         */
                        end = (int32_t *)&cp[copysize];
                        for (lp = (int32_t *)cp; lp < end; lp++)
                                *lp = WEIRD_ADDR;
                        freep->type = M_FREE;
#endif /* DIAGNOSTIC */
                        if (cp <= va)
                                break;
                        cp -= allocsize;
                        freep->next = cp;
                }
                freep->next = savedlist;
                if (kbp->kb_last == NULL)
                        kbp->kb_last = (caddr_t)freep;
        }
        va = kbp->kb_next;
        kbp->kb_next = ((struct freelist *)va)->next;
#ifdef DIAGNOSTIC
        freep = (struct freelist *)va;
        savedtype = (unsigned)freep->type < M_LAST ?
            memname[freep->type] : "???";
#if defined(UVM)
        if (kbp->kb_next) {
                int rv;
                vm_offset_t addr = (vm_offset_t)kbp->kb_next;

                vm_map_lock_read(kmem_map);
                rv = uvm_map_checkprot(kmem_map, addr,
                    addr + sizeof(struct freelist),
                    VM_PROT_WRITE);
                vm_map_unlock_read(kmem_map);

                if (!rv)
#else
        if (kbp->kb_next &&
            !kernacc(kbp->kb_next, sizeof(struct freelist), 0))
#endif
        {
                printf(
                    "%s %ld of object %p size %ld %s %s (invalid addr %p)\n",
                    "Data modified on freelist: word",
                    (long)((int32_t *)&kbp->kb_next - (int32_t *)kbp),
                    va, size, "previous type", savedtype, kbp->kb_next);
#ifdef MALLOCLOG
                hitmlog(va);
#endif
                kbp->kb_next = NULL;
#if defined(UVM)
        }
#endif
        }

        /* Fill the fields that we've used with WEIRD_ADDR */
#if BYTE_ORDER == BIG_ENDIAN
        freep->type = WEIRD_ADDR >> 16;
#endif
#if BYTE_ORDER == LITTLE_ENDIAN
        freep->type = (short)WEIRD_ADDR;
#endif
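        /*
         * The byte-order split restores the 16-bit type field to exactly
         * the half of WEIRD_ADDR it overlays, so the 32-bit word that
         * contains it compares equal to WEIRD_ADDR in the check below.
         */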
        end = (int32_t *)&freep->next +
            (sizeof(freep->next) / sizeof(int32_t));
        for (lp = (int32_t *)&freep->next; lp < end; lp++)
                *lp = WEIRD_ADDR;

        /* and check that the data hasn't been modified. */
        end = (int32_t *)&va[copysize];
        for (lp = (int32_t *)va; lp < end; lp++) {
                if (*lp == WEIRD_ADDR)
                        continue;
                printf("%s %ld of object %p size %ld %s %s (0x%x != 0x%x)\n",
                    "Data modified on freelist: word",
                    (long)(lp - (int32_t *)va), va, size, "previous type",
                    savedtype, *lp, WEIRD_ADDR);
#ifdef MALLOCLOG
                hitmlog(va);
#endif
                break;
        }

        freep->spare0 = 0;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
        kup = btokup(va);
        if (kup->ku_indx != indx)
                panic("malloc: wrong bucket");
        if (kup->ku_freecnt == 0)
                panic("malloc: lost data");
        kup->ku_freecnt--;
        kbp->kb_totalfree--;
        ksp->ks_memuse += 1 << indx;
out:
        kbp->kb_calls++;
        ksp->ks_inuse++;
        ksp->ks_calls++;
        if (ksp->ks_memuse > ksp->ks_maxused)
                ksp->ks_maxused = ksp->ks_memuse;
#else
out:
#endif
#ifdef MALLOCLOG
        domlog(va, size, type, 1, file, line);
#endif
        splx(s);
#ifdef LOCKDEBUG
        if (flags & M_NOWAIT)
                simplelockrecurse--;
#endif
        return ((void *) va);
}
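
/*
 * Illustrative caller usage (hypothetical struct foo; not part of this
 * file's interfaces):
 *
 *      struct foo *fp;
 *
 *      fp = (struct foo *)malloc(sizeof(*fp), M_DEVBUF, M_NOWAIT);
 *      if (fp == NULL)
 *              return (ENOMEM);
 *      ...
 *      free(fp, M_DEVBUF);
 *
 * With M_WAITOK instead of M_NOWAIT, malloc() sleeps until the per-type
 * limit allows the allocation rather than returning NULL (though it can
 * still panic if kmem_map itself is exhausted, as noted above).
 */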

/*
 * Free a block of memory allocated by malloc.
 */
#ifdef MALLOCLOG
void
_free(addr, type, file, line)
        void *addr;
        int type;
        const char *file;
        long line;
#else
void
free(addr, type)
        void *addr;
        int type;
#endif /* MALLOCLOG */
{
        register struct kmembuckets *kbp;
        register struct kmemusage *kup;
        register struct freelist *freep;
        long size;
        int s;
#ifdef DIAGNOSTIC
        caddr_t cp;
        int32_t *end, *lp;
        long alloc, copysize;
#endif
#ifdef KMEMSTATS
        register struct kmemstats *ksp = &kmemstats[type];
#endif

        kup = btokup(addr);
        size = 1 << kup->ku_indx;
        kbp = &bucket[kup->ku_indx];
        s = splimp();
#ifdef MALLOCLOG
        domlog(addr, 0, type, 2, file, line);
#endif
#ifdef DIAGNOSTIC
        /*
         * Check for returns of data that do not point to the
         * beginning of the allocation.
         */
        if (size > NBPG * CLSIZE)
                alloc = addrmask[BUCKETINDX(NBPG * CLSIZE)];
        else
                alloc = addrmask[kup->ku_indx];
        if (((u_long)addr & alloc) != 0)
                panic("free: unaligned addr %p, size %ld, type %s, mask %ld\n",
                    addr, size, memname[type], alloc);
#endif /* DIAGNOSTIC */
        if (size > MAXALLOCSAVE) {
#if defined(UVM)
                uvm_km_free(kmem_map, (vm_offset_t)addr, ctob(kup->ku_pagecnt));
#else
                kmem_free(kmem_map, (vm_offset_t)addr, ctob(kup->ku_pagecnt));
#endif
#ifdef KMEMSTATS
                size = kup->ku_pagecnt << PGSHIFT;
                ksp->ks_memuse -= size;
                kup->ku_indx = 0;
                kup->ku_pagecnt = 0;
                if (ksp->ks_memuse + size >= ksp->ks_limit &&
                    ksp->ks_memuse < ksp->ks_limit)
                        wakeup((caddr_t)ksp);
                ksp->ks_inuse--;
                kbp->kb_total -= 1;
#endif
                splx(s);
                return;
        }
        freep = (struct freelist *)addr;
#ifdef DIAGNOSTIC
        /*
         * Check for multiple frees. Use a quick check to see if
         * it looks free before laboriously searching the freelist.
         */
        if (freep->spare0 == WEIRD_ADDR) {
                for (cp = kbp->kb_next; cp;
                    cp = ((struct freelist *)cp)->next) {
                        if (addr != cp)
                                continue;
                        printf("multiply freed item %p\n", addr);
#ifdef MALLOCLOG
                        hitmlog(addr);
#endif
                        panic("free: duplicated free");
                }
        }
        /*
         * Copy in known text to detect modification after freeing
         * and to make it look free. Also, save the type being freed
         * so we can list likely culprit if modification is detected
         * when the object is reallocated.
         */
        copysize = size < MAX_COPY ? size : MAX_COPY;
        end = (int32_t *)&((caddr_t)addr)[copysize];
        for (lp = (int32_t *)addr; lp < end; lp++)
                *lp = WEIRD_ADDR;
        freep->type = type;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
        kup->ku_freecnt++;
        if (kup->ku_freecnt >= kbp->kb_elmpercl)
                if (kup->ku_freecnt > kbp->kb_elmpercl)
                        panic("free: multiple frees");
                else if (kbp->kb_totalfree > kbp->kb_highwat)
                        kbp->kb_couldfree++;
        kbp->kb_totalfree++;
        ksp->ks_memuse -= size;
        if (ksp->ks_memuse + size >= ksp->ks_limit &&
            ksp->ks_memuse < ksp->ks_limit)
                wakeup((caddr_t)ksp);
        ksp->ks_inuse--;
#endif
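        /*
         * Append the chunk at the tail of the bucket's free list (FIFO),
         * so it will not be handed out again until the chunks freed
         * before it have been reused.
         */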
        if (kbp->kb_next == NULL)
                kbp->kb_next = addr;
        else
                ((struct freelist *)kbp->kb_last)->next = addr;
        freep->next = NULL;
        kbp->kb_last = addr;
        splx(s);
}

/*
 * Change the size of a block of memory.
 */
void *
realloc(curaddr, newsize, type, flags)
        void *curaddr;
        unsigned long newsize;
        int type, flags;
{
        register struct kmemusage *kup;
        long cursize;
        void *newaddr;
#ifdef DIAGNOSTIC
        long alloc;
#endif

        /*
         * Realloc() with a NULL pointer is the same as malloc().
         */
        if (curaddr == NULL)
                return (malloc(newsize, type, flags));

        /*
         * Realloc() with zero size is the same as free().
         */
        if (newsize == 0) {
                free(curaddr, type);
                return (NULL);
        }

        /*
         * Find out how large the old allocation was (and do some
         * sanity checking).
         */
        kup = btokup(curaddr);
        cursize = 1 << kup->ku_indx;

#ifdef DIAGNOSTIC
        /*
         * Check for returns of data that do not point to the
         * beginning of the allocation.
         */
        if (cursize > NBPG * CLSIZE)
                alloc = addrmask[BUCKETINDX(NBPG * CLSIZE)];
        else
                alloc = addrmask[kup->ku_indx];
        if (((u_long)curaddr & alloc) != 0)
                panic("realloc: unaligned addr %p, size %ld, type %s, mask %ld\n",
                    curaddr, cursize, memname[type], alloc);
#endif /* DIAGNOSTIC */

        if (cursize > MAXALLOCSAVE)
                cursize = ctob(kup->ku_pagecnt);

        /*
         * If we already actually have as much as they want, we're done.
         */
        if (newsize <= cursize)
                return (curaddr);

        /*
         * Can't satisfy the allocation with the existing block.
         * Allocate a new one and copy the data.
         */
        newaddr = malloc(newsize, type, flags);
        if (newaddr == NULL) {
                /*
                 * Malloc() failed, because flags included M_NOWAIT.
                 * Return NULL to indicate that failure.  The old
                 * pointer is still valid.
                 */
                return NULL;
        }
        memcpy(newaddr, curaddr, cursize);

        /*
         * We were successful: free the old allocation and return
         * the new one.
         */
        free(curaddr, type);
        return (newaddr);
}
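
/*
 * Illustrative use of realloc() by a hypothetical caller that must not
 * lose the original buffer on failure:
 *
 *      np = realloc(p, newlen, M_TEMP, M_NOWAIT);
 *      if (np == NULL) {
 *              ... p is still valid; handle the failure ...
 *      } else
 *              p = np;
 */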

/*
 * Initialize the kernel memory allocator
 */
void
kmeminit()
{
#ifdef KMEMSTATS
        register long indx;
#endif
        int npg;

#if     ((MAXALLOCSAVE & (MAXALLOCSAVE - 1)) != 0)
                ERROR!_kmeminit:_MAXALLOCSAVE_not_power_of_2
#endif
#if     (MAXALLOCSAVE > MINALLOCSIZE * 32768)
                ERROR!_kmeminit:_MAXALLOCSAVE_too_big
#endif
#if     (MAXALLOCSAVE < CLBYTES)
                ERROR!_kmeminit:_MAXALLOCSAVE_too_small
#endif
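        /*
         * The ERROR!_... lines above act as compile-time assertions: if
         * one of the #if conditions is true, the bogus token is emitted
         * into the function body and the build fails.
         */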

        if (sizeof(struct freelist) > (1 << MINBUCKET))
                panic("minbucket too small/struct freelist too big");

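        /*
         * Carve VM_KMEM_SIZE bytes of kernel VA into the kmem_map submap;
         * the per-type ks_limit set below is 60% (6/10) of that space.
         */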
        npg = VM_KMEM_SIZE / NBPG;
#if defined(UVM)
        kmemusage = (struct kmemusage *) uvm_km_zalloc(kernel_map,
            (vm_size_t)(npg * sizeof(struct kmemusage)));
        kmem_map = uvm_km_suballoc(kernel_map, (vm_offset_t *)&kmembase,
            (vm_offset_t *)&kmemlimit, (vm_size_t)(npg * NBPG),
            FALSE, FALSE, &kmem_map_store);
#else
        kmemusage = (struct kmemusage *) kmem_alloc(kernel_map,
            (vm_size_t)(npg * sizeof(struct kmemusage)));
        kmem_map = kmem_suballoc(kernel_map, (vm_offset_t *)&kmembase,
            (vm_offset_t *)&kmemlimit, (vm_size_t)(npg * NBPG), FALSE);
#endif
#ifdef KMEMSTATS
        for (indx = 0; indx < MINBUCKET + 16; indx++) {
                if (1 << indx >= CLBYTES)
                        bucket[indx].kb_elmpercl = 1;
                else
                        bucket[indx].kb_elmpercl = CLBYTES / (1 << indx);
                bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
        }
        for (indx = 0; indx < M_LAST; indx++)
                kmemstats[indx].ks_limit = npg * NBPG * 6 / 10;
#endif
}