/*	$NetBSD: kern_malloc.c,v 1.30 1998/02/08 06:15:57 thorpej Exp $	*/

/*
 * Copyright 1996 Christopher G. Demetriou.  All rights reserved.
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/map.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>

#if defined(UVM)
#include <uvm/uvm_extern.h>

static struct vm_map kmem_map_store;
vm_map_t kmem_map = NULL;
#endif

#include "opt_kmemstats.h"
#include "opt_malloclog.h"

struct kmembuckets bucket[MINBUCKET + 16];
struct kmemstats kmemstats[M_LAST];
struct kmemusage *kmemusage;
char *kmembase, *kmemlimit;
const char *memname[] = INITKMEMNAMES;

#ifdef MALLOCLOG
#ifndef MALLOCLOGSIZE
#define	MALLOCLOGSIZE	100000
#endif

struct malloclog {
	void *addr;
	long size;
	int type;
	int action;
	const char *file;
	long line;
} malloclog[MALLOCLOGSIZE];

long	malloclogptr;

static void domlog __P((void *a, long size, int type, int action,
	const char *file, long line));
static void hitmlog __P((void *a));

static void
domlog(a, size, type, action, file, line)
	void *a;
	long size;
	int type;
	int action;
	const char *file;
	long line;
{

	malloclog[malloclogptr].addr = a;
	malloclog[malloclogptr].size = size;
	malloclog[malloclogptr].type = type;
	malloclog[malloclogptr].action = action;
	malloclog[malloclogptr].file = file;
	malloclog[malloclogptr].line = line;
	malloclogptr++;
	if (malloclogptr >= MALLOCLOGSIZE)
		malloclogptr = 0;
}

static void
hitmlog(a)
	void *a;
{
	struct malloclog *lp;
	long l;

#define	PRT \
	if (malloclog[l].addr == a && malloclog[l].action) { \
		lp = &malloclog[l]; \
		printf("malloc log entry %ld:\n", l); \
		printf("\taddr = %p\n", lp->addr); \
		printf("\tsize = %ld\n", lp->size); \
		printf("\ttype = %s\n", memname[lp->type]); \
		printf("\taction = %s\n", lp->action == 1 ? "alloc" : "free"); \
		printf("\tfile = %s\n", lp->file); \
		printf("\tline = %ld\n", lp->line); \
	}

	for (l = malloclogptr; l < MALLOCLOGSIZE; l++)
		PRT

	for (l = 0; l < malloclogptr; l++)
		PRT
}
#endif /* MALLOCLOG */
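
/*
 * Note (illustrative, not part of the original source): when MALLOCLOG
 * is defined, callers still write plain malloc()/free(); <sys/malloc.h>
 * presumably maps those names onto the _malloc()/_free() entry points
 * below with macros along these lines:
 *
 *	#define	malloc(size, type, flags) \
 *		_malloc((size), (type), (flags), __FILE__, __LINE__)
 *	#define	free(addr, type) \
 *		_free((addr), (type), __FILE__, __LINE__)
 *
 * so that domlog() can record the file and line of every allocation
 * and free, and hitmlog() can replay the history of a damaged address.
 */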

#ifdef DIAGNOSTIC
/*
 * This structure provides a set of masks to catch unaligned frees.
 */
long addrmask[] = { 0,
	0x00000001, 0x00000003, 0x00000007, 0x0000000f,
	0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
	0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
	0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
};

/*
 * The WEIRD_ADDR is used as known text to copy into free objects so
 * that modifications after frees can be detected.
 */
#define	WEIRD_ADDR	((unsigned) 0xdeadbeef)
#define	MAX_COPY	32

/*
 * Normally the freelist structure is used only to hold the list pointer
 * for free objects.  However, when running with diagnostics, the first
 * 8 bytes of the structure are unused except for diagnostic information,
 * and the free list pointer is at offset 8 in the structure.  Since the
 * first 8 bytes are the portion of the structure most often modified, this
 * helps to detect memory reuse problems and avoid free list corruption.
 */
struct freelist {
	int32_t	spare0;
	int16_t	type;
	int16_t	spare1;
	caddr_t	next;
};
#else /* !DIAGNOSTIC */
struct freelist {
	caddr_t	next;
};
#endif /* DIAGNOSTIC */
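
/*
 * Layout sketch of the diagnostic freelist header inside a free object
 * (offsets follow directly from the structure above, assuming the
 * pointer member is naturally aligned at offset 8):
 *
 *	bytes 0-3:	spare0	(holds WEIRD_ADDR while the object is free)
 *	bytes 4-5:	type	(type the object was last freed as)
 *	bytes 6-7:	spare1
 *	bytes 8- :	next	(free list linkage)
 *
 * Keeping the linkage out of the first, most frequently scribbled-on
 * bytes lets use-after-free damage show up without losing the list.
 */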

/*
 * Allocate a block of memory
 */
#ifdef MALLOCLOG
void *
_malloc(size, type, flags, file, line)
	unsigned long size;
	int type, flags;
	const char *file;
	long line;
#else
void *
malloc(size, type, flags)
	unsigned long size;
	int type, flags;
#endif /* MALLOCLOG */
{
	register struct kmembuckets *kbp;
	register struct kmemusage *kup;
	register struct freelist *freep;
	long indx, npg, allocsize;
	int s;
	caddr_t va, cp, savedlist;
#ifdef DIAGNOSTIC
	int32_t *end, *lp;
	int copysize;
	const char *savedtype;
#endif
#ifdef KMEMSTATS
	register struct kmemstats *ksp = &kmemstats[type];

	if (((unsigned long)type) > M_LAST)
		panic("malloc - bogus type");
#endif
	indx = BUCKETINDX(size);
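	/*
	 * BUCKETINDX() (from <sys/malloc.h>) maps the request to the
	 * smallest power-of-two bucket that can hold it; for example, a
	 * 100-byte request would land in the 128-byte bucket (indx == 7),
	 * assuming MINBUCKET <= 7.
	 */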
	kbp = &bucket[indx];
	s = splimp();
#ifdef KMEMSTATS
	while (ksp->ks_memuse >= ksp->ks_limit) {
		if (flags & M_NOWAIT) {
			splx(s);
			return ((void *) NULL);
		}
		if (ksp->ks_limblocks < 65535)
			ksp->ks_limblocks++;
		tsleep((caddr_t)ksp, PSWP+2, memname[type], 0);
	}
	ksp->ks_size |= 1 << indx;
#endif
#ifdef DIAGNOSTIC
	copysize = 1 << indx < MAX_COPY ? 1 << indx : MAX_COPY;
#endif
	if (kbp->kb_next == NULL) {
		kbp->kb_last = NULL;
		if (size > MAXALLOCSAVE)
			allocsize = roundup(size, CLBYTES);
		else
			allocsize = 1 << indx;
		npg = clrnd(btoc(allocsize));
#if defined(UVM)
		va = (caddr_t) uvm_km_kmemalloc(kmem_map, uvmexp.kmem_object,
		    (vm_size_t)ctob(npg),
		    (flags & M_NOWAIT) ? UVM_KMF_NOWAIT : 0);
#else
		va = (caddr_t) kmem_malloc(kmem_map, (vm_size_t)ctob(npg),
		    !(flags & M_NOWAIT));
#endif
		if (va == NULL) {
			/*
			 * Kmem_malloc() can return NULL, even if it can
			 * wait, if there is no map space available, because
			 * it can't fix that problem.  Neither can we,
			 * right now.  (We should release pages which
			 * are completely free and which are in buckets
			 * with too many free elements.)
			 */
			if ((flags & M_NOWAIT) == 0)
				panic("malloc: out of space in kmem_map");
			splx(s);
			return ((void *) NULL);
		}
#ifdef KMEMSTATS
		kbp->kb_total += kbp->kb_elmpercl;
#endif
		kup = btokup(va);
		kup->ku_indx = indx;
		if (allocsize > MAXALLOCSAVE) {
			if (npg > 65535)
				panic("malloc: allocation too large");
			kup->ku_pagecnt = npg;
#ifdef KMEMSTATS
			ksp->ks_memuse += allocsize;
#endif
			goto out;
		}
#ifdef KMEMSTATS
		kup->ku_freecnt = kbp->kb_elmpercl;
		kbp->kb_totalfree += kbp->kb_elmpercl;
#endif
		/*
		 * Just in case we blocked while allocating memory,
		 * and someone else also allocated memory for this
		 * bucket, don't assume the list is still empty.
		 */
		savedlist = kbp->kb_next;
		kbp->kb_next = cp = va + (npg * NBPG) - allocsize;
		for (;;) {
			freep = (struct freelist *)cp;
#ifdef DIAGNOSTIC
			/*
			 * Copy in known text to detect modification
			 * after freeing.
			 */
			end = (int32_t *)&cp[copysize];
			for (lp = (int32_t *)cp; lp < end; lp++)
				*lp = WEIRD_ADDR;
			freep->type = M_FREE;
#endif /* DIAGNOSTIC */
			if (cp <= va)
				break;
			cp -= allocsize;
			freep->next = cp;
		}
		freep->next = savedlist;
		if (kbp->kb_last == NULL)
			kbp->kb_last = (caddr_t)freep;
	}
	va = kbp->kb_next;
	kbp->kb_next = ((struct freelist *)va)->next;
#ifdef DIAGNOSTIC
	freep = (struct freelist *)va;
	savedtype = (unsigned)freep->type < M_LAST ?
	    memname[freep->type] : "???";
#if defined(UVM)
	if (kbp->kb_next) {
		int rv;
		vm_offset_t addr = (vm_offset_t)kbp->kb_next;

		vm_map_lock_read(kmem_map);
		rv = uvm_map_checkprot(kmem_map, addr,
		    addr + sizeof(struct freelist),
		    VM_PROT_WRITE);
		vm_map_unlock_read(kmem_map);

		if (!rv)
#else
	if (kbp->kb_next &&
	    !kernacc(kbp->kb_next, sizeof(struct freelist), 0))
#endif
	{
		printf(
		    "%s %ld of object %p size %ld %s %s (invalid addr %p)\n",
		    "Data modified on freelist: word",
		    (long)((int32_t *)&kbp->kb_next - (int32_t *)kbp),
		    va, size, "previous type", savedtype, kbp->kb_next);
#ifdef MALLOCLOG
		hitmlog(va);
#endif
		kbp->kb_next = NULL;
#if defined(UVM)
	}
#endif
	}

	/* Fill the fields that we've used with WEIRD_ADDR */
#if BYTE_ORDER == BIG_ENDIAN
	freep->type = WEIRD_ADDR >> 16;
#endif
#if BYTE_ORDER == LITTLE_ENDIAN
	freep->type = (short)WEIRD_ADDR;
#endif
	end = (int32_t *)&freep->next +
	    (sizeof(freep->next) / sizeof(int32_t));
	for (lp = (int32_t *)&freep->next; lp < end; lp++)
		*lp = WEIRD_ADDR;

	/* and check that the data hasn't been modified. */
	end = (int32_t *)&va[copysize];
	for (lp = (int32_t *)va; lp < end; lp++) {
		if (*lp == WEIRD_ADDR)
			continue;
		printf("%s %ld of object %p size %ld %s %s (0x%x != 0x%x)\n",
		    "Data modified on freelist: word",
		    (long)(lp - (int32_t *)va), va, size, "previous type",
		    savedtype, *lp, WEIRD_ADDR);
#ifdef MALLOCLOG
		hitmlog(va);
#endif
		break;
	}

	freep->spare0 = 0;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup = btokup(va);
	if (kup->ku_indx != indx)
		panic("malloc: wrong bucket");
	if (kup->ku_freecnt == 0)
		panic("malloc: lost data");
	kup->ku_freecnt--;
	kbp->kb_totalfree--;
	ksp->ks_memuse += 1 << indx;
out:
	kbp->kb_calls++;
	ksp->ks_inuse++;
	ksp->ks_calls++;
	if (ksp->ks_memuse > ksp->ks_maxused)
		ksp->ks_maxused = ksp->ks_memuse;
#else
out:
#endif
#ifdef MALLOCLOG
	domlog(va, size, type, 1, file, line);
#endif
	splx(s);
	return ((void *) va);
}
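
/*
 * Illustrative caller pattern (a sketch, not part of the original file):
 * M_TEMP and M_NOWAIT come from <sys/malloc.h>; BUFLEN is hypothetical.
 *
 *	caddr_t buf;
 *
 *	buf = (caddr_t)malloc(BUFLEN, M_TEMP, M_NOWAIT);
 *	if (buf == NULL)
 *		return (ENOMEM);
 *	...
 *	free(buf, M_TEMP);
 *
 * With M_NOWAIT the allocation may fail and return NULL; without it the
 * caller may sleep until the per-type limit allows the request.  The
 * type passed to free() should match the one used in malloc() so the
 * KMEMSTATS accounting stays consistent.
 */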

/*
 * Free a block of memory allocated by malloc.
 */
#ifdef MALLOCLOG
void
_free(addr, type, file, line)
	void *addr;
	int type;
	const char *file;
	long line;
#else
void
free(addr, type)
	void *addr;
	int type;
#endif /* MALLOCLOG */
{
	register struct kmembuckets *kbp;
	register struct kmemusage *kup;
	register struct freelist *freep;
	long size;
	int s;
#ifdef DIAGNOSTIC
	caddr_t cp;
	int32_t *end, *lp;
	long alloc, copysize;
#endif
#ifdef KMEMSTATS
	register struct kmemstats *ksp = &kmemstats[type];
#endif

	kup = btokup(addr);
	size = 1 << kup->ku_indx;
	kbp = &bucket[kup->ku_indx];
	s = splimp();
#ifdef MALLOCLOG
	domlog(addr, 0, type, 2, file, line);
#endif
#ifdef DIAGNOSTIC
	/*
	 * Check for returns of data that do not point to the
	 * beginning of the allocation.
	 */
	if (size > NBPG * CLSIZE)
		alloc = addrmask[BUCKETINDX(NBPG * CLSIZE)];
	else
		alloc = addrmask[kup->ku_indx];
	if (((u_long)addr & alloc) != 0)
		panic("free: unaligned addr %p, size %ld, type %s, mask %ld\n",
		    addr, size, memname[type], alloc);
#endif /* DIAGNOSTIC */
	if (size > MAXALLOCSAVE) {
#if defined(UVM)
		uvm_km_free(kmem_map, (vm_offset_t)addr, ctob(kup->ku_pagecnt));
#else
		kmem_free(kmem_map, (vm_offset_t)addr, ctob(kup->ku_pagecnt));
#endif
#ifdef KMEMSTATS
		size = kup->ku_pagecnt << PGSHIFT;
		ksp->ks_memuse -= size;
		kup->ku_indx = 0;
		kup->ku_pagecnt = 0;
		if (ksp->ks_memuse + size >= ksp->ks_limit &&
		    ksp->ks_memuse < ksp->ks_limit)
			wakeup((caddr_t)ksp);
		ksp->ks_inuse--;
		kbp->kb_total -= 1;
#endif
		splx(s);
		return;
	}
	freep = (struct freelist *)addr;
#ifdef DIAGNOSTIC
	/*
	 * Check for multiple frees.  Use a quick check to see if
	 * it looks free before laboriously searching the freelist.
	 */
	if (freep->spare0 == WEIRD_ADDR) {
		for (cp = kbp->kb_next; cp;
		    cp = ((struct freelist *)cp)->next) {
			if (addr != cp)
				continue;
			printf("multiply freed item %p\n", addr);
#ifdef MALLOCLOG
			hitmlog(addr);
#endif
			panic("free: duplicated free");
		}
	}
	/*
	 * Copy in known text to detect modification after freeing
	 * and to make it look free.  Also, save the type being freed
	 * so we can list the likely culprit if modification is detected
	 * when the object is reallocated.
	 */
	copysize = size < MAX_COPY ? size : MAX_COPY;
	end = (int32_t *)&((caddr_t)addr)[copysize];
	for (lp = (int32_t *)addr; lp < end; lp++)
		*lp = WEIRD_ADDR;
	freep->type = type;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup->ku_freecnt++;
	if (kup->ku_freecnt >= kbp->kb_elmpercl)
		if (kup->ku_freecnt > kbp->kb_elmpercl)
			panic("free: multiple frees");
		else if (kbp->kb_totalfree > kbp->kb_highwat)
			kbp->kb_couldfree++;
	kbp->kb_totalfree++;
	ksp->ks_memuse -= size;
	if (ksp->ks_memuse + size >= ksp->ks_limit &&
	    ksp->ks_memuse < ksp->ks_limit)
		wakeup((caddr_t)ksp);
	ksp->ks_inuse--;
#endif
	if (kbp->kb_next == NULL)
		kbp->kb_next = addr;
	else
		((struct freelist *)kbp->kb_last)->next = addr;
	freep->next = NULL;
	kbp->kb_last = addr;
	splx(s);
}
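
/*
 * Note on the free-list discipline above: freed objects are appended at
 * kb_last rather than pushed at kb_next, so each bucket's free list is
 * consumed in FIFO order.  This delays reuse of recently freed memory,
 * which (under DIAGNOSTIC) gives the WEIRD_ADDR check in malloc() a
 * better chance of catching writes to memory that has already been freed.
 */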

/*
 * Change the size of a block of memory.
 */
void *
realloc(curaddr, newsize, type, flags)
	void *curaddr;
	unsigned long newsize;
	int type, flags;
{
	register struct kmemusage *kup;
	long cursize;
	void *newaddr;
#ifdef DIAGNOSTIC
	long alloc;
#endif

	/*
	 * Realloc() with a NULL pointer is the same as malloc().
	 */
	if (curaddr == NULL)
		return (malloc(newsize, type, flags));

	/*
	 * Realloc() with zero size is the same as free().
	 */
	if (newsize == 0) {
		free(curaddr, type);
		return (NULL);
	}

	/*
	 * Find out how large the old allocation was (and do some
	 * sanity checking).
	 */
	kup = btokup(curaddr);
	cursize = 1 << kup->ku_indx;

#ifdef DIAGNOSTIC
	/*
	 * Check for returns of data that do not point to the
	 * beginning of the allocation.
	 */
	if (cursize > NBPG * CLSIZE)
		alloc = addrmask[BUCKETINDX(NBPG * CLSIZE)];
	else
		alloc = addrmask[kup->ku_indx];
	if (((u_long)curaddr & alloc) != 0)
		panic("realloc: unaligned addr %p, size %ld, type %s, mask %ld\n",
		    curaddr, cursize, memname[type], alloc);
#endif /* DIAGNOSTIC */

	if (cursize > MAXALLOCSAVE)
		cursize = ctob(kup->ku_pagecnt);

	/*
	 * If we already actually have as much as they want, we're done.
	 */
	if (newsize <= cursize)
		return (curaddr);

	/*
	 * Can't satisfy the allocation with the existing block.
	 * Allocate a new one and copy the data.
	 */
	newaddr = malloc(newsize, type, flags);
	if (newaddr == NULL) {
		/*
		 * Malloc() failed, because flags included M_NOWAIT.
		 * Return NULL to indicate that failure.  The old
		 * pointer is still valid.
		 */
		return NULL;
	}
	bcopy(curaddr, newaddr, cursize);

	/*
	 * We were successful: free the old allocation and return
	 * the new one.
	 */
	free(curaddr, type);
	return (newaddr);
}
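
/*
 * Illustrative caller pattern for realloc() (a sketch; M_TEMP and the
 * variable names are hypothetical): because a failed M_NOWAIT realloc()
 * leaves the old block intact, the caller should keep the old pointer
 * until the new one is known to be valid.
 *
 *	void *nbuf;
 *
 *	nbuf = realloc(buf, newlen, M_TEMP, M_NOWAIT);
 *	if (nbuf == NULL)
 *		return (ENOMEM);	(buf is still valid and must
 *					 eventually be freed)
 *	buf = nbuf;
 */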

/*
 * Initialize the kernel memory allocator
 */
void
kmeminit()
{
#ifdef KMEMSTATS
	register long indx;
#endif
	int npg;

#if	((MAXALLOCSAVE & (MAXALLOCSAVE - 1)) != 0)
		ERROR!_kmeminit:_MAXALLOCSAVE_not_power_of_2
#endif
#if	(MAXALLOCSAVE > MINALLOCSIZE * 32768)
		ERROR!_kmeminit:_MAXALLOCSAVE_too_big
#endif
#if	(MAXALLOCSAVE < CLBYTES)
		ERROR!_kmeminit:_MAXALLOCSAVE_too_small
#endif

	if (sizeof(struct freelist) > (1 << MINBUCKET))
		panic("minbucket too small/struct freelist too big");

	npg = VM_KMEM_SIZE / NBPG;
#if defined(UVM)
	kmemusage = (struct kmemusage *) uvm_km_zalloc(kernel_map,
	    (vm_size_t)(npg * sizeof(struct kmemusage)));
	kmem_map = uvm_km_suballoc(kernel_map, (vm_offset_t *)&kmembase,
	    (vm_offset_t *)&kmemlimit, (vm_size_t)(npg * NBPG),
	    FALSE, FALSE, &kmem_map_store);
#else
	kmemusage = (struct kmemusage *) kmem_alloc(kernel_map,
	    (vm_size_t)(npg * sizeof(struct kmemusage)));
	kmem_map = kmem_suballoc(kernel_map, (vm_offset_t *)&kmembase,
	    (vm_offset_t *)&kmemlimit, (vm_size_t)(npg * NBPG), FALSE);
#endif
#ifdef KMEMSTATS
	for (indx = 0; indx < MINBUCKET + 16; indx++) {
		if (1 << indx >= CLBYTES)
			bucket[indx].kb_elmpercl = 1;
		else
			bucket[indx].kb_elmpercl = CLBYTES / (1 << indx);
		bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
	}
	for (indx = 0; indx < M_LAST; indx++)
		kmemstats[indx].ks_limit = npg * NBPG * 6 / 10;
#endif
}
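
/*
 * Sizing sketch (illustrative numbers, not from this file): with, say,
 * VM_KMEM_SIZE = 4MB and NBPG = 4KB, npg is 1024, the kmem_map submap
 * spans 4MB, and each allocation type's ks_limit is 6/10 of that,
 * i.e. about 2.4MB.
 */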