kern_malloc.c revision 1.26 1 1.26 mycroft /* $NetBSD: kern_malloc.c,v 1.26 1997/10/09 13:05:59 mycroft Exp $ */
2 1.9 cgd
3 1.1 cgd /*
4 1.20 cgd * Copyright 1996 Christopher G. Demetriou. All rights reserved.
5 1.8 cgd * Copyright (c) 1987, 1991, 1993
6 1.8 cgd * The Regents of the University of California. All rights reserved.
7 1.1 cgd *
8 1.1 cgd * Redistribution and use in source and binary forms, with or without
9 1.1 cgd * modification, are permitted provided that the following conditions
10 1.1 cgd * are met:
11 1.1 cgd * 1. Redistributions of source code must retain the above copyright
12 1.1 cgd * notice, this list of conditions and the following disclaimer.
13 1.1 cgd * 2. Redistributions in binary form must reproduce the above copyright
14 1.1 cgd * notice, this list of conditions and the following disclaimer in the
15 1.1 cgd * documentation and/or other materials provided with the distribution.
16 1.1 cgd * 3. All advertising materials mentioning features or use of this software
17 1.1 cgd * must display the following acknowledgement:
18 1.1 cgd * This product includes software developed by the University of
19 1.1 cgd * California, Berkeley and its contributors.
20 1.1 cgd * 4. Neither the name of the University nor the names of its contributors
21 1.1 cgd * may be used to endorse or promote products derived from this software
22 1.1 cgd * without specific prior written permission.
23 1.1 cgd *
24 1.1 cgd * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 1.1 cgd * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 1.1 cgd * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 1.1 cgd * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 1.1 cgd * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 1.1 cgd * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 1.1 cgd * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 1.1 cgd * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 1.1 cgd * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 1.1 cgd * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 1.1 cgd * SUCH DAMAGE.
35 1.1 cgd *
36 1.9 cgd * @(#)kern_malloc.c 8.3 (Berkeley) 1/4/94
37 1.1 cgd */
38 1.1 cgd
39 1.7 mycroft #include <sys/param.h>
40 1.7 mycroft #include <sys/proc.h>
41 1.8 cgd #include <sys/map.h>
42 1.7 mycroft #include <sys/kernel.h>
43 1.7 mycroft #include <sys/malloc.h>
44 1.12 christos #include <sys/systm.h>
45 1.7 mycroft
46 1.7 mycroft #include <vm/vm.h>
47 1.7 mycroft #include <vm/vm_kern.h>
48 1.24 thorpej
49 1.24 thorpej #include "opt_kmemstats.h"
50 1.12 christos
/* One bucket per power-of-two size class; indexed by BUCKETINDX(size). */
struct kmembuckets bucket[MINBUCKET + 16];
/* Per-type allocation statistics, indexed by the M_* type codes (0..M_LAST-1). */
struct kmemstats kmemstats[M_LAST];
/* Per-page usage records for the kmem submap; allocated in kmeminit(). */
struct kmemusage *kmemusage;
/* Bounds of the kmem_map submap that backs all kernel malloc storage. */
char *kmembase, *kmemlimit;
/* Printable names for the M_* types, used in panics and diagnostics. */
const char *memname[] = INITKMEMNAMES;

#ifdef DIAGNOSTIC
/*
 * This structure provides a set of masks to catch unaligned frees.
 * addrmask[i] masks the low i bits; free() selects the entry matching
 * the allocation's bucket index.
 */
long addrmask[] = { 0,
	0x00000001, 0x00000003, 0x00000007, 0x0000000f,
	0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
	0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
	0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
};

/*
 * The WEIRD_ADDR is used as known text to copy into free objects so
 * that modifications after frees can be detected.
 */
#define WEIRD_ADDR	((unsigned) 0xdeadbeef)
/* At most this many bytes of each free object are poisoned/verified. */
#define MAX_COPY	32

/*
 * Normally the freelist structure is used only to hold the list pointer
 * for free objects.  However, when running with diagnostics, the first
 * 8 bytes of the structure is unused except for diagnostic information,
 * and the free list pointer is at offset 8 in the structure.  Since the
 * first 8 bytes is the portion of the structure most often modified, this
 * helps to detect memory reuse problems and avoid free list corruption.
 */
struct freelist {
	int32_t	spare0;		/* poison word checked on reallocation */
	int16_t	type;		/* M_* type recorded at free() time */
	int16_t	spare1;
	caddr_t	next;		/* next free object in the bucket */
};
#else /* !DIAGNOSTIC */
struct freelist {
	caddr_t	next;		/* next free object in the bucket */
};
#endif /* DIAGNOSTIC */
94 1.8 cgd
95 1.1 cgd /*
96 1.1 cgd * Allocate a block of memory
97 1.1 cgd */
98 1.1 cgd void *
99 1.1 cgd malloc(size, type, flags)
100 1.1 cgd unsigned long size;
101 1.1 cgd int type, flags;
102 1.1 cgd {
103 1.1 cgd register struct kmembuckets *kbp;
104 1.1 cgd register struct kmemusage *kup;
105 1.8 cgd register struct freelist *freep;
106 1.5 andrew long indx, npg, allocsize;
107 1.1 cgd int s;
108 1.1 cgd caddr_t va, cp, savedlist;
109 1.8 cgd #ifdef DIAGNOSTIC
110 1.11 cgd int32_t *end, *lp;
111 1.8 cgd int copysize;
112 1.26 mycroft const char *savedtype;
113 1.8 cgd #endif
114 1.1 cgd #ifdef KMEMSTATS
115 1.1 cgd register struct kmemstats *ksp = &kmemstats[type];
116 1.1 cgd
117 1.1 cgd if (((unsigned long)type) > M_LAST)
118 1.1 cgd panic("malloc - bogus type");
119 1.1 cgd #endif
120 1.1 cgd indx = BUCKETINDX(size);
121 1.1 cgd kbp = &bucket[indx];
122 1.1 cgd s = splimp();
123 1.1 cgd #ifdef KMEMSTATS
124 1.1 cgd while (ksp->ks_memuse >= ksp->ks_limit) {
125 1.1 cgd if (flags & M_NOWAIT) {
126 1.1 cgd splx(s);
127 1.1 cgd return ((void *) NULL);
128 1.1 cgd }
129 1.1 cgd if (ksp->ks_limblocks < 65535)
130 1.1 cgd ksp->ks_limblocks++;
131 1.1 cgd tsleep((caddr_t)ksp, PSWP+2, memname[type], 0);
132 1.1 cgd }
133 1.8 cgd ksp->ks_size |= 1 << indx;
134 1.8 cgd #endif
135 1.8 cgd #ifdef DIAGNOSTIC
136 1.8 cgd copysize = 1 << indx < MAX_COPY ? 1 << indx : MAX_COPY;
137 1.1 cgd #endif
138 1.1 cgd if (kbp->kb_next == NULL) {
139 1.8 cgd kbp->kb_last = NULL;
140 1.1 cgd if (size > MAXALLOCSAVE)
141 1.1 cgd allocsize = roundup(size, CLBYTES);
142 1.1 cgd else
143 1.1 cgd allocsize = 1 << indx;
144 1.1 cgd npg = clrnd(btoc(allocsize));
145 1.1 cgd va = (caddr_t) kmem_malloc(kmem_map, (vm_size_t)ctob(npg),
146 1.1 cgd !(flags & M_NOWAIT));
147 1.1 cgd if (va == NULL) {
148 1.17 cgd /*
149 1.17 cgd * Kmem_malloc() can return NULL, even if it can
150 1.17 cgd * wait, if there is no map space avaiable, because
151 1.17 cgd * it can't fix that problem. Neither can we,
152 1.17 cgd * right now. (We should release pages which
153 1.17 cgd * are completely free and which are in buckets
154 1.17 cgd * with too many free elements.)
155 1.17 cgd */
156 1.17 cgd if ((flags & M_NOWAIT) == 0)
157 1.17 cgd panic("malloc: out of space in kmem_map");
158 1.6 cgd splx(s);
159 1.6 cgd return ((void *) NULL);
160 1.1 cgd }
161 1.1 cgd #ifdef KMEMSTATS
162 1.1 cgd kbp->kb_total += kbp->kb_elmpercl;
163 1.1 cgd #endif
164 1.1 cgd kup = btokup(va);
165 1.1 cgd kup->ku_indx = indx;
166 1.1 cgd if (allocsize > MAXALLOCSAVE) {
167 1.1 cgd if (npg > 65535)
168 1.1 cgd panic("malloc: allocation too large");
169 1.1 cgd kup->ku_pagecnt = npg;
170 1.1 cgd #ifdef KMEMSTATS
171 1.1 cgd ksp->ks_memuse += allocsize;
172 1.1 cgd #endif
173 1.1 cgd goto out;
174 1.1 cgd }
175 1.1 cgd #ifdef KMEMSTATS
176 1.1 cgd kup->ku_freecnt = kbp->kb_elmpercl;
177 1.1 cgd kbp->kb_totalfree += kbp->kb_elmpercl;
178 1.1 cgd #endif
179 1.1 cgd /*
180 1.1 cgd * Just in case we blocked while allocating memory,
181 1.1 cgd * and someone else also allocated memory for this
182 1.1 cgd * bucket, don't assume the list is still empty.
183 1.1 cgd */
184 1.1 cgd savedlist = kbp->kb_next;
185 1.8 cgd kbp->kb_next = cp = va + (npg * NBPG) - allocsize;
186 1.8 cgd for (;;) {
187 1.8 cgd freep = (struct freelist *)cp;
188 1.8 cgd #ifdef DIAGNOSTIC
189 1.8 cgd /*
190 1.8 cgd * Copy in known text to detect modification
191 1.8 cgd * after freeing.
192 1.8 cgd */
193 1.11 cgd end = (int32_t *)&cp[copysize];
194 1.11 cgd for (lp = (int32_t *)cp; lp < end; lp++)
195 1.8 cgd *lp = WEIRD_ADDR;
196 1.8 cgd freep->type = M_FREE;
197 1.8 cgd #endif /* DIAGNOSTIC */
198 1.8 cgd if (cp <= va)
199 1.8 cgd break;
200 1.8 cgd cp -= allocsize;
201 1.8 cgd freep->next = cp;
202 1.8 cgd }
203 1.8 cgd freep->next = savedlist;
204 1.8 cgd if (kbp->kb_last == NULL)
205 1.8 cgd kbp->kb_last = (caddr_t)freep;
206 1.1 cgd }
207 1.1 cgd va = kbp->kb_next;
208 1.8 cgd kbp->kb_next = ((struct freelist *)va)->next;
209 1.8 cgd #ifdef DIAGNOSTIC
210 1.8 cgd freep = (struct freelist *)va;
211 1.8 cgd savedtype = (unsigned)freep->type < M_LAST ?
212 1.8 cgd memname[freep->type] : "???";
213 1.8 cgd if (kbp->kb_next &&
214 1.8 cgd !kernacc(kbp->kb_next, sizeof(struct freelist), 0)) {
215 1.22 christos printf(
216 1.21 christos "%s %ld of object %p size %ld %s %s (invalid addr %p)\n",
217 1.21 christos "Data modified on freelist: word",
218 1.21 christos (long)((int32_t *)&kbp->kb_next - (int32_t *)kbp),
219 1.21 christos va, size, "previous type", savedtype, kbp->kb_next);
220 1.8 cgd kbp->kb_next = NULL;
221 1.8 cgd }
222 1.11 cgd
223 1.11 cgd /* Fill the fields that we've used with WEIRD_ADDR */
224 1.8 cgd #if BYTE_ORDER == BIG_ENDIAN
225 1.8 cgd freep->type = WEIRD_ADDR >> 16;
226 1.8 cgd #endif
227 1.8 cgd #if BYTE_ORDER == LITTLE_ENDIAN
228 1.8 cgd freep->type = (short)WEIRD_ADDR;
229 1.8 cgd #endif
230 1.11 cgd end = (int32_t *)&freep->next +
231 1.11 cgd (sizeof(freep->next) / sizeof(int32_t));
232 1.11 cgd for (lp = (int32_t *)&freep->next; lp < end; lp++)
233 1.11 cgd *lp = WEIRD_ADDR;
234 1.11 cgd
235 1.11 cgd /* and check that the data hasn't been modified. */
236 1.11 cgd end = (int32_t *)&va[copysize];
237 1.11 cgd for (lp = (int32_t *)va; lp < end; lp++) {
238 1.8 cgd if (*lp == WEIRD_ADDR)
239 1.8 cgd continue;
240 1.22 christos printf("%s %ld of object %p size %ld %s %s (0x%x != 0x%x)\n",
241 1.21 christos "Data modified on freelist: word",
242 1.21 christos (long)(lp - (int32_t *)va), va, size, "previous type",
243 1.21 christos savedtype, *lp, WEIRD_ADDR);
244 1.8 cgd break;
245 1.8 cgd }
246 1.11 cgd
247 1.8 cgd freep->spare0 = 0;
248 1.8 cgd #endif /* DIAGNOSTIC */
249 1.1 cgd #ifdef KMEMSTATS
250 1.1 cgd kup = btokup(va);
251 1.1 cgd if (kup->ku_indx != indx)
252 1.1 cgd panic("malloc: wrong bucket");
253 1.1 cgd if (kup->ku_freecnt == 0)
254 1.1 cgd panic("malloc: lost data");
255 1.1 cgd kup->ku_freecnt--;
256 1.1 cgd kbp->kb_totalfree--;
257 1.1 cgd ksp->ks_memuse += 1 << indx;
258 1.1 cgd out:
259 1.1 cgd kbp->kb_calls++;
260 1.1 cgd ksp->ks_inuse++;
261 1.1 cgd ksp->ks_calls++;
262 1.1 cgd if (ksp->ks_memuse > ksp->ks_maxused)
263 1.1 cgd ksp->ks_maxused = ksp->ks_memuse;
264 1.1 cgd #else
265 1.1 cgd out:
266 1.1 cgd #endif
267 1.1 cgd splx(s);
268 1.1 cgd return ((void *) va);
269 1.1 cgd }
270 1.1 cgd
/*
 * Free a block of memory allocated by malloc.
 *
 * addr: pointer previously returned by malloc().
 * type: the M_* tag it was allocated with (used for statistics and,
 *       under DIAGNOSTIC, recorded in the object for later blame).
 */
void
free(addr, type)
	void *addr;
	int type;
{
	register struct kmembuckets *kbp;
	register struct kmemusage *kup;
	register struct freelist *freep;
	long size;
	int s;
#ifdef DIAGNOSTIC
	caddr_t cp;
	int32_t *end, *lp;
	long alloc, copysize;
#endif
#ifdef KMEMSTATS
	register struct kmemstats *ksp = &kmemstats[type];
#endif

	/* Recover the bucket (and hence size class) from the address. */
	kup = btokup(addr);
	size = 1 << kup->ku_indx;
	kbp = &bucket[kup->ku_indx];
	s = splimp();
#ifdef DIAGNOSTIC
	/*
	 * Check for returns of data that do not point to the
	 * beginning of the allocation.
	 */
	if (size > NBPG * CLSIZE)
		alloc = addrmask[BUCKETINDX(NBPG * CLSIZE)];
	else
		alloc = addrmask[kup->ku_indx];
	if (((u_long)addr & alloc) != 0)
		panic("free: unaligned addr %p, size %ld, type %s, mask %ld\n",
			addr, size, memname[type], alloc);
#endif /* DIAGNOSTIC */
	if (size > MAXALLOCSAVE) {
		/* Oversize allocation: pages go straight back to the map. */
		kmem_free(kmem_map, (vm_offset_t)addr, ctob(kup->ku_pagecnt));
#ifdef KMEMSTATS
		size = kup->ku_pagecnt << PGSHIFT;
		ksp->ks_memuse -= size;
		kup->ku_indx = 0;
		kup->ku_pagecnt = 0;
		/* Wake sleepers in malloc() if this drops us below limit. */
		if (ksp->ks_memuse + size >= ksp->ks_limit &&
		    ksp->ks_memuse < ksp->ks_limit)
			wakeup((caddr_t)ksp);
		ksp->ks_inuse--;
		kbp->kb_total -= 1;
#endif
		splx(s);
		return;
	}
	freep = (struct freelist *)addr;
#ifdef DIAGNOSTIC
	/*
	 * Check for multiple frees. Use a quick check to see if
	 * it looks free before laboriously searching the freelist.
	 */
	if (freep->spare0 == WEIRD_ADDR) {
		for (cp = kbp->kb_next; cp;
		    cp = ((struct freelist *)cp)->next) {
			if (addr != cp)
				continue;
			printf("multiply freed item %p\n", addr);
			panic("free: duplicated free");
		}
	}
	/*
	 * Copy in known text to detect modification after freeing
	 * and to make it look free. Also, save the type being freed
	 * so we can list likely culprit if modification is detected
	 * when the object is reallocated.
	 */
	copysize = size < MAX_COPY ? size : MAX_COPY;
	end = (int32_t *)&((caddr_t)addr)[copysize];
	for (lp = (int32_t *)addr; lp < end; lp++)
		*lp = WEIRD_ADDR;
	/* Record the type after poisoning (type overlays a poisoned word). */
	freep->type = type;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup->ku_freecnt++;
	if (kup->ku_freecnt >= kbp->kb_elmpercl)
		if (kup->ku_freecnt > kbp->kb_elmpercl)
			panic("free: multiple frees");
		else if (kbp->kb_totalfree > kbp->kb_highwat)
			kbp->kb_couldfree++;
	kbp->kb_totalfree++;
	ksp->ks_memuse -= size;
	/* Wake sleepers in malloc() if this drops us below limit. */
	if (ksp->ks_memuse + size >= ksp->ks_limit &&
	    ksp->ks_memuse < ksp->ks_limit)
		wakeup((caddr_t)ksp);
	ksp->ks_inuse--;
#endif
	/*
	 * Append to the tail of the bucket's free list (malloc() takes
	 * from the head, so freed objects are reused oldest-first).
	 */
	if (kbp->kb_next == NULL)
		kbp->kb_next = addr;
	else
		((struct freelist *)kbp->kb_last)->next = addr;
	freep->next = NULL;
	kbp->kb_last = addr;
	splx(s);
}
375 1.20 cgd
376 1.20 cgd /*
377 1.20 cgd * Change the size of a block of memory.
378 1.20 cgd */
379 1.20 cgd void *
380 1.20 cgd realloc(curaddr, newsize, type, flags)
381 1.20 cgd void *curaddr;
382 1.20 cgd unsigned long newsize;
383 1.20 cgd int type, flags;
384 1.20 cgd {
385 1.20 cgd register struct kmemusage *kup;
386 1.20 cgd long cursize;
387 1.20 cgd void *newaddr;
388 1.20 cgd #ifdef DIAGNOSTIC
389 1.20 cgd long alloc;
390 1.20 cgd #endif
391 1.20 cgd
392 1.20 cgd /*
393 1.20 cgd * Realloc() with a NULL pointer is the same as malloc().
394 1.20 cgd */
395 1.20 cgd if (curaddr == NULL)
396 1.20 cgd return (malloc(newsize, type, flags));
397 1.20 cgd
398 1.20 cgd /*
399 1.20 cgd * Realloc() with zero size is the same as free().
400 1.20 cgd */
401 1.20 cgd if (newsize == 0) {
402 1.20 cgd free(curaddr, type);
403 1.20 cgd return (NULL);
404 1.20 cgd }
405 1.20 cgd
406 1.20 cgd /*
407 1.20 cgd * Find out how large the old allocation was (and do some
408 1.20 cgd * sanity checking).
409 1.20 cgd */
410 1.20 cgd kup = btokup(curaddr);
411 1.20 cgd cursize = 1 << kup->ku_indx;
412 1.20 cgd
413 1.20 cgd #ifdef DIAGNOSTIC
414 1.20 cgd /*
415 1.20 cgd * Check for returns of data that do not point to the
416 1.20 cgd * beginning of the allocation.
417 1.20 cgd */
418 1.20 cgd if (cursize > NBPG * CLSIZE)
419 1.20 cgd alloc = addrmask[BUCKETINDX(NBPG * CLSIZE)];
420 1.20 cgd else
421 1.20 cgd alloc = addrmask[kup->ku_indx];
422 1.20 cgd if (((u_long)curaddr & alloc) != 0)
423 1.20 cgd panic("realloc: unaligned addr %p, size %ld, type %s, mask %ld\n",
424 1.20 cgd curaddr, cursize, memname[type], alloc);
425 1.20 cgd #endif /* DIAGNOSTIC */
426 1.20 cgd
427 1.20 cgd if (cursize > MAXALLOCSAVE)
428 1.20 cgd cursize = ctob(kup->ku_pagecnt);
429 1.20 cgd
430 1.20 cgd /*
431 1.20 cgd * If we already actually have as much as they want, we're done.
432 1.20 cgd */
433 1.20 cgd if (newsize <= cursize)
434 1.20 cgd return (curaddr);
435 1.20 cgd
436 1.20 cgd /*
437 1.20 cgd * Can't satisfy the allocation with the existing block.
438 1.20 cgd * Allocate a new one and copy the data.
439 1.20 cgd */
440 1.20 cgd newaddr = malloc(newsize, type, flags);
441 1.20 cgd if (newaddr == NULL) {
442 1.20 cgd /*
443 1.20 cgd * Malloc() failed, because flags included M_NOWAIT.
444 1.20 cgd * Return NULL to indicate that failure. The old
445 1.20 cgd * pointer is still valid.
446 1.20 cgd */
447 1.20 cgd return NULL;
448 1.20 cgd }
449 1.20 cgd bcopy(curaddr, newaddr, cursize);
450 1.20 cgd
451 1.20 cgd /*
452 1.20 cgd * We were successful: free the old allocation and return
453 1.20 cgd * the new one.
454 1.20 cgd */
455 1.20 cgd free(curaddr, type);
456 1.20 cgd return (newaddr);
457 1.1 cgd }
458 1.1 cgd
459 1.1 cgd /*
460 1.1 cgd * Initialize the kernel memory allocator
461 1.1 cgd */
462 1.12 christos void
463 1.1 cgd kmeminit()
464 1.1 cgd {
465 1.23 tls #ifdef KMEMSTATS
466 1.1 cgd register long indx;
467 1.23 tls #endif
468 1.1 cgd int npg;
469 1.1 cgd
470 1.1 cgd #if ((MAXALLOCSAVE & (MAXALLOCSAVE - 1)) != 0)
471 1.1 cgd ERROR!_kmeminit:_MAXALLOCSAVE_not_power_of_2
472 1.1 cgd #endif
473 1.1 cgd #if (MAXALLOCSAVE > MINALLOCSIZE * 32768)
474 1.1 cgd ERROR!_kmeminit:_MAXALLOCSAVE_too_big
475 1.1 cgd #endif
476 1.1 cgd #if (MAXALLOCSAVE < CLBYTES)
477 1.1 cgd ERROR!_kmeminit:_MAXALLOCSAVE_too_small
478 1.1 cgd #endif
479 1.11 cgd
480 1.11 cgd if (sizeof(struct freelist) > (1 << MINBUCKET))
481 1.11 cgd panic("minbucket too small/struct freelist too big");
482 1.11 cgd
483 1.1 cgd npg = VM_KMEM_SIZE/ NBPG;
484 1.1 cgd kmemusage = (struct kmemusage *) kmem_alloc(kernel_map,
485 1.1 cgd (vm_size_t)(npg * sizeof(struct kmemusage)));
486 1.1 cgd kmem_map = kmem_suballoc(kernel_map, (vm_offset_t *)&kmembase,
487 1.1 cgd (vm_offset_t *)&kmemlimit, (vm_size_t)(npg * NBPG), FALSE);
488 1.1 cgd #ifdef KMEMSTATS
489 1.1 cgd for (indx = 0; indx < MINBUCKET + 16; indx++) {
490 1.1 cgd if (1 << indx >= CLBYTES)
491 1.1 cgd bucket[indx].kb_elmpercl = 1;
492 1.1 cgd else
493 1.1 cgd bucket[indx].kb_elmpercl = CLBYTES / (1 << indx);
494 1.1 cgd bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
495 1.1 cgd }
496 1.8 cgd for (indx = 0; indx < M_LAST; indx++)
497 1.1 cgd kmemstats[indx].ks_limit = npg * NBPG * 6 / 10;
498 1.1 cgd #endif
499 1.1 cgd }
500