/*
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.4 (Berkeley) 5/20/95
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/map.h>
#include <sys/kernel.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>

struct kmembuckets bucket[MINBUCKET + 16];
struct kmemstats kmemstats[M_LAST];
struct kmemusage *kmemusage;
char *kmembase, *kmemlimit;
char *memname[] = INITKMEMNAMES;

#ifdef DIAGNOSTIC
/*
 * This structure provides a set of masks to catch unaligned frees.
 */
long addrmask[] = { 0,
	0x00000001, 0x00000003, 0x00000007, 0x0000000f,
	0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
	0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
	0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
};
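
/*
 * Example: a 16-byte bucket has index 4 (1 << 4 == 16), so addrmask[4]
 * (0xf) is applied to addresses freed into it.  Objects in that bucket
 * always start on 16-byte boundaries within their pages, so a pointer
 * 8 bytes into an object leaves bits set under the mask and free()
 * panics with "unaligned addr".
 */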

/*
 * The WEIRD_ADDR is used as known text to copy into free objects so
 * that modifications after frees can be detected.
 */
#define WEIRD_ADDR	0xdeadbeef
#define MAX_COPY	32
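
/*
 * Example: when a 64-byte object is freed under DIAGNOSTIC, its first
 * MAX_COPY (32) bytes are overwritten with the WEIRD_ADDR pattern.
 * When malloc() later hands the object out again it rechecks those
 * words; any word that no longer reads 0xdeadbeef is reported as
 * "Data modified on freelist" together with the type of the previous
 * owner, which is saved in the freelist header below.
 */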

/*
 * Normally the first word of the structure is used to hold the list
 * pointer for free objects. However, when running with diagnostics,
 * we use the third and fourth fields, so as to catch modifications
 * in the most commonly trashed first two words.
 */
struct freelist {
	long	spare0;
	short	type;
	long	spare1;
	caddr_t	next;
};
#else /* !DIAGNOSTIC */
struct freelist {
	caddr_t	next;
};
#endif /* DIAGNOSTIC */
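
/*
 * Resulting layout of a free object with DIAGNOSTIC (offsets shown
 * assuming 32-bit longs and pointers with longs aligned to 4-byte
 * boundaries; other machines differ, which is why malloc() checks the
 * alignment of the next field before stamping it):
 *
 *	offset  0:	spare0	- holds WEIRD_ADDR while the object is free
 *	offset  4:	type	- malloc type of the previous owner
 *	offset  8:	spare1	- also filled with WEIRD_ADDR
 *	offset 12:	next	- freelist link
 *
 * Without DIAGNOSTIC the link simply occupies the first word.
 */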

/*
 * Allocate a block of memory
 */
void *
malloc(size, type, flags)
	unsigned long size;
	int type, flags;
{
	register struct kmembuckets *kbp;
	register struct kmemusage *kup;
	register struct freelist *freep;
	long indx, npg, allocsize;
	int s;
	caddr_t va, cp, savedlist;
#ifdef DIAGNOSTIC
	long *end, *lp;
	int copysize;
	char *savedtype;
#endif
#ifdef DEBUG
	extern int simplelockrecurse;
#endif
#ifdef KMEMSTATS
	register struct kmemstats *ksp = &kmemstats[type];

	if (((unsigned long)type) > M_LAST)
		panic("malloc - bogus type");
#endif
	indx = BUCKETINDX(size);
	kbp = &bucket[indx];
	s = splimp();
#ifdef KMEMSTATS
	while (ksp->ks_memuse >= ksp->ks_limit) {
		if (flags & M_NOWAIT) {
			splx(s);
			return ((void *) NULL);
		}
		if (ksp->ks_limblocks < 65535)
			ksp->ks_limblocks++;
		tsleep((caddr_t)ksp, PSWP+2, memname[type], 0);
	}
	ksp->ks_size |= 1 << indx;
#endif
#ifdef DIAGNOSTIC
	copysize = 1 << indx < MAX_COPY ? 1 << indx : MAX_COPY;
#endif
#ifdef DEBUG
	if (flags & M_NOWAIT)
		simplelockrecurse++;
#endif
	if (kbp->kb_next == NULL) {
		kbp->kb_last = NULL;
		if (size > MAXALLOCSAVE)
			allocsize = roundup(size, CLBYTES);
		else
			allocsize = 1 << indx;
		npg = clrnd(btoc(allocsize));
		va = (caddr_t) kmem_malloc(kmem_map, (vm_size_t)ctob(npg),
					   !(flags & M_NOWAIT));
		if (va == NULL) {
			splx(s);
#ifdef DEBUG
			if (flags & M_NOWAIT)
				simplelockrecurse--;
#endif
			return ((void *) NULL);
		}
#ifdef KMEMSTATS
		kbp->kb_total += kbp->kb_elmpercl;
#endif
		kup = btokup(va);
		kup->ku_indx = indx;
		if (allocsize > MAXALLOCSAVE) {
			if (npg > 65535)
				panic("malloc: allocation too large");
			kup->ku_pagecnt = npg;
#ifdef KMEMSTATS
			ksp->ks_memuse += allocsize;
#endif
			goto out;
		}
#ifdef KMEMSTATS
		kup->ku_freecnt = kbp->kb_elmpercl;
		kbp->kb_totalfree += kbp->kb_elmpercl;
#endif
		/*
		 * Just in case we blocked while allocating memory,
		 * and someone else also allocated memory for this
		 * bucket, don't assume the list is still empty.
		 */
		savedlist = kbp->kb_next;
		kbp->kb_next = cp = va + (npg * NBPG) - allocsize;
		for (;;) {
			freep = (struct freelist *)cp;
#ifdef DIAGNOSTIC
			/*
			 * Copy in known text to detect modification
			 * after freeing.
			 */
			end = (long *)&cp[copysize];
			for (lp = (long *)cp; lp < end; lp++)
				*lp = WEIRD_ADDR;
			freep->type = M_FREE;
#endif /* DIAGNOSTIC */
			if (cp <= va)
				break;
			cp -= allocsize;
			freep->next = cp;
		}
		freep->next = savedlist;
		if (kbp->kb_last == NULL)
			kbp->kb_last = (caddr_t)freep;
	}
	va = kbp->kb_next;
	kbp->kb_next = ((struct freelist *)va)->next;
#ifdef DIAGNOSTIC
	freep = (struct freelist *)va;
	savedtype = (unsigned)freep->type < M_LAST ?
		memname[freep->type] : "???";
	if (kbp->kb_next &&
	    !kernacc(kbp->kb_next, sizeof(struct freelist), 0)) {
		printf("%s of object 0x%x size %d %s %s (invalid addr 0x%x)\n",
			"Data modified on freelist: word 2.5", va, size,
			"previous type", savedtype, kbp->kb_next);
		kbp->kb_next = NULL;
	}
#if BYTE_ORDER == BIG_ENDIAN
	freep->type = WEIRD_ADDR >> 16;
#endif
#if BYTE_ORDER == LITTLE_ENDIAN
	freep->type = (short)WEIRD_ADDR;
#endif
	if (((long)(&freep->next)) & 0x2)
		freep->next = (caddr_t)((WEIRD_ADDR >> 16)|(WEIRD_ADDR << 16));
	else
		freep->next = (caddr_t)WEIRD_ADDR;
	end = (long *)&va[copysize];
	for (lp = (long *)va; lp < end; lp++) {
		if (*lp == WEIRD_ADDR)
			continue;
		printf("%s %d of object 0x%x size %d %s %s (0x%x != 0x%x)\n",
			"Data modified on freelist: word", lp - (long *)va,
			va, size, "previous type", savedtype, *lp, WEIRD_ADDR);
		break;
	}
	freep->spare0 = 0;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup = btokup(va);
	if (kup->ku_indx != indx)
		panic("malloc: wrong bucket");
	if (kup->ku_freecnt == 0)
		panic("malloc: lost data");
	kup->ku_freecnt--;
	kbp->kb_totalfree--;
	ksp->ks_memuse += 1 << indx;
out:
	kbp->kb_calls++;
	ksp->ks_inuse++;
	ksp->ks_calls++;
	if (ksp->ks_memuse > ksp->ks_maxused)
		ksp->ks_maxused = ksp->ks_memuse;
#else
out:
#endif
	splx(s);
#ifdef DEBUG
	if (flags & M_NOWAIT)
		simplelockrecurse--;
#endif
	return ((void *) va);
}
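
/*
 * Usage sketch (M_TEMP and the M_WAITOK/M_NOWAIT flags are the usual
 * definitions from <sys/malloc.h>; "struct foo" is only illustrative):
 *
 *	buf = (char *) malloc(128, M_TEMP, M_WAITOK);
 *		-- may sleep until the M_TEMP limit permits it
 *	fp = (struct foo *) malloc(sizeof(*fp), M_TEMP, M_NOWAIT);
 *	if (fp == NULL)
 *		-- M_NOWAIT returns NULL rather than sleeping
 *
 * Requests are rounded up to the next power-of-two bucket size
 * (BUCKETINDX), or to a multiple of CLBYTES if larger than
 * MAXALLOCSAVE.
 */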

/*
 * Free a block of memory allocated by malloc.
 */
void
free(addr, type)
	void *addr;
	int type;
{
	register struct kmembuckets *kbp;
	register struct kmemusage *kup;
	register struct freelist *freep;
	long size;
	int s;
#ifdef DIAGNOSTIC
	caddr_t cp;
	long *end, *lp, alloc, copysize;
#endif
#ifdef KMEMSTATS
	register struct kmemstats *ksp = &kmemstats[type];
#endif

	kup = btokup(addr);
	size = 1 << kup->ku_indx;
	kbp = &bucket[kup->ku_indx];
	s = splimp();
#ifdef DIAGNOSTIC
	/*
	 * Check for returns of data that do not point to the
	 * beginning of the allocation.
	 */
	if (size > NBPG * CLSIZE)
		alloc = addrmask[BUCKETINDX(NBPG * CLSIZE)];
	else
		alloc = addrmask[kup->ku_indx];
	if (((u_long)addr & alloc) != 0)
		panic("free: unaligned addr 0x%x, size %d, type %s, mask %d\n",
			addr, size, memname[type], alloc);
#endif /* DIAGNOSTIC */
	if (size > MAXALLOCSAVE) {
		kmem_free(kmem_map, (vm_offset_t)addr, ctob(kup->ku_pagecnt));
#ifdef KMEMSTATS
		size = kup->ku_pagecnt << PGSHIFT;
		ksp->ks_memuse -= size;
		kup->ku_indx = 0;
		kup->ku_pagecnt = 0;
		if (ksp->ks_memuse + size >= ksp->ks_limit &&
		    ksp->ks_memuse < ksp->ks_limit)
			wakeup((caddr_t)ksp);
		ksp->ks_inuse--;
		kbp->kb_total -= 1;
#endif
		splx(s);
		return;
	}
	freep = (struct freelist *)addr;
#ifdef DIAGNOSTIC
	/*
	 * Check for multiple frees. Use a quick check to see if
	 * it looks free before laboriously searching the freelist.
	 */
	if (freep->spare0 == WEIRD_ADDR) {
		for (cp = kbp->kb_next; cp; cp = *(caddr_t *)cp) {
			if (addr != cp)
				continue;
			printf("multiply freed item 0x%x\n", addr);
			panic("free: duplicated free");
		}
	}
	/*
	 * Copy in known text to detect modification after freeing
	 * and to make it look free. Also, save the type being freed
	 * so we can list likely culprit if modification is detected
	 * when the object is reallocated.
	 */
	copysize = size < MAX_COPY ? size : MAX_COPY;
	end = (long *)&((caddr_t)addr)[copysize];
	for (lp = (long *)addr; lp < end; lp++)
		*lp = WEIRD_ADDR;
	freep->type = type;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup->ku_freecnt++;
	if (kup->ku_freecnt >= kbp->kb_elmpercl)
		if (kup->ku_freecnt > kbp->kb_elmpercl)
			panic("free: multiple frees");
		else if (kbp->kb_totalfree > kbp->kb_highwat)
			kbp->kb_couldfree++;
	kbp->kb_totalfree++;
	ksp->ks_memuse -= size;
	if (ksp->ks_memuse + size >= ksp->ks_limit &&
	    ksp->ks_memuse < ksp->ks_limit)
		wakeup((caddr_t)ksp);
	ksp->ks_inuse--;
#endif
	if (kbp->kb_next == NULL)
		kbp->kb_next = addr;
	else
		((struct freelist *)kbp->kb_last)->next = addr;
	freep->next = NULL;
	kbp->kb_last = addr;
	splx(s);
}
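
/*
 * Usage sketch: the address passed to free() must be the one returned
 * by malloc() (the DIAGNOSTIC alignment check above catches pointers
 * into the middle of an object), and the type should match the one
 * used to allocate, since the KMEMSTATS accounting is kept per type:
 *
 *	free((void *)buf, M_TEMP);
 */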

/*
 * Initialize the kernel memory allocator
 */
kmeminit()
{
	register long indx;
	int npg;

#if	((MAXALLOCSAVE & (MAXALLOCSAVE - 1)) != 0)
		ERROR!_kmeminit:_MAXALLOCSAVE_not_power_of_2
#endif
#if	(MAXALLOCSAVE > MINALLOCSIZE * 32768)
		ERROR!_kmeminit:_MAXALLOCSAVE_too_big
#endif
#if	(MAXALLOCSAVE < CLBYTES)
		ERROR!_kmeminit:_MAXALLOCSAVE_too_small
#endif
	npg = VM_KMEM_SIZE / NBPG;
	kmemusage = (struct kmemusage *) kmem_alloc(kernel_map,
		(vm_size_t)(npg * sizeof(struct kmemusage)));
	kmem_map = kmem_suballoc(kernel_map, (vm_offset_t *)&kmembase,
		(vm_offset_t *)&kmemlimit, (vm_size_t)(npg * NBPG), FALSE);
#ifdef KMEMSTATS
	for (indx = 0; indx < MINBUCKET + 16; indx++) {
		if (1 << indx >= CLBYTES)
			bucket[indx].kb_elmpercl = 1;
		else
			bucket[indx].kb_elmpercl = CLBYTES / (1 << indx);
		bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
	}
	for (indx = 0; indx < M_LAST; indx++)
		kmemstats[indx].ks_limit = npg * NBPG * 6 / 10;
#endif
}
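
/*
 * Sizing example (machine dependent; assumes NBPG == CLBYTES == 4096
 * and VM_KMEM_SIZE == 4Mbyte): npg is 1024 pages, the 256-byte bucket
 * gets kb_elmpercl == 16 elements per cluster with a high-water mark
 * of 80 free elements, and every malloc type is limited to 60% of the
 * submap, about 2.4Mbyte.
 */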