/*
 * Copyright (c) 1987, 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from: @(#)kern_malloc.c 7.25 (Berkeley) 5/8/91
 *      $Id: kern_malloc.c,v 1.2 1993/05/20 02:54:29 cgd Exp $
 */

#include "param.h"
#include "proc.h"
#include "kernel.h"
#include "malloc.h"
#include "vm/vm.h"
#include "vm/vm_kern.h"

struct kmembuckets bucket[MINBUCKET + 16];
struct kmemstats kmemstats[M_LAST];
struct kmemusage *kmemusage;
char *kmembase, *kmemlimit;
char *memname[] = INITKMEMNAMES;
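
/*
 * The allocator keeps one kmembuckets free list per power-of-two
 * allocation size: BUCKETINDX(size) maps a request to the smallest
 * bucket whose element size (1 << indx) is >= size, so e.g. a
 * 100-byte request is served from the 128-byte bucket.  kmemusage
 * records, per page cluster of kernel memory, which bucket owns it
 * (or, for large allocations, how many pages it spans), while
 * kmemstats and memname carry the per-type accounting kept under
 * KMEMSTATS.
 */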

/*
 * Allocate a block of memory
 */
void *
malloc(size, type, flags)
        unsigned long size;
        int type, flags;
{
        register struct kmembuckets *kbp;
        register struct kmemusage *kup;
        long indx, npg, alloc, allocsize;
        int s;
        caddr_t va, cp, savedlist;
#ifdef KMEMSTATS
        register struct kmemstats *ksp = &kmemstats[type];

        if (((unsigned long)type) >= M_LAST)
                panic("malloc - bogus type");
#endif

        indx = BUCKETINDX(size);
        kbp = &bucket[indx];
        s = splimp();
#ifdef KMEMSTATS
        while (ksp->ks_memuse >= ksp->ks_limit) {
                if (flags & M_NOWAIT) {
                        splx(s);
                        return ((void *) NULL);
                }
                if (ksp->ks_limblocks < 65535)
                        ksp->ks_limblocks++;
                tsleep((caddr_t)ksp, PSWP+2, memname[type], 0);
        }
#endif
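        /*
         * If the bucket's free list is empty, get a fresh cluster of
         * pages from the kernel VM system (kmem_malloc) and carve it
         * into elements of this bucket's size; oversize requests
         * (> MAXALLOCSAVE) get whole pages of their own instead.
         */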
        if (kbp->kb_next == NULL) {
                if (size > MAXALLOCSAVE)
                        allocsize = roundup(size, CLBYTES);
                else
                        allocsize = 1 << indx;
                npg = clrnd(btoc(allocsize));
                va = (caddr_t) kmem_malloc(kmem_map, (vm_size_t)ctob(npg),
                                           !(flags & M_NOWAIT));
                if (va == NULL) {
                        splx(s);
                        return ((void *) NULL);
                }
#ifdef KMEMSTATS
                kbp->kb_total += kbp->kb_elmpercl;
#endif
                kup = btokup(va);
                kup->ku_indx = indx;
                if (allocsize > MAXALLOCSAVE) {
                        if (npg > 65535)
                                panic("malloc: allocation too large");
                        kup->ku_pagecnt = npg;
#ifdef KMEMSTATS
                        ksp->ks_memuse += allocsize;
#endif
                        goto out;
                }
#ifdef KMEMSTATS
                kup->ku_freecnt = kbp->kb_elmpercl;
                kbp->kb_totalfree += kbp->kb_elmpercl;
#endif
                /*
                 * Just in case we blocked while allocating memory,
                 * and someone else also allocated memory for this
                 * bucket, don't assume the list is still empty.
                 */
                savedlist = kbp->kb_next;
                kbp->kb_next = va + (npg * NBPG) - allocsize;
                for (cp = kbp->kb_next; cp > va; cp -= allocsize)
                        *(caddr_t *)cp = cp - allocsize;
                *(caddr_t *)cp = savedlist;
        }
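        /*
         * Take the first element off the bucket's free list; the first
         * word of each free element holds the pointer to the next one.
         */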
        va = kbp->kb_next;
        kbp->kb_next = *(caddr_t *)va;
#ifdef KMEMSTATS
        kup = btokup(va);
        if (kup->ku_indx != indx)
                panic("malloc: wrong bucket");
        if (kup->ku_freecnt == 0)
                panic("malloc: lost data");
        kup->ku_freecnt--;
        kbp->kb_totalfree--;
        ksp->ks_memuse += 1 << indx;
out:
        kbp->kb_calls++;
        ksp->ks_inuse++;
        ksp->ks_calls++;
        if (ksp->ks_memuse > ksp->ks_maxused)
                ksp->ks_maxused = ksp->ks_memuse;
#else
out:
#endif
        splx(s);
        return ((void *) va);
}
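
/*
 * Illustrative caller pattern (not part of this file); "struct foo"
 * and M_TEMP stand in for whatever structure and type tag a caller
 * actually uses:
 *
 *      struct foo *fp;
 *
 *      fp = (struct foo *)malloc(sizeof(*fp), M_TEMP, M_NOWAIT);
 *      if (fp == NULL)
 *              ...handle failure; with M_NOWAIT the caller must
 *              check for a NULL return...
 *      ...
 *      free(fp, M_TEMP);
 */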

#ifdef DIAGNOSTIC
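/*
 * addrmask[indx] is (1 << indx) - 1: the low address bits that must
 * be zero for a correctly aligned element of size 1 << indx, since
 * small elements are carved out of a cluster on allocsize boundaries.
 * free() below uses it to catch misaligned (and therefore bogus)
 * pointers passed to it.
 */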
long addrmask[] = { 0x00000000,
        0x00000001, 0x00000003, 0x00000007, 0x0000000f,
        0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
        0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
        0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
};
#endif /* DIAGNOSTIC */

/*
 * Free a block of memory allocated by malloc.
 */
void
free(addr, type)
        void *addr;
        int type;
{
        register struct kmembuckets *kbp;
        register struct kmemusage *kup;
        long alloc, size;
        int s;
#ifdef KMEMSTATS
        register struct kmemstats *ksp = &kmemstats[type];
#endif

        kup = btokup(addr);
        size = 1 << kup->ku_indx;
#ifdef DIAGNOSTIC
        if (size > NBPG * CLSIZE)
                alloc = addrmask[BUCKETINDX(NBPG * CLSIZE)];
        else
                alloc = addrmask[kup->ku_indx];
        if (((u_long)addr & alloc) != 0) {
                printf("free: unaligned addr 0x%x, size %d, type %d, mask %d\n",
                    addr, size, type, alloc);
                panic("free: unaligned addr");
        }
#endif /* DIAGNOSTIC */
        kbp = &bucket[kup->ku_indx];
        s = splimp();
        if (size > MAXALLOCSAVE) {
                kmem_free(kmem_map, (vm_offset_t)addr, ctob(kup->ku_pagecnt));
#ifdef KMEMSTATS
                size = kup->ku_pagecnt << PGSHIFT;
                ksp->ks_memuse -= size;
                kup->ku_indx = 0;
                kup->ku_pagecnt = 0;
                if (ksp->ks_memuse + size >= ksp->ks_limit &&
                    ksp->ks_memuse < ksp->ks_limit)
                        wakeup((caddr_t)ksp);
                ksp->ks_inuse--;
                kbp->kb_total -= 1;
#endif
                splx(s);
                return;
        }
#ifdef KMEMSTATS
        kup->ku_freecnt++;
        if (kup->ku_freecnt >= kbp->kb_elmpercl)
                if (kup->ku_freecnt > kbp->kb_elmpercl)
                        panic("free: multiple frees");
                else if (kbp->kb_totalfree > kbp->kb_highwat)
                        kbp->kb_couldfree++;
        kbp->kb_totalfree++;
        ksp->ks_memuse -= size;
        if (ksp->ks_memuse + size >= ksp->ks_limit &&
            ksp->ks_memuse < ksp->ks_limit)
                wakeup((caddr_t)ksp);
        ksp->ks_inuse--;
#endif
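        /*
         * Thread the block back onto the head of the bucket's free
         * list, reusing its first word as the link.
         */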
        *(caddr_t *)addr = kbp->kb_next;
        kbp->kb_next = addr;
        splx(s);
}

/*
 * Initialize the kernel memory allocator
 */
void
kmeminit()
{
        register long indx;
        int npg;

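        /*
         * The #if blocks below are compile-time sanity checks: if a
         * constraint on MAXALLOCSAVE is violated, they emit a line of
         * deliberately invalid C so the build fails with the message
         * embedded in the identifier.
         */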
#if ((MAXALLOCSAVE & (MAXALLOCSAVE - 1)) != 0)
                ERROR!_kmeminit:_MAXALLOCSAVE_not_power_of_2
#endif
#if (MAXALLOCSAVE > MINALLOCSIZE * 32768)
                ERROR!_kmeminit:_MAXALLOCSAVE_too_big
#endif
#if (MAXALLOCSAVE < CLBYTES)
                ERROR!_kmeminit:_MAXALLOCSAVE_too_small
#endif
        npg = VM_KMEM_SIZE / NBPG;
        kmemusage = (struct kmemusage *) kmem_alloc(kernel_map,
            (vm_size_t)(npg * sizeof(struct kmemusage)));
        kmem_map = kmem_suballoc(kernel_map, (vm_offset_t *)&kmembase,
            (vm_offset_t *)&kmemlimit, (vm_size_t)(npg * NBPG), FALSE);
#ifdef KMEMSTATS
        for (indx = 0; indx < MINBUCKET + 16; indx++) {
                if (1 << indx >= CLBYTES)
                        bucket[indx].kb_elmpercl = 1;
                else
                        bucket[indx].kb_elmpercl = CLBYTES / (1 << indx);
                bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
        }
        for (indx = 0; indx < M_LAST; indx++)
                kmemstats[indx].ks_limit = npg * NBPG * 6 / 10;
#endif
}