kern_malloc.c revision 1.6 1 1.1 cgd /*
2 1.1 cgd * Copyright (c) 1987, 1991 The Regents of the University of California.
3 1.1 cgd * All rights reserved.
4 1.1 cgd *
5 1.1 cgd * Redistribution and use in source and binary forms, with or without
6 1.1 cgd * modification, are permitted provided that the following conditions
7 1.1 cgd * are met:
8 1.1 cgd * 1. Redistributions of source code must retain the above copyright
9 1.1 cgd * notice, this list of conditions and the following disclaimer.
10 1.1 cgd * 2. Redistributions in binary form must reproduce the above copyright
11 1.1 cgd * notice, this list of conditions and the following disclaimer in the
12 1.1 cgd * documentation and/or other materials provided with the distribution.
13 1.1 cgd * 3. All advertising materials mentioning features or use of this software
14 1.1 cgd * must display the following acknowledgement:
15 1.1 cgd * This product includes software developed by the University of
16 1.1 cgd * California, Berkeley and its contributors.
17 1.1 cgd * 4. Neither the name of the University nor the names of its contributors
18 1.1 cgd * may be used to endorse or promote products derived from this software
19 1.1 cgd * without specific prior written permission.
20 1.1 cgd *
21 1.1 cgd * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 1.1 cgd * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 1.1 cgd * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 1.1 cgd * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 1.1 cgd * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 1.1 cgd * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 1.1 cgd * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 1.1 cgd * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 1.1 cgd * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 1.1 cgd * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 1.1 cgd * SUCH DAMAGE.
32 1.1 cgd *
33 1.2 cgd * from: @(#)kern_malloc.c 7.25 (Berkeley) 5/8/91
34 1.6 cgd * $Id: kern_malloc.c,v 1.6 1993/07/15 13:33:23 cgd Exp $
35 1.1 cgd */
36 1.1 cgd
37 1.1 cgd #include "param.h"
38 1.5 andrew #include "systm.h"
39 1.1 cgd #include "proc.h"
40 1.1 cgd #include "kernel.h"
41 1.1 cgd #include "malloc.h"
42 1.1 cgd #include "vm/vm.h"
43 1.1 cgd #include "vm/vm_kern.h"
44 1.1 cgd
45 1.1 cgd struct kmembuckets bucket[MINBUCKET + 16];
46 1.3 cgd struct kmemstats kmemstats[M_LAST + 1];
47 1.1 cgd struct kmemusage *kmemusage;
48 1.1 cgd char *kmembase, *kmemlimit;
49 1.1 cgd char *memname[] = INITKMEMNAMES;
50 1.1 cgd
51 1.1 cgd /*
52 1.1 cgd * Allocate a block of memory
53 1.1 cgd */
/*
 * malloc: allocate a kernel memory block of at least `size' bytes.
 *
 * size  - bytes requested; rounded up to a power-of-2 bucket size,
 *         or to CLBYTES for requests larger than MAXALLOCSAVE
 * type  - M_* accounting type; with KMEMSTATS it must not exceed M_LAST
 * flags - M_NOWAIT: return NULL instead of sleeping when memory or the
 *         per-type limit is unavailable
 *
 * Returns the block, or NULL only when M_NOWAIT is set.  The bucket
 * free lists are manipulated with the priority level raised to
 * splimp() for the duration of the update.
 */
54 1.1 cgd void *
55 1.1 cgd malloc(size, type, flags)
56 1.1 cgd unsigned long size;
57 1.1 cgd int type, flags;
58 1.1 cgd {
59 1.1 cgd register struct kmembuckets *kbp;
60 1.1 cgd register struct kmemusage *kup;
61 1.5 andrew long indx, npg, allocsize;
62 1.1 cgd int s;
63 1.1 cgd caddr_t va, cp, savedlist;
64 1.1 cgd #ifdef KMEMSTATS
65 1.1 cgd register struct kmemstats *ksp = &kmemstats[type];
66 1.1 cgd
67 1.1 cgd if (((unsigned long)type) > M_LAST)
68 1.1 cgd panic("malloc - bogus type");
69 1.1 cgd #endif
70 1.1 cgd
/* Pick the power-of-2 bucket that fits the request. */
71 1.1 cgd indx = BUCKETINDX(size);
72 1.1 cgd kbp = &bucket[indx];
73 1.1 cgd s = splimp();
74 1.1 cgd #ifdef KMEMSTATS
/*
 * Enforce the per-type memory limit: fail immediately for M_NOWAIT
 * callers, otherwise sleep until free() brings this type back under
 * its limit and issues a wakeup on ksp.
 */
75 1.1 cgd while (ksp->ks_memuse >= ksp->ks_limit) {
76 1.1 cgd if (flags & M_NOWAIT) {
77 1.1 cgd splx(s);
78 1.1 cgd return ((void *) NULL);
79 1.1 cgd }
/* ks_limblocks is saturated at 65535 (presumably a 16-bit field
 * in struct kmemstats -- TODO confirm against malloc.h). */
80 1.1 cgd if (ksp->ks_limblocks < 65535)
81 1.1 cgd ksp->ks_limblocks++;
82 1.1 cgd tsleep((caddr_t)ksp, PSWP+2, memname[type], 0);
83 1.1 cgd }
84 1.1 cgd #endif
/* Bucket free list is empty: get fresh pages from the kernel map. */
85 1.1 cgd if (kbp->kb_next == NULL) {
86 1.1 cgd if (size > MAXALLOCSAVE)
/* Oversize request: page-granular, never kept on a free list. */
87 1.1 cgd allocsize = roundup(size, CLBYTES);
88 1.1 cgd else
89 1.1 cgd allocsize = 1 << indx;
90 1.1 cgd npg = clrnd(btoc(allocsize));
91 1.1 cgd va = (caddr_t) kmem_malloc(kmem_map, (vm_size_t)ctob(npg),
92 1.1 cgd !(flags & M_NOWAIT));
/* kmem_malloc() could not provide the pages. */
93 1.1 cgd if (va == NULL) {
94 1.6 cgd splx(s);
95 1.6 cgd return ((void *) NULL);
96 1.1 cgd }
97 1.1 cgd #ifdef KMEMSTATS
98 1.1 cgd kbp->kb_total += kbp->kb_elmpercl;
99 1.1 cgd #endif
/* Record the bucket index in the per-page usage record. */
100 1.1 cgd kup = btokup(va);
101 1.1 cgd kup->ku_indx = indx;
102 1.1 cgd if (allocsize > MAXALLOCSAVE) {
/* Page count is capped at 65535 (presumably ku_pagecnt is a
 * 16-bit field -- TODO confirm against malloc.h). */
103 1.1 cgd if (npg > 65535)
104 1.1 cgd panic("malloc: allocation too large");
105 1.1 cgd kup->ku_pagecnt = npg;
106 1.1 cgd #ifdef KMEMSTATS
107 1.1 cgd ksp->ks_memuse += allocsize;
108 1.1 cgd #endif
/* Oversize blocks skip the free-list carving below. */
109 1.1 cgd goto out;
110 1.1 cgd }
111 1.1 cgd #ifdef KMEMSTATS
112 1.1 cgd kup->ku_freecnt = kbp->kb_elmpercl;
113 1.1 cgd kbp->kb_totalfree += kbp->kb_elmpercl;
114 1.1 cgd #endif
115 1.1 cgd /*
116 1.1 cgd * Just in case we blocked while allocating memory,
117 1.1 cgd * and someone else also allocated memory for this
118 1.1 cgd * bucket, don't assume the list is still empty.
119 1.1 cgd */
120 1.1 cgd savedlist = kbp->kb_next;
/*
 * Carve the new pages into allocsize chunks and thread them onto
 * the free list back-to-front: each chunk's first word points at
 * the chunk before it, and the lowest chunk links to savedlist.
 */
121 1.1 cgd kbp->kb_next = va + (npg * NBPG) - allocsize;
122 1.1 cgd for (cp = kbp->kb_next; cp > va; cp -= allocsize)
123 1.1 cgd *(caddr_t *)cp = cp - allocsize;
124 1.1 cgd *(caddr_t *)cp = savedlist;
125 1.1 cgd }
/* Pop the head of the bucket free list. */
126 1.1 cgd va = kbp->kb_next;
127 1.1 cgd kbp->kb_next = *(caddr_t *)va;
128 1.1 cgd #ifdef KMEMSTATS
/* Sanity-check the block against its per-page usage record. */
129 1.1 cgd kup = btokup(va);
130 1.1 cgd if (kup->ku_indx != indx)
131 1.1 cgd panic("malloc: wrong bucket");
132 1.1 cgd if (kup->ku_freecnt == 0)
133 1.1 cgd panic("malloc: lost data");
134 1.1 cgd kup->ku_freecnt--;
135 1.1 cgd kbp->kb_totalfree--;
136 1.1 cgd ksp->ks_memuse += 1 << indx;
137 1.1 cgd out:
138 1.1 cgd kbp->kb_calls++;
139 1.1 cgd ksp->ks_inuse++;
140 1.1 cgd ksp->ks_calls++;
141 1.1 cgd if (ksp->ks_memuse > ksp->ks_maxused)
142 1.1 cgd ksp->ks_maxused = ksp->ks_memuse;
143 1.1 cgd #else
144 1.1 cgd out:
145 1.1 cgd #endif
146 1.1 cgd splx(s);
147 1.1 cgd return ((void *) va);
148 1.1 cgd }
149 1.1 cgd
150 1.1 cgd #ifdef DIAGNOSTIC
/*
 * addrmask[i] masks the low-order i bits of an address; free() uses
 * it to verify that a freed pointer is aligned to its bucket's
 * element size (1 << i).
 */
151 1.1 cgd long addrmask[] = { 0x00000000,
152 1.1 cgd 0x00000001, 0x00000003, 0x00000007, 0x0000000f,
153 1.1 cgd 0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
154 1.1 cgd 0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
155 1.1 cgd 0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
156 1.1 cgd };
157 1.1 cgd #endif /* DIAGNOSTIC */
158 1.1 cgd
159 1.1 cgd /*
160 1.1 cgd * Free a block of memory allocated by malloc.
161 1.1 cgd */
/*
 * free: return a block obtained from malloc() to its bucket free list,
 * or, for oversize blocks, hand its pages back to the kernel map.
 *
 * addr - block previously returned by malloc()
 * type - the M_* accounting type passed to malloc()
 *        (used only when KMEMSTATS is defined)
 */
162 1.1 cgd void
163 1.1 cgd free(addr, type)
164 1.1 cgd void *addr;
165 1.1 cgd int type;
166 1.1 cgd {
167 1.1 cgd register struct kmembuckets *kbp;
168 1.1 cgd register struct kmemusage *kup;
169 1.5 andrew #ifdef DIAGNOSTIC
170 1.5 andrew long alloc;
171 1.5 andrew #endif
172 1.5 andrew long size;
173 1.1 cgd int s;
174 1.1 cgd #ifdef KMEMSTATS
175 1.1 cgd register struct kmemstats *ksp = &kmemstats[type];
176 1.1 cgd #endif
177 1.1 cgd
/* The per-page usage record tells us which bucket addr came from. */
178 1.1 cgd kup = btokup(addr);
179 1.1 cgd size = 1 << kup->ku_indx;
180 1.1 cgd #ifdef DIAGNOSTIC
/*
 * Verify addr is aligned to its bucket's element size (capped at
 * one page cluster); malloc() never returns a misaligned block,
 * so a misaligned addr here is a caller bug.
 */
181 1.1 cgd if (size > NBPG * CLSIZE)
182 1.1 cgd alloc = addrmask[BUCKETINDX(NBPG * CLSIZE)];
183 1.1 cgd else
184 1.1 cgd alloc = addrmask[kup->ku_indx];
185 1.1 cgd if (((u_long)addr & alloc) != 0) {
186 1.1 cgd printf("free: unaligned addr 0x%x, size %d, type %d, mask %d\n",
187 1.1 cgd addr, size, type, alloc);
188 1.1 cgd panic("free: unaligned addr");
189 1.1 cgd }
190 1.1 cgd #endif /* DIAGNOSTIC */
191 1.1 cgd kbp = &bucket[kup->ku_indx];
192 1.1 cgd s = splimp();
/* Oversize block: its pages go straight back to the kernel map. */
193 1.1 cgd if (size > MAXALLOCSAVE) {
194 1.1 cgd kmem_free(kmem_map, (vm_offset_t)addr, ctob(kup->ku_pagecnt));
195 1.1 cgd #ifdef KMEMSTATS
196 1.1 cgd size = kup->ku_pagecnt << PGSHIFT;
197 1.1 cgd ksp->ks_memuse -= size;
198 1.1 cgd kup->ku_indx = 0;
199 1.1 cgd kup->ku_pagecnt = 0;
/*
 * Wake malloc() callers sleeping on this type's limit, but only
 * on the transition from at-or-over the limit to under it.
 */
200 1.1 cgd if (ksp->ks_memuse + size >= ksp->ks_limit &&
201 1.1 cgd ksp->ks_memuse < ksp->ks_limit)
202 1.1 cgd wakeup((caddr_t)ksp);
203 1.1 cgd ksp->ks_inuse--;
204 1.1 cgd kbp->kb_total -= 1;
205 1.1 cgd #endif
206 1.1 cgd splx(s);
207 1.1 cgd return;
208 1.1 cgd }
209 1.1 cgd #ifdef KMEMSTATS
/*
 * ku_freecnt counts free elements in this block's page cluster;
 * exceeding kb_elmpercl means the same element was freed twice.
 */
210 1.1 cgd kup->ku_freecnt++;
211 1.1 cgd if (kup->ku_freecnt >= kbp->kb_elmpercl)
212 1.1 cgd if (kup->ku_freecnt > kbp->kb_elmpercl)
213 1.1 cgd panic("free: multiple frees");
214 1.1 cgd else if (kbp->kb_totalfree > kbp->kb_highwat)
215 1.1 cgd kbp->kb_couldfree++;
216 1.1 cgd kbp->kb_totalfree++;
217 1.1 cgd ksp->ks_memuse -= size;
/* Same limit-transition wakeup as in the oversize path above. */
218 1.1 cgd if (ksp->ks_memuse + size >= ksp->ks_limit &&
219 1.1 cgd ksp->ks_memuse < ksp->ks_limit)
220 1.1 cgd wakeup((caddr_t)ksp);
221 1.1 cgd ksp->ks_inuse--;
222 1.1 cgd #endif
/* Push the block onto the head of the bucket free list. */
223 1.1 cgd *(caddr_t *)addr = kbp->kb_next;
224 1.1 cgd kbp->kb_next = addr;
225 1.1 cgd splx(s);
226 1.1 cgd }
227 1.1 cgd
228 1.1 cgd /*
229 1.1 cgd * Initialize the kernel memory allocator
230 1.1 cgd */
/*
 * kmeminit: carve the kmem_map submap out of kernel_map, allocate the
 * kmemusage array (one record per arena page), and, with KMEMSTATS,
 * initialize the per-bucket and per-type statistics.
 */
231 1.5 andrew void
232 1.1 cgd kmeminit()
233 1.1 cgd {
234 1.1 cgd register long indx;
235 1.1 cgd int npg;
236 1.1 cgd
/*
 * Compile-time sanity checks: each #if below expands to an illegal
 * token sequence, forcing a compile error (with the reason embedded
 * in the identifier) when MAXALLOCSAVE is misconfigured.
 */
237 1.1 cgd #if ((MAXALLOCSAVE & (MAXALLOCSAVE - 1)) != 0)
238 1.1 cgd ERROR!_kmeminit:_MAXALLOCSAVE_not_power_of_2
239 1.1 cgd #endif
240 1.1 cgd #if (MAXALLOCSAVE > MINALLOCSIZE * 32768)
241 1.1 cgd ERROR!_kmeminit:_MAXALLOCSAVE_too_big
242 1.1 cgd #endif
243 1.1 cgd #if (MAXALLOCSAVE < CLBYTES)
244 1.1 cgd ERROR!_kmeminit:_MAXALLOCSAVE_too_small
245 1.1 cgd #endif
/* Size of the kernel malloc arena, in pages. */
246 1.1 cgd npg = VM_KMEM_SIZE/ NBPG;
247 1.1 cgd kmemusage = (struct kmemusage *) kmem_alloc(kernel_map,
248 1.1 cgd (vm_size_t)(npg * sizeof(struct kmemusage)));
/* Create the submap; kmembase/kmemlimit receive its bounds. */
249 1.1 cgd kmem_map = kmem_suballoc(kernel_map, (vm_offset_t *)&kmembase,
250 1.1 cgd (vm_offset_t *)&kmemlimit, (vm_size_t)(npg * NBPG), FALSE);
251 1.1 cgd #ifdef KMEMSTATS
/*
 * kb_elmpercl: elements carved from one page cluster per bucket
 * (1 for buckets of a cluster or more); kb_highwat: free-element
 * high-water mark, five clusters' worth.
 */
252 1.1 cgd for (indx = 0; indx < MINBUCKET + 16; indx++) {
253 1.1 cgd if (1 << indx >= CLBYTES)
254 1.1 cgd bucket[indx].kb_elmpercl = 1;
255 1.1 cgd else
256 1.1 cgd bucket[indx].kb_elmpercl = CLBYTES / (1 << indx);
257 1.1 cgd bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
258 1.1 cgd }
/* Each M_* type may use at most 60% of the arena. */
259 1.3 cgd for (indx = 0; indx <= M_LAST; indx++)
260 1.1 cgd kmemstats[indx].ks_limit = npg * NBPG * 6 / 10;
261 1.1 cgd #endif
262 1.1 cgd }
263