/*
 * Copyright (c) 1987, 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	7.25 (Berkeley) 5/8/91
 */

#include "param.h"
#include "proc.h"
#include "kernel.h"
#include "malloc.h"
#include "vm/vm.h"
#include "vm/vm_kern.h"

struct kmembuckets bucket[MINBUCKET + 16];
struct kmemstats kmemstats[M_LAST];
struct kmemusage *kmemusage;
char *kmembase, *kmemlimit;
char *memname[] = INITKMEMNAMES;
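
/*
 * Allocator state (macros and structure definitions live in malloc.h):
 *	bucket[]	one free list per power-of-two size class,
 *			indexed by BUCKETINDX(size)
 *	kmemstats[]	per-type usage statistics, maintained only
 *			when KMEMSTATS is defined
 *	kmemusage[]	one record per chunk of the kmem arena (looked
 *			up with btokup()), holding the chunk's bucket
 *			index and its free count or page count
 *	kmembase,
 *	kmemlimit	bounds of the submap set up by kmeminit()
 *	memname[]	printable type names, also used as the tsleep()
 *			message when an allocation blocks on its limit
 */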

/*
 * Allocate a block of memory
 */
void *
malloc(size, type, flags)
	unsigned long size;
	int type, flags;
{
	register struct kmembuckets *kbp;
	register struct kmemusage *kup;
	long indx, npg, alloc, allocsize;
	int s;
	caddr_t va, cp, savedlist;
#ifdef KMEMSTATS
	register struct kmemstats *ksp = &kmemstats[type];

	if (((unsigned long)type) >= M_LAST)
		panic("malloc - bogus type");
#endif

	indx = BUCKETINDX(size);
	kbp = &bucket[indx];
	s = splimp();
#ifdef KMEMSTATS
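	/*
	 * If this type has hit its limit, fail at once for M_NOWAIT
	 * callers; otherwise sleep here until free() brings the usage
	 * back under the limit and issues a wakeup.
	 */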
	while (ksp->ks_memuse >= ksp->ks_limit) {
		if (flags & M_NOWAIT) {
			splx(s);
			return ((void *) NULL);
		}
		if (ksp->ks_limblocks < 65535)
			ksp->ks_limblocks++;
		tsleep((caddr_t)ksp, PSWP+2, memname[type], 0);
	}
#endif
	if (kbp->kb_next == NULL) {
		if (size > MAXALLOCSAVE)
			allocsize = roundup(size, CLBYTES);
		else
			allocsize = 1 << indx;
		npg = clrnd(btoc(allocsize));
		va = (caddr_t) kmem_malloc(kmem_map, (vm_size_t)ctob(npg),
					   !(flags & M_NOWAIT));
		if (va == NULL) {
			splx(s);
			return ((void *) NULL);
		}
#ifdef KMEMSTATS
		kbp->kb_total += kbp->kb_elmpercl;
#endif
		kup = btokup(va);
		kup->ku_indx = indx;
		if (allocsize > MAXALLOCSAVE) {
			if (npg > 65535)
				panic("malloc: allocation too large");
			kup->ku_pagecnt = npg;
#ifdef KMEMSTATS
			ksp->ks_memuse += allocsize;
#endif
			goto out;
		}
#ifdef KMEMSTATS
		kup->ku_freecnt = kbp->kb_elmpercl;
		kbp->kb_totalfree += kbp->kb_elmpercl;
#endif
		/*
		 * Just in case we blocked while allocating memory,
		 * and someone else also allocated memory for this
		 * bucket, don't assume the list is still empty.
		 */
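		/*
		 * Carve the new cluster into allocsize-byte pieces from
		 * the top down, chaining each piece to the one below it
		 * through its first word, and hang whatever was already
		 * on the list off the lowest piece.
		 */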
		savedlist = kbp->kb_next;
		kbp->kb_next = va + (npg * NBPG) - allocsize;
		for (cp = kbp->kb_next; cp > va; cp -= allocsize)
			*(caddr_t *)cp = cp - allocsize;
		*(caddr_t *)cp = savedlist;
	}
	va = kbp->kb_next;
	kbp->kb_next = *(caddr_t *)va;
#ifdef KMEMSTATS
	kup = btokup(va);
	if (kup->ku_indx != indx)
		panic("malloc: wrong bucket");
	if (kup->ku_freecnt == 0)
		panic("malloc: lost data");
	kup->ku_freecnt--;
	kbp->kb_totalfree--;
	ksp->ks_memuse += 1 << indx;
out:
	kbp->kb_calls++;
	ksp->ks_inuse++;
	ksp->ks_calls++;
	if (ksp->ks_memuse > ksp->ks_maxused)
		ksp->ks_maxused = ksp->ks_memuse;
#else
out:
#endif
	splx(s);
	return ((void *) va);
}

#ifdef DIAGNOSTIC
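/*
 * addrmask[i] has its low-order i bits set; free() uses it below to
 * check that a freed address is aligned to its bucket's element size.
 */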
long addrmask[] = { 0x00000000,
	0x00000001, 0x00000003, 0x00000007, 0x0000000f,
	0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
	0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
	0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
};
#endif /* DIAGNOSTIC */

/*
 * Free a block of memory allocated by malloc.
 */
void
free(addr, type)
	void *addr;
	int type;
{
	register struct kmembuckets *kbp;
	register struct kmemusage *kup;
	long alloc, size;
	int s;
#ifdef KMEMSTATS
	register struct kmemstats *ksp = &kmemstats[type];
#endif

	kup = btokup(addr);
	size = 1 << kup->ku_indx;
#ifdef DIAGNOSTIC
	if (size > NBPG * CLSIZE)
		alloc = addrmask[BUCKETINDX(NBPG * CLSIZE)];
	else
		alloc = addrmask[kup->ku_indx];
	if (((u_long)addr & alloc) != 0) {
		printf("free: unaligned addr 0x%x, size %d, type %d, mask %d\n",
			addr, size, type, alloc);
		panic("free: unaligned addr");
	}
#endif /* DIAGNOSTIC */
	kbp = &bucket[kup->ku_indx];
	s = splimp();
	if (size > MAXALLOCSAVE) {
		kmem_free(kmem_map, (vm_offset_t)addr, ctob(kup->ku_pagecnt));
#ifdef KMEMSTATS
		size = kup->ku_pagecnt << PGSHIFT;
		ksp->ks_memuse -= size;
		kup->ku_indx = 0;
		kup->ku_pagecnt = 0;
		if (ksp->ks_memuse + size >= ksp->ks_limit &&
		    ksp->ks_memuse < ksp->ks_limit)
			wakeup((caddr_t)ksp);
		ksp->ks_inuse--;
		kbp->kb_total -= 1;
#endif
		splx(s);
		return;
	}
#ifdef KMEMSTATS
	kup->ku_freecnt++;
	if (kup->ku_freecnt >= kbp->kb_elmpercl)
		if (kup->ku_freecnt > kbp->kb_elmpercl)
			panic("free: multiple frees");
		else if (kbp->kb_totalfree > kbp->kb_highwat)
			kbp->kb_couldfree++;
	kbp->kb_totalfree++;
	ksp->ks_memuse -= size;
	if (ksp->ks_memuse + size >= ksp->ks_limit &&
	    ksp->ks_memuse < ksp->ks_limit)
		wakeup((caddr_t)ksp);
	ksp->ks_inuse--;
#endif
	*(caddr_t *)addr = kbp->kb_next;
	kbp->kb_next = addr;
	splx(s);
}

/*
 * Initialize the kernel memory allocator
 */
kmeminit()
{
	register long indx;
	int npg;

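	/*
	 * Validate MAXALLOCSAVE at compile time; an out-of-range value
	 * turns one of the ERROR! lines below into a syntax error.
	 */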
#if ((MAXALLOCSAVE & (MAXALLOCSAVE - 1)) != 0)
		ERROR!_kmeminit:_MAXALLOCSAVE_not_power_of_2
#endif
#if (MAXALLOCSAVE > MINALLOCSIZE * 32768)
		ERROR!_kmeminit:_MAXALLOCSAVE_too_big
#endif
#if (MAXALLOCSAVE < CLBYTES)
		ERROR!_kmeminit:_MAXALLOCSAVE_too_small
#endif
	npg = VM_KMEM_SIZE / NBPG;
	kmemusage = (struct kmemusage *) kmem_alloc(kernel_map,
		(vm_size_t)(npg * sizeof(struct kmemusage)));
	kmem_map = kmem_suballoc(kernel_map, (vm_offset_t *)&kmembase,
		(vm_offset_t *)&kmemlimit, (vm_size_t)(npg * NBPG), FALSE);
#ifdef KMEMSTATS
	for (indx = 0; indx < MINBUCKET + 16; indx++) {
		if (1 << indx >= CLBYTES)
			bucket[indx].kb_elmpercl = 1;
		else
			bucket[indx].kb_elmpercl = CLBYTES / (1 << indx);
		bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
	}
	for (indx = 0; indx < M_LAST; indx++)
		kmemstats[indx].ks_limit = npg * NBPG * 6 / 10;
#endif
}
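
/*
 * Example usage (illustrative sketch only; "struct foo" is a made-up
 * caller structure, M_TEMP and M_NOWAIT come from malloc.h):
 *
 *	struct foo *fp;
 *
 *	fp = (struct foo *)malloc(sizeof(*fp), M_TEMP, M_NOWAIT);
 *	if (fp == NULL)
 *		return (ENOMEM);
 *	...
 *	free((void *)fp, M_TEMP);
 *
 * The type passed to free() should match the one given to malloc()
 * so that the per-type statistics and limits stay consistent.
 */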