/*	$NetBSD: malloc.c,v 1.6 1996/01/17 02:45:25 jtc Exp $	*/

/*
 * Copyright (c) 1983 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#if defined(LIBC_SCCS) && !defined(lint)
#if 0
static char *sccsid = "from: @(#)malloc.c	5.11 (Berkeley) 2/23/91";
#else
static char *rcsid = "$NetBSD: malloc.c,v 1.6 1996/01/17 02:45:25 jtc Exp $";
#endif
#endif /* LIBC_SCCS and not lint */

/*
 * malloc.c (Caltech) 2/21/82
 * Chris Kingsley, kingsley@cit-20.
 *
 * This is a very fast storage allocator.  It allocates blocks of a small
 * number of different sizes, and keeps free lists of each size.  Blocks that
 * don't exactly fit are passed up to the next larger size.  In this
 * implementation, the available sizes are 2^n-4 (or 2^n-10) bytes long.
 * This is designed for use in a virtual memory environment.
 */
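/*
 * For example, with 4 bytes of per-block overhead a request for 100 bytes
 * needs 104 bytes and is served from the 128-byte size class, leaving
 * 124 (2^7 - 4) usable bytes; a request for 125 bytes would move up to the
 * 256-byte class.  (Illustrative figures only: the real overhead is
 * sizeof (union overhead), which depends on the machine and on RCHECK.)
 */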

#include <sys/types.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#define	NULL 0

static void morecore();
static int findbucket();

/*
 * The overhead on a block is at least 4 bytes.  When free, this space
 * contains a pointer to the next free block, and the bottom two bits must
 * be zero.  When in use, the first byte is set to MAGIC, and the second
 * byte is the size index.  The remaining bytes are for alignment.
 * If range checking is enabled then a second word holds the size of the
 * requested block, less 1, rounded up to a multiple of sizeof(RMAGIC).
 * The order of elements is critical: ov_magic must overlay the low order
 * bits of ov_next, and ov_magic can not be a valid ov_next bit pattern.
 */
union	overhead {
	union	overhead *ov_next;	/* when free */
	struct {
		u_char	ovu_magic;	/* magic number */
		u_char	ovu_index;	/* bucket # */
#ifdef RCHECK
		u_short	ovu_rmagic;	/* range magic number */
		u_long	ovu_size;	/* actual block size */
#endif
	} ovu;
#define	ov_magic	ovu.ovu_magic
#define	ov_index	ovu.ovu_index
#define	ov_rmagic	ovu.ovu_rmagic
#define	ov_size		ovu.ovu_size
};

#define	MAGIC		0xef		/* magic # on accounting info */
#define RMAGIC		0x5555		/* magic # on range info */
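/*
 * Every block on a free list is at least 4-byte aligned, so the low two
 * bits of a genuine ov_next pointer are zero; MAGIC (0xef) has both low
 * bits set, so (on machines where ov_magic overlays the low-order byte of
 * ov_next) a block in use can never be mistaken for a free-list link.
 */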

#ifdef RCHECK
#define	RSLOP		sizeof (u_short)
#else
#define	RSLOP		0
#endif

/*
 * nextf[i] is the pointer to the next free block of size 2^(i+3).  The
 * smallest allocatable block is 8 bytes.  The overhead information
 * precedes the data area returned to the user.
 */
#define	NBUCKETS 30
static	union overhead *nextf[NBUCKETS];
extern	char *sbrk();

static	int pagesz;			/* page size */
static	int pagebucket;			/* page size bucket */

#ifdef MSTATS
/*
 * nmalloc[i] is the difference between the number of mallocs and frees
 * for a given block size.
 */
static	u_int nmalloc[NBUCKETS];
#include <stdio.h>
#endif

#if defined(DEBUG) || defined(RCHECK)
#define	ASSERT(p)   if (!(p)) botch(#p)
#include <stdio.h>
static
botch(s)
	char *s;
{
	fprintf(stderr, "\r\nassertion botched: %s\r\n", s);
	(void) fflush(stderr);		/* just in case user buffered it */
	abort();
}
#else
#define	ASSERT(p)
#endif

void *
malloc(nbytes)
	size_t nbytes;
{
	register union overhead *op;
	register long bucket, n;
	register unsigned amt;

	/*
	 * The first time malloc is called, set up the page size and
	 * align the break pointer so all data will be page aligned.
	 */
	if (pagesz == 0) {
		pagesz = n = getpagesize();
		op = (union overhead *)sbrk(0);
		n = n - sizeof (*op) - ((long)op & (n - 1));
		if (n < 0)
			n += pagesz;
		if (n) {
			if (sbrk(n) == (char *)-1)
				return (NULL);
		}
		bucket = 0;
		amt = 8;
		while (pagesz > amt) {
			amt <<= 1;
			bucket++;
		}
		pagebucket = bucket;
	}
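	/*
	 * The break now sits sizeof (union overhead) below a page boundary,
	 * so the data area (op + 1) of each page-sized or larger block later
	 * obtained from sbrk() by morecore() begins on a page boundary, and
	 * pagebucket indexes the bucket whose block size is one page.
	 */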
	/*
	 * Convert amount of memory requested into closest block size
	 * stored in hash buckets which satisfies request.
	 * Account for space used per block for accounting.
	 */
	if (nbytes <= (n = pagesz - sizeof (*op) - RSLOP)) {
#ifndef RCHECK
		amt = 8;	/* size of first bucket */
		bucket = 0;
#else
		amt = 16;	/* size of first bucket */
		bucket = 1;
#endif
		n = -((long)sizeof (*op) + RSLOP);
	} else {
		amt = pagesz;
		bucket = pagebucket;
	}
	while (nbytes > amt + n) {
		amt <<= 1;
		if (amt == 0)
			return (NULL);
		bucket++;
	}
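	/*
	 * amt is now the smallest bucket size whose usable space (amt + n)
	 * holds nbytes, and bucket indexes that size class.
	 */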
	/*
	 * If nothing in hash bucket right now,
	 * request more memory from the system.
	 */
	if ((op = nextf[bucket]) == NULL) {
		morecore(bucket);
		if ((op = nextf[bucket]) == NULL)
			return (NULL);
	}
	/* remove from linked list */
	nextf[bucket] = op->ov_next;
	op->ov_magic = MAGIC;
	op->ov_index = bucket;
#ifdef MSTATS
	nmalloc[bucket]++;
#endif
#ifdef RCHECK
	/*
	 * Record allocated size of block and
	 * bound space with magic numbers.
	 */
	op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1);
	op->ov_rmagic = RMAGIC;
	*(u_short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC;
#endif
	return ((char *)(op + 1));
}

/*
 * Allocate more memory to the indicated bucket.
 */
static void
morecore(bucket)
	int bucket;
{
	register union overhead *op;
	register long sz;		/* size of desired block */
	long amt;			/* amount to allocate */
	int nblks;			/* how many blocks we get */

	/*
	 * sbrk_size <= 0 only for big, FLUFFY, requests (about
	 * 2^30 bytes on a VAX, I think) or for a negative arg.
	 */
	sz = 1 << (bucket + 3);
#ifdef DEBUG
	ASSERT(sz > 0);
#else
	if (sz <= 0)
		return;
#endif
	if (sz < pagesz) {
		amt = pagesz;
		nblks = amt / sz;
	} else {
		amt = sz + pagesz;
		nblks = 1;
	}
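	/*
	 * Sub-page size classes carve one page into amt / sz blocks; classes
	 * of a page or more get a single block with one extra page of slack,
	 * so such a block can hold up to sz + pagesz bytes less the overhead
	 * (matching the bucket selection in malloc() and the old-size
	 * computation in realloc()).
	 */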
	op = (union overhead *)sbrk(amt);
	/* no more room! */
	if ((long)op == -1)
		return;
	/*
	 * Add new memory allocated to that on
	 * free list for this hash bucket.
	 */
	nextf[bucket] = op;
	while (--nblks > 0) {
		op->ov_next = (union overhead *)((caddr_t)op + sz);
		op = (union overhead *)((caddr_t)op + sz);
	}
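	/*
	 * The last block's ov_next is never stored explicitly: memory fresh
	 * from sbrk() is zero filled, so it is already NULL and terminates
	 * the free list.
	 */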
}

void
free(cp)
	void *cp;
{
	register long size;
	register union overhead *op;

	if (cp == NULL)
		return;
	op = (union overhead *)((caddr_t)cp - sizeof (union overhead));
#ifdef DEBUG
	ASSERT(op->ov_magic == MAGIC);		/* make sure it was in use */
#else
	if (op->ov_magic != MAGIC)
		return;				/* sanity */
#endif
#ifdef RCHECK
	ASSERT(op->ov_rmagic == RMAGIC);
	ASSERT(*(u_short *)((caddr_t)(op + 1) + op->ov_size) == RMAGIC);
#endif
	size = op->ov_index;
	ASSERT(size < NBUCKETS);
	op->ov_next = nextf[size];	/* also clobbers ov_magic */
	nextf[size] = op;
#ifdef MSTATS
	nmalloc[size]--;
#endif
}

/*
 * When a program attempts "storage compaction" as mentioned in the
 * old malloc man page, it realloc's an already freed block.  Usually
 * this is the last block it freed; occasionally it might be farther
 * back.  We have to search all the free lists for the block in order
 * to determine its bucket: 1st we make one pass thru the lists
 * checking only the first block in each; if that fails we search
 * ``realloc_srchlen'' blocks in each list for a match (the variable
 * is extern so the caller can modify it).  If that fails we just copy
 * however many bytes were given to realloc() and hope it's not huge.
 */
int realloc_srchlen = 4;	/* 4 should be plenty, -1 =>'s whole list */

void *
realloc(cp, nbytes)
	void *cp;
	size_t nbytes;
{
	register u_long onb;
	register long i;
	union overhead *op;
	char *res;
	int was_alloced = 0;

	if (cp == NULL)
		return (malloc(nbytes));
	if (nbytes == 0) {
		free (cp);
		return NULL;
	}
	op = (union overhead *)((caddr_t)cp - sizeof (union overhead));
	if (op->ov_magic == MAGIC) {
		was_alloced++;
		i = op->ov_index;
	} else {
		/*
		 * Already free, doing "compaction".
		 *
		 * Search for the old block of memory on the
		 * free list.  First, check the most common
		 * case (last element free'd), then (this failing)
		 * the last ``realloc_srchlen'' items free'd.
		 * If all lookups fail, then assume the size of
		 * the memory block being realloc'd is the
		 * largest possible (so that all "nbytes" of new
		 * memory are copied into).  Note that this could cause
		 * a memory fault if the old area was tiny, and the moon
		 * is gibbous.  However, that is very unlikely.
		 */
		if ((i = findbucket(op, 1)) < 0 &&
		    (i = findbucket(op, realloc_srchlen)) < 0)
			i = NBUCKETS;
	}
	onb = 1 << (i + 3);
	if (onb < pagesz)
		onb -= sizeof (*op) + RSLOP;
	else
		onb += pagesz - sizeof (*op) - RSLOP;
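	/*
	 * onb is now the usable size of the old block: the bucket size less
	 * the overhead for sub-page buckets, or the bucket size plus the
	 * extra page morecore() allotted, less the overhead, for page-sized
	 * and larger buckets.
	 */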
	/* avoid the copy if same size block */
	if (was_alloced) {
		if (i) {
			i = 1 << (i + 2);
			if (i < pagesz)
				i -= sizeof (*op) + RSLOP;
			else
				i += pagesz - sizeof (*op) - RSLOP;
		}
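		/*
		 * i now holds the usable size of the next smaller bucket
		 * (or 0 for the smallest); keep the block in place only if
		 * the new size still needs this bucket (nbytes > i) and
		 * also fits in it (nbytes <= onb).
		 */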
		if (nbytes <= onb && nbytes > i) {
#ifdef RCHECK
			op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1);
			*(u_short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC;
#endif
			return(cp);
		} else
			free(cp);
	}
	if ((res = malloc(nbytes)) == NULL)
		return (NULL);
	if (cp != res)		/* common optimization if "compacting" */
		bcopy(cp, res, (nbytes < onb) ? nbytes : onb);
	return (res);
}

/*
 * Search ``srchlen'' elements of each free list for a block whose
 * header starts at ``freep''.  If srchlen is -1 search the whole list.
 * Return bucket number, or -1 if not found.
 */
static
findbucket(freep, srchlen)
	union overhead *freep;
	int srchlen;
{
	register union overhead *p;
	register int i, j;

	for (i = 0; i < NBUCKETS; i++) {
		j = 0;
		for (p = nextf[i]; p && j != srchlen; p = p->ov_next) {
			if (p == freep)
				return (i);
			j++;
		}
	}
	return (-1);
}

#ifdef MSTATS
/*
 * mstats - print out statistics about malloc
 *
 * Prints two lines of numbers, one showing the length of the free list
 * for each size category, the second showing the number of mallocs -
 * frees for each size category.
 */
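/*
 * Typical use (only when the library is compiled with -DMSTATS): call
 * mstats("some tag") at points of interest; the tag string is echoed in
 * the report, which is written to stderr.
 */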
mstats(s)
	char *s;
{
	register int i, j;
	register union overhead *p;
	int totfree = 0,
	totused = 0;

	fprintf(stderr, "Memory allocation statistics %s\nfree:\t", s);
	for (i = 0; i < NBUCKETS; i++) {
		for (j = 0, p = nextf[i]; p; p = p->ov_next, j++)
			;
		fprintf(stderr, " %d", j);
		totfree += j * (1 << (i + 3));
	}
	fprintf(stderr, "\nused:\t");
	for (i = 0; i < NBUCKETS; i++) {
		fprintf(stderr, " %d", nmalloc[i]);
		totused += nmalloc[i] * (1 << (i + 3));
	}
	fprintf(stderr, "\n\tTotal in use: %d, total free: %d\n",
	    totused, totfree);
}
#endif