1 1.33 christos /* $NetBSD: malloc.c,v 1.33 2000/07/06 03:13:22 christos Exp $ */
2 1.18 thorpej
3 1.1 cgd /*
4 1.17 tls * ----------------------------------------------------------------------------
5 1.17 tls * "THE BEER-WARE LICENSE" (Revision 42):
6 1.17 tls * <phk@FreeBSD.ORG> wrote this file. As long as you retain this notice you
7 1.17 tls * can do whatever you want with this stuff. If we meet some day, and you think
8 1.17 tls * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
9 1.17 tls * ----------------------------------------------------------------------------
10 1.1 cgd *
11 1.17 tls * From FreeBSD: malloc.c,v 1.43 1998/09/30 06:13:59 jb
12 1.1 cgd *
13 1.17 tls */
14 1.17 tls
15 1.17 tls /*
16 1.19 thorpej * Defining MALLOC_EXTRA_SANITY will enable extra checks which are related
17 1.17 tls * to internal conditions and consistency in malloc.c. This has a
18 1.17 tls * noticeable runtime performance hit, and generally will not do you
19 1.17 tls * any good unless you fiddle with the internals of malloc or want
20 1.17 tls * to catch random pointer corruption as early as possible.
21 1.17 tls */
22 1.17 tls #ifndef MALLOC_EXTRA_SANITY
23 1.17 tls #undef MALLOC_EXTRA_SANITY
24 1.5 thorpej #endif
25 1.1 cgd
26 1.1 cgd /*
27 1.17 tls * What to use for Junk. This is the byte value we use to fill with
28 1.17 tls * when the 'J' option is enabled.
29 1.17 tls */
30 1.17 tls #define SOME_JUNK 0xd0 /* as in "Duh" :-) */
31 1.17 tls
32 1.17 tls /*
33 1.17 tls * The basic parameters you can tweak.
34 1.17 tls *
35 1.17 tls * malloc_minsize minimum size of an allocation in bytes.
36 1.17 tls * If this is too small it's too much work
37 1.17 tls * to manage them. This is also the smallest
38 1.17 tls * unit of alignment used for the storage
39 1.17 tls * returned by malloc/realloc.
40 1.1 cgd *
41 1.1 cgd */
42 1.1 cgd
43 1.17 tls #if defined(__FreeBSD__)
44 1.17 tls # if defined(__i386__)
45 1.17 tls # define malloc_minsize 16U
46 1.17 tls # endif
47 1.17 tls # if defined(__alpha__)
48 1.17 tls # define malloc_minsize 16U
49 1.17 tls # endif
50 1.17 tls # if !defined(__NETBSD_SYSCALLS)
51 1.17 tls # define HAS_UTRACE
52 1.17 tls # endif
53 1.17 tls /*
54 1.17 tls * Make malloc/free/realloc thread-safe in libc for use with
55 1.17 tls * kernel threads.
56 1.17 tls */
57 1.17 tls # include "libc_private.h"
58 1.17 tls # include "spinlock.h"
59 1.17 tls static spinlock_t thread_lock = _SPINLOCK_INITIALIZER;
60 1.17 tls # define THREAD_LOCK() if (__isthreaded) _SPINLOCK(&thread_lock);
61 1.17 tls # define THREAD_UNLOCK() if (__isthreaded) _SPINUNLOCK(&thread_lock);
62 1.17 tls #endif /* __FreeBSD__ */
63 1.17 tls
64 1.17 tls #if defined(__NetBSD__)
65 1.17 tls # define malloc_minsize 16U
66 1.17 tls #endif /* __NetBSD__ */
67 1.17 tls
68 1.17 tls #if defined(__sparc__) && defined(sun)
69 1.17 tls # define malloc_minsize 16U
70 1.17 tls # define MAP_ANON (0)
71 1.17 tls static int fdzero;
72 1.17 tls # define MMAP_FD fdzero
73 1.17 tls # define INIT_MMAP() \
74 1.17 tls { if ((fdzero=open("/dev/zero", O_RDWR, 0000)) == -1) \
75 1.17 tls wrterror("open of /dev/zero"); }
76 1.17 tls #endif /* __sparc__ */
77 1.17 tls
78 1.17 tls /* Insert your combination here... */
79 1.17 tls #if defined(__FOOCPU__) && defined(__BAROS__)
80 1.17 tls # define malloc_minsize 16U
81 1.17 tls #endif /* __FOOCPU__ && __BAROS__ */
82 1.17 tls
83 1.17 tls
84 1.17 tls /*
85 1.17 tls * No user serviceable parts behind this point.
86 1.17 tls */
87 1.25 kleink #include "namespace.h"
88 1.16 kleink #include <sys/types.h>
89 1.17 tls #include <sys/mman.h>
90 1.17 tls #include <errno.h>
91 1.17 tls #include <fcntl.h>
92 1.17 tls #include <stddef.h>
93 1.9 christos #include <stdio.h>
94 1.1 cgd #include <stdlib.h>
95 1.1 cgd #include <string.h>
96 1.1 cgd #include <unistd.h>
97 1.1 cgd
98 1.17 tls /*
99 1.17 tls * This structure describes a page worth of chunks.
100 1.17 tls */
101 1.17 tls
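/*
 * Chunk pages: the pginfo for a page of chunks lives either inside the
 * page it describes or in a separately allocated block, depending on the
 * chunk size (see malloc_make_chunks()).  bits[] is a variable-length
 * bitmap with one bit per chunk; a set bit means that chunk is free.
 */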
102 1.17 tls struct pginfo {
103 1.17 tls struct pginfo *next; /* next on the free list */
104 1.17 tls void *page; /* Pointer to the page */
105 1.17 tls u_short size; /* size of this page's chunks */
106 1.17 tls u_short shift; /* How far to shift for this size chunks */
107 1.17 tls u_short free; /* How many free chunks */
108 1.17 tls u_short total; /* How many chunks */
109 1.17 tls u_int bits[1]; /* Which chunks are free */
110 1.17 tls };
111 1.1 cgd
112 1.1 cgd /*
113 1.17 tls * This structure describes a number of free pages.
114 1.17 tls */
115 1.17 tls
116 1.17 tls struct pgfree {
117 1.17 tls struct pgfree *next; /* next run of free pages */
118 1.17 tls struct pgfree *prev; /* prev run of free pages */
119 1.17 tls void *page; /* pointer to free pages */
120 1.17 tls void *end; /* pointer to end of free pages */
121 1.17 tls size_t size; /* number of bytes free */
122 1.1 cgd };
123 1.1 cgd
124 1.17 tls /*
125 1.17 tls * How many bits per u_int in the bitmap.
126 1.17 tls * Change only if not 8 bits/byte
127 1.17 tls */
128 1.17 tls #define MALLOC_BITS (8*sizeof(u_int))
129 1.17 tls
130 1.17 tls /*
131 1.17 tls * Magic values to put in the page_directory
132 1.17 tls */
133 1.17 tls #define MALLOC_NOT_MINE ((struct pginfo*) 0)
134 1.17 tls #define MALLOC_FREE ((struct pginfo*) 1)
135 1.17 tls #define MALLOC_FIRST ((struct pginfo*) 2)
136 1.17 tls #define MALLOC_FOLLOW ((struct pginfo*) 3)
137 1.17 tls #define MALLOC_MAGIC ((struct pginfo*) 4)
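/*
 * Entries below MALLOC_MAGIC are sentinels: pages we do not manage,
 * cached free pages, and the first/continuation pages of a multi-page
 * allocation.  Any page_dir entry >= MALLOC_MAGIC is a real pointer to
 * the pginfo describing a page of chunks.
 */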
138 1.17 tls
139 1.20 thorpej /*
140 1.20 thorpej * Page size related parameters, computed at run-time.
141 1.20 thorpej */
142 1.20 thorpej static size_t malloc_pagesize;
143 1.20 thorpej static size_t malloc_pageshift;
144 1.20 thorpej static size_t malloc_pagemask;
145 1.17 tls
146 1.17 tls #ifndef malloc_minsize
147 1.17 tls #define malloc_minsize 16U
148 1.17 tls #endif
149 1.17 tls
150 1.17 tls #ifndef malloc_maxsize
151 1.17 tls #define malloc_maxsize ((malloc_pagesize)>>1)
152 1.17 tls #endif
153 1.17 tls
154 1.17 tls #define pageround(foo) (((foo) + (malloc_pagemask))&(~(malloc_pagemask)))
155 1.33 christos #define ptr2idx(foo) (((size_t)(u_long)(foo) >> malloc_pageshift)-malloc_origo)
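/*
 * pageround() rounds a byte count up to a multiple of the page size;
 * ptr2idx() maps an address to its slot in page_dir by dividing by the
 * page size and rebasing on malloc_origo.
 */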
156 1.1 cgd
157 1.17 tls #ifndef THREAD_LOCK
158 1.17 tls #define THREAD_LOCK()
159 1.17 tls #endif
160 1.1 cgd
161 1.17 tls #ifndef THREAD_UNLOCK
162 1.17 tls #define THREAD_UNLOCK()
163 1.16 kleink #endif
164 1.16 kleink
165 1.17 tls #ifndef MMAP_FD
166 1.17 tls #define MMAP_FD (-1)
167 1.1 cgd #endif
168 1.1 cgd
169 1.17 tls #ifndef INIT_MMAP
170 1.17 tls #define INIT_MMAP()
171 1.18 thorpej #endif
172 1.18 thorpej
173 1.18 thorpej #ifndef MADV_FREE
174 1.18 thorpej #define MADV_FREE MADV_DONTNEED
175 1.9 christos #endif
176 1.9 christos
177 1.17 tls /* Set when initialization has been done */
178 1.17 tls static unsigned malloc_started;
179 1.17 tls
180 1.17 tls /* Recursion flag for public interface. */
181 1.17 tls static int malloc_active;
182 1.17 tls
183 1.17 tls /* Number of free pages we cache */
184 1.17 tls static unsigned malloc_cache = 16;
185 1.17 tls
186 1.17 tls /* The offset from pagenumber to index into the page directory */
187 1.30 enami static size_t malloc_origo;
188 1.17 tls
189 1.17 tls /* The last index in the page directory we care about */
190 1.30 enami static size_t last_idx;
191 1.17 tls
192 1.17 tls /* Pointer to page directory. Allocated "as if with" malloc */
193 1.17 tls static struct pginfo **page_dir;
194 1.17 tls
195 1.17 tls /* How many slots in the page directory */
196 1.17 tls static unsigned malloc_ninfo;
197 1.17 tls
198 1.17 tls /* Free pages line up here */
199 1.17 tls static struct pgfree free_list;
200 1.17 tls
201 1.17 tls /* Abort(), user doesn't handle problems. */
202 1.17 tls static int malloc_abort;
203 1.17 tls
204 1.17 tls /* Are we trying to die ? */
205 1.17 tls static int suicide;
206 1.17 tls
207 1.17 tls /* always realloc ? */
208 1.17 tls static int malloc_realloc;
209 1.9 christos
210 1.17 tls /* pass the kernel a hint on free pages ? */
211 1.32 simonb static int malloc_hint = 0;
212 1.17 tls
213 1.17 tls /* xmalloc behaviour ? */
214 1.17 tls static int malloc_xmalloc;
215 1.17 tls
216 1.17 tls /* sysv behaviour for malloc(0) ? */
217 1.17 tls static int malloc_sysv;
218 1.17 tls
219 1.17 tls /* zero fill ? */
220 1.17 tls static int malloc_zero;
221 1.17 tls
222 1.17 tls /* junk fill ? */
223 1.17 tls static int malloc_junk;
224 1.17 tls
225 1.17 tls #ifdef HAS_UTRACE
226 1.17 tls
227 1.17 tls /* utrace ? */
228 1.17 tls static int malloc_utrace;
229 1.17 tls
230 1.17 tls struct ut { void *p; size_t s; void *r; };
231 1.17 tls
232 1.17 tls void utrace __P((struct ut *, int));
233 1.17 tls
234 1.17 tls #define UTRACE(a, b, c) \
235 1.17 tls if (malloc_utrace) \
236 1.17 tls {struct ut u; u.p=a; u.s = b; u.r=c; utrace(&u, sizeof u);}
237 1.17 tls #else /* !HAS_UTRACE */
238 1.17 tls #define UTRACE(a,b,c)
239 1.17 tls #endif /* HAS_UTRACE */
240 1.17 tls
241 1.17 tls /* my last break. */
242 1.17 tls static void *malloc_brk;
243 1.17 tls
244 1.17 tls /* one location cache for free-list holders */
245 1.17 tls static struct pgfree *px;
246 1.17 tls
247 1.17 tls /* compile-time options */
248 1.17 tls char *malloc_options;
249 1.17 tls
250 1.17 tls /* Name of the current public function */
251 1.22 thorpej static char *malloc_func;
252 1.17 tls
253 1.17 tls /* Macro for mmap */
254 1.17 tls #define MMAP(size) \
255 1.17 tls mmap(0, (size), PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, \
256 1.30 enami MMAP_FD, (off_t)0)
257 1.9 christos
258 1.16 kleink /*
259 1.17 tls * Necessary function declarations
260 1.16 kleink */
261 1.30 enami static int extend_pgdir(size_t idx);
262 1.17 tls static void *imalloc(size_t size);
263 1.17 tls static void ifree(void *ptr);
264 1.17 tls static void *irealloc(void *ptr, size_t size);
265 1.17 tls
266 1.17 tls extern char *__progname;
267 1.17 tls
268 1.17 tls static void
269 1.22 thorpej wrterror(char *p)
270 1.17 tls {
271 1.22 thorpej char *q = " error: ";
272 1.17 tls write(STDERR_FILENO, __progname, strlen(__progname));
273 1.17 tls write(STDERR_FILENO, malloc_func, strlen(malloc_func));
274 1.17 tls write(STDERR_FILENO, q, strlen(q));
275 1.17 tls write(STDERR_FILENO, p, strlen(p));
276 1.17 tls suicide = 1;
277 1.17 tls abort();
278 1.17 tls }
279 1.17 tls
280 1.16 kleink static void
281 1.22 thorpej wrtwarning(char *p)
282 1.1 cgd {
283 1.22 thorpej char *q = " warning: ";
284 1.17 tls if (malloc_abort)
285 1.17 tls wrterror(p);
286 1.17 tls write(STDERR_FILENO, __progname, strlen(__progname));
287 1.17 tls write(STDERR_FILENO, malloc_func, strlen(malloc_func));
288 1.17 tls write(STDERR_FILENO, q, strlen(q));
289 1.17 tls write(STDERR_FILENO, p, strlen(p));
290 1.17 tls }
291 1.17 tls
292 1.16 kleink
293 1.17 tls /*
294 1.17 tls * Allocate a number of pages from the OS
295 1.17 tls */
296 1.17 tls static void *
297 1.30 enami map_pages(size_t pages)
298 1.17 tls {
299 1.17 tls caddr_t result, tail;
300 1.16 kleink
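    /*
     * Grow the heap with brk(2): the new pages start at the current
     * (page-aligned) break and extend 'pages' pages beyond it.
     */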
301 1.33 christos result = (caddr_t)pageround((size_t)(u_long)sbrk(0));
302 1.17 tls tail = result + (pages << malloc_pageshift);
303 1.17 tls
304 1.17 tls if (brk(tail)) {
305 1.19 thorpej #ifdef MALLOC_EXTRA_SANITY
306 1.17 tls wrterror("(ES): map_pages fails\n");
307 1.19 thorpej #endif /* MALLOC_EXTRA_SANITY */
308 1.17 tls return 0;
309 1.17 tls }
310 1.23 thorpej last_idx = ptr2idx(tail) - 1;
311 1.17 tls malloc_brk = tail;
312 1.17 tls
313 1.23 thorpej if ((last_idx+1) >= malloc_ninfo && !extend_pgdir(last_idx))
314 1.17 tls return 0;
315 1.16 kleink
316 1.17 tls return result;
317 1.1 cgd }
318 1.1 cgd
319 1.17 tls /*
320 1.17 tls * Extend page directory
321 1.17 tls */
322 1.17 tls static int
323 1.30 enami extend_pgdir(size_t idx)
324 1.1 cgd {
325 1.17 tls struct pginfo **new, **old;
326 1.30 enami size_t newlen, oldlen;
327 1.1 cgd
328 1.17 tls /* Make it this many pages */
329 1.30 enami newlen = pageround(idx * sizeof *page_dir) + malloc_pagesize;
330 1.17 tls
331 1.17 tls /* remember the old mapping size */
332 1.17 tls oldlen = malloc_ninfo * sizeof *page_dir;
333 1.17 tls
334 1.17 tls /*
335 1.17 tls * NOTE: we allocate new pages and copy the directory rather than tempt
336 1.17 tls * fate by trying to "grow" the region.. There is nothing to prevent
337 1.17 tls * us from accidentally re-mapping space that's been allocated by our caller
338 1.17 tls * via dlopen() or other mmap().
339 1.17 tls *
340 1.17 tls * The copy problem is not too bad, as there is 4K of page index per
341 1.17 tls * 4MB of malloc arena.
342 1.17 tls *
343 1.17 tls * We can totally avoid the copy if we open a file descriptor to associate
344 1.17 tls * the anon mappings with. Then, when we remap the pages at the new
345 1.17 tls * address, the old pages will be "magically" remapped.. But this means
346 1.17 tls * keeping open a "secret" file descriptor.....
347 1.17 tls */
348 1.17 tls
349 1.17 tls /* Get new pages */
350 1.30 enami new = (struct pginfo**) MMAP(newlen);
351 1.17 tls if (new == (struct pginfo **)-1)
352 1.17 tls return 0;
353 1.17 tls
354 1.17 tls /* Copy the old stuff */
355 1.30 enami memcpy(new, page_dir, oldlen);
356 1.17 tls
357 1.17 tls /* register the new size */
358 1.30 enami malloc_ninfo = newlen / sizeof *page_dir;
359 1.17 tls
360 1.17 tls /* swap the pointers */
361 1.17 tls old = page_dir;
362 1.17 tls page_dir = new;
363 1.17 tls
364 1.17 tls /* Now free the old stuff */
365 1.17 tls munmap(old, oldlen);
366 1.17 tls return 1;
367 1.17 tls }
368 1.16 kleink
369 1.17 tls /*
370 1.17 tls * Initialize the world
371 1.17 tls */
372 1.17 tls static void
373 1.17 tls malloc_init (void)
374 1.17 tls {
375 1.17 tls char *p, b[64];
376 1.17 tls int i, j;
377 1.17 tls int errnosave;
378 1.20 thorpej
379 1.20 thorpej /*
380 1.20 thorpej * Compute page-size related variables.
381 1.20 thorpej */
382 1.30 enami malloc_pagesize = (size_t)sysconf(_SC_PAGESIZE);
383 1.20 thorpej malloc_pagemask = malloc_pagesize - 1;
384 1.20 thorpej for (malloc_pageshift = 0;
385 1.20 thorpej (1UL << malloc_pageshift) != malloc_pagesize;
386 1.20 thorpej malloc_pageshift++)
387 1.20 thorpej /* nothing */ ;
388 1.17 tls
389 1.17 tls INIT_MMAP();
390 1.17 tls
391 1.19 thorpej #ifdef MALLOC_EXTRA_SANITY
392 1.17 tls malloc_junk = 1;
393 1.19 thorpej #endif /* MALLOC_EXTRA_SANITY */
394 1.17 tls
395 1.17 tls for (i = 0; i < 3; i++) {
396 1.17 tls if (i == 0) {
397 1.17 tls errnosave = errno;
398 1.17 tls j = readlink("/etc/malloc.conf", b, sizeof b - 1);
399 1.17 tls errno = errnosave;
400 1.17 tls if (j <= 0)
401 1.17 tls continue;
402 1.17 tls b[j] = '\0';
403 1.17 tls p = b;
404 1.17 tls } else if (i == 1) {
405 1.17 tls p = getenv("MALLOC_OPTIONS");
406 1.17 tls } else {
407 1.17 tls p = malloc_options;
408 1.1 cgd }
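    /*
     * Each option is a single character; an upper-case letter turns the
     * option on and the lower-case letter turns it off, e.g. "AJ" enables
     * abort-on-error and junk filling, while '<' and '>' halve or double
     * the page cache.
     */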
409 1.17 tls for (; p && *p; p++) {
410 1.17 tls switch (*p) {
411 1.17 tls case '>': malloc_cache <<= 1; break;
412 1.17 tls case '<': malloc_cache >>= 1; break;
413 1.17 tls case 'a': malloc_abort = 0; break;
414 1.17 tls case 'A': malloc_abort = 1; break;
415 1.17 tls case 'h': malloc_hint = 0; break;
416 1.17 tls case 'H': malloc_hint = 1; break;
417 1.17 tls case 'r': malloc_realloc = 0; break;
418 1.17 tls case 'R': malloc_realloc = 1; break;
419 1.17 tls case 'j': malloc_junk = 0; break;
420 1.17 tls case 'J': malloc_junk = 1; break;
421 1.17 tls #ifdef HAS_UTRACE
422 1.17 tls case 'u': malloc_utrace = 0; break;
423 1.17 tls case 'U': malloc_utrace = 1; break;
424 1.17 tls #endif
425 1.17 tls case 'v': malloc_sysv = 0; break;
426 1.17 tls case 'V': malloc_sysv = 1; break;
427 1.17 tls case 'x': malloc_xmalloc = 0; break;
428 1.17 tls case 'X': malloc_xmalloc = 1; break;
429 1.17 tls case 'z': malloc_zero = 0; break;
430 1.17 tls case 'Z': malloc_zero = 1; break;
431 1.17 tls default:
432 1.17 tls j = malloc_abort;
433 1.17 tls malloc_abort = 0;
434 1.17 tls wrtwarning("unknown char in MALLOC_OPTIONS\n");
435 1.17 tls malloc_abort = j;
436 1.17 tls break;
437 1.17 tls }
438 1.1 cgd }
439 1.17 tls }
440 1.17 tls
441 1.17 tls UTRACE(0, 0, 0);
442 1.17 tls
443 1.17 tls /*
444 1.17 tls * We want junk in the entire allocation, and zero only in the part
445 1.17 tls * the user asked for.
446 1.17 tls */
447 1.17 tls if (malloc_zero)
448 1.17 tls malloc_junk=1;
449 1.17 tls
450 1.17 tls /*
451 1.17 tls * If we run with junk (or implicitly from above: zero), we want to
452 1.17 tls * force realloc() to get new storage, so we can DTRT with it.
453 1.17 tls */
454 1.17 tls if (malloc_junk)
455 1.17 tls malloc_realloc=1;
456 1.17 tls
457 1.17 tls /* Allocate one page for the page directory */
458 1.17 tls page_dir = (struct pginfo **) MMAP(malloc_pagesize);
459 1.17 tls
460 1.17 tls if (page_dir == (struct pginfo **) -1)
461 1.17 tls wrterror("mmap(2) failed, check limits.\n");
462 1.17 tls
463 1.17 tls /*
464 1.17 tls * We need a maximum of malloc_pageshift buckets, steal these from the
465 1.17 tls * front of the page_directory.
466 1.17 tls */
467 1.33 christos malloc_origo = pageround((size_t)(u_long)sbrk(0)) >> malloc_pageshift;
468 1.17 tls malloc_origo -= malloc_pageshift;
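    /*
     * Because of this offset the first malloc_pageshift slots of page_dir
     * never correspond to a real page; they serve as the per-size bucket
     * heads used by malloc_make_chunks()/malloc_bytes(), and ifree() and
     * irealloc() reject any index below malloc_pageshift.
     */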
469 1.17 tls
470 1.17 tls malloc_ninfo = malloc_pagesize / sizeof *page_dir;
471 1.17 tls
472 1.17 tls /* Recalculate the cache size in bytes, and make sure it's nonzero */
473 1.17 tls
474 1.17 tls if (!malloc_cache)
475 1.17 tls malloc_cache++;
476 1.17 tls
477 1.17 tls malloc_cache <<= malloc_pageshift;
478 1.17 tls
479 1.17 tls /*
480 1.17 tls * This is a nice hack from Kaleb Keithley (kaleb@x.org).
481 1.17 tls * We can sbrk(2) further back when we keep this on a low address.
482 1.17 tls */
483 1.17 tls px = (struct pgfree *) imalloc (sizeof *px);
484 1.17 tls
485 1.17 tls /* Been here, done that */
486 1.17 tls malloc_started++;
487 1.17 tls }
488 1.17 tls
489 1.17 tls /*
490 1.17 tls * Allocate a number of complete pages
491 1.17 tls */
492 1.17 tls static void *
493 1.17 tls malloc_pages(size_t size)
494 1.17 tls {
495 1.17 tls void *p, *delay_free = 0;
496 1.17 tls int i;
497 1.17 tls struct pgfree *pf;
498 1.30 enami size_t idx;
499 1.17 tls
500 1.17 tls size = pageround(size);
501 1.17 tls
502 1.17 tls p = 0;
503 1.17 tls
504 1.17 tls /* Look for free pages before asking for more */
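    /*
     * The free list is kept sorted by address (see free_pages()); take the
     * first run that is large enough, removing it on an exact fit and
     * carving the request off its front otherwise.
     */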
505 1.17 tls for(pf = free_list.next; pf; pf = pf->next) {
506 1.17 tls
507 1.19 thorpej #ifdef MALLOC_EXTRA_SANITY
508 1.17 tls if (pf->size & malloc_pagemask)
509 1.17 tls wrterror("(ES): junk length entry on free_list\n");
510 1.17 tls if (!pf->size)
511 1.17 tls wrterror("(ES): zero length entry on free_list\n");
512 1.17 tls if (pf->page == pf->end)
513 1.17 tls wrterror("(ES): zero entry on free_list\n");
514 1.17 tls if (pf->page > pf->end)
515 1.17 tls wrterror("(ES): sick entry on free_list\n");
516 1.17 tls if ((void*)pf->page >= (void*)sbrk(0))
517 1.17 tls wrterror("(ES): entry on free_list past brk\n");
518 1.23 thorpej if (page_dir[ptr2idx(pf->page)] != MALLOC_FREE)
519 1.17 tls wrterror("(ES): non-free first page on free-list\n");
520 1.23 thorpej if (page_dir[ptr2idx(pf->end)-1] != MALLOC_FREE)
521 1.17 tls wrterror("(ES): non-free last page on free-list\n");
522 1.19 thorpej #endif /* MALLOC_EXTRA_SANITY */
523 1.17 tls
524 1.17 tls if (pf->size < size)
525 1.17 tls continue;
526 1.17 tls
527 1.17 tls if (pf->size == size) {
528 1.17 tls p = pf->page;
529 1.17 tls if (pf->next)
530 1.17 tls pf->next->prev = pf->prev;
531 1.17 tls pf->prev->next = pf->next;
532 1.17 tls delay_free = pf;
533 1.17 tls break;
534 1.17 tls }
535 1.17 tls
536 1.17 tls p = pf->page;
537 1.17 tls pf->page = (char *)pf->page + size;
538 1.17 tls pf->size -= size;
539 1.17 tls break;
540 1.17 tls }
541 1.17 tls
542 1.19 thorpej #ifdef MALLOC_EXTRA_SANITY
543 1.23 thorpej if (p && page_dir[ptr2idx(p)] != MALLOC_FREE)
544 1.17 tls wrterror("(ES): allocated non-free page on free-list\n");
545 1.19 thorpej #endif /* MALLOC_EXTRA_SANITY */
546 1.17 tls
547 1.17 tls size >>= malloc_pageshift;
548 1.17 tls
549 1.17 tls /* Map new pages */
550 1.17 tls if (!p)
551 1.17 tls p = map_pages(size);
552 1.17 tls
553 1.17 tls if (p) {
554 1.17 tls
555 1.23 thorpej idx = ptr2idx(p);
556 1.23 thorpej page_dir[idx] = MALLOC_FIRST;
557 1.17 tls for (i=1;i<size;i++)
558 1.23 thorpej page_dir[idx+i] = MALLOC_FOLLOW;
559 1.17 tls
560 1.17 tls if (malloc_junk)
561 1.17 tls memset(p, SOME_JUNK, size << malloc_pageshift);
562 1.17 tls }
563 1.17 tls
564 1.17 tls if (delay_free) {
565 1.17 tls if (!px)
566 1.17 tls px = delay_free;
567 1.17 tls else
568 1.17 tls ifree(delay_free);
569 1.17 tls }
570 1.17 tls
571 1.17 tls return p;
572 1.17 tls }
573 1.17 tls
574 1.17 tls /*
575 1.17 tls * Allocate a page of fragments
576 1.17 tls */
577 1.17 tls
578 1.17 tls static __inline__ int
579 1.17 tls malloc_make_chunks(int bits)
580 1.17 tls {
581 1.17 tls struct pginfo *bp;
582 1.17 tls void *pp;
583 1.31 enami int i, k, l;
584 1.17 tls
585 1.17 tls /* Allocate a new bucket */
586 1.17 tls pp = malloc_pages(malloc_pagesize);
587 1.17 tls if (!pp)
588 1.17 tls return 0;
589 1.17 tls
590 1.17 tls /* Find length of admin structure */
591 1.33 christos l = (int)offsetof(struct pginfo, bits[0]);
592 1.17 tls l += sizeof bp->bits[0] *
593 1.17 tls (((malloc_pagesize >> bits)+MALLOC_BITS-1) / MALLOC_BITS);
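    /*
     * l is now the size of the pginfo header plus one bitmap word per
     * MALLOC_BITS chunks that fit in a page of this chunk size.
     */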
594 1.17 tls
595 1.17 tls /* Don't waste more than two chunks on this */
596 1.17 tls if ((1<<(bits)) <= l+l) {
597 1.17 tls bp = (struct pginfo *)pp;
598 1.17 tls } else {
599 1.31 enami bp = (struct pginfo *)imalloc((size_t)l);
600 1.17 tls if (!bp) {
601 1.17 tls ifree(pp);
602 1.17 tls return 0;
603 1.1 cgd }
604 1.17 tls }
605 1.17 tls
606 1.17 tls bp->size = (1<<bits);
607 1.17 tls bp->shift = bits;
608 1.17 tls bp->total = bp->free = malloc_pagesize >> bits;
609 1.17 tls bp->page = pp;
610 1.17 tls
611 1.17 tls /* set all valid bits in the bitmap */
612 1.17 tls k = bp->total;
613 1.17 tls i = 0;
614 1.17 tls
615 1.17 tls /* Do a bunch at a time */
616 1.17 tls for(;k-i >= MALLOC_BITS; i += MALLOC_BITS)
617 1.30 enami bp->bits[i / MALLOC_BITS] = ~0U;
618 1.17 tls
619 1.17 tls for(; i < k; i++)
620 1.17 tls bp->bits[i/MALLOC_BITS] |= 1<<(i%MALLOC_BITS);
621 1.17 tls
622 1.17 tls if (bp == bp->page) {
623 1.17 tls /* Mark the ones we stole for ourselves */
624 1.17 tls for(i=0;l > 0;i++) {
625 1.17 tls bp->bits[i/MALLOC_BITS] &= ~(1<<(i%MALLOC_BITS));
626 1.17 tls bp->free--;
627 1.17 tls bp->total--;
628 1.17 tls l -= (1 << bits);
629 1.1 cgd }
630 1.17 tls }
631 1.17 tls
632 1.17 tls /* MALLOC_LOCK */
633 1.17 tls
634 1.23 thorpej page_dir[ptr2idx(pp)] = bp;
635 1.17 tls
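    /*
     * page_dir[bits] heads the list of pages that still have free chunks
     * of this size; malloc_bytes() allocates from the head of that list.
     */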
636 1.17 tls bp->next = page_dir[bits];
637 1.17 tls page_dir[bits] = bp;
638 1.17 tls
639 1.17 tls /* MALLOC_UNLOCK */
640 1.17 tls
641 1.17 tls return 1;
642 1.1 cgd }
643 1.1 cgd
644 1.1 cgd /*
645 1.17 tls * Allocate a fragment
646 1.1 cgd */
647 1.17 tls static void *
648 1.17 tls malloc_bytes(size_t size)
649 1.1 cgd {
650 1.30 enami size_t i;
651 1.30 enami int j;
652 1.17 tls u_int u;
653 1.17 tls struct pginfo *bp;
654 1.17 tls int k;
655 1.17 tls u_int *lp;
656 1.17 tls
657 1.17 tls /* Don't bother with anything less than this */
658 1.17 tls if (size < malloc_minsize)
659 1.17 tls size = malloc_minsize;
660 1.17 tls
661 1.17 tls /* Find the right bucket */
662 1.17 tls j = 1;
663 1.17 tls i = size-1;
664 1.17 tls while (i >>= 1)
665 1.17 tls j++;
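    /*
     * j is now the bucket index: the smallest power of two that holds
     * 'size' bytes.  For example, a 24 byte request gives j == 5 and is
     * served from a page of 32 byte chunks.
     */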
666 1.17 tls
667 1.17 tls /* If the bucket is empty, make another page of chunks of that size */
668 1.17 tls if (!page_dir[j] && !malloc_make_chunks(j))
669 1.17 tls return 0;
670 1.17 tls
671 1.17 tls bp = page_dir[j];
672 1.17 tls
673 1.17 tls /* Find first word of bitmap which isn't empty */
674 1.17 tls for (lp = bp->bits; !*lp; lp++)
675 1.17 tls ;
676 1.17 tls
677 1.17 tls /* Find that bit, and tweak it */
678 1.17 tls u = 1;
679 1.17 tls k = 0;
680 1.17 tls while (!(*lp & u)) {
681 1.17 tls u += u;
682 1.17 tls k++;
683 1.17 tls }
684 1.17 tls *lp ^= u;
685 1.17 tls
686 1.17 tls /* If there are no more free, remove from free-list */
687 1.17 tls if (!--bp->free) {
688 1.17 tls page_dir[j] = bp->next;
689 1.17 tls bp->next = 0;
690 1.17 tls }
691 1.17 tls
692 1.17 tls /* Adjust to the real offset of that chunk */
693 1.17 tls k += (lp-bp->bits)*MALLOC_BITS;
694 1.17 tls k <<= bp->shift;
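    /*
     * k is the chunk number (bitmap word * MALLOC_BITS + bit position),
     * shifted into a byte offset within the page.
     */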
695 1.17 tls
696 1.17 tls if (malloc_junk)
697 1.30 enami memset((u_char*)bp->page + k, SOME_JUNK, (size_t)bp->size);
698 1.1 cgd
699 1.17 tls return (u_char *)bp->page + k;
700 1.1 cgd }
701 1.1 cgd
702 1.17 tls /*
703 1.17 tls * Allocate a piece of memory
704 1.17 tls */
705 1.17 tls static void *
706 1.17 tls imalloc(size_t size)
707 1.17 tls {
708 1.17 tls void *result;
709 1.17 tls
710 1.17 tls if (suicide)
711 1.17 tls abort();
712 1.17 tls
713 1.17 tls if ((size + malloc_pagesize) < size) /* Check for overflow */
714 1.17 tls result = 0;
715 1.17 tls else if (size <= malloc_maxsize)
716 1.17 tls result = malloc_bytes(size);
717 1.17 tls else
718 1.17 tls result = malloc_pages(size);
719 1.17 tls
720 1.17 tls if (malloc_abort && !result)
721 1.17 tls wrterror("allocation failed.\n");
722 1.17 tls
723 1.17 tls if (malloc_zero && result)
724 1.17 tls memset(result, 0, size);
725 1.17 tls
726 1.17 tls return result;
727 1.1 cgd }
728 1.1 cgd
729 1.1 cgd /*
730 1.17 tls * Change the size of an allocation.
731 1.1 cgd */
732 1.17 tls static void *
733 1.17 tls irealloc(void *ptr, size_t size)
734 1.17 tls {
735 1.17 tls void *p;
736 1.30 enami size_t osize, idx;
737 1.17 tls struct pginfo **mp;
738 1.30 enami size_t i;
739 1.17 tls
740 1.17 tls if (suicide)
741 1.17 tls abort();
742 1.17 tls
743 1.23 thorpej idx = ptr2idx(ptr);
744 1.1 cgd
745 1.23 thorpej if (idx < malloc_pageshift) {
746 1.17 tls wrtwarning("junk pointer, too low to make sense.\n");
747 1.17 tls return 0;
748 1.17 tls }
749 1.17 tls
750 1.23 thorpej if (idx > last_idx) {
751 1.17 tls wrtwarning("junk pointer, too high to make sense.\n");
752 1.17 tls return 0;
753 1.17 tls }
754 1.17 tls
755 1.23 thorpej mp = &page_dir[idx];
756 1.17 tls
757 1.17 tls if (*mp == MALLOC_FIRST) { /* Page allocation */
758 1.17 tls
759 1.17 tls /* Check the pointer */
760 1.33 christos if ((size_t)(u_long)ptr & malloc_pagemask) {
761 1.17 tls wrtwarning("modified (page-) pointer.\n");
762 1.17 tls return 0;
763 1.17 tls }
764 1.17 tls
765 1.17 tls /* Find the size in bytes */
766 1.17 tls for (osize = malloc_pagesize; *++mp == MALLOC_FOLLOW;)
767 1.17 tls osize += malloc_pagesize;
768 1.17 tls
769 1.17 tls if (!malloc_realloc && /* unless we have to, */
770 1.17 tls size <= osize && /* .. or are too small, */
771 1.17 tls size > (osize - malloc_pagesize)) { /* .. or can free a page, */
772 1.17 tls return ptr; /* don't do anything. */
773 1.6 jtc }
774 1.17 tls
775 1.17 tls } else if (*mp >= MALLOC_MAGIC) { /* Chunk allocation */
776 1.17 tls
777 1.17 tls /* Check the pointer for sane values */
778 1.33 christos if (((size_t)(u_long)ptr & ((*mp)->size-1))) {
779 1.17 tls wrtwarning("modified (chunk-) pointer.\n");
780 1.17 tls return 0;
781 1.1 cgd }
782 1.17 tls
783 1.17 tls /* Find the chunk index in the page */
784 1.33 christos i = ((size_t)(u_long)ptr & malloc_pagemask) >> (*mp)->shift;
785 1.17 tls
786 1.17 tls /* Verify that it isn't a free chunk already */
787 1.17 tls if ((*mp)->bits[i/MALLOC_BITS] & (1<<(i%MALLOC_BITS))) {
788 1.17 tls wrtwarning("chunk is already free.\n");
789 1.17 tls return 0;
790 1.16 kleink }
791 1.17 tls
792 1.17 tls osize = (*mp)->size;
793 1.17 tls
794 1.17 tls if (!malloc_realloc && /* Unless we have to, */
795 1.17 tls size < osize && /* ..or are too small, */
796 1.17 tls (size > osize/2 || /* ..or could use a smaller size, */
797 1.17 tls osize == malloc_minsize)) { /* ..(if there is one) */
798 1.17 tls return ptr; /* ..Don't do anything */
799 1.1 cgd }
800 1.17 tls
801 1.17 tls } else {
802 1.17 tls wrtwarning("pointer to wrong page.\n");
803 1.17 tls return 0;
804 1.17 tls }
805 1.17 tls
806 1.17 tls p = imalloc(size);
807 1.17 tls
808 1.17 tls if (p) {
809 1.17 tls /* copy the lesser of the two sizes, and free the old one */
810 1.17 tls if (!size || !osize)
811 1.17 tls ;
812 1.17 tls else if (osize < size)
813 1.17 tls memcpy(p, ptr, osize);
814 1.17 tls else
815 1.17 tls memcpy(p, ptr, size);
816 1.17 tls ifree(ptr);
817 1.17 tls }
818 1.17 tls return p;
819 1.1 cgd }
820 1.1 cgd
821 1.1 cgd /*
822 1.17 tls * Free a sequence of pages
823 1.1 cgd */
824 1.17 tls
825 1.17 tls static __inline__ void
826 1.30 enami free_pages(void *ptr, size_t idx, struct pginfo *info)
827 1.17 tls {
828 1.30 enami size_t i;
829 1.17 tls struct pgfree *pf, *pt=0;
830 1.30 enami size_t l;
831 1.17 tls void *tail;
832 1.17 tls
833 1.17 tls if (info == MALLOC_FREE) {
834 1.17 tls wrtwarning("page is already free.\n");
835 1.17 tls return;
836 1.17 tls }
837 1.17 tls
838 1.17 tls if (info != MALLOC_FIRST) {
839 1.17 tls wrtwarning("pointer to wrong page.\n");
840 1.17 tls return;
841 1.17 tls }
842 1.17 tls
843 1.33 christos if ((size_t)(u_long)ptr & malloc_pagemask) {
844 1.17 tls wrtwarning("modified (page-) pointer.\n");
845 1.17 tls return;
846 1.17 tls }
847 1.17 tls
848 1.17 tls /* Count how many pages and mark them free at the same time */
849 1.23 thorpej page_dir[idx] = MALLOC_FREE;
850 1.23 thorpej for (i = 1; page_dir[idx+i] == MALLOC_FOLLOW; i++)
851 1.23 thorpej page_dir[idx + i] = MALLOC_FREE;
852 1.17 tls
853 1.17 tls l = i << malloc_pageshift;
854 1.17 tls
855 1.17 tls if (malloc_junk)
856 1.17 tls memset(ptr, SOME_JUNK, l);
857 1.17 tls
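    /*
     * MADV_FREE (or MADV_DONTNEED where that is all we have) hints that
     * the kernel may reclaim the physical pages; the mapping itself
     * remains valid.
     */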
858 1.17 tls if (malloc_hint)
859 1.17 tls madvise(ptr, l, MADV_FREE);
860 1.17 tls
861 1.17 tls tail = (char *)ptr+l;
862 1.17 tls
863 1.17 tls /* add to free-list */
864 1.17 tls if (!px)
865 1.17 tls px = imalloc(sizeof *pt); /* This cannot fail... */
866 1.17 tls px->page = ptr;
867 1.17 tls px->end = tail;
868 1.17 tls px->size = l;
869 1.17 tls if (!free_list.next) {
870 1.17 tls
871 1.17 tls /* Nothing on free list, put this at head */
872 1.17 tls px->next = free_list.next;
873 1.17 tls px->prev = &free_list;
874 1.17 tls free_list.next = px;
875 1.17 tls pf = px;
876 1.17 tls px = 0;
877 1.17 tls
878 1.17 tls } else {
879 1.17 tls
880 1.17 tls /* Find the right spot, leave pf pointing to the modified entry. */
881 1.17 tls tail = (char *)ptr+l;
882 1.17 tls
883 1.17 tls for(pf = free_list.next; pf->end < ptr && pf->next; pf = pf->next)
884 1.17 tls ; /* Race ahead here */
885 1.17 tls
886 1.17 tls if (pf->page > tail) {
887 1.17 tls /* Insert before entry */
888 1.17 tls px->next = pf;
889 1.17 tls px->prev = pf->prev;
890 1.17 tls pf->prev = px;
891 1.17 tls px->prev->next = px;
892 1.17 tls pf = px;
893 1.17 tls px = 0;
894 1.17 tls } else if (pf->end == ptr ) {
895 1.17 tls /* Append to the previous entry */
896 1.17 tls pf->end = (char *)pf->end + l;
897 1.17 tls pf->size += l;
898 1.17 tls if (pf->next && pf->end == pf->next->page ) {
899 1.17 tls /* And collapse the next too. */
900 1.17 tls pt = pf->next;
901 1.17 tls pf->end = pt->end;
902 1.17 tls pf->size += pt->size;
903 1.17 tls pf->next = pt->next;
904 1.17 tls if (pf->next)
905 1.17 tls pf->next->prev = pf;
906 1.17 tls }
907 1.17 tls } else if (pf->page == tail) {
908 1.17 tls /* Prepend to entry */
909 1.17 tls pf->size += l;
910 1.17 tls pf->page = ptr;
911 1.17 tls } else if (!pf->next) {
912 1.17 tls /* Append at tail of chain */
913 1.17 tls px->next = 0;
914 1.17 tls px->prev = pf;
915 1.17 tls pf->next = px;
916 1.17 tls pf = px;
917 1.17 tls px = 0;
918 1.17 tls } else {
919 1.17 tls wrterror("freelist is destroyed.\n");
920 1.1 cgd }
921 1.17 tls }
922 1.17 tls
923 1.17 tls /* Return something to OS ? */
924 1.17 tls if (!pf->next && /* If we're the last one, */
925 1.17 tls pf->size > malloc_cache && /* ..and the cache is full, */
926 1.17 tls pf->end == malloc_brk && /* ..and none behind us, */
927 1.17 tls malloc_brk == sbrk(0)) { /* ..and it's OK to do... */
928 1.17 tls
929 1.17 tls /*
930 1.17 tls * Keep the cache intact. Notice that the '>' above guarantees that
931 1.17 tls * the pf will always have at least one page afterwards.
932 1.17 tls */
933 1.17 tls pf->end = (char *)pf->page + malloc_cache;
934 1.17 tls pf->size = malloc_cache;
935 1.17 tls
936 1.17 tls brk(pf->end);
937 1.17 tls malloc_brk = pf->end;
938 1.17 tls
939 1.23 thorpej idx = ptr2idx(pf->end);
940 1.23 thorpej last_idx = idx - 1;
941 1.17 tls
942 1.23 thorpej for(i=idx;i <= last_idx;)
943 1.17 tls page_dir[i++] = MALLOC_NOT_MINE;
944 1.17 tls
945 1.17 tls /* XXX: We could realloc/shrink the pagedir here I guess. */
946 1.17 tls }
947 1.17 tls if (pt)
948 1.17 tls ifree(pt);
949 1.1 cgd }
950 1.1 cgd
951 1.1 cgd /*
952 1.17 tls * Free a chunk, and possibly the page it's on, if the page becomes empty.
953 1.1 cgd */
954 1.17 tls
955 1.17 tls static __inline__ void
956 1.30 enami free_bytes(void *ptr, size_t idx, struct pginfo *info)
957 1.17 tls {
958 1.30 enami size_t i;
959 1.17 tls struct pginfo **mp;
960 1.17 tls void *vp;
961 1.17 tls
962 1.17 tls /* Find the chunk number on the page */
963 1.33 christos i = ((size_t)(u_long)ptr & malloc_pagemask) >> info->shift;
964 1.17 tls
965 1.33 christos if (((size_t)(u_long)ptr & (info->size-1))) {
966 1.17 tls wrtwarning("modified (chunk-) pointer.\n");
967 1.17 tls return;
968 1.17 tls }
969 1.17 tls
970 1.17 tls if (info->bits[i/MALLOC_BITS] & (1<<(i%MALLOC_BITS))) {
971 1.17 tls wrtwarning("chunk is already free.\n");
972 1.17 tls return;
973 1.17 tls }
974 1.17 tls
975 1.17 tls if (malloc_junk)
976 1.30 enami memset(ptr, SOME_JUNK, (size_t)info->size);
977 1.17 tls
978 1.17 tls info->bits[i/MALLOC_BITS] |= 1<<(i%MALLOC_BITS);
979 1.17 tls info->free++;
980 1.17 tls
981 1.17 tls mp = page_dir + info->shift;
982 1.17 tls
983 1.17 tls if (info->free == 1) {
984 1.17 tls
985 1.17 tls /* Page became non-full */
986 1.17 tls
987 1.17 tls mp = page_dir + info->shift;
988 1.17 tls /* Insert in address order */
989 1.17 tls while (*mp && (*mp)->next && (*mp)->next->page < info->page)
990 1.17 tls mp = &(*mp)->next;
991 1.17 tls info->next = *mp;
992 1.17 tls *mp = info;
993 1.17 tls return;
994 1.17 tls }
995 1.17 tls
996 1.17 tls if (info->free != info->total)
997 1.17 tls return;
998 1.17 tls
999 1.17 tls /* Find & remove this page in the queue */
1000 1.17 tls while (*mp != info) {
1001 1.17 tls mp = &((*mp)->next);
1002 1.19 thorpej #ifdef MALLOC_EXTRA_SANITY
1003 1.17 tls if (!*mp)
1004 1.17 tls wrterror("(ES): Not on queue\n");
1005 1.19 thorpej #endif /* MALLOC_EXTRA_SANITY */
1006 1.17 tls }
1007 1.17 tls *mp = info->next;
1008 1.17 tls
1009 1.17 tls /* Free the page & the info structure if need be */
1010 1.30 enami page_dir[idx] = MALLOC_FIRST;
1011 1.17 tls vp = info->page; /* Order is important ! */
1012 1.17 tls if(vp != (void*)info)
1013 1.17 tls ifree(info);
1014 1.17 tls ifree(vp);
1015 1.17 tls }
1016 1.17 tls
1017 1.17 tls static void
1018 1.17 tls ifree(void *ptr)
1019 1.17 tls {
1020 1.17 tls struct pginfo *info;
1021 1.30 enami size_t idx;
1022 1.17 tls
1023 1.17 tls /* This is legal */
1024 1.17 tls if (!ptr)
1025 1.17 tls return;
1026 1.17 tls
1027 1.17 tls if (!malloc_started) {
1028 1.17 tls wrtwarning("malloc() has never been called.\n");
1029 1.17 tls return;
1030 1.17 tls }
1031 1.17 tls
1032 1.17 tls /* If we're already sinking, don't make matters any worse. */
1033 1.17 tls if (suicide)
1034 1.17 tls return;
1035 1.17 tls
1036 1.23 thorpej idx = ptr2idx(ptr);
1037 1.17 tls
1038 1.23 thorpej if (idx < malloc_pageshift) {
1039 1.17 tls wrtwarning("junk pointer, too low to make sense.\n");
1040 1.17 tls return;
1041 1.17 tls }
1042 1.17 tls
1043 1.23 thorpej if (idx > last_idx) {
1044 1.17 tls wrtwarning("junk pointer, too high to make sense.\n");
1045 1.17 tls return;
1046 1.17 tls }
1047 1.17 tls
1048 1.23 thorpej info = page_dir[idx];
1049 1.17 tls
1050 1.17 tls if (info < MALLOC_MAGIC)
1051 1.23 thorpej free_pages(ptr, idx, info);
1052 1.17 tls else
1053 1.23 thorpej free_bytes(ptr, idx, info);
1054 1.17 tls return;
1055 1.17 tls }
1056 1.17 tls
1057 1.17 tls /*
1058 1.17 tls * These are the public exported interface routines.
1059 1.17 tls */
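/*
 * They wrap the internal imalloc()/ifree()/irealloc() with the thread
 * lock, a recursion check, optional utrace(2) logging and the SysV and
 * xmalloc option handling.
 */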
1060 1.17 tls
1061 1.17 tls
1062 1.17 tls void *
1063 1.17 tls malloc(size_t size)
1064 1.17 tls {
1065 1.17 tls register void *r;
1066 1.17 tls
1067 1.17 tls THREAD_LOCK();
1068 1.17 tls malloc_func = " in malloc():";
1069 1.17 tls if (malloc_active++) {
1070 1.17 tls wrtwarning("recursive call.\n");
1071 1.17 tls malloc_active--;
1072 1.17 tls return (0);
1073 1.17 tls }
1074 1.17 tls if (!malloc_started)
1075 1.17 tls malloc_init();
1076 1.17 tls if (malloc_sysv && !size)
1077 1.17 tls r = 0;
1078 1.17 tls else
1079 1.17 tls r = imalloc(size);
1080 1.17 tls UTRACE(0, size, r);
1081 1.17 tls malloc_active--;
1082 1.17 tls THREAD_UNLOCK();
1083 1.26 kleink if (r == NULL && (size != 0 || !malloc_sysv)) {
1084 1.24 thorpej if (malloc_xmalloc)
1085 1.24 thorpej wrterror("out of memory.\n");
1086 1.24 thorpej errno = ENOMEM;
1087 1.24 thorpej }
1088 1.17 tls return (r);
1089 1.17 tls }
1090 1.17 tls
1091 1.9 christos void
1092 1.17 tls free(void *ptr)
1093 1.1 cgd {
1094 1.17 tls THREAD_LOCK();
1095 1.17 tls malloc_func = " in free():";
1096 1.17 tls if (malloc_active++) {
1097 1.17 tls wrtwarning("recursive call.\n");
1098 1.17 tls malloc_active--;
1099 1.17 tls return;
1100 1.17 tls } else {
1101 1.17 tls ifree(ptr);
1102 1.17 tls UTRACE(ptr, 0, 0);
1103 1.17 tls }
1104 1.17 tls malloc_active--;
1105 1.17 tls THREAD_UNLOCK();
1106 1.17 tls return;
1107 1.17 tls }
1108 1.17 tls
1109 1.17 tls void *
1110 1.17 tls realloc(void *ptr, size_t size)
1111 1.17 tls {
1112 1.17 tls register void *r;
1113 1.17 tls
1114 1.17 tls THREAD_LOCK();
1115 1.17 tls malloc_func = " in realloc():";
1116 1.17 tls if (malloc_active++) {
1117 1.17 tls wrtwarning("recursive call.\n");
1118 1.17 tls malloc_active--;
1119 1.17 tls return (0);
1120 1.17 tls }
1121 1.17 tls if (ptr && !malloc_started) {
1122 1.17 tls wrtwarning("malloc() has never been called.\n");
1123 1.17 tls ptr = 0;
1124 1.17 tls }
1125 1.17 tls if (!malloc_started)
1126 1.17 tls malloc_init();
1127 1.17 tls if (malloc_sysv && !size) {
1128 1.17 tls ifree(ptr);
1129 1.17 tls r = 0;
1130 1.17 tls } else if (!ptr) {
1131 1.17 tls r = imalloc(size);
1132 1.17 tls } else {
1133 1.17 tls r = irealloc(ptr, size);
1134 1.17 tls }
1135 1.17 tls UTRACE(ptr, size, r);
1136 1.17 tls malloc_active--;
1137 1.17 tls THREAD_UNLOCK();
1138 1.26 kleink if (r == NULL && (size != 0 || !malloc_sysv)) {
1139 1.24 thorpej if (malloc_xmalloc)
1140 1.24 thorpej wrterror("out of memory.\n");
1141 1.24 thorpej errno = ENOMEM;
1142 1.24 thorpej }
1143 1.17 tls return (r);
1144 1.1 cgd }