malloc.c revision 1.49 1 /* $NetBSD: malloc.c,v 1.49 2007/11/28 21:55:14 christos Exp $ */
2
3 /*
4 * ----------------------------------------------------------------------------
5 * "THE BEER-WARE LICENSE" (Revision 42):
6 * <phk (at) FreeBSD.ORG> wrote this file. As long as you retain this notice you
7 * can do whatever you want with this stuff. If we meet some day, and you think
8 * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
9 * ----------------------------------------------------------------------------
10 *
11 * From FreeBSD: malloc.c,v 1.91 2006/01/12 07:28:20 jasone
12 *
13 */
14
15 /*
16 * Defining MALLOC_EXTRA_SANITY will enable extra checks which are related
17 * to internal conditions and consistency in malloc.c. This has a
18 * noticeable runtime performance hit, and generally will not do you
19 * any good unless you fiddle with the internals of malloc or want
20 * to catch random pointer corruption as early as possible.
21 */
22 #ifndef MALLOC_EXTRA_SANITY
23 #undef MALLOC_EXTRA_SANITY
24 #endif
25
26 /*
27 * What to use for Junk. This is the byte value we use to fill with
28 * when the 'J' option is enabled.
29 */
30 #define SOME_JUNK 0xd0 /* as in "Duh" :-) */
31
32 /*
33 * The basic parameters you can tweak.
34 *
35 * malloc_minsize minimum size of an allocation in bytes.
36 * If this is too small it's too much work
37 * to manage them. This is also the smallest
38 * unit of alignment used for the storage
39 * returned by malloc/realloc.
40 *
41 */
42
43 #include "namespace.h"
44 #if defined(__FreeBSD__)
45 # if defined(__i386__)
46 # define malloc_minsize 16U
47 # endif
48 # if defined(__ia64__)
49 # define malloc_pageshift 13U
50 # define malloc_minsize 16U
51 # endif
52 # if defined(__alpha__)
53 # define malloc_pageshift 13U
54 # define malloc_minsize 16U
55 # endif
56 # if defined(__sparc64__)
57 # define malloc_pageshift 13U
58 # define malloc_minsize 16U
59 # endif
60 # if defined(__amd64__)
61 # define malloc_pageshift 12U
62 # define malloc_minsize 16U
63 # endif
64 # if defined(__arm__)
65 # define malloc_pageshift 12U
66 # define malloc_minsize 16U
67 # endif
68 # define HAS_UTRACE
69 # define UTRACE_LABEL
70
71 #include <sys/cdefs.h>
72 void utrace(struct ut *, int);
73
74 /*
75 * Make malloc/free/realloc thread-safe in libc for use with
76 * kernel threads.
77 */
78 # include "libc_private.h"
79 # include "spinlock.h"
80 static spinlock_t thread_lock = _SPINLOCK_INITIALIZER;
81 # define _MALLOC_LOCK() if (__isthreaded) _SPINLOCK(&thread_lock);
82 # define _MALLOC_UNLOCK() if (__isthreaded) _SPINUNLOCK(&thread_lock);
83 #endif /* __FreeBSD__ */
84
85 #if defined(__NetBSD__)
86 void _malloc_prefork(void);
87 void _malloc_postfork(void);
88 # define malloc_minsize 16U
89 # define HAS_UTRACE
90 # define UTRACE_LABEL "malloc",
91 #include <sys/cdefs.h>
92 #if defined(LIBC_SCCS) && !defined(lint)
93 __RCSID("$NetBSD: malloc.c,v 1.49 2007/11/28 21:55:14 christos Exp $");
94 #endif /* LIBC_SCCS and not lint */
95 #include <sys/types.h>
96 int utrace(const char *, void *, size_t);
97
98 #include <reentrant.h>
99 extern int __isthreaded;
100 static mutex_t thread_lock = MUTEX_INITIALIZER;
101 #define _MALLOC_LOCK() if (__isthreaded) mutex_lock(&thread_lock);
102 #define _MALLOC_UNLOCK() if (__isthreaded) mutex_unlock(&thread_lock);
103 #endif /* __NetBSD__ */
104
105 #if defined(__sparc__) && defined(sun)
106 # define malloc_minsize 16U
107 # define MAP_ANON (0)
108 static int fdzero;
109 # define MMAP_FD fdzero
110 # define INIT_MMAP() \
111 { if ((fdzero = open(_PATH_DEVZERO, O_RDWR, 0000)) == -1) \
112 wrterror("open of /dev/zero"); }
113 #endif /* __sparc__ */
114
115 /* Insert your combination here... */
116 #if defined(__FOOCPU__) && defined(__BAROS__)
117 # define malloc_minsize 16U
118 #endif /* __FOOCPU__ && __BAROS__ */
119
120 #ifndef ZEROSIZEPTR
121 #define ZEROSIZEPTR ((void *)(uintptr_t)(1UL << (malloc_pageshift - 1)))
122 #endif
123
124 /*
125 * No user serviceable parts behind this point.
126 */
127 #include <sys/types.h>
128 #include <sys/mman.h>
129 #include <errno.h>
130 #include <fcntl.h>
131 #include <paths.h>
132 #include <stddef.h>
133 #include <stdio.h>
134 #include <stdlib.h>
135 #include <string.h>
136 #include <unistd.h>
137
138 /*
139 * This structure describes a page worth of chunks.
140 */
141
struct pginfo {
	struct pginfo *next;	/* next on the free list */
	void *page;		/* Pointer to the page */
	u_short size;		/* size of this page's chunks */
	u_short shift;		/* How far to shift for this size chunks */
	u_short free;		/* How many free chunks */
	u_short total;		/* How many chunks in total */
	u_int bits[1];		/* Which chunks are free (bit set = free);
				 * grows past [1] — allocated oversize */
};
151
152 /*
153 * This structure describes a number of free pages.
154 */
155
struct pgfree {
	struct pgfree *next;	/* next run of free pages */
	struct pgfree *prev;	/* prev run of free pages */
	void *page;		/* pointer to free pages */
	void *end;		/* pointer to end of free pages */
	size_t size;		/* number of bytes free (always page multiple) */
};
163
164 /*
165 * How many bits per u_int in the bitmap.
166 * Change only if not 8 bits/byte
167 */
168 #define MALLOC_BITS ((int)(8*sizeof(u_int)))
169
170 /*
171 * Magic values to put in the page_directory
172 */
173 #define MALLOC_NOT_MINE ((struct pginfo*) 0)
174 #define MALLOC_FREE ((struct pginfo*) 1)
175 #define MALLOC_FIRST ((struct pginfo*) 2)
176 #define MALLOC_FOLLOW ((struct pginfo*) 3)
177 #define MALLOC_MAGIC ((struct pginfo*) 4)
178
179 /*
180 * Page size related parameters, computed at run-time.
181 */
182 static size_t malloc_pagesize;
183 static size_t malloc_pageshift;
184 static size_t malloc_pagemask;
185
186 #ifndef malloc_minsize
187 #define malloc_minsize 16U
188 #endif
189
190 #ifndef malloc_maxsize
191 #define malloc_maxsize ((malloc_pagesize)>>1)
192 #endif
193
194 #define pageround(foo) (((foo) + (malloc_pagemask))&(~(malloc_pagemask)))
195 #define ptr2idx(foo) \
196 (((size_t)(uintptr_t)(foo) >> malloc_pageshift)-malloc_origo)
197
198 #ifndef _MALLOC_LOCK
199 #define _MALLOC_LOCK()
200 #endif
201
202 #ifndef _MALLOC_UNLOCK
203 #define _MALLOC_UNLOCK()
204 #endif
205
206 #ifndef MMAP_FD
207 #define MMAP_FD (-1)
208 #endif
209
210 #ifndef INIT_MMAP
211 #define INIT_MMAP()
212 #endif
213
214 #ifndef MADV_FREE
215 #define MADV_FREE MADV_DONTNEED
216 #endif
217
218 /* Number of free pages we cache */
219 static size_t malloc_cache = 16;
220
221 /* The offset from pagenumber to index into the page directory */
222 static size_t malloc_origo;
223
224 /* The last index in the page directory we care about */
225 static size_t last_idx;
226
227 /* Pointer to page directory. Allocated "as if with" malloc */
228 static struct pginfo **page_dir;
229
230 /* How many slots in the page directory */
231 static size_t malloc_ninfo;
232
233 /* Free pages line up here */
234 static struct pgfree free_list;
235
236 /* Abort(), user doesn't handle problems. */
237 static int malloc_abort;
238
239 /* Are we trying to die ? */
240 static int suicide;
241
242 /* always realloc ? */
243 static int malloc_realloc;
244
245 /* pass the kernel a hint on free pages ? */
246 #if defined(MADV_FREE)
247 static int malloc_hint = 0;
248 #endif
249
250 /* xmalloc behaviour ? */
251 static int malloc_xmalloc;
252
253 /* sysv behaviour for malloc(0) ? */
254 static int malloc_sysv;
255
256 /* zero fill ? */
257 static int malloc_zero;
258
259 /* junk fill ? */
260 static int malloc_junk;
261
262 #ifdef HAS_UTRACE
263
264 /* utrace ? */
265 static int malloc_utrace;
266
267 struct ut { void *p; size_t s; void *r; };
268
269 #define UTRACE(a, b, c) \
270 if (malloc_utrace) { \
271 struct ut u; \
272 u.p=a; u.s = b; u.r=c; \
273 utrace(UTRACE_LABEL (void *) &u, sizeof u); \
274 }
275 #else /* !HAS_UTRACE */
276 #define UTRACE(a,b,c)
277 #endif /* HAS_UTRACE */
278
279 /* my last break. */
280 static void *malloc_brk;
281
282 /* one location cache for free-list holders */
283 static struct pgfree *px;
284
285 /* compile-time options */
286 const char *_malloc_options;
287
288 /* Name of the current public function */
289 static const char *malloc_func;
290
291 /* Macro for mmap */
292 #define MMAP(size) \
293 mmap(NULL, (size), PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, \
294 MMAP_FD, (off_t)0);
295
296 /*
297 * Necessary function declarations
298 */
299 static int extend_pgdir(size_t idx);
300 static void *imalloc(size_t size);
301 static void ifree(void *ptr);
302 static void *irealloc(void *ptr, size_t size);
303
/*
 * Default message sink for the allocator: emit the four message
 * fragments, in order, to standard error.  Replaceable by the
 * application through the _malloc_message hook defined below.
 */
static void
wrtmessage(const char *p1, const char *p2, const char *p3, const char *p4)
{
	const char *frag[4];
	size_t n;

	frag[0] = p1;
	frag[1] = p2;
	frag[2] = p3;
	frag[3] = p4;
	for (n = 0; n < 4; n++)
		write(STDERR_FILENO, frag[n], strlen(frag[n]));
}

void (*_malloc_message)(const char *p1, const char *p2, const char *p3,
	    const char *p4) = wrtmessage;
/*
 * Fatal allocator error: report via the configurable message hook and
 * abort().  Sets "suicide" first so re-entrant calls (e.g. free() from
 * an atexit/abort handler) bail out instead of recursing.
 */
static void
wrterror(const char *p)
{

	suicide = 1;
	_malloc_message(getprogname(), malloc_func, " error: ", p);
	abort();
}
324
325 static void
326 wrtwarning(const char *p)
327 {
328
329 /*
330 * Sensitive processes, somewhat arbitrarily defined here as setuid,
331 * setgid, root and wheel cannot afford to have malloc mistakes.
332 */
333 if (malloc_abort || issetugid() || getuid() == 0 || getgid() == 0)
334 wrterror(p);
335 }
336
337 /*
338 * Allocate a number of pages from the OS
339 */
/*
 * Allocate a number of pages from the OS via sbrk(2).  Rounds the
 * returned address up to a page boundary (grabbing the slack with a
 * second sbrk if needed) and grows the page directory to cover the new
 * range.  On any failure the break is rolled back and NULL is returned.
 */
static void *
map_pages(size_t pages)
{
	caddr_t result, rresult, tail;
	intptr_t bytes = pages << malloc_pageshift;

	/* Shift overflow check: a negative or wrapped byte count is bogus. */
	if (bytes < 0 || (size_t)bytes < pages) {
		errno = ENOMEM;
		return NULL;
	}

	if ((result = sbrk(bytes)) == (void *)-1)
		return NULL;

	/*
	 * Round to a page, in case sbrk(2) did not do this for us
	 */
	rresult = (caddr_t)pageround((size_t)(uintptr_t)result);
	if (result < rresult) {
		/* make sure we have enough space to fit bytes */
		if (sbrk((intptr_t)(rresult - result)) == (void *) -1) {
			/* we failed, put everything back */
			if (brk(result)) {
				wrterror("brk(2) failed [internal error]\n");
			}
		}
	}
	tail = rresult + (size_t)bytes;

	last_idx = ptr2idx(tail) - 1;
	malloc_brk = tail;

	/* Grow the page directory if the new range runs past its end. */
	if ((last_idx+1) >= malloc_ninfo && !extend_pgdir(last_idx)) {
		/* Directory growth failed: undo the sbrk and report failure. */
		malloc_brk = result;
		last_idx = ptr2idx(malloc_brk) - 1;
		/* Put back break point since we failed. */
		if (brk(malloc_brk))
			wrterror("brk(2) failed [internal error]\n");
		return 0;
	}

	return rresult;
}
383
384 /*
385 * Extend page directory
386 */
/*
 * Extend the page directory so it can hold at least "idx" entries.
 * Allocates a fresh, larger mapping, copies the old directory into it
 * and unmaps the old one.  Returns 1 on success, 0 (with errno set on
 * overflow) on failure.
 */
static int
extend_pgdir(size_t idx)
{
	struct pginfo **new, **old;
	size_t newlen, oldlen;

	/* check for overflow of the new mapping size computation */
	if ((((~(1UL << ((sizeof(size_t) * NBBY) - 1)) / sizeof(*page_dir)) + 1)
	    + (malloc_pagesize / sizeof *page_dir)) < idx) {
		errno = ENOMEM;
		return 0;
	}

	/* Make it this many pages */
	newlen = pageround(idx * sizeof *page_dir) + malloc_pagesize;

	/* remember the old mapping size */
	oldlen = malloc_ninfo * sizeof *page_dir;

	/*
	 * NOTE: we allocate new pages and copy the directory rather than tempt
	 * fate by trying to "grow" the region.. There is nothing to prevent
	 * us from accidentally re-mapping space that's been allocated by our caller
	 * via dlopen() or other mmap().
	 *
	 * The copy problem is not too bad, as there is 4K of page index per
	 * 4MB of malloc arena.
	 *
	 * We can totally avoid the copy if we open a file descriptor to associate
	 * the anon mappings with. Then, when we remap the pages at the new
	 * address, the old pages will be "magically" remapped.. But this means
	 * keeping open a "secret" file descriptor.....
	 */

	/* Get new pages */
	new = MMAP(newlen);
	if (new == MAP_FAILED)
		return 0;

	/* Copy the old stuff */
	memcpy(new, page_dir, oldlen);

	/* register the new size */
	malloc_ninfo = newlen / sizeof *page_dir;

	/* swap the pointers */
	old = page_dir;
	page_dir = new;

	/* Now free the old stuff */
	munmap(old, oldlen);
	return 1;
}
440
441 /*
442 * Initialize the world
443 */
/*
 * Initialize the world: compute page-size parameters, parse the malloc
 * option flags (from /etc/malloc.conf, the MALLOC_OPTIONS environment
 * variable, and the compiled-in _malloc_options string, in that order,
 * later sources overriding earlier ones), and allocate the initial page
 * directory.  Preserves the caller's errno.
 */
static void
malloc_init(void)
{
	const char *p;
	char b[64];
	size_t i;
	ssize_t j;
	int save_errno = errno;

	/*
	 * Compute page-size related variables.
	 */
	malloc_pagesize = (size_t)sysconf(_SC_PAGESIZE);
	malloc_pagemask = malloc_pagesize - 1;
	for (malloc_pageshift = 0;
	     (1UL << malloc_pageshift) != malloc_pagesize;
	     malloc_pageshift++)
		/* nothing */ ;

	INIT_MMAP();

#ifdef MALLOC_EXTRA_SANITY
	malloc_junk = 1;
#endif /* MALLOC_EXTRA_SANITY */

	for (i = 0; i < 3; i++) {
		if (i == 0) {
			/* Source 1: symlink target of /etc/malloc.conf. */
			j = readlink("/etc/malloc.conf", b, sizeof b - 1);
			if (j <= 0)
				continue;
			b[j] = '\0';
			p = b;
		} else if (i == 1 && issetugid() == 0) {
			/* Source 2: environment, but not for set-id programs. */
			p = getenv("MALLOC_OPTIONS");
		} else if (i == 1) {
			continue;
		} else {
			/* Source 3: compiled-in options string. */
			p = _malloc_options;
		}
		for (; p != NULL && *p != '\0'; p++) {
			switch (*p) {
			case '>': malloc_cache <<= 1; break;
			case '<': malloc_cache >>= 1; break;
			case 'a': malloc_abort = 0; break;
			case 'A': malloc_abort = 1; break;
			case 'h': malloc_hint = 0; break;
			case 'H': malloc_hint = 1; break;
			case 'r': malloc_realloc = 0; break;
			case 'R': malloc_realloc = 1; break;
			case 'j': malloc_junk = 0; break;
			case 'J': malloc_junk = 1; break;
#ifdef HAS_UTRACE
			case 'u': malloc_utrace = 0; break;
			case 'U': malloc_utrace = 1; break;
#endif
			case 'v': malloc_sysv = 0; break;
			case 'V': malloc_sysv = 1; break;
			case 'x': malloc_xmalloc = 0; break;
			case 'X': malloc_xmalloc = 1; break;
			case 'z': malloc_zero = 0; break;
			case 'Z': malloc_zero = 1; break;
			default:
				_malloc_message(getprogname(), malloc_func,
				    " warning: ", "unknown char in MALLOC_OPTIONS\n");
				break;
			}
		}
	}

	UTRACE(0, 0, 0);

	/*
	 * We want junk in the entire allocation, and zero only in the part
	 * the user asked for.
	 */
	if (malloc_zero)
		malloc_junk = 1;

	/* Allocate one page for the page directory */
	page_dir = MMAP(malloc_pagesize);

	if (page_dir == MAP_FAILED)
		wrterror("mmap(2) failed, check limits.\n");

	/*
	 * We need a maximum of malloc_pageshift buckets, steal these from the
	 * front of the page_directory;
	 */
	malloc_origo = pageround((size_t)(uintptr_t)sbrk((intptr_t)0))
	    >> malloc_pageshift;
	malloc_origo -= malloc_pageshift;

	malloc_ninfo = malloc_pagesize / sizeof *page_dir;

	/* Recalculate the cache size in bytes, and make sure it's nonzero */

	if (!malloc_cache)
		malloc_cache++;

	malloc_cache <<= malloc_pageshift;

	/*
	 * This is a nice hack from Kaleb Keithly (kaleb (at) x.org).
	 * We can sbrk(2) further back when we keep this on a low address.
	 */
	px = imalloc(sizeof *px);

	errno = save_errno;
}
553
554 /*
555 * Allocate a number of complete pages
556 */
/*
 * Allocate a number of complete pages: satisfy the request from the
 * free-page cache if possible, otherwise map fresh pages from the OS.
 * Marks the span MALLOC_FIRST/MALLOC_FOLLOW in the page directory.
 * Returns NULL (errno = ENOMEM) on rounding overflow or OS failure.
 */
static void *
malloc_pages(size_t size)
{
	void *p, *delay_free = NULL;
	size_t i;
	struct pgfree *pf;
	size_t idx;

	/* Round up to whole pages; detect wrap-around of the rounding. */
	idx = pageround(size);
	if (idx < size) {
		errno = ENOMEM;
		return NULL;
	} else
		size = idx;

	p = NULL;

	/* Look for free pages before asking for more */
	for(pf = free_list.next; pf; pf = pf->next) {

#ifdef MALLOC_EXTRA_SANITY
		if (pf->size & malloc_pagemask)
			wrterror("(ES): junk length entry on free_list.\n");
		if (!pf->size)
			wrterror("(ES): zero length entry on free_list.\n");
		if (pf->page == pf->end)
			wrterror("(ES): zero entry on free_list.\n");
		if (pf->page > pf->end)
			wrterror("(ES): sick entry on free_list.\n");
		if ((void*)pf->page >= (void*)sbrk(0))
			wrterror("(ES): entry on free_list past brk.\n");
		if (page_dir[ptr2idx(pf->page)] != MALLOC_FREE)
			wrterror("(ES): non-free first page on free-list.\n");
		if (page_dir[ptr2idx(pf->end)-1] != MALLOC_FREE)
			wrterror("(ES): non-free last page on free-list.\n");
#endif /* MALLOC_EXTRA_SANITY */

		if (pf->size < size)
			continue;

		if (pf->size == size) {
			/* Exact fit: unlink the run; free its holder later
			 * (ifree here could recurse into this free list). */
			p = pf->page;
			if (pf->next != NULL)
				pf->next->prev = pf->prev;
			pf->prev->next = pf->next;
			delay_free = pf;
			break;
		}

		/* Larger run: carve our pages off the front. */
		p = pf->page;
		pf->page = (char *)pf->page + size;
		pf->size -= size;
		break;
	}

#ifdef MALLOC_EXTRA_SANITY
	if (p != NULL && page_dir[ptr2idx(p)] != MALLOC_FREE)
		wrterror("(ES): allocated non-free page on free-list.\n");
#endif /* MALLOC_EXTRA_SANITY */

	size >>= malloc_pageshift;

	/* Map new pages */
	if (p == NULL)
		p = map_pages(size);

	if (p != NULL) {

		idx = ptr2idx(p);
		page_dir[idx] = MALLOC_FIRST;
		for (i=1;i<size;i++)
			page_dir[idx+i] = MALLOC_FOLLOW;

		if (malloc_junk)
			memset(p, SOME_JUNK, size << malloc_pageshift);
	}

	/* Dispose of the exact-fit holder now that the list is consistent;
	 * recycle it into the one-entry holder cache if that is empty. */
	if (delay_free) {
		if (px == NULL)
			px = delay_free;
		else
			ifree(delay_free);
	}

	return p;
}
643
644 /*
645 * Allocate a page of fragments
646 */
647
/*
 * Allocate a page of fragments: carve one fresh page into 2^bits-byte
 * chunks and hook its pginfo admin structure into page_dir[bits].
 * Small chunk sizes store the admin structure inside the page itself
 * (marking the stolen chunks used); larger ones allocate it separately.
 * Returns 1 on success, 0 on allocation failure.
 */
static inline int
malloc_make_chunks(int bits)
{
	struct pginfo *bp;
	void *pp;
	int i, k;
	long l;

	/* Allocate a new bucket */
	pp = malloc_pages(malloc_pagesize);
	if (pp == NULL)
		return 0;

	/* Find length of admin structure */
	l = (long)offsetof(struct pginfo, bits[0]);
	l += (long)sizeof bp->bits[0] *
	    (((malloc_pagesize >> bits)+MALLOC_BITS-1) / MALLOC_BITS);

	/* Don't waste more than two chunks on this */
	if ((1<<(bits)) <= l+l) {
		bp = (struct pginfo *)pp;
	} else {
		bp = imalloc((size_t)l);
		if (bp == NULL) {
			ifree(pp);
			return 0;
		}
	}

	bp->size = (1<<bits);
	bp->shift = bits;
	bp->total = bp->free = (u_short)(malloc_pagesize >> bits);
	bp->page = pp;

	/* set all valid bits in the bitmap */
	k = bp->total;
	i = 0;

	/* Do a bunch at a time */
	for(;k-i >= MALLOC_BITS; i += MALLOC_BITS)
		bp->bits[i / MALLOC_BITS] = ~0U;

	for(; i < k; i++)
		bp->bits[i/MALLOC_BITS] |= 1<<(i%MALLOC_BITS);

	if (bp == bp->page) {
		/* Mark the ones we stole for ourselves */
		for(i = 0; l > 0; i++) {
			bp->bits[i / MALLOC_BITS] &= ~(1 << (i % MALLOC_BITS));
			bp->free--;
			bp->total--;
			l -= (long)(1 << bits);
		}
	}

	/* MALLOC_LOCK */

	page_dir[ptr2idx(pp)] = bp;

	/* Push onto the bucket list for this chunk size. */
	bp->next = page_dir[bits];
	page_dir[bits] = bp;

	/* MALLOC_UNLOCK */

	return 1;
}
714
715 /*
716 * Allocate a fragment
717 */
/*
 * Allocate a fragment (sub-page allocation): round the request up to
 * the next power of two (at least malloc_minsize), take a free chunk
 * from the bucket for that size, and clear its bit in the page bitmap.
 * Returns NULL if a new bucket page cannot be allocated.
 */
static void *
malloc_bytes(size_t size)
{
	size_t i;
	int j;
	u_int u;
	struct pginfo *bp;
	size_t k;
	u_int *lp;

	/* Don't bother with anything less than this */
	if (size < malloc_minsize)
		size = malloc_minsize;


	/* Find the right bucket: j = ceil(log2(size)) */
	j = 1;
	i = size-1;
	while (i >>= 1)
		j++;

	/* If it's empty, make a page more of that size chunks */
	if (page_dir[j] == NULL && !malloc_make_chunks(j))
		return NULL;

	bp = page_dir[j];

	/* Find first word of bitmap which isn't empty */
	for (lp = bp->bits; !*lp; lp++)
		;

	/* Find that bit, and tweak it (clear it: chunk now in use) */
	u = 1;
	k = 0;
	while (!(*lp & u)) {
		u += u;
		k++;
	}
	*lp ^= u;

	/* If there are no more free, remove from free-list */
	if (!--bp->free) {
		page_dir[j] = bp->next;
		bp->next = NULL;
	}

	/* Adjust to the real offset of that chunk */
	k += (lp-bp->bits)*MALLOC_BITS;
	k <<= bp->shift;

	if (malloc_junk)
		memset((u_char*)bp->page + k, SOME_JUNK, (size_t)bp->size);

	return (u_char *)bp->page + k;
}
773
774 /*
775 * Allocate a piece of memory
776 */
777 static void *
778 imalloc(size_t size)
779 {
780 void *result;
781
782 if (suicide)
783 abort();
784
785 if ((size + malloc_pagesize) < size) /* Check for overflow */
786 result = NULL;
787 else if ((size + malloc_pagesize) >= (uintptr_t)page_dir)
788 result = NULL;
789 else if (size <= malloc_maxsize)
790 result = malloc_bytes(size);
791 else
792 result = malloc_pages(size);
793
794 if (malloc_abort && result == NULL)
795 wrterror("allocation failed.\n");
796
797 if (malloc_zero && result != NULL)
798 memset(result, 0, size);
799
800 return result;
801 }
802
803 /*
804 * Change the size of an allocation.
805 */
/*
 * Change the size of an allocation.  Classifies the pointer via the
 * page directory (page run vs. chunk), keeps the allocation in place
 * when the new size still fits its block (unless the 'R' option forces
 * reallocation), otherwise allocates anew, copies min(old,new) bytes
 * and frees the original.  Returns NULL on bogus pointers or failure.
 */
static void *
irealloc(void *ptr, size_t size)
{
	void *p;
	size_t osize, idx;
	struct pginfo **mp;
	size_t i;

	if (suicide)
		abort();

	idx = ptr2idx(ptr);

	if (idx < malloc_pageshift) {
		wrtwarning("junk pointer, too low to make sense.\n");
		return 0;
	}

	if (idx > last_idx) {
		wrtwarning("junk pointer, too high to make sense.\n");
		return 0;
	}

	mp = &page_dir[idx];

	if (*mp == MALLOC_FIRST) {		/* Page allocation */

		/* Check the pointer: must be page-aligned */
		if ((size_t)(uintptr_t)ptr & malloc_pagemask) {
			wrtwarning("modified (page-) pointer.\n");
			return NULL;
		}

		/* Find the size in bytes by walking the FOLLOW entries */
		for (osize = malloc_pagesize; *++mp == MALLOC_FOLLOW;)
			osize += malloc_pagesize;

		if (!malloc_realloc &&			/* unless we have to, */
		    size <= osize &&			/* .. or are too small, */
		    size > (osize - malloc_pagesize)) {	/* .. or can free a page, */
			if (malloc_junk)
				memset((u_char *)ptr + size, SOME_JUNK, osize-size);
			return ptr;		/* don't do anything. */
		}

	} else if (*mp >= MALLOC_MAGIC) {	/* Chunk allocation */

		/* Check the pointer for sane values: chunk-size aligned */
		if (((size_t)(uintptr_t)ptr & ((*mp)->size-1))) {
			wrtwarning("modified (chunk-) pointer.\n");
			return NULL;
		}

		/* Find the chunk index in the page */
		i = ((size_t)(uintptr_t)ptr & malloc_pagemask) >> (*mp)->shift;

		/* Verify that it isn't a free chunk already */
		if ((*mp)->bits[i/MALLOC_BITS] & (1UL << (i % MALLOC_BITS))) {
			wrtwarning("chunk is already free.\n");
			return NULL;
		}

		osize = (*mp)->size;

		if (!malloc_realloc &&		/* Unless we have to, */
		    size <= osize &&		/* ..or are too small, */
		    (size > osize / 2 ||	/* ..or could use a smaller size, */
		    osize == malloc_minsize)) {	/* ..(if there is one) */
			if (malloc_junk)
				memset((u_char *)ptr + size, SOME_JUNK, osize-size);
			return ptr;		/* ..Don't do anything */
		}

	} else {
		wrtwarning("pointer to wrong page.\n");
		return NULL;
	}

	p = imalloc(size);

	if (p != NULL) {
		/* copy the lesser of the two sizes, and free the old one */
		if (!size || !osize)
			;
		else if (osize < size)
			memcpy(p, ptr, osize);
		else
			memcpy(p, ptr, size);
		ifree(ptr);
	}
	return p;
}
898
899 /*
900 * Free a sequence of pages
901 */
902
/*
 * Free a sequence of pages starting at ptr (directory index idx,
 * current directory entry info).  Marks the run MALLOC_FREE, inserts it
 * into the address-ordered free list — coalescing with adjacent runs —
 * and, when the cache limit is exceeded at the top of the heap, returns
 * the surplus to the OS by lowering the break.
 */
static inline void
free_pages(void *ptr, size_t idx, struct pginfo *info)
{
	size_t i;
	struct pgfree *pf, *pt=NULL;
	size_t l;
	void *tail;

	if (info == MALLOC_FREE) {
		wrtwarning("page is already free.\n");
		return;
	}

	if (info != MALLOC_FIRST) {
		wrtwarning("pointer to wrong page.\n");
		return;
	}

	if ((size_t)(uintptr_t)ptr & malloc_pagemask) {
		wrtwarning("modified (page-) pointer.\n");
		return;
	}

	/* Count how many pages and mark them free at the same time */
	page_dir[idx] = MALLOC_FREE;
	for (i = 1; page_dir[idx+i] == MALLOC_FOLLOW; i++)
		page_dir[idx + i] = MALLOC_FREE;

	l = i << malloc_pageshift;

	if (malloc_junk)
		memset(ptr, SOME_JUNK, l);

	if (malloc_hint)
		madvise(ptr, l, MADV_FREE);

	tail = (char *)ptr+l;

	/* add to free-list; px is the cached spare holder */
	if (px == NULL)
		px = imalloc(sizeof *px);	/* This cannot fail... */
	px->page = ptr;
	px->end = tail;
	px->size = l;
	if (free_list.next == NULL) {

		/* Nothing on free list, put this at head */
		px->next = free_list.next;
		px->prev = &free_list;
		free_list.next = px;
		pf = px;
		px = NULL;

	} else {

		/* Find the right spot, leave pf pointing to the modified entry. */
		tail = (char *)ptr+l;

		for(pf = free_list.next; pf->end < ptr && pf->next != NULL;
		    pf = pf->next)
			; /* Race ahead here */

		if (pf->page > tail) {
			/* Insert before entry */
			px->next = pf;
			px->prev = pf->prev;
			pf->prev = px;
			px->prev->next = px;
			pf = px;
			px = NULL;
		} else if (pf->end == ptr ) {
			/* Append to the previous entry */
			pf->end = (char *)pf->end + l;
			pf->size += l;
			if (pf->next != NULL && pf->end == pf->next->page ) {
				/* And collapse the next too. */
				pt = pf->next;
				pf->end = pt->end;
				pf->size += pt->size;
				pf->next = pt->next;
				if (pf->next != NULL)
					pf->next->prev = pf;
			}
		} else if (pf->page == tail) {
			/* Prepend to entry */
			pf->size += l;
			pf->page = ptr;
		} else if (pf->next == NULL) {
			/* Append at tail of chain */
			px->next = NULL;
			px->prev = pf;
			pf->next = px;
			pf = px;
			px = NULL;
		} else {
			wrterror("freelist is destroyed.\n");
		}
	}

	/* Return something to OS ? */
	if (pf->next == NULL &&			/* If we're the last one, */
	    pf->size > malloc_cache &&		/* ..and the cache is full, */
	    pf->end == malloc_brk &&		/* ..and none behind us, */
	    malloc_brk == sbrk((intptr_t)0)) {	/* ..and it's OK to do... */

		/*
		 * Keep the cache intact.  Notice that the '>' above guarantees that
		 * the pf will always have at least one page afterwards.
		 */
		pf->end = (char *)pf->page + malloc_cache;
		pf->size = malloc_cache;

		brk(pf->end);
		malloc_brk = pf->end;

		idx = ptr2idx(pf->end);

		/* Disown the returned pages in the directory. */
		for(i=idx;i <= last_idx;)
			page_dir[i++] = MALLOC_NOT_MINE;

		last_idx = idx - 1;

		/* XXX: We could realloc/shrink the pagedir here I guess. */
	}
	/* Free the holder absorbed by the forward-coalesce, if any. */
	if (pt != NULL)
		ifree(pt);
}
1030
1031 /*
1032 * Free a chunk, and possibly the page it's on, if the page becomes empty.
1033 */
1034
/*
 * Free a chunk, and possibly the page it's on, if the page becomes
 * empty.  Sets the chunk's bit back in the page bitmap; a page that
 * transitions full -> non-full is re-inserted into its bucket list
 * (in address order), and a fully free page is released along with its
 * separately-allocated pginfo, if it had one.
 */
static inline void
free_bytes(void *ptr, size_t idx, struct pginfo *info)
{
	size_t i;
	struct pginfo **mp;
	void *vp;

	/* Find the chunk number on the page */
	i = ((size_t)(uintptr_t)ptr & malloc_pagemask) >> info->shift;

	if (((size_t)(uintptr_t)ptr & (info->size-1))) {
		wrtwarning("modified (chunk-) pointer.\n");
		return;
	}

	if (info->bits[i/MALLOC_BITS] & (1UL << (i % MALLOC_BITS))) {
		wrtwarning("chunk is already free.\n");
		return;
	}

	if (malloc_junk)
		memset(ptr, SOME_JUNK, (size_t)info->size);

	/* Mark the chunk free again. */
	info->bits[i/MALLOC_BITS] |= (u_int)(1UL << (i % MALLOC_BITS));
	info->free++;

	mp = page_dir + info->shift;

	if (info->free == 1) {

		/* Page became non-full */

		/* NOTE(review): mp was already set to this just above. */
		mp = page_dir + info->shift;
		/* Insert in address order */
		while (*mp && (*mp)->next && (*mp)->next->page < info->page)
			mp = &(*mp)->next;
		info->next = *mp;
		*mp = info;
		return;
	}

	if (info->free != info->total)
		return;

	/* Page became entirely free: find & remove it from the queue */
	while (*mp != info) {
		mp = &((*mp)->next);
#ifdef MALLOC_EXTRA_SANITY
		if (!*mp)
			wrterror("(ES): Not on queue.\n");
#endif /* MALLOC_EXTRA_SANITY */
	}
	*mp = info->next;

	/* Free the page & the info structure if need be */
	page_dir[idx] = MALLOC_FIRST;
	vp = info->page;		/* Order is important ! */
	if(vp != (void*)info)
		ifree(info);
	ifree(vp);
}
1096
1097 static void
1098 ifree(void *ptr)
1099 {
1100 struct pginfo *info;
1101 size_t idx;
1102
1103 /* This is legal */
1104 if (ptr == NULL)
1105 return;
1106
1107 /* If we're already sinking, don't make matters any worse. */
1108 if (suicide)
1109 return;
1110
1111 idx = ptr2idx(ptr);
1112
1113 if (idx < malloc_pageshift) {
1114 wrtwarning("junk pointer, too low to make sense.\n");
1115 return;
1116 }
1117
1118 if (idx > last_idx) {
1119 wrtwarning("junk pointer, too high to make sense.\n");
1120 return;
1121 }
1122
1123 info = page_dir[idx];
1124
1125 if (info < MALLOC_MAGIC)
1126 free_pages(ptr, idx, info);
1127 else
1128 free_bytes(ptr, idx, info);
1129 return;
1130 }
1131
/*
 * Common public entry point behind malloc/realloc/free/calloc.
 * Serializes access, performs one-time initialization, detects
 * re-entrant calls (e.g. from a signal handler), maps the size-0 /
 * NULL-pointer combinations onto the right internal operation, and
 * applies the 'X' (xmalloc) and errno conventions on failure.
 */
static void *
pubrealloc(void *ptr, size_t size, const char *func)
{
	void *r;
	int err = 0;
	static int malloc_active;	/* Recusion flag for public interface. */
	static unsigned malloc_started;	/* Set when initialization has been done */

	/*
	 * If a thread is inside our code with a functional lock held, and then
	 * catches a signal which calls us again, we would get a deadlock if the
	 * lock is not of a recursive type.
	 */
	_MALLOC_LOCK();
	malloc_func = func;
	if (malloc_active > 0) {
		if (malloc_active == 1) {
			wrtwarning("recursive call\n");
			malloc_active = 2;
		}
		_MALLOC_UNLOCK();
		errno = EINVAL;
		return (NULL);
	}
	malloc_active = 1;

	if (!malloc_started) {
		if (ptr != NULL) {
			/* Can't free/realloc before the first malloc. */
			wrtwarning("malloc() has never been called\n");
			malloc_active = 0;
			_MALLOC_UNLOCK();
			errno = EINVAL;
			return (NULL);
		}
		malloc_init();
		malloc_started = 1;
	}

	/* The zero-size sentinel is never a real allocation. */
	if (ptr == ZEROSIZEPTR)
		ptr = NULL;
	if (malloc_sysv && !size) {
		/* SysV semantics: malloc(0)/realloc(p, 0) yields NULL. */
		if (ptr != NULL)
			ifree(ptr);
		r = NULL;
	} else if (!size) {
		/* Default semantics: hand out the shared sentinel pointer. */
		if (ptr != NULL)
			ifree(ptr);
		r = ZEROSIZEPTR;
	} else if (ptr == NULL) {
		r = imalloc(size);
		err = (r == NULL);
	} else {
		r = irealloc(ptr, size);
		err = (r == NULL);
	}
	UTRACE(ptr, size, r);
	malloc_active = 0;
	_MALLOC_UNLOCK();
	if (malloc_xmalloc && err)
		wrterror("out of memory\n");
	if (err)
		errno = ENOMEM;
	return (r);
}
1196
1197 /*
1198 * These are the public exported interface routines.
1199 */
1200
/*
 * Standard malloc(3): everything funnels through pubrealloc().
 */
void *
malloc(size_t size)
{

	return pubrealloc(NULL, size, " in malloc():");
}
1207
/*
 * posix_memalign(3): allocate "size" bytes whose address is a multiple
 * of "alignment"; "alignment" must be a power of two no smaller than
 * sizeof(void *).
 *
 * Returns 0 and stores the result in *memptr on success, EINVAL for a
 * bad alignment, ENOMEM on allocation failure.  errno is preserved
 * across the call, as POSIX requires.
 */
int
posix_memalign(void **memptr, size_t alignment, size_t size)
{
	int err;
	void *result;

	/* Make sure that alignment is a large enough power of 2. */
	if (((alignment - 1) & alignment) != 0 || alignment < sizeof(void *))
		return EINVAL;

	/*
	 * (size | alignment) is enough to assure the requested alignment,
	 * since the allocator always allocates power-of-two blocks, which
	 * are naturally aligned to their size.  Note the bitwise OR: it
	 * rounds the request up to at least "alignment" bytes.  (The
	 * previous "size & alignment" could shrink the request — even to
	 * zero — and returned storage with no alignment guarantee at all.)
	 */
	err = errno; /* Protect errno against changes in pubrealloc(). */
	result = pubrealloc(NULL, (size | alignment), " in posix_memalign()");
	errno = err;

	if (result == NULL)
		return ENOMEM;

	*memptr = result;
	return 0;
}
1232
/*
 * Standard calloc(3): allocate num*size bytes, zero-filled, guarding
 * against overflow of the multiplication.
 */
void *
calloc(size_t num, size_t size)
{
	size_t total;
	void *p;

	total = num * size;
	/* Overflow check: the division only undoes the multiply if it fit. */
	if (size != 0 && total / size != num) {
		errno = ENOMEM;
		return (NULL);
	}

	p = pubrealloc(NULL, total, " in calloc():");

	if (p != NULL)
		memset(p, 0, total);

	return p;
}
1251
/*
 * Standard free(3), implemented as a zero-size pubrealloc().
 */
void
free(void *ptr)
{

	pubrealloc(ptr, 0, " in free():");
}
1258
/*
 * Standard realloc(3): everything funnels through pubrealloc().
 */
void *
realloc(void *ptr, size_t size)
{

	return pubrealloc(ptr, size, " in realloc():");
}
1265
1266 /*
1267 * Begin library-private functions, used by threading libraries for protection
1268 * of malloc during fork(). These functions are only called if the program is
1269 * running in threaded mode, so there is no need to check whether the program
1270 * is threaded here.
1271 */
1272
/*
 * Acquire the malloc lock before fork() so the child does not inherit
 * a lock held by another thread mid-allocation.
 */
void
_malloc_prefork(void)
{

	_MALLOC_LOCK();
}
1279
/*
 * Release the malloc lock after fork(), in both parent and child.
 */
void
_malloc_postfork(void)
{

	_MALLOC_UNLOCK();
}
1286