malloc.c revision 1.36.2.4 1 /* $NetBSD: malloc.c,v 1.36.2.4 2002/03/11 21:10:15 nathanw Exp $ */
2
3 /*
4 * ----------------------------------------------------------------------------
5 * "THE BEER-WARE LICENSE" (Revision 42):
6 * <phk (at) FreeBSD.ORG> wrote this file. As long as you retain this notice you
7 * can do whatever you want with this stuff. If we meet some day, and you think
8 * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
9 * ----------------------------------------------------------------------------
10 *
11 * From FreeBSD: malloc.c,v 1.43 1998/09/30 06:13:59 jb
12 *
13 */
14
15 /*
16 * Defining MALLOC_EXTRA_SANITY will enable extra checks which are related
17 * to internal conditions and consistency in malloc.c. This has a
18 * noticeable runtime performance hit, and generally will not do you
19 * any good unless you fiddle with the internals of malloc or want
20 * to catch random pointer corruption as early as possible.
21 */
22 #ifndef MALLOC_EXTRA_SANITY
23 #undef MALLOC_EXTRA_SANITY
24 #endif
25
26 /*
27 * What to use for Junk. This is the byte value we use to fill with
28 * when the 'J' option is enabled.
29 */
30 #define SOME_JUNK 0xd0 /* as in "Duh" :-) */
31
32 /*
33 * The basic parameters you can tweak.
34 *
35 * malloc_minsize minimum size of an allocation in bytes.
36 * If this is too small it's too much work
37 * to manage them. This is also the smallest
38 * unit of alignment used for the storage
39 * returned by malloc/realloc.
40 *
41 */
42
43 #if defined(__FreeBSD__)
44 # if defined(__i386__)
45 # define malloc_minsize 16U
46 # endif
47 # if defined(__alpha__)
48 # define malloc_minsize 16U
49 # endif
50 # define HAS_UTRACE
51 # define UTRACE_LABEL
52
53 #include <sys/cdefs.h>
54 void utrace __P((struct ut *, int));
55
56 /*
57 * Make malloc/free/realloc thread-safe in libc for use with
58 * kernel threads.
59 */
60 # include "libc_private.h"
61 # include "spinlock.h"
62 static spinlock_t thread_lock = _SPINLOCK_INITIALIZER;
63 # define THREAD_LOCK() if (__isthreaded) _SPINLOCK(&thread_lock);
64 # define THREAD_UNLOCK() if (__isthreaded) _SPINUNLOCK(&thread_lock);
65 #endif /* __FreeBSD__ */
66
67 #if defined(__NetBSD__)
68 # define malloc_minsize 16U
69 # define HAS_UTRACE
70 # define UTRACE_LABEL "malloc",
71 #include <sys/cdefs.h>
72 #include <sys/types.h>
73 int utrace __P((const char *, void *, size_t));
74
75 #include <pthread.h>
76 extern int __isthreaded;
77 static pthread_mutex_t thread_lock = PTHREAD_MUTEX_INITIALIZER;
78 #define THREAD_LOCK() if (__isthreaded) pthread_mutex_lock(&thread_lock);
79 #define THREAD_UNLOCK() if (__isthreaded) pthread_mutex_unlock(&thread_lock);
80 #endif /* __NetBSD__ */
81
82 #if defined(__sparc__) && defined(sun)
83 # define malloc_minsize 16U
84 # define MAP_ANON (0)
85 static int fdzero;
86 # define MMAP_FD fdzero
87 # define INIT_MMAP() \
88 { if ((fdzero=open("/dev/zero", O_RDWR, 0000)) == -1) \
89 wrterror("open of /dev/zero"); }
90 #endif /* __sparc__ */
91
92 /* Insert your combination here... */
93 #if defined(__FOOCPU__) && defined(__BAROS__)
94 # define malloc_minsize 16U
95 #endif /* __FOOCPU__ && __BAROS__ */
96
97
98 /*
99 * No user serviceable parts behind this point.
100 */
101 #include "namespace.h"
102 #include <sys/types.h>
103 #include <sys/mman.h>
104 #include <errno.h>
105 #include <fcntl.h>
106 #include <stddef.h>
107 #include <stdint.h>
108 #include <stdio.h>
109 #include <stdlib.h>
110 #include <string.h>
111 #include <unistd.h>
112
113 /*
114 * This structure describes a page worth of chunks.
115 */
116
struct pginfo {
    struct pginfo *next;    /* next on the free list */
    void *page;             /* Pointer to the page */
    u_short size;           /* size of this page's chunks (bytes, power of 2) */
    u_short shift;          /* How far to shift for this size chunks (log2 of size) */
    u_short free;           /* How many free chunks */
    u_short total;          /* How many chunks in total */
    u_int bits[1];          /* Which chunks are free (1 = free); grows past the
                             * declared [1] — allocated oversized, see
                             * malloc_make_chunks() */
};
126
127 /*
128 * This structure describes a number of free pages.
129 */
130
struct pgfree {
    struct pgfree *next;    /* next run of free pages (address order) */
    struct pgfree *prev;    /* prev run of free pages */
    void *page;             /* pointer to first free page of the run */
    void *end;              /* pointer one past the end of the run */
    size_t size;            /* number of bytes free (== end - page) */
};
138
139 /*
140 * How many bits per u_int in the bitmap.
141 * Change only if not 8 bits/byte
142 */
143 #define MALLOC_BITS (8*sizeof(u_int))
144
145 /*
146 * Magic values to put in the page_directory
147 */
148 #define MALLOC_NOT_MINE ((struct pginfo*) 0)
149 #define MALLOC_FREE ((struct pginfo*) 1)
150 #define MALLOC_FIRST ((struct pginfo*) 2)
151 #define MALLOC_FOLLOW ((struct pginfo*) 3)
152 #define MALLOC_MAGIC ((struct pginfo*) 4)
153
154 /*
155 * Page size related parameters, computed at run-time.
156 */
157 static size_t malloc_pagesize;
158 static size_t malloc_pageshift;
159 static size_t malloc_pagemask;
160
161 #ifndef malloc_minsize
162 #define malloc_minsize 16U
163 #endif
164
165 #ifndef malloc_maxsize
166 #define malloc_maxsize ((malloc_pagesize)>>1)
167 #endif
168
169 #define pageround(foo) (((foo) + (malloc_pagemask))&(~(malloc_pagemask)))
170 #define ptr2idx(foo) \
171 (((size_t)(uintptr_t)(foo) >> malloc_pageshift)-malloc_origo)
172
173 #ifndef THREAD_LOCK
174 #define THREAD_LOCK()
175 #endif
176
177 #ifndef THREAD_UNLOCK
178 #define THREAD_UNLOCK()
179 #endif
180
181 #ifndef MMAP_FD
182 #define MMAP_FD (-1)
183 #endif
184
185 #ifndef INIT_MMAP
186 #define INIT_MMAP()
187 #endif
188
189 #ifndef MADV_FREE
190 #define MADV_FREE MADV_DONTNEED
191 #endif
192
193 /* Set when initialization has been done */
194 static unsigned malloc_started;
195
/* Recursion flag for public interface. */
197 static int malloc_active;
198
199 /* Number of free pages we cache */
200 static unsigned malloc_cache = 16;
201
202 /* The offset from pagenumber to index into the page directory */
203 static size_t malloc_origo;
204
205 /* The last index in the page directory we care about */
206 static size_t last_idx;
207
208 /* Pointer to page directory. Allocated "as if with" malloc */
209 static struct pginfo **page_dir;
210
211 /* How many slots in the page directory */
212 static unsigned malloc_ninfo;
213
214 /* Free pages line up here */
215 static struct pgfree free_list;
216
217 /* Abort(), user doesn't handle problems. */
218 static int malloc_abort;
219
220 /* Are we trying to die ? */
221 static int suicide;
222
223 /* always realloc ? */
224 static int malloc_realloc;
225
226 /* pass the kernel a hint on free pages ? */
227 static int malloc_hint = 0;
228
229 /* xmalloc behaviour ? */
230 static int malloc_xmalloc;
231
232 /* sysv behaviour for malloc(0) ? */
233 static int malloc_sysv;
234
235 /* zero fill ? */
236 static int malloc_zero;
237
238 /* junk fill ? */
239 static int malloc_junk;
240
241 #ifdef HAS_UTRACE
242
243 /* utrace ? */
244 static int malloc_utrace;
245
246 struct ut { void *p; size_t s; void *r; };
247
248 #define UTRACE(a, b, c) \
249 if (malloc_utrace) { \
250 struct ut u; \
251 u.p=a; u.s = b; u.r=c; \
252 utrace(UTRACE_LABEL (void *) &u, sizeof u); \
253 }
254 #else /* !HAS_UTRACE */
255 #define UTRACE(a,b,c)
256 #endif /* HAS_UTRACE */
257
258 /* my last break. */
259 static void *malloc_brk;
260
261 /* one location cache for free-list holders */
262 static struct pgfree *px;
263
264 /* compile-time options */
265 char *malloc_options;
266
267 /* Name of the current public function */
268 static char *malloc_func;
269
270 /* Macro for mmap */
271 #define MMAP(size) \
272 mmap(0, (size), PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, \
273 MMAP_FD, (off_t)0);
274
275 /*
276 * Necessary function declarations
277 */
278 static int extend_pgdir(size_t idx);
279 static void *imalloc(size_t size);
280 static void ifree(void *ptr);
281 static void *irealloc(void *ptr, size_t size);
282
283 static void
284 wrterror(char *p)
285 {
286 const char *progname = getprogname();
287 char *q = " error: ";
288 write(STDERR_FILENO, progname, strlen(progname));
289 write(STDERR_FILENO, malloc_func, strlen(malloc_func));
290 write(STDERR_FILENO, q, strlen(q));
291 write(STDERR_FILENO, p, strlen(p));
292 suicide = 1;
293 abort();
294 }
295
296 static void
297 wrtwarning(char *p)
298 {
299 const char *progname = getprogname();
300 char *q = " warning: ";
301 if (malloc_abort)
302 wrterror(p);
303 write(STDERR_FILENO, progname, strlen(progname));
304 write(STDERR_FILENO, malloc_func, strlen(malloc_func));
305 write(STDERR_FILENO, q, strlen(q));
306 write(STDERR_FILENO, p, strlen(p));
307 }
308
309
310 /*
311 * Allocate a number of pages from the OS
312 */
/*
 * Allocate `pages' pages from the OS via sbrk(2).
 * Returns a page-aligned pointer to the new region, or NULL on failure
 * (errno set to ENOMEM on overflow).  Updates malloc_brk and last_idx,
 * growing the page directory when necessary.
 */
static void *
map_pages(size_t pages)
{
    caddr_t result, rresult, tail;
    intptr_t bytes = pages << malloc_pageshift;

    /* Reject byte counts that wrapped the signed/size_t range. */
    if (bytes < 0 || (size_t)bytes < pages) {
        errno = ENOMEM;
        return NULL;
    }

    if ((result = sbrk(bytes)) == (void *)-1)
        return NULL;

    /*
     * Round to a page, in case sbrk(2) did not do this for us
     */
    rresult = (caddr_t)pageround((size_t)(uintptr_t)result);
    if (result < rresult) {
        /* make sure we have enough space to fit bytes */
        if (sbrk((intptr_t)(rresult - result)) == (void *) -1) {
            /* we failed, put everything back */
            if (brk(result)) {
                wrterror("brk(2) failed [internal error]\n");
            }
        }
    }
    tail = rresult + (size_t)bytes;

    last_idx = ptr2idx(tail) - 1;
    malloc_brk = tail;

    /* Grow the page directory if the new pages fall past its end. */
    if ((last_idx+1) >= malloc_ninfo && !extend_pgdir(last_idx)) {
        malloc_brk = result;
        last_idx = ptr2idx(malloc_brk) - 1;
        /* Put back break point since we failed. */
        if (brk(malloc_brk))
            wrterror("brk(2) failed [internal error]\n");
        return 0;
    }

    return rresult;
}
356
357 /*
358 * Extend page directory
359 */
/*
 * Extend the page directory so it can describe page index `idx'.
 * Allocates a fresh, larger mapping, copies the old directory into it,
 * and unmaps the old one.  Returns 1 on success, 0 on failure (errno
 * set to ENOMEM on overflow).
 */
static int
extend_pgdir(size_t idx)
{
    struct pginfo **new, **old;
    size_t newlen, oldlen;

    /* check for overflow of the newlen computation below */
    if ((((~(1UL << ((sizeof(size_t) * NBBY) - 1)) / sizeof(*page_dir)) + 1)
        + (malloc_pagesize / sizeof *page_dir)) < idx) {
        errno = ENOMEM;
        return 0;
    }

    /* Make it this many pages */
    newlen = pageround(idx * sizeof *page_dir) + malloc_pagesize;

    /* remember the old mapping size */
    oldlen = malloc_ninfo * sizeof *page_dir;

    /*
     * NOTE: we allocate new pages and copy the directory rather than tempt
     * fate by trying to "grow" the region.. There is nothing to prevent
     * us from accidently re-mapping space that's been allocated by our caller
     * via dlopen() or other mmap().
     *
     * The copy problem is not too bad, as there is 4K of page index per
     * 4MB of malloc arena.
     *
     * We can totally avoid the copy if we open a file descriptor to associate
     * the anon mappings with.  Then, when we remap the pages at the new
     * address, the old pages will be "magically" remapped..  But this means
     * keeping open a "secret" file descriptor.....
     */

    /* Get new pages */
    new = (struct pginfo**) MMAP(newlen);
    if (new == (struct pginfo **)-1)
        return 0;

    /* Copy the old stuff */
    memcpy(new, page_dir, oldlen);

    /* register the new size */
    malloc_ninfo = newlen / sizeof *page_dir;

    /* swap the pointers */
    old = page_dir;
    page_dir = new;

    /* Now free the old stuff */
    munmap(old, oldlen);
    return 1;
}
413
414 /*
415 * Initialize the world
416 */
/*
 * Initialize the world: compute the page-size constants, parse the
 * three option sources, and set up the page directory and free-page
 * cache.  Called lazily from malloc()/realloc() on first use.
 */
static void
malloc_init (void)
{
    char *p, b[64];
    int i, j;
    int errnosave;

    /*
     * Compute page-size related variables.
     */
    malloc_pagesize = (size_t)sysconf(_SC_PAGESIZE);
    malloc_pagemask = malloc_pagesize - 1;
    /* The page size is a power of two; find its log2. */
    for (malloc_pageshift = 0;
         (1UL << malloc_pageshift) != malloc_pagesize;
         malloc_pageshift++)
        /* nothing */ ;

    INIT_MMAP();

#ifdef MALLOC_EXTRA_SANITY
    malloc_junk = 1;
#endif /* MALLOC_EXTRA_SANITY */

    /*
     * Option sources, in increasing priority:
     *   0: the symlink target of /etc/malloc.conf
     *   1: the MALLOC_OPTIONS environment variable
     *   2: the global variable malloc_options
     */
    for (i = 0; i < 3; i++) {
        if (i == 0) {
            /* readlink(2) may clobber errno; preserve it. */
            errnosave = errno;
            j = readlink("/etc/malloc.conf", b, sizeof b - 1);
            errno = errnosave;
            if (j <= 0)
                continue;
            b[j] = '\0';
            p = b;
        } else if (i == 1) {
            p = getenv("MALLOC_OPTIONS");
        } else {
            p = malloc_options;
        }
        /* Lower case turns an option off, upper case turns it on. */
        for (; p && *p; p++) {
            switch (*p) {
            case '>': malloc_cache <<= 1; break;
            case '<': malloc_cache >>= 1; break;
            case 'a': malloc_abort = 0; break;
            case 'A': malloc_abort = 1; break;
            case 'h': malloc_hint = 0; break;
            case 'H': malloc_hint = 1; break;
            case 'r': malloc_realloc = 0; break;
            case 'R': malloc_realloc = 1; break;
            case 'j': malloc_junk = 0; break;
            case 'J': malloc_junk = 1; break;
#ifdef HAS_UTRACE
            case 'u': malloc_utrace = 0; break;
            case 'U': malloc_utrace = 1; break;
#endif
            case 'v': malloc_sysv = 0; break;
            case 'V': malloc_sysv = 1; break;
            case 'x': malloc_xmalloc = 0; break;
            case 'X': malloc_xmalloc = 1; break;
            case 'z': malloc_zero = 0; break;
            case 'Z': malloc_zero = 1; break;
            default:
                /* Warn without aborting, whatever the 'A' flag says. */
                j = malloc_abort;
                malloc_abort = 0;
                wrtwarning("unknown char in MALLOC_OPTIONS\n");
                malloc_abort = j;
                break;
            }
        }
    }

    UTRACE(0, 0, 0);

    /*
     * We want junk in the entire allocation, and zero only in the part
     * the user asked for.
     */
    if (malloc_zero)
        malloc_junk=1;

    /*
     * If we run with junk (or implicitly from above: zero), we want to
     * force realloc() to get new storage, so we can DTRT with it.
     */
    if (malloc_junk)
        malloc_realloc=1;

    /* Allocate one page for the page directory */
    page_dir = (struct pginfo **) MMAP(malloc_pagesize);

    if (page_dir == (struct pginfo **) -1)
        wrterror("mmap(2) failed, check limits.\n");

    /*
     * We need a maximum of malloc_pageshift buckets, steal these from the
     * front of the page_directory;
     */
    malloc_origo = pageround((size_t)(uintptr_t)sbrk((intptr_t)0))
        >> malloc_pageshift;
    malloc_origo -= malloc_pageshift;

    malloc_ninfo = malloc_pagesize / sizeof *page_dir;

    /* Recalculate the cache size in bytes, and make sure it's nonzero */

    if (!malloc_cache)
        malloc_cache++;

    malloc_cache <<= malloc_pageshift;

    /*
     * This is a nice hack from Kaleb Keithly (kaleb (at) x.org).
     * We can sbrk(2) further back when we keep this on a low address.
     */
    px = (struct pgfree *) imalloc (sizeof *px);

    /* Been here, done that */
    malloc_started++;
}
534
535 /*
536 * Allocate a number of complete pages
537 */
/*
 * Allocate a run of complete pages.
 * `size' is rounded up to a whole number of pages; the run is taken
 * from the free-page cache when a large-enough entry exists, otherwise
 * new pages are obtained with map_pages().  Returns NULL on failure.
 */
static void *
malloc_pages(size_t size)
{
    void *p, *delay_free = 0;
    size_t i;
    struct pgfree *pf;
    size_t idx;

    idx = pageround(size);
    /* pageround() wrapped around; the request is unserviceable. */
    if (idx < size) {
        errno = ENOMEM;
        return NULL;
    } else
        size = idx;

    p = 0;

    /* Look for free pages before asking for more */
    for(pf = free_list.next; pf; pf = pf->next) {

#ifdef MALLOC_EXTRA_SANITY
        if (pf->size & malloc_pagemask)
            wrterror("(ES): junk length entry on free_list\n");
        if (!pf->size)
            wrterror("(ES): zero length entry on free_list\n");
        if (pf->page == pf->end)
            wrterror("(ES): zero entry on free_list\n");
        if (pf->page > pf->end)
            wrterror("(ES): sick entry on free_list\n");
        if ((void*)pf->page >= (void*)sbrk(0))
            wrterror("(ES): entry on free_list past brk\n");
        if (page_dir[ptr2idx(pf->page)] != MALLOC_FREE)
            wrterror("(ES): non-free first page on free-list\n");
        if (page_dir[ptr2idx(pf->end)-1] != MALLOC_FREE)
            wrterror("(ES): non-free last page on free-list\n");
#endif /* MALLOC_EXTRA_SANITY */

        if (pf->size < size)
            continue;

        /* Exact fit: unlink the entry; its holder is freed below,
         * after we have finished walking the list. */
        if (pf->size == size) {
            p = pf->page;
            if (pf->next)
                pf->next->prev = pf->prev;
            pf->prev->next = pf->next;
            delay_free = pf;
            break;
        }

        /* Larger run: carve our pages off the front. */
        p = pf->page;
        pf->page = (char *)pf->page + size;
        pf->size -= size;
        break;
    }

#ifdef MALLOC_EXTRA_SANITY
    if (p && page_dir[ptr2idx(p)] != MALLOC_FREE)
        wrterror("(ES): allocated non-free page on free-list\n");
#endif /* MALLOC_EXTRA_SANITY */

    /* From here on, `size' is a page count, not a byte count. */
    size >>= malloc_pageshift;

    /* Map new pages */
    if (!p)
        p = map_pages(size);

    if (p) {

        /* Mark the run: first page MALLOC_FIRST, the rest MALLOC_FOLLOW. */
        idx = ptr2idx(p);
        page_dir[idx] = MALLOC_FIRST;
        for (i=1;i<size;i++)
            page_dir[idx+i] = MALLOC_FOLLOW;

        if (malloc_junk)
            memset(p, SOME_JUNK, size << malloc_pageshift);
    }

    /* Recycle the unlinked holder as the one-entry px cache, or free it. */
    if (delay_free) {
        if (!px)
            px = delay_free;
        else
            ifree(delay_free);
    }

    return p;
}
624
625 /*
626 * Allocate a page of fragments
627 */
628
/*
 * Turn a fresh page into a page of 2^bits-byte chunks and put it at the
 * head of the bucket list page_dir[bits].  The pginfo admin structure is
 * placed inside the page itself when that wastes at most two chunks,
 * otherwise it is allocated separately.  Returns 1 on success, 0 on
 * failure.
 */
static __inline__ int
malloc_make_chunks(int bits)
{
    struct pginfo *bp;
    void *pp;
    int i, k, l;

    /* Allocate a new bucket */
    pp = malloc_pages(malloc_pagesize);
    if (!pp)
        return 0;

    /* Find length of admin structure (header plus one bitmap word per
     * MALLOC_BITS chunks, rounded up). */
    l = (int)offsetof(struct pginfo, bits[0]);
    l += sizeof bp->bits[0] *
        (((malloc_pagesize >> bits)+MALLOC_BITS-1) / MALLOC_BITS);

    /* Don't waste more than two chunks on this */
    if ((1<<(bits)) <= l+l) {
        bp = (struct pginfo *)pp;
    } else {
        bp = (struct pginfo *)imalloc((size_t)l);
        if (!bp) {
            ifree(pp);
            return 0;
        }
    }

    bp->size = (1<<bits);
    bp->shift = bits;
    bp->total = bp->free = malloc_pagesize >> bits;
    bp->page = pp;

    /* set all valid bits in the bitmap (1 = chunk free) */
    k = bp->total;
    i = 0;

    /* Do a bunch at a time */
    for(;k-i >= MALLOC_BITS; i += MALLOC_BITS)
        bp->bits[i / MALLOC_BITS] = ~0U;

    for(; i < k; i++)
        bp->bits[i/MALLOC_BITS] |= 1<<(i%MALLOC_BITS);

    if (bp == bp->page) {
        /* Mark the ones we stole for ourselves (the admin structure
         * occupies the first ceil(l / chunksize) chunks). */
        for(i=0;l > 0;i++) {
            bp->bits[i/MALLOC_BITS] &= ~(1<<(i%MALLOC_BITS));
            bp->free--;
            bp->total--;
            l -= (1 << bits);
        }
    }

    /* MALLOC_LOCK */

    page_dir[ptr2idx(pp)] = bp;

    /* Push onto the head of this size's bucket list. */
    bp->next = page_dir[bits];
    page_dir[bits] = bp;

    /* MALLOC_UNLOCK */

    return 1;
}
694
695 /*
696 * Allocate a fragment
697 */
/*
 * Allocate a sub-page fragment.  The request is rounded up to the next
 * power of two (at least malloc_minsize) and served from the bucket of
 * that size, creating a new chunk page if the bucket is empty.
 * Returns NULL on failure.
 */
static void *
malloc_bytes(size_t size)
{
    size_t i;
    int j;
    u_int u;
    struct pginfo *bp;
    int k;
    u_int *lp;

    /* Don't bother with anything less than this */
    if (size < malloc_minsize)
        size = malloc_minsize;

    /* Find the right bucket: j = ceil(log2(size)) */
    j = 1;
    i = size-1;
    while (i >>= 1)
        j++;

    /* If it's empty, make a page more of that size chunks */
    if (!page_dir[j] && !malloc_make_chunks(j))
        return 0;

    bp = page_dir[j];

    /* Find first word of bitmap which isn't empty (a free chunk is
     * guaranteed to exist, since empty pages leave the bucket list). */
    for (lp = bp->bits; !*lp; lp++)
        ;

    /* Find that bit, and tweak it (clear it: chunk is now in use) */
    u = 1;
    k = 0;
    while (!(*lp & u)) {
        u += u;
        k++;
    }
    *lp ^= u;

    /* If there are no more free, remove from free-list */
    if (!--bp->free) {
        page_dir[j] = bp->next;
        bp->next = 0;
    }

    /* Adjust to the real offset of that chunk */
    k += (lp-bp->bits)*MALLOC_BITS;
    k <<= bp->shift;

    if (malloc_junk)
        memset((u_char*)bp->page + k, SOME_JUNK, (size_t)bp->size);

    return (u_char *)bp->page + k;
}
752
753 /*
754 * Allocate a piece of memory
755 */
756 static void *
757 imalloc(size_t size)
758 {
759 void *result;
760
761 if (suicide)
762 abort();
763
764 if ((size + malloc_pagesize) < size) /* Check for overflow */
765 result = 0;
766 else if (size <= malloc_maxsize)
767 result = malloc_bytes(size);
768 else
769 result = malloc_pages(size);
770
771 if (malloc_abort && !result)
772 wrterror("allocation failed.\n");
773
774 if (malloc_zero && result)
775 memset(result, 0, size);
776
777 return result;
778 }
779
780 /*
781 * Change the size of an allocation.
782 */
/*
 * Change the size of an allocation.
 * Validates `ptr' against the page directory, decides whether the old
 * block can simply be kept, and otherwise allocates a new block, copies
 * the smaller of the two sizes, and frees the old one.  Returns the
 * (possibly moved) pointer, or NULL on failure or on a bad `ptr'.
 */
static void *
irealloc(void *ptr, size_t size)
{
    void *p;
    size_t osize, idx;
    struct pginfo **mp;
    size_t i;

    if (suicide)
        abort();

    idx = ptr2idx(ptr);

    /* Indices below malloc_pageshift are the chunk bucket heads. */
    if (idx < malloc_pageshift) {
        wrtwarning("junk pointer, too low to make sense.\n");
        return 0;
    }

    if (idx > last_idx) {
        wrtwarning("junk pointer, too high to make sense.\n");
        return 0;
    }

    mp = &page_dir[idx];

    if (*mp == MALLOC_FIRST) {                  /* Page allocation */

        /* Check the pointer */
        if ((size_t)(uintptr_t)ptr & malloc_pagemask) {
            wrtwarning("modified (page-) pointer.\n");
            return 0;
        }

        /* Find the size in bytes by counting MALLOC_FOLLOW pages. */
        for (osize = malloc_pagesize; *++mp == MALLOC_FOLLOW;)
            osize += malloc_pagesize;

        if (!malloc_realloc &&                  /* unless we have to, */
            size <= osize &&                    /* .. or are too small, */
            size > (osize - malloc_pagesize)) { /* .. or can free a page, */
            return ptr;                         /* don't do anything. */
        }

    } else if (*mp >= MALLOC_MAGIC) {           /* Chunk allocation */

        /* Check the pointer for sane values (chunk alignment) */
        if (((size_t)(uintptr_t)ptr & ((*mp)->size-1))) {
            wrtwarning("modified (chunk-) pointer.\n");
            return 0;
        }

        /* Find the chunk index in the page */
        i = ((size_t)(uintptr_t)ptr & malloc_pagemask) >> (*mp)->shift;

        /* Verify that it isn't a free chunk already */
        if ((*mp)->bits[i/MALLOC_BITS] & (1<<(i%MALLOC_BITS))) {
            wrtwarning("chunk is already free.\n");
            return 0;
        }

        osize = (*mp)->size;

        if (!malloc_realloc &&                  /* Unless we have to, */
            size < osize &&                     /* ..or are too small, */
            (size > osize/2 ||                  /* ..or could use a smaller size, */
             osize == malloc_minsize)) {        /* ..(if there is one) */
            return ptr;                         /* ..Don't do anything */
        }

    } else {
        wrtwarning("pointer to wrong page.\n");
        return 0;
    }

    p = imalloc(size);

    if (p) {
        /* copy the lesser of the two sizes, and free the old one */
        if (!size || !osize)
            ;
        else if (osize < size)
            memcpy(p, ptr, osize);
        else
            memcpy(p, ptr, size);
        ifree(ptr);
    }
    return p;
}
871
872 /*
873 * Free a sequence of pages
874 */
875
876 static __inline__ void
877 free_pages(void *ptr, size_t idx, struct pginfo *info)
878 {
879 size_t i;
880 struct pgfree *pf, *pt=0;
881 size_t l;
882 void *tail;
883
884 if (info == MALLOC_FREE) {
885 wrtwarning("page is already free.\n");
886 return;
887 }
888
889 if (info != MALLOC_FIRST) {
890 wrtwarning("pointer to wrong page.\n");
891 return;
892 }
893
894 if ((size_t)(uintptr_t)ptr & malloc_pagemask) {
895 wrtwarning("modified (page-) pointer.\n");
896 return;
897 }
898
899 /* Count how many pages and mark them free at the same time */
900 page_dir[idx] = MALLOC_FREE;
901 for (i = 1; page_dir[idx+i] == MALLOC_FOLLOW; i++)
902 page_dir[idx + i] = MALLOC_FREE;
903
904 l = i << malloc_pageshift;
905
906 if (malloc_junk)
907 memset(ptr, SOME_JUNK, l);
908
909 if (malloc_hint)
910 madvise(ptr, l, MADV_FREE);
911
912 tail = (char *)ptr+l;
913
914 /* add to free-list */
915 if (!px)
916 px = imalloc(sizeof *pt); /* This cannot fail... */
917 px->page = ptr;
918 px->end = tail;
919 px->size = l;
920 if (!free_list.next) {
921
922 /* Nothing on free list, put this at head */
923 px->next = free_list.next;
924 px->prev = &free_list;
925 free_list.next = px;
926 pf = px;
927 px = 0;
928
929 } else {
930
931 /* Find the right spot, leave pf pointing to the modified entry. */
932 tail = (char *)ptr+l;
933
934 for(pf = free_list.next; pf->end < ptr && pf->next; pf = pf->next)
935 ; /* Race ahead here */
936
937 if (pf->page > tail) {
938 /* Insert before entry */
939 px->next = pf;
940 px->prev = pf->prev;
941 pf->prev = px;
942 px->prev->next = px;
943 pf = px;
944 px = 0;
945 } else if (pf->end == ptr ) {
946 /* Append to the previous entry */
947 pf->end = (char *)pf->end + l;
948 pf->size += l;
949 if (pf->next && pf->end == pf->next->page ) {
950 /* And collapse the next too. */
951 pt = pf->next;
952 pf->end = pt->end;
953 pf->size += pt->size;
954 pf->next = pt->next;
955 if (pf->next)
956 pf->next->prev = pf;
957 }
958 } else if (pf->page == tail) {
959 /* Prepend to entry */
960 pf->size += l;
961 pf->page = ptr;
962 } else if (!pf->next) {
963 /* Append at tail of chain */
964 px->next = 0;
965 px->prev = pf;
966 pf->next = px;
967 pf = px;
968 px = 0;
969 } else {
970 wrterror("freelist is destroyed.\n");
971 }
972 }
973
974 /* Return something to OS ? */
975 if (!pf->next && /* If we're the last one, */
976 pf->size > malloc_cache && /* ..and the cache is full, */
977 pf->end == malloc_brk && /* ..and none behind us, */
978 malloc_brk == sbrk((intptr_t)0)) { /* ..and it's OK to do... */
979
980 /*
981 * Keep the cache intact. Notice that the '>' above guarantees that
982 * the pf will always have at least one page afterwards.
983 */
984 pf->end = (char *)pf->page + malloc_cache;
985 pf->size = malloc_cache;
986
987 brk(pf->end);
988 malloc_brk = pf->end;
989
990 idx = ptr2idx(pf->end);
991 last_idx = idx - 1;
992
993 for(i=idx;i <= last_idx;)
994 page_dir[i++] = MALLOC_NOT_MINE;
995
996 /* XXX: We could realloc/shrink the pagedir here I guess. */
997 }
998 if (pt)
999 ifree(pt);
1000 }
1001
1002 /*
1003 * Free a chunk, and possibly the page it's on, if the page becomes empty.
1004 */
1005
/*
 * Free a chunk, and possibly the page it's on, if the page becomes empty.
 *
 * ptr  - pointer into a chunk page
 * idx  - page-directory index of that page
 * info - the page's pginfo (must be >= MALLOC_MAGIC)
 */
static __inline__ void
free_bytes(void *ptr, size_t idx, struct pginfo *info)
{
    size_t i;
    struct pginfo **mp;
    void *vp;

    /* Find the chunk number on the page */
    i = ((size_t)(uintptr_t)ptr & malloc_pagemask) >> info->shift;

    /* Chunks are 2^shift aligned; anything else is a corrupted pointer. */
    if (((size_t)(uintptr_t)ptr & (info->size-1))) {
        wrtwarning("modified (chunk-) pointer.\n");
        return;
    }

    /* A set bit means the chunk is already free: double free. */
    if (info->bits[i/MALLOC_BITS] & (1<<(i%MALLOC_BITS))) {
        wrtwarning("chunk is already free.\n");
        return;
    }

    if (malloc_junk)
        memset(ptr, SOME_JUNK, (size_t)info->size);

    /* Mark the chunk free in the bitmap. */
    info->bits[i/MALLOC_BITS] |= 1<<(i%MALLOC_BITS);
    info->free++;

    mp = page_dir + info->shift;

    if (info->free == 1) {

        /* Page became non-full: re-insert it in its size bucket. */

        mp = page_dir + info->shift;
        /* Insert in address order */
        while (*mp && (*mp)->next && (*mp)->next->page < info->page)
            mp = &(*mp)->next;
        info->next = *mp;
        *mp = info;
        return;
    }

    /* Page is only partially free: nothing more to do. */
    if (info->free != info->total)
        return;

    /* Page became completely free: find & remove it from the bucket list. */
    while (*mp != info) {
        mp = &((*mp)->next);
#ifdef MALLOC_EXTRA_SANITY
        if (!*mp)
            wrterror("(ES): Not on queue\n");
#endif /* MALLOC_EXTRA_SANITY */
    }
    *mp = info->next;

    /* Free the page & the info structure if need be */
    page_dir[idx] = MALLOC_FIRST;
    vp = info->page;            /* Order is important ! */
    if(vp != (void*)info)
        ifree(info);
    ifree(vp);
}
1067
1068 static void
1069 ifree(void *ptr)
1070 {
1071 struct pginfo *info;
1072 size_t idx;
1073
1074 /* This is legal */
1075 if (!ptr)
1076 return;
1077
1078 if (!malloc_started) {
1079 wrtwarning("malloc() has never been called.\n");
1080 return;
1081 }
1082
1083 /* If we're already sinking, don't make matters any worse. */
1084 if (suicide)
1085 return;
1086
1087 idx = ptr2idx(ptr);
1088
1089 if (idx < malloc_pageshift) {
1090 wrtwarning("junk pointer, too low to make sense.\n");
1091 return;
1092 }
1093
1094 if (idx > last_idx) {
1095 wrtwarning("junk pointer, too high to make sense.\n");
1096 return;
1097 }
1098
1099 info = page_dir[idx];
1100
1101 if (info < MALLOC_MAGIC)
1102 free_pages(ptr, idx, info);
1103 else
1104 free_bytes(ptr, idx, info);
1105 return;
1106 }
1107
1108 /*
1109 * These are the public exported interface routines.
1110 */
1111
1112
1113 void *
1114 malloc(size_t size)
1115 {
1116 register void *r;
1117
1118 THREAD_LOCK();
1119 malloc_func = " in malloc():";
1120 if (malloc_active++) {
1121 wrtwarning("recursive call.\n");
1122 malloc_active--;
1123 return (0);
1124 }
1125 if (!malloc_started)
1126 malloc_init();
1127 if (malloc_sysv && !size)
1128 r = 0;
1129 else
1130 r = imalloc(size);
1131 UTRACE(0, size, r);
1132 malloc_active--;
1133 THREAD_UNLOCK();
1134 if (r == NULL && (size != 0 || !malloc_sysv)) {
1135 if (malloc_xmalloc)
1136 wrterror("out of memory.\n");
1137 errno = ENOMEM;
1138 }
1139 return (r);
1140 }
1141
1142 void
1143 free(void *ptr)
1144 {
1145 THREAD_LOCK();
1146 malloc_func = " in free():";
1147 if (malloc_active++) {
1148 wrtwarning("recursive call.\n");
1149 malloc_active--;
1150 THREAD_UNLOCK();
1151 return;
1152 } else {
1153 ifree(ptr);
1154 UTRACE(ptr, 0, 0);
1155 }
1156 malloc_active--;
1157 THREAD_UNLOCK();
1158 return;
1159 }
1160
1161 void *
1162 realloc(void *ptr, size_t size)
1163 {
1164 register void *r;
1165
1166 THREAD_LOCK();
1167 malloc_func = " in realloc():";
1168 if (malloc_active++) {
1169 wrtwarning("recursive call.\n");
1170 malloc_active--;
1171 return (0);
1172 }
1173 if (ptr && !malloc_started) {
1174 wrtwarning("malloc() has never been called.\n");
1175 ptr = 0;
1176 }
1177 if (!malloc_started)
1178 malloc_init();
1179 if (malloc_sysv && !size) {
1180 ifree(ptr);
1181 r = 0;
1182 } else if (!ptr) {
1183 r = imalloc(size);
1184 } else {
1185 r = irealloc(ptr, size);
1186 }
1187 UTRACE(ptr, size, r);
1188 malloc_active--;
1189 THREAD_UNLOCK();
1190 if (r == NULL && (size != 0 || !malloc_sysv)) {
1191 if (malloc_xmalloc)
1192 wrterror("out of memory.\n");
1193 errno = ENOMEM;
1194 }
1195 return (r);
1196 }
1197