/* $NetBSD: malloc.c,v 1.43 2003/10/05 04:49:46 junyoung Exp $ */

/*
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.  Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * From FreeBSD: malloc.c,v 1.43 1998/09/30 06:13:59 jb
 *
 */

/*
 * Defining MALLOC_EXTRA_SANITY will enable extra checks which are related
 * to internal conditions and consistency in malloc.c. This has a
 * noticeable runtime performance hit, and generally will not do you
 * any good unless you fiddle with the internals of malloc or want
 * to catch random pointer corruption as early as possible.
 */
#ifndef MALLOC_EXTRA_SANITY
#undef MALLOC_EXTRA_SANITY
#endif

/*
 * What to use for Junk.  This is the byte value we use to fill with
 * when the 'J' option is enabled.
 */
#define SOME_JUNK 0xd0 /* as in "Duh" :-) */

/*
 * The basic parameters you can tweak.
 *
 * malloc_minsize   minimum size of an allocation in bytes.
 *                  If this is too small it's too much work
 *                  to manage them.  This is also the smallest
 *                  unit of alignment used for the storage
 *                  returned by malloc/realloc.
 *
 */

#if defined(__FreeBSD__)
# if defined(__i386__)
#  define malloc_minsize 16U
# endif
# if defined(__alpha__)
#  define malloc_minsize 16U
# endif
# define HAS_UTRACE
# define UTRACE_LABEL

#include <sys/cdefs.h>
void utrace(struct ut *, int);

/*
 * Make malloc/free/realloc thread-safe in libc for use with
 * kernel threads.
 */
# include "libc_private.h"
# include "spinlock.h"
static spinlock_t thread_lock = _SPINLOCK_INITIALIZER;
# define THREAD_LOCK()   if (__isthreaded) _SPINLOCK(&thread_lock);
# define THREAD_UNLOCK() if (__isthreaded) _SPINUNLOCK(&thread_lock);
#endif /* __FreeBSD__ */

#if defined(__NetBSD__)
# define malloc_minsize 16U
# define HAS_UTRACE
# define UTRACE_LABEL "malloc",
#include <sys/cdefs.h>
#include <sys/types.h>
int utrace(const char *, void *, size_t);

#include <reentrant.h>
extern int __isthreaded;
static mutex_t thread_lock = MUTEX_INITIALIZER;
#define THREAD_LOCK()   if (__isthreaded) mutex_lock(&thread_lock);
#define THREAD_UNLOCK() if (__isthreaded) mutex_unlock(&thread_lock);
#endif /* __NetBSD__ */

#if defined(__sparc__) && defined(sun)
# define malloc_minsize 16U
# define MAP_ANON (0)
static int fdzero;
# define MMAP_FD fdzero
# define INIT_MMAP() \
    { if ((fdzero=open("/dev/zero", O_RDWR, 0000)) == -1) \
        wrterror("open of /dev/zero"); }
#endif /* __sparc__ */

/* Insert your combination here... */
#if defined(__FOOCPU__) && defined(__BAROS__)
# define malloc_minsize 16U
#endif /* __FOOCPU__ && __BAROS__ */


/*
 * No user serviceable parts behind this point.
 */
#include "namespace.h"
#include <sys/types.h>
#include <sys/mman.h>
#include <errno.h>
#include <fcntl.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/*
 * This structure describes a page worth of chunks.
 */

struct pginfo {
    struct pginfo *next;  /* next on the free list */
    void    *page;        /* Pointer to the page */
    u_short  size;        /* size of this page's chunks */
    u_short  shift;       /* How far to shift for this size chunks */
    u_short  free;        /* How many free chunks */
    u_short  total;       /* How many chunks */
    u_int    bits[1];     /* Which chunks are free */
};
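
/*
 * Worked example of the layout above (illustrative numbers only,
 * assuming a 4096-byte page): for 128-byte chunks, shift == 7,
 * size == 128 and total == 32, so "bits" needs a single 32-bit word.
 * Bit i set means chunk i is free, and chunk i starts at
 * (char *)page + (i << shift).  For 16-byte chunks, total == 256 and
 * the bitmap grows to 256/MALLOC_BITS words; the structure is
 * allocated with enough trailing space to hold that many words.
 */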

/*
 * This structure describes a number of free pages.
 */

struct pgfree {
    struct pgfree *next;  /* next run of free pages */
    struct pgfree *prev;  /* prev run of free pages */
    void   *page;         /* pointer to free pages */
    void   *end;          /* pointer to end of free pages */
    size_t  size;         /* number of bytes free */
};

/*
 * How many bits per u_int in the bitmap.
 * Change only if not 8 bits/byte
 */
#define MALLOC_BITS ((int)(8*sizeof(u_int)))

/*
 * Magic values to put in the page_directory
 */
#define MALLOC_NOT_MINE ((struct pginfo*) 0)
#define MALLOC_FREE     ((struct pginfo*) 1)
#define MALLOC_FIRST    ((struct pginfo*) 2)
#define MALLOC_FOLLOW   ((struct pginfo*) 3)
#define MALLOC_MAGIC    ((struct pginfo*) 4)
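
#if 0
/*
 * Illustrative sketch only, not used by the allocator: how a page_dir
 * entry is interpreted.  Values below MALLOC_MAGIC are the sentinels
 * above; anything else is a pointer to the pginfo describing a page
 * of chunks.  The helper name is hypothetical.
 */
static const char *
example_page_kind(struct pginfo *pd)
{
    if (pd == MALLOC_NOT_MINE)
        return "page not managed by this malloc";
    if (pd == MALLOC_FREE)
        return "free page";
    if (pd == MALLOC_FIRST)
        return "first page of a multi-page allocation";
    if (pd == MALLOC_FOLLOW)
        return "continuation page of a multi-page allocation";
    return "page of chunks (pointer to its pginfo)";
}
#endif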

/*
 * Page size related parameters, computed at run-time.
 */
static size_t malloc_pagesize;
static size_t malloc_pageshift;
static size_t malloc_pagemask;

#ifndef malloc_minsize
#define malloc_minsize 16U
#endif

#ifndef malloc_maxsize
#define malloc_maxsize ((malloc_pagesize)>>1)
#endif

#define pageround(foo) (((foo) + (malloc_pagemask))&(~(malloc_pagemask)))
#define ptr2idx(foo) \
    (((size_t)(uintptr_t)(foo) >> malloc_pageshift)-malloc_origo)
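
/*
 * Worked example (illustrative, assuming a 4096-byte page so that
 * malloc_pageshift == 12 and malloc_pagemask == 0xfff):
 * pageround(5000) == 8192, pageround(4096) == 4096, and for a pointer
 * p == (void *)0x2000d230, ptr2idx(p) == 0x2000d - malloc_origo.
 * The real values are computed in malloc_init() below.
 */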

#ifndef THREAD_LOCK
#define THREAD_LOCK()
#endif

#ifndef THREAD_UNLOCK
#define THREAD_UNLOCK()
#endif

#ifndef MMAP_FD
#define MMAP_FD (-1)
#endif

#ifndef INIT_MMAP
#define INIT_MMAP()
#endif

#ifndef MADV_FREE
#define MADV_FREE MADV_DONTNEED
#endif

/* Set when initialization has been done */
static unsigned malloc_started;

/* Recursion flag for public interface. */
static int malloc_active;

/* Number of free pages we cache */
static unsigned malloc_cache = 16;

/* The offset from pagenumber to index into the page directory */
static size_t malloc_origo;

/* The last index in the page directory we care about */
static size_t last_idx;

/* Pointer to page directory. Allocated "as if with" malloc */
static struct pginfo **page_dir;

/* How many slots in the page directory */
static unsigned malloc_ninfo;

/* Free pages line up here */
static struct pgfree free_list;

/* Abort(), user doesn't handle problems. */
static int malloc_abort;

/* Are we trying to die ? */
static int suicide;

/* always realloc ? */
static int malloc_realloc;

/* pass the kernel a hint on free pages ? */
static int malloc_hint = 0;

/* xmalloc behaviour ? */
static int malloc_xmalloc;

/* sysv behaviour for malloc(0) ? */
static int malloc_sysv;

/* zero fill ? */
static int malloc_zero;

/* junk fill ? */
static int malloc_junk;

#ifdef HAS_UTRACE

/* utrace ? */
static int malloc_utrace;

struct ut { void *p; size_t s; void *r; };

#define UTRACE(a, b, c) \
    if (malloc_utrace) { \
        struct ut u; \
        u.p=a; u.s = b; u.r=c; \
        utrace(UTRACE_LABEL (void *) &u, sizeof u); \
    }
#else /* !HAS_UTRACE */
#define UTRACE(a,b,c)
#endif /* HAS_UTRACE */
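
/*
 * Example of the records emitted through UTRACE (illustrative):
 * malloc(16) returning p logs { NULL, 16, p }, free(p) logs
 * { p, 0, NULL }, and realloc(p, 32) returning q logs { p, 32, q }.
 * malloc_init() logs { NULL, 0, NULL } once.  The records travel via
 * utrace(2) and can typically be examined with ktrace(1)/kdump(1);
 * the exact tracing commands are system-dependent.
 */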

/* my last break. */
static void *malloc_brk;

/* one location cache for free-list holders */
static struct pgfree *px;

/* compile-time options */
char *malloc_options;

/* Name of the current public function */
static char *malloc_func;

/* Macro for mmap */
#define MMAP(size) \
    mmap(0, (size), PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, \
        MMAP_FD, (off_t)0);

/*
 * Necessary function declarations
 */
static int extend_pgdir(size_t idx);
static void *imalloc(size_t size);
static void ifree(void *ptr);
static void *irealloc(void *ptr, size_t size);

static void
wrterror(char *p)
{
    const char *progname = getprogname();
    char *q = " error: ";
    write(STDERR_FILENO, progname, strlen(progname));
    write(STDERR_FILENO, malloc_func, strlen(malloc_func));
    write(STDERR_FILENO, q, strlen(q));
    write(STDERR_FILENO, p, strlen(p));
    suicide = 1;
    abort();
}

static void
wrtwarning(char *p)
{
    const char *progname = getprogname();
    char *q = " warning: ";
    if (malloc_abort)
        wrterror(p);
    write(STDERR_FILENO, progname, strlen(progname));
    write(STDERR_FILENO, malloc_func, strlen(malloc_func));
    write(STDERR_FILENO, q, strlen(q));
    write(STDERR_FILENO, p, strlen(p));
}

/*
 * Allocate a number of pages from the OS
 */
static void *
map_pages(size_t pages)
{
    caddr_t result, rresult, tail;
    intptr_t bytes = pages << malloc_pageshift;

    if (bytes < 0 || (size_t)bytes < pages) {
        errno = ENOMEM;
        return NULL;
    }

    if ((result = sbrk(bytes)) == (void *)-1)
        return NULL;

    /*
     * Round to a page, in case sbrk(2) did not do this for us
     */
    rresult = (caddr_t)pageround((size_t)(uintptr_t)result);
    if (result < rresult) {
        /* make sure we have enough space to fit bytes */
        if (sbrk((intptr_t)(rresult - result)) == (void *) -1) {
            /* we failed, put everything back */
            if (brk(result)) {
                wrterror("brk(2) failed [internal error]\n");
            }
        }
    }
    tail = rresult + (size_t)bytes;

    last_idx = ptr2idx(tail) - 1;
    malloc_brk = tail;

    if ((last_idx+1) >= malloc_ninfo && !extend_pgdir(last_idx)) {
        malloc_brk = result;
        last_idx = ptr2idx(malloc_brk) - 1;
        /* Put back break point since we failed. */
        if (brk(malloc_brk))
            wrterror("brk(2) failed [internal error]\n");
        return 0;
    }

    return rresult;
}

/*
 * Extend page directory
 */
static int
extend_pgdir(size_t idx)
{
    struct pginfo **new, **old;
    size_t newlen, oldlen;

    /* check for overflow */
    if ((((~(1UL << ((sizeof(size_t) * NBBY) - 1)) / sizeof(*page_dir)) + 1)
        + (malloc_pagesize / sizeof *page_dir)) < idx) {
        errno = ENOMEM;
        return 0;
    }

    /* Make it this many pages */
    newlen = pageround(idx * sizeof *page_dir) + malloc_pagesize;

    /* remember the old mapping size */
    oldlen = malloc_ninfo * sizeof *page_dir;

    /*
     * NOTE: we allocate new pages and copy the directory rather than tempt
     * fate by trying to "grow" the region.. There is nothing to prevent
     * us from accidentally re-mapping space that's been allocated by our
     * caller via dlopen() or other mmap().
     *
     * The copy problem is not too bad, as there is 4K of page index per
     * 4MB of malloc arena.
     *
     * We can totally avoid the copy if we open a file descriptor to associate
     * the anon mappings with. Then, when we remap the pages at the new
     * address, the old pages will be "magically" remapped.. But this means
     * keeping open a "secret" file descriptor.....
     */

    /* Get new pages */
    new = (struct pginfo**) MMAP(newlen);
    if (new == MAP_FAILED)
        return 0;

    /* Copy the old stuff */
    memcpy(new, page_dir, oldlen);

    /* register the new size */
    malloc_ninfo = newlen / sizeof *page_dir;

    /* swap the pointers */
    old = page_dir;
    page_dir = new;

    /* Now free the old stuff */
    munmap(old, oldlen);
    return 1;
}

/*
 * Initialize the world
 */
static void
malloc_init (void)
{
    char *p, b[64];
    int i, j;
    int errnosave;

    /*
     * Compute page-size related variables.
     */
    malloc_pagesize = (size_t)sysconf(_SC_PAGESIZE);
    malloc_pagemask = malloc_pagesize - 1;
    for (malloc_pageshift = 0;
         (1UL << malloc_pageshift) != malloc_pagesize;
         malloc_pageshift++)
        /* nothing */ ;

    INIT_MMAP();

#ifdef MALLOC_EXTRA_SANITY
    malloc_junk = 1;
#endif /* MALLOC_EXTRA_SANITY */

    for (i = 0; i < 3; i++) {
        if (i == 0) {
            errnosave = errno;
            j = readlink("/etc/malloc.conf", b, sizeof b - 1);
            errno = errnosave;
            if (j <= 0)
                continue;
            b[j] = '\0';
            p = b;
        } else if (i == 1) {
            p = getenv("MALLOC_OPTIONS");
        } else {
            p = malloc_options;
        }
        for (; p != NULL && *p != '\0'; p++) {
            switch (*p) {
            case '>': malloc_cache <<= 1; break;
            case '<': malloc_cache >>= 1; break;
            case 'a': malloc_abort = 0; break;
            case 'A': malloc_abort = 1; break;
            case 'h': malloc_hint = 0; break;
            case 'H': malloc_hint = 1; break;
            case 'r': malloc_realloc = 0; break;
            case 'R': malloc_realloc = 1; break;
            case 'j': malloc_junk = 0; break;
            case 'J': malloc_junk = 1; break;
#ifdef HAS_UTRACE
            case 'u': malloc_utrace = 0; break;
            case 'U': malloc_utrace = 1; break;
#endif
            case 'v': malloc_sysv = 0; break;
            case 'V': malloc_sysv = 1; break;
            case 'x': malloc_xmalloc = 0; break;
            case 'X': malloc_xmalloc = 1; break;
            case 'z': malloc_zero = 0; break;
            case 'Z': malloc_zero = 1; break;
            default:
                j = malloc_abort;
                malloc_abort = 0;
                wrtwarning("unknown char in MALLOC_OPTIONS.\n");
                malloc_abort = j;
                break;
            }
        }
    }
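
    /*
     * Examples of the three option sources parsed above (illustrative):
     *
     *     ln -s 'AJ' /etc/malloc.conf      (flags live in the symlink target)
     *     env MALLOC_OPTIONS=AJZ ./prog    (environment variable)
     *
     * or, from the program itself before the first allocation:
     *
     *     extern char *malloc_options;
     *     malloc_options = "X";
     *
     * The sources are parsed in that order, so a flag set in a later
     * source overrides the same flag from an earlier one.
     */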

    UTRACE(0, 0, 0);

    /*
     * We want junk in the entire allocation, and zero only in the part
     * the user asked for.
     */
    if (malloc_zero)
        malloc_junk = 1;

    /*
     * If we run with junk (or implicitly from above: zero), we want to
     * force realloc() to get new storage, so we can DTRT with it.
     */
    if (malloc_junk)
        malloc_realloc = 1;

    /* Allocate one page for the page directory */
    page_dir = (struct pginfo **) MMAP(malloc_pagesize);

    if (page_dir == MAP_FAILED)
        wrterror("mmap(2) failed, check limits.\n");

    /*
     * We need a maximum of malloc_pageshift buckets, steal these from the
     * front of the page_directory.
     */
    malloc_origo = pageround((size_t)(uintptr_t)sbrk((intptr_t)0))
        >> malloc_pageshift;
    malloc_origo -= malloc_pageshift;

    malloc_ninfo = malloc_pagesize / sizeof *page_dir;

    /* Recalculate the cache size in bytes, and make sure it's nonzero */

    if (!malloc_cache)
        malloc_cache++;

    malloc_cache <<= malloc_pageshift;

    /*
     * This is a nice hack from Kaleb Keithley (kaleb@x.org).
     * We can sbrk(2) further back when we keep this on a low address.
     */
    px = (struct pgfree *) imalloc (sizeof *px);

    /* Been here, done that */
    malloc_started++;
}

/*
 * Allocate a number of complete pages
 */
static void *
malloc_pages(size_t size)
{
    void *p, *delay_free = NULL;
    size_t i;
    struct pgfree *pf;
    size_t idx;

    idx = pageround(size);
    if (idx < size) {
        errno = ENOMEM;
        return NULL;
    } else
        size = idx;

    p = NULL;

    /* Look for free pages before asking for more */
    for (pf = free_list.next; pf; pf = pf->next) {

#ifdef MALLOC_EXTRA_SANITY
        if (pf->size & malloc_pagemask)
            wrterror("(ES): junk length entry on free_list.\n");
        if (!pf->size)
            wrterror("(ES): zero length entry on free_list.\n");
        if (pf->page == pf->end)
            wrterror("(ES): zero entry on free_list.\n");
        if (pf->page > pf->end)
            wrterror("(ES): sick entry on free_list.\n");
        if ((void*)pf->page >= (void*)sbrk(0))
            wrterror("(ES): entry on free_list past brk.\n");
        if (page_dir[ptr2idx(pf->page)] != MALLOC_FREE)
            wrterror("(ES): non-free first page on free-list.\n");
        if (page_dir[ptr2idx(pf->end)-1] != MALLOC_FREE)
            wrterror("(ES): non-free last page on free-list.\n");
#endif /* MALLOC_EXTRA_SANITY */

        if (pf->size < size)
            continue;

        if (pf->size == size) {
            p = pf->page;
            if (pf->next != NULL)
                pf->next->prev = pf->prev;
            pf->prev->next = pf->next;
            delay_free = pf;
            break;
        }

        p = pf->page;
        pf->page = (char *)pf->page + size;
        pf->size -= size;
        break;
    }

#ifdef MALLOC_EXTRA_SANITY
    if (p != NULL && page_dir[ptr2idx(p)] != MALLOC_FREE)
        wrterror("(ES): allocated non-free page on free-list.\n");
#endif /* MALLOC_EXTRA_SANITY */

    size >>= malloc_pageshift;

    /* Map new pages */
    if (p == NULL)
        p = map_pages(size);

    if (p != NULL) {

        idx = ptr2idx(p);
        page_dir[idx] = MALLOC_FIRST;
        for (i = 1; i < size; i++)
            page_dir[idx+i] = MALLOC_FOLLOW;

        if (malloc_junk)
            memset(p, SOME_JUNK, size << malloc_pageshift);
    }

    if (delay_free) {
        if (px == NULL)
            px = delay_free;
        else
            ifree(delay_free);
    }

    return p;
}

/*
 * Allocate a page of fragments
 */

static __inline__ int
malloc_make_chunks(int bits)
{
    struct pginfo *bp;
    void *pp;
    int i, k, l;

    /* Allocate a new bucket */
    pp = malloc_pages(malloc_pagesize);
    if (pp == NULL)
        return 0;

    /* Find length of admin structure */
    l = (int)offsetof(struct pginfo, bits[0]);
    l += sizeof bp->bits[0] *
        (((malloc_pagesize >> bits)+MALLOC_BITS-1) / MALLOC_BITS);

    /* Don't waste more than two chunks on this */
    if ((1<<(bits)) <= l+l) {
        bp = (struct pginfo *)pp;
    } else {
        bp = (struct pginfo *)imalloc((size_t)l);
        if (bp == NULL) {
            ifree(pp);
            return 0;
        }
    }

    bp->size = (1<<bits);
    bp->shift = bits;
    bp->total = bp->free = malloc_pagesize >> bits;
    bp->page = pp;

    /* set all valid bits in the bitmap */
    k = bp->total;
    i = 0;

    /* Do a bunch at a time */
    for (; k-i >= MALLOC_BITS; i += MALLOC_BITS)
        bp->bits[i / MALLOC_BITS] = ~0U;

    for (; i < k; i++)
        bp->bits[i/MALLOC_BITS] |= 1<<(i%MALLOC_BITS);

    if (bp == bp->page) {
        /* Mark the ones we stole for ourselves */
        for (i = 0; l > 0; i++) {
            bp->bits[i/MALLOC_BITS] &= ~(1<<(i%MALLOC_BITS));
            bp->free--;
            bp->total--;
            l -= (1 << bits);
        }
    }

    /* MALLOC_LOCK */

    page_dir[ptr2idx(pp)] = bp;

    bp->next = page_dir[bits];
    page_dir[bits] = bp;

    /* MALLOC_UNLOCK */

    return 1;
}
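
/*
 * Worked example for malloc_make_chunks() (illustrative, assuming a
 * 4096-byte page): the admin length l is the pginfo header plus one
 * bitmap word per MALLOC_BITS chunks.  The test (1<<bits) <= l+l asks
 * whether a chunk is no larger than twice that header.  For bits == 5
 * (32-byte chunks, 128 per page) it is, so the header is placed at the
 * start of the page itself and the chunks it covers are marked used.
 * For bits == 11 (2048-byte chunks, 2 per page) it is not, so the
 * header is allocated separately with imalloc() rather than burning a
 * 2048-byte chunk on it.
 */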

/*
 * Allocate a fragment
 */
static void *
malloc_bytes(size_t size)
{
    size_t i;
    int j;
    u_int u;
    struct pginfo *bp;
    int k;
    u_int *lp;

    /* Don't bother with anything less than this */
    if (size < malloc_minsize)
        size = malloc_minsize;

    /* Find the right bucket */
    j = 1;
    i = size-1;
    while (i >>= 1)
        j++;

    /* If it's empty, make a page more of that size chunks */
    if (page_dir[j] == NULL && !malloc_make_chunks(j))
        return NULL;

    bp = page_dir[j];

    /* Find first word of bitmap which isn't empty */
    for (lp = bp->bits; !*lp; lp++)
        ;

    /* Find that bit, and tweak it */
    u = 1;
    k = 0;
    while (!(*lp & u)) {
        u += u;
        k++;
    }
    *lp ^= u;

    /* If there are no more free, remove from free-list */
    if (!--bp->free) {
        page_dir[j] = bp->next;
        bp->next = NULL;
    }

    /* Adjust to the real offset of that chunk */
    k += (lp-bp->bits)*MALLOC_BITS;
    k <<= bp->shift;

    if (malloc_junk)
        memset((u_char*)bp->page + k, SOME_JUNK, (size_t)bp->size);

    return (u_char *)bp->page + k;
}
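
/*
 * Worked example for the bucket search above (illustrative): a request
 * of 100 bytes gives i = 99, which needs 7 right shifts to reach zero,
 * so j == 7 and the allocation is served from 128-byte (1<<7) chunks.
 * 128 bytes also maps to j == 7 (an exact fit), while 129 bytes maps
 * to j == 8 and 256-byte chunks.  Requests smaller than malloc_minsize
 * are rounded up before the search.
 */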

/*
 * Allocate a piece of memory
 */
static void *
imalloc(size_t size)
{
    void *result;

    if (suicide)
        abort();

    if ((size + malloc_pagesize) < size)   /* Check for overflow */
        result = NULL;
    else if (size <= malloc_maxsize)
        result = malloc_bytes(size);
    else
        result = malloc_pages(size);

    if (malloc_abort && result == NULL)
        wrterror("allocation failed.\n");

    if (malloc_zero && result != NULL)
        memset(result, 0, size);

    return result;
}

/*
 * Change the size of an allocation.
 */
static void *
irealloc(void *ptr, size_t size)
{
    void *p;
    size_t osize, idx;
    struct pginfo **mp;
    size_t i;

    if (suicide)
        abort();

    idx = ptr2idx(ptr);

    if (idx < malloc_pageshift) {
        wrtwarning("junk pointer, too low to make sense.\n");
        return 0;
    }

    if (idx > last_idx) {
        wrtwarning("junk pointer, too high to make sense.\n");
        return 0;
    }

    mp = &page_dir[idx];

    if (*mp == MALLOC_FIRST) {               /* Page allocation */

        /* Check the pointer */
        if ((size_t)(uintptr_t)ptr & malloc_pagemask) {
            wrtwarning("modified (page-) pointer.\n");
            return NULL;
        }

        /* Find the size in bytes */
        for (osize = malloc_pagesize; *++mp == MALLOC_FOLLOW;)
            osize += malloc_pagesize;

        if (!malloc_realloc &&                   /* unless we have to, */
            size <= osize &&                     /* .. or are too small, */
            size > (osize - malloc_pagesize)) {  /* .. or can free a page, */
            return ptr;                          /* don't do anything. */
        }

    } else if (*mp >= MALLOC_MAGIC) {        /* Chunk allocation */

        /* Check the pointer for sane values */
        if (((size_t)(uintptr_t)ptr & ((*mp)->size-1))) {
            wrtwarning("modified (chunk-) pointer.\n");
            return NULL;
        }

        /* Find the chunk index in the page */
        i = ((size_t)(uintptr_t)ptr & malloc_pagemask) >> (*mp)->shift;

        /* Verify that it isn't a free chunk already */
        if ((*mp)->bits[i/MALLOC_BITS] & (1<<(i%MALLOC_BITS))) {
            wrtwarning("chunk is already free.\n");
            return NULL;
        }

        osize = (*mp)->size;

        if (!malloc_realloc &&               /* Unless we have to, */
            size < osize &&                  /* ..or are too small, */
            (size > osize/2 ||               /* ..or could use a smaller size, */
             osize == malloc_minsize)) {     /* ..(if there is one) */
            return ptr;                      /* ..Don't do anything */
        }

    } else {
        wrtwarning("pointer to wrong page.\n");
        return NULL;
    }

    p = imalloc(size);

    if (p != NULL) {
        /* copy the lesser of the two sizes, and free the old one */
        if (!size || !osize)
            ;
        else if (osize < size)
            memcpy(p, ptr, osize);
        else
            memcpy(p, ptr, size);
        ifree(ptr);
    }
    return p;
}

/*
 * Free a sequence of pages
 */

static __inline__ void
free_pages(void *ptr, size_t idx, struct pginfo *info)
{
    size_t i;
    struct pgfree *pf, *pt = NULL;
    size_t l;
    void *tail;

    if (info == MALLOC_FREE) {
        wrtwarning("page is already free.\n");
        return;
    }

    if (info != MALLOC_FIRST) {
        wrtwarning("pointer to wrong page.\n");
        return;
    }

    if ((size_t)(uintptr_t)ptr & malloc_pagemask) {
        wrtwarning("modified (page-) pointer.\n");
        return;
    }

    /* Count how many pages and mark them free at the same time */
    page_dir[idx] = MALLOC_FREE;
    for (i = 1; page_dir[idx+i] == MALLOC_FOLLOW; i++)
        page_dir[idx + i] = MALLOC_FREE;

    l = i << malloc_pageshift;

    if (malloc_junk)
        memset(ptr, SOME_JUNK, l);

    if (malloc_hint)
        madvise(ptr, l, MADV_FREE);

    tail = (char *)ptr+l;

    /* add to free-list */
    if (px == NULL)
        px = imalloc(sizeof *pt);   /* This cannot fail... */
    px->page = ptr;
    px->end = tail;
    px->size = l;
    if (free_list.next == NULL) {

        /* Nothing on free list, put this at head */
        px->next = free_list.next;
        px->prev = &free_list;
        free_list.next = px;
        pf = px;
        px = NULL;

    } else {

        /* Find the right spot, leave pf pointing to the modified entry. */
        tail = (char *)ptr+l;

        for (pf = free_list.next; pf->end < ptr && pf->next != NULL;
             pf = pf->next)
            ; /* Race ahead here */

        if (pf->page > tail) {
            /* Insert before entry */
            px->next = pf;
            px->prev = pf->prev;
            pf->prev = px;
            px->prev->next = px;
            pf = px;
            px = NULL;
        } else if (pf->end == ptr) {
            /* Append to the previous entry */
            pf->end = (char *)pf->end + l;
            pf->size += l;
            if (pf->next != NULL && pf->end == pf->next->page) {
                /* And collapse the next too. */
                pt = pf->next;
                pf->end = pt->end;
                pf->size += pt->size;
                pf->next = pt->next;
                if (pf->next != NULL)
                    pf->next->prev = pf;
            }
        } else if (pf->page == tail) {
            /* Prepend to entry */
            pf->size += l;
            pf->page = ptr;
        } else if (pf->next == NULL) {
            /* Append at tail of chain */
            px->next = NULL;
            px->prev = pf;
            pf->next = px;
            pf = px;
            px = NULL;
        } else {
            wrterror("freelist is destroyed.\n");
        }
    }

    /* Return something to OS ? */
    if (pf->next == NULL &&                 /* If we're the last one, */
        pf->size > malloc_cache &&          /* ..and the cache is full, */
        pf->end == malloc_brk &&            /* ..and none behind us, */
        malloc_brk == sbrk((intptr_t)0)) {  /* ..and it's OK to do... */

        /*
         * Keep the cache intact.  Notice that the '>' above guarantees that
         * the pf will always have at least one page afterwards.
         */
        pf->end = (char *)pf->page + malloc_cache;
        pf->size = malloc_cache;

        brk(pf->end);
        malloc_brk = pf->end;

        idx = ptr2idx(pf->end);

        /*
         * Mark the released pages as no longer ours while last_idx still
         * covers them, then shrink last_idx.
         */
        for (i = idx; i <= last_idx;)
            page_dir[i++] = MALLOC_NOT_MINE;

        last_idx = idx - 1;

        /* XXX: We could realloc/shrink the pagedir here I guess. */
    }
    if (pt != NULL)
        ifree(pt);
}
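
/*
 * Illustrative summary of the free-list cases above, for a freed run
 * [p, p + l): it is linked in front of the first entry that starts
 * beyond its end, glued onto the end of an entry that stops exactly
 * at p, glued onto the front of an entry that starts exactly at
 * p + l, or appended at the tail.  When gluing onto the previous
 * entry also makes it touch the following one, the two entries are
 * collapsed into a single run.
 */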

/*
 * Free a chunk, and possibly the page it's on, if the page becomes empty.
 */

static __inline__ void
free_bytes(void *ptr, size_t idx, struct pginfo *info)
{
    size_t i;
    struct pginfo **mp;
    void *vp;

    /* Find the chunk number on the page */
    i = ((size_t)(uintptr_t)ptr & malloc_pagemask) >> info->shift;

    if (((size_t)(uintptr_t)ptr & (info->size-1))) {
        wrtwarning("modified (chunk-) pointer.\n");
        return;
    }

    if (info->bits[i/MALLOC_BITS] & (1<<(i%MALLOC_BITS))) {
        wrtwarning("chunk is already free.\n");
        return;
    }

    if (malloc_junk)
        memset(ptr, SOME_JUNK, (size_t)info->size);

    info->bits[i/MALLOC_BITS] |= 1<<(i%MALLOC_BITS);
    info->free++;

    mp = page_dir + info->shift;

    if (info->free == 1) {

        /* Page became non-full */

        mp = page_dir + info->shift;
        /* Insert in address order */
        while (*mp && (*mp)->next && (*mp)->next->page < info->page)
            mp = &(*mp)->next;
        info->next = *mp;
        *mp = info;
        return;
    }

    if (info->free != info->total)
        return;

    /* Find & remove this page in the queue */
    while (*mp != info) {
        mp = &((*mp)->next);
#ifdef MALLOC_EXTRA_SANITY
        if (!*mp)
            wrterror("(ES): Not on queue.\n");
#endif /* MALLOC_EXTRA_SANITY */
    }
    *mp = info->next;

    /* Free the page & the info structure if need be */
    page_dir[idx] = MALLOC_FIRST;
    vp = info->page;   /* Order is important ! */
    if (vp != (void*)info)
        ifree(info);
    ifree(vp);
}

static void
ifree(void *ptr)
{
    struct pginfo *info;
    size_t idx;

    /* This is legal */
    if (ptr == NULL)
        return;

    /* If we're already sinking, don't make matters any worse. */
    if (suicide)
        return;

    idx = ptr2idx(ptr);

    if (idx < malloc_pageshift) {
        wrtwarning("junk pointer, too low to make sense.\n");
        return;
    }

    if (idx > last_idx) {
        wrtwarning("junk pointer, too high to make sense.\n");
        return;
    }

    info = page_dir[idx];

    if (info < MALLOC_MAGIC)
        free_pages(ptr, idx, info);
    else
        free_bytes(ptr, idx, info);
    return;
}

/*
 * These are the public exported interface routines.
 */


void *
malloc(size_t size)
{
    void *r;

    THREAD_LOCK();
    malloc_func = " in malloc():";
    if (malloc_active++) {
        wrtwarning("recursive call.\n");
        malloc_active--;
        THREAD_UNLOCK();
        return (NULL);
    }
    if (!malloc_started)
        malloc_init();
    if (malloc_sysv && !size)
        r = NULL;
    else
        r = imalloc(size);
    UTRACE(0, size, r);
    malloc_active--;
    THREAD_UNLOCK();
    if (r == NULL && (size != 0 || !malloc_sysv)) {
        if (malloc_xmalloc)
            wrterror("out of memory.\n");
        errno = ENOMEM;
    }
    return (r);
}

void
free(void *ptr)
{
    THREAD_LOCK();
    malloc_func = " in free():";
    if (malloc_active++) {
        wrtwarning("recursive call.\n");
        malloc_active--;
        THREAD_UNLOCK();
        return;
    }
    if (!malloc_started)
        malloc_init();
    ifree(ptr);
    UTRACE(ptr, 0, 0);
    malloc_active--;
    THREAD_UNLOCK();
    return;
}

void *
realloc(void *ptr, size_t size)
{
    void *r;

    THREAD_LOCK();
    malloc_func = " in realloc():";
    if (malloc_active++) {
        wrtwarning("recursive call.\n");
        malloc_active--;
        THREAD_UNLOCK();
        return (NULL);
    }
    if (!malloc_started)
        malloc_init();
    if (malloc_sysv && !size) {
        ifree(ptr);
        r = NULL;
    } else if (!ptr) {
        r = imalloc(size);
    } else {
        r = irealloc(ptr, size);
    }
    UTRACE(ptr, size, r);
    malloc_active--;
    THREAD_UNLOCK();
    if (r == NULL && (size != 0 || !malloc_sysv)) {
        if (malloc_xmalloc)
            wrterror("out of memory.\n");
        errno = ENOMEM;
    }
    return (r);
}
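
#if 0
/*
 * Minimal usage sketch, not part of the library: exercises the public
 * entry points above.  The program and the chunk/page behaviour noted
 * in its comments are illustrative assumptions based on a 4096-byte
 * page.
 */
#include <stdlib.h>
#include <string.h>

int
main(void)
{
    char *p, *q;

    p = malloc(100);            /* served from 128-byte chunks */
    if (p == NULL)
        return 1;
    memset(p, 'x', 100);

    q = realloc(p, 3 * 4096);   /* moves to a multi-page allocation */
    if (q == NULL) {
        free(p);
        return 1;
    }

    free(q);
    free(NULL);                 /* explicitly legal, see ifree() */
    return 0;
}
#endif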