malloc.c revision 1.36.2.1 1 /* $NetBSD: malloc.c,v 1.36.2.1 2001/08/08 16:25:14 nathanw Exp $ */
2
3 /*
4 * ----------------------------------------------------------------------------
5 * "THE BEER-WARE LICENSE" (Revision 42):
6 * <phk (at) FreeBSD.ORG> wrote this file. As long as you retain this notice you
7 * can do whatever you want with this stuff. If we meet some day, and you think
8 * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
9 * ----------------------------------------------------------------------------
10 *
11 * From FreeBSD: malloc.c,v 1.43 1998/09/30 06:13:59 jb
12 *
13 */
14
15 /*
16 * Defining MALLOC_EXTRA_SANITY will enable extra checks which are related
17 * to internal conditions and consistency in malloc.c. This has a
18 * noticeable runtime performance hit, and generally will not do you
19 * any good unless you fiddle with the internals of malloc or want
20 * to catch random pointer corruption as early as possible.
21 */
22 #ifndef MALLOC_EXTRA_SANITY
23 #undef MALLOC_EXTRA_SANITY
24 #endif
25
26 /*
27 * What to use for Junk. This is the byte value we use to fill with
28 * when the 'J' option is enabled.
29 */
30 #define SOME_JUNK 0xd0 /* as in "Duh" :-) */
31
32 /*
33 * The basic parameters you can tweak.
34 *
35 * malloc_minsize minimum size of an allocation in bytes.
36 * If this is too small it's too much work
37 * to manage them. This is also the smallest
38 * unit of alignment used for the storage
39 * returned by malloc/realloc.
40 *
41 */
42
43 #if defined(__FreeBSD__)
44 # if defined(__i386__)
45 # define malloc_minsize 16U
46 # endif
47 # if defined(__alpha__)
48 # define malloc_minsize 16U
49 # endif
50 # define HAS_UTRACE
51 # define UTRACE_LABEL
52
53 #include <sys/cdefs.h>
54 void utrace __P((struct ut *, int));
55
56 /*
57 * Make malloc/free/realloc thread-safe in libc for use with
58 * kernel threads.
59 */
60 # include "libc_private.h"
61 # include "spinlock.h"
62 static spinlock_t thread_lock = _SPINLOCK_INITIALIZER;
63 # define THREAD_LOCK() if (__isthreaded) _SPINLOCK(&thread_lock);
64 # define THREAD_UNLOCK() if (__isthreaded) _SPINUNLOCK(&thread_lock);
65 #endif /* __FreeBSD__ */
66
67 #if defined(__NetBSD__)
68 # define malloc_minsize 16U
69 # define HAS_UTRACE
70 # define UTRACE_LABEL "malloc",
71 #include <sys/cdefs.h>
72 #include <sys/types.h>
73 int utrace __P((const char *, void *, size_t));
74
75 #include <pthread.h>
76 extern int __isthreaded;
77 static pt_spin_t thread_lock = __SIMPLELOCK_UNLOCKED;
78 #define THREAD_LOCK() if (__isthreaded) pthread_spinlock(pthread_self(), &thread_lock);
79 #define THREAD_UNLOCK() if (__isthreaded) pthread_spinunlock(pthread_self(), &thread_lock);
80 #endif /* __NetBSD__ */
81
82 #if defined(__sparc__) && defined(sun)
83 # define malloc_minsize 16U
84 # define MAP_ANON (0)
85 static int fdzero;
86 # define MMAP_FD fdzero
87 # define INIT_MMAP() \
88 { if ((fdzero=open("/dev/zero", O_RDWR, 0000)) == -1) \
89 wrterror("open of /dev/zero"); }
90 #endif /* __sparc__ */
91
92 /* Insert your combination here... */
93 #if defined(__FOOCPU__) && defined(__BAROS__)
94 # define malloc_minsize 16U
95 #endif /* __FOOCPU__ && __BAROS__ */
96
97
98 /*
99 * No user serviceable parts behind this point.
100 */
101 #include "namespace.h"
102 #include <sys/types.h>
103 #include <sys/mman.h>
104 #include <errno.h>
105 #include <fcntl.h>
106 #include <stddef.h>
107 #include <stdio.h>
108 #include <stdlib.h>
109 #include <string.h>
110 #include <unistd.h>
111
/*
 * This structure describes a page worth of chunks.  It lives either at
 * the start of the page it describes, or in a separately allocated
 * fragment when stealing space from the page would waste too much
 * (see malloc_make_chunks()).
 */

struct pginfo {
    struct pginfo *next;	/* next page on this bucket's free list */
    void *page;			/* Pointer to the page */
    u_short size;		/* size in bytes of this page's chunks */
    u_short shift;		/* log2(size); shift between index and offset */
    u_short free;		/* How many free chunks */
    u_short total;		/* How many chunks in total */
    u_int bits[1];		/* Bitmap of free chunks (1 == free);
				 * actually sized to cover `total' bits */
};
125
/*
 * This structure describes a run of contiguous free pages.  The runs
 * are kept on the doubly-linked `free_list', sorted by address so that
 * adjacent runs can be coalesced (see free_pages()).
 */

struct pgfree {
    struct pgfree *next;	/* next run of free pages */
    struct pgfree *prev;	/* prev run of free pages */
    void *page;			/* pointer to first free page of the run */
    void *end;			/* pointer just past the last free page */
    size_t size;		/* number of bytes free (end - page) */
};
137
138 /*
139 * How many bits per u_int in the bitmap.
140 * Change only if not 8 bits/byte
141 */
142 #define MALLOC_BITS (8*sizeof(u_int))
143
144 /*
145 * Magic values to put in the page_directory
146 */
147 #define MALLOC_NOT_MINE ((struct pginfo*) 0)
148 #define MALLOC_FREE ((struct pginfo*) 1)
149 #define MALLOC_FIRST ((struct pginfo*) 2)
150 #define MALLOC_FOLLOW ((struct pginfo*) 3)
151 #define MALLOC_MAGIC ((struct pginfo*) 4)
152
153 /*
154 * Page size related parameters, computed at run-time.
155 */
156 static size_t malloc_pagesize;
157 static size_t malloc_pageshift;
158 static size_t malloc_pagemask;
159
160 #ifndef malloc_minsize
161 #define malloc_minsize 16U
162 #endif
163
164 #ifndef malloc_maxsize
165 #define malloc_maxsize ((malloc_pagesize)>>1)
166 #endif
167
168 #define pageround(foo) (((foo) + (malloc_pagemask))&(~(malloc_pagemask)))
169 #define ptr2idx(foo) (((size_t)(u_long)(foo) >> malloc_pageshift)-malloc_origo)
170
171 #ifndef THREAD_LOCK
172 #define THREAD_LOCK()
173 #endif
174
175 #ifndef THREAD_UNLOCK
176 #define THREAD_UNLOCK()
177 #endif
178
179 #ifndef MMAP_FD
180 #define MMAP_FD (-1)
181 #endif
182
183 #ifndef INIT_MMAP
184 #define INIT_MMAP()
185 #endif
186
187 #ifndef MADV_FREE
188 #define MADV_FREE MADV_DONTNEED
189 #endif
190
191 /* Set when initialization has been done */
192 static unsigned malloc_started;
193
/* Recursion flag for public interface. */
195 static int malloc_active;
196
197 /* Number of free pages we cache */
198 static unsigned malloc_cache = 16;
199
200 /* The offset from pagenumber to index into the page directory */
201 static size_t malloc_origo;
202
203 /* The last index in the page directory we care about */
204 static size_t last_idx;
205
206 /* Pointer to page directory. Allocated "as if with" malloc */
207 static struct pginfo **page_dir;
208
209 /* How many slots in the page directory */
210 static unsigned malloc_ninfo;
211
212 /* Free pages line up here */
213 static struct pgfree free_list;
214
215 /* Abort(), user doesn't handle problems. */
216 static int malloc_abort;
217
218 /* Are we trying to die ? */
219 static int suicide;
220
221 /* always realloc ? */
222 static int malloc_realloc;
223
224 /* pass the kernel a hint on free pages ? */
225 static int malloc_hint = 0;
226
227 /* xmalloc behaviour ? */
228 static int malloc_xmalloc;
229
230 /* sysv behaviour for malloc(0) ? */
231 static int malloc_sysv;
232
233 /* zero fill ? */
234 static int malloc_zero;
235
236 /* junk fill ? */
237 static int malloc_junk;
238
239 #ifdef HAS_UTRACE
240
241 /* utrace ? */
242 static int malloc_utrace;
243
244 struct ut { void *p; size_t s; void *r; };
245
246 #define UTRACE(a, b, c) \
247 if (malloc_utrace) { \
248 struct ut u; \
249 u.p=a; u.s = b; u.r=c; \
250 utrace(UTRACE_LABEL (void *) &u, sizeof u); \
251 }
252 #else /* !HAS_UTRACE */
253 #define UTRACE(a,b,c)
254 #endif /* HAS_UTRACE */
255
256 /* my last break. */
257 static void *malloc_brk;
258
259 /* one location cache for free-list holders */
260 static struct pgfree *px;
261
262 /* compile-time options */
263 char *malloc_options;
264
265 /* Name of the current public function */
266 static char *malloc_func;
267
268 /* Macro for mmap */
269 #define MMAP(size) \
270 mmap(0, (size), PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, \
271 MMAP_FD, (off_t)0);
272
273 /*
274 * Necessary function declarations
275 */
276 static int extend_pgdir(size_t idx);
277 static void *imalloc(size_t size);
278 static void ifree(void *ptr);
279 static void *irealloc(void *ptr, size_t size);
280
281 static void
282 wrterror(char *p)
283 {
284 const char *progname = getprogname();
285 char *q = " error: ";
286 write(STDERR_FILENO, progname, strlen(progname));
287 write(STDERR_FILENO, malloc_func, strlen(malloc_func));
288 write(STDERR_FILENO, q, strlen(q));
289 write(STDERR_FILENO, p, strlen(p));
290 suicide = 1;
291 abort();
292 }
293
294 static void
295 wrtwarning(char *p)
296 {
297 const char *progname = getprogname();
298 char *q = " warning: ";
299 if (malloc_abort)
300 wrterror(p);
301 write(STDERR_FILENO, progname, strlen(progname));
302 write(STDERR_FILENO, malloc_func, strlen(malloc_func));
303 write(STDERR_FILENO, q, strlen(q));
304 write(STDERR_FILENO, p, strlen(p));
305 }
306
307
308 /*
309 * Allocate a number of pages from the OS
310 */
311 static void *
312 map_pages(size_t pages)
313 {
314 caddr_t result, tail;
315
316 result = (caddr_t)pageround((size_t)(u_long)sbrk((intptr_t)0));
317 tail = result + (pages << malloc_pageshift);
318
319 if (brk(tail)) {
320 #ifdef MALLOC_EXTRA_SANITY
321 wrterror("(ES): map_pages fails\n");
322 #endif /* MALLOC_EXTRA_SANITY */
323 return 0;
324 }
325 last_idx = ptr2idx(tail) - 1;
326 malloc_brk = tail;
327
328 if ((last_idx+1) >= malloc_ninfo && !extend_pgdir(last_idx))
329 return 0;;
330
331 return result;
332 }
333
334 /*
335 * Extend page directory
336 */
337 static int
338 extend_pgdir(size_t idx)
339 {
340 struct pginfo **new, **old;
341 size_t newlen, oldlen;
342
343 /* Make it this many pages */
344 newlen = pageround(idx * sizeof *page_dir) + malloc_pagesize;
345
346 /* remember the old mapping size */
347 oldlen = malloc_ninfo * sizeof *page_dir;
348
349 /*
350 * NOTE: we allocate new pages and copy the directory rather than tempt
351 * fate by trying to "grow" the region.. There is nothing to prevent
352 * us from accidently re-mapping space that's been allocated by our caller
353 * via dlopen() or other mmap().
354 *
355 * The copy problem is not too bad, as there is 4K of page index per
356 * 4MB of malloc arena.
357 *
358 * We can totally avoid the copy if we open a file descriptor to associate
359 * the anon mappings with. Then, when we remap the pages at the new
360 * address, the old pages will be "magically" remapped.. But this means
361 * keeping open a "secret" file descriptor.....
362 */
363
364 /* Get new pages */
365 new = (struct pginfo**) MMAP(newlen);
366 if (new == (struct pginfo **)-1)
367 return 0;
368
369 /* Copy the old stuff */
370 memcpy(new, page_dir, oldlen);
371
372 /* register the new size */
373 malloc_ninfo = newlen / sizeof *page_dir;
374
375 /* swap the pointers */
376 old = page_dir;
377 page_dir = new;
378
379 /* Now free the old stuff */
380 munmap(old, oldlen);
381 return 1;
382 }
383
/*
 * Initialize the world: compute page-size parameters, parse the option
 * flags, and allocate the initial page directory.  Called lazily from
 * the first malloc()/realloc().
 */
static void
malloc_init (void)
{
    char *p, b[64];
    int i, j;
    int errnosave;

    /*
     * Compute page-size related variables.
     */
    malloc_pagesize = (size_t)sysconf(_SC_PAGESIZE);
    malloc_pagemask = malloc_pagesize - 1;
    /* The page size is a power of two; find its log2. */
    for (malloc_pageshift = 0;
	 (1UL << malloc_pageshift) != malloc_pagesize;
	 malloc_pageshift++)
	/* nothing */ ;

    INIT_MMAP();

#ifdef MALLOC_EXTRA_SANITY
    malloc_junk = 1;
#endif /* MALLOC_EXTRA_SANITY */

    /*
     * Read option flags from three sources, in increasing priority:
     * the /etc/malloc.conf symlink target, $MALLOC_OPTIONS, and the
     * global malloc_options variable.
     */
    for (i = 0; i < 3; i++) {
	if (i == 0) {
	    /* readlink(2) may clobber errno; preserve it for our caller. */
	    errnosave = errno;
	    j = readlink("/etc/malloc.conf", b, sizeof b - 1);
	    errno = errnosave;
	    if (j <= 0)
		continue;
	    b[j] = '\0';
	    p = b;
	} else if (i == 1) {
	    p = getenv("MALLOC_OPTIONS");
	} else {
	    p = malloc_options;
	}
	/* Lower-case letters clear a flag, upper-case letters set it. */
	for (; p && *p; p++) {
	    switch (*p) {
	    case '>': malloc_cache <<= 1; break;
	    case '<': malloc_cache >>= 1; break;
	    case 'a': malloc_abort = 0; break;
	    case 'A': malloc_abort = 1; break;
	    case 'h': malloc_hint = 0; break;
	    case 'H': malloc_hint = 1; break;
	    case 'r': malloc_realloc = 0; break;
	    case 'R': malloc_realloc = 1; break;
	    case 'j': malloc_junk = 0; break;
	    case 'J': malloc_junk = 1; break;
#ifdef HAS_UTRACE
	    case 'u': malloc_utrace = 0; break;
	    case 'U': malloc_utrace = 1; break;
#endif
	    case 'v': malloc_sysv = 0; break;
	    case 'V': malloc_sysv = 1; break;
	    case 'x': malloc_xmalloc = 0; break;
	    case 'X': malloc_xmalloc = 1; break;
	    case 'z': malloc_zero = 0; break;
	    case 'Z': malloc_zero = 1; break;
	    default:
		/* Warn without aborting, whatever the 'A' flag says. */
		j = malloc_abort;
		malloc_abort = 0;
		wrtwarning("unknown char in MALLOC_OPTIONS\n");
		malloc_abort = j;
		break;
	    }
	}
    }

    UTRACE(0, 0, 0);

    /*
     * We want junk in the entire allocation, and zero only in the part
     * the user asked for.
     */
    if (malloc_zero)
	malloc_junk=1;

    /*
     * If we run with junk (or implicitly from above: zero), we want to
     * force realloc() to get new storage, so we can DTRT with it.
     */
    if (malloc_junk)
	malloc_realloc=1;

    /* Allocate one page for the page directory */
    page_dir = (struct pginfo **) MMAP(malloc_pagesize);

    if (page_dir == (struct pginfo **) -1)
	wrterror("mmap(2) failed, check limits.\n");

    /*
     * We need a maximum of malloc_pageshift buckets, steal these from the
     * front of the page_directory;
     */
    malloc_origo = pageround((size_t)(u_long)sbrk((intptr_t)0))
	>> malloc_pageshift;
    malloc_origo -= malloc_pageshift;

    malloc_ninfo = malloc_pagesize / sizeof *page_dir;

    /* Recalculate the cache size in bytes, and make sure it's nonzero */

    if (!malloc_cache)
	malloc_cache++;

    malloc_cache <<= malloc_pageshift;

    /*
     * This is a nice hack from Kaleb Keithly (kaleb (at) x.org).
     * We can sbrk(2) further back when we keep this on a low address.
     */
    px = (struct pgfree *) imalloc (sizeof *px);

    /* Been here, done that */
    malloc_started++;
}
504
/*
 * Allocate a number of complete pages: satisfy the request from the
 * free-page cache if possible, otherwise extend the break via
 * map_pages().  Returns a page-aligned pointer or 0 on failure.
 */
static void *
malloc_pages(size_t size)
{
    void *p, *delay_free = 0;
    int i;
    struct pgfree *pf;
    size_t idx;

    size = pageround(size);

    p = 0;

    /* Look for free pages before asking for more */
    for(pf = free_list.next; pf; pf = pf->next) {

#ifdef MALLOC_EXTRA_SANITY
	if (pf->size & malloc_pagemask)
	    wrterror("(ES): junk length entry on free_list\n");
	if (!pf->size)
	    wrterror("(ES): zero length entry on free_list\n");
	if (pf->page == pf->end)
	    wrterror("(ES): zero entry on free_list\n");
	if (pf->page > pf->end)
	    wrterror("(ES): sick entry on free_list\n");
	if ((void*)pf->page >= (void*)sbrk(0))
	    wrterror("(ES): entry on free_list past brk\n");
	if (page_dir[ptr2idx(pf->page)] != MALLOC_FREE)
	    wrterror("(ES): non-free first page on free-list\n");
	if (page_dir[ptr2idx(pf->end)-1] != MALLOC_FREE)
	    wrterror("(ES): non-free last page on free-list\n");
#endif /* MALLOC_EXTRA_SANITY */

	if (pf->size < size)
	    continue;

	/* Exact fit: unlink the whole run from the free list. */
	if (pf->size == size) {
	    p = pf->page;
	    if (pf->next)
		pf->next->prev = pf->prev;
	    pf->prev->next = pf->next;
	    /* The holder can't be freed here (we'd recurse); defer it. */
	    delay_free = pf;
	    break;
	}

	/* Larger run: carve our pages off the front. */
	p = pf->page;
	pf->page = (char *)pf->page + size;
	pf->size -= size;
	break;
    }

#ifdef MALLOC_EXTRA_SANITY
    if (p && page_dir[ptr2idx(p)] != MALLOC_FREE)
	wrterror("(ES): allocated non-free page on free-list\n");
#endif /* MALLOC_EXTRA_SANITY */

    /* From here on, size is counted in pages. */
    size >>= malloc_pageshift;

    /* Map new pages */
    if (!p)
	p = map_pages(size);

    if (p) {

	/* Mark first page MALLOC_FIRST, the rest MALLOC_FOLLOW. */
	idx = ptr2idx(p);
	page_dir[idx] = MALLOC_FIRST;
	for (i=1;i<size;i++)
	    page_dir[idx+i] = MALLOC_FOLLOW;

	if (malloc_junk)
	    memset(p, SOME_JUNK, size << malloc_pageshift);
    }

    /* Recycle the unlinked holder into the one-entry px cache, or free it. */
    if (delay_free) {
	if (!px)
	    px = delay_free;
	else
	    ifree(delay_free);
    }

    return p;
}
589
/*
 * Allocate a page of 2^bits-sized fragments and put it at the head of
 * the bucket list for that size (the list heads live in the first
 * malloc_pageshift slots of page_dir; see malloc_init()).
 * Returns 1 on success, 0 on failure.
 */

static __inline__ int
malloc_make_chunks(int bits)
{
    struct pginfo *bp;
    void *pp;
    int i, k, l;

    /* Allocate a new bucket */
    pp = malloc_pages(malloc_pagesize);
    if (!pp)
	return 0;

    /* Find length of admin structure: header plus one bit per chunk. */
    l = (int)offsetof(struct pginfo, bits[0]);
    l += sizeof bp->bits[0] *
	(((malloc_pagesize >> bits)+MALLOC_BITS-1) / MALLOC_BITS);

    /*
     * Don't waste more than two chunks on this: keep the pginfo inside
     * the page itself when the chunks are large enough, otherwise
     * allocate it separately.
     */
    if ((1<<(bits)) <= l+l) {
	bp = (struct pginfo *)pp;
    } else {
	bp = (struct pginfo *)imalloc((size_t)l);
	if (!bp) {
	    ifree(pp);
	    return 0;
	}
    }

    bp->size = (1<<bits);
    bp->shift = bits;
    bp->total = bp->free = malloc_pagesize >> bits;
    bp->page = pp;

    /* set all valid bits in the bitmap */
    k = bp->total;
    i = 0;

    /* Do a bunch at a time */
    for(;k-i >= MALLOC_BITS; i += MALLOC_BITS)
	bp->bits[i / MALLOC_BITS] = ~0U;

    /* ...then the remaining bits one by one. */
    for(; i < k; i++)
	bp->bits[i/MALLOC_BITS] |= 1<<(i%MALLOC_BITS);

    if (bp == bp->page) {
	/* Mark the ones we stole for ourselves (the in-page pginfo) */
	for(i=0;l > 0;i++) {
	    bp->bits[i/MALLOC_BITS] &= ~(1<<(i%MALLOC_BITS));
	    bp->free--;
	    bp->total--;
	    l -= (1 << bits);
	}
    }

    /* MALLOC_LOCK */

    page_dir[ptr2idx(pp)] = bp;

    /* Push this page onto the bucket's list of pages with free chunks. */
    bp->next = page_dir[bits];
    page_dir[bits] = bp;

    /* MALLOC_UNLOCK */

    return 1;
}
659
/*
 * Allocate a fragment (an allocation of at most malloc_maxsize bytes)
 * from a chunk page, creating a new chunk page if the bucket is empty.
 */
static void *
malloc_bytes(size_t size)
{
    size_t i;
    int j;
    u_int u;
    struct pginfo *bp;
    int k;
    u_int *lp;

    /* Don't bother with anything less than this */
    if (size < malloc_minsize)
	size = malloc_minsize;

    /* Find the right bucket: j = ceil(log2(size)) */
    j = 1;
    i = size-1;
    while (i >>= 1)
	j++;

    /* If it's empty, make a page more of that size chunks */
    if (!page_dir[j] && !malloc_make_chunks(j))
	return 0;

    bp = page_dir[j];

    /*
     * Find first word of bitmap which isn't empty.
     * NOTE(review): relies on bp->free != 0 guaranteeing that some
     * bit is set before the bitmap storage ends.
     */
    for (lp = bp->bits; !*lp; lp++)
	;

    /* Find the lowest set bit in that word, and clear it (claim the chunk) */
    u = 1;
    k = 0;
    while (!(*lp & u)) {
	u += u;
	k++;
    }
    *lp ^= u;

    /* If there are no more free, remove page from the bucket's list */
    if (!--bp->free) {
	page_dir[j] = bp->next;
	bp->next = 0;
    }

    /* Adjust to the real offset of that chunk: (word*bits + bit) << shift */
    k += (lp-bp->bits)*MALLOC_BITS;
    k <<= bp->shift;

    if (malloc_junk)
	memset((u_char*)bp->page + k, SOME_JUNK, (size_t)bp->size);

    return (u_char *)bp->page + k;
}
717
718 /*
719 * Allocate a piece of memory
720 */
721 static void *
722 imalloc(size_t size)
723 {
724 void *result;
725
726 if (suicide)
727 abort();
728
729 if ((size + malloc_pagesize) < size) /* Check for overflow */
730 result = 0;
731 else if (size <= malloc_maxsize)
732 result = malloc_bytes(size);
733 else
734 result = malloc_pages(size);
735
736 if (malloc_abort && !result)
737 wrterror("allocation failed.\n");
738
739 if (malloc_zero && result)
740 memset(result, 0, size);
741
742 return result;
743 }
744
/*
 * Change the size of an allocation.  Validates the pointer against the
 * page directory, avoids work when the existing storage already fits
 * (unless the 'R' option forces a fresh copy), otherwise allocates,
 * copies the lesser of the two sizes, and frees the old storage.
 */
static void *
irealloc(void *ptr, size_t size)
{
    void *p;
    size_t osize, idx;
    struct pginfo **mp;
    size_t i;

    if (suicide)
	abort();

    idx = ptr2idx(ptr);

    /* The first malloc_pageshift slots are bucket heads, not pages. */
    if (idx < malloc_pageshift) {
	wrtwarning("junk pointer, too low to make sense.\n");
	return 0;
    }

    if (idx > last_idx) {
	wrtwarning("junk pointer, too high to make sense.\n");
	return 0;
    }

    mp = &page_dir[idx];

    if (*mp == MALLOC_FIRST) {			/* Page allocation */

	/* Check the pointer */
	if ((size_t)(u_long)ptr & malloc_pagemask) {
	    wrtwarning("modified (page-) pointer.\n");
	    return 0;
	}

	/* Find the size in bytes by counting the MALLOC_FOLLOW pages. */
	for (osize = malloc_pagesize; *++mp == MALLOC_FOLLOW;)
	    osize += malloc_pagesize;

        if (!malloc_realloc &&			/* unless we have to, */
	  size <= osize &&			/* .. or are too small, */
	  size > (osize - malloc_pagesize)) {	/* .. or can free a page, */
	    return ptr;				/* don't do anything. */
	}

    } else if (*mp >= MALLOC_MAGIC) {		/* Chunk allocation */

	/* Check the pointer for sane values: must be chunk-aligned. */
	if (((size_t)(u_long)ptr & ((*mp)->size-1))) {
	    wrtwarning("modified (chunk-) pointer.\n");
	    return 0;
	}

	/* Find the chunk index in the page */
	i = ((size_t)(u_long)ptr & malloc_pagemask) >> (*mp)->shift;

	/* Verify that it isn't a free chunk already */
	if ((*mp)->bits[i/MALLOC_BITS] & (1<<(i%MALLOC_BITS))) {
	    wrtwarning("chunk is already free.\n");
	    return 0;
	}

	osize = (*mp)->size;

	if (!malloc_realloc &&		/* Unless we have to, */
	  size < osize &&		/* ..or are too small, */
	  (size > osize/2 ||		/* ..or could use a smaller size, */
	  osize == malloc_minsize)) {	/* ..(if there is one) */
	    return ptr;			/* ..Don't do anything */
	}

    } else {
	wrtwarning("pointer to wrong page.\n");
	return 0;
    }

    p = imalloc(size);

    if (p) {
	/* copy the lesser of the two sizes, and free the old one */
	if (!size || !osize)
	    ;
	else if (osize < size)
	    memcpy(p, ptr, osize);
	else
	    memcpy(p, ptr, size);
	ifree(ptr);
    }
    return p;
}
836
837 /*
838 * Free a sequence of pages
839 */
840
841 static __inline__ void
842 free_pages(void *ptr, size_t idx, struct pginfo *info)
843 {
844 size_t i;
845 struct pgfree *pf, *pt=0;
846 size_t l;
847 void *tail;
848
849 if (info == MALLOC_FREE) {
850 wrtwarning("page is already free.\n");
851 return;
852 }
853
854 if (info != MALLOC_FIRST) {
855 wrtwarning("pointer to wrong page.\n");
856 return;
857 }
858
859 if ((size_t)(u_long)ptr & malloc_pagemask) {
860 wrtwarning("modified (page-) pointer.\n");
861 return;
862 }
863
864 /* Count how many pages and mark them free at the same time */
865 page_dir[idx] = MALLOC_FREE;
866 for (i = 1; page_dir[idx+i] == MALLOC_FOLLOW; i++)
867 page_dir[idx + i] = MALLOC_FREE;
868
869 l = i << malloc_pageshift;
870
871 if (malloc_junk)
872 memset(ptr, SOME_JUNK, l);
873
874 if (malloc_hint)
875 madvise(ptr, l, MADV_FREE);
876
877 tail = (char *)ptr+l;
878
879 /* add to free-list */
880 if (!px)
881 px = imalloc(sizeof *pt); /* This cannot fail... */
882 px->page = ptr;
883 px->end = tail;
884 px->size = l;
885 if (!free_list.next) {
886
887 /* Nothing on free list, put this at head */
888 px->next = free_list.next;
889 px->prev = &free_list;
890 free_list.next = px;
891 pf = px;
892 px = 0;
893
894 } else {
895
896 /* Find the right spot, leave pf pointing to the modified entry. */
897 tail = (char *)ptr+l;
898
899 for(pf = free_list.next; pf->end < ptr && pf->next; pf = pf->next)
900 ; /* Race ahead here */
901
902 if (pf->page > tail) {
903 /* Insert before entry */
904 px->next = pf;
905 px->prev = pf->prev;
906 pf->prev = px;
907 px->prev->next = px;
908 pf = px;
909 px = 0;
910 } else if (pf->end == ptr ) {
911 /* Append to the previous entry */
912 pf->end = (char *)pf->end + l;
913 pf->size += l;
914 if (pf->next && pf->end == pf->next->page ) {
915 /* And collapse the next too. */
916 pt = pf->next;
917 pf->end = pt->end;
918 pf->size += pt->size;
919 pf->next = pt->next;
920 if (pf->next)
921 pf->next->prev = pf;
922 }
923 } else if (pf->page == tail) {
924 /* Prepend to entry */
925 pf->size += l;
926 pf->page = ptr;
927 } else if (!pf->next) {
928 /* Append at tail of chain */
929 px->next = 0;
930 px->prev = pf;
931 pf->next = px;
932 pf = px;
933 px = 0;
934 } else {
935 wrterror("freelist is destroyed.\n");
936 }
937 }
938
939 /* Return something to OS ? */
940 if (!pf->next && /* If we're the last one, */
941 pf->size > malloc_cache && /* ..and the cache is full, */
942 pf->end == malloc_brk && /* ..and none behind us, */
943 malloc_brk == sbrk((intptr_t)0)) { /* ..and it's OK to do... */
944
945 /*
946 * Keep the cache intact. Notice that the '>' above guarantees that
947 * the pf will always have at least one page afterwards.
948 */
949 pf->end = (char *)pf->page + malloc_cache;
950 pf->size = malloc_cache;
951
952 brk(pf->end);
953 malloc_brk = pf->end;
954
955 idx = ptr2idx(pf->end);
956 last_idx = idx - 1;
957
958 for(i=idx;i <= last_idx;)
959 page_dir[i++] = MALLOC_NOT_MINE;
960
961 /* XXX: We could realloc/shrink the pagedir here I guess. */
962 }
963 if (pt)
964 ifree(pt);
965 }
966
967 /*
968 * Free a chunk, and possibly the page it's on, if the page becomes empty.
969 */
970
971 static __inline__ void
972 free_bytes(void *ptr, size_t idx, struct pginfo *info)
973 {
974 size_t i;
975 struct pginfo **mp;
976 void *vp;
977
978 /* Find the chunk number on the page */
979 i = ((size_t)(u_long)ptr & malloc_pagemask) >> info->shift;
980
981 if (((size_t)(u_long)ptr & (info->size-1))) {
982 wrtwarning("modified (chunk-) pointer.\n");
983 return;
984 }
985
986 if (info->bits[i/MALLOC_BITS] & (1<<(i%MALLOC_BITS))) {
987 wrtwarning("chunk is already free.\n");
988 return;
989 }
990
991 if (malloc_junk)
992 memset(ptr, SOME_JUNK, (size_t)info->size);
993
994 info->bits[i/MALLOC_BITS] |= 1<<(i%MALLOC_BITS);
995 info->free++;
996
997 mp = page_dir + info->shift;
998
999 if (info->free == 1) {
1000
1001 /* Page became non-full */
1002
1003 mp = page_dir + info->shift;
1004 /* Insert in address order */
1005 while (*mp && (*mp)->next && (*mp)->next->page < info->page)
1006 mp = &(*mp)->next;
1007 info->next = *mp;
1008 *mp = info;
1009 return;
1010 }
1011
1012 if (info->free != info->total)
1013 return;
1014
1015 /* Find & remove this page in the queue */
1016 while (*mp != info) {
1017 mp = &((*mp)->next);
1018 #ifdef MALLOC_EXTRA_SANITY
1019 if (!*mp)
1020 wrterror("(ES): Not on queue\n");
1021 #endif /* MALLOC_EXTRA_SANITY */
1022 }
1023 *mp = info->next;
1024
1025 /* Free the page & the info structure if need be */
1026 page_dir[idx] = MALLOC_FIRST;
1027 vp = info->page; /* Order is important ! */
1028 if(vp != (void*)info)
1029 ifree(info);
1030 ifree(vp);
1031 }
1032
1033 static void
1034 ifree(void *ptr)
1035 {
1036 struct pginfo *info;
1037 size_t idx;
1038
1039 /* This is legal */
1040 if (!ptr)
1041 return;
1042
1043 if (!malloc_started) {
1044 wrtwarning("malloc() has never been called.\n");
1045 return;
1046 }
1047
1048 /* If we're already sinking, don't make matters any worse. */
1049 if (suicide)
1050 return;
1051
1052 idx = ptr2idx(ptr);
1053
1054 if (idx < malloc_pageshift) {
1055 wrtwarning("junk pointer, too low to make sense.\n");
1056 return;
1057 }
1058
1059 if (idx > last_idx) {
1060 wrtwarning("junk pointer, too high to make sense.\n");
1061 return;
1062 }
1063
1064 info = page_dir[idx];
1065
1066 if (info < MALLOC_MAGIC)
1067 free_pages(ptr, idx, info);
1068 else
1069 free_bytes(ptr, idx, info);
1070 return;
1071 }
1072
1073 /*
1074 * These are the public exported interface routines.
1075 */
1076
1077
1078 void *
1079 malloc(size_t size)
1080 {
1081 register void *r;
1082
1083 THREAD_LOCK();
1084 malloc_func = " in malloc():";
1085 if (malloc_active++) {
1086 wrtwarning("recursive call.\n");
1087 malloc_active--;
1088 return (0);
1089 }
1090 if (!malloc_started)
1091 malloc_init();
1092 if (malloc_sysv && !size)
1093 r = 0;
1094 else
1095 r = imalloc(size);
1096 UTRACE(0, size, r);
1097 malloc_active--;
1098 THREAD_UNLOCK();
1099 if (r == NULL && (size != 0 || !malloc_sysv)) {
1100 if (malloc_xmalloc)
1101 wrterror("out of memory.\n");
1102 errno = ENOMEM;
1103 }
1104 return (r);
1105 }
1106
1107 void
1108 free(void *ptr)
1109 {
1110 THREAD_LOCK();
1111 malloc_func = " in free():";
1112 if (malloc_active++) {
1113 wrtwarning("recursive call.\n");
1114 malloc_active--;
1115 return;
1116 } else {
1117 ifree(ptr);
1118 UTRACE(ptr, 0, 0);
1119 }
1120 malloc_active--;
1121 THREAD_UNLOCK();
1122 return;
1123 }
1124
1125 void *
1126 realloc(void *ptr, size_t size)
1127 {
1128 register void *r;
1129
1130 THREAD_LOCK();
1131 malloc_func = " in realloc():";
1132 if (malloc_active++) {
1133 wrtwarning("recursive call.\n");
1134 malloc_active--;
1135 return (0);
1136 }
1137 if (ptr && !malloc_started) {
1138 wrtwarning("malloc() has never been called.\n");
1139 ptr = 0;
1140 }
1141 if (!malloc_started)
1142 malloc_init();
1143 if (malloc_sysv && !size) {
1144 ifree(ptr);
1145 r = 0;
1146 } else if (!ptr) {
1147 r = imalloc(size);
1148 } else {
1149 r = irealloc(ptr, size);
1150 }
1151 UTRACE(ptr, size, r);
1152 malloc_active--;
1153 THREAD_UNLOCK();
1154 if (r == NULL && (size != 0 || !malloc_sysv)) {
1155 if (malloc_xmalloc)
1156 wrterror("out of memory.\n");
1157 errno = ENOMEM;
1158 }
1159 return (r);
1160 }
1161