/* $NetBSD: malloc.c,v 1.28 2000/01/23 00:49:41 mycroft Exp $ */

/*
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk (at) FreeBSD.ORG> wrote this file. As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * From FreeBSD: malloc.c,v 1.43 1998/09/30 06:13:59 jb
 *
 */

/*
 * Defining MALLOC_EXTRA_SANITY will enable extra checks which are related
 * to internal conditions and consistency in malloc.c. This has a
 * noticeable runtime performance hit, and generally will not do you
 * any good unless you fiddle with the internals of malloc or want
 * to catch random pointer corruption as early as possible.
 */
#ifndef MALLOC_EXTRA_SANITY
#undef MALLOC_EXTRA_SANITY
#endif

/*
 * What to use for Junk. This is the byte value we use to fill with
 * when the 'J' option is enabled.
 */
#define SOME_JUNK 0xd0 /* as in "Duh" :-) */

/*
 * The basic parameters you can tweak.
 *
 * malloc_minsize   minimum size of an allocation in bytes.
 *                  If this is too small it's too much work
 *                  to manage them. This is also the smallest
 *                  unit of alignment used for the storage
 *                  returned by malloc/realloc.
 *
 */

#if defined(__FreeBSD__)
# if defined(__i386__)
#  define malloc_minsize 16U
# endif
# if defined(__alpha__)
#  define malloc_minsize 16U
# endif
# if !defined(__NETBSD_SYSCALLS)
#  define HAS_UTRACE
# endif
/*
 * Make malloc/free/realloc thread-safe in libc for use with
 * kernel threads.
 */
# include "libc_private.h"
# include "spinlock.h"
static spinlock_t thread_lock = _SPINLOCK_INITIALIZER;
# define THREAD_LOCK()   if (__isthreaded) _SPINLOCK(&thread_lock);
# define THREAD_UNLOCK() if (__isthreaded) _SPINUNLOCK(&thread_lock);
#endif /* __FreeBSD__ */

#if defined(__NetBSD__)
# define malloc_minsize 16U
#endif /* __NetBSD__ */

#if defined(__sparc__) && defined(sun)
# define malloc_minsize 16U
# define MAP_ANON (0)
static int fdzero;
# define MMAP_FD fdzero
# define INIT_MMAP() \
    { if ((fdzero = open("/dev/zero", O_RDWR, 0000)) == -1) \
        wrterror("open of /dev/zero"); }
#endif /* __sparc__ */

/* Insert your combination here... */
#if defined(__FOOCPU__) && defined(__BAROS__)
# define malloc_minsize 16U
#endif /* __FOOCPU__ && __BAROS__ */


/*
 * No user serviceable parts behind this point.
 */
#include "namespace.h"
#include <sys/types.h>
#include <sys/mman.h>
#include <errno.h>
#include <fcntl.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/*
 * This structure describes a page worth of chunks.
 */

struct pginfo {
    struct pginfo *next;  /* next on the free list */
    void *page;           /* Pointer to the page */
    u_short size;         /* size of this page's chunks */
    u_short shift;        /* How far to shift for this size chunks */
    u_short free;         /* How many free chunks */
    u_short total;        /* How many chunks */
    u_int bits[1];        /* Which chunks are free */
};
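
/*
 * Note on bits[]: it is declared with one element but is really a
 * variable-length bitmap; malloc_make_chunks() allocates
 * offsetof(struct pginfo, bits[0]) plus one u_int per MALLOC_BITS
 * chunks.  For example (assuming 4 KB pages and 16-byte chunks), a
 * page holds 4096 >> 4 = 256 chunks, so the bitmap needs 256 / 32 = 8
 * u_ints, one bit per chunk, where a set bit means "this chunk is
 * free".
 */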

/*
 * This structure describes a number of free pages.
 */

struct pgfree {
    struct pgfree *next;  /* next run of free pages */
    struct pgfree *prev;  /* prev run of free pages */
    void *page;           /* pointer to free pages */
    void *end;            /* pointer to end of free pages */
    size_t size;          /* number of bytes free */
};

/*
 * How many bits per u_int in the bitmap.
 * Change only if not 8 bits/byte
 */
#define MALLOC_BITS (8*sizeof(u_int))

/*
 * Magic values to put in the page_directory
 */
#define MALLOC_NOT_MINE ((struct pginfo*) 0)
#define MALLOC_FREE     ((struct pginfo*) 1)
#define MALLOC_FIRST    ((struct pginfo*) 2)
#define MALLOC_FOLLOW   ((struct pginfo*) 3)
#define MALLOC_MAGIC    ((struct pginfo*) 4)
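
/*
 * A page_dir[] entry smaller than MALLOC_MAGIC is one of the sentinel
 * values above rather than a real pointer: the page is not ours, is on
 * the free list, or is the first/a following page of a multi-page
 * allocation.  Any entry >= MALLOC_MAGIC is a genuine struct pginfo
 * pointer describing a page of chunks; ifree() relies on this ordering
 * when it compares "info < MALLOC_MAGIC".
 */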

/*
 * Page size related parameters, computed at run-time.
 */
static size_t malloc_pagesize;
static size_t malloc_pageshift;
static size_t malloc_pagemask;

#ifndef malloc_minsize
#define malloc_minsize 16U
#endif

#ifndef malloc_maxsize
#define malloc_maxsize ((malloc_pagesize)>>1)
#endif

#define pageround(foo) (((foo) + (malloc_pagemask))&(~(malloc_pagemask)))
#define ptr2idx(foo) (((size_t)(foo) >> malloc_pageshift)-malloc_origo)
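
/*
 * Example (assuming 4 KB pages, so malloc_pagemask == 0xfff and
 * malloc_pageshift == 12): pageround(0x1234) rounds up to 0x2000, and
 * ptr2idx(p) turns an address into its slot in page_dir[] by taking
 * the page number (p >> 12) and subtracting malloc_origo, the offset
 * from page number to page directory index.
 */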

#ifndef THREAD_LOCK
#define THREAD_LOCK()
#endif

#ifndef THREAD_UNLOCK
#define THREAD_UNLOCK()
#endif

#ifndef MMAP_FD
#define MMAP_FD (-1)
#endif

#ifndef INIT_MMAP
#define INIT_MMAP()
#endif

#ifndef MADV_FREE
#define MADV_FREE MADV_DONTNEED
#endif

/* Set when initialization has been done */
static unsigned malloc_started;

/* Recursion flag for public interface. */
static int malloc_active;

/* Number of free pages we cache */
static unsigned malloc_cache = 16;

/* The offset from pagenumber to index into the page directory */
static size_t malloc_origo;

/* The last index in the page directory we care about */
static size_t last_idx;

/* Pointer to page directory. Allocated "as if with" malloc */
static struct pginfo **page_dir;

/* How many slots in the page directory */
static unsigned malloc_ninfo;

/* Free pages line up here */
static struct pgfree free_list;

/* Abort(), user doesn't handle problems. */
static int malloc_abort;

/* Are we trying to die ? */
static int suicide;

/* always realloc ? */
static int malloc_realloc;

/* pass the kernel a hint on free pages ? */
static int malloc_hint = 1;

/* xmalloc behaviour ? */
static int malloc_xmalloc;

/* sysv behaviour for malloc(0) ? */
static int malloc_sysv;

/* zero fill ? */
static int malloc_zero;

/* junk fill ? */
static int malloc_junk;

#ifdef HAS_UTRACE

/* utrace ? */
static int malloc_utrace;

struct ut { void *p; size_t s; void *r; };

void utrace __P((struct ut *, int));

#define UTRACE(a, b, c) \
    if (malloc_utrace) \
        {struct ut u; u.p=a; u.s = b; u.r=c; utrace(&u, sizeof u);}
#else /* !HAS_UTRACE */
#define UTRACE(a,b,c)
#endif /* HAS_UTRACE */

/* my last break. */
static void *malloc_brk;

/* one location cache for free-list holders */
static struct pgfree *px;

/* compile-time options */
char *malloc_options;

/* Name of the current public function */
static char *malloc_func;

/* Macro for mmap */
#define MMAP(size) \
    mmap(0, (size), PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, \
        MMAP_FD, (off_t)0);

/*
 * Necessary function declarations
 */
static int extend_pgdir(size_t idx);
static void *imalloc(size_t size);
static void ifree(void *ptr);
static void *irealloc(void *ptr, size_t size);

extern char *__progname;

static void
wrterror(char *p)
{
    char *q = " error: ";
    write(STDERR_FILENO, __progname, strlen(__progname));
    write(STDERR_FILENO, malloc_func, strlen(malloc_func));
    write(STDERR_FILENO, q, strlen(q));
    write(STDERR_FILENO, p, strlen(p));
    suicide = 1;
    abort();
}

static void
wrtwarning(char *p)
{
    char *q = " warning: ";
    if (malloc_abort)
        wrterror(p);
    write(STDERR_FILENO, __progname, strlen(__progname));
    write(STDERR_FILENO, malloc_func, strlen(malloc_func));
    write(STDERR_FILENO, q, strlen(q));
    write(STDERR_FILENO, p, strlen(p));
}


/*
 * Allocate a number of pages from the OS
 */
static void *
map_pages(size_t pages)
{
    caddr_t result, tail;

    result = (caddr_t)pageround((size_t)sbrk(0));
    tail = result + (pages << malloc_pageshift);

    if (brk(tail)) {
#ifdef MALLOC_EXTRA_SANITY
        wrterror("(ES): map_pages fails\n");
#endif /* MALLOC_EXTRA_SANITY */
        return 0;
    }

    last_idx = ptr2idx(tail) - 1;
    malloc_brk = tail;

    if ((last_idx+1) >= malloc_ninfo && !extend_pgdir(last_idx))
        return 0;

    return result;
}

/*
 * Extend page directory
 */
static int
extend_pgdir(size_t idx)
{
    struct pginfo **new, **old;
    size_t newlen, oldlen;

    /* Make it this many pages */
    newlen = pageround(idx * sizeof *page_dir) + malloc_pagesize;

    /* remember the old mapping size */
    oldlen = malloc_ninfo * sizeof *page_dir;

    /*
     * NOTE: we allocate new pages and copy the directory rather than tempt
     * fate by trying to "grow" the region.. There is nothing to prevent
     * us from accidentally re-mapping space that's been allocated by our caller
     * via dlopen() or other mmap().
     *
     * The copy problem is not too bad, as there is 4K of page index per
     * 4MB of malloc arena.
     *
     * We can totally avoid the copy if we open a file descriptor to associate
     * the anon mappings with. Then, when we remap the pages at the new
     * address, the old pages will be "magically" remapped.. But this means
     * keeping open a "secret" file descriptor.....
     */
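
    /*
     * The "4K per 4MB" figure assumes 4 KB pages and 4-byte pointers:
     * one directory page then holds 4096 / 4 = 1024 pginfo pointers,
     * i.e. it indexes 1024 arena pages = 4 MB.  With 8-byte pointers
     * the ratio is 2 MB of arena per directory page instead.
     */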

    /* Get new pages */
    new = (struct pginfo**) MMAP(newlen);
    if (new == (struct pginfo **)-1)
        return 0;

    /* Copy the old stuff */
    memcpy(new, page_dir, oldlen);

    /* register the new size */
    malloc_ninfo = newlen / sizeof *page_dir;

    /* swap the pointers */
    old = page_dir;
    page_dir = new;

    /* Now free the old stuff */
    munmap(old, oldlen);
    return 1;
}

/*
 * Initialize the world
 */
static void
malloc_init (void)
{
    char *p, b[64];
    int i, j;
    int errnosave;

    /*
     * Compute page-size related variables.
     */
    malloc_pagesize = (size_t)sysconf(_SC_PAGESIZE);
    malloc_pagemask = malloc_pagesize - 1;
    for (malloc_pageshift = 0;
         (1UL << malloc_pageshift) != malloc_pagesize;
         malloc_pageshift++)
        /* nothing */ ;

    INIT_MMAP();

#ifdef MALLOC_EXTRA_SANITY
    malloc_junk = 1;
#endif /* MALLOC_EXTRA_SANITY */

    for (i = 0; i < 3; i++) {
        if (i == 0) {
            errnosave = errno;
            j = readlink("/etc/malloc.conf", b, sizeof b - 1);
            errno = errnosave;
            if (j <= 0)
                continue;
            b[j] = '\0';
            p = b;
        } else if (i == 1) {
            p = getenv("MALLOC_OPTIONS");
        } else {
            p = malloc_options;
        }
        for (; p && *p; p++) {
            switch (*p) {
            case '>': malloc_cache <<= 1; break;
            case '<': malloc_cache >>= 1; break;
            case 'a': malloc_abort = 0; break;
            case 'A': malloc_abort = 1; break;
            case 'h': malloc_hint = 0; break;
            case 'H': malloc_hint = 1; break;
            case 'r': malloc_realloc = 0; break;
            case 'R': malloc_realloc = 1; break;
            case 'j': malloc_junk = 0; break;
            case 'J': malloc_junk = 1; break;
#ifdef HAS_UTRACE
            case 'u': malloc_utrace = 0; break;
            case 'U': malloc_utrace = 1; break;
#endif
            case 'v': malloc_sysv = 0; break;
            case 'V': malloc_sysv = 1; break;
            case 'x': malloc_xmalloc = 0; break;
            case 'X': malloc_xmalloc = 1; break;
            case 'z': malloc_zero = 0; break;
            case 'Z': malloc_zero = 1; break;
            default:
                j = malloc_abort;
                malloc_abort = 0;
                wrtwarning("unknown char in MALLOC_OPTIONS\n");
                malloc_abort = j;
                break;
            }
        }
    }
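
    /*
     * Options are scanned from three places, in this order: the
     * /etc/malloc.conf symlink (read with readlink(), so its target
     * string is parsed as options), the MALLOC_OPTIONS environment
     * variable, and the global malloc_options.  For example,
     *
     *     ln -s 'AJ' /etc/malloc.conf
     * or
     *     MALLOC_OPTIONS=AJ ./prog
     *
     * turns warnings into aborts ('A') and enables junk filling ('J').
     */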

    UTRACE(0, 0, 0);

    /*
     * We want junk in the entire allocation, and zero only in the part
     * the user asked for.
     */
    if (malloc_zero)
        malloc_junk = 1;

    /*
     * If we run with junk (or implicitly from above: zero), we want to
     * force realloc() to get new storage, so we can DTRT with it.
     */
    if (malloc_junk)
        malloc_realloc = 1;

    /* Allocate one page for the page directory */
    page_dir = (struct pginfo **) MMAP(malloc_pagesize);

    if (page_dir == (struct pginfo **) -1)
        wrterror("mmap(2) failed, check limits.\n");

    /*
     * We need a maximum of malloc_pageshift buckets, steal these from the
     * front of the page_directory;
     */
    malloc_origo = pageround((size_t)sbrk(0)) >> malloc_pageshift;
    malloc_origo -= malloc_pageshift;

    malloc_ninfo = malloc_pagesize / sizeof *page_dir;

    /* Recalculate the cache size in bytes, and make sure it's nonzero */

    if (!malloc_cache)
        malloc_cache++;

    malloc_cache <<= malloc_pageshift;

    /*
     * This is a nice hack from Kaleb Keithley (kaleb (at) x.org).
     * We can sbrk(2) further back when we keep this on a low address.
     */
    px = (struct pgfree *) imalloc (sizeof *px);

    /* Been here, done that */
    malloc_started++;
}

/*
 * Allocate a number of complete pages
 */
static void *
malloc_pages(size_t size)
{
    void *p, *delay_free = 0;
    int i;
    struct pgfree *pf;
    size_t idx;

    size = pageround(size);

    p = 0;

    /* Look for free pages before asking for more */
    for (pf = free_list.next; pf; pf = pf->next) {

#ifdef MALLOC_EXTRA_SANITY
        if (pf->size & malloc_pagemask)
            wrterror("(ES): junk length entry on free_list\n");
        if (!pf->size)
            wrterror("(ES): zero length entry on free_list\n");
        if (pf->page == pf->end)
            wrterror("(ES): zero entry on free_list\n");
        if (pf->page > pf->end)
            wrterror("(ES): sick entry on free_list\n");
        if ((void*)pf->page >= (void*)sbrk(0))
            wrterror("(ES): entry on free_list past brk\n");
        if (page_dir[ptr2idx(pf->page)] != MALLOC_FREE)
            wrterror("(ES): non-free first page on free-list\n");
        if (page_dir[ptr2idx(pf->end)-1] != MALLOC_FREE)
            wrterror("(ES): non-free last page on free-list\n");
#endif /* MALLOC_EXTRA_SANITY */

        if (pf->size < size)
            continue;

        if (pf->size == size) {
            p = pf->page;
            if (pf->next)
                pf->next->prev = pf->prev;
            pf->prev->next = pf->next;
            delay_free = pf;
            break;
        }

        p = pf->page;
        pf->page = (char *)pf->page + size;
        pf->size -= size;
        break;
    }

#ifdef MALLOC_EXTRA_SANITY
    if (p && page_dir[ptr2idx(p)] != MALLOC_FREE)
        wrterror("(ES): allocated non-free page on free-list\n");
#endif /* MALLOC_EXTRA_SANITY */

    size >>= malloc_pageshift;

    /* Map new pages */
    if (!p)
        p = map_pages(size);

    if (p) {

        idx = ptr2idx(p);
        page_dir[idx] = MALLOC_FIRST;
        for (i=1; i<size; i++)
            page_dir[idx+i] = MALLOC_FOLLOW;

        if (malloc_junk)
            memset(p, SOME_JUNK, size << malloc_pageshift);
    }

    if (delay_free) {
        if (!px)
            px = delay_free;
        else
            ifree(delay_free);
    }

    return p;
}

/*
 * Allocate a page of fragments
 */

static __inline__ int
malloc_make_chunks(int bits)
{
    struct pginfo *bp;
    void *pp;
    int i, k;
    size_t l;

    /* Allocate a new bucket */
    pp = malloc_pages(malloc_pagesize);
    if (!pp)
        return 0;

    /* Find length of admin structure */
    l = offsetof(struct pginfo, bits[0]);
    l += sizeof bp->bits[0] *
        (((malloc_pagesize >> bits)+MALLOC_BITS-1) / MALLOC_BITS);

    /* Don't waste more than two chunks on this */
    if ((1<<(bits)) <= l+l) {
        bp = (struct pginfo *)pp;
    } else {
        bp = (struct pginfo *)imalloc(l);
        if (!bp) {
            ifree(pp);
            return 0;
        }
    }
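
    /*
     * Example (assuming 4 KB pages, 32-bit pointers and 32-bit u_int):
     * for bits == 4 (16-byte chunks) the admin structure is 16 bytes of
     * header plus 256/32 = 8 u_ints of bitmap, so l == 48.  Since one
     * chunk (16) is <= 2*l (96), the pginfo lives at the start of the
     * page itself and the chunks it occupies are marked used below; for
     * larger chunk sizes it is allocated separately via imalloc().
     */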

    bp->size = (1<<bits);
    bp->shift = bits;
    bp->total = bp->free = malloc_pagesize >> bits;
    bp->page = pp;

    /* set all valid bits in the bitmap */
    k = bp->total;
    i = 0;

    /* Do a bunch at a time */
    for (; k-i >= MALLOC_BITS; i += MALLOC_BITS)
        bp->bits[i / MALLOC_BITS] = ~0U;

    for (; i < k; i++)
        bp->bits[i/MALLOC_BITS] |= 1<<(i%MALLOC_BITS);

    if (bp == bp->page) {
        /* Mark the ones we stole for ourselves */
        for (i=0; l > 0; i++) {
            bp->bits[i/MALLOC_BITS] &= ~(1<<(i%MALLOC_BITS));
            bp->free--;
            bp->total--;
            l -= (1 << bits);
        }
    }

    /* MALLOC_LOCK */

    page_dir[ptr2idx(pp)] = bp;

    bp->next = page_dir[bits];
    page_dir[bits] = bp;

    /* MALLOC_UNLOCK */

    return 1;
}

/*
 * Allocate a fragment
 */
static void *
malloc_bytes(size_t size)
{
    size_t i;
    int j;
    u_int u;
    struct pginfo *bp;
    int k;
    u_int *lp;

    /* Don't bother with anything less than this */
    if (size < malloc_minsize)
        size = malloc_minsize;

    /* Find the right bucket */
    j = 1;
    i = size-1;
    while (i >>= 1)
        j++;
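
    /*
     * At this point 1 << j is the smallest power of two >= size, so the
     * request is served from the bucket of (1 << j)-byte chunks rooted
     * at page_dir[j].  For example, size == 100 gives i == 99, the loop
     * increments j six times, and j ends up as 7, i.e. a 128-byte chunk.
     */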

    /* If it's empty, make a page more of that size chunks */
    if (!page_dir[j] && !malloc_make_chunks(j))
        return 0;

    bp = page_dir[j];

    /* Find first word of bitmap which isn't empty */
    for (lp = bp->bits; !*lp; lp++)
        ;

    /* Find that bit, and tweak it */
    u = 1;
    k = 0;
    while (!(*lp & u)) {
        u += u;
        k++;
    }
    *lp ^= u;

    /* If there are no more free, remove from free-list */
    if (!--bp->free) {
        page_dir[j] = bp->next;
        bp->next = 0;
    }

    /* Adjust to the real offset of that chunk */
    k += (lp-bp->bits)*MALLOC_BITS;
    k <<= bp->shift;
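
    /*
     * k is now the byte offset of the chunk within the page: the word
     * index times MALLOC_BITS plus the bit position gives the chunk
     * number, and shifting by bp->shift converts that to a byte offset
     * (chunk number times chunk size).  For example, in a 16-byte
     * bucket, word 2 / bit 5 of the bitmap is chunk 2*32 + 5 = 69,
     * i.e. byte offset 69 * 16 = 1104 into the page.
     */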

    if (malloc_junk)
        memset((u_char*)bp->page + k, SOME_JUNK, (size_t)bp->size);

    return (u_char *)bp->page + k;
}

/*
 * Allocate a piece of memory
 */
static void *
imalloc(size_t size)
{
    void *result;

    if (suicide)
        abort();

    if ((size + malloc_pagesize) < size)        /* Check for overflow */
        result = 0;
    else if (size <= malloc_maxsize)
        result = malloc_bytes(size);
    else
        result = malloc_pages(size);

    if (malloc_abort && !result)
        wrterror("allocation failed.\n");

    if (malloc_zero && result)
        memset(result, 0, size);

    return result;
}

/*
 * Change the size of an allocation.
 */
static void *
irealloc(void *ptr, size_t size)
{
    void *p;
    size_t osize, idx;
    struct pginfo **mp;
    size_t i;

    if (suicide)
        abort();

    idx = ptr2idx(ptr);

    if (idx < malloc_pageshift) {
        wrtwarning("junk pointer, too low to make sense.\n");
        return 0;
    }

    if (idx > last_idx) {
        wrtwarning("junk pointer, too high to make sense.\n");
        return 0;
    }

    mp = &page_dir[idx];

    if (*mp == MALLOC_FIRST) {                  /* Page allocation */

        /* Check the pointer */
        if ((size_t)ptr & malloc_pagemask) {
            wrtwarning("modified (page-) pointer.\n");
            return 0;
        }

        /* Find the size in bytes */
        for (osize = malloc_pagesize; *++mp == MALLOC_FOLLOW;)
            osize += malloc_pagesize;

        if (!malloc_realloc &&                  /* unless we have to, */
            size <= osize &&                    /* .. or are too small, */
            size > (osize - malloc_pagesize)) { /* .. or can free a page, */
            return ptr;                         /* don't do anything. */
        }

    } else if (*mp >= MALLOC_MAGIC) {           /* Chunk allocation */

        /* Check the pointer for sane values */
        if (((size_t)ptr & ((*mp)->size-1))) {
            wrtwarning("modified (chunk-) pointer.\n");
            return 0;
        }

        /* Find the chunk index in the page */
        i = ((size_t)ptr & malloc_pagemask) >> (*mp)->shift;

        /* Verify that it isn't a free chunk already */
        if ((*mp)->bits[i/MALLOC_BITS] & (1<<(i%MALLOC_BITS))) {
            wrtwarning("chunk is already free.\n");
            return 0;
        }

        osize = (*mp)->size;

        if (!malloc_realloc &&                  /* Unless we have to, */
            size < osize &&                     /* ..or are too small, */
            (size > osize/2 ||                  /* ..or could use a smaller size, */
             osize == malloc_minsize)) {        /* ..(if there is one) */
            return ptr;                         /* ..Don't do anything */
        }

    } else {
        wrtwarning("pointer to wrong page.\n");
        return 0;
    }

    p = imalloc(size);

    if (p) {
        /* copy the lesser of the two sizes, and free the old one */
        if (!size || !osize)
            ;
        else if (osize < size)
            memcpy(p, ptr, osize);
        else
            memcpy(p, ptr, size);
        ifree(ptr);
    }
    return p;
}

/*
 * Free a sequence of pages
 */

static __inline__ void
free_pages(void *ptr, size_t idx, struct pginfo *info)
{
    size_t i;
    struct pgfree *pf, *pt=0;
    size_t l;
    void *tail;

    if (info == MALLOC_FREE) {
        wrtwarning("page is already free.\n");
        return;
    }

    if (info != MALLOC_FIRST) {
        wrtwarning("pointer to wrong page.\n");
        return;
    }

    if ((size_t)ptr & malloc_pagemask) {
        wrtwarning("modified (page-) pointer.\n");
        return;
    }

    /* Count how many pages and mark them free at the same time */
    page_dir[idx] = MALLOC_FREE;
    for (i = 1; page_dir[idx+i] == MALLOC_FOLLOW; i++)
        page_dir[idx + i] = MALLOC_FREE;

    l = i << malloc_pageshift;

    if (malloc_junk)
        memset(ptr, SOME_JUNK, l);

    if (malloc_hint)
        madvise(ptr, l, MADV_FREE);

    tail = (char *)ptr+l;
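
    /*
     * Insert the run [ptr, tail) into the address-ordered free list.
     * Four cases are handled below: it lies entirely before an entry
     * (insert a new node), it abuts the end of an entry (extend that
     * entry, possibly merging with the one after it), it abuts the
     * start of an entry (extend that entry downwards), or it lies past
     * everything (append at the tail of the chain).
     */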

    /* add to free-list */
    if (!px)
        px = imalloc(sizeof *pt);   /* This cannot fail... */
    px->page = ptr;
    px->end = tail;
    px->size = l;
    if (!free_list.next) {

        /* Nothing on free list, put this at head */
        px->next = free_list.next;
        px->prev = &free_list;
        free_list.next = px;
        pf = px;
        px = 0;

    } else {

        /* Find the right spot, leave pf pointing to the modified entry. */
        tail = (char *)ptr+l;

        for (pf = free_list.next; pf->end < ptr && pf->next; pf = pf->next)
            ; /* Race ahead here */

        if (pf->page > tail) {
            /* Insert before entry */
            px->next = pf;
            px->prev = pf->prev;
            pf->prev = px;
            px->prev->next = px;
            pf = px;
            px = 0;
        } else if (pf->end == ptr) {
            /* Append to the previous entry */
            pf->end = (char *)pf->end + l;
            pf->size += l;
            if (pf->next && pf->end == pf->next->page) {
                /* And collapse the next too. */
                pt = pf->next;
                pf->end = pt->end;
                pf->size += pt->size;
                pf->next = pt->next;
                if (pf->next)
                    pf->next->prev = pf;
            }
        } else if (pf->page == tail) {
            /* Prepend to entry */
            pf->size += l;
            pf->page = ptr;
        } else if (!pf->next) {
            /* Append at tail of chain */
            px->next = 0;
            px->prev = pf;
            pf->next = px;
            pf = px;
            px = 0;
        } else {
            wrterror("freelist is destroyed.\n");
        }
    }

    /* Return something to OS ? */
    if (!pf->next &&                    /* If we're the last one, */
        pf->size > malloc_cache &&      /* ..and the cache is full, */
        pf->end == malloc_brk &&        /* ..and none behind us, */
        malloc_brk == sbrk(0)) {        /* ..and it's OK to do... */

        /*
         * Keep the cache intact. Notice that the '>' above guarantees that
         * the pf will always have at least one page afterwards.
         */
        pf->end = (char *)pf->page + malloc_cache;
        pf->size = malloc_cache;

        brk(pf->end);
        malloc_brk = pf->end;

        idx = ptr2idx(pf->end);
        last_idx = idx - 1;

        for (i=idx; i <= last_idx;)
            page_dir[i++] = MALLOC_NOT_MINE;

        /* XXX: We could realloc/shrink the pagedir here I guess. */
    }
    if (pt)
        ifree(pt);
}

/*
 * Free a chunk, and possibly the page it's on, if the page becomes empty.
 */

static __inline__ void
free_bytes(void *ptr, size_t idx, struct pginfo *info)
{
    size_t i;
    struct pginfo **mp;
    void *vp;

    /* Find the chunk number on the page */
    i = ((size_t)ptr & malloc_pagemask) >> info->shift;

    if (((size_t)ptr & (info->size-1))) {
        wrtwarning("modified (chunk-) pointer.\n");
        return;
    }

    if (info->bits[i/MALLOC_BITS] & (1<<(i%MALLOC_BITS))) {
        wrtwarning("chunk is already free.\n");
        return;
    }

    if (malloc_junk)
        memset(ptr, SOME_JUNK, (size_t)info->size);

    info->bits[i/MALLOC_BITS] |= 1<<(i%MALLOC_BITS);
    info->free++;

    mp = page_dir + info->shift;

    if (info->free == 1) {

        /* Page became non-full */

        mp = page_dir + info->shift;
        /* Insert in address order */
        while (*mp && (*mp)->next && (*mp)->next->page < info->page)
            mp = &(*mp)->next;
        info->next = *mp;
        *mp = info;
        return;
    }

    if (info->free != info->total)
        return;

    /* Find & remove this page in the queue */
    while (*mp != info) {
        mp = &((*mp)->next);
#ifdef MALLOC_EXTRA_SANITY
        if (!*mp)
            wrterror("(ES): Not on queue\n");
#endif /* MALLOC_EXTRA_SANITY */
    }
    *mp = info->next;

    /* Free the page & the info structure if need be */
    page_dir[idx] = MALLOC_FIRST;
    vp = info->page;                /* Order is important ! */
    if (vp != (void*)info)
        ifree(info);
    ifree(vp);
}

static void
ifree(void *ptr)
{
    struct pginfo *info;
    size_t idx;

    /* This is legal */
    if (!ptr)
        return;

    if (!malloc_started) {
        wrtwarning("malloc() has never been called.\n");
        return;
    }

    /* If we're already sinking, don't make matters any worse. */
    if (suicide)
        return;

    idx = ptr2idx(ptr);

    if (idx < malloc_pageshift) {
        wrtwarning("junk pointer, too low to make sense.\n");
        return;
    }

    if (idx > last_idx) {
        wrtwarning("junk pointer, too high to make sense.\n");
        return;
    }

    info = page_dir[idx];

    if (info < MALLOC_MAGIC)
        free_pages(ptr, idx, info);
    else
        free_bytes(ptr, idx, info);
    return;
}

/*
 * These are the public exported interface routines.
 */


void *
malloc(size_t size)
{
    register void *r;

    THREAD_LOCK();
    malloc_func = " in malloc():";
    if (malloc_active++) {
        wrtwarning("recursive call.\n");
        malloc_active--;
        return (0);
    }
    if (!malloc_started)
        malloc_init();
    if (malloc_sysv && !size)
        r = 0;
    else
        r = imalloc(size);
    UTRACE(0, size, r);
    malloc_active--;
    THREAD_UNLOCK();
    if (r == NULL && (size != 0 || !malloc_sysv)) {
        if (malloc_xmalloc)
            wrterror("out of memory.\n");
        errno = ENOMEM;
    }
    return (r);
}

void
free(void *ptr)
{
    THREAD_LOCK();
    malloc_func = " in free():";
    if (malloc_active++) {
        wrtwarning("recursive call.\n");
        malloc_active--;
        return;
    } else {
        ifree(ptr);
        UTRACE(ptr, 0, 0);
    }
    malloc_active--;
    THREAD_UNLOCK();
    return;
}

void *
realloc(void *ptr, size_t size)
{
    register void *r;

    THREAD_LOCK();
    malloc_func = " in realloc():";
    if (malloc_active++) {
        wrtwarning("recursive call.\n");
        malloc_active--;
        return (0);
    }
    if (ptr && !malloc_started) {
        wrtwarning("malloc() has never been called.\n");
        ptr = 0;
    }
    if (!malloc_started)
        malloc_init();
    if (malloc_sysv && !size) {
        ifree(ptr);
        r = 0;
    } else if (!ptr) {
        r = imalloc(size);
    } else {
        r = irealloc(ptr, size);
    }
    UTRACE(ptr, size, r);
    malloc_active--;
    THREAD_UNLOCK();
    if (r == NULL && (size != 0 || !malloc_sysv)) {
        if (malloc_xmalloc)
            wrterror("out of memory.\n");
        errno = ENOMEM;
    }
    return (r);
}
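
/*
 * Illustrative call paths (assuming 4 KB pages): malloc(100) goes through
 * imalloc() to malloc_bytes() and returns a 128-byte chunk from a bucketed
 * page; malloc(3000) exceeds malloc_maxsize (half a page) and gets whole
 * pages from malloc_pages(); free() looks up page_dir[ptr2idx(ptr)] and
 * dispatches to free_bytes() or free_pages() accordingly.
 */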