/* $NetBSD: malloc.c,v 1.40 2002/12/09 14:14:59 chris Exp $ */

/*
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file. As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * From FreeBSD: malloc.c,v 1.43 1998/09/30 06:13:59 jb
 *
 */

/*
 * Defining MALLOC_EXTRA_SANITY will enable extra checks which are related
 * to internal conditions and consistency in malloc.c. This has a
 * noticeable runtime performance hit, and generally will not do you
 * any good unless you fiddle with the internals of malloc or want
 * to catch random pointer corruption as early as possible.
 */
#ifndef MALLOC_EXTRA_SANITY
#undef MALLOC_EXTRA_SANITY
#endif

/*
 * What to use for Junk.  This is the byte value we use to fill with
 * when the 'J' option is enabled.
 */
#define SOME_JUNK   0xd0    /* as in "Duh" :-) */

/*
 * The basic parameters you can tweak.
 *
 * malloc_minsize   minimum size of an allocation in bytes.
 *                  If this is too small it's too much work
 *                  to manage them.  This is also the smallest
 *                  unit of alignment used for the storage
 *                  returned by malloc/realloc.
 *
 */

#if defined(__FreeBSD__)
#  if defined(__i386__)
#    define malloc_minsize      16U
#  endif
#  if defined(__alpha__)
#    define malloc_minsize      16U
#  endif
#  define HAS_UTRACE
#  define UTRACE_LABEL

#include <sys/cdefs.h>
void utrace __P((struct ut *, int));

/*
 * Make malloc/free/realloc thread-safe in libc for use with
 * kernel threads.
 */
#  include "libc_private.h"
#  include "spinlock.h"
static spinlock_t thread_lock = _SPINLOCK_INITIALIZER;
#  define THREAD_LOCK()     if (__isthreaded) _SPINLOCK(&thread_lock);
#  define THREAD_UNLOCK()   if (__isthreaded) _SPINUNLOCK(&thread_lock);
#endif /* __FreeBSD__ */

#if defined(__NetBSD__)
#  define malloc_minsize        16U
#  define HAS_UTRACE
#  define UTRACE_LABEL "malloc",
#include <sys/cdefs.h>
#include <sys/types.h>
int utrace __P((const char *, void *, size_t));
#endif /* __NetBSD__ */

#if defined(__sparc__) && defined(sun)
#  define malloc_minsize        16U
#  define MAP_ANON              (0)
static int fdzero;
#  define MMAP_FD   fdzero
#  define INIT_MMAP() \
    { if ((fdzero = open("/dev/zero", O_RDWR, 0000)) == -1) \
        wrterror("open of /dev/zero"); }
#endif /* __sparc__ */

/* Insert your combination here... */
#if defined(__FOOCPU__) && defined(__BAROS__)
#  define malloc_minsize        16U
#endif /* __FOOCPU__ && __BAROS__ */


/*
 * No user serviceable parts behind this point.
 */
#include "namespace.h"
#include <sys/types.h>
#include <sys/mman.h>
#include <errno.h>
#include <fcntl.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/*
 * This structure describes a page worth of chunks.
 */

struct pginfo {
    struct pginfo *next;    /* next on the free list */
    void    *page;          /* Pointer to the page */
    u_short size;           /* size of this page's chunks */
    u_short shift;          /* How far to shift for chunks of this size */
    u_short free;           /* How many free chunks */
    u_short total;          /* How many chunks in total */
    u_int   bits[1];        /* Which chunks are free */
};
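
/*
 * Note on bits[1]: the structure is allocated with enough trailing u_ints
 * to hold one bit per chunk on the page.  For example (assuming 4 KB pages
 * and a 32-bit u_int), a page of 128-byte chunks holds 32 chunks and needs
 * a single bitmap word, while a page of 16-byte chunks holds 256 chunks and
 * needs eight words.
 */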

/*
 * This structure describes a number of free pages.
 */

struct pgfree {
    struct pgfree *next;    /* next run of free pages */
    struct pgfree *prev;    /* prev run of free pages */
    void   *page;           /* pointer to free pages */
    void   *end;            /* pointer to end of free pages */
    size_t  size;           /* number of bytes free */
};

/*
 * How many bits per u_int in the bitmap.
 * Change only if not 8 bits/byte
 */
#define MALLOC_BITS ((int)(8*sizeof(u_int)))

/*
 * Magic values to put in the page_directory
 */
#define MALLOC_NOT_MINE ((struct pginfo*) 0)
#define MALLOC_FREE     ((struct pginfo*) 1)
#define MALLOC_FIRST    ((struct pginfo*) 2)
#define MALLOC_FOLLOW   ((struct pginfo*) 3)
#define MALLOC_MAGIC    ((struct pginfo*) 4)
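
/*
 * Each page_dir slot holds either one of the sentinel values above or a
 * real struct pginfo pointer.  The code assumes no genuine pginfo pointer
 * is numerically smaller than MALLOC_MAGIC, so a simple
 * "info < MALLOC_MAGIC" test (see ifree() below) distinguishes whole-page
 * runs from chunk pages.
 */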

/*
 * Page size related parameters, computed at run-time.
 */
static size_t malloc_pagesize;
static size_t malloc_pageshift;
static size_t malloc_pagemask;

#ifndef malloc_minsize
#define malloc_minsize          16U
#endif

#ifndef malloc_maxsize
#define malloc_maxsize          ((malloc_pagesize)>>1)
#endif

#define pageround(foo) (((foo) + (malloc_pagemask))&(~(malloc_pagemask)))
#define ptr2idx(foo) \
    (((size_t)(uintptr_t)(foo) >> malloc_pageshift)-malloc_origo)
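
/*
 * For example (assuming a 4 KB page size, so malloc_pagemask == 0xfff and
 * malloc_pageshift == 12): pageround(5000) == 8192, and ptr2idx(p) is the
 * page number of p minus malloc_origo, i.e. the index into page_dir for
 * the page that p lies on.
 */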

#ifndef THREAD_LOCK
#define THREAD_LOCK()
#endif

#ifndef THREAD_UNLOCK
#define THREAD_UNLOCK()
#endif

#ifndef MMAP_FD
#define MMAP_FD (-1)
#endif

#ifndef INIT_MMAP
#define INIT_MMAP()
#endif

#ifndef MADV_FREE
#define MADV_FREE MADV_DONTNEED
#endif

186
187 /* Set when initialization has been done */
188 static unsigned malloc_started;
189
190 /* Recusion flag for public interface. */
191 static int malloc_active;
192
193 /* Number of free pages we cache */
194 static unsigned malloc_cache = 16;
195
196 /* The offset from pagenumber to index into the page directory */
197 static size_t malloc_origo;
198
199 /* The last index in the page directory we care about */
200 static size_t last_idx;
201
202 /* Pointer to page directory. Allocated "as if with" malloc */
203 static struct pginfo **page_dir;
204
205 /* How many slots in the page directory */
206 static unsigned malloc_ninfo;
207
208 /* Free pages line up here */
209 static struct pgfree free_list;
210
211 /* Abort(), user doesn't handle problems. */
212 static int malloc_abort;
213
214 /* Are we trying to die ? */
215 static int suicide;
216
217 /* always realloc ? */
218 static int malloc_realloc;
219
220 /* pass the kernel a hint on free pages ? */
221 static int malloc_hint = 0;
222
223 /* xmalloc behaviour ? */
224 static int malloc_xmalloc;
225
226 /* sysv behaviour for malloc(0) ? */
227 static int malloc_sysv;
228
229 /* zero fill ? */
230 static int malloc_zero;
231
232 /* junk fill ? */
233 static int malloc_junk;
234
#ifdef HAS_UTRACE

/* utrace ? */
static int malloc_utrace;

struct ut { void *p; size_t s; void *r; };

#define UTRACE(a, b, c) \
    if (malloc_utrace) { \
        struct ut u; \
        u.p = a; u.s = b; u.r = c; \
        utrace(UTRACE_LABEL (void *) &u, sizeof u); \
    }
#else /* !HAS_UTRACE */
#define UTRACE(a, b, c)
#endif /* HAS_UTRACE */

/* my last break. */
static void *malloc_brk;

/* one location cache for free-list holders */
static struct pgfree *px;

/* compile-time options */
char *malloc_options;

/* Name of the current public function */
static char *malloc_func;

/* Macro for mmap */
#define MMAP(size) \
    mmap(0, (size), PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, \
        MMAP_FD, (off_t)0);

/*
 * Necessary function declarations
 */
static int extend_pgdir(size_t idx);
static void *imalloc(size_t size);
static void ifree(void *ptr);
static void *irealloc(void *ptr, size_t size);

static void
wrterror(char *p)
{
    const char *progname = getprogname();
    char *q = " error: ";

    write(STDERR_FILENO, progname, strlen(progname));
    write(STDERR_FILENO, malloc_func, strlen(malloc_func));
    write(STDERR_FILENO, q, strlen(q));
    write(STDERR_FILENO, p, strlen(p));
    suicide = 1;
    abort();
}

static void
wrtwarning(char *p)
{
    const char *progname = getprogname();
    char *q = " warning: ";

    if (malloc_abort)
        wrterror(p);
    write(STDERR_FILENO, progname, strlen(progname));
    write(STDERR_FILENO, malloc_func, strlen(malloc_func));
    write(STDERR_FILENO, q, strlen(q));
    write(STDERR_FILENO, p, strlen(p));
}


/*
 * Allocate a number of pages from the OS
 */
static void *
map_pages(size_t pages)
{
    caddr_t result, rresult, tail;
    intptr_t bytes = pages << malloc_pageshift;

    if (bytes < 0 || (size_t)bytes < pages) {
        errno = ENOMEM;
        return NULL;
    }

    if ((result = sbrk(bytes)) == (void *)-1)
        return NULL;

    /*
     * Round to a page, in case sbrk(2) did not do this for us
     */
    rresult = (caddr_t)pageround((size_t)(uintptr_t)result);
    if (result < rresult) {
        /* make sure we have enough space to fit bytes */
        if (sbrk((intptr_t)(rresult - result)) == (void *)-1) {
            /* we failed, put everything back */
            if (brk(result)) {
                wrterror("brk(2) failed [internal error]\n");
            }
        }
    }
    tail = rresult + (size_t)bytes;

    last_idx = ptr2idx(tail) - 1;
    malloc_brk = tail;

    if ((last_idx+1) >= malloc_ninfo && !extend_pgdir(last_idx)) {
        malloc_brk = result;
        last_idx = ptr2idx(malloc_brk) - 1;
        /* Put back break point since we failed. */
        if (brk(malloc_brk))
            wrterror("brk(2) failed [internal error]\n");
        return 0;
    }

    return rresult;
}
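
/*
 * Note on map_pages() above: all page-level storage comes from sbrk(2);
 * mmap(2) is used only for the page directory itself.  Every successful
 * call also updates malloc_brk/last_idx and, when needed, grows the page
 * directory so that each page handed out has a page_dir slot.
 */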

/*
 * Extend page directory
 */
static int
extend_pgdir(size_t idx)
{
    struct pginfo **new, **old;
    size_t newlen, oldlen;

    /* check for overflow */
    if ((((~(1UL << ((sizeof(size_t) * NBBY) - 1)) / sizeof(*page_dir)) + 1)
        + (malloc_pagesize / sizeof *page_dir)) < idx) {
        errno = ENOMEM;
        return 0;
    }

    /* Make it this many pages */
    newlen = pageround(idx * sizeof *page_dir) + malloc_pagesize;

    /* remember the old mapping size */
    oldlen = malloc_ninfo * sizeof *page_dir;

    /*
     * NOTE: we allocate new pages and copy the directory rather than tempt
     * fate by trying to "grow" the region..  There is nothing to prevent
     * us from accidentally re-mapping space that's been allocated by our
     * caller via dlopen() or other mmap().
     *
     * The copy problem is not too bad, as there is 4K of page index per
     * 4MB of malloc arena.
     *
     * We can totally avoid the copy if we open a file descriptor to associate
     * the anon mappings with.  Then, when we remap the pages at the new
     * address, the old pages will be "magically" remapped..  But this means
     * keeping open a "secret" file descriptor.....
     */

    /* Get new pages */
    new = (struct pginfo**) MMAP(newlen);
    if (new == (struct pginfo **)-1)
        return 0;

    /* Copy the old stuff */
    memcpy(new, page_dir, oldlen);

    /* register the new size */
    malloc_ninfo = newlen / sizeof *page_dir;

    /* swap the pointers */
    old = page_dir;
    page_dir = new;

    /* Now free the old stuff */
    munmap(old, oldlen);
    return 1;
}

/*
 * Initialize the world
 */
static void
malloc_init(void)
{
    char *p, b[64];
    int i, j;
    int errnosave;

    /*
     * Compute page-size related variables.
     */
    malloc_pagesize = (size_t)sysconf(_SC_PAGESIZE);
    malloc_pagemask = malloc_pagesize - 1;
    for (malloc_pageshift = 0;
         (1UL << malloc_pageshift) != malloc_pagesize;
         malloc_pageshift++)
        /* nothing */ ;

    INIT_MMAP();

#ifdef MALLOC_EXTRA_SANITY
    malloc_junk = 1;
#endif /* MALLOC_EXTRA_SANITY */

    for (i = 0; i < 3; i++) {
        if (i == 0) {
            errnosave = errno;
            j = readlink("/etc/malloc.conf", b, sizeof b - 1);
            errno = errnosave;
            if (j <= 0)
                continue;
            b[j] = '\0';
            p = b;
        } else if (i == 1) {
            p = getenv("MALLOC_OPTIONS");
        } else {
            p = malloc_options;
        }
        for (; p && *p; p++) {
            switch (*p) {
            case '>': malloc_cache   <<= 1; break;
            case '<': malloc_cache   >>= 1; break;
            case 'a': malloc_abort   = 0; break;
            case 'A': malloc_abort   = 1; break;
            case 'h': malloc_hint    = 0; break;
            case 'H': malloc_hint    = 1; break;
            case 'r': malloc_realloc = 0; break;
            case 'R': malloc_realloc = 1; break;
            case 'j': malloc_junk    = 0; break;
            case 'J': malloc_junk    = 1; break;
#ifdef HAS_UTRACE
            case 'u': malloc_utrace  = 0; break;
            case 'U': malloc_utrace  = 1; break;
#endif
            case 'v': malloc_sysv    = 0; break;
            case 'V': malloc_sysv    = 1; break;
            case 'x': malloc_xmalloc = 0; break;
            case 'X': malloc_xmalloc = 1; break;
            case 'z': malloc_zero    = 0; break;
            case 'Z': malloc_zero    = 1; break;
            default:
                j = malloc_abort;
                malloc_abort = 0;
                wrtwarning("unknown char in MALLOC_OPTIONS\n");
                malloc_abort = j;
                break;
            }
        }
    }
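
    /*
     * Options come from three places, read in order: the target of the
     * /etc/malloc.conf symlink, the MALLOC_OPTIONS environment variable,
     * and the global malloc_options string, so later sources override
     * earlier ones.  For example, running a program with
     * MALLOC_OPTIONS=AJ enables abort-on-error and junk filling.
     */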

    UTRACE(0, 0, 0);

    /*
     * We want junk in the entire allocation, and zero only in the part
     * the user asked for.
     */
    if (malloc_zero)
        malloc_junk = 1;

    /*
     * If we run with junk (or implicitly from above: zero), we want to
     * force realloc() to get new storage, so we can DTRT with it.
     */
    if (malloc_junk)
        malloc_realloc = 1;

    /* Allocate one page for the page directory */
    page_dir = (struct pginfo **) MMAP(malloc_pagesize);

    if (page_dir == (struct pginfo **) -1)
        wrterror("mmap(2) failed, check limits.\n");

    /*
     * We need a maximum of malloc_pageshift buckets, steal these from the
     * front of the page_directory.
     */
    malloc_origo = pageround((size_t)(uintptr_t)sbrk((intptr_t)0))
        >> malloc_pageshift;
    malloc_origo -= malloc_pageshift;

    malloc_ninfo = malloc_pagesize / sizeof *page_dir;

    /* Recalculate the cache size in bytes, and make sure it's nonzero */
    if (!malloc_cache)
        malloc_cache++;

    malloc_cache <<= malloc_pageshift;

    /*
     * This is a nice hack from Kaleb Keithly (kaleb@x.org).
     * We can sbrk(2) further back when we keep this on a low address.
     */
    px = (struct pgfree *) imalloc(sizeof *px);

    /* Been here, done that */
    malloc_started++;
}

/*
 * Allocate a number of complete pages
 */
static void *
malloc_pages(size_t size)
{
    void *p, *delay_free = 0;
    size_t i;
    struct pgfree *pf;
    size_t idx;

    idx = pageround(size);
    if (idx < size) {
        errno = ENOMEM;
        return NULL;
    } else
        size = idx;

    p = 0;

    /* Look for free pages before asking for more */
    for (pf = free_list.next; pf; pf = pf->next) {

#ifdef MALLOC_EXTRA_SANITY
        if (pf->size & malloc_pagemask)
            wrterror("(ES): junk length entry on free_list\n");
        if (!pf->size)
            wrterror("(ES): zero length entry on free_list\n");
        if (pf->page == pf->end)
            wrterror("(ES): zero entry on free_list\n");
        if (pf->page > pf->end)
            wrterror("(ES): sick entry on free_list\n");
        if ((void*)pf->page >= (void*)sbrk(0))
            wrterror("(ES): entry on free_list past brk\n");
        if (page_dir[ptr2idx(pf->page)] != MALLOC_FREE)
            wrterror("(ES): non-free first page on free-list\n");
        if (page_dir[ptr2idx(pf->end)-1] != MALLOC_FREE)
            wrterror("(ES): non-free last page on free-list\n");
#endif /* MALLOC_EXTRA_SANITY */

        if (pf->size < size)
            continue;

        if (pf->size == size) {
            p = pf->page;
            if (pf->next)
                pf->next->prev = pf->prev;
            pf->prev->next = pf->next;
            delay_free = pf;
            break;
        }

        p = pf->page;
        pf->page = (char *)pf->page + size;
        pf->size -= size;
        break;
    }

#ifdef MALLOC_EXTRA_SANITY
    if (p && page_dir[ptr2idx(p)] != MALLOC_FREE)
        wrterror("(ES): allocated non-free page on free-list\n");
#endif /* MALLOC_EXTRA_SANITY */

    size >>= malloc_pageshift;

    /* Map new pages */
    if (!p)
        p = map_pages(size);

    if (p) {

        idx = ptr2idx(p);
        page_dir[idx] = MALLOC_FIRST;
        for (i = 1; i < size; i++)
            page_dir[idx+i] = MALLOC_FOLLOW;

        if (malloc_junk)
            memset(p, SOME_JUNK, size << malloc_pageshift);
    }

    if (delay_free) {
        if (!px)
            px = delay_free;
        else
            ifree(delay_free);
    }

    return p;
}

/*
 * Allocate a page of fragments
 */

static __inline__ int
malloc_make_chunks(int bits)
{
    struct pginfo *bp;
    void *pp;
    int i, k, l;

    /* Allocate a new bucket */
    pp = malloc_pages(malloc_pagesize);
    if (!pp)
        return 0;

    /* Find length of admin structure */
    l = (int)offsetof(struct pginfo, bits[0]);
    l += sizeof bp->bits[0] *
        (((malloc_pagesize >> bits)+MALLOC_BITS-1) / MALLOC_BITS);

    /* Don't waste more than two chunks on this */
    if ((1<<(bits)) <= l+l) {
        bp = (struct pginfo *)pp;
    } else {
        bp = (struct pginfo *)imalloc((size_t)l);
        if (!bp) {
            ifree(pp);
            return 0;
        }
    }

    bp->size = (1<<bits);
    bp->shift = bits;
    bp->total = bp->free = malloc_pagesize >> bits;
    bp->page = pp;

    /* set all valid bits in the bitmap */
    k = bp->total;
    i = 0;

    /* Do a bunch at a time */
    for (; k-i >= MALLOC_BITS; i += MALLOC_BITS)
        bp->bits[i / MALLOC_BITS] = ~0U;

    for (; i < k; i++)
        bp->bits[i/MALLOC_BITS] |= 1<<(i%MALLOC_BITS);

    if (bp == bp->page) {
        /* Mark the ones we stole for ourselves */
        for (i = 0; l > 0; i++) {
            bp->bits[i/MALLOC_BITS] &= ~(1<<(i%MALLOC_BITS));
            bp->free--;
            bp->total--;
            l -= (1 << bits);
        }
    }

    /* MALLOC_LOCK */

    page_dir[ptr2idx(pp)] = bp;

    bp->next = page_dir[bits];
    page_dir[bits] = bp;

    /* MALLOC_UNLOCK */

    return 1;
}

/*
 * Allocate a fragment
 */
static void *
malloc_bytes(size_t size)
{
    size_t i;
    int j;
    u_int u;
    struct pginfo *bp;
    int k;
    u_int *lp;

    /* Don't bother with anything less than this */
    if (size < malloc_minsize)
        size = malloc_minsize;

    /* Find the right bucket */
    j = 1;
    i = size-1;
    while (i >>= 1)
        j++;
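
    /*
     * j ends up as the base-2 logarithm of the chunk size that will be
     * used, i.e. the smallest power of two not smaller than size.  For
     * example, a 100-byte request gives j == 7 and is served from the
     * 128-byte bucket; requests at or below malloc_minsize (16) land in
     * the j == 4 bucket.
     */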

    /* If it's empty, make a page more of chunks of that size */
    if (!page_dir[j] && !malloc_make_chunks(j))
        return 0;

    bp = page_dir[j];

    /* Find first word of bitmap which isn't empty */
    for (lp = bp->bits; !*lp; lp++)
        ;

    /* Find that bit, and tweak it */
    u = 1;
    k = 0;
    while (!(*lp & u)) {
        u += u;
        k++;
    }
    *lp ^= u;

    /* If there are no more free, remove from free-list */
    if (!--bp->free) {
        page_dir[j] = bp->next;
        bp->next = 0;
    }

    /* Adjust to the real offset of that chunk */
    k += (lp-bp->bits)*MALLOC_BITS;
    k <<= bp->shift;

    if (malloc_junk)
        memset((u_char*)bp->page + k, SOME_JUNK, (size_t)bp->size);

    return (u_char *)bp->page + k;
}

/*
 * Allocate a piece of memory
 */
static void *
imalloc(size_t size)
{
    void *result;

    if (suicide)
        abort();

    if ((size + malloc_pagesize) < size)    /* Check for overflow */
        result = 0;
    else if (size <= malloc_maxsize)
        result = malloc_bytes(size);
    else
        result = malloc_pages(size);

    if (malloc_abort && !result)
        wrterror("allocation failed.\n");

    if (malloc_zero && result)
        memset(result, 0, size);

    return result;
}

/*
 * Change the size of an allocation.
 */
static void *
irealloc(void *ptr, size_t size)
{
    void *p;
    size_t osize, idx;
    struct pginfo **mp;
    size_t i;

    if (suicide)
        abort();

    idx = ptr2idx(ptr);

    if (idx < malloc_pageshift) {
        wrtwarning("junk pointer, too low to make sense.\n");
        return 0;
    }

    if (idx > last_idx) {
        wrtwarning("junk pointer, too high to make sense.\n");
        return 0;
    }

    mp = &page_dir[idx];

    if (*mp == MALLOC_FIRST) {                  /* Page allocation */

        /* Check the pointer */
        if ((size_t)(uintptr_t)ptr & malloc_pagemask) {
            wrtwarning("modified (page-) pointer.\n");
            return 0;
        }

        /* Find the size in bytes */
        for (osize = malloc_pagesize; *++mp == MALLOC_FOLLOW;)
            osize += malloc_pagesize;

        if (!malloc_realloc &&                  /* unless we have to, */
            size <= osize &&                    /* .. or are too small, */
            size > (osize - malloc_pagesize)) { /* .. or can free a page, */
            return ptr;                         /* don't do anything. */
        }

    } else if (*mp >= MALLOC_MAGIC) {           /* Chunk allocation */

        /* Check the pointer for sane values */
        if (((size_t)(uintptr_t)ptr & ((*mp)->size-1))) {
            wrtwarning("modified (chunk-) pointer.\n");
            return 0;
        }

        /* Find the chunk index in the page */
        i = ((size_t)(uintptr_t)ptr & malloc_pagemask) >> (*mp)->shift;

        /* Verify that it isn't a free chunk already */
        if ((*mp)->bits[i/MALLOC_BITS] & (1<<(i%MALLOC_BITS))) {
            wrtwarning("chunk is already free.\n");
            return 0;
        }

        osize = (*mp)->size;

        if (!malloc_realloc &&                  /* Unless we have to, */
            size < osize &&                     /* ..or are too small, */
            (size > osize/2 ||                  /* ..or could use a smaller size, */
             osize == malloc_minsize)) {        /* ..(if there is one) */
            return ptr;                         /* ..Don't do anything */
        }

    } else {
        wrtwarning("pointer to wrong page.\n");
        return 0;
    }

    p = imalloc(size);

    if (p) {
        /* copy the lesser of the two sizes, and free the old one */
        if (!size || !osize)
            ;
        else if (osize < size)
            memcpy(p, ptr, osize);
        else
            memcpy(p, ptr, size);
        ifree(ptr);
    }
    return p;
}

/*
 * Free a sequence of pages
 */

static __inline__ void
free_pages(void *ptr, size_t idx, struct pginfo *info)
{
    size_t i;
    struct pgfree *pf, *pt = 0;
    size_t l;
    void *tail;

    if (info == MALLOC_FREE) {
        wrtwarning("page is already free.\n");
        return;
    }

    if (info != MALLOC_FIRST) {
        wrtwarning("pointer to wrong page.\n");
        return;
    }

    if ((size_t)(uintptr_t)ptr & malloc_pagemask) {
        wrtwarning("modified (page-) pointer.\n");
        return;
    }

    /* Count how many pages and mark them free at the same time */
    page_dir[idx] = MALLOC_FREE;
    for (i = 1; page_dir[idx+i] == MALLOC_FOLLOW; i++)
        page_dir[idx + i] = MALLOC_FREE;

    l = i << malloc_pageshift;

    if (malloc_junk)
        memset(ptr, SOME_JUNK, l);

    if (malloc_hint)
        madvise(ptr, l, MADV_FREE);

    tail = (char *)ptr+l;

    /* add to free-list */
    if (!px)
        px = imalloc(sizeof *pt);   /* This cannot fail... */
    px->page = ptr;
    px->end = tail;
    px->size = l;
    if (!free_list.next) {

        /* Nothing on free list, put this at head */
        px->next = free_list.next;
        px->prev = &free_list;
        free_list.next = px;
        pf = px;
        px = 0;

    } else {

        /* Find the right spot, leave pf pointing to the modified entry. */
        tail = (char *)ptr+l;

        for (pf = free_list.next; pf->end < ptr && pf->next; pf = pf->next)
            ; /* Race ahead here */

        if (pf->page > tail) {
            /* Insert before entry */
            px->next = pf;
            px->prev = pf->prev;
            pf->prev = px;
            px->prev->next = px;
            pf = px;
            px = 0;
        } else if (pf->end == ptr) {
            /* Append to the previous entry */
            pf->end = (char *)pf->end + l;
            pf->size += l;
            if (pf->next && pf->end == pf->next->page) {
                /* And collapse the next too. */
                pt = pf->next;
                pf->end = pt->end;
                pf->size += pt->size;
                pf->next = pt->next;
                if (pf->next)
                    pf->next->prev = pf;
            }
        } else if (pf->page == tail) {
            /* Prepend to entry */
            pf->size += l;
            pf->page = ptr;
        } else if (!pf->next) {
            /* Append at tail of chain */
            px->next = 0;
            px->prev = pf;
            pf->next = px;
            pf = px;
            px = 0;
        } else {
            wrterror("freelist is destroyed.\n");
        }
    }

    /* Return something to OS ? */
    if (!pf->next &&                        /* If we're the last one, */
        pf->size > malloc_cache &&          /* ..and the cache is full, */
        pf->end == malloc_brk &&            /* ..and none behind us, */
        malloc_brk == sbrk((intptr_t)0)) {  /* ..and it's OK to do... */

        /*
         * Keep the cache intact.  Notice that the '>' above guarantees that
         * the pf will always have at least one page afterwards.
         */
        pf->end = (char *)pf->page + malloc_cache;
        pf->size = malloc_cache;

        brk(pf->end);
        malloc_brk = pf->end;

        idx = ptr2idx(pf->end);
        last_idx = idx - 1;

        for (i = idx; i <= last_idx;)
            page_dir[i++] = MALLOC_NOT_MINE;

        /* XXX: We could realloc/shrink the pagedir here I guess. */
    }
    if (pt)
        ifree(pt);
}
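
/*
 * Summary of free_pages() above: runs of freed pages are kept on a sorted,
 * coalescing free list; only when the final run exceeds malloc_cache bytes
 * and ends exactly at the current break is the excess returned to the
 * kernel with brk(2).
 */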

/*
 * Free a chunk, and possibly the page it's on, if the page becomes empty.
 */

static __inline__ void
free_bytes(void *ptr, size_t idx, struct pginfo *info)
{
    size_t i;
    struct pginfo **mp;
    void *vp;

    /* Find the chunk number on the page */
    i = ((size_t)(uintptr_t)ptr & malloc_pagemask) >> info->shift;

    if (((size_t)(uintptr_t)ptr & (info->size-1))) {
        wrtwarning("modified (chunk-) pointer.\n");
        return;
    }

    if (info->bits[i/MALLOC_BITS] & (1<<(i%MALLOC_BITS))) {
        wrtwarning("chunk is already free.\n");
        return;
    }

    if (malloc_junk)
        memset(ptr, SOME_JUNK, (size_t)info->size);

    info->bits[i/MALLOC_BITS] |= 1<<(i%MALLOC_BITS);
    info->free++;

    mp = page_dir + info->shift;

    if (info->free == 1) {

        /* Page became non-full */

        /* Insert in address order */
        while (*mp && (*mp)->next && (*mp)->next->page < info->page)
            mp = &(*mp)->next;
        info->next = *mp;
        *mp = info;
        return;
    }

    if (info->free != info->total)
        return;

    /* Find & remove this page in the queue */
    while (*mp != info) {
        mp = &((*mp)->next);
#ifdef MALLOC_EXTRA_SANITY
        if (!*mp)
            wrterror("(ES): Not on queue\n");
#endif /* MALLOC_EXTRA_SANITY */
    }
    *mp = info->next;

    /* Free the page & the info structure if need be */
    page_dir[idx] = MALLOC_FIRST;
    vp = info->page;        /* Order is important ! */
    if (vp != (void*)info)
        ifree(info);
    ifree(vp);
}
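
/*
 * Note on free_bytes() above: when the pginfo header was stored on the
 * chunk page itself (see malloc_make_chunks()), freeing the last chunk
 * frees the header together with the page, which is why info->page is
 * saved in vp before the header is (possibly) ifree()d.
 */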

static void
ifree(void *ptr)
{
    struct pginfo *info;
    size_t idx;

    /* This is legal */
    if (!ptr)
        return;

    if (!malloc_started) {
        wrtwarning("malloc() has never been called.\n");
        return;
    }

    /* If we're already sinking, don't make matters any worse. */
    if (suicide)
        return;

    idx = ptr2idx(ptr);

    if (idx < malloc_pageshift) {
        wrtwarning("junk pointer, too low to make sense.\n");
        return;
    }

    if (idx > last_idx) {
        wrtwarning("junk pointer, too high to make sense.\n");
        return;
    }

    info = page_dir[idx];

    if (info < MALLOC_MAGIC)
        free_pages(ptr, idx, info);
    else
        free_bytes(ptr, idx, info);
    return;
}

/*
 * These are the public exported interface routines.
 */


void *
malloc(size_t size)
{
    register void *r;

    THREAD_LOCK();
    malloc_func = " in malloc():";
    if (malloc_active++) {
        wrtwarning("recursive call.\n");
        malloc_active--;
        THREAD_UNLOCK();
        return (0);
    }
    if (!malloc_started)
        malloc_init();
    if (malloc_sysv && !size)
        r = 0;
    else
        r = imalloc(size);
    UTRACE(0, size, r);
    malloc_active--;
    THREAD_UNLOCK();
    if (r == NULL && (size != 0 || !malloc_sysv)) {
        if (malloc_xmalloc)
            wrterror("out of memory.\n");
        errno = ENOMEM;
    }
    return (r);
}

void
free(void *ptr)
{
    THREAD_LOCK();
    malloc_func = " in free():";
    if (malloc_active++) {
        wrtwarning("recursive call.\n");
        malloc_active--;
        THREAD_UNLOCK();
        return;
    } else {
        ifree(ptr);
        UTRACE(ptr, 0, 0);
    }
    malloc_active--;
    THREAD_UNLOCK();
    return;
}

void *
realloc(void *ptr, size_t size)
{
    register void *r;

    THREAD_LOCK();
    malloc_func = " in realloc():";
    if (malloc_active++) {
        wrtwarning("recursive call.\n");
        malloc_active--;
        THREAD_UNLOCK();
        return (0);
    }
    if (ptr && !malloc_started) {
        wrtwarning("malloc() has never been called.\n");
        ptr = 0;
    }
    if (!malloc_started)
        malloc_init();
    if (malloc_sysv && !size) {
        ifree(ptr);
        r = 0;
    } else if (!ptr) {
        r = imalloc(size);
    } else {
        r = irealloc(ptr, size);
    }
    UTRACE(ptr, size, r);
    malloc_active--;
    THREAD_UNLOCK();
    if (r == NULL && (size != 0 || !malloc_sysv)) {
        if (malloc_xmalloc)
            wrterror("out of memory.\n");
        errno = ENOMEM;
    }
    return (r);
}