/* $NetBSD: malloc.c,v 1.27 2000/01/23 00:38:43 mycroft Exp $ */

/*
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file. As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * From FreeBSD: malloc.c,v 1.43 1998/09/30 06:13:59 jb
 *
 */

/*
 * Defining MALLOC_EXTRA_SANITY will enable extra checks which are related
 * to internal conditions and consistency in malloc.c. This has a
 * noticeable runtime performance hit, and generally will not do you
 * any good unless you fiddle with the internals of malloc or want
 * to catch random pointer corruption as early as possible.
 */
#ifndef MALLOC_EXTRA_SANITY
#undef MALLOC_EXTRA_SANITY
#endif

/*
 * What to use for Junk. This is the byte value we use to fill with
 * when the 'J' option is enabled.
 */
#define SOME_JUNK	0xd0	/* as in "Duh" :-) */

/*
 * The basic parameters you can tweak.
 *
 * malloc_minsize	minimum size of an allocation in bytes.
 *			If this is too small it's too much work
 *			to manage them. This is also the smallest
 *			unit of alignment used for the storage
 *			returned by malloc/realloc.
 *
 */

#if defined(__FreeBSD__)
# if defined(__i386__)
#  define malloc_minsize	16U
# endif
# if defined(__alpha__)
#  define malloc_minsize	16U
# endif
# if !defined(__NETBSD_SYSCALLS)
#  define HAS_UTRACE
# endif
/*
 * Make malloc/free/realloc thread-safe in libc for use with
 * kernel threads.
 */
# include "libc_private.h"
# include "spinlock.h"
static spinlock_t thread_lock = _SPINLOCK_INITIALIZER;
# define THREAD_LOCK()		if (__isthreaded) _SPINLOCK(&thread_lock);
# define THREAD_UNLOCK()	if (__isthreaded) _SPINUNLOCK(&thread_lock);
#endif /* __FreeBSD__ */

#if defined(__NetBSD__)
# define malloc_minsize		16U
#endif /* __NetBSD__ */

#if defined(__sparc__) && defined(sun)
# define malloc_minsize		16U
# define MAP_ANON		(0)
static int fdzero;
# define MMAP_FD	fdzero
# define INIT_MMAP() \
    { if ((fdzero=open("/dev/zero", O_RDWR, 0000)) == -1) \
        wrterror("open of /dev/zero"); }
#endif /* __sparc__ */

/* Insert your combination here... */
#if defined(__FOOCPU__) && defined(__BAROS__)
# define malloc_minsize		16U
#endif /* __FOOCPU__ && __BAROS__ */


/*
 * No user serviceable parts behind this point.
 */
#include "namespace.h"
#include <sys/types.h>
#include <sys/mman.h>
#include <errno.h>
#include <fcntl.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/*
 * This structure describes a page worth of chunks.
 */

struct pginfo {
    struct pginfo *next;    /* next on the free list */
    void *page;             /* Pointer to the page */
    u_short size;           /* size of this page's chunks */
    u_short shift;          /* How far to shift for this size chunks */
    u_short free;           /* How many free chunks */
    u_short total;          /* How many chunks */
    u_int bits[1];          /* Which chunks are free */
};

/*
 * This structure describes a number of free pages.
 */

struct pgfree {
    struct pgfree *next;    /* next run of free pages */
    struct pgfree *prev;    /* prev run of free pages */
    void *page;             /* pointer to free pages */
    void *end;              /* pointer to end of free pages */
    size_t size;            /* number of bytes free */
};

/*
 * How many bits per u_int in the bitmap.
 * Change only if not 8 bits/byte
 */
#define MALLOC_BITS (8*sizeof(u_int))
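
/*
 * Illustrative sketch, not part of the allocator: this is how the bits[]
 * map in struct pginfo is meant to be read. Chunk number i on a page is
 * free when bit (i % MALLOC_BITS) is set in bits[i / MALLOC_BITS]. The
 * helper name is made up and is only compiled if MALLOC_EXAMPLE is defined.
 */
#ifdef MALLOC_EXAMPLE
static int
example_chunk_is_free(const struct pginfo *pi, int i)
{
    return (pi->bits[i / MALLOC_BITS] & (1U << (i % MALLOC_BITS))) != 0;
}
#endif /* MALLOC_EXAMPLE */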

/*
 * Magic values to put in the page_directory
 */
#define MALLOC_NOT_MINE	((struct pginfo*) 0)
#define MALLOC_FREE	((struct pginfo*) 1)
#define MALLOC_FIRST	((struct pginfo*) 2)
#define MALLOC_FOLLOW	((struct pginfo*) 3)
#define MALLOC_MAGIC	((struct pginfo*) 4)

/*
 * Page size related parameters, computed at run-time.
 */
static size_t malloc_pagesize;
static size_t malloc_pageshift;
static size_t malloc_pagemask;

#ifndef malloc_minsize
#define malloc_minsize			16U
#endif

#ifndef malloc_maxsize
#define malloc_maxsize			((malloc_pagesize)>>1)
#endif

#define pageround(foo) (((foo) + (malloc_pagemask))&(~(malloc_pagemask)))
#define ptr2idx(foo) (((u_long)(foo) >> malloc_pageshift)-malloc_origo)
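
/*
 * A worked example of the two macros above (illustrative only, assuming the
 * common 4096-byte page): malloc_pagemask is then 0xfff, so
 * pageround(5000) == 8192, i.e. requests are rounded up to whole pages, and
 * ptr2idx() turns an address into a page_dir slot by dropping the low
 * malloc_pageshift bits and subtracting malloc_origo. The helper below is
 * hypothetical and is only compiled when MALLOC_EXAMPLE is defined.
 */
#ifdef MALLOC_EXAMPLE
static size_t
example_pageround(size_t nbytes)
{
    return pageround(nbytes);    /* e.g. 5000 -> 8192 with 4 KB pages */
}
#endif /* MALLOC_EXAMPLE */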

#ifndef THREAD_LOCK
#define THREAD_LOCK()
#endif

#ifndef THREAD_UNLOCK
#define THREAD_UNLOCK()
#endif

#ifndef MMAP_FD
#define MMAP_FD (-1)
#endif

#ifndef INIT_MMAP
#define INIT_MMAP()
#endif

#ifndef MADV_FREE
#define MADV_FREE MADV_DONTNEED
#endif

/* Set when initialization has been done */
static unsigned malloc_started;

/* Recursion flag for public interface. */
static int malloc_active;

/* Number of free pages we cache */
static unsigned malloc_cache = 16;

/* The offset from pagenumber to index into the page directory */
static u_long malloc_origo;

/* The last index in the page directory we care about */
static u_long last_idx;

/* Pointer to page directory. Allocated "as if with" malloc */
static struct pginfo **page_dir;

/* How many slots in the page directory */
static unsigned malloc_ninfo;

/* Free pages line up here */
static struct pgfree free_list;

/* Abort(), user doesn't handle problems. */
static int malloc_abort;

/* Are we trying to die ? */
static int suicide;

/* always realloc ? */
static int malloc_realloc;

/* pass the kernel a hint on free pages ? */
static int malloc_hint = 1;

/* xmalloc behaviour ? */
static int malloc_xmalloc;

/* sysv behaviour for malloc(0) ? */
static int malloc_sysv;

/* zero fill ? */
static int malloc_zero;

/* junk fill ? */
static int malloc_junk;

#ifdef HAS_UTRACE

/* utrace ? */
static int malloc_utrace;

struct ut { void *p; size_t s; void *r; };

void utrace __P((struct ut *, int));

#define UTRACE(a, b, c) \
    if (malloc_utrace) \
        {struct ut u; u.p=a; u.s = b; u.r=c; utrace(&u, sizeof u);}
#else /* !HAS_UTRACE */
#define UTRACE(a,b,c)
#endif /* HAS_UTRACE */

/* my last break. */
static void *malloc_brk;

/* one location cache for free-list holders */
static struct pgfree *px;

/* compile-time options */
char *malloc_options;

/* Name of the current public function */
static char *malloc_func;

/* Macro for mmap */
#define MMAP(size) \
    mmap(0, (size), PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, \
        MMAP_FD, (off_t)0)

/*
 * Necessary function declarations
 */
static int extend_pgdir(u_long idx);
static void *imalloc(size_t size);
static void ifree(void *ptr);
static void *irealloc(void *ptr, size_t size);

extern char *__progname;

static void
wrterror(char *p)
{
    char *q = " error: ";
    write(STDERR_FILENO, __progname, strlen(__progname));
    write(STDERR_FILENO, malloc_func, strlen(malloc_func));
    write(STDERR_FILENO, q, strlen(q));
    write(STDERR_FILENO, p, strlen(p));
    suicide = 1;
    abort();
}

static void
wrtwarning(char *p)
{
    char *q = " warning: ";
    if (malloc_abort)
        wrterror(p);
    write(STDERR_FILENO, __progname, strlen(__progname));
    write(STDERR_FILENO, malloc_func, strlen(malloc_func));
    write(STDERR_FILENO, q, strlen(q));
    write(STDERR_FILENO, p, strlen(p));
}


/*
 * Allocate a number of pages from the OS
 */
static void *
map_pages(size_t pages)
{
    caddr_t result, tail;

    result = (caddr_t)pageround((u_long)sbrk(0));
    tail = result + (pages << malloc_pageshift);

    if (brk(tail)) {
#ifdef MALLOC_EXTRA_SANITY
        wrterror("(ES): map_pages fails\n");
#endif /* MALLOC_EXTRA_SANITY */
        return 0;
    }

    last_idx = ptr2idx(tail) - 1;
    malloc_brk = tail;

    if ((last_idx+1) >= malloc_ninfo && !extend_pgdir(last_idx))
        return 0;

    return result;
}

/*
 * Extend page directory
 */
static int
extend_pgdir(u_long idx)
{
    struct pginfo **new, **old;
    size_t newlen, oldlen;

    /* Make it this many pages */
    newlen = pageround(idx * sizeof *page_dir) + malloc_pagesize;

    /* remember the old mapping size */
    oldlen = malloc_ninfo * sizeof *page_dir;

    /*
     * NOTE: we allocate new pages and copy the directory rather than tempt
     * fate by trying to "grow" the region.. There is nothing to prevent
     * us from accidentally re-mapping space that's been allocated by our caller
     * via dlopen() or other mmap().
     *
     * The copy problem is not too bad, as there is 4K of page index per
     * 4MB of malloc arena.
     *
     * We can totally avoid the copy if we open a file descriptor to associate
     * the anon mappings with. Then, when we remap the pages at the new
     * address, the old pages will be "magically" remapped.. But this means
     * keeping open a "secret" file descriptor.....
     */

    /* Get new pages */
    new = (struct pginfo**) MMAP(newlen);
    if (new == (struct pginfo **)-1)
        return 0;

    /* Copy the old stuff */
    memcpy(new, page_dir, oldlen);

    /* register the new size */
    malloc_ninfo = newlen / sizeof *page_dir;

    /* swap the pointers */
    old = page_dir;
    page_dir = new;

    /* Now free the old stuff */
    munmap(old, oldlen);
    return 1;
}

/*
 * Initialize the world
 */
static void
malloc_init (void)
{
    char *p, b[64];
    int i, j;
    int errnosave;

    /*
     * Compute page-size related variables.
     */
    malloc_pagesize = (size_t)sysconf(_SC_PAGESIZE);
    malloc_pagemask = malloc_pagesize - 1;
    for (malloc_pageshift = 0;
        (1UL << malloc_pageshift) != malloc_pagesize;
        malloc_pageshift++)
        /* nothing */ ;

    INIT_MMAP();

#ifdef MALLOC_EXTRA_SANITY
    malloc_junk = 1;
#endif /* MALLOC_EXTRA_SANITY */

    for (i = 0; i < 3; i++) {
        if (i == 0) {
            errnosave = errno;
            j = readlink("/etc/malloc.conf", b, sizeof b - 1);
            errno = errnosave;
            if (j <= 0)
                continue;
            b[j] = '\0';
            p = b;
        } else if (i == 1) {
            p = getenv("MALLOC_OPTIONS");
        } else {
            p = malloc_options;
        }
        for (; p && *p; p++) {
            switch (*p) {
            case '>': malloc_cache <<= 1; break;
            case '<': malloc_cache >>= 1; break;
            case 'a': malloc_abort = 0; break;
            case 'A': malloc_abort = 1; break;
            case 'h': malloc_hint = 0; break;
            case 'H': malloc_hint = 1; break;
            case 'r': malloc_realloc = 0; break;
            case 'R': malloc_realloc = 1; break;
            case 'j': malloc_junk = 0; break;
            case 'J': malloc_junk = 1; break;
#ifdef HAS_UTRACE
            case 'u': malloc_utrace = 0; break;
            case 'U': malloc_utrace = 1; break;
#endif
            case 'v': malloc_sysv = 0; break;
            case 'V': malloc_sysv = 1; break;
            case 'x': malloc_xmalloc = 0; break;
            case 'X': malloc_xmalloc = 1; break;
            case 'z': malloc_zero = 0; break;
            case 'Z': malloc_zero = 1; break;
            default:
                j = malloc_abort;
                malloc_abort = 0;
                wrtwarning("unknown char in MALLOC_OPTIONS\n");
                malloc_abort = j;
                break;
            }
        }
    }

    UTRACE(0, 0, 0);

    /*
     * We want junk in the entire allocation, and zero only in the part
     * the user asked for.
     */
    if (malloc_zero)
        malloc_junk=1;

    /*
     * If we run with junk (or implicitly from above: zero), we want to
     * force realloc() to get new storage, so we can DTRT with it.
     */
    if (malloc_junk)
        malloc_realloc=1;

    /* Allocate one page for the page directory */
    page_dir = (struct pginfo **) MMAP(malloc_pagesize);

    if (page_dir == (struct pginfo **) -1)
        wrterror("mmap(2) failed, check limits.\n");

    /*
     * We need a maximum of malloc_pageshift buckets, steal these from the
     * front of the page_directory;
     */
    malloc_origo = ((u_long)pageround((u_long)sbrk(0))) >> malloc_pageshift;
    malloc_origo -= malloc_pageshift;

    malloc_ninfo = malloc_pagesize / sizeof *page_dir;

    /* Recalculate the cache size in bytes, and make sure it's nonzero */

    if (!malloc_cache)
        malloc_cache++;

    malloc_cache <<= malloc_pageshift;

    /*
     * This is a nice hack from Kaleb Keithley (kaleb@x.org).
     * We can sbrk(2) further back when we keep this on a low address.
     */
    px = (struct pgfree *) imalloc (sizeof *px);

    /* Been here, done that */
    malloc_started++;
}
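
/*
 * Usage sketch (illustrative, not part of this file): a program can select
 * the flags parsed by malloc_init() above before its first allocation,
 * either through the /etc/malloc.conf symlink, the MALLOC_OPTIONS
 * environment variable, or the exported malloc_options hook. "AJ" below
 * turns on abort-on-error and junk filling; the function name is made up
 * and the block is only compiled if MALLOC_EXAMPLE is defined.
 */
#ifdef MALLOC_EXAMPLE
static void
example_enable_malloc_debugging(void)
{
    malloc_options = "AJ";    /* read on the first call into malloc() */
}
#endif /* MALLOC_EXAMPLE */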

/*
 * Allocate a number of complete pages
 */
static void *
malloc_pages(size_t size)
{
    void *p, *delay_free = 0;
    int i;
    struct pgfree *pf;
    u_long idx;

    size = pageround(size);

    p = 0;

    /* Look for free pages before asking for more */
    for(pf = free_list.next; pf; pf = pf->next) {

#ifdef MALLOC_EXTRA_SANITY
        if (pf->size & malloc_pagemask)
            wrterror("(ES): junk length entry on free_list\n");
        if (!pf->size)
            wrterror("(ES): zero length entry on free_list\n");
        if (pf->page == pf->end)
            wrterror("(ES): zero entry on free_list\n");
        if (pf->page > pf->end)
            wrterror("(ES): sick entry on free_list\n");
        if ((void*)pf->page >= (void*)sbrk(0))
            wrterror("(ES): entry on free_list past brk\n");
        if (page_dir[ptr2idx(pf->page)] != MALLOC_FREE)
            wrterror("(ES): non-free first page on free-list\n");
        if (page_dir[ptr2idx(pf->end)-1] != MALLOC_FREE)
            wrterror("(ES): non-free last page on free-list\n");
#endif /* MALLOC_EXTRA_SANITY */

        if (pf->size < size)
            continue;

        if (pf->size == size) {
            p = pf->page;
            if (pf->next)
                pf->next->prev = pf->prev;
            pf->prev->next = pf->next;
            delay_free = pf;
            break;
        }

        p = pf->page;
        pf->page = (char *)pf->page + size;
        pf->size -= size;
        break;
    }

#ifdef MALLOC_EXTRA_SANITY
    if (p && page_dir[ptr2idx(p)] != MALLOC_FREE)
        wrterror("(ES): allocated non-free page on free-list\n");
#endif /* MALLOC_EXTRA_SANITY */

    size >>= malloc_pageshift;

    /* Map new pages */
    if (!p)
        p = map_pages(size);

    if (p) {

        idx = ptr2idx(p);
        page_dir[idx] = MALLOC_FIRST;
        for (i=1;i<size;i++)
            page_dir[idx+i] = MALLOC_FOLLOW;

        if (malloc_junk)
            memset(p, SOME_JUNK, size << malloc_pageshift);
    }

    if (delay_free) {
        if (!px)
            px = delay_free;
        else
            ifree(delay_free);
    }

    return p;
}

/*
 * Allocate a page of fragments
 */

static __inline__ int
malloc_make_chunks(int bits)
{
    struct pginfo *bp;
    void *pp;
    int i, k, l;

    /* Allocate a new bucket */
    pp = malloc_pages(malloc_pagesize);
    if (!pp)
        return 0;

    /* Find length of admin structure */
    l = offsetof(struct pginfo, bits[0]);
    l += sizeof bp->bits[0] *
        (((malloc_pagesize >> bits)+MALLOC_BITS-1) / MALLOC_BITS);

    /* Don't waste more than two chunks on this */
    if ((1<<(bits)) <= l+l) {
        bp = (struct pginfo *)pp;
    } else {
        bp = (struct pginfo *)imalloc(l);
        if (!bp) {
            ifree(pp);
            return 0;
        }
    }

    bp->size = (1<<bits);
    bp->shift = bits;
    bp->total = bp->free = malloc_pagesize >> bits;
    bp->page = pp;

    /* set all valid bits in the bitmap */
    k = bp->total;
    i = 0;

    /* Do a bunch at a time */
    for(;k-i >= MALLOC_BITS; i += MALLOC_BITS)
        bp->bits[i / MALLOC_BITS] = ~0U;

    for(; i < k; i++)
        bp->bits[i/MALLOC_BITS] |= 1<<(i%MALLOC_BITS);

    if (bp == bp->page) {
        /* Mark the ones we stole for ourselves */
        for(i=0;l > 0;i++) {
            bp->bits[i/MALLOC_BITS] &= ~(1<<(i%MALLOC_BITS));
            bp->free--;
            bp->total--;
            l -= (1 << bits);
        }
    }

    /* MALLOC_LOCK */

    page_dir[ptr2idx(pp)] = bp;

    bp->next = page_dir[bits];
    page_dir[bits] = bp;

    /* MALLOC_UNLOCK */

    return 1;
}

/*
 * Allocate a fragment
 */
static void *
malloc_bytes(size_t size)
{
    int i,j;
    u_int u;
    struct pginfo *bp;
    int k;
    u_int *lp;

    /* Don't bother with anything less than this */
    if (size < malloc_minsize)
        size = malloc_minsize;

    /* Find the right bucket */
    j = 1;
    i = size-1;
    while (i >>= 1)
        j++;

    /* If it's empty, make a page more of that size chunks */
    if (!page_dir[j] && !malloc_make_chunks(j))
        return 0;

    bp = page_dir[j];

    /* Find first word of bitmap which isn't empty */
    for (lp = bp->bits; !*lp; lp++)
        ;

    /* Find that bit, and tweak it */
    u = 1;
    k = 0;
    while (!(*lp & u)) {
        u += u;
        k++;
    }
    *lp ^= u;

    /* If there are no more free, remove from free-list */
    if (!--bp->free) {
        page_dir[j] = bp->next;
        bp->next = 0;
    }

    /* Adjust to the real offset of that chunk */
    k += (lp-bp->bits)*MALLOC_BITS;
    k <<= bp->shift;

    if (malloc_junk)
        memset((u_char*)bp->page + k, SOME_JUNK, (size_t)bp->size);

    return (u_char *)bp->page + k;
}
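
/*
 * Illustrative sketch of the bucket choice made in malloc_bytes() above:
 * requests are rounded up to the next power of two, never below
 * malloc_minsize, so for example a 100-byte request is served from the
 * 128-byte (j == 7) bucket. The helper is hypothetical and is only
 * compiled when MALLOC_EXAMPLE is defined.
 */
#ifdef MALLOC_EXAMPLE
static int
example_size2bucket(size_t size)
{
    size_t i;
    int j = 1;

    if (size < malloc_minsize)
        size = malloc_minsize;
    for (i = size - 1; i >>= 1;)
        j++;
    return j;    /* the chunk handed out is 1 << j bytes */
}
#endif /* MALLOC_EXAMPLE */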

/*
 * Allocate a piece of memory
 */
static void *
imalloc(size_t size)
{
    void *result;

    if (suicide)
        abort();

    if ((size + malloc_pagesize) < size)       /* Check for overflow */
        result = 0;
    else if (size <= malloc_maxsize)
        result = malloc_bytes(size);
    else
        result = malloc_pages(size);

    if (malloc_abort && !result)
        wrterror("allocation failed.\n");

    if (malloc_zero && result)
        memset(result, 0, size);

    return result;
}

/*
 * Change the size of an allocation.
 */
static void *
irealloc(void *ptr, size_t size)
{
    void *p;
    u_long osize, idx;
    struct pginfo **mp;
    int i;

    if (suicide)
        abort();

    idx = ptr2idx(ptr);

    if (idx < malloc_pageshift) {
        wrtwarning("junk pointer, too low to make sense.\n");
        return 0;
    }

    if (idx > last_idx) {
        wrtwarning("junk pointer, too high to make sense.\n");
        return 0;
    }

    mp = &page_dir[idx];

    if (*mp == MALLOC_FIRST) {                  /* Page allocation */

        /* Check the pointer */
        if ((u_long)ptr & malloc_pagemask) {
            wrtwarning("modified (page-) pointer.\n");
            return 0;
        }

        /* Find the size in bytes */
        for (osize = malloc_pagesize; *++mp == MALLOC_FOLLOW;)
            osize += malloc_pagesize;

        if (!malloc_realloc &&                  /* unless we have to, */
            size <= osize &&                    /* .. or are too small, */
            size > (osize - malloc_pagesize)) { /* .. or can free a page, */
            return ptr;                         /* don't do anything. */
        }

    } else if (*mp >= MALLOC_MAGIC) {           /* Chunk allocation */

        /* Check the pointer for sane values */
        if (((u_long)ptr & ((*mp)->size-1))) {
            wrtwarning("modified (chunk-) pointer.\n");
            return 0;
        }

        /* Find the chunk index in the page */
        i = ((u_long)ptr & malloc_pagemask) >> (*mp)->shift;

        /* Verify that it isn't a free chunk already */
        if ((*mp)->bits[i/MALLOC_BITS] & (1<<(i%MALLOC_BITS))) {
            wrtwarning("chunk is already free.\n");
            return 0;
        }

        osize = (*mp)->size;

        if (!malloc_realloc &&                  /* Unless we have to, */
            size < osize &&                     /* ..or are too small, */
            (size > osize/2 ||                  /* ..or could use a smaller size, */
            osize == malloc_minsize)) {         /* ..(if there is one) */
            return ptr;                         /* ..Don't do anything */
        }

    } else {
        wrtwarning("pointer to wrong page.\n");
        return 0;
    }

    p = imalloc(size);

    if (p) {
        /* copy the lesser of the two sizes, and free the old one */
        if (!size || !osize)
            ;
        else if (osize < size)
            memcpy(p, ptr, osize);
        else
            memcpy(p, ptr, size);
        ifree(ptr);
    }
    return p;
}

/*
 * Free a sequence of pages
 */

static __inline__ void
free_pages(void *ptr, int idx, struct pginfo *info)
{
    int i;
    struct pgfree *pf, *pt=0;
    u_long l;
    void *tail;

    if (info == MALLOC_FREE) {
        wrtwarning("page is already free.\n");
        return;
    }

    if (info != MALLOC_FIRST) {
        wrtwarning("pointer to wrong page.\n");
        return;
    }

    if ((u_long)ptr & malloc_pagemask) {
        wrtwarning("modified (page-) pointer.\n");
        return;
    }

    /* Count how many pages and mark them free at the same time */
    page_dir[idx] = MALLOC_FREE;
    for (i = 1; page_dir[idx+i] == MALLOC_FOLLOW; i++)
        page_dir[idx + i] = MALLOC_FREE;

    l = i << malloc_pageshift;

    if (malloc_junk)
        memset(ptr, SOME_JUNK, l);

    if (malloc_hint)
        madvise(ptr, l, MADV_FREE);

    tail = (char *)ptr+l;

    /* add to free-list */
    if (!px)
        px = imalloc(sizeof *pt);       /* This cannot fail... */
    px->page = ptr;
    px->end = tail;
    px->size = l;
    if (!free_list.next) {

        /* Nothing on free list, put this at head */
        px->next = free_list.next;
        px->prev = &free_list;
        free_list.next = px;
        pf = px;
        px = 0;

    } else {

        /* Find the right spot, leave pf pointing to the modified entry. */
        tail = (char *)ptr+l;

        for(pf = free_list.next; pf->end < ptr && pf->next; pf = pf->next)
            ; /* Race ahead here */

        if (pf->page > tail) {
            /* Insert before entry */
            px->next = pf;
            px->prev = pf->prev;
            pf->prev = px;
            px->prev->next = px;
            pf = px;
            px = 0;
        } else if (pf->end == ptr ) {
            /* Append to the previous entry */
            pf->end = (char *)pf->end + l;
            pf->size += l;
            if (pf->next && pf->end == pf->next->page ) {
                /* And collapse the next too. */
                pt = pf->next;
                pf->end = pt->end;
                pf->size += pt->size;
                pf->next = pt->next;
                if (pf->next)
                    pf->next->prev = pf;
            }
        } else if (pf->page == tail) {
            /* Prepend to entry */
            pf->size += l;
            pf->page = ptr;
        } else if (!pf->next) {
            /* Append at tail of chain */
            px->next = 0;
            px->prev = pf;
            pf->next = px;
            pf = px;
            px = 0;
        } else {
            wrterror("freelist is destroyed.\n");
        }
    }

    /* Return something to OS ? */
    if (!pf->next &&                    /* If we're the last one, */
        pf->size > malloc_cache &&      /* ..and the cache is full, */
        pf->end == malloc_brk &&        /* ..and none behind us, */
        malloc_brk == sbrk(0)) {        /* ..and it's OK to do... */

        /*
         * Keep the cache intact. Notice that the '>' above guarantees that
         * the pf will always have at least one page afterwards.
         */
        pf->end = (char *)pf->page + malloc_cache;
        pf->size = malloc_cache;

        brk(pf->end);
        malloc_brk = pf->end;

        idx = ptr2idx(pf->end);
        last_idx = idx - 1;

        for(i=idx;i <= last_idx;)
            page_dir[i++] = MALLOC_NOT_MINE;

        /* XXX: We could realloc/shrink the pagedir here I guess. */
    }
    if (pt)
        ifree(pt);
}

/*
 * Free a chunk, and possibly the page it's on, if the page becomes empty.
 */

static __inline__ void
free_bytes(void *ptr, int idx, struct pginfo *info)
{
    int i;
    struct pginfo **mp;
    void *vp;

    /* Find the chunk number on the page */
    i = ((u_long)ptr & malloc_pagemask) >> info->shift;

    if (((u_long)ptr & (info->size-1))) {
        wrtwarning("modified (chunk-) pointer.\n");
        return;
    }

    if (info->bits[i/MALLOC_BITS] & (1<<(i%MALLOC_BITS))) {
        wrtwarning("chunk is already free.\n");
        return;
    }

    if (malloc_junk)
        memset(ptr, SOME_JUNK, (size_t)info->size);

    info->bits[i/MALLOC_BITS] |= 1<<(i%MALLOC_BITS);
    info->free++;

    mp = page_dir + info->shift;

    if (info->free == 1) {

        /* Page became non-full */

        mp = page_dir + info->shift;
        /* Insert in address order */
        while (*mp && (*mp)->next && (*mp)->next->page < info->page)
            mp = &(*mp)->next;
        info->next = *mp;
        *mp = info;
        return;
    }

    if (info->free != info->total)
        return;

    /* Find & remove this page in the queue */
    while (*mp != info) {
        mp = &((*mp)->next);
#ifdef MALLOC_EXTRA_SANITY
        if (!*mp)
            wrterror("(ES): Not on queue\n");
#endif /* MALLOC_EXTRA_SANITY */
    }
    *mp = info->next;

    /* Free the page & the info structure if need be */
    page_dir[idx] = MALLOC_FIRST;
    vp = info->page;            /* Order is important ! */
    if(vp != (void*)info)
        ifree(info);
    ifree(vp);
}

static void
ifree(void *ptr)
{
    struct pginfo *info;
    int idx;

    /* This is legal */
    if (!ptr)
        return;

    if (!malloc_started) {
        wrtwarning("malloc() has never been called.\n");
        return;
    }

    /* If we're already sinking, don't make matters any worse. */
    if (suicide)
        return;

    idx = ptr2idx(ptr);

    if (idx < malloc_pageshift) {
        wrtwarning("junk pointer, too low to make sense.\n");
        return;
    }

    if (idx > last_idx) {
        wrtwarning("junk pointer, too high to make sense.\n");
        return;
    }

    info = page_dir[idx];

    if (info < MALLOC_MAGIC)
        free_pages(ptr, idx, info);
    else
        free_bytes(ptr, idx, info);
    return;
}

/*
 * These are the public exported interface routines.
 */


void *
malloc(size_t size)
{
    register void *r;

    THREAD_LOCK();
    malloc_func = " in malloc():";
    if (malloc_active++) {
        wrtwarning("recursive call.\n");
        malloc_active--;
        return (0);
    }
    if (!malloc_started)
        malloc_init();
    if (malloc_sysv && !size)
        r = 0;
    else
        r = imalloc(size);
    UTRACE(0, size, r);
    malloc_active--;
    THREAD_UNLOCK();
    if (r == NULL && (size != 0 || !malloc_sysv)) {
        if (malloc_xmalloc)
            wrterror("out of memory.\n");
        errno = ENOMEM;
    }
    return (r);
}

void
free(void *ptr)
{
    THREAD_LOCK();
    malloc_func = " in free():";
    if (malloc_active++) {
        wrtwarning("recursive call.\n");
        malloc_active--;
        return;
    } else {
        ifree(ptr);
        UTRACE(ptr, 0, 0);
    }
    malloc_active--;
    THREAD_UNLOCK();
    return;
}

void *
realloc(void *ptr, size_t size)
{
    register void *r;

    THREAD_LOCK();
    malloc_func = " in realloc():";
    if (malloc_active++) {
        wrtwarning("recursive call.\n");
        malloc_active--;
        return (0);
    }
    if (ptr && !malloc_started) {
        wrtwarning("malloc() has never been called.\n");
        ptr = 0;
    }
    if (!malloc_started)
        malloc_init();
    if (malloc_sysv && !size) {
        ifree(ptr);
        r = 0;
    } else if (!ptr) {
        r = imalloc(size);
    } else {
        r = irealloc(ptr, size);
    }
    UTRACE(ptr, size, r);
    malloc_active--;
    THREAD_UNLOCK();
    if (r == NULL && (size != 0 || !malloc_sysv)) {
        if (malloc_xmalloc)
            wrterror("out of memory.\n");
        errno = ENOMEM;
    }
    return (r);
}