/*	$NetBSD: malloc.c,v 1.21 1999/07/05 21:55:46 christos Exp $	*/

/*
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file. As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * From FreeBSD: malloc.c,v 1.43 1998/09/30 06:13:59 jb
 *
 */

/*
 * Defining MALLOC_EXTRA_SANITY will enable extra checks which are related
 * to internal conditions and consistency in malloc.c. This has a
 * noticeable runtime performance hit, and generally will not do you
 * any good unless you fiddle with the internals of malloc or want
 * to catch random pointer corruption as early as possible.
 */
#ifndef MALLOC_EXTRA_SANITY
#undef MALLOC_EXTRA_SANITY
#endif

/*
 * What to use for Junk. This is the byte value we use to fill with
 * when the 'J' option is enabled.
 */
#define SOME_JUNK	0xd0		/* as in "Duh" :-) */

/*
 * The basic parameters you can tweak.
 *
 * malloc_minsize	minimum size of an allocation in bytes.
 *			If this is too small it's too much work
 *			to manage them. This is also the smallest
 *			unit of alignment used for the storage
 *			returned by malloc/realloc.
 *
 */

#if defined(__FreeBSD__)
#   if defined(__i386__)
#       define malloc_minsize		16U
#   endif
#   if defined(__alpha__)
#       define malloc_minsize		16U
#   endif
#   if !defined(__NETBSD_SYSCALLS)
#       define HAS_UTRACE
#   endif
    /*
     * Make malloc/free/realloc thread-safe in libc for use with
     * kernel threads.
     */
#   include "libc_private.h"
#   include "spinlock.h"
    static spinlock_t thread_lock = _SPINLOCK_INITIALIZER;
#   define THREAD_LOCK()	if (__isthreaded) _SPINLOCK(&thread_lock);
#   define THREAD_UNLOCK()	if (__isthreaded) _SPINUNLOCK(&thread_lock);
#endif /* __FreeBSD__ */

#if defined(__NetBSD__)
#   include <sys/param.h>
    size_t pagesize;
#   define malloc_pageshift		pagesize
#   define malloc_minsize		16U
#endif /* __NetBSD__ */

#if defined(__sparc__) && defined(sun)
#   define malloc_minsize		16U
#   define MAP_ANON			(0)
    static int fdzero;
#   define MMAP_FD	fdzero
#   define INIT_MMAP() \
	{ if ((fdzero = open("/dev/zero", O_RDWR, 0000)) == -1) \
	    wrterror("open of /dev/zero"); }
#endif /* __sparc__ */

/* Insert your combination here... */
#if defined(__FOOCPU__) && defined(__BAROS__)
#   define malloc_minsize		16U
#endif /* __FOOCPU__ && __BAROS__ */


/*
 * No user serviceable parts behind this point.
 */
#include <sys/types.h>
#include <sys/mman.h>
#include <errno.h>
#include <fcntl.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/*
 * This structure describes a page worth of chunks.
 */

struct pginfo {
    struct pginfo	*next;	/* next on the free list */
    void		*page;	/* Pointer to the page */
    u_short		size;	/* size of this page's chunks */
    u_short		shift;	/* How far to shift for this size chunks */
    u_short		free;	/* How many free chunks */
    u_short		total;	/* How many chunks */
    u_int		bits[1]; /* Which chunks are free */
};

/*
 * This structure describes a number of free pages.
 */

struct pgfree {
    struct pgfree	*next;	/* next run of free pages */
    struct pgfree	*prev;	/* prev run of free pages */
    void		*page;	/* pointer to free pages */
    void		*end;	/* pointer to end of free pages */
    size_t		size;	/* number of bytes free */
};
125
126 /*
127 * How many bits per u_int in the bitmap.
128 * Change only if not 8 bits/byte
129 */
130 #define MALLOC_BITS (8*sizeof(u_int))
131
132 /*
133 * Magic values to put in the page_directory
134 */
135 #define MALLOC_NOT_MINE ((struct pginfo*) 0)
136 #define MALLOC_FREE ((struct pginfo*) 1)
137 #define MALLOC_FIRST ((struct pginfo*) 2)
138 #define MALLOC_FOLLOW ((struct pginfo*) 3)
139 #define MALLOC_MAGIC ((struct pginfo*) 4)
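
/*
 * Each entry in the page directory describes one page of the arena:
 * either one of the magic values above (not ours, free, first page of
 * a multi-page allocation, continuation page) or, for a page that has
 * been carved into chunks, a pointer to its struct pginfo.  Any value
 * >= MALLOC_MAGIC is therefore taken to be such a pointer; see the
 * dispatch on this encoding in ifree() below.
 */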

/*
 * Page size related parameters, computed at run-time.
 */
static size_t malloc_pagesize;
static size_t malloc_pageshift;
static size_t malloc_pagemask;

#ifndef malloc_minsize
#define malloc_minsize			16U
#endif

#ifndef malloc_maxsize
#define malloc_maxsize			((malloc_pagesize)>>1)
#endif

#define pageround(foo) (((foo) + (malloc_pagemask))&(~(malloc_pagemask)))
#define ptr2index(foo) (((u_long)(foo) >> malloc_pageshift)-malloc_origo)
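
/*
 * Example (assuming a 4 KiB page, i.e. malloc_pagesize == 4096 and
 * malloc_pageshift == 12): pageround(5000) rounds up to 8192, and
 * ptr2index() turns an address into a page number relative to
 * malloc_origo, so it can be used directly as an index into page_dir.
 */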

#ifndef THREAD_LOCK
#define THREAD_LOCK()
#endif

#ifndef THREAD_UNLOCK
#define THREAD_UNLOCK()
#endif

#ifndef MMAP_FD
#define MMAP_FD (-1)
#endif

#ifndef INIT_MMAP
#define INIT_MMAP()
#endif

#ifndef MADV_FREE
#define MADV_FREE MADV_DONTNEED
#endif

/* Set when initialization has been done */
static unsigned malloc_started;

/* Recursion flag for public interface. */
static int malloc_active;

/* Number of free pages we cache */
static unsigned malloc_cache = 16;

/* The offset from pagenumber to index into the page directory */
static u_long malloc_origo;

/* The last index in the page directory we care about */
static u_long last_index;

/* Pointer to page directory. Allocated "as if with" malloc */
static struct pginfo **page_dir;

/* How many slots in the page directory */
static unsigned malloc_ninfo;

/* Free pages line up here */
static struct pgfree free_list;

/* Abort(), user doesn't handle problems. */
static int malloc_abort;

/* Are we trying to die ? */
static int suicide;

/* always realloc ? */
static int malloc_realloc;

/* pass the kernel a hint on free pages ? */
static int malloc_hint = 1;

/* xmalloc behaviour ? */
static int malloc_xmalloc;

/* sysv behaviour for malloc(0) ? */
static int malloc_sysv;

/* zero fill ? */
static int malloc_zero;

/* junk fill ? */
static int malloc_junk;

#ifdef HAS_UTRACE

/* utrace ? */
static int malloc_utrace;

struct ut { void *p; size_t s; void *r; };

void utrace __P((struct ut *, int));

#define UTRACE(a, b, c) \
	if (malloc_utrace) \
		{struct ut u; u.p=a; u.s = b; u.r=c; utrace(&u, sizeof u);}
#else /* !HAS_UTRACE */
#define UTRACE(a,b,c)
#endif /* HAS_UTRACE */

/* my last break. */
static void *malloc_brk;

/* one location cache for free-list holders */
static struct pgfree *px;

/* compile-time options */
char *malloc_options;

/* Name of the current public function */
static const char *malloc_func;

/* Macro for mmap */
#define MMAP(size) \
	mmap(0, (size), PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, \
	    MMAP_FD, 0)

/*
 * Necessary function declarations
 */
static int extend_pgdir(u_long idx);
static void *imalloc(size_t size);
static void ifree(void *ptr);
static void *irealloc(void *ptr, size_t size);

extern char *__progname;

static void
wrterror(const char *p)
{
    const char *q = " error: ";
    write(STDERR_FILENO, __progname, strlen(__progname));
    write(STDERR_FILENO, malloc_func, strlen(malloc_func));
    write(STDERR_FILENO, q, strlen(q));
    write(STDERR_FILENO, p, strlen(p));
    suicide = 1;
    abort();
}

static void
wrtwarning(const char *p)
{
    const char *q = " warning: ";
    if (malloc_abort)
	wrterror(p);
    write(STDERR_FILENO, __progname, strlen(__progname));
    write(STDERR_FILENO, malloc_func, strlen(malloc_func));
    write(STDERR_FILENO, q, strlen(q));
    write(STDERR_FILENO, p, strlen(p));
}


/*
 * Allocate a number of pages from the OS
 */
static void *
map_pages(int pages)
{
    caddr_t result, tail;

    result = (caddr_t)pageround((u_long)sbrk(0));
    tail = result + (pages << malloc_pageshift);

    if (brk(tail)) {
#ifdef MALLOC_EXTRA_SANITY
	wrterror("(ES): map_pages fails\n");
#endif /* MALLOC_EXTRA_SANITY */
	return 0;
    }

    last_index = ptr2index(tail) - 1;
    malloc_brk = tail;

    if ((last_index+1) >= malloc_ninfo && !extend_pgdir(last_index))
	return 0;

    return result;
}
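
/*
 * Note that the arena grows with brk(2)/sbrk(2) rather than mmap(2):
 * map_pages() rounds the current break up to a page boundary, moves
 * the break up by the requested number of pages, and then makes sure
 * the page directory is large enough to describe the new pages,
 * extending it via extend_pgdir() if necessary.
 */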

/*
 * Extend page directory
 */
static int
extend_pgdir(u_long idx)
{
    struct pginfo **new, **old;
    int i, oldlen;

    /* Make it this many pages */
    i = idx * sizeof *page_dir;
    i /= malloc_pagesize;
    i += 2;

    /* remember the old mapping size */
    oldlen = malloc_ninfo * sizeof *page_dir;

    /*
     * NOTE: we allocate new pages and copy the directory rather than tempt
     * fate by trying to "grow" the region.. There is nothing to prevent
     * us from accidentally re-mapping space that's been allocated by our caller
     * via dlopen() or other mmap().
     *
     * The copy problem is not too bad, as there is 4K of page index per
     * 4MB of malloc arena.
     *
     * We can totally avoid the copy if we open a file descriptor to associate
     * the anon mappings with.  Then, when we remap the pages at the new
     * address, the old pages will be "magically" remapped..  But this means
     * keeping open a "secret" file descriptor.....
     */

    /* Get new pages */
    new = (struct pginfo**) MMAP(i * malloc_pagesize);
    if (new == (struct pginfo **)-1)
	return 0;

    /* Copy the old stuff */
    memcpy(new, page_dir,
	    malloc_ninfo * sizeof *page_dir);

    /* register the new size */
    malloc_ninfo = i * malloc_pagesize / sizeof *page_dir;

    /* swap the pointers */
    old = page_dir;
    page_dir = new;

    /* Now free the old stuff */
    munmap(old, oldlen);
    return 1;
}

/*
 * Initialize the world
 */
static void
malloc_init (void)
{
    char *p, b[64];
    int i, j;
    int errnosave;

    /*
     * Compute page-size related variables.
     */
    malloc_pagesize = sysconf(_SC_PAGESIZE);
    malloc_pagemask = malloc_pagesize - 1;
    for (malloc_pageshift = 0;
	 (1UL << malloc_pageshift) != malloc_pagesize;
	 malloc_pageshift++)
	/* nothing */ ;

    INIT_MMAP();

#ifdef MALLOC_EXTRA_SANITY
    malloc_junk = 1;
#endif /* MALLOC_EXTRA_SANITY */
#ifdef __NetBSD__
    pagesize = sysconf(_SC_PAGESIZE);
#endif

    for (i = 0; i < 3; i++) {
	if (i == 0) {
	    errnosave = errno;
	    j = readlink("/etc/malloc.conf", b, sizeof b - 1);
	    errno = errnosave;
	    if (j <= 0)
		continue;
	    b[j] = '\0';
	    p = b;
	} else if (i == 1) {
	    p = getenv("MALLOC_OPTIONS");
	} else {
	    p = malloc_options;
	}
	for (; p && *p; p++) {
	    switch (*p) {
		case '>': malloc_cache   <<= 1; break;
		case '<': malloc_cache   >>= 1; break;
		case 'a': malloc_abort   = 0; break;
		case 'A': malloc_abort   = 1; break;
		case 'h': malloc_hint    = 0; break;
		case 'H': malloc_hint    = 1; break;
		case 'r': malloc_realloc = 0; break;
		case 'R': malloc_realloc = 1; break;
		case 'j': malloc_junk    = 0; break;
		case 'J': malloc_junk    = 1; break;
#ifdef HAS_UTRACE
		case 'u': malloc_utrace  = 0; break;
		case 'U': malloc_utrace  = 1; break;
#endif
		case 'v': malloc_sysv    = 0; break;
		case 'V': malloc_sysv    = 1; break;
		case 'x': malloc_xmalloc = 0; break;
		case 'X': malloc_xmalloc = 1; break;
		case 'z': malloc_zero    = 0; break;
		case 'Z': malloc_zero    = 1; break;
		default:
		    j = malloc_abort;
		    malloc_abort = 0;
		    wrtwarning("unknown char in MALLOC_OPTIONS\n");
		    malloc_abort = j;
		    break;
	    }
	}
    }
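
    /*
     * Option flags are read from three places, in order: the target of
     * the /etc/malloc.conf symlink (via readlink(2)), the MALLOC_OPTIONS
     * environment variable, and the malloc_options global; later sources
     * override earlier ones.  For example, running a program with
     * MALLOC_OPTIONS=AJ enables abort-on-warning and junk filling, and
     * something like "ln -s J /etc/malloc.conf" sets a system-wide
     * default.
     */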

    UTRACE(0, 0, 0);

    /*
     * We want junk in the entire allocation, and zero only in the part
     * the user asked for.
     */
    if (malloc_zero)
	malloc_junk = 1;

    /*
     * If we run with junk (or implicitly from above: zero), we want to
     * force realloc() to get new storage, so we can DTRT with it.
     */
    if (malloc_junk)
	malloc_realloc = 1;

    /* Allocate one page for the page directory */
    page_dir = (struct pginfo **) MMAP(malloc_pagesize);

    if (page_dir == (struct pginfo **) -1)
	wrterror("mmap(2) failed, check limits.\n");

    /*
     * We need a maximum of malloc_pageshift buckets, steal these from the
     * front of the page_directory.
     */
    malloc_origo = ((u_long)pageround((u_long)sbrk(0))) >> malloc_pageshift;
    malloc_origo -= malloc_pageshift;

    malloc_ninfo = malloc_pagesize / sizeof *page_dir;

    /* Recalculate the cache size in bytes, and make sure it's nonzero */

    if (!malloc_cache)
	malloc_cache++;

    malloc_cache <<= malloc_pageshift;

    /*
     * This is a nice hack from Kaleb Keithley (kaleb@x.org).
     * We can sbrk(2) further back when we keep this on a low address.
     */
    px = (struct pgfree *) imalloc (sizeof *px);

    /* Been here, done that */
    malloc_started++;
}

/*
 * Allocate a number of complete pages
 */
static void *
malloc_pages(size_t size)
{
    void *p, *delay_free = 0;
    int i;
    struct pgfree *pf;
    u_long idx;

    size = pageround(size);

    p = 0;

    /* Look for free pages before asking for more */
    for(pf = free_list.next; pf; pf = pf->next) {

#ifdef MALLOC_EXTRA_SANITY
	if (pf->size & malloc_pagemask)
	    wrterror("(ES): junk length entry on free_list\n");
	if (!pf->size)
	    wrterror("(ES): zero length entry on free_list\n");
	if (pf->page == pf->end)
	    wrterror("(ES): zero entry on free_list\n");
	if (pf->page > pf->end)
	    wrterror("(ES): sick entry on free_list\n");
	if ((void*)pf->page >= (void*)sbrk(0))
	    wrterror("(ES): entry on free_list past brk\n");
	if (page_dir[ptr2index(pf->page)] != MALLOC_FREE)
	    wrterror("(ES): non-free first page on free-list\n");
	if (page_dir[ptr2index(pf->end)-1] != MALLOC_FREE)
	    wrterror("(ES): non-free last page on free-list\n");
#endif /* MALLOC_EXTRA_SANITY */

	if (pf->size < size)
	    continue;

	if (pf->size == size) {
	    p = pf->page;
	    if (pf->next)
		pf->next->prev = pf->prev;
	    pf->prev->next = pf->next;
	    delay_free = pf;
	    break;
	}

	p = pf->page;
	pf->page = (char *)pf->page + size;
	pf->size -= size;
	break;
    }

#ifdef MALLOC_EXTRA_SANITY
    if (p && page_dir[ptr2index(p)] != MALLOC_FREE)
	wrterror("(ES): allocated non-free page on free-list\n");
#endif /* MALLOC_EXTRA_SANITY */

    size >>= malloc_pageshift;

    /* Map new pages */
    if (!p)
	p = map_pages(size);

    if (p) {

	idx = ptr2index(p);
	page_dir[idx] = MALLOC_FIRST;
	for (i=1;i<size;i++)
	    page_dir[idx+i] = MALLOC_FOLLOW;

	if (malloc_junk)
	    memset(p, SOME_JUNK, size << malloc_pageshift);
    }

    if (delay_free) {
	if (!px)
	    px = delay_free;
	else
	    ifree(delay_free);
    }

    return p;
}
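
/*
 * The search above is a simple first fit over the address-sorted free
 * page list: an exact-size run is unlinked entirely (and its pgfree
 * holder recycled or freed afterwards), while a larger run is split by
 * advancing its page pointer.  Only when no cached run is big enough
 * does map_pages() extend the break.
 */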

/*
 * Allocate a page of fragments
 */

static __inline__ int
malloc_make_chunks(int bits)
{
    struct pginfo *bp;
    void *pp;
    int i, k, l;

    /* Allocate a new bucket */
    pp = malloc_pages(malloc_pagesize);
    if (!pp)
	return 0;

    /* Find length of admin structure */
    l = offsetof(struct pginfo, bits[0]);
    l += sizeof bp->bits[0] *
	(((malloc_pagesize >> bits)+MALLOC_BITS-1) / MALLOC_BITS);

    /* Don't waste more than two chunks on this */
    if ((1<<(bits)) <= l+l) {
	bp = (struct pginfo *)pp;
    } else {
	bp = (struct pginfo *)imalloc(l);
	if (!bp) {
	    ifree(pp);
	    return 0;
	}
    }

    bp->size = (1<<bits);
    bp->shift = bits;
    bp->total = bp->free = malloc_pagesize >> bits;
    bp->page = pp;

    /* set all valid bits in the bitmap */
    k = bp->total;
    i = 0;

    /* Do a bunch at a time */
    for(;k-i >= MALLOC_BITS; i += MALLOC_BITS)
	bp->bits[i / MALLOC_BITS] = ~0;

    for(; i < k; i++)
	bp->bits[i/MALLOC_BITS] |= 1<<(i%MALLOC_BITS);

    if (bp == bp->page) {
	/* Mark the ones we stole for ourselves */
	for(i=0;l > 0;i++) {
	    bp->bits[i/MALLOC_BITS] &= ~(1<<(i%MALLOC_BITS));
	    bp->free--;
	    bp->total--;
	    l -= (1 << bits);
	}
    }

    /* MALLOC_LOCK */

    page_dir[ptr2index(pp)] = bp;

    bp->next = page_dir[bits];
    page_dir[bits] = bp;

    /* MALLOC_UNLOCK */

    return 1;
}
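
/*
 * Example (assuming a 4 KiB page): malloc_make_chunks(5) sets up a page
 * of 128 32-byte chunks.  Its pginfo needs a 128-bit bitmap (four u_ints
 * on typical systems), so the admin structure fits well within two
 * chunks and is placed at the start of the page itself, with the
 * corresponding bits cleared.  For larger chunk sizes the pginfo is
 * allocated separately via imalloc() instead.
 */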

/*
 * Allocate a fragment
 */
static void *
malloc_bytes(size_t size)
{
    int i,j;
    u_int u;
    struct pginfo *bp;
    int k;
    u_int *lp;

    /* Don't bother with anything less than this */
    if (size < malloc_minsize)
	size = malloc_minsize;

    /* Find the right bucket */
    j = 1;
    i = size-1;
    while (i >>= 1)
	j++;

    /* If it's empty, make a page more of that size chunks */
    if (!page_dir[j] && !malloc_make_chunks(j))
	return 0;

    bp = page_dir[j];

    /* Find first word of bitmap which isn't empty */
    for (lp = bp->bits; !*lp; lp++)
	;

    /* Find that bit, and tweak it */
    u = 1;
    k = 0;
    while (!(*lp & u)) {
	u += u;
	k++;
    }
    *lp ^= u;

    /* If there are no more free, remove from free-list */
    if (!--bp->free) {
	page_dir[j] = bp->next;
	bp->next = 0;
    }

    /* Adjust to the real offset of that chunk */
    k += (lp-bp->bits)*MALLOC_BITS;
    k <<= bp->shift;

    if (malloc_junk)
	memset((u_char*)bp->page + k, SOME_JUNK, bp->size);

    return (u_char *)bp->page + k;
}
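
/*
 * Bucket selection example: a request for 100 bytes gives j = 7 (the
 * smallest power of two >= 100 is 128), so the allocation comes from a
 * page of 128-byte chunks headed by page_dir[7].  The low entries of
 * page_dir can double as these bucket heads because real page indices
 * start well above malloc_pageshift; see the malloc_origo setup in
 * malloc_init().
 */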

/*
 * Allocate a piece of memory
 */
static void *
imalloc(size_t size)
{
    void *result;

    if (suicide)
	abort();

    if ((size + malloc_pagesize) < size)	/* Check for overflow */
	result = 0;
    else if (size <= malloc_maxsize)
	result = malloc_bytes(size);
    else
	result = malloc_pages(size);

    if (malloc_abort && !result)
	wrterror("allocation failed.\n");

    if (malloc_zero && result)
	memset(result, 0, size);

    return result;
}

/*
 * Change the size of an allocation.
 */
static void *
irealloc(void *ptr, size_t size)
{
    void *p;
    u_long osize, idx;
    struct pginfo **mp;
    int i;

    if (suicide)
	abort();

    idx = ptr2index(ptr);

    if (idx < malloc_pageshift) {
	wrtwarning("junk pointer, too low to make sense.\n");
	return 0;
    }

    if (idx > last_index) {
	wrtwarning("junk pointer, too high to make sense.\n");
	return 0;
    }

    mp = &page_dir[idx];

    if (*mp == MALLOC_FIRST) {			/* Page allocation */

	/* Check the pointer */
	if ((u_long)ptr & malloc_pagemask) {
	    wrtwarning("modified (page-) pointer.\n");
	    return 0;
	}

	/* Find the size in bytes */
	for (osize = malloc_pagesize; *++mp == MALLOC_FOLLOW;)
	    osize += malloc_pagesize;

	if (!malloc_realloc &&			/* unless we have to, */
	    size <= osize &&			/* .. or are too small, */
	    size > (osize - malloc_pagesize)) {	/* .. or can free a page, */
	    return ptr;				/* don't do anything. */
	}

    } else if (*mp >= MALLOC_MAGIC) {		/* Chunk allocation */

	/* Check the pointer for sane values */
	if (((u_long)ptr & ((*mp)->size-1))) {
	    wrtwarning("modified (chunk-) pointer.\n");
	    return 0;
	}

	/* Find the chunk index in the page */
	i = ((u_long)ptr & malloc_pagemask) >> (*mp)->shift;

	/* Verify that it isn't a free chunk already */
	if ((*mp)->bits[i/MALLOC_BITS] & (1<<(i%MALLOC_BITS))) {
	    wrtwarning("chunk is already free.\n");
	    return 0;
	}

	osize = (*mp)->size;

	if (!malloc_realloc &&			/* Unless we have to, */
	    size < osize &&			/* ..or are too small, */
	    (size > osize/2 ||			/* ..or could use a smaller size, */
	    osize == malloc_minsize)) {		/* ..(if there is one) */
	    return ptr;				/* ..Don't do anything */
	}

    } else {
	wrtwarning("pointer to wrong page.\n");
	return 0;
    }

    p = imalloc(size);

    if (p) {
	/* copy the lesser of the two sizes, and free the old one */
	if (!size || !osize)
	    ;
	else if (osize < size)
	    memcpy(p, ptr, osize);
	else
	    memcpy(p, ptr, size);
	ifree(ptr);
    }
    return p;
}
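
/*
 * Shrink-in-place example: a 100-byte allocation lives in a 128-byte
 * chunk, so (with the default options) realloc()ing it to 120 bytes
 * returns the same pointer, while shrinking it to 60 bytes falls below
 * half the chunk size and triggers a real reallocation into a 64-byte
 * chunk.  The 'R' option (malloc_realloc) disables this optimization.
 */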

/*
 * Free a sequence of pages
 */

static __inline__ void
free_pages(void *ptr, int idx, struct pginfo *info)
{
    int i;
    struct pgfree *pf, *pt=0;
    u_long l;
    void *tail;

    if (info == MALLOC_FREE) {
	wrtwarning("page is already free.\n");
	return;
    }

    if (info != MALLOC_FIRST) {
	wrtwarning("pointer to wrong page.\n");
	return;
    }

    if ((u_long)ptr & malloc_pagemask) {
	wrtwarning("modified (page-) pointer.\n");
	return;
    }

    /* Count how many pages and mark them free at the same time */
    page_dir[idx] = MALLOC_FREE;
    for (i = 1; page_dir[idx+i] == MALLOC_FOLLOW; i++)
	page_dir[idx + i] = MALLOC_FREE;

    l = i << malloc_pageshift;

    if (malloc_junk)
	memset(ptr, SOME_JUNK, l);

    if (malloc_hint)
	madvise(ptr, l, MADV_FREE);

    tail = (char *)ptr+l;

    /* add to free-list */
    if (!px)
	px = imalloc(sizeof *pt);	/* This cannot fail... */
    px->page = ptr;
    px->end = tail;
    px->size = l;
    if (!free_list.next) {

	/* Nothing on free list, put this at head */
	px->next = free_list.next;
	px->prev = &free_list;
	free_list.next = px;
	pf = px;
	px = 0;

    } else {

	/* Find the right spot, leave pf pointing to the modified entry. */
	tail = (char *)ptr+l;

	for(pf = free_list.next; pf->end < ptr && pf->next; pf = pf->next)
	    ; /* Race ahead here */

	if (pf->page > tail) {
	    /* Insert before entry */
	    px->next = pf;
	    px->prev = pf->prev;
	    pf->prev = px;
	    px->prev->next = px;
	    pf = px;
	    px = 0;
	} else if (pf->end == ptr) {
	    /* Append to the previous entry */
	    pf->end = (char *)pf->end + l;
	    pf->size += l;
	    if (pf->next && pf->end == pf->next->page) {
		/* And collapse the next too. */
		pt = pf->next;
		pf->end = pt->end;
		pf->size += pt->size;
		pf->next = pt->next;
		if (pf->next)
		    pf->next->prev = pf;
	    }
	} else if (pf->page == tail) {
	    /* Prepend to entry */
	    pf->size += l;
	    pf->page = ptr;
	} else if (!pf->next) {
	    /* Append at tail of chain */
	    px->next = 0;
	    px->prev = pf;
	    pf->next = px;
	    pf = px;
	    px = 0;
	} else {
	    wrterror("freelist is destroyed.\n");
	}
    }

    /* Return something to OS ? */
    if (!pf->next &&				/* If we're the last one, */
	pf->size > malloc_cache &&		/* ..and the cache is full, */
	pf->end == malloc_brk &&		/* ..and none behind us, */
	malloc_brk == sbrk(0)) {		/* ..and it's OK to do... */

	/*
	 * Keep the cache intact.  Notice that the '>' above guarantees that
	 * the pf will always have at least one page afterwards.
	 */
	pf->end = (char *)pf->page + malloc_cache;
	pf->size = malloc_cache;

	brk(pf->end);
	malloc_brk = pf->end;

	idx = ptr2index(pf->end);
	last_index = idx - 1;

	for(i=idx;i <= last_index;)
	    page_dir[i++] = MALLOC_NOT_MINE;

	/* XXX: We could realloc/shrink the pagedir here I guess. */
    }
    if (pt)
	ifree(pt);
}
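
/*
 * Freed page runs are kept on the address-sorted free_list and are
 * coalesced with their neighbours whenever the ranges touch, so the
 * cache stays a small number of large runs.  When the last run reaches
 * the current break and exceeds the configured cache size, everything
 * beyond the cache limit is handed back to the kernel with brk(2).
 */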

/*
 * Free a chunk, and possibly the page it's on, if the page becomes empty.
 */

static __inline__ void
free_bytes(void *ptr, int idx, struct pginfo *info)
{
    int i;
    struct pginfo **mp;
    void *vp;

    /* Find the chunk number on the page */
    i = ((u_long)ptr & malloc_pagemask) >> info->shift;

    if (((u_long)ptr & (info->size-1))) {
	wrtwarning("modified (chunk-) pointer.\n");
	return;
    }

    if (info->bits[i/MALLOC_BITS] & (1<<(i%MALLOC_BITS))) {
	wrtwarning("chunk is already free.\n");
	return;
    }

    if (malloc_junk)
	memset(ptr, SOME_JUNK, info->size);

    info->bits[i/MALLOC_BITS] |= 1<<(i%MALLOC_BITS);
    info->free++;

    mp = page_dir + info->shift;

    if (info->free == 1) {

	/* Page became non-full */

	mp = page_dir + info->shift;
	/* Insert in address order */
	while (*mp && (*mp)->next && (*mp)->next->page < info->page)
	    mp = &(*mp)->next;
	info->next = *mp;
	*mp = info;
	return;
    }

    if (info->free != info->total)
	return;

    /* Find & remove this page in the queue */
    while (*mp != info) {
	mp = &((*mp)->next);
#ifdef MALLOC_EXTRA_SANITY
	if (!*mp)
	    wrterror("(ES): Not on queue\n");
#endif /* MALLOC_EXTRA_SANITY */
    }
    *mp = info->next;

    /* Free the page & the info structure if need be */
    page_dir[ptr2index(info->page)] = MALLOC_FIRST;
    vp = info->page;		/* Order is important ! */
    if(vp != (void*)info)
	ifree(info);
    ifree(vp);
}
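
/*
 * When a chunk is freed, its bit is simply set again in the bitmap.
 * The page re-enters its bucket list the moment it goes from full to
 * non-full, and once every chunk on it is free the page is unlinked,
 * re-tagged MALLOC_FIRST and released through ifree() as a whole page
 * (freeing the separate pginfo first, if it was not embedded in the
 * page itself).
 */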

static void
ifree(void *ptr)
{
    struct pginfo *info;
    int idx;

    /* This is legal */
    if (!ptr)
	return;

    if (!malloc_started) {
	wrtwarning("malloc() has never been called.\n");
	return;
    }

    /* If we're already sinking, don't make matters any worse. */
    if (suicide)
	return;

    idx = ptr2index(ptr);

    if (idx < malloc_pageshift) {
	wrtwarning("junk pointer, too low to make sense.\n");
	return;
    }

    if (idx > last_index) {
	wrtwarning("junk pointer, too high to make sense.\n");
	return;
    }

    info = page_dir[idx];

    if (info < MALLOC_MAGIC)
	free_pages(ptr, idx, info);
    else
	free_bytes(ptr, idx, info);
    return;
}

/*
 * These are the public exported interface routines.
 */


void *
malloc(size_t size)
{
    register void *r;

    THREAD_LOCK();
    malloc_func = " in malloc():";
    if (malloc_active++) {
	wrtwarning("recursive call.\n");
	malloc_active--;
	THREAD_UNLOCK();
	return (0);
    }
    if (!malloc_started)
	malloc_init();
    if (malloc_sysv && !size)
	r = 0;
    else
	r = imalloc(size);
    UTRACE(0, size, r);
    malloc_active--;
    THREAD_UNLOCK();
    if (malloc_xmalloc && !r)
	wrterror("out of memory.\n");
    return (r);
}

void
free(void *ptr)
{
    THREAD_LOCK();
    malloc_func = " in free():";
    if (malloc_active++) {
	wrtwarning("recursive call.\n");
	malloc_active--;
	THREAD_UNLOCK();
	return;
    } else {
	ifree(ptr);
	UTRACE(ptr, 0, 0);
    }
    malloc_active--;
    THREAD_UNLOCK();
    return;
}

void *
realloc(void *ptr, size_t size)
{
    register void *r;

    THREAD_LOCK();
    malloc_func = " in realloc():";
    if (malloc_active++) {
	wrtwarning("recursive call.\n");
	malloc_active--;
	THREAD_UNLOCK();
	return (0);
    }
    if (ptr && !malloc_started) {
	wrtwarning("malloc() has never been called.\n");
	ptr = 0;
    }
    if (!malloc_started)
	malloc_init();
    if (malloc_sysv && !size) {
	ifree(ptr);
	r = 0;
    } else if (!ptr) {
	r = imalloc(size);
    } else {
	r = irealloc(ptr, size);
    }
    UTRACE(ptr, size, r);
    malloc_active--;
    THREAD_UNLOCK();
    if (malloc_xmalloc && !r)
	wrterror("out of memory.\n");
    return (r);
}