/*	$NetBSD: malloc.c,v 1.56 2014/09/18 13:58:20 christos Exp $	*/

/*
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk (at) FreeBSD.ORG> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * From FreeBSD: malloc.c,v 1.91 2006/01/12 07:28:20 jasone
 *
 */

/*
 * Defining MALLOC_EXTRA_SANITY will enable extra checks which are related
 * to internal conditions and consistency in malloc.c. This has a
 * noticeable runtime performance hit, and generally will not do you
 * any good unless you fiddle with the internals of malloc or want
 * to catch random pointer corruption as early as possible.
 */
#ifndef MALLOC_EXTRA_SANITY
#undef MALLOC_EXTRA_SANITY
#endif

/*
 * What to use for Junk.  This is the byte value we use to fill with
 * when the 'J' option is enabled.
 */
#define SOME_JUNK	0xd0		/* as in "Duh" :-) */

/*
 * The basic parameters you can tweak.
 *
 * malloc_minsize	minimum size of an allocation in bytes.
 *			If this is too small it's too much work
 *			to manage them.  This is also the smallest
 *			unit of alignment used for the storage
 *			returned by malloc/realloc.
 *
 */

#include "namespace.h"
#if defined(__FreeBSD__)
#  if defined(__i386__)
#    define malloc_minsize		16U
#  endif
#  if defined(__ia64__)
#    define malloc_pageshift		13U
#    define malloc_minsize		16U
#  endif
#  if defined(__alpha__)
#    define malloc_pageshift		13U
#    define malloc_minsize		16U
#  endif
#  if defined(__sparc64__)
#    define malloc_pageshift		13U
#    define malloc_minsize		16U
#  endif
#  if defined(__amd64__)
#    define malloc_pageshift		12U
#    define malloc_minsize		16U
#  endif
#  if defined(__arm__)
#    define malloc_pageshift		12U
#    define malloc_minsize		16U
#  endif
#  define HAS_UTRACE
#  define UTRACE_LABEL

#include <sys/cdefs.h>
void utrace(struct ut *, int);

/*
 * Make malloc/free/realloc thread-safe in libc for use with
 * kernel threads.
 */
#  include "libc_private.h"
#  include "spinlock.h"
static spinlock_t thread_lock = _SPINLOCK_INITIALIZER;
#  define _MALLOC_LOCK()	if (__isthreaded) _SPINLOCK(&thread_lock);
#  define _MALLOC_UNLOCK()	if (__isthreaded) _SPINUNLOCK(&thread_lock);
#endif /* __FreeBSD__ */

#include <sys/types.h>
#if defined(__NetBSD__)
#  define malloc_minsize		16U
#  define HAS_UTRACE
#  define UTRACE_LABEL "malloc",
#include <sys/cdefs.h>
#include "extern.h"
#if defined(LIBC_SCCS) && !defined(lint)
__RCSID("$NetBSD: malloc.c,v 1.56 2014/09/18 13:58:20 christos Exp $");
#endif /* LIBC_SCCS and not lint */
int utrace(const char *, void *, size_t);

#include <reentrant.h>
extern int __isthreaded;
static mutex_t thread_lock = MUTEX_INITIALIZER;
#define _MALLOC_LOCK()	if (__isthreaded) mutex_lock(&thread_lock);
#define _MALLOC_UNLOCK()	if (__isthreaded) mutex_unlock(&thread_lock);
#endif /* __NetBSD__ */

#if defined(__sparc__) && defined(sun)
#  define malloc_minsize		16U
#  define MAP_ANON			(0)
static int fdzero;
#  define MMAP_FD	fdzero
#  define INIT_MMAP() \
	{ if ((fdzero = open(_PATH_DEVZERO, O_RDWR | O_CLOEXEC, 0000)) == -1) \
	    wrterror("open of /dev/zero"); }
#endif /* __sparc__ */

/* Insert your combination here... */
#if defined(__FOOCPU__) && defined(__BAROS__)
#  define malloc_minsize		16U
#endif /* __FOOCPU__ && __BAROS__ */

#ifndef ZEROSIZEPTR
#define ZEROSIZEPTR	((void *)(uintptr_t)(1UL << (malloc_pageshift - 1)))
#endif

/*
 * No user serviceable parts behind this point.
 */
#include <sys/types.h>
#include <sys/mman.h>
#include <errno.h>
#include <fcntl.h>
#include <paths.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/*
 * This structure describes a page worth of chunks.
 */

struct pginfo {
    struct pginfo	*next;	/* next on the free list */
    void		*page;	/* Pointer to the page */
    u_short		size;	/* size of this page's chunks */
    u_short		shift;	/* How far to shift for this size chunks */
    u_short		free;	/* How many free chunks */
    u_short		total;	/* How many chunks */
    u_int		bits[1]; /* Which chunks are free */
};

/*
 * This structure describes a number of free pages.
 */

struct pgfree {
    struct pgfree	*next;	/* next run of free pages */
    struct pgfree	*prev;	/* prev run of free pages */
    void		*page;	/* pointer to free pages */
    void		*end;	/* pointer to end of free pages */
    size_t		size;	/* number of bytes free */
};

/*
 * How many bits per u_int in the bitmap.
 * Change only if not 8 bits/byte
 */
#define	MALLOC_BITS	((int)(8*sizeof(u_int)))

/*
 * Magic values to put in the page_directory
 */
#define MALLOC_NOT_MINE	((struct pginfo*) 0)
#define MALLOC_FREE	((struct pginfo*) 1)
#define MALLOC_FIRST	((struct pginfo*) 2)
#define MALLOC_FOLLOW	((struct pginfo*) 3)
#define MALLOC_MAGIC	((struct pginfo*) 4)

/*
 * Page size related parameters, computed at run-time.
 */
static size_t malloc_pagesize;
static size_t malloc_pageshift;
static size_t malloc_pagemask;

#ifndef malloc_minsize
#define malloc_minsize			16U
#endif

#ifndef malloc_maxsize
#define malloc_maxsize			((malloc_pagesize)>>1)
#endif

#define pageround(foo) (((foo) + (malloc_pagemask))&(~(malloc_pagemask)))
#define ptr2idx(foo) \
    (((size_t)(uintptr_t)(foo) >> malloc_pageshift)-malloc_origo)

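/*
 * Worked example of the two macros above (illustrative only; the concrete
 * numbers assume a 4 KB page, i.e. malloc_pagesize == 4096,
 * malloc_pageshift == 12 and malloc_pagemask == 0xfff):
 *
 *   pageround(5000) == (5000 + 0xfff) & ~0xfff == 8192
 *
 * ptr2idx() turns an address into a slot number in page_dir[]: the address
 * is shifted down to its page number and malloc_origo (derived from the
 * initial break in malloc_init()) is subtracted, e.g.
 *
 *   ptr2idx(0x10802345) == (0x10802345 >> 12) - malloc_origo
 *                       == 0x10802 - malloc_origo
 */
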
#ifndef _MALLOC_LOCK
#define _MALLOC_LOCK()
#endif

#ifndef _MALLOC_UNLOCK
#define _MALLOC_UNLOCK()
#endif

#ifndef MMAP_FD
#define MMAP_FD (-1)
#endif

#ifndef INIT_MMAP
#define INIT_MMAP()
#endif

#ifndef MADV_FREE
#define MADV_FREE MADV_DONTNEED
#endif

/* Number of free pages we cache */
static size_t malloc_cache = 16;

/* The offset from pagenumber to index into the page directory */
static size_t malloc_origo;

/* The last index in the page directory we care about */
static size_t last_idx;

/* Pointer to page directory. Allocated "as if with" malloc */
static struct pginfo **page_dir;

/* How many slots in the page directory */
static size_t malloc_ninfo;

/* Free pages line up here */
static struct pgfree free_list;

/* Abort(), user doesn't handle problems. */
static int malloc_abort;

/* Are we trying to die ? */
static int suicide;

/* always realloc ? */
static int malloc_realloc;

/* pass the kernel a hint on free pages ? */
#if defined(MADV_FREE)
static int malloc_hint = 0;
#endif

/* xmalloc behaviour ? */
static int malloc_xmalloc;

/* sysv behaviour for malloc(0) ? */
static int malloc_sysv;

/* zero fill ? */
static int malloc_zero;

/* junk fill ? */
static int malloc_junk;

#ifdef HAS_UTRACE

/* utrace ? */
static int malloc_utrace;

struct ut { void *p; size_t s; void *r; };

#define UTRACE(a, b, c) \
	if (malloc_utrace) {					\
		struct ut u;					\
		u.p=a; u.s = b; u.r=c;				\
		utrace(UTRACE_LABEL (void *) &u, sizeof u);	\
	}
#else /* !HAS_UTRACE */
#define UTRACE(a,b,c)
#endif /* HAS_UTRACE */
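
/*
 * Summary of how the trace record is filled in (derived from the UTRACE()
 * calls further down; shown here only as an illustration):
 *
 *   operation            p (old ptr)   s (size)   r (result)
 *   malloc(size)         NULL          size       returned pointer
 *   realloc(ptr, size)   ptr           size       returned pointer
 *   free(ptr)            ptr           0          ZEROSIZEPTR (NULL with 'V')
 *   malloc_init()        NULL          0          NULL
 */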

/* my last break. */
static void *malloc_brk;

/* one location cache for free-list holders */
static struct pgfree *px;

/* compile-time options */
const char *_malloc_options;

/* Name of the current public function */
static const char *malloc_func;

/* Macro for mmap */
#define MMAP(size) \
	mmap(NULL, (size), PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, \
	    MMAP_FD, (off_t)0);

/*
 * Necessary function declarations
 */
static int extend_pgdir(size_t idx);
static void *imalloc(size_t size);
static void ifree(void *ptr);
static void *irealloc(void *ptr, size_t size);

static void
wrtmessage(const char *p1, const char *p2, const char *p3, const char *p4)
{

    write(STDERR_FILENO, p1, strlen(p1));
    write(STDERR_FILENO, p2, strlen(p2));
    write(STDERR_FILENO, p3, strlen(p3));
    write(STDERR_FILENO, p4, strlen(p4));
}

void (*_malloc_message)(const char *p1, const char *p2, const char *p3,
	    const char *p4) = wrtmessage;

static void
wrterror(const char *p)
{

    suicide = 1;
    _malloc_message(getprogname(), malloc_func, " error: ", p);
    abort();
}

static void
wrtwarning(const char *p)
{

    /*
     * Sensitive processes, somewhat arbitrarily defined here as setuid,
     * setgid, root and wheel cannot afford to have malloc mistakes.
     */
    if (malloc_abort || issetugid() || getuid() == 0 || getgid() == 0)
	wrterror(p);
}

/*
 * Allocate a number of pages from the OS
 */
static void *
map_pages(size_t pages)
{
    caddr_t result, rresult, tail;
    intptr_t bytes = pages << malloc_pageshift;

    if (bytes < 0 || (size_t)bytes < pages) {
	errno = ENOMEM;
	return NULL;
    }

    if ((result = sbrk(bytes)) == (void *)-1)
	return NULL;

    /*
     * Round to a page, in case sbrk(2) did not do this for us
     */
    rresult = (caddr_t)pageround((size_t)(uintptr_t)result);
    if (result < rresult) {
	/* make sure we have enough space to fit bytes */
	if (sbrk((intptr_t)(rresult - result)) == (void *) -1) {
	    /* we failed, put everything back */
	    if (brk(result)) {
		wrterror("brk(2) failed [internal error]\n");
	    }
	}
    }
    tail = rresult + (size_t)bytes;

    last_idx = ptr2idx(tail) - 1;
    malloc_brk = tail;

    if ((last_idx+1) >= malloc_ninfo && !extend_pgdir(last_idx)) {
	malloc_brk = result;
	last_idx = ptr2idx(malloc_brk) - 1;
	/* Put back break point since we failed. */
	if (brk(malloc_brk))
	    wrterror("brk(2) failed [internal error]\n");
	return 0;
    }

    return rresult;
}

/*
 * Extend page directory
 */
static int
extend_pgdir(size_t idx)
{
    struct pginfo **new, **old;
    size_t newlen, oldlen;

    /* check for overflow */
    if ((((~(1UL << ((sizeof(size_t) * NBBY) - 1)) / sizeof(*page_dir)) + 1)
	+ (malloc_pagesize / sizeof *page_dir)) < idx) {
	errno = ENOMEM;
	return 0;
    }

    /* Make it this many pages */
    newlen = pageround(idx * sizeof *page_dir) + malloc_pagesize;

    /* remember the old mapping size */
    oldlen = malloc_ninfo * sizeof *page_dir;

    /*
     * NOTE: we allocate new pages and copy the directory rather than tempt
     * fate by trying to "grow" the region.. There is nothing to prevent
     * us from accidentally re-mapping space that's been allocated by our caller
     * via dlopen() or other mmap().
     *
     * The copy problem is not too bad, as there is 4K of page index per
     * 4MB of malloc arena.
     *
     * We can totally avoid the copy if we open a file descriptor to associate
     * the anon mappings with.  Then, when we remap the pages at the new
     * address, the old pages will be "magically" remapped..  But this means
     * keeping open a "secret" file descriptor.....
     */

    /* Get new pages */
    new = MMAP(newlen);
    if (new == MAP_FAILED)
	return 0;

    /* Copy the old stuff */
    memcpy(new, page_dir, oldlen);

    /* register the new size */
    malloc_ninfo = newlen / sizeof *page_dir;

    /* swap the pointers */
    old = page_dir;
    page_dir = new;

    /* Now free the old stuff */
    munmap(old, oldlen);
    return 1;
}
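
/*
 * Illustration of the overflow guard at the top of extend_pgdir() (not part
 * of the algorithm itself; numbers assume LP64, i.e. sizeof(size_t) == 8,
 * NBBY == 8, sizeof(*page_dir) == 8, and a 4 KB page):
 *
 *   ~(1UL << 63)              == 0x7fffffffffffffff  (half the address space)
 *   that value / 8, plus 1    == 0x1000000000000000
 *   + malloc_pagesize / 8     == 0x1000000000000200
 *
 * Any idx above this bound is refused with ENOMEM, which conservatively
 * keeps the later "idx * sizeof *page_dir + malloc_pagesize" computation
 * well below the point where it could wrap around.
 */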

/*
 * Initialize the world
 */
static void
malloc_init(void)
{
    const char *p;
    char b[64];
    size_t i;
    ssize_t j;
    int serrno = errno;

    /*
     * Compute page-size related variables.
     */
    malloc_pagesize = (size_t)sysconf(_SC_PAGESIZE);
    malloc_pagemask = malloc_pagesize - 1;
    for (malloc_pageshift = 0;
	 (1UL << malloc_pageshift) != malloc_pagesize;
	 malloc_pageshift++)
	/* nothing */ ;

    INIT_MMAP();

#ifdef MALLOC_EXTRA_SANITY
    malloc_junk = 1;
#endif /* MALLOC_EXTRA_SANITY */

    for (i = 0; i < 3; i++) {
	if (i == 0) {
	    j = readlink("/etc/malloc.conf", b, sizeof b - 1);
	    if (j == -1)
		continue;
	    b[j] = '\0';
	    p = b;
	} else if (i == 1 && issetugid() == 0) {
	    p = getenv("MALLOC_OPTIONS");
	} else if (i == 1) {
	    continue;
	} else {
	    p = _malloc_options;
	}
	for (; p != NULL && *p != '\0'; p++) {
	    switch (*p) {
	    case '>': malloc_cache   <<= 1; break;
	    case '<': malloc_cache   >>= 1; break;
	    case 'a': malloc_abort   = 0; break;
	    case 'A': malloc_abort   = 1; break;
	    case 'h': malloc_hint    = 0; break;
	    case 'H': malloc_hint    = 1; break;
	    case 'r': malloc_realloc = 0; break;
	    case 'R': malloc_realloc = 1; break;
	    case 'j': malloc_junk    = 0; break;
	    case 'J': malloc_junk    = 1; break;
#ifdef HAS_UTRACE
	    case 'u': malloc_utrace  = 0; break;
	    case 'U': malloc_utrace  = 1; break;
#endif
	    case 'v': malloc_sysv    = 0; break;
	    case 'V': malloc_sysv    = 1; break;
	    case 'x': malloc_xmalloc = 0; break;
	    case 'X': malloc_xmalloc = 1; break;
	    case 'z': malloc_zero    = 0; break;
	    case 'Z': malloc_zero    = 1; break;
	    default:
		_malloc_message(getprogname(), malloc_func,
		     " warning: ", "unknown char in MALLOC_OPTIONS\n");
		break;
	    }
	}
    }

    UTRACE(0, 0, 0);

    /*
     * We want junk in the entire allocation, and zero only in the part
     * the user asked for.
     */
    if (malloc_zero)
	malloc_junk = 1;

    /* Allocate one page for the page directory */
    page_dir = MMAP(malloc_pagesize);

    if (page_dir == MAP_FAILED)
	wrterror("mmap(2) failed, check limits.\n");

    /*
     * We need a maximum of malloc_pageshift buckets, steal these from the
     * front of the page_directory;
     */
    malloc_origo = pageround((size_t)(uintptr_t)sbrk((intptr_t)0))
	>> malloc_pageshift;
    malloc_origo -= malloc_pageshift;

    malloc_ninfo = malloc_pagesize / sizeof *page_dir;

    /* Recalculate the cache size in bytes, and make sure it's nonzero */

    if (!malloc_cache)
	malloc_cache++;

    malloc_cache <<= malloc_pageshift;

    /*
     * This is a nice hack from Kaleb Keithly (kaleb (at) x.org).
     * We can sbrk(2) further back when we keep this on a low address.
     */
    px = imalloc(sizeof *px);

    errno = serrno;
}
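
/*
 * Example of enabling the option flags parsed above (illustrative; which
 * flags are useful depends on what you are debugging).  Options can come
 * from the target of the /etc/malloc.conf symlink, from the environment
 * (ignored for set-id programs), or from the _malloc_options string:
 *
 *   # system-wide: make the symlink target the option string
 *   ln -s 'AJ' /etc/malloc.conf
 *
 *   # per process: abort on errors, junk-fill, trace via utrace(2)
 *   env MALLOC_OPTIONS=AJU ./prog
 *
 *   # compiled in, set before the first allocation:
 *   _malloc_options = "X";
 */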

/*
 * Allocate a number of complete pages
 */
static void *
malloc_pages(size_t size)
{
    void *p, *delay_free = NULL;
    size_t i;
    struct pgfree *pf;
    size_t idx;

    idx = pageround(size);
    if (idx < size) {
	errno = ENOMEM;
	return NULL;
    } else
	size = idx;

    p = NULL;

    /* Look for free pages before asking for more */
    for(pf = free_list.next; pf; pf = pf->next) {

#ifdef MALLOC_EXTRA_SANITY
	if (pf->size & malloc_pagemask)
	    wrterror("(ES): junk length entry on free_list.\n");
	if (!pf->size)
	    wrterror("(ES): zero length entry on free_list.\n");
	if (pf->page == pf->end)
	    wrterror("(ES): zero entry on free_list.\n");
	if (pf->page > pf->end)
	    wrterror("(ES): sick entry on free_list.\n");
	if ((void*)pf->page >= (void*)sbrk(0))
	    wrterror("(ES): entry on free_list past brk.\n");
	if (page_dir[ptr2idx(pf->page)] != MALLOC_FREE)
	    wrterror("(ES): non-free first page on free-list.\n");
	if (page_dir[ptr2idx(pf->end)-1] != MALLOC_FREE)
	    wrterror("(ES): non-free last page on free-list.\n");
#endif /* MALLOC_EXTRA_SANITY */

	if (pf->size < size)
	    continue;

	if (pf->size == size) {
	    p = pf->page;
	    if (pf->next != NULL)
		pf->next->prev = pf->prev;
	    pf->prev->next = pf->next;
	    delay_free = pf;
	    break;
	}

	p = pf->page;
	pf->page = (char *)pf->page + size;
	pf->size -= size;
	break;
    }

#ifdef MALLOC_EXTRA_SANITY
    if (p != NULL && page_dir[ptr2idx(p)] != MALLOC_FREE)
	wrterror("(ES): allocated non-free page on free-list.\n");
#endif /* MALLOC_EXTRA_SANITY */

    size >>= malloc_pageshift;

    /* Map new pages */
    if (p == NULL)
	p = map_pages(size);

    if (p != NULL) {

	idx = ptr2idx(p);
	page_dir[idx] = MALLOC_FIRST;
	for (i=1;i<size;i++)
	    page_dir[idx+i] = MALLOC_FOLLOW;

	if (malloc_junk)
	    memset(p, SOME_JUNK, size << malloc_pageshift);
    }

    if (delay_free) {
	if (px == NULL)
	    px = delay_free;
	else
	    ifree(delay_free);
    }

    return p;
}

/*
 * Allocate a page of fragments
 */

static inline int
malloc_make_chunks(int bits)
{
    struct pginfo *bp;
    void *pp;
    int i, k;
    long l;

    /* Allocate a new bucket */
    pp = malloc_pages(malloc_pagesize);
    if (pp == NULL)
	return 0;

    /* Find length of admin structure */
    l = (long)offsetof(struct pginfo, bits[0]);
    l += (long)sizeof bp->bits[0] *
	(((malloc_pagesize >> bits)+MALLOC_BITS-1) / MALLOC_BITS);

    /* Don't waste more than two chunks on this */
    if ((1<<(bits)) <= l+l) {
	bp = (struct pginfo *)pp;
    } else {
	bp = imalloc((size_t)l);
	if (bp == NULL) {
	    ifree(pp);
	    return 0;
	}
    }

    bp->size = (1<<bits);
    bp->shift = bits;
    bp->total = bp->free = (u_short)(malloc_pagesize >> bits);
    bp->page = pp;

    /* set all valid bits in the bitmap */
    k = bp->total;
    i = 0;

    /* Do a bunch at a time */
    for(;k-i >= MALLOC_BITS; i += MALLOC_BITS)
	bp->bits[i / MALLOC_BITS] = ~0U;

    for(; i < k; i++)
	bp->bits[i/MALLOC_BITS] |= 1<<(i%MALLOC_BITS);

    if (bp == bp->page) {
	/* Mark the ones we stole for ourselves */
	for(i = 0; l > 0; i++) {
	    bp->bits[i / MALLOC_BITS] &= ~(1 << (i % MALLOC_BITS));
	    bp->free--;
	    bp->total--;
	    l -= (long)(1 << bits);
	}
    }

    /* MALLOC_LOCK */

    page_dir[ptr2idx(pp)] = bp;

    bp->next = page_dir[bits];
    page_dir[bits] = bp;

    /* MALLOC_UNLOCK */

    return 1;
}
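
/*
 * Worked example for malloc_make_chunks() (illustrative only; assumes a
 * 4 KB page, 32-bit u_int and an LP64 struct layout, so
 * offsetof(struct pginfo, bits[0]) == 24):
 *
 *   bits == 5 (32-byte chunks): the page holds 4096 >> 5 == 128 chunks and
 *   the bitmap needs 128 / 32 == 4 u_ints, so l == 24 + 4*4 == 40 bytes.
 *   One chunk (32) <= 2*l (80), so the pginfo is placed inside the page
 *   itself and the "stole for ourselves" loop clears bits for
 *   ceil(40 / 32) == 2 chunks, leaving 126 chunks available.
 *
 *   bits == 10 (1024-byte chunks): l == 24 + 4 == 28, and one chunk (1024)
 *   is larger than 2*l (56), so the pginfo is allocated separately with
 *   imalloc() and all 4 chunks in the page stay available.
 */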

/*
 * Allocate a fragment
 */
static void *
malloc_bytes(size_t size)
{
    size_t i;
    int j;
    u_int u;
    struct pginfo *bp;
    size_t k;
    u_int *lp;

    /* Don't bother with anything less than this */
    if (size < malloc_minsize)
	size = malloc_minsize;

    /* Find the right bucket */
    j = 1;
    i = size-1;
    while (i >>= 1)
	j++;

    /* If it's empty, make a page more of that size chunks */
    if (page_dir[j] == NULL && !malloc_make_chunks(j))
	return NULL;

    bp = page_dir[j];

    /* Find first word of bitmap which isn't empty */
    for (lp = bp->bits; !*lp; lp++)
	;

    /* Find that bit, and tweak it */
    u = 1;
    k = 0;
    while (!(*lp & u)) {
	u += u;
	k++;
    }
    *lp ^= u;

    /* If there are no more free, remove from free-list */
    if (!--bp->free) {
	page_dir[j] = bp->next;
	bp->next = NULL;
    }

    /* Adjust to the real offset of that chunk */
    k += (lp-bp->bits)*MALLOC_BITS;
    k <<= bp->shift;

    if (malloc_junk)
	memset((u_char*)bp->page + k, SOME_JUNK, (size_t)bp->size);

    return (u_char *)bp->page + k;
}
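
/*
 * Worked example of the bucket computation in malloc_bytes() (illustrative):
 * for size == 100, i starts at 99 and is shifted right until it reaches 0
 * (99 -> 49 -> 24 -> 12 -> 6 -> 3 -> 1 -> 0), incrementing j on every
 * non-zero step, so j ends up as 7 and the request is served from the
 * bucket of 1<<7 == 128-byte chunks: the smallest power of two >= 100.
 * For size <= malloc_minsize (16), j == 4 and 16-byte chunks are used.
 */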

/*
 * Allocate a piece of memory
 */
static void *
imalloc(size_t size)
{
    void *result;

    if (suicide)
	abort();

    if ((size + malloc_pagesize) < size)	/* Check for overflow */
	result = NULL;
    else if ((size + malloc_pagesize) >= (uintptr_t)page_dir)
	result = NULL;
    else if (size <= malloc_maxsize)
	result = malloc_bytes(size);
    else
	result = malloc_pages(size);

    if (malloc_abort && result == NULL)
	wrterror("allocation failed.\n");

    if (malloc_zero && result != NULL)
	memset(result, 0, size);

    return result;
}

/*
 * Change the size of an allocation.
 */
static void *
irealloc(void *ptr, size_t size)
{
    void *p;
    size_t osize, idx;
    struct pginfo **mp;
    size_t i;

    if (suicide)
	abort();

    idx = ptr2idx(ptr);

    if (idx < malloc_pageshift) {
	wrtwarning("junk pointer, too low to make sense.\n");
	return 0;
    }

    if (idx > last_idx) {
	wrtwarning("junk pointer, too high to make sense.\n");
	return 0;
    }

    mp = &page_dir[idx];

    if (*mp == MALLOC_FIRST) {			/* Page allocation */

	/* Check the pointer */
	if ((size_t)(uintptr_t)ptr & malloc_pagemask) {
	    wrtwarning("modified (page-) pointer.\n");
	    return NULL;
	}

	/* Find the size in bytes */
	for (osize = malloc_pagesize; *++mp == MALLOC_FOLLOW;)
	    osize += malloc_pagesize;

	if (!malloc_realloc &&			/* unless we have to, */
	  size <= osize &&			/* .. or are too small, */
	  size > (osize - malloc_pagesize)) {	/* .. or can free a page, */
	    if (malloc_junk)
		memset((u_char *)ptr + size, SOME_JUNK, osize-size);
	    return ptr;				/* don't do anything. */
	}

    } else if (*mp >= MALLOC_MAGIC) {		/* Chunk allocation */

	/* Check the pointer for sane values */
	if (((size_t)(uintptr_t)ptr & ((*mp)->size-1))) {
	    wrtwarning("modified (chunk-) pointer.\n");
	    return NULL;
	}

	/* Find the chunk index in the page */
	i = ((size_t)(uintptr_t)ptr & malloc_pagemask) >> (*mp)->shift;

	/* Verify that it isn't a free chunk already */
	if ((*mp)->bits[i/MALLOC_BITS] & (1UL << (i % MALLOC_BITS))) {
	    wrtwarning("chunk is already free.\n");
	    return NULL;
	}

	osize = (*mp)->size;

	if (!malloc_realloc &&			/* Unless we have to, */
	  size <= osize &&			/* ..or are too small, */
	  (size > osize / 2 ||			/* ..or could use a smaller size, */
	  osize == malloc_minsize)) {		/* ..(if there is one) */
	    if (malloc_junk)
		memset((u_char *)ptr + size, SOME_JUNK, osize-size);
	    return ptr;				/* ..Don't do anything */
	}

    } else {
	wrtwarning("pointer to wrong page.\n");
	return NULL;
    }

    p = imalloc(size);

    if (p != NULL) {
	/* copy the lesser of the two sizes, and free the old one */
	if (!size || !osize)
	    ;
	else if (osize < size)
	    memcpy(p, ptr, osize);
	else
	    memcpy(p, ptr, size);
	ifree(ptr);
    }
    return p;
}
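
/*
 * Worked example of the "don't do anything" shortcut in irealloc()
 * (illustrative): a chunk allocated for a 100-byte request lives in a
 * 128-byte slot, so realloc(ptr, 90) keeps the same pointer (90 <= 128 and
 * 90 > 64), while realloc(ptr, 40) moves the data to a 64-byte chunk and
 * realloc(ptr, 200) moves it to a 256-byte chunk.  The 'R' option disables
 * the shortcut and always reallocates.
 */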

/*
 * Free a sequence of pages
 */

static inline void
free_pages(void *ptr, size_t idx, struct pginfo *info)
{
    size_t i;
    struct pgfree *pf, *pt=NULL;
    size_t l;
    void *tail;

    if (info == MALLOC_FREE) {
	wrtwarning("page is already free.\n");
	return;
    }

    if (info != MALLOC_FIRST) {
	wrtwarning("pointer to wrong page.\n");
	return;
    }

    if ((size_t)(uintptr_t)ptr & malloc_pagemask) {
	wrtwarning("modified (page-) pointer.\n");
	return;
    }

    /* Count how many pages and mark them free at the same time */
    page_dir[idx] = MALLOC_FREE;
    for (i = 1; page_dir[idx+i] == MALLOC_FOLLOW; i++)
	page_dir[idx + i] = MALLOC_FREE;

    l = i << malloc_pageshift;

    if (malloc_junk)
	memset(ptr, SOME_JUNK, l);

    if (malloc_hint)
	madvise(ptr, l, MADV_FREE);

    tail = (char *)ptr+l;

    /* add to free-list */
    if (px == NULL)
	px = imalloc(sizeof *px);	/* This cannot fail... */
    px->page = ptr;
    px->end = tail;
    px->size = l;
    if (free_list.next == NULL) {

	/* Nothing on free list, put this at head */
	px->next = free_list.next;
	px->prev = &free_list;
	free_list.next = px;
	pf = px;
	px = NULL;

    } else {

	/* Find the right spot, leave pf pointing to the modified entry. */
	tail = (char *)ptr+l;

	for(pf = free_list.next; pf->end < ptr && pf->next != NULL;
	    pf = pf->next)
	    ; /* Race ahead here */

	if (pf->page > tail) {
	    /* Insert before entry */
	    px->next = pf;
	    px->prev = pf->prev;
	    pf->prev = px;
	    px->prev->next = px;
	    pf = px;
	    px = NULL;
	} else if (pf->end == ptr ) {
	    /* Append to the previous entry */
	    pf->end = (char *)pf->end + l;
	    pf->size += l;
	    if (pf->next != NULL && pf->end == pf->next->page ) {
		/* And collapse the next too. */
		pt = pf->next;
		pf->end = pt->end;
		pf->size += pt->size;
		pf->next = pt->next;
		if (pf->next != NULL)
		    pf->next->prev = pf;
	    }
	} else if (pf->page == tail) {
	    /* Prepend to entry */
	    pf->size += l;
	    pf->page = ptr;
	} else if (pf->next == NULL) {
	    /* Append at tail of chain */
	    px->next = NULL;
	    px->prev = pf;
	    pf->next = px;
	    pf = px;
	    px = NULL;
	} else {
	    wrterror("freelist is destroyed.\n");
	}
    }

    /* Return something to OS ? */
    if (pf->next == NULL &&			/* If we're the last one, */
      pf->size > malloc_cache &&		/* ..and the cache is full, */
      pf->end == malloc_brk &&			/* ..and none behind us, */
      malloc_brk == sbrk((intptr_t)0)) {	/* ..and it's OK to do... */

	/*
	 * Keep the cache intact.  Notice that the '>' above guarantees that
	 * the pf will always have at least one page afterwards.
	 */
	pf->end = (char *)pf->page + malloc_cache;
	pf->size = malloc_cache;

	brk(pf->end);
	malloc_brk = pf->end;

	idx = ptr2idx(pf->end);

	for(i=idx;i <= last_idx;)
	    page_dir[i++] = MALLOC_NOT_MINE;

	last_idx = idx - 1;

	/* XXX: We could realloc/shrink the pagedir here I guess. */
    }
    if (pt != NULL)
	ifree(pt);
}
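
/*
 * Illustration of the free-list handling above (not normative): runs of
 * free pages are kept sorted by address, and a newly freed run is merged
 * with a neighbour whenever they touch.  With the default cache of 16
 * pages, a trailing free run that ends at the current break is trimmed
 * back with brk(2) once it grows past malloc_cache bytes, so at most that
 * much free space is kept at the top of the sbrk arena.
 */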

/*
 * Free a chunk, and possibly the page it's on, if the page becomes empty.
 */

static inline void
free_bytes(void *ptr, size_t idx, struct pginfo *info)
{
    size_t i;
    struct pginfo **mp;
    void *vp;

    /* Find the chunk number on the page */
    i = ((size_t)(uintptr_t)ptr & malloc_pagemask) >> info->shift;

    if (((size_t)(uintptr_t)ptr & (info->size-1))) {
	wrtwarning("modified (chunk-) pointer.\n");
	return;
    }

    if (info->bits[i/MALLOC_BITS] & (1UL << (i % MALLOC_BITS))) {
	wrtwarning("chunk is already free.\n");
	return;
    }

    if (malloc_junk)
	memset(ptr, SOME_JUNK, (size_t)info->size);

    info->bits[i/MALLOC_BITS] |= (u_int)(1UL << (i % MALLOC_BITS));
    info->free++;

    mp = page_dir + info->shift;

    if (info->free == 1) {

	/* Page became non-full */

	mp = page_dir + info->shift;
	/* Insert in address order */
	while (*mp && (*mp)->next && (*mp)->next->page < info->page)
	    mp = &(*mp)->next;
	info->next = *mp;
	*mp = info;
	return;
    }

    if (info->free != info->total)
	return;

    /* Find & remove this page in the queue */
    while (*mp != info) {
	mp = &((*mp)->next);
#ifdef MALLOC_EXTRA_SANITY
	if (!*mp)
	    wrterror("(ES): Not on queue.\n");
#endif /* MALLOC_EXTRA_SANITY */
    }
    *mp = info->next;

    /* Free the page & the info structure if need be */
    page_dir[idx] = MALLOC_FIRST;
    vp = info->page;		/* Order is important ! */
    if(vp != (void*)info)
	ifree(info);
    ifree(vp);
}

static void
ifree(void *ptr)
{
    struct pginfo *info;
    size_t idx;

    /* This is legal */
    if (ptr == NULL)
	return;

    /* If we're already sinking, don't make matters any worse. */
    if (suicide)
	return;

    idx = ptr2idx(ptr);

    if (idx < malloc_pageshift) {
	wrtwarning("junk pointer, too low to make sense.\n");
	return;
    }

    if (idx > last_idx) {
	wrtwarning("junk pointer, too high to make sense.\n");
	return;
    }

    info = page_dir[idx];

    if (info < MALLOC_MAGIC)
	free_pages(ptr, idx, info);
    else
	free_bytes(ptr, idx, info);
    return;
}

static int malloc_active;	/* Recursion flag for public interface. */
static unsigned malloc_started;	/* Set when initialization has been done */

static void *
pubrealloc(void *ptr, size_t size, const char *func)
{
    void *r;
    int err = 0;

    /*
     * If a thread is inside our code with a functional lock held, and then
     * catches a signal which calls us again, we would get a deadlock if the
     * lock is not of a recursive type.
     */
    _MALLOC_LOCK();
    malloc_func = func;
    if (malloc_active > 0) {
	if (malloc_active == 1) {
	    wrtwarning("recursive call\n");
	    malloc_active = 2;
	}
	_MALLOC_UNLOCK();
	errno = EINVAL;
	return (NULL);
    }
    malloc_active = 1;

    if (!malloc_started) {
	if (ptr != NULL) {
	    wrtwarning("malloc() has never been called\n");
	    malloc_active = 0;
	    _MALLOC_UNLOCK();
	    errno = EINVAL;
	    return (NULL);
	}
	malloc_init();
	malloc_started = 1;
    }

    if (ptr == ZEROSIZEPTR)
	ptr = NULL;
    if (malloc_sysv && !size) {
	if (ptr != NULL)
	    ifree(ptr);
	r = NULL;
    } else if (!size) {
	if (ptr != NULL)
	    ifree(ptr);
	r = ZEROSIZEPTR;
    } else if (ptr == NULL) {
	r = imalloc(size);
	err = (r == NULL);
    } else {
	r = irealloc(ptr, size);
	err = (r == NULL);
    }
    UTRACE(ptr, size, r);
    malloc_active = 0;
    _MALLOC_UNLOCK();
    if (malloc_xmalloc && err)
	wrterror("out of memory\n");
    if (err)
	errno = ENOMEM;
    return (r);
}
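
/*
 * Note on zero-sized requests (illustrative summary of the code above): by
 * default malloc(0) and realloc(p, 0) return ZEROSIZEPTR, a distinguished
 * non-NULL token at a fixed low address.  It may be handed back to free()
 * or realloc(), which recognize it, but it must never be dereferenced.
 * With the 'V' (sysv) option a zero-sized request returns NULL instead.
 */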

/*
 * These are the public exported interface routines.
 */

void *
malloc(size_t size)
{

    return pubrealloc(NULL, size, " in malloc():");
}

int
posix_memalign(void **memptr, size_t alignment, size_t size)
{
    int err;
    void *result;

    if (!malloc_started) {
	malloc_init();
	malloc_started = 1;
    }
    /* Make sure that alignment is a large enough power of 2. */
    if (((alignment - 1) & alignment) != 0 || alignment < sizeof(void *) ||
	alignment > malloc_pagesize)
	return EINVAL;

    /*
     * (size | alignment) is enough to assure the requested alignment, since
     * the allocator always allocates power-of-two blocks.
     */
    err = errno;	/* Protect errno against changes in pubrealloc(). */
    result = pubrealloc(NULL, (size | alignment), " in posix_memalign()");
    errno = err;

    if (result == NULL)
	return ENOMEM;

    *memptr = result;
    return 0;
}
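
/*
 * Worked example of the (size | alignment) trick (illustrative): for
 * posix_memalign(&p, 32, 9) the allocator is asked for 9 | 32 == 41 bytes,
 * which malloc_bytes() rounds up to a 64-byte chunk.  Chunks are carved out
 * of page-aligned pages at multiples of their power-of-two size, so the
 * result is 64-byte aligned, satisfying the requested 32-byte alignment.
 * Requests larger than malloc_maxsize come back page-aligned, which is why
 * alignments above malloc_pagesize are rejected with EINVAL.
 */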

void *
calloc(size_t num, size_t size)
{
    void *ret;

    if (size != 0 && (num * size) / size != num) {
	/* size_t overflow. */
	errno = ENOMEM;
	return (NULL);
    }

    ret = pubrealloc(NULL, num * size, " in calloc():");

    if (ret != NULL)
	memset(ret, 0, num * size);

    return ret;
}
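
/*
 * Example of the overflow check in calloc() (illustrative, 64-bit size_t):
 * calloc(SIZE_MAX / 2 + 2, 2) computes a product that wraps around to 2;
 * dividing that back by size gives 1, which differs from num, so the call
 * fails with ENOMEM instead of returning a tiny allocation that the caller
 * believes is huge.
 */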

void
free(void *ptr)
{

    pubrealloc(ptr, 0, " in free():");
}

void *
realloc(void *ptr, size_t size)
{

    return pubrealloc(ptr, size, " in realloc():");
}

/*
 * Begin library-private functions, used by threading libraries for protection
 * of malloc during fork().  These functions are only called if the program is
 * running in threaded mode, so there is no need to check whether the program
 * is threaded here.
 */

void
_malloc_prefork(void)
{

    _MALLOC_LOCK();
}

void
_malloc_postfork(void)
{

    _MALLOC_UNLOCK();
}