1 1.56.2.1 pgoyette /* $NetBSD: malloc.c,v 1.56.2.1 2017/03/20 06:56:58 pgoyette Exp $ */
2 1.18 thorpej
3 1.1 cgd /*
4 1.17 tls * ----------------------------------------------------------------------------
5 1.17 tls * "THE BEER-WARE LICENSE" (Revision 42):
6 1.17 tls * <phk (at) FreeBSD.ORG> wrote this file. As long as you retain this notice you
7 1.17 tls * can do whatever you want with this stuff. If we meet some day, and you think
8 1.17 tls * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
9 1.17 tls * ----------------------------------------------------------------------------
10 1.1 cgd *
11 1.49 christos * From FreeBSD: malloc.c,v 1.91 2006/01/12 07:28:20 jasone
12 1.1 cgd *
13 1.17 tls */
14 1.17 tls
15 1.17 tls /*
16 1.19 thorpej * Defining MALLOC_EXTRA_SANITY will enable extra checks which are related
17 1.17 tls * to internal conditions and consistency in malloc.c. This has a
18 1.17 tls * noticeable runtime performance hit, and generally will not do you
19 1.17 tls * any good unless you fiddle with the internals of malloc or want
20 1.17 tls * to catch random pointer corruption as early as possible.
21 1.17 tls */
22 1.17 tls #ifndef MALLOC_EXTRA_SANITY
23 1.17 tls #undef MALLOC_EXTRA_SANITY
24 1.5 thorpej #endif
25 1.1 cgd
26 1.1 cgd /*
27 1.17 tls * What to use for Junk. This is the byte value we use to fill with
28 1.17 tls * when the 'J' option is enabled.
29 1.17 tls */
30 1.17 tls #define SOME_JUNK 0xd0 /* as in "Duh" :-) */
31 1.17 tls
32 1.17 tls /*
33 1.17 tls * The basic parameters you can tweak.
34 1.17 tls *
35 1.17 tls * malloc_minsize minimum size of an allocation in bytes.
36 1.17 tls * If this is too small, it's too much work
37 1.17 tls * to manage them. This is also the smallest
38 1.17 tls * unit of alignment used for the storage
39 1.17 tls * returned by malloc/realloc.
40 1.1 cgd *
41 1.1 cgd */
42 1.1 cgd
43 1.49 christos #include "namespace.h"
44 1.17 tls #if defined(__FreeBSD__)
45 1.17 tls # if defined(__i386__)
46 1.17 tls # define malloc_minsize 16U
47 1.17 tls # endif
48 1.49 christos # if defined(__ia64__)
49 1.49 christos # define malloc_pageshift 13U
50 1.49 christos # define malloc_minsize 16U
51 1.49 christos # endif
52 1.17 tls # if defined(__alpha__)
53 1.49 christos # define malloc_pageshift 13U
54 1.49 christos # define malloc_minsize 16U
55 1.49 christos # endif
56 1.49 christos # if defined(__sparc64__)
57 1.49 christos # define malloc_pageshift 13U
58 1.17 tls # define malloc_minsize 16U
59 1.17 tls # endif
60 1.49 christos # if defined(__amd64__)
61 1.49 christos # define malloc_pageshift 12U
62 1.49 christos # define malloc_minsize 16U
63 1.49 christos # endif
64 1.49 christos # if defined(__arm__)
65 1.49 christos # define malloc_pageshift 12U
66 1.49 christos # define malloc_minsize 16U
67 1.49 christos # endif
68 1.35 jdolecek # define HAS_UTRACE
69 1.35 jdolecek # define UTRACE_LABEL
70 1.35 jdolecek
71 1.35 jdolecek #include <sys/cdefs.h>
72 1.43 junyoung void utrace(struct ut *, int);
73 1.35 jdolecek
74 1.17 tls /*
75 1.17 tls * Make malloc/free/realloc thread-safe in libc for use with
76 1.17 tls * kernel threads.
77 1.17 tls */
78 1.17 tls # include "libc_private.h"
79 1.17 tls # include "spinlock.h"
80 1.17 tls static spinlock_t thread_lock = _SPINLOCK_INITIALIZER;
81 1.49 christos # define _MALLOC_LOCK() if (__isthreaded) _SPINLOCK(&thread_lock);
82 1.49 christos # define _MALLOC_UNLOCK() if (__isthreaded) _SPINUNLOCK(&thread_lock);
83 1.17 tls #endif /* __FreeBSD__ */
84 1.17 tls
85 1.51 macallan #include <sys/types.h>
86 1.17 tls #if defined(__NetBSD__)
87 1.56.2.1 pgoyette # define malloc_minsize 16U
88 1.56.2.1 pgoyette # ifdef _LIBC
89 1.56.2.1 pgoyette # define HAS_UTRACE
90 1.56.2.1 pgoyette # define UTRACE_LABEL "malloc",
91 1.43 junyoung int utrace(const char *, void *, size_t);
92 1.56.2.1 pgoyette # endif
93 1.56.2.1 pgoyette # include <sys/cdefs.h>
94 1.56.2.1 pgoyette # include "extern.h"
95 1.56.2.1 pgoyette # if defined(LIBC_SCCS) && !defined(lint)
96 1.56.2.1 pgoyette __RCSID("$NetBSD: malloc.c,v 1.56.2.1 2017/03/20 06:56:58 pgoyette Exp $");
97 1.56.2.1 pgoyette # endif /* LIBC_SCCS and not lint */
98 1.56.2.1 pgoyette # include <reentrant.h>
99 1.56.2.1 pgoyette # ifdef _REENTRANT
100 1.41 thorpej extern int __isthreaded;
101 1.41 thorpej static mutex_t thread_lock = MUTEX_INITIALIZER;
102 1.56.2.1 pgoyette # define _MALLOC_LOCK() if (__isthreaded) mutex_lock(&thread_lock);
103 1.56.2.1 pgoyette # define _MALLOC_UNLOCK() if (__isthreaded) mutex_unlock(&thread_lock);
104 1.56.2.1 pgoyette # else
105 1.56.2.1 pgoyette # define _MALLOC_LOCK()
106 1.56.2.1 pgoyette # define _MALLOC_UNLOCK()
107 1.56.2.1 pgoyette # endif
108 1.17 tls #endif /* __NetBSD__ */
109 1.17 tls
110 1.17 tls #if defined(__sparc__) && defined(sun)
111 1.17 tls # define malloc_minsize 16U
112 1.17 tls # define MAP_ANON (0)
113 1.17 tls static int fdzero;
114 1.17 tls # define MMAP_FD fdzero
115 1.17 tls # define INIT_MMAP() \
116 1.56 christos { if ((fdzero = open(_PATH_DEVZERO, O_RDWR | O_CLOEXEC, 0000)) == -1) \
117 1.17 tls wrterror("open of /dev/zero"); }
118 1.17 tls #endif /* __sparc__ */
119 1.17 tls
120 1.17 tls /* Insert your combination here... */
121 1.17 tls #if defined(__FOOCPU__) && defined(__BAROS__)
122 1.17 tls # define malloc_minsize 16U
123 1.17 tls #endif /* __FOOCPU__ && __BAROS__ */
124 1.17 tls
125 1.49 christos #ifndef ZEROSIZEPTR
126 1.49 christos #define ZEROSIZEPTR ((void *)(uintptr_t)(1UL << (malloc_pageshift - 1)))
127 1.49 christos #endif
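/*
 * For illustration: with 4 KB pages this expands to the non-NULL address
 * 0x800, in the middle of page zero.  pubrealloc() hands this token out
 * for malloc(0) when the SysV 'V' option is off; the allocator never
 * returns that address for a real allocation and the page is typically
 * unmapped, so a stray dereference should fault.  On input, free() and
 * realloc() translate it back to NULL before doing anything else.
 */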
128 1.17 tls
129 1.17 tls /*
130 1.17 tls * No user serviceable parts behind this point.
131 1.17 tls */
132 1.16 kleink #include <sys/types.h>
133 1.17 tls #include <sys/mman.h>
134 1.17 tls #include <errno.h>
135 1.17 tls #include <fcntl.h>
136 1.49 christos #include <paths.h>
137 1.17 tls #include <stddef.h>
138 1.9 christos #include <stdio.h>
139 1.1 cgd #include <stdlib.h>
140 1.1 cgd #include <string.h>
141 1.1 cgd #include <unistd.h>
142 1.1 cgd
143 1.17 tls /*
144 1.17 tls * This structure describes a page worth of chunks.
145 1.17 tls */
146 1.17 tls
147 1.17 tls struct pginfo {
148 1.17 tls struct pginfo *next; /* next on the free list */
149 1.17 tls void *page; /* Pointer to the page */
150 1.17 tls u_short size; /* size of this page's chunks */
151 1.17 tls u_short shift; /* How far to shift for this size chunks */
152 1.17 tls u_short free; /* How many free chunks */
153 1.17 tls u_short total; /* How many chunks */
154 1.17 tls u_int bits[1]; /* Which chunks are free */
155 1.17 tls };
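/*
 * For illustration (assuming 4 KB pages and a 32-bit u_int): a page cut
 * into 16-byte chunks has size = 16, shift = 4 and total = free = 256,
 * so the bitmap needs 256/32 = 8 u_ints.  bits[] is declared with one
 * element, but malloc_make_chunks() sizes the structure with offsetof()
 * so the array really extends as far as 'total' requires.
 */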
156 1.1 cgd
157 1.1 cgd /*
158 1.17 tls * This structure describes a number of free pages.
159 1.17 tls */
160 1.17 tls
161 1.17 tls struct pgfree {
162 1.17 tls struct pgfree *next; /* next run of free pages */
163 1.17 tls struct pgfree *prev; /* prev run of free pages */
164 1.17 tls void *page; /* pointer to free pages */
165 1.17 tls void *end; /* pointer to end of free pages */
166 1.17 tls size_t size; /* number of bytes free */
167 1.1 cgd };
168 1.1 cgd
169 1.17 tls /*
170 1.17 tls * How many bits per u_int in the bitmap.
171 1.17 tls * Change only if not 8 bits/byte
172 1.17 tls */
173 1.39 thorpej #define MALLOC_BITS ((int)(8*sizeof(u_int)))
174 1.17 tls
175 1.17 tls /*
176 1.17 tls * Magic values to put in the page_directory
177 1.17 tls */
178 1.17 tls #define MALLOC_NOT_MINE ((struct pginfo*) 0)
179 1.17 tls #define MALLOC_FREE ((struct pginfo*) 1)
180 1.17 tls #define MALLOC_FIRST ((struct pginfo*) 2)
181 1.17 tls #define MALLOC_FOLLOW ((struct pginfo*) 3)
182 1.17 tls #define MALLOC_MAGIC ((struct pginfo*) 4)
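/*
 * A page_dir[] entry is either one of the small magic values above,
 * describing the page as a whole, or a real struct pginfo pointer
 * (compared as >= MALLOC_MAGIC) for a page that has been cut into
 * chunks.  MALLOC_FIRST marks the first page of a multi-page allocation
 * and MALLOC_FOLLOW its continuation pages; MALLOC_FREE is a cached
 * free page and MALLOC_NOT_MINE a page we do not manage.  ifree()
 * below dispatches on exactly this distinction.
 */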
183 1.17 tls
184 1.20 thorpej /*
185 1.20 thorpej * Page size related parameters, computed at run-time.
186 1.20 thorpej */
187 1.20 thorpej static size_t malloc_pagesize;
188 1.20 thorpej static size_t malloc_pageshift;
189 1.20 thorpej static size_t malloc_pagemask;
190 1.17 tls
191 1.17 tls #ifndef malloc_minsize
192 1.17 tls #define malloc_minsize 16U
193 1.17 tls #endif
194 1.17 tls
195 1.17 tls #ifndef malloc_maxsize
196 1.17 tls #define malloc_maxsize ((malloc_pagesize)>>1)
197 1.17 tls #endif
198 1.17 tls
199 1.17 tls #define pageround(foo) (((foo) + (malloc_pagemask))&(~(malloc_pagemask)))
200 1.38 christos #define ptr2idx(foo) \
201 1.38 christos (((size_t)(uintptr_t)(foo) >> malloc_pageshift)-malloc_origo)
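/*
 * Worked example, assuming 4 KB pages (malloc_pageshift == 12,
 * malloc_pagemask == 0xfff): pageround(0x1801) == 0x2000, and with a
 * hypothetical malloc_origo of 0x1000, ptr2idx(0x1234567) ==
 * (0x1234567 >> 12) - 0x1000 == 0x234, i.e. the page_dir slot for the
 * page containing that pointer.
 */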
202 1.1 cgd
203 1.49 christos #ifndef _MALLOC_LOCK
204 1.49 christos #define _MALLOC_LOCK()
205 1.17 tls #endif
206 1.1 cgd
207 1.49 christos #ifndef _MALLOC_UNLOCK
208 1.49 christos #define _MALLOC_UNLOCK()
209 1.16 kleink #endif
210 1.16 kleink
211 1.17 tls #ifndef MMAP_FD
212 1.17 tls #define MMAP_FD (-1)
213 1.1 cgd #endif
214 1.1 cgd
215 1.17 tls #ifndef INIT_MMAP
216 1.17 tls #define INIT_MMAP()
217 1.18 thorpej #endif
218 1.18 thorpej
219 1.18 thorpej #ifndef MADV_FREE
220 1.18 thorpej #define MADV_FREE MADV_DONTNEED
221 1.9 christos #endif
222 1.9 christos
223 1.17 tls /* Number of free pages we cache */
224 1.49 christos static size_t malloc_cache = 16;
225 1.17 tls
226 1.17 tls /* The offset from pagenumber to index into the page directory */
227 1.30 enami static size_t malloc_origo;
228 1.17 tls
229 1.17 tls /* The last index in the page directory we care about */
230 1.30 enami static size_t last_idx;
231 1.17 tls
232 1.17 tls /* Pointer to page directory. Allocated "as if with" malloc */
233 1.17 tls static struct pginfo **page_dir;
234 1.17 tls
235 1.17 tls /* How many slots in the page directory */
236 1.49 christos static size_t malloc_ninfo;
237 1.17 tls
238 1.17 tls /* Free pages line up here */
239 1.17 tls static struct pgfree free_list;
240 1.17 tls
241 1.17 tls /* Abort(), user doesn't handle problems. */
242 1.17 tls static int malloc_abort;
243 1.17 tls
244 1.17 tls /* Are we trying to die ? */
245 1.17 tls static int suicide;
246 1.17 tls
247 1.17 tls /* always realloc ? */
248 1.17 tls static int malloc_realloc;
249 1.9 christos
250 1.17 tls /* pass the kernel a hint on free pages ? */
251 1.49 christos #if defined(MADV_FREE)
252 1.32 simonb static int malloc_hint = 0;
253 1.49 christos #endif
254 1.17 tls
255 1.17 tls /* xmalloc behaviour ? */
256 1.17 tls static int malloc_xmalloc;
257 1.17 tls
258 1.17 tls /* sysv behaviour for malloc(0) ? */
259 1.17 tls static int malloc_sysv;
260 1.17 tls
261 1.17 tls /* zero fill ? */
262 1.17 tls static int malloc_zero;
263 1.17 tls
264 1.17 tls /* junk fill ? */
265 1.17 tls static int malloc_junk;
266 1.17 tls
267 1.17 tls #ifdef HAS_UTRACE
268 1.17 tls
269 1.17 tls /* utrace ? */
270 1.17 tls static int malloc_utrace;
271 1.17 tls
272 1.17 tls struct ut { void *p; size_t s; void *r; };
273 1.17 tls
274 1.17 tls #define UTRACE(a, b, c) \
275 1.35 jdolecek if (malloc_utrace) { \
276 1.35 jdolecek struct ut u; \
277 1.35 jdolecek u.p=a; u.s = b; u.r=c; \
278 1.35 jdolecek utrace(UTRACE_LABEL (void *) &u, sizeof u); \
279 1.35 jdolecek }
280 1.17 tls #else /* !HAS_UTRACE */
281 1.17 tls #define UTRACE(a,b,c)
282 1.17 tls #endif /* HAS_UTRACE */
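/*
 * With the 'U' option every malloc/realloc/free records a { ptr, size,
 * result } triple via utrace(2); the records land in the ktrace(1)
 * stream and can be inspected with kdump(1).
 */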
283 1.17 tls
284 1.17 tls /* my last break. */
285 1.17 tls static void *malloc_brk;
286 1.17 tls
287 1.17 tls /* one location cache for free-list holders */
288 1.17 tls static struct pgfree *px;
289 1.17 tls
290 1.17 tls /* compile-time options */
291 1.49 christos const char *_malloc_options;
292 1.17 tls
293 1.17 tls /* Name of the current public function */
294 1.45 christos static const char *malloc_func;
295 1.17 tls
296 1.17 tls /* Macro for mmap */
297 1.17 tls #define MMAP(size) \
298 1.49 christos mmap(NULL, (size), PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, \
299 1.30 enami MMAP_FD, (off_t)0);
300 1.9 christos
301 1.16 kleink /*
302 1.17 tls * Necessary function declarations
303 1.16 kleink */
304 1.30 enami static int extend_pgdir(size_t idx);
305 1.17 tls static void *imalloc(size_t size);
306 1.17 tls static void ifree(void *ptr);
307 1.17 tls static void *irealloc(void *ptr, size_t size);
308 1.17 tls
309 1.17 tls static void
310 1.49 christos wrtmessage(const char *p1, const char *p2, const char *p3, const char *p4)
311 1.49 christos {
312 1.49 christos
313 1.49 christos write(STDERR_FILENO, p1, strlen(p1));
314 1.49 christos write(STDERR_FILENO, p2, strlen(p2));
315 1.49 christos write(STDERR_FILENO, p3, strlen(p3));
316 1.49 christos write(STDERR_FILENO, p4, strlen(p4));
317 1.49 christos }
318 1.49 christos
319 1.49 christos void (*_malloc_message)(const char *p1, const char *p2, const char *p3,
320 1.49 christos const char *p4) = wrtmessage;
321 1.49 christos static void
322 1.45 christos wrterror(const char *p)
323 1.17 tls {
324 1.49 christos
325 1.17 tls suicide = 1;
326 1.49 christos _malloc_message(getprogname(), malloc_func, " error: ", p);
327 1.17 tls abort();
328 1.17 tls }
329 1.17 tls
330 1.16 kleink static void
331 1.45 christos wrtwarning(const char *p)
332 1.1 cgd {
333 1.49 christos
334 1.49 christos /*
335 1.49 christos * Sensitive processes, somewhat arbitrarily defined here as setuid,
336 1.49 christos * setgid, root, and wheel, cannot afford to have malloc mistakes.
337 1.49 christos */
338 1.49 christos if (malloc_abort || issetugid() || getuid() == 0 || getgid() == 0)
339 1.17 tls wrterror(p);
340 1.17 tls }
341 1.17 tls
342 1.17 tls /*
343 1.17 tls * Allocate a number of pages from the OS
344 1.17 tls */
345 1.17 tls static void *
346 1.30 enami map_pages(size_t pages)
347 1.17 tls {
348 1.38 christos caddr_t result, rresult, tail;
349 1.38 christos intptr_t bytes = pages << malloc_pageshift;
350 1.16 kleink
351 1.38 christos if (bytes < 0 || (size_t)bytes < pages) {
352 1.38 christos errno = ENOMEM;
353 1.38 christos return NULL;
354 1.38 christos }
355 1.17 tls
356 1.38 christos if ((result = sbrk(bytes)) == (void *)-1)
357 1.38 christos return NULL;
358 1.38 christos
359 1.38 christos /*
360 1.38 christos * Round to a page, in case sbrk(2) did not do this for us
361 1.38 christos */
362 1.38 christos rresult = (caddr_t)pageround((size_t)(uintptr_t)result);
363 1.38 christos if (result < rresult) {
364 1.38 christos /* make sure we have enough space to fit bytes */
365 1.38 christos if (sbrk((intptr_t)(rresult - result)) == (void *) -1) {
366 1.38 christos /* we failed, put everything back */
367 1.38 christos if (brk(result)) {
368 1.38 christos wrterror("brk(2) failed [internal error]\n");
369 1.38 christos }
370 1.38 christos }
371 1.17 tls }
372 1.38 christos tail = rresult + (size_t)bytes;
373 1.38 christos
374 1.23 thorpej last_idx = ptr2idx(tail) - 1;
375 1.17 tls malloc_brk = tail;
376 1.17 tls
377 1.38 christos if ((last_idx+1) >= malloc_ninfo && !extend_pgdir(last_idx)) {
378 1.38 christos malloc_brk = result;
379 1.38 christos last_idx = ptr2idx(malloc_brk) - 1;
380 1.38 christos /* Put back break point since we failed. */
381 1.38 christos if (brk(malloc_brk))
382 1.38 christos wrterror("brk(2) failed [internal error]\n");
383 1.38 christos return 0;
384 1.38 christos }
385 1.16 kleink
386 1.38 christos return rresult;
387 1.1 cgd }
388 1.1 cgd
389 1.17 tls /*
390 1.17 tls * Extend page directory
391 1.17 tls */
392 1.17 tls static int
393 1.30 enami extend_pgdir(size_t idx)
394 1.1 cgd {
395 1.17 tls struct pginfo **new, **old;
396 1.30 enami size_t newlen, oldlen;
397 1.37 christos
398 1.37 christos /* check for overflow */
399 1.37 christos if ((((~(1UL << ((sizeof(size_t) * NBBY) - 1)) / sizeof(*page_dir)) + 1)
400 1.37 christos + (malloc_pagesize / sizeof *page_dir)) < idx) {
401 1.37 christos errno = ENOMEM;
402 1.37 christos return 0;
403 1.37 christos }
404 1.1 cgd
405 1.17 tls /* Make it this many pages */
406 1.30 enami newlen = pageround(idx * sizeof *page_dir) + malloc_pagesize;
407 1.17 tls
408 1.17 tls /* remember the old mapping size */
409 1.17 tls oldlen = malloc_ninfo * sizeof *page_dir;
410 1.17 tls
411 1.17 tls /*
412 1.17 tls * NOTE: we allocate new pages and copy the directory rather than tempt
413 1.17 tls * fate by trying to "grow" the region.. There is nothing to prevent
414 1.48 christos * us from accidentally re-mapping space that's been allocated by our caller
415 1.17 tls * via dlopen() or other mmap().
416 1.17 tls *
417 1.17 tls * The copy problem is not too bad, as there is 4K of page index per
418 1.17 tls * 4MB of malloc arena.
419 1.17 tls *
420 1.17 tls * We can totally avoid the copy if we open a file descriptor to associate
421 1.17 tls * the anon mappings with. Then, when we remap the pages at the new
422 1.17 tls * address, the old pages will be "magically" remapped.. But this means
423 1.17 tls * keeping open a "secret" file descriptor.....
424 1.17 tls */
425 1.17 tls
426 1.17 tls /* Get new pages */
427 1.49 christos new = MMAP(newlen);
428 1.43 junyoung if (new == MAP_FAILED)
429 1.17 tls return 0;
430 1.17 tls
431 1.17 tls /* Copy the old stuff */
432 1.30 enami memcpy(new, page_dir, oldlen);
433 1.17 tls
434 1.17 tls /* register the new size */
435 1.30 enami malloc_ninfo = newlen / sizeof *page_dir;
436 1.17 tls
437 1.17 tls /* swap the pointers */
438 1.17 tls old = page_dir;
439 1.17 tls page_dir = new;
440 1.17 tls
441 1.17 tls /* Now free the old stuff */
442 1.17 tls munmap(old, oldlen);
443 1.17 tls return 1;
444 1.17 tls }
445 1.16 kleink
446 1.17 tls /*
447 1.17 tls * Initialize the world
448 1.17 tls */
449 1.17 tls static void
450 1.49 christos malloc_init(void)
451 1.17 tls {
452 1.49 christos const char *p;
453 1.49 christos char b[64];
454 1.49 christos size_t i;
455 1.49 christos ssize_t j;
456 1.54 christos int serrno = errno;
457 1.20 thorpej
458 1.20 thorpej /*
459 1.20 thorpej * Compute page-size related variables.
460 1.20 thorpej */
461 1.56.2.1 pgoyette malloc_pagesize = getpagesize();
462 1.20 thorpej malloc_pagemask = malloc_pagesize - 1;
463 1.20 thorpej for (malloc_pageshift = 0;
464 1.20 thorpej (1UL << malloc_pageshift) != malloc_pagesize;
465 1.20 thorpej malloc_pageshift++)
466 1.20 thorpej /* nothing */ ;
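/*
 * E.g. a 4096-byte page yields malloc_pagemask 0xfff and
 * malloc_pageshift 12; an 8192-byte page yields 0x1fff and 13.
 */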
467 1.17 tls
468 1.17 tls INIT_MMAP();
469 1.17 tls
470 1.19 thorpej #ifdef MALLOC_EXTRA_SANITY
471 1.17 tls malloc_junk = 1;
472 1.19 thorpej #endif /* MALLOC_EXTRA_SANITY */
473 1.17 tls
474 1.17 tls for (i = 0; i < 3; i++) {
475 1.17 tls if (i == 0) {
476 1.17 tls j = readlink("/etc/malloc.conf", b, sizeof b - 1);
477 1.54 christos if (j == -1)
478 1.17 tls continue;
479 1.17 tls b[j] = '\0';
480 1.17 tls p = b;
481 1.56.2.1 pgoyette #ifdef _LIBC
482 1.49 christos } else if (i == 1 && issetugid() == 0) {
483 1.49 christos p = getenv("MALLOC_OPTIONS");
484 1.56.2.1 pgoyette #endif
485 1.17 tls } else if (i == 1) {
486 1.49 christos continue;
487 1.17 tls } else {
488 1.49 christos p = _malloc_options;
489 1.1 cgd }
490 1.43 junyoung for (; p != NULL && *p != '\0'; p++) {
491 1.17 tls switch (*p) {
492 1.17 tls case '>': malloc_cache <<= 1; break;
493 1.17 tls case '<': malloc_cache >>= 1; break;
494 1.17 tls case 'a': malloc_abort = 0; break;
495 1.17 tls case 'A': malloc_abort = 1; break;
496 1.17 tls case 'h': malloc_hint = 0; break;
497 1.17 tls case 'H': malloc_hint = 1; break;
498 1.17 tls case 'r': malloc_realloc = 0; break;
499 1.17 tls case 'R': malloc_realloc = 1; break;
500 1.17 tls case 'j': malloc_junk = 0; break;
501 1.17 tls case 'J': malloc_junk = 1; break;
502 1.17 tls #ifdef HAS_UTRACE
503 1.17 tls case 'u': malloc_utrace = 0; break;
504 1.17 tls case 'U': malloc_utrace = 1; break;
505 1.17 tls #endif
506 1.17 tls case 'v': malloc_sysv = 0; break;
507 1.17 tls case 'V': malloc_sysv = 1; break;
508 1.17 tls case 'x': malloc_xmalloc = 0; break;
509 1.17 tls case 'X': malloc_xmalloc = 1; break;
510 1.17 tls case 'z': malloc_zero = 0; break;
511 1.17 tls case 'Z': malloc_zero = 1; break;
512 1.17 tls default:
513 1.49 christos _malloc_message(getprogname(), malloc_func,
514 1.49 christos " warning: ", "unknown char in MALLOC_OPTIONS\n");
515 1.17 tls break;
516 1.17 tls }
517 1.1 cgd }
518 1.17 tls }
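/*
 * Options are scanned in this order: the target of the /etc/malloc.conf
 * symlink (readlink(2) returns the link text, so the link itself is the
 * option string), then the MALLOC_OPTIONS environment variable (skipped
 * for set-id programs), then the compiled-in _malloc_options.  Each
 * letter just flips a flag, so later sources override earlier ones;
 * e.g. "ln -s AJ /etc/malloc.conf" enables abort-on-error and junk
 * filling system-wide.
 */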
519 1.17 tls
520 1.17 tls UTRACE(0, 0, 0);
521 1.17 tls
522 1.17 tls /*
523 1.17 tls * We want junk in the entire allocation, and zero only in the part
524 1.17 tls * the user asked for.
525 1.17 tls */
526 1.17 tls if (malloc_zero)
527 1.49 christos malloc_junk = 1;
528 1.17 tls
529 1.17 tls /* Allocate one page for the page directory */
530 1.49 christos page_dir = MMAP(malloc_pagesize);
531 1.17 tls
532 1.43 junyoung if (page_dir == MAP_FAILED)
533 1.17 tls wrterror("mmap(2) failed, check limits.\n");
534 1.17 tls
535 1.17 tls /*
536 1.17 tls * We need a maximum of malloc_pageshift buckets, steal these from the
537 1.17 tls * front of the page_directory;
538 1.17 tls */
539 1.38 christos malloc_origo = pageround((size_t)(uintptr_t)sbrk((intptr_t)0))
540 1.34 christos >> malloc_pageshift;
541 1.17 tls malloc_origo -= malloc_pageshift;
542 1.17 tls
543 1.17 tls malloc_ninfo = malloc_pagesize / sizeof *page_dir;
544 1.17 tls
545 1.17 tls /* Recalculate the cache size in bytes, and make sure it's nonzero */
546 1.17 tls
547 1.17 tls if (!malloc_cache)
548 1.17 tls malloc_cache++;
549 1.17 tls
550 1.17 tls malloc_cache <<= malloc_pageshift;
551 1.17 tls
552 1.17 tls /*
553 1.17 tls * This is a nice hack from Kaleb Keithly (kaleb (at) x.org).
554 1.17 tls * We can sbrk(2) further back when we keep this on a low address.
555 1.17 tls */
556 1.49 christos px = imalloc(sizeof *px);
557 1.17 tls
558 1.54 christos errno = serrno;
559 1.17 tls }
560 1.17 tls
561 1.17 tls /*
562 1.17 tls * Allocate a number of complete pages
563 1.17 tls */
564 1.17 tls static void *
565 1.17 tls malloc_pages(size_t size)
566 1.17 tls {
567 1.43 junyoung void *p, *delay_free = NULL;
568 1.38 christos size_t i;
569 1.17 tls struct pgfree *pf;
570 1.30 enami size_t idx;
571 1.17 tls
572 1.38 christos idx = pageround(size);
573 1.38 christos if (idx < size) {
574 1.38 christos errno = ENOMEM;
575 1.38 christos return NULL;
576 1.38 christos } else
577 1.38 christos size = idx;
578 1.17 tls
579 1.43 junyoung p = NULL;
580 1.17 tls
581 1.17 tls /* Look for free pages before asking for more */
582 1.17 tls for(pf = free_list.next; pf; pf = pf->next) {
583 1.17 tls
584 1.19 thorpej #ifdef MALLOC_EXTRA_SANITY
585 1.17 tls if (pf->size & malloc_pagemask)
586 1.43 junyoung wrterror("(ES): junk length entry on free_list.\n");
587 1.17 tls if (!pf->size)
588 1.43 junyoung wrterror("(ES): zero length entry on free_list.\n");
589 1.17 tls if (pf->page == pf->end)
590 1.43 junyoung wrterror("(ES): zero entry on free_list.\n");
591 1.43 junyoung if (pf->page > pf->end)
592 1.43 junyoung wrterror("(ES): sick entry on free_list.\n");
593 1.17 tls if ((void*)pf->page >= (void*)sbrk(0))
594 1.43 junyoung wrterror("(ES): entry on free_list past brk.\n");
595 1.43 junyoung if (page_dir[ptr2idx(pf->page)] != MALLOC_FREE)
596 1.43 junyoung wrterror("(ES): non-free first page on free-list.\n");
597 1.23 thorpej if (page_dir[ptr2idx(pf->end)-1] != MALLOC_FREE)
598 1.43 junyoung wrterror("(ES): non-free last page on free-list.\n");
599 1.19 thorpej #endif /* MALLOC_EXTRA_SANITY */
600 1.17 tls
601 1.17 tls if (pf->size < size)
602 1.17 tls continue;
603 1.17 tls
604 1.17 tls if (pf->size == size) {
605 1.17 tls p = pf->page;
606 1.43 junyoung if (pf->next != NULL)
607 1.17 tls pf->next->prev = pf->prev;
608 1.17 tls pf->prev->next = pf->next;
609 1.17 tls delay_free = pf;
610 1.17 tls break;
611 1.17 tls }
612 1.17 tls
613 1.17 tls p = pf->page;
614 1.17 tls pf->page = (char *)pf->page + size;
615 1.17 tls pf->size -= size;
616 1.17 tls break;
617 1.17 tls }
618 1.17 tls
619 1.19 thorpej #ifdef MALLOC_EXTRA_SANITY
620 1.43 junyoung if (p != NULL && page_dir[ptr2idx(p)] != MALLOC_FREE)
621 1.43 junyoung wrterror("(ES): allocated non-free page on free-list.\n");
622 1.19 thorpej #endif /* MALLOC_EXTRA_SANITY */
623 1.17 tls
624 1.17 tls size >>= malloc_pageshift;
625 1.17 tls
626 1.17 tls /* Map new pages */
627 1.43 junyoung if (p == NULL)
628 1.17 tls p = map_pages(size);
629 1.17 tls
630 1.43 junyoung if (p != NULL) {
631 1.17 tls
632 1.23 thorpej idx = ptr2idx(p);
633 1.23 thorpej page_dir[idx] = MALLOC_FIRST;
634 1.17 tls for (i=1;i<size;i++)
635 1.23 thorpej page_dir[idx+i] = MALLOC_FOLLOW;
636 1.17 tls
637 1.17 tls if (malloc_junk)
638 1.17 tls memset(p, SOME_JUNK, size << malloc_pageshift);
639 1.17 tls }
640 1.17 tls
641 1.17 tls if (delay_free) {
642 1.43 junyoung if (px == NULL)
643 1.17 tls px = delay_free;
644 1.17 tls else
645 1.17 tls ifree(delay_free);
646 1.17 tls }
647 1.17 tls
648 1.17 tls return p;
649 1.17 tls }
650 1.17 tls
651 1.17 tls /*
652 1.17 tls * Allocate a page of fragments
653 1.17 tls */
654 1.17 tls
655 1.46 perry static inline int
656 1.17 tls malloc_make_chunks(int bits)
657 1.17 tls {
658 1.17 tls struct pginfo *bp;
659 1.17 tls void *pp;
660 1.49 christos int i, k;
661 1.49 christos long l;
662 1.17 tls
663 1.17 tls /* Allocate a new bucket */
664 1.17 tls pp = malloc_pages(malloc_pagesize);
665 1.43 junyoung if (pp == NULL)
666 1.17 tls return 0;
667 1.17 tls
668 1.17 tls /* Find length of admin structure */
669 1.49 christos l = (long)offsetof(struct pginfo, bits[0]);
670 1.49 christos l += (long)sizeof bp->bits[0] *
671 1.17 tls (((malloc_pagesize >> bits)+MALLOC_BITS-1) / MALLOC_BITS);
672 1.17 tls
673 1.17 tls /* Don't waste more than two chunks on this */
674 1.17 tls if ((1<<(bits)) <= l+l) {
675 1.17 tls bp = (struct pginfo *)pp;
676 1.17 tls } else {
677 1.49 christos bp = imalloc((size_t)l);
678 1.43 junyoung if (bp == NULL) {
679 1.17 tls ifree(pp);
680 1.17 tls return 0;
681 1.1 cgd }
682 1.17 tls }
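/*
 * Rough example (LP64, 4 KB pages): for 32-byte chunks (bits == 5) the
 * page holds 128 chunks, the bitmap takes 4 u_ints and l is about 40
 * bytes, so (1<<5) <= l+l and the pginfo lives at the start of the page
 * itself; the loop further down then marks the first couple of chunks
 * as used by the header.  For chunk sizes well above 2*l the pginfo is
 * imalloc()ed separately instead.
 */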
683 1.17 tls
684 1.17 tls bp->size = (1<<bits);
685 1.17 tls bp->shift = bits;
686 1.49 christos bp->total = bp->free = (u_short)(malloc_pagesize >> bits);
687 1.17 tls bp->page = pp;
688 1.17 tls
689 1.17 tls /* set all valid bits in the bitmap */
690 1.17 tls k = bp->total;
691 1.17 tls i = 0;
692 1.17 tls
693 1.17 tls /* Do a bunch at a time */
694 1.17 tls for(;k-i >= MALLOC_BITS; i += MALLOC_BITS)
695 1.30 enami bp->bits[i / MALLOC_BITS] = ~0U;
696 1.17 tls
697 1.17 tls for(; i < k; i++)
698 1.17 tls bp->bits[i/MALLOC_BITS] |= 1<<(i%MALLOC_BITS);
699 1.17 tls
700 1.17 tls if (bp == bp->page) {
701 1.17 tls /* Mark the ones we stole for ourselves */
702 1.49 christos for(i = 0; l > 0; i++) {
703 1.49 christos bp->bits[i / MALLOC_BITS] &= ~(1 << (i % MALLOC_BITS));
704 1.17 tls bp->free--;
705 1.17 tls bp->total--;
706 1.49 christos l -= (long)(1 << bits);
707 1.1 cgd }
708 1.17 tls }
709 1.17 tls
710 1.17 tls /* MALLOC_LOCK */
711 1.17 tls
712 1.23 thorpej page_dir[ptr2idx(pp)] = bp;
713 1.17 tls
714 1.17 tls bp->next = page_dir[bits];
715 1.17 tls page_dir[bits] = bp;
716 1.17 tls
717 1.17 tls /* MALLOC_UNLOCK */
718 1.17 tls
719 1.17 tls return 1;
720 1.1 cgd }
721 1.1 cgd
722 1.1 cgd /*
723 1.17 tls * Allocate a fragment
724 1.1 cgd */
725 1.17 tls static void *
726 1.17 tls malloc_bytes(size_t size)
727 1.1 cgd {
728 1.30 enami size_t i;
729 1.30 enami int j;
730 1.17 tls u_int u;
731 1.17 tls struct pginfo *bp;
732 1.49 christos size_t k;
733 1.17 tls u_int *lp;
734 1.17 tls
735 1.17 tls /* Don't bother with anything less than this */
736 1.17 tls if (size < malloc_minsize)
737 1.17 tls size = malloc_minsize;
738 1.17 tls
739 1.49 christos
740 1.17 tls /* Find the right bucket */
741 1.17 tls j = 1;
742 1.17 tls i = size-1;
743 1.17 tls while (i >>= 1)
744 1.17 tls j++;
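/*
 * j is now the smallest exponent with (1 << j) >= size, i.e. the bucket
 * of power-of-two chunks this request falls into: e.g. size 100 gives
 * j == 7 (128-byte chunks) and size 16 gives j == 4.
 */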
745 1.17 tls
746 1.17 tls /* If it's empty, make a page more of that size chunks */
747 1.43 junyoung if (page_dir[j] == NULL && !malloc_make_chunks(j))
748 1.43 junyoung return NULL;
749 1.17 tls
750 1.17 tls bp = page_dir[j];
751 1.17 tls
752 1.17 tls /* Find first word of bitmap which isn't empty */
753 1.17 tls for (lp = bp->bits; !*lp; lp++)
754 1.17 tls ;
755 1.17 tls
756 1.17 tls /* Find that bit, and tweak it */
757 1.17 tls u = 1;
758 1.17 tls k = 0;
759 1.17 tls while (!(*lp & u)) {
760 1.17 tls u += u;
761 1.17 tls k++;
762 1.17 tls }
763 1.17 tls *lp ^= u;
764 1.17 tls
765 1.17 tls /* If there are no more free, remove from free-list */
766 1.17 tls if (!--bp->free) {
767 1.17 tls page_dir[j] = bp->next;
768 1.43 junyoung bp->next = NULL;
769 1.17 tls }
770 1.17 tls
771 1.17 tls /* Adjust to the real offset of that chunk */
772 1.17 tls k += (lp-bp->bits)*MALLOC_BITS;
773 1.17 tls k <<= bp->shift;
774 1.17 tls
775 1.17 tls if (malloc_junk)
776 1.30 enami memset((u_char*)bp->page + k, SOME_JUNK, (size_t)bp->size);
777 1.1 cgd
778 1.17 tls return (u_char *)bp->page + k;
779 1.1 cgd }
780 1.1 cgd
781 1.17 tls /*
782 1.17 tls * Allocate a piece of memory
783 1.17 tls */
784 1.17 tls static void *
785 1.17 tls imalloc(size_t size)
786 1.17 tls {
787 1.17 tls void *result;
788 1.17 tls
789 1.17 tls if (suicide)
790 1.17 tls abort();
791 1.17 tls
792 1.17 tls if ((size + malloc_pagesize) < size) /* Check for overflow */
793 1.43 junyoung result = NULL;
794 1.49 christos else if ((size + malloc_pagesize) >= (uintptr_t)page_dir)
795 1.49 christos result = NULL;
796 1.17 tls else if (size <= malloc_maxsize)
797 1.43 junyoung result = malloc_bytes(size);
798 1.17 tls else
799 1.43 junyoung result = malloc_pages(size);
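/*
 * In other words: requests up to malloc_maxsize (half a page) are served
 * from chunk pages, anything larger gets whole pages.  The first two
 * tests are sanity limits: they reject sizes that would wrap around once
 * a page is added, or that are comparable in magnitude to the address of
 * the mmap(2)ed page directory.
 */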
800 1.17 tls
801 1.43 junyoung if (malloc_abort && result == NULL)
802 1.17 tls wrterror("allocation failed.\n");
803 1.17 tls
804 1.43 junyoung if (malloc_zero && result != NULL)
805 1.17 tls memset(result, 0, size);
806 1.17 tls
807 1.17 tls return result;
808 1.1 cgd }
809 1.1 cgd
810 1.1 cgd /*
811 1.17 tls * Change the size of an allocation.
812 1.1 cgd */
813 1.17 tls static void *
814 1.17 tls irealloc(void *ptr, size_t size)
815 1.17 tls {
816 1.17 tls void *p;
817 1.30 enami size_t osize, idx;
818 1.17 tls struct pginfo **mp;
819 1.30 enami size_t i;
820 1.17 tls
821 1.17 tls if (suicide)
822 1.17 tls abort();
823 1.17 tls
824 1.23 thorpej idx = ptr2idx(ptr);
825 1.1 cgd
826 1.23 thorpej if (idx < malloc_pageshift) {
827 1.17 tls wrtwarning("junk pointer, too low to make sense.\n");
828 1.17 tls return 0;
829 1.17 tls }
830 1.17 tls
831 1.23 thorpej if (idx > last_idx) {
832 1.17 tls wrtwarning("junk pointer, too high to make sense.\n");
833 1.17 tls return 0;
834 1.17 tls }
835 1.17 tls
836 1.23 thorpej mp = &page_dir[idx];
837 1.17 tls
838 1.17 tls if (*mp == MALLOC_FIRST) { /* Page allocation */
839 1.17 tls
840 1.17 tls /* Check the pointer */
841 1.38 christos if ((size_t)(uintptr_t)ptr & malloc_pagemask) {
842 1.17 tls wrtwarning("modified (page-) pointer.\n");
843 1.43 junyoung return NULL;
844 1.17 tls }
845 1.17 tls
846 1.17 tls /* Find the size in bytes */
847 1.17 tls for (osize = malloc_pagesize; *++mp == MALLOC_FOLLOW;)
848 1.17 tls osize += malloc_pagesize;
849 1.17 tls
850 1.17 tls if (!malloc_realloc && /* unless we have to, */
851 1.17 tls size <= osize && /* .. or are too small, */
852 1.17 tls size > (osize - malloc_pagesize)) { /* .. or can free a page, */
853 1.49 christos if (malloc_junk)
854 1.49 christos memset((u_char *)ptr + size, SOME_JUNK, osize-size);
855 1.17 tls return ptr; /* don't do anything. */
856 1.6 jtc }
857 1.17 tls
858 1.17 tls } else if (*mp >= MALLOC_MAGIC) { /* Chunk allocation */
859 1.17 tls
860 1.17 tls /* Check the pointer for sane values */
861 1.38 christos if (((size_t)(uintptr_t)ptr & ((*mp)->size-1))) {
862 1.17 tls wrtwarning("modified (chunk-) pointer.\n");
863 1.43 junyoung return NULL;
864 1.1 cgd }
865 1.17 tls
866 1.17 tls /* Find the chunk index in the page */
867 1.38 christos i = ((size_t)(uintptr_t)ptr & malloc_pagemask) >> (*mp)->shift;
868 1.17 tls
869 1.17 tls /* Verify that it isn't a free chunk already */
870 1.49 christos if ((*mp)->bits[i/MALLOC_BITS] & (1UL << (i % MALLOC_BITS))) {
871 1.17 tls wrtwarning("chunk is already free.\n");
872 1.43 junyoung return NULL;
873 1.16 kleink }
874 1.17 tls
875 1.17 tls osize = (*mp)->size;
876 1.17 tls
877 1.17 tls if (!malloc_realloc && /* Unless we have to, */
878 1.49 christos size <= osize && /* ..or are too small, */
879 1.49 christos (size > osize / 2 || /* ..or could use a smaller size, */
880 1.17 tls osize == malloc_minsize)) { /* ..(if there is one) */
881 1.49 christos if (malloc_junk)
882 1.49 christos memset((u_char *)ptr + size, SOME_JUNK, osize-size);
883 1.17 tls return ptr; /* ..Don't do anything */
884 1.1 cgd }
885 1.17 tls
886 1.17 tls } else {
887 1.17 tls wrtwarning("pointer to wrong page.\n");
888 1.43 junyoung return NULL;
889 1.17 tls }
890 1.17 tls
891 1.17 tls p = imalloc(size);
892 1.17 tls
893 1.43 junyoung if (p != NULL) {
894 1.17 tls /* copy the lesser of the two sizes, and free the old one */
895 1.17 tls if (!size || !osize)
896 1.17 tls ;
897 1.17 tls else if (osize < size)
898 1.17 tls memcpy(p, ptr, osize);
899 1.17 tls else
900 1.17 tls memcpy(p, ptr, size);
901 1.17 tls ifree(ptr);
902 1.17 tls }
903 1.17 tls return p;
904 1.1 cgd }
905 1.1 cgd
906 1.1 cgd /*
907 1.17 tls * Free a sequence of pages
908 1.1 cgd */
909 1.17 tls
910 1.46 perry static inline void
911 1.30 enami free_pages(void *ptr, size_t idx, struct pginfo *info)
912 1.17 tls {
913 1.30 enami size_t i;
914 1.43 junyoung struct pgfree *pf, *pt=NULL;
915 1.30 enami size_t l;
916 1.17 tls void *tail;
917 1.17 tls
918 1.17 tls if (info == MALLOC_FREE) {
919 1.17 tls wrtwarning("page is already free.\n");
920 1.17 tls return;
921 1.17 tls }
922 1.17 tls
923 1.17 tls if (info != MALLOC_FIRST) {
924 1.17 tls wrtwarning("pointer to wrong page.\n");
925 1.17 tls return;
926 1.17 tls }
927 1.17 tls
928 1.38 christos if ((size_t)(uintptr_t)ptr & malloc_pagemask) {
929 1.17 tls wrtwarning("modified (page-) pointer.\n");
930 1.17 tls return;
931 1.17 tls }
932 1.17 tls
933 1.17 tls /* Count how many pages and mark them free at the same time */
934 1.23 thorpej page_dir[idx] = MALLOC_FREE;
935 1.23 thorpej for (i = 1; page_dir[idx+i] == MALLOC_FOLLOW; i++)
936 1.23 thorpej page_dir[idx + i] = MALLOC_FREE;
937 1.17 tls
938 1.17 tls l = i << malloc_pageshift;
939 1.17 tls
940 1.17 tls if (malloc_junk)
941 1.17 tls memset(ptr, SOME_JUNK, l);
942 1.17 tls
943 1.17 tls if (malloc_hint)
944 1.17 tls madvise(ptr, l, MADV_FREE);
945 1.17 tls
946 1.17 tls tail = (char *)ptr+l;
947 1.17 tls
948 1.17 tls /* add to free-list */
949 1.43 junyoung if (px == NULL)
950 1.49 christos px = imalloc(sizeof *px); /* This cannot fail... */
951 1.17 tls px->page = ptr;
952 1.17 tls px->end = tail;
953 1.17 tls px->size = l;
954 1.43 junyoung if (free_list.next == NULL) {
955 1.17 tls
956 1.17 tls /* Nothing on free list, put this at head */
957 1.17 tls px->next = free_list.next;
958 1.17 tls px->prev = &free_list;
959 1.17 tls free_list.next = px;
960 1.17 tls pf = px;
961 1.43 junyoung px = NULL;
962 1.17 tls
963 1.17 tls } else {
964 1.17 tls
965 1.17 tls /* Find the right spot, leave pf pointing to the modified entry. */
966 1.17 tls tail = (char *)ptr+l;
967 1.17 tls
968 1.43 junyoung for(pf = free_list.next; pf->end < ptr && pf->next != NULL;
969 1.43 junyoung pf = pf->next)
970 1.17 tls ; /* Race ahead here */
971 1.17 tls
972 1.17 tls if (pf->page > tail) {
973 1.17 tls /* Insert before entry */
974 1.17 tls px->next = pf;
975 1.17 tls px->prev = pf->prev;
976 1.17 tls pf->prev = px;
977 1.17 tls px->prev->next = px;
978 1.17 tls pf = px;
979 1.43 junyoung px = NULL;
980 1.17 tls } else if (pf->end == ptr ) {
981 1.17 tls /* Append to the previous entry */
982 1.17 tls pf->end = (char *)pf->end + l;
983 1.17 tls pf->size += l;
984 1.43 junyoung if (pf->next != NULL && pf->end == pf->next->page ) {
985 1.17 tls /* And collapse the next too. */
986 1.17 tls pt = pf->next;
987 1.17 tls pf->end = pt->end;
988 1.17 tls pf->size += pt->size;
989 1.17 tls pf->next = pt->next;
990 1.43 junyoung if (pf->next != NULL)
991 1.17 tls pf->next->prev = pf;
992 1.17 tls }
993 1.17 tls } else if (pf->page == tail) {
994 1.17 tls /* Prepend to entry */
995 1.17 tls pf->size += l;
996 1.17 tls pf->page = ptr;
997 1.43 junyoung } else if (pf->next == NULL) {
998 1.17 tls /* Append at tail of chain */
999 1.43 junyoung px->next = NULL;
1000 1.17 tls px->prev = pf;
1001 1.17 tls pf->next = px;
1002 1.17 tls pf = px;
1003 1.43 junyoung px = NULL;
1004 1.17 tls } else {
1005 1.17 tls wrterror("freelist is destroyed.\n");
1006 1.1 cgd }
1007 1.17 tls }
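/*
 * At this point the freed run has either been inserted in front of pf,
 * glued onto the end of pf (possibly bridging into pf->next, whose
 * now-redundant holder is parked in pt and ifree()d at the bottom of
 * this function), prepended to pf, or appended after the last entry;
 * the free list therefore stays sorted by address and coalesced.
 */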
1008 1.17 tls
1009 1.17 tls /* Return something to OS ? */
1010 1.43 junyoung if (pf->next == NULL && /* If we're the last one, */
1011 1.17 tls pf->size > malloc_cache && /* ..and the cache is full, */
1012 1.17 tls pf->end == malloc_brk && /* ..and none behind us, */
1013 1.34 christos malloc_brk == sbrk((intptr_t)0)) { /* ..and it's OK to do... */
1014 1.17 tls
1015 1.17 tls /*
1016 1.17 tls * Keep the cache intact. Notice that the '>' above guarantees that
1017 1.17 tls * the pf will always have at least one page afterwards.
1018 1.17 tls */
1019 1.17 tls pf->end = (char *)pf->page + malloc_cache;
1020 1.17 tls pf->size = malloc_cache;
1021 1.17 tls
1022 1.17 tls brk(pf->end);
1023 1.17 tls malloc_brk = pf->end;
1024 1.17 tls
1025 1.23 thorpej idx = ptr2idx(pf->end);
1026 1.17 tls
1027 1.23 thorpej for(i=idx;i <= last_idx;)
1028 1.17 tls page_dir[i++] = MALLOC_NOT_MINE;
1029 1.17 tls
1030 1.47 elad last_idx = idx - 1;
1031 1.47 elad
1032 1.17 tls /* XXX: We could realloc/shrink the pagedir here I guess. */
1033 1.17 tls }
1034 1.43 junyoung if (pt != NULL)
1035 1.17 tls ifree(pt);
1036 1.1 cgd }
1037 1.1 cgd
1038 1.1 cgd /*
1039 1.17 tls * Free a chunk, and possibly the page it's on, if the page becomes empty.
1040 1.1 cgd */
1041 1.17 tls
1042 1.46 perry static inline void
1043 1.30 enami free_bytes(void *ptr, size_t idx, struct pginfo *info)
1044 1.17 tls {
1045 1.30 enami size_t i;
1046 1.17 tls struct pginfo **mp;
1047 1.17 tls void *vp;
1048 1.17 tls
1049 1.17 tls /* Find the chunk number on the page */
1050 1.38 christos i = ((size_t)(uintptr_t)ptr & malloc_pagemask) >> info->shift;
1051 1.17 tls
1052 1.38 christos if (((size_t)(uintptr_t)ptr & (info->size-1))) {
1053 1.17 tls wrtwarning("modified (chunk-) pointer.\n");
1054 1.17 tls return;
1055 1.17 tls }
1056 1.17 tls
1057 1.49 christos if (info->bits[i/MALLOC_BITS] & (1UL << (i % MALLOC_BITS))) {
1058 1.17 tls wrtwarning("chunk is already free.\n");
1059 1.17 tls return;
1060 1.17 tls }
1061 1.17 tls
1062 1.17 tls if (malloc_junk)
1063 1.30 enami memset(ptr, SOME_JUNK, (size_t)info->size);
1064 1.17 tls
1065 1.49 christos info->bits[i/MALLOC_BITS] |= (u_int)(1UL << (i % MALLOC_BITS));
1066 1.17 tls info->free++;
1067 1.17 tls
1068 1.17 tls mp = page_dir + info->shift;
1069 1.17 tls
1070 1.17 tls if (info->free == 1) {
1071 1.17 tls
1072 1.17 tls /* Page became non-full */
1073 1.17 tls
1074 1.17 tls mp = page_dir + info->shift;
1075 1.17 tls /* Insert in address order */
1076 1.17 tls while (*mp && (*mp)->next && (*mp)->next->page < info->page)
1077 1.17 tls mp = &(*mp)->next;
1078 1.17 tls info->next = *mp;
1079 1.17 tls *mp = info;
1080 1.17 tls return;
1081 1.17 tls }
1082 1.17 tls
1083 1.17 tls if (info->free != info->total)
1084 1.17 tls return;
1085 1.17 tls
1086 1.17 tls /* Find & remove this page in the queue */
1087 1.17 tls while (*mp != info) {
1088 1.17 tls mp = &((*mp)->next);
1089 1.19 thorpej #ifdef MALLOC_EXTRA_SANITY
1090 1.17 tls if (!*mp)
1091 1.43 junyoung wrterror("(ES): Not on queue.\n");
1092 1.19 thorpej #endif /* MALLOC_EXTRA_SANITY */
1093 1.17 tls }
1094 1.17 tls *mp = info->next;
1095 1.17 tls
1096 1.17 tls /* Free the page & the info structure if need be */
1097 1.30 enami page_dir[idx] = MALLOC_FIRST;
1098 1.17 tls vp = info->page; /* Order is important ! */
1099 1.17 tls if(vp != (void*)info)
1100 1.17 tls ifree(info);
1101 1.17 tls ifree(vp);
1102 1.17 tls }
1103 1.17 tls
1104 1.17 tls static void
1105 1.17 tls ifree(void *ptr)
1106 1.17 tls {
1107 1.17 tls struct pginfo *info;
1108 1.30 enami size_t idx;
1109 1.17 tls
1110 1.17 tls /* This is legal */
1111 1.43 junyoung if (ptr == NULL)
1112 1.17 tls return;
1113 1.17 tls
1114 1.17 tls /* If we're already sinking, don't make matters any worse. */
1115 1.17 tls if (suicide)
1116 1.17 tls return;
1117 1.17 tls
1118 1.23 thorpej idx = ptr2idx(ptr);
1119 1.17 tls
1120 1.23 thorpej if (idx < malloc_pageshift) {
1121 1.17 tls wrtwarning("junk pointer, too low to make sense.\n");
1122 1.17 tls return;
1123 1.17 tls }
1124 1.17 tls
1125 1.23 thorpej if (idx > last_idx) {
1126 1.17 tls wrtwarning("junk pointer, too high to make sense.\n");
1127 1.17 tls return;
1128 1.17 tls }
1129 1.17 tls
1130 1.23 thorpej info = page_dir[idx];
1131 1.17 tls
1132 1.17 tls if (info < MALLOC_MAGIC)
1133 1.23 thorpej free_pages(ptr, idx, info);
1134 1.17 tls else
1135 1.23 thorpej free_bytes(ptr, idx, info);
1136 1.17 tls return;
1137 1.17 tls }
1138 1.17 tls
1139 1.55 dholland static int malloc_active; /* Recursion flag for public interface. */
1140 1.52 christos static unsigned malloc_started; /* Set when initialization has been done */
1141 1.52 christos
1142 1.49 christos static void *
1143 1.49 christos pubrealloc(void *ptr, size_t size, const char *func)
1144 1.49 christos {
1145 1.49 christos void *r;
1146 1.49 christos int err = 0;
1147 1.49 christos
1148 1.49 christos /*
1149 1.49 christos * If a thread is inside our code with a functional lock held, and then
1150 1.49 christos * catches a signal which calls us again, we would get a deadlock if the
1151 1.49 christos * lock is not of a recursive type.
1152 1.49 christos */
1153 1.49 christos _MALLOC_LOCK();
1154 1.49 christos malloc_func = func;
1155 1.49 christos if (malloc_active > 0) {
1156 1.49 christos if (malloc_active == 1) {
1157 1.49 christos wrtwarning("recursive call\n");
1158 1.49 christos malloc_active = 2;
1159 1.49 christos }
1160 1.49 christos _MALLOC_UNLOCK();
1161 1.49 christos errno = EINVAL;
1162 1.49 christos return (NULL);
1163 1.49 christos }
1164 1.49 christos malloc_active = 1;
1165 1.49 christos
1166 1.49 christos if (!malloc_started) {
1167 1.49 christos if (ptr != NULL) {
1168 1.49 christos wrtwarning("malloc() has never been called\n");
1169 1.49 christos malloc_active = 0;
1170 1.49 christos _MALLOC_UNLOCK();
1171 1.49 christos errno = EINVAL;
1172 1.49 christos return (NULL);
1173 1.49 christos }
1174 1.49 christos malloc_init();
1175 1.49 christos malloc_started = 1;
1176 1.49 christos }
1177 1.49 christos
1178 1.49 christos if (ptr == ZEROSIZEPTR)
1179 1.49 christos ptr = NULL;
1180 1.49 christos if (malloc_sysv && !size) {
1181 1.49 christos if (ptr != NULL)
1182 1.49 christos ifree(ptr);
1183 1.49 christos r = NULL;
1184 1.49 christos } else if (!size) {
1185 1.49 christos if (ptr != NULL)
1186 1.49 christos ifree(ptr);
1187 1.49 christos r = ZEROSIZEPTR;
1188 1.49 christos } else if (ptr == NULL) {
1189 1.49 christos r = imalloc(size);
1190 1.49 christos err = (r == NULL);
1191 1.49 christos } else {
1192 1.49 christos r = irealloc(ptr, size);
1193 1.49 christos err = (r == NULL);
1194 1.49 christos }
1195 1.49 christos UTRACE(ptr, size, r);
1196 1.49 christos malloc_active = 0;
1197 1.49 christos _MALLOC_UNLOCK();
1198 1.49 christos if (malloc_xmalloc && err)
1199 1.49 christos wrterror("out of memory\n");
1200 1.49 christos if (err)
1201 1.49 christos errno = ENOMEM;
1202 1.49 christos return (r);
1203 1.49 christos }
1204 1.49 christos
1205 1.17 tls /*
1206 1.17 tls * These are the public exported interface routines.
1207 1.17 tls */
1208 1.17 tls
1209 1.49 christos void *
1210 1.49 christos malloc(size_t size)
1211 1.49 christos {
1212 1.49 christos
1213 1.49 christos return pubrealloc(NULL, size, " in malloc():");
1214 1.49 christos }
1215 1.49 christos
1216 1.49 christos int
1217 1.49 christos posix_memalign(void **memptr, size_t alignment, size_t size)
1218 1.49 christos {
1219 1.49 christos int err;
1220 1.49 christos void *result;
1221 1.49 christos
1222 1.52 christos if (!malloc_started) {
1223 1.52 christos malloc_init();
1224 1.52 christos malloc_started = 1;
1225 1.52 christos }
1226 1.49 christos /* Make sure that alignment is a large enough power of 2. */
1227 1.52 christos if (((alignment - 1) & alignment) != 0 || alignment < sizeof(void *) ||
1228 1.52 christos alignment > malloc_pagesize)
1229 1.49 christos return EINVAL;
1230 1.49 christos
1231 1.49 christos /*
1232 1.52 christos * (size | alignment) is enough to assure the requested alignment, since
1233 1.49 christos * the allocator always allocates power-of-two blocks.
1234 1.49 christos */
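/*
 * Example: alignment 64, size 100 requests 100 | 64 == 100 bytes, which
 * the chunk allocator rounds up to a 128-byte chunk; chunks of size
 * 1 << j start at multiples of 1 << j inside a page-aligned page, so
 * the result is at least 64-byte aligned.  Since (size | alignment) >=
 * alignment, the block size picked is always a multiple of the
 * requested alignment, and requests above malloc_maxsize come back
 * page aligned anyway.
 */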
1235 1.49 christos err = errno; /* Protect errno against changes in pubrealloc(). */
1236 1.52 christos result = pubrealloc(NULL, (size | alignment), " in posix_memalign()");
1237 1.49 christos errno = err;
1238 1.49 christos
1239 1.49 christos if (result == NULL)
1240 1.49 christos return ENOMEM;
1241 1.49 christos
1242 1.49 christos *memptr = result;
1243 1.49 christos return 0;
1244 1.49 christos }
1245 1.17 tls
1246 1.17 tls void *
1247 1.49 christos calloc(size_t num, size_t size)
1248 1.17 tls {
1249 1.49 christos void *ret;
1250 1.17 tls
1251 1.49 christos if (size != 0 && (num * size) / size != num) {
1252 1.49 christos /* size_t overflow. */
1253 1.49 christos errno = ENOMEM;
1254 1.43 junyoung return (NULL);
1255 1.17 tls }
1256 1.49 christos
1257 1.49 christos ret = pubrealloc(NULL, num * size, " in calloc():");
1258 1.49 christos
1259 1.49 christos if (ret != NULL)
1260 1.49 christos memset(ret, 0, num * size);
1261 1.49 christos
1262 1.49 christos return ret;
1263 1.17 tls }
1264 1.17 tls
1265 1.9 christos void
1266 1.17 tls free(void *ptr)
1267 1.1 cgd {
1268 1.49 christos
1269 1.49 christos pubrealloc(ptr, 0, " in free():");
1270 1.17 tls }
1271 1.17 tls
1272 1.17 tls void *
1273 1.17 tls realloc(void *ptr, size_t size)
1274 1.17 tls {
1275 1.17 tls
1276 1.49 christos return pubrealloc(ptr, size, " in realloc():");
1277 1.49 christos }
1278 1.49 christos
1279 1.49 christos /*
1280 1.49 christos * Begin library-private functions, used by threading libraries for protection
1281 1.49 christos * of malloc during fork(). These functions are only called if the program is
1282 1.49 christos * running in threaded mode, so there is no need to check whether the program
1283 1.49 christos * is threaded here.
1284 1.49 christos */
1285 1.49 christos
1286 1.49 christos void
1287 1.49 christos _malloc_prefork(void)
1288 1.49 christos {
1289 1.49 christos
1290 1.49 christos _MALLOC_LOCK();
1291 1.49 christos }
1292 1.49 christos
1293 1.49 christos void
1294 1.49 christos _malloc_postfork(void)
1295 1.49 christos {
1296 1.49 christos
1297 1.49 christos _MALLOC_UNLOCK();
1298 1.1 cgd }
1299