/*	$NetBSD: jemalloc.c,v 1.10.2.3 2008/01/09 01:34:14 matt Exp $	*/

/*-
 * Copyright (C) 2006,2007 Jason Evans <jasone@FreeBSD.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *******************************************************************************
 *
 * This allocator implementation is designed to provide scalable performance
 * for multi-threaded programs on multi-processor systems.  The following
 * features are included for this purpose:
 *
 *   + Multiple arenas are used if there are multiple CPUs, which reduces lock
 *     contention and cache sloshing.
 *
 *   + Cache line sharing between arenas is avoided for internal data
 *     structures.
 *
 *   + Memory is managed in chunks and runs (chunks can be split into runs),
 *     rather than as individual pages.  This provides a constant-time
 *     mechanism for associating allocations with particular arenas.
 *
 * Allocation requests are rounded up to the nearest size class, and no record
 * of the original request size is maintained.  Allocations are broken into
 * categories according to size class.  Assuming runtime defaults, 4 kB pages
 * and a 16 byte quantum, the size classes in each category are as follows:
 *
 *   |=====================================|
 *   | Category | Subcategory    |    Size |
 *   |=====================================|
 *   | Small    | Tiny           |       2 |
 *   |          |                |       4 |
 *   |          |                |       8 |
 *   |          |----------------+---------|
 *   |          | Quantum-spaced |      16 |
 *   |          |                |      32 |
 *   |          |                |      48 |
 *   |          |                |     ... |
 *   |          |                |     480 |
 *   |          |                |     496 |
 *   |          |                |     512 |
 *   |          |----------------+---------|
 *   |          | Sub-page       |    1 kB |
 *   |          |                |    2 kB |
 *   |=====================================|
 *   | Large                     |    4 kB |
 *   |                           |    8 kB |
 *   |                           |   12 kB |
 *   |                           |     ... |
 *   |                           | 1012 kB |
 *   |                           | 1016 kB |
 *   |                           | 1020 kB |
 *   |=====================================|
 *   | Huge                      |    1 MB |
 *   |                           |    2 MB |
 *   |                           |    3 MB |
 *   |                           |     ... |
 *   |=====================================|
 *
 * A different mechanism is used for each category:
 *
 *   Small : Each size class is segregated into its own set of runs.  Each run
 *           maintains a bitmap of which regions are free/allocated.
 *
 *   Large : Each allocation is backed by a dedicated run.  Metadata are stored
 *           in the associated arena chunk header maps.
 *
 *   Huge : Each allocation is backed by a dedicated contiguous set of chunks.
 *          Metadata are stored in a separate red-black tree.
 *
 *******************************************************************************
 */
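
/*
 * Illustrative sketch (not part of the allocator): how a request size maps to
 * a size class under the defaults assumed by the table above (16 byte
 * quantum, 4 kB page, 1 MB chunk).  The constants and the helper name are
 * assumptions for illustration only; the real classification is performed by
 * the allocation paths later in this file.
 */
#if 0
#include <stddef.h>

static size_t
example_size_class(size_t size)
{
	const size_t quantum = 16;	  /* Assumed default quantum. */
	const size_t pagesize = 4096;	  /* Assumed default page size. */
	const size_t chunksize = 1 << 20; /* Assumed default chunk size. */
	size_t ret;

	if (size <= 8) {
		/* Tiny: round up to a power of two (2, 4, 8). */
		for (ret = 2; ret < size; ret <<= 1)
			;
	} else if (size <= 512) {
		/* Quantum-spaced: round up to a multiple of the quantum. */
		ret = (size + quantum - 1) & ~(quantum - 1);
	} else if (size <= pagesize / 2) {
		/* Sub-page: round up to a power of two (1 kB, 2 kB). */
		for (ret = 1024; ret < size; ret <<= 1)
			;
	} else if (size <= chunksize - 4 * pagesize) {
		/*
		 * Large: round up to a multiple of the page size.  The real
		 * cutoff is the chunk size minus the chunk header, assumed
		 * here to be four pages.
		 */
		ret = (size + pagesize - 1) & ~(pagesize - 1);
	} else {
		/* Huge: round up to a multiple of the chunk size. */
		ret = (size + chunksize - 1) & ~(chunksize - 1);
	}
	return (ret);
}
#endif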

/* LINTLIBRARY */

#ifdef __NetBSD__
#  define xutrace(a, b)		utrace("malloc", (a), (b))
#  define __DECONST(x, y)	((x)__UNCONST(y))
#  define NO_TLS
#else
#  define xutrace(a, b)		utrace((a), (b))
#endif	/* __NetBSD__ */

/*
 * MALLOC_PRODUCTION disables assertions and statistics gathering.  It also
 * defaults the A and J runtime options to off.  These settings are appropriate
 * for production systems.
 */
#define MALLOC_PRODUCTION

#ifndef MALLOC_PRODUCTION
#  define MALLOC_DEBUG
#endif

#include <sys/cdefs.h>
/* __FBSDID("$FreeBSD: src/lib/libc/stdlib/malloc.c,v 1.147 2007/06/15 22:00:16 jasone Exp $"); */
__RCSID("$NetBSD: jemalloc.c,v 1.10.2.3 2008/01/09 01:34:14 matt Exp $");

#ifdef __FreeBSD__
#include "libc_private.h"
#ifdef MALLOC_DEBUG
#  define _LOCK_DEBUG
#endif
#include "spinlock.h"
#endif
#include "namespace.h"
#include <sys/mman.h>
#include <sys/param.h>
#ifdef __FreeBSD__
#include <sys/stddef.h>
#endif
#include <sys/time.h>
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/tree.h>
#include <sys/uio.h>
#include <sys/ktrace.h> /* Must come after several other sys/ includes. */

#ifdef __FreeBSD__
#include <machine/atomic.h>
#include <machine/cpufunc.h>
#endif
#include <machine/vmparam.h>

#include <errno.h>
#include <limits.h>
#include <pthread.h>
#include <sched.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>

#ifdef __NetBSD__
#  include <reentrant.h>
#  include "extern.h"

#define STRERROR_R(a, b, c)	__strerror_r(a, b, c);
/*
 * A non localized version of strerror, that avoids bringing in
 * stdio and the locale code. All the malloc messages are in English
 * so why bother?
 */
static int
__strerror_r(int e, char *s, size_t l)
{
	int rval;
	size_t slen;

	if (e >= 0 && e < sys_nerr) {
		slen = strlcpy(s, sys_errlist[e], l);
		rval = 0;
	} else {
		slen = snprintf_ss(s, l, "Unknown error %u", e);
		rval = EINVAL;
	}
	return slen >= l ? ERANGE : rval;
}
#endif

#ifdef __FreeBSD__
#define STRERROR_R(a, b, c)	strerror_r(a, b, c);
#include "un-namespace.h"
#endif

/* MALLOC_STATS enables statistics calculation. */
#ifndef MALLOC_PRODUCTION
#  define MALLOC_STATS
#endif

#ifdef MALLOC_DEBUG
#  ifdef NDEBUG
#    undef NDEBUG
#  endif
#else
#  ifndef NDEBUG
#    define NDEBUG
#  endif
#endif
#include <assert.h>

#ifdef MALLOC_DEBUG
   /* Disable inlining to make debugging easier. */
#  define inline
#endif

/* Size of stack-allocated buffer passed to strerror_r(). */
#define	STRERROR_BUF		64

/* Minimum alignment of allocations is 2^QUANTUM_2POW_MIN bytes. */
#ifdef __i386__
#  define QUANTUM_2POW_MIN	4
#  define SIZEOF_PTR_2POW	2
#  define USE_BRK
#endif
#ifdef __ia64__
#  define QUANTUM_2POW_MIN	4
#  define SIZEOF_PTR_2POW	3
#endif
#ifdef __alpha__
#  define QUANTUM_2POW_MIN	4
#  define SIZEOF_PTR_2POW	3
#  define NO_TLS
#endif
#ifdef __sparc64__
#  define QUANTUM_2POW_MIN	4
#  define SIZEOF_PTR_2POW	3
#  define NO_TLS
#endif
#ifdef __amd64__
#  define QUANTUM_2POW_MIN	4
#  define SIZEOF_PTR_2POW	3
#endif
#ifdef __arm__
#  define QUANTUM_2POW_MIN	3
#  define SIZEOF_PTR_2POW	2
#  define USE_BRK
#  define NO_TLS
#endif
#ifdef __powerpc__
#  define QUANTUM_2POW_MIN	4
#  define SIZEOF_PTR_2POW	2
#  define USE_BRK
#endif
#if defined(__sparc__) && !defined(__sparc64__)
#  define QUANTUM_2POW_MIN	4
#  define SIZEOF_PTR_2POW	2
#  define USE_BRK
#endif
#ifdef __vax__
#  define QUANTUM_2POW_MIN	4
#  define SIZEOF_PTR_2POW	2
#  define USE_BRK
#endif
#ifdef __sh__
#  define QUANTUM_2POW_MIN	4
#  define SIZEOF_PTR_2POW	2
#  define USE_BRK
#endif
#ifdef __m68k__
#  define QUANTUM_2POW_MIN	4
#  define SIZEOF_PTR_2POW	2
#  define USE_BRK
#endif
#ifdef __mips__
#  define QUANTUM_2POW_MIN	4
#  define SIZEOF_PTR_2POW	2
#  define USE_BRK
#endif
#ifdef __hppa__
#  define QUANTUM_2POW_MIN     4
#  define SIZEOF_PTR_2POW      2
#  define USE_BRK
#endif

#define	SIZEOF_PTR		(1 << SIZEOF_PTR_2POW)

/* sizeof(int) == (1 << SIZEOF_INT_2POW). */
#ifndef SIZEOF_INT_2POW
#  define SIZEOF_INT_2POW	2
#endif

/* We can't use TLS in non-PIC programs, since TLS relies on loader magic. */
#if (!defined(PIC) && !defined(NO_TLS))
#  define NO_TLS
#endif

/*
 * Size and alignment of memory chunks that are allocated by the OS's virtual
 * memory system.
 */
#define	CHUNK_2POW_DEFAULT	20

/*
 * Maximum size of L1 cache line.  This is used to avoid cache line aliasing,
 * so over-estimates are okay (up to a point), but under-estimates will
 * negatively affect performance.
 */
#define	CACHELINE_2POW		6
#define	CACHELINE		((size_t)(1 << CACHELINE_2POW))

/* Smallest size class to support. */
#define	TINY_MIN_2POW		1

/*
 * Maximum size class that is a multiple of the quantum, but not (necessarily)
 * a power of 2.  Above this size, allocations are rounded up to the nearest
 * power of 2.
 */
#define	SMALL_MAX_2POW_DEFAULT	9
#define	SMALL_MAX_DEFAULT	(1 << SMALL_MAX_2POW_DEFAULT)

/*
 * Maximum desired run header overhead.  Runs are sized as small as possible
 * such that this setting is still honored, without violating other constraints.
 * The goal is to make runs as small as possible without exceeding a per run
 * external fragmentation threshold.
 *
 * Note that it is possible to set this low enough that it cannot be honored
 * for some/all object sizes, since there is one bit of header overhead per
 * object (plus a constant).  In such cases, this constraint is relaxed.
 *
 * RUN_MAX_OVRHD_RELAX specifies the maximum number of bits per region of
 * overhead for which RUN_MAX_OVRHD is relaxed.
 */
#define RUN_MAX_OVRHD		0.015
#define RUN_MAX_OVRHD_RELAX	1.5
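
/*
 * Worked example of the constraint above (figures are illustrative, assuming
 * a 4 kB page): for a 16 byte size class, a one-page run holds roughly 250
 * regions, so the bitmap costs about 32 bytes and the fixed arena_run_t
 * fields a few dozen more, i.e. on the order of 1.5% of the run, which is
 * right at RUN_MAX_OVRHD.  For a 2 byte size class, the bitmap alone costs
 * one bit per 2 byte region (over 6% of the run), so RUN_MAX_OVRHD can never
 * be met; the run size is accepted anyway as long as total header overhead
 * stays within RUN_MAX_OVRHD_RELAX (1.5) bits per region.
 */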

/* Put a cap on small object run size.  This overrides RUN_MAX_OVRHD. */
#define RUN_MAX_SMALL_2POW	15
#define RUN_MAX_SMALL		(1 << RUN_MAX_SMALL_2POW)

/******************************************************************************/

#ifdef __FreeBSD__
/*
 * Mutexes based on spinlocks.  We can't use normal pthread mutexes, because
 * they require malloc()ed memory.
 */
typedef struct {
	spinlock_t	lock;
} malloc_mutex_t;

/* Set to true once the allocator has been initialized. */
static bool malloc_initialized = false;

/* Used to avoid initialization races. */
static malloc_mutex_t init_lock = {_SPINLOCK_INITIALIZER};
#else
#define	malloc_mutex_t	mutex_t

/* Set to true once the allocator has been initialized. */
static bool malloc_initialized = false;

/* Used to avoid initialization races. */
static mutex_t init_lock = MUTEX_INITIALIZER;
#endif

/******************************************************************************/
/*
 * Statistics data structures.
 */

#ifdef MALLOC_STATS

typedef struct malloc_bin_stats_s malloc_bin_stats_t;
struct malloc_bin_stats_s {
	/*
	 * Number of allocation requests that corresponded to the size of this
	 * bin.
	 */
	uint64_t	nrequests;

	/* Total number of runs created for this bin's size class. */
	uint64_t	nruns;

	/*
	 * Total number of runs reused by extracting them from the runs tree for
	 * this bin's size class.
	 */
	uint64_t	reruns;

	/* High-water mark for this bin. */
	unsigned long	highruns;

	/* Current number of runs in this bin. */
	unsigned long	curruns;
};

typedef struct arena_stats_s arena_stats_t;
struct arena_stats_s {
	/* Number of bytes currently mapped. */
	size_t		mapped;

	/* Per-size-category statistics. */
	size_t		allocated_small;
	uint64_t	nmalloc_small;
	uint64_t	ndalloc_small;

	size_t		allocated_large;
	uint64_t	nmalloc_large;
	uint64_t	ndalloc_large;
};

typedef struct chunk_stats_s chunk_stats_t;
struct chunk_stats_s {
	/* Number of chunks that were allocated. */
	uint64_t	nchunks;

	/* High-water mark for number of chunks allocated. */
	unsigned long	highchunks;

	/*
	 * Current number of chunks allocated.  This value isn't maintained for
	 * any other purpose, so keep track of it in order to be able to set
	 * highchunks.
	 */
	unsigned long	curchunks;
};

#endif /* #ifdef MALLOC_STATS */

/******************************************************************************/
/*
 * Chunk data structures.
 */

/* Tree of chunks. */
typedef struct chunk_node_s chunk_node_t;
struct chunk_node_s {
	/* Linkage for the chunk tree. */
	RB_ENTRY(chunk_node_s) link;

	/*
	 * Pointer to the chunk that this tree node is responsible for.  In some
	 * (but certainly not all) cases, this data structure is placed at the
	 * beginning of the corresponding chunk, so this field may point to this
	 * node.
	 */
	void	*chunk;

	/* Total chunk size. */
	size_t	size;
};
typedef struct chunk_tree_s chunk_tree_t;
RB_HEAD(chunk_tree_s, chunk_node_s);

/******************************************************************************/
/*
 * Arena data structures.
 */

typedef struct arena_s arena_t;
typedef struct arena_bin_s arena_bin_t;

typedef struct arena_chunk_map_s arena_chunk_map_t;
struct arena_chunk_map_s {
	/* Number of pages in run. */
	uint32_t	npages;
	/*
	 * Position within run.  For a free run, this is POS_FREE for the first
	 * and last pages.  The POS_FREE special value makes it possible to
	 * quickly coalesce free runs.
	 *
	 * This is the limiting factor for chunksize; there can be at most 2^31
	 * pages in a run.
	 */
#define POS_FREE ((uint32_t)0xffffffffU)
	uint32_t	pos;
};
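
/*
 * Illustrative map layout (an assumed example, not taken from a live chunk):
 * if the first three pages of a chunk back an in-use run and the next four
 * pages form a free run, the entries might look like
 *
 *	map[0] = { npages: 3, pos: 0 }		in-use run, first page
 *	map[1] = { npages: 3, pos: 1 }
 *	map[2] = { npages: 3, pos: 2 }		in-use run, last page
 *	map[3] = { npages: 4, pos: POS_FREE }	free run, first page
 *	map[4] = (stale)			interior entries are not kept
 *	map[5] = (stale)			current for free runs
 *	map[6] = { npages: 4, pos: POS_FREE }	free run, last page
 *
 * Marking only the first and last pages of a free run is what lets a newly
 * freed run inspect its immediate neighbors and coalesce in constant time.
 */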

/* Arena chunk header. */
typedef struct arena_chunk_s arena_chunk_t;
struct arena_chunk_s {
	/* Arena that owns the chunk. */
	arena_t *arena;

	/* Linkage for the arena's chunk tree. */
	RB_ENTRY(arena_chunk_s) link;

	/*
	 * Number of pages in use.  This is maintained in order to make
	 * detection of empty chunks fast.
	 */
	uint32_t pages_used;

	/*
	 * Every time a free run larger than this value is created/coalesced,
	 * this value is increased.  The only way that the value decreases is if
	 * arena_run_alloc() fails to find a free run as large as advertised by
	 * this value.
	 */
	uint32_t max_frun_npages;

	/*
	 * Every time a free run that starts at an earlier page than this value
	 * is created/coalesced, this value is decreased.  It is reset in a
	 * similar fashion to max_frun_npages.
	 */
	uint32_t min_frun_ind;

	/*
	 * Map of pages within chunk that keeps track of free/large/small.  For
	 * free runs, only the map entries for the first and last pages are
	 * kept up to date, so that free runs can be quickly coalesced.
	 */
	arena_chunk_map_t map[1]; /* Dynamically sized. */
};
typedef struct arena_chunk_tree_s arena_chunk_tree_t;
RB_HEAD(arena_chunk_tree_s, arena_chunk_s);

typedef struct arena_run_s arena_run_t;
struct arena_run_s {
	/* Linkage for run trees. */
	RB_ENTRY(arena_run_s) link;

#ifdef MALLOC_DEBUG
	uint32_t	magic;
#  define ARENA_RUN_MAGIC 0x384adf93
#endif

	/* Bin this run is associated with. */
	arena_bin_t	*bin;

	/* Index of first element that might have a free region. */
	unsigned	regs_minelm;

	/* Number of free regions in run. */
	unsigned	nfree;

	/* Bitmask of in-use regions (0: in use, 1: free). */
	unsigned	regs_mask[1]; /* Dynamically sized. */
};
typedef struct arena_run_tree_s arena_run_tree_t;
RB_HEAD(arena_run_tree_s, arena_run_s);

struct arena_bin_s {
	/*
	 * Current run being used to service allocations of this bin's size
	 * class.
	 */
	arena_run_t	*runcur;

	/*
	 * Tree of non-full runs.  This tree is used when looking for an
	 * existing run when runcur is no longer usable.  We choose the
	 * non-full run that is lowest in memory; this policy tends to keep
	 * objects packed well, and it can also help reduce the number of
	 * almost-empty chunks.
	 */
	arena_run_tree_t runs;

	/* Size of regions in a run for this bin's size class. */
	size_t		reg_size;

	/* Total size of a run for this bin's size class. */
	size_t		run_size;

	/* Total number of regions in a run for this bin's size class. */
	uint32_t	nregs;

	/* Number of elements in a run's regs_mask for this bin's size class. */
	uint32_t	regs_mask_nelms;

	/* Offset of first region in a run for this bin's size class. */
	uint32_t	reg0_offset;

#ifdef MALLOC_STATS
	/* Bin statistics. */
	malloc_bin_stats_t stats;
#endif
};

struct arena_s {
#ifdef MALLOC_DEBUG
	uint32_t		magic;
#  define ARENA_MAGIC 0x947d3d24
#endif

	/* All operations on this arena require that mtx be locked. */
	malloc_mutex_t		mtx;

#ifdef MALLOC_STATS
	arena_stats_t		stats;
#endif

	/*
	 * Tree of chunks this arena manages.
	 */
	arena_chunk_tree_t	chunks;

	/*
	 * In order to avoid rapid chunk allocation/deallocation when an arena
	 * oscillates right on the cusp of needing a new chunk, cache the most
	 * recently freed chunk.  This caching is disabled by opt_hint.
	 *
	 * There is one spare chunk per arena, rather than one spare total, in
	 * order to avoid interactions between multiple threads that could make
	 * a single spare inadequate.
	 */
	arena_chunk_t *spare;

	/*
	 * bins is used to store rings of free regions of the following sizes,
	 * assuming a 16-byte quantum, 4kB pagesize, and default MALLOC_OPTIONS.
	 *
	 *   bins[i] | size |
	 *   --------+------+
	 *        0  |    2 |
	 *        1  |    4 |
	 *        2  |    8 |
	 *   --------+------+
	 *        3  |   16 |
	 *        4  |   32 |
	 *        5  |   48 |
	 *        6  |   64 |
	 *           :      :
	 *           :      :
	 *       33  |  496 |
	 *       34  |  512 |
	 *   --------+------+
	 *       35  | 1024 |
	 *       36  | 2048 |
	 *   --------+------+
	 */
	arena_bin_t		bins[1]; /* Dynamically sized. */
};
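
/*
 * Illustrative note (assuming the 16 byte quantum, 4 kB page, and bin layout
 * shown above): the bin index for a small request can be computed directly
 * from the rounded-up size, e.g.
 *
 *	tiny (2, 4, 8):           index = log2(size) - 1    -> 0, 1, 2
 *	quantum-spaced (16..512): index = 2 + size / 16     -> 3..34
 *	sub-page (1 kB, 2 kB):    index = 25 + log2(size)   -> 35, 36
 *
 * so bin selection is constant time; no search over size classes is needed.
 */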

/******************************************************************************/
/*
 * Data.
 */

/* Number of CPUs. */
static unsigned		ncpus;

/* VM page size. */
static size_t		pagesize;
static size_t		pagesize_mask;
static int		pagesize_2pow;

/* Various bin-related settings. */
static size_t		bin_maxclass; /* Max size class for bins. */
static unsigned		ntbins; /* Number of (2^n)-spaced tiny bins. */
static unsigned		nqbins; /* Number of quantum-spaced bins. */
static unsigned		nsbins; /* Number of (2^n)-spaced sub-page bins. */
static size_t		small_min;
static size_t		small_max;

/* Various quantum-related settings. */
static size_t		quantum;
static size_t		quantum_mask; /* (quantum - 1). */

/* Various chunk-related settings. */
static size_t		chunksize;
static size_t		chunksize_mask; /* (chunksize - 1). */
static int		chunksize_2pow;
static unsigned		chunk_npages;
static unsigned		arena_chunk_header_npages;
static size_t		arena_maxclass; /* Max size class for arenas. */

/********/
/*
 * Chunks.
 */

/* Protects chunk-related data structures. */
static malloc_mutex_t	chunks_mtx;

/* Tree of chunks that are stand-alone huge allocations. */
static chunk_tree_t	huge;

#ifdef USE_BRK
/*
 * Try to use brk for chunk-size allocations, due to address space constraints.
 */
/*
 * Protects sbrk() calls.  This must be separate from chunks_mtx, since
 * base_pages_alloc() also uses sbrk(), but cannot lock chunks_mtx (doing so
 * could cause recursive lock acquisition).
 */
static malloc_mutex_t	brk_mtx;
/* Result of first sbrk(0) call. */
static void		*brk_base;
/* Current end of brk, or ((void *)-1) if brk is exhausted. */
static void		*brk_prev;
/* Current upper limit on brk addresses. */
static void		*brk_max;
#endif

#ifdef MALLOC_STATS
/* Huge allocation statistics. */
static uint64_t		huge_nmalloc;
static uint64_t		huge_ndalloc;
static uint64_t		huge_nralloc;
static size_t		huge_allocated;
#endif

/*
 * Tree of chunks that were previously allocated.  This is used when allocating
 * chunks, in an attempt to re-use address space.
 */
static chunk_tree_t	old_chunks;

/****************************/
/*
 * base (internal allocation).
 */

/*
 * Current pages that are being used for internal memory allocations.  These
 * pages are carved up in cacheline-size quanta, so that there is no chance of
 * false cache line sharing.
 */
static void		*base_pages;
static void		*base_next_addr;
static void		*base_past_addr; /* Addr immediately past base_pages. */
static chunk_node_t	*base_chunk_nodes; /* LIFO cache of chunk nodes. */
static malloc_mutex_t	base_mtx;
#ifdef MALLOC_STATS
static size_t		base_mapped;
#endif

/********/
/*
 * Arenas.
 */

/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 */
static arena_t		**arenas;
static unsigned		narenas;
static unsigned		next_arena;
static malloc_mutex_t	arenas_mtx; /* Protects arenas initialization. */

#ifndef NO_TLS
/*
 * Map of pthread_self() --> arenas[???], used for selecting an arena to use
 * for allocations.
 */
static __thread arena_t	*arenas_map;
#define	get_arenas_map()	(arenas_map)
#define	set_arenas_map(x)	(arenas_map = x)
#else
static thread_key_t arenas_map_key;
#define	get_arenas_map()	thr_getspecific(arenas_map_key)
#define	set_arenas_map(x)	thr_setspecific(arenas_map_key, x)
#endif

#ifdef MALLOC_STATS
/* Chunk statistics. */
static chunk_stats_t	stats_chunks;
#endif

/*******************************/
/*
 * Runtime configuration options.
 */
const char	*_malloc_options;

#ifndef MALLOC_PRODUCTION
static bool	opt_abort = true;
static bool	opt_junk = true;
#else
static bool	opt_abort = false;
static bool	opt_junk = false;
#endif
static bool	opt_hint = false;
static bool	opt_print_stats = false;
static int	opt_quantum_2pow = QUANTUM_2POW_MIN;
static int	opt_small_max_2pow = SMALL_MAX_2POW_DEFAULT;
static int	opt_chunk_2pow = CHUNK_2POW_DEFAULT;
static bool	opt_utrace = false;
static bool	opt_sysv = false;
static bool	opt_xmalloc = false;
static bool	opt_zero = false;
static int32_t	opt_narenas_lshift = 0;

typedef struct {
	void	*p;
	size_t	s;
	void	*r;
} malloc_utrace_t;

#define	UTRACE(a, b, c)							\
	if (opt_utrace) {						\
		malloc_utrace_t ut;					\
		ut.p = a;						\
		ut.s = b;						\
		ut.r = c;						\
		xutrace(&ut, sizeof(ut));				\
	}

/******************************************************************************/
/*
 * Begin function prototypes for non-inline static functions.
 */

static void	wrtmessage(const char *p1, const char *p2, const char *p3,
		const char *p4);
#ifdef MALLOC_STATS
static void	malloc_printf(const char *format, ...);
#endif
static char	*umax2s(uintmax_t x, char *s);
static bool	base_pages_alloc(size_t minsize);
static void	*base_alloc(size_t size);
static chunk_node_t *base_chunk_node_alloc(void);
static void	base_chunk_node_dealloc(chunk_node_t *node);
#ifdef MALLOC_STATS
static void	stats_print(arena_t *arena);
#endif
static void	*pages_map(void *addr, size_t size);
static void	*pages_map_align(void *addr, size_t size, int align);
static void	pages_unmap(void *addr, size_t size);
static void	*chunk_alloc(size_t size);
static void	chunk_dealloc(void *chunk, size_t size);
static arena_t	*choose_arena_hard(void);
static void	arena_run_split(arena_t *arena, arena_run_t *run, size_t size);
static arena_chunk_t *arena_chunk_alloc(arena_t *arena);
static void	arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk);
static arena_run_t *arena_run_alloc(arena_t *arena, size_t size);
static void	arena_run_dalloc(arena_t *arena, arena_run_t *run, size_t size);
static arena_run_t *arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin);
static void *arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin);
static size_t arena_bin_run_size_calc(arena_bin_t *bin, size_t min_run_size);
static void	*arena_malloc(arena_t *arena, size_t size);
static void	*arena_palloc(arena_t *arena, size_t alignment, size_t size,
    size_t alloc_size);
static size_t	arena_salloc(const void *ptr);
static void	*arena_ralloc(void *ptr, size_t size, size_t oldsize);
static void	arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr);
static bool	arena_new(arena_t *arena);
static arena_t	*arenas_extend(unsigned ind);
static void	*huge_malloc(size_t size);
static void	*huge_palloc(size_t alignment, size_t size);
static void	*huge_ralloc(void *ptr, size_t size, size_t oldsize);
static void	huge_dalloc(void *ptr);
static void	*imalloc(size_t size);
static void	*ipalloc(size_t alignment, size_t size);
static void	*icalloc(size_t size);
static size_t	isalloc(const void *ptr);
static void	*iralloc(void *ptr, size_t size);
static void	idalloc(void *ptr);
static void	malloc_print_stats(void);
static bool	malloc_init_hard(void);

/*
 * End function prototypes.
 */
/******************************************************************************/
/*
 * Begin mutex.
 */

#ifdef __NetBSD__
#define	malloc_mutex_init(m)	mutex_init(m, NULL)
#define	malloc_mutex_lock(m)	mutex_lock(m)
#define	malloc_mutex_unlock(m)	mutex_unlock(m)
#else	/* __NetBSD__ */
static inline void
malloc_mutex_init(malloc_mutex_t *a_mutex)
{
	static const spinlock_t lock = _SPINLOCK_INITIALIZER;

	a_mutex->lock = lock;
}

static inline void
malloc_mutex_lock(malloc_mutex_t *a_mutex)
{

	if (__isthreaded)
		_SPINLOCK(&a_mutex->lock);
}

static inline void
malloc_mutex_unlock(malloc_mutex_t *a_mutex)
{

	if (__isthreaded)
		_SPINUNLOCK(&a_mutex->lock);
}
#endif	/* __NetBSD__ */

/*
 * End mutex.
 */
/******************************************************************************/
/*
 * Begin Utility functions/macros.
 */

/* Return the chunk address for allocation address a. */
#define	CHUNK_ADDR2BASE(a)						\
	((void *)((uintptr_t)(a) & ~chunksize_mask))

/* Return the chunk offset of address a. */
#define	CHUNK_ADDR2OFFSET(a)						\
	((size_t)((uintptr_t)(a) & chunksize_mask))

/* Return the smallest chunk multiple that is >= s. */
#define	CHUNK_CEILING(s)						\
	(((s) + chunksize_mask) & ~chunksize_mask)

/* Return the smallest cacheline multiple that is >= s. */
#define	CACHELINE_CEILING(s)						\
	(((s) + (CACHELINE - 1)) & ~(CACHELINE - 1))

/* Return the smallest quantum multiple that is >= a. */
#define	QUANTUM_CEILING(a)						\
	(((a) + quantum_mask) & ~quantum_mask)

/* Return the smallest pagesize multiple that is >= s. */
#define	PAGE_CEILING(s)							\
	(((s) + pagesize_mask) & ~pagesize_mask)
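
/*
 * Example of what the chunk macros above buy (a sketch; the hypothetical
 * helper below is not part of this file -- the real lookup happens in the
 * deallocation path further down): because chunks are chunksize-aligned,
 * masking an allocation's address yields the enclosing chunk header, and for
 * non-huge allocations that header records the owning arena.
 */
#if 0
static arena_t *
example_owning_arena(void *ptr)
{
	arena_chunk_t *chunk;

	/* Constant time: no table lookup or search is involved. */
	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	return (chunk->arena);
}
#endif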
    926  1.10.2.2  matt 
    927  1.10.2.2  matt /* Compute the smallest power of 2 that is >= x. */
    928  1.10.2.2  matt static inline size_t
    929  1.10.2.2  matt pow2_ceil(size_t x)
    930  1.10.2.2  matt {
    931  1.10.2.2  matt 
    932  1.10.2.2  matt 	x--;
    933  1.10.2.2  matt 	x |= x >> 1;
    934  1.10.2.2  matt 	x |= x >> 2;
    935  1.10.2.2  matt 	x |= x >> 4;
    936  1.10.2.2  matt 	x |= x >> 8;
    937  1.10.2.2  matt 	x |= x >> 16;
    938  1.10.2.2  matt #if (SIZEOF_PTR == 8)
    939  1.10.2.2  matt 	x |= x >> 32;
    940  1.10.2.2  matt #endif
    941  1.10.2.2  matt 	x++;
    942  1.10.2.2  matt 	return (x);
    943  1.10.2.2  matt }
    944  1.10.2.2  matt 
    945  1.10.2.2  matt static void
    946  1.10.2.2  matt wrtmessage(const char *p1, const char *p2, const char *p3, const char *p4)
    947  1.10.2.2  matt {
    948  1.10.2.2  matt 
    949  1.10.2.3  matt 	write(STDERR_FILENO, p1, strlen(p1));
    950  1.10.2.3  matt 	write(STDERR_FILENO, p2, strlen(p2));
    951  1.10.2.3  matt 	write(STDERR_FILENO, p3, strlen(p3));
    952  1.10.2.3  matt 	write(STDERR_FILENO, p4, strlen(p4));
    953  1.10.2.2  matt }
    954  1.10.2.2  matt 
    955  1.10.2.2  matt void	(*_malloc_message)(const char *p1, const char *p2, const char *p3,
    956  1.10.2.2  matt 	    const char *p4) = wrtmessage;
    957  1.10.2.2  matt 
    958  1.10.2.2  matt #ifdef MALLOC_STATS
    959  1.10.2.2  matt /*
    960  1.10.2.2  matt  * Print to stderr in such a way as to (hopefully) avoid memory allocation.
    961  1.10.2.2  matt  */
    962  1.10.2.2  matt static void
    963  1.10.2.2  matt malloc_printf(const char *format, ...)
    964  1.10.2.2  matt {
    965  1.10.2.2  matt 	char buf[4096];
    966  1.10.2.2  matt 	va_list ap;
    967  1.10.2.2  matt 
    968  1.10.2.2  matt 	va_start(ap, format);
    969  1.10.2.2  matt 	vsnprintf(buf, sizeof(buf), format, ap);
    970  1.10.2.2  matt 	va_end(ap);
    971  1.10.2.2  matt 	_malloc_message(buf, "", "", "");
    972  1.10.2.2  matt }
    973  1.10.2.2  matt #endif
    974  1.10.2.2  matt 
    975  1.10.2.2  matt /*
    976  1.10.2.2  matt  * We don't want to depend on vsnprintf() for production builds, since that can
    977  1.10.2.2  matt  * cause unnecessary bloat for static binaries.  umax2s() provides minimal
    978  1.10.2.2  matt  * integer printing functionality, so that malloc_printf() use can be limited to
    979  1.10.2.2  matt  * MALLOC_STATS code.
    980  1.10.2.2  matt  */
    981  1.10.2.2  matt #define UMAX2S_BUFSIZE	21
    982  1.10.2.2  matt static char *
    983  1.10.2.2  matt umax2s(uintmax_t x, char *s)
    984  1.10.2.2  matt {
    985  1.10.2.2  matt 	unsigned i;
    986  1.10.2.2  matt 
    987  1.10.2.2  matt 	/* Make sure UMAX2S_BUFSIZE is large enough. */
    988  1.10.2.2  matt 	/* LINTED */
    989  1.10.2.2  matt 	assert(sizeof(uintmax_t) <= 8);
    990  1.10.2.2  matt 
    991  1.10.2.2  matt 	i = UMAX2S_BUFSIZE - 1;
    992  1.10.2.2  matt 	s[i] = '\0';
    993  1.10.2.2  matt 	do {
    994  1.10.2.2  matt 		i--;
    995  1.10.2.2  matt 		s[i] = "0123456789"[(int)x % 10];
    996  1.10.2.2  matt 		x /= (uintmax_t)10LL;
    997  1.10.2.2  matt 	} while (x > 0);
    998  1.10.2.2  matt 
    999  1.10.2.2  matt 	return (&s[i]);
   1000  1.10.2.2  matt }
   1001  1.10.2.2  matt 
   1002  1.10.2.2  matt /******************************************************************************/
   1003  1.10.2.2  matt 
   1004  1.10.2.2  matt static bool
   1005  1.10.2.2  matt base_pages_alloc(size_t minsize)
   1006  1.10.2.2  matt {
   1007  1.10.2.2  matt 	size_t csize = 0;
   1008  1.10.2.2  matt 
   1009  1.10.2.2  matt #ifdef USE_BRK
   1010  1.10.2.2  matt 	/*
   1011  1.10.2.2  matt 	 * Do special brk allocation here, since base allocations don't need to
   1012  1.10.2.2  matt 	 * be chunk-aligned.
   1013  1.10.2.2  matt 	 */
   1014  1.10.2.2  matt 	if (brk_prev != (void *)-1) {
   1015  1.10.2.2  matt 		void *brk_cur;
   1016  1.10.2.2  matt 		intptr_t incr;
   1017  1.10.2.2  matt 
   1018  1.10.2.2  matt 		if (minsize != 0)
   1019  1.10.2.2  matt 			csize = CHUNK_CEILING(minsize);
   1020  1.10.2.2  matt 
   1021  1.10.2.2  matt 		malloc_mutex_lock(&brk_mtx);
   1022  1.10.2.2  matt 		do {
   1023  1.10.2.2  matt 			/* Get the current end of brk. */
   1024  1.10.2.2  matt 			brk_cur = sbrk(0);
   1025  1.10.2.2  matt 
   1026  1.10.2.2  matt 			/*
   1027  1.10.2.2  matt 			 * Calculate how much padding is necessary to
   1028  1.10.2.2  matt 			 * chunk-align the end of brk.  Don't worry about
   1029  1.10.2.2  matt 			 * brk_cur not being chunk-aligned though.
   1030  1.10.2.2  matt 			 */
   1031  1.10.2.2  matt 			incr = (intptr_t)chunksize
   1032  1.10.2.2  matt 			    - (intptr_t)CHUNK_ADDR2OFFSET(brk_cur);
   1033  1.10.2.2  matt 			if (incr < minsize)
   1034  1.10.2.2  matt 				incr += csize;
   1035  1.10.2.2  matt 
   1036  1.10.2.2  matt 			brk_prev = sbrk(incr);
   1037  1.10.2.2  matt 			if (brk_prev == brk_cur) {
   1038  1.10.2.2  matt 				/* Success. */
   1039  1.10.2.2  matt 				malloc_mutex_unlock(&brk_mtx);
   1040  1.10.2.2  matt 				base_pages = brk_cur;
   1041  1.10.2.2  matt 				base_next_addr = base_pages;
   1042  1.10.2.2  matt 				base_past_addr = (void *)((uintptr_t)base_pages
   1043  1.10.2.2  matt 				    + incr);
   1044  1.10.2.2  matt #ifdef MALLOC_STATS
   1045  1.10.2.2  matt 				base_mapped += incr;
   1046  1.10.2.2  matt #endif
   1047  1.10.2.2  matt 				return (false);
   1048  1.10.2.2  matt 			}
   1049  1.10.2.2  matt 		} while (brk_prev != (void *)-1);
   1050  1.10.2.2  matt 		malloc_mutex_unlock(&brk_mtx);
   1051  1.10.2.2  matt 	}
   1052  1.10.2.2  matt 	if (minsize == 0) {
   1053  1.10.2.2  matt 		/*
   1054  1.10.2.2  matt 		 * Failure during initialization doesn't matter, so avoid
   1055  1.10.2.2  matt 		 * falling through to the mmap-based page mapping code.
   1056  1.10.2.2  matt 		 */
   1057  1.10.2.2  matt 		return (true);
   1058  1.10.2.2  matt 	}
   1059  1.10.2.2  matt #endif
   1060  1.10.2.2  matt 	assert(minsize != 0);
   1061  1.10.2.2  matt 	csize = PAGE_CEILING(minsize);
   1062  1.10.2.2  matt 	base_pages = pages_map(NULL, csize);
   1063  1.10.2.2  matt 	if (base_pages == NULL)
   1064  1.10.2.2  matt 		return (true);
   1065  1.10.2.2  matt 	base_next_addr = base_pages;
   1066  1.10.2.2  matt 	base_past_addr = (void *)((uintptr_t)base_pages + csize);
   1067  1.10.2.2  matt #ifdef MALLOC_STATS
   1068  1.10.2.2  matt 	base_mapped += csize;
   1069  1.10.2.2  matt #endif
   1070  1.10.2.2  matt 	return (false);
   1071  1.10.2.2  matt }
   1072  1.10.2.2  matt 
   1073  1.10.2.2  matt static void *
   1074  1.10.2.2  matt base_alloc(size_t size)
   1075  1.10.2.2  matt {
   1076  1.10.2.2  matt 	void *ret;
   1077  1.10.2.2  matt 	size_t csize;
   1078  1.10.2.2  matt 
   1079  1.10.2.2  matt 	/* Round size up to nearest multiple of the cacheline size. */
   1080  1.10.2.2  matt 	csize = CACHELINE_CEILING(size);
   1081  1.10.2.2  matt 
   1082  1.10.2.2  matt 	malloc_mutex_lock(&base_mtx);
   1083  1.10.2.2  matt 
   1084  1.10.2.2  matt 	/* Make sure there's enough space for the allocation. */
   1085  1.10.2.2  matt 	if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) {
   1086  1.10.2.2  matt 		if (base_pages_alloc(csize)) {
   1087  1.10.2.2  matt 			ret = NULL;
   1088  1.10.2.2  matt 			goto RETURN;
   1089  1.10.2.2  matt 		}
   1090  1.10.2.2  matt 	}
   1091  1.10.2.2  matt 
   1092  1.10.2.2  matt 	/* Allocate. */
   1093  1.10.2.2  matt 	ret = base_next_addr;
   1094  1.10.2.2  matt 	base_next_addr = (void *)((uintptr_t)base_next_addr + csize);
   1095  1.10.2.2  matt 
   1096  1.10.2.2  matt RETURN:
   1097  1.10.2.2  matt 	malloc_mutex_unlock(&base_mtx);
   1098  1.10.2.2  matt 	return (ret);
   1099  1.10.2.2  matt }
   1100  1.10.2.2  matt 
   1101  1.10.2.2  matt static chunk_node_t *
   1102  1.10.2.2  matt base_chunk_node_alloc(void)
   1103  1.10.2.2  matt {
   1104  1.10.2.2  matt 	chunk_node_t *ret;
   1105  1.10.2.2  matt 
   1106  1.10.2.2  matt 	malloc_mutex_lock(&base_mtx);
   1107  1.10.2.2  matt 	if (base_chunk_nodes != NULL) {
   1108  1.10.2.2  matt 		ret = base_chunk_nodes;
   1109  1.10.2.2  matt 		/* LINTED */
   1110  1.10.2.2  matt 		base_chunk_nodes = *(chunk_node_t **)ret;
   1111  1.10.2.2  matt 		malloc_mutex_unlock(&base_mtx);
   1112  1.10.2.2  matt 	} else {
   1113  1.10.2.2  matt 		malloc_mutex_unlock(&base_mtx);
   1114  1.10.2.2  matt 		ret = (chunk_node_t *)base_alloc(sizeof(chunk_node_t));
   1115  1.10.2.2  matt 	}
   1116  1.10.2.2  matt 
   1117  1.10.2.2  matt 	return (ret);
   1118  1.10.2.2  matt }
   1119  1.10.2.2  matt 
   1120  1.10.2.2  matt static void
   1121  1.10.2.2  matt base_chunk_node_dealloc(chunk_node_t *node)
   1122  1.10.2.2  matt {
   1123  1.10.2.2  matt 
   1124  1.10.2.2  matt 	malloc_mutex_lock(&base_mtx);
   1125  1.10.2.2  matt 	/* LINTED */
   1126  1.10.2.2  matt 	*(chunk_node_t **)node = base_chunk_nodes;
   1127  1.10.2.2  matt 	base_chunk_nodes = node;
   1128  1.10.2.2  matt 	malloc_mutex_unlock(&base_mtx);
   1129  1.10.2.2  matt }
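/*
 * The pair of functions above recycles chunk_node_t's through a simple LIFO
 * free list that stores the link in the first word of each freed node, e.g.:
 *
 *	chunk_node_t *n = base_chunk_node_alloc();
 *	...
 *	base_chunk_node_dealloc(n);	(n becomes the list head)
 *	n = base_chunk_node_alloc();	(returns the same node again)
 *
 * This is also why a chunk_node_t must be at least pointer-sized.
 */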
   1130  1.10.2.2  matt 
   1131  1.10.2.2  matt /******************************************************************************/
   1132  1.10.2.2  matt 
   1133  1.10.2.2  matt #ifdef MALLOC_STATS
   1134  1.10.2.2  matt static void
   1135  1.10.2.2  matt stats_print(arena_t *arena)
   1136  1.10.2.2  matt {
   1137  1.10.2.2  matt 	unsigned i;
   1138  1.10.2.2  matt 	int gap_start;
   1139  1.10.2.2  matt 
   1140  1.10.2.2  matt 	malloc_printf(
   1141  1.10.2.2  matt 	    "          allocated/mapped            nmalloc      ndalloc\n");
   1142  1.10.2.2  matt 
   1143  1.10.2.2  matt 	malloc_printf("small: %12zu %-12s %12llu %12llu\n",
   1144  1.10.2.2  matt 	    arena->stats.allocated_small, "", arena->stats.nmalloc_small,
   1145  1.10.2.2  matt 	    arena->stats.ndalloc_small);
   1146  1.10.2.2  matt 	malloc_printf("large: %12zu %-12s %12llu %12llu\n",
   1147  1.10.2.2  matt 	    arena->stats.allocated_large, "", arena->stats.nmalloc_large,
   1148  1.10.2.2  matt 	    arena->stats.ndalloc_large);
   1149  1.10.2.2  matt 	malloc_printf("total: %12zu/%-12zu %12llu %12llu\n",
   1150  1.10.2.2  matt 	    arena->stats.allocated_small + arena->stats.allocated_large,
   1151  1.10.2.2  matt 	    arena->stats.mapped,
   1152  1.10.2.2  matt 	    arena->stats.nmalloc_small + arena->stats.nmalloc_large,
   1153  1.10.2.2  matt 	    arena->stats.ndalloc_small + arena->stats.ndalloc_large);
   1154  1.10.2.2  matt 
   1155  1.10.2.2  matt 	malloc_printf("bins:     bin   size regs pgs  requests   newruns"
   1156  1.10.2.2  matt 	    "    reruns maxruns curruns\n");
   1157  1.10.2.2  matt 	for (i = 0, gap_start = -1; i < ntbins + nqbins + nsbins; i++) {
   1158  1.10.2.2  matt 		if (arena->bins[i].stats.nrequests == 0) {
   1159  1.10.2.2  matt 			if (gap_start == -1)
   1160  1.10.2.2  matt 				gap_start = i;
   1161  1.10.2.2  matt 		} else {
   1162  1.10.2.2  matt 			if (gap_start != -1) {
   1163  1.10.2.2  matt 				if (i > gap_start + 1) {
   1164  1.10.2.2  matt 					/* Gap of more than one size class. */
   1165  1.10.2.2  matt 					malloc_printf("[%u..%u]\n",
   1166  1.10.2.2  matt 					    gap_start, i - 1);
   1167  1.10.2.2  matt 				} else {
   1168  1.10.2.2  matt 					/* Gap of one size class. */
   1169  1.10.2.2  matt 					malloc_printf("[%u]\n", gap_start);
   1170  1.10.2.2  matt 				}
   1171  1.10.2.2  matt 				gap_start = -1;
   1172  1.10.2.2  matt 			}
   1173  1.10.2.2  matt 			malloc_printf(
   1174  1.10.2.2  matt 			    "%13u %1s %4u %4u %3u %9llu %9llu"
   1175  1.10.2.2  matt 			    " %9llu %7lu %7lu\n",
   1176  1.10.2.2  matt 			    i,
   1177  1.10.2.2  matt 			    i < ntbins ? "T" : i < ntbins + nqbins ? "Q" : "S",
   1178  1.10.2.2  matt 			    arena->bins[i].reg_size,
   1179  1.10.2.2  matt 			    arena->bins[i].nregs,
   1180  1.10.2.2  matt 			    arena->bins[i].run_size >> pagesize_2pow,
   1181  1.10.2.2  matt 			    arena->bins[i].stats.nrequests,
   1182  1.10.2.2  matt 			    arena->bins[i].stats.nruns,
   1183  1.10.2.2  matt 			    arena->bins[i].stats.reruns,
   1184  1.10.2.2  matt 			    arena->bins[i].stats.highruns,
   1185  1.10.2.2  matt 			    arena->bins[i].stats.curruns);
   1186  1.10.2.2  matt 		}
   1187  1.10.2.2  matt 	}
   1188  1.10.2.2  matt 	if (gap_start != -1) {
   1189  1.10.2.2  matt 		if (i > gap_start + 1) {
   1190  1.10.2.2  matt 			/* Gap of more than one size class. */
   1191  1.10.2.2  matt 			malloc_printf("[%u..%u]\n", gap_start, i - 1);
   1192  1.10.2.2  matt 		} else {
   1193  1.10.2.2  matt 			/* Gap of one size class. */
   1194  1.10.2.2  matt 			malloc_printf("[%u]\n", gap_start);
   1195  1.10.2.2  matt 		}
   1196  1.10.2.2  matt 	}
   1197  1.10.2.2  matt }
   1198  1.10.2.2  matt #endif
   1199  1.10.2.2  matt 
   1200  1.10.2.2  matt /*
   1201  1.10.2.2  matt  * End Utility functions/macros.
   1202  1.10.2.2  matt  */
   1203  1.10.2.2  matt /******************************************************************************/
   1204  1.10.2.2  matt /*
   1205  1.10.2.2  matt  * Begin chunk management functions.
   1206  1.10.2.2  matt  */
   1207  1.10.2.2  matt 
   1208  1.10.2.2  matt #ifndef lint
   1209  1.10.2.2  matt static inline int
   1210  1.10.2.2  matt chunk_comp(chunk_node_t *a, chunk_node_t *b)
   1211  1.10.2.2  matt {
   1212  1.10.2.2  matt 
   1213  1.10.2.2  matt 	assert(a != NULL);
   1214  1.10.2.2  matt 	assert(b != NULL);
   1215  1.10.2.2  matt 
   1216  1.10.2.2  matt 	if ((uintptr_t)a->chunk < (uintptr_t)b->chunk)
   1217  1.10.2.2  matt 		return (-1);
   1218  1.10.2.2  matt 	else if (a->chunk == b->chunk)
   1219  1.10.2.2  matt 		return (0);
   1220  1.10.2.2  matt 	else
   1221  1.10.2.2  matt 		return (1);
   1222  1.10.2.2  matt }
   1223  1.10.2.2  matt 
   1224  1.10.2.2  matt /* Generate red-black tree code for chunks. */
   1225  1.10.2.2  matt RB_GENERATE_STATIC(chunk_tree_s, chunk_node_s, link, chunk_comp);
   1226  1.10.2.2  matt #endif
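/*
 * chunk_comp() orders nodes by chunk start address only, so tree lookups need
 * nothing more than a stack-allocated key node with just its chunk field set,
 * as chunk_alloc() below does:
 *
 *	chunk_node_t key;
 *	key.chunk = addr;
 *	tchunk = RB_NFIND(chunk_tree_s, &old_chunks, &key);
 */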
   1227  1.10.2.2  matt 
   1228  1.10.2.2  matt static void *
   1229  1.10.2.2  matt pages_map_align(void *addr, size_t size, int align)
   1230  1.10.2.2  matt {
   1231  1.10.2.2  matt 	void *ret;
   1232  1.10.2.2  matt 
   1233  1.10.2.2  matt 	/*
   1234  1.10.2.2  matt 	 * We don't use MAP_FIXED here, because it can cause the *replacement*
   1235  1.10.2.2  matt 	 * of existing mappings, and we only want to create new mappings.
   1236  1.10.2.2  matt 	 */
   1237  1.10.2.2  matt 	ret = mmap(addr, size, PROT_READ | PROT_WRITE,
   1238  1.10.2.2  matt 	    MAP_PRIVATE | MAP_ANON | MAP_ALIGNED(align), -1, 0);
   1239  1.10.2.2  matt 	assert(ret != NULL);
   1240  1.10.2.2  matt 
   1241  1.10.2.2  matt 	if (ret == MAP_FAILED)
   1242  1.10.2.2  matt 		ret = NULL;
   1243  1.10.2.2  matt 	else if (addr != NULL && ret != addr) {
   1244  1.10.2.2  matt 		/*
   1245  1.10.2.2  matt 		 * We succeeded in mapping memory, but not in the right place.
   1246  1.10.2.2  matt 		 */
   1247  1.10.2.2  matt 		if (munmap(ret, size) == -1) {
   1248  1.10.2.2  matt 			char buf[STRERROR_BUF];
   1249  1.10.2.2  matt 
   1250  1.10.2.3  matt 			STRERROR_R(errno, buf, sizeof(buf));
   1251  1.10.2.3  matt 			_malloc_message(getprogname(),
   1252  1.10.2.2  matt 			    ": (malloc) Error in munmap(): ", buf, "\n");
   1253  1.10.2.2  matt 			if (opt_abort)
   1254  1.10.2.2  matt 				abort();
   1255  1.10.2.2  matt 		}
   1256  1.10.2.2  matt 		ret = NULL;
   1257  1.10.2.2  matt 	}
   1258  1.10.2.2  matt 
   1259  1.10.2.2  matt 	assert(ret == NULL || (addr == NULL && ret != addr)
   1260  1.10.2.2  matt 	    || (addr != NULL && ret == addr));
   1261  1.10.2.2  matt 	return (ret);
   1262  1.10.2.2  matt }
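/*
 * Typical use (see chunk_alloc() below): ask for a chunk-aligned mapping
 * anywhere in the address space, and fall back to other strategies on
 * failure:
 *
 *	ret = pages_map_align(NULL, chunksize, chunksize_2pow);
 *	if (ret == NULL)
 *		(try brk, etc.)
 */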
   1263  1.10.2.2  matt 
   1264  1.10.2.2  matt static void *
   1265  1.10.2.2  matt pages_map(void *addr, size_t size)
   1266  1.10.2.2  matt {
   1267  1.10.2.2  matt 
   1268  1.10.2.2  matt 	return pages_map_align(addr, size, 0);
   1269  1.10.2.2  matt }
   1270  1.10.2.2  matt 
   1271  1.10.2.2  matt static void
   1272  1.10.2.2  matt pages_unmap(void *addr, size_t size)
   1273  1.10.2.2  matt {
   1274  1.10.2.2  matt 
   1275  1.10.2.2  matt 	if (munmap(addr, size) == -1) {
   1276  1.10.2.2  matt 		char buf[STRERROR_BUF];
   1277  1.10.2.2  matt 
   1278  1.10.2.3  matt 		STRERROR_R(errno, buf, sizeof(buf));
   1279  1.10.2.3  matt 		_malloc_message(getprogname(),
   1280  1.10.2.2  matt 		    ": (malloc) Error in munmap(): ", buf, "\n");
   1281  1.10.2.2  matt 		if (opt_abort)
   1282  1.10.2.2  matt 			abort();
   1283  1.10.2.2  matt 	}
   1284  1.10.2.2  matt }
   1285  1.10.2.2  matt 
   1286  1.10.2.2  matt static void *
   1287  1.10.2.2  matt chunk_alloc(size_t size)
   1288  1.10.2.2  matt {
   1289  1.10.2.2  matt 	void *ret, *chunk;
   1290  1.10.2.2  matt 	chunk_node_t *tchunk, *delchunk;
   1291  1.10.2.2  matt 
   1292  1.10.2.2  matt 	assert(size != 0);
   1293  1.10.2.2  matt 	assert((size & chunksize_mask) == 0);
   1294  1.10.2.2  matt 
   1295  1.10.2.2  matt 	malloc_mutex_lock(&chunks_mtx);
   1296  1.10.2.2  matt 
   1297  1.10.2.2  matt 	if (size == chunksize) {
   1298  1.10.2.2  matt 		/*
   1299  1.10.2.2  matt 		 * Check for address ranges that were previously chunks and try
   1300  1.10.2.2  matt 		 * to use them.
   1301  1.10.2.2  matt 		 */
   1302  1.10.2.2  matt 
   1303  1.10.2.2  matt 		/* LINTED */
   1304  1.10.2.2  matt 		tchunk = RB_MIN(chunk_tree_s, &old_chunks);
   1305  1.10.2.2  matt 		while (tchunk != NULL) {
   1306  1.10.2.2  matt 			/* Found an address range.  Try to recycle it. */
   1307  1.10.2.2  matt 
   1308  1.10.2.2  matt 			chunk = tchunk->chunk;
   1309  1.10.2.2  matt 			delchunk = tchunk;
   1310  1.10.2.2  matt 			/* LINTED */
   1311  1.10.2.2  matt 			tchunk = RB_NEXT(chunk_tree_s, &old_chunks, delchunk);
   1312  1.10.2.2  matt 
   1313  1.10.2.2  matt 			/* Remove delchunk from the tree. */
   1314  1.10.2.2  matt 			/* LINTED */
   1315  1.10.2.2  matt 			RB_REMOVE(chunk_tree_s, &old_chunks, delchunk);
   1316  1.10.2.2  matt 			base_chunk_node_dealloc(delchunk);
   1317  1.10.2.2  matt 
   1318  1.10.2.2  matt #ifdef USE_BRK
   1319  1.10.2.2  matt 			if ((uintptr_t)chunk >= (uintptr_t)brk_base
   1320  1.10.2.2  matt 			    && (uintptr_t)chunk < (uintptr_t)brk_max) {
   1321  1.10.2.2  matt 				/* Re-use a previously freed brk chunk. */
   1322  1.10.2.2  matt 				ret = chunk;
   1323  1.10.2.2  matt 				goto RETURN;
   1324  1.10.2.2  matt 			}
   1325  1.10.2.2  matt #endif
   1326  1.10.2.2  matt 			if ((ret = pages_map(chunk, size)) != NULL) {
   1327  1.10.2.2  matt 				/* Success. */
   1328  1.10.2.2  matt 				goto RETURN;
   1329  1.10.2.2  matt 			}
   1330  1.10.2.2  matt 		}
   1331  1.10.2.2  matt 	}
   1332  1.10.2.2  matt 
    1333  1.10.2.2  matt 	/*
    1334  1.10.2.2  matt 	 * Ask for a chunk-aligned mapping, but allow the OS to place the
    1335  1.10.2.2  matt 	 * allocation anywhere.  Beware of size_t wrap-around.
    1336  1.10.2.2  matt 	 */
   1337  1.10.2.2  matt 	if (size + chunksize > size) {
   1338  1.10.2.2  matt 		if ((ret = pages_map_align(NULL, size, chunksize_2pow))
   1339  1.10.2.2  matt 		    != NULL) {
   1340  1.10.2.2  matt 			goto RETURN;
   1341  1.10.2.2  matt 		}
   1342  1.10.2.2  matt 	}
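	/*
	 * The (size + chunksize > size) test above is an overflow guard: with
	 * 32-bit size_t and the default 1 MB chunksize, for example, a request
	 * of 0xfff80000 bytes would make size + chunksize wrap to 0x00080000,
	 * which is not greater than size, so the mapping attempt is skipped.
	 */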
   1343  1.10.2.2  matt 
   1344  1.10.2.2  matt #ifdef USE_BRK
   1345  1.10.2.2  matt 	/*
   1346  1.10.2.2  matt 	 * Try to create allocations in brk, in order to make full use of
   1347  1.10.2.2  matt 	 * limited address space.
   1348  1.10.2.2  matt 	 */
   1349  1.10.2.2  matt 	if (brk_prev != (void *)-1) {
   1350  1.10.2.2  matt 		void *brk_cur;
   1351  1.10.2.2  matt 		intptr_t incr;
   1352  1.10.2.2  matt 
   1353  1.10.2.2  matt 		/*
   1354  1.10.2.2  matt 		 * The loop is necessary to recover from races with other
   1355  1.10.2.2  matt 		 * threads that are using brk for something other than malloc.
   1356  1.10.2.2  matt 		 */
   1357  1.10.2.2  matt 		malloc_mutex_lock(&brk_mtx);
   1358  1.10.2.2  matt 		do {
   1359  1.10.2.2  matt 			/* Get the current end of brk. */
   1360  1.10.2.2  matt 			brk_cur = sbrk(0);
   1361  1.10.2.2  matt 
   1362  1.10.2.2  matt 			/*
   1363  1.10.2.2  matt 			 * Calculate how much padding is necessary to
   1364  1.10.2.2  matt 			 * chunk-align the end of brk.
   1365  1.10.2.2  matt 			 */
   1366  1.10.2.2  matt 			incr = (intptr_t)size
   1367  1.10.2.2  matt 			    - (intptr_t)CHUNK_ADDR2OFFSET(brk_cur);
    1368  1.10.2.2  matt 			if (incr == (intptr_t)size) {
   1369  1.10.2.2  matt 				ret = brk_cur;
   1370  1.10.2.2  matt 			} else {
   1371  1.10.2.2  matt 				ret = (void *)((intptr_t)brk_cur + incr);
   1372  1.10.2.2  matt 				incr += size;
   1373  1.10.2.2  matt 			}
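			/*
			 * Worked example, assuming the default 1 MB chunksize
			 * and size == chunksize: if brk_cur == 0x0804f000,
			 * then CHUNK_ADDR2OFFSET(brk_cur) == 0x4f000 and
			 * incr == 0x100000 - 0x4f000 == 0xb1000, so
			 * ret == 0x08100000 (chunk-aligned) and the total
			 * sbrk() extension becomes 0xb1000 + 0x100000 bytes,
			 * ending exactly at ret + size.
			 */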
   1374  1.10.2.2  matt 
   1375  1.10.2.2  matt 			brk_prev = sbrk(incr);
   1376  1.10.2.2  matt 			if (brk_prev == brk_cur) {
   1377  1.10.2.2  matt 				/* Success. */
   1378  1.10.2.2  matt 				malloc_mutex_unlock(&brk_mtx);
   1379  1.10.2.2  matt 				brk_max = (void *)((intptr_t)ret + size);
   1380  1.10.2.2  matt 				goto RETURN;
   1381  1.10.2.2  matt 			}
   1382  1.10.2.2  matt 		} while (brk_prev != (void *)-1);
   1383  1.10.2.2  matt 		malloc_mutex_unlock(&brk_mtx);
   1384  1.10.2.2  matt 	}
   1385  1.10.2.2  matt #endif
   1386  1.10.2.2  matt 
   1387  1.10.2.2  matt 	/* All strategies for allocation failed. */
   1388  1.10.2.2  matt 	ret = NULL;
   1389  1.10.2.2  matt RETURN:
   1390  1.10.2.2  matt 	if (ret != NULL) {
   1391  1.10.2.2  matt 		chunk_node_t key;
   1392  1.10.2.2  matt 		/*
   1393  1.10.2.2  matt 		 * Clean out any entries in old_chunks that overlap with the
   1394  1.10.2.2  matt 		 * memory we just allocated.
   1395  1.10.2.2  matt 		 */
   1396  1.10.2.2  matt 		key.chunk = ret;
   1397  1.10.2.2  matt 		/* LINTED */
   1398  1.10.2.2  matt 		tchunk = RB_NFIND(chunk_tree_s, &old_chunks, &key);
   1399  1.10.2.2  matt 		while (tchunk != NULL
   1400  1.10.2.2  matt 		    && (uintptr_t)tchunk->chunk >= (uintptr_t)ret
   1401  1.10.2.2  matt 		    && (uintptr_t)tchunk->chunk < (uintptr_t)ret + size) {
   1402  1.10.2.2  matt 			delchunk = tchunk;
   1403  1.10.2.2  matt 			/* LINTED */
   1404  1.10.2.2  matt 			tchunk = RB_NEXT(chunk_tree_s, &old_chunks, delchunk);
   1405  1.10.2.2  matt 			/* LINTED */
   1406  1.10.2.2  matt 			RB_REMOVE(chunk_tree_s, &old_chunks, delchunk);
   1407  1.10.2.2  matt 			base_chunk_node_dealloc(delchunk);
   1408  1.10.2.2  matt 		}
   1409  1.10.2.2  matt 
   1410  1.10.2.2  matt 	}
   1411  1.10.2.2  matt #ifdef MALLOC_STATS
   1412  1.10.2.2  matt 	if (ret != NULL) {
   1413  1.10.2.2  matt 		stats_chunks.nchunks += (size / chunksize);
   1414  1.10.2.2  matt 		stats_chunks.curchunks += (size / chunksize);
   1415  1.10.2.2  matt 	}
   1416  1.10.2.2  matt 	if (stats_chunks.curchunks > stats_chunks.highchunks)
   1417  1.10.2.2  matt 		stats_chunks.highchunks = stats_chunks.curchunks;
   1418  1.10.2.2  matt #endif
   1419  1.10.2.2  matt 	malloc_mutex_unlock(&chunks_mtx);
   1420  1.10.2.2  matt 
   1421  1.10.2.2  matt 	assert(CHUNK_ADDR2BASE(ret) == ret);
   1422  1.10.2.2  matt 	return (ret);
   1423  1.10.2.2  matt }
   1424  1.10.2.2  matt 
   1425  1.10.2.2  matt static void
   1426  1.10.2.2  matt chunk_dealloc(void *chunk, size_t size)
   1427  1.10.2.2  matt {
   1428  1.10.2.2  matt 	chunk_node_t *node;
   1429  1.10.2.2  matt 
   1430  1.10.2.2  matt 	assert(chunk != NULL);
   1431  1.10.2.2  matt 	assert(CHUNK_ADDR2BASE(chunk) == chunk);
   1432  1.10.2.2  matt 	assert(size != 0);
   1433  1.10.2.2  matt 	assert((size & chunksize_mask) == 0);
   1434  1.10.2.2  matt 
   1435  1.10.2.2  matt 	malloc_mutex_lock(&chunks_mtx);
   1436  1.10.2.2  matt 
   1437  1.10.2.2  matt #ifdef USE_BRK
   1438  1.10.2.2  matt 	if ((uintptr_t)chunk >= (uintptr_t)brk_base
   1439  1.10.2.2  matt 	    && (uintptr_t)chunk < (uintptr_t)brk_max) {
   1440  1.10.2.2  matt 		void *brk_cur;
   1441  1.10.2.2  matt 
   1442  1.10.2.2  matt 		malloc_mutex_lock(&brk_mtx);
   1443  1.10.2.2  matt 		/* Get the current end of brk. */
   1444  1.10.2.2  matt 		brk_cur = sbrk(0);
   1445  1.10.2.2  matt 
   1446  1.10.2.2  matt 		/*
   1447  1.10.2.2  matt 		 * Try to shrink the data segment if this chunk is at the end
   1448  1.10.2.2  matt 		 * of the data segment.  The sbrk() call here is subject to a
   1449  1.10.2.2  matt 		 * race condition with threads that use brk(2) or sbrk(2)
   1450  1.10.2.2  matt 		 * directly, but the alternative would be to leak memory for
   1451  1.10.2.2  matt 		 * the sake of poorly designed multi-threaded programs.
   1452  1.10.2.2  matt 		 */
   1453  1.10.2.2  matt 		if (brk_cur == brk_max
   1454  1.10.2.2  matt 		    && (void *)((uintptr_t)chunk + size) == brk_max
   1455  1.10.2.2  matt 		    && sbrk(-(intptr_t)size) == brk_max) {
   1456  1.10.2.2  matt 			malloc_mutex_unlock(&brk_mtx);
   1457  1.10.2.2  matt 			if (brk_prev == brk_max) {
   1458  1.10.2.2  matt 				/* Success. */
   1459  1.10.2.2  matt 				brk_prev = (void *)((intptr_t)brk_max
   1460  1.10.2.2  matt 				    - (intptr_t)size);
   1461  1.10.2.2  matt 				brk_max = brk_prev;
   1462  1.10.2.2  matt 			}
   1463  1.10.2.2  matt 		} else {
   1464  1.10.2.2  matt 			size_t offset;
   1465  1.10.2.2  matt 
   1466  1.10.2.2  matt 			malloc_mutex_unlock(&brk_mtx);
   1467  1.10.2.2  matt 			madvise(chunk, size, MADV_FREE);
   1468  1.10.2.2  matt 
   1469  1.10.2.2  matt 			/*
   1470  1.10.2.2  matt 			 * Iteratively create records of each chunk-sized
   1471  1.10.2.2  matt 			 * memory region that 'chunk' is comprised of, so that
   1472  1.10.2.2  matt 			 * the address range can be recycled if memory usage
   1473  1.10.2.2  matt 			 * increases later on.
   1474  1.10.2.2  matt 			 */
   1475  1.10.2.2  matt 			for (offset = 0; offset < size; offset += chunksize) {
   1476  1.10.2.2  matt 				node = base_chunk_node_alloc();
   1477  1.10.2.2  matt 				if (node == NULL)
   1478  1.10.2.2  matt 					break;
   1479  1.10.2.2  matt 
   1480  1.10.2.2  matt 				node->chunk = (void *)((uintptr_t)chunk
   1481  1.10.2.2  matt 				    + (uintptr_t)offset);
   1482  1.10.2.2  matt 				node->size = chunksize;
   1483  1.10.2.2  matt 				/* LINTED */
   1484  1.10.2.2  matt 				RB_INSERT(chunk_tree_s, &old_chunks, node);
   1485  1.10.2.2  matt 			}
   1486  1.10.2.2  matt 		}
   1487  1.10.2.2  matt 	} else {
   1488  1.10.2.2  matt #endif
   1489  1.10.2.2  matt 		pages_unmap(chunk, size);
   1490  1.10.2.2  matt 
   1491  1.10.2.2  matt 		/*
   1492  1.10.2.2  matt 		 * Make a record of the chunk's address, so that the address
   1493  1.10.2.2  matt 		 * range can be recycled if memory usage increases later on.
   1494  1.10.2.2  matt 		 * Don't bother to create entries if (size > chunksize), since
   1495  1.10.2.2  matt 		 * doing so could cause scalability issues for truly gargantuan
   1496  1.10.2.2  matt 		 * objects (many gigabytes or larger).
   1497  1.10.2.2  matt 		 */
   1498  1.10.2.2  matt 		if (size == chunksize) {
   1499  1.10.2.2  matt 			node = base_chunk_node_alloc();
   1500  1.10.2.2  matt 			if (node != NULL) {
   1501  1.10.2.2  matt 				node->chunk = (void *)(uintptr_t)chunk;
   1502  1.10.2.2  matt 				node->size = chunksize;
   1503  1.10.2.2  matt 				/* LINTED */
   1504  1.10.2.2  matt 				RB_INSERT(chunk_tree_s, &old_chunks, node);
   1505  1.10.2.2  matt 			}
   1506  1.10.2.2  matt 		}
   1507  1.10.2.2  matt #ifdef USE_BRK
   1508  1.10.2.2  matt 	}
   1509  1.10.2.2  matt #endif
   1510  1.10.2.2  matt 
   1511  1.10.2.2  matt #ifdef MALLOC_STATS
   1512  1.10.2.2  matt 	stats_chunks.curchunks -= (size / chunksize);
   1513  1.10.2.2  matt #endif
   1514  1.10.2.2  matt 	malloc_mutex_unlock(&chunks_mtx);
   1515  1.10.2.2  matt }
   1516  1.10.2.2  matt 
   1517  1.10.2.2  matt /*
   1518  1.10.2.2  matt  * End chunk management functions.
   1519  1.10.2.2  matt  */
   1520  1.10.2.2  matt /******************************************************************************/
   1521  1.10.2.2  matt /*
   1522  1.10.2.2  matt  * Begin arena.
   1523  1.10.2.2  matt  */
   1524  1.10.2.2  matt 
   1525  1.10.2.2  matt /*
   1526  1.10.2.2  matt  * Choose an arena based on a per-thread value (fast-path code, calls slow-path
   1527  1.10.2.2  matt  * code if necessary).
   1528  1.10.2.2  matt  */
   1529  1.10.2.2  matt static inline arena_t *
   1530  1.10.2.2  matt choose_arena(void)
   1531  1.10.2.2  matt {
   1532  1.10.2.2  matt 	arena_t *ret;
   1533  1.10.2.2  matt 
   1534  1.10.2.2  matt 	/*
   1535  1.10.2.2  matt 	 * We can only use TLS if this is a PIC library, since for the static
   1536  1.10.2.2  matt 	 * library version, libc's malloc is used by TLS allocation, which
   1537  1.10.2.2  matt 	 * introduces a bootstrapping issue.
   1538  1.10.2.2  matt 	 */
   1539  1.10.2.2  matt 	if (__isthreaded == false) {
   1540  1.10.2.2  matt 	    /*
   1541  1.10.2.2  matt 	     * Avoid the overhead of TLS for single-threaded operation.  If the
   1542  1.10.2.2  matt 	     * app switches to threaded mode, the initial thread may end up
   1543  1.10.2.2  matt 	     * being assigned to some other arena, but this one-time switch
   1544  1.10.2.2  matt 	     * shouldn't cause significant issues.
   1545  1.10.2.2  matt 	     */
   1546  1.10.2.2  matt 	    return (arenas[0]);
   1547  1.10.2.2  matt 	}
   1548  1.10.2.2  matt 
   1549  1.10.2.2  matt 	ret = get_arenas_map();
   1550  1.10.2.2  matt 	if (ret == NULL)
   1551  1.10.2.2  matt 		ret = choose_arena_hard();
   1552  1.10.2.2  matt 
   1553  1.10.2.2  matt 	assert(ret != NULL);
   1554  1.10.2.2  matt 	return (ret);
   1555  1.10.2.2  matt }
   1556  1.10.2.2  matt 
   1557  1.10.2.2  matt /*
   1558  1.10.2.2  matt  * Choose an arena based on a per-thread value (slow-path code only, called
   1559  1.10.2.2  matt  * only by choose_arena()).
   1560  1.10.2.2  matt  */
   1561  1.10.2.2  matt static arena_t *
   1562  1.10.2.2  matt choose_arena_hard(void)
   1563  1.10.2.2  matt {
   1564  1.10.2.2  matt 	arena_t *ret;
   1565  1.10.2.2  matt 
   1566  1.10.2.2  matt 	assert(__isthreaded);
   1567  1.10.2.2  matt 
   1568  1.10.2.2  matt 	/* Assign one of the arenas to this thread, in a round-robin fashion. */
   1569  1.10.2.2  matt 	malloc_mutex_lock(&arenas_mtx);
   1570  1.10.2.2  matt 	ret = arenas[next_arena];
   1571  1.10.2.2  matt 	if (ret == NULL)
   1572  1.10.2.2  matt 		ret = arenas_extend(next_arena);
   1573  1.10.2.2  matt 	if (ret == NULL) {
   1574  1.10.2.2  matt 		/*
   1575  1.10.2.2  matt 		 * Make sure that this function never returns NULL, so that
   1576  1.10.2.2  matt 		 * choose_arena() doesn't have to check for a NULL return
   1577  1.10.2.2  matt 		 * value.
   1578  1.10.2.2  matt 		 */
   1579  1.10.2.2  matt 		ret = arenas[0];
   1580  1.10.2.2  matt 	}
   1581  1.10.2.2  matt 	next_arena = (next_arena + 1) % narenas;
   1582  1.10.2.2  matt 	malloc_mutex_unlock(&arenas_mtx);
   1583  1.10.2.2  matt 	set_arenas_map(ret);
   1584  1.10.2.2  matt 
   1585  1.10.2.2  matt 	return (ret);
   1586  1.10.2.2  matt }
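/*
 * For example, with narenas == 4 and next_arena starting at 0, the first four
 * threads to allocate are handed arenas[0], arenas[1], arenas[2] and
 * arenas[3], and the fifth wraps around to arenas[0].  Because the result is
 * cached via set_arenas_map(), arenas_mtx is only taken on a thread's first
 * trip through choose_arena().
 */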
   1587  1.10.2.2  matt 
   1588  1.10.2.2  matt #ifndef lint
   1589  1.10.2.2  matt static inline int
   1590  1.10.2.2  matt arena_chunk_comp(arena_chunk_t *a, arena_chunk_t *b)
   1591  1.10.2.2  matt {
   1592  1.10.2.2  matt 
   1593  1.10.2.2  matt 	assert(a != NULL);
   1594  1.10.2.2  matt 	assert(b != NULL);
   1595  1.10.2.2  matt 
   1596  1.10.2.2  matt 	if ((uintptr_t)a < (uintptr_t)b)
   1597  1.10.2.2  matt 		return (-1);
   1598  1.10.2.2  matt 	else if (a == b)
   1599  1.10.2.2  matt 		return (0);
   1600  1.10.2.2  matt 	else
   1601  1.10.2.2  matt 		return (1);
   1602  1.10.2.2  matt }
   1603  1.10.2.2  matt 
   1604  1.10.2.2  matt /* Generate red-black tree code for arena chunks. */
   1605  1.10.2.2  matt RB_GENERATE_STATIC(arena_chunk_tree_s, arena_chunk_s, link, arena_chunk_comp);
   1606  1.10.2.2  matt #endif
   1607  1.10.2.2  matt 
   1608  1.10.2.2  matt #ifndef lint
   1609  1.10.2.2  matt static inline int
   1610  1.10.2.2  matt arena_run_comp(arena_run_t *a, arena_run_t *b)
   1611  1.10.2.2  matt {
   1612  1.10.2.2  matt 
   1613  1.10.2.2  matt 	assert(a != NULL);
   1614  1.10.2.2  matt 	assert(b != NULL);
   1615  1.10.2.2  matt 
   1616  1.10.2.2  matt 	if ((uintptr_t)a < (uintptr_t)b)
   1617  1.10.2.2  matt 		return (-1);
   1618  1.10.2.2  matt 	else if (a == b)
   1619  1.10.2.2  matt 		return (0);
   1620  1.10.2.2  matt 	else
   1621  1.10.2.2  matt 		return (1);
   1622  1.10.2.2  matt }
   1623  1.10.2.2  matt 
   1624  1.10.2.2  matt /* Generate red-black tree code for arena runs. */
   1625  1.10.2.2  matt RB_GENERATE_STATIC(arena_run_tree_s, arena_run_s, link, arena_run_comp);
   1626  1.10.2.2  matt #endif
   1627  1.10.2.2  matt 
   1628  1.10.2.2  matt static inline void *
   1629  1.10.2.2  matt arena_run_reg_alloc(arena_run_t *run, arena_bin_t *bin)
   1630  1.10.2.2  matt {
   1631  1.10.2.2  matt 	void *ret;
   1632  1.10.2.2  matt 	unsigned i, mask, bit, regind;
   1633  1.10.2.2  matt 
   1634  1.10.2.2  matt 	assert(run->magic == ARENA_RUN_MAGIC);
   1635  1.10.2.2  matt 	assert(run->regs_minelm < bin->regs_mask_nelms);
   1636  1.10.2.2  matt 
   1637  1.10.2.2  matt 	/*
   1638  1.10.2.2  matt 	 * Move the first check outside the loop, so that run->regs_minelm can
   1639  1.10.2.2  matt 	 * be updated unconditionally, without the possibility of updating it
   1640  1.10.2.2  matt 	 * multiple times.
   1641  1.10.2.2  matt 	 */
   1642  1.10.2.2  matt 	i = run->regs_minelm;
   1643  1.10.2.2  matt 	mask = run->regs_mask[i];
   1644  1.10.2.2  matt 	if (mask != 0) {
   1645  1.10.2.2  matt 		/* Usable allocation found. */
   1646  1.10.2.2  matt 		bit = ffs((int)mask) - 1;
   1647  1.10.2.2  matt 
   1648  1.10.2.2  matt 		regind = ((i << (SIZEOF_INT_2POW + 3)) + bit);
   1649  1.10.2.2  matt 		ret = (void *)(((uintptr_t)run) + bin->reg0_offset
   1650  1.10.2.2  matt 		    + (bin->reg_size * regind));
   1651  1.10.2.2  matt 
   1652  1.10.2.2  matt 		/* Clear bit. */
   1653  1.10.2.2  matt 		mask ^= (1 << bit);
   1654  1.10.2.2  matt 		run->regs_mask[i] = mask;
   1655  1.10.2.2  matt 
   1656  1.10.2.2  matt 		return (ret);
   1657  1.10.2.2  matt 	}
   1658  1.10.2.2  matt 
   1659  1.10.2.2  matt 	for (i++; i < bin->regs_mask_nelms; i++) {
   1660  1.10.2.2  matt 		mask = run->regs_mask[i];
   1661  1.10.2.2  matt 		if (mask != 0) {
   1662  1.10.2.2  matt 			/* Usable allocation found. */
   1663  1.10.2.2  matt 			bit = ffs((int)mask) - 1;
   1664  1.10.2.2  matt 
   1665  1.10.2.2  matt 			regind = ((i << (SIZEOF_INT_2POW + 3)) + bit);
   1666  1.10.2.2  matt 			ret = (void *)(((uintptr_t)run) + bin->reg0_offset
   1667  1.10.2.2  matt 			    + (bin->reg_size * regind));
   1668  1.10.2.2  matt 
   1669  1.10.2.2  matt 			/* Clear bit. */
   1670  1.10.2.2  matt 			mask ^= (1 << bit);
   1671  1.10.2.2  matt 			run->regs_mask[i] = mask;
   1672  1.10.2.2  matt 
   1673  1.10.2.2  matt 			/*
   1674  1.10.2.2  matt 			 * Make a note that nothing before this element
   1675  1.10.2.2  matt 			 * contains a free region.
   1676  1.10.2.2  matt 			 */
   1677  1.10.2.2  matt 			run->regs_minelm = i; /* Low payoff: + (mask == 0); */
   1678  1.10.2.2  matt 
   1679  1.10.2.2  matt 			return (ret);
   1680  1.10.2.2  matt 		}
   1681  1.10.2.2  matt 	}
   1682  1.10.2.2  matt 	/* Not reached. */
   1683  1.10.2.2  matt 	/* LINTED */
   1684  1.10.2.2  matt 	assert(0);
   1685  1.10.2.2  matt 	return (NULL);
   1686  1.10.2.2  matt }
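/*
 * Worked example, assuming 32-bit ints (SIZEOF_INT_2POW == 2, so each
 * regs_mask[] element covers 32 regions): if regs_mask[1] == 0x00000008, then
 * ffs() gives bit == 3, regind == (1 << 5) + 3 == 35, and the region returned
 * is
 *
 *	(void *)((uintptr_t)run + bin->reg0_offset + 35 * bin->reg_size)
 *
 * after which bit 3 of regs_mask[1] is cleared to mark region 35 as in use.
 */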
   1687  1.10.2.2  matt 
   1688  1.10.2.2  matt static inline void
   1689  1.10.2.2  matt arena_run_reg_dalloc(arena_run_t *run, arena_bin_t *bin, void *ptr, size_t size)
   1690  1.10.2.2  matt {
   1691  1.10.2.2  matt 	/*
   1692  1.10.2.2  matt 	 * To divide by a number D that is not a power of two we multiply
   1693  1.10.2.2  matt 	 * by (2^21 / D) and then right shift by 21 positions.
   1694  1.10.2.2  matt 	 *
   1695  1.10.2.2  matt 	 *   X / D
   1696  1.10.2.2  matt 	 *
   1697  1.10.2.2  matt 	 * becomes
   1698  1.10.2.2  matt 	 *
   1699  1.10.2.2  matt 	 *   (X * size_invs[(D >> QUANTUM_2POW_MIN) - 3]) >> SIZE_INV_SHIFT
   1700  1.10.2.2  matt 	 */
   1701  1.10.2.2  matt #define SIZE_INV_SHIFT 21
   1702  1.10.2.2  matt #define SIZE_INV(s) (((1 << SIZE_INV_SHIFT) / (s << QUANTUM_2POW_MIN)) + 1)
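	/*
	 * Worked example, assuming QUANTUM_2POW_MIN == 4 (a 16 byte quantum):
	 * for reg_size == 48, size_invs[(48 >> 4) - 3] == SIZE_INV(3) ==
	 * ((1 << 21) / 48) + 1 == 43691.  A region starting 96 bytes past reg0
	 * then gives
	 *
	 *	regind == (96 * 43691) >> 21 == 4194336 >> 21 == 2,
	 *
	 * which matches 96 / 48 without a division instruction.
	 */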
   1703  1.10.2.2  matt 	static const unsigned size_invs[] = {
   1704  1.10.2.2  matt 	    SIZE_INV(3),
   1705  1.10.2.2  matt 	    SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7),
   1706  1.10.2.2  matt 	    SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11),
   1707  1.10.2.2  matt 	    SIZE_INV(12),SIZE_INV(13), SIZE_INV(14), SIZE_INV(15),
   1708  1.10.2.2  matt 	    SIZE_INV(16),SIZE_INV(17), SIZE_INV(18), SIZE_INV(19),
   1709  1.10.2.2  matt 	    SIZE_INV(20),SIZE_INV(21), SIZE_INV(22), SIZE_INV(23),
   1710  1.10.2.2  matt 	    SIZE_INV(24),SIZE_INV(25), SIZE_INV(26), SIZE_INV(27),
   1711  1.10.2.2  matt 	    SIZE_INV(28),SIZE_INV(29), SIZE_INV(30), SIZE_INV(31)
   1712  1.10.2.2  matt #if (QUANTUM_2POW_MIN < 4)
   1713  1.10.2.2  matt 	    ,
   1714  1.10.2.2  matt 	    SIZE_INV(32), SIZE_INV(33), SIZE_INV(34), SIZE_INV(35),
   1715  1.10.2.2  matt 	    SIZE_INV(36), SIZE_INV(37), SIZE_INV(38), SIZE_INV(39),
   1716  1.10.2.2  matt 	    SIZE_INV(40), SIZE_INV(41), SIZE_INV(42), SIZE_INV(43),
   1717  1.10.2.2  matt 	    SIZE_INV(44), SIZE_INV(45), SIZE_INV(46), SIZE_INV(47),
   1718  1.10.2.2  matt 	    SIZE_INV(48), SIZE_INV(49), SIZE_INV(50), SIZE_INV(51),
   1719  1.10.2.2  matt 	    SIZE_INV(52), SIZE_INV(53), SIZE_INV(54), SIZE_INV(55),
   1720  1.10.2.2  matt 	    SIZE_INV(56), SIZE_INV(57), SIZE_INV(58), SIZE_INV(59),
   1721  1.10.2.2  matt 	    SIZE_INV(60), SIZE_INV(61), SIZE_INV(62), SIZE_INV(63)
   1722  1.10.2.2  matt #endif
   1723  1.10.2.2  matt 	};
   1724  1.10.2.2  matt 	unsigned diff, regind, elm, bit;
   1725  1.10.2.2  matt 
   1726  1.10.2.2  matt 	/* LINTED */
   1727  1.10.2.2  matt 	assert(run->magic == ARENA_RUN_MAGIC);
   1728  1.10.2.2  matt 	assert(((sizeof(size_invs)) / sizeof(unsigned)) + 3
   1729  1.10.2.2  matt 	    >= (SMALL_MAX_DEFAULT >> QUANTUM_2POW_MIN));
   1730  1.10.2.2  matt 
   1731  1.10.2.2  matt 	/*
   1732  1.10.2.2  matt 	 * Avoid doing division with a variable divisor if possible.  Using
   1733  1.10.2.2  matt 	 * actual division here can reduce allocator throughput by over 20%!
   1734  1.10.2.2  matt 	 */
   1735  1.10.2.2  matt 	diff = (unsigned)((uintptr_t)ptr - (uintptr_t)run - bin->reg0_offset);
   1736  1.10.2.2  matt 	if ((size & (size - 1)) == 0) {
   1737  1.10.2.2  matt 		/*
   1738  1.10.2.2  matt 		 * log2_table allows fast division of a power of two in the
   1739  1.10.2.2  matt 		 * [1..128] range.
   1740  1.10.2.2  matt 		 *
   1741  1.10.2.2  matt 		 * (x / divisor) becomes (x >> log2_table[divisor - 1]).
   1742  1.10.2.2  matt 		 */
   1743  1.10.2.2  matt 		static const unsigned char log2_table[] = {
   1744  1.10.2.2  matt 		    0, 1, 0, 2, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4,
   1745  1.10.2.2  matt 		    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5,
   1746  1.10.2.2  matt 		    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
   1747  1.10.2.2  matt 		    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6,
   1748  1.10.2.2  matt 		    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
   1749  1.10.2.2  matt 		    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
   1750  1.10.2.2  matt 		    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
   1751  1.10.2.2  matt 		    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7
   1752  1.10.2.2  matt 		};
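		/*
		 * For example, size == 16 gives log2_table[15] == 4, so
		 * regind == diff >> 4; size == 1024 takes the second case
		 * below and gives regind == diff >> (8 + log2_table[3]) ==
		 * diff >> 10.
		 */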
   1753  1.10.2.2  matt 
   1754  1.10.2.2  matt 		if (size <= 128)
   1755  1.10.2.2  matt 			regind = (diff >> log2_table[size - 1]);
   1756  1.10.2.2  matt 		else if (size <= 32768)
   1757  1.10.2.2  matt 			regind = diff >> (8 + log2_table[(size >> 8) - 1]);
   1758  1.10.2.2  matt 		else {
   1759  1.10.2.2  matt 			/*
   1760  1.10.2.2  matt 			 * The page size is too large for us to use the lookup
   1761  1.10.2.2  matt 			 * table.  Use real division.
   1762  1.10.2.2  matt 			 */
   1763  1.10.2.2  matt 			regind = (unsigned)(diff / size);
   1764  1.10.2.2  matt 		}
   1765  1.10.2.2  matt 	} else if (size <= ((sizeof(size_invs) / sizeof(unsigned))
   1766  1.10.2.2  matt 	    << QUANTUM_2POW_MIN) + 2) {
   1767  1.10.2.2  matt 		regind = size_invs[(size >> QUANTUM_2POW_MIN) - 3] * diff;
   1768  1.10.2.2  matt 		regind >>= SIZE_INV_SHIFT;
   1769  1.10.2.2  matt 	} else {
   1770  1.10.2.2  matt 		/*
   1771  1.10.2.2  matt 		 * size_invs isn't large enough to handle this size class, so
   1772  1.10.2.2  matt 		 * calculate regind using actual division.  This only happens
   1773  1.10.2.2  matt 		 * if the user increases small_max via the 'S' runtime
   1774  1.10.2.2  matt 		 * configuration option.
   1775  1.10.2.2  matt 		 */
   1776  1.10.2.2  matt 		regind = (unsigned)(diff / size);
    1777  1.10.2.2  matt 	}
   1778  1.10.2.2  matt 	assert(diff == regind * size);
   1779  1.10.2.2  matt 	assert(regind < bin->nregs);
   1780  1.10.2.2  matt 
   1781  1.10.2.2  matt 	elm = regind >> (SIZEOF_INT_2POW + 3);
   1782  1.10.2.2  matt 	if (elm < run->regs_minelm)
   1783  1.10.2.2  matt 		run->regs_minelm = elm;
   1784  1.10.2.2  matt 	bit = regind - (elm << (SIZEOF_INT_2POW + 3));
   1785  1.10.2.2  matt 	assert((run->regs_mask[elm] & (1 << bit)) == 0);
   1786  1.10.2.2  matt 	run->regs_mask[elm] |= (1 << bit);
   1787  1.10.2.2  matt #undef SIZE_INV
   1788  1.10.2.2  matt #undef SIZE_INV_SHIFT
   1789  1.10.2.2  matt }
   1790  1.10.2.2  matt 
   1791  1.10.2.2  matt static void
   1792  1.10.2.2  matt arena_run_split(arena_t *arena, arena_run_t *run, size_t size)
   1793  1.10.2.2  matt {
   1794  1.10.2.2  matt 	arena_chunk_t *chunk;
   1795  1.10.2.2  matt 	unsigned run_ind, map_offset, total_pages, need_pages, rem_pages;
   1796  1.10.2.2  matt 	unsigned i;
   1797  1.10.2.2  matt 
   1798  1.10.2.2  matt 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
   1799  1.10.2.2  matt 	run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk)
   1800  1.10.2.2  matt 	    >> pagesize_2pow);
   1801  1.10.2.2  matt 	total_pages = chunk->map[run_ind].npages;
   1802  1.10.2.2  matt 	need_pages = (unsigned)(size >> pagesize_2pow);
   1803  1.10.2.2  matt 	assert(need_pages <= total_pages);
   1804  1.10.2.2  matt 	rem_pages = total_pages - need_pages;
   1805  1.10.2.2  matt 
   1806  1.10.2.2  matt 	/* Split enough pages from the front of run to fit allocation size. */
   1807  1.10.2.2  matt 	map_offset = run_ind;
   1808  1.10.2.2  matt 	for (i = 0; i < need_pages; i++) {
   1809  1.10.2.2  matt 		chunk->map[map_offset + i].npages = need_pages;
   1810  1.10.2.2  matt 		chunk->map[map_offset + i].pos = i;
   1811  1.10.2.2  matt 	}
   1812  1.10.2.2  matt 
   1813  1.10.2.2  matt 	/* Keep track of trailing unused pages for later use. */
   1814  1.10.2.2  matt 	if (rem_pages > 0) {
   1815  1.10.2.2  matt 		/* Update map for trailing pages. */
   1816  1.10.2.2  matt 		map_offset += need_pages;
   1817  1.10.2.2  matt 		chunk->map[map_offset].npages = rem_pages;
   1818  1.10.2.2  matt 		chunk->map[map_offset].pos = POS_FREE;
   1819  1.10.2.2  matt 		chunk->map[map_offset + rem_pages - 1].npages = rem_pages;
   1820  1.10.2.2  matt 		chunk->map[map_offset + rem_pages - 1].pos = POS_FREE;
   1821  1.10.2.2  matt 	}
   1822  1.10.2.2  matt 
   1823  1.10.2.2  matt 	chunk->pages_used += need_pages;
   1824  1.10.2.2  matt }
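/*
 * Example: splitting a 3 page run off a 10 page free run that starts at map
 * index 4 leaves map[4..6] with npages == 3 and pos == 0, 1, 2, while the 7
 * remaining free pages are recorded only at their boundaries: map[7] and
 * map[13] both end up with npages == 7 and pos == POS_FREE.  Interior entries
 * of a free run are not maintained.
 */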
   1825  1.10.2.2  matt 
   1826  1.10.2.2  matt static arena_chunk_t *
   1827  1.10.2.2  matt arena_chunk_alloc(arena_t *arena)
   1828  1.10.2.2  matt {
   1829  1.10.2.2  matt 	arena_chunk_t *chunk;
   1830  1.10.2.2  matt 
   1831  1.10.2.2  matt 	if (arena->spare != NULL) {
   1832  1.10.2.2  matt 		chunk = arena->spare;
   1833  1.10.2.2  matt 		arena->spare = NULL;
   1834  1.10.2.2  matt 
   1835  1.10.2.2  matt 		/* LINTED */
   1836  1.10.2.2  matt 		RB_INSERT(arena_chunk_tree_s, &arena->chunks, chunk);
   1837  1.10.2.2  matt 	} else {
   1838  1.10.2.2  matt 		chunk = (arena_chunk_t *)chunk_alloc(chunksize);
   1839  1.10.2.2  matt 		if (chunk == NULL)
   1840  1.10.2.2  matt 			return (NULL);
   1841  1.10.2.2  matt #ifdef MALLOC_STATS
   1842  1.10.2.2  matt 		arena->stats.mapped += chunksize;
   1843  1.10.2.2  matt #endif
   1844  1.10.2.2  matt 
   1845  1.10.2.2  matt 		chunk->arena = arena;
   1846  1.10.2.2  matt 
   1847  1.10.2.2  matt 		/* LINTED */
   1848  1.10.2.2  matt 		RB_INSERT(arena_chunk_tree_s, &arena->chunks, chunk);
   1849  1.10.2.2  matt 
   1850  1.10.2.2  matt 		/*
   1851  1.10.2.2  matt 		 * Claim that no pages are in use, since the header is merely
   1852  1.10.2.2  matt 		 * overhead.
   1853  1.10.2.2  matt 		 */
   1854  1.10.2.2  matt 		chunk->pages_used = 0;
   1855  1.10.2.2  matt 
   1856  1.10.2.2  matt 		chunk->max_frun_npages = chunk_npages -
   1857  1.10.2.2  matt 		    arena_chunk_header_npages;
   1858  1.10.2.2  matt 		chunk->min_frun_ind = arena_chunk_header_npages;
   1859  1.10.2.2  matt 
   1860  1.10.2.2  matt 		/*
   1861  1.10.2.2  matt 		 * Initialize enough of the map to support one maximal free run.
   1862  1.10.2.2  matt 		 */
   1863  1.10.2.2  matt 		chunk->map[arena_chunk_header_npages].npages = chunk_npages -
   1864  1.10.2.2  matt 		    arena_chunk_header_npages;
   1865  1.10.2.2  matt 		chunk->map[arena_chunk_header_npages].pos = POS_FREE;
   1866  1.10.2.2  matt 		chunk->map[chunk_npages - 1].npages = chunk_npages -
   1867  1.10.2.2  matt 		    arena_chunk_header_npages;
   1868  1.10.2.2  matt 		chunk->map[chunk_npages - 1].pos = POS_FREE;
   1869  1.10.2.2  matt 	}
   1870  1.10.2.2  matt 
   1871  1.10.2.2  matt 	return (chunk);
   1872  1.10.2.2  matt }
   1873  1.10.2.2  matt 
   1874  1.10.2.2  matt static void
   1875  1.10.2.2  matt arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
   1876  1.10.2.2  matt {
   1877  1.10.2.2  matt 
   1878  1.10.2.2  matt 	/*
   1879  1.10.2.2  matt 	 * Remove chunk from the chunk tree, regardless of whether this chunk
   1880  1.10.2.2  matt 	 * will be cached, so that the arena does not use it.
   1881  1.10.2.2  matt 	 */
   1882  1.10.2.2  matt 	/* LINTED */
   1883  1.10.2.2  matt 	RB_REMOVE(arena_chunk_tree_s, &chunk->arena->chunks, chunk);
   1884  1.10.2.2  matt 
   1885  1.10.2.2  matt 	if (opt_hint == false) {
   1886  1.10.2.2  matt 		if (arena->spare != NULL) {
   1887  1.10.2.2  matt 			chunk_dealloc((void *)arena->spare, chunksize);
   1888  1.10.2.2  matt #ifdef MALLOC_STATS
   1889  1.10.2.2  matt 			arena->stats.mapped -= chunksize;
   1890  1.10.2.2  matt #endif
   1891  1.10.2.2  matt 		}
   1892  1.10.2.2  matt 		arena->spare = chunk;
   1893  1.10.2.2  matt 	} else {
   1894  1.10.2.2  matt 		assert(arena->spare == NULL);
   1895  1.10.2.2  matt 		chunk_dealloc((void *)chunk, chunksize);
   1896  1.10.2.2  matt #ifdef MALLOC_STATS
   1897  1.10.2.2  matt 		arena->stats.mapped -= chunksize;
   1898  1.10.2.2  matt #endif
   1899  1.10.2.2  matt 	}
   1900  1.10.2.2  matt }
   1901  1.10.2.2  matt 
   1902  1.10.2.2  matt static arena_run_t *
   1903  1.10.2.2  matt arena_run_alloc(arena_t *arena, size_t size)
   1904  1.10.2.2  matt {
   1905  1.10.2.2  matt 	arena_chunk_t *chunk;
   1906  1.10.2.2  matt 	arena_run_t *run;
   1907  1.10.2.2  matt 	unsigned need_npages, limit_pages, compl_need_npages;
   1908  1.10.2.2  matt 
   1909  1.10.2.2  matt 	assert(size <= (chunksize - (arena_chunk_header_npages <<
   1910  1.10.2.2  matt 	    pagesize_2pow)));
   1911  1.10.2.2  matt 	assert((size & pagesize_mask) == 0);
   1912  1.10.2.2  matt 
   1913  1.10.2.2  matt 	/*
    1914  1.10.2.2  matt 	 * Search through the arena's chunks in address order for a free run
    1915  1.10.2.2  matt 	 * that is large enough.  Look for the first fit.
   1916  1.10.2.2  matt 	 */
   1917  1.10.2.2  matt 	need_npages = (unsigned)(size >> pagesize_2pow);
   1918  1.10.2.2  matt 	limit_pages = chunk_npages - arena_chunk_header_npages;
   1919  1.10.2.2  matt 	compl_need_npages = limit_pages - need_npages;
   1920  1.10.2.2  matt 	/* LINTED */
   1921  1.10.2.2  matt 	RB_FOREACH(chunk, arena_chunk_tree_s, &arena->chunks) {
   1922  1.10.2.2  matt 		/*
   1923  1.10.2.2  matt 		 * Avoid searching this chunk if there are not enough
   1924  1.10.2.2  matt 		 * contiguous free pages for there to possibly be a large
   1925  1.10.2.2  matt 		 * enough free run.
   1926  1.10.2.2  matt 		 */
   1927  1.10.2.2  matt 		if (chunk->pages_used <= compl_need_npages &&
   1928  1.10.2.2  matt 		    need_npages <= chunk->max_frun_npages) {
   1929  1.10.2.2  matt 			arena_chunk_map_t *mapelm;
   1930  1.10.2.2  matt 			unsigned i;
   1931  1.10.2.2  matt 			unsigned max_frun_npages = 0;
   1932  1.10.2.2  matt 			unsigned min_frun_ind = chunk_npages;
   1933  1.10.2.2  matt 
   1934  1.10.2.2  matt 			assert(chunk->min_frun_ind >=
   1935  1.10.2.2  matt 			    arena_chunk_header_npages);
   1936  1.10.2.2  matt 			for (i = chunk->min_frun_ind; i < chunk_npages;) {
   1937  1.10.2.2  matt 				mapelm = &chunk->map[i];
   1938  1.10.2.2  matt 				if (mapelm->pos == POS_FREE) {
   1939  1.10.2.2  matt 					if (mapelm->npages >= need_npages) {
   1940  1.10.2.2  matt 						run = (arena_run_t *)
   1941  1.10.2.2  matt 						    ((uintptr_t)chunk + (i <<
   1942  1.10.2.2  matt 						    pagesize_2pow));
   1943  1.10.2.2  matt 						/* Update page map. */
   1944  1.10.2.2  matt 						arena_run_split(arena, run,
   1945  1.10.2.2  matt 						    size);
   1946  1.10.2.2  matt 						return (run);
   1947  1.10.2.2  matt 					}
   1948  1.10.2.2  matt 					if (mapelm->npages >
   1949  1.10.2.2  matt 					    max_frun_npages) {
   1950  1.10.2.2  matt 						max_frun_npages =
   1951  1.10.2.2  matt 						    mapelm->npages;
   1952  1.10.2.2  matt 					}
   1953  1.10.2.2  matt 					if (i < min_frun_ind) {
   1954  1.10.2.2  matt 						min_frun_ind = i;
   1955  1.10.2.2  matt 						if (i < chunk->min_frun_ind)
   1956  1.10.2.2  matt 							chunk->min_frun_ind = i;
   1957  1.10.2.2  matt 					}
   1958  1.10.2.2  matt 				}
   1959  1.10.2.2  matt 				i += mapelm->npages;
   1960  1.10.2.2  matt 			}
   1961  1.10.2.2  matt 			/*
   1962  1.10.2.2  matt 			 * Search failure.  Reset cached chunk->max_frun_npages.
   1963  1.10.2.2  matt 			 * chunk->min_frun_ind was already reset above (if
   1964  1.10.2.2  matt 			 * necessary).
   1965  1.10.2.2  matt 			 */
   1966  1.10.2.2  matt 			chunk->max_frun_npages = max_frun_npages;
   1967  1.10.2.2  matt 		}
   1968  1.10.2.2  matt 	}
   1969  1.10.2.2  matt 
   1970  1.10.2.2  matt 	/*
   1971  1.10.2.2  matt 	 * No usable runs.  Create a new chunk from which to allocate the run.
   1972  1.10.2.2  matt 	 */
   1973  1.10.2.2  matt 	chunk = arena_chunk_alloc(arena);
   1974  1.10.2.2  matt 	if (chunk == NULL)
   1975  1.10.2.2  matt 		return (NULL);
   1976  1.10.2.2  matt 	run = (arena_run_t *)((uintptr_t)chunk + (arena_chunk_header_npages <<
   1977  1.10.2.2  matt 	    pagesize_2pow));
   1978  1.10.2.2  matt 	/* Update page map. */
   1979  1.10.2.2  matt 	arena_run_split(arena, run, size);
   1980  1.10.2.2  matt 	return (run);
   1981  1.10.2.2  matt }
   1982  1.10.2.2  matt 
   1983  1.10.2.2  matt static void
   1984  1.10.2.2  matt arena_run_dalloc(arena_t *arena, arena_run_t *run, size_t size)
   1985  1.10.2.2  matt {
   1986  1.10.2.2  matt 	arena_chunk_t *chunk;
   1987  1.10.2.2  matt 	unsigned run_ind, run_pages;
   1988  1.10.2.2  matt 
   1989  1.10.2.2  matt 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
   1990  1.10.2.2  matt 
   1991  1.10.2.2  matt 	run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk)
   1992  1.10.2.2  matt 	    >> pagesize_2pow);
   1993  1.10.2.2  matt 	assert(run_ind >= arena_chunk_header_npages);
   1994  1.10.2.2  matt 	assert(run_ind < (chunksize >> pagesize_2pow));
   1995  1.10.2.2  matt 	run_pages = (unsigned)(size >> pagesize_2pow);
   1996  1.10.2.2  matt 	assert(run_pages == chunk->map[run_ind].npages);
   1997  1.10.2.2  matt 
   1998  1.10.2.2  matt 	/* Subtract pages from count of pages used in chunk. */
   1999  1.10.2.2  matt 	chunk->pages_used -= run_pages;
   2000  1.10.2.2  matt 
   2001  1.10.2.2  matt 	/* Mark run as deallocated. */
   2002  1.10.2.2  matt 	assert(chunk->map[run_ind].npages == run_pages);
   2003  1.10.2.2  matt 	chunk->map[run_ind].pos = POS_FREE;
   2004  1.10.2.2  matt 	assert(chunk->map[run_ind + run_pages - 1].npages == run_pages);
   2005  1.10.2.2  matt 	chunk->map[run_ind + run_pages - 1].pos = POS_FREE;
   2006  1.10.2.2  matt 
   2007  1.10.2.2  matt 	/*
   2008  1.10.2.2  matt 	 * Tell the kernel that we don't need the data in this run, but only if
   2009  1.10.2.2  matt 	 * requested via runtime configuration.
   2010  1.10.2.2  matt 	 */
   2011  1.10.2.2  matt 	if (opt_hint)
   2012  1.10.2.2  matt 		madvise(run, size, MADV_FREE);
   2013  1.10.2.2  matt 
   2014  1.10.2.2  matt 	/* Try to coalesce with neighboring runs. */
   2015  1.10.2.2  matt 	if (run_ind > arena_chunk_header_npages &&
   2016  1.10.2.2  matt 	    chunk->map[run_ind - 1].pos == POS_FREE) {
   2017  1.10.2.2  matt 		unsigned prev_npages;
   2018  1.10.2.2  matt 
   2019  1.10.2.2  matt 		/* Coalesce with previous run. */
   2020  1.10.2.2  matt 		prev_npages = chunk->map[run_ind - 1].npages;
   2021  1.10.2.2  matt 		run_ind -= prev_npages;
   2022  1.10.2.2  matt 		assert(chunk->map[run_ind].npages == prev_npages);
   2023  1.10.2.2  matt 		assert(chunk->map[run_ind].pos == POS_FREE);
   2024  1.10.2.2  matt 		run_pages += prev_npages;
   2025  1.10.2.2  matt 
   2026  1.10.2.2  matt 		chunk->map[run_ind].npages = run_pages;
   2027  1.10.2.2  matt 		assert(chunk->map[run_ind].pos == POS_FREE);
   2028  1.10.2.2  matt 		chunk->map[run_ind + run_pages - 1].npages = run_pages;
   2029  1.10.2.2  matt 		assert(chunk->map[run_ind + run_pages - 1].pos == POS_FREE);
   2030  1.10.2.2  matt 	}
   2031  1.10.2.2  matt 
   2032  1.10.2.2  matt 	if (run_ind + run_pages < chunk_npages &&
   2033  1.10.2.2  matt 	    chunk->map[run_ind + run_pages].pos == POS_FREE) {
   2034  1.10.2.2  matt 		unsigned next_npages;
   2035  1.10.2.2  matt 
   2036  1.10.2.2  matt 		/* Coalesce with next run. */
   2037  1.10.2.2  matt 		next_npages = chunk->map[run_ind + run_pages].npages;
   2038  1.10.2.2  matt 		run_pages += next_npages;
   2039  1.10.2.2  matt 		assert(chunk->map[run_ind + run_pages - 1].npages ==
   2040  1.10.2.2  matt 		    next_npages);
   2041  1.10.2.2  matt 		assert(chunk->map[run_ind + run_pages - 1].pos == POS_FREE);
   2042  1.10.2.2  matt 
   2043  1.10.2.2  matt 		chunk->map[run_ind].npages = run_pages;
   2044  1.10.2.2  matt 		chunk->map[run_ind].pos = POS_FREE;
   2045  1.10.2.2  matt 		chunk->map[run_ind + run_pages - 1].npages = run_pages;
   2046  1.10.2.2  matt 		assert(chunk->map[run_ind + run_pages - 1].pos == POS_FREE);
   2047  1.10.2.2  matt 	}
   2048  1.10.2.2  matt 
   2049  1.10.2.2  matt 	if (chunk->map[run_ind].npages > chunk->max_frun_npages)
   2050  1.10.2.2  matt 		chunk->max_frun_npages = chunk->map[run_ind].npages;
   2051  1.10.2.2  matt 	if (run_ind < chunk->min_frun_ind)
   2052  1.10.2.2  matt 		chunk->min_frun_ind = run_ind;
   2053  1.10.2.2  matt 
   2054  1.10.2.2  matt 	/* Deallocate chunk if it is now completely unused. */
   2055  1.10.2.2  matt 	if (chunk->pages_used == 0)
   2056  1.10.2.2  matt 		arena_chunk_dealloc(arena, chunk);
   2057  1.10.2.2  matt }
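/*
 * Coalescing example: freeing a 2 page run at map index 10 while map[8..9]
 * and map[12..14] are already free yields one 7 page free run; map[8] and
 * map[14], the new boundaries, end up with npages == 7 and pos == POS_FREE,
 * and max_frun_npages/min_frun_ind are updated so the next arena_run_alloc()
 * search can find the merged run.
 */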
   2058  1.10.2.2  matt 
   2059  1.10.2.2  matt static arena_run_t *
   2060  1.10.2.2  matt arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
   2061  1.10.2.2  matt {
   2062  1.10.2.2  matt 	arena_run_t *run;
   2063  1.10.2.2  matt 	unsigned i, remainder;
   2064  1.10.2.2  matt 
   2065  1.10.2.2  matt 	/* Look for a usable run. */
   2066  1.10.2.2  matt 	/* LINTED */
   2067  1.10.2.2  matt 	if ((run = RB_MIN(arena_run_tree_s, &bin->runs)) != NULL) {
   2068  1.10.2.2  matt 		/* run is guaranteed to have available space. */
   2069  1.10.2.2  matt 		/* LINTED */
   2070  1.10.2.2  matt 		RB_REMOVE(arena_run_tree_s, &bin->runs, run);
   2071  1.10.2.2  matt #ifdef MALLOC_STATS
   2072  1.10.2.2  matt 		bin->stats.reruns++;
   2073  1.10.2.2  matt #endif
   2074  1.10.2.2  matt 		return (run);
   2075  1.10.2.2  matt 	}
   2076  1.10.2.2  matt 	/* No existing runs have any space available. */
   2077  1.10.2.2  matt 
   2078  1.10.2.2  matt 	/* Allocate a new run. */
   2079  1.10.2.2  matt 	run = arena_run_alloc(arena, bin->run_size);
   2080  1.10.2.2  matt 	if (run == NULL)
   2081  1.10.2.2  matt 		return (NULL);
   2082  1.10.2.2  matt 
   2083  1.10.2.2  matt 	/* Initialize run internals. */
   2084  1.10.2.2  matt 	run->bin = bin;
   2085  1.10.2.2  matt 
   2086  1.10.2.2  matt 	for (i = 0; i < bin->regs_mask_nelms; i++)
   2087  1.10.2.2  matt 		run->regs_mask[i] = UINT_MAX;
   2088  1.10.2.2  matt 	remainder = bin->nregs & ((1 << (SIZEOF_INT_2POW + 3)) - 1);
   2089  1.10.2.2  matt 	if (remainder != 0) {
   2090  1.10.2.2  matt 		/* The last element has spare bits that need to be unset. */
    2091  1.10.2.2  matt 		run->regs_mask[i - 1] = (UINT_MAX >> ((1 << (SIZEOF_INT_2POW + 3))
   2092  1.10.2.2  matt 		    - remainder));
   2093  1.10.2.2  matt 	}
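	/*
	 * Example, assuming 32-bit ints: a run with nregs == 50 uses
	 * regs_mask_nelms == 2, so remainder == 50 & 31 == 18 and the last
	 * element becomes UINT_MAX >> 14 == 0x0003ffff, i.e. only the 18 bits
	 * that correspond to real regions start out set.
	 */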
   2094  1.10.2.2  matt 
   2095  1.10.2.2  matt 	run->regs_minelm = 0;
   2096  1.10.2.2  matt 
   2097  1.10.2.2  matt 	run->nfree = bin->nregs;
   2098  1.10.2.2  matt #ifdef MALLOC_DEBUG
   2099  1.10.2.2  matt 	run->magic = ARENA_RUN_MAGIC;
   2100  1.10.2.2  matt #endif
   2101  1.10.2.2  matt 
   2102  1.10.2.2  matt #ifdef MALLOC_STATS
   2103  1.10.2.2  matt 	bin->stats.nruns++;
   2104  1.10.2.2  matt 	bin->stats.curruns++;
   2105  1.10.2.2  matt 	if (bin->stats.curruns > bin->stats.highruns)
   2106  1.10.2.2  matt 		bin->stats.highruns = bin->stats.curruns;
   2107  1.10.2.2  matt #endif
   2108  1.10.2.2  matt 	return (run);
   2109  1.10.2.2  matt }
   2110  1.10.2.2  matt 
   2111  1.10.2.2  matt /* bin->runcur must have space available before this function is called. */
   2112  1.10.2.2  matt static inline void *
   2113  1.10.2.2  matt arena_bin_malloc_easy(arena_t *arena, arena_bin_t *bin, arena_run_t *run)
   2114  1.10.2.2  matt {
   2115  1.10.2.2  matt 	void *ret;
   2116  1.10.2.2  matt 
   2117  1.10.2.2  matt 	assert(run->magic == ARENA_RUN_MAGIC);
   2118  1.10.2.2  matt 	assert(run->nfree > 0);
   2119  1.10.2.2  matt 
   2120  1.10.2.2  matt 	ret = arena_run_reg_alloc(run, bin);
   2121  1.10.2.2  matt 	assert(ret != NULL);
   2122  1.10.2.2  matt 	run->nfree--;
   2123  1.10.2.2  matt 
   2124  1.10.2.2  matt 	return (ret);
   2125  1.10.2.2  matt }
   2126  1.10.2.2  matt 
   2127  1.10.2.2  matt /* Re-fill bin->runcur, then call arena_bin_malloc_easy(). */
   2128  1.10.2.2  matt static void *
   2129  1.10.2.2  matt arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
   2130  1.10.2.2  matt {
   2131  1.10.2.2  matt 
   2132  1.10.2.2  matt 	bin->runcur = arena_bin_nonfull_run_get(arena, bin);
   2133  1.10.2.2  matt 	if (bin->runcur == NULL)
   2134  1.10.2.2  matt 		return (NULL);
   2135  1.10.2.2  matt 	assert(bin->runcur->magic == ARENA_RUN_MAGIC);
   2136  1.10.2.2  matt 	assert(bin->runcur->nfree > 0);
   2137  1.10.2.2  matt 
   2138  1.10.2.2  matt 	return (arena_bin_malloc_easy(arena, bin, bin->runcur));
   2139  1.10.2.2  matt }
   2140  1.10.2.2  matt 
   2141  1.10.2.2  matt /*
   2142  1.10.2.2  matt  * Calculate bin->run_size such that it meets the following constraints:
   2143  1.10.2.2  matt  *
   2144  1.10.2.2  matt  *   *) bin->run_size >= min_run_size
   2145  1.10.2.2  matt  *   *) bin->run_size <= arena_maxclass
   2146  1.10.2.2  matt  *   *) bin->run_size <= RUN_MAX_SMALL
   2147  1.10.2.2  matt  *   *) run header overhead <= RUN_MAX_OVRHD (or header overhead relaxed).
   2148  1.10.2.2  matt  *
   2149  1.10.2.2  matt  * bin->nregs, bin->regs_mask_nelms, and bin->reg0_offset are
   2150  1.10.2.2  matt  * also calculated here, since these settings are all interdependent.
   2151  1.10.2.2  matt  */
   2152  1.10.2.2  matt static size_t
   2153  1.10.2.2  matt arena_bin_run_size_calc(arena_bin_t *bin, size_t min_run_size)
   2154  1.10.2.2  matt {
   2155  1.10.2.2  matt 	size_t try_run_size, good_run_size;
   2156  1.10.2.2  matt 	unsigned good_nregs, good_mask_nelms, good_reg0_offset;
   2157  1.10.2.2  matt 	unsigned try_nregs, try_mask_nelms, try_reg0_offset;
   2158  1.10.2.2  matt 	float max_ovrhd = RUN_MAX_OVRHD;
   2159  1.10.2.2  matt 
   2160  1.10.2.2  matt 	assert(min_run_size >= pagesize);
   2161  1.10.2.2  matt 	assert(min_run_size <= arena_maxclass);
   2162  1.10.2.2  matt 	assert(min_run_size <= RUN_MAX_SMALL);
   2163  1.10.2.2  matt 
   2164  1.10.2.2  matt 	/*
   2165  1.10.2.2  matt 	 * Calculate known-valid settings before entering the run_size
   2166  1.10.2.2  matt 	 * expansion loop, so that the first part of the loop always copies
   2167  1.10.2.2  matt 	 * valid settings.
   2168  1.10.2.2  matt 	 *
   2169  1.10.2.2  matt 	 * The do..while loop iteratively reduces the number of regions until
   2170  1.10.2.2  matt 	 * the run header and the regions no longer overlap.  A closed formula
   2171  1.10.2.2  matt 	 * would be quite messy, since there is an interdependency between the
   2172  1.10.2.2  matt 	 * header's mask length and the number of regions.
   2173  1.10.2.2  matt 	 */
   2174  1.10.2.2  matt 	try_run_size = min_run_size;
   2175  1.10.2.2  matt 	try_nregs = (unsigned)(((try_run_size - sizeof(arena_run_t)) /
   2176  1.10.2.2  matt 	    bin->reg_size) + 1); /* Counter-act the first line of the loop. */
   2177  1.10.2.2  matt 	do {
   2178  1.10.2.2  matt 		try_nregs--;
   2179  1.10.2.2  matt 		try_mask_nelms = (try_nregs >> (SIZEOF_INT_2POW + 3)) +
   2180  1.10.2.2  matt 		    ((try_nregs & ((1 << (SIZEOF_INT_2POW + 3)) - 1)) ? 1 : 0);
   2181  1.10.2.2  matt 		try_reg0_offset = (unsigned)(try_run_size -
   2182  1.10.2.2  matt 		    (try_nregs * bin->reg_size));
   2183  1.10.2.2  matt 	} while (sizeof(arena_run_t) + (sizeof(unsigned) * (try_mask_nelms - 1))
   2184  1.10.2.2  matt 	    > try_reg0_offset);
   2185  1.10.2.2  matt 
   2186  1.10.2.2  matt 	/* run_size expansion loop. */
   2187  1.10.2.2  matt 	do {
   2188  1.10.2.2  matt 		/*
   2189  1.10.2.2  matt 		 * Copy valid settings before trying more aggressive settings.
   2190  1.10.2.2  matt 		 */
   2191  1.10.2.2  matt 		good_run_size = try_run_size;
   2192  1.10.2.2  matt 		good_nregs = try_nregs;
   2193  1.10.2.2  matt 		good_mask_nelms = try_mask_nelms;
   2194  1.10.2.2  matt 		good_reg0_offset = try_reg0_offset;
   2195  1.10.2.2  matt 
   2196  1.10.2.2  matt 		/* Try more aggressive settings. */
   2197  1.10.2.2  matt 		try_run_size += pagesize;
   2198  1.10.2.2  matt 		try_nregs = (unsigned)(((try_run_size - sizeof(arena_run_t)) /
   2199  1.10.2.2  matt 		    bin->reg_size) + 1); /* Counter-act try_nregs-- in loop. */
   2200  1.10.2.2  matt 		do {
   2201  1.10.2.2  matt 			try_nregs--;
   2202  1.10.2.2  matt 			try_mask_nelms = (try_nregs >> (SIZEOF_INT_2POW + 3)) +
   2203  1.10.2.2  matt 			    ((try_nregs & ((1 << (SIZEOF_INT_2POW + 3)) - 1)) ?
   2204  1.10.2.2  matt 			    1 : 0);
   2205  1.10.2.2  matt 			try_reg0_offset = (unsigned)(try_run_size - (try_nregs *
   2206  1.10.2.2  matt 			    bin->reg_size));
   2207  1.10.2.2  matt 		} while (sizeof(arena_run_t) + (sizeof(unsigned) *
   2208  1.10.2.2  matt 		    (try_mask_nelms - 1)) > try_reg0_offset);
   2209  1.10.2.2  matt 	} while (try_run_size <= arena_maxclass && try_run_size <= RUN_MAX_SMALL
   2210  1.10.2.2  matt 	    && max_ovrhd > RUN_MAX_OVRHD_RELAX / ((float)(bin->reg_size << 3))
   2211  1.10.2.2  matt 	    && ((float)(try_reg0_offset)) / ((float)(try_run_size)) >
   2212  1.10.2.2  matt 	    max_ovrhd);
   2213  1.10.2.2  matt 
   2214  1.10.2.2  matt 	assert(sizeof(arena_run_t) + (sizeof(unsigned) * (good_mask_nelms - 1))
   2215  1.10.2.2  matt 	    <= good_reg0_offset);
   2216  1.10.2.2  matt 	assert((good_mask_nelms << (SIZEOF_INT_2POW + 3)) >= good_nregs);
   2217  1.10.2.2  matt 
   2218  1.10.2.2  matt 	/* Copy final settings. */
   2219  1.10.2.2  matt 	bin->run_size = good_run_size;
   2220  1.10.2.2  matt 	bin->nregs = good_nregs;
   2221  1.10.2.2  matt 	bin->regs_mask_nelms = good_mask_nelms;
   2222  1.10.2.2  matt 	bin->reg0_offset = good_reg0_offset;
   2223  1.10.2.2  matt 
   2224  1.10.2.2  matt 	return (good_run_size);
   2225  1.10.2.2  matt }
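/*
 * Illustrative first pass, assuming a 4 KB page and, say, a 40 byte run
 * header, for reg_size == 512: the initial guess of 8 regions would leave
 * try_reg0_offset == 0 and overlap the header, so the inner do..while settles
 * on try_nregs == 7, try_mask_nelms == 1 and try_reg0_offset == 512.  The
 * outer loop then decides whether growing the run by another page shrinks
 * that 512 byte overhead enough to be worth the larger run.
 */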
   2226  1.10.2.2  matt 
   2227  1.10.2.2  matt static void *
   2228  1.10.2.2  matt arena_malloc(arena_t *arena, size_t size)
   2229  1.10.2.2  matt {
   2230  1.10.2.2  matt 	void *ret;
   2231  1.10.2.2  matt 
   2232  1.10.2.2  matt 	assert(arena != NULL);
   2233  1.10.2.2  matt 	assert(arena->magic == ARENA_MAGIC);
   2234  1.10.2.2  matt 	assert(size != 0);
   2235  1.10.2.2  matt 	assert(QUANTUM_CEILING(size) <= arena_maxclass);
   2236  1.10.2.2  matt 
   2237  1.10.2.2  matt 	if (size <= bin_maxclass) {
   2238  1.10.2.2  matt 		arena_bin_t *bin;
   2239  1.10.2.2  matt 		arena_run_t *run;
   2240  1.10.2.2  matt 
   2241  1.10.2.2  matt 		/* Small allocation. */
   2242  1.10.2.2  matt 
   2243  1.10.2.2  matt 		if (size < small_min) {
   2244  1.10.2.2  matt 			/* Tiny. */
   2245  1.10.2.2  matt 			size = pow2_ceil(size);
   2246  1.10.2.2  matt 			bin = &arena->bins[ffs((int)(size >> (TINY_MIN_2POW +
   2247  1.10.2.2  matt 			    1)))];
   2248  1.10.2.2  matt #if (!defined(NDEBUG) || defined(MALLOC_STATS))
   2249  1.10.2.2  matt 			/*
   2250  1.10.2.2  matt 			 * Bin calculation is always correct, but we may need
   2251  1.10.2.2  matt 			 * to fix size for the purposes of assertions and/or
   2252  1.10.2.2  matt 			 * stats accuracy.
   2253  1.10.2.2  matt 			 */
   2254  1.10.2.2  matt 			if (size < (1 << TINY_MIN_2POW))
   2255  1.10.2.2  matt 				size = (1 << TINY_MIN_2POW);
   2256  1.10.2.2  matt #endif
   2257  1.10.2.2  matt 		} else if (size <= small_max) {
   2258  1.10.2.2  matt 			/* Quantum-spaced. */
   2259  1.10.2.2  matt 			size = QUANTUM_CEILING(size);
   2260  1.10.2.2  matt 			bin = &arena->bins[ntbins + (size >> opt_quantum_2pow)
   2261  1.10.2.2  matt 			    - 1];
   2262  1.10.2.2  matt 		} else {
   2263  1.10.2.2  matt 			/* Sub-page. */
   2264  1.10.2.2  matt 			size = pow2_ceil(size);
   2265  1.10.2.2  matt 			bin = &arena->bins[ntbins + nqbins
   2266  1.10.2.2  matt 			    + (ffs((int)(size >> opt_small_max_2pow)) - 2)];
   2267  1.10.2.2  matt 		}
   2268  1.10.2.2  matt 		assert(size == bin->reg_size);
   2269  1.10.2.2  matt 
   2270  1.10.2.2  matt 		malloc_mutex_lock(&arena->mtx);
   2271  1.10.2.2  matt 		if ((run = bin->runcur) != NULL && run->nfree > 0)
   2272  1.10.2.2  matt 			ret = arena_bin_malloc_easy(arena, bin, run);
   2273  1.10.2.2  matt 		else
   2274  1.10.2.2  matt 			ret = arena_bin_malloc_hard(arena, bin);
   2275  1.10.2.2  matt 
   2276  1.10.2.2  matt 		if (ret == NULL) {
   2277  1.10.2.2  matt 			malloc_mutex_unlock(&arena->mtx);
   2278  1.10.2.2  matt 			return (NULL);
   2279  1.10.2.2  matt 		}
   2280  1.10.2.2  matt 
   2281  1.10.2.2  matt #ifdef MALLOC_STATS
   2282  1.10.2.2  matt 		bin->stats.nrequests++;
   2283  1.10.2.2  matt 		arena->stats.nmalloc_small++;
   2284  1.10.2.2  matt 		arena->stats.allocated_small += size;
   2285  1.10.2.2  matt #endif
   2286  1.10.2.2  matt 	} else {
   2287  1.10.2.2  matt 		/* Large allocation. */
   2288  1.10.2.2  matt 		size = PAGE_CEILING(size);
   2289  1.10.2.2  matt 		malloc_mutex_lock(&arena->mtx);
   2290  1.10.2.2  matt 		ret = (void *)arena_run_alloc(arena, size);
   2291  1.10.2.2  matt 		if (ret == NULL) {
   2292  1.10.2.2  matt 			malloc_mutex_unlock(&arena->mtx);
   2293  1.10.2.2  matt 			return (NULL);
   2294  1.10.2.2  matt 		}
   2295  1.10.2.2  matt #ifdef MALLOC_STATS
   2296  1.10.2.2  matt 		arena->stats.nmalloc_large++;
   2297  1.10.2.2  matt 		arena->stats.allocated_large += size;
   2298  1.10.2.2  matt #endif
   2299  1.10.2.2  matt 	}
   2300  1.10.2.2  matt 
   2301  1.10.2.2  matt 	malloc_mutex_unlock(&arena->mtx);
   2302  1.10.2.2  matt 
   2303  1.10.2.2  matt 	if (opt_junk)
   2304  1.10.2.2  matt 		memset(ret, 0xa5, size);
   2305  1.10.2.2  matt 	else if (opt_zero)
   2306  1.10.2.2  matt 		memset(ret, 0, size);
   2307  1.10.2.2  matt 	return (ret);
   2308  1.10.2.2  matt }
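
/*
 * Illustrative sketch (not part of the allocator): the three bin-index
 * formulas used above, evaluated for a few request sizes.  Build it on its
 * own.  The parameters below (TINY_MIN_2POW_X, quantum, small_max and the
 * derived small_min) are assumed example values; the real ones come from
 * the build and from MALLOC_OPTIONS.
 */
#include <stdio.h>
#include <strings.h>			/* ffs() */

#define	TINY_MIN_2POW_X		1U	/* assumed */
#define	QUANTUM_2POW_X		4U	/* assumed: quantum = 16 */
#define	SMALL_MAX_2POW_X	9U	/* assumed: small_max = 512 */

static unsigned
pow2_ceil_x(unsigned x)
{
	unsigned p = 1;

	while (p < x)
		p <<= 1;
	return (p);
}

int
main(void)
{
	unsigned quantum = 1U << QUANTUM_2POW_X;
	unsigned small_min = (quantum >> 1) + 1;	/* assumed boundary */
	unsigned small_max = 1U << SMALL_MAX_2POW_X;
	unsigned ntbins = QUANTUM_2POW_X - TINY_MIN_2POW_X;
	unsigned nqbins = small_max >> QUANTUM_2POW_X;
	unsigned sizes[] = { 3, 100, 513 };
	unsigned i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		unsigned size = sizes[i], bin;

		if (size < small_min) {
			/* Tiny: power-of-two spacing. */
			size = pow2_ceil_x(size);
			bin = ffs((int)(size >> (TINY_MIN_2POW_X + 1)));
		} else if (size <= small_max) {
			/* Quantum-spaced. */
			size = (size + quantum - 1) & ~(quantum - 1);
			bin = ntbins + (size >> QUANTUM_2POW_X) - 1;
		} else {
			/* Sub-page: power-of-two spacing above small_max. */
			size = pow2_ceil_x(size);
			bin = ntbins + nqbins +
			    (ffs((int)(size >> SMALL_MAX_2POW_X)) - 2);
		}
		/* Prints bins 1, 9 and 35 for these parameters. */
		printf("request %4u -> size %4u, bin %2u\n",
		    sizes[i], size, bin);
	}
	return (0);
}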
   2309  1.10.2.2  matt 
   2310  1.10.2.2  matt static inline void
   2311  1.10.2.2  matt arena_palloc_trim(arena_t *arena, arena_chunk_t *chunk, unsigned pageind,
   2312  1.10.2.2  matt     unsigned npages)
   2313  1.10.2.2  matt {
   2314  1.10.2.2  matt 	unsigned i;
   2315  1.10.2.2  matt 
   2316  1.10.2.2  matt 	assert(npages > 0);
   2317  1.10.2.2  matt 
   2318  1.10.2.2  matt 	/*
   2319  1.10.2.2  matt 	 * Modify the map such that arena_run_dalloc() sees the run as
   2320  1.10.2.2  matt 	 * separately allocated.
   2321  1.10.2.2  matt 	 */
   2322  1.10.2.2  matt 	for (i = 0; i < npages; i++) {
   2323  1.10.2.2  matt 		chunk->map[pageind + i].npages = npages;
   2324  1.10.2.2  matt 		chunk->map[pageind + i].pos = i;
   2325  1.10.2.2  matt 	}
   2326  1.10.2.2  matt 	arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)chunk + (pageind <<
   2327  1.10.2.2  matt 	    pagesize_2pow)), npages << pagesize_2pow);
   2328  1.10.2.2  matt }
   2329  1.10.2.2  matt 
   2330  1.10.2.2  matt /* Only handles large allocations that require more than page alignment. */
   2331  1.10.2.2  matt static void *
   2332  1.10.2.2  matt arena_palloc(arena_t *arena, size_t alignment, size_t size, size_t alloc_size)
   2333  1.10.2.2  matt {
   2334  1.10.2.2  matt 	void *ret;
   2335  1.10.2.2  matt 	size_t offset;
   2336  1.10.2.2  matt 	arena_chunk_t *chunk;
   2337  1.10.2.2  matt 	unsigned pageind, i, npages;
   2338  1.10.2.2  matt 
   2339  1.10.2.2  matt 	assert((size & pagesize_mask) == 0);
   2340  1.10.2.2  matt 	assert((alignment & pagesize_mask) == 0);
   2341  1.10.2.2  matt 
   2342  1.10.2.2  matt 	npages = (unsigned)(size >> pagesize_2pow);
   2343  1.10.2.2  matt 
   2344  1.10.2.2  matt 	malloc_mutex_lock(&arena->mtx);
   2345  1.10.2.2  matt 	ret = (void *)arena_run_alloc(arena, alloc_size);
   2346  1.10.2.2  matt 	if (ret == NULL) {
   2347  1.10.2.2  matt 		malloc_mutex_unlock(&arena->mtx);
   2348  1.10.2.2  matt 		return (NULL);
   2349  1.10.2.2  matt 	}
   2350  1.10.2.2  matt 
   2351  1.10.2.2  matt 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
   2352  1.10.2.2  matt 
   2353  1.10.2.2  matt 	offset = (uintptr_t)ret & (alignment - 1);
   2354  1.10.2.2  matt 	assert((offset & pagesize_mask) == 0);
   2355  1.10.2.2  matt 	assert(offset < alloc_size);
   2356  1.10.2.2  matt 	if (offset == 0) {
   2357  1.10.2.2  matt 		pageind = (unsigned)(((uintptr_t)ret - (uintptr_t)chunk) >>
   2358  1.10.2.2  matt 		    pagesize_2pow);
   2359  1.10.2.2  matt 
   2360  1.10.2.2  matt 		/* Update the map for the run to be kept. */
   2361  1.10.2.2  matt 		for (i = 0; i < npages; i++) {
   2362  1.10.2.2  matt 			chunk->map[pageind + i].npages = npages;
   2363  1.10.2.2  matt 			assert(chunk->map[pageind + i].pos == i);
   2364  1.10.2.2  matt 		}
   2365  1.10.2.2  matt 
   2366  1.10.2.2  matt 		/* Trim trailing space. */
   2367  1.10.2.2  matt 		arena_palloc_trim(arena, chunk, pageind + npages,
   2368  1.10.2.2  matt 		    (unsigned)((alloc_size - size) >> pagesize_2pow));
   2369  1.10.2.2  matt 	} else {
   2370  1.10.2.2  matt 		size_t leadsize, trailsize;
   2371  1.10.2.2  matt 
   2372  1.10.2.2  matt 		leadsize = alignment - offset;
   2373  1.10.2.2  matt 		ret = (void *)((uintptr_t)ret + leadsize);
   2374  1.10.2.2  matt 		pageind = (unsigned)(((uintptr_t)ret - (uintptr_t)chunk) >>
   2375  1.10.2.2  matt 		    pagesize_2pow);
   2376  1.10.2.2  matt 
   2377  1.10.2.2  matt 		/* Update the map for the run to be kept. */
   2378  1.10.2.2  matt 		for (i = 0; i < npages; i++) {
   2379  1.10.2.2  matt 			chunk->map[pageind + i].npages = npages;
   2380  1.10.2.2  matt 			chunk->map[pageind + i].pos = i;
   2381  1.10.2.2  matt 		}
   2382  1.10.2.2  matt 
   2383  1.10.2.2  matt 		/* Trim leading space. */
   2384  1.10.2.2  matt 		arena_palloc_trim(arena, chunk,
   2385  1.10.2.2  matt 		    (unsigned)(pageind - (leadsize >> pagesize_2pow)),
   2386  1.10.2.2  matt 		    (unsigned)(leadsize >> pagesize_2pow));
   2387  1.10.2.2  matt 
   2388  1.10.2.2  matt 		trailsize = alloc_size - leadsize - size;
   2389  1.10.2.2  matt 		if (trailsize != 0) {
   2390  1.10.2.2  matt 			/* Trim trailing space. */
   2391  1.10.2.2  matt 			assert(trailsize < alloc_size);
   2392  1.10.2.2  matt 			arena_palloc_trim(arena, chunk, pageind + npages,
   2393  1.10.2.2  matt 			    (unsigned)(trailsize >> pagesize_2pow));
   2394  1.10.2.2  matt 		}
   2395  1.10.2.2  matt 	}
   2396  1.10.2.2  matt 
   2397  1.10.2.2  matt #ifdef MALLOC_STATS
   2398  1.10.2.2  matt 	arena->stats.nmalloc_large++;
   2399  1.10.2.2  matt 	arena->stats.allocated_large += size;
   2400  1.10.2.2  matt #endif
   2401  1.10.2.2  matt 	malloc_mutex_unlock(&arena->mtx);
   2402  1.10.2.2  matt 
   2403  1.10.2.2  matt 	if (opt_junk)
   2404  1.10.2.2  matt 		memset(ret, 0xa5, size);
   2405  1.10.2.2  matt 	else if (opt_zero)
   2406  1.10.2.2  matt 		memset(ret, 0, size);
   2407  1.10.2.2  matt 	return (ret);
   2408  1.10.2.2  matt }
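
/*
 * Illustrative sketch (not part of the allocator): the leading/trailing
 * trim arithmetic above, with concrete numbers.  Build it on its own.  The
 * page size and the example run address are assumptions chosen for the
 * example.
 */
#include <inttypes.h>
#include <stdio.h>

int
main(void)
{
	size_t pagesize = 4096;			/* assumed */
	size_t alignment = 4 * pagesize;	/* 16KB-aligned request */
	size_t size = 4 * pagesize;		/* four-page payload */
	/* size >= alignment, so the over-sized run is size + alignment - pagesize. */
	size_t alloc_size = size + alignment - pagesize;
	uintptr_t run = 0x20003000;		/* assumed page-aligned run address */
	size_t offset = run & (alignment - 1);

	if (offset == 0) {
		/* Already aligned: only the tail needs trimming. */
		printf("trim %zu trailing bytes\n", alloc_size - size);
	} else {
		size_t leadsize = alignment - offset;
		size_t trailsize = alloc_size - leadsize - size;

		/* Prints: lead trim 4096, keep 16384 at 0x20004000, trail trim 8192 */
		printf("lead trim %zu, keep %zu at %#" PRIxPTR ", trail trim %zu\n",
		    leadsize, size, run + leadsize, trailsize);
	}
	return (0);
}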
   2409  1.10.2.2  matt 
   2410  1.10.2.2  matt /* Return the size of the allocation pointed to by ptr. */
   2411  1.10.2.2  matt static size_t
   2412  1.10.2.2  matt arena_salloc(const void *ptr)
   2413  1.10.2.2  matt {
   2414  1.10.2.2  matt 	size_t ret;
   2415  1.10.2.2  matt 	arena_chunk_t *chunk;
   2416  1.10.2.2  matt 	arena_chunk_map_t *mapelm;
   2417  1.10.2.2  matt 	unsigned pageind;
   2418  1.10.2.2  matt 
   2419  1.10.2.2  matt 	assert(ptr != NULL);
   2420  1.10.2.2  matt 	assert(CHUNK_ADDR2BASE(ptr) != ptr);
   2421  1.10.2.2  matt 
   2422  1.10.2.2  matt 	/*
   2423  1.10.2.2  matt 	 * No arena data structures that we query here can change in a way that
   2424  1.10.2.2  matt 	 * affects this function, so we don't need to lock.
   2425  1.10.2.2  matt 	 */
   2426  1.10.2.2  matt 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
   2427  1.10.2.2  matt 	pageind = (unsigned)(((uintptr_t)ptr - (uintptr_t)chunk) >>
   2428  1.10.2.2  matt 	    pagesize_2pow);
   2429  1.10.2.2  matt 	mapelm = &chunk->map[pageind];
   2430  1.10.2.2  matt 	if (mapelm->pos != 0 || ptr != (char *)((uintptr_t)chunk) + (pageind <<
   2431  1.10.2.2  matt 	    pagesize_2pow)) {
   2432  1.10.2.2  matt 		arena_run_t *run;
   2433  1.10.2.2  matt 
   2434  1.10.2.2  matt 		pageind -= mapelm->pos;
   2435  1.10.2.2  matt 
   2436  1.10.2.2  matt 		run = (arena_run_t *)((uintptr_t)chunk + (pageind <<
   2437  1.10.2.2  matt 		    pagesize_2pow));
   2438  1.10.2.2  matt 		assert(run->magic == ARENA_RUN_MAGIC);
   2439  1.10.2.2  matt 		ret = run->bin->reg_size;
   2440  1.10.2.2  matt 	} else
   2441  1.10.2.2  matt 		ret = mapelm->npages << pagesize_2pow;
   2442  1.10.2.2  matt 
   2443  1.10.2.2  matt 	return (ret);
   2444  1.10.2.2  matt }
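
/*
 * Illustrative sketch (not part of the allocator): how the pos field in the
 * chunk map leads from an interior page of a run back to the run header.
 * Build it on its own; the map contents below are made up for the example,
 * not a dump of a real chunk.
 */
#include <stdio.h>

int
main(void)
{
	/* pos of each page within its run; a three-page run starts at page 4. */
	unsigned pos[] = { 0, 0, 0, 0, 0, 1, 2, 0 };
	unsigned pageind = 6;			/* page being queried */
	unsigned run_pageind = pageind - pos[pageind];

	/* Prints: page 6 belongs to the run starting at page 4 */
	printf("page %u belongs to the run starting at page %u\n",
	    pageind, run_pageind);
	return (0);
}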
   2445  1.10.2.2  matt 
   2446  1.10.2.2  matt static void *
   2447  1.10.2.2  matt arena_ralloc(void *ptr, size_t size, size_t oldsize)
   2448  1.10.2.2  matt {
   2449  1.10.2.2  matt 	void *ret;
   2450  1.10.2.2  matt 
   2451  1.10.2.2  matt 	/* Avoid moving the allocation if the size class would not change. */
   2452  1.10.2.2  matt 	if (size < small_min) {
   2453  1.10.2.2  matt 		if (oldsize < small_min &&
   2454  1.10.2.2  matt 		    ffs((int)(pow2_ceil(size) >> (TINY_MIN_2POW + 1)))
   2455  1.10.2.2  matt 		    == ffs((int)(pow2_ceil(oldsize) >> (TINY_MIN_2POW + 1))))
   2456  1.10.2.2  matt 			goto IN_PLACE;
   2457  1.10.2.2  matt 	} else if (size <= small_max) {
   2458  1.10.2.2  matt 		if (oldsize >= small_min && oldsize <= small_max &&
   2459  1.10.2.2  matt 		    (QUANTUM_CEILING(size) >> opt_quantum_2pow)
   2460  1.10.2.2  matt 		    == (QUANTUM_CEILING(oldsize) >> opt_quantum_2pow))
   2461  1.10.2.2  matt 			goto IN_PLACE;
   2462  1.10.2.2  matt 	} else {
   2463  1.10.2.2  matt 		/*
   2464  1.10.2.2  matt 		 * We make no attempt to resize runs here, though it would be
   2465  1.10.2.2  matt 		 * possible to do so.
   2466  1.10.2.2  matt 		 */
   2467  1.10.2.2  matt 		if (oldsize > small_max && PAGE_CEILING(size) == oldsize)
   2468  1.10.2.2  matt 			goto IN_PLACE;
   2469  1.10.2.2  matt 	}
   2470  1.10.2.2  matt 
   2471  1.10.2.2  matt 	/*
   2472  1.10.2.2  matt 	 * If we get here, then size and oldsize are different enough that we
   2473  1.10.2.2  matt 	 * need to use a different size class.  In that case, fall back to
   2474  1.10.2.2  matt 	 * allocating new space and copying.
   2475  1.10.2.2  matt 	 */
   2476  1.10.2.2  matt 	ret = arena_malloc(choose_arena(), size);
   2477  1.10.2.2  matt 	if (ret == NULL)
   2478  1.10.2.2  matt 		return (NULL);
   2479  1.10.2.2  matt 
   2480  1.10.2.2  matt 	/* Junk/zero-filling were already done by arena_malloc(). */
   2481  1.10.2.2  matt 	if (size < oldsize)
   2482  1.10.2.2  matt 		memcpy(ret, ptr, size);
   2483  1.10.2.2  matt 	else
   2484  1.10.2.2  matt 		memcpy(ret, ptr, oldsize);
   2485  1.10.2.2  matt 	idalloc(ptr);
   2486  1.10.2.2  matt 	return (ret);
   2487  1.10.2.2  matt IN_PLACE:
   2488  1.10.2.2  matt 	if (opt_junk && size < oldsize)
   2489  1.10.2.2  matt 		memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize - size);
   2490  1.10.2.2  matt 	else if (opt_zero && size > oldsize)
   2491  1.10.2.2  matt 		memset((void *)((uintptr_t)ptr + oldsize), 0, size - oldsize);
   2492  1.10.2.2  matt 	return (ptr);
   2493  1.10.2.2  matt }
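
/*
 * Illustrative sketch (not part of the allocator): the "same size class"
 * test that lets arena_ralloc() return in place for quantum-spaced sizes.
 * Build it on its own; quantum is an assumed example value.
 */
#include <stdio.h>

int
main(void)
{
	unsigned quantum_2pow = 4;		/* assumed: quantum = 16 */
	unsigned quantum = 1U << quantum_2pow;
	unsigned oldsize = 100, newsize = 110;
	unsigned oldclass, newclass;

	/* QUANTUM_CEILING(x) >> quantum_2pow identifies the bin. */
	oldclass = ((oldsize + quantum - 1) & ~(quantum - 1)) >> quantum_2pow;
	newclass = ((newsize + quantum - 1) & ~(quantum - 1)) >> quantum_2pow;

	/* Both round up to 112 bytes, so the reallocation is a no-op. */
	printf("old class %u, new class %u -> %s\n", oldclass, newclass,
	    oldclass == newclass ? "resize in place" : "allocate and copy");
	return (0);
}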
   2494  1.10.2.2  matt 
   2495  1.10.2.2  matt static void
   2496  1.10.2.2  matt arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr)
   2497  1.10.2.2  matt {
   2498  1.10.2.2  matt 	unsigned pageind;
   2499  1.10.2.2  matt 	arena_chunk_map_t *mapelm;
   2500  1.10.2.2  matt 	size_t size;
   2501  1.10.2.2  matt 
   2502  1.10.2.2  matt 	assert(arena != NULL);
   2503  1.10.2.2  matt 	assert(arena->magic == ARENA_MAGIC);
   2504  1.10.2.2  matt 	assert(chunk->arena == arena);
   2505  1.10.2.2  matt 	assert(ptr != NULL);
   2506  1.10.2.2  matt 	assert(CHUNK_ADDR2BASE(ptr) != ptr);
   2507  1.10.2.2  matt 
   2508  1.10.2.2  matt 	pageind = (unsigned)(((uintptr_t)ptr - (uintptr_t)chunk) >>
   2509  1.10.2.2  matt 	    pagesize_2pow);
   2510  1.10.2.2  matt 	mapelm = &chunk->map[pageind];
   2511  1.10.2.2  matt 	if (mapelm->pos != 0 || ptr != (char *)((uintptr_t)chunk) + (pageind <<
   2512  1.10.2.2  matt 	    pagesize_2pow)) {
   2513  1.10.2.2  matt 		arena_run_t *run;
   2514  1.10.2.2  matt 		arena_bin_t *bin;
   2515  1.10.2.2  matt 
   2516  1.10.2.2  matt 		/* Small allocation. */
   2517  1.10.2.2  matt 
   2518  1.10.2.2  matt 		pageind -= mapelm->pos;
   2519  1.10.2.2  matt 
   2520  1.10.2.2  matt 		run = (arena_run_t *)((uintptr_t)chunk + (pageind <<
   2521  1.10.2.2  matt 		    pagesize_2pow));
   2522  1.10.2.2  matt 		assert(run->magic == ARENA_RUN_MAGIC);
   2523  1.10.2.2  matt 		bin = run->bin;
   2524  1.10.2.2  matt 		size = bin->reg_size;
   2525  1.10.2.2  matt 
   2526  1.10.2.2  matt 		if (opt_junk)
   2527  1.10.2.2  matt 			memset(ptr, 0x5a, size);
   2528  1.10.2.2  matt 
   2529  1.10.2.2  matt 		malloc_mutex_lock(&arena->mtx);
   2530  1.10.2.2  matt 		arena_run_reg_dalloc(run, bin, ptr, size);
   2531  1.10.2.2  matt 		run->nfree++;
   2532  1.10.2.2  matt 
   2533  1.10.2.2  matt 		if (run->nfree == bin->nregs) {
   2534  1.10.2.2  matt 			/* Deallocate run. */
   2535  1.10.2.2  matt 			if (run == bin->runcur)
   2536  1.10.2.2  matt 				bin->runcur = NULL;
   2537  1.10.2.2  matt 			else if (bin->nregs != 1) {
   2538  1.10.2.2  matt 				/*
   2539  1.10.2.2  matt 				 * This block's conditional is necessary because
   2540  1.10.2.2  matt 				 * if the run only contains one region, then it
   2541  1.10.2.2  matt 				 * never gets inserted into the non-full runs
   2542  1.10.2.2  matt 				 * tree.
   2543  1.10.2.2  matt 				 */
   2544  1.10.2.2  matt 				/* LINTED */
   2545  1.10.2.2  matt 				RB_REMOVE(arena_run_tree_s, &bin->runs, run);
   2546  1.10.2.2  matt 			}
   2547  1.10.2.2  matt #ifdef MALLOC_DEBUG
   2548  1.10.2.2  matt 			run->magic = 0;
   2549  1.10.2.2  matt #endif
   2550  1.10.2.2  matt 			arena_run_dalloc(arena, run, bin->run_size);
   2551  1.10.2.2  matt #ifdef MALLOC_STATS
   2552  1.10.2.2  matt 			bin->stats.curruns--;
   2553  1.10.2.2  matt #endif
   2554  1.10.2.2  matt 		} else if (run->nfree == 1 && run != bin->runcur) {
   2555  1.10.2.2  matt 			/*
   2556  1.10.2.2  matt 			 * Make sure that bin->runcur always refers to the
   2557  1.10.2.2  matt 			 * lowest non-full run, if one exists.
   2558  1.10.2.2  matt 			 */
   2559  1.10.2.2  matt 			if (bin->runcur == NULL)
   2560  1.10.2.2  matt 				bin->runcur = run;
   2561  1.10.2.2  matt 			else if ((uintptr_t)run < (uintptr_t)bin->runcur) {
   2562  1.10.2.2  matt 				/* Switch runcur. */
   2563  1.10.2.2  matt 				if (bin->runcur->nfree > 0) {
   2564  1.10.2.2  matt 					/* Insert runcur. */
   2565  1.10.2.2  matt 					/* LINTED */
   2566  1.10.2.2  matt 					RB_INSERT(arena_run_tree_s, &bin->runs,
   2567  1.10.2.2  matt 					    bin->runcur);
   2568  1.10.2.2  matt 				}
   2569  1.10.2.2  matt 				bin->runcur = run;
   2570  1.10.2.2  matt 			} else {
   2571  1.10.2.2  matt 				/* LINTED */
   2572  1.10.2.2  matt 				RB_INSERT(arena_run_tree_s, &bin->runs, run);
   2573  1.10.2.2  matt 			}
   2574  1.10.2.2  matt 		}
   2575  1.10.2.2  matt #ifdef MALLOC_STATS
   2576  1.10.2.2  matt 		arena->stats.allocated_small -= size;
   2577  1.10.2.2  matt 		arena->stats.ndalloc_small++;
   2578  1.10.2.2  matt #endif
   2579  1.10.2.2  matt 	} else {
   2580  1.10.2.2  matt 		/* Large allocation. */
   2581  1.10.2.2  matt 
   2582  1.10.2.2  matt 		size = mapelm->npages << pagesize_2pow;
   2583  1.10.2.2  matt 		assert((((uintptr_t)ptr) & pagesize_mask) == 0);
   2584  1.10.2.2  matt 
   2585  1.10.2.2  matt 		if (opt_junk)
   2586  1.10.2.2  matt 			memset(ptr, 0x5a, size);
   2587  1.10.2.2  matt 
   2588  1.10.2.2  matt 		malloc_mutex_lock(&arena->mtx);
   2589  1.10.2.2  matt 		arena_run_dalloc(arena, (arena_run_t *)ptr, size);
   2590  1.10.2.2  matt #ifdef MALLOC_STATS
   2591  1.10.2.2  matt 		arena->stats.allocated_large -= size;
   2592  1.10.2.2  matt 		arena->stats.ndalloc_large++;
   2593  1.10.2.2  matt #endif
   2594  1.10.2.2  matt 	}
   2595  1.10.2.2  matt 
   2596  1.10.2.2  matt 	malloc_mutex_unlock(&arena->mtx);
   2597  1.10.2.2  matt }
   2598  1.10.2.2  matt 
   2599  1.10.2.2  matt static bool
   2600  1.10.2.2  matt arena_new(arena_t *arena)
   2601  1.10.2.2  matt {
   2602  1.10.2.2  matt 	unsigned i;
   2603  1.10.2.2  matt 	arena_bin_t *bin;
   2604  1.10.2.2  matt 	size_t prev_run_size;
   2605  1.10.2.2  matt 
   2606  1.10.2.2  matt 	malloc_mutex_init(&arena->mtx);
   2607  1.10.2.2  matt 
   2608  1.10.2.2  matt #ifdef MALLOC_STATS
   2609  1.10.2.2  matt 	memset(&arena->stats, 0, sizeof(arena_stats_t));
   2610  1.10.2.2  matt #endif
   2611  1.10.2.2  matt 
   2612  1.10.2.2  matt 	/* Initialize chunks. */
   2613  1.10.2.2  matt 	RB_INIT(&arena->chunks);
   2614  1.10.2.2  matt 	arena->spare = NULL;
   2615  1.10.2.2  matt 
   2616  1.10.2.2  matt 	/* Initialize bins. */
   2617  1.10.2.2  matt 	prev_run_size = pagesize;
   2618  1.10.2.2  matt 
   2619  1.10.2.2  matt 	/* (2^n)-spaced tiny bins. */
   2620  1.10.2.2  matt 	for (i = 0; i < ntbins; i++) {
   2621  1.10.2.2  matt 		bin = &arena->bins[i];
   2622  1.10.2.2  matt 		bin->runcur = NULL;
   2623  1.10.2.2  matt 		RB_INIT(&bin->runs);
   2624  1.10.2.2  matt 
   2625  1.10.2.2  matt 		bin->reg_size = (1 << (TINY_MIN_2POW + i));
   2626  1.10.2.2  matt 		prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
   2627  1.10.2.2  matt 
   2628  1.10.2.2  matt #ifdef MALLOC_STATS
   2629  1.10.2.2  matt 		memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
   2630  1.10.2.2  matt #endif
   2631  1.10.2.2  matt 	}
   2632  1.10.2.2  matt 
   2633  1.10.2.2  matt 	/* Quantum-spaced bins. */
   2634  1.10.2.2  matt 	for (; i < ntbins + nqbins; i++) {
   2635  1.10.2.2  matt 		bin = &arena->bins[i];
   2636  1.10.2.2  matt 		bin->runcur = NULL;
   2637  1.10.2.2  matt 		RB_INIT(&bin->runs);
   2638  1.10.2.2  matt 
   2639  1.10.2.2  matt 		bin->reg_size = quantum * (i - ntbins + 1);
   2640  1.10.2.2  matt 		/* pow2_size = pow2_ceil(quantum * (i - ntbins + 1)); */
   2643  1.10.2.2  matt 		prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
   2644  1.10.2.2  matt 
   2645  1.10.2.2  matt #ifdef MALLOC_STATS
   2646  1.10.2.2  matt 		memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
   2647  1.10.2.2  matt #endif
   2648  1.10.2.2  matt 	}
   2649  1.10.2.2  matt 
   2650  1.10.2.2  matt 	/* (2^n)-spaced sub-page bins. */
   2651  1.10.2.2  matt 	for (; i < ntbins + nqbins + nsbins; i++) {
   2652  1.10.2.2  matt 		bin = &arena->bins[i];
   2653  1.10.2.2  matt 		bin->runcur = NULL;
   2654  1.10.2.2  matt 		RB_INIT(&bin->runs);
   2655  1.10.2.2  matt 
   2656  1.10.2.2  matt 		bin->reg_size = (small_max << (i - (ntbins + nqbins) + 1));
   2657  1.10.2.2  matt 
   2658  1.10.2.2  matt 		prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
   2659  1.10.2.2  matt 
   2660  1.10.2.2  matt #ifdef MALLOC_STATS
   2661  1.10.2.2  matt 		memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
   2662  1.10.2.2  matt #endif
   2663  1.10.2.2  matt 	}
   2664  1.10.2.2  matt 
   2665  1.10.2.2  matt #ifdef MALLOC_DEBUG
   2666  1.10.2.2  matt 	arena->magic = ARENA_MAGIC;
   2667  1.10.2.2  matt #endif
   2668  1.10.2.2  matt 
   2669  1.10.2.2  matt 	return (false);
   2670  1.10.2.2  matt }
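
/*
 * Illustrative sketch (not part of the allocator): the bin layout that the
 * three loops above produce, for assumed example parameters (TINY_MIN_2POW
 * = 1, quantum = 16, small_max = 512, pagesize = 4096).  Build it on its
 * own; real values vary with the platform and MALLOC_OPTIONS.
 */
#include <stdio.h>

int
main(void)
{
	unsigned tiny_min_2pow = 1, quantum_2pow = 4, small_max_2pow = 9;
	unsigned pagesize_2pow = 12;
	unsigned ntbins = quantum_2pow - tiny_min_2pow;
	unsigned nqbins = (1U << small_max_2pow) >> quantum_2pow;
	unsigned nsbins = pagesize_2pow - small_max_2pow - 1;
	unsigned i;

	/* Tiny bins: 2, 4, 8. */
	for (i = 0; i < ntbins; i++)
		printf("bin %2u: tiny,     reg_size %4u\n", i,
		    1U << (tiny_min_2pow + i));
	/* Quantum-spaced bins: 16, 32, ..., 512. */
	for (; i < ntbins + nqbins; i++)
		printf("bin %2u: quantum,  reg_size %4u\n", i,
		    (1U << quantum_2pow) * (i - ntbins + 1));
	/* Sub-page bins: 1024, 2048. */
	for (; i < ntbins + nqbins + nsbins; i++)
		printf("bin %2u: sub-page, reg_size %4u\n", i,
		    (1U << small_max_2pow) << (i - (ntbins + nqbins) + 1));
	return (0);
}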
   2671  1.10.2.2  matt 
   2672  1.10.2.2  matt /* Create a new arena and insert it into the arenas array at index ind. */
   2673  1.10.2.2  matt static arena_t *
   2674  1.10.2.2  matt arenas_extend(unsigned ind)
   2675  1.10.2.2  matt {
   2676  1.10.2.2  matt 	arena_t *ret;
   2677  1.10.2.2  matt 
   2678  1.10.2.2  matt 	/* Allocate enough space for trailing bins. */
   2679  1.10.2.2  matt 	ret = (arena_t *)base_alloc(sizeof(arena_t)
   2680  1.10.2.2  matt 	    + (sizeof(arena_bin_t) * (ntbins + nqbins + nsbins - 1)));
   2681  1.10.2.2  matt 	if (ret != NULL && arena_new(ret) == false) {
   2682  1.10.2.2  matt 		arenas[ind] = ret;
   2683  1.10.2.2  matt 		return (ret);
   2684  1.10.2.2  matt 	}
   2685  1.10.2.2  matt 	/* Only reached if there is an OOM error. */
   2686  1.10.2.2  matt 
   2687  1.10.2.2  matt 	/*
   2688  1.10.2.2  matt 	 * OOM here is quite inconvenient to propagate, since dealing with it
   2689  1.10.2.2  matt 	 * would require a check for failure in the fast path.  Instead, punt
   2690  1.10.2.2  matt 	 * by using arenas[0].  In practice, this is an extremely unlikely
   2691  1.10.2.2  matt 	 * failure.
   2692  1.10.2.2  matt 	 */
   2693  1.10.2.3  matt 	_malloc_message(getprogname(),
   2694  1.10.2.2  matt 	    ": (malloc) Error initializing arena\n", "", "");
   2695  1.10.2.2  matt 	if (opt_abort)
   2696  1.10.2.2  matt 		abort();
   2697  1.10.2.2  matt 
   2698  1.10.2.2  matt 	return (arenas[0]);
   2699  1.10.2.2  matt }
   2700  1.10.2.2  matt 
   2701  1.10.2.2  matt /*
   2702  1.10.2.2  matt  * End arena.
   2703  1.10.2.2  matt  */
   2704  1.10.2.2  matt /******************************************************************************/
   2705  1.10.2.2  matt /*
   2706  1.10.2.2  matt  * Begin general internal functions.
   2707  1.10.2.2  matt  */
   2708  1.10.2.2  matt 
   2709  1.10.2.2  matt static void *
   2710  1.10.2.2  matt huge_malloc(size_t size)
   2711  1.10.2.2  matt {
   2712  1.10.2.2  matt 	void *ret;
   2713  1.10.2.2  matt 	size_t csize;
   2714  1.10.2.2  matt 	chunk_node_t *node;
   2715  1.10.2.2  matt 
   2716  1.10.2.2  matt 	/* Allocate one or more contiguous chunks for this request. */
   2717  1.10.2.2  matt 
   2718  1.10.2.2  matt 	csize = CHUNK_CEILING(size);
   2719  1.10.2.2  matt 	if (csize == 0) {
   2720  1.10.2.2  matt 		/* size is large enough to cause size_t wrap-around. */
   2721  1.10.2.2  matt 		return (NULL);
   2722  1.10.2.2  matt 	}
   2723  1.10.2.2  matt 
   2724  1.10.2.2  matt 	/* Allocate a chunk node with which to track the chunk. */
   2725  1.10.2.2  matt 	node = base_chunk_node_alloc();
   2726  1.10.2.2  matt 	if (node == NULL)
   2727  1.10.2.2  matt 		return (NULL);
   2728  1.10.2.2  matt 
   2729  1.10.2.2  matt 	ret = chunk_alloc(csize);
   2730  1.10.2.2  matt 	if (ret == NULL) {
   2731  1.10.2.2  matt 		base_chunk_node_dealloc(node);
   2732  1.10.2.2  matt 		return (NULL);
   2733  1.10.2.2  matt 	}
   2734  1.10.2.2  matt 
   2735  1.10.2.2  matt 	/* Insert node into huge. */
   2736  1.10.2.2  matt 	node->chunk = ret;
   2737  1.10.2.2  matt 	node->size = csize;
   2738  1.10.2.2  matt 
   2739  1.10.2.2  matt 	malloc_mutex_lock(&chunks_mtx);
   2740  1.10.2.2  matt 	RB_INSERT(chunk_tree_s, &huge, node);
   2741  1.10.2.2  matt #ifdef MALLOC_STATS
   2742  1.10.2.2  matt 	huge_nmalloc++;
   2743  1.10.2.2  matt 	huge_allocated += csize;
   2744  1.10.2.2  matt #endif
   2745  1.10.2.2  matt 	malloc_mutex_unlock(&chunks_mtx);
   2746  1.10.2.2  matt 
   2747  1.10.2.2  matt 	if (opt_junk)
   2748  1.10.2.2  matt 		memset(ret, 0xa5, csize);
   2749  1.10.2.2  matt 	else if (opt_zero)
   2750  1.10.2.2  matt 		memset(ret, 0, csize);
   2751  1.10.2.2  matt 
   2752  1.10.2.2  matt 	return (ret);
   2753  1.10.2.2  matt }
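
/*
 * Illustrative sketch (not part of the allocator): why the csize == 0 test
 * above catches size_t wrap-around in CHUNK_CEILING().  Build it on its
 * own; the 1MB chunk size is an assumed example value.
 */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	size_t chunksize_mask = (1UL << 20) - 1;	/* assumed 1MB chunks */
	size_t ok = 3 * 1024 * 1024 + 1;
	size_t too_big = SIZE_MAX - 10;
	size_t csize_ok = (ok + chunksize_mask) & ~chunksize_mask;
	size_t csize_bad = (too_big + chunksize_mask) & ~chunksize_mask;

	/* The first rounds up to four chunks' worth; the second wraps to 0. */
	printf("%zu -> csize %zu\n", ok, csize_ok);
	printf("%zu -> csize %zu (rejected)\n", too_big, csize_bad);
	return (0);
}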
   2754  1.10.2.2  matt 
   2755  1.10.2.2  matt /* Only handles large allocations that require more than chunk alignment. */
   2756  1.10.2.2  matt static void *
   2757  1.10.2.2  matt huge_palloc(size_t alignment, size_t size)
   2758  1.10.2.2  matt {
   2759  1.10.2.2  matt 	void *ret;
   2760  1.10.2.2  matt 	size_t alloc_size, chunk_size, offset;
   2761  1.10.2.2  matt 	chunk_node_t *node;
   2762  1.10.2.2  matt 
   2763  1.10.2.2  matt 	/*
   2764  1.10.2.2  matt 	 * This allocation requires alignment that is even larger than chunk
   2765  1.10.2.2  matt 	 * alignment.  This means that huge_malloc() isn't good enough.
   2766  1.10.2.2  matt 	 *
   2767  1.10.2.2  matt 	 * Allocate almost twice as many chunks as are demanded by the size or
   2768  1.10.2.2  matt 	 * alignment, in order to ensure the alignment can be achieved, then
   2769  1.10.2.2  matt 	 * unmap leading and trailing chunks.
   2770  1.10.2.2  matt 	 */
   2771  1.10.2.2  matt 	assert(alignment >= chunksize);
   2772  1.10.2.2  matt 
   2773  1.10.2.2  matt 	chunk_size = CHUNK_CEILING(size);
   2774  1.10.2.2  matt 
   2775  1.10.2.2  matt 	if (size >= alignment)
   2776  1.10.2.2  matt 		alloc_size = chunk_size + alignment - chunksize;
   2777  1.10.2.2  matt 	else
   2778  1.10.2.2  matt 		alloc_size = (alignment << 1) - chunksize;
   2779  1.10.2.2  matt 
   2780  1.10.2.2  matt 	/* Allocate a chunk node with which to track the chunk. */
   2781  1.10.2.2  matt 	node = base_chunk_node_alloc();
   2782  1.10.2.2  matt 	if (node == NULL)
   2783  1.10.2.2  matt 		return (NULL);
   2784  1.10.2.2  matt 
   2785  1.10.2.2  matt 	ret = chunk_alloc(alloc_size);
   2786  1.10.2.2  matt 	if (ret == NULL) {
   2787  1.10.2.2  matt 		base_chunk_node_dealloc(node);
   2788  1.10.2.2  matt 		return (NULL);
   2789  1.10.2.2  matt 	}
   2790  1.10.2.2  matt 
   2791  1.10.2.2  matt 	offset = (uintptr_t)ret & (alignment - 1);
   2792  1.10.2.2  matt 	assert((offset & chunksize_mask) == 0);
   2793  1.10.2.2  matt 	assert(offset < alloc_size);
   2794  1.10.2.2  matt 	if (offset == 0) {
   2795  1.10.2.2  matt 		/* Trim trailing space. */
   2796  1.10.2.2  matt 		chunk_dealloc((void *)((uintptr_t)ret + chunk_size), alloc_size
   2797  1.10.2.2  matt 		    - chunk_size);
   2798  1.10.2.2  matt 	} else {
   2799  1.10.2.2  matt 		size_t trailsize;
   2800  1.10.2.2  matt 
   2801  1.10.2.2  matt 		/* Trim leading space. */
   2802  1.10.2.2  matt 		chunk_dealloc(ret, alignment - offset);
   2803  1.10.2.2  matt 
   2804  1.10.2.2  matt 		ret = (void *)((uintptr_t)ret + (alignment - offset));
   2805  1.10.2.2  matt 
   2806  1.10.2.2  matt 		trailsize = alloc_size - (alignment - offset) - chunk_size;
   2807  1.10.2.2  matt 		if (trailsize != 0) {
   2808  1.10.2.2  matt 			/* Trim trailing space. */
   2809  1.10.2.2  matt 			assert(trailsize < alloc_size);
   2810  1.10.2.2  matt 			chunk_dealloc((void *)((uintptr_t)ret + chunk_size),
   2811  1.10.2.2  matt 			    trailsize);
   2812  1.10.2.2  matt 		}
   2813  1.10.2.2  matt 	}
   2814  1.10.2.2  matt 
   2815  1.10.2.2  matt 	/* Insert node into huge. */
   2816  1.10.2.2  matt 	node->chunk = ret;
   2817  1.10.2.2  matt 	node->size = chunk_size;
   2818  1.10.2.2  matt 
   2819  1.10.2.2  matt 	malloc_mutex_lock(&chunks_mtx);
   2820  1.10.2.2  matt 	RB_INSERT(chunk_tree_s, &huge, node);
   2821  1.10.2.2  matt #ifdef MALLOC_STATS
   2822  1.10.2.2  matt 	huge_nmalloc++;
   2823  1.10.2.2  matt 	huge_allocated += chunk_size;
   2824  1.10.2.2  matt #endif
   2825  1.10.2.2  matt 	malloc_mutex_unlock(&chunks_mtx);
   2826  1.10.2.2  matt 
   2827  1.10.2.2  matt 	if (opt_junk)
   2828  1.10.2.2  matt 		memset(ret, 0xa5, chunk_size);
   2829  1.10.2.2  matt 	else if (opt_zero)
   2830  1.10.2.2  matt 		memset(ret, 0, chunk_size);
   2831  1.10.2.2  matt 
   2832  1.10.2.2  matt 	return (ret);
   2833  1.10.2.2  matt }
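
/*
 * Illustrative sketch (not part of the allocator): the over-allocation and
 * chunk-granular trimming above, for an assumed example (1MB chunks, a 2MB
 * request with 4MB alignment, and a made-up chunk-aligned return address).
 * Build it on its own.
 */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	size_t chunksize = 1UL << 20;		/* assumed */
	size_t alignment = 4 * chunksize;
	size_t chunk_size = 2 * chunksize;	/* CHUNK_CEILING(size) */
	/* size < alignment, so alloc_size = (alignment << 1) - chunksize. */
	size_t alloc_size = (alignment << 1) - chunksize;
	uintptr_t ret = 7 * chunksize;		/* assumed chunk-aligned address */
	size_t offset = ret & (alignment - 1);
	/* Example address is misaligned; offset == 0 would only trim the tail. */
	size_t lead = alignment - offset;
	size_t trail = alloc_size - lead - chunk_size;

	/* Prints: map 7 chunks, trim 1 leading and 4 trailing */
	printf("map %zu chunks, trim %zu leading and %zu trailing\n",
	    alloc_size / chunksize, lead / chunksize, trail / chunksize);
	return (0);
}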
   2834  1.10.2.2  matt 
   2835  1.10.2.2  matt static void *
   2836  1.10.2.2  matt huge_ralloc(void *ptr, size_t size, size_t oldsize)
   2837  1.10.2.2  matt {
   2838  1.10.2.2  matt 	void *ret;
   2839  1.10.2.2  matt 
   2840  1.10.2.2  matt 	/* Avoid moving the allocation if the size class would not change. */
   2841  1.10.2.2  matt 	if (oldsize > arena_maxclass &&
   2842  1.10.2.2  matt 	    CHUNK_CEILING(size) == CHUNK_CEILING(oldsize)) {
   2843  1.10.2.2  matt 		if (opt_junk && size < oldsize) {
   2844  1.10.2.2  matt 			memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize
   2845  1.10.2.2  matt 			    - size);
   2846  1.10.2.2  matt 		} else if (opt_zero && size > oldsize) {
   2847  1.10.2.2  matt 			memset((void *)((uintptr_t)ptr + oldsize), 0, size
   2848  1.10.2.2  matt 			    - oldsize);
   2849  1.10.2.2  matt 		}
   2850  1.10.2.2  matt 		return (ptr);
   2851  1.10.2.2  matt 	}
   2852  1.10.2.2  matt 
   2853  1.10.2.2  matt 	if (CHUNK_ADDR2BASE(ptr) == ptr
   2854  1.10.2.2  matt #ifdef USE_BRK
   2855  1.10.2.2  matt 	    && ((uintptr_t)ptr < (uintptr_t)brk_base
   2856  1.10.2.2  matt 	    || (uintptr_t)ptr >= (uintptr_t)brk_max)
   2857  1.10.2.2  matt #endif
   2858  1.10.2.2  matt 	    ) {
   2859  1.10.2.2  matt 		chunk_node_t *node, key;
   2860  1.10.2.2  matt 		void *newptr;
   2861  1.10.2.2  matt 		size_t oldcsize;
   2862  1.10.2.2  matt 		size_t newcsize;
   2863  1.10.2.2  matt 
   2864  1.10.2.2  matt 		newcsize = CHUNK_CEILING(size);
   2865  1.10.2.2  matt 		oldcsize = CHUNK_CEILING(oldsize);
   2866  1.10.2.2  matt 		assert(oldcsize != newcsize);
   2867  1.10.2.2  matt 		if (newcsize == 0) {
   2868  1.10.2.2  matt 			/* size_t wrap-around */
   2869  1.10.2.2  matt 			return (NULL);
   2870  1.10.2.2  matt 		}
   2871  1.10.2.2  matt 		newptr = mremap(ptr, oldcsize, NULL, newcsize,
   2872  1.10.2.2  matt 		    MAP_ALIGNED(chunksize_2pow));
   2873  1.10.2.2  matt 		if (newptr != MAP_FAILED) {
   2874  1.10.2.2  matt 			assert(CHUNK_ADDR2BASE(newptr) == newptr);
   2875  1.10.2.2  matt 
   2876  1.10.2.2  matt 			/* update tree */
   2877  1.10.2.2  matt 			malloc_mutex_lock(&chunks_mtx);
   2878  1.10.2.2  matt 			key.chunk = __DECONST(void *, ptr);
   2879  1.10.2.2  matt 			/* LINTED */
   2880  1.10.2.2  matt 			node = RB_FIND(chunk_tree_s, &huge, &key);
   2881  1.10.2.2  matt 			assert(node != NULL);
   2882  1.10.2.2  matt 			assert(node->chunk == ptr);
   2883  1.10.2.2  matt 			assert(node->size == oldcsize);
   2884  1.10.2.2  matt 			node->size = newcsize;
   2885  1.10.2.2  matt 			if (ptr != newptr) {
   2886  1.10.2.2  matt 				RB_REMOVE(chunk_tree_s, &huge, node);
   2887  1.10.2.2  matt 				node->chunk = newptr;
   2888  1.10.2.2  matt 				RB_INSERT(chunk_tree_s, &huge, node);
   2889  1.10.2.2  matt 			}
   2890  1.10.2.2  matt #ifdef MALLOC_STATS
   2891  1.10.2.2  matt 			huge_nralloc++;
   2892  1.10.2.2  matt 			huge_allocated += newcsize - oldcsize;
   2893  1.10.2.2  matt 			if (newcsize > oldcsize) {
   2894  1.10.2.2  matt 				stats_chunks.curchunks +=
   2895  1.10.2.2  matt 				    (newcsize - oldcsize) / chunksize;
   2896  1.10.2.2  matt 				if (stats_chunks.curchunks >
   2897  1.10.2.2  matt 				    stats_chunks.highchunks)
   2898  1.10.2.2  matt 					stats_chunks.highchunks =
   2899  1.10.2.2  matt 					    stats_chunks.curchunks;
   2900  1.10.2.2  matt 			} else {
   2901  1.10.2.2  matt 				stats_chunks.curchunks -=
   2902  1.10.2.2  matt 				    (oldcsize - newcsize) / chunksize;
   2903  1.10.2.2  matt 			}
   2904  1.10.2.2  matt #endif
   2905  1.10.2.2  matt 			malloc_mutex_unlock(&chunks_mtx);
   2906  1.10.2.2  matt 
   2907  1.10.2.2  matt 			if (opt_junk && size < oldsize) {
   2908  1.10.2.2  matt 				memset((void *)((uintptr_t)newptr + size), 0x5a,
   2909  1.10.2.2  matt 				    newcsize - size);
   2910  1.10.2.2  matt 			} else if (opt_zero && size > oldsize) {
   2911  1.10.2.2  matt 				memset((void *)((uintptr_t)newptr + oldsize), 0,
   2912  1.10.2.2  matt 				    size - oldsize);
   2913  1.10.2.2  matt 			}
   2914  1.10.2.2  matt 			return (newptr);
   2915  1.10.2.2  matt 		}
   2916  1.10.2.2  matt 	}
   2917  1.10.2.2  matt 
   2918  1.10.2.2  matt 	/*
   2919  1.10.2.2  matt 	 * If we get here, then size and oldsize are different enough that we
   2920  1.10.2.2  matt 	 * need to use a different size class.  In that case, fall back to
   2921  1.10.2.2  matt 	 * allocating new space and copying.
   2922  1.10.2.2  matt 	 */
   2923  1.10.2.2  matt 	ret = huge_malloc(size);
   2924  1.10.2.2  matt 	if (ret == NULL)
   2925  1.10.2.2  matt 		return (NULL);
   2926  1.10.2.2  matt 
   2927  1.10.2.2  matt 	if (CHUNK_ADDR2BASE(ptr) == ptr) {
   2928  1.10.2.2  matt 		/* The old allocation is a chunk. */
   2929  1.10.2.2  matt 		if (size < oldsize)
   2930  1.10.2.2  matt 			memcpy(ret, ptr, size);
   2931  1.10.2.2  matt 		else
   2932  1.10.2.2  matt 			memcpy(ret, ptr, oldsize);
   2933  1.10.2.2  matt 	} else {
   2934  1.10.2.2  matt 		/* The old allocation is a region. */
   2935  1.10.2.2  matt 		assert(oldsize < size);
   2936  1.10.2.2  matt 		memcpy(ret, ptr, oldsize);
   2937  1.10.2.2  matt 	}
   2938  1.10.2.2  matt 	idalloc(ptr);
   2939  1.10.2.2  matt 	return (ret);
   2940  1.10.2.2  matt }
   2941  1.10.2.2  matt 
   2942  1.10.2.2  matt static void
   2943  1.10.2.2  matt huge_dalloc(void *ptr)
   2944  1.10.2.2  matt {
   2945  1.10.2.2  matt 	chunk_node_t key;
   2946  1.10.2.2  matt 	chunk_node_t *node;
   2947  1.10.2.2  matt 
   2948  1.10.2.2  matt 	malloc_mutex_lock(&chunks_mtx);
   2949  1.10.2.2  matt 
   2950  1.10.2.2  matt 	/* Extract from tree of huge allocations. */
   2951  1.10.2.2  matt 	key.chunk = ptr;
   2952  1.10.2.2  matt 	/* LINTED */
   2953  1.10.2.2  matt 	node = RB_FIND(chunk_tree_s, &huge, &key);
   2954  1.10.2.2  matt 	assert(node != NULL);
   2955  1.10.2.2  matt 	assert(node->chunk == ptr);
   2956  1.10.2.2  matt 	/* LINTED */
   2957  1.10.2.2  matt 	RB_REMOVE(chunk_tree_s, &huge, node);
   2958  1.10.2.2  matt 
   2959  1.10.2.2  matt #ifdef MALLOC_STATS
   2960  1.10.2.2  matt 	huge_ndalloc++;
   2961  1.10.2.2  matt 	huge_allocated -= node->size;
   2962  1.10.2.2  matt #endif
   2963  1.10.2.2  matt 
   2964  1.10.2.2  matt 	malloc_mutex_unlock(&chunks_mtx);
   2965  1.10.2.2  matt 
   2966  1.10.2.2  matt 	/* Unmap chunk. */
   2967  1.10.2.2  matt #ifdef USE_BRK
   2968  1.10.2.2  matt 	if (opt_junk)
   2969  1.10.2.2  matt 		memset(node->chunk, 0x5a, node->size);
   2970  1.10.2.2  matt #endif
   2971  1.10.2.2  matt 	chunk_dealloc(node->chunk, node->size);
   2972  1.10.2.2  matt 
   2973  1.10.2.2  matt 	base_chunk_node_dealloc(node);
   2974  1.10.2.2  matt }
   2975  1.10.2.2  matt 
   2976  1.10.2.2  matt static void *
   2977  1.10.2.2  matt imalloc(size_t size)
   2978  1.10.2.2  matt {
   2979  1.10.2.2  matt 	void *ret;
   2980  1.10.2.2  matt 
   2981  1.10.2.2  matt 	assert(size != 0);
   2982  1.10.2.2  matt 
   2983  1.10.2.2  matt 	if (size <= arena_maxclass)
   2984  1.10.2.2  matt 		ret = arena_malloc(choose_arena(), size);
   2985  1.10.2.2  matt 	else
   2986  1.10.2.2  matt 		ret = huge_malloc(size);
   2987  1.10.2.2  matt 
   2988  1.10.2.2  matt 	return (ret);
   2989  1.10.2.2  matt }
   2990  1.10.2.2  matt 
   2991  1.10.2.2  matt static void *
   2992  1.10.2.2  matt ipalloc(size_t alignment, size_t size)
   2993  1.10.2.2  matt {
   2994  1.10.2.2  matt 	void *ret;
   2995  1.10.2.2  matt 	size_t ceil_size;
   2996  1.10.2.2  matt 
   2997  1.10.2.2  matt 	/*
   2998  1.10.2.2  matt 	 * Round size up to the nearest multiple of alignment.
   2999  1.10.2.2  matt 	 *
   3000  1.10.2.2  matt 	 * This done, we can take advantage of the fact that for each small
   3001  1.10.2.2  matt 	 * size class, every object is aligned at the smallest power of two
   3002  1.10.2.2  matt 	 * that is non-zero in the base two representation of the size.  For
   3003  1.10.2.2  matt 	 * example:
   3004  1.10.2.2  matt 	 *
   3005  1.10.2.2  matt 	 *   Size |   Base 2 | Minimum alignment
   3006  1.10.2.2  matt 	 *   -----+----------+------------------
   3007  1.10.2.2  matt 	 *     96 |  1100000 |  32
   3008  1.10.2.2  matt 	 *    144 | 10010000 |  16
   3009  1.10.2.2  matt 	 *    192 | 11000000 |  64
   3010  1.10.2.2  matt 	 *
   3011  1.10.2.2  matt 	 * Depending on runtime settings, it is possible that arena_malloc()
   3012  1.10.2.2  matt 	 * will further round up to a power of two, but that never causes
   3013  1.10.2.2  matt 	 * correctness issues.
   3014  1.10.2.2  matt 	 */
   3015  1.10.2.2  matt 	ceil_size = (size + (alignment - 1)) & (-alignment);
   3016  1.10.2.2  matt 	/*
   3017  1.10.2.2  matt 	 * (ceil_size < size) protects against the combination of maximal
   3018  1.10.2.2  matt 	 * alignment and size greater than maximal alignment.
   3019  1.10.2.2  matt 	 */
   3020  1.10.2.2  matt 	if (ceil_size < size) {
   3021  1.10.2.2  matt 		/* size_t overflow. */
   3022  1.10.2.2  matt 		return (NULL);
   3023  1.10.2.2  matt 	}
   3024  1.10.2.2  matt 
   3025  1.10.2.2  matt 	if (ceil_size <= pagesize || (alignment <= pagesize
   3026  1.10.2.2  matt 	    && ceil_size <= arena_maxclass))
   3027  1.10.2.2  matt 		ret = arena_malloc(choose_arena(), ceil_size);
   3028  1.10.2.2  matt 	else {
   3029  1.10.2.2  matt 		size_t run_size;
   3030  1.10.2.2  matt 
   3031  1.10.2.2  matt 		/*
   3032  1.10.2.2  matt 		 * We can't achieve sub-page alignment, so round up alignment
   3033  1.10.2.2  matt 		 * permanently; it makes later calculations simpler.
   3034  1.10.2.2  matt 		 */
   3035  1.10.2.2  matt 		alignment = PAGE_CEILING(alignment);
   3036  1.10.2.2  matt 		ceil_size = PAGE_CEILING(size);
   3037  1.10.2.2  matt 		/*
   3038  1.10.2.2  matt 		 * (ceil_size < size) protects against very large sizes within
   3039  1.10.2.2  matt 		 * pagesize of SIZE_T_MAX.
   3040  1.10.2.2  matt 		 *
   3041  1.10.2.2  matt 		 * (ceil_size + alignment < ceil_size) protects against the
   3042  1.10.2.2  matt 		 * combination of maximal alignment and ceil_size large enough
   3043  1.10.2.2  matt 		 * to cause overflow.  This is similar to the first overflow
   3044  1.10.2.2  matt 		 * check above, but it needs to be repeated due to the new
   3045  1.10.2.2  matt 		 * ceil_size value, which may now be *equal* to maximal
   3046  1.10.2.2  matt 		 * alignment, whereas before we only detected overflow if the
   3047  1.10.2.2  matt 		 * original size was *greater* than maximal alignment.
   3048  1.10.2.2  matt 		 */
   3049  1.10.2.2  matt 		if (ceil_size < size || ceil_size + alignment < ceil_size) {
   3050  1.10.2.2  matt 			/* size_t overflow. */
   3051  1.10.2.2  matt 			return (NULL);
   3052  1.10.2.2  matt 		}
   3053  1.10.2.2  matt 
   3054  1.10.2.2  matt 		/*
   3055  1.10.2.2  matt 		 * Calculate the size of the over-size run that arena_palloc()
   3056  1.10.2.2  matt 		 * would need to allocate in order to guarantee the alignment.
   3057  1.10.2.2  matt 		 */
   3058  1.10.2.2  matt 		if (ceil_size >= alignment)
   3059  1.10.2.2  matt 			run_size = ceil_size + alignment - pagesize;
   3060  1.10.2.2  matt 		else {
   3061  1.10.2.2  matt 			/*
   3062  1.10.2.2  matt 			 * It is possible that (alignment << 1) will cause
   3063  1.10.2.2  matt 			 * overflow, but it doesn't matter because we also
   3064  1.10.2.2  matt 			 * subtract pagesize, which in the case of overflow
   3065  1.10.2.2  matt 			 * leaves us with a very large run_size.  That causes
   3066  1.10.2.2  matt 			 * the first conditional below to fail, which means
   3067  1.10.2.2  matt 			 * that the bogus run_size value never gets used for
   3068  1.10.2.2  matt 			 * anything important.
   3069  1.10.2.2  matt 			 */
   3070  1.10.2.2  matt 			run_size = (alignment << 1) - pagesize;
   3071  1.10.2.2  matt 		}
   3072  1.10.2.2  matt 
   3073  1.10.2.2  matt 		if (run_size <= arena_maxclass) {
   3074  1.10.2.2  matt 			ret = arena_palloc(choose_arena(), alignment, ceil_size,
   3075  1.10.2.2  matt 			    run_size);
   3076  1.10.2.2  matt 		} else if (alignment <= chunksize)
   3077  1.10.2.2  matt 			ret = huge_malloc(ceil_size);
   3078  1.10.2.2  matt 		else
   3079  1.10.2.2  matt 			ret = huge_palloc(alignment, ceil_size);
   3080  1.10.2.2  matt 	}
   3081  1.10.2.2  matt 
   3082  1.10.2.2  matt 	assert(((uintptr_t)ret & (alignment - 1)) == 0);
   3083  1.10.2.2  matt 	return (ret);
   3084  1.10.2.2  matt }
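
/*
 * Illustrative sketch (not part of the allocator): the observation in the
 * comment above that a small size class guarantees alignment to the lowest
 * set bit of its size, plus the "round size up to a multiple of alignment"
 * step.  Build it on its own; the sizes are taken from the table above and
 * the 64-byte alignment request is an assumed example.
 */
#include <stdio.h>

int
main(void)
{
	unsigned sizes[] = { 96, 144, 192 };
	unsigned size, alignment, ceil_size, i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		/* The lowest set bit is the minimum guaranteed alignment. */
		printf("size %3u: minimum alignment %2u\n", sizes[i],
		    sizes[i] & -sizes[i]);
	}

	/* The rounding step used above: 100 bytes at 64-byte alignment. */
	size = 100;
	alignment = 64;
	ceil_size = (size + (alignment - 1)) & -alignment;
	/* Prints: ceil_size 128 */
	printf("size %u, alignment %u -> ceil_size %u\n",
	    size, alignment, ceil_size);
	return (0);
}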
   3085  1.10.2.2  matt 
   3086  1.10.2.2  matt static void *
   3087  1.10.2.2  matt icalloc(size_t size)
   3088  1.10.2.2  matt {
   3089  1.10.2.2  matt 	void *ret;
   3090  1.10.2.2  matt 
   3091  1.10.2.2  matt 	if (size <= arena_maxclass) {
   3092  1.10.2.2  matt 		ret = arena_malloc(choose_arena(), size);
   3093  1.10.2.2  matt 		if (ret == NULL)
   3094  1.10.2.2  matt 			return (NULL);
   3095  1.10.2.2  matt 		memset(ret, 0, size);
   3096  1.10.2.2  matt 	} else {
   3097  1.10.2.2  matt 		/*
   3098  1.10.2.2  matt 		 * The virtual memory system provides zero-filled pages, so
   3099  1.10.2.2  matt 		 * there is no need to do so manually, unless opt_junk is
   3100  1.10.2.2  matt 		 * enabled, in which case huge_malloc() fills huge allocations
   3101  1.10.2.2  matt 		 * with junk.
   3102  1.10.2.2  matt 		 */
   3103  1.10.2.2  matt 		ret = huge_malloc(size);
   3104  1.10.2.2  matt 		if (ret == NULL)
   3105  1.10.2.2  matt 			return (NULL);
   3106  1.10.2.2  matt 
   3107  1.10.2.2  matt 		if (opt_junk)
   3108  1.10.2.2  matt 			memset(ret, 0, size);
   3109  1.10.2.2  matt #ifdef USE_BRK
   3110  1.10.2.2  matt 		else if ((uintptr_t)ret >= (uintptr_t)brk_base
   3111  1.10.2.2  matt 		    && (uintptr_t)ret < (uintptr_t)brk_max) {
   3112  1.10.2.2  matt 			/*
   3113  1.10.2.2  matt 			 * This may be a re-used brk chunk.  Therefore, zero
   3114  1.10.2.2  matt 			 * the memory.
   3115  1.10.2.2  matt 			 */
   3116  1.10.2.2  matt 			memset(ret, 0, size);
   3117  1.10.2.2  matt 		}
   3118  1.10.2.2  matt #endif
   3119  1.10.2.2  matt 	}
   3120  1.10.2.2  matt 
   3121  1.10.2.2  matt 	return (ret);
   3122  1.10.2.2  matt }
   3123  1.10.2.2  matt 
   3124  1.10.2.2  matt static size_t
   3125  1.10.2.2  matt isalloc(const void *ptr)
   3126  1.10.2.2  matt {
   3127  1.10.2.2  matt 	size_t ret;
   3128  1.10.2.2  matt 	arena_chunk_t *chunk;
   3129  1.10.2.2  matt 
   3130  1.10.2.2  matt 	assert(ptr != NULL);
   3131  1.10.2.2  matt 
   3132  1.10.2.2  matt 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
   3133  1.10.2.2  matt 	if (chunk != ptr) {
   3134  1.10.2.2  matt 		/* Region. */
   3135  1.10.2.2  matt 		assert(chunk->arena->magic == ARENA_MAGIC);
   3136  1.10.2.2  matt 
   3137  1.10.2.2  matt 		ret = arena_salloc(ptr);
   3138  1.10.2.2  matt 	} else {
   3139  1.10.2.2  matt 		chunk_node_t *node, key;
   3140  1.10.2.2  matt 
   3141  1.10.2.2  matt 		/* Chunk (huge allocation). */
   3142  1.10.2.2  matt 
   3143  1.10.2.2  matt 		malloc_mutex_lock(&chunks_mtx);
   3144  1.10.2.2  matt 
   3145  1.10.2.2  matt 		/* Extract from tree of huge allocations. */
   3146  1.10.2.2  matt 		key.chunk = __DECONST(void *, ptr);
   3147  1.10.2.2  matt 		/* LINTED */
   3148  1.10.2.2  matt 		node = RB_FIND(chunk_tree_s, &huge, &key);
   3149  1.10.2.2  matt 		assert(node != NULL);
   3150  1.10.2.2  matt 
   3151  1.10.2.2  matt 		ret = node->size;
   3152  1.10.2.2  matt 
   3153  1.10.2.2  matt 		malloc_mutex_unlock(&chunks_mtx);
   3154  1.10.2.2  matt 	}
   3155  1.10.2.2  matt 
   3156  1.10.2.2  matt 	return (ret);
   3157  1.10.2.2  matt }
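
/*
 * Illustrative sketch (not part of the allocator): the CHUNK_ADDR2BASE()
 * test used above to tell arena regions from huge allocations.  Build it on
 * its own; the 1MB chunk size and both addresses are assumed example
 * values.
 */
#include <inttypes.h>
#include <stdio.h>

int
main(void)
{
	uintptr_t chunksize_mask = (1UL << 20) - 1;	/* assumed 1MB chunks */
	uintptr_t ptrs[] = {
		0x40123000,	/* interior of a chunk: arena-managed region */
		0x40500000	/* chunk-aligned: huge allocation */
	};
	unsigned i;

	for (i = 0; i < sizeof(ptrs) / sizeof(ptrs[0]); i++) {
		uintptr_t base = ptrs[i] & ~chunksize_mask;

		printf("%#" PRIxPTR " -> base %#" PRIxPTR " (%s)\n",
		    ptrs[i], base, base == ptrs[i] ? "huge" : "region");
	}
	return (0);
}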
   3158  1.10.2.2  matt 
   3159  1.10.2.2  matt static void *
   3160  1.10.2.2  matt iralloc(void *ptr, size_t size)
   3161  1.10.2.2  matt {
   3162  1.10.2.2  matt 	void *ret;
   3163  1.10.2.2  matt 	size_t oldsize;
   3164  1.10.2.2  matt 
   3165  1.10.2.2  matt 	assert(ptr != NULL);
   3166  1.10.2.2  matt 	assert(size != 0);
   3167  1.10.2.2  matt 
   3168  1.10.2.2  matt 	oldsize = isalloc(ptr);
   3169  1.10.2.2  matt 
   3170  1.10.2.2  matt 	if (size <= arena_maxclass)
   3171  1.10.2.2  matt 		ret = arena_ralloc(ptr, size, oldsize);
   3172  1.10.2.2  matt 	else
   3173  1.10.2.2  matt 		ret = huge_ralloc(ptr, size, oldsize);
   3174  1.10.2.2  matt 
   3175  1.10.2.2  matt 	return (ret);
   3176  1.10.2.2  matt }
   3177  1.10.2.2  matt 
   3178  1.10.2.2  matt static void
   3179  1.10.2.2  matt idalloc(void *ptr)
   3180  1.10.2.2  matt {
   3181  1.10.2.2  matt 	arena_chunk_t *chunk;
   3182  1.10.2.2  matt 
   3183  1.10.2.2  matt 	assert(ptr != NULL);
   3184  1.10.2.2  matt 
   3185  1.10.2.2  matt 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
   3186  1.10.2.2  matt 	if (chunk != ptr) {
   3187  1.10.2.2  matt 		/* Region. */
   3188  1.10.2.2  matt 		arena_dalloc(chunk->arena, chunk, ptr);
   3189  1.10.2.2  matt 	} else
   3190  1.10.2.2  matt 		huge_dalloc(ptr);
   3191  1.10.2.2  matt }
   3192  1.10.2.2  matt 
   3193  1.10.2.2  matt static void
   3194  1.10.2.2  matt malloc_print_stats(void)
   3195  1.10.2.2  matt {
   3196  1.10.2.2  matt 
   3197  1.10.2.2  matt 	if (opt_print_stats) {
   3198  1.10.2.2  matt 		char s[UMAX2S_BUFSIZE];
   3199  1.10.2.2  matt 		_malloc_message("___ Begin malloc statistics ___\n", "", "",
   3200  1.10.2.2  matt 		    "");
   3201  1.10.2.2  matt 		_malloc_message("Assertions ",
   3202  1.10.2.2  matt #ifdef NDEBUG
   3203  1.10.2.2  matt 		    "disabled",
   3204  1.10.2.2  matt #else
   3205  1.10.2.2  matt 		    "enabled",
   3206  1.10.2.2  matt #endif
   3207  1.10.2.2  matt 		    "\n", "");
   3208  1.10.2.2  matt 		_malloc_message("Boolean MALLOC_OPTIONS: ",
   3209  1.10.2.2  matt 		    opt_abort ? "A" : "a",
   3210  1.10.2.2  matt 		    opt_junk ? "J" : "j",
   3211  1.10.2.2  matt 		    opt_hint ? "H" : "h");
   3212  1.10.2.2  matt 		_malloc_message(opt_utrace ? "PU" : "Pu",
   3213  1.10.2.2  matt 		    opt_sysv ? "V" : "v",
   3214  1.10.2.2  matt 		    opt_xmalloc ? "X" : "x",
   3215  1.10.2.2  matt 		    opt_zero ? "Z\n" : "z\n");
   3216  1.10.2.2  matt 
   3217  1.10.2.2  matt 		_malloc_message("CPUs: ", umax2s(ncpus, s), "\n", "");
   3218  1.10.2.2  matt 		_malloc_message("Max arenas: ", umax2s(narenas, s), "\n", "");
   3219  1.10.2.2  matt 		_malloc_message("Pointer size: ", umax2s(sizeof(void *), s),
   3220  1.10.2.2  matt 		    "\n", "");
   3221  1.10.2.2  matt 		_malloc_message("Quantum size: ", umax2s(quantum, s), "\n", "");
   3222  1.10.2.2  matt 		_malloc_message("Max small size: ", umax2s(small_max, s), "\n",
   3223  1.10.2.2  matt 		    "");
   3224  1.10.2.2  matt 
   3225  1.10.2.2  matt 		_malloc_message("Chunk size: ", umax2s(chunksize, s), "", "");
   3226  1.10.2.2  matt 		_malloc_message(" (2^", umax2s(opt_chunk_2pow, s), ")\n", "");
   3227  1.10.2.2  matt 
   3228  1.10.2.2  matt #ifdef MALLOC_STATS
   3229  1.10.2.2  matt 		{
   3230  1.10.2.2  matt 			size_t allocated, mapped;
   3231  1.10.2.2  matt 			unsigned i;
   3232  1.10.2.2  matt 			arena_t *arena;
   3233  1.10.2.2  matt 
   3234  1.10.2.2  matt 			/* Calculate and print allocated/mapped stats. */
   3235  1.10.2.2  matt 
   3236  1.10.2.2  matt 			/* arenas. */
   3237  1.10.2.2  matt 			for (i = 0, allocated = 0; i < narenas; i++) {
   3238  1.10.2.2  matt 				if (arenas[i] != NULL) {
   3239  1.10.2.2  matt 					malloc_mutex_lock(&arenas[i]->mtx);
   3240  1.10.2.2  matt 					allocated +=
   3241  1.10.2.2  matt 					    arenas[i]->stats.allocated_small;
   3242  1.10.2.2  matt 					allocated +=
   3243  1.10.2.2  matt 					    arenas[i]->stats.allocated_large;
   3244  1.10.2.2  matt 					malloc_mutex_unlock(&arenas[i]->mtx);
   3245  1.10.2.2  matt 				}
   3246  1.10.2.2  matt 			}
   3247  1.10.2.2  matt 
   3248  1.10.2.2  matt 			/* huge/base. */
   3249  1.10.2.2  matt 			malloc_mutex_lock(&chunks_mtx);
   3250  1.10.2.2  matt 			allocated += huge_allocated;
   3251  1.10.2.2  matt 			mapped = stats_chunks.curchunks * chunksize;
   3252  1.10.2.2  matt 			malloc_mutex_unlock(&chunks_mtx);
   3253  1.10.2.2  matt 
   3254  1.10.2.2  matt 			malloc_mutex_lock(&base_mtx);
   3255  1.10.2.2  matt 			mapped += base_mapped;
   3256  1.10.2.2  matt 			malloc_mutex_unlock(&base_mtx);
   3257  1.10.2.2  matt 
   3258  1.10.2.2  matt 			malloc_printf("Allocated: %zu, mapped: %zu\n",
   3259  1.10.2.2  matt 			    allocated, mapped);
   3260  1.10.2.2  matt 
   3261  1.10.2.2  matt 			/* Print chunk stats. */
   3262  1.10.2.2  matt 			{
   3263  1.10.2.2  matt 				chunk_stats_t chunks_stats;
   3264  1.10.2.2  matt 
   3265  1.10.2.2  matt 				malloc_mutex_lock(&chunks_mtx);
   3266  1.10.2.2  matt 				chunks_stats = stats_chunks;
   3267  1.10.2.2  matt 				malloc_mutex_unlock(&chunks_mtx);
   3268  1.10.2.2  matt 
   3269  1.10.2.2  matt 				malloc_printf("chunks: nchunks   "
   3270  1.10.2.2  matt 				    "highchunks    curchunks\n");
   3271  1.10.2.2  matt 				malloc_printf("  %13llu%13lu%13lu\n",
   3272  1.10.2.2  matt 				    chunks_stats.nchunks,
   3273  1.10.2.2  matt 				    chunks_stats.highchunks,
   3274  1.10.2.2  matt 				    chunks_stats.curchunks);
   3275  1.10.2.2  matt 			}
   3276  1.10.2.2  matt 
   3277  1.10.2.2  matt 			/* Print huge allocation stats. */
   3278  1.10.2.2  matt 			malloc_printf(
   3279  1.10.2.2  matt 			    "huge: nmalloc      ndalloc      "
   3280  1.10.2.2  matt 			    "nralloc    allocated\n");
   3281  1.10.2.2  matt 			malloc_printf(" %12llu %12llu %12llu %12zu\n",
   3282  1.10.2.2  matt 			    huge_nmalloc, huge_ndalloc, huge_nralloc,
   3283  1.10.2.2  matt 			    huge_allocated);
   3284  1.10.2.2  matt 
   3285  1.10.2.2  matt 			/* Print stats for each arena. */
   3286  1.10.2.2  matt 			for (i = 0; i < narenas; i++) {
   3287  1.10.2.2  matt 				arena = arenas[i];
   3288  1.10.2.2  matt 				if (arena != NULL) {
   3289  1.10.2.2  matt 					malloc_printf(
   3290  1.10.2.2  matt 					    "\narenas[%u] @ %p\n", i, arena);
   3291  1.10.2.2  matt 					malloc_mutex_lock(&arena->mtx);
   3292  1.10.2.2  matt 					stats_print(arena);
   3293  1.10.2.2  matt 					malloc_mutex_unlock(&arena->mtx);
   3294  1.10.2.2  matt 				}
   3295  1.10.2.2  matt 			}
   3296  1.10.2.2  matt 		}
   3297  1.10.2.2  matt #endif /* #ifdef MALLOC_STATS */
   3298  1.10.2.2  matt 		_malloc_message("--- End malloc statistics ---\n", "", "", "");
   3299  1.10.2.2  matt 	}
   3300  1.10.2.2  matt }
   3301  1.10.2.2  matt 
   3302  1.10.2.2  matt /*
   3303  1.10.2.2  matt  * FreeBSD's pthreads implementation calls malloc(3), so the malloc
   3304  1.10.2.2  matt  * implementation has to take pains to avoid infinite recursion during
   3305  1.10.2.2  matt  * initialization.
   3306  1.10.2.2  matt  */
   3307  1.10.2.2  matt static inline bool
   3308  1.10.2.2  matt malloc_init(void)
   3309  1.10.2.2  matt {
   3310  1.10.2.2  matt 
   3311  1.10.2.2  matt 	if (malloc_initialized == false)
   3312  1.10.2.2  matt 		return (malloc_init_hard());
   3313  1.10.2.2  matt 
   3314  1.10.2.2  matt 	return (false);
   3315  1.10.2.2  matt }
   3316  1.10.2.2  matt 
   3317  1.10.2.2  matt static bool
   3318  1.10.2.2  matt malloc_init_hard(void)
   3319  1.10.2.2  matt {
   3320  1.10.2.2  matt 	unsigned i, j;
   3321  1.10.2.2  matt 	ssize_t linklen;
   3322  1.10.2.2  matt 	char buf[PATH_MAX + 1];
   3323  1.10.2.2  matt 	const char *opts = "";
   3324  1.10.2.2  matt 
   3325  1.10.2.2  matt 	malloc_mutex_lock(&init_lock);
   3326  1.10.2.2  matt 	if (malloc_initialized) {
   3327  1.10.2.2  matt 		/*
   3328  1.10.2.2  matt 		 * Another thread initialized the allocator before this one
   3329  1.10.2.2  matt 		 * acquired init_lock.
   3330  1.10.2.2  matt 		 */
   3331  1.10.2.2  matt 		malloc_mutex_unlock(&init_lock);
   3332  1.10.2.2  matt 		return (false);
   3333  1.10.2.2  matt 	}
   3334  1.10.2.2  matt 
   3335  1.10.2.2  matt 	/* Get number of CPUs. */
   3336  1.10.2.2  matt 	{
   3337  1.10.2.2  matt 		int mib[2];
   3338  1.10.2.2  matt 		size_t len;
   3339  1.10.2.2  matt 
   3340  1.10.2.2  matt 		mib[0] = CTL_HW;
   3341  1.10.2.2  matt 		mib[1] = HW_NCPU;
   3342  1.10.2.2  matt 		len = sizeof(ncpus);
   3343  1.10.2.2  matt 		if (sysctl(mib, 2, &ncpus, &len, (void *) 0, 0) == -1) {
   3344  1.10.2.2  matt 			/* Error. */
   3345  1.10.2.2  matt 			ncpus = 1;
   3346  1.10.2.2  matt 		}
   3347  1.10.2.2  matt 	}
   3348  1.10.2.2  matt 
   3349  1.10.2.2  matt 	/* Get page size. */
   3350  1.10.2.2  matt 	{
   3351  1.10.2.2  matt 		long result;
   3352  1.10.2.2  matt 
   3353  1.10.2.2  matt 		result = sysconf(_SC_PAGESIZE);
   3354  1.10.2.2  matt 		assert(result != -1);
   3355  1.10.2.2  matt 		pagesize = (unsigned) result;
   3356  1.10.2.2  matt 
   3357  1.10.2.2  matt 		/*
   3358  1.10.2.2  matt 		 * We assume that pagesize is a power of 2 when calculating
   3359  1.10.2.2  matt 		 * pagesize_mask and pagesize_2pow.
   3360  1.10.2.2  matt 		 */
   3361  1.10.2.2  matt 		assert(((result - 1) & result) == 0);
   3362  1.10.2.2  matt 		pagesize_mask = result - 1;
   3363  1.10.2.2  matt 		pagesize_2pow = ffs((int)result) - 1;
   3364  1.10.2.2  matt 	}
   3365  1.10.2.2  matt 
   3366  1.10.2.2  matt 	for (i = 0; i < 3; i++) {
   3367  1.10.2.2  matt 		/* Get runtime configuration. */
   3368  1.10.2.2  matt 		switch (i) {
   3369  1.10.2.2  matt 		case 0:
   3370  1.10.2.2  matt 			if ((linklen = readlink("/etc/malloc.conf", buf,
   3371  1.10.2.2  matt 						sizeof(buf) - 1)) != -1) {
   3372  1.10.2.2  matt 				/*
   3373  1.10.2.2  matt 				 * Use the contents of the "/etc/malloc.conf"
   3374  1.10.2.2  matt 				 * symbolic link's name.
   3375  1.10.2.2  matt 				 */
   3376  1.10.2.2  matt 				buf[linklen] = '\0';
   3377  1.10.2.2  matt 				opts = buf;
   3378  1.10.2.2  matt 			} else {
   3379  1.10.2.2  matt 				/* No configuration specified. */
   3380  1.10.2.2  matt 				buf[0] = '\0';
   3381  1.10.2.2  matt 				opts = buf;
   3382  1.10.2.2  matt 			}
   3383  1.10.2.2  matt 			break;
   3384  1.10.2.2  matt 		case 1:
   3385  1.10.2.2  matt 			if (issetugid() == 0 && (opts =
   3386  1.10.2.2  matt 			    getenv("MALLOC_OPTIONS")) != NULL) {
   3387  1.10.2.2  matt 				/*
   3388  1.10.2.2  matt 				 * Do nothing; opts is already initialized to
   3389  1.10.2.2  matt 				 * the value of the MALLOC_OPTIONS environment
   3390  1.10.2.2  matt 				 * variable.
   3391  1.10.2.2  matt 				 */
   3392  1.10.2.2  matt 			} else {
   3393  1.10.2.2  matt 				/* No configuration specified. */
   3394  1.10.2.2  matt 				buf[0] = '\0';
   3395  1.10.2.2  matt 				opts = buf;
   3396  1.10.2.2  matt 			}
   3397  1.10.2.2  matt 			break;
   3398  1.10.2.2  matt 		case 2:
   3399  1.10.2.2  matt 			if (_malloc_options != NULL) {
   3400  1.10.2.2  matt 			    /*
   3401  1.10.2.2  matt 			     * Use options that were compiled into the program.
   3402  1.10.2.2  matt 			     */
   3403  1.10.2.2  matt 			    opts = _malloc_options;
   3404  1.10.2.2  matt 			} else {
   3405  1.10.2.2  matt 				/* No configuration specified. */
   3406  1.10.2.2  matt 				buf[0] = '\0';
   3407  1.10.2.2  matt 				opts = buf;
   3408  1.10.2.2  matt 			}
   3409  1.10.2.2  matt 			break;
   3410  1.10.2.2  matt 		default:
   3411  1.10.2.2  matt 			/* NOTREACHED */
   3412  1.10.2.2  matt 			/* LINTED */
   3413  1.10.2.2  matt 			assert(false);
   3414  1.10.2.2  matt 		}
   3415  1.10.2.2  matt 
   3416  1.10.2.2  matt 		for (j = 0; opts[j] != '\0'; j++) {
   3417  1.10.2.2  matt 			switch (opts[j]) {
   3418  1.10.2.2  matt 			case 'a':
   3419  1.10.2.2  matt 				opt_abort = false;
   3420  1.10.2.2  matt 				break;
   3421  1.10.2.2  matt 			case 'A':
   3422  1.10.2.2  matt 				opt_abort = true;
   3423  1.10.2.2  matt 				break;
   3424  1.10.2.2  matt 			case 'h':
   3425  1.10.2.2  matt 				opt_hint = false;
   3426  1.10.2.2  matt 				break;
   3427  1.10.2.2  matt 			case 'H':
   3428  1.10.2.2  matt 				opt_hint = true;
   3429  1.10.2.2  matt 				break;
   3430  1.10.2.2  matt 			case 'j':
   3431  1.10.2.2  matt 				opt_junk = false;
   3432  1.10.2.2  matt 				break;
   3433  1.10.2.2  matt 			case 'J':
   3434  1.10.2.2  matt 				opt_junk = true;
   3435  1.10.2.2  matt 				break;
   3436  1.10.2.2  matt 			case 'k':
   3437  1.10.2.2  matt 				/*
   3438  1.10.2.2  matt 				 * Chunks always require at least one header
   3439  1.10.2.2  matt 				 * page, so chunks can never be smaller than
   3440  1.10.2.2  matt 				 * two pages.
   3441  1.10.2.2  matt 				 */
   3442  1.10.2.2  matt 				if (opt_chunk_2pow > pagesize_2pow + 1)
   3443  1.10.2.2  matt 					opt_chunk_2pow--;
   3444  1.10.2.2  matt 				break;
   3445  1.10.2.2  matt 			case 'K':
   3446  1.10.2.2  matt 				/*
   3447  1.10.2.2  matt 				 * There must be fewer pages in a chunk than
   3448  1.10.2.2  matt 				 * can be recorded by the pos field of
   3449  1.10.2.2  matt 				 * arena_chunk_map_t, in order to make POS_FREE
   3450  1.10.2.2  matt 				 * special.
   3451  1.10.2.2  matt 				 */
   3452  1.10.2.2  matt 				if (opt_chunk_2pow - pagesize_2pow
   3453  1.10.2.2  matt 				    < (sizeof(uint32_t) << 3) - 1)
   3454  1.10.2.2  matt 					opt_chunk_2pow++;
   3455  1.10.2.2  matt 				break;
   3456  1.10.2.2  matt 			case 'n':
   3457  1.10.2.2  matt 				opt_narenas_lshift--;
   3458  1.10.2.2  matt 				break;
   3459  1.10.2.2  matt 			case 'N':
   3460  1.10.2.2  matt 				opt_narenas_lshift++;
   3461  1.10.2.2  matt 				break;
   3462  1.10.2.2  matt 			case 'p':
   3463  1.10.2.2  matt 				opt_print_stats = false;
   3464  1.10.2.2  matt 				break;
   3465  1.10.2.2  matt 			case 'P':
   3466  1.10.2.2  matt 				opt_print_stats = true;
   3467  1.10.2.2  matt 				break;
   3468  1.10.2.2  matt 			case 'q':
   3469  1.10.2.2  matt 				if (opt_quantum_2pow > QUANTUM_2POW_MIN)
   3470  1.10.2.2  matt 					opt_quantum_2pow--;
   3471  1.10.2.2  matt 				break;
   3472  1.10.2.2  matt 			case 'Q':
   3473  1.10.2.2  matt 				if (opt_quantum_2pow < pagesize_2pow - 1)
   3474  1.10.2.2  matt 					opt_quantum_2pow++;
   3475  1.10.2.2  matt 				break;
   3476  1.10.2.2  matt 			case 's':
   3477  1.10.2.2  matt 				if (opt_small_max_2pow > QUANTUM_2POW_MIN)
   3478  1.10.2.2  matt 					opt_small_max_2pow--;
   3479  1.10.2.2  matt 				break;
   3480  1.10.2.2  matt 			case 'S':
   3481  1.10.2.2  matt 				if (opt_small_max_2pow < pagesize_2pow - 1)
   3482  1.10.2.2  matt 					opt_small_max_2pow++;
   3483  1.10.2.2  matt 				break;
   3484  1.10.2.2  matt 			case 'u':
   3485  1.10.2.2  matt 				opt_utrace = false;
   3486  1.10.2.2  matt 				break;
   3487  1.10.2.2  matt 			case 'U':
   3488  1.10.2.2  matt 				opt_utrace = true;
   3489  1.10.2.2  matt 				break;
   3490  1.10.2.2  matt 			case 'v':
   3491  1.10.2.2  matt 				opt_sysv = false;
   3492  1.10.2.2  matt 				break;
   3493  1.10.2.2  matt 			case 'V':
   3494  1.10.2.2  matt 				opt_sysv = true;
   3495  1.10.2.2  matt 				break;
   3496  1.10.2.2  matt 			case 'x':
   3497  1.10.2.2  matt 				opt_xmalloc = false;
   3498  1.10.2.2  matt 				break;
   3499  1.10.2.2  matt 			case 'X':
   3500  1.10.2.2  matt 				opt_xmalloc = true;
   3501  1.10.2.2  matt 				break;
   3502  1.10.2.2  matt 			case 'z':
   3503  1.10.2.2  matt 				opt_zero = false;
   3504  1.10.2.2  matt 				break;
   3505  1.10.2.2  matt 			case 'Z':
   3506  1.10.2.2  matt 				opt_zero = true;
   3507  1.10.2.2  matt 				break;
   3508  1.10.2.2  matt 			default: {
   3509  1.10.2.2  matt 				char cbuf[2];
   3510  1.10.2.2  matt 
   3511  1.10.2.2  matt 				cbuf[0] = opts[j];
   3512  1.10.2.2  matt 				cbuf[1] = '\0';
   3513  1.10.2.3  matt 				_malloc_message(getprogname(),
   3514  1.10.2.2  matt 				    ": (malloc) Unsupported character in "
   3515  1.10.2.2  matt 				    "malloc options: '", cbuf, "'\n");
   3516  1.10.2.2  matt 			}
   3517  1.10.2.2  matt 			}
   3518  1.10.2.2  matt 		}
   3519  1.10.2.2  matt 	}
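	/*
	 * Illustration of the three option sources scanned above (a sketch;
	 * the flag letters come from the switch): the string "AJ" enables
	 * abort-on-error and junk filling, and could be supplied as
	 *
	 *	ln -s 'AJ' /etc/malloc.conf		(symbolic link target)
	 *	MALLOC_OPTIONS=AJ ./prog		(environment; ignored
	 *						 for set-id processes)
	 *	const char *_malloc_options = "AJ";	(compiled into program)
	 *
	 * Each source is parsed in turn, so a flag set by a later source in
	 * this scan overrides the same flag from an earlier one.
	 */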
   3520  1.10.2.2  matt 
   3521  1.10.2.2  matt 	/* Take care to call atexit() only once. */
   3522  1.10.2.2  matt 	if (opt_print_stats) {
   3523  1.10.2.2  matt 		/* Print statistics at exit. */
   3524  1.10.2.2  matt 		atexit(malloc_print_stats);
   3525  1.10.2.2  matt 	}
   3526  1.10.2.2  matt 
   3527  1.10.2.2  matt 	/* Set variables according to the value of opt_small_max_2pow. */
   3528  1.10.2.2  matt 	if (opt_small_max_2pow < opt_quantum_2pow)
   3529  1.10.2.2  matt 		opt_small_max_2pow = opt_quantum_2pow;
   3530  1.10.2.2  matt 	small_max = (1 << opt_small_max_2pow);
   3531  1.10.2.2  matt 
   3532  1.10.2.2  matt 	/* Set bin-related variables. */
   3533  1.10.2.2  matt 	bin_maxclass = (pagesize >> 1);
   3534  1.10.2.2  matt 	assert(opt_quantum_2pow >= TINY_MIN_2POW);
   3535  1.10.2.2  matt 	ntbins = (unsigned)(opt_quantum_2pow - TINY_MIN_2POW);
   3536  1.10.2.2  matt 	assert(ntbins <= opt_quantum_2pow);
   3537  1.10.2.2  matt 	nqbins = (unsigned)(small_max >> opt_quantum_2pow);
   3538  1.10.2.2  matt 	nsbins = (unsigned)(pagesize_2pow - opt_small_max_2pow - 1);
   3539  1.10.2.2  matt 
   3540  1.10.2.2  matt 	/* Set variables according to the value of opt_quantum_2pow. */
   3541  1.10.2.2  matt 	quantum = (1 << opt_quantum_2pow);
   3542  1.10.2.2  matt 	quantum_mask = quantum - 1;
   3543  1.10.2.2  matt 	if (ntbins > 0)
   3544  1.10.2.2  matt 		small_min = (quantum >> 1) + 1;
   3545  1.10.2.2  matt 	else
   3546  1.10.2.2  matt 		small_min = 1;
   3547  1.10.2.2  matt 	assert(small_min <= quantum);
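	/*
	 * Worked example of the size-class setup above, assuming the common
	 * defaults of quantum == 16 (opt_quantum_2pow == 4), small_max == 512
	 * (opt_small_max_2pow == 9) and 4 KB pages:
	 *
	 *	bin_maxclass = 4096 >> 1 == 2048
	 *	ntbins = 4 - TINY_MIN_2POW	tiny bins below the quantum
	 *	nqbins = 512 >> 4 == 32		quantum-spaced bins up to 512
	 *	nsbins = 12 - 9 - 1 == 2	power-of-two bins (1024, 2048)
	 *	small_min = (16 >> 1) + 1 == 9	smallest quantum-bin request
	 */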
   3548  1.10.2.2  matt 
   3549  1.10.2.2  matt 	/* Set variables according to the value of opt_chunk_2pow. */
   3550  1.10.2.2  matt 	chunksize = (1LU << opt_chunk_2pow);
   3551  1.10.2.2  matt 	chunksize_mask = chunksize - 1;
   3552  1.10.2.2  matt 	chunksize_2pow = (unsigned)opt_chunk_2pow;
   3553  1.10.2.2  matt 	chunk_npages = (unsigned)(chunksize >> pagesize_2pow);
   3554  1.10.2.2  matt 	{
   3555  1.10.2.2  matt 		unsigned header_size;
   3556  1.10.2.2  matt 
   3557  1.10.2.2  matt 		header_size = (unsigned)(sizeof(arena_chunk_t) +
   3558  1.10.2.2  matt 		    (sizeof(arena_chunk_map_t) * (chunk_npages - 1)));
   3559  1.10.2.2  matt 		arena_chunk_header_npages = (header_size >> pagesize_2pow);
   3560  1.10.2.2  matt 		if ((header_size & pagesize_mask) != 0)
   3561  1.10.2.2  matt 			arena_chunk_header_npages++;
   3562  1.10.2.2  matt 	}
   3563  1.10.2.2  matt 	arena_maxclass = chunksize - (arena_chunk_header_npages <<
   3564  1.10.2.2  matt 	    pagesize_2pow);
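	/*
	 * Sketch of the header arithmetic above, assuming the default 1 MB
	 * chunks (opt_chunk_2pow == 20) and 4 KB pages: chunk_npages == 256,
	 * the per-chunk header (an arena_chunk_t plus one arena_chunk_map_t
	 * per page) is rounded up to whole pages, and
	 *
	 *	arena_maxclass = chunksize - (header pages << pagesize_2pow)
	 *
	 * is the largest request still carved out of an arena chunk; anything
	 * larger is handled as a "huge" allocation with chunks of its own.
	 */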
   3565  1.10.2.2  matt 
   3566  1.10.2.2  matt 	UTRACE(0, 0, 0);
   3567  1.10.2.2  matt 
   3568  1.10.2.2  matt #ifdef MALLOC_STATS
   3569  1.10.2.2  matt 	memset(&stats_chunks, 0, sizeof(chunk_stats_t));
   3570  1.10.2.2  matt #endif
   3571  1.10.2.2  matt 
   3572  1.10.2.2  matt 	/* Various sanity checks that regard configuration. */
   3573  1.10.2.2  matt 	assert(quantum >= sizeof(void *));
   3574  1.10.2.2  matt 	assert(quantum <= pagesize);
   3575  1.10.2.2  matt 	assert(chunksize >= pagesize);
   3576  1.10.2.2  matt 	assert(quantum * 4 <= chunksize);
   3577  1.10.2.2  matt 
   3578  1.10.2.2  matt 	/* Initialize chunks data. */
   3579  1.10.2.2  matt 	malloc_mutex_init(&chunks_mtx);
   3580  1.10.2.2  matt 	RB_INIT(&huge);
   3581  1.10.2.2  matt #ifdef USE_BRK
   3582  1.10.2.2  matt 	malloc_mutex_init(&brk_mtx);
   3583  1.10.2.2  matt 	brk_base = sbrk(0);
   3584  1.10.2.2  matt 	brk_prev = brk_base;
   3585  1.10.2.2  matt 	brk_max = brk_base;
   3586  1.10.2.2  matt #endif
   3587  1.10.2.2  matt #ifdef MALLOC_STATS
   3588  1.10.2.2  matt 	huge_nmalloc = 0;
   3589  1.10.2.2  matt 	huge_ndalloc = 0;
   3590  1.10.2.2  matt 	huge_nralloc = 0;
   3591  1.10.2.2  matt 	huge_allocated = 0;
   3592  1.10.2.2  matt #endif
   3593  1.10.2.2  matt 	RB_INIT(&old_chunks);
   3594  1.10.2.2  matt 
   3595  1.10.2.2  matt 	/* Initialize base allocation data structures. */
   3596  1.10.2.2  matt #ifdef MALLOC_STATS
   3597  1.10.2.2  matt 	base_mapped = 0;
   3598  1.10.2.2  matt #endif
   3599  1.10.2.2  matt #ifdef USE_BRK
   3600  1.10.2.2  matt 	/*
   3601  1.10.2.2  matt 	 * Allocate a base chunk here, since it doesn't actually have to be
   3602  1.10.2.2  matt 	 * chunk-aligned.  Doing this before allocating any other chunks allows
   3603  1.10.2.2  matt 	 * the use of space that would otherwise be wasted.
   3604  1.10.2.2  matt 	 */
   3605  1.10.2.2  matt 	base_pages_alloc(0);
   3606  1.10.2.2  matt #endif
   3607  1.10.2.2  matt 	base_chunk_nodes = NULL;
   3608  1.10.2.2  matt 	malloc_mutex_init(&base_mtx);
   3609  1.10.2.2  matt 
   3610  1.10.2.2  matt 	if (ncpus > 1) {
   3611  1.10.2.2  matt 		/*
   3612  1.10.2.2  matt 		 * For SMP systems, create four times as many arenas as there
   3613  1.10.2.2  matt 		 * are CPUs by default.
   3614  1.10.2.2  matt 		 */
   3615  1.10.2.2  matt 		opt_narenas_lshift += 2;
   3616  1.10.2.2  matt 	}
   3617  1.10.2.2  matt 
   3618  1.10.2.2  matt #ifdef NO_TLS
   3619  1.10.2.2  matt 	/* Initialize arena key. */
   3620  1.10.2.2  matt 	(void)thr_keycreate(&arenas_map_key, NULL);
   3621  1.10.2.2  matt #endif
   3622  1.10.2.2  matt 
   3623  1.10.2.2  matt 	/* Determine how many arenas to use. */
   3624  1.10.2.2  matt 	narenas = ncpus;
   3625  1.10.2.2  matt 	if (opt_narenas_lshift > 0) {
   3626  1.10.2.2  matt 		if ((narenas << opt_narenas_lshift) > narenas)
   3627  1.10.2.2  matt 			narenas <<= opt_narenas_lshift;
   3628  1.10.2.2  matt 		/*
    3629  1.10.2.2  matt 		 * Make sure not to exceed the limits of what base_alloc()
   3630  1.10.2.2  matt 		 * can handle.
   3631  1.10.2.2  matt 		 */
   3632  1.10.2.2  matt 		if (narenas * sizeof(arena_t *) > chunksize)
   3633  1.10.2.2  matt 			narenas = (unsigned)(chunksize / sizeof(arena_t *));
   3634  1.10.2.2  matt 	} else if (opt_narenas_lshift < 0) {
    3635  1.10.2.2  matt 		if ((narenas >> -opt_narenas_lshift) < narenas)
    3636  1.10.2.2  matt 			narenas >>= -opt_narenas_lshift;
   3637  1.10.2.2  matt 		/* Make sure there is at least one arena. */
   3638  1.10.2.2  matt 		if (narenas == 0)
   3639  1.10.2.2  matt 			narenas = 1;
   3640  1.10.2.2  matt 	}
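	/*
	 * Example: with ncpus == 4 and no 'n'/'N' flags, the SMP adjustment
	 * above leaves opt_narenas_lshift at 2, so narenas == 4 << 2 == 16,
	 * i.e. four arenas per CPU.  The guards in both branches apply the
	 * shift only when it actually moves the count in the intended
	 * direction, and the clamps keep narenas within what base_alloc()
	 * can hand out and never below one.
	 */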
   3641  1.10.2.2  matt 
   3642  1.10.2.2  matt 	next_arena = 0;
   3643  1.10.2.2  matt 
   3644  1.10.2.2  matt 	/* Allocate and initialize arenas. */
   3645  1.10.2.2  matt 	arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
   3646  1.10.2.2  matt 	if (arenas == NULL) {
   3647  1.10.2.2  matt 		malloc_mutex_unlock(&init_lock);
   3648  1.10.2.2  matt 		return (true);
   3649  1.10.2.2  matt 	}
   3650  1.10.2.2  matt 	/*
   3651  1.10.2.2  matt 	 * Zero the array.  In practice, this should always be pre-zeroed,
   3652  1.10.2.2  matt 	 * since it was just mmap()ed, but let's be sure.
   3653  1.10.2.2  matt 	 */
   3654  1.10.2.2  matt 	memset(arenas, 0, sizeof(arena_t *) * narenas);
   3655  1.10.2.2  matt 
   3656  1.10.2.2  matt 	/*
   3657  1.10.2.2  matt 	 * Initialize one arena here.  The rest are lazily created in
   3658  1.10.2.2  matt 	 * arena_choose_hard().
   3659  1.10.2.2  matt 	 */
   3660  1.10.2.2  matt 	arenas_extend(0);
   3661  1.10.2.2  matt 	if (arenas[0] == NULL) {
   3662  1.10.2.2  matt 		malloc_mutex_unlock(&init_lock);
   3663  1.10.2.2  matt 		return (true);
   3664  1.10.2.2  matt 	}
   3665  1.10.2.2  matt 
   3666  1.10.2.2  matt 	malloc_mutex_init(&arenas_mtx);
   3667  1.10.2.2  matt 
   3668  1.10.2.2  matt 	malloc_initialized = true;
   3669  1.10.2.2  matt 	malloc_mutex_unlock(&init_lock);
   3670  1.10.2.2  matt 	return (false);
   3671  1.10.2.2  matt }
   3672  1.10.2.2  matt 
   3673  1.10.2.2  matt /*
   3674  1.10.2.2  matt  * End general internal functions.
   3675  1.10.2.2  matt  */
   3676  1.10.2.2  matt /******************************************************************************/
   3677  1.10.2.2  matt /*
   3678  1.10.2.2  matt  * Begin malloc(3)-compatible functions.
   3679  1.10.2.2  matt  */
   3680  1.10.2.2  matt 
   3681  1.10.2.2  matt void *
   3682  1.10.2.2  matt malloc(size_t size)
   3683  1.10.2.2  matt {
   3684  1.10.2.2  matt 	void *ret;
   3685  1.10.2.2  matt 
   3686  1.10.2.2  matt 	if (malloc_init()) {
   3687  1.10.2.2  matt 		ret = NULL;
   3688  1.10.2.2  matt 		goto RETURN;
   3689  1.10.2.2  matt 	}
   3690  1.10.2.2  matt 
   3691  1.10.2.2  matt 	if (size == 0) {
   3692  1.10.2.2  matt 		if (opt_sysv == false)
   3693  1.10.2.2  matt 			size = 1;
   3694  1.10.2.2  matt 		else {
   3695  1.10.2.2  matt 			ret = NULL;
   3696  1.10.2.2  matt 			goto RETURN;
   3697  1.10.2.2  matt 		}
   3698  1.10.2.2  matt 	}
   3699  1.10.2.2  matt 
   3700  1.10.2.2  matt 	ret = imalloc(size);
   3701  1.10.2.2  matt 
   3702  1.10.2.2  matt RETURN:
   3703  1.10.2.2  matt 	if (ret == NULL) {
   3704  1.10.2.2  matt 		if (opt_xmalloc) {
   3705  1.10.2.3  matt 			_malloc_message(getprogname(),
   3706  1.10.2.2  matt 			    ": (malloc) Error in malloc(): out of memory\n", "",
   3707  1.10.2.2  matt 			    "");
   3708  1.10.2.2  matt 			abort();
   3709  1.10.2.2  matt 		}
   3710  1.10.2.2  matt 		errno = ENOMEM;
   3711  1.10.2.2  matt 	}
   3712  1.10.2.2  matt 
   3713  1.10.2.2  matt 	UTRACE(0, size, ret);
   3714  1.10.2.2  matt 	return (ret);
   3715  1.10.2.2  matt }
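/*
 * Example of the size == 0 handling above: by default malloc(0) is treated
 * as malloc(1) and returns a valid, freeable pointer; with the 'V' (SysV)
 * option it instead returns NULL with errno set to ENOMEM, and if 'X' is
 * also set the failure path prints a diagnostic and aborts.
 */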
   3716  1.10.2.2  matt 
   3717  1.10.2.2  matt int
   3718  1.10.2.2  matt posix_memalign(void **memptr, size_t alignment, size_t size)
   3719  1.10.2.2  matt {
   3720  1.10.2.2  matt 	int ret;
   3721  1.10.2.2  matt 	void *result;
   3722  1.10.2.2  matt 
   3723  1.10.2.2  matt 	if (malloc_init())
   3724  1.10.2.2  matt 		result = NULL;
   3725  1.10.2.2  matt 	else {
   3726  1.10.2.2  matt 		/* Make sure that alignment is a large enough power of 2. */
   3727  1.10.2.2  matt 		if (((alignment - 1) & alignment) != 0
   3728  1.10.2.2  matt 		    || alignment < sizeof(void *)) {
   3729  1.10.2.2  matt 			if (opt_xmalloc) {
   3730  1.10.2.3  matt 				_malloc_message(getprogname(),
   3731  1.10.2.2  matt 				    ": (malloc) Error in posix_memalign(): "
   3732  1.10.2.2  matt 				    "invalid alignment\n", "", "");
   3733  1.10.2.2  matt 				abort();
   3734  1.10.2.2  matt 			}
   3735  1.10.2.2  matt 			result = NULL;
   3736  1.10.2.2  matt 			ret = EINVAL;
   3737  1.10.2.2  matt 			goto RETURN;
   3738  1.10.2.2  matt 		}
   3739  1.10.2.2  matt 
   3740  1.10.2.2  matt 		result = ipalloc(alignment, size);
   3741  1.10.2.2  matt 	}
   3742  1.10.2.2  matt 
   3743  1.10.2.2  matt 	if (result == NULL) {
   3744  1.10.2.2  matt 		if (opt_xmalloc) {
   3745  1.10.2.3  matt 			_malloc_message(getprogname(),
   3746  1.10.2.2  matt 			": (malloc) Error in posix_memalign(): out of memory\n",
   3747  1.10.2.2  matt 			"", "");
   3748  1.10.2.2  matt 			abort();
   3749  1.10.2.2  matt 		}
   3750  1.10.2.2  matt 		ret = ENOMEM;
   3751  1.10.2.2  matt 		goto RETURN;
   3752  1.10.2.2  matt 	}
   3753  1.10.2.2  matt 
   3754  1.10.2.2  matt 	*memptr = result;
   3755  1.10.2.2  matt 	ret = 0;
   3756  1.10.2.2  matt 
   3757  1.10.2.2  matt RETURN:
   3758  1.10.2.2  matt 	UTRACE(0, size, result);
   3759  1.10.2.2  matt 	return (ret);
   3760  1.10.2.2  matt }
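/*
 * Usage note for the alignment check above: posix_memalign(&p, 64, 1024)
 * succeeds (64 is a power of two and >= sizeof(void *)), whereas an
 * alignment of 3 (not a power of two) or 2 (smaller than a pointer) fails
 * the test and yields EINVAL without touching *memptr.
 */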
   3761  1.10.2.2  matt 
   3762  1.10.2.2  matt void *
   3763  1.10.2.2  matt calloc(size_t num, size_t size)
   3764  1.10.2.2  matt {
   3765  1.10.2.2  matt 	void *ret;
   3766  1.10.2.2  matt 	size_t num_size;
   3767  1.10.2.2  matt 
   3768  1.10.2.2  matt 	if (malloc_init()) {
   3769  1.10.2.2  matt 		num_size = 0;
   3770  1.10.2.2  matt 		ret = NULL;
   3771  1.10.2.2  matt 		goto RETURN;
   3772  1.10.2.2  matt 	}
   3773  1.10.2.2  matt 
   3774  1.10.2.2  matt 	num_size = num * size;
   3775  1.10.2.2  matt 	if (num_size == 0) {
   3776  1.10.2.2  matt 		if ((opt_sysv == false) && ((num == 0) || (size == 0)))
   3777  1.10.2.2  matt 			num_size = 1;
   3778  1.10.2.2  matt 		else {
   3779  1.10.2.2  matt 			ret = NULL;
   3780  1.10.2.2  matt 			goto RETURN;
   3781  1.10.2.2  matt 		}
   3782  1.10.2.2  matt 	/*
   3783  1.10.2.2  matt 	 * Try to avoid division here.  We know that it isn't possible to
   3784  1.10.2.2  matt 	 * overflow during multiplication if neither operand uses any of the
   3785  1.10.2.2  matt 	 * most significant half of the bits in a size_t.
   3786  1.10.2.2  matt 	 */
   3787  1.10.2.2  matt 	} else if ((unsigned long long)((num | size) &
   3788  1.10.2.2  matt 	   ((unsigned long long)SIZE_T_MAX << (sizeof(size_t) << 2))) &&
   3789  1.10.2.2  matt 	   (num_size / size != num)) {
   3790  1.10.2.2  matt 		/* size_t overflow. */
   3791  1.10.2.2  matt 		ret = NULL;
   3792  1.10.2.2  matt 		goto RETURN;
   3793  1.10.2.2  matt 	}
   3794  1.10.2.2  matt 
   3795  1.10.2.2  matt 	ret = icalloc(num_size);
   3796  1.10.2.2  matt 
   3797  1.10.2.2  matt RETURN:
   3798  1.10.2.2  matt 	if (ret == NULL) {
   3799  1.10.2.2  matt 		if (opt_xmalloc) {
   3800  1.10.2.3  matt 			_malloc_message(getprogname(),
   3801  1.10.2.2  matt 			    ": (malloc) Error in calloc(): out of memory\n", "",
   3802  1.10.2.2  matt 			    "");
   3803  1.10.2.2  matt 			abort();
   3804  1.10.2.2  matt 		}
   3805  1.10.2.2  matt 		errno = ENOMEM;
   3806  1.10.2.2  matt 	}
   3807  1.10.2.2  matt 
   3808  1.10.2.2  matt 	UTRACE(0, num_size, ret);
   3809  1.10.2.2  matt 	return (ret);
   3810  1.10.2.2  matt }
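/*
 * The overflow test in calloc() above, spelled out: a product of two size_t
 * values cannot wrap if neither operand has a bit set in the upper half of
 * a size_t, so the (num | size) mask check lets the common case skip the
 * division.  On a 32-bit system the mask is 0xffff0000, so for example:
 *
 *	calloc(100, 200)	  -- mask test is zero, no division needed
 *	calloc(0x10001, 0x10000)  -- mask test fires; 0x10001 * 0x10000 wraps
 *				     to 0x10000, num_size / size != num, and
 *				     NULL is returned with errno == ENOMEM
 */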
   3811  1.10.2.2  matt 
   3812  1.10.2.2  matt void *
   3813  1.10.2.2  matt realloc(void *ptr, size_t size)
   3814  1.10.2.2  matt {
   3815  1.10.2.2  matt 	void *ret;
   3816  1.10.2.2  matt 
   3817  1.10.2.2  matt 	if (size == 0) {
   3818  1.10.2.2  matt 		if (opt_sysv == false)
   3819  1.10.2.2  matt 			size = 1;
   3820  1.10.2.2  matt 		else {
   3821  1.10.2.2  matt 			if (ptr != NULL)
   3822  1.10.2.2  matt 				idalloc(ptr);
   3823  1.10.2.2  matt 			ret = NULL;
   3824  1.10.2.2  matt 			goto RETURN;
   3825  1.10.2.2  matt 		}
   3826  1.10.2.2  matt 	}
   3827  1.10.2.2  matt 
   3828  1.10.2.2  matt 	if (ptr != NULL) {
   3829  1.10.2.2  matt 		assert(malloc_initialized);
   3830  1.10.2.2  matt 
   3831  1.10.2.2  matt 		ret = iralloc(ptr, size);
   3832  1.10.2.2  matt 
   3833  1.10.2.2  matt 		if (ret == NULL) {
   3834  1.10.2.2  matt 			if (opt_xmalloc) {
   3835  1.10.2.3  matt 				_malloc_message(getprogname(),
   3836  1.10.2.2  matt 				    ": (malloc) Error in realloc(): out of "
   3837  1.10.2.2  matt 				    "memory\n", "", "");
   3838  1.10.2.2  matt 				abort();
   3839  1.10.2.2  matt 			}
   3840  1.10.2.2  matt 			errno = ENOMEM;
   3841  1.10.2.2  matt 		}
   3842  1.10.2.2  matt 	} else {
   3843  1.10.2.2  matt 		if (malloc_init())
   3844  1.10.2.2  matt 			ret = NULL;
   3845  1.10.2.2  matt 		else
   3846  1.10.2.2  matt 			ret = imalloc(size);
   3847  1.10.2.2  matt 
   3848  1.10.2.2  matt 		if (ret == NULL) {
   3849  1.10.2.2  matt 			if (opt_xmalloc) {
   3850  1.10.2.3  matt 				_malloc_message(getprogname(),
   3851  1.10.2.2  matt 				    ": (malloc) Error in realloc(): out of "
   3852  1.10.2.2  matt 				    "memory\n", "", "");
   3853  1.10.2.2  matt 				abort();
   3854  1.10.2.2  matt 			}
   3855  1.10.2.2  matt 			errno = ENOMEM;
   3856  1.10.2.2  matt 		}
   3857  1.10.2.2  matt 	}
   3858  1.10.2.2  matt 
   3859  1.10.2.2  matt RETURN:
   3860  1.10.2.2  matt 	UTRACE(ptr, size, ret);
   3861  1.10.2.2  matt 	return (ret);
   3862  1.10.2.2  matt }
   3863  1.10.2.2  matt 
   3864  1.10.2.2  matt void
   3865  1.10.2.2  matt free(void *ptr)
   3866  1.10.2.2  matt {
   3867  1.10.2.2  matt 
   3868  1.10.2.2  matt 	UTRACE(ptr, 0, 0);
   3869  1.10.2.2  matt 	if (ptr != NULL) {
   3870  1.10.2.2  matt 		assert(malloc_initialized);
   3871  1.10.2.2  matt 
   3872  1.10.2.2  matt 		idalloc(ptr);
   3873  1.10.2.2  matt 	}
   3874  1.10.2.2  matt }
   3875  1.10.2.2  matt 
   3876  1.10.2.2  matt /*
   3877  1.10.2.2  matt  * End malloc(3)-compatible functions.
   3878  1.10.2.2  matt  */
   3879  1.10.2.2  matt /******************************************************************************/
   3880  1.10.2.2  matt /*
   3881  1.10.2.2  matt  * Begin non-standard functions.
   3882  1.10.2.2  matt  */
   3883  1.10.2.2  matt #ifndef __NetBSD__
   3884  1.10.2.2  matt size_t
   3885  1.10.2.2  matt malloc_usable_size(const void *ptr)
   3886  1.10.2.2  matt {
   3887  1.10.2.2  matt 
   3888  1.10.2.2  matt 	assert(ptr != NULL);
   3889  1.10.2.2  matt 
   3890  1.10.2.2  matt 	return (isalloc(ptr));
   3891  1.10.2.2  matt }
   3892  1.10.2.2  matt #endif
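/*
 * Example (assuming the default quantum of 16): malloc(100) is rounded up
 * to the 112-byte size class, so malloc_usable_size() on that pointer
 * reports 112 rather than 100.
 */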
   3893  1.10.2.2  matt 
   3894  1.10.2.2  matt /*
   3895  1.10.2.2  matt  * End non-standard functions.
   3896  1.10.2.2  matt  */
   3897  1.10.2.2  matt /******************************************************************************/
   3898  1.10.2.2  matt /*
   3899  1.10.2.2  matt  * Begin library-private functions, used by threading libraries for protection
   3900  1.10.2.2  matt  * of malloc during fork().  These functions are only called if the program is
   3901  1.10.2.2  matt  * running in threaded mode, so there is no need to check whether the program
   3902  1.10.2.2  matt  * is threaded here.
   3903  1.10.2.2  matt  */
   3904  1.10.2.2  matt 
   3905  1.10.2.2  matt void
   3906  1.10.2.2  matt _malloc_prefork(void)
   3907  1.10.2.2  matt {
   3908  1.10.2.2  matt 	unsigned i;
   3909  1.10.2.2  matt 
   3910  1.10.2.2  matt 	/* Acquire all mutexes in a safe order. */
   3911  1.10.2.2  matt 
   3912  1.10.2.2  matt 	malloc_mutex_lock(&arenas_mtx);
   3913  1.10.2.2  matt 	for (i = 0; i < narenas; i++) {
   3914  1.10.2.2  matt 		if (arenas[i] != NULL)
   3915  1.10.2.2  matt 			malloc_mutex_lock(&arenas[i]->mtx);
   3916  1.10.2.2  matt 	}
   3917  1.10.2.2  matt 	malloc_mutex_unlock(&arenas_mtx);
   3918  1.10.2.2  matt 
   3919  1.10.2.2  matt 	malloc_mutex_lock(&base_mtx);
   3920  1.10.2.2  matt 
   3921  1.10.2.2  matt 	malloc_mutex_lock(&chunks_mtx);
   3922  1.10.2.2  matt }
   3923  1.10.2.2  matt 
   3924  1.10.2.2  matt void
   3925  1.10.2.2  matt _malloc_postfork(void)
   3926  1.10.2.2  matt {
   3927  1.10.2.2  matt 	unsigned i;
   3928  1.10.2.2  matt 
   3929  1.10.2.2  matt 	/* Release all mutexes, now that fork() has completed. */
   3930  1.10.2.2  matt 
   3931  1.10.2.2  matt 	malloc_mutex_unlock(&chunks_mtx);
   3932  1.10.2.2  matt 
   3933  1.10.2.2  matt 	malloc_mutex_unlock(&base_mtx);
   3934  1.10.2.2  matt 
   3935  1.10.2.2  matt 	malloc_mutex_lock(&arenas_mtx);
   3936  1.10.2.2  matt 	for (i = 0; i < narenas; i++) {
   3937  1.10.2.2  matt 		if (arenas[i] != NULL)
   3938  1.10.2.2  matt 			malloc_mutex_unlock(&arenas[i]->mtx);
   3939  1.10.2.2  matt 	}
   3940  1.10.2.2  matt 	malloc_mutex_unlock(&arenas_mtx);
   3941  1.10.2.2  matt }
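/*
 * A threading library typically arranges for these hooks to run around
 * fork(2), for example via
 *
 *	pthread_atfork(_malloc_prefork, _malloc_postfork, _malloc_postfork);
 *
 * so that every malloc mutex is held across the fork and the child never
 * starts life holding a lock that belonged to a thread it did not inherit.
 */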
   3942  1.10.2.2  matt 
   3943  1.10.2.2  matt /*
   3944  1.10.2.2  matt  * End library-private functions.
   3945  1.10.2.2  matt  */
   3946  1.10.2.2  matt /******************************************************************************/