jemalloc.c revision 1.10.2.4
      1  1.10.2.4  matt /*	jemalloc.c,v 1.10.2.3 2008/01/09 01:34:14 matt Exp	*/
      2  1.10.2.2  matt 
      3  1.10.2.2  matt /*-
      4  1.10.2.2  matt  * Copyright (C) 2006,2007 Jason Evans <jasone (at) FreeBSD.org>.
      5  1.10.2.2  matt  * All rights reserved.
      6  1.10.2.2  matt  *
      7  1.10.2.2  matt  * Redistribution and use in source and binary forms, with or without
      8  1.10.2.2  matt  * modification, are permitted provided that the following conditions
      9  1.10.2.2  matt  * are met:
     10  1.10.2.2  matt  * 1. Redistributions of source code must retain the above copyright
     11  1.10.2.2  matt  *    notice(s), this list of conditions and the following disclaimer as
     12  1.10.2.2  matt  *    the first lines of this file unmodified other than the possible
     13  1.10.2.2  matt  *    addition of one or more copyright notices.
     14  1.10.2.2  matt  * 2. Redistributions in binary form must reproduce the above copyright
     15  1.10.2.2  matt  *    notice(s), this list of conditions and the following disclaimer in
     16  1.10.2.2  matt  *    the documentation and/or other materials provided with the
     17  1.10.2.2  matt  *    distribution.
     18  1.10.2.2  matt  *
     19  1.10.2.2  matt  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
     20  1.10.2.2  matt  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     21  1.10.2.2  matt  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  1.10.2.2  matt  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE
     23  1.10.2.2  matt  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  1.10.2.2  matt  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  1.10.2.2  matt  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
     26  1.10.2.2  matt  * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
     27  1.10.2.2  matt  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
     28  1.10.2.2  matt  * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
     29  1.10.2.2  matt  * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     30  1.10.2.2  matt  *
     31  1.10.2.2  matt  *******************************************************************************
     32  1.10.2.2  matt  *
     33  1.10.2.2  matt  * This allocator implementation is designed to provide scalable performance
     34  1.10.2.2  matt  * for multi-threaded programs on multi-processor systems.  The following
     35  1.10.2.2  matt  * features are included for this purpose:
     36  1.10.2.2  matt  *
     37  1.10.2.2  matt  *   + Multiple arenas are used if there are multiple CPUs, which reduces lock
     38  1.10.2.2  matt  *     contention and cache sloshing.
     39  1.10.2.2  matt  *
     40  1.10.2.2  matt  *   + Cache line sharing between arenas is avoided for internal data
     41  1.10.2.2  matt  *     structures.
     42  1.10.2.2  matt  *
     43  1.10.2.2  matt  *   + Memory is managed in chunks and runs (chunks can be split into runs),
     44  1.10.2.2  matt  *     rather than as individual pages.  This provides a constant-time
     45  1.10.2.2  matt  *     mechanism for associating allocations with particular arenas.
     46  1.10.2.2  matt  *
     47  1.10.2.2  matt  * Allocation requests are rounded up to the nearest size class, and no record
     48  1.10.2.2  matt  * of the original request size is maintained.  Allocations are broken into
     49  1.10.2.2  matt  * categories according to size class.  Assuming runtime defaults, 4 kB pages
     50  1.10.2.2  matt  * and a 16 byte quantum, the size classes in each category are as follows:
     51  1.10.2.2  matt  *
     52  1.10.2.2  matt  *   |=====================================|
     53  1.10.2.2  matt  *   | Category | Subcategory    |    Size |
     54  1.10.2.2  matt  *   |=====================================|
     55  1.10.2.2  matt  *   | Small    | Tiny           |       2 |
     56  1.10.2.2  matt  *   |          |                |       4 |
     57  1.10.2.2  matt  *   |          |                |       8 |
     58  1.10.2.2  matt  *   |          |----------------+---------|
     59  1.10.2.2  matt  *   |          | Quantum-spaced |      16 |
     60  1.10.2.2  matt  *   |          |                |      32 |
     61  1.10.2.2  matt  *   |          |                |      48 |
     62  1.10.2.2  matt  *   |          |                |     ... |
     63  1.10.2.2  matt  *   |          |                |     480 |
     64  1.10.2.2  matt  *   |          |                |     496 |
     65  1.10.2.2  matt  *   |          |                |     512 |
     66  1.10.2.2  matt  *   |          |----------------+---------|
     67  1.10.2.2  matt  *   |          | Sub-page       |    1 kB |
     68  1.10.2.2  matt  *   |          |                |    2 kB |
     69  1.10.2.2  matt  *   |=====================================|
     70  1.10.2.2  matt  *   | Large                     |    4 kB |
     71  1.10.2.2  matt  *   |                           |    8 kB |
     72  1.10.2.2  matt  *   |                           |   12 kB |
     73  1.10.2.2  matt  *   |                           |     ... |
     74  1.10.2.2  matt  *   |                           | 1012 kB |
     75  1.10.2.2  matt  *   |                           | 1016 kB |
     76  1.10.2.2  matt  *   |                           | 1020 kB |
     77  1.10.2.2  matt  *   |=====================================|
     78  1.10.2.2  matt  *   | Huge                      |    1 MB |
     79  1.10.2.2  matt  *   |                           |    2 MB |
     80  1.10.2.2  matt  *   |                           |    3 MB |
     81  1.10.2.2  matt  *   |                           |     ... |
     82  1.10.2.2  matt  *   |=====================================|
     83  1.10.2.2  matt  *
     84  1.10.2.2  matt  * A different mechanism is used for each category:
     85  1.10.2.2  matt  *
     86  1.10.2.2  matt  *   Small : Each size class is segregated into its own set of runs.  Each run
     87  1.10.2.2  matt  *           maintains a bitmap of which regions are free/allocated.
     88  1.10.2.2  matt  *
     89  1.10.2.2  matt  *   Large : Each allocation is backed by a dedicated run.  Metadata are stored
     90  1.10.2.2  matt  *           in the associated arena chunk header maps.
     91  1.10.2.2  matt  *
     92  1.10.2.2  matt  *   Huge : Each allocation is backed by a dedicated contiguous set of chunks.
     93  1.10.2.2  matt  *          Metadata are stored in a separate red-black tree.
     94  1.10.2.2  matt  *
     95  1.10.2.2  matt  *******************************************************************************
     96  1.10.2.2  matt  */
     97  1.10.2.2  matt 
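/*
 * Illustrative sketch only (not compiled as part of this file): how a request
 * size maps onto the categories in the table above, assuming the runtime
 * defaults of a 16 byte quantum and 4 kB pages.  The helper name size_class()
 * is invented for this example; the globals (small_min, small_max,
 * bin_maxclass, arena_maxclass) and helpers (pow2_ceil(), the *_CEILING()
 * macros, TINY_MIN_2POW) are the ones declared further down in this file.
 * The branches correspond, in order, to the tiny, quantum-spaced, sub-page,
 * large, and huge rows of the table.
 *
 *	static size_t
 *	size_class(size_t size)
 *	{
 *		if (size <= bin_maxclass) {
 *			if (size < small_min) {
 *				size = pow2_ceil(size);
 *				return (size < (1 << TINY_MIN_2POW) ?
 *				    (size_t)(1 << TINY_MIN_2POW) : size);
 *			}
 *			if (size <= small_max)
 *				return (QUANTUM_CEILING(size));
 *			return (pow2_ceil(size));
 *		}
 *		if (size <= arena_maxclass)
 *			return (PAGE_CEILING(size));
 *		return (CHUNK_CEILING(size));
 *	}
 */
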
     98  1.10.2.2  matt /* LINTLIBRARY */
     99  1.10.2.2  matt 
    100  1.10.2.2  matt #ifdef __NetBSD__
    101  1.10.2.2  matt #  define xutrace(a, b)		utrace("malloc", (a), (b))
    102  1.10.2.2  matt #  define __DECONST(x, y)	((x)__UNCONST(y))
    103  1.10.2.2  matt #  define NO_TLS
    104  1.10.2.2  matt #else
    105  1.10.2.2  matt #  define xutrace(a, b)		utrace((a), (b))
    106  1.10.2.2  matt #endif	/* __NetBSD__ */
    107  1.10.2.2  matt 
    108  1.10.2.2  matt /*
    109  1.10.2.2  matt  * MALLOC_PRODUCTION disables assertions and statistics gathering.  It also
    110  1.10.2.2  matt  * defaults the A and J runtime options to off.  These settings are appropriate
    111  1.10.2.2  matt  * for production systems.
    112  1.10.2.2  matt  */
    113  1.10.2.2  matt #define MALLOC_PRODUCTION
    114  1.10.2.2  matt 
    115  1.10.2.2  matt #ifndef MALLOC_PRODUCTION
    116  1.10.2.2  matt #  define MALLOC_DEBUG
    117  1.10.2.2  matt #endif
    118  1.10.2.2  matt 
    119  1.10.2.2  matt #include <sys/cdefs.h>
    120  1.10.2.2  matt /* __FBSDID("$FreeBSD: src/lib/libc/stdlib/malloc.c,v 1.147 2007/06/15 22:00:16 jasone Exp $"); */
    121  1.10.2.4  matt __RCSID("jemalloc.c,v 1.10.2.3 2008/01/09 01:34:14 matt Exp");
    122  1.10.2.2  matt 
    123  1.10.2.2  matt #ifdef __FreeBSD__
    124  1.10.2.2  matt #include "libc_private.h"
    125  1.10.2.2  matt #ifdef MALLOC_DEBUG
    126  1.10.2.2  matt #  define _LOCK_DEBUG
    127  1.10.2.2  matt #endif
    128  1.10.2.2  matt #include "spinlock.h"
    129  1.10.2.2  matt #endif
    130  1.10.2.2  matt #include "namespace.h"
    131  1.10.2.2  matt #include <sys/mman.h>
    132  1.10.2.2  matt #include <sys/param.h>
    133  1.10.2.2  matt #ifdef __FreeBSD__
    134  1.10.2.2  matt #include <sys/stddef.h>
    135  1.10.2.2  matt #endif
    136  1.10.2.2  matt #include <sys/time.h>
    137  1.10.2.2  matt #include <sys/types.h>
    138  1.10.2.2  matt #include <sys/sysctl.h>
    139  1.10.2.2  matt #include <sys/tree.h>
    140  1.10.2.2  matt #include <sys/uio.h>
    141  1.10.2.2  matt #include <sys/ktrace.h> /* Must come after several other sys/ includes. */
    142  1.10.2.2  matt 
    143  1.10.2.2  matt #ifdef __FreeBSD__
    144  1.10.2.2  matt #include <machine/atomic.h>
    145  1.10.2.2  matt #include <machine/cpufunc.h>
    146  1.10.2.2  matt #endif
    147  1.10.2.2  matt #include <machine/vmparam.h>
    148  1.10.2.2  matt 
    149  1.10.2.2  matt #include <errno.h>
    150  1.10.2.2  matt #include <limits.h>
    151  1.10.2.2  matt #include <pthread.h>
    152  1.10.2.2  matt #include <sched.h>
    153  1.10.2.2  matt #include <stdarg.h>
    154  1.10.2.2  matt #include <stdbool.h>
    155  1.10.2.2  matt #include <stdio.h>
    156  1.10.2.2  matt #include <stdint.h>
    157  1.10.2.2  matt #include <stdlib.h>
    158  1.10.2.2  matt #include <string.h>
    159  1.10.2.2  matt #include <strings.h>
    160  1.10.2.2  matt #include <unistd.h>
    161  1.10.2.2  matt 
    162  1.10.2.2  matt #ifdef __NetBSD__
    163  1.10.2.2  matt #  include <reentrant.h>
    164  1.10.2.3  matt #  include "extern.h"
    165  1.10.2.3  matt 
    166  1.10.2.3  matt #define STRERROR_R(a, b, c)	__strerror_r(a, b, c);
    167  1.10.2.3  matt /*
     168  1.10.2.3  matt  * A non-localized version of strerror() that avoids pulling in stdio
     169  1.10.2.3  matt  * and the locale code.  All the malloc messages are in English, so why
     170  1.10.2.3  matt  * bother?
    171  1.10.2.3  matt  */
    172  1.10.2.3  matt static int
    173  1.10.2.3  matt __strerror_r(int e, char *s, size_t l)
    174  1.10.2.3  matt {
    175  1.10.2.3  matt 	int rval;
    176  1.10.2.3  matt 	size_t slen;
    177  1.10.2.3  matt 
    178  1.10.2.3  matt 	if (e >= 0 && e < sys_nerr) {
    179  1.10.2.3  matt 		slen = strlcpy(s, sys_errlist[e], l);
    180  1.10.2.3  matt 		rval = 0;
    181  1.10.2.3  matt 	} else {
     182  1.10.2.3  matt 		slen = snprintf_ss(s, l, "Unknown error %d", e);
    183  1.10.2.3  matt 		rval = EINVAL;
    184  1.10.2.3  matt 	}
    185  1.10.2.3  matt 	return slen >= l ? ERANGE : rval;
    186  1.10.2.3  matt }
    187  1.10.2.2  matt #endif
    188  1.10.2.2  matt 
    189  1.10.2.2  matt #ifdef __FreeBSD__
    190  1.10.2.3  matt #define STRERROR_R(a, b, c)	strerror_r(a, b, c);
    191  1.10.2.2  matt #include "un-namespace.h"
    192  1.10.2.2  matt #endif
    193  1.10.2.2  matt 
    194  1.10.2.2  matt /* MALLOC_STATS enables statistics calculation. */
    195  1.10.2.2  matt #ifndef MALLOC_PRODUCTION
    196  1.10.2.2  matt #  define MALLOC_STATS
    197  1.10.2.2  matt #endif
    198  1.10.2.2  matt 
    199  1.10.2.2  matt #ifdef MALLOC_DEBUG
    200  1.10.2.2  matt #  ifdef NDEBUG
    201  1.10.2.2  matt #    undef NDEBUG
    202  1.10.2.2  matt #  endif
    203  1.10.2.2  matt #else
    204  1.10.2.2  matt #  ifndef NDEBUG
    205  1.10.2.2  matt #    define NDEBUG
    206  1.10.2.2  matt #  endif
    207  1.10.2.2  matt #endif
    208  1.10.2.2  matt #include <assert.h>
    209  1.10.2.2  matt 
    210  1.10.2.2  matt #ifdef MALLOC_DEBUG
    211  1.10.2.2  matt    /* Disable inlining to make debugging easier. */
    212  1.10.2.2  matt #  define inline
    213  1.10.2.2  matt #endif
    214  1.10.2.2  matt 
    215  1.10.2.2  matt /* Size of stack-allocated buffer passed to strerror_r(). */
    216  1.10.2.2  matt #define	STRERROR_BUF		64
    217  1.10.2.2  matt 
    218  1.10.2.2  matt /* Minimum alignment of allocations is 2^QUANTUM_2POW_MIN bytes. */
    219  1.10.2.2  matt #ifdef __i386__
    220  1.10.2.2  matt #  define QUANTUM_2POW_MIN	4
    221  1.10.2.2  matt #  define SIZEOF_PTR_2POW	2
    222  1.10.2.2  matt #  define USE_BRK
    223  1.10.2.2  matt #endif
    224  1.10.2.2  matt #ifdef __ia64__
    225  1.10.2.2  matt #  define QUANTUM_2POW_MIN	4
    226  1.10.2.2  matt #  define SIZEOF_PTR_2POW	3
    227  1.10.2.2  matt #endif
    228  1.10.2.2  matt #ifdef __alpha__
    229  1.10.2.2  matt #  define QUANTUM_2POW_MIN	4
    230  1.10.2.2  matt #  define SIZEOF_PTR_2POW	3
    231  1.10.2.2  matt #  define NO_TLS
    232  1.10.2.2  matt #endif
    233  1.10.2.2  matt #ifdef __sparc64__
    234  1.10.2.2  matt #  define QUANTUM_2POW_MIN	4
    235  1.10.2.2  matt #  define SIZEOF_PTR_2POW	3
    236  1.10.2.2  matt #  define NO_TLS
    237  1.10.2.2  matt #endif
    238  1.10.2.2  matt #ifdef __amd64__
    239  1.10.2.2  matt #  define QUANTUM_2POW_MIN	4
    240  1.10.2.2  matt #  define SIZEOF_PTR_2POW	3
    241  1.10.2.2  matt #endif
    242  1.10.2.2  matt #ifdef __arm__
    243  1.10.2.2  matt #  define QUANTUM_2POW_MIN	3
    244  1.10.2.2  matt #  define SIZEOF_PTR_2POW	2
    245  1.10.2.2  matt #  define USE_BRK
    246  1.10.2.2  matt #  define NO_TLS
    247  1.10.2.2  matt #endif
    248  1.10.2.2  matt #ifdef __powerpc__
    249  1.10.2.2  matt #  define QUANTUM_2POW_MIN	4
    250  1.10.2.2  matt #  define SIZEOF_PTR_2POW	2
    251  1.10.2.2  matt #  define USE_BRK
    252  1.10.2.2  matt #endif
    253  1.10.2.2  matt #if defined(__sparc__) && !defined(__sparc64__)
    254  1.10.2.2  matt #  define QUANTUM_2POW_MIN	4
    255  1.10.2.2  matt #  define SIZEOF_PTR_2POW	2
    256  1.10.2.2  matt #  define USE_BRK
    257  1.10.2.2  matt #endif
    258  1.10.2.2  matt #ifdef __vax__
    259  1.10.2.2  matt #  define QUANTUM_2POW_MIN	4
    260  1.10.2.2  matt #  define SIZEOF_PTR_2POW	2
    261  1.10.2.2  matt #  define USE_BRK
    262  1.10.2.2  matt #endif
    263  1.10.2.2  matt #ifdef __sh__
    264  1.10.2.2  matt #  define QUANTUM_2POW_MIN	4
    265  1.10.2.2  matt #  define SIZEOF_PTR_2POW	2
    266  1.10.2.2  matt #  define USE_BRK
    267  1.10.2.2  matt #endif
    268  1.10.2.2  matt #ifdef __m68k__
    269  1.10.2.2  matt #  define QUANTUM_2POW_MIN	4
    270  1.10.2.2  matt #  define SIZEOF_PTR_2POW	2
    271  1.10.2.2  matt #  define USE_BRK
    272  1.10.2.2  matt #endif
    273  1.10.2.2  matt #ifdef __mips__
    274  1.10.2.2  matt #  define QUANTUM_2POW_MIN	4
    275  1.10.2.2  matt #  define SIZEOF_PTR_2POW	2
    276  1.10.2.2  matt #  define USE_BRK
    277  1.10.2.2  matt #endif
    278  1.10.2.2  matt #ifdef __hppa__
     279  1.10.2.2  matt #  define QUANTUM_2POW_MIN	4
     280  1.10.2.2  matt #  define SIZEOF_PTR_2POW	2
    281  1.10.2.2  matt #  define USE_BRK
    282  1.10.2.2  matt #endif
    283  1.10.2.2  matt 
    284  1.10.2.2  matt #define	SIZEOF_PTR		(1 << SIZEOF_PTR_2POW)
    285  1.10.2.2  matt 
    286  1.10.2.2  matt /* sizeof(int) == (1 << SIZEOF_INT_2POW). */
    287  1.10.2.2  matt #ifndef SIZEOF_INT_2POW
    288  1.10.2.2  matt #  define SIZEOF_INT_2POW	2
    289  1.10.2.2  matt #endif
    290  1.10.2.2  matt 
    291  1.10.2.2  matt /* We can't use TLS in non-PIC programs, since TLS relies on loader magic. */
    292  1.10.2.2  matt #if (!defined(PIC) && !defined(NO_TLS))
    293  1.10.2.2  matt #  define NO_TLS
    294  1.10.2.2  matt #endif
    295  1.10.2.2  matt 
    296  1.10.2.2  matt /*
    297  1.10.2.2  matt  * Size and alignment of memory chunks that are allocated by the OS's virtual
    298  1.10.2.2  matt  * memory system.
    299  1.10.2.2  matt  */
    300  1.10.2.2  matt #define	CHUNK_2POW_DEFAULT	20
    301  1.10.2.2  matt 
    302  1.10.2.2  matt /*
     303  1.10.2.2  matt  * Maximum size of an L1 cache line.  This is used to avoid cache line
     304  1.10.2.2  matt  * aliasing; over-estimates are okay (up to a point), but under-estimates
     305  1.10.2.2  matt  * will negatively affect performance.
    306  1.10.2.2  matt  */
    307  1.10.2.2  matt #define	CACHELINE_2POW		6
    308  1.10.2.2  matt #define	CACHELINE		((size_t)(1 << CACHELINE_2POW))
    309  1.10.2.2  matt 
    310  1.10.2.2  matt /* Smallest size class to support. */
    311  1.10.2.2  matt #define	TINY_MIN_2POW		1
    312  1.10.2.2  matt 
    313  1.10.2.2  matt /*
    314  1.10.2.2  matt  * Maximum size class that is a multiple of the quantum, but not (necessarily)
    315  1.10.2.2  matt  * a power of 2.  Above this size, allocations are rounded up to the nearest
    316  1.10.2.2  matt  * power of 2.
    317  1.10.2.2  matt  */
    318  1.10.2.2  matt #define	SMALL_MAX_2POW_DEFAULT	9
    319  1.10.2.2  matt #define	SMALL_MAX_DEFAULT	(1 << SMALL_MAX_2POW_DEFAULT)
    320  1.10.2.2  matt 
    321  1.10.2.2  matt /*
     322  1.10.2.2  matt  * Maximum desired run header overhead.  Runs are sized as small as possible
     323  1.10.2.2  matt  * while still honoring this setting and the other constraints; in other
     324  1.10.2.2  matt  * words, the goal is to keep runs small without exceeding a per-run
     325  1.10.2.2  matt  * external fragmentation threshold.
    326  1.10.2.2  matt  *
    327  1.10.2.2  matt  * Note that it is possible to set this low enough that it cannot be honored
    328  1.10.2.2  matt  * for some/all object sizes, since there is one bit of header overhead per
    329  1.10.2.2  matt  * object (plus a constant).  In such cases, this constraint is relaxed.
    330  1.10.2.2  matt  *
    331  1.10.2.2  matt  * RUN_MAX_OVRHD_RELAX specifies the maximum number of bits per region of
    332  1.10.2.2  matt  * overhead for which RUN_MAX_OVRHD is relaxed.
    333  1.10.2.2  matt  */
    334  1.10.2.2  matt #define RUN_MAX_OVRHD		0.015
    335  1.10.2.2  matt #define RUN_MAX_OVRHD_RELAX	1.5
    336  1.10.2.2  matt 
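/*
 * Worked example of the two limits above (illustrative arithmetic only, not
 * used by the code): for 2 byte regions the bitmap alone costs 1 bit per
 * 16 bit region, i.e. about 6.3% of a run, so the 1.5% RUN_MAX_OVRHD target
 * can never be met; the RUN_MAX_OVRHD_RELAX bound of 1.5 bits per region
 * (1.5/16, about 9.4%) is what actually caps header overhead there.  For
 * 16 byte regions, 1.5 bits per 128 bit region is about 1.2%, already under
 * RUN_MAX_OVRHD, so the 1.5% target is the binding constraint.
 */
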
    337  1.10.2.2  matt /* Put a cap on small object run size.  This overrides RUN_MAX_OVRHD. */
    338  1.10.2.2  matt #define RUN_MAX_SMALL_2POW	15
    339  1.10.2.2  matt #define RUN_MAX_SMALL		(1 << RUN_MAX_SMALL_2POW)
    340  1.10.2.2  matt 
    341  1.10.2.2  matt /******************************************************************************/
    342  1.10.2.2  matt 
    343  1.10.2.2  matt #ifdef __FreeBSD__
    344  1.10.2.2  matt /*
    345  1.10.2.2  matt  * Mutexes based on spinlocks.  We can't use normal pthread mutexes, because
    346  1.10.2.2  matt  * they require malloc()ed memory.
    347  1.10.2.2  matt  */
    348  1.10.2.2  matt typedef struct {
    349  1.10.2.2  matt 	spinlock_t	lock;
    350  1.10.2.2  matt } malloc_mutex_t;
    351  1.10.2.2  matt 
    352  1.10.2.2  matt /* Set to true once the allocator has been initialized. */
    353  1.10.2.2  matt static bool malloc_initialized = false;
    354  1.10.2.2  matt 
    355  1.10.2.2  matt /* Used to avoid initialization races. */
    356  1.10.2.2  matt static malloc_mutex_t init_lock = {_SPINLOCK_INITIALIZER};
    357  1.10.2.2  matt #else
    358  1.10.2.2  matt #define	malloc_mutex_t	mutex_t
    359  1.10.2.2  matt 
    360  1.10.2.2  matt /* Set to true once the allocator has been initialized. */
    361  1.10.2.2  matt static bool malloc_initialized = false;
    362  1.10.2.2  matt 
    363  1.10.2.2  matt /* Used to avoid initialization races. */
    364  1.10.2.2  matt static mutex_t init_lock = MUTEX_INITIALIZER;
    365  1.10.2.2  matt #endif
    366  1.10.2.2  matt 
    367  1.10.2.2  matt /******************************************************************************/
    368  1.10.2.2  matt /*
    369  1.10.2.2  matt  * Statistics data structures.
    370  1.10.2.2  matt  */
    371  1.10.2.2  matt 
    372  1.10.2.2  matt #ifdef MALLOC_STATS
    373  1.10.2.2  matt 
    374  1.10.2.2  matt typedef struct malloc_bin_stats_s malloc_bin_stats_t;
    375  1.10.2.2  matt struct malloc_bin_stats_s {
    376  1.10.2.2  matt 	/*
    377  1.10.2.2  matt 	 * Number of allocation requests that corresponded to the size of this
    378  1.10.2.2  matt 	 * bin.
    379  1.10.2.2  matt 	 */
    380  1.10.2.2  matt 	uint64_t	nrequests;
    381  1.10.2.2  matt 
    382  1.10.2.2  matt 	/* Total number of runs created for this bin's size class. */
    383  1.10.2.2  matt 	uint64_t	nruns;
    384  1.10.2.2  matt 
    385  1.10.2.2  matt 	/*
    386  1.10.2.2  matt 	 * Total number of runs reused by extracting them from the runs tree for
    387  1.10.2.2  matt 	 * this bin's size class.
    388  1.10.2.2  matt 	 */
    389  1.10.2.2  matt 	uint64_t	reruns;
    390  1.10.2.2  matt 
    391  1.10.2.2  matt 	/* High-water mark for this bin. */
    392  1.10.2.2  matt 	unsigned long	highruns;
    393  1.10.2.2  matt 
    394  1.10.2.2  matt 	/* Current number of runs in this bin. */
    395  1.10.2.2  matt 	unsigned long	curruns;
    396  1.10.2.2  matt };
    397  1.10.2.2  matt 
    398  1.10.2.2  matt typedef struct arena_stats_s arena_stats_t;
    399  1.10.2.2  matt struct arena_stats_s {
    400  1.10.2.2  matt 	/* Number of bytes currently mapped. */
    401  1.10.2.2  matt 	size_t		mapped;
    402  1.10.2.2  matt 
    403  1.10.2.2  matt 	/* Per-size-category statistics. */
    404  1.10.2.2  matt 	size_t		allocated_small;
    405  1.10.2.2  matt 	uint64_t	nmalloc_small;
    406  1.10.2.2  matt 	uint64_t	ndalloc_small;
    407  1.10.2.2  matt 
    408  1.10.2.2  matt 	size_t		allocated_large;
    409  1.10.2.2  matt 	uint64_t	nmalloc_large;
    410  1.10.2.2  matt 	uint64_t	ndalloc_large;
    411  1.10.2.2  matt };
    412  1.10.2.2  matt 
    413  1.10.2.2  matt typedef struct chunk_stats_s chunk_stats_t;
    414  1.10.2.2  matt struct chunk_stats_s {
    415  1.10.2.2  matt 	/* Number of chunks that were allocated. */
    416  1.10.2.2  matt 	uint64_t	nchunks;
    417  1.10.2.2  matt 
    418  1.10.2.2  matt 	/* High-water mark for number of chunks allocated. */
    419  1.10.2.2  matt 	unsigned long	highchunks;
    420  1.10.2.2  matt 
    421  1.10.2.2  matt 	/*
     422  1.10.2.2  matt 	 * Current number of chunks allocated.  This value is maintained only
     423  1.10.2.2  matt 	 * so that it is possible to keep highchunks up to date as chunks are
     424  1.10.2.2  matt 	 * allocated and deallocated.
    425  1.10.2.2  matt 	 */
    426  1.10.2.2  matt 	unsigned long	curchunks;
    427  1.10.2.2  matt };
    428  1.10.2.2  matt 
    429  1.10.2.2  matt #endif /* #ifdef MALLOC_STATS */
    430  1.10.2.2  matt 
    431  1.10.2.2  matt /******************************************************************************/
    432  1.10.2.2  matt /*
    433  1.10.2.2  matt  * Chunk data structures.
    434  1.10.2.2  matt  */
    435  1.10.2.2  matt 
    436  1.10.2.2  matt /* Tree of chunks. */
    437  1.10.2.2  matt typedef struct chunk_node_s chunk_node_t;
    438  1.10.2.2  matt struct chunk_node_s {
    439  1.10.2.2  matt 	/* Linkage for the chunk tree. */
    440  1.10.2.2  matt 	RB_ENTRY(chunk_node_s) link;
    441  1.10.2.2  matt 
    442  1.10.2.2  matt 	/*
    443  1.10.2.2  matt 	 * Pointer to the chunk that this tree node is responsible for.  In some
    444  1.10.2.2  matt 	 * (but certainly not all) cases, this data structure is placed at the
    445  1.10.2.2  matt 	 * beginning of the corresponding chunk, so this field may point to this
    446  1.10.2.2  matt 	 * node.
    447  1.10.2.2  matt 	 */
    448  1.10.2.2  matt 	void	*chunk;
    449  1.10.2.2  matt 
    450  1.10.2.2  matt 	/* Total chunk size. */
    451  1.10.2.2  matt 	size_t	size;
    452  1.10.2.2  matt };
    453  1.10.2.2  matt typedef struct chunk_tree_s chunk_tree_t;
    454  1.10.2.2  matt RB_HEAD(chunk_tree_s, chunk_node_s);
    455  1.10.2.2  matt 
    456  1.10.2.2  matt /******************************************************************************/
    457  1.10.2.2  matt /*
    458  1.10.2.2  matt  * Arena data structures.
    459  1.10.2.2  matt  */
    460  1.10.2.2  matt 
    461  1.10.2.2  matt typedef struct arena_s arena_t;
    462  1.10.2.2  matt typedef struct arena_bin_s arena_bin_t;
    463  1.10.2.2  matt 
    464  1.10.2.2  matt typedef struct arena_chunk_map_s arena_chunk_map_t;
    465  1.10.2.2  matt struct arena_chunk_map_s {
    466  1.10.2.2  matt 	/* Number of pages in run. */
    467  1.10.2.2  matt 	uint32_t	npages;
    468  1.10.2.2  matt 	/*
    469  1.10.2.2  matt 	 * Position within run.  For a free run, this is POS_FREE for the first
    470  1.10.2.2  matt 	 * and last pages.  The POS_FREE special value makes it possible to
    471  1.10.2.2  matt 	 * quickly coalesce free runs.
    472  1.10.2.2  matt 	 *
    473  1.10.2.2  matt 	 * This is the limiting factor for chunksize; there can be at most 2^31
    474  1.10.2.2  matt 	 * pages in a run.
    475  1.10.2.2  matt 	 */
    476  1.10.2.2  matt #define POS_FREE ((uint32_t)0xffffffffU)
    477  1.10.2.2  matt 	uint32_t	pos;
    478  1.10.2.2  matt };
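/*
 * Illustrative sketch (not the allocator's deallocation code): the reason
 * only the first and last map entries of a free run need to be kept current
 * is that a newly freed run at page index run_ind, spanning run_pages pages,
 * can locate both neighbors in constant time (bounds checks omitted here):
 *
 *	if (map[run_ind + run_pages].pos == POS_FREE)
 *		the following free run is map[run_ind + run_pages].npages
 *		pages long; absorb it;
 *	if (map[run_ind - 1].pos == POS_FREE)
 *		the preceding free run is map[run_ind - 1].npages pages long;
 *		step back that far and absorb it;
 *
 * The real logic lives in arena_run_dalloc(); this only paraphrases the
 * mechanism that POS_FREE enables.
 */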
    479  1.10.2.2  matt 
    480  1.10.2.2  matt /* Arena chunk header. */
    481  1.10.2.2  matt typedef struct arena_chunk_s arena_chunk_t;
    482  1.10.2.2  matt struct arena_chunk_s {
    483  1.10.2.2  matt 	/* Arena that owns the chunk. */
    484  1.10.2.2  matt 	arena_t *arena;
    485  1.10.2.2  matt 
    486  1.10.2.2  matt 	/* Linkage for the arena's chunk tree. */
    487  1.10.2.2  matt 	RB_ENTRY(arena_chunk_s) link;
    488  1.10.2.2  matt 
    489  1.10.2.2  matt 	/*
    490  1.10.2.2  matt 	 * Number of pages in use.  This is maintained in order to make
    491  1.10.2.2  matt 	 * detection of empty chunks fast.
    492  1.10.2.2  matt 	 */
    493  1.10.2.2  matt 	uint32_t pages_used;
    494  1.10.2.2  matt 
    495  1.10.2.2  matt 	/*
    496  1.10.2.2  matt 	 * Every time a free run larger than this value is created/coalesced,
    497  1.10.2.2  matt 	 * this value is increased.  The only way that the value decreases is if
    498  1.10.2.2  matt 	 * arena_run_alloc() fails to find a free run as large as advertised by
    499  1.10.2.2  matt 	 * this value.
    500  1.10.2.2  matt 	 */
    501  1.10.2.2  matt 	uint32_t max_frun_npages;
    502  1.10.2.2  matt 
    503  1.10.2.2  matt 	/*
    504  1.10.2.2  matt 	 * Every time a free run that starts at an earlier page than this value
    505  1.10.2.2  matt 	 * is created/coalesced, this value is decreased.  It is reset in a
    506  1.10.2.2  matt 	 * similar fashion to max_frun_npages.
    507  1.10.2.2  matt 	 */
    508  1.10.2.2  matt 	uint32_t min_frun_ind;
    509  1.10.2.2  matt 
    510  1.10.2.2  matt 	/*
    511  1.10.2.2  matt 	 * Map of pages within chunk that keeps track of free/large/small.  For
    512  1.10.2.2  matt 	 * free runs, only the map entries for the first and last pages are
    513  1.10.2.2  matt 	 * kept up to date, so that free runs can be quickly coalesced.
    514  1.10.2.2  matt 	 */
    515  1.10.2.2  matt 	arena_chunk_map_t map[1]; /* Dynamically sized. */
    516  1.10.2.2  matt };
    517  1.10.2.2  matt typedef struct arena_chunk_tree_s arena_chunk_tree_t;
    518  1.10.2.2  matt RB_HEAD(arena_chunk_tree_s, arena_chunk_s);
    519  1.10.2.2  matt 
    520  1.10.2.2  matt typedef struct arena_run_s arena_run_t;
    521  1.10.2.2  matt struct arena_run_s {
    522  1.10.2.2  matt 	/* Linkage for run trees. */
    523  1.10.2.2  matt 	RB_ENTRY(arena_run_s) link;
    524  1.10.2.2  matt 
    525  1.10.2.2  matt #ifdef MALLOC_DEBUG
    526  1.10.2.2  matt 	uint32_t	magic;
    527  1.10.2.2  matt #  define ARENA_RUN_MAGIC 0x384adf93
    528  1.10.2.2  matt #endif
    529  1.10.2.2  matt 
    530  1.10.2.2  matt 	/* Bin this run is associated with. */
    531  1.10.2.2  matt 	arena_bin_t	*bin;
    532  1.10.2.2  matt 
    533  1.10.2.2  matt 	/* Index of first element that might have a free region. */
    534  1.10.2.2  matt 	unsigned	regs_minelm;
    535  1.10.2.2  matt 
    536  1.10.2.2  matt 	/* Number of free regions in run. */
    537  1.10.2.2  matt 	unsigned	nfree;
    538  1.10.2.2  matt 
    539  1.10.2.2  matt 	/* Bitmask of in-use regions (0: in use, 1: free). */
    540  1.10.2.2  matt 	unsigned	regs_mask[1]; /* Dynamically sized. */
    541  1.10.2.2  matt };
    542  1.10.2.2  matt typedef struct arena_run_tree_s arena_run_tree_t;
    543  1.10.2.2  matt RB_HEAD(arena_run_tree_s, arena_run_s);
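/*
 * Illustrative sketch (not this file's code): given the regs_mask layout
 * above (one bit per region, 1 == free), a free region can be located by
 * scanning from regs_minelm and applying ffs() to the first non-zero mask
 * element.  SIZEOF_INT_2POW is the constant defined earlier in this file;
 * run/bin are an arena_run_t/arena_bin_t pair as declared above.
 *
 *	unsigned i;
 *
 *	for (i = run->regs_minelm; i < bin->regs_mask_nelms; i++) {
 *		unsigned mask = run->regs_mask[i];
 *		if (mask != 0) {
 *			unsigned bit = ffs((int)mask) - 1;
 *			unsigned regind = (i << (SIZEOF_INT_2POW + 3)) + bit;
 *			void *reg = (void *)((uintptr_t)run +
 *			    bin->reg0_offset + (regind * bin->reg_size));
 *			run->regs_mask[i] &= ~(1U << bit);
 *			return (reg);
 *		}
 *	}
 *
 * The allocator's full version (arena_run_reg_alloc()) also maintains
 * regs_minelm and nfree; this sketch only shows how a mask bit maps to a
 * region address.
 */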
    544  1.10.2.2  matt 
    545  1.10.2.2  matt struct arena_bin_s {
    546  1.10.2.2  matt 	/*
    547  1.10.2.2  matt 	 * Current run being used to service allocations of this bin's size
    548  1.10.2.2  matt 	 * class.
    549  1.10.2.2  matt 	 */
    550  1.10.2.2  matt 	arena_run_t	*runcur;
    551  1.10.2.2  matt 
    552  1.10.2.2  matt 	/*
    553  1.10.2.2  matt 	 * Tree of non-full runs.  This tree is used when looking for an
    554  1.10.2.2  matt 	 * existing run when runcur is no longer usable.  We choose the
    555  1.10.2.2  matt 	 * non-full run that is lowest in memory; this policy tends to keep
    556  1.10.2.2  matt 	 * objects packed well, and it can also help reduce the number of
    557  1.10.2.2  matt 	 * almost-empty chunks.
    558  1.10.2.2  matt 	 */
    559  1.10.2.2  matt 	arena_run_tree_t runs;
    560  1.10.2.2  matt 
    561  1.10.2.2  matt 	/* Size of regions in a run for this bin's size class. */
    562  1.10.2.2  matt 	size_t		reg_size;
    563  1.10.2.2  matt 
    564  1.10.2.2  matt 	/* Total size of a run for this bin's size class. */
    565  1.10.2.2  matt 	size_t		run_size;
    566  1.10.2.2  matt 
    567  1.10.2.2  matt 	/* Total number of regions in a run for this bin's size class. */
    568  1.10.2.2  matt 	uint32_t	nregs;
    569  1.10.2.2  matt 
    570  1.10.2.2  matt 	/* Number of elements in a run's regs_mask for this bin's size class. */
    571  1.10.2.2  matt 	uint32_t	regs_mask_nelms;
    572  1.10.2.2  matt 
    573  1.10.2.2  matt 	/* Offset of first region in a run for this bin's size class. */
    574  1.10.2.2  matt 	uint32_t	reg0_offset;
    575  1.10.2.2  matt 
    576  1.10.2.2  matt #ifdef MALLOC_STATS
    577  1.10.2.2  matt 	/* Bin statistics. */
    578  1.10.2.2  matt 	malloc_bin_stats_t stats;
    579  1.10.2.2  matt #endif
    580  1.10.2.2  matt };
    581  1.10.2.2  matt 
    582  1.10.2.2  matt struct arena_s {
    583  1.10.2.2  matt #ifdef MALLOC_DEBUG
    584  1.10.2.2  matt 	uint32_t		magic;
    585  1.10.2.2  matt #  define ARENA_MAGIC 0x947d3d24
    586  1.10.2.2  matt #endif
    587  1.10.2.2  matt 
    588  1.10.2.2  matt 	/* All operations on this arena require that mtx be locked. */
    589  1.10.2.2  matt 	malloc_mutex_t		mtx;
    590  1.10.2.2  matt 
    591  1.10.2.2  matt #ifdef MALLOC_STATS
    592  1.10.2.2  matt 	arena_stats_t		stats;
    593  1.10.2.2  matt #endif
    594  1.10.2.2  matt 
    595  1.10.2.2  matt 	/*
    596  1.10.2.2  matt 	 * Tree of chunks this arena manages.
    597  1.10.2.2  matt 	 */
    598  1.10.2.2  matt 	arena_chunk_tree_t	chunks;
    599  1.10.2.2  matt 
    600  1.10.2.2  matt 	/*
    601  1.10.2.2  matt 	 * In order to avoid rapid chunk allocation/deallocation when an arena
    602  1.10.2.2  matt 	 * oscillates right on the cusp of needing a new chunk, cache the most
    603  1.10.2.2  matt 	 * recently freed chunk.  This caching is disabled by opt_hint.
    604  1.10.2.2  matt 	 *
    605  1.10.2.2  matt 	 * There is one spare chunk per arena, rather than one spare total, in
    606  1.10.2.2  matt 	 * order to avoid interactions between multiple threads that could make
    607  1.10.2.2  matt 	 * a single spare inadequate.
    608  1.10.2.2  matt 	 */
    609  1.10.2.2  matt 	arena_chunk_t *spare;
    610  1.10.2.2  matt 
    611  1.10.2.2  matt 	/*
     612  1.10.2.2  matt 	 * bins is used to store trees of free regions of the following sizes,
    613  1.10.2.2  matt 	 * assuming a 16-byte quantum, 4kB pagesize, and default MALLOC_OPTIONS.
    614  1.10.2.2  matt 	 *
    615  1.10.2.2  matt 	 *   bins[i] | size |
    616  1.10.2.2  matt 	 *   --------+------+
    617  1.10.2.2  matt 	 *        0  |    2 |
    618  1.10.2.2  matt 	 *        1  |    4 |
    619  1.10.2.2  matt 	 *        2  |    8 |
    620  1.10.2.2  matt 	 *   --------+------+
    621  1.10.2.2  matt 	 *        3  |   16 |
    622  1.10.2.2  matt 	 *        4  |   32 |
    623  1.10.2.2  matt 	 *        5  |   48 |
    624  1.10.2.2  matt 	 *        6  |   64 |
    625  1.10.2.2  matt 	 *           :      :
    626  1.10.2.2  matt 	 *           :      :
    627  1.10.2.2  matt 	 *       33  |  496 |
    628  1.10.2.2  matt 	 *       34  |  512 |
    629  1.10.2.2  matt 	 *   --------+------+
    630  1.10.2.2  matt 	 *       35  | 1024 |
    631  1.10.2.2  matt 	 *       36  | 2048 |
    632  1.10.2.2  matt 	 *   --------+------+
    633  1.10.2.2  matt 	 */
    634  1.10.2.2  matt 	arena_bin_t		bins[1]; /* Dynamically sized. */
    635  1.10.2.2  matt };
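/*
 * Illustrative paraphrase of the table above (not the code the allocator
 * uses; the real lookup is in arena_malloc()): how a small size is expected
 * to select an index into bins[], in terms of ntbins, nqbins, small_min,
 * small_max, opt_quantum_2pow, opt_small_max_2pow and TINY_MIN_2POW from
 * this file.
 *
 *	if (size < small_min)
 *		ndx = ffs((int)(pow2_ceil(size) >> (TINY_MIN_2POW + 1)));
 *	else if (size <= small_max)
 *		ndx = ntbins + (QUANTUM_CEILING(size) >> opt_quantum_2pow) - 1;
 *	else
 *		ndx = ntbins + nqbins +
 *		    (ffs((int)(pow2_ceil(size) >> opt_small_max_2pow)) - 2);
 *
 * With the defaults shown in the table this yields indices 0..2 for the tiny
 * classes, 3..34 for the quantum-spaced classes, and 35..36 for the sub-page
 * classes.
 */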
    636  1.10.2.2  matt 
    637  1.10.2.2  matt /******************************************************************************/
    638  1.10.2.2  matt /*
    639  1.10.2.2  matt  * Data.
    640  1.10.2.2  matt  */
    641  1.10.2.2  matt 
    642  1.10.2.2  matt /* Number of CPUs. */
    643  1.10.2.2  matt static unsigned		ncpus;
    644  1.10.2.2  matt 
    645  1.10.2.2  matt /* VM page size. */
    646  1.10.2.2  matt static size_t		pagesize;
    647  1.10.2.2  matt static size_t		pagesize_mask;
    648  1.10.2.2  matt static int		pagesize_2pow;
    649  1.10.2.2  matt 
    650  1.10.2.2  matt /* Various bin-related settings. */
    651  1.10.2.2  matt static size_t		bin_maxclass; /* Max size class for bins. */
    652  1.10.2.2  matt static unsigned		ntbins; /* Number of (2^n)-spaced tiny bins. */
    653  1.10.2.2  matt static unsigned		nqbins; /* Number of quantum-spaced bins. */
    654  1.10.2.2  matt static unsigned		nsbins; /* Number of (2^n)-spaced sub-page bins. */
    655  1.10.2.2  matt static size_t		small_min;
    656  1.10.2.2  matt static size_t		small_max;
    657  1.10.2.2  matt 
    658  1.10.2.2  matt /* Various quantum-related settings. */
    659  1.10.2.2  matt static size_t		quantum;
    660  1.10.2.2  matt static size_t		quantum_mask; /* (quantum - 1). */
    661  1.10.2.2  matt 
    662  1.10.2.2  matt /* Various chunk-related settings. */
    663  1.10.2.2  matt static size_t		chunksize;
    664  1.10.2.2  matt static size_t		chunksize_mask; /* (chunksize - 1). */
    665  1.10.2.2  matt static int		chunksize_2pow;
    666  1.10.2.2  matt static unsigned		chunk_npages;
    667  1.10.2.2  matt static unsigned		arena_chunk_header_npages;
    668  1.10.2.2  matt static size_t		arena_maxclass; /* Max size class for arenas. */
    669  1.10.2.2  matt 
    670  1.10.2.2  matt /********/
    671  1.10.2.2  matt /*
    672  1.10.2.2  matt  * Chunks.
    673  1.10.2.2  matt  */
    674  1.10.2.2  matt 
    675  1.10.2.2  matt /* Protects chunk-related data structures. */
    676  1.10.2.2  matt static malloc_mutex_t	chunks_mtx;
    677  1.10.2.2  matt 
    678  1.10.2.2  matt /* Tree of chunks that are stand-alone huge allocations. */
    679  1.10.2.2  matt static chunk_tree_t	huge;
    680  1.10.2.2  matt 
    681  1.10.2.2  matt #ifdef USE_BRK
    682  1.10.2.2  matt /*
    683  1.10.2.2  matt  * Try to use brk for chunk-size allocations, due to address space constraints.
    684  1.10.2.2  matt  */
    685  1.10.2.2  matt /*
    686  1.10.2.2  matt  * Protects sbrk() calls.  This must be separate from chunks_mtx, since
    687  1.10.2.2  matt  * base_pages_alloc() also uses sbrk(), but cannot lock chunks_mtx (doing so
    688  1.10.2.2  matt  * could cause recursive lock acquisition).
    689  1.10.2.2  matt  */
    690  1.10.2.2  matt static malloc_mutex_t	brk_mtx;
    691  1.10.2.2  matt /* Result of first sbrk(0) call. */
    692  1.10.2.2  matt static void		*brk_base;
    693  1.10.2.2  matt /* Current end of brk, or ((void *)-1) if brk is exhausted. */
    694  1.10.2.2  matt static void		*brk_prev;
    695  1.10.2.2  matt /* Current upper limit on brk addresses. */
    696  1.10.2.2  matt static void		*brk_max;
    697  1.10.2.2  matt #endif
    698  1.10.2.2  matt 
    699  1.10.2.2  matt #ifdef MALLOC_STATS
    700  1.10.2.2  matt /* Huge allocation statistics. */
    701  1.10.2.2  matt static uint64_t		huge_nmalloc;
    702  1.10.2.2  matt static uint64_t		huge_ndalloc;
    703  1.10.2.2  matt static uint64_t		huge_nralloc;
    704  1.10.2.2  matt static size_t		huge_allocated;
    705  1.10.2.2  matt #endif
    706  1.10.2.2  matt 
    707  1.10.2.2  matt /*
    708  1.10.2.2  matt  * Tree of chunks that were previously allocated.  This is used when allocating
    709  1.10.2.2  matt  * chunks, in an attempt to re-use address space.
    710  1.10.2.2  matt  */
    711  1.10.2.2  matt static chunk_tree_t	old_chunks;
    712  1.10.2.2  matt 
    713  1.10.2.2  matt /****************************/
    714  1.10.2.2  matt /*
    715  1.10.2.2  matt  * base (internal allocation).
    716  1.10.2.2  matt  */
    717  1.10.2.2  matt 
    718  1.10.2.2  matt /*
    719  1.10.2.2  matt  * Current pages that are being used for internal memory allocations.  These
    720  1.10.2.2  matt  * pages are carved up in cacheline-size quanta, so that there is no chance of
    721  1.10.2.2  matt  * false cache line sharing.
    722  1.10.2.2  matt  */
    723  1.10.2.2  matt static void		*base_pages;
    724  1.10.2.2  matt static void		*base_next_addr;
    725  1.10.2.2  matt static void		*base_past_addr; /* Addr immediately past base_pages. */
    726  1.10.2.2  matt static chunk_node_t	*base_chunk_nodes; /* LIFO cache of chunk nodes. */
    727  1.10.2.2  matt static malloc_mutex_t	base_mtx;
    728  1.10.2.2  matt #ifdef MALLOC_STATS
    729  1.10.2.2  matt static size_t		base_mapped;
    730  1.10.2.2  matt #endif
    731  1.10.2.2  matt 
    732  1.10.2.2  matt /********/
    733  1.10.2.2  matt /*
    734  1.10.2.2  matt  * Arenas.
    735  1.10.2.2  matt  */
    736  1.10.2.2  matt 
    737  1.10.2.2  matt /*
    738  1.10.2.2  matt  * Arenas that are used to service external requests.  Not all elements of the
    739  1.10.2.2  matt  * arenas array are necessarily used; arenas are created lazily as needed.
    740  1.10.2.2  matt  */
    741  1.10.2.2  matt static arena_t		**arenas;
    742  1.10.2.2  matt static unsigned		narenas;
    743  1.10.2.2  matt static unsigned		next_arena;
    744  1.10.2.2  matt static malloc_mutex_t	arenas_mtx; /* Protects arenas initialization. */
    745  1.10.2.2  matt 
    746  1.10.2.2  matt #ifndef NO_TLS
    747  1.10.2.2  matt /*
    748  1.10.2.2  matt  * Map of pthread_self() --> arenas[???], used for selecting an arena to use
    749  1.10.2.2  matt  * for allocations.
    750  1.10.2.2  matt  */
    751  1.10.2.2  matt static __thread arena_t	*arenas_map;
    752  1.10.2.2  matt #define	get_arenas_map()	(arenas_map)
    753  1.10.2.2  matt #define	set_arenas_map(x)	(arenas_map = x)
    754  1.10.2.2  matt #else
    755  1.10.2.2  matt static thread_key_t arenas_map_key;
    756  1.10.2.2  matt #define	get_arenas_map()	thr_getspecific(arenas_map_key)
    757  1.10.2.2  matt #define	set_arenas_map(x)	thr_setspecific(arenas_map_key, x)
    758  1.10.2.2  matt #endif
    759  1.10.2.2  matt 
    760  1.10.2.2  matt #ifdef MALLOC_STATS
    761  1.10.2.2  matt /* Chunk statistics. */
    762  1.10.2.2  matt static chunk_stats_t	stats_chunks;
    763  1.10.2.2  matt #endif
    764  1.10.2.2  matt 
    765  1.10.2.2  matt /*******************************/
    766  1.10.2.2  matt /*
    767  1.10.2.2  matt  * Runtime configuration options.
    768  1.10.2.2  matt  */
    769  1.10.2.2  matt const char	*_malloc_options;
    770  1.10.2.2  matt 
    771  1.10.2.2  matt #ifndef MALLOC_PRODUCTION
    772  1.10.2.2  matt static bool	opt_abort = true;
    773  1.10.2.2  matt static bool	opt_junk = true;
    774  1.10.2.2  matt #else
    775  1.10.2.2  matt static bool	opt_abort = false;
    776  1.10.2.2  matt static bool	opt_junk = false;
    777  1.10.2.2  matt #endif
    778  1.10.2.2  matt static bool	opt_hint = false;
    779  1.10.2.2  matt static bool	opt_print_stats = false;
    780  1.10.2.2  matt static int	opt_quantum_2pow = QUANTUM_2POW_MIN;
    781  1.10.2.2  matt static int	opt_small_max_2pow = SMALL_MAX_2POW_DEFAULT;
    782  1.10.2.2  matt static int	opt_chunk_2pow = CHUNK_2POW_DEFAULT;
    783  1.10.2.2  matt static bool	opt_utrace = false;
    784  1.10.2.2  matt static bool	opt_sysv = false;
    785  1.10.2.2  matt static bool	opt_xmalloc = false;
    786  1.10.2.2  matt static bool	opt_zero = false;
    787  1.10.2.2  matt static int32_t	opt_narenas_lshift = 0;
    788  1.10.2.2  matt 
    789  1.10.2.2  matt typedef struct {
    790  1.10.2.2  matt 	void	*p;
    791  1.10.2.2  matt 	size_t	s;
    792  1.10.2.2  matt 	void	*r;
    793  1.10.2.2  matt } malloc_utrace_t;
    794  1.10.2.2  matt 
    795  1.10.2.2  matt #define	UTRACE(a, b, c)							\
    796  1.10.2.2  matt 	if (opt_utrace) {						\
    797  1.10.2.2  matt 		malloc_utrace_t ut;					\
    798  1.10.2.2  matt 		ut.p = a;						\
    799  1.10.2.2  matt 		ut.s = b;						\
    800  1.10.2.2  matt 		ut.r = c;						\
    801  1.10.2.2  matt 		xutrace(&ut, sizeof(ut));				\
    802  1.10.2.2  matt 	}
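/*
 * Note on the field convention (as used by the malloc/realloc/free wrappers
 * in this allocator): an allocation traces (0, size, result), a free traces
 * (ptr, 0, 0), and a reallocation traces (oldptr, size, result), which lets
 * a ktrace/kdump consumer distinguish the three operations.
 */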
    803  1.10.2.2  matt 
    804  1.10.2.2  matt /******************************************************************************/
    805  1.10.2.2  matt /*
    806  1.10.2.2  matt  * Begin function prototypes for non-inline static functions.
    807  1.10.2.2  matt  */
    808  1.10.2.2  matt 
    809  1.10.2.2  matt static void	wrtmessage(const char *p1, const char *p2, const char *p3,
    810  1.10.2.2  matt 		const char *p4);
    811  1.10.2.2  matt #ifdef MALLOC_STATS
    812  1.10.2.2  matt static void	malloc_printf(const char *format, ...);
    813  1.10.2.2  matt #endif
    814  1.10.2.2  matt static char	*umax2s(uintmax_t x, char *s);
    815  1.10.2.2  matt static bool	base_pages_alloc(size_t minsize);
    816  1.10.2.2  matt static void	*base_alloc(size_t size);
    817  1.10.2.2  matt static chunk_node_t *base_chunk_node_alloc(void);
    818  1.10.2.2  matt static void	base_chunk_node_dealloc(chunk_node_t *node);
    819  1.10.2.2  matt #ifdef MALLOC_STATS
    820  1.10.2.2  matt static void	stats_print(arena_t *arena);
    821  1.10.2.2  matt #endif
    822  1.10.2.2  matt static void	*pages_map(void *addr, size_t size);
    823  1.10.2.2  matt static void	*pages_map_align(void *addr, size_t size, int align);
    824  1.10.2.2  matt static void	pages_unmap(void *addr, size_t size);
    825  1.10.2.2  matt static void	*chunk_alloc(size_t size);
    826  1.10.2.2  matt static void	chunk_dealloc(void *chunk, size_t size);
    827  1.10.2.2  matt static void	arena_run_split(arena_t *arena, arena_run_t *run, size_t size);
    828  1.10.2.2  matt static arena_chunk_t *arena_chunk_alloc(arena_t *arena);
    829  1.10.2.2  matt static void	arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk);
    830  1.10.2.2  matt static arena_run_t *arena_run_alloc(arena_t *arena, size_t size);
    831  1.10.2.2  matt static void	arena_run_dalloc(arena_t *arena, arena_run_t *run, size_t size);
    832  1.10.2.2  matt static arena_run_t *arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin);
    833  1.10.2.2  matt static void *arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin);
    834  1.10.2.2  matt static size_t arena_bin_run_size_calc(arena_bin_t *bin, size_t min_run_size);
    835  1.10.2.2  matt static void	*arena_malloc(arena_t *arena, size_t size);
    836  1.10.2.2  matt static void	*arena_palloc(arena_t *arena, size_t alignment, size_t size,
    837  1.10.2.2  matt     size_t alloc_size);
    838  1.10.2.2  matt static size_t	arena_salloc(const void *ptr);
    839  1.10.2.2  matt static void	*arena_ralloc(void *ptr, size_t size, size_t oldsize);
    840  1.10.2.2  matt static void	arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr);
    841  1.10.2.2  matt static bool	arena_new(arena_t *arena);
    842  1.10.2.2  matt static arena_t	*arenas_extend(unsigned ind);
    843  1.10.2.2  matt static void	*huge_malloc(size_t size);
    844  1.10.2.2  matt static void	*huge_palloc(size_t alignment, size_t size);
    845  1.10.2.2  matt static void	*huge_ralloc(void *ptr, size_t size, size_t oldsize);
    846  1.10.2.2  matt static void	huge_dalloc(void *ptr);
    847  1.10.2.2  matt static void	*imalloc(size_t size);
    848  1.10.2.2  matt static void	*ipalloc(size_t alignment, size_t size);
    849  1.10.2.2  matt static void	*icalloc(size_t size);
    850  1.10.2.2  matt static size_t	isalloc(const void *ptr);
    851  1.10.2.2  matt static void	*iralloc(void *ptr, size_t size);
    852  1.10.2.2  matt static void	idalloc(void *ptr);
    853  1.10.2.2  matt static void	malloc_print_stats(void);
    854  1.10.2.2  matt static bool	malloc_init_hard(void);
    855  1.10.2.2  matt 
    856  1.10.2.2  matt /*
    857  1.10.2.2  matt  * End function prototypes.
    858  1.10.2.2  matt  */
    859  1.10.2.2  matt /******************************************************************************/
    860  1.10.2.2  matt /*
    861  1.10.2.2  matt  * Begin mutex.
    862  1.10.2.2  matt  */
    863  1.10.2.2  matt 
    864  1.10.2.2  matt #ifdef __NetBSD__
    865  1.10.2.2  matt #define	malloc_mutex_init(m)	mutex_init(m, NULL)
    866  1.10.2.2  matt #define	malloc_mutex_lock(m)	mutex_lock(m)
    867  1.10.2.2  matt #define	malloc_mutex_unlock(m)	mutex_unlock(m)
    868  1.10.2.2  matt #else	/* __NetBSD__ */
    869  1.10.2.2  matt static inline void
    870  1.10.2.2  matt malloc_mutex_init(malloc_mutex_t *a_mutex)
    871  1.10.2.2  matt {
    872  1.10.2.2  matt 	static const spinlock_t lock = _SPINLOCK_INITIALIZER;
    873  1.10.2.2  matt 
    874  1.10.2.2  matt 	a_mutex->lock = lock;
    875  1.10.2.2  matt }
    876  1.10.2.2  matt 
    877  1.10.2.2  matt static inline void
    878  1.10.2.2  matt malloc_mutex_lock(malloc_mutex_t *a_mutex)
    879  1.10.2.2  matt {
    880  1.10.2.2  matt 
    881  1.10.2.2  matt 	if (__isthreaded)
    882  1.10.2.2  matt 		_SPINLOCK(&a_mutex->lock);
    883  1.10.2.2  matt }
    884  1.10.2.2  matt 
    885  1.10.2.2  matt static inline void
    886  1.10.2.2  matt malloc_mutex_unlock(malloc_mutex_t *a_mutex)
    887  1.10.2.2  matt {
    888  1.10.2.2  matt 
    889  1.10.2.2  matt 	if (__isthreaded)
    890  1.10.2.2  matt 		_SPINUNLOCK(&a_mutex->lock);
    891  1.10.2.2  matt }
    892  1.10.2.2  matt #endif	/* __NetBSD__ */
    893  1.10.2.2  matt 
    894  1.10.2.2  matt /*
    895  1.10.2.2  matt  * End mutex.
    896  1.10.2.2  matt  */
    897  1.10.2.2  matt /******************************************************************************/
    898  1.10.2.2  matt /*
    899  1.10.2.2  matt  * Begin Utility functions/macros.
    900  1.10.2.2  matt  */
    901  1.10.2.2  matt 
    902  1.10.2.2  matt /* Return the chunk address for allocation address a. */
    903  1.10.2.2  matt #define	CHUNK_ADDR2BASE(a)						\
    904  1.10.2.2  matt 	((void *)((uintptr_t)(a) & ~chunksize_mask))
    905  1.10.2.2  matt 
    906  1.10.2.2  matt /* Return the chunk offset of address a. */
    907  1.10.2.2  matt #define	CHUNK_ADDR2OFFSET(a)						\
    908  1.10.2.2  matt 	((size_t)((uintptr_t)(a) & chunksize_mask))
    909  1.10.2.2  matt 
    910  1.10.2.2  matt /* Return the smallest chunk multiple that is >= s. */
    911  1.10.2.2  matt #define	CHUNK_CEILING(s)						\
    912  1.10.2.2  matt 	(((s) + chunksize_mask) & ~chunksize_mask)
    913  1.10.2.2  matt 
    914  1.10.2.2  matt /* Return the smallest cacheline multiple that is >= s. */
    915  1.10.2.2  matt #define	CACHELINE_CEILING(s)						\
    916  1.10.2.2  matt 	(((s) + (CACHELINE - 1)) & ~(CACHELINE - 1))
    917  1.10.2.2  matt 
    918  1.10.2.2  matt /* Return the smallest quantum multiple that is >= a. */
    919  1.10.2.2  matt #define	QUANTUM_CEILING(a)						\
    920  1.10.2.2  matt 	(((a) + quantum_mask) & ~quantum_mask)
    921  1.10.2.2  matt 
    922  1.10.2.2  matt /* Return the smallest pagesize multiple that is >= s. */
    923  1.10.2.2  matt #define	PAGE_CEILING(s)							\
    924  1.10.2.2  matt 	(((s) + pagesize_mask) & ~pagesize_mask)
    925  1.10.2.2  matt 
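/*
 * Illustrative use of the macros above (a sketch, not code copied from this
 * file): because chunks are chunksize-aligned, masking a pointer with
 * CHUNK_ADDR2BASE() recovers the owning chunk header in constant time, which
 * is how deallocation decides where an allocation came from:
 *
 *	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
 *	if (chunk != ptr) {
 *		small or large allocation: the chunk header names the owning
 *		arena (chunk->arena), and arena_dalloc() takes it from there;
 *	} else {
 *		the pointer is chunk-aligned, so it must be a huge allocation
 *		tracked in the separate red-black tree of huge chunks;
 *	}
 *
 * The test of this form appears in idalloc() later in the file.
 */
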
    926  1.10.2.2  matt /* Compute the smallest power of 2 that is >= x. */
    927  1.10.2.2  matt static inline size_t
    928  1.10.2.2  matt pow2_ceil(size_t x)
    929  1.10.2.2  matt {
    930  1.10.2.2  matt 
    931  1.10.2.2  matt 	x--;
    932  1.10.2.2  matt 	x |= x >> 1;
    933  1.10.2.2  matt 	x |= x >> 2;
    934  1.10.2.2  matt 	x |= x >> 4;
    935  1.10.2.2  matt 	x |= x >> 8;
    936  1.10.2.2  matt 	x |= x >> 16;
    937  1.10.2.2  matt #if (SIZEOF_PTR == 8)
    938  1.10.2.2  matt 	x |= x >> 32;
    939  1.10.2.2  matt #endif
    940  1.10.2.2  matt 	x++;
    941  1.10.2.2  matt 	return (x);
    942  1.10.2.2  matt }
    943  1.10.2.2  matt 
    944  1.10.2.2  matt static void
    945  1.10.2.2  matt wrtmessage(const char *p1, const char *p2, const char *p3, const char *p4)
    946  1.10.2.2  matt {
    947  1.10.2.2  matt 
    948  1.10.2.3  matt 	write(STDERR_FILENO, p1, strlen(p1));
    949  1.10.2.3  matt 	write(STDERR_FILENO, p2, strlen(p2));
    950  1.10.2.3  matt 	write(STDERR_FILENO, p3, strlen(p3));
    951  1.10.2.3  matt 	write(STDERR_FILENO, p4, strlen(p4));
    952  1.10.2.2  matt }
    953  1.10.2.2  matt 
    954  1.10.2.2  matt void	(*_malloc_message)(const char *p1, const char *p2, const char *p3,
    955  1.10.2.2  matt 	    const char *p4) = wrtmessage;
    956  1.10.2.2  matt 
    957  1.10.2.2  matt #ifdef MALLOC_STATS
    958  1.10.2.2  matt /*
    959  1.10.2.2  matt  * Print to stderr in such a way as to (hopefully) avoid memory allocation.
    960  1.10.2.2  matt  */
    961  1.10.2.2  matt static void
    962  1.10.2.2  matt malloc_printf(const char *format, ...)
    963  1.10.2.2  matt {
    964  1.10.2.2  matt 	char buf[4096];
    965  1.10.2.2  matt 	va_list ap;
    966  1.10.2.2  matt 
    967  1.10.2.2  matt 	va_start(ap, format);
    968  1.10.2.2  matt 	vsnprintf(buf, sizeof(buf), format, ap);
    969  1.10.2.2  matt 	va_end(ap);
    970  1.10.2.2  matt 	_malloc_message(buf, "", "", "");
    971  1.10.2.2  matt }
    972  1.10.2.2  matt #endif
    973  1.10.2.2  matt 
    974  1.10.2.2  matt /*
    975  1.10.2.2  matt  * We don't want to depend on vsnprintf() for production builds, since that can
    976  1.10.2.2  matt  * cause unnecessary bloat for static binaries.  umax2s() provides minimal
    977  1.10.2.2  matt  * integer printing functionality, so that malloc_printf() use can be limited to
    978  1.10.2.2  matt  * MALLOC_STATS code.
    979  1.10.2.2  matt  */
    980  1.10.2.2  matt #define UMAX2S_BUFSIZE	21
    981  1.10.2.2  matt static char *
    982  1.10.2.2  matt umax2s(uintmax_t x, char *s)
    983  1.10.2.2  matt {
    984  1.10.2.2  matt 	unsigned i;
    985  1.10.2.2  matt 
    986  1.10.2.2  matt 	/* Make sure UMAX2S_BUFSIZE is large enough. */
    987  1.10.2.2  matt 	/* LINTED */
    988  1.10.2.2  matt 	assert(sizeof(uintmax_t) <= 8);
    989  1.10.2.2  matt 
    990  1.10.2.2  matt 	i = UMAX2S_BUFSIZE - 1;
    991  1.10.2.2  matt 	s[i] = '\0';
    992  1.10.2.2  matt 	do {
    993  1.10.2.2  matt 		i--;
    994  1.10.2.2  matt 		s[i] = "0123456789"[(int)x % 10];
    995  1.10.2.2  matt 		x /= (uintmax_t)10LL;
    996  1.10.2.2  matt 	} while (x > 0);
    997  1.10.2.2  matt 
    998  1.10.2.2  matt 	return (&s[i]);
    999  1.10.2.2  matt }
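/*
 * Usage sketch (illustrative only; the variable and message text here are
 * made up): umax2s() fills the buffer from the end and returns a pointer
 * into the middle of it, so callers must use the returned pointer, not the
 * buffer itself:
 *
 *	char buf[UMAX2S_BUFSIZE];
 *	_malloc_message("allocated: ", umax2s(nbytes, buf), "\n", "");
 *
 * This is the pattern the MALLOC_STATS/usage reporting code relies on.
 */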
   1000  1.10.2.2  matt 
   1001  1.10.2.2  matt /******************************************************************************/
   1002  1.10.2.2  matt 
   1003  1.10.2.2  matt static bool
   1004  1.10.2.2  matt base_pages_alloc(size_t minsize)
   1005  1.10.2.2  matt {
   1006  1.10.2.2  matt 	size_t csize = 0;
   1007  1.10.2.2  matt 
   1008  1.10.2.2  matt #ifdef USE_BRK
   1009  1.10.2.2  matt 	/*
   1010  1.10.2.2  matt 	 * Do special brk allocation here, since base allocations don't need to
   1011  1.10.2.2  matt 	 * be chunk-aligned.
   1012  1.10.2.2  matt 	 */
   1013  1.10.2.2  matt 	if (brk_prev != (void *)-1) {
   1014  1.10.2.2  matt 		void *brk_cur;
   1015  1.10.2.2  matt 		intptr_t incr;
   1016  1.10.2.2  matt 
   1017  1.10.2.2  matt 		if (minsize != 0)
   1018  1.10.2.2  matt 			csize = CHUNK_CEILING(minsize);
   1019  1.10.2.2  matt 
   1020  1.10.2.2  matt 		malloc_mutex_lock(&brk_mtx);
   1021  1.10.2.2  matt 		do {
   1022  1.10.2.2  matt 			/* Get the current end of brk. */
   1023  1.10.2.2  matt 			brk_cur = sbrk(0);
   1024  1.10.2.2  matt 
   1025  1.10.2.2  matt 			/*
   1026  1.10.2.2  matt 			 * Calculate how much padding is necessary to
   1027  1.10.2.2  matt 			 * chunk-align the end of brk.  Don't worry about
   1028  1.10.2.2  matt 			 * brk_cur not being chunk-aligned though.
   1029  1.10.2.2  matt 			 */
   1030  1.10.2.2  matt 			incr = (intptr_t)chunksize
   1031  1.10.2.2  matt 			    - (intptr_t)CHUNK_ADDR2OFFSET(brk_cur);
   1032  1.10.2.2  matt 			if (incr < minsize)
   1033  1.10.2.2  matt 				incr += csize;
   1034  1.10.2.2  matt 
   1035  1.10.2.2  matt 			brk_prev = sbrk(incr);
   1036  1.10.2.2  matt 			if (brk_prev == brk_cur) {
   1037  1.10.2.2  matt 				/* Success. */
   1038  1.10.2.2  matt 				malloc_mutex_unlock(&brk_mtx);
   1039  1.10.2.2  matt 				base_pages = brk_cur;
   1040  1.10.2.2  matt 				base_next_addr = base_pages;
   1041  1.10.2.2  matt 				base_past_addr = (void *)((uintptr_t)base_pages
   1042  1.10.2.2  matt 				    + incr);
   1043  1.10.2.2  matt #ifdef MALLOC_STATS
   1044  1.10.2.2  matt 				base_mapped += incr;
   1045  1.10.2.2  matt #endif
   1046  1.10.2.2  matt 				return (false);
   1047  1.10.2.2  matt 			}
   1048  1.10.2.2  matt 		} while (brk_prev != (void *)-1);
   1049  1.10.2.2  matt 		malloc_mutex_unlock(&brk_mtx);
   1050  1.10.2.2  matt 	}
   1051  1.10.2.2  matt 	if (minsize == 0) {
   1052  1.10.2.2  matt 		/*
   1053  1.10.2.2  matt 		 * Failure during initialization doesn't matter, so avoid
   1054  1.10.2.2  matt 		 * falling through to the mmap-based page mapping code.
   1055  1.10.2.2  matt 		 */
   1056  1.10.2.2  matt 		return (true);
   1057  1.10.2.2  matt 	}
   1058  1.10.2.2  matt #endif
   1059  1.10.2.2  matt 	assert(minsize != 0);
   1060  1.10.2.2  matt 	csize = PAGE_CEILING(minsize);
   1061  1.10.2.2  matt 	base_pages = pages_map(NULL, csize);
   1062  1.10.2.2  matt 	if (base_pages == NULL)
   1063  1.10.2.2  matt 		return (true);
   1064  1.10.2.2  matt 	base_next_addr = base_pages;
   1065  1.10.2.2  matt 	base_past_addr = (void *)((uintptr_t)base_pages + csize);
   1066  1.10.2.2  matt #ifdef MALLOC_STATS
   1067  1.10.2.2  matt 	base_mapped += csize;
   1068  1.10.2.2  matt #endif
   1069  1.10.2.2  matt 	return (false);
   1070  1.10.2.2  matt }
   1071  1.10.2.2  matt 
   1072  1.10.2.2  matt static void *
   1073  1.10.2.2  matt base_alloc(size_t size)
   1074  1.10.2.2  matt {
   1075  1.10.2.2  matt 	void *ret;
   1076  1.10.2.2  matt 	size_t csize;
   1077  1.10.2.2  matt 
   1078  1.10.2.2  matt 	/* Round size up to nearest multiple of the cacheline size. */
   1079  1.10.2.2  matt 	csize = CACHELINE_CEILING(size);
   1080  1.10.2.2  matt 
   1081  1.10.2.2  matt 	malloc_mutex_lock(&base_mtx);
   1082  1.10.2.2  matt 
   1083  1.10.2.2  matt 	/* Make sure there's enough space for the allocation. */
   1084  1.10.2.2  matt 	if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) {
   1085  1.10.2.2  matt 		if (base_pages_alloc(csize)) {
   1086  1.10.2.2  matt 			ret = NULL;
   1087  1.10.2.2  matt 			goto RETURN;
   1088  1.10.2.2  matt 		}
   1089  1.10.2.2  matt 	}
   1090  1.10.2.2  matt 
   1091  1.10.2.2  matt 	/* Allocate. */
   1092  1.10.2.2  matt 	ret = base_next_addr;
   1093  1.10.2.2  matt 	base_next_addr = (void *)((uintptr_t)base_next_addr + csize);
   1094  1.10.2.2  matt 
   1095  1.10.2.2  matt RETURN:
   1096  1.10.2.2  matt 	malloc_mutex_unlock(&base_mtx);
   1097  1.10.2.2  matt 	return (ret);
   1098  1.10.2.2  matt }
   1099  1.10.2.2  matt 
   1100  1.10.2.2  matt static chunk_node_t *
   1101  1.10.2.2  matt base_chunk_node_alloc(void)
   1102  1.10.2.2  matt {
   1103  1.10.2.2  matt 	chunk_node_t *ret;
   1104  1.10.2.2  matt 
   1105  1.10.2.2  matt 	malloc_mutex_lock(&base_mtx);
   1106  1.10.2.2  matt 	if (base_chunk_nodes != NULL) {
   1107  1.10.2.2  matt 		ret = base_chunk_nodes;
   1108  1.10.2.2  matt 		/* LINTED */
   1109  1.10.2.2  matt 		base_chunk_nodes = *(chunk_node_t **)ret;
   1110  1.10.2.2  matt 		malloc_mutex_unlock(&base_mtx);
   1111  1.10.2.2  matt 	} else {
   1112  1.10.2.2  matt 		malloc_mutex_unlock(&base_mtx);
   1113  1.10.2.2  matt 		ret = (chunk_node_t *)base_alloc(sizeof(chunk_node_t));
   1114  1.10.2.2  matt 	}
   1115  1.10.2.2  matt 
   1116  1.10.2.2  matt 	return (ret);
   1117  1.10.2.2  matt }
   1118  1.10.2.2  matt 
   1119  1.10.2.2  matt static void
   1120  1.10.2.2  matt base_chunk_node_dealloc(chunk_node_t *node)
   1121  1.10.2.2  matt {
   1122  1.10.2.2  matt 
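                         	/*
                         	 * base_alloc() never frees, so recycled nodes are kept on a
                         	 * simple LIFO free list: the first word of each freed node is
                         	 * reused as the "next" pointer, and base_chunk_node_alloc()
                         	 * pops from the head of this list before falling back to
                         	 * base_alloc().
                         	 */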
   1123  1.10.2.2  matt 	malloc_mutex_lock(&base_mtx);
   1124  1.10.2.2  matt 	/* LINTED */
   1125  1.10.2.2  matt 	*(chunk_node_t **)node = base_chunk_nodes;
   1126  1.10.2.2  matt 	base_chunk_nodes = node;
   1127  1.10.2.2  matt 	malloc_mutex_unlock(&base_mtx);
   1128  1.10.2.2  matt }
   1129  1.10.2.2  matt 
   1130  1.10.2.2  matt /******************************************************************************/
   1131  1.10.2.2  matt 
   1132  1.10.2.2  matt #ifdef MALLOC_STATS
   1133  1.10.2.2  matt static void
   1134  1.10.2.2  matt stats_print(arena_t *arena)
   1135  1.10.2.2  matt {
   1136  1.10.2.2  matt 	unsigned i;
   1137  1.10.2.2  matt 	int gap_start;
   1138  1.10.2.2  matt 
   1139  1.10.2.2  matt 	malloc_printf(
   1140  1.10.2.2  matt 	    "          allocated/mapped            nmalloc      ndalloc\n");
   1141  1.10.2.2  matt 
   1142  1.10.2.2  matt 	malloc_printf("small: %12zu %-12s %12llu %12llu\n",
   1143  1.10.2.2  matt 	    arena->stats.allocated_small, "", arena->stats.nmalloc_small,
   1144  1.10.2.2  matt 	    arena->stats.ndalloc_small);
   1145  1.10.2.2  matt 	malloc_printf("large: %12zu %-12s %12llu %12llu\n",
   1146  1.10.2.2  matt 	    arena->stats.allocated_large, "", arena->stats.nmalloc_large,
   1147  1.10.2.2  matt 	    arena->stats.ndalloc_large);
   1148  1.10.2.2  matt 	malloc_printf("total: %12zu/%-12zu %12llu %12llu\n",
   1149  1.10.2.2  matt 	    arena->stats.allocated_small + arena->stats.allocated_large,
   1150  1.10.2.2  matt 	    arena->stats.mapped,
   1151  1.10.2.2  matt 	    arena->stats.nmalloc_small + arena->stats.nmalloc_large,
   1152  1.10.2.2  matt 	    arena->stats.ndalloc_small + arena->stats.ndalloc_large);
   1153  1.10.2.2  matt 
   1154  1.10.2.2  matt 	malloc_printf("bins:     bin   size regs pgs  requests   newruns"
   1155  1.10.2.2  matt 	    "    reruns maxruns curruns\n");
   1156  1.10.2.2  matt 	for (i = 0, gap_start = -1; i < ntbins + nqbins + nsbins; i++) {
   1157  1.10.2.2  matt 		if (arena->bins[i].stats.nrequests == 0) {
   1158  1.10.2.2  matt 			if (gap_start == -1)
   1159  1.10.2.2  matt 				gap_start = i;
   1160  1.10.2.2  matt 		} else {
   1161  1.10.2.2  matt 			if (gap_start != -1) {
   1162  1.10.2.2  matt 				if (i > gap_start + 1) {
   1163  1.10.2.2  matt 					/* Gap of more than one size class. */
   1164  1.10.2.2  matt 					malloc_printf("[%u..%u]\n",
   1165  1.10.2.2  matt 					    gap_start, i - 1);
   1166  1.10.2.2  matt 				} else {
   1167  1.10.2.2  matt 					/* Gap of one size class. */
   1168  1.10.2.2  matt 					malloc_printf("[%u]\n", gap_start);
   1169  1.10.2.2  matt 				}
   1170  1.10.2.2  matt 				gap_start = -1;
   1171  1.10.2.2  matt 			}
   1172  1.10.2.2  matt 			malloc_printf(
   1173  1.10.2.2  matt 			    "%13u %1s %4u %4u %3u %9llu %9llu"
   1174  1.10.2.2  matt 			    " %9llu %7lu %7lu\n",
   1175  1.10.2.2  matt 			    i,
   1176  1.10.2.2  matt 			    i < ntbins ? "T" : i < ntbins + nqbins ? "Q" : "S",
   1177  1.10.2.2  matt 			    arena->bins[i].reg_size,
   1178  1.10.2.2  matt 			    arena->bins[i].nregs,
   1179  1.10.2.2  matt 			    arena->bins[i].run_size >> pagesize_2pow,
   1180  1.10.2.2  matt 			    arena->bins[i].stats.nrequests,
   1181  1.10.2.2  matt 			    arena->bins[i].stats.nruns,
   1182  1.10.2.2  matt 			    arena->bins[i].stats.reruns,
   1183  1.10.2.2  matt 			    arena->bins[i].stats.highruns,
   1184  1.10.2.2  matt 			    arena->bins[i].stats.curruns);
   1185  1.10.2.2  matt 		}
   1186  1.10.2.2  matt 	}
   1187  1.10.2.2  matt 	if (gap_start != -1) {
   1188  1.10.2.2  matt 		if (i > gap_start + 1) {
   1189  1.10.2.2  matt 			/* Gap of more than one size class. */
   1190  1.10.2.2  matt 			malloc_printf("[%u..%u]\n", gap_start, i - 1);
   1191  1.10.2.2  matt 		} else {
   1192  1.10.2.2  matt 			/* Gap of one size class. */
   1193  1.10.2.2  matt 			malloc_printf("[%u]\n", gap_start);
   1194  1.10.2.2  matt 		}
   1195  1.10.2.2  matt 	}
   1196  1.10.2.2  matt }
   1197  1.10.2.2  matt #endif
   1198  1.10.2.2  matt 
   1199  1.10.2.2  matt /*
   1200  1.10.2.2  matt  * End Utility functions/macros.
   1201  1.10.2.2  matt  */
   1202  1.10.2.2  matt /******************************************************************************/
   1203  1.10.2.2  matt /*
   1204  1.10.2.2  matt  * Begin chunk management functions.
   1205  1.10.2.2  matt  */
   1206  1.10.2.2  matt 
   1207  1.10.2.2  matt #ifndef lint
   1208  1.10.2.2  matt static inline int
   1209  1.10.2.2  matt chunk_comp(chunk_node_t *a, chunk_node_t *b)
   1210  1.10.2.2  matt {
   1211  1.10.2.2  matt 
   1212  1.10.2.2  matt 	assert(a != NULL);
   1213  1.10.2.2  matt 	assert(b != NULL);
   1214  1.10.2.2  matt 
   1215  1.10.2.2  matt 	if ((uintptr_t)a->chunk < (uintptr_t)b->chunk)
   1216  1.10.2.2  matt 		return (-1);
   1217  1.10.2.2  matt 	else if (a->chunk == b->chunk)
   1218  1.10.2.2  matt 		return (0);
   1219  1.10.2.2  matt 	else
   1220  1.10.2.2  matt 		return (1);
   1221  1.10.2.2  matt }
   1222  1.10.2.2  matt 
   1223  1.10.2.2  matt /* Generate red-black tree code for chunks. */
   1224  1.10.2.2  matt RB_GENERATE_STATIC(chunk_tree_s, chunk_node_s, link, chunk_comp);
   1225  1.10.2.2  matt #endif
   1226  1.10.2.2  matt 
   1227  1.10.2.2  matt static void *
   1228  1.10.2.2  matt pages_map_align(void *addr, size_t size, int align)
   1229  1.10.2.2  matt {
   1230  1.10.2.2  matt 	void *ret;
   1231  1.10.2.2  matt 
   1232  1.10.2.2  matt 	/*
   1233  1.10.2.2  matt 	 * We don't use MAP_FIXED here, because it can cause the *replacement*
   1234  1.10.2.2  matt 	 * of existing mappings, and we only want to create new mappings.
   1235  1.10.2.2  matt 	 */
   1236  1.10.2.2  matt 	ret = mmap(addr, size, PROT_READ | PROT_WRITE,
   1237  1.10.2.2  matt 	    MAP_PRIVATE | MAP_ANON | MAP_ALIGNED(align), -1, 0);
   1238  1.10.2.2  matt 	assert(ret != NULL);
   1239  1.10.2.2  matt 
   1240  1.10.2.2  matt 	if (ret == MAP_FAILED)
   1241  1.10.2.2  matt 		ret = NULL;
   1242  1.10.2.2  matt 	else if (addr != NULL && ret != addr) {
   1243  1.10.2.2  matt 		/*
   1244  1.10.2.2  matt 		 * We succeeded in mapping memory, but not in the right place.
   1245  1.10.2.2  matt 		 */
   1246  1.10.2.2  matt 		if (munmap(ret, size) == -1) {
   1247  1.10.2.2  matt 			char buf[STRERROR_BUF];
   1248  1.10.2.2  matt 
   1249  1.10.2.3  matt 			STRERROR_R(errno, buf, sizeof(buf));
   1250  1.10.2.3  matt 			_malloc_message(getprogname(),
   1251  1.10.2.2  matt 			    ": (malloc) Error in munmap(): ", buf, "\n");
   1252  1.10.2.2  matt 			if (opt_abort)
   1253  1.10.2.2  matt 				abort();
   1254  1.10.2.2  matt 		}
   1255  1.10.2.2  matt 		ret = NULL;
   1256  1.10.2.2  matt 	}
   1257  1.10.2.2  matt 
   1258  1.10.2.2  matt 	assert(ret == NULL || (addr == NULL && ret != addr)
   1259  1.10.2.2  matt 	    || (addr != NULL && ret == addr));
   1260  1.10.2.2  matt 	return (ret);
   1261  1.10.2.2  matt }
   1262  1.10.2.2  matt 
   1263  1.10.2.2  matt static void *
   1264  1.10.2.2  matt pages_map(void *addr, size_t size)
   1265  1.10.2.2  matt {
   1266  1.10.2.2  matt 
   1267  1.10.2.2  matt 	return pages_map_align(addr, size, 0);
   1268  1.10.2.2  matt }
   1269  1.10.2.2  matt 
   1270  1.10.2.2  matt static void
   1271  1.10.2.2  matt pages_unmap(void *addr, size_t size)
   1272  1.10.2.2  matt {
   1273  1.10.2.2  matt 
   1274  1.10.2.2  matt 	if (munmap(addr, size) == -1) {
   1275  1.10.2.2  matt 		char buf[STRERROR_BUF];
   1276  1.10.2.2  matt 
   1277  1.10.2.3  matt 		STRERROR_R(errno, buf, sizeof(buf));
   1278  1.10.2.3  matt 		_malloc_message(getprogname(),
   1279  1.10.2.2  matt 		    ": (malloc) Error in munmap(): ", buf, "\n");
   1280  1.10.2.2  matt 		if (opt_abort)
   1281  1.10.2.2  matt 			abort();
   1282  1.10.2.2  matt 	}
   1283  1.10.2.2  matt }
   1284  1.10.2.2  matt 
   1285  1.10.2.2  matt static void *
   1286  1.10.2.2  matt chunk_alloc(size_t size)
   1287  1.10.2.2  matt {
   1288  1.10.2.2  matt 	void *ret, *chunk;
   1289  1.10.2.2  matt 	chunk_node_t *tchunk, *delchunk;
   1290  1.10.2.2  matt 
   1291  1.10.2.2  matt 	assert(size != 0);
   1292  1.10.2.2  matt 	assert((size & chunksize_mask) == 0);
   1293  1.10.2.2  matt 
   1294  1.10.2.2  matt 	malloc_mutex_lock(&chunks_mtx);
   1295  1.10.2.2  matt 
   1296  1.10.2.2  matt 	if (size == chunksize) {
   1297  1.10.2.2  matt 		/*
   1298  1.10.2.2  matt 		 * Check for address ranges that were previously chunks and try
   1299  1.10.2.2  matt 		 * to use them.
   1300  1.10.2.2  matt 		 */
   1301  1.10.2.2  matt 
   1302  1.10.2.2  matt 		/* LINTED */
   1303  1.10.2.2  matt 		tchunk = RB_MIN(chunk_tree_s, &old_chunks);
   1304  1.10.2.2  matt 		while (tchunk != NULL) {
   1305  1.10.2.2  matt 			/* Found an address range.  Try to recycle it. */
   1306  1.10.2.2  matt 
   1307  1.10.2.2  matt 			chunk = tchunk->chunk;
   1308  1.10.2.2  matt 			delchunk = tchunk;
   1309  1.10.2.2  matt 			/* LINTED */
   1310  1.10.2.2  matt 			tchunk = RB_NEXT(chunk_tree_s, &old_chunks, delchunk);
   1311  1.10.2.2  matt 
   1312  1.10.2.2  matt 			/* Remove delchunk from the tree. */
   1313  1.10.2.2  matt 			/* LINTED */
   1314  1.10.2.2  matt 			RB_REMOVE(chunk_tree_s, &old_chunks, delchunk);
   1315  1.10.2.2  matt 			base_chunk_node_dealloc(delchunk);
   1316  1.10.2.2  matt 
   1317  1.10.2.2  matt #ifdef USE_BRK
   1318  1.10.2.2  matt 			if ((uintptr_t)chunk >= (uintptr_t)brk_base
   1319  1.10.2.2  matt 			    && (uintptr_t)chunk < (uintptr_t)brk_max) {
   1320  1.10.2.2  matt 				/* Re-use a previously freed brk chunk. */
   1321  1.10.2.2  matt 				ret = chunk;
   1322  1.10.2.2  matt 				goto RETURN;
   1323  1.10.2.2  matt 			}
   1324  1.10.2.2  matt #endif
   1325  1.10.2.2  matt 			if ((ret = pages_map(chunk, size)) != NULL) {
   1326  1.10.2.2  matt 				/* Success. */
   1327  1.10.2.2  matt 				goto RETURN;
   1328  1.10.2.2  matt 			}
   1329  1.10.2.2  matt 		}
   1330  1.10.2.2  matt 	}
   1331  1.10.2.2  matt 
   1332  1.10.2.2  matt 	/*
    1333  1.10.2.2  matt 	 * Try to map a chunk-aligned region, but allow the OS to place the
    1334  1.10.2.2  matt 	 * allocation anywhere.  Beware of size_t wrap-around.
   1335  1.10.2.2  matt 	 */
   1336  1.10.2.2  matt 	if (size + chunksize > size) {
   1337  1.10.2.2  matt 		if ((ret = pages_map_align(NULL, size, chunksize_2pow))
   1338  1.10.2.2  matt 		    != NULL) {
   1339  1.10.2.2  matt 			goto RETURN;
   1340  1.10.2.2  matt 		}
   1341  1.10.2.2  matt 	}
   1342  1.10.2.2  matt 
   1343  1.10.2.2  matt #ifdef USE_BRK
   1344  1.10.2.2  matt 	/*
   1345  1.10.2.2  matt 	 * Try to create allocations in brk, in order to make full use of
   1346  1.10.2.2  matt 	 * limited address space.
   1347  1.10.2.2  matt 	 */
   1348  1.10.2.2  matt 	if (brk_prev != (void *)-1) {
   1349  1.10.2.2  matt 		void *brk_cur;
   1350  1.10.2.2  matt 		intptr_t incr;
   1351  1.10.2.2  matt 
   1352  1.10.2.2  matt 		/*
   1353  1.10.2.2  matt 		 * The loop is necessary to recover from races with other
   1354  1.10.2.2  matt 		 * threads that are using brk for something other than malloc.
   1355  1.10.2.2  matt 		 */
   1356  1.10.2.2  matt 		malloc_mutex_lock(&brk_mtx);
   1357  1.10.2.2  matt 		do {
   1358  1.10.2.2  matt 			/* Get the current end of brk. */
   1359  1.10.2.2  matt 			brk_cur = sbrk(0);
   1360  1.10.2.2  matt 
   1361  1.10.2.2  matt 			/*
   1362  1.10.2.2  matt 			 * Calculate how much padding is necessary to
   1363  1.10.2.2  matt 			 * chunk-align the end of brk.
   1364  1.10.2.2  matt 			 */
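                         			/*
                         			 * Illustrative example (1MB chunksize assumed): with
                         			 * brk_cur == 0x812345 and size == 0x100000, the offset
                         			 * within the chunk is 0x12345, so incr starts out as
                         			 * 0xedcbb; ret becomes 0x900000 (chunk-aligned) and
                         			 * incr grows to 0x1edcbb, leaving the new break at
                         			 * ret + size.
                         			 */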
   1365  1.10.2.2  matt 			incr = (intptr_t)size
   1366  1.10.2.2  matt 			    - (intptr_t)CHUNK_ADDR2OFFSET(brk_cur);
   1367  1.10.2.2  matt 			if (incr == size) {
   1368  1.10.2.2  matt 				ret = brk_cur;
   1369  1.10.2.2  matt 			} else {
   1370  1.10.2.2  matt 				ret = (void *)((intptr_t)brk_cur + incr);
   1371  1.10.2.2  matt 				incr += size;
   1372  1.10.2.2  matt 			}
   1373  1.10.2.2  matt 
   1374  1.10.2.2  matt 			brk_prev = sbrk(incr);
   1375  1.10.2.2  matt 			if (brk_prev == brk_cur) {
   1376  1.10.2.2  matt 				/* Success. */
   1377  1.10.2.2  matt 				malloc_mutex_unlock(&brk_mtx);
   1378  1.10.2.2  matt 				brk_max = (void *)((intptr_t)ret + size);
   1379  1.10.2.2  matt 				goto RETURN;
   1380  1.10.2.2  matt 			}
   1381  1.10.2.2  matt 		} while (brk_prev != (void *)-1);
   1382  1.10.2.2  matt 		malloc_mutex_unlock(&brk_mtx);
   1383  1.10.2.2  matt 	}
   1384  1.10.2.2  matt #endif
   1385  1.10.2.2  matt 
   1386  1.10.2.2  matt 	/* All strategies for allocation failed. */
   1387  1.10.2.2  matt 	ret = NULL;
   1388  1.10.2.2  matt RETURN:
   1389  1.10.2.2  matt 	if (ret != NULL) {
   1390  1.10.2.2  matt 		chunk_node_t key;
   1391  1.10.2.2  matt 		/*
   1392  1.10.2.2  matt 		 * Clean out any entries in old_chunks that overlap with the
   1393  1.10.2.2  matt 		 * memory we just allocated.
   1394  1.10.2.2  matt 		 */
   1395  1.10.2.2  matt 		key.chunk = ret;
   1396  1.10.2.2  matt 		/* LINTED */
   1397  1.10.2.2  matt 		tchunk = RB_NFIND(chunk_tree_s, &old_chunks, &key);
   1398  1.10.2.2  matt 		while (tchunk != NULL
   1399  1.10.2.2  matt 		    && (uintptr_t)tchunk->chunk >= (uintptr_t)ret
   1400  1.10.2.2  matt 		    && (uintptr_t)tchunk->chunk < (uintptr_t)ret + size) {
   1401  1.10.2.2  matt 			delchunk = tchunk;
   1402  1.10.2.2  matt 			/* LINTED */
   1403  1.10.2.2  matt 			tchunk = RB_NEXT(chunk_tree_s, &old_chunks, delchunk);
   1404  1.10.2.2  matt 			/* LINTED */
   1405  1.10.2.2  matt 			RB_REMOVE(chunk_tree_s, &old_chunks, delchunk);
   1406  1.10.2.2  matt 			base_chunk_node_dealloc(delchunk);
   1407  1.10.2.2  matt 		}
   1408  1.10.2.2  matt 
   1409  1.10.2.2  matt 	}
   1410  1.10.2.2  matt #ifdef MALLOC_STATS
   1411  1.10.2.2  matt 	if (ret != NULL) {
   1412  1.10.2.2  matt 		stats_chunks.nchunks += (size / chunksize);
   1413  1.10.2.2  matt 		stats_chunks.curchunks += (size / chunksize);
   1414  1.10.2.2  matt 	}
   1415  1.10.2.2  matt 	if (stats_chunks.curchunks > stats_chunks.highchunks)
   1416  1.10.2.2  matt 		stats_chunks.highchunks = stats_chunks.curchunks;
   1417  1.10.2.2  matt #endif
   1418  1.10.2.2  matt 	malloc_mutex_unlock(&chunks_mtx);
   1419  1.10.2.2  matt 
   1420  1.10.2.2  matt 	assert(CHUNK_ADDR2BASE(ret) == ret);
   1421  1.10.2.2  matt 	return (ret);
   1422  1.10.2.2  matt }
   1423  1.10.2.2  matt 
   1424  1.10.2.2  matt static void
   1425  1.10.2.2  matt chunk_dealloc(void *chunk, size_t size)
   1426  1.10.2.2  matt {
   1427  1.10.2.2  matt 	chunk_node_t *node;
   1428  1.10.2.2  matt 
   1429  1.10.2.2  matt 	assert(chunk != NULL);
   1430  1.10.2.2  matt 	assert(CHUNK_ADDR2BASE(chunk) == chunk);
   1431  1.10.2.2  matt 	assert(size != 0);
   1432  1.10.2.2  matt 	assert((size & chunksize_mask) == 0);
   1433  1.10.2.2  matt 
   1434  1.10.2.2  matt 	malloc_mutex_lock(&chunks_mtx);
   1435  1.10.2.2  matt 
   1436  1.10.2.2  matt #ifdef USE_BRK
   1437  1.10.2.2  matt 	if ((uintptr_t)chunk >= (uintptr_t)brk_base
   1438  1.10.2.2  matt 	    && (uintptr_t)chunk < (uintptr_t)brk_max) {
   1439  1.10.2.2  matt 		void *brk_cur;
   1440  1.10.2.2  matt 
   1441  1.10.2.2  matt 		malloc_mutex_lock(&brk_mtx);
   1442  1.10.2.2  matt 		/* Get the current end of brk. */
   1443  1.10.2.2  matt 		brk_cur = sbrk(0);
   1444  1.10.2.2  matt 
   1445  1.10.2.2  matt 		/*
   1446  1.10.2.2  matt 		 * Try to shrink the data segment if this chunk is at the end
   1447  1.10.2.2  matt 		 * of the data segment.  The sbrk() call here is subject to a
   1448  1.10.2.2  matt 		 * race condition with threads that use brk(2) or sbrk(2)
   1449  1.10.2.2  matt 		 * directly, but the alternative would be to leak memory for
   1450  1.10.2.2  matt 		 * the sake of poorly designed multi-threaded programs.
   1451  1.10.2.2  matt 		 */
   1452  1.10.2.2  matt 		if (brk_cur == brk_max
   1453  1.10.2.2  matt 		    && (void *)((uintptr_t)chunk + size) == brk_max
   1454  1.10.2.2  matt 		    && sbrk(-(intptr_t)size) == brk_max) {
   1455  1.10.2.2  matt 			malloc_mutex_unlock(&brk_mtx);
   1456  1.10.2.2  matt 			if (brk_prev == brk_max) {
   1457  1.10.2.2  matt 				/* Success. */
   1458  1.10.2.2  matt 				brk_prev = (void *)((intptr_t)brk_max
   1459  1.10.2.2  matt 				    - (intptr_t)size);
   1460  1.10.2.2  matt 				brk_max = brk_prev;
   1461  1.10.2.2  matt 			}
   1462  1.10.2.2  matt 		} else {
   1463  1.10.2.2  matt 			size_t offset;
   1464  1.10.2.2  matt 
   1465  1.10.2.2  matt 			malloc_mutex_unlock(&brk_mtx);
   1466  1.10.2.2  matt 			madvise(chunk, size, MADV_FREE);
   1467  1.10.2.2  matt 
   1468  1.10.2.2  matt 			/*
   1469  1.10.2.2  matt 			 * Iteratively create records of each chunk-sized
    1470  1.10.2.2  matt 			 * memory region that 'chunk' is composed of, so that
   1471  1.10.2.2  matt 			 * the address range can be recycled if memory usage
   1472  1.10.2.2  matt 			 * increases later on.
   1473  1.10.2.2  matt 			 */
   1474  1.10.2.2  matt 			for (offset = 0; offset < size; offset += chunksize) {
   1475  1.10.2.2  matt 				node = base_chunk_node_alloc();
   1476  1.10.2.2  matt 				if (node == NULL)
   1477  1.10.2.2  matt 					break;
   1478  1.10.2.2  matt 
   1479  1.10.2.2  matt 				node->chunk = (void *)((uintptr_t)chunk
   1480  1.10.2.2  matt 				    + (uintptr_t)offset);
   1481  1.10.2.2  matt 				node->size = chunksize;
   1482  1.10.2.2  matt 				/* LINTED */
   1483  1.10.2.2  matt 				RB_INSERT(chunk_tree_s, &old_chunks, node);
   1484  1.10.2.2  matt 			}
   1485  1.10.2.2  matt 		}
   1486  1.10.2.2  matt 	} else {
   1487  1.10.2.2  matt #endif
   1488  1.10.2.2  matt 		pages_unmap(chunk, size);
   1489  1.10.2.2  matt 
   1490  1.10.2.2  matt 		/*
   1491  1.10.2.2  matt 		 * Make a record of the chunk's address, so that the address
   1492  1.10.2.2  matt 		 * range can be recycled if memory usage increases later on.
   1493  1.10.2.2  matt 		 * Don't bother to create entries if (size > chunksize), since
   1494  1.10.2.2  matt 		 * doing so could cause scalability issues for truly gargantuan
   1495  1.10.2.2  matt 		 * objects (many gigabytes or larger).
   1496  1.10.2.2  matt 		 */
   1497  1.10.2.2  matt 		if (size == chunksize) {
   1498  1.10.2.2  matt 			node = base_chunk_node_alloc();
   1499  1.10.2.2  matt 			if (node != NULL) {
   1500  1.10.2.2  matt 				node->chunk = (void *)(uintptr_t)chunk;
   1501  1.10.2.2  matt 				node->size = chunksize;
   1502  1.10.2.2  matt 				/* LINTED */
   1503  1.10.2.2  matt 				RB_INSERT(chunk_tree_s, &old_chunks, node);
   1504  1.10.2.2  matt 			}
   1505  1.10.2.2  matt 		}
   1506  1.10.2.2  matt #ifdef USE_BRK
   1507  1.10.2.2  matt 	}
   1508  1.10.2.2  matt #endif
   1509  1.10.2.2  matt 
   1510  1.10.2.2  matt #ifdef MALLOC_STATS
   1511  1.10.2.2  matt 	stats_chunks.curchunks -= (size / chunksize);
   1512  1.10.2.2  matt #endif
   1513  1.10.2.2  matt 	malloc_mutex_unlock(&chunks_mtx);
   1514  1.10.2.2  matt }
   1515  1.10.2.2  matt 
   1516  1.10.2.2  matt /*
   1517  1.10.2.2  matt  * End chunk management functions.
   1518  1.10.2.2  matt  */
   1519  1.10.2.2  matt /******************************************************************************/
   1520  1.10.2.2  matt /*
   1521  1.10.2.2  matt  * Begin arena.
   1522  1.10.2.2  matt  */
   1523  1.10.2.2  matt 
   1524  1.10.2.2  matt /*
   1525  1.10.2.4  matt  * Choose an arena based on a per-thread and (optimistically) per-CPU value.
   1526  1.10.2.4  matt  *
   1527  1.10.2.4  matt  * We maintain at least one block of arenas.  Usually there are more.
   1528  1.10.2.4  matt  * The blocks are $ncpu arenas in size.  Whole blocks are 'hashed'
   1529  1.10.2.4  matt  * amongst threads.  To accomplish this, next_arena advances only in
   1530  1.10.2.4  matt  * ncpu steps.
   1531  1.10.2.2  matt  */
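                         /*
                          * Illustrative example (numbers chosen only for illustration): with
                          * ncpus == 4 and narenas == 8, next_arena alternates between 0 and 4,
                          * so each thread is handed one of two four-arena blocks.  A thread
                          * whose map points at arenas[4] and that is currently running on CPU 2
                          * allocates from arenas[6].
                          */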
   1532  1.10.2.2  matt static inline arena_t *
   1533  1.10.2.2  matt choose_arena(void)
   1534  1.10.2.2  matt {
   1535  1.10.2.4  matt 	unsigned i, curcpu;
   1536  1.10.2.4  matt 	arena_t **map;
   1537  1.10.2.2  matt 
   1538  1.10.2.4  matt 	map = get_arenas_map();
   1539  1.10.2.4  matt 	curcpu = thr_curcpu();
   1540  1.10.2.4  matt 	if (__predict_true(map != NULL && map[curcpu] != NULL))
   1541  1.10.2.4  matt 		return map[curcpu];
   1542  1.10.2.2  matt 
    1543  1.10.2.4  matt 	/* Initialize the current block of arenas and advance to the next. */
   1544  1.10.2.2  matt 	malloc_mutex_lock(&arenas_mtx);
   1545  1.10.2.4  matt 	assert(next_arena % ncpus == 0);
   1546  1.10.2.4  matt 	assert(narenas % ncpus == 0);
   1547  1.10.2.4  matt 	map = &arenas[next_arena];
   1548  1.10.2.4  matt 	set_arenas_map(map);
   1549  1.10.2.4  matt 	for (i = 0; i < ncpus; i++) {
   1550  1.10.2.4  matt 		if (arenas[next_arena] == NULL)
   1551  1.10.2.4  matt 			arenas_extend(next_arena);
   1552  1.10.2.4  matt 		next_arena = (next_arena + 1) % narenas;
   1553  1.10.2.2  matt 	}
   1554  1.10.2.2  matt 	malloc_mutex_unlock(&arenas_mtx);
   1555  1.10.2.2  matt 
   1556  1.10.2.4  matt 	/*
   1557  1.10.2.4  matt 	 * If we were unable to allocate an arena above, then default to
   1558  1.10.2.4  matt 	 * the first arena, which is always present.
   1559  1.10.2.4  matt 	 */
   1560  1.10.2.4  matt 	curcpu = thr_curcpu();
   1561  1.10.2.4  matt 	if (map[curcpu] != NULL)
   1562  1.10.2.4  matt 		return map[curcpu];
   1563  1.10.2.4  matt 	return arenas[0];
   1564  1.10.2.2  matt }
   1565  1.10.2.2  matt 
   1566  1.10.2.2  matt #ifndef lint
   1567  1.10.2.2  matt static inline int
   1568  1.10.2.2  matt arena_chunk_comp(arena_chunk_t *a, arena_chunk_t *b)
   1569  1.10.2.2  matt {
   1570  1.10.2.2  matt 
   1571  1.10.2.2  matt 	assert(a != NULL);
   1572  1.10.2.2  matt 	assert(b != NULL);
   1573  1.10.2.2  matt 
   1574  1.10.2.2  matt 	if ((uintptr_t)a < (uintptr_t)b)
   1575  1.10.2.2  matt 		return (-1);
   1576  1.10.2.2  matt 	else if (a == b)
   1577  1.10.2.2  matt 		return (0);
   1578  1.10.2.2  matt 	else
   1579  1.10.2.2  matt 		return (1);
   1580  1.10.2.2  matt }
   1581  1.10.2.2  matt 
   1582  1.10.2.2  matt /* Generate red-black tree code for arena chunks. */
   1583  1.10.2.2  matt RB_GENERATE_STATIC(arena_chunk_tree_s, arena_chunk_s, link, arena_chunk_comp);
   1584  1.10.2.2  matt #endif
   1585  1.10.2.2  matt 
   1586  1.10.2.2  matt #ifndef lint
   1587  1.10.2.2  matt static inline int
   1588  1.10.2.2  matt arena_run_comp(arena_run_t *a, arena_run_t *b)
   1589  1.10.2.2  matt {
   1590  1.10.2.2  matt 
   1591  1.10.2.2  matt 	assert(a != NULL);
   1592  1.10.2.2  matt 	assert(b != NULL);
   1593  1.10.2.2  matt 
   1594  1.10.2.2  matt 	if ((uintptr_t)a < (uintptr_t)b)
   1595  1.10.2.2  matt 		return (-1);
   1596  1.10.2.2  matt 	else if (a == b)
   1597  1.10.2.2  matt 		return (0);
   1598  1.10.2.2  matt 	else
   1599  1.10.2.2  matt 		return (1);
   1600  1.10.2.2  matt }
   1601  1.10.2.2  matt 
   1602  1.10.2.2  matt /* Generate red-black tree code for arena runs. */
   1603  1.10.2.2  matt RB_GENERATE_STATIC(arena_run_tree_s, arena_run_s, link, arena_run_comp);
   1604  1.10.2.2  matt #endif
   1605  1.10.2.2  matt 
   1606  1.10.2.2  matt static inline void *
   1607  1.10.2.2  matt arena_run_reg_alloc(arena_run_t *run, arena_bin_t *bin)
   1608  1.10.2.2  matt {
   1609  1.10.2.2  matt 	void *ret;
   1610  1.10.2.2  matt 	unsigned i, mask, bit, regind;
   1611  1.10.2.2  matt 
   1612  1.10.2.2  matt 	assert(run->magic == ARENA_RUN_MAGIC);
   1613  1.10.2.2  matt 	assert(run->regs_minelm < bin->regs_mask_nelms);
   1614  1.10.2.2  matt 
   1615  1.10.2.2  matt 	/*
   1616  1.10.2.2  matt 	 * Move the first check outside the loop, so that run->regs_minelm can
   1617  1.10.2.2  matt 	 * be updated unconditionally, without the possibility of updating it
   1618  1.10.2.2  matt 	 * multiple times.
   1619  1.10.2.2  matt 	 */
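                         	/*
                         	 * Each regs_mask element covers one int's worth of regions; with
                         	 * 32-bit ints (SIZEOF_INT_2POW == 2), element i and bit b map to
                         	 * region index (i << 5) + b.  A set bit marks a free region.
                         	 */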
   1620  1.10.2.2  matt 	i = run->regs_minelm;
   1621  1.10.2.2  matt 	mask = run->regs_mask[i];
   1622  1.10.2.2  matt 	if (mask != 0) {
   1623  1.10.2.2  matt 		/* Usable allocation found. */
   1624  1.10.2.2  matt 		bit = ffs((int)mask) - 1;
   1625  1.10.2.2  matt 
   1626  1.10.2.2  matt 		regind = ((i << (SIZEOF_INT_2POW + 3)) + bit);
   1627  1.10.2.2  matt 		ret = (void *)(((uintptr_t)run) + bin->reg0_offset
   1628  1.10.2.2  matt 		    + (bin->reg_size * regind));
   1629  1.10.2.2  matt 
   1630  1.10.2.2  matt 		/* Clear bit. */
   1631  1.10.2.2  matt 		mask ^= (1 << bit);
   1632  1.10.2.2  matt 		run->regs_mask[i] = mask;
   1633  1.10.2.2  matt 
   1634  1.10.2.2  matt 		return (ret);
   1635  1.10.2.2  matt 	}
   1636  1.10.2.2  matt 
   1637  1.10.2.2  matt 	for (i++; i < bin->regs_mask_nelms; i++) {
   1638  1.10.2.2  matt 		mask = run->regs_mask[i];
   1639  1.10.2.2  matt 		if (mask != 0) {
   1640  1.10.2.2  matt 			/* Usable allocation found. */
   1641  1.10.2.2  matt 			bit = ffs((int)mask) - 1;
   1642  1.10.2.2  matt 
   1643  1.10.2.2  matt 			regind = ((i << (SIZEOF_INT_2POW + 3)) + bit);
   1644  1.10.2.2  matt 			ret = (void *)(((uintptr_t)run) + bin->reg0_offset
   1645  1.10.2.2  matt 			    + (bin->reg_size * regind));
   1646  1.10.2.2  matt 
   1647  1.10.2.2  matt 			/* Clear bit. */
   1648  1.10.2.2  matt 			mask ^= (1 << bit);
   1649  1.10.2.2  matt 			run->regs_mask[i] = mask;
   1650  1.10.2.2  matt 
   1651  1.10.2.2  matt 			/*
   1652  1.10.2.2  matt 			 * Make a note that nothing before this element
   1653  1.10.2.2  matt 			 * contains a free region.
   1654  1.10.2.2  matt 			 */
   1655  1.10.2.2  matt 			run->regs_minelm = i; /* Low payoff: + (mask == 0); */
   1656  1.10.2.2  matt 
   1657  1.10.2.2  matt 			return (ret);
   1658  1.10.2.2  matt 		}
   1659  1.10.2.2  matt 	}
   1660  1.10.2.2  matt 	/* Not reached. */
   1661  1.10.2.2  matt 	/* LINTED */
   1662  1.10.2.2  matt 	assert(0);
   1663  1.10.2.2  matt 	return (NULL);
   1664  1.10.2.2  matt }
   1665  1.10.2.2  matt 
   1666  1.10.2.2  matt static inline void
   1667  1.10.2.2  matt arena_run_reg_dalloc(arena_run_t *run, arena_bin_t *bin, void *ptr, size_t size)
   1668  1.10.2.2  matt {
   1669  1.10.2.2  matt 	/*
   1670  1.10.2.2  matt 	 * To divide by a number D that is not a power of two we multiply
   1671  1.10.2.2  matt 	 * by (2^21 / D) and then right shift by 21 positions.
   1672  1.10.2.2  matt 	 *
   1673  1.10.2.2  matt 	 *   X / D
   1674  1.10.2.2  matt 	 *
   1675  1.10.2.2  matt 	 * becomes
   1676  1.10.2.2  matt 	 *
   1677  1.10.2.2  matt 	 *   (X * size_invs[(D >> QUANTUM_2POW_MIN) - 3]) >> SIZE_INV_SHIFT
   1678  1.10.2.2  matt 	 */
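                         	/*
                         	 * Worked example (assuming QUANTUM_2POW_MIN == 4): for a 48 byte
                         	 * region size, size_invs[0] == SIZE_INV(3) == (2^21 / 48) + 1 ==
                         	 * 43691, so freeing the region at diff == 96 computes
                         	 * (96 * 43691) >> 21 == 2, i.e. region index 2, with no division.
                         	 */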
   1679  1.10.2.2  matt #define SIZE_INV_SHIFT 21
   1680  1.10.2.2  matt #define SIZE_INV(s) (((1 << SIZE_INV_SHIFT) / (s << QUANTUM_2POW_MIN)) + 1)
   1681  1.10.2.2  matt 	static const unsigned size_invs[] = {
   1682  1.10.2.2  matt 	    SIZE_INV(3),
   1683  1.10.2.2  matt 	    SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7),
   1684  1.10.2.2  matt 	    SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11),
   1685  1.10.2.2  matt 	    SIZE_INV(12),SIZE_INV(13), SIZE_INV(14), SIZE_INV(15),
   1686  1.10.2.2  matt 	    SIZE_INV(16),SIZE_INV(17), SIZE_INV(18), SIZE_INV(19),
   1687  1.10.2.2  matt 	    SIZE_INV(20),SIZE_INV(21), SIZE_INV(22), SIZE_INV(23),
   1688  1.10.2.2  matt 	    SIZE_INV(24),SIZE_INV(25), SIZE_INV(26), SIZE_INV(27),
   1689  1.10.2.2  matt 	    SIZE_INV(28),SIZE_INV(29), SIZE_INV(30), SIZE_INV(31)
   1690  1.10.2.2  matt #if (QUANTUM_2POW_MIN < 4)
   1691  1.10.2.2  matt 	    ,
   1692  1.10.2.2  matt 	    SIZE_INV(32), SIZE_INV(33), SIZE_INV(34), SIZE_INV(35),
   1693  1.10.2.2  matt 	    SIZE_INV(36), SIZE_INV(37), SIZE_INV(38), SIZE_INV(39),
   1694  1.10.2.2  matt 	    SIZE_INV(40), SIZE_INV(41), SIZE_INV(42), SIZE_INV(43),
   1695  1.10.2.2  matt 	    SIZE_INV(44), SIZE_INV(45), SIZE_INV(46), SIZE_INV(47),
   1696  1.10.2.2  matt 	    SIZE_INV(48), SIZE_INV(49), SIZE_INV(50), SIZE_INV(51),
   1697  1.10.2.2  matt 	    SIZE_INV(52), SIZE_INV(53), SIZE_INV(54), SIZE_INV(55),
   1698  1.10.2.2  matt 	    SIZE_INV(56), SIZE_INV(57), SIZE_INV(58), SIZE_INV(59),
   1699  1.10.2.2  matt 	    SIZE_INV(60), SIZE_INV(61), SIZE_INV(62), SIZE_INV(63)
   1700  1.10.2.2  matt #endif
   1701  1.10.2.2  matt 	};
   1702  1.10.2.2  matt 	unsigned diff, regind, elm, bit;
   1703  1.10.2.2  matt 
   1704  1.10.2.2  matt 	/* LINTED */
   1705  1.10.2.2  matt 	assert(run->magic == ARENA_RUN_MAGIC);
   1706  1.10.2.2  matt 	assert(((sizeof(size_invs)) / sizeof(unsigned)) + 3
   1707  1.10.2.2  matt 	    >= (SMALL_MAX_DEFAULT >> QUANTUM_2POW_MIN));
   1708  1.10.2.2  matt 
   1709  1.10.2.2  matt 	/*
   1710  1.10.2.2  matt 	 * Avoid doing division with a variable divisor if possible.  Using
   1711  1.10.2.2  matt 	 * actual division here can reduce allocator throughput by over 20%!
   1712  1.10.2.2  matt 	 */
   1713  1.10.2.2  matt 	diff = (unsigned)((uintptr_t)ptr - (uintptr_t)run - bin->reg0_offset);
   1714  1.10.2.2  matt 	if ((size & (size - 1)) == 0) {
   1715  1.10.2.2  matt 		/*
   1716  1.10.2.2  matt 		 * log2_table allows fast division of a power of two in the
   1717  1.10.2.2  matt 		 * [1..128] range.
   1718  1.10.2.2  matt 		 *
   1719  1.10.2.2  matt 		 * (x / divisor) becomes (x >> log2_table[divisor - 1]).
   1720  1.10.2.2  matt 		 */
   1721  1.10.2.2  matt 		static const unsigned char log2_table[] = {
   1722  1.10.2.2  matt 		    0, 1, 0, 2, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4,
   1723  1.10.2.2  matt 		    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5,
   1724  1.10.2.2  matt 		    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
   1725  1.10.2.2  matt 		    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6,
   1726  1.10.2.2  matt 		    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
   1727  1.10.2.2  matt 		    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
   1728  1.10.2.2  matt 		    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
   1729  1.10.2.2  matt 		    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7
   1730  1.10.2.2  matt 		};
   1731  1.10.2.2  matt 
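                         		/*
                         		 * For example, a 64 byte region gives log2_table[63] == 6, so
                         		 * regind == diff >> 6; a 256 byte region takes the second case
                         		 * and gives diff >> (8 + log2_table[0]), i.e. diff >> 8.
                         		 */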
   1732  1.10.2.2  matt 		if (size <= 128)
   1733  1.10.2.2  matt 			regind = (diff >> log2_table[size - 1]);
   1734  1.10.2.2  matt 		else if (size <= 32768)
   1735  1.10.2.2  matt 			regind = diff >> (8 + log2_table[(size >> 8) - 1]);
   1736  1.10.2.2  matt 		else {
   1737  1.10.2.2  matt 			/*
   1738  1.10.2.2  matt 			 * The page size is too large for us to use the lookup
   1739  1.10.2.2  matt 			 * table.  Use real division.
   1740  1.10.2.2  matt 			 */
   1741  1.10.2.2  matt 			regind = (unsigned)(diff / size);
   1742  1.10.2.2  matt 		}
   1743  1.10.2.2  matt 	} else if (size <= ((sizeof(size_invs) / sizeof(unsigned))
   1744  1.10.2.2  matt 	    << QUANTUM_2POW_MIN) + 2) {
   1745  1.10.2.2  matt 		regind = size_invs[(size >> QUANTUM_2POW_MIN) - 3] * diff;
   1746  1.10.2.2  matt 		regind >>= SIZE_INV_SHIFT;
   1747  1.10.2.2  matt 	} else {
   1748  1.10.2.2  matt 		/*
   1749  1.10.2.2  matt 		 * size_invs isn't large enough to handle this size class, so
   1750  1.10.2.2  matt 		 * calculate regind using actual division.  This only happens
   1751  1.10.2.2  matt 		 * if the user increases small_max via the 'S' runtime
   1752  1.10.2.2  matt 		 * configuration option.
   1753  1.10.2.2  matt 		 */
   1754  1.10.2.2  matt 		regind = (unsigned)(diff / size);
    1755  1.10.2.2  matt 	}
   1756  1.10.2.2  matt 	assert(diff == regind * size);
   1757  1.10.2.2  matt 	assert(regind < bin->nregs);
   1758  1.10.2.2  matt 
   1759  1.10.2.2  matt 	elm = regind >> (SIZEOF_INT_2POW + 3);
   1760  1.10.2.2  matt 	if (elm < run->regs_minelm)
   1761  1.10.2.2  matt 		run->regs_minelm = elm;
   1762  1.10.2.2  matt 	bit = regind - (elm << (SIZEOF_INT_2POW + 3));
   1763  1.10.2.2  matt 	assert((run->regs_mask[elm] & (1 << bit)) == 0);
   1764  1.10.2.2  matt 	run->regs_mask[elm] |= (1 << bit);
   1765  1.10.2.2  matt #undef SIZE_INV
   1766  1.10.2.2  matt #undef SIZE_INV_SHIFT
   1767  1.10.2.2  matt }
   1768  1.10.2.2  matt 
   1769  1.10.2.2  matt static void
   1770  1.10.2.2  matt arena_run_split(arena_t *arena, arena_run_t *run, size_t size)
   1771  1.10.2.2  matt {
   1772  1.10.2.2  matt 	arena_chunk_t *chunk;
   1773  1.10.2.2  matt 	unsigned run_ind, map_offset, total_pages, need_pages, rem_pages;
   1774  1.10.2.2  matt 	unsigned i;
   1775  1.10.2.2  matt 
   1776  1.10.2.2  matt 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
   1777  1.10.2.2  matt 	run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk)
   1778  1.10.2.2  matt 	    >> pagesize_2pow);
   1779  1.10.2.2  matt 	total_pages = chunk->map[run_ind].npages;
   1780  1.10.2.2  matt 	need_pages = (unsigned)(size >> pagesize_2pow);
   1781  1.10.2.2  matt 	assert(need_pages <= total_pages);
   1782  1.10.2.2  matt 	rem_pages = total_pages - need_pages;
   1783  1.10.2.2  matt 
    1784  1.10.2.2  matt 	/* Split enough pages from the front of the run to fit the allocation. */
   1785  1.10.2.2  matt 	map_offset = run_ind;
   1786  1.10.2.2  matt 	for (i = 0; i < need_pages; i++) {
   1787  1.10.2.2  matt 		chunk->map[map_offset + i].npages = need_pages;
   1788  1.10.2.2  matt 		chunk->map[map_offset + i].pos = i;
   1789  1.10.2.2  matt 	}
   1790  1.10.2.2  matt 
   1791  1.10.2.2  matt 	/* Keep track of trailing unused pages for later use. */
   1792  1.10.2.2  matt 	if (rem_pages > 0) {
   1793  1.10.2.2  matt 		/* Update map for trailing pages. */
   1794  1.10.2.2  matt 		map_offset += need_pages;
   1795  1.10.2.2  matt 		chunk->map[map_offset].npages = rem_pages;
   1796  1.10.2.2  matt 		chunk->map[map_offset].pos = POS_FREE;
   1797  1.10.2.2  matt 		chunk->map[map_offset + rem_pages - 1].npages = rem_pages;
   1798  1.10.2.2  matt 		chunk->map[map_offset + rem_pages - 1].pos = POS_FREE;
   1799  1.10.2.2  matt 	}
   1800  1.10.2.2  matt 
   1801  1.10.2.2  matt 	chunk->pages_used += need_pages;
   1802  1.10.2.2  matt }
   1803  1.10.2.2  matt 
   1804  1.10.2.2  matt static arena_chunk_t *
   1805  1.10.2.2  matt arena_chunk_alloc(arena_t *arena)
   1806  1.10.2.2  matt {
   1807  1.10.2.2  matt 	arena_chunk_t *chunk;
   1808  1.10.2.2  matt 
   1809  1.10.2.2  matt 	if (arena->spare != NULL) {
   1810  1.10.2.2  matt 		chunk = arena->spare;
   1811  1.10.2.2  matt 		arena->spare = NULL;
   1812  1.10.2.2  matt 
   1813  1.10.2.2  matt 		/* LINTED */
   1814  1.10.2.2  matt 		RB_INSERT(arena_chunk_tree_s, &arena->chunks, chunk);
   1815  1.10.2.2  matt 	} else {
   1816  1.10.2.2  matt 		chunk = (arena_chunk_t *)chunk_alloc(chunksize);
   1817  1.10.2.2  matt 		if (chunk == NULL)
   1818  1.10.2.2  matt 			return (NULL);
   1819  1.10.2.2  matt #ifdef MALLOC_STATS
   1820  1.10.2.2  matt 		arena->stats.mapped += chunksize;
   1821  1.10.2.2  matt #endif
   1822  1.10.2.2  matt 
   1823  1.10.2.2  matt 		chunk->arena = arena;
   1824  1.10.2.2  matt 
   1825  1.10.2.2  matt 		/* LINTED */
   1826  1.10.2.2  matt 		RB_INSERT(arena_chunk_tree_s, &arena->chunks, chunk);
   1827  1.10.2.2  matt 
   1828  1.10.2.2  matt 		/*
   1829  1.10.2.2  matt 		 * Claim that no pages are in use, since the header is merely
   1830  1.10.2.2  matt 		 * overhead.
   1831  1.10.2.2  matt 		 */
   1832  1.10.2.2  matt 		chunk->pages_used = 0;
   1833  1.10.2.2  matt 
   1834  1.10.2.2  matt 		chunk->max_frun_npages = chunk_npages -
   1835  1.10.2.2  matt 		    arena_chunk_header_npages;
   1836  1.10.2.2  matt 		chunk->min_frun_ind = arena_chunk_header_npages;
   1837  1.10.2.2  matt 
   1838  1.10.2.2  matt 		/*
   1839  1.10.2.2  matt 		 * Initialize enough of the map to support one maximal free run.
   1840  1.10.2.2  matt 		 */
   1841  1.10.2.2  matt 		chunk->map[arena_chunk_header_npages].npages = chunk_npages -
   1842  1.10.2.2  matt 		    arena_chunk_header_npages;
   1843  1.10.2.2  matt 		chunk->map[arena_chunk_header_npages].pos = POS_FREE;
   1844  1.10.2.2  matt 		chunk->map[chunk_npages - 1].npages = chunk_npages -
   1845  1.10.2.2  matt 		    arena_chunk_header_npages;
   1846  1.10.2.2  matt 		chunk->map[chunk_npages - 1].pos = POS_FREE;
   1847  1.10.2.2  matt 	}
   1848  1.10.2.2  matt 
   1849  1.10.2.2  matt 	return (chunk);
   1850  1.10.2.2  matt }
   1851  1.10.2.2  matt 
   1852  1.10.2.2  matt static void
   1853  1.10.2.2  matt arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
   1854  1.10.2.2  matt {
   1855  1.10.2.2  matt 
   1856  1.10.2.2  matt 	/*
   1857  1.10.2.2  matt 	 * Remove chunk from the chunk tree, regardless of whether this chunk
   1858  1.10.2.2  matt 	 * will be cached, so that the arena does not use it.
   1859  1.10.2.2  matt 	 */
   1860  1.10.2.2  matt 	/* LINTED */
   1861  1.10.2.2  matt 	RB_REMOVE(arena_chunk_tree_s, &chunk->arena->chunks, chunk);
   1862  1.10.2.2  matt 
   1863  1.10.2.2  matt 	if (opt_hint == false) {
   1864  1.10.2.2  matt 		if (arena->spare != NULL) {
   1865  1.10.2.2  matt 			chunk_dealloc((void *)arena->spare, chunksize);
   1866  1.10.2.2  matt #ifdef MALLOC_STATS
   1867  1.10.2.2  matt 			arena->stats.mapped -= chunksize;
   1868  1.10.2.2  matt #endif
   1869  1.10.2.2  matt 		}
   1870  1.10.2.2  matt 		arena->spare = chunk;
   1871  1.10.2.2  matt 	} else {
   1872  1.10.2.2  matt 		assert(arena->spare == NULL);
   1873  1.10.2.2  matt 		chunk_dealloc((void *)chunk, chunksize);
   1874  1.10.2.2  matt #ifdef MALLOC_STATS
   1875  1.10.2.2  matt 		arena->stats.mapped -= chunksize;
   1876  1.10.2.2  matt #endif
   1877  1.10.2.2  matt 	}
   1878  1.10.2.2  matt }
   1879  1.10.2.2  matt 
   1880  1.10.2.2  matt static arena_run_t *
   1881  1.10.2.2  matt arena_run_alloc(arena_t *arena, size_t size)
   1882  1.10.2.2  matt {
   1883  1.10.2.2  matt 	arena_chunk_t *chunk;
   1884  1.10.2.2  matt 	arena_run_t *run;
   1885  1.10.2.2  matt 	unsigned need_npages, limit_pages, compl_need_npages;
   1886  1.10.2.2  matt 
   1887  1.10.2.2  matt 	assert(size <= (chunksize - (arena_chunk_header_npages <<
   1888  1.10.2.2  matt 	    pagesize_2pow)));
   1889  1.10.2.2  matt 	assert((size & pagesize_mask) == 0);
   1890  1.10.2.2  matt 
   1891  1.10.2.2  matt 	/*
    1892  1.10.2.2  matt 	 * Search through the arena's chunks in address order for a free run
    1893  1.10.2.2  matt 	 * that is large enough.  Look for the first fit.
   1894  1.10.2.2  matt 	 */
   1895  1.10.2.2  matt 	need_npages = (unsigned)(size >> pagesize_2pow);
   1896  1.10.2.2  matt 	limit_pages = chunk_npages - arena_chunk_header_npages;
   1897  1.10.2.2  matt 	compl_need_npages = limit_pages - need_npages;
   1898  1.10.2.2  matt 	/* LINTED */
   1899  1.10.2.2  matt 	RB_FOREACH(chunk, arena_chunk_tree_s, &arena->chunks) {
   1900  1.10.2.2  matt 		/*
   1901  1.10.2.2  matt 		 * Avoid searching this chunk if there are not enough
   1902  1.10.2.2  matt 		 * contiguous free pages for there to possibly be a large
   1903  1.10.2.2  matt 		 * enough free run.
   1904  1.10.2.2  matt 		 */
   1905  1.10.2.2  matt 		if (chunk->pages_used <= compl_need_npages &&
   1906  1.10.2.2  matt 		    need_npages <= chunk->max_frun_npages) {
   1907  1.10.2.2  matt 			arena_chunk_map_t *mapelm;
   1908  1.10.2.2  matt 			unsigned i;
   1909  1.10.2.2  matt 			unsigned max_frun_npages = 0;
   1910  1.10.2.2  matt 			unsigned min_frun_ind = chunk_npages;
   1911  1.10.2.2  matt 
   1912  1.10.2.2  matt 			assert(chunk->min_frun_ind >=
   1913  1.10.2.2  matt 			    arena_chunk_header_npages);
   1914  1.10.2.2  matt 			for (i = chunk->min_frun_ind; i < chunk_npages;) {
   1915  1.10.2.2  matt 				mapelm = &chunk->map[i];
   1916  1.10.2.2  matt 				if (mapelm->pos == POS_FREE) {
   1917  1.10.2.2  matt 					if (mapelm->npages >= need_npages) {
   1918  1.10.2.2  matt 						run = (arena_run_t *)
   1919  1.10.2.2  matt 						    ((uintptr_t)chunk + (i <<
   1920  1.10.2.2  matt 						    pagesize_2pow));
   1921  1.10.2.2  matt 						/* Update page map. */
   1922  1.10.2.2  matt 						arena_run_split(arena, run,
   1923  1.10.2.2  matt 						    size);
   1924  1.10.2.2  matt 						return (run);
   1925  1.10.2.2  matt 					}
   1926  1.10.2.2  matt 					if (mapelm->npages >
   1927  1.10.2.2  matt 					    max_frun_npages) {
   1928  1.10.2.2  matt 						max_frun_npages =
   1929  1.10.2.2  matt 						    mapelm->npages;
   1930  1.10.2.2  matt 					}
   1931  1.10.2.2  matt 					if (i < min_frun_ind) {
   1932  1.10.2.2  matt 						min_frun_ind = i;
   1933  1.10.2.2  matt 						if (i < chunk->min_frun_ind)
   1934  1.10.2.2  matt 							chunk->min_frun_ind = i;
   1935  1.10.2.2  matt 					}
   1936  1.10.2.2  matt 				}
   1937  1.10.2.2  matt 				i += mapelm->npages;
   1938  1.10.2.2  matt 			}
   1939  1.10.2.2  matt 			/*
   1940  1.10.2.2  matt 			 * Search failure.  Reset cached chunk->max_frun_npages.
   1941  1.10.2.2  matt 			 * chunk->min_frun_ind was already reset above (if
   1942  1.10.2.2  matt 			 * necessary).
   1943  1.10.2.2  matt 			 */
   1944  1.10.2.2  matt 			chunk->max_frun_npages = max_frun_npages;
   1945  1.10.2.2  matt 		}
   1946  1.10.2.2  matt 	}
   1947  1.10.2.2  matt 
   1948  1.10.2.2  matt 	/*
   1949  1.10.2.2  matt 	 * No usable runs.  Create a new chunk from which to allocate the run.
   1950  1.10.2.2  matt 	 */
   1951  1.10.2.2  matt 	chunk = arena_chunk_alloc(arena);
   1952  1.10.2.2  matt 	if (chunk == NULL)
   1953  1.10.2.2  matt 		return (NULL);
   1954  1.10.2.2  matt 	run = (arena_run_t *)((uintptr_t)chunk + (arena_chunk_header_npages <<
   1955  1.10.2.2  matt 	    pagesize_2pow));
   1956  1.10.2.2  matt 	/* Update page map. */
   1957  1.10.2.2  matt 	arena_run_split(arena, run, size);
   1958  1.10.2.2  matt 	return (run);
   1959  1.10.2.2  matt }
   1960  1.10.2.2  matt 
   1961  1.10.2.2  matt static void
   1962  1.10.2.2  matt arena_run_dalloc(arena_t *arena, arena_run_t *run, size_t size)
   1963  1.10.2.2  matt {
   1964  1.10.2.2  matt 	arena_chunk_t *chunk;
   1965  1.10.2.2  matt 	unsigned run_ind, run_pages;
   1966  1.10.2.2  matt 
   1967  1.10.2.2  matt 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
   1968  1.10.2.2  matt 
   1969  1.10.2.2  matt 	run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk)
   1970  1.10.2.2  matt 	    >> pagesize_2pow);
   1971  1.10.2.2  matt 	assert(run_ind >= arena_chunk_header_npages);
   1972  1.10.2.2  matt 	assert(run_ind < (chunksize >> pagesize_2pow));
   1973  1.10.2.2  matt 	run_pages = (unsigned)(size >> pagesize_2pow);
   1974  1.10.2.2  matt 	assert(run_pages == chunk->map[run_ind].npages);
   1975  1.10.2.2  matt 
   1976  1.10.2.2  matt 	/* Subtract pages from count of pages used in chunk. */
   1977  1.10.2.2  matt 	chunk->pages_used -= run_pages;
   1978  1.10.2.2  matt 
   1979  1.10.2.2  matt 	/* Mark run as deallocated. */
   1980  1.10.2.2  matt 	assert(chunk->map[run_ind].npages == run_pages);
   1981  1.10.2.2  matt 	chunk->map[run_ind].pos = POS_FREE;
   1982  1.10.2.2  matt 	assert(chunk->map[run_ind + run_pages - 1].npages == run_pages);
   1983  1.10.2.2  matt 	chunk->map[run_ind + run_pages - 1].pos = POS_FREE;
   1984  1.10.2.2  matt 
   1985  1.10.2.2  matt 	/*
   1986  1.10.2.2  matt 	 * Tell the kernel that we don't need the data in this run, but only if
   1987  1.10.2.2  matt 	 * requested via runtime configuration.
   1988  1.10.2.2  matt 	 */
   1989  1.10.2.2  matt 	if (opt_hint)
   1990  1.10.2.2  matt 		madvise(run, size, MADV_FREE);
   1991  1.10.2.2  matt 
   1992  1.10.2.2  matt 	/* Try to coalesce with neighboring runs. */
   1993  1.10.2.2  matt 	if (run_ind > arena_chunk_header_npages &&
   1994  1.10.2.2  matt 	    chunk->map[run_ind - 1].pos == POS_FREE) {
   1995  1.10.2.2  matt 		unsigned prev_npages;
   1996  1.10.2.2  matt 
   1997  1.10.2.2  matt 		/* Coalesce with previous run. */
   1998  1.10.2.2  matt 		prev_npages = chunk->map[run_ind - 1].npages;
   1999  1.10.2.2  matt 		run_ind -= prev_npages;
   2000  1.10.2.2  matt 		assert(chunk->map[run_ind].npages == prev_npages);
   2001  1.10.2.2  matt 		assert(chunk->map[run_ind].pos == POS_FREE);
   2002  1.10.2.2  matt 		run_pages += prev_npages;
   2003  1.10.2.2  matt 
   2004  1.10.2.2  matt 		chunk->map[run_ind].npages = run_pages;
   2005  1.10.2.2  matt 		assert(chunk->map[run_ind].pos == POS_FREE);
   2006  1.10.2.2  matt 		chunk->map[run_ind + run_pages - 1].npages = run_pages;
   2007  1.10.2.2  matt 		assert(chunk->map[run_ind + run_pages - 1].pos == POS_FREE);
   2008  1.10.2.2  matt 	}
   2009  1.10.2.2  matt 
   2010  1.10.2.2  matt 	if (run_ind + run_pages < chunk_npages &&
   2011  1.10.2.2  matt 	    chunk->map[run_ind + run_pages].pos == POS_FREE) {
   2012  1.10.2.2  matt 		unsigned next_npages;
   2013  1.10.2.2  matt 
   2014  1.10.2.2  matt 		/* Coalesce with next run. */
   2015  1.10.2.2  matt 		next_npages = chunk->map[run_ind + run_pages].npages;
   2016  1.10.2.2  matt 		run_pages += next_npages;
   2017  1.10.2.2  matt 		assert(chunk->map[run_ind + run_pages - 1].npages ==
   2018  1.10.2.2  matt 		    next_npages);
   2019  1.10.2.2  matt 		assert(chunk->map[run_ind + run_pages - 1].pos == POS_FREE);
   2020  1.10.2.2  matt 
   2021  1.10.2.2  matt 		chunk->map[run_ind].npages = run_pages;
   2022  1.10.2.2  matt 		chunk->map[run_ind].pos = POS_FREE;
   2023  1.10.2.2  matt 		chunk->map[run_ind + run_pages - 1].npages = run_pages;
   2024  1.10.2.2  matt 		assert(chunk->map[run_ind + run_pages - 1].pos == POS_FREE);
   2025  1.10.2.2  matt 	}
   2026  1.10.2.2  matt 
   2027  1.10.2.2  matt 	if (chunk->map[run_ind].npages > chunk->max_frun_npages)
   2028  1.10.2.2  matt 		chunk->max_frun_npages = chunk->map[run_ind].npages;
   2029  1.10.2.2  matt 	if (run_ind < chunk->min_frun_ind)
   2030  1.10.2.2  matt 		chunk->min_frun_ind = run_ind;
   2031  1.10.2.2  matt 
   2032  1.10.2.2  matt 	/* Deallocate chunk if it is now completely unused. */
   2033  1.10.2.2  matt 	if (chunk->pages_used == 0)
   2034  1.10.2.2  matt 		arena_chunk_dealloc(arena, chunk);
   2035  1.10.2.2  matt }
   2036  1.10.2.2  matt 
   2037  1.10.2.2  matt static arena_run_t *
   2038  1.10.2.2  matt arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
   2039  1.10.2.2  matt {
   2040  1.10.2.2  matt 	arena_run_t *run;
   2041  1.10.2.2  matt 	unsigned i, remainder;
   2042  1.10.2.2  matt 
   2043  1.10.2.2  matt 	/* Look for a usable run. */
   2044  1.10.2.2  matt 	/* LINTED */
   2045  1.10.2.2  matt 	if ((run = RB_MIN(arena_run_tree_s, &bin->runs)) != NULL) {
   2046  1.10.2.2  matt 		/* run is guaranteed to have available space. */
   2047  1.10.2.2  matt 		/* LINTED */
   2048  1.10.2.2  matt 		RB_REMOVE(arena_run_tree_s, &bin->runs, run);
   2049  1.10.2.2  matt #ifdef MALLOC_STATS
   2050  1.10.2.2  matt 		bin->stats.reruns++;
   2051  1.10.2.2  matt #endif
   2052  1.10.2.2  matt 		return (run);
   2053  1.10.2.2  matt 	}
   2054  1.10.2.2  matt 	/* No existing runs have any space available. */
   2055  1.10.2.2  matt 
   2056  1.10.2.2  matt 	/* Allocate a new run. */
   2057  1.10.2.2  matt 	run = arena_run_alloc(arena, bin->run_size);
   2058  1.10.2.2  matt 	if (run == NULL)
   2059  1.10.2.2  matt 		return (NULL);
   2060  1.10.2.2  matt 
   2061  1.10.2.2  matt 	/* Initialize run internals. */
   2062  1.10.2.2  matt 	run->bin = bin;
   2063  1.10.2.2  matt 
   2064  1.10.2.2  matt 	for (i = 0; i < bin->regs_mask_nelms; i++)
   2065  1.10.2.2  matt 		run->regs_mask[i] = UINT_MAX;
   2066  1.10.2.2  matt 	remainder = bin->nregs & ((1 << (SIZEOF_INT_2POW + 3)) - 1);
   2067  1.10.2.2  matt 	if (remainder != 0) {
   2068  1.10.2.2  matt 		/* The last element has spare bits that need to be unset. */
   2069  1.10.2.2  matt 		run->regs_mask[i] = (UINT_MAX >> ((1 << (SIZEOF_INT_2POW + 3))
   2070  1.10.2.2  matt 		    - remainder));
   2071  1.10.2.2  matt 	}
   2072  1.10.2.2  matt 
   2073  1.10.2.2  matt 	run->regs_minelm = 0;
   2074  1.10.2.2  matt 
   2075  1.10.2.2  matt 	run->nfree = bin->nregs;
   2076  1.10.2.2  matt #ifdef MALLOC_DEBUG
   2077  1.10.2.2  matt 	run->magic = ARENA_RUN_MAGIC;
   2078  1.10.2.2  matt #endif
   2079  1.10.2.2  matt 
   2080  1.10.2.2  matt #ifdef MALLOC_STATS
   2081  1.10.2.2  matt 	bin->stats.nruns++;
   2082  1.10.2.2  matt 	bin->stats.curruns++;
   2083  1.10.2.2  matt 	if (bin->stats.curruns > bin->stats.highruns)
   2084  1.10.2.2  matt 		bin->stats.highruns = bin->stats.curruns;
   2085  1.10.2.2  matt #endif
   2086  1.10.2.2  matt 	return (run);
   2087  1.10.2.2  matt }
   2088  1.10.2.2  matt 
   2089  1.10.2.2  matt /* bin->runcur must have space available before this function is called. */
   2090  1.10.2.2  matt static inline void *
   2091  1.10.2.2  matt arena_bin_malloc_easy(arena_t *arena, arena_bin_t *bin, arena_run_t *run)
   2092  1.10.2.2  matt {
   2093  1.10.2.2  matt 	void *ret;
   2094  1.10.2.2  matt 
   2095  1.10.2.2  matt 	assert(run->magic == ARENA_RUN_MAGIC);
   2096  1.10.2.2  matt 	assert(run->nfree > 0);
   2097  1.10.2.2  matt 
   2098  1.10.2.2  matt 	ret = arena_run_reg_alloc(run, bin);
   2099  1.10.2.2  matt 	assert(ret != NULL);
   2100  1.10.2.2  matt 	run->nfree--;
   2101  1.10.2.2  matt 
   2102  1.10.2.2  matt 	return (ret);
   2103  1.10.2.2  matt }
   2104  1.10.2.2  matt 
   2105  1.10.2.2  matt /* Re-fill bin->runcur, then call arena_bin_malloc_easy(). */
   2106  1.10.2.2  matt static void *
   2107  1.10.2.2  matt arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
   2108  1.10.2.2  matt {
   2109  1.10.2.2  matt 
   2110  1.10.2.2  matt 	bin->runcur = arena_bin_nonfull_run_get(arena, bin);
   2111  1.10.2.2  matt 	if (bin->runcur == NULL)
   2112  1.10.2.2  matt 		return (NULL);
   2113  1.10.2.2  matt 	assert(bin->runcur->magic == ARENA_RUN_MAGIC);
   2114  1.10.2.2  matt 	assert(bin->runcur->nfree > 0);
   2115  1.10.2.2  matt 
   2116  1.10.2.2  matt 	return (arena_bin_malloc_easy(arena, bin, bin->runcur));
   2117  1.10.2.2  matt }
   2118  1.10.2.2  matt 
   2119  1.10.2.2  matt /*
   2120  1.10.2.2  matt  * Calculate bin->run_size such that it meets the following constraints:
   2121  1.10.2.2  matt  *
   2122  1.10.2.2  matt  *   *) bin->run_size >= min_run_size
   2123  1.10.2.2  matt  *   *) bin->run_size <= arena_maxclass
   2124  1.10.2.2  matt  *   *) bin->run_size <= RUN_MAX_SMALL
   2125  1.10.2.2  matt  *   *) run header overhead <= RUN_MAX_OVRHD (or header overhead relaxed).
   2126  1.10.2.2  matt  *
   2127  1.10.2.2  matt  * bin->nregs, bin->regs_mask_nelms, and bin->reg0_offset are
   2128  1.10.2.2  matt  * also calculated here, since these settings are all interdependent.
   2129  1.10.2.2  matt  */
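                         /*
                          * Rough sketch of the layout being sized (illustration only):
                          *
                          *     | arena_run_t ... regs_mask[regs_mask_nelms] | pad | nregs regions |
                          *
                          * For a candidate run_size, nregs is roughly
                          * (run_size - header size) / reg_size and reg0_offset == run_size -
                          * nregs * reg_size, so the loops below shrink nregs until the header
                          * fits below reg0_offset, then grow run_size while the relative
                          * overhead (reg0_offset / run_size) still exceeds RUN_MAX_OVRHD.
                          */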
   2130  1.10.2.2  matt static size_t
   2131  1.10.2.2  matt arena_bin_run_size_calc(arena_bin_t *bin, size_t min_run_size)
   2132  1.10.2.2  matt {
   2133  1.10.2.2  matt 	size_t try_run_size, good_run_size;
   2134  1.10.2.2  matt 	unsigned good_nregs, good_mask_nelms, good_reg0_offset;
   2135  1.10.2.2  matt 	unsigned try_nregs, try_mask_nelms, try_reg0_offset;
   2136  1.10.2.2  matt 	float max_ovrhd = RUN_MAX_OVRHD;
   2137  1.10.2.2  matt 
   2138  1.10.2.2  matt 	assert(min_run_size >= pagesize);
   2139  1.10.2.2  matt 	assert(min_run_size <= arena_maxclass);
   2140  1.10.2.2  matt 	assert(min_run_size <= RUN_MAX_SMALL);
   2141  1.10.2.2  matt 
   2142  1.10.2.2  matt 	/*
   2143  1.10.2.2  matt 	 * Calculate known-valid settings before entering the run_size
   2144  1.10.2.2  matt 	 * expansion loop, so that the first part of the loop always copies
   2145  1.10.2.2  matt 	 * valid settings.
   2146  1.10.2.2  matt 	 *
   2147  1.10.2.2  matt 	 * The do..while loop iteratively reduces the number of regions until
   2148  1.10.2.2  matt 	 * the run header and the regions no longer overlap.  A closed formula
   2149  1.10.2.2  matt 	 * would be quite messy, since there is an interdependency between the
   2150  1.10.2.2  matt 	 * header's mask length and the number of regions.
   2151  1.10.2.2  matt 	 */
   2152  1.10.2.2  matt 	try_run_size = min_run_size;
   2153  1.10.2.2  matt 	try_nregs = (unsigned)(((try_run_size - sizeof(arena_run_t)) /
   2154  1.10.2.2  matt 	    bin->reg_size) + 1); /* Counter-act the first line of the loop. */
   2155  1.10.2.2  matt 	do {
   2156  1.10.2.2  matt 		try_nregs--;
   2157  1.10.2.2  matt 		try_mask_nelms = (try_nregs >> (SIZEOF_INT_2POW + 3)) +
   2158  1.10.2.2  matt 		    ((try_nregs & ((1 << (SIZEOF_INT_2POW + 3)) - 1)) ? 1 : 0);
   2159  1.10.2.2  matt 		try_reg0_offset = (unsigned)(try_run_size -
   2160  1.10.2.2  matt 		    (try_nregs * bin->reg_size));
   2161  1.10.2.2  matt 	} while (sizeof(arena_run_t) + (sizeof(unsigned) * (try_mask_nelms - 1))
   2162  1.10.2.2  matt 	    > try_reg0_offset);
   2163  1.10.2.2  matt 
   2164  1.10.2.2  matt 	/* run_size expansion loop. */
   2165  1.10.2.2  matt 	do {
   2166  1.10.2.2  matt 		/*
   2167  1.10.2.2  matt 		 * Copy valid settings before trying more aggressive settings.
   2168  1.10.2.2  matt 		 */
   2169  1.10.2.2  matt 		good_run_size = try_run_size;
   2170  1.10.2.2  matt 		good_nregs = try_nregs;
   2171  1.10.2.2  matt 		good_mask_nelms = try_mask_nelms;
   2172  1.10.2.2  matt 		good_reg0_offset = try_reg0_offset;
   2173  1.10.2.2  matt 
   2174  1.10.2.2  matt 		/* Try more aggressive settings. */
   2175  1.10.2.2  matt 		try_run_size += pagesize;
   2176  1.10.2.2  matt 		try_nregs = (unsigned)(((try_run_size - sizeof(arena_run_t)) /
   2177  1.10.2.2  matt 		    bin->reg_size) + 1); /* Counter-act try_nregs-- in loop. */
   2178  1.10.2.2  matt 		do {
   2179  1.10.2.2  matt 			try_nregs--;
   2180  1.10.2.2  matt 			try_mask_nelms = (try_nregs >> (SIZEOF_INT_2POW + 3)) +
   2181  1.10.2.2  matt 			    ((try_nregs & ((1 << (SIZEOF_INT_2POW + 3)) - 1)) ?
   2182  1.10.2.2  matt 			    1 : 0);
   2183  1.10.2.2  matt 			try_reg0_offset = (unsigned)(try_run_size - (try_nregs *
   2184  1.10.2.2  matt 			    bin->reg_size));
   2185  1.10.2.2  matt 		} while (sizeof(arena_run_t) + (sizeof(unsigned) *
   2186  1.10.2.2  matt 		    (try_mask_nelms - 1)) > try_reg0_offset);
   2187  1.10.2.2  matt 	} while (try_run_size <= arena_maxclass && try_run_size <= RUN_MAX_SMALL
   2188  1.10.2.2  matt 	    && max_ovrhd > RUN_MAX_OVRHD_RELAX / ((float)(bin->reg_size << 3))
   2189  1.10.2.2  matt 	    && ((float)(try_reg0_offset)) / ((float)(try_run_size)) >
   2190  1.10.2.2  matt 	    max_ovrhd);
   2191  1.10.2.2  matt 
   2192  1.10.2.2  matt 	assert(sizeof(arena_run_t) + (sizeof(unsigned) * (good_mask_nelms - 1))
   2193  1.10.2.2  matt 	    <= good_reg0_offset);
   2194  1.10.2.2  matt 	assert((good_mask_nelms << (SIZEOF_INT_2POW + 3)) >= good_nregs);
   2195  1.10.2.2  matt 
   2196  1.10.2.2  matt 	/* Copy final settings. */
   2197  1.10.2.2  matt 	bin->run_size = good_run_size;
   2198  1.10.2.2  matt 	bin->nregs = good_nregs;
   2199  1.10.2.2  matt 	bin->regs_mask_nelms = good_mask_nelms;
   2200  1.10.2.2  matt 	bin->reg0_offset = good_reg0_offset;
   2201  1.10.2.2  matt 
   2202  1.10.2.2  matt 	return (good_run_size);
   2203  1.10.2.2  matt }
   2204  1.10.2.2  matt 
   2205  1.10.2.2  matt static void *
   2206  1.10.2.2  matt arena_malloc(arena_t *arena, size_t size)
   2207  1.10.2.2  matt {
   2208  1.10.2.2  matt 	void *ret;
   2209  1.10.2.2  matt 
   2210  1.10.2.2  matt 	assert(arena != NULL);
   2211  1.10.2.2  matt 	assert(arena->magic == ARENA_MAGIC);
   2212  1.10.2.2  matt 	assert(size != 0);
   2213  1.10.2.2  matt 	assert(QUANTUM_CEILING(size) <= arena_maxclass);
   2214  1.10.2.2  matt 
   2215  1.10.2.2  matt 	if (size <= bin_maxclass) {
   2216  1.10.2.2  matt 		arena_bin_t *bin;
   2217  1.10.2.2  matt 		arena_run_t *run;
   2218  1.10.2.2  matt 
   2219  1.10.2.2  matt 		/* Small allocation. */
   2220  1.10.2.2  matt 
   2221  1.10.2.2  matt 		if (size < small_min) {
   2222  1.10.2.2  matt 			/* Tiny. */
   2223  1.10.2.2  matt 			size = pow2_ceil(size);
   2224  1.10.2.2  matt 			bin = &arena->bins[ffs((int)(size >> (TINY_MIN_2POW +
   2225  1.10.2.2  matt 			    1)))];
   2226  1.10.2.2  matt #if (!defined(NDEBUG) || defined(MALLOC_STATS))
   2227  1.10.2.2  matt 			/*
   2228  1.10.2.2  matt 			 * Bin calculation is always correct, but we may need
   2229  1.10.2.2  matt 			 * to fix size for the purposes of assertions and/or
   2230  1.10.2.2  matt 			 * stats accuracy.
   2231  1.10.2.2  matt 			 */
   2232  1.10.2.2  matt 			if (size < (1 << TINY_MIN_2POW))
   2233  1.10.2.2  matt 				size = (1 << TINY_MIN_2POW);
   2234  1.10.2.2  matt #endif
   2235  1.10.2.2  matt 		} else if (size <= small_max) {
   2236  1.10.2.2  matt 			/* Quantum-spaced. */
   2237  1.10.2.2  matt 			size = QUANTUM_CEILING(size);
   2238  1.10.2.2  matt 			bin = &arena->bins[ntbins + (size >> opt_quantum_2pow)
   2239  1.10.2.2  matt 			    - 1];
   2240  1.10.2.2  matt 		} else {
   2241  1.10.2.2  matt 			/* Sub-page. */
   2242  1.10.2.2  matt 			size = pow2_ceil(size);
   2243  1.10.2.2  matt 			bin = &arena->bins[ntbins + nqbins
   2244  1.10.2.2  matt 			    + (ffs((int)(size >> opt_small_max_2pow)) - 2)];
   2245  1.10.2.2  matt 		}
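		/*
		 * Worked example, assuming the default 16 byte quantum and
		 * 512 byte small_max: a 100 byte request is quantum-spaced,
		 * rounds up to 112, and maps to bin ntbins + 112/16 - 1 =
		 * ntbins + 6; a 600 byte request is sub-page, rounds up to
		 * 1024, and maps to the first sub-page bin.
		 */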
   2246  1.10.2.2  matt 		assert(size == bin->reg_size);
   2247  1.10.2.2  matt 
   2248  1.10.2.2  matt 		malloc_mutex_lock(&arena->mtx);
   2249  1.10.2.2  matt 		if ((run = bin->runcur) != NULL && run->nfree > 0)
   2250  1.10.2.2  matt 			ret = arena_bin_malloc_easy(arena, bin, run);
   2251  1.10.2.2  matt 		else
   2252  1.10.2.2  matt 			ret = arena_bin_malloc_hard(arena, bin);
   2253  1.10.2.2  matt 
   2254  1.10.2.2  matt 		if (ret == NULL) {
   2255  1.10.2.2  matt 			malloc_mutex_unlock(&arena->mtx);
   2256  1.10.2.2  matt 			return (NULL);
   2257  1.10.2.2  matt 		}
   2258  1.10.2.2  matt 
   2259  1.10.2.2  matt #ifdef MALLOC_STATS
   2260  1.10.2.2  matt 		bin->stats.nrequests++;
   2261  1.10.2.2  matt 		arena->stats.nmalloc_small++;
   2262  1.10.2.2  matt 		arena->stats.allocated_small += size;
   2263  1.10.2.2  matt #endif
   2264  1.10.2.2  matt 	} else {
   2265  1.10.2.2  matt 		/* Large allocation. */
   2266  1.10.2.2  matt 		size = PAGE_CEILING(size);
   2267  1.10.2.2  matt 		malloc_mutex_lock(&arena->mtx);
   2268  1.10.2.2  matt 		ret = (void *)arena_run_alloc(arena, size);
   2269  1.10.2.2  matt 		if (ret == NULL) {
   2270  1.10.2.2  matt 			malloc_mutex_unlock(&arena->mtx);
   2271  1.10.2.2  matt 			return (NULL);
   2272  1.10.2.2  matt 		}
   2273  1.10.2.2  matt #ifdef MALLOC_STATS
   2274  1.10.2.2  matt 		arena->stats.nmalloc_large++;
   2275  1.10.2.2  matt 		arena->stats.allocated_large += size;
   2276  1.10.2.2  matt #endif
   2277  1.10.2.2  matt 	}
   2278  1.10.2.2  matt 
   2279  1.10.2.2  matt 	malloc_mutex_unlock(&arena->mtx);
   2280  1.10.2.2  matt 
   2281  1.10.2.2  matt 	if (opt_junk)
   2282  1.10.2.2  matt 		memset(ret, 0xa5, size);
   2283  1.10.2.2  matt 	else if (opt_zero)
   2284  1.10.2.2  matt 		memset(ret, 0, size);
   2285  1.10.2.2  matt 	return (ret);
   2286  1.10.2.2  matt }
   2287  1.10.2.2  matt 
   2288  1.10.2.2  matt static inline void
   2289  1.10.2.2  matt arena_palloc_trim(arena_t *arena, arena_chunk_t *chunk, unsigned pageind,
   2290  1.10.2.2  matt     unsigned npages)
   2291  1.10.2.2  matt {
   2292  1.10.2.2  matt 	unsigned i;
   2293  1.10.2.2  matt 
   2294  1.10.2.2  matt 	assert(npages > 0);
   2295  1.10.2.2  matt 
   2296  1.10.2.2  matt 	/*
    2297  1.10.2.2  matt 	 * Modify the map such that arena_run_dalloc() sees the run as
   2298  1.10.2.2  matt 	 * separately allocated.
   2299  1.10.2.2  matt 	 */
   2300  1.10.2.2  matt 	for (i = 0; i < npages; i++) {
   2301  1.10.2.2  matt 		chunk->map[pageind + i].npages = npages;
   2302  1.10.2.2  matt 		chunk->map[pageind + i].pos = i;
   2303  1.10.2.2  matt 	}
   2304  1.10.2.2  matt 	arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)chunk + (pageind <<
   2305  1.10.2.2  matt 	    pagesize_2pow)), npages << pagesize_2pow);
   2306  1.10.2.2  matt }
   2307  1.10.2.2  matt 
   2308  1.10.2.2  matt /* Only handles large allocations that require more than page alignment. */
   2309  1.10.2.2  matt static void *
   2310  1.10.2.2  matt arena_palloc(arena_t *arena, size_t alignment, size_t size, size_t alloc_size)
   2311  1.10.2.2  matt {
   2312  1.10.2.2  matt 	void *ret;
   2313  1.10.2.2  matt 	size_t offset;
   2314  1.10.2.2  matt 	arena_chunk_t *chunk;
   2315  1.10.2.2  matt 	unsigned pageind, i, npages;
   2316  1.10.2.2  matt 
   2317  1.10.2.2  matt 	assert((size & pagesize_mask) == 0);
   2318  1.10.2.2  matt 	assert((alignment & pagesize_mask) == 0);
   2319  1.10.2.2  matt 
   2320  1.10.2.2  matt 	npages = (unsigned)(size >> pagesize_2pow);
   2321  1.10.2.2  matt 
   2322  1.10.2.2  matt 	malloc_mutex_lock(&arena->mtx);
   2323  1.10.2.2  matt 	ret = (void *)arena_run_alloc(arena, alloc_size);
   2324  1.10.2.2  matt 	if (ret == NULL) {
   2325  1.10.2.2  matt 		malloc_mutex_unlock(&arena->mtx);
   2326  1.10.2.2  matt 		return (NULL);
   2327  1.10.2.2  matt 	}
   2328  1.10.2.2  matt 
   2329  1.10.2.2  matt 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
   2330  1.10.2.2  matt 
   2331  1.10.2.2  matt 	offset = (uintptr_t)ret & (alignment - 1);
   2332  1.10.2.2  matt 	assert((offset & pagesize_mask) == 0);
   2333  1.10.2.2  matt 	assert(offset < alloc_size);
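	/*
	 * Illustration (assuming 4 KiB pages): for alignment = 8192 and
	 * size = 4096, ipalloc() passes alloc_size = 12288.  If the run
	 * happens to start 4096 bytes past an 8 KiB boundary, offset = 4096,
	 * so one leading page and one trailing page are trimmed below and
	 * only the aligned middle page is kept.
	 */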
   2334  1.10.2.2  matt 	if (offset == 0) {
   2335  1.10.2.2  matt 		pageind = (unsigned)(((uintptr_t)ret - (uintptr_t)chunk) >>
   2336  1.10.2.2  matt 		    pagesize_2pow);
   2337  1.10.2.2  matt 
   2338  1.10.2.2  matt 		/* Update the map for the run to be kept. */
   2339  1.10.2.2  matt 		for (i = 0; i < npages; i++) {
   2340  1.10.2.2  matt 			chunk->map[pageind + i].npages = npages;
   2341  1.10.2.2  matt 			assert(chunk->map[pageind + i].pos == i);
   2342  1.10.2.2  matt 		}
   2343  1.10.2.2  matt 
   2344  1.10.2.2  matt 		/* Trim trailing space. */
   2345  1.10.2.2  matt 		arena_palloc_trim(arena, chunk, pageind + npages,
   2346  1.10.2.2  matt 		    (unsigned)((alloc_size - size) >> pagesize_2pow));
   2347  1.10.2.2  matt 	} else {
   2348  1.10.2.2  matt 		size_t leadsize, trailsize;
   2349  1.10.2.2  matt 
   2350  1.10.2.2  matt 		leadsize = alignment - offset;
   2351  1.10.2.2  matt 		ret = (void *)((uintptr_t)ret + leadsize);
   2352  1.10.2.2  matt 		pageind = (unsigned)(((uintptr_t)ret - (uintptr_t)chunk) >>
   2353  1.10.2.2  matt 		    pagesize_2pow);
   2354  1.10.2.2  matt 
   2355  1.10.2.2  matt 		/* Update the map for the run to be kept. */
   2356  1.10.2.2  matt 		for (i = 0; i < npages; i++) {
   2357  1.10.2.2  matt 			chunk->map[pageind + i].npages = npages;
   2358  1.10.2.2  matt 			chunk->map[pageind + i].pos = i;
   2359  1.10.2.2  matt 		}
   2360  1.10.2.2  matt 
   2361  1.10.2.2  matt 		/* Trim leading space. */
   2362  1.10.2.2  matt 		arena_palloc_trim(arena, chunk,
   2363  1.10.2.2  matt 		    (unsigned)(pageind - (leadsize >> pagesize_2pow)),
   2364  1.10.2.2  matt 		    (unsigned)(leadsize >> pagesize_2pow));
   2365  1.10.2.2  matt 
   2366  1.10.2.2  matt 		trailsize = alloc_size - leadsize - size;
   2367  1.10.2.2  matt 		if (trailsize != 0) {
   2368  1.10.2.2  matt 			/* Trim trailing space. */
   2369  1.10.2.2  matt 			assert(trailsize < alloc_size);
   2370  1.10.2.2  matt 			arena_palloc_trim(arena, chunk, pageind + npages,
   2371  1.10.2.2  matt 			    (unsigned)(trailsize >> pagesize_2pow));
   2372  1.10.2.2  matt 		}
   2373  1.10.2.2  matt 	}
   2374  1.10.2.2  matt 
   2375  1.10.2.2  matt #ifdef MALLOC_STATS
   2376  1.10.2.2  matt 	arena->stats.nmalloc_large++;
   2377  1.10.2.2  matt 	arena->stats.allocated_large += size;
   2378  1.10.2.2  matt #endif
   2379  1.10.2.2  matt 	malloc_mutex_unlock(&arena->mtx);
   2380  1.10.2.2  matt 
   2381  1.10.2.2  matt 	if (opt_junk)
   2382  1.10.2.2  matt 		memset(ret, 0xa5, size);
   2383  1.10.2.2  matt 	else if (opt_zero)
   2384  1.10.2.2  matt 		memset(ret, 0, size);
   2385  1.10.2.2  matt 	return (ret);
   2386  1.10.2.2  matt }
   2387  1.10.2.2  matt 
   2388  1.10.2.2  matt /* Return the size of the allocation pointed to by ptr. */
   2389  1.10.2.2  matt static size_t
   2390  1.10.2.2  matt arena_salloc(const void *ptr)
   2391  1.10.2.2  matt {
   2392  1.10.2.2  matt 	size_t ret;
   2393  1.10.2.2  matt 	arena_chunk_t *chunk;
   2394  1.10.2.2  matt 	arena_chunk_map_t *mapelm;
   2395  1.10.2.2  matt 	unsigned pageind;
   2396  1.10.2.2  matt 
   2397  1.10.2.2  matt 	assert(ptr != NULL);
   2398  1.10.2.2  matt 	assert(CHUNK_ADDR2BASE(ptr) != ptr);
   2399  1.10.2.2  matt 
   2400  1.10.2.2  matt 	/*
   2401  1.10.2.2  matt 	 * No arena data structures that we query here can change in a way that
   2402  1.10.2.2  matt 	 * affects this function, so we don't need to lock.
   2403  1.10.2.2  matt 	 */
   2404  1.10.2.2  matt 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
   2405  1.10.2.2  matt 	pageind = (unsigned)(((uintptr_t)ptr - (uintptr_t)chunk) >>
   2406  1.10.2.2  matt 	    pagesize_2pow);
   2407  1.10.2.2  matt 	mapelm = &chunk->map[pageind];
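	/*
	 * A large allocation always starts at the first page of its run, so
	 * its map entry has pos == 0 and ptr is page-aligned.  Anything else
	 * is a region inside a small run; backing up by pos pages reaches the
	 * run header, which records the bin (and thus the region size).
	 */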
   2408  1.10.2.2  matt 	if (mapelm->pos != 0 || ptr != (char *)((uintptr_t)chunk) + (pageind <<
   2409  1.10.2.2  matt 	    pagesize_2pow)) {
   2410  1.10.2.2  matt 		arena_run_t *run;
   2411  1.10.2.2  matt 
   2412  1.10.2.2  matt 		pageind -= mapelm->pos;
   2413  1.10.2.2  matt 
   2414  1.10.2.2  matt 		run = (arena_run_t *)((uintptr_t)chunk + (pageind <<
   2415  1.10.2.2  matt 		    pagesize_2pow));
   2416  1.10.2.2  matt 		assert(run->magic == ARENA_RUN_MAGIC);
   2417  1.10.2.2  matt 		ret = run->bin->reg_size;
   2418  1.10.2.2  matt 	} else
   2419  1.10.2.2  matt 		ret = mapelm->npages << pagesize_2pow;
   2420  1.10.2.2  matt 
   2421  1.10.2.2  matt 	return (ret);
   2422  1.10.2.2  matt }
   2423  1.10.2.2  matt 
   2424  1.10.2.2  matt static void *
   2425  1.10.2.2  matt arena_ralloc(void *ptr, size_t size, size_t oldsize)
   2426  1.10.2.2  matt {
   2427  1.10.2.2  matt 	void *ret;
   2428  1.10.2.2  matt 
   2429  1.10.2.2  matt 	/* Avoid moving the allocation if the size class would not change. */
   2430  1.10.2.2  matt 	if (size < small_min) {
   2431  1.10.2.2  matt 		if (oldsize < small_min &&
   2432  1.10.2.2  matt 		    ffs((int)(pow2_ceil(size) >> (TINY_MIN_2POW + 1)))
   2433  1.10.2.2  matt 		    == ffs((int)(pow2_ceil(oldsize) >> (TINY_MIN_2POW + 1))))
   2434  1.10.2.2  matt 			goto IN_PLACE;
   2435  1.10.2.2  matt 	} else if (size <= small_max) {
   2436  1.10.2.2  matt 		if (oldsize >= small_min && oldsize <= small_max &&
   2437  1.10.2.2  matt 		    (QUANTUM_CEILING(size) >> opt_quantum_2pow)
   2438  1.10.2.2  matt 		    == (QUANTUM_CEILING(oldsize) >> opt_quantum_2pow))
   2439  1.10.2.2  matt 			goto IN_PLACE;
   2440  1.10.2.2  matt 	} else {
   2441  1.10.2.2  matt 		/*
   2442  1.10.2.2  matt 		 * We make no attempt to resize runs here, though it would be
   2443  1.10.2.2  matt 		 * possible to do so.
   2444  1.10.2.2  matt 		 */
   2445  1.10.2.2  matt 		if (oldsize > small_max && PAGE_CEILING(size) == oldsize)
   2446  1.10.2.2  matt 			goto IN_PLACE;
   2447  1.10.2.2  matt 	}
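	/*
	 * Example (assuming the default 16 byte quantum): growing a 100 byte
	 * allocation to 110 bytes stays in the 112 byte class and takes the
	 * IN_PLACE path, whereas growing it to 120 bytes moves it to the
	 * 128 byte class and falls through to the copying path below.
	 */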
   2448  1.10.2.2  matt 
   2449  1.10.2.2  matt 	/*
   2450  1.10.2.2  matt 	 * If we get here, then size and oldsize are different enough that we
   2451  1.10.2.2  matt 	 * need to use a different size class.  In that case, fall back to
   2452  1.10.2.2  matt 	 * allocating new space and copying.
   2453  1.10.2.2  matt 	 */
   2454  1.10.2.2  matt 	ret = arena_malloc(choose_arena(), size);
   2455  1.10.2.2  matt 	if (ret == NULL)
   2456  1.10.2.2  matt 		return (NULL);
   2457  1.10.2.2  matt 
   2458  1.10.2.2  matt 	/* Junk/zero-filling were already done by arena_malloc(). */
   2459  1.10.2.2  matt 	if (size < oldsize)
   2460  1.10.2.2  matt 		memcpy(ret, ptr, size);
   2461  1.10.2.2  matt 	else
   2462  1.10.2.2  matt 		memcpy(ret, ptr, oldsize);
   2463  1.10.2.2  matt 	idalloc(ptr);
   2464  1.10.2.2  matt 	return (ret);
   2465  1.10.2.2  matt IN_PLACE:
   2466  1.10.2.2  matt 	if (opt_junk && size < oldsize)
   2467  1.10.2.2  matt 		memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize - size);
   2468  1.10.2.2  matt 	else if (opt_zero && size > oldsize)
   2469  1.10.2.2  matt 		memset((void *)((uintptr_t)ptr + oldsize), 0, size - oldsize);
   2470  1.10.2.2  matt 	return (ptr);
   2471  1.10.2.2  matt }
   2472  1.10.2.2  matt 
   2473  1.10.2.2  matt static void
   2474  1.10.2.2  matt arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr)
   2475  1.10.2.2  matt {
   2476  1.10.2.2  matt 	unsigned pageind;
   2477  1.10.2.2  matt 	arena_chunk_map_t *mapelm;
   2478  1.10.2.2  matt 	size_t size;
   2479  1.10.2.2  matt 
   2480  1.10.2.2  matt 	assert(arena != NULL);
   2481  1.10.2.2  matt 	assert(arena->magic == ARENA_MAGIC);
   2482  1.10.2.2  matt 	assert(chunk->arena == arena);
   2483  1.10.2.2  matt 	assert(ptr != NULL);
   2484  1.10.2.2  matt 	assert(CHUNK_ADDR2BASE(ptr) != ptr);
   2485  1.10.2.2  matt 
   2486  1.10.2.2  matt 	pageind = (unsigned)(((uintptr_t)ptr - (uintptr_t)chunk) >>
   2487  1.10.2.2  matt 	    pagesize_2pow);
   2488  1.10.2.2  matt 	mapelm = &chunk->map[pageind];
   2489  1.10.2.2  matt 	if (mapelm->pos != 0 || ptr != (char *)((uintptr_t)chunk) + (pageind <<
   2490  1.10.2.2  matt 	    pagesize_2pow)) {
   2491  1.10.2.2  matt 		arena_run_t *run;
   2492  1.10.2.2  matt 		arena_bin_t *bin;
   2493  1.10.2.2  matt 
   2494  1.10.2.2  matt 		/* Small allocation. */
   2495  1.10.2.2  matt 
   2496  1.10.2.2  matt 		pageind -= mapelm->pos;
   2497  1.10.2.2  matt 
   2498  1.10.2.2  matt 		run = (arena_run_t *)((uintptr_t)chunk + (pageind <<
   2499  1.10.2.2  matt 		    pagesize_2pow));
   2500  1.10.2.2  matt 		assert(run->magic == ARENA_RUN_MAGIC);
   2501  1.10.2.2  matt 		bin = run->bin;
   2502  1.10.2.2  matt 		size = bin->reg_size;
   2503  1.10.2.2  matt 
   2504  1.10.2.2  matt 		if (opt_junk)
   2505  1.10.2.2  matt 			memset(ptr, 0x5a, size);
   2506  1.10.2.2  matt 
   2507  1.10.2.2  matt 		malloc_mutex_lock(&arena->mtx);
   2508  1.10.2.2  matt 		arena_run_reg_dalloc(run, bin, ptr, size);
   2509  1.10.2.2  matt 		run->nfree++;
   2510  1.10.2.2  matt 
   2511  1.10.2.2  matt 		if (run->nfree == bin->nregs) {
   2512  1.10.2.2  matt 			/* Deallocate run. */
   2513  1.10.2.2  matt 			if (run == bin->runcur)
   2514  1.10.2.2  matt 				bin->runcur = NULL;
   2515  1.10.2.2  matt 			else if (bin->nregs != 1) {
   2516  1.10.2.2  matt 				/*
   2517  1.10.2.2  matt 				 * This block's conditional is necessary because
   2518  1.10.2.2  matt 				 * if the run only contains one region, then it
   2519  1.10.2.2  matt 				 * never gets inserted into the non-full runs
   2520  1.10.2.2  matt 				 * tree.
   2521  1.10.2.2  matt 				 */
   2522  1.10.2.2  matt 				/* LINTED */
   2523  1.10.2.2  matt 				RB_REMOVE(arena_run_tree_s, &bin->runs, run);
   2524  1.10.2.2  matt 			}
   2525  1.10.2.2  matt #ifdef MALLOC_DEBUG
   2526  1.10.2.2  matt 			run->magic = 0;
   2527  1.10.2.2  matt #endif
   2528  1.10.2.2  matt 			arena_run_dalloc(arena, run, bin->run_size);
   2529  1.10.2.2  matt #ifdef MALLOC_STATS
   2530  1.10.2.2  matt 			bin->stats.curruns--;
   2531  1.10.2.2  matt #endif
   2532  1.10.2.2  matt 		} else if (run->nfree == 1 && run != bin->runcur) {
   2533  1.10.2.2  matt 			/*
   2534  1.10.2.2  matt 			 * Make sure that bin->runcur always refers to the
   2535  1.10.2.2  matt 			 * lowest non-full run, if one exists.
   2536  1.10.2.2  matt 			 */
   2537  1.10.2.2  matt 			if (bin->runcur == NULL)
   2538  1.10.2.2  matt 				bin->runcur = run;
   2539  1.10.2.2  matt 			else if ((uintptr_t)run < (uintptr_t)bin->runcur) {
   2540  1.10.2.2  matt 				/* Switch runcur. */
   2541  1.10.2.2  matt 				if (bin->runcur->nfree > 0) {
   2542  1.10.2.2  matt 					/* Insert runcur. */
   2543  1.10.2.2  matt 					/* LINTED */
   2544  1.10.2.2  matt 					RB_INSERT(arena_run_tree_s, &bin->runs,
   2545  1.10.2.2  matt 					    bin->runcur);
   2546  1.10.2.2  matt 				}
   2547  1.10.2.2  matt 				bin->runcur = run;
   2548  1.10.2.2  matt 			} else {
   2549  1.10.2.2  matt 				/* LINTED */
   2550  1.10.2.2  matt 				RB_INSERT(arena_run_tree_s, &bin->runs, run);
   2551  1.10.2.2  matt 			}
   2552  1.10.2.2  matt 		}
   2553  1.10.2.2  matt #ifdef MALLOC_STATS
   2554  1.10.2.2  matt 		arena->stats.allocated_small -= size;
   2555  1.10.2.2  matt 		arena->stats.ndalloc_small++;
   2556  1.10.2.2  matt #endif
   2557  1.10.2.2  matt 	} else {
   2558  1.10.2.2  matt 		/* Large allocation. */
   2559  1.10.2.2  matt 
   2560  1.10.2.2  matt 		size = mapelm->npages << pagesize_2pow;
   2561  1.10.2.2  matt 		assert((((uintptr_t)ptr) & pagesize_mask) == 0);
   2562  1.10.2.2  matt 
   2563  1.10.2.2  matt 		if (opt_junk)
   2564  1.10.2.2  matt 			memset(ptr, 0x5a, size);
   2565  1.10.2.2  matt 
   2566  1.10.2.2  matt 		malloc_mutex_lock(&arena->mtx);
   2567  1.10.2.2  matt 		arena_run_dalloc(arena, (arena_run_t *)ptr, size);
   2568  1.10.2.2  matt #ifdef MALLOC_STATS
   2569  1.10.2.2  matt 		arena->stats.allocated_large -= size;
   2570  1.10.2.2  matt 		arena->stats.ndalloc_large++;
   2571  1.10.2.2  matt #endif
   2572  1.10.2.2  matt 	}
   2573  1.10.2.2  matt 
   2574  1.10.2.2  matt 	malloc_mutex_unlock(&arena->mtx);
   2575  1.10.2.2  matt }
   2576  1.10.2.2  matt 
   2577  1.10.2.2  matt static bool
   2578  1.10.2.2  matt arena_new(arena_t *arena)
   2579  1.10.2.2  matt {
   2580  1.10.2.2  matt 	unsigned i;
   2581  1.10.2.2  matt 	arena_bin_t *bin;
   2582  1.10.2.2  matt 	size_t prev_run_size;
   2583  1.10.2.2  matt 
   2584  1.10.2.2  matt 	malloc_mutex_init(&arena->mtx);
   2585  1.10.2.2  matt 
   2586  1.10.2.2  matt #ifdef MALLOC_STATS
   2587  1.10.2.2  matt 	memset(&arena->stats, 0, sizeof(arena_stats_t));
   2588  1.10.2.2  matt #endif
   2589  1.10.2.2  matt 
   2590  1.10.2.2  matt 	/* Initialize chunks. */
   2591  1.10.2.2  matt 	RB_INIT(&arena->chunks);
   2592  1.10.2.2  matt 	arena->spare = NULL;
   2593  1.10.2.2  matt 
   2594  1.10.2.2  matt 	/* Initialize bins. */
   2595  1.10.2.2  matt 	prev_run_size = pagesize;
   2596  1.10.2.2  matt 
   2597  1.10.2.2  matt 	/* (2^n)-spaced tiny bins. */
   2598  1.10.2.2  matt 	for (i = 0; i < ntbins; i++) {
   2599  1.10.2.2  matt 		bin = &arena->bins[i];
   2600  1.10.2.2  matt 		bin->runcur = NULL;
   2601  1.10.2.2  matt 		RB_INIT(&bin->runs);
   2602  1.10.2.2  matt 
   2603  1.10.2.2  matt 		bin->reg_size = (1 << (TINY_MIN_2POW + i));
   2604  1.10.2.2  matt 		prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
   2605  1.10.2.2  matt 
   2606  1.10.2.2  matt #ifdef MALLOC_STATS
   2607  1.10.2.2  matt 		memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
   2608  1.10.2.2  matt #endif
   2609  1.10.2.2  matt 	}
   2610  1.10.2.2  matt 
   2611  1.10.2.2  matt 	/* Quantum-spaced bins. */
   2612  1.10.2.2  matt 	for (; i < ntbins + nqbins; i++) {
   2613  1.10.2.2  matt 		bin = &arena->bins[i];
   2614  1.10.2.2  matt 		bin->runcur = NULL;
   2615  1.10.2.2  matt 		RB_INIT(&bin->runs);
   2616  1.10.2.2  matt 
   2617  1.10.2.2  matt 		bin->reg_size = quantum * (i - ntbins + 1);
   2618  1.10.2.2  matt /*
   2619  1.10.2.2  matt 		pow2_size = pow2_ceil(quantum * (i - ntbins + 1));
   2620  1.10.2.2  matt */
   2621  1.10.2.2  matt 		prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
   2622  1.10.2.2  matt 
   2623  1.10.2.2  matt #ifdef MALLOC_STATS
   2624  1.10.2.2  matt 		memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
   2625  1.10.2.2  matt #endif
   2626  1.10.2.2  matt 	}
   2627  1.10.2.2  matt 
   2628  1.10.2.2  matt 	/* (2^n)-spaced sub-page bins. */
   2629  1.10.2.2  matt 	for (; i < ntbins + nqbins + nsbins; i++) {
   2630  1.10.2.2  matt 		bin = &arena->bins[i];
   2631  1.10.2.2  matt 		bin->runcur = NULL;
   2632  1.10.2.2  matt 		RB_INIT(&bin->runs);
   2633  1.10.2.2  matt 
   2634  1.10.2.2  matt 		bin->reg_size = (small_max << (i - (ntbins + nqbins) + 1));
   2635  1.10.2.2  matt 
   2636  1.10.2.2  matt 		prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
   2637  1.10.2.2  matt 
   2638  1.10.2.2  matt #ifdef MALLOC_STATS
   2639  1.10.2.2  matt 		memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
   2640  1.10.2.2  matt #endif
   2641  1.10.2.2  matt 	}
   2642  1.10.2.2  matt 
   2643  1.10.2.2  matt #ifdef MALLOC_DEBUG
   2644  1.10.2.2  matt 	arena->magic = ARENA_MAGIC;
   2645  1.10.2.2  matt #endif
   2646  1.10.2.2  matt 
   2647  1.10.2.2  matt 	return (false);
   2648  1.10.2.2  matt }
   2649  1.10.2.2  matt 
   2650  1.10.2.2  matt /* Create a new arena and insert it into the arenas array at index ind. */
   2651  1.10.2.2  matt static arena_t *
   2652  1.10.2.2  matt arenas_extend(unsigned ind)
   2653  1.10.2.2  matt {
   2654  1.10.2.2  matt 	arena_t *ret;
   2655  1.10.2.2  matt 
   2656  1.10.2.2  matt 	/* Allocate enough space for trailing bins. */
   2657  1.10.2.2  matt 	ret = (arena_t *)base_alloc(sizeof(arena_t)
   2658  1.10.2.2  matt 	    + (sizeof(arena_bin_t) * (ntbins + nqbins + nsbins - 1)));
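	/*
	 * (arena_t is declared with a one-element bins array, so the extra
	 * ntbins + nqbins + nsbins - 1 elements are allocated as trailing
	 * space here.)
	 */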
   2659  1.10.2.2  matt 	if (ret != NULL && arena_new(ret) == false) {
   2660  1.10.2.2  matt 		arenas[ind] = ret;
   2661  1.10.2.2  matt 		return (ret);
   2662  1.10.2.2  matt 	}
   2663  1.10.2.2  matt 	/* Only reached if there is an OOM error. */
   2664  1.10.2.2  matt 
   2665  1.10.2.2  matt 	/*
   2666  1.10.2.2  matt 	 * OOM here is quite inconvenient to propagate, since dealing with it
   2667  1.10.2.2  matt 	 * would require a check for failure in the fast path.  Instead, punt
   2668  1.10.2.2  matt 	 * by using arenas[0].  In practice, this is an extremely unlikely
   2669  1.10.2.2  matt 	 * failure.
   2670  1.10.2.2  matt 	 */
   2671  1.10.2.3  matt 	_malloc_message(getprogname(),
   2672  1.10.2.2  matt 	    ": (malloc) Error initializing arena\n", "", "");
   2673  1.10.2.2  matt 	if (opt_abort)
   2674  1.10.2.2  matt 		abort();
   2675  1.10.2.2  matt 
   2676  1.10.2.2  matt 	return (arenas[0]);
   2677  1.10.2.2  matt }
   2678  1.10.2.2  matt 
   2679  1.10.2.2  matt /*
   2680  1.10.2.2  matt  * End arena.
   2681  1.10.2.2  matt  */
   2682  1.10.2.2  matt /******************************************************************************/
   2683  1.10.2.2  matt /*
   2684  1.10.2.2  matt  * Begin general internal functions.
   2685  1.10.2.2  matt  */
   2686  1.10.2.2  matt 
   2687  1.10.2.2  matt static void *
   2688  1.10.2.2  matt huge_malloc(size_t size)
   2689  1.10.2.2  matt {
   2690  1.10.2.2  matt 	void *ret;
   2691  1.10.2.2  matt 	size_t csize;
   2692  1.10.2.2  matt 	chunk_node_t *node;
   2693  1.10.2.2  matt 
   2694  1.10.2.2  matt 	/* Allocate one or more contiguous chunks for this request. */
   2695  1.10.2.2  matt 
   2696  1.10.2.2  matt 	csize = CHUNK_CEILING(size);
   2697  1.10.2.2  matt 	if (csize == 0) {
   2698  1.10.2.2  matt 		/* size is large enough to cause size_t wrap-around. */
   2699  1.10.2.2  matt 		return (NULL);
   2700  1.10.2.2  matt 	}
   2701  1.10.2.2  matt 
   2702  1.10.2.2  matt 	/* Allocate a chunk node with which to track the chunk. */
   2703  1.10.2.2  matt 	node = base_chunk_node_alloc();
   2704  1.10.2.2  matt 	if (node == NULL)
   2705  1.10.2.2  matt 		return (NULL);
   2706  1.10.2.2  matt 
   2707  1.10.2.2  matt 	ret = chunk_alloc(csize);
   2708  1.10.2.2  matt 	if (ret == NULL) {
   2709  1.10.2.2  matt 		base_chunk_node_dealloc(node);
   2710  1.10.2.2  matt 		return (NULL);
   2711  1.10.2.2  matt 	}
   2712  1.10.2.2  matt 
   2713  1.10.2.2  matt 	/* Insert node into huge. */
   2714  1.10.2.2  matt 	node->chunk = ret;
   2715  1.10.2.2  matt 	node->size = csize;
   2716  1.10.2.2  matt 
   2717  1.10.2.2  matt 	malloc_mutex_lock(&chunks_mtx);
   2718  1.10.2.2  matt 	RB_INSERT(chunk_tree_s, &huge, node);
   2719  1.10.2.2  matt #ifdef MALLOC_STATS
   2720  1.10.2.2  matt 	huge_nmalloc++;
   2721  1.10.2.2  matt 	huge_allocated += csize;
   2722  1.10.2.2  matt #endif
   2723  1.10.2.2  matt 	malloc_mutex_unlock(&chunks_mtx);
   2724  1.10.2.2  matt 
   2725  1.10.2.2  matt 	if (opt_junk)
   2726  1.10.2.2  matt 		memset(ret, 0xa5, csize);
   2727  1.10.2.2  matt 	else if (opt_zero)
   2728  1.10.2.2  matt 		memset(ret, 0, csize);
   2729  1.10.2.2  matt 
   2730  1.10.2.2  matt 	return (ret);
   2731  1.10.2.2  matt }
   2732  1.10.2.2  matt 
   2733  1.10.2.2  matt /* Only handles large allocations that require more than chunk alignment. */
   2734  1.10.2.2  matt static void *
   2735  1.10.2.2  matt huge_palloc(size_t alignment, size_t size)
   2736  1.10.2.2  matt {
   2737  1.10.2.2  matt 	void *ret;
   2738  1.10.2.2  matt 	size_t alloc_size, chunk_size, offset;
   2739  1.10.2.2  matt 	chunk_node_t *node;
   2740  1.10.2.2  matt 
   2741  1.10.2.2  matt 	/*
   2742  1.10.2.2  matt 	 * This allocation requires alignment that is even larger than chunk
   2743  1.10.2.2  matt 	 * alignment.  This means that huge_malloc() isn't good enough.
   2744  1.10.2.2  matt 	 *
   2745  1.10.2.2  matt 	 * Allocate almost twice as many chunks as are demanded by the size or
    2746  1.10.2.2  matt  * alignment, in order to ensure the alignment can be achieved, then
   2747  1.10.2.2  matt 	 * unmap leading and trailing chunks.
   2748  1.10.2.2  matt 	 */
   2749  1.10.2.2  matt 	assert(alignment >= chunksize);
   2750  1.10.2.2  matt 
   2751  1.10.2.2  matt 	chunk_size = CHUNK_CEILING(size);
   2752  1.10.2.2  matt 
   2753  1.10.2.2  matt 	if (size >= alignment)
   2754  1.10.2.2  matt 		alloc_size = chunk_size + alignment - chunksize;
   2755  1.10.2.2  matt 	else
   2756  1.10.2.2  matt 		alloc_size = (alignment << 1) - chunksize;
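	/*
	 * Example, assuming the default 1 MB chunk size: a 3 MB request with
	 * 2 MB alignment allocates 3 + 2 - 1 = 4 MB, which is guaranteed to
	 * contain a 2 MB-aligned 3 MB region; the excess leading and trailing
	 * chunks are unmapped below.
	 */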
   2757  1.10.2.2  matt 
   2758  1.10.2.2  matt 	/* Allocate a chunk node with which to track the chunk. */
   2759  1.10.2.2  matt 	node = base_chunk_node_alloc();
   2760  1.10.2.2  matt 	if (node == NULL)
   2761  1.10.2.2  matt 		return (NULL);
   2762  1.10.2.2  matt 
   2763  1.10.2.2  matt 	ret = chunk_alloc(alloc_size);
   2764  1.10.2.2  matt 	if (ret == NULL) {
   2765  1.10.2.2  matt 		base_chunk_node_dealloc(node);
   2766  1.10.2.2  matt 		return (NULL);
   2767  1.10.2.2  matt 	}
   2768  1.10.2.2  matt 
   2769  1.10.2.2  matt 	offset = (uintptr_t)ret & (alignment - 1);
   2770  1.10.2.2  matt 	assert((offset & chunksize_mask) == 0);
   2771  1.10.2.2  matt 	assert(offset < alloc_size);
   2772  1.10.2.2  matt 	if (offset == 0) {
   2773  1.10.2.2  matt 		/* Trim trailing space. */
   2774  1.10.2.2  matt 		chunk_dealloc((void *)((uintptr_t)ret + chunk_size), alloc_size
   2775  1.10.2.2  matt 		    - chunk_size);
   2776  1.10.2.2  matt 	} else {
   2777  1.10.2.2  matt 		size_t trailsize;
   2778  1.10.2.2  matt 
   2779  1.10.2.2  matt 		/* Trim leading space. */
   2780  1.10.2.2  matt 		chunk_dealloc(ret, alignment - offset);
   2781  1.10.2.2  matt 
   2782  1.10.2.2  matt 		ret = (void *)((uintptr_t)ret + (alignment - offset));
   2783  1.10.2.2  matt 
   2784  1.10.2.2  matt 		trailsize = alloc_size - (alignment - offset) - chunk_size;
   2785  1.10.2.2  matt 		if (trailsize != 0) {
    2786  1.10.2.2  matt 			/* Trim trailing space. */
    2787  1.10.2.2  matt 			assert(trailsize < alloc_size);
    2788  1.10.2.2  matt 			chunk_dealloc((void *)((uintptr_t)ret + chunk_size),
    2789  1.10.2.2  matt 			    trailsize);
   2790  1.10.2.2  matt 		}
   2791  1.10.2.2  matt 	}
   2792  1.10.2.2  matt 
   2793  1.10.2.2  matt 	/* Insert node into huge. */
   2794  1.10.2.2  matt 	node->chunk = ret;
   2795  1.10.2.2  matt 	node->size = chunk_size;
   2796  1.10.2.2  matt 
   2797  1.10.2.2  matt 	malloc_mutex_lock(&chunks_mtx);
   2798  1.10.2.2  matt 	RB_INSERT(chunk_tree_s, &huge, node);
   2799  1.10.2.2  matt #ifdef MALLOC_STATS
   2800  1.10.2.2  matt 	huge_nmalloc++;
   2801  1.10.2.2  matt 	huge_allocated += chunk_size;
   2802  1.10.2.2  matt #endif
   2803  1.10.2.2  matt 	malloc_mutex_unlock(&chunks_mtx);
   2804  1.10.2.2  matt 
   2805  1.10.2.2  matt 	if (opt_junk)
   2806  1.10.2.2  matt 		memset(ret, 0xa5, chunk_size);
   2807  1.10.2.2  matt 	else if (opt_zero)
   2808  1.10.2.2  matt 		memset(ret, 0, chunk_size);
   2809  1.10.2.2  matt 
   2810  1.10.2.2  matt 	return (ret);
   2811  1.10.2.2  matt }
   2812  1.10.2.2  matt 
   2813  1.10.2.2  matt static void *
   2814  1.10.2.2  matt huge_ralloc(void *ptr, size_t size, size_t oldsize)
   2815  1.10.2.2  matt {
   2816  1.10.2.2  matt 	void *ret;
   2817  1.10.2.2  matt 
   2818  1.10.2.2  matt 	/* Avoid moving the allocation if the size class would not change. */
   2819  1.10.2.2  matt 	if (oldsize > arena_maxclass &&
   2820  1.10.2.2  matt 	    CHUNK_CEILING(size) == CHUNK_CEILING(oldsize)) {
   2821  1.10.2.2  matt 		if (opt_junk && size < oldsize) {
   2822  1.10.2.2  matt 			memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize
   2823  1.10.2.2  matt 			    - size);
   2824  1.10.2.2  matt 		} else if (opt_zero && size > oldsize) {
   2825  1.10.2.2  matt 			memset((void *)((uintptr_t)ptr + oldsize), 0, size
   2826  1.10.2.2  matt 			    - oldsize);
   2827  1.10.2.2  matt 		}
   2828  1.10.2.2  matt 		return (ptr);
   2829  1.10.2.2  matt 	}
   2830  1.10.2.2  matt 
   2831  1.10.2.2  matt 	if (CHUNK_ADDR2BASE(ptr) == ptr
   2832  1.10.2.2  matt #ifdef USE_BRK
   2833  1.10.2.2  matt 	    && ((uintptr_t)ptr < (uintptr_t)brk_base
   2834  1.10.2.2  matt 	    || (uintptr_t)ptr >= (uintptr_t)brk_max)
   2835  1.10.2.2  matt #endif
   2836  1.10.2.2  matt 	    ) {
   2837  1.10.2.2  matt 		chunk_node_t *node, key;
   2838  1.10.2.2  matt 		void *newptr;
   2839  1.10.2.2  matt 		size_t oldcsize;
   2840  1.10.2.2  matt 		size_t newcsize;
   2841  1.10.2.2  matt 
   2842  1.10.2.2  matt 		newcsize = CHUNK_CEILING(size);
   2843  1.10.2.2  matt 		oldcsize = CHUNK_CEILING(oldsize);
   2844  1.10.2.2  matt 		assert(oldcsize != newcsize);
   2845  1.10.2.2  matt 		if (newcsize == 0) {
   2846  1.10.2.2  matt 			/* size_t wrap-around */
   2847  1.10.2.2  matt 			return (NULL);
   2848  1.10.2.2  matt 		}
   2849  1.10.2.2  matt 		newptr = mremap(ptr, oldcsize, NULL, newcsize,
   2850  1.10.2.2  matt 		    MAP_ALIGNED(chunksize_2pow));
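		/*
		 * mremap() may satisfy the request at a different address;
		 * if so, the tree node is re-keyed below so that future
		 * lookups find the chunk at its new location.
		 */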
   2851  1.10.2.2  matt 		if (newptr != MAP_FAILED) {
   2852  1.10.2.2  matt 			assert(CHUNK_ADDR2BASE(newptr) == newptr);
   2853  1.10.2.2  matt 
   2854  1.10.2.2  matt 			/* update tree */
   2855  1.10.2.2  matt 			malloc_mutex_lock(&chunks_mtx);
   2856  1.10.2.2  matt 			key.chunk = __DECONST(void *, ptr);
   2857  1.10.2.2  matt 			/* LINTED */
   2858  1.10.2.2  matt 			node = RB_FIND(chunk_tree_s, &huge, &key);
   2859  1.10.2.2  matt 			assert(node != NULL);
   2860  1.10.2.2  matt 			assert(node->chunk == ptr);
   2861  1.10.2.2  matt 			assert(node->size == oldcsize);
   2862  1.10.2.2  matt 			node->size = newcsize;
   2863  1.10.2.2  matt 			if (ptr != newptr) {
   2864  1.10.2.2  matt 				RB_REMOVE(chunk_tree_s, &huge, node);
   2865  1.10.2.2  matt 				node->chunk = newptr;
   2866  1.10.2.2  matt 				RB_INSERT(chunk_tree_s, &huge, node);
   2867  1.10.2.2  matt 			}
   2868  1.10.2.2  matt #ifdef MALLOC_STATS
   2869  1.10.2.2  matt 			huge_nralloc++;
   2870  1.10.2.2  matt 			huge_allocated += newcsize - oldcsize;
   2871  1.10.2.2  matt 			if (newcsize > oldcsize) {
   2872  1.10.2.2  matt 				stats_chunks.curchunks +=
   2873  1.10.2.2  matt 				    (newcsize - oldcsize) / chunksize;
   2874  1.10.2.2  matt 				if (stats_chunks.curchunks >
   2875  1.10.2.2  matt 				    stats_chunks.highchunks)
   2876  1.10.2.2  matt 					stats_chunks.highchunks =
   2877  1.10.2.2  matt 					    stats_chunks.curchunks;
   2878  1.10.2.2  matt 			} else {
   2879  1.10.2.2  matt 				stats_chunks.curchunks -=
   2880  1.10.2.2  matt 				    (oldcsize - newcsize) / chunksize;
   2881  1.10.2.2  matt 			}
   2882  1.10.2.2  matt #endif
   2883  1.10.2.2  matt 			malloc_mutex_unlock(&chunks_mtx);
   2884  1.10.2.2  matt 
   2885  1.10.2.2  matt 			if (opt_junk && size < oldsize) {
   2886  1.10.2.2  matt 				memset((void *)((uintptr_t)newptr + size), 0x5a,
   2887  1.10.2.2  matt 				    newcsize - size);
   2888  1.10.2.2  matt 			} else if (opt_zero && size > oldsize) {
   2889  1.10.2.2  matt 				memset((void *)((uintptr_t)newptr + oldsize), 0,
   2890  1.10.2.2  matt 				    size - oldsize);
   2891  1.10.2.2  matt 			}
   2892  1.10.2.2  matt 			return (newptr);
   2893  1.10.2.2  matt 		}
   2894  1.10.2.2  matt 	}
   2895  1.10.2.2  matt 
   2896  1.10.2.2  matt 	/*
   2897  1.10.2.2  matt 	 * If we get here, then size and oldsize are different enough that we
   2898  1.10.2.2  matt 	 * need to use a different size class.  In that case, fall back to
   2899  1.10.2.2  matt 	 * allocating new space and copying.
   2900  1.10.2.2  matt 	 */
   2901  1.10.2.2  matt 	ret = huge_malloc(size);
   2902  1.10.2.2  matt 	if (ret == NULL)
   2903  1.10.2.2  matt 		return (NULL);
   2904  1.10.2.2  matt 
   2905  1.10.2.2  matt 	if (CHUNK_ADDR2BASE(ptr) == ptr) {
   2906  1.10.2.2  matt 		/* The old allocation is a chunk. */
   2907  1.10.2.2  matt 		if (size < oldsize)
   2908  1.10.2.2  matt 			memcpy(ret, ptr, size);
   2909  1.10.2.2  matt 		else
   2910  1.10.2.2  matt 			memcpy(ret, ptr, oldsize);
   2911  1.10.2.2  matt 	} else {
   2912  1.10.2.2  matt 		/* The old allocation is a region. */
   2913  1.10.2.2  matt 		assert(oldsize < size);
   2914  1.10.2.2  matt 		memcpy(ret, ptr, oldsize);
   2915  1.10.2.2  matt 	}
   2916  1.10.2.2  matt 	idalloc(ptr);
   2917  1.10.2.2  matt 	return (ret);
   2918  1.10.2.2  matt }
   2919  1.10.2.2  matt 
   2920  1.10.2.2  matt static void
   2921  1.10.2.2  matt huge_dalloc(void *ptr)
   2922  1.10.2.2  matt {
   2923  1.10.2.2  matt 	chunk_node_t key;
   2924  1.10.2.2  matt 	chunk_node_t *node;
   2925  1.10.2.2  matt 
   2926  1.10.2.2  matt 	malloc_mutex_lock(&chunks_mtx);
   2927  1.10.2.2  matt 
   2928  1.10.2.2  matt 	/* Extract from tree of huge allocations. */
   2929  1.10.2.2  matt 	key.chunk = ptr;
   2930  1.10.2.2  matt 	/* LINTED */
   2931  1.10.2.2  matt 	node = RB_FIND(chunk_tree_s, &huge, &key);
   2932  1.10.2.2  matt 	assert(node != NULL);
   2933  1.10.2.2  matt 	assert(node->chunk == ptr);
   2934  1.10.2.2  matt 	/* LINTED */
   2935  1.10.2.2  matt 	RB_REMOVE(chunk_tree_s, &huge, node);
   2936  1.10.2.2  matt 
   2937  1.10.2.2  matt #ifdef MALLOC_STATS
   2938  1.10.2.2  matt 	huge_ndalloc++;
   2939  1.10.2.2  matt 	huge_allocated -= node->size;
   2940  1.10.2.2  matt #endif
   2941  1.10.2.2  matt 
   2942  1.10.2.2  matt 	malloc_mutex_unlock(&chunks_mtx);
   2943  1.10.2.2  matt 
   2944  1.10.2.2  matt 	/* Unmap chunk. */
   2945  1.10.2.2  matt #ifdef USE_BRK
   2946  1.10.2.2  matt 	if (opt_junk)
   2947  1.10.2.2  matt 		memset(node->chunk, 0x5a, node->size);
   2948  1.10.2.2  matt #endif
   2949  1.10.2.2  matt 	chunk_dealloc(node->chunk, node->size);
   2950  1.10.2.2  matt 
   2951  1.10.2.2  matt 	base_chunk_node_dealloc(node);
   2952  1.10.2.2  matt }
   2953  1.10.2.2  matt 
   2954  1.10.2.2  matt static void *
   2955  1.10.2.2  matt imalloc(size_t size)
   2956  1.10.2.2  matt {
   2957  1.10.2.2  matt 	void *ret;
   2958  1.10.2.2  matt 
   2959  1.10.2.2  matt 	assert(size != 0);
   2960  1.10.2.2  matt 
   2961  1.10.2.2  matt 	if (size <= arena_maxclass)
   2962  1.10.2.2  matt 		ret = arena_malloc(choose_arena(), size);
   2963  1.10.2.2  matt 	else
   2964  1.10.2.2  matt 		ret = huge_malloc(size);
   2965  1.10.2.2  matt 
   2966  1.10.2.2  matt 	return (ret);
   2967  1.10.2.2  matt }
   2968  1.10.2.2  matt 
   2969  1.10.2.2  matt static void *
   2970  1.10.2.2  matt ipalloc(size_t alignment, size_t size)
   2971  1.10.2.2  matt {
   2972  1.10.2.2  matt 	void *ret;
   2973  1.10.2.2  matt 	size_t ceil_size;
   2974  1.10.2.2  matt 
   2975  1.10.2.2  matt 	/*
   2976  1.10.2.2  matt 	 * Round size up to the nearest multiple of alignment.
   2977  1.10.2.2  matt 	 *
   2978  1.10.2.2  matt 	 * This done, we can take advantage of the fact that for each small
   2979  1.10.2.2  matt 	 * size class, every object is aligned at the smallest power of two
   2980  1.10.2.2  matt 	 * that is non-zero in the base two representation of the size.  For
   2981  1.10.2.2  matt 	 * example:
   2982  1.10.2.2  matt 	 *
   2983  1.10.2.2  matt 	 *   Size |   Base 2 | Minimum alignment
   2984  1.10.2.2  matt 	 *   -----+----------+------------------
   2985  1.10.2.2  matt 	 *     96 |  1100000 |  32
   2986  1.10.2.2  matt 	 *    144 | 10100000 |  32
   2987  1.10.2.2  matt 	 *    192 | 11000000 |  64
   2988  1.10.2.2  matt 	 *
   2989  1.10.2.2  matt 	 * Depending on runtime settings, it is possible that arena_malloc()
   2990  1.10.2.2  matt 	 * will further round up to a power of two, but that never causes
   2991  1.10.2.2  matt 	 * correctness issues.
   2992  1.10.2.2  matt 	 */
   2993  1.10.2.2  matt 	ceil_size = (size + (alignment - 1)) & (-alignment);
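	/*
	 * For example, a size of 100 with a 64 byte alignment rounds up to
	 * (100 + 63) & -64 = 128.
	 */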
   2994  1.10.2.2  matt 	/*
   2995  1.10.2.2  matt 	 * (ceil_size < size) protects against the combination of maximal
   2996  1.10.2.2  matt 	 * alignment and size greater than maximal alignment.
   2997  1.10.2.2  matt 	 */
   2998  1.10.2.2  matt 	if (ceil_size < size) {
   2999  1.10.2.2  matt 		/* size_t overflow. */
   3000  1.10.2.2  matt 		return (NULL);
   3001  1.10.2.2  matt 	}
   3002  1.10.2.2  matt 
   3003  1.10.2.2  matt 	if (ceil_size <= pagesize || (alignment <= pagesize
   3004  1.10.2.2  matt 	    && ceil_size <= arena_maxclass))
   3005  1.10.2.2  matt 		ret = arena_malloc(choose_arena(), ceil_size);
   3006  1.10.2.2  matt 	else {
   3007  1.10.2.2  matt 		size_t run_size;
   3008  1.10.2.2  matt 
   3009  1.10.2.2  matt 		/*
   3010  1.10.2.2  matt 		 * We can't achieve sub-page alignment, so round up alignment
   3011  1.10.2.2  matt 		 * permanently; it makes later calculations simpler.
   3012  1.10.2.2  matt 		 */
   3013  1.10.2.2  matt 		alignment = PAGE_CEILING(alignment);
   3014  1.10.2.2  matt 		ceil_size = PAGE_CEILING(size);
   3015  1.10.2.2  matt 		/*
   3016  1.10.2.2  matt 		 * (ceil_size < size) protects against very large sizes within
   3017  1.10.2.2  matt 		 * pagesize of SIZE_T_MAX.
   3018  1.10.2.2  matt 		 *
   3019  1.10.2.2  matt 		 * (ceil_size + alignment < ceil_size) protects against the
   3020  1.10.2.2  matt 		 * combination of maximal alignment and ceil_size large enough
   3021  1.10.2.2  matt 		 * to cause overflow.  This is similar to the first overflow
   3022  1.10.2.2  matt 		 * check above, but it needs to be repeated due to the new
   3023  1.10.2.2  matt 		 * ceil_size value, which may now be *equal* to maximal
   3024  1.10.2.2  matt 		 * alignment, whereas before we only detected overflow if the
   3025  1.10.2.2  matt 		 * original size was *greater* than maximal alignment.
   3026  1.10.2.2  matt 		 */
   3027  1.10.2.2  matt 		if (ceil_size < size || ceil_size + alignment < ceil_size) {
   3028  1.10.2.2  matt 			/* size_t overflow. */
   3029  1.10.2.2  matt 			return (NULL);
   3030  1.10.2.2  matt 		}
   3031  1.10.2.2  matt 
   3032  1.10.2.2  matt 		/*
   3033  1.10.2.2  matt 		 * Calculate the size of the over-size run that arena_palloc()
   3034  1.10.2.2  matt 		 * would need to allocate in order to guarantee the alignment.
   3035  1.10.2.2  matt 		 */
   3036  1.10.2.2  matt 		if (ceil_size >= alignment)
   3037  1.10.2.2  matt 			run_size = ceil_size + alignment - pagesize;
   3038  1.10.2.2  matt 		else {
   3039  1.10.2.2  matt 			/*
   3040  1.10.2.2  matt 			 * It is possible that (alignment << 1) will cause
   3041  1.10.2.2  matt 			 * overflow, but it doesn't matter because we also
   3042  1.10.2.2  matt 			 * subtract pagesize, which in the case of overflow
   3043  1.10.2.2  matt 			 * leaves us with a very large run_size.  That causes
   3044  1.10.2.2  matt 			 * the first conditional below to fail, which means
   3045  1.10.2.2  matt 			 * that the bogus run_size value never gets used for
   3046  1.10.2.2  matt 			 * anything important.
   3047  1.10.2.2  matt 			 */
   3048  1.10.2.2  matt 			run_size = (alignment << 1) - pagesize;
   3049  1.10.2.2  matt 		}
   3050  1.10.2.2  matt 
   3051  1.10.2.2  matt 		if (run_size <= arena_maxclass) {
   3052  1.10.2.2  matt 			ret = arena_palloc(choose_arena(), alignment, ceil_size,
   3053  1.10.2.2  matt 			    run_size);
   3054  1.10.2.2  matt 		} else if (alignment <= chunksize)
   3055  1.10.2.2  matt 			ret = huge_malloc(ceil_size);
   3056  1.10.2.2  matt 		else
   3057  1.10.2.2  matt 			ret = huge_palloc(alignment, ceil_size);
   3058  1.10.2.2  matt 	}
   3059  1.10.2.2  matt 
   3060  1.10.2.2  matt 	assert(((uintptr_t)ret & (alignment - 1)) == 0);
   3061  1.10.2.2  matt 	return (ret);
   3062  1.10.2.2  matt }
   3063  1.10.2.2  matt 
   3064  1.10.2.2  matt static void *
   3065  1.10.2.2  matt icalloc(size_t size)
   3066  1.10.2.2  matt {
   3067  1.10.2.2  matt 	void *ret;
   3068  1.10.2.2  matt 
   3069  1.10.2.2  matt 	if (size <= arena_maxclass) {
   3070  1.10.2.2  matt 		ret = arena_malloc(choose_arena(), size);
   3071  1.10.2.2  matt 		if (ret == NULL)
   3072  1.10.2.2  matt 			return (NULL);
   3073  1.10.2.2  matt 		memset(ret, 0, size);
   3074  1.10.2.2  matt 	} else {
   3075  1.10.2.2  matt 		/*
   3076  1.10.2.2  matt 		 * The virtual memory system provides zero-filled pages, so
   3077  1.10.2.2  matt 		 * there is no need to do so manually, unless opt_junk is
   3078  1.10.2.2  matt 		 * enabled, in which case huge_malloc() fills huge allocations
   3079  1.10.2.2  matt 		 * with junk.
   3080  1.10.2.2  matt 		 */
   3081  1.10.2.2  matt 		ret = huge_malloc(size);
   3082  1.10.2.2  matt 		if (ret == NULL)
   3083  1.10.2.2  matt 			return (NULL);
   3084  1.10.2.2  matt 
   3085  1.10.2.2  matt 		if (opt_junk)
   3086  1.10.2.2  matt 			memset(ret, 0, size);
   3087  1.10.2.2  matt #ifdef USE_BRK
   3088  1.10.2.2  matt 		else if ((uintptr_t)ret >= (uintptr_t)brk_base
   3089  1.10.2.2  matt 		    && (uintptr_t)ret < (uintptr_t)brk_max) {
   3090  1.10.2.2  matt 			/*
   3091  1.10.2.2  matt 			 * This may be a re-used brk chunk.  Therefore, zero
   3092  1.10.2.2  matt 			 * the memory.
   3093  1.10.2.2  matt 			 */
   3094  1.10.2.2  matt 			memset(ret, 0, size);
   3095  1.10.2.2  matt 		}
   3096  1.10.2.2  matt #endif
   3097  1.10.2.2  matt 	}
   3098  1.10.2.2  matt 
   3099  1.10.2.2  matt 	return (ret);
   3100  1.10.2.2  matt }
   3101  1.10.2.2  matt 
   3102  1.10.2.2  matt static size_t
   3103  1.10.2.2  matt isalloc(const void *ptr)
   3104  1.10.2.2  matt {
   3105  1.10.2.2  matt 	size_t ret;
   3106  1.10.2.2  matt 	arena_chunk_t *chunk;
   3107  1.10.2.2  matt 
   3108  1.10.2.2  matt 	assert(ptr != NULL);
   3109  1.10.2.2  matt 
   3110  1.10.2.2  matt 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
   3111  1.10.2.2  matt 	if (chunk != ptr) {
   3112  1.10.2.2  matt 		/* Region. */
   3113  1.10.2.2  matt 		assert(chunk->arena->magic == ARENA_MAGIC);
   3114  1.10.2.2  matt 
   3115  1.10.2.2  matt 		ret = arena_salloc(ptr);
   3116  1.10.2.2  matt 	} else {
   3117  1.10.2.2  matt 		chunk_node_t *node, key;
   3118  1.10.2.2  matt 
   3119  1.10.2.2  matt 		/* Chunk (huge allocation). */
   3120  1.10.2.2  matt 
   3121  1.10.2.2  matt 		malloc_mutex_lock(&chunks_mtx);
   3122  1.10.2.2  matt 
    3123  1.10.2.2  matt 		/* Look up the chunk in the tree of huge allocations. */
   3124  1.10.2.2  matt 		key.chunk = __DECONST(void *, ptr);
   3125  1.10.2.2  matt 		/* LINTED */
   3126  1.10.2.2  matt 		node = RB_FIND(chunk_tree_s, &huge, &key);
   3127  1.10.2.2  matt 		assert(node != NULL);
   3128  1.10.2.2  matt 
   3129  1.10.2.2  matt 		ret = node->size;
   3130  1.10.2.2  matt 
   3131  1.10.2.2  matt 		malloc_mutex_unlock(&chunks_mtx);
   3132  1.10.2.2  matt 	}
   3133  1.10.2.2  matt 
   3134  1.10.2.2  matt 	return (ret);
   3135  1.10.2.2  matt }
   3136  1.10.2.2  matt 
   3137  1.10.2.2  matt static void *
   3138  1.10.2.2  matt iralloc(void *ptr, size_t size)
   3139  1.10.2.2  matt {
   3140  1.10.2.2  matt 	void *ret;
   3141  1.10.2.2  matt 	size_t oldsize;
   3142  1.10.2.2  matt 
   3143  1.10.2.2  matt 	assert(ptr != NULL);
   3144  1.10.2.2  matt 	assert(size != 0);
   3145  1.10.2.2  matt 
   3146  1.10.2.2  matt 	oldsize = isalloc(ptr);
   3147  1.10.2.2  matt 
   3148  1.10.2.2  matt 	if (size <= arena_maxclass)
   3149  1.10.2.2  matt 		ret = arena_ralloc(ptr, size, oldsize);
   3150  1.10.2.2  matt 	else
   3151  1.10.2.2  matt 		ret = huge_ralloc(ptr, size, oldsize);
   3152  1.10.2.2  matt 
   3153  1.10.2.2  matt 	return (ret);
   3154  1.10.2.2  matt }
   3155  1.10.2.2  matt 
   3156  1.10.2.2  matt static void
   3157  1.10.2.2  matt idalloc(void *ptr)
   3158  1.10.2.2  matt {
   3159  1.10.2.2  matt 	arena_chunk_t *chunk;
   3160  1.10.2.2  matt 
   3161  1.10.2.2  matt 	assert(ptr != NULL);
   3162  1.10.2.2  matt 
   3163  1.10.2.2  matt 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
   3164  1.10.2.2  matt 	if (chunk != ptr) {
   3165  1.10.2.2  matt 		/* Region. */
   3166  1.10.2.2  matt 		arena_dalloc(chunk->arena, chunk, ptr);
   3167  1.10.2.2  matt 	} else
   3168  1.10.2.2  matt 		huge_dalloc(ptr);
   3169  1.10.2.2  matt }
   3170  1.10.2.2  matt 
   3171  1.10.2.2  matt static void
   3172  1.10.2.2  matt malloc_print_stats(void)
   3173  1.10.2.2  matt {
   3174  1.10.2.2  matt 
   3175  1.10.2.2  matt 	if (opt_print_stats) {
   3176  1.10.2.2  matt 		char s[UMAX2S_BUFSIZE];
   3177  1.10.2.2  matt 		_malloc_message("___ Begin malloc statistics ___\n", "", "",
   3178  1.10.2.2  matt 		    "");
   3179  1.10.2.2  matt 		_malloc_message("Assertions ",
   3180  1.10.2.2  matt #ifdef NDEBUG
   3181  1.10.2.2  matt 		    "disabled",
   3182  1.10.2.2  matt #else
   3183  1.10.2.2  matt 		    "enabled",
   3184  1.10.2.2  matt #endif
   3185  1.10.2.2  matt 		    "\n", "");
   3186  1.10.2.2  matt 		_malloc_message("Boolean MALLOC_OPTIONS: ",
   3187  1.10.2.2  matt 		    opt_abort ? "A" : "a",
   3188  1.10.2.2  matt 		    opt_junk ? "J" : "j",
   3189  1.10.2.2  matt 		    opt_hint ? "H" : "h");
   3190  1.10.2.2  matt 		_malloc_message(opt_utrace ? "PU" : "Pu",
   3191  1.10.2.2  matt 		    opt_sysv ? "V" : "v",
   3192  1.10.2.2  matt 		    opt_xmalloc ? "X" : "x",
   3193  1.10.2.2  matt 		    opt_zero ? "Z\n" : "z\n");
   3194  1.10.2.2  matt 
   3195  1.10.2.2  matt 		_malloc_message("CPUs: ", umax2s(ncpus, s), "\n", "");
   3196  1.10.2.2  matt 		_malloc_message("Max arenas: ", umax2s(narenas, s), "\n", "");
   3197  1.10.2.2  matt 		_malloc_message("Pointer size: ", umax2s(sizeof(void *), s),
   3198  1.10.2.2  matt 		    "\n", "");
   3199  1.10.2.2  matt 		_malloc_message("Quantum size: ", umax2s(quantum, s), "\n", "");
   3200  1.10.2.2  matt 		_malloc_message("Max small size: ", umax2s(small_max, s), "\n",
   3201  1.10.2.2  matt 		    "");
   3202  1.10.2.2  matt 
   3203  1.10.2.2  matt 		_malloc_message("Chunk size: ", umax2s(chunksize, s), "", "");
   3204  1.10.2.2  matt 		_malloc_message(" (2^", umax2s(opt_chunk_2pow, s), ")\n", "");
   3205  1.10.2.2  matt 
   3206  1.10.2.2  matt #ifdef MALLOC_STATS
   3207  1.10.2.2  matt 		{
   3208  1.10.2.2  matt 			size_t allocated, mapped;
   3209  1.10.2.2  matt 			unsigned i;
   3210  1.10.2.2  matt 			arena_t *arena;
   3211  1.10.2.2  matt 
   3212  1.10.2.2  matt 			/* Calculate and print allocated/mapped stats. */
   3213  1.10.2.2  matt 
   3214  1.10.2.2  matt 			/* arenas. */
   3215  1.10.2.2  matt 			for (i = 0, allocated = 0; i < narenas; i++) {
   3216  1.10.2.2  matt 				if (arenas[i] != NULL) {
   3217  1.10.2.2  matt 					malloc_mutex_lock(&arenas[i]->mtx);
   3218  1.10.2.2  matt 					allocated +=
   3219  1.10.2.2  matt 					    arenas[i]->stats.allocated_small;
   3220  1.10.2.2  matt 					allocated +=
   3221  1.10.2.2  matt 					    arenas[i]->stats.allocated_large;
   3222  1.10.2.2  matt 					malloc_mutex_unlock(&arenas[i]->mtx);
   3223  1.10.2.2  matt 				}
   3224  1.10.2.2  matt 			}
   3225  1.10.2.2  matt 
   3226  1.10.2.2  matt 			/* huge/base. */
   3227  1.10.2.2  matt 			malloc_mutex_lock(&chunks_mtx);
   3228  1.10.2.2  matt 			allocated += huge_allocated;
   3229  1.10.2.2  matt 			mapped = stats_chunks.curchunks * chunksize;
   3230  1.10.2.2  matt 			malloc_mutex_unlock(&chunks_mtx);
   3231  1.10.2.2  matt 
   3232  1.10.2.2  matt 			malloc_mutex_lock(&base_mtx);
   3233  1.10.2.2  matt 			mapped += base_mapped;
   3234  1.10.2.2  matt 			malloc_mutex_unlock(&base_mtx);
   3235  1.10.2.2  matt 
   3236  1.10.2.2  matt 			malloc_printf("Allocated: %zu, mapped: %zu\n",
   3237  1.10.2.2  matt 			    allocated, mapped);
   3238  1.10.2.2  matt 
   3239  1.10.2.2  matt 			/* Print chunk stats. */
   3240  1.10.2.2  matt 			{
   3241  1.10.2.2  matt 				chunk_stats_t chunks_stats;
   3242  1.10.2.2  matt 
   3243  1.10.2.2  matt 				malloc_mutex_lock(&chunks_mtx);
   3244  1.10.2.2  matt 				chunks_stats = stats_chunks;
   3245  1.10.2.2  matt 				malloc_mutex_unlock(&chunks_mtx);
   3246  1.10.2.2  matt 
   3247  1.10.2.2  matt 				malloc_printf("chunks: nchunks   "
   3248  1.10.2.2  matt 				    "highchunks    curchunks\n");
   3249  1.10.2.2  matt 				malloc_printf("  %13llu%13lu%13lu\n",
   3250  1.10.2.2  matt 				    chunks_stats.nchunks,
   3251  1.10.2.2  matt 				    chunks_stats.highchunks,
   3252  1.10.2.2  matt 				    chunks_stats.curchunks);
   3253  1.10.2.2  matt 			}
   3254  1.10.2.2  matt 
    3255  1.10.2.2  matt 			/* Print huge allocation stats. */
   3256  1.10.2.2  matt 			malloc_printf(
   3257  1.10.2.2  matt 			    "huge: nmalloc      ndalloc      "
   3258  1.10.2.2  matt 			    "nralloc    allocated\n");
   3259  1.10.2.2  matt 			malloc_printf(" %12llu %12llu %12llu %12zu\n",
   3260  1.10.2.2  matt 			    huge_nmalloc, huge_ndalloc, huge_nralloc,
   3261  1.10.2.2  matt 			    huge_allocated);
   3262  1.10.2.2  matt 
   3263  1.10.2.2  matt 			/* Print stats for each arena. */
   3264  1.10.2.2  matt 			for (i = 0; i < narenas; i++) {
   3265  1.10.2.2  matt 				arena = arenas[i];
   3266  1.10.2.2  matt 				if (arena != NULL) {
   3267  1.10.2.2  matt 					malloc_printf(
   3268  1.10.2.2  matt 					    "\narenas[%u] @ %p\n", i, arena);
   3269  1.10.2.2  matt 					malloc_mutex_lock(&arena->mtx);
   3270  1.10.2.2  matt 					stats_print(arena);
   3271  1.10.2.2  matt 					malloc_mutex_unlock(&arena->mtx);
   3272  1.10.2.2  matt 				}
   3273  1.10.2.2  matt 			}
   3274  1.10.2.2  matt 		}
   3275  1.10.2.2  matt #endif /* #ifdef MALLOC_STATS */
   3276  1.10.2.2  matt 		_malloc_message("--- End malloc statistics ---\n", "", "", "");
   3277  1.10.2.2  matt 	}
   3278  1.10.2.2  matt }
   3279  1.10.2.2  matt 
   3280  1.10.2.2  matt /*
   3281  1.10.2.2  matt  * FreeBSD's pthreads implementation calls malloc(3), so the malloc
   3282  1.10.2.2  matt  * implementation has to take pains to avoid infinite recursion during
   3283  1.10.2.2  matt  * initialization.
   3284  1.10.2.2  matt  */
   3285  1.10.2.2  matt static inline bool
   3286  1.10.2.2  matt malloc_init(void)
   3287  1.10.2.2  matt {
   3288  1.10.2.2  matt 
   3289  1.10.2.2  matt 	if (malloc_initialized == false)
   3290  1.10.2.2  matt 		return (malloc_init_hard());
   3291  1.10.2.2  matt 
   3292  1.10.2.2  matt 	return (false);
   3293  1.10.2.2  matt }
   3294  1.10.2.2  matt 
   3295  1.10.2.2  matt static bool
   3296  1.10.2.2  matt malloc_init_hard(void)
   3297  1.10.2.2  matt {
   3298  1.10.2.2  matt 	unsigned i, j;
   3299  1.10.2.2  matt 	ssize_t linklen;
   3300  1.10.2.2  matt 	char buf[PATH_MAX + 1];
   3301  1.10.2.2  matt 	const char *opts = "";
   3302  1.10.2.2  matt 
   3303  1.10.2.2  matt 	malloc_mutex_lock(&init_lock);
   3304  1.10.2.2  matt 	if (malloc_initialized) {
   3305  1.10.2.2  matt 		/*
   3306  1.10.2.2  matt 		 * Another thread initialized the allocator before this one
   3307  1.10.2.2  matt 		 * acquired init_lock.
   3308  1.10.2.2  matt 		 */
   3309  1.10.2.2  matt 		malloc_mutex_unlock(&init_lock);
   3310  1.10.2.2  matt 		return (false);
   3311  1.10.2.2  matt 	}
   3312  1.10.2.2  matt 
   3313  1.10.2.2  matt 	/* Get number of CPUs. */
   3314  1.10.2.2  matt 	{
   3315  1.10.2.2  matt 		int mib[2];
   3316  1.10.2.2  matt 		size_t len;
   3317  1.10.2.2  matt 
   3318  1.10.2.2  matt 		mib[0] = CTL_HW;
   3319  1.10.2.2  matt 		mib[1] = HW_NCPU;
   3320  1.10.2.2  matt 		len = sizeof(ncpus);
   3321  1.10.2.2  matt 		if (sysctl(mib, 2, &ncpus, &len, (void *) 0, 0) == -1) {
   3322  1.10.2.2  matt 			/* Error. */
   3323  1.10.2.2  matt 			ncpus = 1;
   3324  1.10.2.2  matt 		}
   3325  1.10.2.2  matt 	}
   3326  1.10.2.2  matt 
   3327  1.10.2.2  matt 	/* Get page size. */
   3328  1.10.2.2  matt 	{
   3329  1.10.2.2  matt 		long result;
   3330  1.10.2.2  matt 
   3331  1.10.2.2  matt 		result = sysconf(_SC_PAGESIZE);
   3332  1.10.2.2  matt 		assert(result != -1);
   3333  1.10.2.2  matt 		pagesize = (unsigned) result;
   3334  1.10.2.2  matt 
   3335  1.10.2.2  matt 		/*
   3336  1.10.2.2  matt 		 * We assume that pagesize is a power of 2 when calculating
   3337  1.10.2.2  matt 		 * pagesize_mask and pagesize_2pow.
   3338  1.10.2.2  matt 		 */
   3339  1.10.2.2  matt 		assert(((result - 1) & result) == 0);
   3340  1.10.2.2  matt 		pagesize_mask = result - 1;
   3341  1.10.2.2  matt 		pagesize_2pow = ffs((int)result) - 1;
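		/*
		 * For example, a 4096 byte page gives pagesize_mask = 0xfff
		 * and pagesize_2pow = 12.
		 */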
   3342  1.10.2.2  matt 	}
   3343  1.10.2.2  matt 
   3344  1.10.2.2  matt 	for (i = 0; i < 3; i++) {
   3345  1.10.2.2  matt 		/* Get runtime configuration. */
   3346  1.10.2.2  matt 		switch (i) {
   3347  1.10.2.2  matt 		case 0:
   3348  1.10.2.2  matt 			if ((linklen = readlink("/etc/malloc.conf", buf,
   3349  1.10.2.2  matt 						sizeof(buf) - 1)) != -1) {
   3350  1.10.2.2  matt 				/*
    3351  1.10.2.2  matt 				 * Use the target of the "/etc/malloc.conf"
    3352  1.10.2.2  matt 				 * symbolic link as the options string.
   3353  1.10.2.2  matt 				 */
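				/*
				 * For example (hypothetical system setup),
				 * "ln -s 'J' /etc/malloc.conf" makes the
				 * link target, the string "J", the options
				 * read here.
				 */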
   3354  1.10.2.2  matt 				buf[linklen] = '\0';
   3355  1.10.2.2  matt 				opts = buf;
   3356  1.10.2.2  matt 			} else {
   3357  1.10.2.2  matt 				/* No configuration specified. */
   3358  1.10.2.2  matt 				buf[0] = '\0';
   3359  1.10.2.2  matt 				opts = buf;
   3360  1.10.2.2  matt 			}
   3361  1.10.2.2  matt 			break;
   3362  1.10.2.2  matt 		case 1:
   3363  1.10.2.2  matt 			if (issetugid() == 0 && (opts =
   3364  1.10.2.2  matt 			    getenv("MALLOC_OPTIONS")) != NULL) {
   3365  1.10.2.2  matt 				/*
   3366  1.10.2.2  matt 				 * Do nothing; opts is already initialized to
   3367  1.10.2.2  matt 				 * the value of the MALLOC_OPTIONS environment
   3368  1.10.2.2  matt 				 * variable.
   3369  1.10.2.2  matt 				 */
   3370  1.10.2.2  matt 			} else {
   3371  1.10.2.2  matt 				/* No configuration specified. */
   3372  1.10.2.2  matt 				buf[0] = '\0';
   3373  1.10.2.2  matt 				opts = buf;
   3374  1.10.2.2  matt 			}
   3375  1.10.2.2  matt 			break;
   3376  1.10.2.2  matt 		case 2:
   3377  1.10.2.2  matt 			if (_malloc_options != NULL) {
    3378  1.10.2.2  matt 				/*
    3379  1.10.2.2  matt 				 * Use options that were compiled into the program.
    3380  1.10.2.2  matt 				 */
    3381  1.10.2.2  matt 				opts = _malloc_options;
   3382  1.10.2.2  matt 			} else {
   3383  1.10.2.2  matt 				/* No configuration specified. */
   3384  1.10.2.2  matt 				buf[0] = '\0';
   3385  1.10.2.2  matt 				opts = buf;
   3386  1.10.2.2  matt 			}
   3387  1.10.2.2  matt 			break;
   3388  1.10.2.2  matt 		default:
   3389  1.10.2.2  matt 			/* NOTREACHED */
   3390  1.10.2.2  matt 			/* LINTED */
   3391  1.10.2.2  matt 			assert(false);
   3392  1.10.2.2  matt 		}
   3393  1.10.2.2  matt 
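		/*
		 * Each character below simply sets or clears one flag, so a
		 * letter seen later overrides an earlier one; e.g. "AJ"
		 * enables abort-on-error and junk filling, and a "j" from a
		 * later source turns junk filling back off.
		 */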
   3394  1.10.2.2  matt 		for (j = 0; opts[j] != '\0'; j++) {
   3395  1.10.2.2  matt 			switch (opts[j]) {
   3396  1.10.2.2  matt 			case 'a':
   3397  1.10.2.2  matt 				opt_abort = false;
   3398  1.10.2.2  matt 				break;
   3399  1.10.2.2  matt 			case 'A':
   3400  1.10.2.2  matt 				opt_abort = true;
   3401  1.10.2.2  matt 				break;
   3402  1.10.2.2  matt 			case 'h':
   3403  1.10.2.2  matt 				opt_hint = false;
   3404  1.10.2.2  matt 				break;
   3405  1.10.2.2  matt 			case 'H':
   3406  1.10.2.2  matt 				opt_hint = true;
   3407  1.10.2.2  matt 				break;
   3408  1.10.2.2  matt 			case 'j':
   3409  1.10.2.2  matt 				opt_junk = false;
   3410  1.10.2.2  matt 				break;
   3411  1.10.2.2  matt 			case 'J':
   3412  1.10.2.2  matt 				opt_junk = true;
   3413  1.10.2.2  matt 				break;
   3414  1.10.2.2  matt 			case 'k':
   3415  1.10.2.2  matt 				/*
   3416  1.10.2.2  matt 				 * Chunks always require at least one header
   3417  1.10.2.2  matt 				 * page, so chunks can never be smaller than
   3418  1.10.2.2  matt 				 * two pages.
   3419  1.10.2.2  matt 				 */
   3420  1.10.2.2  matt 				if (opt_chunk_2pow > pagesize_2pow + 1)
   3421  1.10.2.2  matt 					opt_chunk_2pow--;
   3422  1.10.2.2  matt 				break;
   3423  1.10.2.2  matt 			case 'K':
   3424  1.10.2.2  matt 				/*
   3425  1.10.2.2  matt 				 * There must be fewer pages in a chunk than
   3426  1.10.2.2  matt 				 * can be recorded by the pos field of
   3427  1.10.2.2  matt 				 * arena_chunk_map_t, in order to make POS_FREE
   3428  1.10.2.2  matt 				 * special.
   3429  1.10.2.2  matt 				 */
   3430  1.10.2.2  matt 				if (opt_chunk_2pow - pagesize_2pow
   3431  1.10.2.2  matt 				    < (sizeof(uint32_t) << 3) - 1)
   3432  1.10.2.2  matt 					opt_chunk_2pow++;
   3433  1.10.2.2  matt 				break;
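				/*
				 * The guard stops once a chunk would span
				 * 2^31 pages: a uint32_t pos can encode 2^32
				 * distinct values, so capping the page count
				 * at 2^31 leaves spare encodings for the
				 * POS_FREE sentinel.
				 */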
   3434  1.10.2.2  matt 			case 'n':
   3435  1.10.2.2  matt 				opt_narenas_lshift--;
   3436  1.10.2.2  matt 				break;
   3437  1.10.2.2  matt 			case 'N':
   3438  1.10.2.2  matt 				opt_narenas_lshift++;
   3439  1.10.2.2  matt 				break;
   3440  1.10.2.2  matt 			case 'p':
   3441  1.10.2.2  matt 				opt_print_stats = false;
   3442  1.10.2.2  matt 				break;
   3443  1.10.2.2  matt 			case 'P':
   3444  1.10.2.2  matt 				opt_print_stats = true;
   3445  1.10.2.2  matt 				break;
   3446  1.10.2.2  matt 			case 'q':
   3447  1.10.2.2  matt 				if (opt_quantum_2pow > QUANTUM_2POW_MIN)
   3448  1.10.2.2  matt 					opt_quantum_2pow--;
   3449  1.10.2.2  matt 				break;
   3450  1.10.2.2  matt 			case 'Q':
   3451  1.10.2.2  matt 				if (opt_quantum_2pow < pagesize_2pow - 1)
   3452  1.10.2.2  matt 					opt_quantum_2pow++;
   3453  1.10.2.2  matt 				break;
   3454  1.10.2.2  matt 			case 's':
   3455  1.10.2.2  matt 				if (opt_small_max_2pow > QUANTUM_2POW_MIN)
   3456  1.10.2.2  matt 					opt_small_max_2pow--;
   3457  1.10.2.2  matt 				break;
   3458  1.10.2.2  matt 			case 'S':
   3459  1.10.2.2  matt 				if (opt_small_max_2pow < pagesize_2pow - 1)
   3460  1.10.2.2  matt 					opt_small_max_2pow++;
   3461  1.10.2.2  matt 				break;
   3462  1.10.2.2  matt 			case 'u':
   3463  1.10.2.2  matt 				opt_utrace = false;
   3464  1.10.2.2  matt 				break;
   3465  1.10.2.2  matt 			case 'U':
   3466  1.10.2.2  matt 				opt_utrace = true;
   3467  1.10.2.2  matt 				break;
   3468  1.10.2.2  matt 			case 'v':
   3469  1.10.2.2  matt 				opt_sysv = false;
   3470  1.10.2.2  matt 				break;
   3471  1.10.2.2  matt 			case 'V':
   3472  1.10.2.2  matt 				opt_sysv = true;
   3473  1.10.2.2  matt 				break;
   3474  1.10.2.2  matt 			case 'x':
   3475  1.10.2.2  matt 				opt_xmalloc = false;
   3476  1.10.2.2  matt 				break;
   3477  1.10.2.2  matt 			case 'X':
   3478  1.10.2.2  matt 				opt_xmalloc = true;
   3479  1.10.2.2  matt 				break;
   3480  1.10.2.2  matt 			case 'z':
   3481  1.10.2.2  matt 				opt_zero = false;
   3482  1.10.2.2  matt 				break;
   3483  1.10.2.2  matt 			case 'Z':
   3484  1.10.2.2  matt 				opt_zero = true;
   3485  1.10.2.2  matt 				break;
   3486  1.10.2.2  matt 			default: {
   3487  1.10.2.2  matt 				char cbuf[2];
   3488  1.10.2.2  matt 
   3489  1.10.2.2  matt 				cbuf[0] = opts[j];
   3490  1.10.2.2  matt 				cbuf[1] = '\0';
   3491  1.10.2.3  matt 				_malloc_message(getprogname(),
   3492  1.10.2.2  matt 				    ": (malloc) Unsupported character in "
   3493  1.10.2.2  matt 				    "malloc options: '", cbuf, "'\n");
   3494  1.10.2.2  matt 			}
   3495  1.10.2.2  matt 			}
   3496  1.10.2.2  matt 		}
   3497  1.10.2.2  matt 	}
   3498  1.10.2.2  matt 
   3499  1.10.2.2  matt 	/* Take care to call atexit() only once. */
   3500  1.10.2.2  matt 	if (opt_print_stats) {
   3501  1.10.2.2  matt 		/* Print statistics at exit. */
   3502  1.10.2.2  matt 		atexit(malloc_print_stats);
   3503  1.10.2.2  matt 	}
   3504  1.10.2.2  matt 
   3505  1.10.2.2  matt 	/* Set variables according to the value of opt_small_max_2pow. */
   3506  1.10.2.2  matt 	if (opt_small_max_2pow < opt_quantum_2pow)
   3507  1.10.2.2  matt 		opt_small_max_2pow = opt_quantum_2pow;
   3508  1.10.2.2  matt 	small_max = (1 << opt_small_max_2pow);
   3509  1.10.2.2  matt 
   3510  1.10.2.2  matt 	/* Set bin-related variables. */
   3511  1.10.2.2  matt 	bin_maxclass = (pagesize >> 1);
   3512  1.10.2.2  matt 	assert(opt_quantum_2pow >= TINY_MIN_2POW);
   3513  1.10.2.2  matt 	ntbins = (unsigned)(opt_quantum_2pow - TINY_MIN_2POW);
   3514  1.10.2.2  matt 	assert(ntbins <= opt_quantum_2pow);
   3515  1.10.2.2  matt 	nqbins = (unsigned)(small_max >> opt_quantum_2pow);
   3516  1.10.2.2  matt 	nsbins = (unsigned)(pagesize_2pow - opt_small_max_2pow - 1);
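	/*
	 * For example, with 4 kB pages (pagesize_2pow == 12), a 16-byte
	 * quantum (opt_quantum_2pow == 4) and small_max == 512
	 * (opt_small_max_2pow == 9), this yields nqbins == 512 / 16 == 32
	 * quantum-spaced bins and nsbins == 12 - 9 - 1 == 2 power-of-two
	 * sub-page bins (1024 and 2048 bytes, up to bin_maxclass); ntbins
	 * counts the power-of-two tiny bins below the quantum.
	 */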
   3517  1.10.2.2  matt 
   3518  1.10.2.2  matt 	/* Set variables according to the value of opt_quantum_2pow. */
   3519  1.10.2.2  matt 	quantum = (1 << opt_quantum_2pow);
   3520  1.10.2.2  matt 	quantum_mask = quantum - 1;
   3521  1.10.2.2  matt 	if (ntbins > 0)
   3522  1.10.2.2  matt 		small_min = (quantum >> 1) + 1;
   3523  1.10.2.2  matt 	else
   3524  1.10.2.2  matt 		small_min = 1;
   3525  1.10.2.2  matt 	assert(small_min <= quantum);
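	/*
	 * For example, with ntbins > 0 and a 16-byte quantum, small_min is 9:
	 * requests below small_min can be served from the tiny (power-of-two)
	 * bins, while sizes from 9 up to the quantum round up to a full
	 * quantum.
	 */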
   3526  1.10.2.2  matt 
   3527  1.10.2.2  matt 	/* Set variables according to the value of opt_chunk_2pow. */
   3528  1.10.2.2  matt 	chunksize = (1LU << opt_chunk_2pow);
   3529  1.10.2.2  matt 	chunksize_mask = chunksize - 1;
   3530  1.10.2.2  matt 	chunksize_2pow = (unsigned)opt_chunk_2pow;
   3531  1.10.2.2  matt 	chunk_npages = (unsigned)(chunksize >> pagesize_2pow);
   3532  1.10.2.2  matt 	{
   3533  1.10.2.2  matt 		unsigned header_size;
   3534  1.10.2.2  matt 
   3535  1.10.2.2  matt 		header_size = (unsigned)(sizeof(arena_chunk_t) +
   3536  1.10.2.2  matt 		    (sizeof(arena_chunk_map_t) * (chunk_npages - 1)));
   3537  1.10.2.2  matt 		arena_chunk_header_npages = (header_size >> pagesize_2pow);
   3538  1.10.2.2  matt 		if ((header_size & pagesize_mask) != 0)
   3539  1.10.2.2  matt 			arena_chunk_header_npages++;
   3540  1.10.2.2  matt 	}
   3541  1.10.2.2  matt 	arena_maxclass = chunksize - (arena_chunk_header_npages <<
   3542  1.10.2.2  matt 	    pagesize_2pow);
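	/*
	 * For example, with a 1 MB chunk and 4 kB pages, chunk_npages is 256;
	 * if the arena_chunk_t header plus the 255 trailing map entries fit
	 * within a single page, arena_chunk_header_npages is 1 and
	 * arena_maxclass is 1 MB - 4 kB.
	 */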
   3543  1.10.2.2  matt 
   3544  1.10.2.2  matt 	UTRACE(0, 0, 0);
   3545  1.10.2.2  matt 
   3546  1.10.2.2  matt #ifdef MALLOC_STATS
   3547  1.10.2.2  matt 	memset(&stats_chunks, 0, sizeof(chunk_stats_t));
   3548  1.10.2.2  matt #endif
   3549  1.10.2.2  matt 
    3550  1.10.2.2  matt 	/* Various configuration-related sanity checks. */
   3551  1.10.2.2  matt 	assert(quantum >= sizeof(void *));
   3552  1.10.2.2  matt 	assert(quantum <= pagesize);
   3553  1.10.2.2  matt 	assert(chunksize >= pagesize);
   3554  1.10.2.2  matt 	assert(quantum * 4 <= chunksize);
   3555  1.10.2.2  matt 
   3556  1.10.2.2  matt 	/* Initialize chunks data. */
   3557  1.10.2.2  matt 	malloc_mutex_init(&chunks_mtx);
   3558  1.10.2.2  matt 	RB_INIT(&huge);
   3559  1.10.2.2  matt #ifdef USE_BRK
   3560  1.10.2.2  matt 	malloc_mutex_init(&brk_mtx);
   3561  1.10.2.2  matt 	brk_base = sbrk(0);
   3562  1.10.2.2  matt 	brk_prev = brk_base;
   3563  1.10.2.2  matt 	brk_max = brk_base;
   3564  1.10.2.2  matt #endif
   3565  1.10.2.2  matt #ifdef MALLOC_STATS
   3566  1.10.2.2  matt 	huge_nmalloc = 0;
   3567  1.10.2.2  matt 	huge_ndalloc = 0;
   3568  1.10.2.2  matt 	huge_nralloc = 0;
   3569  1.10.2.2  matt 	huge_allocated = 0;
   3570  1.10.2.2  matt #endif
   3571  1.10.2.2  matt 	RB_INIT(&old_chunks);
   3572  1.10.2.2  matt 
   3573  1.10.2.2  matt 	/* Initialize base allocation data structures. */
   3574  1.10.2.2  matt #ifdef MALLOC_STATS
   3575  1.10.2.2  matt 	base_mapped = 0;
   3576  1.10.2.2  matt #endif
   3577  1.10.2.2  matt #ifdef USE_BRK
   3578  1.10.2.2  matt 	/*
   3579  1.10.2.2  matt 	 * Allocate a base chunk here, since it doesn't actually have to be
   3580  1.10.2.2  matt 	 * chunk-aligned.  Doing this before allocating any other chunks allows
   3581  1.10.2.2  matt 	 * the use of space that would otherwise be wasted.
   3582  1.10.2.2  matt 	 */
   3583  1.10.2.2  matt 	base_pages_alloc(0);
   3584  1.10.2.2  matt #endif
   3585  1.10.2.2  matt 	base_chunk_nodes = NULL;
   3586  1.10.2.2  matt 	malloc_mutex_init(&base_mtx);
   3587  1.10.2.2  matt 
   3588  1.10.2.2  matt 	if (ncpus > 1) {
   3589  1.10.2.2  matt 		/*
   3590  1.10.2.2  matt 		 * For SMP systems, create four times as many arenas as there
   3591  1.10.2.2  matt 		 * are CPUs by default.
   3592  1.10.2.2  matt 		 */
   3593  1.10.2.2  matt 		opt_narenas_lshift += 2;
   3594  1.10.2.2  matt 	}
   3595  1.10.2.2  matt 
   3596  1.10.2.2  matt #ifdef NO_TLS
   3597  1.10.2.2  matt 	/* Initialize arena key. */
   3598  1.10.2.2  matt 	(void)thr_keycreate(&arenas_map_key, NULL);
   3599  1.10.2.2  matt #endif
   3600  1.10.2.2  matt 
   3601  1.10.2.2  matt 	/* Determine how many arenas to use. */
   3602  1.10.2.2  matt 	narenas = ncpus;
   3603  1.10.2.2  matt 	if (opt_narenas_lshift > 0) {
   3604  1.10.2.2  matt 		if ((narenas << opt_narenas_lshift) > narenas)
   3605  1.10.2.2  matt 			narenas <<= opt_narenas_lshift;
   3606  1.10.2.2  matt 		/*
   3607  1.10.2.2  matt 		 * Make sure not to exceed the limits of what base_malloc()
    3608  1.10.2.2  matt 		 * Make sure not to exceed the limits of what base_alloc()
   3609  1.10.2.2  matt 		 */
   3610  1.10.2.2  matt 		if (narenas * sizeof(arena_t *) > chunksize)
   3611  1.10.2.2  matt 			narenas = (unsigned)(chunksize / sizeof(arena_t *));
   3612  1.10.2.2  matt 	} else if (opt_narenas_lshift < 0) {
    3613  1.10.2.2  matt 		if ((narenas >> -opt_narenas_lshift) < narenas)
    3614  1.10.2.2  matt 			narenas >>= -opt_narenas_lshift;
   3615  1.10.2.2  matt 		/* Make sure there is at least one arena. */
   3616  1.10.2.2  matt 		if (narenas == 0)
   3617  1.10.2.2  matt 			narenas = 1;
   3618  1.10.2.2  matt 	}
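	/*
	 * For example, on a 4-CPU machine with no 'n'/'N' options, the SMP
	 * adjustment above leaves opt_narenas_lshift at 2 (assuming its
	 * default value of 0), so narenas becomes 4 << 2 == 16, subject to
	 * the cap that keeps the arenas array within a single base
	 * allocation.  A negative opt_narenas_lshift instead shifts narenas
	 * right (a shift by a negative count would be undefined), but never
	 * below one arena.
	 */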
   3619  1.10.2.2  matt 
   3620  1.10.2.2  matt 	next_arena = 0;
   3621  1.10.2.2  matt 
   3622  1.10.2.2  matt 	/* Allocate and initialize arenas. */
   3623  1.10.2.2  matt 	arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
   3624  1.10.2.2  matt 	if (arenas == NULL) {
   3625  1.10.2.2  matt 		malloc_mutex_unlock(&init_lock);
   3626  1.10.2.2  matt 		return (true);
   3627  1.10.2.2  matt 	}
   3628  1.10.2.2  matt 	/*
   3629  1.10.2.2  matt 	 * Zero the array.  In practice, this should always be pre-zeroed,
   3630  1.10.2.2  matt 	 * since it was just mmap()ed, but let's be sure.
   3631  1.10.2.2  matt 	 */
   3632  1.10.2.2  matt 	memset(arenas, 0, sizeof(arena_t *) * narenas);
   3633  1.10.2.2  matt 
   3634  1.10.2.2  matt 	/*
   3635  1.10.2.2  matt 	 * Initialize one arena here.  The rest are lazily created in
   3636  1.10.2.2  matt 	 * arena_choose_hard().
   3637  1.10.2.2  matt 	 */
   3638  1.10.2.2  matt 	arenas_extend(0);
   3639  1.10.2.2  matt 	if (arenas[0] == NULL) {
   3640  1.10.2.2  matt 		malloc_mutex_unlock(&init_lock);
   3641  1.10.2.2  matt 		return (true);
   3642  1.10.2.2  matt 	}
   3643  1.10.2.2  matt 
   3644  1.10.2.2  matt 	malloc_mutex_init(&arenas_mtx);
   3645  1.10.2.2  matt 
   3646  1.10.2.2  matt 	malloc_initialized = true;
   3647  1.10.2.2  matt 	malloc_mutex_unlock(&init_lock);
   3648  1.10.2.2  matt 	return (false);
   3649  1.10.2.2  matt }
   3650  1.10.2.2  matt 
   3651  1.10.2.2  matt /*
   3652  1.10.2.2  matt  * End general internal functions.
   3653  1.10.2.2  matt  */
   3654  1.10.2.2  matt /******************************************************************************/
   3655  1.10.2.2  matt /*
   3656  1.10.2.2  matt  * Begin malloc(3)-compatible functions.
   3657  1.10.2.2  matt  */
   3658  1.10.2.2  matt 
   3659  1.10.2.2  matt void *
   3660  1.10.2.2  matt malloc(size_t size)
   3661  1.10.2.2  matt {
   3662  1.10.2.2  matt 	void *ret;
   3663  1.10.2.2  matt 
   3664  1.10.2.2  matt 	if (malloc_init()) {
   3665  1.10.2.2  matt 		ret = NULL;
   3666  1.10.2.2  matt 		goto RETURN;
   3667  1.10.2.2  matt 	}
   3668  1.10.2.2  matt 
   3669  1.10.2.2  matt 	if (size == 0) {
   3670  1.10.2.2  matt 		if (opt_sysv == false)
   3671  1.10.2.2  matt 			size = 1;
   3672  1.10.2.2  matt 		else {
   3673  1.10.2.2  matt 			ret = NULL;
   3674  1.10.2.2  matt 			goto RETURN;
   3675  1.10.2.2  matt 		}
   3676  1.10.2.2  matt 	}
   3677  1.10.2.2  matt 
   3678  1.10.2.2  matt 	ret = imalloc(size);
   3679  1.10.2.2  matt 
   3680  1.10.2.2  matt RETURN:
   3681  1.10.2.2  matt 	if (ret == NULL) {
   3682  1.10.2.2  matt 		if (opt_xmalloc) {
   3683  1.10.2.3  matt 			_malloc_message(getprogname(),
   3684  1.10.2.2  matt 			    ": (malloc) Error in malloc(): out of memory\n", "",
   3685  1.10.2.2  matt 			    "");
   3686  1.10.2.2  matt 			abort();
   3687  1.10.2.2  matt 		}
   3688  1.10.2.2  matt 		errno = ENOMEM;
   3689  1.10.2.2  matt 	}
   3690  1.10.2.2  matt 
   3691  1.10.2.2  matt 	UTRACE(0, size, ret);
   3692  1.10.2.2  matt 	return (ret);
   3693  1.10.2.2  matt }
   3694  1.10.2.2  matt 
   3695  1.10.2.2  matt int
   3696  1.10.2.2  matt posix_memalign(void **memptr, size_t alignment, size_t size)
   3697  1.10.2.2  matt {
   3698  1.10.2.2  matt 	int ret;
   3699  1.10.2.2  matt 	void *result;
   3700  1.10.2.2  matt 
   3701  1.10.2.2  matt 	if (malloc_init())
   3702  1.10.2.2  matt 		result = NULL;
   3703  1.10.2.2  matt 	else {
   3704  1.10.2.2  matt 		/* Make sure that alignment is a large enough power of 2. */
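		/*
		 * A power of two has exactly one bit set, so subtracting one
		 * clears that bit and the AND below is zero; e.g. 16 & 15 ==
		 * 0, whereas 24 & 23 == 16 != 0.
		 */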
   3705  1.10.2.2  matt 		if (((alignment - 1) & alignment) != 0
   3706  1.10.2.2  matt 		    || alignment < sizeof(void *)) {
   3707  1.10.2.2  matt 			if (opt_xmalloc) {
   3708  1.10.2.3  matt 				_malloc_message(getprogname(),
   3709  1.10.2.2  matt 				    ": (malloc) Error in posix_memalign(): "
   3710  1.10.2.2  matt 				    "invalid alignment\n", "", "");
   3711  1.10.2.2  matt 				abort();
   3712  1.10.2.2  matt 			}
   3713  1.10.2.2  matt 			result = NULL;
   3714  1.10.2.2  matt 			ret = EINVAL;
   3715  1.10.2.2  matt 			goto RETURN;
   3716  1.10.2.2  matt 		}
   3717  1.10.2.2  matt 
   3718  1.10.2.2  matt 		result = ipalloc(alignment, size);
   3719  1.10.2.2  matt 	}
   3720  1.10.2.2  matt 
   3721  1.10.2.2  matt 	if (result == NULL) {
   3722  1.10.2.2  matt 		if (opt_xmalloc) {
   3723  1.10.2.3  matt 			_malloc_message(getprogname(),
   3724  1.10.2.2  matt 			": (malloc) Error in posix_memalign(): out of memory\n",
   3725  1.10.2.2  matt 			"", "");
   3726  1.10.2.2  matt 			abort();
   3727  1.10.2.2  matt 		}
   3728  1.10.2.2  matt 		ret = ENOMEM;
   3729  1.10.2.2  matt 		goto RETURN;
   3730  1.10.2.2  matt 	}
   3731  1.10.2.2  matt 
   3732  1.10.2.2  matt 	*memptr = result;
   3733  1.10.2.2  matt 	ret = 0;
   3734  1.10.2.2  matt 
   3735  1.10.2.2  matt RETURN:
   3736  1.10.2.2  matt 	UTRACE(0, size, result);
   3737  1.10.2.2  matt 	return (ret);
   3738  1.10.2.2  matt }
   3739  1.10.2.2  matt 
   3740  1.10.2.2  matt void *
   3741  1.10.2.2  matt calloc(size_t num, size_t size)
   3742  1.10.2.2  matt {
   3743  1.10.2.2  matt 	void *ret;
   3744  1.10.2.2  matt 	size_t num_size;
   3745  1.10.2.2  matt 
   3746  1.10.2.2  matt 	if (malloc_init()) {
   3747  1.10.2.2  matt 		num_size = 0;
   3748  1.10.2.2  matt 		ret = NULL;
   3749  1.10.2.2  matt 		goto RETURN;
   3750  1.10.2.2  matt 	}
   3751  1.10.2.2  matt 
   3752  1.10.2.2  matt 	num_size = num * size;
   3753  1.10.2.2  matt 	if (num_size == 0) {
   3754  1.10.2.2  matt 		if ((opt_sysv == false) && ((num == 0) || (size == 0)))
   3755  1.10.2.2  matt 			num_size = 1;
   3756  1.10.2.2  matt 		else {
   3757  1.10.2.2  matt 			ret = NULL;
   3758  1.10.2.2  matt 			goto RETURN;
   3759  1.10.2.2  matt 		}
   3760  1.10.2.2  matt 	/*
   3761  1.10.2.2  matt 	 * Try to avoid division here.  We know that it isn't possible to
   3762  1.10.2.2  matt 	 * overflow during multiplication if neither operand uses any of the
   3763  1.10.2.2  matt 	 * most significant half of the bits in a size_t.
   3764  1.10.2.2  matt 	 */
   3765  1.10.2.2  matt 	} else if ((unsigned long long)((num | size) &
   3766  1.10.2.2  matt 	   ((unsigned long long)SIZE_T_MAX << (sizeof(size_t) << 2))) &&
   3767  1.10.2.2  matt 	   (num_size / size != num)) {
   3768  1.10.2.2  matt 		/* size_t overflow. */
   3769  1.10.2.2  matt 		ret = NULL;
   3770  1.10.2.2  matt 		goto RETURN;
   3771  1.10.2.2  matt 	}
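	/*
	 * For example, with a 32-bit size_t the mask above selects the upper
	 * 16 bits of each operand: if (num | size) has none of them set, both
	 * values are below 2^16, the product cannot wrap, and the division is
	 * skipped.  Otherwise num_size / size == num confirms that the
	 * multiplication did not overflow.
	 */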
   3772  1.10.2.2  matt 
   3773  1.10.2.2  matt 	ret = icalloc(num_size);
   3774  1.10.2.2  matt 
   3775  1.10.2.2  matt RETURN:
   3776  1.10.2.2  matt 	if (ret == NULL) {
   3777  1.10.2.2  matt 		if (opt_xmalloc) {
   3778  1.10.2.3  matt 			_malloc_message(getprogname(),
   3779  1.10.2.2  matt 			    ": (malloc) Error in calloc(): out of memory\n", "",
   3780  1.10.2.2  matt 			    "");
   3781  1.10.2.2  matt 			abort();
   3782  1.10.2.2  matt 		}
   3783  1.10.2.2  matt 		errno = ENOMEM;
   3784  1.10.2.2  matt 	}
   3785  1.10.2.2  matt 
   3786  1.10.2.2  matt 	UTRACE(0, num_size, ret);
   3787  1.10.2.2  matt 	return (ret);
   3788  1.10.2.2  matt }
   3789  1.10.2.2  matt 
   3790  1.10.2.2  matt void *
   3791  1.10.2.2  matt realloc(void *ptr, size_t size)
   3792  1.10.2.2  matt {
   3793  1.10.2.2  matt 	void *ret;
   3794  1.10.2.2  matt 
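	/*
	 * In SysV ('V') mode a zero-size realloc() frees ptr and returns
	 * NULL; otherwise a zero size is treated as a one-byte request.
	 */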
   3795  1.10.2.2  matt 	if (size == 0) {
   3796  1.10.2.2  matt 		if (opt_sysv == false)
   3797  1.10.2.2  matt 			size = 1;
   3798  1.10.2.2  matt 		else {
   3799  1.10.2.2  matt 			if (ptr != NULL)
   3800  1.10.2.2  matt 				idalloc(ptr);
   3801  1.10.2.2  matt 			ret = NULL;
   3802  1.10.2.2  matt 			goto RETURN;
   3803  1.10.2.2  matt 		}
   3804  1.10.2.2  matt 	}
   3805  1.10.2.2  matt 
   3806  1.10.2.2  matt 	if (ptr != NULL) {
   3807  1.10.2.2  matt 		assert(malloc_initialized);
   3808  1.10.2.2  matt 
   3809  1.10.2.2  matt 		ret = iralloc(ptr, size);
   3810  1.10.2.2  matt 
   3811  1.10.2.2  matt 		if (ret == NULL) {
   3812  1.10.2.2  matt 			if (opt_xmalloc) {
   3813  1.10.2.3  matt 				_malloc_message(getprogname(),
   3814  1.10.2.2  matt 				    ": (malloc) Error in realloc(): out of "
   3815  1.10.2.2  matt 				    "memory\n", "", "");
   3816  1.10.2.2  matt 				abort();
   3817  1.10.2.2  matt 			}
   3818  1.10.2.2  matt 			errno = ENOMEM;
   3819  1.10.2.2  matt 		}
   3820  1.10.2.2  matt 	} else {
   3821  1.10.2.2  matt 		if (malloc_init())
   3822  1.10.2.2  matt 			ret = NULL;
   3823  1.10.2.2  matt 		else
   3824  1.10.2.2  matt 			ret = imalloc(size);
   3825  1.10.2.2  matt 
   3826  1.10.2.2  matt 		if (ret == NULL) {
   3827  1.10.2.2  matt 			if (opt_xmalloc) {
   3828  1.10.2.3  matt 				_malloc_message(getprogname(),
   3829  1.10.2.2  matt 				    ": (malloc) Error in realloc(): out of "
   3830  1.10.2.2  matt 				    "memory\n", "", "");
   3831  1.10.2.2  matt 				abort();
   3832  1.10.2.2  matt 			}
   3833  1.10.2.2  matt 			errno = ENOMEM;
   3834  1.10.2.2  matt 		}
   3835  1.10.2.2  matt 	}
   3836  1.10.2.2  matt 
   3837  1.10.2.2  matt RETURN:
   3838  1.10.2.2  matt 	UTRACE(ptr, size, ret);
   3839  1.10.2.2  matt 	return (ret);
   3840  1.10.2.2  matt }
   3841  1.10.2.2  matt 
   3842  1.10.2.2  matt void
   3843  1.10.2.2  matt free(void *ptr)
   3844  1.10.2.2  matt {
   3845  1.10.2.2  matt 
   3846  1.10.2.2  matt 	UTRACE(ptr, 0, 0);
   3847  1.10.2.2  matt 	if (ptr != NULL) {
   3848  1.10.2.2  matt 		assert(malloc_initialized);
   3849  1.10.2.2  matt 
   3850  1.10.2.2  matt 		idalloc(ptr);
   3851  1.10.2.2  matt 	}
   3852  1.10.2.2  matt }
   3853  1.10.2.2  matt 
   3854  1.10.2.2  matt /*
   3855  1.10.2.2  matt  * End malloc(3)-compatible functions.
   3856  1.10.2.2  matt  */
   3857  1.10.2.2  matt /******************************************************************************/
   3858  1.10.2.2  matt /*
   3859  1.10.2.2  matt  * Begin non-standard functions.
   3860  1.10.2.2  matt  */
   3861  1.10.2.2  matt #ifndef __NetBSD__
   3862  1.10.2.2  matt size_t
   3863  1.10.2.2  matt malloc_usable_size(const void *ptr)
   3864  1.10.2.2  matt {
   3865  1.10.2.2  matt 
   3866  1.10.2.2  matt 	assert(ptr != NULL);
   3867  1.10.2.2  matt 
   3868  1.10.2.2  matt 	return (isalloc(ptr));
   3869  1.10.2.2  matt }
   3870  1.10.2.2  matt #endif
   3871  1.10.2.2  matt 
   3872  1.10.2.2  matt /*
   3873  1.10.2.2  matt  * End non-standard functions.
   3874  1.10.2.2  matt  */
   3875  1.10.2.2  matt /******************************************************************************/
   3876  1.10.2.2  matt /*
   3877  1.10.2.2  matt  * Begin library-private functions, used by threading libraries for protection
   3878  1.10.2.2  matt  * of malloc during fork().  These functions are only called if the program is
   3879  1.10.2.2  matt  * running in threaded mode, so there is no need to check whether the program
   3880  1.10.2.2  matt  * is threaded here.
   3881  1.10.2.2  matt  */
   3882  1.10.2.2  matt 
   3883  1.10.2.2  matt void
   3884  1.10.2.2  matt _malloc_prefork(void)
   3885  1.10.2.2  matt {
   3886  1.10.2.2  matt 	unsigned i;
   3887  1.10.2.2  matt 
   3888  1.10.2.2  matt 	/* Acquire all mutexes in a safe order. */
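	/*
	 * Holding every allocator lock across fork() ensures that no thread
	 * is mid-allocation when the address space is copied, so the child
	 * inherits a consistent heap; _malloc_postfork() releases the locks
	 * once fork() has completed.
	 */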
   3889  1.10.2.2  matt 
   3890  1.10.2.2  matt 	malloc_mutex_lock(&arenas_mtx);
   3891  1.10.2.2  matt 	for (i = 0; i < narenas; i++) {
   3892  1.10.2.2  matt 		if (arenas[i] != NULL)
   3893  1.10.2.2  matt 			malloc_mutex_lock(&arenas[i]->mtx);
   3894  1.10.2.2  matt 	}
   3895  1.10.2.2  matt 	malloc_mutex_unlock(&arenas_mtx);
   3896  1.10.2.2  matt 
   3897  1.10.2.2  matt 	malloc_mutex_lock(&base_mtx);
   3898  1.10.2.2  matt 
   3899  1.10.2.2  matt 	malloc_mutex_lock(&chunks_mtx);
   3900  1.10.2.2  matt }
   3901  1.10.2.2  matt 
   3902  1.10.2.2  matt void
   3903  1.10.2.2  matt _malloc_postfork(void)
   3904  1.10.2.2  matt {
   3905  1.10.2.2  matt 	unsigned i;
   3906  1.10.2.2  matt 
   3907  1.10.2.2  matt 	/* Release all mutexes, now that fork() has completed. */
   3908  1.10.2.2  matt 
   3909  1.10.2.2  matt 	malloc_mutex_unlock(&chunks_mtx);
   3910  1.10.2.2  matt 
   3911  1.10.2.2  matt 	malloc_mutex_unlock(&base_mtx);
   3912  1.10.2.2  matt 
   3913  1.10.2.2  matt 	malloc_mutex_lock(&arenas_mtx);
   3914  1.10.2.2  matt 	for (i = 0; i < narenas; i++) {
   3915  1.10.2.2  matt 		if (arenas[i] != NULL)
   3916  1.10.2.2  matt 			malloc_mutex_unlock(&arenas[i]->mtx);
   3917  1.10.2.2  matt 	}
   3918  1.10.2.2  matt 	malloc_mutex_unlock(&arenas_mtx);
   3919  1.10.2.2  matt }
   3920  1.10.2.2  matt 
   3921  1.10.2.2  matt /*
   3922  1.10.2.2  matt  * End library-private functions.
   3923  1.10.2.2  matt  */
   3924  1.10.2.2  matt /******************************************************************************/
   3925