/*	$NetBSD: jemalloc.c,v 1.10.2.2 2007/11/06 23:11:19 matt Exp $	*/

/*-
 * Copyright (C) 2006,2007 Jason Evans <jasone (at) FreeBSD.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *******************************************************************************
 *
 * This allocator implementation is designed to provide scalable performance
 * for multi-threaded programs on multi-processor systems.  The following
 * features are included for this purpose:
 *
 *   + Multiple arenas are used if there are multiple CPUs, which reduces lock
 *     contention and cache sloshing.
 *
 *   + Cache line sharing between arenas is avoided for internal data
 *     structures.
 *
 *   + Memory is managed in chunks and runs (chunks can be split into runs),
 *     rather than as individual pages.  This provides a constant-time
 *     mechanism for associating allocations with particular arenas.
 *
 * Allocation requests are rounded up to the nearest size class, and no record
 * of the original request size is maintained.  Allocations are broken into
 * categories according to size class.  Assuming runtime defaults, 4 kB pages
 * and a 16 byte quantum, the size classes in each category are as follows:
 *
 *   |=====================================|
 *   | Category | Subcategory    |    Size |
 *   |=====================================|
 *   | Small    | Tiny           |       2 |
 *   |          |                |       4 |
 *   |          |                |       8 |
 *   |          |----------------+---------|
 *   |          | Quantum-spaced |      16 |
 *   |          |                |      32 |
 *   |          |                |      48 |
 *   |          |                |     ... |
 *   |          |                |     480 |
 *   |          |                |     496 |
 *   |          |                |     512 |
 *   |          |----------------+---------|
 *   |          | Sub-page       |    1 kB |
 *   |          |                |    2 kB |
 *   |=====================================|
 *   | Large                     |    4 kB |
 *   |                           |    8 kB |
 *   |                           |   12 kB |
 *   |                           |     ... |
 *   |                           | 1012 kB |
 *   |                           | 1016 kB |
 *   |                           | 1020 kB |
 *   |=====================================|
 *   | Huge                      |    1 MB |
 *   |                           |    2 MB |
 *   |                           |    3 MB |
 *   |                           |     ... |
 *   |=====================================|
 *
 * A different mechanism is used for each category:
 *
 *   Small : Each size class is segregated into its own set of runs.  Each run
 *           maintains a bitmap of which regions are free/allocated.
 *
 *   Large : Each allocation is backed by a dedicated run.  Metadata are stored
 *           in the associated arena chunk header maps.
 *
 *   Huge : Each allocation is backed by a dedicated contiguous set of chunks.
 *          Metadata are stored in a separate red-black tree.
 *
 *******************************************************************************
 */
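
/*
 * As a worked example of the table above (a sketch, assuming the default
 * 16 byte quantum and 4 kB pages): a 3 byte request is tiny and rounds up to
 * 4; a 100 byte request is quantum-spaced and rounds up to 112; a 600 byte
 * request is sub-page and rounds up to 1 kB; a 5000 byte request is large and
 * rounds up to 8 kB.  The helper below is illustrative only and is not part
 * of the allocator (sizes above the largest run round to chunk multiples and
 * become huge; that boundary is omitted here).
 */
#if 0	/* illustrative sketch, not compiled */
static unsigned long
size_class_sketch(unsigned long size)
{
	unsigned long s;

	if (size <= 8) {			/* Tiny: powers of two >= 2. */
		for (s = 2; s < size; s <<= 1)
			;
		return (s);
	}
	if (size <= 512)			/* Quantum-spaced. */
		return ((size + 15) & ~15UL);
	if (size <= 2048) {			/* Sub-page: powers of two. */
		for (s = 1024; s < size; s <<= 1)
			;
		return (s);
	}
	return ((size + 4095) & ~4095UL);	/* Large: page multiples. */
}
#endif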

/* LINTLIBRARY */

#ifdef __NetBSD__
#  define xutrace(a, b)		utrace("malloc", (a), (b))
#  define __DECONST(x, y)	((x)__UNCONST(y))
#  define NO_TLS
#else
#  define xutrace(a, b)		utrace((a), (b))
#endif	/* __NetBSD__ */

/*
 * MALLOC_PRODUCTION disables assertions and statistics gathering.  It also
 * defaults the A and J runtime options to off.  These settings are appropriate
 * for production systems.
 */
#define MALLOC_PRODUCTION

#ifndef MALLOC_PRODUCTION
#  define MALLOC_DEBUG
#endif

#include <sys/cdefs.h>
/* __FBSDID("$FreeBSD: src/lib/libc/stdlib/malloc.c,v 1.147 2007/06/15 22:00:16 jasone Exp $"); */
__RCSID("$NetBSD: jemalloc.c,v 1.10.2.2 2007/11/06 23:11:19 matt Exp $");

#ifdef __FreeBSD__
#include "libc_private.h"
#ifdef MALLOC_DEBUG
#  define _LOCK_DEBUG
#endif
#include "spinlock.h"
#endif
#include "namespace.h"
#include <sys/mman.h>
#include <sys/param.h>
#ifdef __FreeBSD__
#include <sys/stddef.h>
#endif
#include <sys/time.h>
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/tree.h>
#include <sys/uio.h>
#include <sys/ktrace.h> /* Must come after several other sys/ includes. */

#ifdef __FreeBSD__
#include <machine/atomic.h>
#include <machine/cpufunc.h>
#endif
#include <machine/vmparam.h>

#include <errno.h>
#include <limits.h>
#include <pthread.h>
#include <sched.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>

#ifdef __NetBSD__
#  include <reentrant.h>
void	_malloc_prefork(void);
void	_malloc_postfork(void);
ssize_t	_write(int, const void *, size_t);
const char	*_getprogname(void);
#endif

#ifdef __FreeBSD__
#include "un-namespace.h"
#endif

/* MALLOC_STATS enables statistics calculation. */
#ifndef MALLOC_PRODUCTION
#  define MALLOC_STATS
#endif

#ifdef MALLOC_DEBUG
#  ifdef NDEBUG
#    undef NDEBUG
#  endif
#else
#  ifndef NDEBUG
#    define NDEBUG
#  endif
#endif
#include <assert.h>

#ifdef MALLOC_DEBUG
   /* Disable inlining to make debugging easier. */
#  define inline
#endif

/* Size of stack-allocated buffer passed to strerror_r(). */
#define	STRERROR_BUF		64

/* Minimum alignment of allocations is 2^QUANTUM_2POW_MIN bytes. */
#ifdef __i386__
#  define QUANTUM_2POW_MIN	4
#  define SIZEOF_PTR_2POW	2
#  define USE_BRK
#endif
#ifdef __ia64__
#  define QUANTUM_2POW_MIN	4
#  define SIZEOF_PTR_2POW	3
#endif
#ifdef __alpha__
#  define QUANTUM_2POW_MIN	4
#  define SIZEOF_PTR_2POW	3
#  define NO_TLS
#endif
#ifdef __sparc64__
#  define QUANTUM_2POW_MIN	4
#  define SIZEOF_PTR_2POW	3
#  define NO_TLS
#endif
#ifdef __amd64__
#  define QUANTUM_2POW_MIN	4
#  define SIZEOF_PTR_2POW	3
#endif
#ifdef __arm__
#  define QUANTUM_2POW_MIN	3
#  define SIZEOF_PTR_2POW	2
#  define USE_BRK
#  define NO_TLS
#endif
#ifdef __powerpc__
#  define QUANTUM_2POW_MIN	4
#  define SIZEOF_PTR_2POW	2
#  define USE_BRK
#endif
#if defined(__sparc__) && !defined(__sparc64__)
#  define QUANTUM_2POW_MIN	4
#  define SIZEOF_PTR_2POW	2
#  define USE_BRK
#endif
#ifdef __vax__
#  define QUANTUM_2POW_MIN	4
#  define SIZEOF_PTR_2POW	2
#  define USE_BRK
#endif
#ifdef __sh__
#  define QUANTUM_2POW_MIN	4
#  define SIZEOF_PTR_2POW	2
#  define USE_BRK
#endif
#ifdef __m68k__
#  define QUANTUM_2POW_MIN	4
#  define SIZEOF_PTR_2POW	2
#  define USE_BRK
#endif
#ifdef __mips__
#  define QUANTUM_2POW_MIN	4
#  define SIZEOF_PTR_2POW	2
#  define USE_BRK
#endif
#ifdef __hppa__
#  define QUANTUM_2POW_MIN     4
#  define SIZEOF_PTR_2POW      2
#  define USE_BRK
#endif

#define	SIZEOF_PTR		(1 << SIZEOF_PTR_2POW)

/* sizeof(int) == (1 << SIZEOF_INT_2POW). */
#ifndef SIZEOF_INT_2POW
#  define SIZEOF_INT_2POW	2
#endif

/* We can't use TLS in non-PIC programs, since TLS relies on loader magic. */
#if (!defined(PIC) && !defined(NO_TLS))
#  define NO_TLS
#endif

/*
 * Size and alignment of memory chunks that are allocated by the OS's virtual
 * memory system.
 */
#define	CHUNK_2POW_DEFAULT	20

/*
 * Maximum size of L1 cache line.  This is used to avoid cache line aliasing,
 * so over-estimates are okay (up to a point), but under-estimates will
 * negatively affect performance.
 */
#define	CACHELINE_2POW		6
#define	CACHELINE		((size_t)(1 << CACHELINE_2POW))

/* Smallest size class to support. */
#define	TINY_MIN_2POW		1

/*
 * Maximum size class that is a multiple of the quantum, but not (necessarily)
 * a power of 2.  Above this size, allocations are rounded up to the nearest
 * power of 2.
 */
#define	SMALL_MAX_2POW_DEFAULT	9
#define	SMALL_MAX_DEFAULT	(1 << SMALL_MAX_2POW_DEFAULT)

/*
 * Maximum desired run header overhead.  Runs are sized as small as possible
 * such that this setting is still honored, without violating other constraints.
 * The goal is to make runs as small as possible without exceeding a per run
 * external fragmentation threshold.
 *
 * Note that it is possible to set this low enough that it cannot be honored
 * for some/all object sizes, since there is one bit of header overhead per
 * object (plus a constant).  In such cases, this constraint is relaxed.
 *
 * RUN_MAX_OVRHD_RELAX specifies the maximum number of bits per region of
 * overhead for which RUN_MAX_OVRHD is relaxed.
 */
#define RUN_MAX_OVRHD		0.015
#define RUN_MAX_OVRHD_RELAX	1.5

/* Put a cap on small object run size.  This overrides RUN_MAX_OVRHD. */
#define RUN_MAX_SMALL_2POW	15
#define RUN_MAX_SMALL		(1 << RUN_MAX_SMALL_2POW)
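
/*
 * A rough illustration of the constraint described above (a sketch only; the
 * run sizes actually used are computed in arena_bin_run_size_calc(), which is
 * only prototyped in this excerpt): a run's header costs a fixed arena_run_t
 * plus one bitmap bit per region, e.g. for 16 byte regions in a 4 kB run the
 * bitmap alone is 4096/16 = 256 bits = 32 bytes.  The run size is grown until
 * the header fraction drops below RUN_MAX_OVRHD, and the constraint is
 * relaxed once the overhead is within RUN_MAX_OVRHD_RELAX bits per region.
 * The helper and its parameter names are illustrative, not part of the
 * allocator.
 */
#if 0	/* illustrative sketch, not compiled */
static double
run_overhead_estimate(size_t run_size, size_t reg_size, size_t fixed_header)
{
	size_t nregs = run_size / reg_size;		/* Upper bound. */
	size_t header = fixed_header + (nregs + 7) / 8;	/* Bitmap bits -> bytes. */

	return ((double)header / (double)run_size);
}
#endif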

/******************************************************************************/

#ifdef __FreeBSD__
/*
 * Mutexes based on spinlocks.  We can't use normal pthread mutexes, because
 * they require malloc()ed memory.
 */
typedef struct {
	spinlock_t	lock;
} malloc_mutex_t;

/* Set to true once the allocator has been initialized. */
static bool malloc_initialized = false;

/* Used to avoid initialization races. */
static malloc_mutex_t init_lock = {_SPINLOCK_INITIALIZER};
#else
#define	malloc_mutex_t	mutex_t

/* Set to true once the allocator has been initialized. */
static bool malloc_initialized = false;

/* Used to avoid initialization races. */
static mutex_t init_lock = MUTEX_INITIALIZER;
#endif

/******************************************************************************/
/*
 * Statistics data structures.
 */

#ifdef MALLOC_STATS

typedef struct malloc_bin_stats_s malloc_bin_stats_t;
struct malloc_bin_stats_s {
	/*
	 * Number of allocation requests that corresponded to the size of this
	 * bin.
	 */
	uint64_t	nrequests;

	/* Total number of runs created for this bin's size class. */
	uint64_t	nruns;

	/*
	 * Total number of runs reused by extracting them from the runs tree for
	 * this bin's size class.
	 */
	uint64_t	reruns;

	/* High-water mark for this bin. */
	unsigned long	highruns;

	/* Current number of runs in this bin. */
	unsigned long	curruns;
};

typedef struct arena_stats_s arena_stats_t;
struct arena_stats_s {
	/* Number of bytes currently mapped. */
	size_t		mapped;

	/* Per-size-category statistics. */
	size_t		allocated_small;
	uint64_t	nmalloc_small;
	uint64_t	ndalloc_small;

	size_t		allocated_large;
	uint64_t	nmalloc_large;
	uint64_t	ndalloc_large;
};

typedef struct chunk_stats_s chunk_stats_t;
struct chunk_stats_s {
	/* Number of chunks that were allocated. */
	uint64_t	nchunks;

	/* High-water mark for number of chunks allocated. */
	unsigned long	highchunks;

	/*
	 * Current number of chunks allocated.  This value is maintained only
	 * so that highchunks can be kept up to date.
	 */
	unsigned long	curchunks;
};

#endif /* #ifdef MALLOC_STATS */

/******************************************************************************/
/*
 * Chunk data structures.
 */

/* Tree of chunks. */
typedef struct chunk_node_s chunk_node_t;
struct chunk_node_s {
	/* Linkage for the chunk tree. */
	RB_ENTRY(chunk_node_s) link;

	/*
	 * Pointer to the chunk that this tree node is responsible for.  In some
	 * (but certainly not all) cases, this data structure is placed at the
	 * beginning of the corresponding chunk, so this field may point to this
	 * node.
	 */
	void	*chunk;

	/* Total chunk size. */
	size_t	size;
};
typedef struct chunk_tree_s chunk_tree_t;
RB_HEAD(chunk_tree_s, chunk_node_s);

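/*
 * A minimal sketch of how such a tree is typically used with the <sys/tree.h>
 * macros (illustrative only; the allocator's own comparison and lookup
 * routines appear later in the file, outside this excerpt).  Nodes are
 * ordered by chunk address, so a lookup walks the red-black tree in O(log n).
 * The "_sketch" names below are hypothetical.
 */
#if 0	/* illustrative sketch, not compiled */
static int
chunk_comp_sketch(chunk_node_t *a, chunk_node_t *b)
{

	if ((uintptr_t)a->chunk < (uintptr_t)b->chunk)
		return (-1);
	if ((uintptr_t)a->chunk > (uintptr_t)b->chunk)
		return (1);
	return (0);
}
RB_GENERATE(chunk_tree_s, chunk_node_s, link, chunk_comp_sketch);

static chunk_node_t *
chunk_lookup_sketch(chunk_tree_t *tree, void *chunk)
{
	chunk_node_t key;

	key.chunk = chunk;
	return (RB_FIND(chunk_tree_s, tree, &key));
}
#endif
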
/******************************************************************************/
/*
 * Arena data structures.
 */

typedef struct arena_s arena_t;
typedef struct arena_bin_s arena_bin_t;

typedef struct arena_chunk_map_s arena_chunk_map_t;
struct arena_chunk_map_s {
	/* Number of pages in run. */
	uint32_t	npages;
	/*
	 * Position within run.  For a free run, this is POS_FREE for the first
	 * and last pages.  The POS_FREE special value makes it possible to
	 * quickly coalesce free runs.
	 *
	 * This is the limiting factor for chunksize; there can be at most 2^31
	 * pages in a run.
	 */
#define POS_FREE ((uint32_t)0xffffffffU)
	uint32_t	pos;
};

/* Arena chunk header. */
typedef struct arena_chunk_s arena_chunk_t;
struct arena_chunk_s {
	/* Arena that owns the chunk. */
	arena_t *arena;

	/* Linkage for the arena's chunk tree. */
	RB_ENTRY(arena_chunk_s) link;

	/*
	 * Number of pages in use.  This is maintained in order to make
	 * detection of empty chunks fast.
	 */
	uint32_t pages_used;

	/*
	 * Every time a free run larger than this value is created/coalesced,
	 * this value is increased.  The only way that the value decreases is if
	 * arena_run_alloc() fails to find a free run as large as advertised by
	 * this value.
	 */
	uint32_t max_frun_npages;

	/*
	 * Every time a free run that starts at an earlier page than this value
	 * is created/coalesced, this value is decreased.  It is reset in a
	 * similar fashion to max_frun_npages.
	 */
	uint32_t min_frun_ind;

	/*
	 * Map of pages within chunk that keeps track of free/large/small.  For
	 * free runs, only the map entries for the first and last pages are
	 * kept up to date, so that free runs can be quickly coalesced.
	 */
	arena_chunk_map_t map[1]; /* Dynamically sized. */
};
typedef struct arena_chunk_tree_s arena_chunk_tree_t;
RB_HEAD(arena_chunk_tree_s, arena_chunk_s);

typedef struct arena_run_s arena_run_t;
struct arena_run_s {
	/* Linkage for run trees. */
	RB_ENTRY(arena_run_s) link;

#ifdef MALLOC_DEBUG
	uint32_t	magic;
#  define ARENA_RUN_MAGIC 0x384adf93
#endif

	/* Bin this run is associated with. */
	arena_bin_t	*bin;

	/* Index of first element that might have a free region. */
	unsigned	regs_minelm;

	/* Number of free regions in run. */
	unsigned	nfree;

	/* Bitmask of in-use regions (0: in use, 1: free). */
	unsigned	regs_mask[1]; /* Dynamically sized. */
};
typedef struct arena_run_tree_s arena_run_tree_t;
RB_HEAD(arena_run_tree_s, arena_run_s);
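
/*
 * A minimal sketch of how a free region can be located in a run using the
 * fields above (illustrative only; the allocator's real region allocation
 * code appears further below, outside this excerpt).  A set bit in regs_mask
 * means "free", and regs_minelm lets the scan skip mask words that are known
 * to be fully allocated.  The helper name and nelms parameter are
 * hypothetical.
 */
#if 0	/* illustrative sketch, not compiled */
static unsigned
run_find_free_region_sketch(arena_run_t *run, unsigned nelms)
{
	unsigned i, bit;

	for (i = run->regs_minelm; i < nelms; i++) {
		unsigned mask = run->regs_mask[i];

		if (mask != 0) {
			bit = (unsigned)ffs((int)mask) - 1;	/* First free bit. */
			return ((i << (SIZEOF_INT_2POW + 3)) + bit);
		}
	}
	return (UINT_MAX);	/* No free region; run is full. */
}
#endif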

struct arena_bin_s {
	/*
	 * Current run being used to service allocations of this bin's size
	 * class.
	 */
	arena_run_t	*runcur;

	/*
	 * Tree of non-full runs.  This tree is used when looking for an
	 * existing run when runcur is no longer usable.  We choose the
	 * non-full run that is lowest in memory; this policy tends to keep
	 * objects packed well, and it can also help reduce the number of
	 * almost-empty chunks.
	 */
	arena_run_tree_t runs;

	/* Size of regions in a run for this bin's size class. */
	size_t		reg_size;

	/* Total size of a run for this bin's size class. */
	size_t		run_size;

	/* Total number of regions in a run for this bin's size class. */
	uint32_t	nregs;

	/* Number of elements in a run's regs_mask for this bin's size class. */
	uint32_t	regs_mask_nelms;

	/* Offset of first region in a run for this bin's size class. */
	uint32_t	reg0_offset;

#ifdef MALLOC_STATS
	/* Bin statistics. */
	malloc_bin_stats_t stats;
#endif
};

struct arena_s {
#ifdef MALLOC_DEBUG
	uint32_t		magic;
#  define ARENA_MAGIC 0x947d3d24
#endif

	/* All operations on this arena require that mtx be locked. */
	malloc_mutex_t		mtx;

#ifdef MALLOC_STATS
	arena_stats_t		stats;
#endif

	/*
	 * Tree of chunks this arena manages.
	 */
	arena_chunk_tree_t	chunks;

	/*
	 * In order to avoid rapid chunk allocation/deallocation when an arena
	 * oscillates right on the cusp of needing a new chunk, cache the most
	 * recently freed chunk.  This caching is disabled by opt_hint.
	 *
	 * There is one spare chunk per arena, rather than one spare total, in
	 * order to avoid interactions between multiple threads that could make
	 * a single spare inadequate.
	 */
	arena_chunk_t *spare;

	/*
	 * bins is used to service allocations of the following size classes,
	 * assuming a 16-byte quantum, 4 kB pagesize, and default MALLOC_OPTIONS.
	 * (A sketch of the size -> bin index mapping follows this structure
	 * definition.)
	 *
	 *   bins[i] | size |
	 *   --------+------+
	 *        0  |    2 |
	 *        1  |    4 |
	 *        2  |    8 |
	 *   --------+------+
	 *        3  |   16 |
	 *        4  |   32 |
	 *        5  |   48 |
	 *        6  |   64 |
	 *           :      :
	 *           :      :
	 *       33  |  496 |
	 *       34  |  512 |
	 *   --------+------+
	 *       35  | 1024 |
	 *       36  | 2048 |
	 *   --------+------+
	 */
	arena_bin_t		bins[1]; /* Dynamically sized. */
};

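/*
 * A minimal sketch of the size -> bin index mapping implied by the table in
 * struct arena_s above (illustrative only, assuming the default 16 byte
 * quantum and 4 kB pages; the allocator's real lookup is performed in its
 * small allocation path, outside this excerpt).  The size argument is assumed
 * to already be rounded to one of the listed size classes, and the helper
 * name and ntbins_arg/nqbins_arg parameters are hypothetical.
 */
#if 0	/* illustrative sketch, not compiled */
static unsigned
size2bin_sketch(size_t size, unsigned ntbins_arg, unsigned nqbins_arg)
{
	unsigned log2;

	if (size <= 8) {			/* Tiny: 2, 4, 8. */
		for (log2 = TINY_MIN_2POW; ((size_t)1 << log2) < size; log2++)
			;
		return (log2 - TINY_MIN_2POW);
	}
	if (size <= 512)			/* Quantum-spaced: 16..512. */
		return (ntbins_arg + (unsigned)(size >> 4) - 1);
	for (log2 = 10; ((size_t)1 << log2) < size; log2++)	/* Sub-page. */
		;
	return (ntbins_arg + nqbins_arg + log2 - 10);
}
#endif
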
/******************************************************************************/
/*
 * Data.
 */

/* Number of CPUs. */
static unsigned		ncpus;

/* VM page size. */
static size_t		pagesize;
static size_t		pagesize_mask;
static int		pagesize_2pow;

/* Various bin-related settings. */
static size_t		bin_maxclass; /* Max size class for bins. */
static unsigned		ntbins; /* Number of (2^n)-spaced tiny bins. */
static unsigned		nqbins; /* Number of quantum-spaced bins. */
static unsigned		nsbins; /* Number of (2^n)-spaced sub-page bins. */
static size_t		small_min;
static size_t		small_max;

/* Various quantum-related settings. */
static size_t		quantum;
static size_t		quantum_mask; /* (quantum - 1). */

/* Various chunk-related settings. */
static size_t		chunksize;
static size_t		chunksize_mask; /* (chunksize - 1). */
static int		chunksize_2pow;
static unsigned		chunk_npages;
static unsigned		arena_chunk_header_npages;
static size_t		arena_maxclass; /* Max size class for arenas. */

/********/
/*
 * Chunks.
 */

/* Protects chunk-related data structures. */
static malloc_mutex_t	chunks_mtx;

/* Tree of chunks that are stand-alone huge allocations. */
static chunk_tree_t	huge;

#ifdef USE_BRK
/*
 * Try to use brk for chunk-size allocations, due to address space constraints.
 */
/*
 * Protects sbrk() calls.  This must be separate from chunks_mtx, since
 * base_pages_alloc() also uses sbrk(), but cannot lock chunks_mtx (doing so
 * could cause recursive lock acquisition).
 */
static malloc_mutex_t	brk_mtx;
/* Result of first sbrk(0) call. */
static void		*brk_base;
/* Current end of brk, or ((void *)-1) if brk is exhausted. */
static void		*brk_prev;
/* Current upper limit on brk addresses. */
static void		*brk_max;
#endif

#ifdef MALLOC_STATS
/* Huge allocation statistics. */
static uint64_t		huge_nmalloc;
static uint64_t		huge_ndalloc;
static uint64_t		huge_nralloc;
static size_t		huge_allocated;
#endif

/*
 * Tree of chunks that were previously allocated.  This is used when allocating
 * chunks, in an attempt to re-use address space.
 */
static chunk_tree_t	old_chunks;

/****************************/
/*
 * base (internal allocation).
 */

/*
 * Current pages that are being used for internal memory allocations.  These
 * pages are carved up in cacheline-size quanta, so that there is no chance of
 * false cache line sharing.
 */
static void		*base_pages;
static void		*base_next_addr;
static void		*base_past_addr; /* Addr immediately past base_pages. */
static chunk_node_t	*base_chunk_nodes; /* LIFO cache of chunk nodes. */
static malloc_mutex_t	base_mtx;
#ifdef MALLOC_STATS
static size_t		base_mapped;
#endif

/********/
/*
 * Arenas.
 */

/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 */
static arena_t		**arenas;
static unsigned		narenas;
static unsigned		next_arena;
static malloc_mutex_t	arenas_mtx; /* Protects arenas initialization. */

#ifndef NO_TLS
/*
 * Map of pthread_self() --> arenas[???], used for selecting an arena to use
 * for allocations.
 */
static __thread arena_t	*arenas_map;
#define	get_arenas_map()	(arenas_map)
#define	set_arenas_map(x)	(arenas_map = x)
#else
static thread_key_t arenas_map_key;
#define	get_arenas_map()	thr_getspecific(arenas_map_key)
#define	set_arenas_map(x)	thr_setspecific(arenas_map_key, x)
#endif
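
/*
 * A minimal sketch of how the per-thread arena map above is typically
 * consulted on the allocation fast path (illustrative only; the allocator's
 * real arena selection, including choose_arena_hard(), appears further below
 * and is only prototyped in this excerpt).  The helper name is hypothetical.
 */
#if 0	/* illustrative sketch, not compiled */
static arena_t *
choose_arena_sketch(void)
{
	arena_t *ret;

	ret = get_arenas_map();		/* Fast path: cached assignment. */
	if (ret == NULL) {
		/* Slow path: assign this thread one of arenas[]. */
		ret = choose_arena_hard();
	}
	return (ret);
}
#endif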

#ifdef MALLOC_STATS
/* Chunk statistics. */
static chunk_stats_t	stats_chunks;
#endif

/*******************************/
/*
 * Runtime configuration options.
 */
const char	*_malloc_options;

#ifndef MALLOC_PRODUCTION
static bool	opt_abort = true;
static bool	opt_junk = true;
#else
static bool	opt_abort = false;
static bool	opt_junk = false;
#endif
static bool	opt_hint = false;
static bool	opt_print_stats = false;
static int	opt_quantum_2pow = QUANTUM_2POW_MIN;
static int	opt_small_max_2pow = SMALL_MAX_2POW_DEFAULT;
static int	opt_chunk_2pow = CHUNK_2POW_DEFAULT;
static bool	opt_utrace = false;
static bool	opt_sysv = false;
static bool	opt_xmalloc = false;
static bool	opt_zero = false;
static int32_t	opt_narenas_lshift = 0;

typedef struct {
	void	*p;
	size_t	s;
	void	*r;
} malloc_utrace_t;

#define	UTRACE(a, b, c)							\
	if (opt_utrace) {						\
		malloc_utrace_t ut;					\
		ut.p = a;						\
		ut.s = b;						\
		ut.r = c;						\
		xutrace(&ut, sizeof(ut));				\
	}
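
/*
 * A sketch of how UTRACE records are typically emitted (illustrative only;
 * the real call sites are in the public allocation entry points further
 * below, outside this excerpt).  Each record pairs the old pointer, the
 * requested size, and the resulting pointer so that ktrace/kdump can
 * reconstruct the allocation history.  The wrapper function below is
 * hypothetical.
 */
#if 0	/* illustrative sketch, not compiled */
static void
utrace_examples(void *ptr, size_t size, void *ret)
{

	UTRACE(0, size, ret);	/* malloc(size) returned ret. */
	UTRACE(ptr, 0, 0);	/* free(ptr). */
	UTRACE(ptr, size, ret);	/* realloc(ptr, size) returned ret. */
}
#endif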

/******************************************************************************/
/*
 * Begin function prototypes for non-inline static functions.
 */

static void	wrtmessage(const char *p1, const char *p2, const char *p3,
		const char *p4);
#ifdef MALLOC_STATS
static void	malloc_printf(const char *format, ...);
#endif
static char	*umax2s(uintmax_t x, char *s);
static bool	base_pages_alloc(size_t minsize);
static void	*base_alloc(size_t size);
static chunk_node_t *base_chunk_node_alloc(void);
static void	base_chunk_node_dealloc(chunk_node_t *node);
#ifdef MALLOC_STATS
static void	stats_print(arena_t *arena);
#endif
static void	*pages_map(void *addr, size_t size);
static void	*pages_map_align(void *addr, size_t size, int align);
static void	pages_unmap(void *addr, size_t size);
static void	*chunk_alloc(size_t size);
static void	chunk_dealloc(void *chunk, size_t size);
static arena_t	*choose_arena_hard(void);
static void	arena_run_split(arena_t *arena, arena_run_t *run, size_t size);
static arena_chunk_t *arena_chunk_alloc(arena_t *arena);
static void	arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk);
static arena_run_t *arena_run_alloc(arena_t *arena, size_t size);
static void	arena_run_dalloc(arena_t *arena, arena_run_t *run, size_t size);
static arena_run_t *arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin);
static void *arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin);
static size_t arena_bin_run_size_calc(arena_bin_t *bin, size_t min_run_size);
static void	*arena_malloc(arena_t *arena, size_t size);
static void	*arena_palloc(arena_t *arena, size_t alignment, size_t size,
    size_t alloc_size);
static size_t	arena_salloc(const void *ptr);
static void	*arena_ralloc(void *ptr, size_t size, size_t oldsize);
static void	arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr);
static bool	arena_new(arena_t *arena);
static arena_t	*arenas_extend(unsigned ind);
static void	*huge_malloc(size_t size);
static void	*huge_palloc(size_t alignment, size_t size);
static void	*huge_ralloc(void *ptr, size_t size, size_t oldsize);
static void	huge_dalloc(void *ptr);
static void	*imalloc(size_t size);
static void	*ipalloc(size_t alignment, size_t size);
static void	*icalloc(size_t size);
static size_t	isalloc(const void *ptr);
static void	*iralloc(void *ptr, size_t size);
static void	idalloc(void *ptr);
static void	malloc_print_stats(void);
static bool	malloc_init_hard(void);

/*
 * End function prototypes.
 */
/******************************************************************************/
/*
 * Begin mutex.
 */

#ifdef __NetBSD__
#define	malloc_mutex_init(m)	mutex_init(m, NULL)
#define	malloc_mutex_lock(m)	mutex_lock(m)
#define	malloc_mutex_unlock(m)	mutex_unlock(m)
#else	/* __NetBSD__ */
static inline void
malloc_mutex_init(malloc_mutex_t *a_mutex)
{
	static const spinlock_t lock = _SPINLOCK_INITIALIZER;

	a_mutex->lock = lock;
}

static inline void
malloc_mutex_lock(malloc_mutex_t *a_mutex)
{

	if (__isthreaded)
		_SPINLOCK(&a_mutex->lock);
}

static inline void
malloc_mutex_unlock(malloc_mutex_t *a_mutex)
{

	if (__isthreaded)
		_SPINUNLOCK(&a_mutex->lock);
}
#endif	/* __NetBSD__ */

/*
 * End mutex.
 */
/******************************************************************************/
/*
 * Begin Utility functions/macros.
 */

/* Return the chunk address for allocation address a. */
#define	CHUNK_ADDR2BASE(a)						\
	((void *)((uintptr_t)(a) & ~chunksize_mask))

/* Return the chunk offset of address a. */
#define	CHUNK_ADDR2OFFSET(a)						\
	((size_t)((uintptr_t)(a) & chunksize_mask))

/* Return the smallest chunk multiple that is >= s. */
#define	CHUNK_CEILING(s)						\
	(((s) + chunksize_mask) & ~chunksize_mask)

/* Return the smallest cacheline multiple that is >= s. */
#define	CACHELINE_CEILING(s)						\
	(((s) + (CACHELINE - 1)) & ~(CACHELINE - 1))

/* Return the smallest quantum multiple that is >= a. */
#define	QUANTUM_CEILING(a)						\
	(((a) + quantum_mask) & ~quantum_mask)

/* Return the smallest pagesize multiple that is >= s. */
#define	PAGE_CEILING(s)							\
	(((s) + pagesize_mask) & ~pagesize_mask)

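/*
 * A minimal sketch of how the chunk macros above are typically used to
 * dispatch on an allocation's category (illustrative only; the real dispatch
 * lives in isalloc()/idalloc(), further below and only prototyped in this
 * excerpt).  With the default 1 MB chunks, CHUNK_ADDR2BASE() simply masks off
 * the low 20 bits of the address.  The helper name is hypothetical.
 */
#if 0	/* illustrative sketch, not compiled */
static bool
ptr_is_arena_allocation_sketch(const void *ptr)
{
	void *chunk = CHUNK_ADDR2BASE(ptr);

	/*
	 * Arena allocations never start at a chunk boundary, because the
	 * arena_chunk_t header occupies the front of the chunk; huge
	 * allocations are themselves chunk-aligned.
	 */
	return (chunk != ptr);
}
#endif
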
    907  1.10.2.2  matt /* Compute the smallest power of 2 that is >= x. */
    908  1.10.2.2  matt static inline size_t
    909  1.10.2.2  matt pow2_ceil(size_t x)
    910  1.10.2.2  matt {
    911  1.10.2.2  matt 
    912  1.10.2.2  matt 	x--;
    913  1.10.2.2  matt 	x |= x >> 1;
    914  1.10.2.2  matt 	x |= x >> 2;
    915  1.10.2.2  matt 	x |= x >> 4;
    916  1.10.2.2  matt 	x |= x >> 8;
    917  1.10.2.2  matt 	x |= x >> 16;
    918  1.10.2.2  matt #if (SIZEOF_PTR == 8)
    919  1.10.2.2  matt 	x |= x >> 32;
    920  1.10.2.2  matt #endif
    921  1.10.2.2  matt 	x++;
    922  1.10.2.2  matt 	return (x);
    923  1.10.2.2  matt }
    924  1.10.2.2  matt 
    925  1.10.2.2  matt static void
    926  1.10.2.2  matt wrtmessage(const char *p1, const char *p2, const char *p3, const char *p4)
    927  1.10.2.2  matt {
    928  1.10.2.2  matt 
    929  1.10.2.2  matt 	_write(STDERR_FILENO, p1, strlen(p1));
    930  1.10.2.2  matt 	_write(STDERR_FILENO, p2, strlen(p2));
    931  1.10.2.2  matt 	_write(STDERR_FILENO, p3, strlen(p3));
    932  1.10.2.2  matt 	_write(STDERR_FILENO, p4, strlen(p4));
    933  1.10.2.2  matt }
    934  1.10.2.2  matt 
    935  1.10.2.2  matt void	(*_malloc_message)(const char *p1, const char *p2, const char *p3,
    936  1.10.2.2  matt 	    const char *p4) = wrtmessage;
    937  1.10.2.2  matt 
    938  1.10.2.2  matt #ifdef MALLOC_STATS
    939  1.10.2.2  matt /*
    940  1.10.2.2  matt  * Print to stderr in such a way as to (hopefully) avoid memory allocation.
    941  1.10.2.2  matt  */
    942  1.10.2.2  matt static void
    943  1.10.2.2  matt malloc_printf(const char *format, ...)
    944  1.10.2.2  matt {
    945  1.10.2.2  matt 	char buf[4096];
    946  1.10.2.2  matt 	va_list ap;
    947  1.10.2.2  matt 
    948  1.10.2.2  matt 	va_start(ap, format);
    949  1.10.2.2  matt 	vsnprintf(buf, sizeof(buf), format, ap);
    950  1.10.2.2  matt 	va_end(ap);
    951  1.10.2.2  matt 	_malloc_message(buf, "", "", "");
    952  1.10.2.2  matt }
    953  1.10.2.2  matt #endif
    954  1.10.2.2  matt 
    955  1.10.2.2  matt /*
    956  1.10.2.2  matt  * We don't want to depend on vsnprintf() for production builds, since that can
    957  1.10.2.2  matt  * cause unnecessary bloat for static binaries.  umax2s() provides minimal
    958  1.10.2.2  matt  * integer printing functionality, so that malloc_printf() use can be limited to
    959  1.10.2.2  matt  * MALLOC_STATS code.
    960  1.10.2.2  matt  */
    961  1.10.2.2  matt #define UMAX2S_BUFSIZE	21
    962  1.10.2.2  matt static char *
    963  1.10.2.2  matt umax2s(uintmax_t x, char *s)
    964  1.10.2.2  matt {
    965  1.10.2.2  matt 	unsigned i;
    966  1.10.2.2  matt 
    967  1.10.2.2  matt 	/* Make sure UMAX2S_BUFSIZE is large enough. */
    968  1.10.2.2  matt 	/* LINTED */
    969  1.10.2.2  matt 	assert(sizeof(uintmax_t) <= 8);
    970  1.10.2.2  matt 
    971  1.10.2.2  matt 	i = UMAX2S_BUFSIZE - 1;
    972  1.10.2.2  matt 	s[i] = '\0';
    973  1.10.2.2  matt 	do {
    974  1.10.2.2  matt 		i--;
    975  1.10.2.2  matt 		s[i] = "0123456789"[(int)x % 10];
    976  1.10.2.2  matt 		x /= (uintmax_t)10LL;
    977  1.10.2.2  matt 	} while (x > 0);
    978  1.10.2.2  matt 
    979  1.10.2.2  matt 	return (&s[i]);
    980  1.10.2.2  matt }
    981  1.10.2.2  matt 
    982  1.10.2.2  matt /******************************************************************************/
    983  1.10.2.2  matt 
    984  1.10.2.2  matt static bool
    985  1.10.2.2  matt base_pages_alloc(size_t minsize)
    986  1.10.2.2  matt {
    987  1.10.2.2  matt 	size_t csize = 0;
    988  1.10.2.2  matt 
    989  1.10.2.2  matt #ifdef USE_BRK
    990  1.10.2.2  matt 	/*
    991  1.10.2.2  matt 	 * Do special brk allocation here, since base allocations don't need to
    992  1.10.2.2  matt 	 * be chunk-aligned.
    993  1.10.2.2  matt 	 */
    994  1.10.2.2  matt 	if (brk_prev != (void *)-1) {
    995  1.10.2.2  matt 		void *brk_cur;
    996  1.10.2.2  matt 		intptr_t incr;
    997  1.10.2.2  matt 
    998  1.10.2.2  matt 		if (minsize != 0)
    999  1.10.2.2  matt 			csize = CHUNK_CEILING(minsize);
   1000  1.10.2.2  matt 
   1001  1.10.2.2  matt 		malloc_mutex_lock(&brk_mtx);
   1002  1.10.2.2  matt 		do {
   1003  1.10.2.2  matt 			/* Get the current end of brk. */
   1004  1.10.2.2  matt 			brk_cur = sbrk(0);
   1005  1.10.2.2  matt 
   1006  1.10.2.2  matt 			/*
   1007  1.10.2.2  matt 			 * Calculate how much padding is necessary to
   1008  1.10.2.2  matt 			 * chunk-align the end of brk.  Don't worry about
   1009  1.10.2.2  matt 			 * brk_cur not being chunk-aligned though.
   1010  1.10.2.2  matt 			 */
   1011  1.10.2.2  matt 			incr = (intptr_t)chunksize
   1012  1.10.2.2  matt 			    - (intptr_t)CHUNK_ADDR2OFFSET(brk_cur);
   1013  1.10.2.2  matt 			if (incr < minsize)
   1014  1.10.2.2  matt 				incr += csize;
   1015  1.10.2.2  matt 
   1016  1.10.2.2  matt 			brk_prev = sbrk(incr);
   1017  1.10.2.2  matt 			if (brk_prev == brk_cur) {
   1018  1.10.2.2  matt 				/* Success. */
   1019  1.10.2.2  matt 				malloc_mutex_unlock(&brk_mtx);
   1020  1.10.2.2  matt 				base_pages = brk_cur;
   1021  1.10.2.2  matt 				base_next_addr = base_pages;
   1022  1.10.2.2  matt 				base_past_addr = (void *)((uintptr_t)base_pages
   1023  1.10.2.2  matt 				    + incr);
   1024  1.10.2.2  matt #ifdef MALLOC_STATS
   1025  1.10.2.2  matt 				base_mapped += incr;
   1026  1.10.2.2  matt #endif
   1027  1.10.2.2  matt 				return (false);
   1028  1.10.2.2  matt 			}
   1029  1.10.2.2  matt 		} while (brk_prev != (void *)-1);
   1030  1.10.2.2  matt 		malloc_mutex_unlock(&brk_mtx);
   1031  1.10.2.2  matt 	}
   1032  1.10.2.2  matt 	if (minsize == 0) {
   1033  1.10.2.2  matt 		/*
   1034  1.10.2.2  matt 		 * Failure during initialization doesn't matter, so avoid
   1035  1.10.2.2  matt 		 * falling through to the mmap-based page mapping code.
   1036  1.10.2.2  matt 		 */
   1037  1.10.2.2  matt 		return (true);
   1038  1.10.2.2  matt 	}
   1039  1.10.2.2  matt #endif
   1040  1.10.2.2  matt 	assert(minsize != 0);
   1041  1.10.2.2  matt 	csize = PAGE_CEILING(minsize);
   1042  1.10.2.2  matt 	base_pages = pages_map(NULL, csize);
   1043  1.10.2.2  matt 	if (base_pages == NULL)
   1044  1.10.2.2  matt 		return (true);
   1045  1.10.2.2  matt 	base_next_addr = base_pages;
   1046  1.10.2.2  matt 	base_past_addr = (void *)((uintptr_t)base_pages + csize);
   1047  1.10.2.2  matt #ifdef MALLOC_STATS
   1048  1.10.2.2  matt 	base_mapped += csize;
   1049  1.10.2.2  matt #endif
   1050  1.10.2.2  matt 	return (false);
   1051  1.10.2.2  matt }
   1052  1.10.2.2  matt 
   1053  1.10.2.2  matt static void *
   1054  1.10.2.2  matt base_alloc(size_t size)
   1055  1.10.2.2  matt {
   1056  1.10.2.2  matt 	void *ret;
   1057  1.10.2.2  matt 	size_t csize;
   1058  1.10.2.2  matt 
   1059  1.10.2.2  matt 	/* Round size up to nearest multiple of the cacheline size. */
   1060  1.10.2.2  matt 	csize = CACHELINE_CEILING(size);
   1061  1.10.2.2  matt 
   1062  1.10.2.2  matt 	malloc_mutex_lock(&base_mtx);
   1063  1.10.2.2  matt 
   1064  1.10.2.2  matt 	/* Make sure there's enough space for the allocation. */
   1065  1.10.2.2  matt 	if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) {
   1066  1.10.2.2  matt 		if (base_pages_alloc(csize)) {
   1067  1.10.2.2  matt 			ret = NULL;
   1068  1.10.2.2  matt 			goto RETURN;
   1069  1.10.2.2  matt 		}
   1070  1.10.2.2  matt 	}
   1071  1.10.2.2  matt 
   1072  1.10.2.2  matt 	/* Allocate. */
   1073  1.10.2.2  matt 	ret = base_next_addr;
   1074  1.10.2.2  matt 	base_next_addr = (void *)((uintptr_t)base_next_addr + csize);
   1075  1.10.2.2  matt 
   1076  1.10.2.2  matt RETURN:
   1077  1.10.2.2  matt 	malloc_mutex_unlock(&base_mtx);
   1078  1.10.2.2  matt 	return (ret);
   1079  1.10.2.2  matt }
   1080  1.10.2.2  matt 
   1081  1.10.2.2  matt static chunk_node_t *
   1082  1.10.2.2  matt base_chunk_node_alloc(void)
   1083  1.10.2.2  matt {
   1084  1.10.2.2  matt 	chunk_node_t *ret;
   1085  1.10.2.2  matt 
   1086  1.10.2.2  matt 	malloc_mutex_lock(&base_mtx);
   1087  1.10.2.2  matt 	if (base_chunk_nodes != NULL) {
   1088  1.10.2.2  matt 		ret = base_chunk_nodes;
   1089  1.10.2.2  matt 		/* LINTED */
   1090  1.10.2.2  matt 		base_chunk_nodes = *(chunk_node_t **)ret;
   1091  1.10.2.2  matt 		malloc_mutex_unlock(&base_mtx);
   1092  1.10.2.2  matt 	} else {
   1093  1.10.2.2  matt 		malloc_mutex_unlock(&base_mtx);
   1094  1.10.2.2  matt 		ret = (chunk_node_t *)base_alloc(sizeof(chunk_node_t));
   1095  1.10.2.2  matt 	}
   1096  1.10.2.2  matt 
   1097  1.10.2.2  matt 	return (ret);
   1098  1.10.2.2  matt }
   1099  1.10.2.2  matt 
   1100  1.10.2.2  matt static void
   1101  1.10.2.2  matt base_chunk_node_dealloc(chunk_node_t *node)
   1102  1.10.2.2  matt {
   1103  1.10.2.2  matt 
   1104  1.10.2.2  matt 	malloc_mutex_lock(&base_mtx);
   1105  1.10.2.2  matt 	/* LINTED */
   1106  1.10.2.2  matt 	*(chunk_node_t **)node = base_chunk_nodes;
   1107  1.10.2.2  matt 	base_chunk_nodes = node;
   1108  1.10.2.2  matt 	malloc_mutex_unlock(&base_mtx);
   1109  1.10.2.2  matt }
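/*
 * [Editorial illustration -- not part of the original source.]  The node
 * cache above threads freed chunk_node_t objects onto a singly linked free
 * list by reusing the first word of each freed node as the "next" pointer,
 * so no separate list node is needed.  A minimal standalone sketch of that
 * trick follows; node_t and free_head are hypothetical names used only here.
 */
#if 0	/* Illustrative sketch; excluded from compilation. */
#include <stddef.h>

typedef struct {
	void	*chunk;		/* At least pointer-sized, so the freed  */
	size_t	size;		/* object can hold the free-list link.   */
} node_t;

static node_t	*free_head = NULL;

static void
node_dealloc(node_t *node)
{

	/* Overlay the next pointer on the freed node's own storage. */
	*(node_t **)(void *)node = free_head;
	free_head = node;
}

static node_t *
node_alloc(void)
{
	node_t	*ret;

	if (free_head == NULL)
		return (NULL);	/* Caller falls back to a real allocator. */
	ret = free_head;
	free_head = *(node_t **)(void *)ret;
	return (ret);
}
#endif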
   1110  1.10.2.2  matt 
   1111  1.10.2.2  matt /******************************************************************************/
   1112  1.10.2.2  matt 
   1113  1.10.2.2  matt #ifdef MALLOC_STATS
   1114  1.10.2.2  matt static void
   1115  1.10.2.2  matt stats_print(arena_t *arena)
   1116  1.10.2.2  matt {
   1117  1.10.2.2  matt 	unsigned i;
   1118  1.10.2.2  matt 	int gap_start;
   1119  1.10.2.2  matt 
   1120  1.10.2.2  matt 	malloc_printf(
   1121  1.10.2.2  matt 	    "          allocated/mapped            nmalloc      ndalloc\n");
   1122  1.10.2.2  matt 
   1123  1.10.2.2  matt 	malloc_printf("small: %12zu %-12s %12llu %12llu\n",
   1124  1.10.2.2  matt 	    arena->stats.allocated_small, "", arena->stats.nmalloc_small,
   1125  1.10.2.2  matt 	    arena->stats.ndalloc_small);
   1126  1.10.2.2  matt 	malloc_printf("large: %12zu %-12s %12llu %12llu\n",
   1127  1.10.2.2  matt 	    arena->stats.allocated_large, "", arena->stats.nmalloc_large,
   1128  1.10.2.2  matt 	    arena->stats.ndalloc_large);
   1129  1.10.2.2  matt 	malloc_printf("total: %12zu/%-12zu %12llu %12llu\n",
   1130  1.10.2.2  matt 	    arena->stats.allocated_small + arena->stats.allocated_large,
   1131  1.10.2.2  matt 	    arena->stats.mapped,
   1132  1.10.2.2  matt 	    arena->stats.nmalloc_small + arena->stats.nmalloc_large,
   1133  1.10.2.2  matt 	    arena->stats.ndalloc_small + arena->stats.ndalloc_large);
   1134  1.10.2.2  matt 
   1135  1.10.2.2  matt 	malloc_printf("bins:     bin   size regs pgs  requests   newruns"
   1136  1.10.2.2  matt 	    "    reruns maxruns curruns\n");
   1137  1.10.2.2  matt 	for (i = 0, gap_start = -1; i < ntbins + nqbins + nsbins; i++) {
   1138  1.10.2.2  matt 		if (arena->bins[i].stats.nrequests == 0) {
   1139  1.10.2.2  matt 			if (gap_start == -1)
   1140  1.10.2.2  matt 				gap_start = i;
   1141  1.10.2.2  matt 		} else {
   1142  1.10.2.2  matt 			if (gap_start != -1) {
   1143  1.10.2.2  matt 				if (i > gap_start + 1) {
   1144  1.10.2.2  matt 					/* Gap of more than one size class. */
   1145  1.10.2.2  matt 					malloc_printf("[%u..%u]\n",
   1146  1.10.2.2  matt 					    gap_start, i - 1);
   1147  1.10.2.2  matt 				} else {
   1148  1.10.2.2  matt 					/* Gap of one size class. */
   1149  1.10.2.2  matt 					malloc_printf("[%u]\n", gap_start);
   1150  1.10.2.2  matt 				}
   1151  1.10.2.2  matt 				gap_start = -1;
   1152  1.10.2.2  matt 			}
   1153  1.10.2.2  matt 			malloc_printf(
   1154  1.10.2.2  matt 			    "%13u %1s %4u %4u %3u %9llu %9llu"
   1155  1.10.2.2  matt 			    " %9llu %7lu %7lu\n",
   1156  1.10.2.2  matt 			    i,
   1157  1.10.2.2  matt 			    i < ntbins ? "T" : i < ntbins + nqbins ? "Q" : "S",
   1158  1.10.2.2  matt 			    arena->bins[i].reg_size,
   1159  1.10.2.2  matt 			    arena->bins[i].nregs,
   1160  1.10.2.2  matt 			    arena->bins[i].run_size >> pagesize_2pow,
   1161  1.10.2.2  matt 			    arena->bins[i].stats.nrequests,
   1162  1.10.2.2  matt 			    arena->bins[i].stats.nruns,
   1163  1.10.2.2  matt 			    arena->bins[i].stats.reruns,
   1164  1.10.2.2  matt 			    arena->bins[i].stats.highruns,
   1165  1.10.2.2  matt 			    arena->bins[i].stats.curruns);
   1166  1.10.2.2  matt 		}
   1167  1.10.2.2  matt 	}
   1168  1.10.2.2  matt 	if (gap_start != -1) {
   1169  1.10.2.2  matt 		if (i > gap_start + 1) {
   1170  1.10.2.2  matt 			/* Gap of more than one size class. */
   1171  1.10.2.2  matt 			malloc_printf("[%u..%u]\n", gap_start, i - 1);
   1172  1.10.2.2  matt 		} else {
   1173  1.10.2.2  matt 			/* Gap of one size class. */
   1174  1.10.2.2  matt 			malloc_printf("[%u]\n", gap_start);
   1175  1.10.2.2  matt 		}
   1176  1.10.2.2  matt 	}
   1177  1.10.2.2  matt }
   1178  1.10.2.2  matt #endif
   1179  1.10.2.2  matt 
   1180  1.10.2.2  matt /*
   1181  1.10.2.2  matt  * End Utility functions/macros.
   1182  1.10.2.2  matt  */
   1183  1.10.2.2  matt /******************************************************************************/
   1184  1.10.2.2  matt /*
   1185  1.10.2.2  matt  * Begin chunk management functions.
   1186  1.10.2.2  matt  */
   1187  1.10.2.2  matt 
   1188  1.10.2.2  matt #ifndef lint
   1189  1.10.2.2  matt static inline int
   1190  1.10.2.2  matt chunk_comp(chunk_node_t *a, chunk_node_t *b)
   1191  1.10.2.2  matt {
   1192  1.10.2.2  matt 
   1193  1.10.2.2  matt 	assert(a != NULL);
   1194  1.10.2.2  matt 	assert(b != NULL);
   1195  1.10.2.2  matt 
   1196  1.10.2.2  matt 	if ((uintptr_t)a->chunk < (uintptr_t)b->chunk)
   1197  1.10.2.2  matt 		return (-1);
   1198  1.10.2.2  matt 	else if (a->chunk == b->chunk)
   1199  1.10.2.2  matt 		return (0);
   1200  1.10.2.2  matt 	else
   1201  1.10.2.2  matt 		return (1);
   1202  1.10.2.2  matt }
   1203  1.10.2.2  matt 
   1204  1.10.2.2  matt /* Generate red-black tree code for chunks. */
   1205  1.10.2.2  matt RB_GENERATE_STATIC(chunk_tree_s, chunk_node_s, link, chunk_comp);
   1206  1.10.2.2  matt #endif
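/*
 * [Editorial illustration -- not part of the original source.]  The
 * comparator above is consumed by the <sys/tree.h> red-black tree macros:
 * RB_GENERATE_STATIC() emits the insert/find/remove functions, and callers
 * name the tree type in each operation.  A minimal sketch of the pattern,
 * under hypothetical names (ex_node_s, ex_tree_s, ex_cmp):
 */
#if 0	/* Illustrative sketch; excluded from compilation. */
#include <sys/tree.h>
#include <stdint.h>

typedef struct ex_node_s ex_node_t;
struct ex_node_s {
	RB_ENTRY(ex_node_s)	link;	/* Embedded tree linkage. */
	void			*addr;	/* Sort key. */
};

static int
ex_cmp(ex_node_t *a, ex_node_t *b)
{

	if ((uintptr_t)a->addr < (uintptr_t)b->addr)
		return (-1);
	else if (a->addr == b->addr)
		return (0);
	return (1);
}

RB_HEAD(ex_tree_s, ex_node_s);
RB_GENERATE_STATIC(ex_tree_s, ex_node_s, link, ex_cmp);

/*
 * Typical use, mirroring the old_chunks tree below:
 *
 *	struct ex_tree_s tree = RB_INITIALIZER(&tree);
 *	RB_INSERT(ex_tree_s, &tree, node);
 *	node = RB_MIN(ex_tree_s, &tree);
 *	RB_REMOVE(ex_tree_s, &tree, node);
 */
#endif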
   1207  1.10.2.2  matt 
   1208  1.10.2.2  matt static void *
   1209  1.10.2.2  matt pages_map_align(void *addr, size_t size, int align)
   1210  1.10.2.2  matt {
   1211  1.10.2.2  matt 	void *ret;
   1212  1.10.2.2  matt 
   1213  1.10.2.2  matt 	/*
   1214  1.10.2.2  matt 	 * We don't use MAP_FIXED here, because it can cause the *replacement*
   1215  1.10.2.2  matt 	 * of existing mappings, and we only want to create new mappings.
   1216  1.10.2.2  matt 	 */
   1217  1.10.2.2  matt 	ret = mmap(addr, size, PROT_READ | PROT_WRITE,
   1218  1.10.2.2  matt 	    MAP_PRIVATE | MAP_ANON | MAP_ALIGNED(align), -1, 0);
   1219  1.10.2.2  matt 	assert(ret != NULL);
   1220  1.10.2.2  matt 
   1221  1.10.2.2  matt 	if (ret == MAP_FAILED)
   1222  1.10.2.2  matt 		ret = NULL;
   1223  1.10.2.2  matt 	else if (addr != NULL && ret != addr) {
   1224  1.10.2.2  matt 		/*
   1225  1.10.2.2  matt 		 * We succeeded in mapping memory, but not in the right place.
   1226  1.10.2.2  matt 		 */
   1227  1.10.2.2  matt 		if (munmap(ret, size) == -1) {
   1228  1.10.2.2  matt 			char buf[STRERROR_BUF];
   1229  1.10.2.2  matt 
   1230  1.10.2.2  matt 			strerror_r(errno, buf, sizeof(buf));
   1231  1.10.2.2  matt 			_malloc_message(_getprogname(),
   1232  1.10.2.2  matt 			    ": (malloc) Error in munmap(): ", buf, "\n");
   1233  1.10.2.2  matt 			if (opt_abort)
   1234  1.10.2.2  matt 				abort();
   1235  1.10.2.2  matt 		}
   1236  1.10.2.2  matt 		ret = NULL;
   1237  1.10.2.2  matt 	}
   1238  1.10.2.2  matt 
   1239  1.10.2.2  matt 	assert(ret == NULL || (addr == NULL && ret != addr)
   1240  1.10.2.2  matt 	    || (addr != NULL && ret == addr));
   1241  1.10.2.2  matt 	return (ret);
   1242  1.10.2.2  matt }
   1243  1.10.2.2  matt 
   1244  1.10.2.2  matt static void *
   1245  1.10.2.2  matt pages_map(void *addr, size_t size)
   1246  1.10.2.2  matt {
   1247  1.10.2.2  matt 
   1248  1.10.2.2  matt 	return pages_map_align(addr, size, 0);
   1249  1.10.2.2  matt }
   1250  1.10.2.2  matt 
   1251  1.10.2.2  matt static void
   1252  1.10.2.2  matt pages_unmap(void *addr, size_t size)
   1253  1.10.2.2  matt {
   1254  1.10.2.2  matt 
   1255  1.10.2.2  matt 	if (munmap(addr, size) == -1) {
   1256  1.10.2.2  matt 		char buf[STRERROR_BUF];
   1257  1.10.2.2  matt 
   1258  1.10.2.2  matt 		strerror_r(errno, buf, sizeof(buf));
   1259  1.10.2.2  matt 		_malloc_message(_getprogname(),
   1260  1.10.2.2  matt 		    ": (malloc) Error in munmap(): ", buf, "\n");
   1261  1.10.2.2  matt 		if (opt_abort)
   1262  1.10.2.2  matt 			abort();
   1263  1.10.2.2  matt 	}
   1264  1.10.2.2  matt }
   1265  1.10.2.2  matt 
   1266  1.10.2.2  matt static void *
   1267  1.10.2.2  matt chunk_alloc(size_t size)
   1268  1.10.2.2  matt {
   1269  1.10.2.2  matt 	void *ret, *chunk;
   1270  1.10.2.2  matt 	chunk_node_t *tchunk, *delchunk;
   1271  1.10.2.2  matt 
   1272  1.10.2.2  matt 	assert(size != 0);
   1273  1.10.2.2  matt 	assert((size & chunksize_mask) == 0);
   1274  1.10.2.2  matt 
   1275  1.10.2.2  matt 	malloc_mutex_lock(&chunks_mtx);
   1276  1.10.2.2  matt 
   1277  1.10.2.2  matt 	if (size == chunksize) {
   1278  1.10.2.2  matt 		/*
   1279  1.10.2.2  matt 		 * Check for address ranges that were previously chunks and try
   1280  1.10.2.2  matt 		 * to use them.
   1281  1.10.2.2  matt 		 */
   1282  1.10.2.2  matt 
   1283  1.10.2.2  matt 		/* LINTED */
   1284  1.10.2.2  matt 		tchunk = RB_MIN(chunk_tree_s, &old_chunks);
   1285  1.10.2.2  matt 		while (tchunk != NULL) {
   1286  1.10.2.2  matt 			/* Found an address range.  Try to recycle it. */
   1287  1.10.2.2  matt 
   1288  1.10.2.2  matt 			chunk = tchunk->chunk;
   1289  1.10.2.2  matt 			delchunk = tchunk;
   1290  1.10.2.2  matt 			/* LINTED */
   1291  1.10.2.2  matt 			tchunk = RB_NEXT(chunk_tree_s, &old_chunks, delchunk);
   1292  1.10.2.2  matt 
   1293  1.10.2.2  matt 			/* Remove delchunk from the tree. */
   1294  1.10.2.2  matt 			/* LINTED */
   1295  1.10.2.2  matt 			RB_REMOVE(chunk_tree_s, &old_chunks, delchunk);
   1296  1.10.2.2  matt 			base_chunk_node_dealloc(delchunk);
   1297  1.10.2.2  matt 
   1298  1.10.2.2  matt #ifdef USE_BRK
   1299  1.10.2.2  matt 			if ((uintptr_t)chunk >= (uintptr_t)brk_base
   1300  1.10.2.2  matt 			    && (uintptr_t)chunk < (uintptr_t)brk_max) {
   1301  1.10.2.2  matt 				/* Re-use a previously freed brk chunk. */
   1302  1.10.2.2  matt 				ret = chunk;
   1303  1.10.2.2  matt 				goto RETURN;
   1304  1.10.2.2  matt 			}
   1305  1.10.2.2  matt #endif
   1306  1.10.2.2  matt 			if ((ret = pages_map(chunk, size)) != NULL) {
   1307  1.10.2.2  matt 				/* Success. */
   1308  1.10.2.2  matt 				goto RETURN;
   1309  1.10.2.2  matt 			}
   1310  1.10.2.2  matt 		}
   1311  1.10.2.2  matt 	}
   1312  1.10.2.2  matt 
   1313  1.10.2.2  matt 	/*
   1314  1.10.2.2  matt 	 * Try to over-allocate, but allow the OS to place the allocation
   1315  1.10.2.2  matt 	 * anywhere.  Beware of size_t wrap-around.
   1316  1.10.2.2  matt 	 */
   1317  1.10.2.2  matt 	if (size + chunksize > size) {
   1318  1.10.2.2  matt 		if ((ret = pages_map_align(NULL, size, chunksize_2pow))
   1319  1.10.2.2  matt 		    != NULL) {
   1320  1.10.2.2  matt 			goto RETURN;
   1321  1.10.2.2  matt 		}
   1322  1.10.2.2  matt 	}
   1323  1.10.2.2  matt 
   1324  1.10.2.2  matt #ifdef USE_BRK
   1325  1.10.2.2  matt 	/*
   1326  1.10.2.2  matt 	 * Try to create allocations in brk, in order to make full use of
   1327  1.10.2.2  matt 	 * limited address space.
   1328  1.10.2.2  matt 	 */
   1329  1.10.2.2  matt 	if (brk_prev != (void *)-1) {
   1330  1.10.2.2  matt 		void *brk_cur;
   1331  1.10.2.2  matt 		intptr_t incr;
   1332  1.10.2.2  matt 
   1333  1.10.2.2  matt 		/*
   1334  1.10.2.2  matt 		 * The loop is necessary to recover from races with other
   1335  1.10.2.2  matt 		 * threads that are using brk for something other than malloc.
   1336  1.10.2.2  matt 		 */
   1337  1.10.2.2  matt 		malloc_mutex_lock(&brk_mtx);
   1338  1.10.2.2  matt 		do {
   1339  1.10.2.2  matt 			/* Get the current end of brk. */
   1340  1.10.2.2  matt 			brk_cur = sbrk(0);
   1341  1.10.2.2  matt 
   1342  1.10.2.2  matt 			/*
   1343  1.10.2.2  matt 			 * Calculate how much padding is necessary to
   1344  1.10.2.2  matt 			 * chunk-align the end of brk.
   1345  1.10.2.2  matt 			 */
   1346  1.10.2.2  matt 			incr = (intptr_t)size
   1347  1.10.2.2  matt 			    - (intptr_t)CHUNK_ADDR2OFFSET(brk_cur);
   1348  1.10.2.2  matt 			if (incr == size) {
   1349  1.10.2.2  matt 				ret = brk_cur;
   1350  1.10.2.2  matt 			} else {
   1351  1.10.2.2  matt 				ret = (void *)((intptr_t)brk_cur + incr);
   1352  1.10.2.2  matt 				incr += size;
   1353  1.10.2.2  matt 			}
   1354  1.10.2.2  matt 
   1355  1.10.2.2  matt 			brk_prev = sbrk(incr);
   1356  1.10.2.2  matt 			if (brk_prev == brk_cur) {
   1357  1.10.2.2  matt 				/* Success. */
   1358  1.10.2.2  matt 				malloc_mutex_unlock(&brk_mtx);
   1359  1.10.2.2  matt 				brk_max = (void *)((intptr_t)ret + size);
   1360  1.10.2.2  matt 				goto RETURN;
   1361  1.10.2.2  matt 			}
    1362  1.10.2.2  matt 		} while (brk_prev != (void *)-1);
   1363  1.10.2.2  matt 		malloc_mutex_unlock(&brk_mtx);
   1364  1.10.2.2  matt 	}
   1365  1.10.2.2  matt #endif
   1366  1.10.2.2  matt 
   1367  1.10.2.2  matt 	/* All strategies for allocation failed. */
   1368  1.10.2.2  matt 	ret = NULL;
   1369  1.10.2.2  matt RETURN:
   1370  1.10.2.2  matt 	if (ret != NULL) {
   1371  1.10.2.2  matt 		chunk_node_t key;
   1372  1.10.2.2  matt 		/*
   1373  1.10.2.2  matt 		 * Clean out any entries in old_chunks that overlap with the
   1374  1.10.2.2  matt 		 * memory we just allocated.
   1375  1.10.2.2  matt 		 */
   1376  1.10.2.2  matt 		key.chunk = ret;
   1377  1.10.2.2  matt 		/* LINTED */
   1378  1.10.2.2  matt 		tchunk = RB_NFIND(chunk_tree_s, &old_chunks, &key);
   1379  1.10.2.2  matt 		while (tchunk != NULL
   1380  1.10.2.2  matt 		    && (uintptr_t)tchunk->chunk >= (uintptr_t)ret
   1381  1.10.2.2  matt 		    && (uintptr_t)tchunk->chunk < (uintptr_t)ret + size) {
   1382  1.10.2.2  matt 			delchunk = tchunk;
   1383  1.10.2.2  matt 			/* LINTED */
   1384  1.10.2.2  matt 			tchunk = RB_NEXT(chunk_tree_s, &old_chunks, delchunk);
   1385  1.10.2.2  matt 			/* LINTED */
   1386  1.10.2.2  matt 			RB_REMOVE(chunk_tree_s, &old_chunks, delchunk);
   1387  1.10.2.2  matt 			base_chunk_node_dealloc(delchunk);
   1388  1.10.2.2  matt 		}
   1389  1.10.2.2  matt 
   1390  1.10.2.2  matt 	}
   1391  1.10.2.2  matt #ifdef MALLOC_STATS
   1392  1.10.2.2  matt 	if (ret != NULL) {
   1393  1.10.2.2  matt 		stats_chunks.nchunks += (size / chunksize);
   1394  1.10.2.2  matt 		stats_chunks.curchunks += (size / chunksize);
   1395  1.10.2.2  matt 	}
   1396  1.10.2.2  matt 	if (stats_chunks.curchunks > stats_chunks.highchunks)
   1397  1.10.2.2  matt 		stats_chunks.highchunks = stats_chunks.curchunks;
   1398  1.10.2.2  matt #endif
   1399  1.10.2.2  matt 	malloc_mutex_unlock(&chunks_mtx);
   1400  1.10.2.2  matt 
   1401  1.10.2.2  matt 	assert(CHUNK_ADDR2BASE(ret) == ret);
   1402  1.10.2.2  matt 	return (ret);
   1403  1.10.2.2  matt }
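/*
 * [Editorial illustration -- not part of the original source.]  Worked
 * example of the brk alignment arithmetic above, assuming for the sake of
 * the numbers that chunksize == 0x100000 (1 MB), size == chunksize, that
 * CHUNK_ADDR2OFFSET() yields an address's offset within its chunk, and that
 * sbrk(0) returns brk_cur == 0x123400:
 *
 *	CHUNK_ADDR2OFFSET(brk_cur) == 0x23400
 *	incr = 0x100000 - 0x23400  == 0xdcc00
 *	incr != size, so:
 *	    ret  = brk_cur + incr  == 0x200000   (chunk-aligned)
 *	    incr += size           == 0x1dcc00
 *
 * sbrk(incr) then extends the data segment to 0x300000, i.e. exactly
 * through ret + size; the bytes between brk_cur and ret are padding spent
 * to reach the next chunk boundary.
 */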
   1404  1.10.2.2  matt 
   1405  1.10.2.2  matt static void
   1406  1.10.2.2  matt chunk_dealloc(void *chunk, size_t size)
   1407  1.10.2.2  matt {
   1408  1.10.2.2  matt 	chunk_node_t *node;
   1409  1.10.2.2  matt 
   1410  1.10.2.2  matt 	assert(chunk != NULL);
   1411  1.10.2.2  matt 	assert(CHUNK_ADDR2BASE(chunk) == chunk);
   1412  1.10.2.2  matt 	assert(size != 0);
   1413  1.10.2.2  matt 	assert((size & chunksize_mask) == 0);
   1414  1.10.2.2  matt 
   1415  1.10.2.2  matt 	malloc_mutex_lock(&chunks_mtx);
   1416  1.10.2.2  matt 
   1417  1.10.2.2  matt #ifdef USE_BRK
   1418  1.10.2.2  matt 	if ((uintptr_t)chunk >= (uintptr_t)brk_base
   1419  1.10.2.2  matt 	    && (uintptr_t)chunk < (uintptr_t)brk_max) {
   1420  1.10.2.2  matt 		void *brk_cur;
   1421  1.10.2.2  matt 
   1422  1.10.2.2  matt 		malloc_mutex_lock(&brk_mtx);
   1423  1.10.2.2  matt 		/* Get the current end of brk. */
   1424  1.10.2.2  matt 		brk_cur = sbrk(0);
   1425  1.10.2.2  matt 
   1426  1.10.2.2  matt 		/*
   1427  1.10.2.2  matt 		 * Try to shrink the data segment if this chunk is at the end
   1428  1.10.2.2  matt 		 * of the data segment.  The sbrk() call here is subject to a
   1429  1.10.2.2  matt 		 * race condition with threads that use brk(2) or sbrk(2)
   1430  1.10.2.2  matt 		 * directly, but the alternative would be to leak memory for
   1431  1.10.2.2  matt 		 * the sake of poorly designed multi-threaded programs.
   1432  1.10.2.2  matt 		 */
   1433  1.10.2.2  matt 		if (brk_cur == brk_max
   1434  1.10.2.2  matt 		    && (void *)((uintptr_t)chunk + size) == brk_max
   1435  1.10.2.2  matt 		    && sbrk(-(intptr_t)size) == brk_max) {
   1436  1.10.2.2  matt 			malloc_mutex_unlock(&brk_mtx);
   1437  1.10.2.2  matt 			if (brk_prev == brk_max) {
   1438  1.10.2.2  matt 				/* Success. */
   1439  1.10.2.2  matt 				brk_prev = (void *)((intptr_t)brk_max
   1440  1.10.2.2  matt 				    - (intptr_t)size);
   1441  1.10.2.2  matt 				brk_max = brk_prev;
   1442  1.10.2.2  matt 			}
   1443  1.10.2.2  matt 		} else {
   1444  1.10.2.2  matt 			size_t offset;
   1445  1.10.2.2  matt 
   1446  1.10.2.2  matt 			malloc_mutex_unlock(&brk_mtx);
   1447  1.10.2.2  matt 			madvise(chunk, size, MADV_FREE);
   1448  1.10.2.2  matt 
   1449  1.10.2.2  matt 			/*
   1450  1.10.2.2  matt 			 * Iteratively create records of each chunk-sized
    1451  1.10.2.2  matt 			 * memory region that 'chunk' is composed of, so that
   1452  1.10.2.2  matt 			 * the address range can be recycled if memory usage
   1453  1.10.2.2  matt 			 * increases later on.
   1454  1.10.2.2  matt 			 */
   1455  1.10.2.2  matt 			for (offset = 0; offset < size; offset += chunksize) {
   1456  1.10.2.2  matt 				node = base_chunk_node_alloc();
   1457  1.10.2.2  matt 				if (node == NULL)
   1458  1.10.2.2  matt 					break;
   1459  1.10.2.2  matt 
   1460  1.10.2.2  matt 				node->chunk = (void *)((uintptr_t)chunk
   1461  1.10.2.2  matt 				    + (uintptr_t)offset);
   1462  1.10.2.2  matt 				node->size = chunksize;
   1463  1.10.2.2  matt 				/* LINTED */
   1464  1.10.2.2  matt 				RB_INSERT(chunk_tree_s, &old_chunks, node);
   1465  1.10.2.2  matt 			}
   1466  1.10.2.2  matt 		}
   1467  1.10.2.2  matt 	} else {
   1468  1.10.2.2  matt #endif
   1469  1.10.2.2  matt 		pages_unmap(chunk, size);
   1470  1.10.2.2  matt 
   1471  1.10.2.2  matt 		/*
   1472  1.10.2.2  matt 		 * Make a record of the chunk's address, so that the address
   1473  1.10.2.2  matt 		 * range can be recycled if memory usage increases later on.
   1474  1.10.2.2  matt 		 * Don't bother to create entries if (size > chunksize), since
   1475  1.10.2.2  matt 		 * doing so could cause scalability issues for truly gargantuan
   1476  1.10.2.2  matt 		 * objects (many gigabytes or larger).
   1477  1.10.2.2  matt 		 */
   1478  1.10.2.2  matt 		if (size == chunksize) {
   1479  1.10.2.2  matt 			node = base_chunk_node_alloc();
   1480  1.10.2.2  matt 			if (node != NULL) {
   1481  1.10.2.2  matt 				node->chunk = (void *)(uintptr_t)chunk;
   1482  1.10.2.2  matt 				node->size = chunksize;
   1483  1.10.2.2  matt 				/* LINTED */
   1484  1.10.2.2  matt 				RB_INSERT(chunk_tree_s, &old_chunks, node);
   1485  1.10.2.2  matt 			}
   1486  1.10.2.2  matt 		}
   1487  1.10.2.2  matt #ifdef USE_BRK
   1488  1.10.2.2  matt 	}
   1489  1.10.2.2  matt #endif
   1490  1.10.2.2  matt 
   1491  1.10.2.2  matt #ifdef MALLOC_STATS
   1492  1.10.2.2  matt 	stats_chunks.curchunks -= (size / chunksize);
   1493  1.10.2.2  matt #endif
   1494  1.10.2.2  matt 	malloc_mutex_unlock(&chunks_mtx);
   1495  1.10.2.2  matt }
   1496  1.10.2.2  matt 
   1497  1.10.2.2  matt /*
   1498  1.10.2.2  matt  * End chunk management functions.
   1499  1.10.2.2  matt  */
   1500  1.10.2.2  matt /******************************************************************************/
   1501  1.10.2.2  matt /*
   1502  1.10.2.2  matt  * Begin arena.
   1503  1.10.2.2  matt  */
   1504  1.10.2.2  matt 
   1505  1.10.2.2  matt /*
   1506  1.10.2.2  matt  * Choose an arena based on a per-thread value (fast-path code, calls slow-path
   1507  1.10.2.2  matt  * code if necessary).
   1508  1.10.2.2  matt  */
   1509  1.10.2.2  matt static inline arena_t *
   1510  1.10.2.2  matt choose_arena(void)
   1511  1.10.2.2  matt {
   1512  1.10.2.2  matt 	arena_t *ret;
   1513  1.10.2.2  matt 
   1514  1.10.2.2  matt 	/*
   1515  1.10.2.2  matt 	 * We can only use TLS if this is a PIC library, since for the static
   1516  1.10.2.2  matt 	 * library version, libc's malloc is used by TLS allocation, which
   1517  1.10.2.2  matt 	 * introduces a bootstrapping issue.
   1518  1.10.2.2  matt 	 */
   1519  1.10.2.2  matt 	if (__isthreaded == false) {
   1520  1.10.2.2  matt 	    /*
   1521  1.10.2.2  matt 	     * Avoid the overhead of TLS for single-threaded operation.  If the
   1522  1.10.2.2  matt 	     * app switches to threaded mode, the initial thread may end up
   1523  1.10.2.2  matt 	     * being assigned to some other arena, but this one-time switch
   1524  1.10.2.2  matt 	     * shouldn't cause significant issues.
   1525  1.10.2.2  matt 	     */
   1526  1.10.2.2  matt 	    return (arenas[0]);
   1527  1.10.2.2  matt 	}
   1528  1.10.2.2  matt 
   1529  1.10.2.2  matt 	ret = get_arenas_map();
   1530  1.10.2.2  matt 	if (ret == NULL)
   1531  1.10.2.2  matt 		ret = choose_arena_hard();
   1532  1.10.2.2  matt 
   1533  1.10.2.2  matt 	assert(ret != NULL);
   1534  1.10.2.2  matt 	return (ret);
   1535  1.10.2.2  matt }
   1536  1.10.2.2  matt 
   1537  1.10.2.2  matt /*
   1538  1.10.2.2  matt  * Choose an arena based on a per-thread value (slow-path code only, called
   1539  1.10.2.2  matt  * only by choose_arena()).
   1540  1.10.2.2  matt  */
   1541  1.10.2.2  matt static arena_t *
   1542  1.10.2.2  matt choose_arena_hard(void)
   1543  1.10.2.2  matt {
   1544  1.10.2.2  matt 	arena_t *ret;
   1545  1.10.2.2  matt 
   1546  1.10.2.2  matt 	assert(__isthreaded);
   1547  1.10.2.2  matt 
   1548  1.10.2.2  matt 	/* Assign one of the arenas to this thread, in a round-robin fashion. */
   1549  1.10.2.2  matt 	malloc_mutex_lock(&arenas_mtx);
   1550  1.10.2.2  matt 	ret = arenas[next_arena];
   1551  1.10.2.2  matt 	if (ret == NULL)
   1552  1.10.2.2  matt 		ret = arenas_extend(next_arena);
   1553  1.10.2.2  matt 	if (ret == NULL) {
   1554  1.10.2.2  matt 		/*
   1555  1.10.2.2  matt 		 * Make sure that this function never returns NULL, so that
   1556  1.10.2.2  matt 		 * choose_arena() doesn't have to check for a NULL return
   1557  1.10.2.2  matt 		 * value.
   1558  1.10.2.2  matt 		 */
   1559  1.10.2.2  matt 		ret = arenas[0];
   1560  1.10.2.2  matt 	}
   1561  1.10.2.2  matt 	next_arena = (next_arena + 1) % narenas;
   1562  1.10.2.2  matt 	malloc_mutex_unlock(&arenas_mtx);
   1563  1.10.2.2  matt 	set_arenas_map(ret);
   1564  1.10.2.2  matt 
   1565  1.10.2.2  matt 	return (ret);
   1566  1.10.2.2  matt }
   1567  1.10.2.2  matt 
   1568  1.10.2.2  matt #ifndef lint
   1569  1.10.2.2  matt static inline int
   1570  1.10.2.2  matt arena_chunk_comp(arena_chunk_t *a, arena_chunk_t *b)
   1571  1.10.2.2  matt {
   1572  1.10.2.2  matt 
   1573  1.10.2.2  matt 	assert(a != NULL);
   1574  1.10.2.2  matt 	assert(b != NULL);
   1575  1.10.2.2  matt 
   1576  1.10.2.2  matt 	if ((uintptr_t)a < (uintptr_t)b)
   1577  1.10.2.2  matt 		return (-1);
   1578  1.10.2.2  matt 	else if (a == b)
   1579  1.10.2.2  matt 		return (0);
   1580  1.10.2.2  matt 	else
   1581  1.10.2.2  matt 		return (1);
   1582  1.10.2.2  matt }
   1583  1.10.2.2  matt 
   1584  1.10.2.2  matt /* Generate red-black tree code for arena chunks. */
   1585  1.10.2.2  matt RB_GENERATE_STATIC(arena_chunk_tree_s, arena_chunk_s, link, arena_chunk_comp);
   1586  1.10.2.2  matt #endif
   1587  1.10.2.2  matt 
   1588  1.10.2.2  matt #ifndef lint
   1589  1.10.2.2  matt static inline int
   1590  1.10.2.2  matt arena_run_comp(arena_run_t *a, arena_run_t *b)
   1591  1.10.2.2  matt {
   1592  1.10.2.2  matt 
   1593  1.10.2.2  matt 	assert(a != NULL);
   1594  1.10.2.2  matt 	assert(b != NULL);
   1595  1.10.2.2  matt 
   1596  1.10.2.2  matt 	if ((uintptr_t)a < (uintptr_t)b)
   1597  1.10.2.2  matt 		return (-1);
   1598  1.10.2.2  matt 	else if (a == b)
   1599  1.10.2.2  matt 		return (0);
   1600  1.10.2.2  matt 	else
   1601  1.10.2.2  matt 		return (1);
   1602  1.10.2.2  matt }
   1603  1.10.2.2  matt 
   1604  1.10.2.2  matt /* Generate red-black tree code for arena runs. */
   1605  1.10.2.2  matt RB_GENERATE_STATIC(arena_run_tree_s, arena_run_s, link, arena_run_comp);
   1606  1.10.2.2  matt #endif
   1607  1.10.2.2  matt 
   1608  1.10.2.2  matt static inline void *
   1609  1.10.2.2  matt arena_run_reg_alloc(arena_run_t *run, arena_bin_t *bin)
   1610  1.10.2.2  matt {
   1611  1.10.2.2  matt 	void *ret;
   1612  1.10.2.2  matt 	unsigned i, mask, bit, regind;
   1613  1.10.2.2  matt 
   1614  1.10.2.2  matt 	assert(run->magic == ARENA_RUN_MAGIC);
   1615  1.10.2.2  matt 	assert(run->regs_minelm < bin->regs_mask_nelms);
   1616  1.10.2.2  matt 
   1617  1.10.2.2  matt 	/*
   1618  1.10.2.2  matt 	 * Move the first check outside the loop, so that run->regs_minelm can
   1619  1.10.2.2  matt 	 * be updated unconditionally, without the possibility of updating it
   1620  1.10.2.2  matt 	 * multiple times.
   1621  1.10.2.2  matt 	 */
   1622  1.10.2.2  matt 	i = run->regs_minelm;
   1623  1.10.2.2  matt 	mask = run->regs_mask[i];
   1624  1.10.2.2  matt 	if (mask != 0) {
   1625  1.10.2.2  matt 		/* Usable allocation found. */
   1626  1.10.2.2  matt 		bit = ffs((int)mask) - 1;
   1627  1.10.2.2  matt 
   1628  1.10.2.2  matt 		regind = ((i << (SIZEOF_INT_2POW + 3)) + bit);
   1629  1.10.2.2  matt 		ret = (void *)(((uintptr_t)run) + bin->reg0_offset
   1630  1.10.2.2  matt 		    + (bin->reg_size * regind));
   1631  1.10.2.2  matt 
   1632  1.10.2.2  matt 		/* Clear bit. */
   1633  1.10.2.2  matt 		mask ^= (1 << bit);
   1634  1.10.2.2  matt 		run->regs_mask[i] = mask;
   1635  1.10.2.2  matt 
   1636  1.10.2.2  matt 		return (ret);
   1637  1.10.2.2  matt 	}
   1638  1.10.2.2  matt 
   1639  1.10.2.2  matt 	for (i++; i < bin->regs_mask_nelms; i++) {
   1640  1.10.2.2  matt 		mask = run->regs_mask[i];
   1641  1.10.2.2  matt 		if (mask != 0) {
   1642  1.10.2.2  matt 			/* Usable allocation found. */
   1643  1.10.2.2  matt 			bit = ffs((int)mask) - 1;
   1644  1.10.2.2  matt 
   1645  1.10.2.2  matt 			regind = ((i << (SIZEOF_INT_2POW + 3)) + bit);
   1646  1.10.2.2  matt 			ret = (void *)(((uintptr_t)run) + bin->reg0_offset
   1647  1.10.2.2  matt 			    + (bin->reg_size * regind));
   1648  1.10.2.2  matt 
   1649  1.10.2.2  matt 			/* Clear bit. */
   1650  1.10.2.2  matt 			mask ^= (1 << bit);
   1651  1.10.2.2  matt 			run->regs_mask[i] = mask;
   1652  1.10.2.2  matt 
   1653  1.10.2.2  matt 			/*
   1654  1.10.2.2  matt 			 * Make a note that nothing before this element
   1655  1.10.2.2  matt 			 * contains a free region.
   1656  1.10.2.2  matt 			 */
   1657  1.10.2.2  matt 			run->regs_minelm = i; /* Low payoff: + (mask == 0); */
   1658  1.10.2.2  matt 
   1659  1.10.2.2  matt 			return (ret);
   1660  1.10.2.2  matt 		}
   1661  1.10.2.2  matt 	}
   1662  1.10.2.2  matt 	/* Not reached. */
   1663  1.10.2.2  matt 	/* LINTED */
   1664  1.10.2.2  matt 	assert(0);
   1665  1.10.2.2  matt 	return (NULL);
   1666  1.10.2.2  matt }
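/*
 * [Editorial illustration -- not part of the original source.]  The scan
 * above treats each unsigned element of regs_mask[] as 32 "region free"
 * bits (for SIZEOF_INT_2POW == 2) and uses ffs() to grab the lowest set
 * bit.  The sketch below reduces that to a single mask element; the names
 * bitmap_alloc_one and elem are assumptions made for this example.
 */
#if 0	/* Illustrative sketch; excluded from compilation. */
#include <strings.h>

/*
 * Claim one free region recorded in *maskp and return its region index, or
 * -1 if the element has no free regions.  Bit b of element elem covers
 * region (elem * 32) + b.
 */
static int
bitmap_alloc_one(unsigned *maskp, unsigned elem)
{
	unsigned mask = *maskp;
	int bit;

	if (mask == 0)
		return (-1);
	bit = ffs((int)mask) - 1;	/* Lowest set bit == lowest free region. */
	*maskp = mask ^ (1U << bit);	/* Clear it: the region is now in use. */
	return ((int)(elem * 32) + bit);
}

/*
 * Example: elem == 1, mask == 0x50 (bits 4 and 6 set) -> ffs() returns 5,
 * bit == 4, region index == 36, and the mask becomes 0x40.
 */
#endif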
   1667  1.10.2.2  matt 
   1668  1.10.2.2  matt static inline void
   1669  1.10.2.2  matt arena_run_reg_dalloc(arena_run_t *run, arena_bin_t *bin, void *ptr, size_t size)
   1670  1.10.2.2  matt {
   1671  1.10.2.2  matt 	/*
    1672  1.10.2.2  matt 	 * To divide by a number D that is not a power of two, we multiply
   1673  1.10.2.2  matt 	 * by (2^21 / D) and then right shift by 21 positions.
   1674  1.10.2.2  matt 	 *
   1675  1.10.2.2  matt 	 *   X / D
   1676  1.10.2.2  matt 	 *
   1677  1.10.2.2  matt 	 * becomes
   1678  1.10.2.2  matt 	 *
   1679  1.10.2.2  matt 	 *   (X * size_invs[(D >> QUANTUM_2POW_MIN) - 3]) >> SIZE_INV_SHIFT
   1680  1.10.2.2  matt 	 */
   1681  1.10.2.2  matt #define SIZE_INV_SHIFT 21
   1682  1.10.2.2  matt #define SIZE_INV(s) (((1 << SIZE_INV_SHIFT) / (s << QUANTUM_2POW_MIN)) + 1)
   1683  1.10.2.2  matt 	static const unsigned size_invs[] = {
   1684  1.10.2.2  matt 	    SIZE_INV(3),
   1685  1.10.2.2  matt 	    SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7),
   1686  1.10.2.2  matt 	    SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11),
   1687  1.10.2.2  matt 	    SIZE_INV(12),SIZE_INV(13), SIZE_INV(14), SIZE_INV(15),
   1688  1.10.2.2  matt 	    SIZE_INV(16),SIZE_INV(17), SIZE_INV(18), SIZE_INV(19),
   1689  1.10.2.2  matt 	    SIZE_INV(20),SIZE_INV(21), SIZE_INV(22), SIZE_INV(23),
   1690  1.10.2.2  matt 	    SIZE_INV(24),SIZE_INV(25), SIZE_INV(26), SIZE_INV(27),
   1691  1.10.2.2  matt 	    SIZE_INV(28),SIZE_INV(29), SIZE_INV(30), SIZE_INV(31)
   1692  1.10.2.2  matt #if (QUANTUM_2POW_MIN < 4)
   1693  1.10.2.2  matt 	    ,
   1694  1.10.2.2  matt 	    SIZE_INV(32), SIZE_INV(33), SIZE_INV(34), SIZE_INV(35),
   1695  1.10.2.2  matt 	    SIZE_INV(36), SIZE_INV(37), SIZE_INV(38), SIZE_INV(39),
   1696  1.10.2.2  matt 	    SIZE_INV(40), SIZE_INV(41), SIZE_INV(42), SIZE_INV(43),
   1697  1.10.2.2  matt 	    SIZE_INV(44), SIZE_INV(45), SIZE_INV(46), SIZE_INV(47),
   1698  1.10.2.2  matt 	    SIZE_INV(48), SIZE_INV(49), SIZE_INV(50), SIZE_INV(51),
   1699  1.10.2.2  matt 	    SIZE_INV(52), SIZE_INV(53), SIZE_INV(54), SIZE_INV(55),
   1700  1.10.2.2  matt 	    SIZE_INV(56), SIZE_INV(57), SIZE_INV(58), SIZE_INV(59),
   1701  1.10.2.2  matt 	    SIZE_INV(60), SIZE_INV(61), SIZE_INV(62), SIZE_INV(63)
   1702  1.10.2.2  matt #endif
   1703  1.10.2.2  matt 	};
   1704  1.10.2.2  matt 	unsigned diff, regind, elm, bit;
   1705  1.10.2.2  matt 
   1706  1.10.2.2  matt 	/* LINTED */
   1707  1.10.2.2  matt 	assert(run->magic == ARENA_RUN_MAGIC);
   1708  1.10.2.2  matt 	assert(((sizeof(size_invs)) / sizeof(unsigned)) + 3
   1709  1.10.2.2  matt 	    >= (SMALL_MAX_DEFAULT >> QUANTUM_2POW_MIN));
   1710  1.10.2.2  matt 
   1711  1.10.2.2  matt 	/*
   1712  1.10.2.2  matt 	 * Avoid doing division with a variable divisor if possible.  Using
   1713  1.10.2.2  matt 	 * actual division here can reduce allocator throughput by over 20%!
   1714  1.10.2.2  matt 	 */
   1715  1.10.2.2  matt 	diff = (unsigned)((uintptr_t)ptr - (uintptr_t)run - bin->reg0_offset);
   1716  1.10.2.2  matt 	if ((size & (size - 1)) == 0) {
   1717  1.10.2.2  matt 		/*
   1718  1.10.2.2  matt 		 * log2_table allows fast division of a power of two in the
   1719  1.10.2.2  matt 		 * [1..128] range.
   1720  1.10.2.2  matt 		 *
   1721  1.10.2.2  matt 		 * (x / divisor) becomes (x >> log2_table[divisor - 1]).
   1722  1.10.2.2  matt 		 */
   1723  1.10.2.2  matt 		static const unsigned char log2_table[] = {
   1724  1.10.2.2  matt 		    0, 1, 0, 2, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4,
   1725  1.10.2.2  matt 		    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5,
   1726  1.10.2.2  matt 		    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
   1727  1.10.2.2  matt 		    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6,
   1728  1.10.2.2  matt 		    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
   1729  1.10.2.2  matt 		    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
   1730  1.10.2.2  matt 		    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
   1731  1.10.2.2  matt 		    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7
   1732  1.10.2.2  matt 		};
   1733  1.10.2.2  matt 
   1734  1.10.2.2  matt 		if (size <= 128)
   1735  1.10.2.2  matt 			regind = (diff >> log2_table[size - 1]);
   1736  1.10.2.2  matt 		else if (size <= 32768)
   1737  1.10.2.2  matt 			regind = diff >> (8 + log2_table[(size >> 8) - 1]);
   1738  1.10.2.2  matt 		else {
   1739  1.10.2.2  matt 			/*
   1740  1.10.2.2  matt 			 * The page size is too large for us to use the lookup
   1741  1.10.2.2  matt 			 * table.  Use real division.
   1742  1.10.2.2  matt 			 */
   1743  1.10.2.2  matt 			regind = (unsigned)(diff / size);
   1744  1.10.2.2  matt 		}
   1745  1.10.2.2  matt 	} else if (size <= ((sizeof(size_invs) / sizeof(unsigned))
   1746  1.10.2.2  matt 	    << QUANTUM_2POW_MIN) + 2) {
   1747  1.10.2.2  matt 		regind = size_invs[(size >> QUANTUM_2POW_MIN) - 3] * diff;
   1748  1.10.2.2  matt 		regind >>= SIZE_INV_SHIFT;
   1749  1.10.2.2  matt 	} else {
   1750  1.10.2.2  matt 		/*
   1751  1.10.2.2  matt 		 * size_invs isn't large enough to handle this size class, so
   1752  1.10.2.2  matt 		 * calculate regind using actual division.  This only happens
   1753  1.10.2.2  matt 		 * if the user increases small_max via the 'S' runtime
   1754  1.10.2.2  matt 		 * configuration option.
   1755  1.10.2.2  matt 		 */
   1756  1.10.2.2  matt 		regind = (unsigned)(diff / size);
    1757  1.10.2.2  matt 	}
   1758  1.10.2.2  matt 	assert(diff == regind * size);
   1759  1.10.2.2  matt 	assert(regind < bin->nregs);
   1760  1.10.2.2  matt 
   1761  1.10.2.2  matt 	elm = regind >> (SIZEOF_INT_2POW + 3);
   1762  1.10.2.2  matt 	if (elm < run->regs_minelm)
   1763  1.10.2.2  matt 		run->regs_minelm = elm;
   1764  1.10.2.2  matt 	bit = regind - (elm << (SIZEOF_INT_2POW + 3));
   1765  1.10.2.2  matt 	assert((run->regs_mask[elm] & (1 << bit)) == 0);
   1766  1.10.2.2  matt 	run->regs_mask[elm] |= (1 << bit);
   1767  1.10.2.2  matt #undef SIZE_INV
   1768  1.10.2.2  matt #undef SIZE_INV_SHIFT
   1769  1.10.2.2  matt }
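/*
 * [Editorial illustration -- not part of the original source.]  Worked
 * example of the multiply-and-shift division above, assuming a 16-byte
 * quantum (QUANTUM_2POW_MIN == 4) and a 48-byte region size:
 *
 *	size_invs[(48 >> 4) - 3] == SIZE_INV(3)
 *	                         == ((1 << 21) / 48) + 1 == 43691
 *
 *	diff == 96   (region 2):  (96   * 43691) >> 21 ==  2
 *	diff == 2064 (region 43): (2064 * 43691) >> 21 == 43
 *
 * The "+ 1" in SIZE_INV() biases the truncated reciprocal upward so the
 * shifted product never rounds down to the previous region index for the
 * region counts used here; the assert(diff == regind * size) in the
 * function above checks exactly that.
 */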
   1770  1.10.2.2  matt 
   1771  1.10.2.2  matt static void
   1772  1.10.2.2  matt arena_run_split(arena_t *arena, arena_run_t *run, size_t size)
   1773  1.10.2.2  matt {
   1774  1.10.2.2  matt 	arena_chunk_t *chunk;
   1775  1.10.2.2  matt 	unsigned run_ind, map_offset, total_pages, need_pages, rem_pages;
   1776  1.10.2.2  matt 	unsigned i;
   1777  1.10.2.2  matt 
   1778  1.10.2.2  matt 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
   1779  1.10.2.2  matt 	run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk)
   1780  1.10.2.2  matt 	    >> pagesize_2pow);
   1781  1.10.2.2  matt 	total_pages = chunk->map[run_ind].npages;
   1782  1.10.2.2  matt 	need_pages = (unsigned)(size >> pagesize_2pow);
   1783  1.10.2.2  matt 	assert(need_pages <= total_pages);
   1784  1.10.2.2  matt 	rem_pages = total_pages - need_pages;
   1785  1.10.2.2  matt 
   1786  1.10.2.2  matt 	/* Split enough pages from the front of run to fit allocation size. */
   1787  1.10.2.2  matt 	map_offset = run_ind;
   1788  1.10.2.2  matt 	for (i = 0; i < need_pages; i++) {
   1789  1.10.2.2  matt 		chunk->map[map_offset + i].npages = need_pages;
   1790  1.10.2.2  matt 		chunk->map[map_offset + i].pos = i;
   1791  1.10.2.2  matt 	}
   1792  1.10.2.2  matt 
   1793  1.10.2.2  matt 	/* Keep track of trailing unused pages for later use. */
   1794  1.10.2.2  matt 	if (rem_pages > 0) {
   1795  1.10.2.2  matt 		/* Update map for trailing pages. */
   1796  1.10.2.2  matt 		map_offset += need_pages;
   1797  1.10.2.2  matt 		chunk->map[map_offset].npages = rem_pages;
   1798  1.10.2.2  matt 		chunk->map[map_offset].pos = POS_FREE;
   1799  1.10.2.2  matt 		chunk->map[map_offset + rem_pages - 1].npages = rem_pages;
   1800  1.10.2.2  matt 		chunk->map[map_offset + rem_pages - 1].pos = POS_FREE;
   1801  1.10.2.2  matt 	}
   1802  1.10.2.2  matt 
   1803  1.10.2.2  matt 	chunk->pages_used += need_pages;
   1804  1.10.2.2  matt }
   1805  1.10.2.2  matt 
   1806  1.10.2.2  matt static arena_chunk_t *
   1807  1.10.2.2  matt arena_chunk_alloc(arena_t *arena)
   1808  1.10.2.2  matt {
   1809  1.10.2.2  matt 	arena_chunk_t *chunk;
   1810  1.10.2.2  matt 
   1811  1.10.2.2  matt 	if (arena->spare != NULL) {
   1812  1.10.2.2  matt 		chunk = arena->spare;
   1813  1.10.2.2  matt 		arena->spare = NULL;
   1814  1.10.2.2  matt 
   1815  1.10.2.2  matt 		/* LINTED */
   1816  1.10.2.2  matt 		RB_INSERT(arena_chunk_tree_s, &arena->chunks, chunk);
   1817  1.10.2.2  matt 	} else {
   1818  1.10.2.2  matt 		chunk = (arena_chunk_t *)chunk_alloc(chunksize);
   1819  1.10.2.2  matt 		if (chunk == NULL)
   1820  1.10.2.2  matt 			return (NULL);
   1821  1.10.2.2  matt #ifdef MALLOC_STATS
   1822  1.10.2.2  matt 		arena->stats.mapped += chunksize;
   1823  1.10.2.2  matt #endif
   1824  1.10.2.2  matt 
   1825  1.10.2.2  matt 		chunk->arena = arena;
   1826  1.10.2.2  matt 
   1827  1.10.2.2  matt 		/* LINTED */
   1828  1.10.2.2  matt 		RB_INSERT(arena_chunk_tree_s, &arena->chunks, chunk);
   1829  1.10.2.2  matt 
   1830  1.10.2.2  matt 		/*
   1831  1.10.2.2  matt 		 * Claim that no pages are in use, since the header is merely
   1832  1.10.2.2  matt 		 * overhead.
   1833  1.10.2.2  matt 		 */
   1834  1.10.2.2  matt 		chunk->pages_used = 0;
   1835  1.10.2.2  matt 
   1836  1.10.2.2  matt 		chunk->max_frun_npages = chunk_npages -
   1837  1.10.2.2  matt 		    arena_chunk_header_npages;
   1838  1.10.2.2  matt 		chunk->min_frun_ind = arena_chunk_header_npages;
   1839  1.10.2.2  matt 
   1840  1.10.2.2  matt 		/*
   1841  1.10.2.2  matt 		 * Initialize enough of the map to support one maximal free run.
   1842  1.10.2.2  matt 		 */
   1843  1.10.2.2  matt 		chunk->map[arena_chunk_header_npages].npages = chunk_npages -
   1844  1.10.2.2  matt 		    arena_chunk_header_npages;
   1845  1.10.2.2  matt 		chunk->map[arena_chunk_header_npages].pos = POS_FREE;
   1846  1.10.2.2  matt 		chunk->map[chunk_npages - 1].npages = chunk_npages -
   1847  1.10.2.2  matt 		    arena_chunk_header_npages;
   1848  1.10.2.2  matt 		chunk->map[chunk_npages - 1].pos = POS_FREE;
   1849  1.10.2.2  matt 	}
   1850  1.10.2.2  matt 
   1851  1.10.2.2  matt 	return (chunk);
   1852  1.10.2.2  matt }
   1853  1.10.2.2  matt 
   1854  1.10.2.2  matt static void
   1855  1.10.2.2  matt arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
   1856  1.10.2.2  matt {
   1857  1.10.2.2  matt 
   1858  1.10.2.2  matt 	/*
   1859  1.10.2.2  matt 	 * Remove chunk from the chunk tree, regardless of whether this chunk
   1860  1.10.2.2  matt 	 * will be cached, so that the arena does not use it.
   1861  1.10.2.2  matt 	 */
   1862  1.10.2.2  matt 	/* LINTED */
   1863  1.10.2.2  matt 	RB_REMOVE(arena_chunk_tree_s, &chunk->arena->chunks, chunk);
   1864  1.10.2.2  matt 
   1865  1.10.2.2  matt 	if (opt_hint == false) {
   1866  1.10.2.2  matt 		if (arena->spare != NULL) {
   1867  1.10.2.2  matt 			chunk_dealloc((void *)arena->spare, chunksize);
   1868  1.10.2.2  matt #ifdef MALLOC_STATS
   1869  1.10.2.2  matt 			arena->stats.mapped -= chunksize;
   1870  1.10.2.2  matt #endif
   1871  1.10.2.2  matt 		}
   1872  1.10.2.2  matt 		arena->spare = chunk;
   1873  1.10.2.2  matt 	} else {
   1874  1.10.2.2  matt 		assert(arena->spare == NULL);
   1875  1.10.2.2  matt 		chunk_dealloc((void *)chunk, chunksize);
   1876  1.10.2.2  matt #ifdef MALLOC_STATS
   1877  1.10.2.2  matt 		arena->stats.mapped -= chunksize;
   1878  1.10.2.2  matt #endif
   1879  1.10.2.2  matt 	}
   1880  1.10.2.2  matt }
   1881  1.10.2.2  matt 
   1882  1.10.2.2  matt static arena_run_t *
   1883  1.10.2.2  matt arena_run_alloc(arena_t *arena, size_t size)
   1884  1.10.2.2  matt {
   1885  1.10.2.2  matt 	arena_chunk_t *chunk;
   1886  1.10.2.2  matt 	arena_run_t *run;
   1887  1.10.2.2  matt 	unsigned need_npages, limit_pages, compl_need_npages;
   1888  1.10.2.2  matt 
   1889  1.10.2.2  matt 	assert(size <= (chunksize - (arena_chunk_header_npages <<
   1890  1.10.2.2  matt 	    pagesize_2pow)));
   1891  1.10.2.2  matt 	assert((size & pagesize_mask) == 0);
   1892  1.10.2.2  matt 
   1893  1.10.2.2  matt 	/*
    1894  1.10.2.2  matt 	 * Search through the arena's chunks in address order for a free run
    1895  1.10.2.2  matt 	 * that is large enough.  Look for the first fit.
   1896  1.10.2.2  matt 	 */
   1897  1.10.2.2  matt 	need_npages = (unsigned)(size >> pagesize_2pow);
   1898  1.10.2.2  matt 	limit_pages = chunk_npages - arena_chunk_header_npages;
   1899  1.10.2.2  matt 	compl_need_npages = limit_pages - need_npages;
   1900  1.10.2.2  matt 	/* LINTED */
   1901  1.10.2.2  matt 	RB_FOREACH(chunk, arena_chunk_tree_s, &arena->chunks) {
   1902  1.10.2.2  matt 		/*
   1903  1.10.2.2  matt 		 * Avoid searching this chunk if there are not enough
   1904  1.10.2.2  matt 		 * contiguous free pages for there to possibly be a large
   1905  1.10.2.2  matt 		 * enough free run.
   1906  1.10.2.2  matt 		 */
   1907  1.10.2.2  matt 		if (chunk->pages_used <= compl_need_npages &&
   1908  1.10.2.2  matt 		    need_npages <= chunk->max_frun_npages) {
   1909  1.10.2.2  matt 			arena_chunk_map_t *mapelm;
   1910  1.10.2.2  matt 			unsigned i;
   1911  1.10.2.2  matt 			unsigned max_frun_npages = 0;
   1912  1.10.2.2  matt 			unsigned min_frun_ind = chunk_npages;
   1913  1.10.2.2  matt 
   1914  1.10.2.2  matt 			assert(chunk->min_frun_ind >=
   1915  1.10.2.2  matt 			    arena_chunk_header_npages);
   1916  1.10.2.2  matt 			for (i = chunk->min_frun_ind; i < chunk_npages;) {
   1917  1.10.2.2  matt 				mapelm = &chunk->map[i];
   1918  1.10.2.2  matt 				if (mapelm->pos == POS_FREE) {
   1919  1.10.2.2  matt 					if (mapelm->npages >= need_npages) {
   1920  1.10.2.2  matt 						run = (arena_run_t *)
   1921  1.10.2.2  matt 						    ((uintptr_t)chunk + (i <<
   1922  1.10.2.2  matt 						    pagesize_2pow));
   1923  1.10.2.2  matt 						/* Update page map. */
   1924  1.10.2.2  matt 						arena_run_split(arena, run,
   1925  1.10.2.2  matt 						    size);
   1926  1.10.2.2  matt 						return (run);
   1927  1.10.2.2  matt 					}
   1928  1.10.2.2  matt 					if (mapelm->npages >
   1929  1.10.2.2  matt 					    max_frun_npages) {
   1930  1.10.2.2  matt 						max_frun_npages =
   1931  1.10.2.2  matt 						    mapelm->npages;
   1932  1.10.2.2  matt 					}
   1933  1.10.2.2  matt 					if (i < min_frun_ind) {
   1934  1.10.2.2  matt 						min_frun_ind = i;
   1935  1.10.2.2  matt 						if (i < chunk->min_frun_ind)
   1936  1.10.2.2  matt 							chunk->min_frun_ind = i;
   1937  1.10.2.2  matt 					}
   1938  1.10.2.2  matt 				}
   1939  1.10.2.2  matt 				i += mapelm->npages;
   1940  1.10.2.2  matt 			}
   1941  1.10.2.2  matt 			/*
   1942  1.10.2.2  matt 			 * Search failure.  Reset cached chunk->max_frun_npages.
   1943  1.10.2.2  matt 			 * chunk->min_frun_ind was already reset above (if
   1944  1.10.2.2  matt 			 * necessary).
   1945  1.10.2.2  matt 			 */
   1946  1.10.2.2  matt 			chunk->max_frun_npages = max_frun_npages;
   1947  1.10.2.2  matt 		}
   1948  1.10.2.2  matt 	}
   1949  1.10.2.2  matt 
   1950  1.10.2.2  matt 	/*
   1951  1.10.2.2  matt 	 * No usable runs.  Create a new chunk from which to allocate the run.
   1952  1.10.2.2  matt 	 */
   1953  1.10.2.2  matt 	chunk = arena_chunk_alloc(arena);
   1954  1.10.2.2  matt 	if (chunk == NULL)
   1955  1.10.2.2  matt 		return (NULL);
   1956  1.10.2.2  matt 	run = (arena_run_t *)((uintptr_t)chunk + (arena_chunk_header_npages <<
   1957  1.10.2.2  matt 	    pagesize_2pow));
   1958  1.10.2.2  matt 	/* Update page map. */
   1959  1.10.2.2  matt 	arena_run_split(arena, run, size);
   1960  1.10.2.2  matt 	return (run);
   1961  1.10.2.2  matt }
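/*
 * [Editorial illustration -- not part of the original source.]  The
 * "pages_used <= compl_need_npages" test above is a cheap way to skip
 * chunks that cannot possibly hold the run.  Example, assuming 256 pages
 * per chunk with a one-page header (limit_pages == 255): for a 4-page run,
 * compl_need_npages == 251, so a chunk with 252 pages already in use has at
 * most 3 free pages and is skipped without walking its page map.
 */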
   1962  1.10.2.2  matt 
   1963  1.10.2.2  matt static void
   1964  1.10.2.2  matt arena_run_dalloc(arena_t *arena, arena_run_t *run, size_t size)
   1965  1.10.2.2  matt {
   1966  1.10.2.2  matt 	arena_chunk_t *chunk;
   1967  1.10.2.2  matt 	unsigned run_ind, run_pages;
   1968  1.10.2.2  matt 
   1969  1.10.2.2  matt 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
   1970  1.10.2.2  matt 
   1971  1.10.2.2  matt 	run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk)
   1972  1.10.2.2  matt 	    >> pagesize_2pow);
   1973  1.10.2.2  matt 	assert(run_ind >= arena_chunk_header_npages);
   1974  1.10.2.2  matt 	assert(run_ind < (chunksize >> pagesize_2pow));
   1975  1.10.2.2  matt 	run_pages = (unsigned)(size >> pagesize_2pow);
   1976  1.10.2.2  matt 	assert(run_pages == chunk->map[run_ind].npages);
   1977  1.10.2.2  matt 
   1978  1.10.2.2  matt 	/* Subtract pages from count of pages used in chunk. */
   1979  1.10.2.2  matt 	chunk->pages_used -= run_pages;
   1980  1.10.2.2  matt 
   1981  1.10.2.2  matt 	/* Mark run as deallocated. */
   1982  1.10.2.2  matt 	assert(chunk->map[run_ind].npages == run_pages);
   1983  1.10.2.2  matt 	chunk->map[run_ind].pos = POS_FREE;
   1984  1.10.2.2  matt 	assert(chunk->map[run_ind + run_pages - 1].npages == run_pages);
   1985  1.10.2.2  matt 	chunk->map[run_ind + run_pages - 1].pos = POS_FREE;
   1986  1.10.2.2  matt 
   1987  1.10.2.2  matt 	/*
   1988  1.10.2.2  matt 	 * Tell the kernel that we don't need the data in this run, but only if
   1989  1.10.2.2  matt 	 * requested via runtime configuration.
   1990  1.10.2.2  matt 	 */
   1991  1.10.2.2  matt 	if (opt_hint)
   1992  1.10.2.2  matt 		madvise(run, size, MADV_FREE);
   1993  1.10.2.2  matt 
   1994  1.10.2.2  matt 	/* Try to coalesce with neighboring runs. */
   1995  1.10.2.2  matt 	if (run_ind > arena_chunk_header_npages &&
   1996  1.10.2.2  matt 	    chunk->map[run_ind - 1].pos == POS_FREE) {
   1997  1.10.2.2  matt 		unsigned prev_npages;
   1998  1.10.2.2  matt 
   1999  1.10.2.2  matt 		/* Coalesce with previous run. */
   2000  1.10.2.2  matt 		prev_npages = chunk->map[run_ind - 1].npages;
   2001  1.10.2.2  matt 		run_ind -= prev_npages;
   2002  1.10.2.2  matt 		assert(chunk->map[run_ind].npages == prev_npages);
   2003  1.10.2.2  matt 		assert(chunk->map[run_ind].pos == POS_FREE);
   2004  1.10.2.2  matt 		run_pages += prev_npages;
   2005  1.10.2.2  matt 
   2006  1.10.2.2  matt 		chunk->map[run_ind].npages = run_pages;
   2007  1.10.2.2  matt 		assert(chunk->map[run_ind].pos == POS_FREE);
   2008  1.10.2.2  matt 		chunk->map[run_ind + run_pages - 1].npages = run_pages;
   2009  1.10.2.2  matt 		assert(chunk->map[run_ind + run_pages - 1].pos == POS_FREE);
   2010  1.10.2.2  matt 	}
   2011  1.10.2.2  matt 
   2012  1.10.2.2  matt 	if (run_ind + run_pages < chunk_npages &&
   2013  1.10.2.2  matt 	    chunk->map[run_ind + run_pages].pos == POS_FREE) {
   2014  1.10.2.2  matt 		unsigned next_npages;
   2015  1.10.2.2  matt 
   2016  1.10.2.2  matt 		/* Coalesce with next run. */
   2017  1.10.2.2  matt 		next_npages = chunk->map[run_ind + run_pages].npages;
   2018  1.10.2.2  matt 		run_pages += next_npages;
   2019  1.10.2.2  matt 		assert(chunk->map[run_ind + run_pages - 1].npages ==
   2020  1.10.2.2  matt 		    next_npages);
   2021  1.10.2.2  matt 		assert(chunk->map[run_ind + run_pages - 1].pos == POS_FREE);
   2022  1.10.2.2  matt 
   2023  1.10.2.2  matt 		chunk->map[run_ind].npages = run_pages;
   2024  1.10.2.2  matt 		chunk->map[run_ind].pos = POS_FREE;
   2025  1.10.2.2  matt 		chunk->map[run_ind + run_pages - 1].npages = run_pages;
   2026  1.10.2.2  matt 		assert(chunk->map[run_ind + run_pages - 1].pos == POS_FREE);
   2027  1.10.2.2  matt 	}
   2028  1.10.2.2  matt 
   2029  1.10.2.2  matt 	if (chunk->map[run_ind].npages > chunk->max_frun_npages)
   2030  1.10.2.2  matt 		chunk->max_frun_npages = chunk->map[run_ind].npages;
   2031  1.10.2.2  matt 	if (run_ind < chunk->min_frun_ind)
   2032  1.10.2.2  matt 		chunk->min_frun_ind = run_ind;
   2033  1.10.2.2  matt 
   2034  1.10.2.2  matt 	/* Deallocate chunk if it is now completely unused. */
   2035  1.10.2.2  matt 	if (chunk->pages_used == 0)
   2036  1.10.2.2  matt 		arena_chunk_dealloc(arena, chunk);
   2037  1.10.2.2  matt }
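/*
 * [Editorial illustration -- not part of the original source.]  The
 * coalescing above only maintains the map entries for the first and last
 * page of each free run.  For example (page indices are hypothetical):
 * freeing a 2-page run at pages 5-6 while pages 3-4 and 7-10 are already
 * free yields one free run covering pages 3-10, and only map[3] and map[10]
 * need npages == 8 and pos == POS_FREE, because free runs are always
 * located and merged through their boundary entries.
 */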
   2038  1.10.2.2  matt 
   2039  1.10.2.2  matt static arena_run_t *
   2040  1.10.2.2  matt arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
   2041  1.10.2.2  matt {
   2042  1.10.2.2  matt 	arena_run_t *run;
   2043  1.10.2.2  matt 	unsigned i, remainder;
   2044  1.10.2.2  matt 
   2045  1.10.2.2  matt 	/* Look for a usable run. */
   2046  1.10.2.2  matt 	/* LINTED */
   2047  1.10.2.2  matt 	if ((run = RB_MIN(arena_run_tree_s, &bin->runs)) != NULL) {
   2048  1.10.2.2  matt 		/* run is guaranteed to have available space. */
   2049  1.10.2.2  matt 		/* LINTED */
   2050  1.10.2.2  matt 		RB_REMOVE(arena_run_tree_s, &bin->runs, run);
   2051  1.10.2.2  matt #ifdef MALLOC_STATS
   2052  1.10.2.2  matt 		bin->stats.reruns++;
   2053  1.10.2.2  matt #endif
   2054  1.10.2.2  matt 		return (run);
   2055  1.10.2.2  matt 	}
   2056  1.10.2.2  matt 	/* No existing runs have any space available. */
   2057  1.10.2.2  matt 
   2058  1.10.2.2  matt 	/* Allocate a new run. */
   2059  1.10.2.2  matt 	run = arena_run_alloc(arena, bin->run_size);
   2060  1.10.2.2  matt 	if (run == NULL)
   2061  1.10.2.2  matt 		return (NULL);
   2062  1.10.2.2  matt 
   2063  1.10.2.2  matt 	/* Initialize run internals. */
   2064  1.10.2.2  matt 	run->bin = bin;
   2065  1.10.2.2  matt 
   2066  1.10.2.2  matt 	for (i = 0; i < bin->regs_mask_nelms; i++)
   2067  1.10.2.2  matt 		run->regs_mask[i] = UINT_MAX;
   2068  1.10.2.2  matt 	remainder = bin->nregs & ((1 << (SIZEOF_INT_2POW + 3)) - 1);
   2069  1.10.2.2  matt 	if (remainder != 0) {
   2070  1.10.2.2  matt 		/* The last element has spare bits that need to be unset. */
   2071  1.10.2.2  matt 		run->regs_mask[i] = (UINT_MAX >> ((1 << (SIZEOF_INT_2POW + 3))
   2072  1.10.2.2  matt 		    - remainder));
   2073  1.10.2.2  matt 	}
   2074  1.10.2.2  matt 
   2075  1.10.2.2  matt 	run->regs_minelm = 0;
   2076  1.10.2.2  matt 
   2077  1.10.2.2  matt 	run->nfree = bin->nregs;
   2078  1.10.2.2  matt #ifdef MALLOC_DEBUG
   2079  1.10.2.2  matt 	run->magic = ARENA_RUN_MAGIC;
   2080  1.10.2.2  matt #endif
   2081  1.10.2.2  matt 
   2082  1.10.2.2  matt #ifdef MALLOC_STATS
   2083  1.10.2.2  matt 	bin->stats.nruns++;
   2084  1.10.2.2  matt 	bin->stats.curruns++;
   2085  1.10.2.2  matt 	if (bin->stats.curruns > bin->stats.highruns)
   2086  1.10.2.2  matt 		bin->stats.highruns = bin->stats.curruns;
   2087  1.10.2.2  matt #endif
   2088  1.10.2.2  matt 	return (run);
   2089  1.10.2.2  matt }
   2090  1.10.2.2  matt 
   2091  1.10.2.2  matt /* bin->runcur must have space available before this function is called. */
   2092  1.10.2.2  matt static inline void *
   2093  1.10.2.2  matt arena_bin_malloc_easy(arena_t *arena, arena_bin_t *bin, arena_run_t *run)
   2094  1.10.2.2  matt {
   2095  1.10.2.2  matt 	void *ret;
   2096  1.10.2.2  matt 
   2097  1.10.2.2  matt 	assert(run->magic == ARENA_RUN_MAGIC);
   2098  1.10.2.2  matt 	assert(run->nfree > 0);
   2099  1.10.2.2  matt 
   2100  1.10.2.2  matt 	ret = arena_run_reg_alloc(run, bin);
   2101  1.10.2.2  matt 	assert(ret != NULL);
   2102  1.10.2.2  matt 	run->nfree--;
   2103  1.10.2.2  matt 
   2104  1.10.2.2  matt 	return (ret);
   2105  1.10.2.2  matt }
   2106  1.10.2.2  matt 
   2107  1.10.2.2  matt /* Re-fill bin->runcur, then call arena_bin_malloc_easy(). */
   2108  1.10.2.2  matt static void *
   2109  1.10.2.2  matt arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
   2110  1.10.2.2  matt {
   2111  1.10.2.2  matt 
   2112  1.10.2.2  matt 	bin->runcur = arena_bin_nonfull_run_get(arena, bin);
   2113  1.10.2.2  matt 	if (bin->runcur == NULL)
   2114  1.10.2.2  matt 		return (NULL);
   2115  1.10.2.2  matt 	assert(bin->runcur->magic == ARENA_RUN_MAGIC);
   2116  1.10.2.2  matt 	assert(bin->runcur->nfree > 0);
   2117  1.10.2.2  matt 
   2118  1.10.2.2  matt 	return (arena_bin_malloc_easy(arena, bin, bin->runcur));
   2119  1.10.2.2  matt }
   2120  1.10.2.2  matt 
   2121  1.10.2.2  matt /*
   2122  1.10.2.2  matt  * Calculate bin->run_size such that it meets the following constraints:
   2123  1.10.2.2  matt  *
   2124  1.10.2.2  matt  *   *) bin->run_size >= min_run_size
   2125  1.10.2.2  matt  *   *) bin->run_size <= arena_maxclass
   2126  1.10.2.2  matt  *   *) bin->run_size <= RUN_MAX_SMALL
   2127  1.10.2.2  matt  *   *) run header overhead <= RUN_MAX_OVRHD (or header overhead relaxed).
   2128  1.10.2.2  matt  *
   2129  1.10.2.2  matt  * bin->nregs, bin->regs_mask_nelms, and bin->reg0_offset are
   2130  1.10.2.2  matt  * also calculated here, since these settings are all interdependent.
   2131  1.10.2.2  matt  */
   2132  1.10.2.2  matt static size_t
   2133  1.10.2.2  matt arena_bin_run_size_calc(arena_bin_t *bin, size_t min_run_size)
   2134  1.10.2.2  matt {
   2135  1.10.2.2  matt 	size_t try_run_size, good_run_size;
   2136  1.10.2.2  matt 	unsigned good_nregs, good_mask_nelms, good_reg0_offset;
   2137  1.10.2.2  matt 	unsigned try_nregs, try_mask_nelms, try_reg0_offset;
   2138  1.10.2.2  matt 	float max_ovrhd = RUN_MAX_OVRHD;
   2139  1.10.2.2  matt 
   2140  1.10.2.2  matt 	assert(min_run_size >= pagesize);
   2141  1.10.2.2  matt 	assert(min_run_size <= arena_maxclass);
   2142  1.10.2.2  matt 	assert(min_run_size <= RUN_MAX_SMALL);
   2143  1.10.2.2  matt 
   2144  1.10.2.2  matt 	/*
   2145  1.10.2.2  matt 	 * Calculate known-valid settings before entering the run_size
   2146  1.10.2.2  matt 	 * expansion loop, so that the first part of the loop always copies
   2147  1.10.2.2  matt 	 * valid settings.
   2148  1.10.2.2  matt 	 *
   2149  1.10.2.2  matt 	 * The do..while loop iteratively reduces the number of regions until
   2150  1.10.2.2  matt 	 * the run header and the regions no longer overlap.  A closed formula
   2151  1.10.2.2  matt 	 * would be quite messy, since there is an interdependency between the
   2152  1.10.2.2  matt 	 * header's mask length and the number of regions.
   2153  1.10.2.2  matt 	 */
   2154  1.10.2.2  matt 	try_run_size = min_run_size;
   2155  1.10.2.2  matt 	try_nregs = (unsigned)(((try_run_size - sizeof(arena_run_t)) /
   2156  1.10.2.2  matt 	    bin->reg_size) + 1); /* Counter-act the first line of the loop. */
   2157  1.10.2.2  matt 	do {
   2158  1.10.2.2  matt 		try_nregs--;
   2159  1.10.2.2  matt 		try_mask_nelms = (try_nregs >> (SIZEOF_INT_2POW + 3)) +
   2160  1.10.2.2  matt 		    ((try_nregs & ((1 << (SIZEOF_INT_2POW + 3)) - 1)) ? 1 : 0);
   2161  1.10.2.2  matt 		try_reg0_offset = (unsigned)(try_run_size -
   2162  1.10.2.2  matt 		    (try_nregs * bin->reg_size));
   2163  1.10.2.2  matt 	} while (sizeof(arena_run_t) + (sizeof(unsigned) * (try_mask_nelms - 1))
   2164  1.10.2.2  matt 	    > try_reg0_offset);
   2165  1.10.2.2  matt 
   2166  1.10.2.2  matt 	/* run_size expansion loop. */
   2167  1.10.2.2  matt 	do {
   2168  1.10.2.2  matt 		/*
   2169  1.10.2.2  matt 		 * Copy valid settings before trying more aggressive settings.
   2170  1.10.2.2  matt 		 */
   2171  1.10.2.2  matt 		good_run_size = try_run_size;
   2172  1.10.2.2  matt 		good_nregs = try_nregs;
   2173  1.10.2.2  matt 		good_mask_nelms = try_mask_nelms;
   2174  1.10.2.2  matt 		good_reg0_offset = try_reg0_offset;
   2175  1.10.2.2  matt 
   2176  1.10.2.2  matt 		/* Try more aggressive settings. */
   2177  1.10.2.2  matt 		try_run_size += pagesize;
   2178  1.10.2.2  matt 		try_nregs = (unsigned)(((try_run_size - sizeof(arena_run_t)) /
   2179  1.10.2.2  matt 		    bin->reg_size) + 1); /* Counter-act try_nregs-- in loop. */
   2180  1.10.2.2  matt 		do {
   2181  1.10.2.2  matt 			try_nregs--;
   2182  1.10.2.2  matt 			try_mask_nelms = (try_nregs >> (SIZEOF_INT_2POW + 3)) +
   2183  1.10.2.2  matt 			    ((try_nregs & ((1 << (SIZEOF_INT_2POW + 3)) - 1)) ?
   2184  1.10.2.2  matt 			    1 : 0);
   2185  1.10.2.2  matt 			try_reg0_offset = (unsigned)(try_run_size - (try_nregs *
   2186  1.10.2.2  matt 			    bin->reg_size));
   2187  1.10.2.2  matt 		} while (sizeof(arena_run_t) + (sizeof(unsigned) *
   2188  1.10.2.2  matt 		    (try_mask_nelms - 1)) > try_reg0_offset);
   2189  1.10.2.2  matt 	} while (try_run_size <= arena_maxclass && try_run_size <= RUN_MAX_SMALL
   2190  1.10.2.2  matt 	    && max_ovrhd > RUN_MAX_OVRHD_RELAX / ((float)(bin->reg_size << 3))
   2191  1.10.2.2  matt 	    && ((float)(try_reg0_offset)) / ((float)(try_run_size)) >
   2192  1.10.2.2  matt 	    max_ovrhd);
   2193  1.10.2.2  matt 
   2194  1.10.2.2  matt 	assert(sizeof(arena_run_t) + (sizeof(unsigned) * (good_mask_nelms - 1))
   2195  1.10.2.2  matt 	    <= good_reg0_offset);
   2196  1.10.2.2  matt 	assert((good_mask_nelms << (SIZEOF_INT_2POW + 3)) >= good_nregs);
   2197  1.10.2.2  matt 
   2198  1.10.2.2  matt 	/* Copy final settings. */
   2199  1.10.2.2  matt 	bin->run_size = good_run_size;
   2200  1.10.2.2  matt 	bin->nregs = good_nregs;
   2201  1.10.2.2  matt 	bin->regs_mask_nelms = good_mask_nelms;
   2202  1.10.2.2  matt 	bin->reg0_offset = good_reg0_offset;
   2203  1.10.2.2  matt 
   2204  1.10.2.2  matt 	return (good_run_size);
   2205  1.10.2.2  matt }
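/*
 * [Editorial illustration -- not part of the original source.]  Worked
 * example of the sizing loop above, with assumed numbers: a 4 KB page,
 * reg_size == 64 and sizeof(arena_run_t) == 40.  Starting from
 * try_run_size == 4096:
 *
 *	try_nregs       = ((4096 - 40) / 64) + 1 = 64, then 63 after the
 *	                  first decrement
 *	try_mask_nelms  = (63 >> 5) + 1 = 2      (63 bits need 2 words)
 *	try_reg0_offset = 4096 - 63 * 64 = 64
 *
 * The header needs 40 + 4 * (2 - 1) = 44 bytes <= 64, so the inner loop
 * stops: 63 regions fit in one page with 64 bytes of header and padding,
 * roughly 1.6% overhead.  The outer loop would only grow run_size further
 * while that overhead fraction exceeded the configured maximum.
 */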
   2206  1.10.2.2  matt 
   2207  1.10.2.2  matt static void *
   2208  1.10.2.2  matt arena_malloc(arena_t *arena, size_t size)
   2209  1.10.2.2  matt {
   2210  1.10.2.2  matt 	void *ret;
   2211  1.10.2.2  matt 
   2212  1.10.2.2  matt 	assert(arena != NULL);
   2213  1.10.2.2  matt 	assert(arena->magic == ARENA_MAGIC);
   2214  1.10.2.2  matt 	assert(size != 0);
   2215  1.10.2.2  matt 	assert(QUANTUM_CEILING(size) <= arena_maxclass);
   2216  1.10.2.2  matt 
   2217  1.10.2.2  matt 	if (size <= bin_maxclass) {
   2218  1.10.2.2  matt 		arena_bin_t *bin;
   2219  1.10.2.2  matt 		arena_run_t *run;
   2220  1.10.2.2  matt 
   2221  1.10.2.2  matt 		/* Small allocation. */
   2222  1.10.2.2  matt 
   2223  1.10.2.2  matt 		if (size < small_min) {
   2224  1.10.2.2  matt 			/* Tiny. */
   2225  1.10.2.2  matt 			size = pow2_ceil(size);
   2226  1.10.2.2  matt 			bin = &arena->bins[ffs((int)(size >> (TINY_MIN_2POW +
   2227  1.10.2.2  matt 			    1)))];
   2228  1.10.2.2  matt #if (!defined(NDEBUG) || defined(MALLOC_STATS))
   2229  1.10.2.2  matt 			/*
   2230  1.10.2.2  matt 			 * Bin calculation is always correct, but we may need
   2231  1.10.2.2  matt 			 * to fix size for the purposes of assertions and/or
   2232  1.10.2.2  matt 			 * stats accuracy.
   2233  1.10.2.2  matt 			 */
   2234  1.10.2.2  matt 			if (size < (1 << TINY_MIN_2POW))
   2235  1.10.2.2  matt 				size = (1 << TINY_MIN_2POW);
   2236  1.10.2.2  matt #endif
   2237  1.10.2.2  matt 		} else if (size <= small_max) {
   2238  1.10.2.2  matt 			/* Quantum-spaced. */
   2239  1.10.2.2  matt 			size = QUANTUM_CEILING(size);
   2240  1.10.2.2  matt 			bin = &arena->bins[ntbins + (size >> opt_quantum_2pow)
   2241  1.10.2.2  matt 			    - 1];
   2242  1.10.2.2  matt 		} else {
   2243  1.10.2.2  matt 			/* Sub-page. */
   2244  1.10.2.2  matt 			size = pow2_ceil(size);
   2245  1.10.2.2  matt 			bin = &arena->bins[ntbins + nqbins
   2246  1.10.2.2  matt 			    + (ffs((int)(size >> opt_small_max_2pow)) - 2)];
   2247  1.10.2.2  matt 		}
   2248  1.10.2.2  matt 		assert(size == bin->reg_size);
   2249  1.10.2.2  matt 
   2250  1.10.2.2  matt 		malloc_mutex_lock(&arena->mtx);
   2251  1.10.2.2  matt 		if ((run = bin->runcur) != NULL && run->nfree > 0)
   2252  1.10.2.2  matt 			ret = arena_bin_malloc_easy(arena, bin, run);
   2253  1.10.2.2  matt 		else
   2254  1.10.2.2  matt 			ret = arena_bin_malloc_hard(arena, bin);
   2255  1.10.2.2  matt 
   2256  1.10.2.2  matt 		if (ret == NULL) {
   2257  1.10.2.2  matt 			malloc_mutex_unlock(&arena->mtx);
   2258  1.10.2.2  matt 			return (NULL);
   2259  1.10.2.2  matt 		}
   2260  1.10.2.2  matt 
   2261  1.10.2.2  matt #ifdef MALLOC_STATS
   2262  1.10.2.2  matt 		bin->stats.nrequests++;
   2263  1.10.2.2  matt 		arena->stats.nmalloc_small++;
   2264  1.10.2.2  matt 		arena->stats.allocated_small += size;
   2265  1.10.2.2  matt #endif
   2266  1.10.2.2  matt 	} else {
   2267  1.10.2.2  matt 		/* Large allocation. */
   2268  1.10.2.2  matt 		size = PAGE_CEILING(size);
   2269  1.10.2.2  matt 		malloc_mutex_lock(&arena->mtx);
   2270  1.10.2.2  matt 		ret = (void *)arena_run_alloc(arena, size);
   2271  1.10.2.2  matt 		if (ret == NULL) {
   2272  1.10.2.2  matt 			malloc_mutex_unlock(&arena->mtx);
   2273  1.10.2.2  matt 			return (NULL);
   2274  1.10.2.2  matt 		}
   2275  1.10.2.2  matt #ifdef MALLOC_STATS
   2276  1.10.2.2  matt 		arena->stats.nmalloc_large++;
   2277  1.10.2.2  matt 		arena->stats.allocated_large += size;
   2278  1.10.2.2  matt #endif
   2279  1.10.2.2  matt 	}
   2280  1.10.2.2  matt 
   2281  1.10.2.2  matt 	malloc_mutex_unlock(&arena->mtx);
   2282  1.10.2.2  matt 
   2283  1.10.2.2  matt 	if (opt_junk)
   2284  1.10.2.2  matt 		memset(ret, 0xa5, size);
   2285  1.10.2.2  matt 	else if (opt_zero)
   2286  1.10.2.2  matt 		memset(ret, 0, size);
   2287  1.10.2.2  matt 	return (ret);
   2288  1.10.2.2  matt }
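
/*
 * Worked example of the small-size bin selection above, assuming the common
 * configuration quantum == 16 (opt_quantum_2pow == 4) and that the request
 * falls in the quantum-spaced range [small_min .. small_max]: a 33-byte
 * request is rounded by QUANTUM_CEILING() to 48 and served from
 * arena->bins[ntbins + (48 >> 4) - 1] == arena->bins[ntbins + 2], whose
 * reg_size is 48.
 */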
   2289  1.10.2.2  matt 
   2290  1.10.2.2  matt static inline void
   2291  1.10.2.2  matt arena_palloc_trim(arena_t *arena, arena_chunk_t *chunk, unsigned pageind,
   2292  1.10.2.2  matt     unsigned npages)
   2293  1.10.2.2  matt {
   2294  1.10.2.2  matt 	unsigned i;
   2295  1.10.2.2  matt 
   2296  1.10.2.2  matt 	assert(npages > 0);
   2297  1.10.2.2  matt 
   2298  1.10.2.2  matt 	/*
    2299  1.10.2.2  matt 	 * Modify the map such that arena_run_dalloc() sees the run as
   2300  1.10.2.2  matt 	 * separately allocated.
   2301  1.10.2.2  matt 	 */
   2302  1.10.2.2  matt 	for (i = 0; i < npages; i++) {
   2303  1.10.2.2  matt 		chunk->map[pageind + i].npages = npages;
   2304  1.10.2.2  matt 		chunk->map[pageind + i].pos = i;
   2305  1.10.2.2  matt 	}
   2306  1.10.2.2  matt 	arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)chunk + (pageind <<
   2307  1.10.2.2  matt 	    pagesize_2pow)), npages << pagesize_2pow);
   2308  1.10.2.2  matt }
   2309  1.10.2.2  matt 
   2310  1.10.2.2  matt /* Only handles large allocations that require more than page alignment. */
   2311  1.10.2.2  matt static void *
   2312  1.10.2.2  matt arena_palloc(arena_t *arena, size_t alignment, size_t size, size_t alloc_size)
   2313  1.10.2.2  matt {
   2314  1.10.2.2  matt 	void *ret;
   2315  1.10.2.2  matt 	size_t offset;
   2316  1.10.2.2  matt 	arena_chunk_t *chunk;
   2317  1.10.2.2  matt 	unsigned pageind, i, npages;
   2318  1.10.2.2  matt 
   2319  1.10.2.2  matt 	assert((size & pagesize_mask) == 0);
   2320  1.10.2.2  matt 	assert((alignment & pagesize_mask) == 0);
   2321  1.10.2.2  matt 
   2322  1.10.2.2  matt 	npages = (unsigned)(size >> pagesize_2pow);
   2323  1.10.2.2  matt 
   2324  1.10.2.2  matt 	malloc_mutex_lock(&arena->mtx);
   2325  1.10.2.2  matt 	ret = (void *)arena_run_alloc(arena, alloc_size);
   2326  1.10.2.2  matt 	if (ret == NULL) {
   2327  1.10.2.2  matt 		malloc_mutex_unlock(&arena->mtx);
   2328  1.10.2.2  matt 		return (NULL);
   2329  1.10.2.2  matt 	}
   2330  1.10.2.2  matt 
   2331  1.10.2.2  matt 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
   2332  1.10.2.2  matt 
   2333  1.10.2.2  matt 	offset = (uintptr_t)ret & (alignment - 1);
   2334  1.10.2.2  matt 	assert((offset & pagesize_mask) == 0);
   2335  1.10.2.2  matt 	assert(offset < alloc_size);
   2336  1.10.2.2  matt 	if (offset == 0) {
   2337  1.10.2.2  matt 		pageind = (unsigned)(((uintptr_t)ret - (uintptr_t)chunk) >>
   2338  1.10.2.2  matt 		    pagesize_2pow);
   2339  1.10.2.2  matt 
   2340  1.10.2.2  matt 		/* Update the map for the run to be kept. */
   2341  1.10.2.2  matt 		for (i = 0; i < npages; i++) {
   2342  1.10.2.2  matt 			chunk->map[pageind + i].npages = npages;
   2343  1.10.2.2  matt 			assert(chunk->map[pageind + i].pos == i);
   2344  1.10.2.2  matt 		}
   2345  1.10.2.2  matt 
   2346  1.10.2.2  matt 		/* Trim trailing space. */
   2347  1.10.2.2  matt 		arena_palloc_trim(arena, chunk, pageind + npages,
   2348  1.10.2.2  matt 		    (unsigned)((alloc_size - size) >> pagesize_2pow));
   2349  1.10.2.2  matt 	} else {
   2350  1.10.2.2  matt 		size_t leadsize, trailsize;
   2351  1.10.2.2  matt 
   2352  1.10.2.2  matt 		leadsize = alignment - offset;
   2353  1.10.2.2  matt 		ret = (void *)((uintptr_t)ret + leadsize);
   2354  1.10.2.2  matt 		pageind = (unsigned)(((uintptr_t)ret - (uintptr_t)chunk) >>
   2355  1.10.2.2  matt 		    pagesize_2pow);
   2356  1.10.2.2  matt 
   2357  1.10.2.2  matt 		/* Update the map for the run to be kept. */
   2358  1.10.2.2  matt 		for (i = 0; i < npages; i++) {
   2359  1.10.2.2  matt 			chunk->map[pageind + i].npages = npages;
   2360  1.10.2.2  matt 			chunk->map[pageind + i].pos = i;
   2361  1.10.2.2  matt 		}
   2362  1.10.2.2  matt 
   2363  1.10.2.2  matt 		/* Trim leading space. */
   2364  1.10.2.2  matt 		arena_palloc_trim(arena, chunk,
   2365  1.10.2.2  matt 		    (unsigned)(pageind - (leadsize >> pagesize_2pow)),
   2366  1.10.2.2  matt 		    (unsigned)(leadsize >> pagesize_2pow));
   2367  1.10.2.2  matt 
   2368  1.10.2.2  matt 		trailsize = alloc_size - leadsize - size;
   2369  1.10.2.2  matt 		if (trailsize != 0) {
   2370  1.10.2.2  matt 			/* Trim trailing space. */
   2371  1.10.2.2  matt 			assert(trailsize < alloc_size);
   2372  1.10.2.2  matt 			arena_palloc_trim(arena, chunk, pageind + npages,
   2373  1.10.2.2  matt 			    (unsigned)(trailsize >> pagesize_2pow));
   2374  1.10.2.2  matt 		}
   2375  1.10.2.2  matt 	}
   2376  1.10.2.2  matt 
   2377  1.10.2.2  matt #ifdef MALLOC_STATS
   2378  1.10.2.2  matt 	arena->stats.nmalloc_large++;
   2379  1.10.2.2  matt 	arena->stats.allocated_large += size;
   2380  1.10.2.2  matt #endif
   2381  1.10.2.2  matt 	malloc_mutex_unlock(&arena->mtx);
   2382  1.10.2.2  matt 
   2383  1.10.2.2  matt 	if (opt_junk)
   2384  1.10.2.2  matt 		memset(ret, 0xa5, size);
   2385  1.10.2.2  matt 	else if (opt_zero)
   2386  1.10.2.2  matt 		memset(ret, 0, size);
   2387  1.10.2.2  matt 	return (ret);
   2388  1.10.2.2  matt }
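
/*
 * Worked example of the trimming above (hypothetical numbers, pagesize ==
 * 4096): for size == 16384 and alignment == 16384, ipalloc() passes
 * alloc_size == size + alignment - pagesize == 28672 (7 pages).  If the run
 * returned by arena_run_alloc() starts 8192 bytes past an alignment
 * boundary, then leadsize == 16384 - 8192 == 8192 (2 pages trimmed from the
 * front) and trailsize == 28672 - 8192 - 16384 == 4096 (1 page trimmed from
 * the back), leaving the 4 aligned pages that satisfy the request.
 */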
   2389  1.10.2.2  matt 
   2390  1.10.2.2  matt /* Return the size of the allocation pointed to by ptr. */
   2391  1.10.2.2  matt static size_t
   2392  1.10.2.2  matt arena_salloc(const void *ptr)
   2393  1.10.2.2  matt {
   2394  1.10.2.2  matt 	size_t ret;
   2395  1.10.2.2  matt 	arena_chunk_t *chunk;
   2396  1.10.2.2  matt 	arena_chunk_map_t *mapelm;
   2397  1.10.2.2  matt 	unsigned pageind;
   2398  1.10.2.2  matt 
   2399  1.10.2.2  matt 	assert(ptr != NULL);
   2400  1.10.2.2  matt 	assert(CHUNK_ADDR2BASE(ptr) != ptr);
   2401  1.10.2.2  matt 
   2402  1.10.2.2  matt 	/*
   2403  1.10.2.2  matt 	 * No arena data structures that we query here can change in a way that
   2404  1.10.2.2  matt 	 * affects this function, so we don't need to lock.
   2405  1.10.2.2  matt 	 */
   2406  1.10.2.2  matt 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
   2407  1.10.2.2  matt 	pageind = (unsigned)(((uintptr_t)ptr - (uintptr_t)chunk) >>
   2408  1.10.2.2  matt 	    pagesize_2pow);
   2409  1.10.2.2  matt 	mapelm = &chunk->map[pageind];
   2410  1.10.2.2  matt 	if (mapelm->pos != 0 || ptr != (char *)((uintptr_t)chunk) + (pageind <<
   2411  1.10.2.2  matt 	    pagesize_2pow)) {
   2412  1.10.2.2  matt 		arena_run_t *run;
   2413  1.10.2.2  matt 
   2414  1.10.2.2  matt 		pageind -= mapelm->pos;
   2415  1.10.2.2  matt 
   2416  1.10.2.2  matt 		run = (arena_run_t *)((uintptr_t)chunk + (pageind <<
   2417  1.10.2.2  matt 		    pagesize_2pow));
   2418  1.10.2.2  matt 		assert(run->magic == ARENA_RUN_MAGIC);
   2419  1.10.2.2  matt 		ret = run->bin->reg_size;
   2420  1.10.2.2  matt 	} else
   2421  1.10.2.2  matt 		ret = mapelm->npages << pagesize_2pow;
   2422  1.10.2.2  matt 
   2423  1.10.2.2  matt 	return (ret);
   2424  1.10.2.2  matt }
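
/*
 * Example of the map lookup above: if ptr lies in the third page of a
 * small-object run, its map element has pos == 2, so subtracting pos from
 * pageind lands on the run's first page, whose arena_run_t header supplies
 * the bin and therefore reg_size.
 */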
   2425  1.10.2.2  matt 
   2426  1.10.2.2  matt static void *
   2427  1.10.2.2  matt arena_ralloc(void *ptr, size_t size, size_t oldsize)
   2428  1.10.2.2  matt {
   2429  1.10.2.2  matt 	void *ret;
   2430  1.10.2.2  matt 
   2431  1.10.2.2  matt 	/* Avoid moving the allocation if the size class would not change. */
   2432  1.10.2.2  matt 	if (size < small_min) {
   2433  1.10.2.2  matt 		if (oldsize < small_min &&
   2434  1.10.2.2  matt 		    ffs((int)(pow2_ceil(size) >> (TINY_MIN_2POW + 1)))
   2435  1.10.2.2  matt 		    == ffs((int)(pow2_ceil(oldsize) >> (TINY_MIN_2POW + 1))))
   2436  1.10.2.2  matt 			goto IN_PLACE;
   2437  1.10.2.2  matt 	} else if (size <= small_max) {
   2438  1.10.2.2  matt 		if (oldsize >= small_min && oldsize <= small_max &&
   2439  1.10.2.2  matt 		    (QUANTUM_CEILING(size) >> opt_quantum_2pow)
   2440  1.10.2.2  matt 		    == (QUANTUM_CEILING(oldsize) >> opt_quantum_2pow))
   2441  1.10.2.2  matt 			goto IN_PLACE;
   2442  1.10.2.2  matt 	} else {
   2443  1.10.2.2  matt 		/*
   2444  1.10.2.2  matt 		 * We make no attempt to resize runs here, though it would be
   2445  1.10.2.2  matt 		 * possible to do so.
   2446  1.10.2.2  matt 		 */
   2447  1.10.2.2  matt 		if (oldsize > small_max && PAGE_CEILING(size) == oldsize)
   2448  1.10.2.2  matt 			goto IN_PLACE;
   2449  1.10.2.2  matt 	}
   2450  1.10.2.2  matt 
   2451  1.10.2.2  matt 	/*
   2452  1.10.2.2  matt 	 * If we get here, then size and oldsize are different enough that we
   2453  1.10.2.2  matt 	 * need to use a different size class.  In that case, fall back to
   2454  1.10.2.2  matt 	 * allocating new space and copying.
   2455  1.10.2.2  matt 	 */
   2456  1.10.2.2  matt 	ret = arena_malloc(choose_arena(), size);
   2457  1.10.2.2  matt 	if (ret == NULL)
   2458  1.10.2.2  matt 		return (NULL);
   2459  1.10.2.2  matt 
   2460  1.10.2.2  matt 	/* Junk/zero-filling were already done by arena_malloc(). */
   2461  1.10.2.2  matt 	if (size < oldsize)
   2462  1.10.2.2  matt 		memcpy(ret, ptr, size);
   2463  1.10.2.2  matt 	else
   2464  1.10.2.2  matt 		memcpy(ret, ptr, oldsize);
   2465  1.10.2.2  matt 	idalloc(ptr);
   2466  1.10.2.2  matt 	return (ret);
   2467  1.10.2.2  matt IN_PLACE:
   2468  1.10.2.2  matt 	if (opt_junk && size < oldsize)
   2469  1.10.2.2  matt 		memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize - size);
   2470  1.10.2.2  matt 	else if (opt_zero && size > oldsize)
   2471  1.10.2.2  matt 		memset((void *)((uintptr_t)ptr + oldsize), 0, size - oldsize);
   2472  1.10.2.2  matt 	return (ptr);
   2473  1.10.2.2  matt }
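
/*
 * Worked example, assuming quantum == 16 and that the sizes involved fall in
 * the quantum-spaced range: growing a region from 20 to 24 bytes leaves
 * QUANTUM_CEILING() at 32 for both, so the request is satisfied in place;
 * growing it to 40 bytes changes the size class to 48 and takes the
 * allocate-copy-free path instead.
 */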
   2474  1.10.2.2  matt 
   2475  1.10.2.2  matt static void
   2476  1.10.2.2  matt arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr)
   2477  1.10.2.2  matt {
   2478  1.10.2.2  matt 	unsigned pageind;
   2479  1.10.2.2  matt 	arena_chunk_map_t *mapelm;
   2480  1.10.2.2  matt 	size_t size;
   2481  1.10.2.2  matt 
   2482  1.10.2.2  matt 	assert(arena != NULL);
   2483  1.10.2.2  matt 	assert(arena->magic == ARENA_MAGIC);
   2484  1.10.2.2  matt 	assert(chunk->arena == arena);
   2485  1.10.2.2  matt 	assert(ptr != NULL);
   2486  1.10.2.2  matt 	assert(CHUNK_ADDR2BASE(ptr) != ptr);
   2487  1.10.2.2  matt 
   2488  1.10.2.2  matt 	pageind = (unsigned)(((uintptr_t)ptr - (uintptr_t)chunk) >>
   2489  1.10.2.2  matt 	    pagesize_2pow);
   2490  1.10.2.2  matt 	mapelm = &chunk->map[pageind];
   2491  1.10.2.2  matt 	if (mapelm->pos != 0 || ptr != (char *)((uintptr_t)chunk) + (pageind <<
   2492  1.10.2.2  matt 	    pagesize_2pow)) {
   2493  1.10.2.2  matt 		arena_run_t *run;
   2494  1.10.2.2  matt 		arena_bin_t *bin;
   2495  1.10.2.2  matt 
   2496  1.10.2.2  matt 		/* Small allocation. */
   2497  1.10.2.2  matt 
   2498  1.10.2.2  matt 		pageind -= mapelm->pos;
   2499  1.10.2.2  matt 
   2500  1.10.2.2  matt 		run = (arena_run_t *)((uintptr_t)chunk + (pageind <<
   2501  1.10.2.2  matt 		    pagesize_2pow));
   2502  1.10.2.2  matt 		assert(run->magic == ARENA_RUN_MAGIC);
   2503  1.10.2.2  matt 		bin = run->bin;
   2504  1.10.2.2  matt 		size = bin->reg_size;
   2505  1.10.2.2  matt 
   2506  1.10.2.2  matt 		if (opt_junk)
   2507  1.10.2.2  matt 			memset(ptr, 0x5a, size);
   2508  1.10.2.2  matt 
   2509  1.10.2.2  matt 		malloc_mutex_lock(&arena->mtx);
   2510  1.10.2.2  matt 		arena_run_reg_dalloc(run, bin, ptr, size);
   2511  1.10.2.2  matt 		run->nfree++;
   2512  1.10.2.2  matt 
   2513  1.10.2.2  matt 		if (run->nfree == bin->nregs) {
   2514  1.10.2.2  matt 			/* Deallocate run. */
   2515  1.10.2.2  matt 			if (run == bin->runcur)
   2516  1.10.2.2  matt 				bin->runcur = NULL;
   2517  1.10.2.2  matt 			else if (bin->nregs != 1) {
   2518  1.10.2.2  matt 				/*
   2519  1.10.2.2  matt 				 * This block's conditional is necessary because
   2520  1.10.2.2  matt 				 * if the run only contains one region, then it
   2521  1.10.2.2  matt 				 * never gets inserted into the non-full runs
   2522  1.10.2.2  matt 				 * tree.
   2523  1.10.2.2  matt 				 */
   2524  1.10.2.2  matt 				/* LINTED */
   2525  1.10.2.2  matt 				RB_REMOVE(arena_run_tree_s, &bin->runs, run);
   2526  1.10.2.2  matt 			}
   2527  1.10.2.2  matt #ifdef MALLOC_DEBUG
   2528  1.10.2.2  matt 			run->magic = 0;
   2529  1.10.2.2  matt #endif
   2530  1.10.2.2  matt 			arena_run_dalloc(arena, run, bin->run_size);
   2531  1.10.2.2  matt #ifdef MALLOC_STATS
   2532  1.10.2.2  matt 			bin->stats.curruns--;
   2533  1.10.2.2  matt #endif
   2534  1.10.2.2  matt 		} else if (run->nfree == 1 && run != bin->runcur) {
   2535  1.10.2.2  matt 			/*
   2536  1.10.2.2  matt 			 * Make sure that bin->runcur always refers to the
   2537  1.10.2.2  matt 			 * lowest non-full run, if one exists.
   2538  1.10.2.2  matt 			 */
   2539  1.10.2.2  matt 			if (bin->runcur == NULL)
   2540  1.10.2.2  matt 				bin->runcur = run;
   2541  1.10.2.2  matt 			else if ((uintptr_t)run < (uintptr_t)bin->runcur) {
   2542  1.10.2.2  matt 				/* Switch runcur. */
   2543  1.10.2.2  matt 				if (bin->runcur->nfree > 0) {
   2544  1.10.2.2  matt 					/* Insert runcur. */
   2545  1.10.2.2  matt 					/* LINTED */
   2546  1.10.2.2  matt 					RB_INSERT(arena_run_tree_s, &bin->runs,
   2547  1.10.2.2  matt 					    bin->runcur);
   2548  1.10.2.2  matt 				}
   2549  1.10.2.2  matt 				bin->runcur = run;
   2550  1.10.2.2  matt 			} else {
   2551  1.10.2.2  matt 				/* LINTED */
   2552  1.10.2.2  matt 				RB_INSERT(arena_run_tree_s, &bin->runs, run);
   2553  1.10.2.2  matt 			}
   2554  1.10.2.2  matt 		}
   2555  1.10.2.2  matt #ifdef MALLOC_STATS
   2556  1.10.2.2  matt 		arena->stats.allocated_small -= size;
   2557  1.10.2.2  matt 		arena->stats.ndalloc_small++;
   2558  1.10.2.2  matt #endif
   2559  1.10.2.2  matt 	} else {
   2560  1.10.2.2  matt 		/* Large allocation. */
   2561  1.10.2.2  matt 
   2562  1.10.2.2  matt 		size = mapelm->npages << pagesize_2pow;
   2563  1.10.2.2  matt 		assert((((uintptr_t)ptr) & pagesize_mask) == 0);
   2564  1.10.2.2  matt 
   2565  1.10.2.2  matt 		if (opt_junk)
   2566  1.10.2.2  matt 			memset(ptr, 0x5a, size);
   2567  1.10.2.2  matt 
   2568  1.10.2.2  matt 		malloc_mutex_lock(&arena->mtx);
   2569  1.10.2.2  matt 		arena_run_dalloc(arena, (arena_run_t *)ptr, size);
   2570  1.10.2.2  matt #ifdef MALLOC_STATS
   2571  1.10.2.2  matt 		arena->stats.allocated_large -= size;
   2572  1.10.2.2  matt 		arena->stats.ndalloc_large++;
   2573  1.10.2.2  matt #endif
   2574  1.10.2.2  matt 	}
   2575  1.10.2.2  matt 
   2576  1.10.2.2  matt 	malloc_mutex_unlock(&arena->mtx);
   2577  1.10.2.2  matt }
   2578  1.10.2.2  matt 
   2579  1.10.2.2  matt static bool
   2580  1.10.2.2  matt arena_new(arena_t *arena)
   2581  1.10.2.2  matt {
   2582  1.10.2.2  matt 	unsigned i;
   2583  1.10.2.2  matt 	arena_bin_t *bin;
   2584  1.10.2.2  matt 	size_t prev_run_size;
   2585  1.10.2.2  matt 
   2586  1.10.2.2  matt 	malloc_mutex_init(&arena->mtx);
   2587  1.10.2.2  matt 
   2588  1.10.2.2  matt #ifdef MALLOC_STATS
   2589  1.10.2.2  matt 	memset(&arena->stats, 0, sizeof(arena_stats_t));
   2590  1.10.2.2  matt #endif
   2591  1.10.2.2  matt 
   2592  1.10.2.2  matt 	/* Initialize chunks. */
   2593  1.10.2.2  matt 	RB_INIT(&arena->chunks);
   2594  1.10.2.2  matt 	arena->spare = NULL;
   2595  1.10.2.2  matt 
   2596  1.10.2.2  matt 	/* Initialize bins. */
   2597  1.10.2.2  matt 	prev_run_size = pagesize;
   2598  1.10.2.2  matt 
   2599  1.10.2.2  matt 	/* (2^n)-spaced tiny bins. */
   2600  1.10.2.2  matt 	for (i = 0; i < ntbins; i++) {
   2601  1.10.2.2  matt 		bin = &arena->bins[i];
   2602  1.10.2.2  matt 		bin->runcur = NULL;
   2603  1.10.2.2  matt 		RB_INIT(&bin->runs);
   2604  1.10.2.2  matt 
   2605  1.10.2.2  matt 		bin->reg_size = (1 << (TINY_MIN_2POW + i));
   2606  1.10.2.2  matt 		prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
   2607  1.10.2.2  matt 
   2608  1.10.2.2  matt #ifdef MALLOC_STATS
   2609  1.10.2.2  matt 		memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
   2610  1.10.2.2  matt #endif
   2611  1.10.2.2  matt 	}
   2612  1.10.2.2  matt 
   2613  1.10.2.2  matt 	/* Quantum-spaced bins. */
   2614  1.10.2.2  matt 	for (; i < ntbins + nqbins; i++) {
   2615  1.10.2.2  matt 		bin = &arena->bins[i];
   2616  1.10.2.2  matt 		bin->runcur = NULL;
   2617  1.10.2.2  matt 		RB_INIT(&bin->runs);
   2618  1.10.2.2  matt 
   2619  1.10.2.2  matt 		bin->reg_size = quantum * (i - ntbins + 1);
   2620  1.10.2.2  matt /*
   2621  1.10.2.2  matt 		pow2_size = pow2_ceil(quantum * (i - ntbins + 1));
   2622  1.10.2.2  matt */
   2623  1.10.2.2  matt 		prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
   2624  1.10.2.2  matt 
   2625  1.10.2.2  matt #ifdef MALLOC_STATS
   2626  1.10.2.2  matt 		memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
   2627  1.10.2.2  matt #endif
   2628  1.10.2.2  matt 	}
   2629  1.10.2.2  matt 
   2630  1.10.2.2  matt 	/* (2^n)-spaced sub-page bins. */
   2631  1.10.2.2  matt 	for (; i < ntbins + nqbins + nsbins; i++) {
   2632  1.10.2.2  matt 		bin = &arena->bins[i];
   2633  1.10.2.2  matt 		bin->runcur = NULL;
   2634  1.10.2.2  matt 		RB_INIT(&bin->runs);
   2635  1.10.2.2  matt 
   2636  1.10.2.2  matt 		bin->reg_size = (small_max << (i - (ntbins + nqbins) + 1));
   2637  1.10.2.2  matt 
   2638  1.10.2.2  matt 		prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
   2639  1.10.2.2  matt 
   2640  1.10.2.2  matt #ifdef MALLOC_STATS
   2641  1.10.2.2  matt 		memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
   2642  1.10.2.2  matt #endif
   2643  1.10.2.2  matt 	}
   2644  1.10.2.2  matt 
   2645  1.10.2.2  matt #ifdef MALLOC_DEBUG
   2646  1.10.2.2  matt 	arena->magic = ARENA_MAGIC;
   2647  1.10.2.2  matt #endif
   2648  1.10.2.2  matt 
   2649  1.10.2.2  matt 	return (false);
   2650  1.10.2.2  matt }
   2651  1.10.2.2  matt 
   2652  1.10.2.2  matt /* Create a new arena and insert it into the arenas array at index ind. */
   2653  1.10.2.2  matt static arena_t *
   2654  1.10.2.2  matt arenas_extend(unsigned ind)
   2655  1.10.2.2  matt {
   2656  1.10.2.2  matt 	arena_t *ret;
   2657  1.10.2.2  matt 
   2658  1.10.2.2  matt 	/* Allocate enough space for trailing bins. */
   2659  1.10.2.2  matt 	ret = (arena_t *)base_alloc(sizeof(arena_t)
   2660  1.10.2.2  matt 	    + (sizeof(arena_bin_t) * (ntbins + nqbins + nsbins - 1)));
   2661  1.10.2.2  matt 	if (ret != NULL && arena_new(ret) == false) {
   2662  1.10.2.2  matt 		arenas[ind] = ret;
   2663  1.10.2.2  matt 		return (ret);
   2664  1.10.2.2  matt 	}
   2665  1.10.2.2  matt 	/* Only reached if there is an OOM error. */
   2666  1.10.2.2  matt 
   2667  1.10.2.2  matt 	/*
   2668  1.10.2.2  matt 	 * OOM here is quite inconvenient to propagate, since dealing with it
   2669  1.10.2.2  matt 	 * would require a check for failure in the fast path.  Instead, punt
   2670  1.10.2.2  matt 	 * by using arenas[0].  In practice, this is an extremely unlikely
   2671  1.10.2.2  matt 	 * failure.
   2672  1.10.2.2  matt 	 */
   2673  1.10.2.2  matt 	_malloc_message(_getprogname(),
   2674  1.10.2.2  matt 	    ": (malloc) Error initializing arena\n", "", "");
   2675  1.10.2.2  matt 	if (opt_abort)
   2676  1.10.2.2  matt 		abort();
   2677  1.10.2.2  matt 
   2678  1.10.2.2  matt 	return (arenas[0]);
   2679  1.10.2.2  matt }
   2680  1.10.2.2  matt 
   2681  1.10.2.2  matt /*
   2682  1.10.2.2  matt  * End arena.
   2683  1.10.2.2  matt  */
   2684  1.10.2.2  matt /******************************************************************************/
   2685  1.10.2.2  matt /*
   2686  1.10.2.2  matt  * Begin general internal functions.
   2687  1.10.2.2  matt  */
   2688  1.10.2.2  matt 
   2689  1.10.2.2  matt static void *
   2690  1.10.2.2  matt huge_malloc(size_t size)
   2691  1.10.2.2  matt {
   2692  1.10.2.2  matt 	void *ret;
   2693  1.10.2.2  matt 	size_t csize;
   2694  1.10.2.2  matt 	chunk_node_t *node;
   2695  1.10.2.2  matt 
   2696  1.10.2.2  matt 	/* Allocate one or more contiguous chunks for this request. */
   2697  1.10.2.2  matt 
   2698  1.10.2.2  matt 	csize = CHUNK_CEILING(size);
   2699  1.10.2.2  matt 	if (csize == 0) {
   2700  1.10.2.2  matt 		/* size is large enough to cause size_t wrap-around. */
   2701  1.10.2.2  matt 		return (NULL);
   2702  1.10.2.2  matt 	}
   2703  1.10.2.2  matt 
   2704  1.10.2.2  matt 	/* Allocate a chunk node with which to track the chunk. */
   2705  1.10.2.2  matt 	node = base_chunk_node_alloc();
   2706  1.10.2.2  matt 	if (node == NULL)
   2707  1.10.2.2  matt 		return (NULL);
   2708  1.10.2.2  matt 
   2709  1.10.2.2  matt 	ret = chunk_alloc(csize);
   2710  1.10.2.2  matt 	if (ret == NULL) {
   2711  1.10.2.2  matt 		base_chunk_node_dealloc(node);
   2712  1.10.2.2  matt 		return (NULL);
   2713  1.10.2.2  matt 	}
   2714  1.10.2.2  matt 
   2715  1.10.2.2  matt 	/* Insert node into huge. */
   2716  1.10.2.2  matt 	node->chunk = ret;
   2717  1.10.2.2  matt 	node->size = csize;
   2718  1.10.2.2  matt 
   2719  1.10.2.2  matt 	malloc_mutex_lock(&chunks_mtx);
   2720  1.10.2.2  matt 	RB_INSERT(chunk_tree_s, &huge, node);
   2721  1.10.2.2  matt #ifdef MALLOC_STATS
   2722  1.10.2.2  matt 	huge_nmalloc++;
   2723  1.10.2.2  matt 	huge_allocated += csize;
   2724  1.10.2.2  matt #endif
   2725  1.10.2.2  matt 	malloc_mutex_unlock(&chunks_mtx);
   2726  1.10.2.2  matt 
   2727  1.10.2.2  matt 	if (opt_junk)
   2728  1.10.2.2  matt 		memset(ret, 0xa5, csize);
   2729  1.10.2.2  matt 	else if (opt_zero)
   2730  1.10.2.2  matt 		memset(ret, 0, csize);
   2731  1.10.2.2  matt 
   2732  1.10.2.2  matt 	return (ret);
   2733  1.10.2.2  matt }
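
/*
 * Example of the wrap-around check above, assuming 1MB chunks and a 32-bit
 * size_t: a request of 0xfff80001 bytes rounds up past SIZE_T_MAX, so
 * CHUNK_CEILING() yields 0 and huge_malloc() fails cleanly rather than
 * mapping a truncated chunk.
 */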
   2734  1.10.2.2  matt 
   2735  1.10.2.2  matt /* Only handles large allocations that require more than chunk alignment. */
   2736  1.10.2.2  matt static void *
   2737  1.10.2.2  matt huge_palloc(size_t alignment, size_t size)
   2738  1.10.2.2  matt {
   2739  1.10.2.2  matt 	void *ret;
   2740  1.10.2.2  matt 	size_t alloc_size, chunk_size, offset;
   2741  1.10.2.2  matt 	chunk_node_t *node;
   2742  1.10.2.2  matt 
   2743  1.10.2.2  matt 	/*
   2744  1.10.2.2  matt 	 * This allocation requires alignment that is even larger than chunk
   2745  1.10.2.2  matt 	 * alignment.  This means that huge_malloc() isn't good enough.
   2746  1.10.2.2  matt 	 *
   2747  1.10.2.2  matt 	 * Allocate almost twice as many chunks as are demanded by the size or
    2748  1.10.2.2  matt 	 * alignment, in order to ensure the alignment can be achieved, then
   2749  1.10.2.2  matt 	 * unmap leading and trailing chunks.
   2750  1.10.2.2  matt 	 */
   2751  1.10.2.2  matt 	assert(alignment >= chunksize);
   2752  1.10.2.2  matt 
   2753  1.10.2.2  matt 	chunk_size = CHUNK_CEILING(size);
   2754  1.10.2.2  matt 
   2755  1.10.2.2  matt 	if (size >= alignment)
   2756  1.10.2.2  matt 		alloc_size = chunk_size + alignment - chunksize;
   2757  1.10.2.2  matt 	else
   2758  1.10.2.2  matt 		alloc_size = (alignment << 1) - chunksize;
   2759  1.10.2.2  matt 
   2760  1.10.2.2  matt 	/* Allocate a chunk node with which to track the chunk. */
   2761  1.10.2.2  matt 	node = base_chunk_node_alloc();
   2762  1.10.2.2  matt 	if (node == NULL)
   2763  1.10.2.2  matt 		return (NULL);
   2764  1.10.2.2  matt 
   2765  1.10.2.2  matt 	ret = chunk_alloc(alloc_size);
   2766  1.10.2.2  matt 	if (ret == NULL) {
   2767  1.10.2.2  matt 		base_chunk_node_dealloc(node);
   2768  1.10.2.2  matt 		return (NULL);
   2769  1.10.2.2  matt 	}
   2770  1.10.2.2  matt 
   2771  1.10.2.2  matt 	offset = (uintptr_t)ret & (alignment - 1);
   2772  1.10.2.2  matt 	assert((offset & chunksize_mask) == 0);
   2773  1.10.2.2  matt 	assert(offset < alloc_size);
   2774  1.10.2.2  matt 	if (offset == 0) {
   2775  1.10.2.2  matt 		/* Trim trailing space. */
   2776  1.10.2.2  matt 		chunk_dealloc((void *)((uintptr_t)ret + chunk_size), alloc_size
   2777  1.10.2.2  matt 		    - chunk_size);
   2778  1.10.2.2  matt 	} else {
   2779  1.10.2.2  matt 		size_t trailsize;
   2780  1.10.2.2  matt 
   2781  1.10.2.2  matt 		/* Trim leading space. */
   2782  1.10.2.2  matt 		chunk_dealloc(ret, alignment - offset);
   2783  1.10.2.2  matt 
   2784  1.10.2.2  matt 		ret = (void *)((uintptr_t)ret + (alignment - offset));
   2785  1.10.2.2  matt 
   2786  1.10.2.2  matt 		trailsize = alloc_size - (alignment - offset) - chunk_size;
   2787  1.10.2.2  matt 		if (trailsize != 0) {
   2788  1.10.2.2  matt 		    /* Trim trailing space. */
   2789  1.10.2.2  matt 		    assert(trailsize < alloc_size);
   2790  1.10.2.2  matt 		    chunk_dealloc((void *)((uintptr_t)ret + chunk_size),
   2791  1.10.2.2  matt 			trailsize);
   2792  1.10.2.2  matt 		}
   2793  1.10.2.2  matt 	}
   2794  1.10.2.2  matt 
   2795  1.10.2.2  matt 	/* Insert node into huge. */
   2796  1.10.2.2  matt 	node->chunk = ret;
   2797  1.10.2.2  matt 	node->size = chunk_size;
   2798  1.10.2.2  matt 
   2799  1.10.2.2  matt 	malloc_mutex_lock(&chunks_mtx);
   2800  1.10.2.2  matt 	RB_INSERT(chunk_tree_s, &huge, node);
   2801  1.10.2.2  matt #ifdef MALLOC_STATS
   2802  1.10.2.2  matt 	huge_nmalloc++;
   2803  1.10.2.2  matt 	huge_allocated += chunk_size;
   2804  1.10.2.2  matt #endif
   2805  1.10.2.2  matt 	malloc_mutex_unlock(&chunks_mtx);
   2806  1.10.2.2  matt 
   2807  1.10.2.2  matt 	if (opt_junk)
   2808  1.10.2.2  matt 		memset(ret, 0xa5, chunk_size);
   2809  1.10.2.2  matt 	else if (opt_zero)
   2810  1.10.2.2  matt 		memset(ret, 0, chunk_size);
   2811  1.10.2.2  matt 
   2812  1.10.2.2  matt 	return (ret);
   2813  1.10.2.2  matt }
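
/*
 * Worked example of the over-allocation above (hypothetical numbers,
 * chunksize == 1MB): for alignment == 4MB and size == 1MB, size < alignment,
 * so alloc_size == (4MB << 1) - 1MB == 7MB.  Because chunk_alloc() returns
 * chunk-aligned memory, the leading space to trim is at most
 * alignment - chunksize == 3MB, which always leaves at least alignment ==
 * 4MB beyond the first 4MB boundary; the aligned 1MB chunk is kept and the
 * leading and trailing remainders are unmapped.
 */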
   2814  1.10.2.2  matt 
   2815  1.10.2.2  matt static void *
   2816  1.10.2.2  matt huge_ralloc(void *ptr, size_t size, size_t oldsize)
   2817  1.10.2.2  matt {
   2818  1.10.2.2  matt 	void *ret;
   2819  1.10.2.2  matt 
   2820  1.10.2.2  matt 	/* Avoid moving the allocation if the size class would not change. */
   2821  1.10.2.2  matt 	if (oldsize > arena_maxclass &&
   2822  1.10.2.2  matt 	    CHUNK_CEILING(size) == CHUNK_CEILING(oldsize)) {
   2823  1.10.2.2  matt 		if (opt_junk && size < oldsize) {
   2824  1.10.2.2  matt 			memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize
   2825  1.10.2.2  matt 			    - size);
   2826  1.10.2.2  matt 		} else if (opt_zero && size > oldsize) {
   2827  1.10.2.2  matt 			memset((void *)((uintptr_t)ptr + oldsize), 0, size
   2828  1.10.2.2  matt 			    - oldsize);
   2829  1.10.2.2  matt 		}
   2830  1.10.2.2  matt 		return (ptr);
   2831  1.10.2.2  matt 	}
   2832  1.10.2.2  matt 
   2833  1.10.2.2  matt 	if (CHUNK_ADDR2BASE(ptr) == ptr
   2834  1.10.2.2  matt #ifdef USE_BRK
   2835  1.10.2.2  matt 	    && ((uintptr_t)ptr < (uintptr_t)brk_base
   2836  1.10.2.2  matt 	    || (uintptr_t)ptr >= (uintptr_t)brk_max)
   2837  1.10.2.2  matt #endif
   2838  1.10.2.2  matt 	    ) {
   2839  1.10.2.2  matt 		chunk_node_t *node, key;
   2840  1.10.2.2  matt 		void *newptr;
   2841  1.10.2.2  matt 		size_t oldcsize;
   2842  1.10.2.2  matt 		size_t newcsize;
   2843  1.10.2.2  matt 
   2844  1.10.2.2  matt 		newcsize = CHUNK_CEILING(size);
   2845  1.10.2.2  matt 		oldcsize = CHUNK_CEILING(oldsize);
   2846  1.10.2.2  matt 		assert(oldcsize != newcsize);
   2847  1.10.2.2  matt 		if (newcsize == 0) {
   2848  1.10.2.2  matt 			/* size_t wrap-around */
   2849  1.10.2.2  matt 			return (NULL);
   2850  1.10.2.2  matt 		}
   2851  1.10.2.2  matt 		newptr = mremap(ptr, oldcsize, NULL, newcsize,
   2852  1.10.2.2  matt 		    MAP_ALIGNED(chunksize_2pow));
   2853  1.10.2.2  matt 		if (newptr != MAP_FAILED) {
   2854  1.10.2.2  matt 			assert(CHUNK_ADDR2BASE(newptr) == newptr);
   2855  1.10.2.2  matt 
   2856  1.10.2.2  matt 			/* update tree */
   2857  1.10.2.2  matt 			malloc_mutex_lock(&chunks_mtx);
   2858  1.10.2.2  matt 			key.chunk = __DECONST(void *, ptr);
   2859  1.10.2.2  matt 			/* LINTED */
   2860  1.10.2.2  matt 			node = RB_FIND(chunk_tree_s, &huge, &key);
   2861  1.10.2.2  matt 			assert(node != NULL);
   2862  1.10.2.2  matt 			assert(node->chunk == ptr);
   2863  1.10.2.2  matt 			assert(node->size == oldcsize);
   2864  1.10.2.2  matt 			node->size = newcsize;
   2865  1.10.2.2  matt 			if (ptr != newptr) {
   2866  1.10.2.2  matt 				RB_REMOVE(chunk_tree_s, &huge, node);
   2867  1.10.2.2  matt 				node->chunk = newptr;
   2868  1.10.2.2  matt 				RB_INSERT(chunk_tree_s, &huge, node);
   2869  1.10.2.2  matt 			}
   2870  1.10.2.2  matt #ifdef MALLOC_STATS
   2871  1.10.2.2  matt 			huge_nralloc++;
   2872  1.10.2.2  matt 			huge_allocated += newcsize - oldcsize;
   2873  1.10.2.2  matt 			if (newcsize > oldcsize) {
   2874  1.10.2.2  matt 				stats_chunks.curchunks +=
   2875  1.10.2.2  matt 				    (newcsize - oldcsize) / chunksize;
   2876  1.10.2.2  matt 				if (stats_chunks.curchunks >
   2877  1.10.2.2  matt 				    stats_chunks.highchunks)
   2878  1.10.2.2  matt 					stats_chunks.highchunks =
   2879  1.10.2.2  matt 					    stats_chunks.curchunks;
   2880  1.10.2.2  matt 			} else {
   2881  1.10.2.2  matt 				stats_chunks.curchunks -=
   2882  1.10.2.2  matt 				    (oldcsize - newcsize) / chunksize;
   2883  1.10.2.2  matt 			}
   2884  1.10.2.2  matt #endif
   2885  1.10.2.2  matt 			malloc_mutex_unlock(&chunks_mtx);
   2886  1.10.2.2  matt 
   2887  1.10.2.2  matt 			if (opt_junk && size < oldsize) {
   2888  1.10.2.2  matt 				memset((void *)((uintptr_t)newptr + size), 0x5a,
   2889  1.10.2.2  matt 				    newcsize - size);
   2890  1.10.2.2  matt 			} else if (opt_zero && size > oldsize) {
   2891  1.10.2.2  matt 				memset((void *)((uintptr_t)newptr + oldsize), 0,
   2892  1.10.2.2  matt 				    size - oldsize);
   2893  1.10.2.2  matt 			}
   2894  1.10.2.2  matt 			return (newptr);
   2895  1.10.2.2  matt 		}
   2896  1.10.2.2  matt 	}
   2897  1.10.2.2  matt 
   2898  1.10.2.2  matt 	/*
   2899  1.10.2.2  matt 	 * If we get here, then size and oldsize are different enough that we
   2900  1.10.2.2  matt 	 * need to use a different size class.  In that case, fall back to
   2901  1.10.2.2  matt 	 * allocating new space and copying.
   2902  1.10.2.2  matt 	 */
   2903  1.10.2.2  matt 	ret = huge_malloc(size);
   2904  1.10.2.2  matt 	if (ret == NULL)
   2905  1.10.2.2  matt 		return (NULL);
   2906  1.10.2.2  matt 
   2907  1.10.2.2  matt 	if (CHUNK_ADDR2BASE(ptr) == ptr) {
   2908  1.10.2.2  matt 		/* The old allocation is a chunk. */
   2909  1.10.2.2  matt 		if (size < oldsize)
   2910  1.10.2.2  matt 			memcpy(ret, ptr, size);
   2911  1.10.2.2  matt 		else
   2912  1.10.2.2  matt 			memcpy(ret, ptr, oldsize);
   2913  1.10.2.2  matt 	} else {
   2914  1.10.2.2  matt 		/* The old allocation is a region. */
   2915  1.10.2.2  matt 		assert(oldsize < size);
   2916  1.10.2.2  matt 		memcpy(ret, ptr, oldsize);
   2917  1.10.2.2  matt 	}
   2918  1.10.2.2  matt 	idalloc(ptr);
   2919  1.10.2.2  matt 	return (ret);
   2920  1.10.2.2  matt }
   2921  1.10.2.2  matt 
   2922  1.10.2.2  matt static void
   2923  1.10.2.2  matt huge_dalloc(void *ptr)
   2924  1.10.2.2  matt {
   2925  1.10.2.2  matt 	chunk_node_t key;
   2926  1.10.2.2  matt 	chunk_node_t *node;
   2927  1.10.2.2  matt 
   2928  1.10.2.2  matt 	malloc_mutex_lock(&chunks_mtx);
   2929  1.10.2.2  matt 
   2930  1.10.2.2  matt 	/* Extract from tree of huge allocations. */
   2931  1.10.2.2  matt 	key.chunk = ptr;
   2932  1.10.2.2  matt 	/* LINTED */
   2933  1.10.2.2  matt 	node = RB_FIND(chunk_tree_s, &huge, &key);
   2934  1.10.2.2  matt 	assert(node != NULL);
   2935  1.10.2.2  matt 	assert(node->chunk == ptr);
   2936  1.10.2.2  matt 	/* LINTED */
   2937  1.10.2.2  matt 	RB_REMOVE(chunk_tree_s, &huge, node);
   2938  1.10.2.2  matt 
   2939  1.10.2.2  matt #ifdef MALLOC_STATS
   2940  1.10.2.2  matt 	huge_ndalloc++;
   2941  1.10.2.2  matt 	huge_allocated -= node->size;
   2942  1.10.2.2  matt #endif
   2943  1.10.2.2  matt 
   2944  1.10.2.2  matt 	malloc_mutex_unlock(&chunks_mtx);
   2945  1.10.2.2  matt 
   2946  1.10.2.2  matt 	/* Unmap chunk. */
   2947  1.10.2.2  matt #ifdef USE_BRK
   2948  1.10.2.2  matt 	if (opt_junk)
   2949  1.10.2.2  matt 		memset(node->chunk, 0x5a, node->size);
   2950  1.10.2.2  matt #endif
   2951  1.10.2.2  matt 	chunk_dealloc(node->chunk, node->size);
   2952  1.10.2.2  matt 
   2953  1.10.2.2  matt 	base_chunk_node_dealloc(node);
   2954  1.10.2.2  matt }
   2955  1.10.2.2  matt 
   2956  1.10.2.2  matt static void *
   2957  1.10.2.2  matt imalloc(size_t size)
   2958  1.10.2.2  matt {
   2959  1.10.2.2  matt 	void *ret;
   2960  1.10.2.2  matt 
   2961  1.10.2.2  matt 	assert(size != 0);
   2962  1.10.2.2  matt 
   2963  1.10.2.2  matt 	if (size <= arena_maxclass)
   2964  1.10.2.2  matt 		ret = arena_malloc(choose_arena(), size);
   2965  1.10.2.2  matt 	else
   2966  1.10.2.2  matt 		ret = huge_malloc(size);
   2967  1.10.2.2  matt 
   2968  1.10.2.2  matt 	return (ret);
   2969  1.10.2.2  matt }
   2970  1.10.2.2  matt 
   2971  1.10.2.2  matt static void *
   2972  1.10.2.2  matt ipalloc(size_t alignment, size_t size)
   2973  1.10.2.2  matt {
   2974  1.10.2.2  matt 	void *ret;
   2975  1.10.2.2  matt 	size_t ceil_size;
   2976  1.10.2.2  matt 
   2977  1.10.2.2  matt 	/*
   2978  1.10.2.2  matt 	 * Round size up to the nearest multiple of alignment.
   2979  1.10.2.2  matt 	 *
   2980  1.10.2.2  matt 	 * This done, we can take advantage of the fact that for each small
   2981  1.10.2.2  matt 	 * size class, every object is aligned at the smallest power of two
   2982  1.10.2.2  matt 	 * that is non-zero in the base two representation of the size.  For
   2983  1.10.2.2  matt 	 * example:
   2984  1.10.2.2  matt 	 *
   2985  1.10.2.2  matt 	 *   Size |   Base 2 | Minimum alignment
   2986  1.10.2.2  matt 	 *   -----+----------+------------------
   2987  1.10.2.2  matt 	 *     96 |  1100000 |  32
   2988  1.10.2.2  matt 	 *    144 | 10100000 |  32
   2989  1.10.2.2  matt 	 *    192 | 11000000 |  64
   2990  1.10.2.2  matt 	 *
   2991  1.10.2.2  matt 	 * Depending on runtime settings, it is possible that arena_malloc()
   2992  1.10.2.2  matt 	 * will further round up to a power of two, but that never causes
   2993  1.10.2.2  matt 	 * correctness issues.
   2994  1.10.2.2  matt 	 */
   2995  1.10.2.2  matt 	ceil_size = (size + (alignment - 1)) & (-alignment);
   2996  1.10.2.2  matt 	/*
   2997  1.10.2.2  matt 	 * (ceil_size < size) protects against the combination of maximal
   2998  1.10.2.2  matt 	 * alignment and size greater than maximal alignment.
   2999  1.10.2.2  matt 	 */
   3000  1.10.2.2  matt 	if (ceil_size < size) {
   3001  1.10.2.2  matt 		/* size_t overflow. */
   3002  1.10.2.2  matt 		return (NULL);
   3003  1.10.2.2  matt 	}
   3004  1.10.2.2  matt 
   3005  1.10.2.2  matt 	if (ceil_size <= pagesize || (alignment <= pagesize
   3006  1.10.2.2  matt 	    && ceil_size <= arena_maxclass))
   3007  1.10.2.2  matt 		ret = arena_malloc(choose_arena(), ceil_size);
   3008  1.10.2.2  matt 	else {
   3009  1.10.2.2  matt 		size_t run_size;
   3010  1.10.2.2  matt 
   3011  1.10.2.2  matt 		/*
   3012  1.10.2.2  matt 		 * We can't achieve sub-page alignment, so round up alignment
   3013  1.10.2.2  matt 		 * permanently; it makes later calculations simpler.
   3014  1.10.2.2  matt 		 */
   3015  1.10.2.2  matt 		alignment = PAGE_CEILING(alignment);
   3016  1.10.2.2  matt 		ceil_size = PAGE_CEILING(size);
   3017  1.10.2.2  matt 		/*
   3018  1.10.2.2  matt 		 * (ceil_size < size) protects against very large sizes within
   3019  1.10.2.2  matt 		 * pagesize of SIZE_T_MAX.
   3020  1.10.2.2  matt 		 *
   3021  1.10.2.2  matt 		 * (ceil_size + alignment < ceil_size) protects against the
   3022  1.10.2.2  matt 		 * combination of maximal alignment and ceil_size large enough
   3023  1.10.2.2  matt 		 * to cause overflow.  This is similar to the first overflow
   3024  1.10.2.2  matt 		 * check above, but it needs to be repeated due to the new
   3025  1.10.2.2  matt 		 * ceil_size value, which may now be *equal* to maximal
   3026  1.10.2.2  matt 		 * alignment, whereas before we only detected overflow if the
   3027  1.10.2.2  matt 		 * original size was *greater* than maximal alignment.
   3028  1.10.2.2  matt 		 */
   3029  1.10.2.2  matt 		if (ceil_size < size || ceil_size + alignment < ceil_size) {
   3030  1.10.2.2  matt 			/* size_t overflow. */
   3031  1.10.2.2  matt 			return (NULL);
   3032  1.10.2.2  matt 		}
   3033  1.10.2.2  matt 
   3034  1.10.2.2  matt 		/*
   3035  1.10.2.2  matt 		 * Calculate the size of the over-size run that arena_palloc()
   3036  1.10.2.2  matt 		 * would need to allocate in order to guarantee the alignment.
   3037  1.10.2.2  matt 		 */
   3038  1.10.2.2  matt 		if (ceil_size >= alignment)
   3039  1.10.2.2  matt 			run_size = ceil_size + alignment - pagesize;
   3040  1.10.2.2  matt 		else {
   3041  1.10.2.2  matt 			/*
   3042  1.10.2.2  matt 			 * It is possible that (alignment << 1) will cause
   3043  1.10.2.2  matt 			 * overflow, but it doesn't matter because we also
   3044  1.10.2.2  matt 			 * subtract pagesize, which in the case of overflow
   3045  1.10.2.2  matt 			 * leaves us with a very large run_size.  That causes
   3046  1.10.2.2  matt 			 * the first conditional below to fail, which means
   3047  1.10.2.2  matt 			 * that the bogus run_size value never gets used for
   3048  1.10.2.2  matt 			 * anything important.
   3049  1.10.2.2  matt 			 */
   3050  1.10.2.2  matt 			run_size = (alignment << 1) - pagesize;
   3051  1.10.2.2  matt 		}
   3052  1.10.2.2  matt 
   3053  1.10.2.2  matt 		if (run_size <= arena_maxclass) {
   3054  1.10.2.2  matt 			ret = arena_palloc(choose_arena(), alignment, ceil_size,
   3055  1.10.2.2  matt 			    run_size);
   3056  1.10.2.2  matt 		} else if (alignment <= chunksize)
   3057  1.10.2.2  matt 			ret = huge_malloc(ceil_size);
   3058  1.10.2.2  matt 		else
   3059  1.10.2.2  matt 			ret = huge_palloc(alignment, ceil_size);
   3060  1.10.2.2  matt 	}
   3061  1.10.2.2  matt 
   3062  1.10.2.2  matt 	assert(((uintptr_t)ret & (alignment - 1)) == 0);
   3063  1.10.2.2  matt 	return (ret);
   3064  1.10.2.2  matt }
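
/*
 * Illustrative sketch only (not compiled in; the helper name is
 * hypothetical): the round-up used at the top of ipalloc().  For a
 * power-of-two alignment, adding (alignment - 1) and masking with
 * -alignment rounds size up to the next multiple of alignment.
 */
#if 0
static size_t
example_align_ceil(size_t size, size_t alignment)
{

	/* e.g. (100, 32) -> (100 + 31) & ~(size_t)31 == 128. */
	return ((size + (alignment - 1)) & -alignment);
}
#endif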
   3065  1.10.2.2  matt 
   3066  1.10.2.2  matt static void *
   3067  1.10.2.2  matt icalloc(size_t size)
   3068  1.10.2.2  matt {
   3069  1.10.2.2  matt 	void *ret;
   3070  1.10.2.2  matt 
   3071  1.10.2.2  matt 	if (size <= arena_maxclass) {
   3072  1.10.2.2  matt 		ret = arena_malloc(choose_arena(), size);
   3073  1.10.2.2  matt 		if (ret == NULL)
   3074  1.10.2.2  matt 			return (NULL);
   3075  1.10.2.2  matt 		memset(ret, 0, size);
   3076  1.10.2.2  matt 	} else {
   3077  1.10.2.2  matt 		/*
   3078  1.10.2.2  matt 		 * The virtual memory system provides zero-filled pages, so
   3079  1.10.2.2  matt 		 * there is no need to do so manually, unless opt_junk is
   3080  1.10.2.2  matt 		 * enabled, in which case huge_malloc() fills huge allocations
   3081  1.10.2.2  matt 		 * with junk.
   3082  1.10.2.2  matt 		 */
   3083  1.10.2.2  matt 		ret = huge_malloc(size);
   3084  1.10.2.2  matt 		if (ret == NULL)
   3085  1.10.2.2  matt 			return (NULL);
   3086  1.10.2.2  matt 
   3087  1.10.2.2  matt 		if (opt_junk)
   3088  1.10.2.2  matt 			memset(ret, 0, size);
   3089  1.10.2.2  matt #ifdef USE_BRK
   3090  1.10.2.2  matt 		else if ((uintptr_t)ret >= (uintptr_t)brk_base
   3091  1.10.2.2  matt 		    && (uintptr_t)ret < (uintptr_t)brk_max) {
   3092  1.10.2.2  matt 			/*
   3093  1.10.2.2  matt 			 * This may be a re-used brk chunk.  Therefore, zero
   3094  1.10.2.2  matt 			 * the memory.
   3095  1.10.2.2  matt 			 */
   3096  1.10.2.2  matt 			memset(ret, 0, size);
   3097  1.10.2.2  matt 		}
   3098  1.10.2.2  matt #endif
   3099  1.10.2.2  matt 	}
   3100  1.10.2.2  matt 
   3101  1.10.2.2  matt 	return (ret);
   3102  1.10.2.2  matt }
   3103  1.10.2.2  matt 
   3104  1.10.2.2  matt static size_t
   3105  1.10.2.2  matt isalloc(const void *ptr)
   3106  1.10.2.2  matt {
   3107  1.10.2.2  matt 	size_t ret;
   3108  1.10.2.2  matt 	arena_chunk_t *chunk;
   3109  1.10.2.2  matt 
   3110  1.10.2.2  matt 	assert(ptr != NULL);
   3111  1.10.2.2  matt 
   3112  1.10.2.2  matt 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
   3113  1.10.2.2  matt 	if (chunk != ptr) {
   3114  1.10.2.2  matt 		/* Region. */
   3115  1.10.2.2  matt 		assert(chunk->arena->magic == ARENA_MAGIC);
   3116  1.10.2.2  matt 
   3117  1.10.2.2  matt 		ret = arena_salloc(ptr);
   3118  1.10.2.2  matt 	} else {
   3119  1.10.2.2  matt 		chunk_node_t *node, key;
   3120  1.10.2.2  matt 
   3121  1.10.2.2  matt 		/* Chunk (huge allocation). */
   3122  1.10.2.2  matt 
   3123  1.10.2.2  matt 		malloc_mutex_lock(&chunks_mtx);
   3124  1.10.2.2  matt 
   3125  1.10.2.2  matt 		/* Extract from tree of huge allocations. */
   3126  1.10.2.2  matt 		key.chunk = __DECONST(void *, ptr);
   3127  1.10.2.2  matt 		/* LINTED */
   3128  1.10.2.2  matt 		node = RB_FIND(chunk_tree_s, &huge, &key);
   3129  1.10.2.2  matt 		assert(node != NULL);
   3130  1.10.2.2  matt 
   3131  1.10.2.2  matt 		ret = node->size;
   3132  1.10.2.2  matt 
   3133  1.10.2.2  matt 		malloc_mutex_unlock(&chunks_mtx);
   3134  1.10.2.2  matt 	}
   3135  1.10.2.2  matt 
   3136  1.10.2.2  matt 	return (ret);
   3137  1.10.2.2  matt }
   3138  1.10.2.2  matt 
   3139  1.10.2.2  matt static void *
   3140  1.10.2.2  matt iralloc(void *ptr, size_t size)
   3141  1.10.2.2  matt {
   3142  1.10.2.2  matt 	void *ret;
   3143  1.10.2.2  matt 	size_t oldsize;
   3144  1.10.2.2  matt 
   3145  1.10.2.2  matt 	assert(ptr != NULL);
   3146  1.10.2.2  matt 	assert(size != 0);
   3147  1.10.2.2  matt 
   3148  1.10.2.2  matt 	oldsize = isalloc(ptr);
   3149  1.10.2.2  matt 
   3150  1.10.2.2  matt 	if (size <= arena_maxclass)
   3151  1.10.2.2  matt 		ret = arena_ralloc(ptr, size, oldsize);
   3152  1.10.2.2  matt 	else
   3153  1.10.2.2  matt 		ret = huge_ralloc(ptr, size, oldsize);
   3154  1.10.2.2  matt 
   3155  1.10.2.2  matt 	return (ret);
   3156  1.10.2.2  matt }
   3157  1.10.2.2  matt 
   3158  1.10.2.2  matt static void
   3159  1.10.2.2  matt idalloc(void *ptr)
   3160  1.10.2.2  matt {
   3161  1.10.2.2  matt 	arena_chunk_t *chunk;
   3162  1.10.2.2  matt 
   3163  1.10.2.2  matt 	assert(ptr != NULL);
   3164  1.10.2.2  matt 
   3165  1.10.2.2  matt 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
   3166  1.10.2.2  matt 	if (chunk != ptr) {
   3167  1.10.2.2  matt 		/* Region. */
   3168  1.10.2.2  matt 		arena_dalloc(chunk->arena, chunk, ptr);
   3169  1.10.2.2  matt 	} else
   3170  1.10.2.2  matt 		huge_dalloc(ptr);
   3171  1.10.2.2  matt }
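
/*
 * The dispatch above relies on huge allocations being the only ones that
 * start exactly at a chunk base: arena allocations always begin past the
 * arena_chunk_t header (note the assert(CHUNK_ADDR2BASE(ptr) != ptr) checks
 * in the arena code above), so a chunk-aligned pointer must be tracked in
 * the huge tree.
 */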
   3172  1.10.2.2  matt 
   3173  1.10.2.2  matt static void
   3174  1.10.2.2  matt malloc_print_stats(void)
   3175  1.10.2.2  matt {
   3176  1.10.2.2  matt 
   3177  1.10.2.2  matt 	if (opt_print_stats) {
   3178  1.10.2.2  matt 		char s[UMAX2S_BUFSIZE];
   3179  1.10.2.2  matt 		_malloc_message("___ Begin malloc statistics ___\n", "", "",
   3180  1.10.2.2  matt 		    "");
   3181  1.10.2.2  matt 		_malloc_message("Assertions ",
   3182  1.10.2.2  matt #ifdef NDEBUG
   3183  1.10.2.2  matt 		    "disabled",
   3184  1.10.2.2  matt #else
   3185  1.10.2.2  matt 		    "enabled",
   3186  1.10.2.2  matt #endif
   3187  1.10.2.2  matt 		    "\n", "");
   3188  1.10.2.2  matt 		_malloc_message("Boolean MALLOC_OPTIONS: ",
   3189  1.10.2.2  matt 		    opt_abort ? "A" : "a",
   3190  1.10.2.2  matt 		    opt_junk ? "J" : "j",
   3191  1.10.2.2  matt 		    opt_hint ? "H" : "h");
   3192  1.10.2.2  matt 		_malloc_message(opt_utrace ? "PU" : "Pu",
   3193  1.10.2.2  matt 		    opt_sysv ? "V" : "v",
   3194  1.10.2.2  matt 		    opt_xmalloc ? "X" : "x",
   3195  1.10.2.2  matt 		    opt_zero ? "Z\n" : "z\n");
   3196  1.10.2.2  matt 
   3197  1.10.2.2  matt 		_malloc_message("CPUs: ", umax2s(ncpus, s), "\n", "");
   3198  1.10.2.2  matt 		_malloc_message("Max arenas: ", umax2s(narenas, s), "\n", "");
   3199  1.10.2.2  matt 		_malloc_message("Pointer size: ", umax2s(sizeof(void *), s),
   3200  1.10.2.2  matt 		    "\n", "");
   3201  1.10.2.2  matt 		_malloc_message("Quantum size: ", umax2s(quantum, s), "\n", "");
   3202  1.10.2.2  matt 		_malloc_message("Max small size: ", umax2s(small_max, s), "\n",
   3203  1.10.2.2  matt 		    "");
   3204  1.10.2.2  matt 
   3205  1.10.2.2  matt 		_malloc_message("Chunk size: ", umax2s(chunksize, s), "", "");
   3206  1.10.2.2  matt 		_malloc_message(" (2^", umax2s(opt_chunk_2pow, s), ")\n", "");
   3207  1.10.2.2  matt 
   3208  1.10.2.2  matt #ifdef MALLOC_STATS
   3209  1.10.2.2  matt 		{
   3210  1.10.2.2  matt 			size_t allocated, mapped;
   3211  1.10.2.2  matt 			unsigned i;
   3212  1.10.2.2  matt 			arena_t *arena;
   3213  1.10.2.2  matt 
   3214  1.10.2.2  matt 			/* Calculate and print allocated/mapped stats. */
   3215  1.10.2.2  matt 
   3216  1.10.2.2  matt 			/* arenas. */
   3217  1.10.2.2  matt 			for (i = 0, allocated = 0; i < narenas; i++) {
   3218  1.10.2.2  matt 				if (arenas[i] != NULL) {
   3219  1.10.2.2  matt 					malloc_mutex_lock(&arenas[i]->mtx);
   3220  1.10.2.2  matt 					allocated +=
   3221  1.10.2.2  matt 					    arenas[i]->stats.allocated_small;
   3222  1.10.2.2  matt 					allocated +=
   3223  1.10.2.2  matt 					    arenas[i]->stats.allocated_large;
   3224  1.10.2.2  matt 					malloc_mutex_unlock(&arenas[i]->mtx);
   3225  1.10.2.2  matt 				}
   3226  1.10.2.2  matt 			}
   3227  1.10.2.2  matt 
   3228  1.10.2.2  matt 			/* huge/base. */
   3229  1.10.2.2  matt 			malloc_mutex_lock(&chunks_mtx);
   3230  1.10.2.2  matt 			allocated += huge_allocated;
   3231  1.10.2.2  matt 			mapped = stats_chunks.curchunks * chunksize;
   3232  1.10.2.2  matt 			malloc_mutex_unlock(&chunks_mtx);
   3233  1.10.2.2  matt 
   3234  1.10.2.2  matt 			malloc_mutex_lock(&base_mtx);
   3235  1.10.2.2  matt 			mapped += base_mapped;
   3236  1.10.2.2  matt 			malloc_mutex_unlock(&base_mtx);
   3237  1.10.2.2  matt 
   3238  1.10.2.2  matt 			malloc_printf("Allocated: %zu, mapped: %zu\n",
   3239  1.10.2.2  matt 			    allocated, mapped);
   3240  1.10.2.2  matt 
   3241  1.10.2.2  matt 			/* Print chunk stats. */
   3242  1.10.2.2  matt 			{
   3243  1.10.2.2  matt 				chunk_stats_t chunks_stats;
   3244  1.10.2.2  matt 
   3245  1.10.2.2  matt 				malloc_mutex_lock(&chunks_mtx);
   3246  1.10.2.2  matt 				chunks_stats = stats_chunks;
   3247  1.10.2.2  matt 				malloc_mutex_unlock(&chunks_mtx);
   3248  1.10.2.2  matt 
   3249  1.10.2.2  matt 				malloc_printf("chunks: nchunks   "
   3250  1.10.2.2  matt 				    "highchunks    curchunks\n");
   3251  1.10.2.2  matt 				malloc_printf("  %13llu%13lu%13lu\n",
   3252  1.10.2.2  matt 				    chunks_stats.nchunks,
   3253  1.10.2.2  matt 				    chunks_stats.highchunks,
   3254  1.10.2.2  matt 				    chunks_stats.curchunks);
   3255  1.10.2.2  matt 			}
   3256  1.10.2.2  matt 
    3257  1.10.2.2  matt 			/* Print huge allocation stats. */
   3258  1.10.2.2  matt 			malloc_printf(
   3259  1.10.2.2  matt 			    "huge: nmalloc      ndalloc      "
   3260  1.10.2.2  matt 			    "nralloc    allocated\n");
   3261  1.10.2.2  matt 			malloc_printf(" %12llu %12llu %12llu %12zu\n",
   3262  1.10.2.2  matt 			    huge_nmalloc, huge_ndalloc, huge_nralloc,
   3263  1.10.2.2  matt 			    huge_allocated);
   3264  1.10.2.2  matt 
   3265  1.10.2.2  matt 			/* Print stats for each arena. */
   3266  1.10.2.2  matt 			for (i = 0; i < narenas; i++) {
   3267  1.10.2.2  matt 				arena = arenas[i];
   3268  1.10.2.2  matt 				if (arena != NULL) {
   3269  1.10.2.2  matt 					malloc_printf(
   3270  1.10.2.2  matt 					    "\narenas[%u] @ %p\n", i, arena);
   3271  1.10.2.2  matt 					malloc_mutex_lock(&arena->mtx);
   3272  1.10.2.2  matt 					stats_print(arena);
   3273  1.10.2.2  matt 					malloc_mutex_unlock(&arena->mtx);
   3274  1.10.2.2  matt 				}
   3275  1.10.2.2  matt 			}
   3276  1.10.2.2  matt 		}
   3277  1.10.2.2  matt #endif /* #ifdef MALLOC_STATS */
    3278  1.10.2.2  matt 		_malloc_message("___ End malloc statistics ___\n", "", "", "");
   3279  1.10.2.2  matt 	}
   3280  1.10.2.2  matt }
   3281  1.10.2.2  matt 
   3282  1.10.2.2  matt /*
   3283  1.10.2.2  matt  * FreeBSD's pthreads implementation calls malloc(3), so the malloc
   3284  1.10.2.2  matt  * implementation has to take pains to avoid infinite recursion during
   3285  1.10.2.2  matt  * initialization.
   3286  1.10.2.2  matt  */
   3287  1.10.2.2  matt static inline bool
   3288  1.10.2.2  matt malloc_init(void)
   3289  1.10.2.2  matt {
   3290  1.10.2.2  matt 
   3291  1.10.2.2  matt 	if (malloc_initialized == false)
   3292  1.10.2.2  matt 		return (malloc_init_hard());
   3293  1.10.2.2  matt 
   3294  1.10.2.2  matt 	return (false);
   3295  1.10.2.2  matt }
   3296  1.10.2.2  matt 
   3297  1.10.2.2  matt static bool
   3298  1.10.2.2  matt malloc_init_hard(void)
   3299  1.10.2.2  matt {
   3300  1.10.2.2  matt 	unsigned i, j;
   3301  1.10.2.2  matt 	ssize_t linklen;
   3302  1.10.2.2  matt 	char buf[PATH_MAX + 1];
   3303  1.10.2.2  matt 	const char *opts = "";
   3304  1.10.2.2  matt 
   3305  1.10.2.2  matt 	malloc_mutex_lock(&init_lock);
   3306  1.10.2.2  matt 	if (malloc_initialized) {
   3307  1.10.2.2  matt 		/*
   3308  1.10.2.2  matt 		 * Another thread initialized the allocator before this one
   3309  1.10.2.2  matt 		 * acquired init_lock.
   3310  1.10.2.2  matt 		 */
   3311  1.10.2.2  matt 		malloc_mutex_unlock(&init_lock);
   3312  1.10.2.2  matt 		return (false);
   3313  1.10.2.2  matt 	}
   3314  1.10.2.2  matt 
   3315  1.10.2.2  matt 	/* Get number of CPUs. */
   3316  1.10.2.2  matt 	{
   3317  1.10.2.2  matt 		int mib[2];
   3318  1.10.2.2  matt 		size_t len;
   3319  1.10.2.2  matt 
   3320  1.10.2.2  matt 		mib[0] = CTL_HW;
   3321  1.10.2.2  matt 		mib[1] = HW_NCPU;
   3322  1.10.2.2  matt 		len = sizeof(ncpus);
   3323  1.10.2.2  matt 		if (sysctl(mib, 2, &ncpus, &len, (void *) 0, 0) == -1) {
   3324  1.10.2.2  matt 			/* Error. */
   3325  1.10.2.2  matt 			ncpus = 1;
   3326  1.10.2.2  matt 		}
   3327  1.10.2.2  matt 	}
   3328  1.10.2.2  matt 
   3329  1.10.2.2  matt 	/* Get page size. */
   3330  1.10.2.2  matt 	{
   3331  1.10.2.2  matt 		long result;
   3332  1.10.2.2  matt 
   3333  1.10.2.2  matt 		result = sysconf(_SC_PAGESIZE);
   3334  1.10.2.2  matt 		assert(result != -1);
   3335  1.10.2.2  matt 		pagesize = (unsigned) result;
   3336  1.10.2.2  matt 
   3337  1.10.2.2  matt 		/*
   3338  1.10.2.2  matt 		 * We assume that pagesize is a power of 2 when calculating
   3339  1.10.2.2  matt 		 * pagesize_mask and pagesize_2pow.
   3340  1.10.2.2  matt 		 */
   3341  1.10.2.2  matt 		assert(((result - 1) & result) == 0);
   3342  1.10.2.2  matt 		pagesize_mask = result - 1;
   3343  1.10.2.2  matt 		pagesize_2pow = ffs((int)result) - 1;
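		/*
		 * Example: a 4096-byte page yields pagesize_mask == 0xfff
		 * and pagesize_2pow == 12 (ffs(4096) == 13).
		 */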
   3344  1.10.2.2  matt 	}
   3345  1.10.2.2  matt 
   3346  1.10.2.2  matt 	for (i = 0; i < 3; i++) {
   3347  1.10.2.2  matt 		/* Get runtime configuration. */
   3348  1.10.2.2  matt 		switch (i) {
   3349  1.10.2.2  matt 		case 0:
   3350  1.10.2.2  matt 			if ((linklen = readlink("/etc/malloc.conf", buf,
   3351  1.10.2.2  matt 						sizeof(buf) - 1)) != -1) {
   3352  1.10.2.2  matt 				/*
   3353  1.10.2.2  matt 				 * Use the contents of the "/etc/malloc.conf"
   3354  1.10.2.2  matt 				 * symbolic link's name.
   3355  1.10.2.2  matt 				 */
   3356  1.10.2.2  matt 				buf[linklen] = '\0';
   3357  1.10.2.2  matt 				opts = buf;
   3358  1.10.2.2  matt 			} else {
   3359  1.10.2.2  matt 				/* No configuration specified. */
   3360  1.10.2.2  matt 				buf[0] = '\0';
   3361  1.10.2.2  matt 				opts = buf;
   3362  1.10.2.2  matt 			}
   3363  1.10.2.2  matt 			break;
   3364  1.10.2.2  matt 		case 1:
   3365  1.10.2.2  matt 			if (issetugid() == 0 && (opts =
   3366  1.10.2.2  matt 			    getenv("MALLOC_OPTIONS")) != NULL) {
   3367  1.10.2.2  matt 				/*
   3368  1.10.2.2  matt 				 * Do nothing; opts is already initialized to
   3369  1.10.2.2  matt 				 * the value of the MALLOC_OPTIONS environment
   3370  1.10.2.2  matt 				 * variable.
   3371  1.10.2.2  matt 				 */
   3372  1.10.2.2  matt 			} else {
   3373  1.10.2.2  matt 				/* No configuration specified. */
   3374  1.10.2.2  matt 				buf[0] = '\0';
   3375  1.10.2.2  matt 				opts = buf;
   3376  1.10.2.2  matt 			}
   3377  1.10.2.2  matt 			break;
   3378  1.10.2.2  matt 		case 2:
   3379  1.10.2.2  matt 			if (_malloc_options != NULL) {
   3380  1.10.2.2  matt 			    /*
   3381  1.10.2.2  matt 			     * Use options that were compiled into the program.
   3382  1.10.2.2  matt 			     */
   3383  1.10.2.2  matt 			    opts = _malloc_options;
   3384  1.10.2.2  matt 			} else {
   3385  1.10.2.2  matt 				/* No configuration specified. */
   3386  1.10.2.2  matt 				buf[0] = '\0';
   3387  1.10.2.2  matt 				opts = buf;
   3388  1.10.2.2  matt 			}
   3389  1.10.2.2  matt 			break;
   3390  1.10.2.2  matt 		default:
   3391  1.10.2.2  matt 			/* NOTREACHED */
   3392  1.10.2.2  matt 			/* LINTED */
   3393  1.10.2.2  matt 			assert(false);
   3394  1.10.2.2  matt 		}
   3395  1.10.2.2  matt 
   3396  1.10.2.2  matt 		for (j = 0; opts[j] != '\0'; j++) {
   3397  1.10.2.2  matt 			switch (opts[j]) {
   3398  1.10.2.2  matt 			case 'a':
   3399  1.10.2.2  matt 				opt_abort = false;
   3400  1.10.2.2  matt 				break;
   3401  1.10.2.2  matt 			case 'A':
   3402  1.10.2.2  matt 				opt_abort = true;
   3403  1.10.2.2  matt 				break;
   3404  1.10.2.2  matt 			case 'h':
   3405  1.10.2.2  matt 				opt_hint = false;
   3406  1.10.2.2  matt 				break;
   3407  1.10.2.2  matt 			case 'H':
   3408  1.10.2.2  matt 				opt_hint = true;
   3409  1.10.2.2  matt 				break;
   3410  1.10.2.2  matt 			case 'j':
   3411  1.10.2.2  matt 				opt_junk = false;
   3412  1.10.2.2  matt 				break;
   3413  1.10.2.2  matt 			case 'J':
   3414  1.10.2.2  matt 				opt_junk = true;
   3415  1.10.2.2  matt 				break;
   3416  1.10.2.2  matt 			case 'k':
   3417  1.10.2.2  matt 				/*
   3418  1.10.2.2  matt 				 * Chunks always require at least one header
   3419  1.10.2.2  matt 				 * page, so chunks can never be smaller than
   3420  1.10.2.2  matt 				 * two pages.
   3421  1.10.2.2  matt 				 */
   3422  1.10.2.2  matt 				if (opt_chunk_2pow > pagesize_2pow + 1)
   3423  1.10.2.2  matt 					opt_chunk_2pow--;
   3424  1.10.2.2  matt 				break;
   3425  1.10.2.2  matt 			case 'K':
   3426  1.10.2.2  matt 				/*
   3427  1.10.2.2  matt 				 * There must be fewer pages in a chunk than
   3428  1.10.2.2  matt 				 * can be recorded by the pos field of
   3429  1.10.2.2  matt 				 * arena_chunk_map_t, in order to make POS_FREE
   3430  1.10.2.2  matt 				 * special.
   3431  1.10.2.2  matt 				 */
   3432  1.10.2.2  matt 				if (opt_chunk_2pow - pagesize_2pow
   3433  1.10.2.2  matt 				    < (sizeof(uint32_t) << 3) - 1)
   3434  1.10.2.2  matt 					opt_chunk_2pow++;
   3435  1.10.2.2  matt 				break;
   3436  1.10.2.2  matt 			case 'n':
   3437  1.10.2.2  matt 				opt_narenas_lshift--;
   3438  1.10.2.2  matt 				break;
   3439  1.10.2.2  matt 			case 'N':
   3440  1.10.2.2  matt 				opt_narenas_lshift++;
   3441  1.10.2.2  matt 				break;
   3442  1.10.2.2  matt 			case 'p':
   3443  1.10.2.2  matt 				opt_print_stats = false;
   3444  1.10.2.2  matt 				break;
   3445  1.10.2.2  matt 			case 'P':
   3446  1.10.2.2  matt 				opt_print_stats = true;
   3447  1.10.2.2  matt 				break;
   3448  1.10.2.2  matt 			case 'q':
   3449  1.10.2.2  matt 				if (opt_quantum_2pow > QUANTUM_2POW_MIN)
   3450  1.10.2.2  matt 					opt_quantum_2pow--;
   3451  1.10.2.2  matt 				break;
   3452  1.10.2.2  matt 			case 'Q':
   3453  1.10.2.2  matt 				if (opt_quantum_2pow < pagesize_2pow - 1)
   3454  1.10.2.2  matt 					opt_quantum_2pow++;
   3455  1.10.2.2  matt 				break;
   3456  1.10.2.2  matt 			case 's':
   3457  1.10.2.2  matt 				if (opt_small_max_2pow > QUANTUM_2POW_MIN)
   3458  1.10.2.2  matt 					opt_small_max_2pow--;
   3459  1.10.2.2  matt 				break;
   3460  1.10.2.2  matt 			case 'S':
   3461  1.10.2.2  matt 				if (opt_small_max_2pow < pagesize_2pow - 1)
   3462  1.10.2.2  matt 					opt_small_max_2pow++;
   3463  1.10.2.2  matt 				break;
   3464  1.10.2.2  matt 			case 'u':
   3465  1.10.2.2  matt 				opt_utrace = false;
   3466  1.10.2.2  matt 				break;
   3467  1.10.2.2  matt 			case 'U':
   3468  1.10.2.2  matt 				opt_utrace = true;
   3469  1.10.2.2  matt 				break;
   3470  1.10.2.2  matt 			case 'v':
   3471  1.10.2.2  matt 				opt_sysv = false;
   3472  1.10.2.2  matt 				break;
   3473  1.10.2.2  matt 			case 'V':
   3474  1.10.2.2  matt 				opt_sysv = true;
   3475  1.10.2.2  matt 				break;
   3476  1.10.2.2  matt 			case 'x':
   3477  1.10.2.2  matt 				opt_xmalloc = false;
   3478  1.10.2.2  matt 				break;
   3479  1.10.2.2  matt 			case 'X':
   3480  1.10.2.2  matt 				opt_xmalloc = true;
   3481  1.10.2.2  matt 				break;
   3482  1.10.2.2  matt 			case 'z':
   3483  1.10.2.2  matt 				opt_zero = false;
   3484  1.10.2.2  matt 				break;
   3485  1.10.2.2  matt 			case 'Z':
   3486  1.10.2.2  matt 				opt_zero = true;
   3487  1.10.2.2  matt 				break;
   3488  1.10.2.2  matt 			default: {
   3489  1.10.2.2  matt 				char cbuf[2];
   3490  1.10.2.2  matt 
   3491  1.10.2.2  matt 				cbuf[0] = opts[j];
   3492  1.10.2.2  matt 				cbuf[1] = '\0';
   3493  1.10.2.2  matt 				_malloc_message(_getprogname(),
   3494  1.10.2.2  matt 				    ": (malloc) Unsupported character in "
   3495  1.10.2.2  matt 				    "malloc options: '", cbuf, "'\n");
   3496  1.10.2.2  matt 			}
   3497  1.10.2.2  matt 			}
   3498  1.10.2.2  matt 		}
   3499  1.10.2.2  matt 	}
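                         	/*
                         	 * Illustrative note: the three passes above are parsed in order,
                         	 * so a flag from a later source overrides an earlier one.  For
                         	 * example, running a program with MALLOC_OPTIONS=AJ in the
                         	 * environment (ignored for set-user-ID programs) enables
                         	 * abort-on-error and junk filling, unless a compiled-in
                         	 * _malloc_options string, parsed last, flips them back.
                         	 */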
   3500  1.10.2.2  matt 
   3501  1.10.2.2  matt 	/* Take care to call atexit() only once. */
   3502  1.10.2.2  matt 	if (opt_print_stats) {
   3503  1.10.2.2  matt 		/* Print statistics at exit. */
   3504  1.10.2.2  matt 		atexit(malloc_print_stats);
   3505  1.10.2.2  matt 	}
   3506  1.10.2.2  matt 
   3507  1.10.2.2  matt 	/* Set variables according to the value of opt_small_max_2pow. */
   3508  1.10.2.2  matt 	if (opt_small_max_2pow < opt_quantum_2pow)
   3509  1.10.2.2  matt 		opt_small_max_2pow = opt_quantum_2pow;
   3510  1.10.2.2  matt 	small_max = (1 << opt_small_max_2pow);
   3511  1.10.2.2  matt 
   3512  1.10.2.2  matt 	/* Set bin-related variables. */
   3513  1.10.2.2  matt 	bin_maxclass = (pagesize >> 1);
   3514  1.10.2.2  matt 	assert(opt_quantum_2pow >= TINY_MIN_2POW);
   3515  1.10.2.2  matt 	ntbins = (unsigned)(opt_quantum_2pow - TINY_MIN_2POW);
   3516  1.10.2.2  matt 	assert(ntbins <= opt_quantum_2pow);
   3517  1.10.2.2  matt 	nqbins = (unsigned)(small_max >> opt_quantum_2pow);
   3518  1.10.2.2  matt 	nsbins = (unsigned)(pagesize_2pow - opt_small_max_2pow - 1);
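                         	/*
                         	 * Illustrative sizing, assuming 4 KiB pages, a 16 byte quantum
                         	 * and small_max == 512: bin_maxclass == 2048, nqbins == 512 / 16
                         	 * == 32 quantum-spaced bins, nsbins == 12 - 9 - 1 == 2 sub-page
                         	 * bins (1024 and 2048 bytes), and ntbins covers the power-of-two
                         	 * classes below the quantum.
                         	 */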
   3519  1.10.2.2  matt 
   3520  1.10.2.2  matt 	/* Set variables according to the value of opt_quantum_2pow. */
   3521  1.10.2.2  matt 	quantum = (1 << opt_quantum_2pow);
   3522  1.10.2.2  matt 	quantum_mask = quantum - 1;
   3523  1.10.2.2  matt 	if (ntbins > 0)
   3524  1.10.2.2  matt 		small_min = (quantum >> 1) + 1;
   3525  1.10.2.2  matt 	else
   3526  1.10.2.2  matt 		small_min = 1;
   3527  1.10.2.2  matt 	assert(small_min <= quantum);
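                         	/*
                         	 * Illustrative note: with a 16 byte quantum, quantum_mask == 0xf
                         	 * and small_min == 9, so requests of 9..16 bytes land in the
                         	 * first quantum-spaced bin while smaller requests fall through
                         	 * to the tiny (power-of-two) classes.
                         	 */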
   3528  1.10.2.2  matt 
   3529  1.10.2.2  matt 	/* Set variables according to the value of opt_chunk_2pow. */
   3530  1.10.2.2  matt 	chunksize = (1LU << opt_chunk_2pow);
   3531  1.10.2.2  matt 	chunksize_mask = chunksize - 1;
   3532  1.10.2.2  matt 	chunksize_2pow = (unsigned)opt_chunk_2pow;
   3533  1.10.2.2  matt 	chunk_npages = (unsigned)(chunksize >> pagesize_2pow);
   3534  1.10.2.2  matt 	{
   3535  1.10.2.2  matt 		unsigned header_size;
   3536  1.10.2.2  matt 
   3537  1.10.2.2  matt 		header_size = (unsigned)(sizeof(arena_chunk_t) +
   3538  1.10.2.2  matt 		    (sizeof(arena_chunk_map_t) * (chunk_npages - 1)));
   3539  1.10.2.2  matt 		arena_chunk_header_npages = (header_size >> pagesize_2pow);
   3540  1.10.2.2  matt 		if ((header_size & pagesize_mask) != 0)
   3541  1.10.2.2  matt 			arena_chunk_header_npages++;
   3542  1.10.2.2  matt 	}
   3543  1.10.2.2  matt 	arena_maxclass = chunksize - (arena_chunk_header_npages <<
   3544  1.10.2.2  matt 	    pagesize_2pow);
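                         	/*
                         	 * Illustrative note: with 4 KiB pages, a header_size of, say,
                         	 * 5000 bytes rounds up to arena_chunk_header_npages == 2, giving
                         	 * arena_maxclass == chunksize - 8192; requests larger than that
                         	 * are served as huge allocations rather than from within a chunk.
                         	 */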
   3545  1.10.2.2  matt 
   3546  1.10.2.2  matt 	UTRACE(0, 0, 0);
   3547  1.10.2.2  matt 
   3548  1.10.2.2  matt #ifdef MALLOC_STATS
   3549  1.10.2.2  matt 	memset(&stats_chunks, 0, sizeof(chunk_stats_t));
   3550  1.10.2.2  matt #endif
   3551  1.10.2.2  matt 
   3552  1.10.2.2  matt 	/* Various sanity checks that regard configuration. */
   3553  1.10.2.2  matt 	assert(quantum >= sizeof(void *));
   3554  1.10.2.2  matt 	assert(quantum <= pagesize);
   3555  1.10.2.2  matt 	assert(chunksize >= pagesize);
   3556  1.10.2.2  matt 	assert(quantum * 4 <= chunksize);
   3557  1.10.2.2  matt 
   3558  1.10.2.2  matt 	/* Initialize chunks data. */
   3559  1.10.2.2  matt 	malloc_mutex_init(&chunks_mtx);
   3560  1.10.2.2  matt 	RB_INIT(&huge);
   3561  1.10.2.2  matt #ifdef USE_BRK
   3562  1.10.2.2  matt 	malloc_mutex_init(&brk_mtx);
   3563  1.10.2.2  matt 	brk_base = sbrk(0);
   3564  1.10.2.2  matt 	brk_prev = brk_base;
   3565  1.10.2.2  matt 	brk_max = brk_base;
   3566  1.10.2.2  matt #endif
   3567  1.10.2.2  matt #ifdef MALLOC_STATS
   3568  1.10.2.2  matt 	huge_nmalloc = 0;
   3569  1.10.2.2  matt 	huge_ndalloc = 0;
   3570  1.10.2.2  matt 	huge_nralloc = 0;
   3571  1.10.2.2  matt 	huge_allocated = 0;
   3572  1.10.2.2  matt #endif
   3573  1.10.2.2  matt 	RB_INIT(&old_chunks);
   3574  1.10.2.2  matt 
   3575  1.10.2.2  matt 	/* Initialize base allocation data structures. */
   3576  1.10.2.2  matt #ifdef MALLOC_STATS
   3577  1.10.2.2  matt 	base_mapped = 0;
   3578  1.10.2.2  matt #endif
   3579  1.10.2.2  matt #ifdef USE_BRK
   3580  1.10.2.2  matt 	/*
   3581  1.10.2.2  matt 	 * Allocate a base chunk here, since it doesn't actually have to be
   3582  1.10.2.2  matt 	 * chunk-aligned.  Doing this before allocating any other chunks allows
   3583  1.10.2.2  matt 	 * the use of space that would otherwise be wasted.
   3584  1.10.2.2  matt 	 */
   3585  1.10.2.2  matt 	base_pages_alloc(0);
   3586  1.10.2.2  matt #endif
   3587  1.10.2.2  matt 	base_chunk_nodes = NULL;
   3588  1.10.2.2  matt 	malloc_mutex_init(&base_mtx);
   3589  1.10.2.2  matt 
   3590  1.10.2.2  matt 	if (ncpus > 1) {
   3591  1.10.2.2  matt 		/*
   3592  1.10.2.2  matt 		 * For SMP systems, create four times as many arenas as there
   3593  1.10.2.2  matt 		 * are CPUs by default.
   3594  1.10.2.2  matt 		 */
   3595  1.10.2.2  matt 		opt_narenas_lshift += 2;
   3596  1.10.2.2  matt 	}
   3597  1.10.2.2  matt 
   3598  1.10.2.2  matt #ifdef NO_TLS
   3599  1.10.2.2  matt 	/* Initialize arena key. */
   3600  1.10.2.2  matt 	(void)thr_keycreate(&arenas_map_key, NULL);
   3601  1.10.2.2  matt #endif
   3602  1.10.2.2  matt 
   3603  1.10.2.2  matt 	/* Determine how many arenas to use. */
   3604  1.10.2.2  matt 	narenas = ncpus;
   3605  1.10.2.2  matt 	if (opt_narenas_lshift > 0) {
   3606  1.10.2.2  matt 		if ((narenas << opt_narenas_lshift) > narenas)
   3607  1.10.2.2  matt 			narenas <<= opt_narenas_lshift;
   3608  1.10.2.2  matt 		/*
    3609  1.10.2.2  matt 		 * Make sure not to exceed the limits of what base_alloc()
   3610  1.10.2.2  matt 		 * can handle.
   3611  1.10.2.2  matt 		 */
   3612  1.10.2.2  matt 		if (narenas * sizeof(arena_t *) > chunksize)
   3613  1.10.2.2  matt 			narenas = (unsigned)(chunksize / sizeof(arena_t *));
   3614  1.10.2.2  matt 	} else if (opt_narenas_lshift < 0) {
    3615  1.10.2.2  matt 		if ((narenas >> -opt_narenas_lshift) < narenas)
    3616  1.10.2.2  matt 			narenas >>= -opt_narenas_lshift;
   3617  1.10.2.2  matt 		/* Make sure there is at least one arena. */
   3618  1.10.2.2  matt 		if (narenas == 0)
   3619  1.10.2.2  matt 			narenas = 1;
   3620  1.10.2.2  matt 	}
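                         	/*
                         	 * Illustrative note: assuming opt_narenas_lshift started at zero,
                         	 * a 4-CPU machine ends up with narenas == 4 << 2 == 16, subject
                         	 * to the cap that keeps the arena pointer array within one chunk.
                         	 */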
   3621  1.10.2.2  matt 
   3622  1.10.2.2  matt 	next_arena = 0;
   3623  1.10.2.2  matt 
   3624  1.10.2.2  matt 	/* Allocate and initialize arenas. */
   3625  1.10.2.2  matt 	arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
   3626  1.10.2.2  matt 	if (arenas == NULL) {
   3627  1.10.2.2  matt 		malloc_mutex_unlock(&init_lock);
   3628  1.10.2.2  matt 		return (true);
   3629  1.10.2.2  matt 	}
   3630  1.10.2.2  matt 	/*
   3631  1.10.2.2  matt 	 * Zero the array.  In practice, this should always be pre-zeroed,
   3632  1.10.2.2  matt 	 * since it was just mmap()ed, but let's be sure.
   3633  1.10.2.2  matt 	 */
   3634  1.10.2.2  matt 	memset(arenas, 0, sizeof(arena_t *) * narenas);
   3635  1.10.2.2  matt 
   3636  1.10.2.2  matt 	/*
   3637  1.10.2.2  matt 	 * Initialize one arena here.  The rest are lazily created in
   3638  1.10.2.2  matt 	 * arena_choose_hard().
   3639  1.10.2.2  matt 	 */
   3640  1.10.2.2  matt 	arenas_extend(0);
   3641  1.10.2.2  matt 	if (arenas[0] == NULL) {
   3642  1.10.2.2  matt 		malloc_mutex_unlock(&init_lock);
   3643  1.10.2.2  matt 		return (true);
   3644  1.10.2.2  matt 	}
   3645  1.10.2.2  matt 
   3646  1.10.2.2  matt 	malloc_mutex_init(&arenas_mtx);
   3647  1.10.2.2  matt 
   3648  1.10.2.2  matt 	malloc_initialized = true;
   3649  1.10.2.2  matt 	malloc_mutex_unlock(&init_lock);
   3650  1.10.2.2  matt 	return (false);
   3651  1.10.2.2  matt }
   3652  1.10.2.2  matt 
   3653  1.10.2.2  matt /*
   3654  1.10.2.2  matt  * End general internal functions.
   3655  1.10.2.2  matt  */
   3656  1.10.2.2  matt /******************************************************************************/
   3657  1.10.2.2  matt /*
   3658  1.10.2.2  matt  * Begin malloc(3)-compatible functions.
   3659  1.10.2.2  matt  */
   3660  1.10.2.2  matt 
   3661  1.10.2.2  matt void *
   3662  1.10.2.2  matt malloc(size_t size)
   3663  1.10.2.2  matt {
   3664  1.10.2.2  matt 	void *ret;
   3665  1.10.2.2  matt 
   3666  1.10.2.2  matt 	if (malloc_init()) {
   3667  1.10.2.2  matt 		ret = NULL;
   3668  1.10.2.2  matt 		goto RETURN;
   3669  1.10.2.2  matt 	}
   3670  1.10.2.2  matt 
   3671  1.10.2.2  matt 	if (size == 0) {
   3672  1.10.2.2  matt 		if (opt_sysv == false)
   3673  1.10.2.2  matt 			size = 1;
   3674  1.10.2.2  matt 		else {
   3675  1.10.2.2  matt 			ret = NULL;
   3676  1.10.2.2  matt 			goto RETURN;
   3677  1.10.2.2  matt 		}
   3678  1.10.2.2  matt 	}
   3679  1.10.2.2  matt 
   3680  1.10.2.2  matt 	ret = imalloc(size);
   3681  1.10.2.2  matt 
   3682  1.10.2.2  matt RETURN:
   3683  1.10.2.2  matt 	if (ret == NULL) {
   3684  1.10.2.2  matt 		if (opt_xmalloc) {
   3685  1.10.2.2  matt 			_malloc_message(_getprogname(),
   3686  1.10.2.2  matt 			    ": (malloc) Error in malloc(): out of memory\n", "",
   3687  1.10.2.2  matt 			    "");
   3688  1.10.2.2  matt 			abort();
   3689  1.10.2.2  matt 		}
   3690  1.10.2.2  matt 		errno = ENOMEM;
   3691  1.10.2.2  matt 	}
   3692  1.10.2.2  matt 
   3693  1.10.2.2  matt 	UTRACE(0, size, ret);
   3694  1.10.2.2  matt 	return (ret);
   3695  1.10.2.2  matt }
   3696  1.10.2.2  matt 
   3697  1.10.2.2  matt /* XXXAD */
   3698  1.10.2.2  matt int	posix_memalign(void **memptr, size_t alignment, size_t size);
   3699  1.10.2.2  matt 
   3700  1.10.2.2  matt int
   3701  1.10.2.2  matt posix_memalign(void **memptr, size_t alignment, size_t size)
   3702  1.10.2.2  matt {
   3703  1.10.2.2  matt 	int ret;
   3704  1.10.2.2  matt 	void *result;
   3705  1.10.2.2  matt 
   3706  1.10.2.2  matt 	if (malloc_init())
   3707  1.10.2.2  matt 		result = NULL;
   3708  1.10.2.2  matt 	else {
   3709  1.10.2.2  matt 		/* Make sure that alignment is a large enough power of 2. */
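                         		/*
                         		 * For example, alignment == 16 passes (16 & 15 == 0), whereas
                         		 * 24 is rejected (24 & 23 != 0), as is any alignment smaller
                         		 * than sizeof(void *).
                         		 */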
   3710  1.10.2.2  matt 		if (((alignment - 1) & alignment) != 0
   3711  1.10.2.2  matt 		    || alignment < sizeof(void *)) {
   3712  1.10.2.2  matt 			if (opt_xmalloc) {
   3713  1.10.2.2  matt 				_malloc_message(_getprogname(),
   3714  1.10.2.2  matt 				    ": (malloc) Error in posix_memalign(): "
   3715  1.10.2.2  matt 				    "invalid alignment\n", "", "");
   3716  1.10.2.2  matt 				abort();
   3717  1.10.2.2  matt 			}
   3718  1.10.2.2  matt 			result = NULL;
   3719  1.10.2.2  matt 			ret = EINVAL;
   3720  1.10.2.2  matt 			goto RETURN;
   3721  1.10.2.2  matt 		}
   3722  1.10.2.2  matt 
   3723  1.10.2.2  matt 		result = ipalloc(alignment, size);
   3724  1.10.2.2  matt 	}
   3725  1.10.2.2  matt 
   3726  1.10.2.2  matt 	if (result == NULL) {
   3727  1.10.2.2  matt 		if (opt_xmalloc) {
   3728  1.10.2.2  matt 			_malloc_message(_getprogname(),
   3729  1.10.2.2  matt 			": (malloc) Error in posix_memalign(): out of memory\n",
   3730  1.10.2.2  matt 			"", "");
   3731  1.10.2.2  matt 			abort();
   3732  1.10.2.2  matt 		}
   3733  1.10.2.2  matt 		ret = ENOMEM;
   3734  1.10.2.2  matt 		goto RETURN;
   3735  1.10.2.2  matt 	}
   3736  1.10.2.2  matt 
   3737  1.10.2.2  matt 	*memptr = result;
   3738  1.10.2.2  matt 	ret = 0;
   3739  1.10.2.2  matt 
   3740  1.10.2.2  matt RETURN:
   3741  1.10.2.2  matt 	UTRACE(0, size, result);
   3742  1.10.2.2  matt 	return (ret);
   3743  1.10.2.2  matt }
   3744  1.10.2.2  matt 
   3745  1.10.2.2  matt void *
   3746  1.10.2.2  matt calloc(size_t num, size_t size)
   3747  1.10.2.2  matt {
   3748  1.10.2.2  matt 	void *ret;
   3749  1.10.2.2  matt 	size_t num_size;
   3750  1.10.2.2  matt 
   3751  1.10.2.2  matt 	if (malloc_init()) {
   3752  1.10.2.2  matt 		num_size = 0;
   3753  1.10.2.2  matt 		ret = NULL;
   3754  1.10.2.2  matt 		goto RETURN;
   3755  1.10.2.2  matt 	}
   3756  1.10.2.2  matt 
   3757  1.10.2.2  matt 	num_size = num * size;
   3758  1.10.2.2  matt 	if (num_size == 0) {
   3759  1.10.2.2  matt 		if ((opt_sysv == false) && ((num == 0) || (size == 0)))
   3760  1.10.2.2  matt 			num_size = 1;
   3761  1.10.2.2  matt 		else {
   3762  1.10.2.2  matt 			ret = NULL;
   3763  1.10.2.2  matt 			goto RETURN;
   3764  1.10.2.2  matt 		}
   3765  1.10.2.2  matt 	/*
   3766  1.10.2.2  matt 	 * Try to avoid division here.  We know that it isn't possible to
   3767  1.10.2.2  matt 	 * overflow during multiplication if neither operand uses any of the
   3768  1.10.2.2  matt 	 * most significant half of the bits in a size_t.
   3769  1.10.2.2  matt 	 */
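                         	/*
                         	 * For example, with a 32-bit size_t the test masks the upper
                         	 * half (bits 16..31) of num | size; if both num and size are
                         	 * below 65536 the product cannot overflow, so the division is
                         	 * skipped entirely.
                         	 */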
   3770  1.10.2.2  matt 	} else if ((unsigned long long)((num | size) &
   3771  1.10.2.2  matt 	   ((unsigned long long)SIZE_T_MAX << (sizeof(size_t) << 2))) &&
   3772  1.10.2.2  matt 	   (num_size / size != num)) {
   3773  1.10.2.2  matt 		/* size_t overflow. */
   3774  1.10.2.2  matt 		ret = NULL;
   3775  1.10.2.2  matt 		goto RETURN;
   3776  1.10.2.2  matt 	}
   3777  1.10.2.2  matt 
   3778  1.10.2.2  matt 	ret = icalloc(num_size);
   3779  1.10.2.2  matt 
   3780  1.10.2.2  matt RETURN:
   3781  1.10.2.2  matt 	if (ret == NULL) {
   3782  1.10.2.2  matt 		if (opt_xmalloc) {
   3783  1.10.2.2  matt 			_malloc_message(_getprogname(),
   3784  1.10.2.2  matt 			    ": (malloc) Error in calloc(): out of memory\n", "",
   3785  1.10.2.2  matt 			    "");
   3786  1.10.2.2  matt 			abort();
   3787  1.10.2.2  matt 		}
   3788  1.10.2.2  matt 		errno = ENOMEM;
   3789  1.10.2.2  matt 	}
   3790  1.10.2.2  matt 
   3791  1.10.2.2  matt 	UTRACE(0, num_size, ret);
   3792  1.10.2.2  matt 	return (ret);
   3793  1.10.2.2  matt }
   3794  1.10.2.2  matt 
   3795  1.10.2.2  matt void *
   3796  1.10.2.2  matt realloc(void *ptr, size_t size)
   3797  1.10.2.2  matt {
   3798  1.10.2.2  matt 	void *ret;
   3799  1.10.2.2  matt 
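                         	/*
                         	 * Note: with the "V" (SysV) option set, realloc(ptr, 0) behaves
                         	 * like free(ptr) and returns NULL; otherwise a zero-size request
                         	 * is treated as a request for one byte.
                         	 */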
   3800  1.10.2.2  matt 	if (size == 0) {
   3801  1.10.2.2  matt 		if (opt_sysv == false)
   3802  1.10.2.2  matt 			size = 1;
   3803  1.10.2.2  matt 		else {
   3804  1.10.2.2  matt 			if (ptr != NULL)
   3805  1.10.2.2  matt 				idalloc(ptr);
   3806  1.10.2.2  matt 			ret = NULL;
   3807  1.10.2.2  matt 			goto RETURN;
   3808  1.10.2.2  matt 		}
   3809  1.10.2.2  matt 	}
   3810  1.10.2.2  matt 
   3811  1.10.2.2  matt 	if (ptr != NULL) {
   3812  1.10.2.2  matt 		assert(malloc_initialized);
   3813  1.10.2.2  matt 
   3814  1.10.2.2  matt 		ret = iralloc(ptr, size);
   3815  1.10.2.2  matt 
   3816  1.10.2.2  matt 		if (ret == NULL) {
   3817  1.10.2.2  matt 			if (opt_xmalloc) {
   3818  1.10.2.2  matt 				_malloc_message(_getprogname(),
   3819  1.10.2.2  matt 				    ": (malloc) Error in realloc(): out of "
   3820  1.10.2.2  matt 				    "memory\n", "", "");
   3821  1.10.2.2  matt 				abort();
   3822  1.10.2.2  matt 			}
   3823  1.10.2.2  matt 			errno = ENOMEM;
   3824  1.10.2.2  matt 		}
   3825  1.10.2.2  matt 	} else {
   3826  1.10.2.2  matt 		if (malloc_init())
   3827  1.10.2.2  matt 			ret = NULL;
   3828  1.10.2.2  matt 		else
   3829  1.10.2.2  matt 			ret = imalloc(size);
   3830  1.10.2.2  matt 
   3831  1.10.2.2  matt 		if (ret == NULL) {
   3832  1.10.2.2  matt 			if (opt_xmalloc) {
   3833  1.10.2.2  matt 				_malloc_message(_getprogname(),
   3834  1.10.2.2  matt 				    ": (malloc) Error in realloc(): out of "
   3835  1.10.2.2  matt 				    "memory\n", "", "");
   3836  1.10.2.2  matt 				abort();
   3837  1.10.2.2  matt 			}
   3838  1.10.2.2  matt 			errno = ENOMEM;
   3839  1.10.2.2  matt 		}
   3840  1.10.2.2  matt 	}
   3841  1.10.2.2  matt 
   3842  1.10.2.2  matt RETURN:
   3843  1.10.2.2  matt 	UTRACE(ptr, size, ret);
   3844  1.10.2.2  matt 	return (ret);
   3845  1.10.2.2  matt }
   3846  1.10.2.2  matt 
   3847  1.10.2.2  matt void
   3848  1.10.2.2  matt free(void *ptr)
   3849  1.10.2.2  matt {
   3850  1.10.2.2  matt 
   3851  1.10.2.2  matt 	UTRACE(ptr, 0, 0);
   3852  1.10.2.2  matt 	if (ptr != NULL) {
   3853  1.10.2.2  matt 		assert(malloc_initialized);
   3854  1.10.2.2  matt 
   3855  1.10.2.2  matt 		idalloc(ptr);
   3856  1.10.2.2  matt 	}
   3857  1.10.2.2  matt }
   3858  1.10.2.2  matt 
   3859  1.10.2.2  matt /*
   3860  1.10.2.2  matt  * End malloc(3)-compatible functions.
   3861  1.10.2.2  matt  */
   3862  1.10.2.2  matt /******************************************************************************/
   3863  1.10.2.2  matt /*
   3864  1.10.2.2  matt  * Begin non-standard functions.
   3865  1.10.2.2  matt  */
   3866  1.10.2.2  matt #ifndef __NetBSD__
   3867  1.10.2.2  matt size_t
   3868  1.10.2.2  matt malloc_usable_size(const void *ptr)
   3869  1.10.2.2  matt {
   3870  1.10.2.2  matt 
   3871  1.10.2.2  matt 	assert(ptr != NULL);
   3872  1.10.2.2  matt 
   3873  1.10.2.2  matt 	return (isalloc(ptr));
   3874  1.10.2.2  matt }
   3875  1.10.2.2  matt #endif
   3876  1.10.2.2  matt 
   3877  1.10.2.2  matt /*
   3878  1.10.2.2  matt  * End non-standard functions.
   3879  1.10.2.2  matt  */
   3880  1.10.2.2  matt /******************************************************************************/
   3881  1.10.2.2  matt /*
   3882  1.10.2.2  matt  * Begin library-private functions, used by threading libraries for protection
   3883  1.10.2.2  matt  * of malloc during fork().  These functions are only called if the program is
   3884  1.10.2.2  matt  * running in threaded mode, so there is no need to check whether the program
   3885  1.10.2.2  matt  * is threaded here.
   3886  1.10.2.2  matt  */
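                         /*
                          * For instance, a threading library would typically arrange (via
                          * pthread_atfork(3) or directly in its fork() wrapper) to call
                          * _malloc_prefork() just before fork(2) and _malloc_postfork() in both
                          * the parent and the child, so that no malloc mutex is left held by a
                          * thread that does not exist in the child.
                          */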
   3887  1.10.2.2  matt 
   3888  1.10.2.2  matt void
   3889  1.10.2.2  matt _malloc_prefork(void)
   3890  1.10.2.2  matt {
   3891  1.10.2.2  matt 	unsigned i;
   3892  1.10.2.2  matt 
   3893  1.10.2.2  matt 	/* Acquire all mutexes in a safe order. */
   3894  1.10.2.2  matt 
   3895  1.10.2.2  matt 	malloc_mutex_lock(&arenas_mtx);
   3896  1.10.2.2  matt 	for (i = 0; i < narenas; i++) {
   3897  1.10.2.2  matt 		if (arenas[i] != NULL)
   3898  1.10.2.2  matt 			malloc_mutex_lock(&arenas[i]->mtx);
   3899  1.10.2.2  matt 	}
   3900  1.10.2.2  matt 	malloc_mutex_unlock(&arenas_mtx);
   3901  1.10.2.2  matt 
   3902  1.10.2.2  matt 	malloc_mutex_lock(&base_mtx);
   3903  1.10.2.2  matt 
   3904  1.10.2.2  matt 	malloc_mutex_lock(&chunks_mtx);
   3905  1.10.2.2  matt }
   3906  1.10.2.2  matt 
   3907  1.10.2.2  matt void
   3908  1.10.2.2  matt _malloc_postfork(void)
   3909  1.10.2.2  matt {
   3910  1.10.2.2  matt 	unsigned i;
   3911  1.10.2.2  matt 
   3912  1.10.2.2  matt 	/* Release all mutexes, now that fork() has completed. */
   3913  1.10.2.2  matt 
   3914  1.10.2.2  matt 	malloc_mutex_unlock(&chunks_mtx);
   3915  1.10.2.2  matt 
   3916  1.10.2.2  matt 	malloc_mutex_unlock(&base_mtx);
   3917  1.10.2.2  matt 
   3918  1.10.2.2  matt 	malloc_mutex_lock(&arenas_mtx);
   3919  1.10.2.2  matt 	for (i = 0; i < narenas; i++) {
   3920  1.10.2.2  matt 		if (arenas[i] != NULL)
   3921  1.10.2.2  matt 			malloc_mutex_unlock(&arenas[i]->mtx);
   3922  1.10.2.2  matt 	}
   3923  1.10.2.2  matt 	malloc_mutex_unlock(&arenas_mtx);
   3924  1.10.2.2  matt }
   3925  1.10.2.2  matt 
   3926  1.10.2.2  matt /*
   3927  1.10.2.2  matt  * End library-private functions.
   3928  1.10.2.2  matt  */
   3929  1.10.2.2  matt /******************************************************************************/
   3930