// based on dlmalloc from Doug Lea


// quote from the Doug Lea original file
    /*
      This is a version (aka dlmalloc) of malloc/free/realloc written by
      Doug Lea and released to the public domain, as explained at
      http://creativecommons.org/licenses/publicdomain.  Send questions,
      comments, complaints, performance data, etc to dl@cs.oswego.edu

    * Version 2.8.3 Thu Sep 22 11:16:15 2005  Doug Lea  (dl at gee)

       Note: There may be an updated version of this malloc obtainable at
               ftp://gee.cs.oswego.edu/pub/misc/malloc.c
             Check before installing!
    */


#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include "mspace.h"

#define MALLOC_ALIGNMENT ((size_t)8U)
#define USE_LOCKS 0
#define malloc_getpagesize ((size_t)4096U)
#define DEFAULT_GRANULARITY malloc_getpagesize
#define MAX_SIZE_T (~(size_t)0)
#define MALLOC_FAILURE_ACTION
#define MALLINFO_FIELD_TYPE size_t
#define FOOTERS 0
#define INSECURE 0
#define PROCEED_ON_ERROR 0
#define DEBUG 0
#define ABORT_ON_ASSERT_FAILURE 1
#define ABORT(user_data) abort_func(user_data)
#define USE_BUILTIN_FFS 0
#define USE_DEV_RANDOM 0
#define PRINT(params) print_func params


#define MEMCPY(dest, src, n) memcpy (dest, src, n)
#define MEMCLEAR(dest, n) memset (dest, 0, n)


#define M_GRANULARITY        (-1)

void __attribute__ ((__noreturn__)) default_abort_func(void *user_data)
{
    for (;;);
}

void default_print_func(void *user_data, const char *format, ...)
{
}

static mspace_abort_t abort_func = default_abort_func;
static mspace_print_t print_func = default_print_func;

void mspace_set_abort_func(mspace_abort_t f)
{
    abort_func = f;
}

void mspace_set_print_func(mspace_print_t f)
{
    print_func = f;
}
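
/*
  Example (illustrative only, not part of the allocator): installing
  custom hooks.  The exact mspace_abort_t / mspace_print_t typedefs
  come from "mspace.h"; the signatures assumed below are inferred from
  the default functions above.

    static void my_abort(void *user_data)
    {
        // e.g. log the failure through user_data, then halt
        for (;;);
    }

    static void my_print(void *user_data, const char *format, ...)
    {
        // e.g. forward format/args to a UART or logging sink
    }

    void install_mspace_hooks(void)
    {
        mspace_set_abort_func(my_abort);
        mspace_set_print_func(my_print);
    }
*/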

/* ------------------------ Mallinfo declarations ------------------------ */

#if !NO_MALLINFO
/*
  This version of malloc supports the standard SVID/XPG mallinfo
  routine that returns a struct containing usage properties and
  statistics. It should work on any system that has a
  /usr/include/malloc.h defining struct mallinfo.  The main
  declaration needed is the mallinfo struct that is returned (by-copy)
  by mallinfo().  The mallinfo struct contains a bunch of fields that
  are not even meaningful in this version of malloc.  These fields are
  instead filled by mallinfo() with other numbers that might be of
  interest.

  HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
  /usr/include/malloc.h file that includes a declaration of struct
  mallinfo.  If so, it is included; else a compliant version is
  declared below.  These must be precisely the same for mallinfo() to
  work.  The original SVID version of this struct, defined on most
  systems with mallinfo, declares all fields as ints. But some others
  define as unsigned long. If your system defines the fields using a
  type of different width than listed here, you MUST #include your
  system version and #define HAVE_USR_INCLUDE_MALLOC_H.
*/

/* #define HAVE_USR_INCLUDE_MALLOC_H */


struct mallinfo {
  MALLINFO_FIELD_TYPE arena;    /* non-mmapped space allocated from system */
  MALLINFO_FIELD_TYPE ordblks;  /* number of free chunks */
  MALLINFO_FIELD_TYPE smblks;   /* always 0 */
  MALLINFO_FIELD_TYPE hblks;    /* always 0 */
  MALLINFO_FIELD_TYPE hblkhd;   /* space in mmapped regions */
  MALLINFO_FIELD_TYPE usmblks;  /* maximum total allocated space */
  MALLINFO_FIELD_TYPE fsmblks;  /* always 0 */
  MALLINFO_FIELD_TYPE uordblks; /* total allocated space */
  MALLINFO_FIELD_TYPE fordblks; /* total free space */
  MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */
};
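
/*
  Example (illustrative only): reading the statistics.  This assumes
  "mspace.h" declares the usual dlmalloc-style mspace type and a
  mspace_mallinfo() accessor; if this build does not provide them,
  treat the snippet purely as a sketch.

    void report_usage(mspace msp)
    {
        struct mallinfo mi = mspace_mallinfo(msp);
        PRINT((0, "in use: %u bytes, free: %u bytes\n",
               (unsigned)mi.uordblks, (unsigned)mi.fordblks));
    }
*/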

#endif /* NO_MALLINFO */



#if DEBUG
#if ABORT_ON_ASSERT_FAILURE
#define assert(user_data, x) if(!(x)) ABORT(user_data)
#else /* ABORT_ON_ASSERT_FAILURE */
#include <assert.h>
#endif /* ABORT_ON_ASSERT_FAILURE */
#else  /* DEBUG */
#define assert(user_data, x)
#endif /* DEBUG */

/* ------------------- size_t and alignment properties -------------------- */

/* The byte and bit size of a size_t */
#define SIZE_T_SIZE         (sizeof(size_t))
#define SIZE_T_BITSIZE      (sizeof(size_t) << 3)

/* Some constants coerced to size_t */
/* Annoying but necessary to avoid errors on some platforms */
#define SIZE_T_ZERO         ((size_t)0)
#define SIZE_T_ONE          ((size_t)1)
#define SIZE_T_TWO          ((size_t)2)
#define TWO_SIZE_T_SIZES    (SIZE_T_SIZE<<1)
#define FOUR_SIZE_T_SIZES   (SIZE_T_SIZE<<2)
#define SIX_SIZE_T_SIZES    (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
#define HALF_MAX_SIZE_T     (MAX_SIZE_T / 2U)

/* The bit mask value corresponding to MALLOC_ALIGNMENT */
#define CHUNK_ALIGN_MASK    (MALLOC_ALIGNMENT - SIZE_T_ONE)

/* True if address a has acceptable alignment */
#define is_aligned(A)       (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)

/* the number of bytes to offset an address to align it */
#define align_offset(A)\
 ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
  ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))

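/*
  Worked example (MALLOC_ALIGNMENT == 8, so CHUNK_ALIGN_MASK == 7):
    is_aligned(0x1000)   -> true    (low three bits clear)
    is_aligned(0x1005)   -> false
    align_offset(0x1005) -> 3       (0x1005 + 3 == 0x1008, 8-byte aligned)
    align_offset(0x1008) -> 0
*/
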
/* --------------------------- Lock preliminaries ------------------------ */

#if USE_LOCKS

/*
  When locks are defined, there are up to two global locks:

  * If HAVE_MORECORE, morecore_mutex protects sequences of calls to
    MORECORE.  In many cases sys_alloc requires two calls, that should
    not be interleaved with calls by other threads.  This does not
    protect against direct calls to MORECORE by other threads not
    using this lock, so there is still code to cope the best we can on
    interference.

  * magic_init_mutex ensures that mparams.magic and other
    unique mparams values are initialized only once.
*/


#define USE_LOCK_BIT               (2U)
#else  /* USE_LOCKS */
#define USE_LOCK_BIT               (0U)
#define INITIAL_LOCK(l)
#endif /* USE_LOCKS */

#if USE_LOCKS
#define ACQUIRE_MAGIC_INIT_LOCK()  ACQUIRE_LOCK(&magic_init_mutex);
#define RELEASE_MAGIC_INIT_LOCK()  RELEASE_LOCK(&magic_init_mutex);
#else  /* USE_LOCKS */
#define ACQUIRE_MAGIC_INIT_LOCK()
#define RELEASE_MAGIC_INIT_LOCK()
#endif /* USE_LOCKS */



/* -----------------------  Chunk representations ------------------------ */

/*
  (The following includes lightly edited explanations by Colin Plumb.)

  The malloc_chunk declaration below is misleading (but accurate and
  necessary).  It declares a "view" into memory allowing access to
  necessary fields at known offsets from a given base.

  Chunks of memory are maintained using a `boundary tag' method as
  originally described by Knuth.  (See the paper by Paul Wilson
  ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such
  techniques.)  Sizes of free chunks are stored both in the front of
  each chunk and at the end.  This makes consolidating fragmented
  chunks into bigger chunks fast.  The head fields also hold bits
  representing whether chunks are free or in use.

  Here are some pictures to make it clearer.  They are "exploded" to
  show that the state of a chunk can be thought of as extending from
  the high 31 bits of the head field of its header through the
  prev_foot and PINUSE_BIT bit of the following chunk header.

  A chunk that's in use looks like:

   chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
           | Size of previous chunk (if P = 1)                             |
           +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
         | Size of this chunk                                         1| +-+
   mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
         |                                                               |
         +-                                                             -+
         |                                                               |
         +-                                                             -+
         |                                                               :
         +-      size - sizeof(size_t) available payload bytes          -+
         :                                                               |
 chunk-> +-                                                             -+
         |                                                               |
         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1|
       | Size of next chunk (may or may not be in use)               | +-+
 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

    And if it's free, it looks like this:

   chunk-> +-                                                             -+
           | User payload (must be in use, or we would have merged!)       |
           +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
         | Size of this chunk                                         0| +-+
   mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
         | Next pointer                                                  |
         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
         | Prev pointer                                                  |
         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
         |                                                               :
         +-      size - sizeof(struct chunk) unused bytes               -+
         :                                                               |
 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
         | Size of this chunk                                            |
         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0|
       | Size of next chunk (must be in use, or we would have merged)| +-+
 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
       |                                                               :
       +- User payload                                                -+
       :                                                               |
       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
                                                                     |0|
                                                                     +-+
  Note that since we always merge adjacent free chunks, the chunks
  adjacent to a free chunk must be in use.

  Given a pointer to a chunk (which can be derived trivially from the
  payload pointer) we can, in O(1) time, find out whether the adjacent
  chunks are free, and if so, unlink them from the lists that they
  are on and merge them with the current chunk.

  Chunks always begin on even word boundaries, so the mem portion
  (which is returned to the user) is also on an even word boundary, and
  thus at least double-word aligned.

  The P (PINUSE_BIT) bit, stored in the unused low-order bit of the
  chunk size (which is always a multiple of two words), is an in-use
  bit for the *previous* chunk.  If that bit is *clear*, then the
  word before the current chunk size contains the previous chunk
  size, and can be used to find the front of the previous chunk.
  The very first chunk allocated always has this bit set, preventing
  access to non-existent (or non-owned) memory. If pinuse is set for
  any given chunk, then you CANNOT determine the size of the
  previous chunk, and might even get a memory addressing fault when
  trying to do so.

  The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of
  the chunk size redundantly records whether the current chunk is
  inuse. This redundancy enables usage checks within free and realloc,
  and reduces indirection when freeing and consolidating chunks.

  Each freshly allocated chunk must have both cinuse and pinuse set.
  That is, each allocated chunk borders either a previously allocated
  and still in-use chunk, or the base of its memory arena. This is
  ensured by making all allocations from the `lowest' part of any
  found chunk.  Further, no free chunk physically borders another one,
  so each free chunk is known to be preceded and followed by either
  inuse chunks or the ends of memory.

  Note that the `foot' of the current chunk is actually represented
  as the prev_foot of the NEXT chunk. This makes it easier to
  deal with alignments etc but can be very confusing when trying
  to extend or adapt this code.

  The exceptions to all this are

     1. The special chunk `top' is the top-most available chunk (i.e.,
        the one bordering the end of available memory). It is treated
        specially.  Top is never included in any bin, is used only if
        no other chunk is available, and is released back to the
        system if it is very large (see M_TRIM_THRESHOLD).  In effect,
        the top chunk is treated as larger (and thus less well
        fitting) than any other available chunk.  The top chunk
        doesn't update its trailing size field since there is no next
        contiguous chunk that would have to index off it. However,
        space is still allocated for it (TOP_FOOT_SIZE) to enable
        separation or merging when space is extended.

     2. Chunks allocated via mmap, which have the lowest-order bit
        (IS_MMAPPED_BIT) set in their prev_foot fields, and do not set
        PINUSE_BIT in their head fields.  Because they are allocated
        one-by-one, each must carry its own prev_foot field, which is
        also used to hold the offset this chunk has within its mmapped
        region, which is needed to preserve alignment. Each mmapped
        chunk is trailed by the first two fields of a fake next-chunk
        for sake of usage checks.

*/

struct malloc_chunk {
  size_t               prev_foot;  /* Size of previous chunk (if free).  */
  size_t               head;       /* Size and inuse bits. */
  struct malloc_chunk* fd;         /* double links -- used only if free. */
  struct malloc_chunk* bk;
};

typedef struct malloc_chunk  mchunk;
typedef struct malloc_chunk* mchunkptr;
typedef struct malloc_chunk* sbinptr;  /* The type of bins of chunks */
typedef unsigned int bindex_t;         /* Described below */
typedef unsigned int binmap_t;         /* Described below */
typedef unsigned int flag_t;           /* The type of various bit flag sets */


/* ------------------- Chunks sizes and alignments ----------------------- */

#define MCHUNK_SIZE         (sizeof(mchunk))

#if FOOTERS
#define CHUNK_OVERHEAD      (TWO_SIZE_T_SIZES)
#else /* FOOTERS */
#define CHUNK_OVERHEAD      (SIZE_T_SIZE)
#endif /* FOOTERS */

/* The smallest size we can malloc is an aligned minimal chunk */
#define MIN_CHUNK_SIZE\
  ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)

/* conversion from malloc headers to user pointers, and back */
#define chunk2mem(p)        ((void*)((char*)(p)       + TWO_SIZE_T_SIZES))
#define mem2chunk(mem)      ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
/* chunk associated with aligned address A */
#define align_as_chunk(A)   (mchunkptr)((A) + align_offset(chunk2mem(A)))

/* Bounds on request (not chunk) sizes. */
#define MAX_REQUEST         ((-MIN_CHUNK_SIZE) << 2)
#define MIN_REQUEST         (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)

/* pad request bytes into a usable size */
#define pad_request(req) \
   (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)

/* pad request, checking for minimum (but not maximum) */
#define request2size(req) \
  (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
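
/*
  Worked example, assuming 32-bit size_t and pointers (so
  MCHUNK_SIZE == 16, MIN_CHUNK_SIZE == 16, CHUNK_OVERHEAD == 4 and
  MIN_REQUEST == 11):
    request2size(1)  -> 16   (below MIN_REQUEST, clamped to MIN_CHUNK_SIZE)
    request2size(12) -> 16   ((12 + 4 + 7) & ~7)
    request2size(13) -> 24   ((13 + 4 + 7) & ~7)
  chunk2mem()/mem2chunk() then shift by TWO_SIZE_T_SIZES (8 bytes here)
  between the chunk header and the user pointer.
*/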

/* ------------------ Operations on head and foot fields ----------------- */

/*
  The head field of a chunk is or'ed with PINUSE_BIT when the previous
  adjacent chunk is in use, and or'ed with CINUSE_BIT if this chunk is
  in use. If the chunk was obtained with mmap, the prev_foot field has
  IS_MMAPPED_BIT set; otherwise it holds the offset of the base of the
  mmapped region to the base of the chunk.
*/

#define PINUSE_BIT          (SIZE_T_ONE)
#define CINUSE_BIT          (SIZE_T_TWO)
#define INUSE_BITS          (PINUSE_BIT|CINUSE_BIT)

/* Head value for fenceposts */
#define FENCEPOST_HEAD      (INUSE_BITS|SIZE_T_SIZE)

/* extraction of fields from head words */
#define cinuse(p)           ((p)->head & CINUSE_BIT)
#define pinuse(p)           ((p)->head & PINUSE_BIT)
#define chunksize(p)        ((p)->head & ~(INUSE_BITS))

#define clear_pinuse(p)     ((p)->head &= ~PINUSE_BIT)
#define clear_cinuse(p)     ((p)->head &= ~CINUSE_BIT)

/* Treat space at ptr +/- offset as a chunk */
#define chunk_plus_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))
#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))

/* Ptr to next or previous physical malloc_chunk. */
#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~INUSE_BITS)))
#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))

/* extract next chunk's pinuse bit */
#define next_pinuse(p)  ((next_chunk(p)->head) & PINUSE_BIT)

/* Get/set size at footer */
#define get_foot(p, s)  (((mchunkptr)((char*)(p) + (s)))->prev_foot)
#define set_foot(p, s)  (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))

/* Set size, pinuse bit, and foot */
#define set_size_and_pinuse_of_free_chunk(p, s)\
  ((p)->head = (s|PINUSE_BIT), set_foot(p, s))

/* Set size, pinuse bit, foot, and clear next pinuse */
#define set_free_with_pinuse(p, s, n)\
  (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
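
/*
  Worked example of the head encoding: a head value of 0x93 decodes as
    chunksize(p) == 0x90   (size bits, always a multiple of 8 here)
    cinuse(p)    != 0      (CINUSE_BIT, 0x2, set: this chunk is in use)
    pinuse(p)    != 0      (PINUSE_BIT, 0x1, set: previous chunk is in use)
  For a free chunk, set_size_and_pinuse_of_free_chunk() additionally
  copies the size into the prev_foot of the following chunk so that
  prev_chunk() can step backwards over it.
*/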

/* Get the internal overhead associated with chunk p */
#define overhead_for(p) CHUNK_OVERHEAD

/* Return true if malloced space is not necessarily cleared */
#define calloc_must_clear(p) (1)


/* ---------------------- Overlaid data structures ----------------------- */

/*
  When chunks are not in use, they are treated as nodes of either
  lists or trees.

  "Small"  chunks are stored in circular doubly-linked lists, and look
  like this:

    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of previous chunk                            |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    `head:' |             Size of chunk, in bytes                         |P|
      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Forward pointer to next chunk in list             |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Back pointer to previous chunk in list            |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Unused space (may be 0 bytes long)                .
            .                                                               .
            .                                                               |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    `foot:' |             Size of chunk, in bytes                           |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

  Larger chunks are kept in a form of bitwise digital trees (aka
  tries) keyed on chunksizes.  Because malloc_tree_chunks are only for
  free chunks greater than 256 bytes, their size doesn't impose any
  constraints on user chunk sizes.  Each node looks like:

    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of previous chunk                            |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    `head:' |             Size of chunk, in bytes                         |P|
      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Forward pointer to next chunk of same size        |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Back pointer to previous chunk of same size       |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Pointer to left child (child[0])                  |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Pointer to right child (child[1])                 |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Pointer to parent                                 |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             bin index of this chunk                           |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Unused space                                      .
            .                                                               |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    `foot:' |             Size of chunk, in bytes                           |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

  Each tree holding treenodes is a tree of unique chunk sizes.  Chunks
  of the same size are arranged in a circularly-linked list, with only
  the oldest chunk (the next to be used, in our FIFO ordering)
  actually in the tree.  (Tree members are distinguished by a non-null
  parent pointer.)  If a chunk with the same size as an existing node
  is inserted, it is linked off the existing node using pointers that
  work in the same way as fd/bk pointers of small chunks.

  Each tree contains a power of 2 sized range of chunk sizes (the
  smallest is 0x100 <= x < 0x180), which is divided in half at each
  tree level, with the chunks in the smaller half of the range (0x100
  <= x < 0x140 for the top node) in the left subtree and the larger
  half (0x140 <= x < 0x180) in the right subtree.  This is, of course,
  done by inspecting individual bits.

  Using these rules, each node's left subtree contains all smaller
  sizes than its right subtree.  However, the node at the root of each
  subtree has no particular ordering relationship to either.  (The
  dividing line between the subtree sizes is based on trie relation.)
  If we remove the last chunk of a given size from the interior of the
  tree, we need to replace it with a leaf node.  The tree ordering
  rules permit a node to be replaced by any leaf below it.

  The smallest chunk in a tree (a common operation in a best-fit
  allocator) can be found by walking a path to the leftmost leaf in
  the tree.  Unlike a usual binary tree, where we follow left child
  pointers until we reach a null, here we follow the right child
  pointer any time the left one is null, until we reach a leaf with
  both child pointers null. The smallest chunk in the tree will be
  somewhere along that path.

  The worst case number of steps to add, find, or remove a node is
  bounded by the number of bits differentiating chunks within
  bins. Under current bin calculations, this ranges from 6 up to 21
  (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case
  is of course much better.
*/

struct malloc_tree_chunk {
  /* The first four fields must be compatible with malloc_chunk */
  size_t                    prev_foot;
  size_t                    head;
  struct malloc_tree_chunk* fd;
  struct malloc_tree_chunk* bk;

  struct malloc_tree_chunk* child[2];
  struct malloc_tree_chunk* parent;
  bindex_t                  index;
};

typedef struct malloc_tree_chunk  tchunk;
typedef struct malloc_tree_chunk* tchunkptr;
typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */

/* A little helper macro for trees */
#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
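
/*
  Illustrative sketch (not part of the allocator) of the leftmost-path
  walk described above: starting from a treebin root, repeatedly take
  the left child when present, otherwise the right child, and keep the
  smallest size seen on the way down.

    static tchunkptr smallest_in_tree(tchunkptr t)
    {
        tchunkptr best = t;
        while ((t = leftmost_child(t)) != 0) {
            if (chunksize(t) < chunksize(best))
                best = t;
        }
        return best;
    }
*/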

/* ----------------------------- Segments -------------------------------- */

/*
  Each malloc space may include non-contiguous segments, held in a
  list headed by an embedded malloc_segment record representing the
  top-most space. Segments also include flags holding properties of
  the space. Large chunks that are directly allocated by mmap are not
  included in this list. They are instead independently created and
  destroyed without otherwise keeping track of them.

  Segment management mainly comes into play for spaces allocated by
  MMAP.  Any call to MMAP might or might not return memory that is
  adjacent to an existing segment.  MORECORE normally contiguously
  extends the current space, so this space is almost always adjacent,
  which is simpler and faster to deal with. (This is why MORECORE is
  used preferentially to MMAP when both are available -- see
  sys_alloc.)  When allocating using MMAP, we don't use any of the
  hinting mechanisms (inconsistently) supported in various
  implementations of unix mmap, or distinguish reserving from
  committing memory. Instead, we just ask for space, and exploit
  contiguity when we get it.  It is probably possible to do
  better than this on some systems, but no general scheme seems
  to be significantly better.

  Management entails a simpler variant of the consolidation scheme
  used for chunks to reduce fragmentation -- new adjacent memory is
  normally prepended or appended to an existing segment. However,
  there are limitations compared to chunk consolidation that mostly
  reflect the fact that segment processing is relatively infrequent
  (occurring only when getting memory from system) and that we
  don't expect to have huge numbers of segments:

  * Segments are not indexed, so traversal requires linear scans.  (It
    would be possible to index these, but is not worth the extra
    overhead and complexity for most programs on most platforms.)
  * New segments are only appended to old ones when holding top-most
    memory; if they cannot be prepended to others, they are held in
    different segments.

  Except for the top-most segment of an mstate, each segment record
  is kept at the tail of its segment. Segments are added by pushing
  segment records onto the list headed by &mstate.seg for the
  containing mstate.

  Segment flags control allocation/merge/deallocation policies:
  * If EXTERN_BIT set, then we did not allocate this segment,
    and so should not try to deallocate or merge with others.
    (This currently holds only for the initial segment passed
    into create_mspace_with_base.)
  * If IS_MMAPPED_BIT set, the segment may be merged with
    other surrounding mmapped segments and trimmed/de-allocated
    using munmap.
  * If neither bit is set, then the segment was obtained using
    MORECORE so can be merged with surrounding MORECORE'd segments
    and deallocated/trimmed using MORECORE with negative arguments.
*/

struct malloc_segment {
  char*        base;             /* base address */
  size_t       size;             /* allocated size */
  struct malloc_segment* next;   /* ptr to next segment */
};

typedef struct malloc_segment  msegment;
typedef struct malloc_segment* msegmentptr;

/* ---------------------------- malloc_state ----------------------------- */

/*
   A malloc_state holds all of the bookkeeping for a space.
   The main fields are:

  Top
    The topmost chunk of the currently active segment. Its size is
    cached in topsize.  The actual size of topmost space is
    topsize+TOP_FOOT_SIZE, which includes space reserved for adding
    fenceposts and segment records if necessary when getting more
    space from the system.  The size at which to autotrim top is
    cached from mparams in trim_check, except that it is disabled if
    an autotrim fails.

  Designated victim (dv)
    This is the preferred chunk for servicing small requests that
    don't have exact fits.  It is normally the chunk split off most
    recently to service another small request.  Its size is cached in
    dvsize. The link fields of this chunk are not maintained since it
    is not kept in a bin.

  SmallBins
    An array of bin headers for free chunks.  These bins hold chunks
    with sizes less than MIN_LARGE_SIZE bytes. Each bin contains
    chunks of all the same size, spaced 8 bytes apart.  To simplify
    use in double-linked lists, each bin header acts as a malloc_chunk
    pointing to the real first node, if it exists (else pointing to
    itself).  This avoids special-casing for headers.  But to avoid
    waste, we allocate only the fd/bk pointers of bins, and then use
    repositioning tricks to treat these as the fields of a chunk.

  TreeBins
    Treebins are pointers to the roots of trees holding a range of
    sizes. There are 2 equally spaced treebins for each power of two
    from TREE_SHIFT to TREE_SHIFT+16. The last bin holds anything
    larger.

  Bin maps
    There is one bit map for small bins ("smallmap") and one for
    treebins ("treemap").  Each bin sets its bit when non-empty, and
    clears the bit when empty.  Bit operations are then used to avoid
    bin-by-bin searching -- nearly all "search" is done without ever
    looking at bins that won't be selected.  The bit maps
    conservatively use 32 bits per map word, even on 64-bit systems.
    For a good description of some of the bit-based techniques used
    here, see Henry S. Warren Jr's book "Hacker's Delight" (and
    supplement at http://hackersdelight.org/). Many of these are
    intended to reduce the branchiness of paths through malloc etc, as
    well as to reduce the number of memory locations read or written.

  Segments
    A list of segments headed by an embedded malloc_segment record
    representing the initial space.

  Address check support
    The least_addr field is the least address ever obtained from
    MORECORE or MMAP. Attempted frees and reallocs of any address less
    than this are trapped (unless INSECURE is defined).

  Magic tag
    A cross-check field that should always hold same value as mparams.magic.

  Flags
    Bits recording whether to use MMAP, locks, or contiguous MORECORE

  Statistics
    Each space keeps track of current and maximum system memory
    obtained via MORECORE or MMAP.

  Locking
    If USE_LOCKS is defined, the "mutex" lock is acquired and released
    around every public call using this mspace.
*/

/* Bin types, widths and sizes */
#define NSMALLBINS        (32U)
#define NTREEBINS         (32U)
#define SMALLBIN_SHIFT    (3U)
#define SMALLBIN_WIDTH    (SIZE_T_ONE << SMALLBIN_SHIFT)
#define TREEBIN_SHIFT     (8U)
#define MIN_LARGE_SIZE    (SIZE_T_ONE << TREEBIN_SHIFT)
#define MAX_SMALL_SIZE    (MIN_LARGE_SIZE - SIZE_T_ONE)
#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)

struct malloc_state {
  binmap_t   smallmap;
  binmap_t   treemap;
  size_t     dvsize;
  size_t     topsize;
  char*      least_addr;
  mchunkptr  dv;
  mchunkptr  top;
  size_t     magic;
  mchunkptr  smallbins[(NSMALLBINS+1)*2];
  tbinptr    treebins[NTREEBINS];
  size_t     footprint;
  size_t     max_footprint;
  flag_t     mflags;
  void      *user_data;
#if USE_LOCKS
  MLOCK_T    mutex;     /* locate lock among fields that rarely change */
#endif /* USE_LOCKS */
  msegment   seg;
};

typedef struct malloc_state*    mstate;

/* ------------- Global malloc_state and malloc_params ------------------- */

/*
  malloc_params holds global properties, including those that can be
  dynamically set using mallopt. There is a single instance, mparams,
  initialized in init_mparams.
*/

struct malloc_params {
  size_t magic;
  size_t page_size;
  size_t granularity;
  flag_t default_mflags;
};

static struct malloc_params mparams;

/* The global malloc_state used for all non-"mspace" calls */
//static struct malloc_state _gm_;
//#define gm                 (&_gm_)
//#define is_global(M)       ((M) == &_gm_)
#define is_initialized(M)  ((M)->top != 0)

/* -------------------------- system alloc setup ------------------------- */

/* Operations on mflags */

#define use_lock(M)           ((M)->mflags &   USE_LOCK_BIT)
#define enable_lock(M)        ((M)->mflags |=  USE_LOCK_BIT)
#define disable_lock(M)       ((M)->mflags &= ~USE_LOCK_BIT)

#define set_lock(M,L)\
 ((M)->mflags = (L)?\
  ((M)->mflags | USE_LOCK_BIT) :\
  ((M)->mflags & ~USE_LOCK_BIT))

/* page-align a size */
#define page_align(S)\
 (((S) + (mparams.page_size)) & ~(mparams.page_size - SIZE_T_ONE))

/* granularity-align a size */
#define granularity_align(S)\
  (((S) + (mparams.granularity)) & ~(mparams.granularity - SIZE_T_ONE))

#define is_page_aligned(S)\
   (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
#define is_granularity_aligned(S)\
   (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)

/*  True if segment S holds address A */
#define segment_holds(S, A)\
  ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)

#if DEBUG
/* Return segment holding given address */
static msegmentptr segment_holding(mstate m, char* addr) {
  msegmentptr sp = &m->seg;
  for (;;) {
    if (addr >= sp->base && addr < sp->base + sp->size)
      return sp;
    if ((sp = sp->next) == 0)
      return 0;
  }
}

/* Return true if segment contains a segment link */
static int has_segment_link(mstate m, msegmentptr ss) {
  msegmentptr sp = &m->seg;
  for (;;) {
    if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size)
      return 1;
    if ((sp = sp->next) == 0)
      return 0;
  }
}
#endif


/*
  TOP_FOOT_SIZE is padding at the end of a segment, including space
  that may be needed to place segment records and fenceposts when new
  noncontiguous segments are added.
*/
#define TOP_FOOT_SIZE\
  (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)

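/*
  Worked example, again assuming 32-bit size_t and pointers:
  align_offset(chunk2mem(0)) == 0, pad_request(sizeof(struct
  malloc_segment)) == 16 and MIN_CHUNK_SIZE == 16, so TOP_FOOT_SIZE
  comes to 32 bytes reserved at the top of each segment.
*/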

/* -------------------------------  Hooks -------------------------------- */

/*
  PREACTION should be defined to return 0 on success, and nonzero on
  failure. If you are not using locking, you can redefine these to do
  anything you like.
*/

#if USE_LOCKS

/* Ensure locks are initialized */
#define GLOBALLY_INITIALIZE() (mparams.page_size == 0 && init_mparams())

#define PREACTION(M)  ((GLOBALLY_INITIALIZE() || use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0)
#define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }
#else /* USE_LOCKS */

#ifndef PREACTION
#define PREACTION(M) (0)
#endif  /* PREACTION */

#ifndef POSTACTION
#define POSTACTION(M)
#endif  /* POSTACTION */

#endif /* USE_LOCKS */

/*
  CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses.
  USAGE_ERROR_ACTION is triggered on detected bad frees and
  reallocs. The argument p is an address that might have triggered the
  fault. It is ignored by the two predefined actions, but might be
  useful in custom actions that try to help diagnose errors.
*/

#if PROCEED_ON_ERROR

/* A count of the number of corruption errors causing resets */
int malloc_corruption_error_count;

/* default corruption action */
static void reset_on_error(mstate m);

#define CORRUPTION_ERROR_ACTION(m)  reset_on_error(m)
#define USAGE_ERROR_ACTION(m, p)

#else /* PROCEED_ON_ERROR */

#ifndef CORRUPTION_ERROR_ACTION
#define CORRUPTION_ERROR_ACTION(m) ABORT(m->user_data)
#endif /* CORRUPTION_ERROR_ACTION */

#ifndef USAGE_ERROR_ACTION
#define USAGE_ERROR_ACTION(m,p) ABORT(m->user_data)
#endif /* USAGE_ERROR_ACTION */

#endif /* PROCEED_ON_ERROR */

/* -------------------------- Debugging setup ---------------------------- */

#if ! DEBUG

#define check_free_chunk(M,P)
#define check_inuse_chunk(M,P)
#define check_malloced_chunk(M,P,N)
#define check_malloc_state(M)
#define check_top_chunk(M,P)

#else /* DEBUG */
#define check_free_chunk(M,P)       do_check_free_chunk(M,P)
#define check_inuse_chunk(M,P)      do_check_inuse_chunk(M,P)
#define check_top_chunk(M,P)        do_check_top_chunk(M,P)
#define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
#define check_malloc_state(M)       do_check_malloc_state(M)

static void   do_check_any_chunk(mstate m, mchunkptr p);
static void   do_check_top_chunk(mstate m, mchunkptr p);
static void   do_check_inuse_chunk(mstate m, mchunkptr p);
static void   do_check_free_chunk(mstate m, mchunkptr p);
static void   do_check_malloced_chunk(mstate m, void* mem, size_t s);
static void   do_check_tree(mstate m, tchunkptr t);
static void   do_check_treebin(mstate m, bindex_t i);
static void   do_check_smallbin(mstate m, bindex_t i);
static void   do_check_malloc_state(mstate m);
static int    bin_find(mstate m, mchunkptr x);
static size_t traverse_and_check(mstate m);
#endif /* DEBUG */

/* ---------------------------- Indexing Bins ---------------------------- */

#define is_small(s)         (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
#define small_index(s)      ((s)  >> SMALLBIN_SHIFT)
#define small_index2size(i) ((i)  << SMALLBIN_SHIFT)
#define MIN_SMALL_INDEX     (small_index(MIN_CHUNK_SIZE))

/* addressing by index. See above about smallbin repositioning */
#define smallbin_at(M, i)   ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))
#define treebin_at(M,i)     (&((M)->treebins[i]))
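
/*
  Worked example of small-bin indexing: a request that pads to a
  40-byte chunk has small_index(40) == 5 and small_index2size(5) == 40,
  so smallbin_at(M, 5) addresses the bin header overlaid on
  smallbins[5<<1].  Chunk sizes below MIN_LARGE_SIZE (256) are "small";
  anything larger goes to the treebins.
*/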

/* assign tree index for size S to variable I */
#if defined(__GNUC__) && defined(i386)
#define compute_tree_index(S, I)\
{\
  size_t X = S >> TREEBIN_SHIFT;\
  if (X == 0)\
    I = 0;\
  else if (X > 0xFFFF)\
    I = NTREEBINS-1;\
  else {\
    unsigned int K;\
    __asm__("bsrl %1,%0\n\t" : "=r" (K) : "rm"  (X));\
    I =  (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
  }\
}
#else /* GNUC */
#define compute_tree_index(S, I)\
{\
  size_t X = S >> TREEBIN_SHIFT;\
  if (X == 0)\
    I = 0;\
  else if (X > 0xFFFF)\
    I = NTREEBINS-1;\
  else {\
    unsigned int Y = (unsigned int)X;\
    unsigned int N = ((Y - 0x100) >> 16) & 8;\
    unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\
    N += K;\
    N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\
    K = 14 - N + ((Y <<= K) >> 15);\
    I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\
  }\
}
#endif /* GNUC */

/* Bit representing maximum resolved size in a treebin at i */
#define bit_for_tree_index(i) \
   (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)

/* Shift placing maximum resolved bit in a treebin at i as sign bit */
#define leftshift_for_tree_index(i) \
   ((i == NTREEBINS-1)? 0 : \
    ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))

/* The size of the smallest chunk held in bin with index i */
#define minsize_for_tree_index(i) \
   ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) |  \
   (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))

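/*
  Worked example: for a chunk size S of 0x188 (392 bytes),
  X = S >> TREEBIN_SHIFT = 1 and its highest set bit is bit 0 (K = 0),
  so compute_tree_index yields I = (0 << 1) + ((S >> 7) & 1) = 1.
  Consistently, minsize_for_tree_index(1) == 0x180, and treebin 1
  spans sizes 0x180 <= S < 0x200.
*/
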
/* ------------------------ Operations on bin maps ----------------------- */

/* bit corresponding to given index */
#define idx2bit(i)              ((binmap_t)(1) << (i))

/* Mark/Clear bits with given index */
#define mark_smallmap(M,i)      ((M)->smallmap |=  idx2bit(i))
#define clear_smallmap(M,i)     ((M)->smallmap &= ~idx2bit(i))
#define smallmap_is_marked(M,i) ((M)->smallmap &   idx2bit(i))

#define mark_treemap(M,i)       ((M)->treemap  |=  idx2bit(i))
#define clear_treemap(M,i)      ((M)->treemap  &= ~idx2bit(i))
#define treemap_is_marked(M,i)  ((M)->treemap  &   idx2bit(i))

/* index corresponding to given bit */

#if defined(__GNUC__) && defined(i386)
#define compute_bit2idx(X, I)\
{\
  unsigned int J;\
  __asm__("bsfl %1,%0\n\t" : "=r" (J) : "rm" (X));\
  I = (bindex_t)J;\
}

#else /* GNUC */
#if  USE_BUILTIN_FFS
#define compute_bit2idx(X, I) I = ffs(X)-1

#else /* USE_BUILTIN_FFS */
#define compute_bit2idx(X, I)\
{\
  unsigned int Y = X - 1;\
  unsigned int K = Y >> (16-4) & 16;\
  unsigned int N = K;        Y >>= K;\
  N += K = Y >> (8-3) &  8;  Y >>= K;\
  N += K = Y >> (4-2) &  4;  Y >>= K;\
  N += K = Y >> (2-1) &  2;  Y >>= K;\
  N += K = Y >> (1-0) &  1;  Y >>= K;\
  I = (bindex_t)(N + Y);\
}
#endif /* USE_BUILTIN_FFS */
#endif /* GNUC */

/* isolate the least set bit of a bitmap */
#define least_bit(x)         ((x) & -(x))

/* mask with all bits to left of least bit of x on */
#define left_bits(x)         ((x<<1) | -(x<<1))

/* mask with all bits to left of or equal to least bit of x on */
#define same_or_left_bits(x) ((x) | -(x))

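/*
  Worked example of the bitmap helpers: if smallmap == 0x28 (bins 3 and
  5 non-empty), then
    least_bit(0x28)       == 0x08         (lowest set bit)
    compute_bit2idx(0x08) -> I == 3       (back to the bin index)
    left_bits(0x08)       == 0xfffffff0   (all bits strictly left of bit 3)
  These let the allocator find the first non-empty bin at or above a
  given index with a couple of bit operations instead of a scan.
*/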

/* ----------------------- Runtime Check Support ------------------------- */

/*
  For security, the main invariant is that malloc/free/etc never
  writes to a static address other than malloc_state, unless static
  malloc_state itself has been corrupted, which cannot occur via
  malloc (because of these checks). In essence this means that we
  believe all pointers, sizes, maps etc held in malloc_state, but
  check all of those linked or offsetted from other embedded data
  structures.  These checks are interspersed with main code in a way
  that tends to minimize their run-time cost.

  When FOOTERS is defined, in addition to range checking, we also
  verify footer fields of inuse chunks, which can be used to guarantee
  that the mstate controlling malloc/free is intact.  This is a
  streamlined version of the approach described by William Robertson
  et al in "Run-time Detection of Heap-based Overflows" LISA'03
  http://www.usenix.org/events/lisa03/tech/robertson.html The footer
  of an inuse chunk holds the xor of its mstate and a random seed,
  which is checked upon calls to free() and realloc().  This is
  (probabilistically) unguessable from outside the program, but can be
1019d514b0f3Smrg  computed by any code successfully malloc'ing any chunk, so does not
1020d514b0f3Smrg  itself provide protection against code that has already broken
1021d514b0f3Smrg  security through some other means.  Unlike Robertson et al, we
1022d514b0f3Smrg  always dynamically check addresses of all offset chunks (previous,
1023d514b0f3Smrg  next, etc). This turns out to be cheaper than relying on hashes.
1024d514b0f3Smrg*/
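
/*
  Concretely, when FOOTERS is enabled the footer written by
  mark_inuse_foot() below is ((size_t)mstate ^ mparams.magic), stored in
  the prev_foot field of the chunk that follows each inuse chunk;
  get_mstate_for() recovers the owning mstate by xoring that value with
  mparams.magic again before any further trust is placed in it.
*/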
1025d514b0f3Smrg
1026d514b0f3Smrg#if !INSECURE
1027d514b0f3Smrg/* Check if address a is at least as high as the lowest address known to this mspace (least_addr) */
1028d514b0f3Smrg#define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
1029d514b0f3Smrg/* Check if address of next chunk n is higher than base chunk p */
1030d514b0f3Smrg#define ok_next(p, n)    ((char*)(p) < (char*)(n))
1031d514b0f3Smrg/* Check if p has its cinuse bit on */
1032d514b0f3Smrg#define ok_cinuse(p)     cinuse(p)
1033d514b0f3Smrg/* Check if p has its pinuse bit on */
1034d514b0f3Smrg#define ok_pinuse(p)     pinuse(p)
1035d514b0f3Smrg
1036d514b0f3Smrg#else /* !INSECURE */
1037d514b0f3Smrg#define ok_address(M, a) (1)
1038d514b0f3Smrg#define ok_next(b, n)    (1)
1039d514b0f3Smrg#define ok_cinuse(p)     (1)
1040d514b0f3Smrg#define ok_pinuse(p)     (1)
1041d514b0f3Smrg#endif /* !INSECURE */
1042d514b0f3Smrg
1043d514b0f3Smrg#if (FOOTERS && !INSECURE)
1044d514b0f3Smrg/* Check if (alleged) mstate m has expected magic field */
1045d514b0f3Smrg#define ok_magic(M)      ((M)->magic == mparams.magic)
1046d514b0f3Smrg#else  /* (FOOTERS && !INSECURE) */
1047d514b0f3Smrg#define ok_magic(M)      (1)
1048d514b0f3Smrg#endif /* (FOOTERS && !INSECURE) */
1049d514b0f3Smrg
1050d514b0f3Smrg
1051d514b0f3Smrg/* In gcc, use __builtin_expect to minimize impact of checks */
1052d514b0f3Smrg#if !INSECURE
1053d514b0f3Smrg#if defined(__GNUC__) && __GNUC__ >= 3
1054d514b0f3Smrg#define RTCHECK(e)  __builtin_expect(e, 1)
1055d514b0f3Smrg#else /* GNUC */
1056d514b0f3Smrg#define RTCHECK(e)  (e)
1057d514b0f3Smrg#endif /* GNUC */
1058d514b0f3Smrg#else /* !INSECURE */
1059d514b0f3Smrg#define RTCHECK(e)  (1)
1060d514b0f3Smrg#endif /* !INSECURE */
1061d514b0f3Smrg
1062d514b0f3Smrg/* macros to set up inuse chunks with or without footers */
1063d514b0f3Smrg
1064d514b0f3Smrg#if !FOOTERS
1065d514b0f3Smrg
1066d514b0f3Smrg#define mark_inuse_foot(M,p,s)
1067d514b0f3Smrg
1068d514b0f3Smrg/* Set cinuse bit and pinuse bit of next chunk */
1069d514b0f3Smrg#define set_inuse(M,p,s)\
1070d514b0f3Smrg  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
1071d514b0f3Smrg  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
1072d514b0f3Smrg
1073d514b0f3Smrg/* Set cinuse and pinuse of this chunk and pinuse of next chunk */
1074d514b0f3Smrg#define set_inuse_and_pinuse(M,p,s)\
1075d514b0f3Smrg  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
1076d514b0f3Smrg  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
1077d514b0f3Smrg
1078d514b0f3Smrg/* Set size, cinuse and pinuse bit of this chunk */
1079d514b0f3Smrg#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
1080d514b0f3Smrg  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
1081d514b0f3Smrg
1082d514b0f3Smrg#else /* FOOTERS */
1083d514b0f3Smrg
1084d514b0f3Smrg/* Set foot of inuse chunk to be xor of mstate and seed */
1085d514b0f3Smrg#define mark_inuse_foot(M,p,s)\
1086d514b0f3Smrg  (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))
1087d514b0f3Smrg
1088d514b0f3Smrg#define get_mstate_for(p)\
1089d514b0f3Smrg  ((mstate)(((mchunkptr)((char*)(p) +\
1090d514b0f3Smrg    (chunksize(p))))->prev_foot ^ mparams.magic))
1091d514b0f3Smrg
1092d514b0f3Smrg#define set_inuse(M,p,s)\
1093d514b0f3Smrg  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
1094d514b0f3Smrg  (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \
1095d514b0f3Smrg  mark_inuse_foot(M,p,s))
1096d514b0f3Smrg
1097d514b0f3Smrg#define set_inuse_and_pinuse(M,p,s)\
1098d514b0f3Smrg  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
1099d514b0f3Smrg  (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\
1100d514b0f3Smrg mark_inuse_foot(M,p,s))
1101d514b0f3Smrg
1102d514b0f3Smrg#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
1103d514b0f3Smrg  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
1104d514b0f3Smrg  mark_inuse_foot(M, p, s))
1105d514b0f3Smrg
1106d514b0f3Smrg#endif /* !FOOTERS */
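
/*
  Example (assuming the conventional dlmalloc bit encoding, PINUSE_BIT == 1
  and CINUSE_BIT == 2): set_inuse_and_pinuse(m, p, 32) stores (32 | 3) == 0x23
  in p->head and turns on the pinuse bit in the head of the chunk 32 bytes
  above p; with FOOTERS it additionally writes the xor-encoded mstate footer
  via mark_inuse_foot().
*/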
1107d514b0f3Smrg
1108d514b0f3Smrg/* ---------------------------- setting mparams -------------------------- */
1109d514b0f3Smrg
1110d514b0f3Smrg/* Initialize mparams */
1111d514b0f3Smrgstatic int init_mparams(void) {
1112d514b0f3Smrg  if (mparams.page_size == 0) {
1113d514b0f3Smrg    size_t s;
1114d514b0f3Smrg
1115d514b0f3Smrg    mparams.default_mflags = USE_LOCK_BIT;
1116d514b0f3Smrg
1117d514b0f3Smrg#if (FOOTERS && !INSECURE)
1118d514b0f3Smrg    {
1119d514b0f3Smrg#if USE_DEV_RANDOM
1120d514b0f3Smrg      int fd;
1121d514b0f3Smrg      unsigned char buf[sizeof(size_t)];
1122d514b0f3Smrg      /* Try to use /dev/urandom, else fall back on using time */
1123d514b0f3Smrg      if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&
1124d514b0f3Smrg          read(fd, buf, sizeof(buf)) == sizeof(buf)) {
1125d514b0f3Smrg        s = *((size_t *) buf);
1126d514b0f3Smrg        close(fd);
1127d514b0f3Smrg      }
1128d514b0f3Smrg      else
1129d514b0f3Smrg#endif /* USE_DEV_RANDOM */
1130d514b0f3Smrg        s = (size_t)(time(0) ^ (size_t)0x55555555U);
1131d514b0f3Smrg
1132d514b0f3Smrg      s |= (size_t)8U;    /* ensure nonzero */
1133d514b0f3Smrg      s &= ~(size_t)7U;   /* improve chances of fault for bad values */
1134d514b0f3Smrg
1135d514b0f3Smrg    }
1136d514b0f3Smrg#else /* (FOOTERS && !INSECURE) */
1137d514b0f3Smrg    s = (size_t)0x58585858U;
1138d514b0f3Smrg#endif /* (FOOTERS && !INSECURE) */
1139d514b0f3Smrg    ACQUIRE_MAGIC_INIT_LOCK();
1140d514b0f3Smrg    if (mparams.magic == 0) {
1141d514b0f3Smrg      mparams.magic = s;
1142d514b0f3Smrg      /* Set up lock for main malloc area */
1143d514b0f3Smrg      //INITIAL_LOCK(&gm->mutex);
1144d514b0f3Smrg      //gm->mflags = mparams.default_mflags;
1145d514b0f3Smrg    }
1146d514b0f3Smrg    RELEASE_MAGIC_INIT_LOCK();
1147d514b0f3Smrg
1148d514b0f3Smrg
1149d514b0f3Smrg    mparams.page_size = malloc_getpagesize;
1150d514b0f3Smrg    mparams.granularity = ((DEFAULT_GRANULARITY != 0)?
1151d514b0f3Smrg                           DEFAULT_GRANULARITY : mparams.page_size);
1152d514b0f3Smrg
1153d514b0f3Smrg    /* Sanity-check configuration:
1154d514b0f3Smrg       size_t must be unsigned and as wide as pointer type.
1155d514b0f3Smrg       ints must be at least 4 bytes.
1156d514b0f3Smrg       alignment must be at least 8.
1157d514b0f3Smrg       Alignment, min chunk size, and page size must all be powers of 2.
1158d514b0f3Smrg    */
1159d514b0f3Smrg    if ((sizeof(size_t) != sizeof(char*)) ||
1160d514b0f3Smrg        (MAX_SIZE_T < MIN_CHUNK_SIZE)  ||
1161d514b0f3Smrg        (sizeof(int) < 4)  ||
1162d514b0f3Smrg        (MALLOC_ALIGNMENT < (size_t)8U) ||
1163d514b0f3Smrg        ((MALLOC_ALIGNMENT    & (MALLOC_ALIGNMENT-SIZE_T_ONE))    != 0) ||
1164d514b0f3Smrg        ((MCHUNK_SIZE         & (MCHUNK_SIZE-SIZE_T_ONE))         != 0) ||
1165d514b0f3Smrg        ((mparams.granularity & (mparams.granularity-SIZE_T_ONE)) != 0) ||
1166d514b0f3Smrg        ((mparams.page_size   & (mparams.page_size-SIZE_T_ONE))   != 0))
1167d514b0f3Smrg      ABORT(NULL);
1168d514b0f3Smrg  }
1169d514b0f3Smrg  return 0;
1170d514b0f3Smrg}
1171d514b0f3Smrg
1172d514b0f3Smrg/* support for mallopt */
1173d514b0f3Smrgstatic int change_mparam(int param_number, int value) {
1174d514b0f3Smrg  size_t val = (size_t)value;
1175d514b0f3Smrg  init_mparams();
1176d514b0f3Smrg  switch(param_number) {
1177d514b0f3Smrg  case M_GRANULARITY:
1178d514b0f3Smrg    if (val >= mparams.page_size && ((val & (val-1)) == 0)) {
1179d514b0f3Smrg      mparams.granularity = val;
1180d514b0f3Smrg      return 1;
1181d514b0f3Smrg    }
1182d514b0f3Smrg    else
1183d514b0f3Smrg      return 0;
1184d514b0f3Smrg  default:
1185d514b0f3Smrg    return 0;
1186d514b0f3Smrg  }
1187d514b0f3Smrg}
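
/*
  Illustrative use: change_mparam(M_GRANULARITY, 8192) returns 1 (and
  updates mparams.granularity) only when 8192 >= mparams.page_size and is a
  power of two; any other param_number, or a value failing those tests,
  returns 0.
*/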
1188d514b0f3Smrg
1189d514b0f3Smrg#if DEBUG
1190d514b0f3Smrg/* ------------------------- Debugging Support --------------------------- */
1191d514b0f3Smrg
1192d514b0f3Smrg/* Check properties of any chunk, whether free, inuse, mmapped etc  */
1193d514b0f3Smrgstatic void do_check_any_chunk(mstate m, mchunkptr p) {
1194d514b0f3Smrg  assert(m->user_data, (is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
1195d514b0f3Smrg  assert(m->user_data, ok_address(m, p));
1196d514b0f3Smrg}
1197d514b0f3Smrg
1198d514b0f3Smrg/* Check properties of top chunk */
1199d514b0f3Smrgstatic void do_check_top_chunk(mstate m, mchunkptr p) {
1200d514b0f3Smrg  msegmentptr sp = segment_holding(m, (char*)p);
1201d514b0f3Smrg  size_t  sz = chunksize(p);
1202d514b0f3Smrg  assert(m->user_data, sp != 0);
1203d514b0f3Smrg  assert(m->user_data, (is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
1204d514b0f3Smrg  assert(m->user_data, ok_address(m, p));
1205d514b0f3Smrg  assert(m->user_data, sz == m->topsize);
1206d514b0f3Smrg  assert(m->user_data, sz > 0);
1207d514b0f3Smrg  assert(m->user_data, sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE);
1208d514b0f3Smrg  assert(m->user_data, pinuse(p));
1209d514b0f3Smrg  assert(m->user_data, !next_pinuse(p));
1210d514b0f3Smrg}
1211d514b0f3Smrg
1212d514b0f3Smrg/* Check properties of inuse chunks */
1213d514b0f3Smrgstatic void do_check_inuse_chunk(mstate m, mchunkptr p) {
1214d514b0f3Smrg  do_check_any_chunk(m, p);
1215d514b0f3Smrg  assert(m->user_data, cinuse(p));
1216d514b0f3Smrg  assert(m->user_data, next_pinuse(p));
1217d514b0f3Smrg  /* If not pinuse, previous chunk has OK offset */
1218d514b0f3Smrg  assert(m->user_data, pinuse(p) || next_chunk(prev_chunk(p)) == p);
1219d514b0f3Smrg}
1220d514b0f3Smrg
1221d514b0f3Smrg/* Check properties of free chunks */
1222d514b0f3Smrgstatic void do_check_free_chunk(mstate m, mchunkptr p) {
1223d514b0f3Smrg  size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT);
1224d514b0f3Smrg  mchunkptr next = chunk_plus_offset(p, sz);
1225d514b0f3Smrg  do_check_any_chunk(m, p);
1226d514b0f3Smrg  assert(m->user_data, !cinuse(p));
1227d514b0f3Smrg  assert(m->user_data, !next_pinuse(p));
1228d514b0f3Smrg  if (p != m->dv && p != m->top) {
1229d514b0f3Smrg    if (sz >= MIN_CHUNK_SIZE) {
1230d514b0f3Smrg      assert(m->user_data, (sz & CHUNK_ALIGN_MASK) == 0);
1231d514b0f3Smrg      assert(m->user_data, is_aligned(chunk2mem(p)));
1232d514b0f3Smrg      assert(m->user_data, next->prev_foot == sz);
1233d514b0f3Smrg      assert(m->user_data, pinuse(p));
1234d514b0f3Smrg      assert(m->user_data, next == m->top || cinuse(next));
1235d514b0f3Smrg      assert(m->user_data, p->fd->bk == p);
1236d514b0f3Smrg      assert(m->user_data, p->bk->fd == p);
1237d514b0f3Smrg    }
1238d514b0f3Smrg    else  /* markers are always of size SIZE_T_SIZE */
1239d514b0f3Smrg      assert(m->user_data, sz == SIZE_T_SIZE);
1240d514b0f3Smrg  }
1241d514b0f3Smrg}
1242d514b0f3Smrg
1243d514b0f3Smrg/* Check properties of malloced chunks at the point they are malloced */
1244d514b0f3Smrgstatic void do_check_malloced_chunk(mstate m, void* mem, size_t s) {
1245d514b0f3Smrg  if (mem != 0) {
1246d514b0f3Smrg    mchunkptr p = mem2chunk(mem);
1247d514b0f3Smrg    size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT);
1248d514b0f3Smrg    do_check_inuse_chunk(m, p);
1249d514b0f3Smrg    assert(m->user_data, (sz & CHUNK_ALIGN_MASK) == 0);
1250d514b0f3Smrg    assert(m->user_data, sz >= MIN_CHUNK_SIZE);
1251d514b0f3Smrg    assert(m->user_data, sz >= s);
1252d514b0f3Smrg    /* size is less than MIN_CHUNK_SIZE more than request */
1253d514b0f3Smrg    assert(m->user_data, sz < (s + MIN_CHUNK_SIZE));
1254d514b0f3Smrg  }
1255d514b0f3Smrg}
1256d514b0f3Smrg
1257d514b0f3Smrg/* Check a tree and its subtrees.  */
1258d514b0f3Smrgstatic void do_check_tree(mstate m, tchunkptr t) {
1259d514b0f3Smrg  tchunkptr head = 0;
1260d514b0f3Smrg  tchunkptr u = t;
1261d514b0f3Smrg  bindex_t tindex = t->index;
1262d514b0f3Smrg  size_t tsize = chunksize(t);
1263d514b0f3Smrg  bindex_t idx;
1264d514b0f3Smrg  compute_tree_index(tsize, idx);
1265d514b0f3Smrg  assert(m->user_data, tindex == idx);
1266d514b0f3Smrg  assert(m->user_data, tsize >= MIN_LARGE_SIZE);
1267d514b0f3Smrg  assert(m->user_data, tsize >= minsize_for_tree_index(idx));
1268d514b0f3Smrg  assert(m->user_data, (idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1))));
1269d514b0f3Smrg
1270d514b0f3Smrg  do { /* traverse through chain of same-sized nodes */
1271d514b0f3Smrg    do_check_any_chunk(m, ((mchunkptr)u));
1272d514b0f3Smrg    assert(m->user_data, u->index == tindex);
1273d514b0f3Smrg    assert(m->user_data, chunksize(u) == tsize);
1274d514b0f3Smrg    assert(m->user_data, !cinuse(u));
1275d514b0f3Smrg    assert(m->user_data, !next_pinuse(u));
1276d514b0f3Smrg    assert(m->user_data, u->fd->bk == u);
1277d514b0f3Smrg    assert(m->user_data, u->bk->fd == u);
1278d514b0f3Smrg    if (u->parent == 0) {
1279d514b0f3Smrg      assert(m->user_data, u->child[0] == 0);
1280d514b0f3Smrg      assert(m->user_data, u->child[1] == 0);
1281d514b0f3Smrg    }
1282d514b0f3Smrg    else {
1283d514b0f3Smrg      assert(m->user_data, head == 0); /* only one node on chain has parent */
1284d514b0f3Smrg      head = u;
1285d514b0f3Smrg      assert(m->user_data, u->parent != u);
1286d514b0f3Smrg      assert(m->user_data, u->parent->child[0] == u ||
1287d514b0f3Smrg             u->parent->child[1] == u ||
1288d514b0f3Smrg             *((tbinptr*)(u->parent)) == u);
1289d514b0f3Smrg      if (u->child[0] != 0) {
1290d514b0f3Smrg        assert(m->user_data, u->child[0]->parent == u);
1291d514b0f3Smrg        assert(m->user_data, u->child[0] != u);
1292d514b0f3Smrg        do_check_tree(m, u->child[0]);
1293d514b0f3Smrg      }
1294d514b0f3Smrg      if (u->child[1] != 0) {
1295d514b0f3Smrg        assert(m->user_data, u->child[1]->parent == u);
1296d514b0f3Smrg        assert(m->user_data, u->child[1] != u);
1297d514b0f3Smrg        do_check_tree(m, u->child[1]);
1298d514b0f3Smrg      }
1299d514b0f3Smrg      if (u->child[0] != 0 && u->child[1] != 0) {
1300d514b0f3Smrg        assert(m->user_data, chunksize(u->child[0]) < chunksize(u->child[1]));
1301d514b0f3Smrg      }
1302d514b0f3Smrg    }
1303d514b0f3Smrg    u = u->fd;
1304d514b0f3Smrg  } while (u != t);
1305d514b0f3Smrg  assert(m->user_data, head != 0);
1306d514b0f3Smrg}
1307d514b0f3Smrg
1308d514b0f3Smrg/*  Check all the chunks in a treebin.  */
1309d514b0f3Smrgstatic void do_check_treebin(mstate m, bindex_t i) {
1310d514b0f3Smrg  tbinptr* tb = treebin_at(m, i);
1311d514b0f3Smrg  tchunkptr t = *tb;
1312d514b0f3Smrg  int empty = (m->treemap & (1U << i)) == 0;
1313d514b0f3Smrg  if (t == 0)
1314d514b0f3Smrg    assert(m->user_data, empty);
1315d514b0f3Smrg  if (!empty)
1316d514b0f3Smrg    do_check_tree(m, t);
1317d514b0f3Smrg}
1318d514b0f3Smrg
1319d514b0f3Smrg/*  Check all the chunks in a smallbin.  */
1320d514b0f3Smrgstatic void do_check_smallbin(mstate m, bindex_t i) {
1321d514b0f3Smrg  sbinptr b = smallbin_at(m, i);
1322d514b0f3Smrg  mchunkptr p = b->bk;
1323d514b0f3Smrg  unsigned int empty = (m->smallmap & (1U << i)) == 0;
1324d514b0f3Smrg  if (p == b)
1325d514b0f3Smrg    assert(m->user_data, empty);
1326d514b0f3Smrg  if (!empty) {
1327d514b0f3Smrg    for (; p != b; p = p->bk) {
1328d514b0f3Smrg      size_t size = chunksize(p);
1329d514b0f3Smrg      mchunkptr q;
1330d514b0f3Smrg      /* each chunk claims to be free */
1331d514b0f3Smrg      do_check_free_chunk(m, p);
1332d514b0f3Smrg      /* chunk belongs in bin */
1333d514b0f3Smrg      assert(m->user_data, small_index(size) == i);
1334d514b0f3Smrg      assert(m->user_data, p->bk == b || chunksize(p->bk) == chunksize(p));
1335d514b0f3Smrg      /* chunk is followed by an inuse chunk */
1336d514b0f3Smrg      q = next_chunk(p);
1337d514b0f3Smrg      if (q->head != FENCEPOST_HEAD)
1338d514b0f3Smrg        do_check_inuse_chunk(m, q);
1339d514b0f3Smrg    }
1340d514b0f3Smrg  }
1341d514b0f3Smrg}
1342d514b0f3Smrg
1343d514b0f3Smrg/* Find x in a bin. Used in other check functions. */
1344d514b0f3Smrgstatic int bin_find(mstate m, mchunkptr x) {
1345d514b0f3Smrg  size_t size = chunksize(x);
1346d514b0f3Smrg  if (is_small(size)) {
1347d514b0f3Smrg    bindex_t sidx = small_index(size);
1348d514b0f3Smrg    sbinptr b = smallbin_at(m, sidx);
1349d514b0f3Smrg    if (smallmap_is_marked(m, sidx)) {
1350d514b0f3Smrg      mchunkptr p = b;
1351d514b0f3Smrg      do {
1352d514b0f3Smrg        if (p == x)
1353d514b0f3Smrg          return 1;
1354d514b0f3Smrg      } while ((p = p->fd) != b);
1355d514b0f3Smrg    }
1356d514b0f3Smrg  }
1357d514b0f3Smrg  else {
1358d514b0f3Smrg    bindex_t tidx;
1359d514b0f3Smrg    compute_tree_index(size, tidx);
1360d514b0f3Smrg    if (treemap_is_marked(m, tidx)) {
1361d514b0f3Smrg      tchunkptr t = *treebin_at(m, tidx);
1362d514b0f3Smrg      size_t sizebits = size << leftshift_for_tree_index(tidx);
1363d514b0f3Smrg      while (t != 0 && chunksize(t) != size) {
1364d514b0f3Smrg        t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
1365d514b0f3Smrg        sizebits <<= 1;
1366d514b0f3Smrg      }
1367d514b0f3Smrg      if (t != 0) {
1368d514b0f3Smrg        tchunkptr u = t;
1369d514b0f3Smrg        do {
1370d514b0f3Smrg          if (u == (tchunkptr)x)
1371d514b0f3Smrg            return 1;
1372d514b0f3Smrg        } while ((u = u->fd) != t);
1373d514b0f3Smrg      }
1374d514b0f3Smrg    }
1375d514b0f3Smrg  }
1376d514b0f3Smrg  return 0;
1377d514b0f3Smrg}
1378d514b0f3Smrg
1379d514b0f3Smrg/* Traverse each chunk and check it; return total */
1380d514b0f3Smrgstatic size_t traverse_and_check(mstate m) {
1381d514b0f3Smrg  size_t sum = 0;
1382d514b0f3Smrg  if (is_initialized(m)) {
1383d514b0f3Smrg    msegmentptr s = &m->seg;
1384d514b0f3Smrg    sum += m->topsize + TOP_FOOT_SIZE;
1385d514b0f3Smrg    while (s != 0) {
1386d514b0f3Smrg      mchunkptr q = align_as_chunk(s->base);
1387d514b0f3Smrg      mchunkptr lastq = 0;
1388d514b0f3Smrg      assert(m->user_data, pinuse(q));
1389d514b0f3Smrg      while (segment_holds(s, q) &&
1390d514b0f3Smrg             q != m->top && q->head != FENCEPOST_HEAD) {
1391d514b0f3Smrg        sum += chunksize(q);
1392d514b0f3Smrg        if (cinuse(q)) {
1393d514b0f3Smrg          assert(m->user_data, !bin_find(m, q));
1394d514b0f3Smrg          do_check_inuse_chunk(m, q);
1395d514b0f3Smrg        }
1396d514b0f3Smrg        else {
1397d514b0f3Smrg          assert(m->user_data, q == m->dv || bin_find(m, q));
1398d514b0f3Smrg          assert(m->user_data, lastq == 0 || cinuse(lastq)); /* Not 2 consecutive free */
1399d514b0f3Smrg          do_check_free_chunk(m, q);
1400d514b0f3Smrg        }
1401d514b0f3Smrg        lastq = q;
1402d514b0f3Smrg        q = next_chunk(q);
1403d514b0f3Smrg      }
1404d514b0f3Smrg      s = s->next;
1405d514b0f3Smrg    }
1406d514b0f3Smrg  }
1407d514b0f3Smrg  return sum;
1408d514b0f3Smrg}
1409d514b0f3Smrg
1410d514b0f3Smrg/* Check all properties of malloc_state. */
1411d514b0f3Smrgstatic void do_check_malloc_state(mstate m) {
1412d514b0f3Smrg  bindex_t i;
1413d514b0f3Smrg  size_t total;
1414d514b0f3Smrg  /* check bins */
1415d514b0f3Smrg  for (i = 0; i < NSMALLBINS; ++i)
1416d514b0f3Smrg    do_check_smallbin(m, i);
1417d514b0f3Smrg  for (i = 0; i < NTREEBINS; ++i)
1418d514b0f3Smrg    do_check_treebin(m, i);
1419d514b0f3Smrg
1420d514b0f3Smrg  if (m->dvsize != 0) { /* check dv chunk */
1421d514b0f3Smrg    do_check_any_chunk(m, m->dv);
1422d514b0f3Smrg    assert(m->user_data, m->dvsize == chunksize(m->dv));
1423d514b0f3Smrg    assert(m->user_data, m->dvsize >= MIN_CHUNK_SIZE);
1424d514b0f3Smrg    assert(m->user_data, bin_find(m, m->dv) == 0);
1425d514b0f3Smrg  }
1426d514b0f3Smrg
1427d514b0f3Smrg  if (m->top != 0) {   /* check top chunk */
1428d514b0f3Smrg    do_check_top_chunk(m, m->top);
1429d514b0f3Smrg    assert(m->user_data, m->topsize == chunksize(m->top));
1430d514b0f3Smrg    assert(m->user_data, m->topsize > 0);
1431d514b0f3Smrg    assert(m->user_data, bin_find(m, m->top) == 0);
1432d514b0f3Smrg  }
1433d514b0f3Smrg
1434d514b0f3Smrg  total = traverse_and_check(m);
1435d514b0f3Smrg  assert(m->user_data, total <= m->footprint);
1436d514b0f3Smrg  assert(m->user_data, m->footprint <= m->max_footprint);
1437d514b0f3Smrg}
1438d514b0f3Smrg#endif /* DEBUG */
1439d514b0f3Smrg
1440d514b0f3Smrg/* ----------------------------- statistics ------------------------------ */
1441d514b0f3Smrg
1442d514b0f3Smrg#if !NO_MALLINFO
1443d514b0f3Smrgstatic struct mallinfo internal_mallinfo(mstate m) {
1444d514b0f3Smrg  struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1445d514b0f3Smrg  if (!PREACTION(m)) {
1446d514b0f3Smrg    check_malloc_state(m);
1447d514b0f3Smrg    if (is_initialized(m)) {
1448d514b0f3Smrg      size_t nfree = SIZE_T_ONE; /* top always free */
1449d514b0f3Smrg      size_t mfree = m->topsize + TOP_FOOT_SIZE;
1450d514b0f3Smrg      size_t sum = mfree;
1451d514b0f3Smrg      msegmentptr s = &m->seg;
1452d514b0f3Smrg      while (s != 0) {
1453d514b0f3Smrg        mchunkptr q = align_as_chunk(s->base);
1454d514b0f3Smrg        while (segment_holds(s, q) &&
1455d514b0f3Smrg               q != m->top && q->head != FENCEPOST_HEAD) {
1456d514b0f3Smrg          size_t sz = chunksize(q);
1457d514b0f3Smrg          sum += sz;
1458d514b0f3Smrg          if (!cinuse(q)) {
1459d514b0f3Smrg            mfree += sz;
1460d514b0f3Smrg            ++nfree;
1461d514b0f3Smrg          }
1462d514b0f3Smrg          q = next_chunk(q);
1463d514b0f3Smrg        }
1464d514b0f3Smrg        s = s->next;
1465d514b0f3Smrg      }
1466d514b0f3Smrg
1467d514b0f3Smrg      nm.arena    = sum;
1468d514b0f3Smrg      nm.ordblks  = nfree;
1469d514b0f3Smrg      nm.hblkhd   = m->footprint - sum;
1470d514b0f3Smrg      nm.usmblks  = m->max_footprint;
1471d514b0f3Smrg      nm.uordblks = m->footprint - mfree;
1472d514b0f3Smrg      nm.fordblks = mfree;
1473d514b0f3Smrg      nm.keepcost = m->topsize;
1474d514b0f3Smrg    }
1475d514b0f3Smrg
1476d514b0f3Smrg    POSTACTION(m);
1477d514b0f3Smrg  }
1478d514b0f3Smrg  return nm;
1479d514b0f3Smrg}
1480d514b0f3Smrg#endif /* !NO_MALLINFO */
1481d514b0f3Smrg
1482d514b0f3Smrgstatic void internal_malloc_stats(mstate m, size_t *ret_maxfp, size_t *ret_fp,
1483d514b0f3Smrg                                  size_t *ret_used) {
1484d514b0f3Smrg  if (!PREACTION(m)) {
1485d514b0f3Smrg    size_t maxfp = 0;
1486d514b0f3Smrg    size_t fp = 0;
1487d514b0f3Smrg    size_t used = 0;
1488d514b0f3Smrg    check_malloc_state(m);
1489d514b0f3Smrg    if (is_initialized(m)) {
1490d514b0f3Smrg      msegmentptr s = &m->seg;
1491d514b0f3Smrg      maxfp = m->max_footprint;
1492d514b0f3Smrg      fp = m->footprint;
1493d514b0f3Smrg      used = fp - (m->topsize + TOP_FOOT_SIZE);
1494d514b0f3Smrg
1495d514b0f3Smrg      while (s != 0) {
1496d514b0f3Smrg        mchunkptr q = align_as_chunk(s->base);
1497d514b0f3Smrg        while (segment_holds(s, q) &&
1498d514b0f3Smrg               q != m->top && q->head != FENCEPOST_HEAD) {
1499d514b0f3Smrg          if (!cinuse(q))
1500d514b0f3Smrg            used -= chunksize(q);
1501d514b0f3Smrg          q = next_chunk(q);
1502d514b0f3Smrg        }
1503d514b0f3Smrg        s = s->next;
1504d514b0f3Smrg      }
1505d514b0f3Smrg    }
1506d514b0f3Smrg
1507d514b0f3Smrg    if (ret_maxfp || ret_fp || ret_used) {
1508d514b0f3Smrg        if (ret_maxfp) {
1509d514b0f3Smrg            *ret_maxfp = maxfp;
1510d514b0f3Smrg        }
1511d514b0f3Smrg        if (ret_fp) {
1512d514b0f3Smrg            *ret_fp = fp;
1513d514b0f3Smrg        }
1514d514b0f3Smrg        if (ret_used) {
1515d514b0f3Smrg            *ret_used = used;
1516d514b0f3Smrg        }
1517d514b0f3Smrg    } else {
1518d514b0f3Smrg        PRINT((m->user_data, "max system bytes = %10lu\n", (unsigned long)(maxfp)));
1519d514b0f3Smrg        PRINT((m->user_data, "system bytes     = %10lu\n", (unsigned long)(fp)));
1520d514b0f3Smrg        PRINT((m->user_data, "in use bytes     = %10lu\n", (unsigned long)(used)));
1521d514b0f3Smrg    }
1522d514b0f3Smrg
1523d514b0f3Smrg    POSTACTION(m);
1524d514b0f3Smrg  }
1525d514b0f3Smrg}
1526d514b0f3Smrg
1527d514b0f3Smrg/* ----------------------- Operations on smallbins ----------------------- */
1528d514b0f3Smrg
1529d514b0f3Smrg/*
1530d514b0f3Smrg  Various forms of linking and unlinking are defined as macros, even
1531d514b0f3Smrg  the ones for trees, which are very long but have very short typical
1532d514b0f3Smrg  paths.  This is ugly but reduces reliance on compiler inlining
1533d514b0f3Smrg  support.
1534d514b0f3Smrg*/
1535d514b0f3Smrg
1536d514b0f3Smrg/* Link a free chunk into a smallbin  */
1537d514b0f3Smrg#define insert_small_chunk(M, P, S) {\
1538d514b0f3Smrg  bindex_t I  = small_index(S);\
1539d514b0f3Smrg  mchunkptr B = smallbin_at(M, I);\
1540d514b0f3Smrg  mchunkptr F = B;\
1541d514b0f3Smrg  assert((M)->user_data, S >= MIN_CHUNK_SIZE);\
1542d514b0f3Smrg  if (!smallmap_is_marked(M, I))\
1543d514b0f3Smrg    mark_smallmap(M, I);\
1544d514b0f3Smrg  else if (RTCHECK(ok_address(M, B->fd)))\
1545d514b0f3Smrg    F = B->fd;\
1546d514b0f3Smrg  else {\
1547d514b0f3Smrg    CORRUPTION_ERROR_ACTION(M);\
1548d514b0f3Smrg  }\
1549d514b0f3Smrg  B->fd = P;\
1550d514b0f3Smrg  F->bk = P;\
1551d514b0f3Smrg  P->fd = F;\
1552d514b0f3Smrg  P->bk = B;\
1553d514b0f3Smrg}
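
/* Note: P is always linked in as the first chunk of its bin (B->fd), and
   allocation from smallbins below also takes the first chunk, so each
   smallbin behaves roughly LIFO. */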
1554d514b0f3Smrg
1555d514b0f3Smrg/* Unlink a chunk from a smallbin  */
1556d514b0f3Smrg#define unlink_small_chunk(M, P, S) {\
1557d514b0f3Smrg  mchunkptr F = P->fd;\
1558d514b0f3Smrg  mchunkptr B = P->bk;\
1559d514b0f3Smrg  bindex_t I = small_index(S);\
1560d514b0f3Smrg  assert((M)->user_data, P != B);\
1561d514b0f3Smrg  assert((M)->user_data, P != F);\
1562d514b0f3Smrg  assert((M)->user_data, chunksize(P) == small_index2size(I));\
1563d514b0f3Smrg  if (F == B)\
1564d514b0f3Smrg    clear_smallmap(M, I);\
1565d514b0f3Smrg  else if (RTCHECK((F == smallbin_at(M,I) || ok_address(M, F)) &&\
1566d514b0f3Smrg                   (B == smallbin_at(M,I) || ok_address(M, B)))) {\
1567d514b0f3Smrg    F->bk = B;\
1568d514b0f3Smrg    B->fd = F;\
1569d514b0f3Smrg  }\
1570d514b0f3Smrg  else {\
1571d514b0f3Smrg    CORRUPTION_ERROR_ACTION(M);\
1572d514b0f3Smrg  }\
1573d514b0f3Smrg}
1574d514b0f3Smrg
1575d514b0f3Smrg/* Unlink the first chunk from a smallbin */
1576d514b0f3Smrg#define unlink_first_small_chunk(M, B, P, I) {\
1577d514b0f3Smrg  mchunkptr F = P->fd;\
1578d514b0f3Smrg  assert((M)->user_data, P != B);\
1579d514b0f3Smrg  assert((M)->user_data, P != F);\
1580d514b0f3Smrg  assert((M)->user_data, chunksize(P) == small_index2size(I));\
1581d514b0f3Smrg  if (B == F)\
1582d514b0f3Smrg    clear_smallmap(M, I);\
1583d514b0f3Smrg  else if (RTCHECK(ok_address(M, F))) {\
1584d514b0f3Smrg    B->fd = F;\
1585d514b0f3Smrg    F->bk = B;\
1586d514b0f3Smrg  }\
1587d514b0f3Smrg  else {\
1588d514b0f3Smrg    CORRUPTION_ERROR_ACTION(M);\
1589d514b0f3Smrg  }\
1590d514b0f3Smrg}
1591d514b0f3Smrg
1592d514b0f3Smrg/* Replace dv node, binning the old one */
1593d514b0f3Smrg/* Used only when dvsize known to be small */
1594d514b0f3Smrg#define replace_dv(M, P, S) {\
1595d514b0f3Smrg  size_t DVS = M->dvsize;\
1596d514b0f3Smrg  if (DVS != 0) {\
1597d514b0f3Smrg    mchunkptr DV = M->dv;\
1598d514b0f3Smrg    assert((M)->user_data, is_small(DVS));\
1599d514b0f3Smrg    insert_small_chunk(M, DV, DVS);\
1600d514b0f3Smrg  }\
1601d514b0f3Smrg  M->dvsize = S;\
1602d514b0f3Smrg  M->dv = P;\
1603d514b0f3Smrg}
1604d514b0f3Smrg
1605d514b0f3Smrg
1606d514b0f3Smrg/* ------------------------- Operations on trees ------------------------- */
1607d514b0f3Smrg
1608d514b0f3Smrg/* Insert chunk into tree */
1609d514b0f3Smrg#define insert_large_chunk(M, X, S) {\
1610d514b0f3Smrg  tbinptr* H;\
1611d514b0f3Smrg  bindex_t I;\
1612d514b0f3Smrg  compute_tree_index(S, I);\
1613d514b0f3Smrg  H = treebin_at(M, I);\
1614d514b0f3Smrg  X->index = I;\
1615d514b0f3Smrg  X->child[0] = X->child[1] = 0;\
1616d514b0f3Smrg  if (!treemap_is_marked(M, I)) {\
1617d514b0f3Smrg    mark_treemap(M, I);\
1618d514b0f3Smrg    *H = X;\
1619d514b0f3Smrg    X->parent = (tchunkptr)H;\
1620d514b0f3Smrg    X->fd = X->bk = X;\
1621d514b0f3Smrg  }\
1622d514b0f3Smrg  else {\
1623d514b0f3Smrg    tchunkptr T = *H;\
1624d514b0f3Smrg    size_t K = S << leftshift_for_tree_index(I);\
1625d514b0f3Smrg    for (;;) {\
1626d514b0f3Smrg      if (chunksize(T) != S) {\
1627d514b0f3Smrg        tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
1628d514b0f3Smrg        K <<= 1;\
1629d514b0f3Smrg        if (*C != 0)\
1630d514b0f3Smrg          T = *C;\
1631d514b0f3Smrg        else if (RTCHECK(ok_address(M, C))) {\
1632d514b0f3Smrg          *C = X;\
1633d514b0f3Smrg          X->parent = T;\
1634d514b0f3Smrg          X->fd = X->bk = X;\
1635d514b0f3Smrg          break;\
1636d514b0f3Smrg        }\
1637d514b0f3Smrg        else {\
1638d514b0f3Smrg          CORRUPTION_ERROR_ACTION(M);\
1639d514b0f3Smrg          break;\
1640d514b0f3Smrg        }\
1641d514b0f3Smrg      }\
1642d514b0f3Smrg      else {\
1643d514b0f3Smrg        tchunkptr F = T->fd;\
1644d514b0f3Smrg        if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\
1645d514b0f3Smrg          T->fd = F->bk = X;\
1646d514b0f3Smrg          X->fd = F;\
1647d514b0f3Smrg          X->bk = T;\
1648d514b0f3Smrg          X->parent = 0;\
1649d514b0f3Smrg          break;\
1650d514b0f3Smrg        }\
1651d514b0f3Smrg        else {\
1652d514b0f3Smrg          CORRUPTION_ERROR_ACTION(M);\
1653d514b0f3Smrg          break;\
1654d514b0f3Smrg        }\
1655d514b0f3Smrg      }\
1656d514b0f3Smrg    }\
1657d514b0f3Smrg  }\
1658d514b0f3Smrg}
1659d514b0f3Smrg
1660d514b0f3Smrg/*
1661d514b0f3Smrg  Unlink steps:
1662d514b0f3Smrg
1663d514b0f3Smrg  1. If x is a chained node, unlink it from its same-sized fd/bk links
1664d514b0f3Smrg     and choose its bk node as its replacement.
1665d514b0f3Smrg  2. If x was the last node of its size, but not a leaf node, it must
1666d514b0f3Smrg     be replaced with a leaf node (not merely one with an open left or
1667d514b0f3Smrg     right), to make sure that lefts and rights of descendants
1668d514b0f3Smrg     correspond properly to bit masks.  We use the rightmost descendant
1669d514b0f3Smrg     of x.  We could use any other leaf, but this is easy to locate and
1670d514b0f3Smrg     tends to counteract removal of leftmosts elsewhere, and so keeps
1671d514b0f3Smrg     paths shorter than minimally guaranteed.  This doesn't loop much
1672d514b0f3Smrg     because on average a node in a tree is near the bottom.
1673d514b0f3Smrg  3. If x is the base of a chain (i.e., has parent links) relink
1674d514b0f3Smrg     x's parent and children to x's replacement (or null if none).
1675d514b0f3Smrg*/
1676d514b0f3Smrg
1677d514b0f3Smrg#define unlink_large_chunk(M, X) {\
1678d514b0f3Smrg  tchunkptr XP = X->parent;\
1679d514b0f3Smrg  tchunkptr R;\
1680d514b0f3Smrg  if (X->bk != X) {\
1681d514b0f3Smrg    tchunkptr F = X->fd;\
1682d514b0f3Smrg    R = X->bk;\
1683d514b0f3Smrg    if (RTCHECK(ok_address(M, F))) {\
1684d514b0f3Smrg      F->bk = R;\
1685d514b0f3Smrg      R->fd = F;\
1686d514b0f3Smrg    }\
1687d514b0f3Smrg    else {\
1688d514b0f3Smrg      CORRUPTION_ERROR_ACTION(M);\
1689d514b0f3Smrg    }\
1690d514b0f3Smrg  }\
1691d514b0f3Smrg  else {\
1692d514b0f3Smrg    tchunkptr* RP;\
1693d514b0f3Smrg    if (((R = *(RP = &(X->child[1]))) != 0) ||\
1694d514b0f3Smrg        ((R = *(RP = &(X->child[0]))) != 0)) {\
1695d514b0f3Smrg      tchunkptr* CP;\
1696d514b0f3Smrg      while ((*(CP = &(R->child[1])) != 0) ||\
1697d514b0f3Smrg             (*(CP = &(R->child[0])) != 0)) {\
1698d514b0f3Smrg        R = *(RP = CP);\
1699d514b0f3Smrg      }\
1700d514b0f3Smrg      if (RTCHECK(ok_address(M, RP)))\
1701d514b0f3Smrg        *RP = 0;\
1702d514b0f3Smrg      else {\
1703d514b0f3Smrg        CORRUPTION_ERROR_ACTION(M);\
1704d514b0f3Smrg      }\
1705d514b0f3Smrg    }\
1706d514b0f3Smrg  }\
1707d514b0f3Smrg  if (XP != 0) {\
1708d514b0f3Smrg    tbinptr* H = treebin_at(M, X->index);\
1709d514b0f3Smrg    if (X == *H) {\
1710d514b0f3Smrg      if ((*H = R) == 0) \
1711d514b0f3Smrg        clear_treemap(M, X->index);\
1712d514b0f3Smrg    }\
1713d514b0f3Smrg    else if (RTCHECK(ok_address(M, XP))) {\
1714d514b0f3Smrg      if (XP->child[0] == X) \
1715d514b0f3Smrg        XP->child[0] = R;\
1716d514b0f3Smrg      else \
1717d514b0f3Smrg        XP->child[1] = R;\
1718d514b0f3Smrg    }\
1719d514b0f3Smrg    else\
1720d514b0f3Smrg      CORRUPTION_ERROR_ACTION(M);\
1721d514b0f3Smrg    if (R != 0) {\
1722d514b0f3Smrg      if (RTCHECK(ok_address(M, R))) {\
1723d514b0f3Smrg        tchunkptr C0, C1;\
1724d514b0f3Smrg        R->parent = XP;\
1725d514b0f3Smrg        if ((C0 = X->child[0]) != 0) {\
1726d514b0f3Smrg          if (RTCHECK(ok_address(M, C0))) {\
1727d514b0f3Smrg            R->child[0] = C0;\
1728d514b0f3Smrg            C0->parent = R;\
1729d514b0f3Smrg          }\
1730d514b0f3Smrg          else\
1731d514b0f3Smrg            CORRUPTION_ERROR_ACTION(M);\
1732d514b0f3Smrg        }\
1733d514b0f3Smrg        if ((C1 = X->child[1]) != 0) {\
1734d514b0f3Smrg          if (RTCHECK(ok_address(M, C1))) {\
1735d514b0f3Smrg            R->child[1] = C1;\
1736d514b0f3Smrg            C1->parent = R;\
1737d514b0f3Smrg          }\
1738d514b0f3Smrg          else\
1739d514b0f3Smrg            CORRUPTION_ERROR_ACTION(M);\
1740d514b0f3Smrg        }\
1741d514b0f3Smrg      }\
1742d514b0f3Smrg      else\
1743d514b0f3Smrg        CORRUPTION_ERROR_ACTION(M);\
1744d514b0f3Smrg    }\
1745d514b0f3Smrg  }\
1746d514b0f3Smrg}
1747d514b0f3Smrg
1748d514b0f3Smrg/* Relays to large vs small bin operations */
1749d514b0f3Smrg
1750d514b0f3Smrg#define insert_chunk(M, P, S)\
1751d514b0f3Smrg  if (is_small(S)) insert_small_chunk(M, P, S)\
1752d514b0f3Smrg  else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
1753d514b0f3Smrg
1754d514b0f3Smrg#define unlink_chunk(M, P, S)\
1755d514b0f3Smrg  if (is_small(S)) unlink_small_chunk(M, P, S)\
1756d514b0f3Smrg  else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
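
/* Note: both relays expand to an if/else statement, so they are meant to be
   used in statement position, as in the "insert_chunk(fm, p, psize);" calls
   later in this file. */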
1757d514b0f3Smrg
1758d514b0f3Smrg
1759d514b0f3Smrg/* Relays to internal calls to malloc/free from realloc, memalign etc */
1760d514b0f3Smrg
1761d514b0f3Smrg#define internal_malloc(m, b) mspace_malloc(m, b)
1762d514b0f3Smrg#define internal_free(m, mem) mspace_free(m,mem);
1763d514b0f3Smrg
1764d514b0f3Smrg
1765d514b0f3Smrg/* -------------------------- mspace management -------------------------- */
1766d514b0f3Smrg
1767d514b0f3Smrg/* Initialize top chunk and its size */
1768d514b0f3Smrgstatic void init_top(mstate m, mchunkptr p, size_t psize) {
1769d514b0f3Smrg  /* Ensure alignment */
1770d514b0f3Smrg  size_t offset = align_offset(chunk2mem(p));
1771d514b0f3Smrg  p = (mchunkptr)((char*)p + offset);
1772d514b0f3Smrg  psize -= offset;
1773d514b0f3Smrg
1774d514b0f3Smrg  m->top = p;
1775d514b0f3Smrg  m->topsize = psize;
1776d514b0f3Smrg  p->head = psize | PINUSE_BIT;
1777d514b0f3Smrg  /* set size of fake trailing chunk holding overhead space only once */
1778d514b0f3Smrg  chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
1779d514b0f3Smrg}
1780d514b0f3Smrg
1781d514b0f3Smrg/* Initialize bins for a new mstate that is otherwise zeroed out */
1782d514b0f3Smrgstatic void init_bins(mstate m) {
1783d514b0f3Smrg  /* Establish circular links for smallbins */
1784d514b0f3Smrg  bindex_t i;
1785d514b0f3Smrg  for (i = 0; i < NSMALLBINS; ++i) {
1786d514b0f3Smrg    sbinptr bin = smallbin_at(m,i);
1787d514b0f3Smrg    bin->fd = bin->bk = bin;
1788d514b0f3Smrg  }
1789d514b0f3Smrg}
1790d514b0f3Smrg
1791d514b0f3Smrg#if PROCEED_ON_ERROR
1792d514b0f3Smrg
1793d514b0f3Smrg/* default corruption action */
1794d514b0f3Smrgstatic void reset_on_error(mstate m) {
1795d514b0f3Smrg  int i;
1796d514b0f3Smrg  ++malloc_corruption_error_count;
1797d514b0f3Smrg  /* Reinitialize fields to forget about all memory */
1798d514b0f3Smrg  m->smallbins = m->treebins = 0;
1799d514b0f3Smrg  m->dvsize = m->topsize = 0;
1800d514b0f3Smrg  m->seg.base = 0;
1801d514b0f3Smrg  m->seg.size = 0;
1802d514b0f3Smrg  m->seg.next = 0;
1803d514b0f3Smrg  m->top = m->dv = 0;
1804d514b0f3Smrg  for (i = 0; i < NTREEBINS; ++i)
1805d514b0f3Smrg    *treebin_at(m, i) = 0;
1806d514b0f3Smrg  init_bins(m);
1807d514b0f3Smrg}
1808d514b0f3Smrg#endif /* PROCEED_ON_ERROR */
1809d514b0f3Smrg
1810d514b0f3Smrg#if 0
1811d514b0f3Smrg/* Allocate chunk and prepend remainder with chunk in successor base. */
1812d514b0f3Smrgstatic void* prepend_alloc(mstate m, char* newbase, char* oldbase,
1813d514b0f3Smrg                           size_t nb) {
1814d514b0f3Smrg  mchunkptr p = align_as_chunk(newbase);
1815d514b0f3Smrg  mchunkptr oldfirst = align_as_chunk(oldbase);
1816d514b0f3Smrg  size_t psize = (char*)oldfirst - (char*)p;
1817d514b0f3Smrg  mchunkptr q = chunk_plus_offset(p, nb);
1818d514b0f3Smrg  size_t qsize = psize - nb;
1819d514b0f3Smrg  set_size_and_pinuse_of_inuse_chunk(m, p, nb);
1820d514b0f3Smrg
1821d514b0f3Smrg  assert(m->user_data, (char*)oldfirst > (char*)q);
1822d514b0f3Smrg  assert(m->user_data, pinuse(oldfirst));
1823d514b0f3Smrg  assert(m->user_data, qsize >= MIN_CHUNK_SIZE);
1824d514b0f3Smrg
1825d514b0f3Smrg  /* consolidate remainder with first chunk of old base */
1826d514b0f3Smrg  if (oldfirst == m->top) {
1827d514b0f3Smrg    size_t tsize = m->topsize += qsize;
1828d514b0f3Smrg    m->top = q;
1829d514b0f3Smrg    q->head = tsize | PINUSE_BIT;
1830d514b0f3Smrg    check_top_chunk(m, q);
1831d514b0f3Smrg  }
1832d514b0f3Smrg  else if (oldfirst == m->dv) {
1833d514b0f3Smrg    size_t dsize = m->dvsize += qsize;
1834d514b0f3Smrg    m->dv = q;
1835d514b0f3Smrg    set_size_and_pinuse_of_free_chunk(q, dsize);
1836d514b0f3Smrg  }
1837d514b0f3Smrg  else {
1838d514b0f3Smrg    if (!cinuse(oldfirst)) {
1839d514b0f3Smrg      size_t nsize = chunksize(oldfirst);
1840d514b0f3Smrg      unlink_chunk(m, oldfirst, nsize);
1841d514b0f3Smrg      oldfirst = chunk_plus_offset(oldfirst, nsize);
1842d514b0f3Smrg      qsize += nsize;
1843d514b0f3Smrg    }
1844d514b0f3Smrg    set_free_with_pinuse(q, qsize, oldfirst);
1845d514b0f3Smrg    insert_chunk(m, q, qsize);
1846d514b0f3Smrg    check_free_chunk(m, q);
1847d514b0f3Smrg  }
1848d514b0f3Smrg
1849d514b0f3Smrg  check_malloced_chunk(m, chunk2mem(p), nb);
1850d514b0f3Smrg  return chunk2mem(p);
1851d514b0f3Smrg}
1852d514b0f3Smrg#endif
1853d514b0f3Smrg
1854d514b0f3Smrg/* -------------------------- System allocation -------------------------- */
1855d514b0f3Smrg
1856d514b0f3Smrg/* Get memory from system using MORECORE or MMAP; this build has neither, so sys_alloc always fails (returns 0) */
1857d514b0f3Smrgstatic void* sys_alloc(mstate m, size_t nb) {
1858d514b0f3Smrg  MALLOC_FAILURE_ACTION;
1859d514b0f3Smrg  return 0;
1860d514b0f3Smrg}
1861d514b0f3Smrg
1862d514b0f3Smrg/* ---------------------------- malloc support --------------------------- */
1863d514b0f3Smrg
1864d514b0f3Smrg/* allocate a large request from the best fitting chunk in a treebin */
1865d514b0f3Smrgstatic void* tmalloc_large(mstate m, size_t nb) {
1866d514b0f3Smrg  tchunkptr v = 0;
1867d514b0f3Smrg  size_t rsize = -nb; /* Unsigned negation */
1868d514b0f3Smrg  tchunkptr t;
1869d514b0f3Smrg  bindex_t idx;
1870d514b0f3Smrg  compute_tree_index(nb, idx);
1871d514b0f3Smrg
1872d514b0f3Smrg  if ((t = *treebin_at(m, idx)) != 0) {
1873d514b0f3Smrg    /* Traverse tree for this bin looking for node with size == nb */
1874d514b0f3Smrg    size_t sizebits = nb << leftshift_for_tree_index(idx);
1875d514b0f3Smrg    tchunkptr rst = 0;  /* The deepest untaken right subtree */
1876d514b0f3Smrg    for (;;) {
1877d514b0f3Smrg      tchunkptr rt;
1878d514b0f3Smrg      size_t trem = chunksize(t) - nb;
1879d514b0f3Smrg      if (trem < rsize) {
1880d514b0f3Smrg        v = t;
1881d514b0f3Smrg        if ((rsize = trem) == 0)
1882d514b0f3Smrg          break;
1883d514b0f3Smrg      }
1884d514b0f3Smrg      rt = t->child[1];
1885d514b0f3Smrg      t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
1886d514b0f3Smrg      if (rt != 0 && rt != t)
1887d514b0f3Smrg        rst = rt;
1888d514b0f3Smrg      if (t == 0) {
1889d514b0f3Smrg        t = rst; /* set t to least subtree holding sizes > nb */
1890d514b0f3Smrg        break;
1891d514b0f3Smrg      }
1892d514b0f3Smrg      sizebits <<= 1;
1893d514b0f3Smrg    }
1894d514b0f3Smrg  }
1895d514b0f3Smrg
1896d514b0f3Smrg  if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
1897d514b0f3Smrg    binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
1898d514b0f3Smrg    if (leftbits != 0) {
1899d514b0f3Smrg      bindex_t i;
1900d514b0f3Smrg      binmap_t leastbit = least_bit(leftbits);
1901d514b0f3Smrg      compute_bit2idx(leastbit, i);
1902d514b0f3Smrg      t = *treebin_at(m, i);
1903d514b0f3Smrg    }
1904d514b0f3Smrg  }
1905d514b0f3Smrg
1906d514b0f3Smrg  while (t != 0) { /* find smallest of tree or subtree */
1907d514b0f3Smrg    size_t trem = chunksize(t) - nb;
1908d514b0f3Smrg    if (trem < rsize) {
1909d514b0f3Smrg      rsize = trem;
1910d514b0f3Smrg      v = t;
1911d514b0f3Smrg    }
1912d514b0f3Smrg    t = leftmost_child(t);
1913d514b0f3Smrg  }
1914d514b0f3Smrg
1915d514b0f3Smrg  /*  If dv is a better fit, return 0 so malloc will use it */
1916d514b0f3Smrg  if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
1917d514b0f3Smrg    if (RTCHECK(ok_address(m, v))) { /* split */
1918d514b0f3Smrg      mchunkptr r = chunk_plus_offset(v, nb);
1919d514b0f3Smrg      assert(m->user_data, chunksize(v) == rsize + nb);
1920d514b0f3Smrg      if (RTCHECK(ok_next(v, r))) {
1921d514b0f3Smrg        unlink_large_chunk(m, v);
1922d514b0f3Smrg        if (rsize < MIN_CHUNK_SIZE)
1923d514b0f3Smrg          set_inuse_and_pinuse(m, v, (rsize + nb));
1924d514b0f3Smrg        else {
1925d514b0f3Smrg          set_size_and_pinuse_of_inuse_chunk(m, v, nb);
1926d514b0f3Smrg          set_size_and_pinuse_of_free_chunk(r, rsize);
1927d514b0f3Smrg          insert_chunk(m, r, rsize);
1928d514b0f3Smrg        }
1929d514b0f3Smrg        return chunk2mem(v);
1930d514b0f3Smrg      }
1931d514b0f3Smrg    }
1932d514b0f3Smrg    CORRUPTION_ERROR_ACTION(m);
1933d514b0f3Smrg  }
1934d514b0f3Smrg  return 0;
1935d514b0f3Smrg}
1936d514b0f3Smrg
1937d514b0f3Smrg/* allocate a small request from the best fitting chunk in a treebin */
1938d514b0f3Smrgstatic void* tmalloc_small(mstate m, size_t nb) {
1939d514b0f3Smrg  tchunkptr t, v;
1940d514b0f3Smrg  size_t rsize;
1941d514b0f3Smrg  bindex_t i;
1942d514b0f3Smrg  binmap_t leastbit = least_bit(m->treemap);
1943d514b0f3Smrg  compute_bit2idx(leastbit, i);
1944d514b0f3Smrg
1945d514b0f3Smrg  v = t = *treebin_at(m, i);
1946d514b0f3Smrg  rsize = chunksize(t) - nb;
1947d514b0f3Smrg
1948d514b0f3Smrg  while ((t = leftmost_child(t)) != 0) {
1949d514b0f3Smrg    size_t trem = chunksize(t) - nb;
1950d514b0f3Smrg    if (trem < rsize) {
1951d514b0f3Smrg      rsize = trem;
1952d514b0f3Smrg      v = t;
1953d514b0f3Smrg    }
1954d514b0f3Smrg  }
1955d514b0f3Smrg
1956d514b0f3Smrg  if (RTCHECK(ok_address(m, v))) {
1957d514b0f3Smrg    mchunkptr r = chunk_plus_offset(v, nb);
1958d514b0f3Smrg    assert(m->user_data, chunksize(v) == rsize + nb);
1959d514b0f3Smrg    if (RTCHECK(ok_next(v, r))) {
1960d514b0f3Smrg      unlink_large_chunk(m, v);
1961d514b0f3Smrg      if (rsize < MIN_CHUNK_SIZE)
1962d514b0f3Smrg        set_inuse_and_pinuse(m, v, (rsize + nb));
1963d514b0f3Smrg      else {
1964d514b0f3Smrg        set_size_and_pinuse_of_inuse_chunk(m, v, nb);
1965d514b0f3Smrg        set_size_and_pinuse_of_free_chunk(r, rsize);
1966d514b0f3Smrg        replace_dv(m, r, rsize);
1967d514b0f3Smrg      }
1968d514b0f3Smrg      return chunk2mem(v);
1969d514b0f3Smrg    }
1970d514b0f3Smrg  }
1971d514b0f3Smrg
1972d514b0f3Smrg  CORRUPTION_ERROR_ACTION(m);
1973d514b0f3Smrg  return 0;
1974d514b0f3Smrg}
1975d514b0f3Smrg
1976d514b0f3Smrg/* --------------------------- realloc support --------------------------- */
1977d514b0f3Smrg
1978d514b0f3Smrgstatic void* internal_realloc(mstate m, void* oldmem, size_t bytes) {
1979d514b0f3Smrg  if (bytes >= MAX_REQUEST) {
1980d514b0f3Smrg    MALLOC_FAILURE_ACTION;
1981d514b0f3Smrg    return 0;
1982d514b0f3Smrg  }
1983d514b0f3Smrg  if (!PREACTION(m)) {
1984d514b0f3Smrg    mchunkptr oldp = mem2chunk(oldmem);
1985d514b0f3Smrg    size_t oldsize = chunksize(oldp);
1986d514b0f3Smrg    mchunkptr next = chunk_plus_offset(oldp, oldsize);
1987d514b0f3Smrg    mchunkptr newp = 0;
1988d514b0f3Smrg    void* extra = 0;
1989d514b0f3Smrg
1990d514b0f3Smrg    /* Try to either shrink or extend into top. Else malloc-copy-free */
1991d514b0f3Smrg
1992d514b0f3Smrg    if (RTCHECK(ok_address(m, oldp) && ok_cinuse(oldp) &&
1993d514b0f3Smrg                ok_next(oldp, next) && ok_pinuse(next))) {
1994d514b0f3Smrg      size_t nb = request2size(bytes);
1995d514b0f3Smrg      if (oldsize >= nb) { /* already big enough */
1996d514b0f3Smrg        size_t rsize = oldsize - nb;
1997d514b0f3Smrg        newp = oldp;
1998d514b0f3Smrg        if (rsize >= MIN_CHUNK_SIZE) {
1999d514b0f3Smrg          mchunkptr remainder = chunk_plus_offset(newp, nb);
2000d514b0f3Smrg          set_inuse(m, newp, nb);
2001d514b0f3Smrg          set_inuse(m, remainder, rsize);
2002d514b0f3Smrg          extra = chunk2mem(remainder);
2003d514b0f3Smrg        }
2004d514b0f3Smrg      }
2005d514b0f3Smrg      else if (next == m->top && oldsize + m->topsize > nb) {
2006d514b0f3Smrg        /* Expand into top */
2007d514b0f3Smrg        size_t newsize = oldsize + m->topsize;
2008d514b0f3Smrg        size_t newtopsize = newsize - nb;
2009d514b0f3Smrg        mchunkptr newtop = chunk_plus_offset(oldp, nb);
2010d514b0f3Smrg        set_inuse(m, oldp, nb);
2011d514b0f3Smrg        newtop->head = newtopsize |PINUSE_BIT;
2012d514b0f3Smrg        m->top = newtop;
2013d514b0f3Smrg        m->topsize = newtopsize;
2014d514b0f3Smrg        newp = oldp;
2015d514b0f3Smrg      }
2016d514b0f3Smrg    }
2017d514b0f3Smrg    else {
2018d514b0f3Smrg      USAGE_ERROR_ACTION(m, oldmem);
2019d514b0f3Smrg      POSTACTION(m);
2020d514b0f3Smrg      return 0;
2021d514b0f3Smrg    }
2022d514b0f3Smrg
2023d514b0f3Smrg    POSTACTION(m);
2024d514b0f3Smrg
2025d514b0f3Smrg    if (newp != 0) {
2026d514b0f3Smrg      if (extra != 0) {
2027d514b0f3Smrg        internal_free(m, extra);
2028d514b0f3Smrg      }
2029d514b0f3Smrg      check_inuse_chunk(m, newp);
2030d514b0f3Smrg      return chunk2mem(newp);
2031d514b0f3Smrg    }
2032d514b0f3Smrg    else {
2033d514b0f3Smrg      void* newmem = internal_malloc(m, bytes);
2034d514b0f3Smrg      if (newmem != 0) {
2035d514b0f3Smrg        size_t oc = oldsize - overhead_for(oldp);
2036d514b0f3Smrg        MEMCPY(newmem, oldmem, (oc < bytes)? oc : bytes);
2037d514b0f3Smrg        internal_free(m, oldmem);
2038d514b0f3Smrg      }
2039d514b0f3Smrg      return newmem;
2040d514b0f3Smrg    }
2041d514b0f3Smrg  }
2042d514b0f3Smrg  return 0;
2043d514b0f3Smrg}
2044d514b0f3Smrg
2045d514b0f3Smrg/* --------------------------- memalign support -------------------------- */
2046d514b0f3Smrg
2047d514b0f3Smrgstatic void* internal_memalign(mstate m, size_t alignment, size_t bytes) {
2048d514b0f3Smrg  if (alignment <= MALLOC_ALIGNMENT)    /* Can just use malloc */
2049d514b0f3Smrg    return internal_malloc(m, bytes);
2050d514b0f3Smrg  if (alignment <  MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */
2051d514b0f3Smrg    alignment = MIN_CHUNK_SIZE;
2052d514b0f3Smrg  if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */
2053d514b0f3Smrg    size_t a = MALLOC_ALIGNMENT << 1;
2054d514b0f3Smrg    while (a < alignment) a <<= 1;
2055d514b0f3Smrg    alignment = a;
2056d514b0f3Smrg  }
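  /* (For example, a requested alignment of 24 is not a power of two and the
     loop above would round it up to 32.) */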
2057d514b0f3Smrg
2058d514b0f3Smrg  if (bytes >= MAX_REQUEST - alignment) {
2059d514b0f3Smrg    if (m != 0)  { /* Test isn't needed but avoids compiler warning */
2060d514b0f3Smrg      MALLOC_FAILURE_ACTION;
2061d514b0f3Smrg    }
2062d514b0f3Smrg  }
2063d514b0f3Smrg  else {
2064d514b0f3Smrg    size_t nb = request2size(bytes);
2065d514b0f3Smrg    size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
2066d514b0f3Smrg    char* mem = (char*)internal_malloc(m, req);
2067d514b0f3Smrg    if (mem != 0) {
2068d514b0f3Smrg      void* leader = 0;
2069d514b0f3Smrg      void* trailer = 0;
2070d514b0f3Smrg      mchunkptr p = mem2chunk(mem);
2071d514b0f3Smrg
2072d514b0f3Smrg      if (PREACTION(m)) return 0;
2073d514b0f3Smrg      if ((((size_t)(mem)) % alignment) != 0) { /* misaligned */
2074d514b0f3Smrg        /*
2075d514b0f3Smrg          Find an aligned spot inside chunk.  Since we need to give
2076d514b0f3Smrg          back leading space in a chunk of at least MIN_CHUNK_SIZE, if
2077d514b0f3Smrg          the first calculation places us at a spot with less than
2078d514b0f3Smrg          MIN_CHUNK_SIZE leader, we can move to the next aligned spot.
2079d514b0f3Smrg          We've allocated enough total room so that this is always
2080d514b0f3Smrg          possible.
2081d514b0f3Smrg        */
2082d514b0f3Smrg        char* br = (char*)mem2chunk((size_t)(((size_t)(mem +
2083d514b0f3Smrg                                                       alignment -
2084d514b0f3Smrg                                                       SIZE_T_ONE)) &
2085d514b0f3Smrg                                             -alignment));
2086d514b0f3Smrg        char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)?
2087d514b0f3Smrg          br : br+alignment;
2088d514b0f3Smrg        mchunkptr newp = (mchunkptr)pos;
2089d514b0f3Smrg        size_t leadsize = pos - (char*)(p);
2090d514b0f3Smrg        size_t newsize = chunksize(p) - leadsize;
2091d514b0f3Smrg
2092d514b0f3Smrg        /* Give back the leader, use the rest */
2093d514b0f3Smrg        set_inuse(m, newp, newsize);
2094d514b0f3Smrg        set_inuse(m, p, leadsize);
2095d514b0f3Smrg        leader = chunk2mem(p);
2096d514b0f3Smrg
2097d514b0f3Smrg        p = newp;
2098d514b0f3Smrg      }
2099d514b0f3Smrg
2100d514b0f3Smrg      assert(m->user_data, chunksize(p) >= nb);
2101d514b0f3Smrg      assert(m->user_data, (((size_t)(chunk2mem(p))) % alignment) == 0);
2102d514b0f3Smrg      check_inuse_chunk(m, p);
2103d514b0f3Smrg      POSTACTION(m);
2104d514b0f3Smrg      if (leader != 0) {
2105d514b0f3Smrg        internal_free(m, leader);
2106d514b0f3Smrg      }
2107d514b0f3Smrg      if (trailer != 0) {
2108d514b0f3Smrg        internal_free(m, trailer);
2109d514b0f3Smrg      }
2110d514b0f3Smrg      return chunk2mem(p);
2111d514b0f3Smrg    }
2112d514b0f3Smrg  }
2113d514b0f3Smrg  return 0;
2114d514b0f3Smrg}
2115d514b0f3Smrg
2116d514b0f3Smrg/* ----------------------------- user mspaces ---------------------------- */
2117d514b0f3Smrg
2118d514b0f3Smrgstatic mstate init_user_mstate(char* tbase, size_t tsize, void *user_data) {
2119d514b0f3Smrg  size_t msize = pad_request(sizeof(struct malloc_state));
2120d514b0f3Smrg  mchunkptr mn;
2121d514b0f3Smrg  mchunkptr msp = align_as_chunk(tbase);
2122d514b0f3Smrg  mstate m = (mstate)(chunk2mem(msp));
2123d514b0f3Smrg  MEMCLEAR(m, msize);
2124d514b0f3Smrg  INITIAL_LOCK(&m->mutex);
2125d514b0f3Smrg  msp->head = (msize|PINUSE_BIT|CINUSE_BIT);
2126d514b0f3Smrg  m->seg.base = m->least_addr = tbase;
2127d514b0f3Smrg  m->seg.size = m->footprint = m->max_footprint = tsize;
2128d514b0f3Smrg  m->magic = mparams.magic;
2129d514b0f3Smrg  m->mflags = mparams.default_mflags;
2130d514b0f3Smrg  m->user_data = user_data;
2131d514b0f3Smrg  init_bins(m);
2132d514b0f3Smrg  mn = next_chunk(mem2chunk(m));
2133d514b0f3Smrg  init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE);
2134d514b0f3Smrg  check_top_chunk(m, m->top);
2135d514b0f3Smrg  return m;
2136d514b0f3Smrg}
2137d514b0f3Smrg
2138d514b0f3Smrgmspace create_mspace_with_base(void* base, size_t capacity, int locked, void *user_data) {
2139d514b0f3Smrg  mstate m = 0;
2140d514b0f3Smrg  size_t msize = pad_request(sizeof(struct malloc_state));
2141d514b0f3Smrg  init_mparams(); /* Ensure pagesize etc initialized */
2142d514b0f3Smrg
2143d514b0f3Smrg  if (capacity > msize + TOP_FOOT_SIZE &&
2144d514b0f3Smrg      capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
2145d514b0f3Smrg    m = init_user_mstate((char*)base, capacity, user_data);
2146d514b0f3Smrg    set_lock(m, locked);
2147d514b0f3Smrg  }
2148d514b0f3Smrg  return (mspace)m;
2149d514b0f3Smrg}
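
#if 0
/* Illustrative usage sketch (not compiled): carve an mspace out of a
   caller-supplied buffer, allocate from it, and free the block again.
   The buffer size is an arbitrary example value. */
static void example_mspace_usage(void) {
  static char arena[64 * 1024];
  mspace msp = create_mspace_with_base(arena, sizeof(arena), 0, NULL);
  if (msp != 0) {
    void* p = mspace_malloc(msp, 128);
    if (p != 0)
      mspace_free(msp, p);
  }
}
#endif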
2150d514b0f3Smrg
2151d514b0f3Smrg/*
2152d514b0f3Smrg  mspace versions of routines are near-clones of the global
2153d514b0f3Smrg  versions. This is not so nice but better than the alternatives.
2154d514b0f3Smrg*/
2155d514b0f3Smrg
2156d514b0f3Smrg
2157d514b0f3Smrgvoid* mspace_malloc(mspace msp, size_t bytes) {
2158d514b0f3Smrg  mstate ms = (mstate)msp;
2159d514b0f3Smrg  if (!ok_magic(ms)) {
2160d514b0f3Smrg    USAGE_ERROR_ACTION(ms,ms);
2161d514b0f3Smrg    return 0;
2162d514b0f3Smrg  }
2163d514b0f3Smrg  if (!PREACTION(ms)) {
2164d514b0f3Smrg    void* mem;
2165d514b0f3Smrg    size_t nb;
2166d514b0f3Smrg    if (bytes <= MAX_SMALL_REQUEST) {
2167d514b0f3Smrg      bindex_t idx;
2168d514b0f3Smrg      binmap_t smallbits;
2169d514b0f3Smrg      nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
2170d514b0f3Smrg      idx = small_index(nb);
2171d514b0f3Smrg      smallbits = ms->smallmap >> idx;
2172d514b0f3Smrg
2173d514b0f3Smrg      if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
2174d514b0f3Smrg        mchunkptr b, p;
2175d514b0f3Smrg        idx += ~smallbits & 1;       /* Uses next bin if idx empty */
2176d514b0f3Smrg        b = smallbin_at(ms, idx);
2177d514b0f3Smrg        p = b->fd;
2178d514b0f3Smrg        assert(ms->user_data, chunksize(p) == small_index2size(idx));
2179d514b0f3Smrg        unlink_first_small_chunk(ms, b, p, idx);
2180d514b0f3Smrg        set_inuse_and_pinuse(ms, p, small_index2size(idx));
2181d514b0f3Smrg        mem = chunk2mem(p);
2182d514b0f3Smrg        check_malloced_chunk(ms, mem, nb);
2183d514b0f3Smrg        goto postaction;
2184d514b0f3Smrg      }
2185d514b0f3Smrg
2186d514b0f3Smrg      else if (nb > ms->dvsize) {
2187d514b0f3Smrg        if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
2188d514b0f3Smrg          mchunkptr b, p, r;
2189d514b0f3Smrg          size_t rsize;
2190d514b0f3Smrg          bindex_t i;
2191d514b0f3Smrg          binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
2192d514b0f3Smrg          binmap_t leastbit = least_bit(leftbits);
2193d514b0f3Smrg          compute_bit2idx(leastbit, i);
2194d514b0f3Smrg          b = smallbin_at(ms, i);
2195d514b0f3Smrg          p = b->fd;
2196d514b0f3Smrg          assert(ms->user_data, chunksize(p) == small_index2size(i));
2197d514b0f3Smrg          unlink_first_small_chunk(ms, b, p, i);
2198d514b0f3Smrg          rsize = small_index2size(i) - nb;
2199d514b0f3Smrg          /* Fit here cannot be remainderless if 4-byte sizes */
2200d514b0f3Smrg          if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
2201d514b0f3Smrg            set_inuse_and_pinuse(ms, p, small_index2size(i));
2202d514b0f3Smrg          else {
2203d514b0f3Smrg            set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
2204d514b0f3Smrg            r = chunk_plus_offset(p, nb);
2205d514b0f3Smrg            set_size_and_pinuse_of_free_chunk(r, rsize);
2206d514b0f3Smrg            replace_dv(ms, r, rsize);
2207d514b0f3Smrg          }
2208d514b0f3Smrg          mem = chunk2mem(p);
2209d514b0f3Smrg          check_malloced_chunk(ms, mem, nb);
2210d514b0f3Smrg          goto postaction;
2211d514b0f3Smrg        }
2212d514b0f3Smrg
2213d514b0f3Smrg        else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
2214d514b0f3Smrg          check_malloced_chunk(ms, mem, nb);
2215d514b0f3Smrg          goto postaction;
2216d514b0f3Smrg        }
2217d514b0f3Smrg      }
2218d514b0f3Smrg    }
2219d514b0f3Smrg    else if (bytes >= MAX_REQUEST)
2220d514b0f3Smrg      nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
2221d514b0f3Smrg    else {
2222d514b0f3Smrg      nb = pad_request(bytes);
2223d514b0f3Smrg      if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
2224d514b0f3Smrg        check_malloced_chunk(ms, mem, nb);
2225d514b0f3Smrg        goto postaction;
2226d514b0f3Smrg      }
2227d514b0f3Smrg    }
2228d514b0f3Smrg
    if (nb <= ms->dvsize) {
      size_t rsize = ms->dvsize - nb;
      mchunkptr p = ms->dv;
      if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
        mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
        ms->dvsize = rsize;
        set_size_and_pinuse_of_free_chunk(r, rsize);
        set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
      }
      else { /* exhaust dv */
        size_t dvs = ms->dvsize;
        ms->dvsize = 0;
        ms->dv = 0;
        set_inuse_and_pinuse(ms, p, dvs);
      }
      mem = chunk2mem(p);
      check_malloced_chunk(ms, mem, nb);
      goto postaction;
    }

    else if (nb < ms->topsize) { /* Split top */
      size_t rsize = ms->topsize -= nb;
      mchunkptr p = ms->top;
      mchunkptr r = ms->top = chunk_plus_offset(p, nb);
      r->head = rsize | PINUSE_BIT;
      set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
      mem = chunk2mem(p);
      check_top_chunk(ms, ms->top);
      check_malloced_chunk(ms, mem, nb);
      goto postaction;
    }

    mem = sys_alloc(ms, nb);

  postaction:
    POSTACTION(ms);
    return mem;
  }

  return 0;
}

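/*
  mspace_free: return a chunk to its mspace.  The freed chunk is coalesced
  with the preceding chunk when that one is free (backward consolidation)
  and with the following chunk when possible (forward consolidation into
  the top chunk, the designated victim, or an ordinary free chunk); the
  result is recorded in the dv/top fields or inserted into the appropriate
  bin.  Invalid pointers trigger USAGE_ERROR_ACTION.
*/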
void mspace_free(mspace msp, void* mem) {
  if (mem != 0) {
    mchunkptr p  = mem2chunk(mem);
#if FOOTERS
    mstate fm = get_mstate_for(p);
#else /* FOOTERS */
    mstate fm = (mstate)msp;
#endif /* FOOTERS */
    if (!ok_magic(fm)) {
      USAGE_ERROR_ACTION(fm, p);
      return;
    }
    if (!PREACTION(fm)) {
      check_inuse_chunk(fm, p);
      if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {
        size_t psize = chunksize(p);
        mchunkptr next = chunk_plus_offset(p, psize);
        if (!pinuse(p)) {
          size_t prevsize = p->prev_foot;

          mchunkptr prev = chunk_minus_offset(p, prevsize);
          psize += prevsize;
          p = prev;
          if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
            if (p != fm->dv) {
              unlink_chunk(fm, p, prevsize);
            }
            else if ((next->head & INUSE_BITS) == INUSE_BITS) {
              fm->dvsize = psize;
              set_free_with_pinuse(p, psize, next);
              goto postaction;
            }
          }
          else
            goto erroraction;
        }

        if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
          if (!cinuse(next)) {  /* consolidate forward */
            if (next == fm->top) {
              size_t tsize = fm->topsize += psize;
              fm->top = p;
              p->head = tsize | PINUSE_BIT;
              if (p == fm->dv) {
                fm->dv = 0;
                fm->dvsize = 0;
              }
              goto postaction;
            }
            else if (next == fm->dv) {
              size_t dsize = fm->dvsize += psize;
              fm->dv = p;
              set_size_and_pinuse_of_free_chunk(p, dsize);
              goto postaction;
            }
            else {
              size_t nsize = chunksize(next);
              psize += nsize;
              unlink_chunk(fm, next, nsize);
              set_size_and_pinuse_of_free_chunk(p, psize);
              if (p == fm->dv) {
                fm->dvsize = psize;
                goto postaction;
              }
            }
          }
          else
            set_free_with_pinuse(p, psize, next);
          insert_chunk(fm, p, psize);
          check_free_chunk(fm, p);
          goto postaction;
        }
      }
    erroraction:
      USAGE_ERROR_ACTION(fm, p);
    postaction:
      POSTACTION(fm);
    }
  }
}

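/*
  mspace_calloc: allocate n_elements * elem_size bytes and zero them.  The
  multiplication is checked for overflow only when either operand exceeds
  16 bits, keeping the common case cheap; on overflow the request is forced
  to MAX_SIZE_T so the underlying allocation fails.
*/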
void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) {
  void* mem;
  size_t req = 0;
  mstate ms = (mstate)msp;
  if (!ok_magic(ms)) {
    USAGE_ERROR_ACTION(ms,ms);
    return 0;
  }
  if (n_elements != 0) {
    req = n_elements * elem_size;
    if (((n_elements | elem_size) & ~(size_t)0xffff) &&
        (req / n_elements != elem_size))
      req = MAX_SIZE_T; /* force downstream failure on overflow */
  }
  mem = internal_malloc(ms, req);
  if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
    MEMCLEAR(mem, req);
  return mem;
}

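/*
  mspace_realloc: a NULL oldmem behaves like mspace_malloc; with
  REALLOC_ZERO_BYTES_FREES defined, a zero-byte request frees oldmem and
  returns NULL.  Otherwise the request is handed to internal_realloc on the
  owning mspace.
*/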
void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) {
  if (oldmem == 0)
    return mspace_malloc(msp, bytes);
#ifdef REALLOC_ZERO_BYTES_FREES
  if (bytes == 0) {
    mspace_free(msp, oldmem);
    return 0;
  }
#endif /* REALLOC_ZERO_BYTES_FREES */
  else {
#if FOOTERS
    mchunkptr p  = mem2chunk(oldmem);
    mstate ms = get_mstate_for(p);
#else /* FOOTERS */
    mstate ms = (mstate)msp;
#endif /* FOOTERS */
    if (!ok_magic(ms)) {
      USAGE_ERROR_ACTION(ms,ms);
      return 0;
    }
    return internal_realloc(ms, oldmem, bytes);
  }
}

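/*
  mspace_memalign: allocate bytes at the requested alignment; the alignment
  handling (normalizing it to a usable power of two) is left to
  internal_memalign.
*/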
void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) {
  mstate ms = (mstate)msp;
  if (!ok_magic(ms)) {
    USAGE_ERROR_ACTION(ms,ms);
    return 0;
  }
  return internal_memalign(ms, alignment, bytes);
}

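/*
  mspace_malloc_stats_return: report usage statistics through the three
  out-parameters (apparently the maximum footprint, the current footprint,
  and the bytes currently in use, as gathered by internal_malloc_stats);
  any of the pointers may be NULL, as mspace_malloc_stats() below shows.
*/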
void mspace_malloc_stats_return(mspace msp, size_t *ret_maxfp, size_t *ret_fp,
                                size_t *ret_used)
{
  mstate ms = (mstate)msp;
  if (ok_magic(ms)) {
    internal_malloc_stats(ms, ret_maxfp, ret_fp, ret_used);
  }
  else {
    USAGE_ERROR_ACTION(ms,ms);
  }
}


void mspace_malloc_stats(mspace msp) {
  mspace_malloc_stats_return(msp, NULL, NULL, NULL);
}

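/*
  mspace_footprint / mspace_max_footprint: current and peak number of bytes
  obtained from the system for this mspace; both return 0 and raise
  USAGE_ERROR_ACTION when the magic check fails.
*/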
size_t mspace_footprint(mspace msp) {
  size_t result = 0;
  mstate ms = (mstate)msp;
  if (ok_magic(ms)) {
    result = ms->footprint;
  }
  else {
    USAGE_ERROR_ACTION(ms,ms);
  }
  return result;
}


size_t mspace_max_footprint(mspace msp) {
  size_t result = 0;
  mstate ms = (mstate)msp;
  if (ok_magic(ms)) {
    result = ms->max_footprint;
  }
  else {
    USAGE_ERROR_ACTION(ms,ms);
  }
  return result;
}


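/*
  mspace_mallinfo (compiled only when NO_MALLINFO is 0): return SVID/XPG-style
  mallinfo statistics for this mspace, computed by internal_mallinfo.
*/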
#if !NO_MALLINFO
struct mallinfo mspace_mallinfo(mspace msp) {
  mstate ms = (mstate)msp;
  if (!ok_magic(ms)) {
    USAGE_ERROR_ACTION(ms,ms);
  }
  return internal_mallinfo(ms);
}
#endif /* NO_MALLINFO */

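/*
  mspace_mallopt: adjust a malloc parameter (e.g. M_GRANULARITY) via
  change_mparam; the underlying settings are global, shared by all mspaces,
  rather than per-mspace.
*/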
int mspace_mallopt(int param_number, int value) {
  return change_mparam(param_number, value);
}

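/*
  Usage sketch (illustrative only, not compiled here).  It assumes the
  create_mspace_with_base()/destroy_mspace() constructors declared in
  "mspace.h" (the conventional dlmalloc mspace API); if this port names
  them differently, adjust accordingly.

    #include "mspace.h"

    static char pool[64 * 1024];       // backing storage for the mspace

    void example(void)
    {
        mspace msp = create_mspace_with_base(pool, sizeof(pool), 0);

        void *p = mspace_malloc(msp, 128);
        void *q = mspace_calloc(msp, 16, sizeof(int));
        p = mspace_realloc(msp, p, 256);

        mspace_free(msp, q);
        mspace_free(msp, p);

        destroy_mspace(msp);
    }
*/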