1// based on dlmalloc from Doug Lea
2
3
4// quote from the Doug Lea original file
5    /*
6      This is a version (aka dlmalloc) of malloc/free/realloc written by
7      Doug Lea and released to the public domain, as explained at
8      http://creativecommons.org/licenses/publicdomain.  Send questions,
9      comments, complaints, performance data, etc to dl@cs.oswego.edu
10
11    * Version 2.8.3 Thu Sep 22 11:16:15 2005  Doug Lea  (dl at gee)
12
13       Note: There may be an updated version of this malloc obtainable at
14               ftp://gee.cs.oswego.edu/pub/misc/malloc.c
15             Check before installing!
16    */
17
18
19#include <stdint.h>
20#include <stdlib.h>
21#include <string.h>
22#include "mspace.h"
23
/* ---- Compile-time configuration for this dlmalloc port ---- */

/* All user-visible allocations are aligned to 8 bytes. */
#define MALLOC_ALIGNMENT ((size_t)8U)
/* Single-threaded build: no mutexes are compiled in. */
#define USE_LOCKS 0
/* Fixed page size -- no runtime query (bare-metal friendly). */
#define malloc_getpagesize ((size_t)4096U)
/* Unit in which system memory is requested. */
#define DEFAULT_GRANULARITY malloc_getpagesize
#define MAX_SIZE_T (~(size_t)0)
/* Nothing extra happens on allocation failure (caller just sees NULL). */
#define MALLOC_FAILURE_ACTION
#define MALLINFO_FIELD_TYPE size_t
/* Chunk footers / security checks compiled out. */
#define FOOTERS 0
#define INSECURE 0
#define PROCEED_ON_ERROR 0
#define DEBUG 0
#define ABORT_ON_ASSERT_FAILURE 1
/* Fatal errors are routed through the pluggable abort hook below. */
#define ABORT(user_data) abort_func(user_data)
#define USE_BUILTIN_FFS 0
#define USE_DEV_RANDOM 0
/* PRINT expects a double-parenthesized argument list:
   PRINT((user_data, "fmt", ...)) -- `params` carries its own parens. */
#define PRINT(params) print_func params
40
41
/* Thin wrappers over the C library memory primitives so a port can
   substitute its own implementations in one place. */
#define MEMCPY(dest, src, n) memcpy((dest), (src), (n))
/* FIX: the original definition carried a trailing ';' inside the macro,
   which breaks call sites such as `if (cond) MEMCLEAR(p, n); else ...`
   (the embedded semicolon terminates the `if` before the `else`).
   Defined without it so the macro behaves like an ordinary call. */
#define MEMCLEAR(dest, n) memset((dest), 0, (n))


/* mallopt()-style tag for the system allocation granularity; negative
   so it cannot collide with size-valued parameters. */
#define M_GRANULARITY        (-1)
47
/*
 * Default fatal-error hook: spin forever.  Suitable as a last resort on
 * embedded targets with no exit path; install a real handler with
 * mspace_set_abort_func() to get diagnostics instead.
 *
 * user_data: opaque pointer forwarded by ABORT(); unused here.
 */
void __attribute__ ((__noreturn__)) default_abort_func(void *user_data)
{
    (void)user_data;  /* silence unused-parameter warnings */
    for (;;);
}
52
/*
 * Default diagnostic hook: discard all output.  Install a real handler
 * with mspace_set_print_func() to capture messages.
 *
 * user_data: opaque pointer supplied via PRINT(); unused here.
 * format:    printf-style format string; unused here.
 */
void default_print_func(void *user_data, const char *format, ...)
{
    (void)user_data;  /* silence unused-parameter warnings */
    (void)format;
}
56
/* Active hooks; start with the spin/discard defaults defined above. */
static mspace_abort_t abort_func = default_abort_func;
static mspace_print_t print_func = default_print_func;

/* Install a custom fatal-error handler (invoked via ABORT(); must not
   return).  NOTE(review): no NULL check -- passing NULL would crash on
   the next ABORT(); confirm whether callers guarantee non-NULL. */
void mspace_set_abort_func(mspace_abort_t f)
{
    abort_func = f;
}

/* Install a custom diagnostic-output handler (invoked via PRINT()). */
void mspace_set_print_func(mspace_print_t f)
{
    print_func = f;
}
69
70/* ------------------------ Mallinfo declarations ------------------------ */
71
72#if !NO_MALLINFO
73/*
74  This version of malloc supports the standard SVID/XPG mallinfo
75  routine that returns a struct containing usage properties and
76  statistics. It should work on any system that has a
77  /usr/include/malloc.h defining struct mallinfo.  The main
78  declaration needed is the mallinfo struct that is returned (by-copy)
  by mallinfo().  The mallinfo struct contains a bunch of fields that
  are not even meaningful in this version of malloc.  These fields
  are instead filled by mallinfo() with other numbers that might be of
82  interest.
83
84  HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
85  /usr/include/malloc.h file that includes a declaration of struct
86  mallinfo.  If so, it is included; else a compliant version is
87  declared below.  These must be precisely the same for mallinfo() to
88  work.  The original SVID version of this struct, defined on most
89  systems with mallinfo, declares all fields as ints. But some others
90  define as unsigned long. If your system defines the fields using a
91  type of different width than listed here, you MUST #include your
92  system version and #define HAVE_USR_INCLUDE_MALLOC_H.
93*/
94
95/* #define HAVE_USR_INCLUDE_MALLOC_H */
96
97
/* SVID/XPG-compatible statistics record returned by mallinfo().
   Field widths follow MALLINFO_FIELD_TYPE (size_t in this build); they
   must match any system <malloc.h> definition if that header is used. */
struct mallinfo {
  MALLINFO_FIELD_TYPE arena;    /* non-mmapped space allocated from system */
  MALLINFO_FIELD_TYPE ordblks;  /* number of free chunks */
  MALLINFO_FIELD_TYPE smblks;   /* always 0 */
  MALLINFO_FIELD_TYPE hblks;    /* always 0 */
  MALLINFO_FIELD_TYPE hblkhd;   /* space in mmapped regions */
  MALLINFO_FIELD_TYPE usmblks;  /* maximum total allocated space */
  MALLINFO_FIELD_TYPE fsmblks;  /* always 0 */
  MALLINFO_FIELD_TYPE uordblks; /* total allocated space */
  MALLINFO_FIELD_TYPE fordblks; /* total free space */
  MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */
};
110
111#endif /* NO_MALLINFO */
112
113
114
/* Project-local two-argument assert carrying the mspace user_data so a
   failure can be routed to the registered abort hook via ABORT().
   FIX: the original tested `#ifdef DEBUG`, which is true even though
   DEBUG is #defined to 0 above -- so assertions (and the ABORT spin
   loop) were compiled into release builds.  Test the value instead. */
#if DEBUG
#if ABORT_ON_ASSERT_FAILURE
/* do/while(0) keeps the macro safe as a statement inside if/else. */
#define assert(user_data, x) do { if (!(x)) ABORT(user_data); } while (0)
#else /* ABORT_ON_ASSERT_FAILURE */
/* NOTE(review): <assert.h> provides a one-argument assert(), which does
   not match the two-argument uses in this file -- confirm before ever
   building with DEBUG != 0 and ABORT_ON_ASSERT_FAILURE == 0. */
#include <assert.h>
#endif /* ABORT_ON_ASSERT_FAILURE */
#else  /* DEBUG */
#define assert(user_data, x)
#endif /* DEBUG */
124
/* ------------------- size_t and alignment properties -------------------- */

/* The byte and bit size of a size_t */
#define SIZE_T_SIZE         (sizeof(size_t))
#define SIZE_T_BITSIZE      (sizeof(size_t) << 3)

/* Some constants coerced to size_t */
/* Annoying but necessary to avoid errors on some platforms */
#define SIZE_T_ZERO         ((size_t)0)
#define SIZE_T_ONE          ((size_t)1)
#define SIZE_T_TWO          ((size_t)2)
#define TWO_SIZE_T_SIZES    (SIZE_T_SIZE<<1)
#define FOUR_SIZE_T_SIZES   (SIZE_T_SIZE<<2)
#define SIX_SIZE_T_SIZES    (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
#define HALF_MAX_SIZE_T     (MAX_SIZE_T / 2U)

/* The bit mask value corresponding to MALLOC_ALIGNMENT */
#define CHUNK_ALIGN_MASK    (MALLOC_ALIGNMENT - SIZE_T_ONE)

/* True if address a has acceptable alignment */
#define is_aligned(A)       (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)

/* the number of bytes to offset an address to align it: 0 if already
   aligned, otherwise the distance up to the next aligned address
   (valid because MALLOC_ALIGNMENT is a power of two) */
#define align_offset(A)\
 ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
  ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
151
/* --------------------------- Lock preliminaries ------------------------ */

#if USE_LOCKS

/*
  When locks are defined, there are up to two global locks:

  * If HAVE_MORECORE, morecore_mutex protects sequences of calls to
    MORECORE.  In many cases sys_alloc requires two calls, that should
    not be interleaved with calls by other threads.  This does not
    protect against direct calls to MORECORE by other threads not
    using this lock, so there is still code to cope the best we can on
    interference.

  * magic_init_mutex ensures that mparams.magic and other
    unique mparams values are initialized only once.
*/


#define USE_LOCK_BIT               (2U)
#else  /* USE_LOCKS */
/* USE_LOCKS is 0 in this build, so all locking compiles away to nothing. */
#define USE_LOCK_BIT               (0U)
#define INITIAL_LOCK(l)
#endif /* USE_LOCKS */

#if USE_LOCKS
#define ACQUIRE_MAGIC_INIT_LOCK()  ACQUIRE_LOCK(&magic_init_mutex);
#define RELEASE_MAGIC_INIT_LOCK()  RELEASE_LOCK(&magic_init_mutex);
#else  /* USE_LOCKS */
/* No-op stand-ins guarding the one-time init of mparams.magic. */
#define ACQUIRE_MAGIC_INIT_LOCK()
#define RELEASE_MAGIC_INIT_LOCK()
#endif /* USE_LOCKS */
184
185
186
187/* -----------------------  Chunk representations ------------------------ */
188
189/*
190  (The following includes lightly edited explanations by Colin Plumb.)
191
192  The malloc_chunk declaration below is misleading (but accurate and
193  necessary).  It declares a "view" into memory allowing access to
194  necessary fields at known offsets from a given base.
195
196  Chunks of memory are maintained using a `boundary tag' method as
197  originally described by Knuth.  (See the paper by Paul Wilson
198  ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such
199  techniques.)  Sizes of free chunks are stored both in the front of
200  each chunk and at the end.  This makes consolidating fragmented
201  chunks into bigger chunks fast.  The head fields also hold bits
202  representing whether chunks are free or in use.
203
204  Here are some pictures to make it clearer.  They are "exploded" to
205  show that the state of a chunk can be thought of as extending from
206  the high 31 bits of the head field of its header through the
207  prev_foot and PINUSE_BIT bit of the following chunk header.
208
209  A chunk that's in use looks like:
210
211   chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
212           | Size of previous chunk (if P = 1)                             |
213           +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
214         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
215         | Size of this chunk                                         1| +-+
216   mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
217         |                                                               |
218         +-                                                             -+
219         |                                                               |
220         +-                                                             -+
221         |                                                               :
222         +-      size - sizeof(size_t) available payload bytes          -+
223         :                                                               |
224 chunk-> +-                                                             -+
225         |                                                               |
226         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
227       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1|
228       | Size of next chunk (may or may not be in use)               | +-+
229 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
230
231    And if it's free, it looks like this:
232
233   chunk-> +-                                                             -+
234           | User payload (must be in use, or we would have merged!)       |
235           +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
236         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
237         | Size of this chunk                                         0| +-+
238   mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
239         | Next pointer                                                  |
240         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
241         | Prev pointer                                                  |
242         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
243         |                                                               :
244         +-      size - sizeof(struct chunk) unused bytes               -+
245         :                                                               |
246 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
247         | Size of this chunk                                            |
248         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
249       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0|
250       | Size of next chunk (must be in use, or we would have merged)| +-+
251 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
252       |                                                               :
253       +- User payload                                                -+
254       :                                                               |
255       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
256                                                                     |0|
257                                                                     +-+
258  Note that since we always merge adjacent free chunks, the chunks
259  adjacent to a free chunk must be in use.
260
261  Given a pointer to a chunk (which can be derived trivially from the
262  payload pointer) we can, in O(1) time, find out whether the adjacent
263  chunks are free, and if so, unlink them from the lists that they
264  are on and merge them with the current chunk.
265
266  Chunks always begin on even word boundaries, so the mem portion
267  (which is returned to the user) is also on an even word boundary, and
268  thus at least double-word aligned.
269
270  The P (PINUSE_BIT) bit, stored in the unused low-order bit of the
271  chunk size (which is always a multiple of two words), is an in-use
272  bit for the *previous* chunk.  If that bit is *clear*, then the
273  word before the current chunk size contains the previous chunk
274  size, and can be used to find the front of the previous chunk.
275  The very first chunk allocated always has this bit set, preventing
276  access to non-existent (or non-owned) memory. If pinuse is set for
277  any given chunk, then you CANNOT determine the size of the
278  previous chunk, and might even get a memory addressing fault when
279  trying to do so.
280
281  The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of
282  the chunk size redundantly records whether the current chunk is
283  inuse. This redundancy enables usage checks within free and realloc,
284  and reduces indirection when freeing and consolidating chunks.
285
286  Each freshly allocated chunk must have both cinuse and pinuse set.
287  That is, each allocated chunk borders either a previously allocated
288  and still in-use chunk, or the base of its memory arena. This is
289  ensured by making all allocations from the `lowest' part of any
290  found chunk.  Further, no free chunk physically borders another one,
291  so each free chunk is known to be preceded and followed by either
292  inuse chunks or the ends of memory.
293
294  Note that the `foot' of the current chunk is actually represented
295  as the prev_foot of the NEXT chunk. This makes it easier to
296  deal with alignments etc but can be very confusing when trying
297  to extend or adapt this code.
298
299  The exceptions to all this are
300
301     1. The special chunk `top' is the top-most available chunk (i.e.,
302        the one bordering the end of available memory). It is treated
303        specially.  Top is never included in any bin, is used only if
304        no other chunk is available, and is released back to the
305        system if it is very large (see M_TRIM_THRESHOLD).  In effect,
306        the top chunk is treated as larger (and thus less well
307        fitting) than any other available chunk.  The top chunk
308        doesn't update its trailing size field since there is no next
309        contiguous chunk that would have to index off it. However,
310        space is still allocated for it (TOP_FOOT_SIZE) to enable
311        separation or merging when space is extended.
312
     2. Chunks allocated via mmap, which have the lowest-order bit
314        (IS_MMAPPED_BIT) set in their prev_foot fields, and do not set
315        PINUSE_BIT in their head fields.  Because they are allocated
316        one-by-one, each must carry its own prev_foot field, which is
317        also used to hold the offset this chunk has within its mmapped
318        region, which is needed to preserve alignment. Each mmapped
319        chunk is trailed by the first two fields of a fake next-chunk
320        for sake of usage checks.
321
322*/
323
/* Overlaid "view" of a chunk header (see the long comment above).
   Only prev_foot/head are meaningful for an in-use chunk; fd/bk overlay
   the user payload and are valid only while the chunk is free and
   linked into a smallbin list.  Layout must not change. */
struct malloc_chunk {
  size_t               prev_foot;  /* Size of previous chunk (if free).  */
  size_t               head;       /* Size and inuse bits. */
  struct malloc_chunk* fd;         /* double links -- used only if free. */
  struct malloc_chunk* bk;
};

typedef struct malloc_chunk  mchunk;
typedef struct malloc_chunk* mchunkptr;
typedef struct malloc_chunk* sbinptr;  /* The type of bins of chunks */
typedef unsigned int bindex_t;         /* Described below */
typedef unsigned int binmap_t;         /* Described below */
typedef unsigned int flag_t;           /* The type of various bit flag sets */
337
338
/* ------------------- Chunks sizes and alignments ----------------------- */

#define MCHUNK_SIZE         (sizeof(mchunk))

/* Per-chunk bookkeeping cost: one size_t (head) normally, two with
   FOOTERS (footers are compiled out in this build). */
#if FOOTERS
#define CHUNK_OVERHEAD      (TWO_SIZE_T_SIZES)
#else /* FOOTERS */
#define CHUNK_OVERHEAD      (SIZE_T_SIZE)
#endif /* FOOTERS */

/* The smallest size we can malloc is an aligned minimal chunk */
#define MIN_CHUNK_SIZE\
  ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)

/* conversion from malloc headers to user pointers, and back: the user
   pointer begins right after the prev_foot/head pair */
#define chunk2mem(p)        ((void*)((char*)(p)       + TWO_SIZE_T_SIZES))
#define mem2chunk(mem)      ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
/* chunk associated with aligned address A */
#define align_as_chunk(A)   (mchunkptr)((A) + align_offset(chunk2mem(A)))

/* Bounds on request (not chunk) sizes.  MAX_REQUEST is deliberately far
   below MAX_SIZE_T so that pad_request() cannot overflow (presumably
   the standard dlmalloc bound -- confirm against upstream malloc.c). */
#define MAX_REQUEST         ((-MIN_CHUNK_SIZE) << 2)
#define MIN_REQUEST         (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)

/* pad request bytes into a usable size */
#define pad_request(req) \
   (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)

/* pad request, checking for minimum (but not maximum) */
#define request2size(req) \
  (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
370
/* ------------------ Operations on head and foot fields ----------------- */

/*
  The head field of a chunk is or'ed with PINUSE_BIT when previous
  adjacent chunk in use, and or'ed with CINUSE_BIT if this chunk is in
  use. If the chunk was obtained with mmap, the prev_foot field has
  IS_MMAPPED_BIT set, otherwise holding the offset of the base of the
  mmapped region to the base of the chunk.
*/

#define PINUSE_BIT          (SIZE_T_ONE)
#define CINUSE_BIT          (SIZE_T_TWO)
#define INUSE_BITS          (PINUSE_BIT|CINUSE_BIT)

/* Head value for fenceposts */
#define FENCEPOST_HEAD      (INUSE_BITS|SIZE_T_SIZE)

/* extraction of fields from head words */
#define cinuse(p)           ((p)->head & CINUSE_BIT)
#define pinuse(p)           ((p)->head & PINUSE_BIT)
#define chunksize(p)        ((p)->head & ~(INUSE_BITS))

#define clear_pinuse(p)     ((p)->head &= ~PINUSE_BIT)
#define clear_cinuse(p)     ((p)->head &= ~CINUSE_BIT)

/* Treat space at ptr +/- offset as a chunk */
#define chunk_plus_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))
#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))

/* Ptr to next or previous physical malloc_chunk.
   NOTE: prev_chunk is valid only when pinuse(p) is clear; otherwise
   prev_foot does not hold the previous chunk's size (see above). */
#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~INUSE_BITS)))
#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))

/* extract next chunk's pinuse bit */
#define next_pinuse(p)  ((next_chunk(p)->head) & PINUSE_BIT)

/* Get/set size at footer: the footer of chunk p with size s lives in
   the prev_foot field of the chunk that physically follows it */
#define get_foot(p, s)  (((mchunkptr)((char*)(p) + (s)))->prev_foot)
#define set_foot(p, s)  (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))

/* Set size, pinuse bit, and foot */
#define set_size_and_pinuse_of_free_chunk(p, s)\
  ((p)->head = (s|PINUSE_BIT), set_foot(p, s))

/* Set size, pinuse bit, foot, and clear next pinuse */
#define set_free_with_pinuse(p, s, n)\
  (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))

/* Get the internal overhead associated with chunk p (constant here
   because FOOTERS and mmapped-chunk variants are compiled out) */
#define overhead_for(p) CHUNK_OVERHEAD

/* Return true if malloced space is not necessarily cleared */
#define calloc_must_clear(p) (1)
424
425
426/* ---------------------- Overlaid data structures ----------------------- */
427
428/*
429  When chunks are not in use, they are treated as nodes of either
430  lists or trees.
431
432  "Small"  chunks are stored in circular doubly-linked lists, and look
433  like this:
434
435    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
436            |             Size of previous chunk                            |
437            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
438    `head:' |             Size of chunk, in bytes                         |P|
439      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
440            |             Forward pointer to next chunk in list             |
441            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
442            |             Back pointer to previous chunk in list            |
443            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
444            |             Unused space (may be 0 bytes long)                .
445            .                                                               .
446            .                                                               |
447nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
448    `foot:' |             Size of chunk, in bytes                           |
449            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
450
451  Larger chunks are kept in a form of bitwise digital trees (aka
452  tries) keyed on chunksizes.  Because malloc_tree_chunks are only for
453  free chunks greater than 256 bytes, their size doesn't impose any
454  constraints on user chunk sizes.  Each node looks like:
455
456    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
457            |             Size of previous chunk                            |
458            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
459    `head:' |             Size of chunk, in bytes                         |P|
460      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
461            |             Forward pointer to next chunk of same size        |
462            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
463            |             Back pointer to previous chunk of same size       |
464            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
465            |             Pointer to left child (child[0])                  |
466            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
467            |             Pointer to right child (child[1])                 |
468            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
469            |             Pointer to parent                                 |
470            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
471            |             bin index of this chunk                           |
472            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
473            |             Unused space                                      .
474            .                                                               |
475nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
476    `foot:' |             Size of chunk, in bytes                           |
477            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
478
479  Each tree holding treenodes is a tree of unique chunk sizes.  Chunks
480  of the same size are arranged in a circularly-linked list, with only
481  the oldest chunk (the next to be used, in our FIFO ordering)
482  actually in the tree.  (Tree members are distinguished by a non-null
483  parent pointer.)  If a chunk with the same size as an existing node
484  is inserted, it is linked off the existing node using pointers that
485  work in the same way as fd/bk pointers of small chunks.
486
487  Each tree contains a power of 2 sized range of chunk sizes (the
488  smallest is 0x100 <= x < 0x180), which is divided in half at each
489  tree level, with the chunks in the smaller half of the range (0x100
  <= x < 0x140 for the top node) in the left subtree and the larger
491  half (0x140 <= x < 0x180) in the right subtree.  This is, of course,
492  done by inspecting individual bits.
493
494  Using these rules, each node's left subtree contains all smaller
495  sizes than its right subtree.  However, the node at the root of each
496  subtree has no particular ordering relationship to either.  (The
497  dividing line between the subtree sizes is based on trie relation.)
498  If we remove the last chunk of a given size from the interior of the
499  tree, we need to replace it with a leaf node.  The tree ordering
500  rules permit a node to be replaced by any leaf below it.
501
502  The smallest chunk in a tree (a common operation in a best-fit
503  allocator) can be found by walking a path to the leftmost leaf in
504  the tree.  Unlike a usual binary tree, where we follow left child
505  pointers until we reach a null, here we follow the right child
506  pointer any time the left one is null, until we reach a leaf with
507  both child pointers null. The smallest chunk in the tree will be
508  somewhere along that path.
509
510  The worst case number of steps to add, find, or remove a node is
511  bounded by the number of bits differentiating chunks within
512  bins. Under current bin calculations, this ranges from 6 up to 21
513  (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case
514  is of course much better.
515*/
516
/* Tree-bin chunk: overlays malloc_chunk and extends it with tree links.
   The extra fields occupy what would be user payload, so this form is
   valid only while the chunk is free (and large enough for a treebin).
   Layout must not change. */
struct malloc_tree_chunk {
  /* The first four fields must be compatible with malloc_chunk */
  size_t                    prev_foot;
  size_t                    head;
  struct malloc_tree_chunk* fd;
  struct malloc_tree_chunk* bk;

  struct malloc_tree_chunk* child[2];  /* left/right subtrees */
  struct malloc_tree_chunk* parent;    /* non-null only for tree members */
  bindex_t                  index;     /* treebin index holding this chunk */
};

typedef struct malloc_tree_chunk  tchunk;
typedef struct malloc_tree_chunk* tchunkptr;
typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */

/* A little helper macro for trees: first non-null child, left preferred */
#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
535
536/* ----------------------------- Segments -------------------------------- */
537
538/*
539  Each malloc space may include non-contiguous segments, held in a
540  list headed by an embedded malloc_segment record representing the
541  top-most space. Segments also include flags holding properties of
542  the space. Large chunks that are directly allocated by mmap are not
543  included in this list. They are instead independently created and
544  destroyed without otherwise keeping track of them.
545
546  Segment management mainly comes into play for spaces allocated by
547  MMAP.  Any call to MMAP might or might not return memory that is
548  adjacent to an existing segment.  MORECORE normally contiguously
549  extends the current space, so this space is almost always adjacent,
550  which is simpler and faster to deal with. (This is why MORECORE is
551  used preferentially to MMAP when both are available -- see
552  sys_alloc.)  When allocating using MMAP, we don't use any of the
553  hinting mechanisms (inconsistently) supported in various
554  implementations of unix mmap, or distinguish reserving from
555  committing memory. Instead, we just ask for space, and exploit
556  contiguity when we get it.  It is probably possible to do
557  better than this on some systems, but no general scheme seems
558  to be significantly better.
559
560  Management entails a simpler variant of the consolidation scheme
561  used for chunks to reduce fragmentation -- new adjacent memory is
562  normally prepended or appended to an existing segment. However,
563  there are limitations compared to chunk consolidation that mostly
564  reflect the fact that segment processing is relatively infrequent
565  (occurring only when getting memory from system) and that we
566  don't expect to have huge numbers of segments:
567
568  * Segments are not indexed, so traversal requires linear scans.  (It
569    would be possible to index these, but is not worth the extra
570    overhead and complexity for most programs on most platforms.)
571  * New segments are only appended to old ones when holding top-most
572    memory; if they cannot be prepended to others, they are held in
573    different segments.
574
575  Except for the top-most segment of an mstate, each segment record
576  is kept at the tail of its segment. Segments are added by pushing
577  segment records onto the list headed by &mstate.seg for the
578  containing mstate.
579
580  Segment flags control allocation/merge/deallocation policies:
581  * If EXTERN_BIT set, then we did not allocate this segment,
582    and so should not try to deallocate or merge with others.
583    (This currently holds only for the initial segment passed
584    into create_mspace_with_base.)
585  * If IS_MMAPPED_BIT set, the segment may be merged with
586    other surrounding mmapped segments and trimmed/de-allocated
587    using munmap.
588  * If neither bit is set, then the segment was obtained using
589    MORECORE so can be merged with surrounding MORECORE'd segments
590    and deallocated/trimmed using MORECORE with negative arguments.
591*/
592
/* Record describing one contiguous run of memory.  Per the comment
   above, each record (except the top-most one embedded in the mstate)
   is kept at the tail of the segment it describes. */
struct malloc_segment {
  char*        base;             /* base address */
  size_t       size;             /* allocated size */
  struct malloc_segment* next;   /* ptr to next segment */
};

typedef struct malloc_segment  msegment;
typedef struct malloc_segment* msegmentptr;
601
602/* ---------------------------- malloc_state ----------------------------- */
603
604/*
605   A malloc_state holds all of the bookkeeping for a space.
606   The main fields are:
607
608  Top
609    The topmost chunk of the currently active segment. Its size is
610    cached in topsize.  The actual size of topmost space is
611    topsize+TOP_FOOT_SIZE, which includes space reserved for adding
612    fenceposts and segment records if necessary when getting more
613    space from the system.  The size at which to autotrim top is
614    cached from mparams in trim_check, except that it is disabled if
615    an autotrim fails.
616
617  Designated victim (dv)
618    This is the preferred chunk for servicing small requests that
619    don't have exact fits.  It is normally the chunk split off most
620    recently to service another small request.  Its size is cached in
621    dvsize. The link fields of this chunk are not maintained since it
622    is not kept in a bin.
623
624  SmallBins
625    An array of bin headers for free chunks.  These bins hold chunks
626    with sizes less than MIN_LARGE_SIZE bytes. Each bin contains
627    chunks of all the same size, spaced 8 bytes apart.  To simplify
628    use in double-linked lists, each bin header acts as a malloc_chunk
629    pointing to the real first node, if it exists (else pointing to
630    itself).  This avoids special-casing for headers.  But to avoid
631    waste, we allocate only the fd/bk pointers of bins, and then use
632    repositioning tricks to treat these as the fields of a chunk.
633
634  TreeBins
635    Treebins are pointers to the roots of trees holding a range of
636    sizes. There are 2 equally spaced treebins for each power of two
637    from TREE_SHIFT to TREE_SHIFT+16. The last bin holds anything
638    larger.
639
640  Bin maps
641    There is one bit map for small bins ("smallmap") and one for
    treebins ("treemap").  Each bin sets its bit when non-empty, and
643    clears the bit when empty.  Bit operations are then used to avoid
644    bin-by-bin searching -- nearly all "search" is done without ever
645    looking at bins that won't be selected.  The bit maps
646    conservatively use 32 bits per map word, even if on 64bit system.
647    For a good description of some of the bit-based techniques used
648    here, see Henry S. Warren Jr's book "Hacker's Delight" (and
649    supplement at http://hackersdelight.org/). Many of these are
650    intended to reduce the branchiness of paths through malloc etc, as
651    well as to reduce the number of memory locations read or written.
652
653  Segments
654    A list of segments headed by an embedded malloc_segment record
655    representing the initial space.
656
657  Address check support
658    The least_addr field is the least address ever obtained from
659    MORECORE or MMAP. Attempted frees and reallocs of any address less
660    than this are trapped (unless INSECURE is defined).
661
662  Magic tag
663    A cross-check field that should always hold same value as mparams.magic.
664
665  Flags
666    Bits recording whether to use MMAP, locks, or contiguous MORECORE
667
668  Statistics
669    Each space keeps track of current and maximum system memory
670    obtained via MORECORE or MMAP.
671
672  Locking
673    If USE_LOCKS is defined, the "mutex" lock is acquired and released
674    around every public call using this mspace.
675*/
676
/* Bin types, widths and sizes */
#define NSMALLBINS        (32U)
#define NTREEBINS         (32U)
#define SMALLBIN_SHIFT    (3U)
#define SMALLBIN_WIDTH    (SIZE_T_ONE << SMALLBIN_SHIFT)
#define TREEBIN_SHIFT     (8U)
/* Chunks of MIN_LARGE_SIZE (1 << TREEBIN_SHIFT) bytes and up live in treebins */
#define MIN_LARGE_SIZE    (SIZE_T_ONE << TREEBIN_SHIFT)
#define MAX_SMALL_SIZE    (MIN_LARGE_SIZE - SIZE_T_ONE)
#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)

/* Per-space allocator state; see the long structure comment above for
   the role of each group of fields. */
struct malloc_state {
  binmap_t   smallmap;      /* bitmap of non-empty smallbins */
  binmap_t   treemap;       /* bitmap of non-empty treebins */
  size_t     dvsize;        /* size of dv chunk (0 when there is none) */
  size_t     topsize;       /* size of top chunk */
  char*      least_addr;    /* least address ever obtained from the system */
  mchunkptr  dv;            /* the "designated victim" preferred-split chunk */
  mchunkptr  top;           /* topmost chunk; 0 until the space is initialized */
  size_t     magic;         /* cross-check: must equal mparams.magic */
  mchunkptr  smallbins[(NSMALLBINS+1)*2];  /* fd/bk pairs only; see smallbin_at */
  tbinptr    treebins[NTREEBINS];          /* roots of the size-range trees */
  size_t     footprint;     /* current system memory obtained */
  size_t     max_footprint; /* high-water mark of footprint */
  flag_t     mflags;        /* per-space flags (locking etc.) */
  void      *user_data;     /* opaque pointer handed to abort/print hooks */
#if USE_LOCKS
  MLOCK_T    mutex;     /* locate lock among fields that rarely change */
#endif /* USE_LOCKS */
  msegment   seg;           /* embedded record heading the segment list */
};
707
typedef struct malloc_state*    mstate;

/* ------------- Global malloc_state and malloc_params ------------------- */

/*
  malloc_params holds global properties, including those that can be
  dynamically set using mallopt. There is a single instance, mparams,
  initialized in init_mparams.
*/

struct malloc_params {
  size_t magic;            /* tag copied into every mstate for cross-checking */
  size_t page_size;        /* system page size; 0 until init_mparams has run */
  size_t granularity;      /* unit in which system memory is obtained */
  flag_t default_mflags;   /* initial mflags value for new spaces */
};

static struct malloc_params mparams;

/* The global malloc_state used for all non-"mspace" calls */
//static struct malloc_state _gm_;
//#define gm                 (&_gm_)
//#define is_global(M)       ((M) == &_gm_)
/* A space is considered initialized once it has a top chunk */
#define is_initialized(M)  ((M)->top != 0)
732
/* -------------------------- system alloc setup ------------------------- */

/* Operations on mflags */

/* Test / set / clear the per-space "use locks" flag bit */
#define use_lock(M)           ((M)->mflags &   USE_LOCK_BIT)
#define enable_lock(M)        ((M)->mflags |=  USE_LOCK_BIT)
#define disable_lock(M)       ((M)->mflags &= ~USE_LOCK_BIT)

/* Set the lock flag to the boolean value L */
#define set_lock(M,L)\
 ((M)->mflags = (L)?\
  ((M)->mflags | USE_LOCK_BIT) :\
  ((M)->mflags & ~USE_LOCK_BIT))
745
/* page-align a size: round S up to a multiple of the system page size.
   Adds (page_size - 1) rather than page_size so that an already-aligned
   size is left unchanged instead of being bumped up by a whole extra
   page (matches the fix adopted in dlmalloc 2.8.4). */
#define page_align(S)\
 (((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE))

/* granularity-align a size: round S up to a multiple of the allocation
   granularity, leaving already-aligned sizes unchanged. */
#define granularity_align(S)\
  (((S) + (mparams.granularity - SIZE_T_ONE)) & ~(mparams.granularity - SIZE_T_ONE))

/* True if size/address S is already page / granularity aligned */
#define is_page_aligned(S)\
   (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
#define is_granularity_aligned(S)\
   (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)
758
/*  True if segment S holds address A */
/*  NOTE: S and A are expanded more than once; pass side-effect-free args. */
#define segment_holds(S, A)\
  ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)
762
763#if DEBUG
764/* Return segment holding given address */
765static msegmentptr segment_holding(mstate m, char* addr) {
766  msegmentptr sp = &m->seg;
767  for (;;) {
768    if (addr >= sp->base && addr < sp->base + sp->size)
769      return sp;
770    if ((sp = sp->next) == 0)
771      return 0;
772  }
773}
774
775/* Return true if segment contains a segment link */
776static int has_segment_link(mstate m, msegmentptr ss) {
777  msegmentptr sp = &m->seg;
778  for (;;) {
779    if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size)
780      return 1;
781    if ((sp = sp->next) == 0)
782      return 0;
783  }
784}
785#endif
786
787
788/*
789  TOP_FOOT_SIZE is padding at the end of a segment, including space
790  that may be needed to place segment records and fenceposts when new
791  noncontiguous segments are added.
792*/
793#define TOP_FOOT_SIZE\
794  (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
795
796
/* -------------------------------  Hooks -------------------------------- */

/*
  PREACTION should be defined to return 0 on success, and nonzero on
  failure. If you are not using locking, you can redefine these to do
  anything you like.
*/

#if USE_LOCKS

/* Ensure locks are initialized */
#define GLOBALLY_INITIALIZE() (mparams.page_size == 0 && init_mparams())

#define PREACTION(M)  ((GLOBALLY_INITIALIZE() || use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0)
#define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }
#else /* USE_LOCKS */

/* Lock-free build: PREACTION always "succeeds", POSTACTION is a no-op */
#ifndef PREACTION
#define PREACTION(M) (0)
#endif  /* PREACTION */

#ifndef POSTACTION
#define POSTACTION(M)
#endif  /* POSTACTION */

#endif /* USE_LOCKS */
823
824/*
825  CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses.
826  USAGE_ERROR_ACTION is triggered on detected bad frees and
827  reallocs. The argument p is an address that might have triggered the
828  fault. It is ignored by the two predefined actions, but might be
829  useful in custom actions that try to help diagnose errors.
830*/
831
832#if PROCEED_ON_ERROR
833
834/* A count of the number of corruption errors causing resets */
835int malloc_corruption_error_count;
836
837/* default corruption action */
838static void reset_on_error(mstate m);
839
840#define CORRUPTION_ERROR_ACTION(m)  reset_on_error(m)
841#define USAGE_ERROR_ACTION(m, p)
842
843#else /* PROCEED_ON_ERROR */
844
845#ifndef CORRUPTION_ERROR_ACTION
846#define CORRUPTION_ERROR_ACTION(m) ABORT(m->user_data)
847#endif /* CORRUPTION_ERROR_ACTION */
848
849#ifndef USAGE_ERROR_ACTION
850#define USAGE_ERROR_ACTION(m,p) ABORT(m->user_data)
851#endif /* USAGE_ERROR_ACTION */
852
853#endif /* PROCEED_ON_ERROR */
854
/* -------------------------- Debugging setup ---------------------------- */

#if ! DEBUG

/* With DEBUG off, every consistency check compiles away to nothing */
#define check_free_chunk(M,P)
#define check_inuse_chunk(M,P)
#define check_malloced_chunk(M,P,N)
#define check_malloc_state(M)
#define check_top_chunk(M,P)

#else /* DEBUG */
#define check_free_chunk(M,P)       do_check_free_chunk(M,P)
#define check_inuse_chunk(M,P)      do_check_inuse_chunk(M,P)
#define check_top_chunk(M,P)        do_check_top_chunk(M,P)
#define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
#define check_malloc_state(M)       do_check_malloc_state(M)

/* Heavyweight checkers, defined later in this file under #if DEBUG */
static void   do_check_any_chunk(mstate m, mchunkptr p);
static void   do_check_top_chunk(mstate m, mchunkptr p);
static void   do_check_inuse_chunk(mstate m, mchunkptr p);
static void   do_check_free_chunk(mstate m, mchunkptr p);
static void   do_check_malloced_chunk(mstate m, void* mem, size_t s);
static void   do_check_tree(mstate m, tchunkptr t);
static void   do_check_treebin(mstate m, bindex_t i);
static void   do_check_smallbin(mstate m, bindex_t i);
static void   do_check_malloc_state(mstate m);
static int    bin_find(mstate m, mchunkptr x);
static size_t traverse_and_check(mstate m);
#endif /* DEBUG */
884
/* ---------------------------- Indexing Bins ---------------------------- */

/* True if chunk size s fits a smallbin (s < 256 given 32 bins 8 apart) */
#define is_small(s)         (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
#define small_index(s)      ((s)  >> SMALLBIN_SHIFT)
#define small_index2size(i) ((i)  << SMALLBIN_SHIFT)
#define MIN_SMALL_INDEX     (small_index(MIN_CHUNK_SIZE))

/* addressing by index. See above about smallbin repositioning */
/* Each smallbin header occupies only two pointer slots (fd/bk), hence i<<1 */
#define smallbin_at(M, i)   ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))
#define treebin_at(M,i)     (&((M)->treebins[i]))
895
/* assign tree index for size S to variable I */
/* NOTE: expands to a bare brace block (not do/while(0)); use only as a
   standalone statement.  Each power of two maps to two adjacent bins,
   selected by the bit below the highest set bit of S. */
#if defined(__GNUC__) && defined(i386)
#define compute_tree_index(S, I)\
{\
  size_t X = S >> TREEBIN_SHIFT;\
  if (X == 0)\
    I = 0;\
  else if (X > 0xFFFF)\
    I = NTREEBINS-1;\
  else {\
    unsigned int K;\
    __asm__("bsrl %1,%0\n\t" : "=r" (K) : "rm"  (X));\
    I =  (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
  }\
}
#else /* GNUC */
/* Portable fallback: locate the highest set bit of X branchlessly */
#define compute_tree_index(S, I)\
{\
  size_t X = S >> TREEBIN_SHIFT;\
  if (X == 0)\
    I = 0;\
  else if (X > 0xFFFF)\
    I = NTREEBINS-1;\
  else {\
    unsigned int Y = (unsigned int)X;\
    unsigned int N = ((Y - 0x100) >> 16) & 8;\
    unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\
    N += K;\
    N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\
    K = 14 - N + ((Y <<= K) >> 15);\
    I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\
  }\
}
#endif /* GNUC */
930
/* Bit representing maximum resolved size in a treebin at i */
/* NOTE: the whole expansion is parenthesized (the original left the
   ternary bare, which misbinds when the macro is used inside a larger
   expression), and the argument i is parenthesized throughout. */
#define bit_for_tree_index(i) \
   (((i) == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2))

/* Shift placing maximum resolved bit in a treebin at i as sign bit */
#define leftshift_for_tree_index(i) \
   (((i) == NTREEBINS-1)? 0 : \
    ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))

/* The size of the smallest chunk held in bin with index i */
#define minsize_for_tree_index(i) \
   ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) |  \
   (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
944
/* ------------------------ Operations on bin maps ----------------------- */

/* bit corresponding to given index */
#define idx2bit(i)              ((binmap_t)(1) << (i))

/* Mark/Clear bits with given index */
/* NOTE: *_is_marked yield a nonzero value, not necessarily 1 */
#define mark_smallmap(M,i)      ((M)->smallmap |=  idx2bit(i))
#define clear_smallmap(M,i)     ((M)->smallmap &= ~idx2bit(i))
#define smallmap_is_marked(M,i) ((M)->smallmap &   idx2bit(i))

#define mark_treemap(M,i)       ((M)->treemap  |=  idx2bit(i))
#define clear_treemap(M,i)      ((M)->treemap  &= ~idx2bit(i))
#define treemap_is_marked(M,i)  ((M)->treemap  &   idx2bit(i))
958
/* index corresponding to given bit */
/* Assigns to I the index of the least set bit of X; callers pass a
   nonzero map, usually a bit already isolated with least_bit(). */

#if defined(__GNUC__) && defined(i386)
#define compute_bit2idx(X, I)\
{\
  unsigned int J;\
  __asm__("bsfl %1,%0\n\t" : "=r" (J) : "rm" (X));\
  I = (bindex_t)J;\
}

#else /* GNUC */
#if  USE_BUILTIN_FFS
#define compute_bit2idx(X, I) I = ffs(X)-1

#else /* USE_BUILTIN_FFS */
/* Portable fallback: branchless binary search for the lowest set bit */
#define compute_bit2idx(X, I)\
{\
  unsigned int Y = X - 1;\
  unsigned int K = Y >> (16-4) & 16;\
  unsigned int N = K;        Y >>= K;\
  N += K = Y >> (8-3) &  8;  Y >>= K;\
  N += K = Y >> (4-2) &  4;  Y >>= K;\
  N += K = Y >> (2-1) &  2;  Y >>= K;\
  N += K = Y >> (1-0) &  1;  Y >>= K;\
  I = (bindex_t)(N + Y);\
}
#endif /* USE_BUILTIN_FFS */
#endif /* GNUC */
987
/* isolate the least set bit of a bitmap */
#define least_bit(x)         ((x) & -(x))

/* mask with all bits to left of least bit of x on */
/* NOTE: x is now parenthesized; the original expanded `x<<1` bare, which
   misbinds for compound arguments such as left_bits(a|b). */
#define left_bits(x)         (((x)<<1) | -((x)<<1))

/* mask with all bits to left of or equal to least bit of x on */
#define same_or_left_bits(x) ((x) | -(x))
996
997
998/* ----------------------- Runtime Check Support ------------------------- */
999
1000/*
1001  For security, the main invariant is that malloc/free/etc never
1002  writes to a static address other than malloc_state, unless static
1003  malloc_state itself has been corrupted, which cannot occur via
1004  malloc (because of these checks). In essence this means that we
1005  believe all pointers, sizes, maps etc held in malloc_state, but
1006  check all of those linked or offsetted from other embedded data
1007  structures.  These checks are interspersed with main code in a way
1008  that tends to minimize their run-time cost.
1009
1010  When FOOTERS is defined, in addition to range checking, we also
  verify footer fields of inuse chunks, which can be used to guarantee
1012  that the mstate controlling malloc/free is intact.  This is a
1013  streamlined version of the approach described by William Robertson
1014  et al in "Run-time Detection of Heap-based Overflows" LISA'03
1015  http://www.usenix.org/events/lisa03/tech/robertson.html The footer
1016  of an inuse chunk holds the xor of its mstate and a random seed,
1017  that is checked upon calls to free() and realloc().  This is
  (probabilistically) unguessable from outside the program, but can be
1019  computed by any code successfully malloc'ing any chunk, so does not
1020  itself provide protection against code that has already broken
1021  security through some other means.  Unlike Robertson et al, we
1022  always dynamically check addresses of all offset chunks (previous,
1023  next, etc). This turns out to be cheaper than relying on hashes.
1024*/
1025
#if !INSECURE
/* Check if address a is at least as high as any from MORECORE or MMAP */
#define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
/* Check if address of next chunk n is higher than base chunk p */
#define ok_next(p, n)    ((char*)(p) < (char*)(n))
/* Check if p has its cinuse bit on */
#define ok_cinuse(p)     cinuse(p)
/* Check if p has its pinuse bit on */
#define ok_pinuse(p)     pinuse(p)

#else /* !INSECURE */
/* INSECURE builds skip all runtime validity checks */
#define ok_address(M, a) (1)
#define ok_next(b, n)    (1)
#define ok_cinuse(p)     (1)
#define ok_pinuse(p)     (1)
#endif /* !INSECURE */

#if (FOOTERS && !INSECURE)
/* Check if (alleged) mstate m has expected magic field */
#define ok_magic(M)      ((M)->magic == mparams.magic)
#else  /* (FOOTERS && !INSECURE) */
#define ok_magic(M)      (1)
#endif /* (FOOTERS && !INSECURE) */


/* In gcc, use __builtin_expect to minimize impact of checks */
#if !INSECURE
#if defined(__GNUC__) && __GNUC__ >= 3
/* Runtime checks are expected to pass; hint the branch predictor */
#define RTCHECK(e)  __builtin_expect(e, 1)
#else /* GNUC */
#define RTCHECK(e)  (e)
#endif /* GNUC */
#else /* !INSECURE */
#define RTCHECK(e)  (1)
#endif /* !INSECURE */
1061
/* macros to set up inuse chunks with or without footers */

#if !FOOTERS

/* Without footers, stamping the foot is a no-op */
#define mark_inuse_foot(M,p,s)

/* Set cinuse bit and pinuse bit of next chunk */
#define set_inuse(M,p,s)\
  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)

/* Set cinuse and pinuse of this chunk and pinuse of next chunk */
#define set_inuse_and_pinuse(M,p,s)\
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)

/* Set size, cinuse and pinuse bit of this chunk */
#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))

#else /* FOOTERS */

/* Set foot of inuse chunk to be xor of mstate and seed */
#define mark_inuse_foot(M,p,s)\
  (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))

/* Recover the owning mstate from an inuse chunk's xor-obscured footer */
#define get_mstate_for(p)\
  ((mstate)(((mchunkptr)((char*)(p) +\
    (chunksize(p))))->prev_foot ^ mparams.magic))

/* Same as the non-FOOTERS variants, but additionally stamp the footer */
#define set_inuse(M,p,s)\
  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
  (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \
  mark_inuse_foot(M,p,s))

#define set_inuse_and_pinuse(M,p,s)\
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
  (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\
 mark_inuse_foot(M,p,s))

#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
  mark_inuse_foot(M, p, s))

#endif /* !FOOTERS */
1107
1108/* ---------------------------- setting mparams -------------------------- */
1109
/* Initialize mparams */
/*
  One-time global setup: chooses the magic tag, records the page size
  and allocation granularity, and sanity-checks compile-time
  configuration (aborting via the user hook on misconfiguration).
  Idempotent: page_size != 0 is the "already initialized" sentinel.
  Always returns 0.
*/
static int init_mparams(void) {
  if (mparams.page_size == 0) {
    size_t s;

    mparams.default_mflags = USE_LOCK_BIT;

#if (FOOTERS && !INSECURE)
    {
#if USE_DEV_RANDOM
      int fd;
      unsigned char buf[sizeof(size_t)];
      /* Try to use /dev/urandom, else fall back on using time */
      if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&
          read(fd, buf, sizeof(buf)) == sizeof(buf)) {
        s = *((size_t *) buf);
        close(fd);
      }
      else
#endif /* USE_DEV_RANDOM */
        s = (size_t)(time(0) ^ (size_t)0x55555555U);

      s |= (size_t)8U;    /* ensure nonzero */
      s &= ~(size_t)7U;   /* improve chances of fault for bad values */

    }
#else /* (FOOTERS && !INSECURE) */
    /* Footers disabled: the magic need not be unpredictable */
    s = (size_t)0x58585858U;
#endif /* (FOOTERS && !INSECURE) */
    /* Publish the magic exactly once, under the init lock */
    ACQUIRE_MAGIC_INIT_LOCK();
    if (mparams.magic == 0) {
      mparams.magic = s;
      /* Set up lock for main malloc area */
      //INITIAL_LOCK(&gm->mutex);
      //gm->mflags = mparams.default_mflags;
    }
    RELEASE_MAGIC_INIT_LOCK();


    mparams.page_size = malloc_getpagesize;
    mparams.granularity = ((DEFAULT_GRANULARITY != 0)?
                           DEFAULT_GRANULARITY : mparams.page_size);

    /* Sanity-check configuration:
       size_t must be unsigned and as wide as pointer type.
       ints must be at least 4 bytes.
       alignment must be at least 8.
       Alignment, min chunk size, and page size must all be powers of 2.
    */
    if ((sizeof(size_t) != sizeof(char*)) ||
        (MAX_SIZE_T < MIN_CHUNK_SIZE)  ||
        (sizeof(int) < 4)  ||
        (MALLOC_ALIGNMENT < (size_t)8U) ||
        ((MALLOC_ALIGNMENT    & (MALLOC_ALIGNMENT-SIZE_T_ONE))    != 0) ||
        ((MCHUNK_SIZE         & (MCHUNK_SIZE-SIZE_T_ONE))         != 0) ||
        ((mparams.granularity & (mparams.granularity-SIZE_T_ONE)) != 0) ||
        ((mparams.page_size   & (mparams.page_size-SIZE_T_ONE))   != 0))
      ABORT(NULL);
  }
  return 0;
}
1171
1172/* support for mallopt */
1173static int change_mparam(int param_number, int value) {
1174  size_t val = (size_t)value;
1175  init_mparams();
1176  switch(param_number) {
1177  case M_GRANULARITY:
1178    if (val >= mparams.page_size && ((val & (val-1)) == 0)) {
1179      mparams.granularity = val;
1180      return 1;
1181    }
1182    else
1183      return 0;
1184  default:
1185    return 0;
1186  }
1187}
1188
1189#if DEBUG
1190/* ------------------------- Debugging Support --------------------------- */
1191
/* Check properties of any chunk, whether free, inuse, mmapped etc  */
static void do_check_any_chunk(mstate m, mchunkptr p) {
  /* every chunk is either properly aligned or a fencepost marker,
     and must lie at or above the lowest address obtained from the system */
  assert(m->user_data, (is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
  assert(m->user_data, ok_address(m, p));
}
1197
/* Check properties of top chunk */
static void do_check_top_chunk(mstate m, mchunkptr p) {
  msegmentptr sp = segment_holding(m, (char*)p);
  size_t  sz = chunksize(p);
  assert(m->user_data, sp != 0);  /* top must lie inside some segment */
  assert(m->user_data, (is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
  assert(m->user_data, ok_address(m, p));
  assert(m->user_data, sz == m->topsize);
  assert(m->user_data, sz > 0);
  /* top extends to the end of its segment minus the reserved tail */
  assert(m->user_data, sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE);
  assert(m->user_data, pinuse(p));
  assert(m->user_data, !next_pinuse(p));
}
1211
/* Check properties of inuse chunks */
static void do_check_inuse_chunk(mstate m, mchunkptr p) {
  do_check_any_chunk(m, p);
  assert(m->user_data, cinuse(p));
  assert(m->user_data, next_pinuse(p));  /* next chunk must see p as in use */
  /* If not pinuse, previous chunk has OK offset */
  assert(m->user_data, pinuse(p) || next_chunk(prev_chunk(p)) == p);
}
1220
/* Check properties of free chunks */
static void do_check_free_chunk(mstate m, mchunkptr p) {
  size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT);
  mchunkptr next = chunk_plus_offset(p, sz);
  do_check_any_chunk(m, p);
  assert(m->user_data, !cinuse(p));
  assert(m->user_data, !next_pinuse(p));
  /* dv and top are free but never binned; skip the list checks for them */
  if (p != m->dv && p != m->top) {
    if (sz >= MIN_CHUNK_SIZE) {
      assert(m->user_data, (sz & CHUNK_ALIGN_MASK) == 0);
      assert(m->user_data, is_aligned(chunk2mem(p)));
      assert(m->user_data, next->prev_foot == sz);  /* boundary tag matches */
      /* previous chunk must be in use: no two adjacent free chunks */
      assert(m->user_data, pinuse(p));
      assert(m->user_data, next == m->top || cinuse(next));
      /* doubly-linked bin list is consistent in both directions */
      assert(m->user_data, p->fd->bk == p);
      assert(m->user_data, p->bk->fd == p);
    }
    else  /* markers are always of size SIZE_T_SIZE */
      assert(m->user_data, sz == SIZE_T_SIZE);
  }
}
1242
/* Check properties of malloced chunks at the point they are malloced */
static void do_check_malloced_chunk(mstate m, void* mem, size_t s) {
  if (mem != 0) {
    mchunkptr p = mem2chunk(mem);
    size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT);
    do_check_inuse_chunk(m, p);
    assert(m->user_data, (sz & CHUNK_ALIGN_MASK) == 0);
    assert(m->user_data, sz >= MIN_CHUNK_SIZE);
    assert(m->user_data, sz >= s);  /* large enough for the request */
    /* size is less than MIN_CHUNK_SIZE more than request */
    assert(m->user_data, sz < (s + MIN_CHUNK_SIZE));
  }
}
1256
/* Check a tree and its subtrees.  */
/* Verifies, for every node reachable from t: index/size agreement with
   the bin, the circular fd/bk ring of same-sized chunks, parent/child
   linkage (exactly one ring member has a parent), and child ordering. */
static void do_check_tree(mstate m, tchunkptr t) {
  tchunkptr head = 0;
  tchunkptr u = t;
  bindex_t tindex = t->index;
  size_t tsize = chunksize(t);
  bindex_t idx;
  compute_tree_index(tsize, idx);
  assert(m->user_data, tindex == idx);
  assert(m->user_data, tsize >= MIN_LARGE_SIZE);
  assert(m->user_data, tsize >= minsize_for_tree_index(idx));
  assert(m->user_data, (idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1))));

  do { /* traverse through chain of same-sized nodes */
    do_check_any_chunk(m, ((mchunkptr)u));
    assert(m->user_data, u->index == tindex);
    assert(m->user_data, chunksize(u) == tsize);
    assert(m->user_data, !cinuse(u));
    assert(m->user_data, !next_pinuse(u));
    assert(m->user_data, u->fd->bk == u);
    assert(m->user_data, u->bk->fd == u);
    if (u->parent == 0) {
      assert(m->user_data, u->child[0] == 0);
      assert(m->user_data, u->child[1] == 0);
    }
    else {
      assert(m->user_data, head == 0); /* only one node on chain has parent */
      head = u;
      assert(m->user_data, u->parent != u);
      assert(m->user_data, u->parent->child[0] == u ||
             u->parent->child[1] == u ||
             *((tbinptr*)(u->parent)) == u);
      if (u->child[0] != 0) {
        assert(m->user_data, u->child[0]->parent == u);
        assert(m->user_data, u->child[0] != u);
        do_check_tree(m, u->child[0]);
      }
      if (u->child[1] != 0) {
        assert(m->user_data, u->child[1]->parent == u);
        assert(m->user_data, u->child[1] != u);
        do_check_tree(m, u->child[1]);
      }
      if (u->child[0] != 0 && u->child[1] != 0) {
        assert(m->user_data, chunksize(u->child[0]) < chunksize(u->child[1]));
      }
    }
    u = u->fd;
  } while (u != t);
  assert(m->user_data, head != 0);
}
1307
1308/*  Check all the chunks in a treebin.  */
1309static void do_check_treebin(mstate m, bindex_t i) {
1310  tbinptr* tb = treebin_at(m, i);
1311  tchunkptr t = *tb;
1312  int empty = (m->treemap & (1U << i)) == 0;
1313  if (t == 0)
1314    assert(m->user_data, empty);
1315  if (!empty)
1316    do_check_tree(m, t);
1317}
1318
/*  Check all the chunks in a smallbin.  */
static void do_check_smallbin(mstate m, bindex_t i) {
  sbinptr b = smallbin_at(m, i);
  mchunkptr p = b->bk;
  unsigned int empty = (m->smallmap & (1U << i)) == 0;
  if (p == b)
    assert(m->user_data, empty);  /* empty list must have its map bit clear */
  if (!empty) {
    /* walk the circular list backwards from the bin header */
    for (; p != b; p = p->bk) {
      size_t size = chunksize(p);
      mchunkptr q;
      /* each chunk claims to be free */
      do_check_free_chunk(m, p);
      /* chunk belongs in bin */
      assert(m->user_data, small_index(size) == i);
      assert(m->user_data, p->bk == b || chunksize(p->bk) == chunksize(p));
      /* chunk is followed by an inuse chunk */
      q = next_chunk(p);
      if (q->head != FENCEPOST_HEAD)
        do_check_inuse_chunk(m, q);
    }
  }
}
1342
/* Find x in a bin. Used in other check functions. */
/* Returns 1 if chunk x is linked into the small or tree bin matching
   its size, 0 otherwise. */
static int bin_find(mstate m, mchunkptr x) {
  size_t size = chunksize(x);
  if (is_small(size)) {
    bindex_t sidx = small_index(size);
    sbinptr b = smallbin_at(m, sidx);
    if (smallmap_is_marked(m, sidx)) {
      /* walk the circular fd list starting at the bin header */
      mchunkptr p = b;
      do {
        if (p == x)
          return 1;
      } while ((p = p->fd) != b);
    }
  }
  else {
    bindex_t tidx;
    compute_tree_index(size, tidx);
    if (treemap_is_marked(m, tidx)) {
      tchunkptr t = *treebin_at(m, tidx);
      /* descend the tree, steering by size bits from the most significant */
      size_t sizebits = size << leftshift_for_tree_index(tidx);
      while (t != 0 && chunksize(t) != size) {
        t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
        sizebits <<= 1;
      }
      if (t != 0) {
        /* scan the ring of same-sized chunks hanging off the tree node */
        tchunkptr u = t;
        do {
          if (u == (tchunkptr)x)
            return 1;
        } while ((u = u->fd) != t);
      }
    }
  }
  return 0;
}
1378
/* Traverse each chunk and check it; return total */
/* Walks every chunk of every segment, checking each; the returned sum
   of chunk sizes (plus top and its foot) cross-checks m->footprint. */
static size_t traverse_and_check(mstate m) {
  size_t sum = 0;
  if (is_initialized(m)) {
    msegmentptr s = &m->seg;
    sum += m->topsize + TOP_FOOT_SIZE;
    while (s != 0) {
      mchunkptr q = align_as_chunk(s->base);
      mchunkptr lastq = 0;
      assert(m->user_data, pinuse(q));  /* first chunk of a segment has pinuse */
      while (segment_holds(s, q) &&
             q != m->top && q->head != FENCEPOST_HEAD) {
        sum += chunksize(q);
        if (cinuse(q)) {
          assert(m->user_data, !bin_find(m, q));  /* inuse chunks are unbinned */
          do_check_inuse_chunk(m, q);
        }
        else {
          /* every free chunk is either dv or binned */
          assert(m->user_data, q == m->dv || bin_find(m, q));
          assert(m->user_data, lastq == 0 || cinuse(lastq)); /* Not 2 consecutive free */
          do_check_free_chunk(m, q);
        }
        lastq = q;
        q = next_chunk(q);
      }
      s = s->next;
    }
  }
  return sum;
}
1409
/* Check all properties of malloc_state. */
static void do_check_malloc_state(mstate m) {
  bindex_t i;
  size_t total;
  /* check bins */
  for (i = 0; i < NSMALLBINS; ++i)
    do_check_smallbin(m, i);
  for (i = 0; i < NTREEBINS; ++i)
    do_check_treebin(m, i);

  if (m->dvsize != 0) { /* check dv chunk */
    do_check_any_chunk(m, m->dv);
    assert(m->user_data, m->dvsize == chunksize(m->dv));
    assert(m->user_data, m->dvsize >= MIN_CHUNK_SIZE);
    assert(m->user_data, bin_find(m, m->dv) == 0);  /* dv is never binned */
  }

  if (m->top != 0) {   /* check top chunk */
    do_check_top_chunk(m, m->top);
    assert(m->user_data, m->topsize == chunksize(m->top));
    assert(m->user_data, m->topsize > 0);
    assert(m->user_data, bin_find(m, m->top) == 0);  /* top is never binned */
  }

  total = traverse_and_check(m);
  assert(m->user_data, total <= m->footprint);  /* accounted bytes fit footprint */
  assert(m->user_data, m->footprint <= m->max_footprint);
}
1438#endif /* DEBUG */
1439
1440/* ----------------------------- statistics ------------------------------ */
1441
1442#if !NO_MALLINFO
/* Gather SVID/XPG-style mallinfo statistics by walking every chunk of
   every segment.  Returns an all-zero struct when the space is
   uninitialized or the lock cannot be acquired. */
static struct mallinfo internal_mallinfo(mstate m) {
  struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
  if (!PREACTION(m)) {
    check_malloc_state(m);
    if (is_initialized(m)) {
      size_t nfree = SIZE_T_ONE; /* top always free */
      size_t mfree = m->topsize + TOP_FOOT_SIZE;
      size_t sum = mfree;
      msegmentptr s = &m->seg;
      while (s != 0) {
        mchunkptr q = align_as_chunk(s->base);
        /* walk chunks until top, a fencepost, or the end of the segment */
        while (segment_holds(s, q) &&
               q != m->top && q->head != FENCEPOST_HEAD) {
          size_t sz = chunksize(q);
          sum += sz;
          if (!cinuse(q)) {
            mfree += sz;
            ++nfree;
          }
          q = next_chunk(q);
        }
        s = s->next;
      }

      nm.arena    = sum;      /* total bytes traversed (incl. top + foot) */
      nm.ordblks  = nfree;    /* number of free chunks, counting top */
      nm.hblkhd   = m->footprint - sum;
      nm.usmblks  = m->max_footprint;
      nm.uordblks = m->footprint - mfree;
      nm.fordblks = mfree;
      nm.keepcost = m->topsize;
    }

    POSTACTION(m);
  }
  return nm;
}
1480#endif /* !NO_MALLINFO */
1481
1482static void internal_malloc_stats(mstate m, size_t *ret_maxfp, size_t *ret_fp,
1483                                  size_t *ret_used) {
1484  if (!PREACTION(m)) {
1485    size_t maxfp = 0;
1486    size_t fp = 0;
1487    size_t used = 0;
1488    check_malloc_state(m);
1489    if (is_initialized(m)) {
1490      msegmentptr s = &m->seg;
1491      maxfp = m->max_footprint;
1492      fp = m->footprint;
1493      used = fp - (m->topsize + TOP_FOOT_SIZE);
1494
1495      while (s != 0) {
1496        mchunkptr q = align_as_chunk(s->base);
1497        while (segment_holds(s, q) &&
1498               q != m->top && q->head != FENCEPOST_HEAD) {
1499          if (!cinuse(q))
1500            used -= chunksize(q);
1501          q = next_chunk(q);
1502        }
1503        s = s->next;
1504      }
1505    }
1506
1507    if (ret_maxfp || ret_fp || ret_used) {
1508        if (ret_maxfp) {
1509            *ret_maxfp = maxfp;
1510        }
1511        if (ret_fp) {
1512            *ret_fp = fp;
1513        }
1514        if (ret_used) {
1515            *ret_used = used;
1516        }
1517    } else {
1518        PRINT((m->user_data, "max system bytes = %10lu\n", (unsigned long)(maxfp)));
1519        PRINT((m->user_data, "system bytes     = %10lu\n", (unsigned long)(fp)));
1520        PRINT((m->user_data, "in use bytes     = %10lu\n", (unsigned long)(used)));
1521    }
1522
1523    POSTACTION(m);
1524  }
1525}
1526
/* ----------------------- Operations on smallbins ----------------------- */

/*
  Various forms of linking and unlinking are defined as macros.  Even
  the ones for trees, which are very long but have very short typical
  paths.  This is ugly but reduces reliance on inlining support of
  compilers.
*/

/* Link a free chunk into a smallbin  */
/* P (of size S) is inserted at the front of bin I, between the bin
   header B and the old first chunk F.  If the bin was empty its bitmap
   bit is set; otherwise B->fd is range-checked before being trusted. */
#define insert_small_chunk(M, P, S) {\
  bindex_t I  = small_index(S);\
  mchunkptr B = smallbin_at(M, I);\
  mchunkptr F = B;\
  assert((M)->user_data, S >= MIN_CHUNK_SIZE);\
  if (!smallmap_is_marked(M, I))\
    mark_smallmap(M, I);\
  else if (RTCHECK(ok_address(M, B->fd)))\
    F = B->fd;\
  else {\
    CORRUPTION_ERROR_ACTION(M);\
  }\
  B->fd = P;\
  F->bk = P;\
  P->fd = F;\
  P->bk = B;\
}
1554
/* Unlink a chunk from a smallbin  */
/* Removes P (of size S) from its list.  When P was the only chunk
   (F == B), the bin's bitmap bit is cleared instead of relinking.
   Neighbours are range-checked unless they are the bin header itself. */
#define unlink_small_chunk(M, P, S) {\
  mchunkptr F = P->fd;\
  mchunkptr B = P->bk;\
  bindex_t I = small_index(S);\
  assert((M)->user_data, P != B);\
  assert((M)->user_data, P != F);\
  assert((M)->user_data, chunksize(P) == small_index2size(I));\
  if (F == B)\
    clear_smallmap(M, I);\
  else if (RTCHECK((F == smallbin_at(M,I) || ok_address(M, F)) &&\
                   (B == smallbin_at(M,I) || ok_address(M, B)))) {\
    F->bk = B;\
    B->fd = F;\
  }\
  else {\
    CORRUPTION_ERROR_ACTION(M);\
  }\
}
1574
/* Unlink the first chunk from a smallbin */
/* P is known to be B->fd, the first real chunk of bin I; the bitmap
   bit is cleared when removing P leaves the bin empty. */
#define unlink_first_small_chunk(M, B, P, I) {\
  mchunkptr F = P->fd;\
  assert((M)->user_data, P != B);\
  assert((M)->user_data, P != F);\
  assert((M)->user_data, chunksize(P) == small_index2size(I));\
  if (B == F)\
    clear_smallmap(M, I);\
  else if (RTCHECK(ok_address(M, F))) {\
    B->fd = F;\
    F->bk = B;\
  }\
  else {\
    CORRUPTION_ERROR_ACTION(M);\
  }\
}
1591
/* Replace dv node, binning the old one */
/* Used only when dvsize known to be small */
/* Installs P (size S) as the designated-victim chunk; the previous dv,
   if any, is returned to its smallbin first. */
#define replace_dv(M, P, S) {\
  size_t DVS = M->dvsize;\
  if (DVS != 0) {\
    mchunkptr DV = M->dv;\
    assert((M)->user_data, is_small(DVS));\
    insert_small_chunk(M, DV, DVS);\
  }\
  M->dvsize = S;\
  M->dv = P;\
}
1604
1605
1606/* ------------------------- Operations on trees ------------------------- */
1607
/* Insert chunk into tree */
/* Inserts chunk X of size S into the treebin for its size class.
   An empty bin just becomes X (marking the treemap bit).  Otherwise
   the bitwise trie is descended by successive high bits of S until
   either an empty child slot is found (X becomes a new tree node) or
   a node of identical size is found (X joins its circular fd/bk list
   with parent == 0, marking it as a non-head chain member). */
#define insert_large_chunk(M, X, S) {\
  tbinptr* H;\
  bindex_t I;\
  compute_tree_index(S, I);\
  H = treebin_at(M, I);\
  X->index = I;\
  X->child[0] = X->child[1] = 0;\
  if (!treemap_is_marked(M, I)) {\
    mark_treemap(M, I);\
    *H = X;\
    X->parent = (tchunkptr)H;\
    X->fd = X->bk = X;\
  }\
  else {\
    tchunkptr T = *H;\
    size_t K = S << leftshift_for_tree_index(I);\
    for (;;) {\
      if (chunksize(T) != S) {\
        tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
        K <<= 1;\
        if (*C != 0)\
          T = *C;\
        else if (RTCHECK(ok_address(M, C))) {\
          *C = X;\
          X->parent = T;\
          X->fd = X->bk = X;\
          break;\
        }\
        else {\
          CORRUPTION_ERROR_ACTION(M);\
          break;\
        }\
      }\
      else {\
        tchunkptr F = T->fd;\
        if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\
          T->fd = F->bk = X;\
          X->fd = F;\
          X->bk = T;\
          X->parent = 0;\
          break;\
        }\
        else {\
          CORRUPTION_ERROR_ACTION(M);\
          break;\
        }\
      }\
    }\
  }\
}
1659
1660/*
1661  Unlink steps:
1662
1663  1. If x is a chained node, unlink it from its same-sized fd/bk links
1664     and choose its bk node as its replacement.
1665  2. If x was the last node of its size, but not a leaf node, it must
1666     be replaced with a leaf node (not merely one with an open left or
     right), to make sure that lefts and rights of descendants
     correspond properly to bit masks.  We use the rightmost descendant
1669     of x.  We could use any other leaf, but this is easy to locate and
1670     tends to counteract removal of leftmosts elsewhere, and so keeps
1671     paths shorter than minimally guaranteed.  This doesn't loop much
1672     because on average a node in a tree is near the bottom.
1673  3. If x is the base of a chain (i.e., has parent links) relink
1674     x's parent and children to x's replacement (or null if none).
1675*/
1676
/* Removes tree chunk X, choosing a replacement R per the unlink steps
   documented above: a same-size chain sibling if one exists, otherwise
   the rightmost descendant leaf.  Parent and child links of X are then
   transferred to R (or cleared), with address checks guarding every
   pointer that is about to be written through. */
#define unlink_large_chunk(M, X) {\
  tchunkptr XP = X->parent;\
  tchunkptr R;\
  if (X->bk != X) {\
    tchunkptr F = X->fd;\
    R = X->bk;\
    if (RTCHECK(ok_address(M, F))) {\
      F->bk = R;\
      R->fd = F;\
    }\
    else {\
      CORRUPTION_ERROR_ACTION(M);\
    }\
  }\
  else {\
    tchunkptr* RP;\
    if (((R = *(RP = &(X->child[1]))) != 0) ||\
        ((R = *(RP = &(X->child[0]))) != 0)) {\
      tchunkptr* CP;\
      while ((*(CP = &(R->child[1])) != 0) ||\
             (*(CP = &(R->child[0])) != 0)) {\
        R = *(RP = CP);\
      }\
      if (RTCHECK(ok_address(M, RP)))\
        *RP = 0;\
      else {\
        CORRUPTION_ERROR_ACTION(M);\
      }\
    }\
  }\
  if (XP != 0) {\
    tbinptr* H = treebin_at(M, X->index);\
    if (X == *H) {\
      if ((*H = R) == 0) \
        clear_treemap(M, X->index);\
    }\
    else if (RTCHECK(ok_address(M, XP))) {\
      if (XP->child[0] == X) \
        XP->child[0] = R;\
      else \
        XP->child[1] = R;\
    }\
    else\
      CORRUPTION_ERROR_ACTION(M);\
    if (R != 0) {\
      if (RTCHECK(ok_address(M, R))) {\
        tchunkptr C0, C1;\
        R->parent = XP;\
        if ((C0 = X->child[0]) != 0) {\
          if (RTCHECK(ok_address(M, C0))) {\
            R->child[0] = C0;\
            C0->parent = R;\
          }\
          else\
            CORRUPTION_ERROR_ACTION(M);\
        }\
        if ((C1 = X->child[1]) != 0) {\
          if (RTCHECK(ok_address(M, C1))) {\
            R->child[1] = C1;\
            C1->parent = R;\
          }\
          else\
            CORRUPTION_ERROR_ACTION(M);\
        }\
      }\
      else\
        CORRUPTION_ERROR_ACTION(M);\
    }\
  }\
}
1747
/* Relays to large vs small bin operations */

/* Dispatch insert to the smallbin or treebin variant by chunk size. */
#define insert_chunk(M, P, S)\
  if (is_small(S)) insert_small_chunk(M, P, S)\
  else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
1753
/* Dispatch unlink to the smallbin or treebin variant by chunk size. */
#define unlink_chunk(M, P, S)\
  if (is_small(S)) unlink_small_chunk(M, P, S)\
  else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
1757
1758
/* Relays to internal calls to malloc/free from realloc, memalign etc */

#define internal_malloc(m, b) mspace_malloc(m, b)
/* No trailing ';' in the expansion: the semicolon belongs at the call
   site (the old definition baked one in, which expands to a stray null
   statement and breaks unbraced if/else call sites — CERT PRE11-C).
   All call sites in this file already supply their own ';'. */
#define internal_free(m, mem) mspace_free(m, mem)
1763
1764
1765/* -------------------------- mspace management -------------------------- */
1766
1767/* Initialize top chunk and its size */
1768static void init_top(mstate m, mchunkptr p, size_t psize) {
1769  /* Ensure alignment */
1770  size_t offset = align_offset(chunk2mem(p));
1771  p = (mchunkptr)((char*)p + offset);
1772  psize -= offset;
1773
1774  m->top = p;
1775  m->topsize = psize;
1776  p->head = psize | PINUSE_BIT;
1777  /* set size of fake trailing chunk holding overhead space only once */
1778  chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
1779}
1780
1781/* Initialize bins for a new mstate that is otherwise zeroed out */
1782static void init_bins(mstate m) {
1783  /* Establish circular links for smallbins */
1784  bindex_t i;
1785  for (i = 0; i < NSMALLBINS; ++i) {
1786    sbinptr bin = smallbin_at(m,i);
1787    bin->fd = bin->bk = bin;
1788  }
1789}
1790
#if PROCEED_ON_ERROR

/* default corruption action */
/* Forget all managed memory so the allocator can limp on after heap
   corruption is detected.  Only compiled when PROCEED_ON_ERROR != 0. */
static void reset_on_error(mstate m) {
  int i;
  ++malloc_corruption_error_count;
  /* Reinitialize fields to forget about all memory */
  /* Fix: clear the smallmap/treemap bitmaps.  The previous code
     assigned to 'smallbins'/'treebins', which are arrays and cannot be
     assigned — it failed to compile whenever this path was enabled
     (matches upstream dlmalloc, which clears the bitmaps here). */
  m->smallmap = m->treemap = 0;
  m->dvsize = m->topsize = 0;
  m->seg.base = 0;
  m->seg.size = 0;
  m->seg.next = 0;
  m->top = m->dv = 0;
  for (i = 0; i < NTREEBINS; ++i)
    *treebin_at(m, i) = 0;
  init_bins(m);
}
#endif /* PROCEED_ON_ERROR */
1809
#if 0
/* Allocate chunk and prepend remainder with chunk in successor base. */
/* Compiled out: only needed when sys_alloc can add new memory segments,
   which this port's stub sys_alloc never does.  Kept for reference.
   Allocates nb bytes at newbase and consolidates the leftover space
   between the new chunk and oldbase's first chunk with top/dv/bins. */
static void* prepend_alloc(mstate m, char* newbase, char* oldbase,
                           size_t nb) {
  mchunkptr p = align_as_chunk(newbase);
  mchunkptr oldfirst = align_as_chunk(oldbase);
  size_t psize = (char*)oldfirst - (char*)p;
  mchunkptr q = chunk_plus_offset(p, nb);
  size_t qsize = psize - nb;
  set_size_and_pinuse_of_inuse_chunk(m, p, nb);

  assert(m->user_data, (char*)oldfirst > (char*)q);
  assert(m->user_data, pinuse(oldfirst));
  assert(m->user_data, qsize >= MIN_CHUNK_SIZE);

  /* consolidate remainder with first chunk of old base */
  if (oldfirst == m->top) {
    size_t tsize = m->topsize += qsize;
    m->top = q;
    q->head = tsize | PINUSE_BIT;
    check_top_chunk(m, q);
  }
  else if (oldfirst == m->dv) {
    size_t dsize = m->dvsize += qsize;
    m->dv = q;
    set_size_and_pinuse_of_free_chunk(q, dsize);
  }
  else {
    if (!cinuse(oldfirst)) {
      size_t nsize = chunksize(oldfirst);
      unlink_chunk(m, oldfirst, nsize);
      oldfirst = chunk_plus_offset(oldfirst, nsize);
      qsize += nsize;
    }
    set_free_with_pinuse(q, qsize, oldfirst);
    insert_chunk(m, q, qsize);
    check_free_chunk(m, q);
  }

  check_malloced_chunk(m, chunk2mem(p), nb);
  return chunk2mem(p);
}
#endif
1853
1854/* -------------------------- System allocation -------------------------- */
1855
/* Get memory from system using MORECORE or MMAP */
/* Stub: this port has no system memory source, so sys_alloc always
   fails.  Consequently an mspace can never grow beyond the region
   handed to create_mspace_with_base. */
static void* sys_alloc(mstate m, size_t nb) {
  MALLOC_FAILURE_ACTION;
  return 0;
}
1861
1862/* ---------------------------- malloc support --------------------------- */
1863
/* allocate a large request from the best fitting chunk in a treebin */
/*
  Walks the trie of bin(nb) tracking the best (smallest-remainder) fit v;
  if the bin holds nothing suitable it falls over to the leftmost chunk
  of the next non-empty treebin.  Returns chunk2mem of the split chunk,
  or 0 when either no fit exists or the dv chunk would be a tighter fit
  (the caller then allocates from dv instead).
*/
static void* tmalloc_large(mstate m, size_t nb) {
  tchunkptr v = 0;
  size_t rsize = -nb; /* Unsigned negation */
  tchunkptr t;
  bindex_t idx;
  compute_tree_index(nb, idx);

  if ((t = *treebin_at(m, idx)) != 0) {
    /* Traverse tree for this bin looking for node with size == nb */
    size_t sizebits = nb << leftshift_for_tree_index(idx);
    tchunkptr rst = 0;  /* The deepest untaken right subtree */
    for (;;) {
      tchunkptr rt;
      size_t trem = chunksize(t) - nb;
      if (trem < rsize) {
        v = t;
        if ((rsize = trem) == 0) /* exact fit: stop immediately */
          break;
      }
      rt = t->child[1];
      /* descend left/right by the current high bit of the size key */
      t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
      if (rt != 0 && rt != t)
        rst = rt;
      if (t == 0) {
        t = rst; /* set t to least subtree holding sizes > nb */
        break;
      }
      sizebits <<= 1;
    }
  }

  if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
    binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
    if (leftbits != 0) {
      bindex_t i;
      binmap_t leastbit = least_bit(leftbits);
      compute_bit2idx(leastbit, i);
      t = *treebin_at(m, i);
    }
  }

  while (t != 0) { /* find smallest of tree or subtree */
    size_t trem = chunksize(t) - nb;
    if (trem < rsize) {
      rsize = trem;
      v = t;
    }
    t = leftmost_child(t);
  }

  /*  If dv is a better fit, return 0 so malloc will use it */
  if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
    if (RTCHECK(ok_address(m, v))) { /* split */
      mchunkptr r = chunk_plus_offset(v, nb);
      assert(m->user_data, chunksize(v) == rsize + nb);
      if (RTCHECK(ok_next(v, r))) {
        unlink_large_chunk(m, v);
        if (rsize < MIN_CHUNK_SIZE) /* remainder too small to split off */
          set_inuse_and_pinuse(m, v, (rsize + nb));
        else {
          set_size_and_pinuse_of_inuse_chunk(m, v, nb);
          set_size_and_pinuse_of_free_chunk(r, rsize);
          insert_chunk(m, r, rsize);
        }
        return chunk2mem(v);
      }
    }
    CORRUPTION_ERROR_ACTION(m);
  }
  return 0;
}
1936
/* allocate a small request from the best fitting chunk in a treebin */
/*
  Caller guarantees m->treemap != 0.  Starts at the smallest-index
  non-empty treebin and walks leftmost children to find the smallest
  chunk; any remainder from the split becomes the new dv chunk.
*/
static void* tmalloc_small(mstate m, size_t nb) {
  tchunkptr t, v;
  size_t rsize;
  bindex_t i;
  binmap_t leastbit = least_bit(m->treemap);
  compute_bit2idx(leastbit, i);

  v = t = *treebin_at(m, i);
  rsize = chunksize(t) - nb;

  while ((t = leftmost_child(t)) != 0) {
    size_t trem = chunksize(t) - nb;
    if (trem < rsize) {
      rsize = trem;
      v = t;
    }
  }

  if (RTCHECK(ok_address(m, v))) {
    mchunkptr r = chunk_plus_offset(v, nb);
    assert(m->user_data, chunksize(v) == rsize + nb);
    if (RTCHECK(ok_next(v, r))) {
      unlink_large_chunk(m, v);
      if (rsize < MIN_CHUNK_SIZE) /* remainder too small to split off */
        set_inuse_and_pinuse(m, v, (rsize + nb));
      else {
        set_size_and_pinuse_of_inuse_chunk(m, v, nb);
        set_size_and_pinuse_of_free_chunk(r, rsize);
        replace_dv(m, r, rsize);
      }
      return chunk2mem(v);
    }
  }

  CORRUPTION_ERROR_ACTION(m);
  return 0;
}
1975
1976/* --------------------------- realloc support --------------------------- */
1977
/*
  Resize oldmem in place when possible: either shrink (freeing the tail
  as a separate chunk) or grow into the adjacent top chunk.  Otherwise
  falls back to malloc-copy-free.  Callers guarantee oldmem != NULL.
*/
static void* internal_realloc(mstate m, void* oldmem, size_t bytes) {
  if (bytes >= MAX_REQUEST) {
    MALLOC_FAILURE_ACTION;
    return 0;
  }
  if (!PREACTION(m)) {
    mchunkptr oldp = mem2chunk(oldmem);
    size_t oldsize = chunksize(oldp);
    mchunkptr next = chunk_plus_offset(oldp, oldsize);
    mchunkptr newp = 0;
    void* extra = 0;  /* tail chunk to free after releasing the lock */

    /* Try to either shrink or extend into top. Else malloc-copy-free */

    if (RTCHECK(ok_address(m, oldp) && ok_cinuse(oldp) &&
                ok_next(oldp, next) && ok_pinuse(next))) {
      size_t nb = request2size(bytes);
      if (oldsize >= nb) { /* already big enough */
        size_t rsize = oldsize - nb;
        newp = oldp;
        if (rsize >= MIN_CHUNK_SIZE) {
          /* split off the surplus and free it below via internal_free */
          mchunkptr remainder = chunk_plus_offset(newp, nb);
          set_inuse(m, newp, nb);
          set_inuse(m, remainder, rsize);
          extra = chunk2mem(remainder);
        }
      }
      else if (next == m->top && oldsize + m->topsize > nb) {
        /* Expand into top */
        size_t newsize = oldsize + m->topsize;
        size_t newtopsize = newsize - nb;
        mchunkptr newtop = chunk_plus_offset(oldp, nb);
        set_inuse(m, oldp, nb);
        newtop->head = newtopsize |PINUSE_BIT;
        m->top = newtop;
        m->topsize = newtopsize;
        newp = oldp;
      }
    }
    else {
      USAGE_ERROR_ACTION(m, oldmem);
      POSTACTION(m);
      return 0;
    }

    POSTACTION(m);

    if (newp != 0) { /* in-place resize succeeded */
      if (extra != 0) {
        internal_free(m, extra);
      }
      check_inuse_chunk(m, newp);
      return chunk2mem(newp);
    }
    else { /* fall back: allocate fresh block, copy payload, free old */
      void* newmem = internal_malloc(m, bytes);
      if (newmem != 0) {
        size_t oc = oldsize - overhead_for(oldp);
        MEMCPY(newmem, oldmem, (oc < bytes)? oc : bytes);
        internal_free(m, oldmem);
      }
      return newmem;
    }
  }
  return 0;
}
2044
2045/* --------------------------- memalign support -------------------------- */
2046
/*
  Allocate bytes with the payload aligned to 'alignment' (rounded up to
  a power of two >= MIN_CHUNK_SIZE).  Over-allocates, then carves an
  aligned chunk out of the result; any leading space is freed back.
  NOTE(review): 'trailer' is never set in this version — the trailing
  surplus is not split off, so the returned chunk may be larger than
  strictly required (upstream dlmalloc trims it; confirm intentional).
*/
static void* internal_memalign(mstate m, size_t alignment, size_t bytes) {
  if (alignment <= MALLOC_ALIGNMENT)    /* Can just use malloc */
    return internal_malloc(m, bytes);
  if (alignment <  MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */
    alignment = MIN_CHUNK_SIZE;
  if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */
    size_t a = MALLOC_ALIGNMENT << 1;
    while (a < alignment) a <<= 1;
    alignment = a;
  }

  if (bytes >= MAX_REQUEST - alignment) {
    if (m != 0)  { /* Test isn't needed but avoids compiler warning */
      MALLOC_FAILURE_ACTION;
    }
  }
  else {
    size_t nb = request2size(bytes);
    /* Request enough room to guarantee an aligned spot exists inside. */
    size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
    char* mem = (char*)internal_malloc(m, req);
    if (mem != 0) {
      void* leader = 0;
      void* trailer = 0;
      mchunkptr p = mem2chunk(mem);

      if (PREACTION(m)) return 0;
      if ((((size_t)(mem)) % alignment) != 0) { /* misaligned */
        /*
          Find an aligned spot inside chunk.  Since we need to give
          back leading space in a chunk of at least MIN_CHUNK_SIZE, if
          the first calculation places us at a spot with less than
          MIN_CHUNK_SIZE leader, we can move to the next aligned spot.
          We've allocated enough total room so that this is always
          possible.
        */
        char* br = (char*)mem2chunk((size_t)(((size_t)(mem +
                                                       alignment -
                                                       SIZE_T_ONE)) &
                                             -alignment));
        char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)?
          br : br+alignment;
        mchunkptr newp = (mchunkptr)pos;
        size_t leadsize = pos - (char*)(p);
        size_t newsize = chunksize(p) - leadsize;

        /* Otherwise, give back leader, use the rest */
        set_inuse(m, newp, newsize);
        set_inuse(m, p, leadsize);
        leader = chunk2mem(p);

        p = newp;
      }

      assert(m->user_data, chunksize(p) >= nb);
      assert(m->user_data, (((size_t)(chunk2mem(p))) % alignment) == 0);
      check_inuse_chunk(m, p);
      POSTACTION(m);
      if (leader != 0) {
        internal_free(m, leader);
      }
      if (trailer != 0) {
        internal_free(m, trailer);
      }
      return chunk2mem(p);
    }
  }
  return 0;
}
2115
2116/* ----------------------------- user mspaces ---------------------------- */
2117
/*
  Carve a malloc_state out of the start of the region [tbase, tbase+tsize)
  and make the rest of the region the initial top chunk.  The state
  itself lives inside an in-use chunk at the region's (aligned) start.
  Returns the initialized mstate.
*/
static mstate init_user_mstate(char* tbase, size_t tsize, void *user_data) {
  size_t msize = pad_request(sizeof(struct malloc_state));
  mchunkptr mn;
  mchunkptr msp = align_as_chunk(tbase);
  mstate m = (mstate)(chunk2mem(msp));
  MEMCLEAR(m, msize);
  INITIAL_LOCK(&m->mutex);
  /* chunk holding the state is marked in-use so it is never coalesced */
  msp->head = (msize|PINUSE_BIT|CINUSE_BIT);
  m->seg.base = m->least_addr = tbase;
  m->seg.size = m->footprint = m->max_footprint = tsize;
  m->magic = mparams.magic;
  m->mflags = mparams.default_mflags;
  m->user_data = user_data;
  init_bins(m);
  /* everything after the state chunk becomes the top chunk */
  mn = next_chunk(mem2chunk(m));
  init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE);
  check_top_chunk(m, m->top);
  return m;
}
2137
2138mspace create_mspace_with_base(void* base, size_t capacity, int locked, void *user_data) {
2139  mstate m = 0;
2140  size_t msize = pad_request(sizeof(struct malloc_state));
2141  init_mparams(); /* Ensure pagesize etc initialized */
2142
2143  if (capacity > msize + TOP_FOOT_SIZE &&
2144      capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
2145    m = init_user_mstate((char*)base, capacity, user_data);
2146    set_lock(m, locked);
2147  }
2148  return (mspace)m;
2149}
2150
2151/*
2152  mspace versions of routines are near-clones of the global
2153  versions. This is not so nice but better than the alternatives.
2154*/
2155
2156
/*
  Allocate 'bytes' from the space.  Search order:
    1. exact (remainderless) smallbin fit,
    2. next non-empty smallbin / small treebin (when dv cannot serve),
    3. treebin best fit for large requests,
    4. the designated-victim (dv) chunk,
    5. the top chunk,
    6. sys_alloc (always fails in this port).
  Returns NULL on failure or if the space's magic tag is invalid.
*/
void* mspace_malloc(mspace msp, size_t bytes) {
  mstate ms = (mstate)msp;
  if (!ok_magic(ms)) {
    USAGE_ERROR_ACTION(ms,ms);
    return 0;
  }
  if (!PREACTION(ms)) {
    void* mem;
    size_t nb;  /* padded request size */
    if (bytes <= MAX_SMALL_REQUEST) {
      bindex_t idx;
      binmap_t smallbits;
      nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
      idx = small_index(nb);
      smallbits = ms->smallmap >> idx;

      if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
        mchunkptr b, p;
        idx += ~smallbits & 1;       /* Uses next bin if idx empty */
        b = smallbin_at(ms, idx);
        p = b->fd;
        assert(ms->user_data, chunksize(p) == small_index2size(idx));
        unlink_first_small_chunk(ms, b, p, idx);
        set_inuse_and_pinuse(ms, p, small_index2size(idx));
        mem = chunk2mem(p);
        check_malloced_chunk(ms, mem, nb);
        goto postaction;
      }

      else if (nb > ms->dvsize) { /* dv cannot serve this request */
        if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
          mchunkptr b, p, r;
          size_t rsize;
          bindex_t i;
          binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
          binmap_t leastbit = least_bit(leftbits);
          compute_bit2idx(leastbit, i);
          b = smallbin_at(ms, i);
          p = b->fd;
          assert(ms->user_data, chunksize(p) == small_index2size(i));
          unlink_first_small_chunk(ms, b, p, i);
          rsize = small_index2size(i) - nb;
          /* Fit here cannot be remainderless if 4byte sizes */
          if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
            set_inuse_and_pinuse(ms, p, small_index2size(i));
          else {
            set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
            r = chunk_plus_offset(p, nb);
            set_size_and_pinuse_of_free_chunk(r, rsize);
            replace_dv(ms, r, rsize); /* remainder becomes the new dv */
          }
          mem = chunk2mem(p);
          check_malloced_chunk(ms, mem, nb);
          goto postaction;
        }

        else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
          check_malloced_chunk(ms, mem, nb);
          goto postaction;
        }
      }
    }
    else if (bytes >= MAX_REQUEST)
      nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
    else {
      nb = pad_request(bytes);
      if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
        check_malloced_chunk(ms, mem, nb);
        goto postaction;
      }
    }

    if (nb <= ms->dvsize) { /* serve from the designated victim */
      size_t rsize = ms->dvsize - nb;
      mchunkptr p = ms->dv;
      if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
        mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
        ms->dvsize = rsize;
        set_size_and_pinuse_of_free_chunk(r, rsize);
        set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
      }
      else { /* exhaust dv */
        size_t dvs = ms->dvsize;
        ms->dvsize = 0;
        ms->dv = 0;
        set_inuse_and_pinuse(ms, p, dvs);
      }
      mem = chunk2mem(p);
      check_malloced_chunk(ms, mem, nb);
      goto postaction;
    }

    else if (nb < ms->topsize) { /* Split top */
      size_t rsize = ms->topsize -= nb;
      mchunkptr p = ms->top;
      mchunkptr r = ms->top = chunk_plus_offset(p, nb);
      r->head = rsize | PINUSE_BIT;
      set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
      mem = chunk2mem(p);
      check_top_chunk(ms, ms->top);
      check_malloced_chunk(ms, mem, nb);
      goto postaction;
    }

    mem = sys_alloc(ms, nb); /* always 0 in this port: allocation fails */

  postaction:
    POSTACTION(ms);
    return mem;
  }

  return 0;
}
2270
/*
  Return mem to its space.  Coalesces with the previous chunk (when
  free) and the following chunk / top / dv as applicable, then re-bins
  the result.  free(NULL) is a no-op.  With FOOTERS the owning state is
  recovered from the chunk itself; otherwise msp is trusted.
*/
void mspace_free(mspace msp, void* mem) {
  if (mem != 0) {
    mchunkptr p  = mem2chunk(mem);
#if FOOTERS
    mstate fm = get_mstate_for(p);
#else /* FOOTERS */
    mstate fm = (mstate)msp;
#endif /* FOOTERS */
    if (!ok_magic(fm)) {
      USAGE_ERROR_ACTION(fm, p);
      return;
    }
    if (!PREACTION(fm)) {
      check_inuse_chunk(fm, p);
      if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {
        size_t psize = chunksize(p);
        mchunkptr next = chunk_plus_offset(p, psize);
        if (!pinuse(p)) { /* previous chunk is free: consolidate backward */
          size_t prevsize = p->prev_foot;

          mchunkptr prev = chunk_minus_offset(p, prevsize);
          psize += prevsize;
          p = prev;
          if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
            if (p != fm->dv) {
              unlink_chunk(fm, p, prevsize);
            }
            else if ((next->head & INUSE_BITS) == INUSE_BITS) {
              /* merged chunk stays as dv; next is in use so stop here */
              fm->dvsize = psize;
              set_free_with_pinuse(p, psize, next);
              goto postaction;
            }
          }
          else
            goto erroraction;
        }

        if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
          if (!cinuse(next)) {  /* consolidate forward */
            if (next == fm->top) {
              /* merge into top; a dv absorbed into top ceases to exist */
              size_t tsize = fm->topsize += psize;
              fm->top = p;
              p->head = tsize | PINUSE_BIT;
              if (p == fm->dv) {
                fm->dv = 0;
                fm->dvsize = 0;
              }
              goto postaction;
            }
            else if (next == fm->dv) {
              size_t dsize = fm->dvsize += psize;
              fm->dv = p;
              set_size_and_pinuse_of_free_chunk(p, dsize);
              goto postaction;
            }
            else {
              size_t nsize = chunksize(next);
              psize += nsize;
              unlink_chunk(fm, next, nsize);
              set_size_and_pinuse_of_free_chunk(p, psize);
              if (p == fm->dv) {
                fm->dvsize = psize;
                goto postaction;
              }
            }
          }
          else
            set_free_with_pinuse(p, psize, next);
          insert_chunk(fm, p, psize);
          check_free_chunk(fm, p);
          goto postaction;
        }
      }
    erroraction:
      USAGE_ERROR_ACTION(fm, p);
    postaction:
      POSTACTION(fm);
    }
  }
}
2351
2352void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) {
2353  void* mem;
2354  size_t req = 0;
2355  mstate ms = (mstate)msp;
2356  if (!ok_magic(ms)) {
2357    USAGE_ERROR_ACTION(ms,ms);
2358    return 0;
2359  }
2360  if (n_elements != 0) {
2361    req = n_elements * elem_size;
2362    if (((n_elements | elem_size) & ~(size_t)0xffff) &&
2363        (req / n_elements != elem_size))
2364      req = MAX_SIZE_T; /* force downstream failure on overflow */
2365  }
2366  mem = internal_malloc(ms, req);
2367  if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
2368    MEMCLEAR(mem, req);
2369  return mem;
2370}
2371
/*
  Standard realloc semantics: NULL oldmem degenerates to malloc; with
  REALLOC_ZERO_BYTES_FREES defined, zero bytes frees and returns NULL.
  NOTE(review): the 'else' below deliberately binds to a different 'if'
  depending on whether REALLOC_ZERO_BYTES_FREES is defined — do not
  reformat this function without accounting for both configurations.
*/
void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) {
  if (oldmem == 0)
    return mspace_malloc(msp, bytes);
#ifdef REALLOC_ZERO_BYTES_FREES
  if (bytes == 0) {
    mspace_free(msp, oldmem);
    return 0;
  }
#endif /* REALLOC_ZERO_BYTES_FREES */
  else {
#if FOOTERS
    mchunkptr p  = mem2chunk(oldmem);
    mstate ms = get_mstate_for(p);
#else /* FOOTERS */
    mstate ms = (mstate)msp;
#endif /* FOOTERS */
    if (!ok_magic(ms)) {
      USAGE_ERROR_ACTION(ms,ms);
      return 0;
    }
    return internal_realloc(ms, oldmem, bytes);
  }
}
2395
2396void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) {
2397  mstate ms = (mstate)msp;
2398  if (!ok_magic(ms)) {
2399    USAGE_ERROR_ACTION(ms,ms);
2400    return 0;
2401  }
2402  return internal_memalign(ms, alignment, bytes);
2403}
2404
2405void mspace_malloc_stats_return(mspace msp, size_t *ret_maxfp, size_t *ret_fp,
2406                                size_t *ret_used)
2407{
2408
2409  mstate ms = (mstate)msp;
2410  if (ok_magic(ms)) {
2411    internal_malloc_stats(ms, ret_maxfp, ret_fp, ret_used);
2412  }
2413  else {
2414    USAGE_ERROR_ACTION(ms,ms);
2415  }
2416}
2417
2418
/* Print-only variant of the stats call: discards the numeric results
   (internal_malloc_stats prints via PRINT when given NULL pointers). */
void mspace_malloc_stats(mspace msp) {
    mspace_malloc_stats_return(msp, NULL, NULL, NULL);
}
2422
2423size_t mspace_footprint(mspace msp) {
2424  size_t result;
2425  mstate ms = (mstate)msp;
2426  if (ok_magic(ms)) {
2427    result = ms->footprint;
2428  }
2429  USAGE_ERROR_ACTION(ms,ms);
2430  return result;
2431}
2432
2433
2434size_t mspace_max_footprint(mspace msp) {
2435  size_t result;
2436  mstate ms = (mstate)msp;
2437  if (ok_magic(ms)) {
2438    result = ms->max_footprint;
2439  }
2440  USAGE_ERROR_ACTION(ms,ms);
2441  return result;
2442}
2443
2444
#if !NO_MALLINFO
/* Return SVID/XPG-style allocation statistics for the space. */
struct mallinfo mspace_mallinfo(mspace msp) {
  mstate ms = (mstate)msp;
  if (!ok_magic(ms)) {
    /* NOTE(review): reports the error but still calls
       internal_mallinfo(ms) — matches upstream dlmalloc behavior. */
    USAGE_ERROR_ACTION(ms,ms);
  }
  return internal_mallinfo(ms);
}
#endif /* NO_MALLINFO */
2454
/* Adjust a tunable mallopt-style parameter (e.g. M_GRANULARITY);
   forwards to change_mparam and returns its result. */
int mspace_mallopt(int param_number, int value) {
  return change_mparam(param_number, value);
}
2458
2459