Home | History | Annotate | Line # | Download | only in libbacktrace
mmap.c revision 1.1.1.7
      1      1.1  mrg /* mmap.c -- Memory allocation with mmap.
      2  1.1.1.7  mrg    Copyright (C) 2012-2018 Free Software Foundation, Inc.
      3      1.1  mrg    Written by Ian Lance Taylor, Google.
      4      1.1  mrg 
      5      1.1  mrg Redistribution and use in source and binary forms, with or without
      6      1.1  mrg modification, are permitted provided that the following conditions are
      7      1.1  mrg met:
      8      1.1  mrg 
      9      1.1  mrg     (1) Redistributions of source code must retain the above copyright
     10  1.1.1.4  mrg     notice, this list of conditions and the following disclaimer.
     11      1.1  mrg 
     12      1.1  mrg     (2) Redistributions in binary form must reproduce the above copyright
     13      1.1  mrg     notice, this list of conditions and the following disclaimer in
     14      1.1  mrg     the documentation and/or other materials provided with the
     15  1.1.1.4  mrg     distribution.
     16  1.1.1.4  mrg 
     17      1.1  mrg     (3) The name of the author may not be used to
     18      1.1  mrg     endorse or promote products derived from this software without
     19      1.1  mrg     specific prior written permission.
     20      1.1  mrg 
     21      1.1  mrg THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     22      1.1  mrg IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
     23      1.1  mrg WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
     24      1.1  mrg DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
     25      1.1  mrg INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
     26      1.1  mrg (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
     27      1.1  mrg SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     28      1.1  mrg HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
     29      1.1  mrg STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
     30      1.1  mrg IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     31      1.1  mrg POSSIBILITY OF SUCH DAMAGE.  */
     32      1.1  mrg 
     33      1.1  mrg #include "config.h"
     34      1.1  mrg 
     35      1.1  mrg #include <errno.h>
     36      1.1  mrg #include <string.h>
     37      1.1  mrg #include <stdlib.h>
     38      1.1  mrg #include <unistd.h>
     39      1.1  mrg #include <sys/types.h>
     40      1.1  mrg #include <sys/mman.h>
     41      1.1  mrg 
     42      1.1  mrg #include "backtrace.h"
     43      1.1  mrg #include "internal.h"
     44      1.1  mrg 
     45      1.1  mrg /* Memory allocation on systems that provide anonymous mmap.  This
     46      1.1  mrg    permits the backtrace functions to be invoked from a signal
     47      1.1  mrg    handler, assuming that mmap is async-signal safe.  */
     48      1.1  mrg 
     49      1.1  mrg #ifndef MAP_ANONYMOUS
     50      1.1  mrg #define MAP_ANONYMOUS MAP_ANON
     51      1.1  mrg #endif
     52      1.1  mrg 
     53  1.1.1.3  mrg #ifndef MAP_FAILED
     54  1.1.1.3  mrg #define MAP_FAILED ((void *)-1)
     55  1.1.1.3  mrg #endif
     56  1.1.1.3  mrg 
/* A list of free memory blocks.  Blocks released by backtrace_free
   are threaded onto a singly linked list headed by state->freelist so
   that backtrace_alloc can reuse them without calling mmap again.
   The list node lives in-place at the start of the free block itself,
   so only blocks at least sizeof (struct backtrace_freelist_struct)
   bytes long can be tracked; smaller blocks are simply leaked.  */

struct backtrace_freelist_struct
{
  /* Next on list.  */
  struct backtrace_freelist_struct *next;
  /* Size of this block, including this structure.  */
  size_t size;
};
     66      1.1  mrg 
/* Free memory allocated by backtrace_alloc, adding it to the free
   list.  The caller must already hold state->lock_alloc (or
   state->threaded must be false); this routine reads and writes
   state->freelist with no locking of its own.  */

static void
backtrace_free_locked (struct backtrace_state *state, void *addr, size_t size)
{
  /* Just leak small blocks.  We don't have to be perfect.  Don't put
     more than 16 entries on the free list, to avoid wasting time
     searching when allocating a block.  If we have more than 16
     entries, leak the smallest entry.  */

  /* A block smaller than the list node cannot hold its own freelist
     entry, so it is silently leaked.  */
  if (size >= sizeof (struct backtrace_freelist_struct))
    {
      size_t c;
      struct backtrace_freelist_struct **ppsmall;
      struct backtrace_freelist_struct **pp;
      struct backtrace_freelist_struct *p;

      /* Walk the list, counting entries and remembering the link that
	 points at the smallest one.  */
      c = 0;
      ppsmall = NULL;
      for (pp = &state->freelist; *pp != NULL; pp = &(*pp)->next)
	{
	  if (ppsmall == NULL || (*pp)->size < (*ppsmall)->size)
	    ppsmall = pp;
	  ++c;
	}
      /* The list is full: either leak the new block (if it is no
	 bigger than the smallest entry) or evict — i.e. leak — the
	 smallest entry to make room.  */
      if (c >= 16)
	{
	  if (size <= (*ppsmall)->size)
	    return;
	  *ppsmall = (*ppsmall)->next;
	}

      /* Build the freelist node inside the freed block and push it on
	 the front of the list.  */
      p = (struct backtrace_freelist_struct *) addr;
      p->next = state->freelist;
      p->size = size;
      state->freelist = p;
    }
}
    105      1.1  mrg 
    106  1.1.1.3  mrg /* Allocate memory like malloc.  If ERROR_CALLBACK is NULL, don't
    107  1.1.1.3  mrg    report an error.  */
    108      1.1  mrg 
    109      1.1  mrg void *
    110      1.1  mrg backtrace_alloc (struct backtrace_state *state,
    111      1.1  mrg 		 size_t size, backtrace_error_callback error_callback,
    112      1.1  mrg 		 void *data)
    113      1.1  mrg {
    114      1.1  mrg   void *ret;
    115      1.1  mrg   int locked;
    116      1.1  mrg   struct backtrace_freelist_struct **pp;
    117      1.1  mrg   size_t pagesize;
    118      1.1  mrg   size_t asksize;
    119      1.1  mrg   void *page;
    120      1.1  mrg 
    121      1.1  mrg   ret = NULL;
    122      1.1  mrg 
    123      1.1  mrg   /* If we can acquire the lock, then see if there is space on the
    124      1.1  mrg      free list.  If we can't acquire the lock, drop straight into
    125      1.1  mrg      using mmap.  __sync_lock_test_and_set returns the old state of
    126      1.1  mrg      the lock, so we have acquired it if it returns 0.  */
    127      1.1  mrg 
    128      1.1  mrg   if (!state->threaded)
    129      1.1  mrg     locked = 1;
    130      1.1  mrg   else
    131      1.1  mrg     locked = __sync_lock_test_and_set (&state->lock_alloc, 1) == 0;
    132      1.1  mrg 
    133      1.1  mrg   if (locked)
    134      1.1  mrg     {
    135      1.1  mrg       for (pp = &state->freelist; *pp != NULL; pp = &(*pp)->next)
    136      1.1  mrg 	{
    137      1.1  mrg 	  if ((*pp)->size >= size)
    138      1.1  mrg 	    {
    139      1.1  mrg 	      struct backtrace_freelist_struct *p;
    140      1.1  mrg 
    141      1.1  mrg 	      p = *pp;
    142      1.1  mrg 	      *pp = p->next;
    143      1.1  mrg 
    144      1.1  mrg 	      /* Round for alignment; we assume that no type we care about
    145      1.1  mrg 		 is more than 8 bytes.  */
    146      1.1  mrg 	      size = (size + 7) & ~ (size_t) 7;
    147      1.1  mrg 	      if (size < p->size)
    148      1.1  mrg 		backtrace_free_locked (state, (char *) p + size,
    149      1.1  mrg 				       p->size - size);
    150      1.1  mrg 
    151      1.1  mrg 	      ret = (void *) p;
    152      1.1  mrg 
    153      1.1  mrg 	      break;
    154      1.1  mrg 	    }
    155      1.1  mrg 	}
    156      1.1  mrg 
    157      1.1  mrg       if (state->threaded)
    158      1.1  mrg 	__sync_lock_release (&state->lock_alloc);
    159      1.1  mrg     }
    160      1.1  mrg 
    161      1.1  mrg   if (ret == NULL)
    162      1.1  mrg     {
    163      1.1  mrg       /* Allocate a new page.  */
    164      1.1  mrg 
    165      1.1  mrg       pagesize = getpagesize ();
    166      1.1  mrg       asksize = (size + pagesize - 1) & ~ (pagesize - 1);
    167      1.1  mrg       page = mmap (NULL, asksize, PROT_READ | PROT_WRITE,
    168      1.1  mrg 		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    169  1.1.1.3  mrg       if (page == MAP_FAILED)
    170  1.1.1.3  mrg 	{
    171  1.1.1.3  mrg 	  if (error_callback)
    172  1.1.1.3  mrg 	    error_callback (data, "mmap", errno);
    173  1.1.1.3  mrg 	}
    174      1.1  mrg       else
    175      1.1  mrg 	{
    176      1.1  mrg 	  size = (size + 7) & ~ (size_t) 7;
    177      1.1  mrg 	  if (size < asksize)
    178      1.1  mrg 	    backtrace_free (state, (char *) page + size, asksize - size,
    179      1.1  mrg 			    error_callback, data);
    180      1.1  mrg 
    181      1.1  mrg 	  ret = page;
    182      1.1  mrg 	}
    183      1.1  mrg     }
    184      1.1  mrg 
    185      1.1  mrg   return ret;
    186      1.1  mrg }
    187      1.1  mrg 
    188      1.1  mrg /* Free memory allocated by backtrace_alloc.  */
    189      1.1  mrg 
    190      1.1  mrg void
    191      1.1  mrg backtrace_free (struct backtrace_state *state, void *addr, size_t size,
    192      1.1  mrg 		backtrace_error_callback error_callback ATTRIBUTE_UNUSED,
    193      1.1  mrg 		void *data ATTRIBUTE_UNUSED)
    194      1.1  mrg {
    195      1.1  mrg   int locked;
    196      1.1  mrg 
    197  1.1.1.2  mrg   /* If we are freeing a large aligned block, just release it back to
    198  1.1.1.2  mrg      the system.  This case arises when growing a vector for a large
    199  1.1.1.2  mrg      binary with lots of debug info.  Calling munmap here may cause us
    200  1.1.1.2  mrg      to call mmap again if there is also a large shared library; we
    201  1.1.1.2  mrg      just live with that.  */
    202  1.1.1.2  mrg   if (size >= 16 * 4096)
    203  1.1.1.2  mrg     {
    204  1.1.1.2  mrg       size_t pagesize;
    205  1.1.1.2  mrg 
    206  1.1.1.2  mrg       pagesize = getpagesize ();
    207  1.1.1.2  mrg       if (((uintptr_t) addr & (pagesize - 1)) == 0
    208  1.1.1.2  mrg 	  && (size & (pagesize - 1)) == 0)
    209  1.1.1.2  mrg 	{
    210  1.1.1.2  mrg 	  /* If munmap fails for some reason, just add the block to
    211  1.1.1.2  mrg 	     the freelist.  */
    212  1.1.1.2  mrg 	  if (munmap (addr, size) == 0)
    213  1.1.1.2  mrg 	    return;
    214  1.1.1.2  mrg 	}
    215  1.1.1.2  mrg     }
    216  1.1.1.2  mrg 
    217      1.1  mrg   /* If we can acquire the lock, add the new space to the free list.
    218      1.1  mrg      If we can't acquire the lock, just leak the memory.
    219      1.1  mrg      __sync_lock_test_and_set returns the old state of the lock, so we
    220      1.1  mrg      have acquired it if it returns 0.  */
    221      1.1  mrg 
    222      1.1  mrg   if (!state->threaded)
    223      1.1  mrg     locked = 1;
    224      1.1  mrg   else
    225      1.1  mrg     locked = __sync_lock_test_and_set (&state->lock_alloc, 1) == 0;
    226      1.1  mrg 
    227      1.1  mrg   if (locked)
    228      1.1  mrg     {
    229      1.1  mrg       backtrace_free_locked (state, addr, size);
    230      1.1  mrg 
    231      1.1  mrg       if (state->threaded)
    232      1.1  mrg 	__sync_lock_release (&state->lock_alloc);
    233      1.1  mrg     }
    234      1.1  mrg }
    235      1.1  mrg 
    236      1.1  mrg /* Grow VEC by SIZE bytes.  */
    237      1.1  mrg 
    238      1.1  mrg void *
    239      1.1  mrg backtrace_vector_grow (struct backtrace_state *state,size_t size,
    240      1.1  mrg 		       backtrace_error_callback error_callback,
    241      1.1  mrg 		       void *data, struct backtrace_vector *vec)
    242      1.1  mrg {
    243      1.1  mrg   void *ret;
    244      1.1  mrg 
    245      1.1  mrg   if (size > vec->alc)
    246      1.1  mrg     {
    247      1.1  mrg       size_t pagesize;
    248      1.1  mrg       size_t alc;
    249      1.1  mrg       void *base;
    250      1.1  mrg 
    251      1.1  mrg       pagesize = getpagesize ();
    252      1.1  mrg       alc = vec->size + size;
    253      1.1  mrg       if (vec->size == 0)
    254      1.1  mrg 	alc = 16 * size;
    255      1.1  mrg       else if (alc < pagesize)
    256      1.1  mrg 	{
    257      1.1  mrg 	  alc *= 2;
    258      1.1  mrg 	  if (alc > pagesize)
    259      1.1  mrg 	    alc = pagesize;
    260      1.1  mrg 	}
    261      1.1  mrg       else
    262  1.1.1.2  mrg 	{
    263  1.1.1.2  mrg 	  alc *= 2;
    264  1.1.1.2  mrg 	  alc = (alc + pagesize - 1) & ~ (pagesize - 1);
    265  1.1.1.2  mrg 	}
    266      1.1  mrg       base = backtrace_alloc (state, alc, error_callback, data);
    267      1.1  mrg       if (base == NULL)
    268      1.1  mrg 	return NULL;
    269      1.1  mrg       if (vec->base != NULL)
    270      1.1  mrg 	{
    271      1.1  mrg 	  memcpy (base, vec->base, vec->size);
    272  1.1.1.2  mrg 	  backtrace_free (state, vec->base, vec->size + vec->alc,
    273  1.1.1.2  mrg 			  error_callback, data);
    274      1.1  mrg 	}
    275      1.1  mrg       vec->base = base;
    276      1.1  mrg       vec->alc = alc - vec->size;
    277      1.1  mrg     }
    278      1.1  mrg 
    279      1.1  mrg   ret = (char *) vec->base + vec->size;
    280      1.1  mrg   vec->size += size;
    281      1.1  mrg   vec->alc -= size;
    282      1.1  mrg   return ret;
    283      1.1  mrg }
    284      1.1  mrg 
    285      1.1  mrg /* Finish the current allocation on VEC.  */
    286      1.1  mrg 
    287      1.1  mrg void *
    288      1.1  mrg backtrace_vector_finish (
    289      1.1  mrg   struct backtrace_state *state ATTRIBUTE_UNUSED,
    290      1.1  mrg   struct backtrace_vector *vec,
    291      1.1  mrg   backtrace_error_callback error_callback ATTRIBUTE_UNUSED,
    292      1.1  mrg   void *data ATTRIBUTE_UNUSED)
    293      1.1  mrg {
    294      1.1  mrg   void *ret;
    295      1.1  mrg 
    296      1.1  mrg   ret = vec->base;
    297      1.1  mrg   vec->base = (char *) vec->base + vec->size;
    298      1.1  mrg   vec->size = 0;
    299      1.1  mrg   return ret;
    300      1.1  mrg }
    301      1.1  mrg 
    302      1.1  mrg /* Release any extra space allocated for VEC.  */
    303      1.1  mrg 
    304      1.1  mrg int
    305      1.1  mrg backtrace_vector_release (struct backtrace_state *state,
    306      1.1  mrg 			  struct backtrace_vector *vec,
    307      1.1  mrg 			  backtrace_error_callback error_callback,
    308      1.1  mrg 			  void *data)
    309      1.1  mrg {
    310      1.1  mrg   size_t size;
    311      1.1  mrg   size_t alc;
    312      1.1  mrg   size_t aligned;
    313      1.1  mrg 
    314      1.1  mrg   /* Make sure that the block that we free is aligned on an 8-byte
    315      1.1  mrg      boundary.  */
    316      1.1  mrg   size = vec->size;
    317      1.1  mrg   alc = vec->alc;
    318      1.1  mrg   aligned = (size + 7) & ~ (size_t) 7;
    319      1.1  mrg   alc -= aligned - size;
    320      1.1  mrg 
    321      1.1  mrg   backtrace_free (state, (char *) vec->base + aligned, alc,
    322      1.1  mrg 		  error_callback, data);
    323      1.1  mrg   vec->alc = 0;
    324      1.1  mrg   return 1;
    325      1.1  mrg }
    326