Home | History | Annotate | Line # | Download | only in libbacktrace
      1       1.1  mrg /* mmap.c -- Memory allocation with mmap.
      2  1.1.1.11  mrg    Copyright (C) 2012-2024 Free Software Foundation, Inc.
      3       1.1  mrg    Written by Ian Lance Taylor, Google.
      4       1.1  mrg 
      5       1.1  mrg Redistribution and use in source and binary forms, with or without
      6       1.1  mrg modification, are permitted provided that the following conditions are
      7       1.1  mrg met:
      8       1.1  mrg 
      9       1.1  mrg     (1) Redistributions of source code must retain the above copyright
     10   1.1.1.4  mrg     notice, this list of conditions and the following disclaimer.
     11       1.1  mrg 
     12       1.1  mrg     (2) Redistributions in binary form must reproduce the above copyright
     13       1.1  mrg     notice, this list of conditions and the following disclaimer in
     14       1.1  mrg     the documentation and/or other materials provided with the
     15   1.1.1.4  mrg     distribution.
     16   1.1.1.4  mrg 
     17       1.1  mrg     (3) The name of the author may not be used to
     18       1.1  mrg     endorse or promote products derived from this software without
     19       1.1  mrg     specific prior written permission.
     20       1.1  mrg 
     21       1.1  mrg THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     22       1.1  mrg IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
     23       1.1  mrg WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
     24       1.1  mrg DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
     25       1.1  mrg INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
     26       1.1  mrg (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
     27       1.1  mrg SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     28       1.1  mrg HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
     29       1.1  mrg STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
     30       1.1  mrg IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     31       1.1  mrg POSSIBILITY OF SUCH DAMAGE.  */
     32       1.1  mrg 
     33       1.1  mrg #include "config.h"
     34       1.1  mrg 
     35       1.1  mrg #include <errno.h>
     36       1.1  mrg #include <string.h>
     37       1.1  mrg #include <stdlib.h>
     38       1.1  mrg #include <unistd.h>
     39       1.1  mrg #include <sys/types.h>
     40       1.1  mrg #include <sys/mman.h>
     41       1.1  mrg 
     42       1.1  mrg #include "backtrace.h"
     43       1.1  mrg #include "internal.h"
     44       1.1  mrg 
     45  1.1.1.10  mrg #ifndef HAVE_DECL_GETPAGESIZE
     46  1.1.1.10  mrg extern int getpagesize (void);
     47  1.1.1.10  mrg #endif
     48  1.1.1.10  mrg 
     49       1.1  mrg /* Memory allocation on systems that provide anonymous mmap.  This
     50       1.1  mrg    permits the backtrace functions to be invoked from a signal
     51       1.1  mrg    handler, assuming that mmap is async-signal safe.  */
     52       1.1  mrg 
     53       1.1  mrg #ifndef MAP_ANONYMOUS
     54       1.1  mrg #define MAP_ANONYMOUS MAP_ANON
     55       1.1  mrg #endif
     56       1.1  mrg 
     57   1.1.1.3  mrg #ifndef MAP_FAILED
     58   1.1.1.3  mrg #define MAP_FAILED ((void *)-1)
     59   1.1.1.3  mrg #endif
     60   1.1.1.3  mrg 
/* A list of free memory blocks.  Each node is stored in the freed
   block itself, so only blocks at least this large can be tracked
   (smaller frees are leaked by backtrace_free_locked).  */

struct backtrace_freelist_struct
{
  /* Next on list.  */
  struct backtrace_freelist_struct *next;
  /* Size of this block, including this structure.  */
  size_t size;
};
     70       1.1  mrg 
     71       1.1  mrg /* Free memory allocated by backtrace_alloc.  */
     72       1.1  mrg 
     73       1.1  mrg static void
     74       1.1  mrg backtrace_free_locked (struct backtrace_state *state, void *addr, size_t size)
     75       1.1  mrg {
     76   1.1.1.7  mrg   /* Just leak small blocks.  We don't have to be perfect.  Don't put
     77   1.1.1.7  mrg      more than 16 entries on the free list, to avoid wasting time
     78   1.1.1.7  mrg      searching when allocating a block.  If we have more than 16
     79   1.1.1.7  mrg      entries, leak the smallest entry.  */
     80   1.1.1.7  mrg 
     81       1.1  mrg   if (size >= sizeof (struct backtrace_freelist_struct))
     82       1.1  mrg     {
     83   1.1.1.7  mrg       size_t c;
     84   1.1.1.7  mrg       struct backtrace_freelist_struct **ppsmall;
     85   1.1.1.7  mrg       struct backtrace_freelist_struct **pp;
     86       1.1  mrg       struct backtrace_freelist_struct *p;
     87       1.1  mrg 
     88   1.1.1.7  mrg       c = 0;
     89   1.1.1.7  mrg       ppsmall = NULL;
     90   1.1.1.7  mrg       for (pp = &state->freelist; *pp != NULL; pp = &(*pp)->next)
     91   1.1.1.7  mrg 	{
     92   1.1.1.7  mrg 	  if (ppsmall == NULL || (*pp)->size < (*ppsmall)->size)
     93   1.1.1.7  mrg 	    ppsmall = pp;
     94   1.1.1.7  mrg 	  ++c;
     95   1.1.1.7  mrg 	}
     96   1.1.1.7  mrg       if (c >= 16)
     97   1.1.1.7  mrg 	{
     98   1.1.1.7  mrg 	  if (size <= (*ppsmall)->size)
     99   1.1.1.7  mrg 	    return;
    100   1.1.1.7  mrg 	  *ppsmall = (*ppsmall)->next;
    101   1.1.1.7  mrg 	}
    102   1.1.1.7  mrg 
    103       1.1  mrg       p = (struct backtrace_freelist_struct *) addr;
    104       1.1  mrg       p->next = state->freelist;
    105       1.1  mrg       p->size = size;
    106       1.1  mrg       state->freelist = p;
    107       1.1  mrg     }
    108       1.1  mrg }
    109       1.1  mrg 
/* Allocate memory like malloc.  If ERROR_CALLBACK is NULL, don't
   report an error.  Returns NULL on failure.  Safe to call from a
   signal handler to the extent that mmap is async-signal safe: the
   lock is only try-acquired, never waited on.  */

void *
backtrace_alloc (struct backtrace_state *state,
		 size_t size, backtrace_error_callback error_callback,
		 void *data)
{
  void *ret;
  int locked;
  struct backtrace_freelist_struct **pp;
  size_t pagesize;
  size_t asksize;
  void *page;

  ret = NULL;

  /* If we can acquire the lock, then see if there is space on the
     free list.  If we can't acquire the lock, drop straight into
     using mmap.  __sync_lock_test_and_set returns the old state of
     the lock, so we have acquired it if it returns 0.  */

  if (!state->threaded)
    locked = 1;
  else
    locked = __sync_lock_test_and_set (&state->lock_alloc, 1) == 0;

  if (locked)
    {
      /* First-fit scan of the free list.  */
      for (pp = &state->freelist; *pp != NULL; pp = &(*pp)->next)
	{
	  if ((*pp)->size >= size)
	    {
	      struct backtrace_freelist_struct *p;

	      /* Unlink the block from the list.  */
	      p = *pp;
	      *pp = p->next;

	      /* Round for alignment; we assume that no type we care about
		 is more than 8 bytes.  */
	      size = (size + 7) & ~ (size_t) 7;
	      /* If the block is bigger than we need, give the tail
		 back to the free list.  We still hold the lock, so
		 call the _locked variant directly.  */
	      if (size < p->size)
		backtrace_free_locked (state, (char *) p + size,
				       p->size - size);

	      ret = (void *) p;

	      break;
	    }
	}

      if (state->threaded)
	__sync_lock_release (&state->lock_alloc);
    }

  if (ret == NULL)
    {
      /* Nothing on the free list (or we couldn't get the lock):
	 allocate a new page.  */

      pagesize = getpagesize ();
      /* Round the request up to a whole number of pages.  */
      asksize = (size + pagesize - 1) & ~ (pagesize - 1);
      page = mmap (NULL, asksize, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (page == MAP_FAILED)
	{
	  if (error_callback)
	    error_callback (data, "mmap", errno);
	}
      else
	{
	  /* Return any unused tail of the mapping to the free list.
	     backtrace_free takes the lock itself (we dropped it
	     above, or never held it).  */
	  size = (size + 7) & ~ (size_t) 7;
	  if (size < asksize)
	    backtrace_free (state, (char *) page + size, asksize - size,
			    error_callback, data);

	  ret = page;
	}
    }

  return ret;
}
    191       1.1  mrg 
    192       1.1  mrg /* Free memory allocated by backtrace_alloc.  */
    193       1.1  mrg 
    194       1.1  mrg void
    195       1.1  mrg backtrace_free (struct backtrace_state *state, void *addr, size_t size,
    196       1.1  mrg 		backtrace_error_callback error_callback ATTRIBUTE_UNUSED,
    197       1.1  mrg 		void *data ATTRIBUTE_UNUSED)
    198       1.1  mrg {
    199       1.1  mrg   int locked;
    200       1.1  mrg 
    201   1.1.1.2  mrg   /* If we are freeing a large aligned block, just release it back to
    202   1.1.1.2  mrg      the system.  This case arises when growing a vector for a large
    203   1.1.1.2  mrg      binary with lots of debug info.  Calling munmap here may cause us
    204   1.1.1.2  mrg      to call mmap again if there is also a large shared library; we
    205   1.1.1.2  mrg      just live with that.  */
    206   1.1.1.2  mrg   if (size >= 16 * 4096)
    207   1.1.1.2  mrg     {
    208   1.1.1.2  mrg       size_t pagesize;
    209   1.1.1.2  mrg 
    210   1.1.1.2  mrg       pagesize = getpagesize ();
    211   1.1.1.2  mrg       if (((uintptr_t) addr & (pagesize - 1)) == 0
    212   1.1.1.2  mrg 	  && (size & (pagesize - 1)) == 0)
    213   1.1.1.2  mrg 	{
    214   1.1.1.2  mrg 	  /* If munmap fails for some reason, just add the block to
    215   1.1.1.2  mrg 	     the freelist.  */
    216   1.1.1.2  mrg 	  if (munmap (addr, size) == 0)
    217   1.1.1.2  mrg 	    return;
    218   1.1.1.2  mrg 	}
    219   1.1.1.2  mrg     }
    220   1.1.1.2  mrg 
    221       1.1  mrg   /* If we can acquire the lock, add the new space to the free list.
    222       1.1  mrg      If we can't acquire the lock, just leak the memory.
    223       1.1  mrg      __sync_lock_test_and_set returns the old state of the lock, so we
    224       1.1  mrg      have acquired it if it returns 0.  */
    225       1.1  mrg 
    226       1.1  mrg   if (!state->threaded)
    227       1.1  mrg     locked = 1;
    228       1.1  mrg   else
    229       1.1  mrg     locked = __sync_lock_test_and_set (&state->lock_alloc, 1) == 0;
    230       1.1  mrg 
    231       1.1  mrg   if (locked)
    232       1.1  mrg     {
    233       1.1  mrg       backtrace_free_locked (state, addr, size);
    234       1.1  mrg 
    235       1.1  mrg       if (state->threaded)
    236       1.1  mrg 	__sync_lock_release (&state->lock_alloc);
    237       1.1  mrg     }
    238       1.1  mrg }
    239       1.1  mrg 
    240       1.1  mrg /* Grow VEC by SIZE bytes.  */
    241       1.1  mrg 
    242       1.1  mrg void *
    243       1.1  mrg backtrace_vector_grow (struct backtrace_state *state,size_t size,
    244       1.1  mrg 		       backtrace_error_callback error_callback,
    245       1.1  mrg 		       void *data, struct backtrace_vector *vec)
    246       1.1  mrg {
    247       1.1  mrg   void *ret;
    248       1.1  mrg 
    249       1.1  mrg   if (size > vec->alc)
    250       1.1  mrg     {
    251       1.1  mrg       size_t pagesize;
    252       1.1  mrg       size_t alc;
    253       1.1  mrg       void *base;
    254       1.1  mrg 
    255       1.1  mrg       pagesize = getpagesize ();
    256       1.1  mrg       alc = vec->size + size;
    257       1.1  mrg       if (vec->size == 0)
    258       1.1  mrg 	alc = 16 * size;
    259       1.1  mrg       else if (alc < pagesize)
    260       1.1  mrg 	{
    261       1.1  mrg 	  alc *= 2;
    262       1.1  mrg 	  if (alc > pagesize)
    263       1.1  mrg 	    alc = pagesize;
    264       1.1  mrg 	}
    265       1.1  mrg       else
    266   1.1.1.2  mrg 	{
    267   1.1.1.2  mrg 	  alc *= 2;
    268   1.1.1.2  mrg 	  alc = (alc + pagesize - 1) & ~ (pagesize - 1);
    269   1.1.1.2  mrg 	}
    270       1.1  mrg       base = backtrace_alloc (state, alc, error_callback, data);
    271       1.1  mrg       if (base == NULL)
    272       1.1  mrg 	return NULL;
    273       1.1  mrg       if (vec->base != NULL)
    274       1.1  mrg 	{
    275       1.1  mrg 	  memcpy (base, vec->base, vec->size);
    276   1.1.1.2  mrg 	  backtrace_free (state, vec->base, vec->size + vec->alc,
    277   1.1.1.2  mrg 			  error_callback, data);
    278       1.1  mrg 	}
    279       1.1  mrg       vec->base = base;
    280       1.1  mrg       vec->alc = alc - vec->size;
    281       1.1  mrg     }
    282       1.1  mrg 
    283       1.1  mrg   ret = (char *) vec->base + vec->size;
    284       1.1  mrg   vec->size += size;
    285       1.1  mrg   vec->alc -= size;
    286       1.1  mrg   return ret;
    287       1.1  mrg }
    288       1.1  mrg 
    289       1.1  mrg /* Finish the current allocation on VEC.  */
    290       1.1  mrg 
    291       1.1  mrg void *
    292       1.1  mrg backtrace_vector_finish (
    293       1.1  mrg   struct backtrace_state *state ATTRIBUTE_UNUSED,
    294       1.1  mrg   struct backtrace_vector *vec,
    295       1.1  mrg   backtrace_error_callback error_callback ATTRIBUTE_UNUSED,
    296       1.1  mrg   void *data ATTRIBUTE_UNUSED)
    297       1.1  mrg {
    298       1.1  mrg   void *ret;
    299       1.1  mrg 
    300       1.1  mrg   ret = vec->base;
    301       1.1  mrg   vec->base = (char *) vec->base + vec->size;
    302       1.1  mrg   vec->size = 0;
    303       1.1  mrg   return ret;
    304       1.1  mrg }
    305       1.1  mrg 
    306       1.1  mrg /* Release any extra space allocated for VEC.  */
    307       1.1  mrg 
    308       1.1  mrg int
    309       1.1  mrg backtrace_vector_release (struct backtrace_state *state,
    310       1.1  mrg 			  struct backtrace_vector *vec,
    311       1.1  mrg 			  backtrace_error_callback error_callback,
    312       1.1  mrg 			  void *data)
    313       1.1  mrg {
    314       1.1  mrg   size_t size;
    315       1.1  mrg   size_t alc;
    316       1.1  mrg   size_t aligned;
    317       1.1  mrg 
    318       1.1  mrg   /* Make sure that the block that we free is aligned on an 8-byte
    319       1.1  mrg      boundary.  */
    320       1.1  mrg   size = vec->size;
    321       1.1  mrg   alc = vec->alc;
    322       1.1  mrg   aligned = (size + 7) & ~ (size_t) 7;
    323       1.1  mrg   alc -= aligned - size;
    324       1.1  mrg 
    325       1.1  mrg   backtrace_free (state, (char *) vec->base + aligned, alc,
    326       1.1  mrg 		  error_callback, data);
    327       1.1  mrg   vec->alc = 0;
    328   1.1.1.8  mrg   if (vec->size == 0)
    329   1.1.1.8  mrg     vec->base = NULL;
    330       1.1  mrg   return 1;
    331       1.1  mrg }
    332