/* mmap.c -- Memory allocation with mmap.
   Copyright (C) 2012-2016 Free Software Foundation, Inc.
   Written by Ian Lance Taylor, Google.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

    (1) Redistributions of source code must retain the above copyright
    notice, this list of conditions and the following disclaimer.

    (2) Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in
    the documentation and/or other materials provided with the
    distribution.

    (3) The name of the author may not be used to
    endorse or promote products derived from this software without
    specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.  */
32 1.1 mrg
#include "config.h"

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "backtrace.h"
#include "internal.h"
44 1.1 mrg
45 1.1 mrg /* Memory allocation on systems that provide anonymous mmap. This
46 1.1 mrg permits the backtrace functions to be invoked from a signal
47 1.1 mrg handler, assuming that mmap is async-signal safe. */
48 1.1 mrg
49 1.1 mrg #ifndef MAP_ANONYMOUS
50 1.1 mrg #define MAP_ANONYMOUS MAP_ANON
51 1.1 mrg #endif
52 1.1 mrg
53 1.1.1.3 mrg #ifndef MAP_FAILED
54 1.1.1.3 mrg #define MAP_FAILED ((void *)-1)
55 1.1.1.3 mrg #endif
56 1.1.1.3 mrg
/* A list of free memory blocks.  The list is threaded through the
   freed blocks themselves: each freed block is reused as a list node,
   so maintaining the list never allocates.  Blocks smaller than this
   structure are simply leaked (see backtrace_free_locked).  */

struct backtrace_freelist_struct
{
  /* Next block on the free list, or NULL at the end.  */
  struct backtrace_freelist_struct *next;
  /* Size of this block in bytes, including this structure.  */
  size_t size;
};
66 1.1 mrg
67 1.1 mrg /* Free memory allocated by backtrace_alloc. */
68 1.1 mrg
69 1.1 mrg static void
70 1.1 mrg backtrace_free_locked (struct backtrace_state *state, void *addr, size_t size)
71 1.1 mrg {
72 1.1 mrg /* Just leak small blocks. We don't have to be perfect. */
73 1.1 mrg if (size >= sizeof (struct backtrace_freelist_struct))
74 1.1 mrg {
75 1.1 mrg struct backtrace_freelist_struct *p;
76 1.1 mrg
77 1.1 mrg p = (struct backtrace_freelist_struct *) addr;
78 1.1 mrg p->next = state->freelist;
79 1.1 mrg p->size = size;
80 1.1 mrg state->freelist = p;
81 1.1 mrg }
82 1.1 mrg }
83 1.1 mrg
/* Allocate memory like malloc.  If ERROR_CALLBACK is NULL, don't
   report an error.  This is designed to be async-signal safe
   (assuming mmap is): it never blocks on the allocation lock, and
   falls back to a fresh anonymous mapping when the lock is
   contended or the free list has no fit.  */

void *
backtrace_alloc (struct backtrace_state *state,
		 size_t size, backtrace_error_callback error_callback,
		 void *data)
{
  void *ret;
  int locked;
  struct backtrace_freelist_struct **pp;
  size_t pagesize;
  size_t asksize;
  void *page;

  ret = NULL;

  /* If we can acquire the lock, then see if there is space on the
     free list.  If we can't acquire the lock, drop straight into
     using mmap.  __sync_lock_test_and_set returns the old state of
     the lock, so we have acquired it if it returns 0.  */

  if (!state->threaded)
    locked = 1;
  else
    locked = __sync_lock_test_and_set (&state->lock_alloc, 1) == 0;

  if (locked)
    {
      /* First-fit scan of the free list.  */
      for (pp = &state->freelist; *pp != NULL; pp = &(*pp)->next)
	{
	  if ((*pp)->size >= size)
	    {
	      struct backtrace_freelist_struct *p;

	      /* Unlink the block from the list.  */
	      p = *pp;
	      *pp = p->next;

	      /* Round for alignment; we assume that no type we care about
		 is more than 8 bytes.  */
	      size = (size + 7) & ~ (size_t) 7;
	      /* Return any tail beyond the rounded request to the
		 free list (we still hold the lock here).  */
	      if (size < p->size)
		backtrace_free_locked (state, (char *) p + size,
				       p->size - size);

	      ret = (void *) p;

	      break;
	    }
	}

      if (state->threaded)
	__sync_lock_release (&state->lock_alloc);
    }

  if (ret == NULL)
    {
      /* Allocate a new page.  */

      pagesize = getpagesize ();
      /* Round the request up to a whole number of pages.  */
      asksize = (size + pagesize - 1) & ~ (pagesize - 1);
      page = mmap (NULL, asksize, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (page == MAP_FAILED)
	{
	  if (error_callback)
	    error_callback (data, "mmap", errno);
	}
      else
	{
	  /* Round for alignment as above, then hand the unused tail
	     of the page(s) to the allocator's free list.  We call the
	     unlocked backtrace_free because we dropped the lock (or
	     never held it) before mapping.  */
	  size = (size + 7) & ~ (size_t) 7;
	  if (size < asksize)
	    backtrace_free (state, (char *) page + size, asksize - size,
			    error_callback, data);

	  ret = page;
	}
    }

  return ret;
}
165 1.1 mrg
/* Free memory allocated by backtrace_alloc.  Large page-aligned
   blocks are returned to the system with munmap; everything else
   goes onto the free list, or is leaked if the allocation lock
   cannot be acquired (keeping this async-signal safe: we never
   block).  ERROR_CALLBACK/DATA are unused because failures here are
   deliberately ignored.  */

void
backtrace_free (struct backtrace_state *state, void *addr, size_t size,
		backtrace_error_callback error_callback ATTRIBUTE_UNUSED,
		void *data ATTRIBUTE_UNUSED)
{
  int locked;

  /* If we are freeing a large aligned block, just release it back to
     the system.  This case arises when growing a vector for a large
     binary with lots of debug info.  Calling munmap here may cause us
     to call mmap again if there is also a large shared library; we
     just live with that.  */
  if (size >= 16 * 4096)
    {
      size_t pagesize;

      pagesize = getpagesize ();
      /* Only whole, page-aligned regions can be munmapped.  */
      if (((uintptr_t) addr & (pagesize - 1)) == 0
	  && (size & (pagesize - 1)) == 0)
	{
	  /* If munmap fails for some reason, just add the block to
	     the freelist.  */
	  if (munmap (addr, size) == 0)
	    return;
	}
    }

  /* If we can acquire the lock, add the new space to the free list.
     If we can't acquire the lock, just leak the memory.
     __sync_lock_test_and_set returns the old state of the lock, so we
     have acquired it if it returns 0.  */

  if (!state->threaded)
    locked = 1;
  else
    locked = __sync_lock_test_and_set (&state->lock_alloc, 1) == 0;

  if (locked)
    {
      backtrace_free_locked (state, addr, size);

      if (state->threaded)
	__sync_lock_release (&state->lock_alloc);
    }
}
213 1.1 mrg
214 1.1 mrg /* Grow VEC by SIZE bytes. */
215 1.1 mrg
216 1.1 mrg void *
217 1.1 mrg backtrace_vector_grow (struct backtrace_state *state,size_t size,
218 1.1 mrg backtrace_error_callback error_callback,
219 1.1 mrg void *data, struct backtrace_vector *vec)
220 1.1 mrg {
221 1.1 mrg void *ret;
222 1.1 mrg
223 1.1 mrg if (size > vec->alc)
224 1.1 mrg {
225 1.1 mrg size_t pagesize;
226 1.1 mrg size_t alc;
227 1.1 mrg void *base;
228 1.1 mrg
229 1.1 mrg pagesize = getpagesize ();
230 1.1 mrg alc = vec->size + size;
231 1.1 mrg if (vec->size == 0)
232 1.1 mrg alc = 16 * size;
233 1.1 mrg else if (alc < pagesize)
234 1.1 mrg {
235 1.1 mrg alc *= 2;
236 1.1 mrg if (alc > pagesize)
237 1.1 mrg alc = pagesize;
238 1.1 mrg }
239 1.1 mrg else
240 1.1.1.2 mrg {
241 1.1.1.2 mrg alc *= 2;
242 1.1.1.2 mrg alc = (alc + pagesize - 1) & ~ (pagesize - 1);
243 1.1.1.2 mrg }
244 1.1 mrg base = backtrace_alloc (state, alc, error_callback, data);
245 1.1 mrg if (base == NULL)
246 1.1 mrg return NULL;
247 1.1 mrg if (vec->base != NULL)
248 1.1 mrg {
249 1.1 mrg memcpy (base, vec->base, vec->size);
250 1.1.1.2 mrg backtrace_free (state, vec->base, vec->size + vec->alc,
251 1.1.1.2 mrg error_callback, data);
252 1.1 mrg }
253 1.1 mrg vec->base = base;
254 1.1 mrg vec->alc = alc - vec->size;
255 1.1 mrg }
256 1.1 mrg
257 1.1 mrg ret = (char *) vec->base + vec->size;
258 1.1 mrg vec->size += size;
259 1.1 mrg vec->alc -= size;
260 1.1 mrg return ret;
261 1.1 mrg }
262 1.1 mrg
263 1.1 mrg /* Finish the current allocation on VEC. */
264 1.1 mrg
265 1.1 mrg void *
266 1.1 mrg backtrace_vector_finish (
267 1.1 mrg struct backtrace_state *state ATTRIBUTE_UNUSED,
268 1.1 mrg struct backtrace_vector *vec,
269 1.1 mrg backtrace_error_callback error_callback ATTRIBUTE_UNUSED,
270 1.1 mrg void *data ATTRIBUTE_UNUSED)
271 1.1 mrg {
272 1.1 mrg void *ret;
273 1.1 mrg
274 1.1 mrg ret = vec->base;
275 1.1 mrg vec->base = (char *) vec->base + vec->size;
276 1.1 mrg vec->size = 0;
277 1.1 mrg return ret;
278 1.1 mrg }
279 1.1 mrg
280 1.1 mrg /* Release any extra space allocated for VEC. */
281 1.1 mrg
282 1.1 mrg int
283 1.1 mrg backtrace_vector_release (struct backtrace_state *state,
284 1.1 mrg struct backtrace_vector *vec,
285 1.1 mrg backtrace_error_callback error_callback,
286 1.1 mrg void *data)
287 1.1 mrg {
288 1.1 mrg size_t size;
289 1.1 mrg size_t alc;
290 1.1 mrg size_t aligned;
291 1.1 mrg
292 1.1 mrg /* Make sure that the block that we free is aligned on an 8-byte
293 1.1 mrg boundary. */
294 1.1 mrg size = vec->size;
295 1.1 mrg alc = vec->alc;
296 1.1 mrg aligned = (size + 7) & ~ (size_t) 7;
297 1.1 mrg alc -= aligned - size;
298 1.1 mrg
299 1.1 mrg backtrace_free (state, (char *) vec->base + aligned, alc,
300 1.1 mrg error_callback, data);
301 1.1 mrg vec->alc = 0;
302 1.1 mrg return 1;
303 1.1 mrg }
304