/* mmap.c -- Memory allocation with mmap.
   Copyright (C) 2012-2021 Free Software Foundation, Inc.
   Written by Ian Lance Taylor, Google.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

    (1) Redistributions of source code must retain the above copyright
    notice, this list of conditions and the following disclaimer.

    (2) Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in
    the documentation and/or other materials provided with the
    distribution.

    (3) The name of the author may not be used to
    endorse or promote products derived from this software without
    specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.  */
32 1.1 christos
#include "config.h"

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "backtrace.h"
#include "internal.h"
44 1.1 christos
45 1.1 christos #ifndef HAVE_DECL_GETPAGESIZE
46 1.1 christos extern int getpagesize (void);
47 1.1 christos #endif
48 1.1 christos
49 1.1 christos /* Memory allocation on systems that provide anonymous mmap. This
50 1.1 christos permits the backtrace functions to be invoked from a signal
51 1.1 christos handler, assuming that mmap is async-signal safe. */
52 1.1 christos
53 1.1 christos #ifndef MAP_ANONYMOUS
54 1.1 christos #define MAP_ANONYMOUS MAP_ANON
55 1.1 christos #endif
56 1.1 christos
57 1.1 christos #ifndef MAP_FAILED
58 1.1 christos #define MAP_FAILED ((void *)-1)
59 1.1 christos #endif
60 1.1 christos
/* A list of free memory blocks.  The list header is stored directly
   inside each free block, so a block must be at least
   sizeof (struct backtrace_freelist_struct) bytes to be tracked;
   smaller blocks are leaked (see backtrace_free_locked).  */

struct backtrace_freelist_struct
{
  /* Next on list.  */
  struct backtrace_freelist_struct *next;
  /* Size of this block, including this structure.  */
  size_t size;
};
70 1.1 christos
71 1.1 christos /* Free memory allocated by backtrace_alloc. */
72 1.1 christos
73 1.1 christos static void
74 1.1 christos backtrace_free_locked (struct backtrace_state *state, void *addr, size_t size)
75 1.1 christos {
76 1.1 christos /* Just leak small blocks. We don't have to be perfect. Don't put
77 1.1 christos more than 16 entries on the free list, to avoid wasting time
78 1.1 christos searching when allocating a block. If we have more than 16
79 1.1 christos entries, leak the smallest entry. */
80 1.1 christos
81 1.1 christos if (size >= sizeof (struct backtrace_freelist_struct))
82 1.1 christos {
83 1.1 christos size_t c;
84 1.1 christos struct backtrace_freelist_struct **ppsmall;
85 1.1 christos struct backtrace_freelist_struct **pp;
86 1.1 christos struct backtrace_freelist_struct *p;
87 1.1 christos
88 1.1 christos c = 0;
89 1.1 christos ppsmall = NULL;
90 1.1 christos for (pp = &state->freelist; *pp != NULL; pp = &(*pp)->next)
91 1.1 christos {
92 1.1 christos if (ppsmall == NULL || (*pp)->size < (*ppsmall)->size)
93 1.1 christos ppsmall = pp;
94 1.1 christos ++c;
95 1.1 christos }
96 1.1 christos if (c >= 16)
97 1.1 christos {
98 1.1 christos if (size <= (*ppsmall)->size)
99 1.1 christos return;
100 1.1 christos *ppsmall = (*ppsmall)->next;
101 1.1 christos }
102 1.1 christos
103 1.1 christos p = (struct backtrace_freelist_struct *) addr;
104 1.1 christos p->next = state->freelist;
105 1.1 christos p->size = size;
106 1.1 christos state->freelist = p;
107 1.1 christos }
108 1.1 christos }
109 1.1 christos
/* Allocate memory like malloc.  If ERROR_CALLBACK is NULL, don't
   report an error.  Returns NULL on failure.  This avoids the heap
   allocator entirely so it can be used from a signal handler,
   assuming mmap itself is async-signal safe on this system.  */

void *
backtrace_alloc (struct backtrace_state *state,
		 size_t size, backtrace_error_callback error_callback,
		 void *data)
{
  void *ret;
  int locked;
  struct backtrace_freelist_struct **pp;
  size_t pagesize;
  size_t asksize;
  void *page;

  ret = NULL;

  /* If we can acquire the lock, then see if there is space on the
     free list.  If we can't acquire the lock, drop straight into
     using mmap.  __sync_lock_test_and_set returns the old state of
     the lock, so we have acquired it if it returns 0.  */

  if (!state->threaded)
    locked = 1;
  else
    locked = __sync_lock_test_and_set (&state->lock_alloc, 1) == 0;

  if (locked)
    {
      /* First-fit search of the free list.  */
      for (pp = &state->freelist; *pp != NULL; pp = &(*pp)->next)
	{
	  if ((*pp)->size >= size)
	    {
	      struct backtrace_freelist_struct *p;

	      /* Unlink the block from the list.  */
	      p = *pp;
	      *pp = p->next;

	      /* Round for alignment; we assume that no type we care about
		 is more than 8 bytes.  */
	      size = (size + 7) & ~ (size_t) 7;
	      /* Return any unused tail of the block to the free list
		 (still holding the lock).  */
	      if (size < p->size)
		backtrace_free_locked (state, (char *) p + size,
				       p->size - size);

	      ret = (void *) p;

	      break;
	    }
	}

      if (state->threaded)
	__sync_lock_release (&state->lock_alloc);
    }

  if (ret == NULL)
    {
      /* Allocate a new page.  */

      pagesize = getpagesize ();
      /* Round the request up to a whole number of pages.  */
      asksize = (size + pagesize - 1) & ~ (pagesize - 1);
      page = mmap (NULL, asksize, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (page == MAP_FAILED)
	{
	  if (error_callback)
	    error_callback (data, "mmap", errno);
	}
      else
	{
	  /* Hand the unused tail of the mapping back to the free
	     list via the public entry point (which takes the lock).  */
	  size = (size + 7) & ~ (size_t) 7;
	  if (size < asksize)
	    backtrace_free (state, (char *) page + size, asksize - size,
			    error_callback, data);

	  ret = page;
	}
    }

  return ret;
}
191 1.1 christos
192 1.1 christos /* Free memory allocated by backtrace_alloc. */
193 1.1 christos
194 1.1 christos void
195 1.1 christos backtrace_free (struct backtrace_state *state, void *addr, size_t size,
196 1.1 christos backtrace_error_callback error_callback ATTRIBUTE_UNUSED,
197 1.1 christos void *data ATTRIBUTE_UNUSED)
198 1.1 christos {
199 1.1 christos int locked;
200 1.1 christos
201 1.1 christos /* If we are freeing a large aligned block, just release it back to
202 1.1 christos the system. This case arises when growing a vector for a large
203 1.1 christos binary with lots of debug info. Calling munmap here may cause us
204 1.1 christos to call mmap again if there is also a large shared library; we
205 1.1 christos just live with that. */
206 1.1 christos if (size >= 16 * 4096)
207 1.1 christos {
208 1.1 christos size_t pagesize;
209 1.1 christos
210 1.1 christos pagesize = getpagesize ();
211 1.1 christos if (((uintptr_t) addr & (pagesize - 1)) == 0
212 1.1 christos && (size & (pagesize - 1)) == 0)
213 1.1 christos {
214 1.1 christos /* If munmap fails for some reason, just add the block to
215 1.1 christos the freelist. */
216 1.1 christos if (munmap (addr, size) == 0)
217 1.1 christos return;
218 1.1 christos }
219 1.1 christos }
220 1.1 christos
221 1.1 christos /* If we can acquire the lock, add the new space to the free list.
222 1.1 christos If we can't acquire the lock, just leak the memory.
223 1.1 christos __sync_lock_test_and_set returns the old state of the lock, so we
224 1.1 christos have acquired it if it returns 0. */
225 1.1 christos
226 1.1 christos if (!state->threaded)
227 1.1 christos locked = 1;
228 1.1 christos else
229 1.1 christos locked = __sync_lock_test_and_set (&state->lock_alloc, 1) == 0;
230 1.1 christos
231 1.1 christos if (locked)
232 1.1 christos {
233 1.1 christos backtrace_free_locked (state, addr, size);
234 1.1 christos
235 1.1 christos if (state->threaded)
236 1.1 christos __sync_lock_release (&state->lock_alloc);
237 1.1 christos }
238 1.1 christos }
239 1.1 christos
240 1.1 christos /* Grow VEC by SIZE bytes. */
241 1.1 christos
242 1.1 christos void *
243 1.1 christos backtrace_vector_grow (struct backtrace_state *state,size_t size,
244 1.1 christos backtrace_error_callback error_callback,
245 1.1 christos void *data, struct backtrace_vector *vec)
246 1.1 christos {
247 1.1 christos void *ret;
248 1.1 christos
249 1.1 christos if (size > vec->alc)
250 1.1 christos {
251 1.1 christos size_t pagesize;
252 1.1 christos size_t alc;
253 1.1 christos void *base;
254 1.1 christos
255 1.1 christos pagesize = getpagesize ();
256 1.1 christos alc = vec->size + size;
257 1.1 christos if (vec->size == 0)
258 1.1 christos alc = 16 * size;
259 1.1 christos else if (alc < pagesize)
260 1.1 christos {
261 1.1 christos alc *= 2;
262 1.1 christos if (alc > pagesize)
263 1.1 christos alc = pagesize;
264 1.1 christos }
265 1.1 christos else
266 1.1 christos {
267 1.1 christos alc *= 2;
268 1.1 christos alc = (alc + pagesize - 1) & ~ (pagesize - 1);
269 1.1 christos }
270 1.1 christos base = backtrace_alloc (state, alc, error_callback, data);
271 1.1 christos if (base == NULL)
272 1.1 christos return NULL;
273 1.1 christos if (vec->base != NULL)
274 1.1 christos {
275 1.1 christos memcpy (base, vec->base, vec->size);
276 1.1 christos backtrace_free (state, vec->base, vec->size + vec->alc,
277 1.1 christos error_callback, data);
278 1.1 christos }
279 1.1 christos vec->base = base;
280 1.1 christos vec->alc = alc - vec->size;
281 1.1 christos }
282 1.1 christos
283 1.1 christos ret = (char *) vec->base + vec->size;
284 1.1 christos vec->size += size;
285 1.1 christos vec->alc -= size;
286 1.1 christos return ret;
287 1.1 christos }
288 1.1 christos
289 1.1 christos /* Finish the current allocation on VEC. */
290 1.1 christos
291 1.1 christos void *
292 1.1 christos backtrace_vector_finish (
293 1.1 christos struct backtrace_state *state ATTRIBUTE_UNUSED,
294 1.1 christos struct backtrace_vector *vec,
295 1.1 christos backtrace_error_callback error_callback ATTRIBUTE_UNUSED,
296 1.1 christos void *data ATTRIBUTE_UNUSED)
297 1.1 christos {
298 1.1 christos void *ret;
299 1.1 christos
300 1.1 christos ret = vec->base;
301 1.1 christos vec->base = (char *) vec->base + vec->size;
302 1.1 christos vec->size = 0;
303 1.1 christos return ret;
304 1.1 christos }
305 1.1 christos
306 1.1 christos /* Release any extra space allocated for VEC. */
307 1.1 christos
308 1.1 christos int
309 1.1 christos backtrace_vector_release (struct backtrace_state *state,
310 1.1 christos struct backtrace_vector *vec,
311 1.1 christos backtrace_error_callback error_callback,
312 1.1 christos void *data)
313 1.1 christos {
314 1.1 christos size_t size;
315 1.1 christos size_t alc;
316 1.1 christos size_t aligned;
317 1.1 christos
318 1.1 christos /* Make sure that the block that we free is aligned on an 8-byte
319 1.1 christos boundary. */
320 1.1 christos size = vec->size;
321 1.1 christos alc = vec->alc;
322 1.1 christos aligned = (size + 7) & ~ (size_t) 7;
323 1.1 christos alc -= aligned - size;
324 1.1 christos
325 1.1 christos backtrace_free (state, (char *) vec->base + aligned, alc,
326 1.1 christos error_callback, data);
327 1.1 christos vec->alc = 0;
328 1.1 christos if (vec->size == 0)
329 1.1 christos vec->base = NULL;
330 1.1 christos return 1;
331 1.1 christos }
332