generic-morestack.c revision 1.1.1.11 1 1.1 mrg /* Library support for -fsplit-stack. */
2 1.1.1.11 mrg /* Copyright (C) 2009-2024 Free Software Foundation, Inc.
3 1.1 mrg Contributed by Ian Lance Taylor <iant (at) google.com>.
4 1.1 mrg
5 1.1 mrg This file is part of GCC.
6 1.1 mrg
7 1.1 mrg GCC is free software; you can redistribute it and/or modify it under
8 1.1 mrg the terms of the GNU General Public License as published by the Free
9 1.1 mrg Software Foundation; either version 3, or (at your option) any later
10 1.1 mrg version.
11 1.1 mrg
12 1.1 mrg GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 1.1 mrg WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 1.1 mrg FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 1.1 mrg for more details.
16 1.1 mrg
17 1.1 mrg Under Section 7 of GPL version 3, you are granted additional
18 1.1 mrg permissions described in the GCC Runtime Library Exception, version
19 1.1 mrg 3.1, as published by the Free Software Foundation.
20 1.1 mrg
21 1.1 mrg You should have received a copy of the GNU General Public License and
22 1.1 mrg a copy of the GCC Runtime Library Exception along with this program;
23 1.1 mrg see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
24 1.1 mrg <http://www.gnu.org/licenses/>. */
25 1.1 mrg
26 1.1.1.9 mrg #pragma GCC optimize ("no-isolate-erroneous-paths-dereference")
27 1.1.1.9 mrg
28 1.1.1.3 mrg /* powerpc 32-bit not supported. */
29 1.1.1.3 mrg #if !defined __powerpc__ || defined __powerpc64__
30 1.1.1.3 mrg
31 1.1 mrg #include "tconfig.h"
32 1.1 mrg #include "tsystem.h"
33 1.1 mrg #include "coretypes.h"
34 1.1 mrg #include "tm.h"
35 1.1 mrg #include "libgcc_tm.h"
36 1.1 mrg
37 1.1.1.8 mrg /* If inhibit_libc is defined, we cannot compile this file. The
38 1.1 mrg effect is that people will not be able to use -fsplit-stack. That
39 1.1 mrg is much better than failing the build particularly since people
40 1.1 mrg will want to define inhibit_libc while building a compiler which
41 1.1 mrg can build glibc. */
42 1.1 mrg
43 1.1 mrg #ifndef inhibit_libc
44 1.1 mrg
45 1.1 mrg #include <assert.h>
46 1.1 mrg #include <errno.h>
47 1.1 mrg #include <signal.h>
48 1.1 mrg #include <stdlib.h>
49 1.1 mrg #include <string.h>
50 1.1 mrg #include <unistd.h>
51 1.1 mrg #include <sys/mman.h>
52 1.1 mrg #include <sys/uio.h>
53 1.1 mrg
54 1.1 mrg #include "generic-morestack.h"
55 1.1 mrg
56 1.1.1.9 mrg /* Some systems use LD_PRELOAD or similar tricks to add hooks to
57 1.1.1.9 mrg mmap/munmap. That breaks this code, because when we call mmap
58 1.1.1.9 mrg there is enough stack space for the system call but there is not,
59 1.1.1.9 mrg in general, enough stack space to run a hook. Try to avoid the
60 1.1.1.9 mrg problem by calling syscall directly. We only do this on GNU/Linux
61 1.1.1.9 mrg for now, but it should be easy to add support for more systems with
62 1.1.1.9 mrg testing. */
63 1.1.1.9 mrg
64 1.1.1.9 mrg #if defined(__gnu_linux__)
65 1.1.1.9 mrg
66 1.1.1.9 mrg #include <sys/syscall.h>
67 1.1.1.9 mrg
68 1.1.1.9 mrg #if defined(SYS_mmap) || defined(SYS_mmap2)
69 1.1.1.9 mrg
70 1.1.1.9 mrg #ifdef SYS_mmap2
71 1.1.1.9 mrg #define MORESTACK_MMAP SYS_mmap2
72 1.1.1.9 mrg #define MORESTACK_ADJUST_OFFSET(x) ((x) / 4096ULL)
73 1.1.1.9 mrg #else
74 1.1.1.9 mrg #define MORESTACK_MMAP SYS_mmap
75 1.1.1.9 mrg #define MORESTACK_ADJUST_OFFSET(x) (x)
76 1.1.1.9 mrg #endif
77 1.1.1.9 mrg
/* Wrapper that performs mmap via a direct system call, bypassing any
   LD_PRELOAD-style mmap hooks which might need more stack space than
   we have (see the comment above).  Same interface as mmap.  */

static void *
morestack_mmap (void *addr, size_t length, int prot, int flags, int fd,
		off_t offset)
{
  /* SYS_mmap2 takes the offset in 4096-byte pages rather than bytes;
     MORESTACK_ADJUST_OFFSET is the identity when plain SYS_mmap is
     used.  */
  offset = MORESTACK_ADJUST_OFFSET (offset);

#ifdef __s390__
  /* On s390 the mmap system call takes a single pointer to a block
     of arguments rather than six separate arguments.  */
  long args[6] = { (long) addr, (long) length, (long) prot, (long) flags,
		   (long) fd, (long) offset };
  return (void *) syscall (MORESTACK_MMAP, args);
#else
  return (void *) syscall (MORESTACK_MMAP, addr, length, prot, flags, fd,
			   offset);
#endif
}
93 1.1.1.9 mrg
94 1.1.1.9 mrg #define mmap morestack_mmap
95 1.1.1.9 mrg
#endif /* defined(SYS_mmap) || defined(SYS_mmap2) */
97 1.1.1.9 mrg
98 1.1.1.9 mrg #if defined(SYS_munmap)
99 1.1.1.9 mrg
/* Wrapper that performs munmap via a direct system call, for the same
   reason as morestack_mmap above: avoid hooked munmap implementations
   that might use too much stack.  Same interface as munmap.  */

static int
morestack_munmap (void * addr, size_t length)
{
  return (int) syscall (SYS_munmap, addr, length);
}
105 1.1.1.9 mrg
106 1.1.1.9 mrg #define munmap morestack_munmap
107 1.1.1.9 mrg
108 1.1.1.9 mrg #endif /* defined(SYS_munmap) */
109 1.1.1.9 mrg
110 1.1.1.9 mrg #endif /* defined(__gnu_linux__) */
111 1.1.1.9 mrg
112 1.1 mrg typedef unsigned uintptr_type __attribute__ ((mode (pointer)));
113 1.1 mrg
114 1.1 mrg /* This file contains subroutines that are used by code compiled with
115 1.1 mrg -fsplit-stack. */
116 1.1 mrg
117 1.1 mrg /* Declare functions to avoid warnings--there is no header file for
118 1.1 mrg these internal functions. We give most of these functions the
119 1.1 mrg flatten attribute in order to minimize their stack usage--here we
120 1.1 mrg must minimize stack usage even at the cost of code size, and in
121 1.1 mrg general inlining everything will do that. */
122 1.1 mrg
123 1.1 mrg extern void
124 1.1 mrg __generic_morestack_set_initial_sp (void *sp, size_t len)
125 1.1 mrg __attribute__ ((no_split_stack, flatten, visibility ("hidden")));
126 1.1 mrg
127 1.1 mrg extern void *
128 1.1 mrg __generic_morestack (size_t *frame_size, void *old_stack, size_t param_size)
129 1.1 mrg __attribute__ ((no_split_stack, flatten, visibility ("hidden")));
130 1.1 mrg
131 1.1 mrg extern void *
132 1.1 mrg __generic_releasestack (size_t *pavailable)
133 1.1 mrg __attribute__ ((no_split_stack, flatten, visibility ("hidden")));
134 1.1 mrg
135 1.1 mrg extern void
136 1.1 mrg __morestack_block_signals (void)
137 1.1 mrg __attribute__ ((no_split_stack, flatten, visibility ("hidden")));
138 1.1 mrg
139 1.1 mrg extern void
140 1.1 mrg __morestack_unblock_signals (void)
141 1.1 mrg __attribute__ ((no_split_stack, flatten, visibility ("hidden")));
142 1.1 mrg
143 1.1 mrg extern size_t
144 1.1 mrg __generic_findstack (void *stack)
145 1.1 mrg __attribute__ ((no_split_stack, flatten, visibility ("hidden")));
146 1.1 mrg
147 1.1 mrg extern void
148 1.1 mrg __morestack_load_mmap (void)
149 1.1 mrg __attribute__ ((no_split_stack, visibility ("hidden")));
150 1.1 mrg
151 1.1 mrg extern void *
152 1.1 mrg __morestack_allocate_stack_space (size_t size)
153 1.1 mrg __attribute__ ((visibility ("hidden")));
154 1.1 mrg
155 1.1 mrg /* These are functions which -fsplit-stack code can call. These are
156 1.1 mrg not called by the compiler, and are not hidden. FIXME: These
157 1.1 mrg should be in some header file somewhere, somehow. */
158 1.1 mrg
159 1.1 mrg extern void *
160 1.1 mrg __splitstack_find (void *, void *, size_t *, void **, void **, void **)
161 1.1 mrg __attribute__ ((visibility ("default")));
162 1.1 mrg
163 1.1 mrg extern void
164 1.1 mrg __splitstack_block_signals (int *, int *)
165 1.1 mrg __attribute__ ((visibility ("default")));
166 1.1 mrg
167 1.1 mrg extern void
168 1.1 mrg __splitstack_getcontext (void *context[10])
169 1.1 mrg __attribute__ ((no_split_stack, visibility ("default")));
170 1.1 mrg
171 1.1 mrg extern void
172 1.1 mrg __splitstack_setcontext (void *context[10])
173 1.1 mrg __attribute__ ((no_split_stack, visibility ("default")));
174 1.1 mrg
175 1.1 mrg extern void *
176 1.1 mrg __splitstack_makecontext (size_t, void *context[10], size_t *)
177 1.1 mrg __attribute__ ((visibility ("default")));
178 1.1 mrg
179 1.1 mrg extern void *
180 1.1 mrg __splitstack_resetcontext (void *context[10], size_t *)
181 1.1 mrg __attribute__ ((visibility ("default")));
182 1.1 mrg
183 1.1 mrg extern void
184 1.1 mrg __splitstack_releasecontext (void *context[10])
185 1.1 mrg __attribute__ ((visibility ("default")));
186 1.1 mrg
187 1.1 mrg extern void
188 1.1 mrg __splitstack_block_signals_context (void *context[10], int *, int *)
189 1.1 mrg __attribute__ ((visibility ("default")));
190 1.1 mrg
191 1.1 mrg extern void *
192 1.1 mrg __splitstack_find_context (void *context[10], size_t *, void **, void **,
193 1.1 mrg void **)
194 1.1 mrg __attribute__ ((visibility ("default")));
195 1.1 mrg
196 1.1 mrg /* These functions must be defined by the processor specific code. */
197 1.1 mrg
198 1.1 mrg extern void *__morestack_get_guard (void)
199 1.1 mrg __attribute__ ((no_split_stack, visibility ("hidden")));
200 1.1 mrg
201 1.1 mrg extern void __morestack_set_guard (void *)
202 1.1 mrg __attribute__ ((no_split_stack, visibility ("hidden")));
203 1.1 mrg
204 1.1 mrg extern void *__morestack_make_guard (void *, size_t)
205 1.1 mrg __attribute__ ((no_split_stack, visibility ("hidden")));
206 1.1 mrg
207 1.1 mrg /* When we allocate a stack segment we put this header at the
208 1.1 mrg start. */
209 1.1 mrg
struct stack_segment
{
  /* The previous stack segment--when a function running on this stack
     segment returns, it will run on the previous one.  */
  struct stack_segment *prev;
  /* The next stack segment, if it has been allocated--when a function
     is running on this stack segment, the next one is not being
     used.  */
  struct stack_segment *next;
  /* The usable size of this stack segment.  Note that this does not
     include this header itself: allocate_segment stores the mapping
     size minus the header overhead here, and
     __morestack_release_segments adds the header size back before
     calling munmap.  */
  size_t size;
  /* The stack address when this stack was created.  This is used when
     popping the stack.  */
  void *old_stack;
  /* A list of memory blocks allocated by dynamic stack
     allocation (alloca / variably sized arrays).  */
  struct dynamic_allocation_blocks *dynamic_allocation;
  /* A list of dynamic memory blocks no longer needed.  */
  struct dynamic_allocation_blocks *free_dynamic_allocation;
  /* An extra pointer in case we need some more information some
     day.  */
  void *extra;
};
233 1.1 mrg
234 1.1 mrg /* This structure holds the (approximate) initial stack pointer and
235 1.1 mrg size for the system supplied stack for a thread. This is set when
236 1.1 mrg the thread is created. We also store a sigset_t here to hold the
237 1.1 mrg signal mask while splitting the stack, since we don't want to store
238 1.1 mrg that on the stack. */
239 1.1 mrg
struct initial_sp
{
  /* The initial stack pointer, as recorded (rounded to a 512-byte
     boundary) by __generic_morestack_set_initial_sp.  */
  void *sp;
  /* The stack length.  */
  size_t len;
  /* A signal mask, put here so that the thread can use it without
     needing stack space.  */
  sigset_t mask;
  /* Non-zero if we should not block signals.  This is a reversed flag
     so that the default zero value is the safe value.  The type is
     uintptr_type because it replaced one of the void * pointers in
     extra.  */
  uintptr_type dont_block_signals;
  /* Some extra space for later extensibility.  */
  void *extra[4];
};
257 1.1 mrg
258 1.1 mrg /* A list of memory blocks allocated by dynamic stack allocation.
259 1.1 mrg This is used for code that calls alloca or uses variably sized
260 1.1 mrg arrays. */
261 1.1 mrg
struct dynamic_allocation_blocks
{
  /* The next block in the list.  */
  struct dynamic_allocation_blocks *next;
  /* The size in bytes of the allocated memory.  */
  size_t size;
  /* The allocated memory (obtained from malloc in
     __morestack_allocate_stack_space).  */
  void *block;
};
271 1.1 mrg
272 1.1 mrg /* These thread local global variables must be shared by all split
273 1.1 mrg stack code across shared library boundaries. Therefore, they have
274 1.1 mrg default visibility. They have extensibility fields if needed for
275 1.1 mrg new versions. If more radical changes are needed, new code can be
276 1.1 mrg written using new variable names, while still using the existing
277 1.1 mrg variables in a backward compatible manner. Symbol versioning is
278 1.1 mrg also used, although, since these variables are only referenced by
279 1.1 mrg code in this file and generic-morestack-thread.c, it is likely that
280 1.1 mrg simply using new names will suffice. */
281 1.1 mrg
282 1.1 mrg /* The first stack segment allocated for this thread. */
283 1.1 mrg
__thread struct stack_segment *__morestack_segments
  __attribute__ ((visibility ("default")));

/* The stack segment that we think we are currently using.  This will
   be correct in normal usage, but will be incorrect if an exception
   unwinds into a different stack segment or if longjmp jumps to a
   different stack segment.  */

__thread struct stack_segment *__morestack_current_segment
  __attribute__ ((visibility ("default")));

/* The initial stack pointer and size for this thread.  */

__thread struct initial_sp __morestack_initial_sp
  __attribute__ ((visibility ("default")));

/* A static signal mask, to avoid taking up stack space.  Filled in
   by __generic_morestack_set_initial_sp.  */

static sigset_t __morestack_fullmask;

/* Page size, as returned from getpagesize().  Set on startup.  */
static unsigned int static_pagesize;

/* Set on startup to non-zero value if SPLIT_STACK_GUARD env var is set.  */
static int use_guard_page;
309 1.1.1.7 mrg
/* Convert an integer to a decimal string without using much stack
   space.  Return a pointer to the part of the buffer to use.  We use
   this instead of sprintf because sprintf will require too much stack
   space.  */
314 1.1 mrg
/* Format VAL in decimal at the end of BUF (capacity BUFLEN), working
   backwards from the last byte.  Store the number of characters
   produced in *PRINT_LEN and return a pointer to the first character
   of the result.  If the buffer is too small the most significant
   digits (and possibly the sign) are silently dropped.  */

static char *
print_int (int val, char *buf, int buflen, size_t *print_len)
{
  unsigned int magnitude = (unsigned int) val;
  int negative = val < 0;
  int pos = buflen;

  /* Negate in unsigned arithmetic so INT_MIN is handled without
     signed overflow.  */
  if (negative)
    magnitude = - magnitude;

  /* Emit digits least-significant first, filling the buffer from the
     end.  At least one digit is always produced, even for zero.  */
  do
    {
      buf[--pos] = '0' + (magnitude % 10);
      magnitude /= 10;
    }
  while (magnitude != 0 && pos > 0);

  if (negative)
    {
      if (pos > 0)
	--pos;
      buf[pos] = '-';
    }

  *print_len = buflen - pos;
  return buf + pos;
}
350 1.1 mrg
351 1.1 mrg /* Print the string MSG/LEN, the errno number ERR, and a newline on
352 1.1 mrg stderr. Then crash. */
353 1.1 mrg
/* Report MSG/LEN, the errno value ERR, and a newline on stderr, then
   crash.  Uses writev and a hand-rolled integer formatter because
   stdio and strerror could require more stack space than we have.  */

void
__morestack_fail (const char *, size_t, int) __attribute__ ((noreturn));

void
__morestack_fail (const char *msg, size_t len, int err)
{
  static const char newline[] = "\n";
  char numbuf[24];
  struct iovec vec[3];
  union { char *p; const char *cp; } deconst;

  deconst.cp = msg;
  vec[0].iov_base = deconst.p;
  vec[0].iov_len = len;
  /* We can't call strerror, because it may try to translate the error
     message, and that would use too much stack space; print the raw
     errno number instead.  */
  vec[1].iov_base = print_int (err, numbuf, sizeof numbuf, &vec[1].iov_len);
  deconst.cp = newline;
  vec[2].iov_base = deconst.p;
  vec[2].iov_len = sizeof newline - 1;
  /* FIXME: On systems without writev we need to issue three write
     calls, or punt on printing errno.  For now this is irrelevant
     since stack splitting only works on GNU/Linux anyhow.  */
  writev (2, vec, 3);
  abort ();
}
380 1.1 mrg
381 1.1 mrg /* Allocate a new stack segment. FRAME_SIZE is the required frame
382 1.1 mrg size. */
383 1.1 mrg
static struct stack_segment *
allocate_segment (size_t frame_size)
{
  unsigned int pagesize;
  unsigned int overhead;
  unsigned int allocate;
  void *space;
  struct stack_segment *pss;

  pagesize = static_pagesize;
  overhead = sizeof (struct stack_segment);

  /* Allocate at least one page; enlarge to MINSIGSTKSZ (so a signal
     handler can run on the segment) and to FRAME_SIZE as needed,
     rounding up to a page boundary and leaving room for the
     stack_segment header.  */
  allocate = pagesize;
  if (allocate < MINSIGSTKSZ)
    allocate = ((MINSIGSTKSZ + overhead + pagesize - 1)
		& ~ (pagesize - 1));
  if (allocate < frame_size)
    allocate = ((frame_size + overhead + pagesize - 1)
		& ~ (pagesize - 1));

  /* Reserve an extra page for a PROT_NONE guard page when requested
     via the SPLIT_STACK_GUARD environment variable (see
     use_guard_page above).  */
  if (use_guard_page)
    allocate += pagesize;

  /* FIXME: If this binary requires an executable stack, then we need
     to set PROT_EXEC.  Unfortunately figuring that out is complicated
     and target dependent.  We would need to use dl_iterate_phdr to
     see if there is any object which does not have a PT_GNU_STACK
     phdr, though only for architectures which use that mechanism.  */
  space = mmap (NULL, allocate, PROT_READ | PROT_WRITE,
		MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  if (space == MAP_FAILED)
    {
      static const char msg[] =
	"unable to allocate additional stack space: errno ";
      __morestack_fail (msg, sizeof msg - 1, errno);
    }

  if (use_guard_page)
    {
      void *guard;

      /* Place the guard page at the low end of the mapping when the
	 stack grows downward, at the high end otherwise.  */
#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
      guard = space;
      space = (char *) space + pagesize;
#else
      guard = space + allocate - pagesize;
#endif

      mprotect (guard, pagesize, PROT_NONE);
      allocate -= pagesize;
    }

  /* The stack_segment header lives at the start of the mapping; the
     usable stack space follows it.  */
  pss = (struct stack_segment *) space;

  pss->prev = NULL;
  pss->next = NULL;
  pss->size = allocate - overhead;
  pss->dynamic_allocation = NULL;
  pss->free_dynamic_allocation = NULL;
  pss->extra = NULL;

  return pss;
}
447 1.1 mrg
448 1.1 mrg /* Free a list of dynamic blocks. */
449 1.1 mrg
450 1.1 mrg static void
451 1.1 mrg free_dynamic_blocks (struct dynamic_allocation_blocks *p)
452 1.1 mrg {
453 1.1 mrg while (p != NULL)
454 1.1 mrg {
455 1.1 mrg struct dynamic_allocation_blocks *next;
456 1.1 mrg
457 1.1 mrg next = p->next;
458 1.1 mrg free (p->block);
459 1.1 mrg free (p);
460 1.1 mrg p = next;
461 1.1 mrg }
462 1.1 mrg }
463 1.1 mrg
464 1.1 mrg /* Merge two lists of dynamic blocks. */
465 1.1 mrg
466 1.1 mrg static struct dynamic_allocation_blocks *
467 1.1 mrg merge_dynamic_blocks (struct dynamic_allocation_blocks *a,
468 1.1 mrg struct dynamic_allocation_blocks *b)
469 1.1 mrg {
470 1.1 mrg struct dynamic_allocation_blocks **pp;
471 1.1 mrg
472 1.1 mrg if (a == NULL)
473 1.1 mrg return b;
474 1.1 mrg if (b == NULL)
475 1.1 mrg return a;
476 1.1 mrg for (pp = &a->next; *pp != NULL; pp = &(*pp)->next)
477 1.1 mrg ;
478 1.1 mrg *pp = b;
479 1.1 mrg return a;
480 1.1 mrg }
481 1.1 mrg
482 1.1 mrg /* Release stack segments. If FREE_DYNAMIC is non-zero, we also free
483 1.1 mrg any dynamic blocks. Otherwise we return them. */
484 1.1 mrg
struct dynamic_allocation_blocks *
__morestack_release_segments (struct stack_segment **pp, int free_dynamic)
{
  struct dynamic_allocation_blocks *ret;
  struct stack_segment *pss;

  ret = NULL;
  pss = *pp;
  while (pss != NULL)
    {
      struct stack_segment *next;
      unsigned int allocate;

      next = pss->next;

      /* Dispose of any dynamic (alloca / variably sized array) blocks
	 attached to this segment: free them outright if FREE_DYNAMIC,
	 otherwise collect them on the returned list so the caller can
	 deal with them later.  */
      if (pss->dynamic_allocation != NULL
	  || pss->free_dynamic_allocation != NULL)
	{
	  if (free_dynamic)
	    {
	      free_dynamic_blocks (pss->dynamic_allocation);
	      free_dynamic_blocks (pss->free_dynamic_allocation);
	    }
	  else
	    {
	      ret = merge_dynamic_blocks (pss->dynamic_allocation, ret);
	      ret = merge_dynamic_blocks (pss->free_dynamic_allocation, ret);
	    }
	}

      /* pss->size excludes the stack_segment header, so add it back
	 to recover the length originally passed to mmap.
	 NOTE(review): when a guard page was allocated it is not part
	 of this length and so is not unmapped here -- confirm that is
	 intentional.  */
      allocate = pss->size + sizeof (struct stack_segment);
      if (munmap (pss, allocate) < 0)
	{
	  static const char msg[] = "munmap of stack space failed: errno ";
	  __morestack_fail (msg, sizeof msg - 1, errno);
	}

      pss = next;
    }
  *pp = NULL;

  return ret;
}
528 1.1 mrg
529 1.1 mrg /* This function is called by a processor specific function to set the
530 1.1 mrg initial stack pointer for a thread. The operating system will
531 1.1 mrg always create a stack for a thread. Here we record a stack pointer
532 1.1 mrg near the base of that stack. The size argument lets the processor
533 1.1 mrg specific code estimate how much stack space is available on this
534 1.1 mrg initial stack. */
535 1.1 mrg
void
__generic_morestack_set_initial_sp (void *sp, size_t len)
{
  /* The stack pointer most likely starts on a page boundary.  Adjust
     to the nearest 512 byte boundary.  It's not essential that we be
     precise here; getting it wrong will just leave some stack space
     unused.  */
#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
  sp = (void *) ((((__UINTPTR_TYPE__) sp + 511U) / 512U) * 512U);
#else
  sp = (void *) ((((__UINTPTR_TYPE__) sp - 511U) / 512U) * 512U);
#endif

  __morestack_initial_sp.sp = sp;
  __morestack_initial_sp.len = len;
  /* Start with an empty saved signal mask.  */
  sigemptyset (&__morestack_initial_sp.mask);

  /* Precompute the all-signals mask used while splitting the stack,
     so that no stack space is needed for it later.  */
  sigfillset (&__morestack_fullmask);
#if defined(__GLIBC__) && defined(__linux__)
  /* In glibc, the first two real time signals are used by the NPTL
     threading library.  By taking them out of the set of signals, we
     avoid copying the signal mask in pthread_sigmask.  More
     importantly, pthread_sigmask uses less stack space on x86_64.  */
  sigdelset (&__morestack_fullmask, __SIGRTMIN);
  sigdelset (&__morestack_fullmask, __SIGRTMIN + 1);
#endif
}
563 1.1 mrg
564 1.1 mrg /* This function is called by a processor specific function which is
565 1.1 mrg run in the prologue when more stack is needed. The processor
566 1.1 mrg specific function handles the details of saving registers and
567 1.1 mrg frobbing the actual stack pointer. This function is responsible
568 1.1 mrg for allocating a new stack segment and for copying a parameter
569 1.1 mrg block from the old stack to the new one. On function entry
570 1.1 mrg *PFRAME_SIZE is the size of the required stack frame--the returned
571 1.1 mrg stack must be at least this large. On function exit *PFRAME_SIZE
572 1.1 mrg is the amount of space remaining on the allocated stack. OLD_STACK
573 1.1 mrg points at the parameters the old stack (really the current one
574 1.1 mrg while this function is running). OLD_STACK is saved so that it can
575 1.1 mrg be returned by a later call to __generic_releasestack. PARAM_SIZE
576 1.1 mrg is the size in bytes of parameters to copy to the new stack. This
577 1.1 mrg function returns a pointer to the new stack segment, pointing to
578 1.1 mrg the memory after the parameters have been copied. The returned
579 1.1 mrg value minus the returned *PFRAME_SIZE (or plus if the stack grows
580 1.1 mrg upward) is the first address on the stack which should not be used.
581 1.1 mrg
582 1.1 mrg This function is running on the old stack and has only a limited
583 1.1 mrg amount of stack space available. */
584 1.1 mrg
585 1.1 mrg void *
586 1.1 mrg __generic_morestack (size_t *pframe_size, void *old_stack, size_t param_size)
587 1.1 mrg {
588 1.1 mrg size_t frame_size = *pframe_size;
589 1.1 mrg struct stack_segment *current;
590 1.1 mrg struct stack_segment **pp;
591 1.1 mrg struct dynamic_allocation_blocks *dynamic;
592 1.1 mrg char *from;
593 1.1 mrg char *to;
594 1.1 mrg void *ret;
595 1.1 mrg size_t i;
596 1.1 mrg size_t aligned;
597 1.1 mrg
598 1.1 mrg current = __morestack_current_segment;
599 1.1 mrg
600 1.1 mrg pp = current != NULL ? ¤t->next : &__morestack_segments;
601 1.1 mrg if (*pp != NULL && (*pp)->size < frame_size)
602 1.1 mrg dynamic = __morestack_release_segments (pp, 0);
603 1.1 mrg else
604 1.1 mrg dynamic = NULL;
605 1.1 mrg current = *pp;
606 1.1 mrg
607 1.1 mrg if (current == NULL)
608 1.1 mrg {
609 1.1 mrg current = allocate_segment (frame_size + param_size);
610 1.1 mrg current->prev = __morestack_current_segment;
611 1.1 mrg *pp = current;
612 1.1 mrg }
613 1.1 mrg
614 1.1 mrg current->old_stack = old_stack;
615 1.1 mrg
616 1.1 mrg __morestack_current_segment = current;
617 1.1 mrg
618 1.1 mrg if (dynamic != NULL)
619 1.1 mrg {
620 1.1 mrg /* Move the free blocks onto our list. We don't want to call
621 1.1 mrg free here, as we are short on stack space. */
622 1.1 mrg current->free_dynamic_allocation =
623 1.1 mrg merge_dynamic_blocks (dynamic, current->free_dynamic_allocation);
624 1.1 mrg }
625 1.1 mrg
626 1.1 mrg *pframe_size = current->size - param_size;
627 1.1 mrg
628 1.1 mrg /* Align the returned stack to a 32-byte boundary. */
629 1.1 mrg aligned = (param_size + 31) & ~ (size_t) 31;
630 1.1 mrg
631 1.1.1.2 mrg #ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
632 1.1 mrg {
633 1.1 mrg char *bottom = (char *) (current + 1) + current->size;
634 1.1 mrg to = bottom - aligned;
635 1.1 mrg ret = bottom - aligned;
636 1.1 mrg }
637 1.1 mrg #else
638 1.1 mrg to = current + 1;
639 1.1 mrg to += aligned - param_size;
640 1.1 mrg ret = (char *) (current + 1) + aligned;
641 1.1 mrg #endif
642 1.1 mrg
643 1.1 mrg /* We don't call memcpy to avoid worrying about the dynamic linker
644 1.1 mrg trying to resolve it. */
645 1.1 mrg from = (char *) old_stack;
646 1.1 mrg for (i = 0; i < param_size; i++)
647 1.1 mrg *to++ = *from++;
648 1.1 mrg
649 1.1 mrg return ret;
650 1.1 mrg }
651 1.1 mrg
652 1.1 mrg /* This function is called by a processor specific function when it is
653 1.1 mrg ready to release a stack segment. We don't actually release the
654 1.1 mrg stack segment, we just move back to the previous one. The current
655 1.1 mrg stack segment will still be available if we need it in
656 1.1 mrg __generic_morestack. This returns a pointer to the new stack
657 1.1 mrg segment to use, which is the one saved by a previous call to
658 1.1 mrg __generic_morestack. The processor specific function is then
659 1.1 mrg responsible for actually updating the stack pointer. This sets
660 1.1 mrg *PAVAILABLE to the amount of stack space now available. */
661 1.1 mrg
void *
__generic_releasestack (size_t *pavailable)
{
  struct stack_segment *current;
  void *old_stack;

  /* Pop back to the previous segment.  The stack pointer to restore
     was saved in old_stack by __generic_morestack; the popped segment
     itself stays allocated for reuse.  */
  current = __morestack_current_segment;
  old_stack = current->old_stack;
  current = current->prev;
  __morestack_current_segment = current;

  if (current != NULL)
    {
      /* Space remaining in the segment we are returning to, between
	 the end of its header and the saved stack pointer.  */
#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
      *pavailable = (char *) old_stack - (char *) (current + 1);
#else
      *pavailable = (char *) (current + 1) + current->size - (char *) old_stack;
#endif
    }
  else
    {
      size_t used;

      /* We have popped back to the original stack.  Estimate how much
	 of it is already in use, clamping to zero if old_stack lies
	 outside the recorded initial region.  */
#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
      if ((char *) old_stack >= (char *) __morestack_initial_sp.sp)
	used = 0;
      else
	used = (char *) __morestack_initial_sp.sp - (char *) old_stack;
#else
      if ((char *) old_stack <= (char *) __morestack_initial_sp.sp)
	used = 0;
      else
	used = (char *) old_stack - (char *) __morestack_initial_sp.sp;
#endif

      if (used > __morestack_initial_sp.len)
	*pavailable = 0;
      else
	*pavailable = __morestack_initial_sp.len - used;
    }

  return old_stack;
}
706 1.1 mrg
707 1.1 mrg /* Block signals while splitting the stack. This avoids trouble if we
708 1.1 mrg try to invoke a signal handler which itself wants to split the
709 1.1 mrg stack. */
710 1.1 mrg
711 1.1 mrg extern int pthread_sigmask (int, const sigset_t *, sigset_t *)
712 1.1 mrg __attribute__ ((weak));
713 1.1 mrg
714 1.1 mrg void
715 1.1 mrg __morestack_block_signals (void)
716 1.1 mrg {
717 1.1 mrg if (__morestack_initial_sp.dont_block_signals)
718 1.1 mrg ;
719 1.1 mrg else if (pthread_sigmask)
720 1.1 mrg pthread_sigmask (SIG_BLOCK, &__morestack_fullmask,
721 1.1 mrg &__morestack_initial_sp.mask);
722 1.1 mrg else
723 1.1 mrg sigprocmask (SIG_BLOCK, &__morestack_fullmask,
724 1.1 mrg &__morestack_initial_sp.mask);
725 1.1 mrg }
726 1.1 mrg
727 1.1 mrg /* Unblock signals while splitting the stack. */
728 1.1 mrg
729 1.1 mrg void
730 1.1 mrg __morestack_unblock_signals (void)
731 1.1 mrg {
732 1.1 mrg if (__morestack_initial_sp.dont_block_signals)
733 1.1 mrg ;
734 1.1 mrg else if (pthread_sigmask)
735 1.1 mrg pthread_sigmask (SIG_SETMASK, &__morestack_initial_sp.mask, NULL);
736 1.1 mrg else
737 1.1 mrg sigprocmask (SIG_SETMASK, &__morestack_initial_sp.mask, NULL);
738 1.1 mrg }
739 1.1 mrg
740 1.1 mrg /* This function is called to allocate dynamic stack space, for alloca
741 1.1 mrg or a variably sized array. This is a regular function with
742 1.1 mrg sufficient stack space, so we just use malloc to allocate the
743 1.1 mrg space. We attach the allocated blocks to the current stack
744 1.1 mrg segment, so that they will eventually be reused or freed. */
745 1.1 mrg
void *
__morestack_allocate_stack_space (size_t size)
{
  struct stack_segment *seg, *current;
  struct dynamic_allocation_blocks *p;

  /* We have to block signals to avoid getting confused if we get
     interrupted by a signal whose handler itself uses alloca or a
     variably sized array.  */
  __morestack_block_signals ();

  /* Since we don't want to call free while we are low on stack space,
     we may have a list of already allocated blocks waiting to be
     freed.  Release them all, unless we find one that is large
     enough.  We don't look at every block to see if one is large
     enough, just the first one, because we aren't trying to build a
     memory allocator here, we're just trying to speed up common
     cases.  */

  current = __morestack_current_segment;
  p = NULL;
  for (seg = __morestack_segments; seg != NULL; seg = seg->next)
    {
      p = seg->free_dynamic_allocation;
      if (p != NULL)
	{
	  if (p->size >= size)
	    {
	      /* Reuse the first free block when it is big enough,
		 unlinking it from the segment's free list.  */
	      seg->free_dynamic_allocation = p->next;
	      break;
	    }

	  /* Otherwise release this segment's entire free list and
	     keep scanning.  */
	  free_dynamic_blocks (p);
	  seg->free_dynamic_allocation = NULL;
	  p = NULL;
	}
    }

  if (p == NULL)
    {
      /* We need to allocate additional memory.  Allocation failure
	 aborts: there is no way to report failure to the compiled
	 alloca/VLA code that calls this.  */
      p = malloc (sizeof (*p));
      if (p == NULL)
	abort ();
      p->size = size;
      p->block = malloc (size);
      if (p->block == NULL)
	abort ();
    }

  /* If we are still on the initial stack, then we have a space leak.
     FIXME.  */
  if (current != NULL)
    {
      p->next = current->dynamic_allocation;
      current->dynamic_allocation = p;
    }

  __morestack_unblock_signals ();

  return p->block;
}
808 1.1 mrg
/* Find the stack segment for STACK and return the amount of space
   available.  This is used when unwinding the stack because of an
   exception, in order to reset the stack guard correctly.  Returns 0
   when STACK is outside the recorded initial-stack bounds or no
   initial stack pointer has been recorded.  */

size_t
__generic_findstack (void *stack)
{
  struct stack_segment *pss;
  size_t used;

  /* Walk back through the segment list looking for the segment that
     contains STACK; when found, make it the current segment again.  */
  for (pss = __morestack_current_segment; pss != NULL; pss = pss->prev)
    {
      if ((char *) pss < (char *) stack
	  && (char *) pss + pss->size > (char *) stack)
	{
	  __morestack_current_segment = pss;
	  /* The usable area begins just past the segment header
	     (pss + 1); which end holds the free space depends on the
	     direction of stack growth.  */
#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
	  return (char *) stack - (char *) (pss + 1);
#else
	  return (char *) (pss + 1) + pss->size - (char *) stack;
#endif
	}
    }

  /* We have popped back to the original stack.  */

  if (__morestack_initial_sp.sp == NULL)
    return 0;

  /* Compute how much of the initial stack has been consumed, clamping
     to zero if STACK is on the unused side of the recorded initial
     stack pointer.  */
#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
  if ((char *) stack >= (char *) __morestack_initial_sp.sp)
    used = 0;
  else
    used = (char *) __morestack_initial_sp.sp - (char *) stack;
#else
  if ((char *) stack <= (char *) __morestack_initial_sp.sp)
    used = 0;
  else
    used = (char *) stack - (char *) __morestack_initial_sp.sp;
#endif

  if (used > __morestack_initial_sp.len)
    return 0;
  else
    return __morestack_initial_sp.len - used;
}
855 1.1 mrg
/* This function is called at program startup time to make sure that
   mmap, munmap, and getpagesize are resolved if linking dynamically.
   We want to resolve them while we have enough stack for them, rather
   than calling into the dynamic linker while low on stack space.
   Similarly, invoke getenv here to check for split-stack related control
   variables, since doing so as part of the __morestack path can result
   in unwanted use of SSE/AVX registers (see GCC PR 86213).  */

void
__morestack_load_mmap (void)
{
  /* Call with bogus values to run faster.  We don't care if the call
     fails.  Pass __MORESTACK_CURRENT_SEGMENT to make sure that any
     TLS accessor function is resolved.  */
  mmap (__morestack_current_segment, 0, PROT_READ, MAP_ANONYMOUS, -1, 0);
  mprotect (NULL, 0, 0);
  munmap (0, static_pagesize);

  /* Initialize these values here, so as to avoid dynamic linker
     activity as part of a __morestack call.  */
  static_pagesize = getpagesize();
  use_guard_page = getenv ("SPLIT_STACK_GUARD") != 0;
}
879 1.1 mrg
/* This function may be used to iterate over the stack segments.
   This can be called like this.
     void *next_segment = NULL;
     void *next_sp = NULL;
     void *initial_sp = NULL;
     void *stack;
     size_t stack_size;
     while ((stack = __splitstack_find (next_segment, next_sp, &stack_size,
                                        &next_segment, &next_sp,
					&initial_sp)) != NULL)
       {
         // Stack segment starts at stack and is stack_size bytes long.
       }

   There is no way to iterate over the stack segments of a different
   thread.  However, what is permitted is for one thread to call this
   with the first two values NULL, to pass next_segment, next_sp, and
   initial_sp to a different thread, and then to suspend one way or
   another.  A different thread may run the subsequent
   __splitstack_find iterations.  Of course, this will only work if the
   first thread is suspended during the __splitstack_find iterations.
   If not, the second thread will be looking at the stack while it is
   changing, and anything could happen.

   SEGMENT_ARG uses two sentinel values: (void *) 1 means "look at the
   initial, non-split stack next" and (void *) 2 means "iteration is
   finished".

   FIXME: This should be declared in some header file, but where?  */

void *
__splitstack_find (void *segment_arg, void *sp, size_t *len,
		   void **next_segment, void **next_sp,
		   void **initial_sp)
{
  struct stack_segment *segment;
  void *ret;
  char *nsp;

  /* Sentinel 1: report the portion of the initial stack between SP
     and the recorded initial stack pointer, then arrange to stop.  */
  if (segment_arg == (void *) (uintptr_type) 1)
    {
      char *isp = (char *) *initial_sp;

      if (isp == NULL)
	return NULL;

      *next_segment = (void *) (uintptr_type) 2;
      *next_sp = NULL;
#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
      if ((char *) sp >= isp)
	return NULL;
      *len = (char *) isp - (char *) sp;
      return sp;
#else
      if ((char *) sp <= (char *) isp)
	return NULL;
      *len = (char *) sp - (char *) isp;
      return (void *) isp;
#endif
    }
  /* Sentinel 2: iteration already complete.  */
  else if (segment_arg == (void *) (uintptr_type) 2)
    return NULL;
  else if (segment_arg != NULL)
    segment = (struct stack_segment *) segment_arg;
  else
    {
      /* First call: start from the current thread's state, using the
	 address of a local as an approximation of the live stack
	 pointer, and find the segment containing it.  */
      *initial_sp = __morestack_initial_sp.sp;
      segment = __morestack_current_segment;
      sp = (void *) &segment;
      while (1)
	{
	  if (segment == NULL)
	    return __splitstack_find ((void *) (uintptr_type) 1, sp, len,
				      next_segment, next_sp, initial_sp);
	  if ((char *) sp >= (char *) (segment + 1)
	      && (char *) sp <= (char *) (segment + 1) + segment->size)
	    break;
	  segment = segment->prev;
	}
    }

  if (segment->prev == NULL)
    *next_segment = (void *) (uintptr_type) 1;
  else
    *next_segment = segment->prev;

  /* The old_stack value is the address of the function parameters of
     the function which called __morestack.  So if f1 called f2 which
     called __morestack, the stack looks like this:

         parameters       <- old_stack
         return in f1
	 return in f2
	 registers pushed by __morestack

     The registers pushed by __morestack may not be visible on any
     other stack, if we are being called by a signal handler
     immediately after the call to __morestack_unblock_signals.  We
     want to adjust our return value to include those registers.  This
     is target dependent.  */

  nsp = (char *) segment->old_stack;

  if (nsp == NULL)
    {
      /* We've reached the top of the stack.  */
      *next_segment = (void *) (uintptr_type) 2;
    }
  else
    {
      /* Per-target adjustment for the register save area pushed by
	 __morestack (none needed on powerpc64).  */
#if defined (__x86_64__)
      nsp -= 12 * sizeof (void *);
#elif defined (__i386__)
      nsp -= 6 * sizeof (void *);
#elif defined __powerpc64__
#elif defined __s390x__
      nsp -= 2 * 160;
#elif defined __s390__
      nsp -= 2 * 96;
#else
#error "unrecognized target"
#endif

      *next_sp = (void *) nsp;
    }

#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
  *len = (char *) (segment + 1) + segment->size - (char *) sp;
  ret = (void *) sp;
#else
  *len = (char *) sp - (char *) (segment + 1);
  ret = (void *) (segment + 1);
#endif

  return ret;
}
1012 1.1 mrg
1013 1.1 mrg /* Tell the split stack code whether it has to block signals while
1014 1.1 mrg manipulating the stack. This is for programs in which some threads
1015 1.1 mrg block all signals. If a thread already blocks signals, there is no
1016 1.1 mrg need for the split stack code to block them as well. If NEW is not
1017 1.1 mrg NULL, then if *NEW is non-zero signals will be blocked while
1018 1.1 mrg splitting the stack, otherwise they will not. If OLD is not NULL,
1019 1.1 mrg *OLD will be set to the old value. */
1020 1.1 mrg
1021 1.1 mrg void
1022 1.1 mrg __splitstack_block_signals (int *new, int *old)
1023 1.1 mrg {
1024 1.1 mrg if (old != NULL)
1025 1.1 mrg *old = __morestack_initial_sp.dont_block_signals ? 0 : 1;
1026 1.1 mrg if (new != NULL)
1027 1.1 mrg __morestack_initial_sp.dont_block_signals = *new ? 0 : 1;
1028 1.1 mrg }
1029 1.1 mrg
/* The offsets into the arrays used by __splitstack_getcontext and
   __splitstack_setcontext.  */

enum __splitstack_context_offsets
{
  MORESTACK_SEGMENTS = 0,	/* Value of __morestack_segments.  */
  CURRENT_SEGMENT = 1,		/* Value of __morestack_current_segment.  */
  CURRENT_STACK = 2,		/* Approximate stack pointer at save time.  */
  STACK_GUARD = 3,		/* Value returned by __morestack_get_guard.  */
  INITIAL_SP = 4,		/* __morestack_initial_sp.sp.  */
  INITIAL_SP_LEN = 5,		/* __morestack_initial_sp.len.  */
  BLOCK_SIGNALS = 6,		/* __morestack_initial_sp.dont_block_signals.  */

  /* Slots 7-9 are reserved for future extension; callers must supply
     arrays of this size.  */
  NUMBER_OFFSETS = 10
};
1045 1.1 mrg
/* Get the current split stack context.  This may be used for
   coroutine switching, similar to getcontext.  The argument should
   have at least 10 void *pointers for extensibility, although we
   don't currently use all of them.  This would normally be called
   immediately before a call to getcontext or swapcontext or
   setjmp.  */

void
__splitstack_getcontext (void *context[NUMBER_OFFSETS])
{
  /* Zero the whole array first so that reserved slots are in a known
     state.  */
  memset (context, 0, NUMBER_OFFSETS * sizeof (void *));
  context[MORESTACK_SEGMENTS] = (void *) __morestack_segments;
  context[CURRENT_SEGMENT] = (void *) __morestack_current_segment;
  /* The address of a local serves as an approximation of the stack
     pointer at the moment of the snapshot.  */
  context[CURRENT_STACK] = (void *) &context;
  context[STACK_GUARD] = __morestack_get_guard ();
  context[INITIAL_SP] = (void *) __morestack_initial_sp.sp;
  context[INITIAL_SP_LEN] = (void *) (uintptr_type) __morestack_initial_sp.len;
  context[BLOCK_SIGNALS] = (void *) __morestack_initial_sp.dont_block_signals;
}
1065 1.1 mrg
/* Set the current split stack context.  The argument should be a
   context previously passed to __splitstack_getcontext.  This would
   normally be called immediately after a call to getcontext or
   swapcontext or setjmp if something jumped to it.  Note that
   CURRENT_STACK is not restored here; it is only consulted by
   __splitstack_find_context.  */

void
__splitstack_setcontext (void *context[NUMBER_OFFSETS])
{
  __morestack_segments = (struct stack_segment *) context[MORESTACK_SEGMENTS];
  __morestack_current_segment =
    (struct stack_segment *) context[CURRENT_SEGMENT];
  __morestack_set_guard (context[STACK_GUARD]);
  __morestack_initial_sp.sp = context[INITIAL_SP];
  __morestack_initial_sp.len = (size_t) context[INITIAL_SP_LEN];
  __morestack_initial_sp.dont_block_signals =
    (uintptr_type) context[BLOCK_SIGNALS];
}
1083 1.1 mrg
/* Create a new split stack context.  This will allocate a new stack
   segment which may be used by a coroutine.  STACK_SIZE is the
   minimum size of the new stack.  The caller is responsible for
   actually setting the stack pointer.  This would normally be called
   before a call to makecontext, and the returned stack pointer and
   size would be used to set the uc_stack field.  A function called
   via makecontext on a stack created by __splitstack_makecontext may
   not return.  Note that the returned pointer points to the lowest
   address in the stack space, and thus may not be the value to which
   to set the stack pointer.  *SIZE is set to the actual usable size
   of the segment, which may exceed STACK_SIZE.  */

void *
__splitstack_makecontext (size_t stack_size, void *context[NUMBER_OFFSETS],
			  size_t *size)
{
  struct stack_segment *segment;
  void *initial_sp;

  memset (context, 0, NUMBER_OFFSETS * sizeof (void *));
  segment = allocate_segment (stack_size);
  context[MORESTACK_SEGMENTS] = segment;
  context[CURRENT_SEGMENT] = segment;
  /* The usable area begins just past the segment header; the guard is
     anchored at the end from which the stack grows.  */
#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
  initial_sp = (void *) ((char *) (segment + 1) + segment->size);
#else
  initial_sp = (void *) (segment + 1);
#endif
  context[STACK_GUARD] = __morestack_make_guard (initial_sp, segment->size);
  /* No initial (non-split) stack is associated with this context.  */
  context[INITIAL_SP] = NULL;
  context[INITIAL_SP_LEN] = 0;
  *size = segment->size;
  return (void *) (segment + 1);
}
1117 1.1 mrg
/* Given an existing split stack context, reset it back to the start
   of the stack.  Return the stack pointer and size, appropriate for
   use with makecontext.  This may be used if a coroutine exits, in
   order to reuse the stack segments for a new coroutine.  *SIZE is
   set to the usable size of the first segment (or of the initial
   stack, if the context has no segments).  */

void *
__splitstack_resetcontext (void *context[10], size_t *size)
{
  struct stack_segment *segment;
  void *initial_sp;
  size_t initial_size;
  void *ret;

  /* Reset the context assuming that MORESTACK_SEGMENTS, INITIAL_SP
     and INITIAL_SP_LEN are correct.  */

  segment = context[MORESTACK_SEGMENTS];
  context[CURRENT_SEGMENT] = segment;
  context[CURRENT_STACK] = NULL;
  if (segment == NULL)
    {
      /* No segments: fall back to the recorded initial stack.  The
	 returned pointer is always the lowest address of the usable
	 region.  */
      initial_sp = context[INITIAL_SP];
      initial_size = (uintptr_type) context[INITIAL_SP_LEN];
      ret = initial_sp;
#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
      ret = (void *) ((char *) ret - initial_size);
#endif
    }
  else
    {
      /* Recompute the starting stack pointer of the first segment,
	 mirroring __splitstack_makecontext.  */
#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
      initial_sp = (void *) ((char *) (segment + 1) + segment->size);
#else
      initial_sp = (void *) (segment + 1);
#endif
      initial_size = segment->size;
      ret = (void *) (segment + 1);
    }
  context[STACK_GUARD] = __morestack_make_guard (initial_sp, initial_size);
  context[BLOCK_SIGNALS] = NULL;
  *size = initial_size;
  return ret;
}
1161 1.1 mrg
1162 1.1 mrg /* Release all the memory associated with a splitstack context. This
1163 1.1 mrg may be used if a coroutine exits and the associated stack should be
1164 1.1 mrg freed. */
1165 1.1 mrg
1166 1.1 mrg void
1167 1.1 mrg __splitstack_releasecontext (void *context[10])
1168 1.1 mrg {
1169 1.1 mrg __morestack_release_segments (((struct stack_segment **)
1170 1.1 mrg &context[MORESTACK_SEGMENTS]),
1171 1.1 mrg 1);
1172 1.1 mrg }
1173 1.1 mrg
1174 1.1 mrg /* Like __splitstack_block_signals, but operating on CONTEXT, rather
1175 1.1 mrg than on the current state. */
1176 1.1 mrg
1177 1.1 mrg void
1178 1.1 mrg __splitstack_block_signals_context (void *context[NUMBER_OFFSETS], int *new,
1179 1.1 mrg int *old)
1180 1.1 mrg {
1181 1.1 mrg if (old != NULL)
1182 1.1 mrg *old = ((uintptr_type) context[BLOCK_SIGNALS]) != 0 ? 0 : 1;
1183 1.1 mrg if (new != NULL)
1184 1.1 mrg context[BLOCK_SIGNALS] = (void *) (uintptr_type) (*new ? 0 : 1);
1185 1.1 mrg }
1186 1.1 mrg
/* Find the stack segments associated with a split stack context.
   This will return the address of the first stack segment and set
   *STACK_SIZE to its size.  It will set next_segment, next_sp, and
   initial_sp which may be passed to __splitstack_find to find the
   remaining segments.  Returns NULL if the context holds no usable
   stack information.  */

void *
__splitstack_find_context (void *context[NUMBER_OFFSETS], size_t *stack_size,
			   void **next_segment, void **next_sp,
			   void **initial_sp)
{
  void *sp;
  struct stack_segment *segment;

  *initial_sp = context[INITIAL_SP];

  sp = context[CURRENT_STACK];
  if (sp == NULL)
    {
      /* Most likely this context was created but was never used.  The
	 value 2 is a code used by __splitstack_find to mean that we
	 have reached the end of the list of stacks.  */
      *next_segment = (void *) (uintptr_type) 2;
      *next_sp = NULL;
      *initial_sp = NULL;
      return NULL;
    }

  segment = context[CURRENT_SEGMENT];
  if (segment == NULL)
    {
      /* Most likely this context was saved by a thread which was not
	 created using __splitstack_makecontext and which has never
	 split the stack.  The value 1 is a code used by
	 __splitstack_find to look at the initial stack.  */
      segment = (struct stack_segment *) (uintptr_type) 1;
    }

  return __splitstack_find (segment, sp, stack_size, next_segment, next_sp,
			    initial_sp);
}
1228 1.1 mrg
1229 1.1 mrg #endif /* !defined (inhibit_libc) */
1230 1.1.1.3 mrg #endif /* not powerpc 32-bit */
1231