/* Library support for -fsplit-stack.  */
2 1.1.1.8 mrg /* Copyright (C) 2009-2019 Free Software Foundation, Inc.
3 1.1 mrg Contributed by Ian Lance Taylor <iant (at) google.com>.
4 1.1 mrg
5 1.1 mrg This file is part of GCC.
6 1.1 mrg
7 1.1 mrg GCC is free software; you can redistribute it and/or modify it under
8 1.1 mrg the terms of the GNU General Public License as published by the Free
9 1.1 mrg Software Foundation; either version 3, or (at your option) any later
10 1.1 mrg version.
11 1.1 mrg
12 1.1 mrg GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 1.1 mrg WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 1.1 mrg FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 1.1 mrg for more details.
16 1.1 mrg
17 1.1 mrg Under Section 7 of GPL version 3, you are granted additional
18 1.1 mrg permissions described in the GCC Runtime Library Exception, version
19 1.1 mrg 3.1, as published by the Free Software Foundation.
20 1.1 mrg
21 1.1 mrg You should have received a copy of the GNU General Public License and
22 1.1 mrg a copy of the GCC Runtime Library Exception along with this program;
23 1.1 mrg see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
24 1.1 mrg <http://www.gnu.org/licenses/>. */
25 1.1 mrg
26 1.1.1.3 mrg /* powerpc 32-bit not supported. */
27 1.1.1.3 mrg #if !defined __powerpc__ || defined __powerpc64__
28 1.1.1.3 mrg
29 1.1 mrg #include "tconfig.h"
30 1.1 mrg #include "tsystem.h"
31 1.1 mrg #include "coretypes.h"
32 1.1 mrg #include "tm.h"
33 1.1 mrg #include "libgcc_tm.h"
34 1.1 mrg
35 1.1.1.8 mrg /* If inhibit_libc is defined, we cannot compile this file. The
36 1.1 mrg effect is that people will not be able to use -fsplit-stack. That
37 1.1 mrg is much better than failing the build particularly since people
38 1.1 mrg will want to define inhibit_libc while building a compiler which
39 1.1 mrg can build glibc. */
40 1.1 mrg
41 1.1 mrg #ifndef inhibit_libc
42 1.1 mrg
43 1.1 mrg #include <assert.h>
44 1.1 mrg #include <errno.h>
45 1.1 mrg #include <signal.h>
46 1.1 mrg #include <stdlib.h>
47 1.1 mrg #include <string.h>
48 1.1 mrg #include <unistd.h>
49 1.1 mrg #include <sys/mman.h>
50 1.1 mrg #include <sys/uio.h>
51 1.1 mrg
52 1.1 mrg #include "generic-morestack.h"
53 1.1 mrg
/* An unsigned integer type with the same width as a pointer; used
   below where a flag replaced one of the void * extension slots.  */
typedef unsigned uintptr_type __attribute__ ((mode (pointer)));

/* This file contains subroutines that are used by code compiled with
   -fsplit-stack.  */

/* Declare functions to avoid warnings--there is no header file for
   these internal functions.  We give most of these functions the
   flatten attribute in order to minimize their stack usage--here we
   must minimize stack usage even at the cost of code size, and in
   general inlining everything will do that.  */

extern void
__generic_morestack_set_initial_sp (void *sp, size_t len)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

extern void *
__generic_morestack (size_t *frame_size, void *old_stack, size_t param_size)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

extern void *
__generic_releasestack (size_t *pavailable)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

extern void
__morestack_block_signals (void)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

extern void
__morestack_unblock_signals (void)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

extern size_t
__generic_findstack (void *stack)
  __attribute__ ((no_split_stack, flatten, visibility ("hidden")));

extern void
__morestack_load_mmap (void)
  __attribute__ ((no_split_stack, visibility ("hidden")));

extern void *
__morestack_allocate_stack_space (size_t size)
  __attribute__ ((visibility ("hidden")));

/* These are functions which -fsplit-stack code can call.  These are
   not called by the compiler, and are not hidden.  FIXME: These
   should be in some header file somewhere, somehow.  */

extern void *
__splitstack_find (void *, void *, size_t *, void **, void **, void **)
  __attribute__ ((visibility ("default")));

extern void
__splitstack_block_signals (int *, int *)
  __attribute__ ((visibility ("default")));

extern void
__splitstack_getcontext (void *context[10])
  __attribute__ ((no_split_stack, visibility ("default")));

extern void
__splitstack_setcontext (void *context[10])
  __attribute__ ((no_split_stack, visibility ("default")));

extern void *
__splitstack_makecontext (size_t, void *context[10], size_t *)
  __attribute__ ((visibility ("default")));

extern void *
__splitstack_resetcontext (void *context[10], size_t *)
  __attribute__ ((visibility ("default")));

extern void
__splitstack_releasecontext (void *context[10])
  __attribute__ ((visibility ("default")));

extern void
__splitstack_block_signals_context (void *context[10], int *, int *)
  __attribute__ ((visibility ("default")));

extern void *
__splitstack_find_context (void *context[10], size_t *, void **, void **,
			   void **)
  __attribute__ ((visibility ("default")));

/* These functions must be defined by the processor specific code.  */

extern void *__morestack_get_guard (void)
  __attribute__ ((no_split_stack, visibility ("hidden")));

extern void __morestack_set_guard (void *)
  __attribute__ ((no_split_stack, visibility ("hidden")));

extern void *__morestack_make_guard (void *, size_t)
  __attribute__ ((no_split_stack, visibility ("hidden")));
148 1.1 mrg
/* When we allocate a stack segment we put this header at the
   start.  */

struct stack_segment
{
  /* The previous stack segment--when a function running on this stack
     segment returns, it will run on the previous one.  */
  struct stack_segment *prev;
  /* The next stack segment, if it has been allocated--when a function
     is running on this stack segment, the next one is not being
     used.  */
  struct stack_segment *next;
  /* The total size of this stack segment, not counting this header
     (see allocate_segment, which subtracts the overhead).  */
  size_t size;
  /* The stack address when this stack was created.  This is used when
     popping the stack.  */
  void *old_stack;
  /* A list of memory blocks allocated by dynamic stack
     allocation.  */
  struct dynamic_allocation_blocks *dynamic_allocation;
  /* A list of dynamic memory blocks no longer needed.  */
  struct dynamic_allocation_blocks *free_dynamic_allocation;
  /* An extra pointer in case we need some more information some
     day.  */
  void *extra;
};
175 1.1 mrg
/* This structure holds the (approximate) initial stack pointer and
   size for the system supplied stack for a thread.  This is set when
   the thread is created.  We also store a sigset_t here to hold the
   signal mask while splitting the stack, since we don't want to store
   that on the stack.  */

struct initial_sp
{
  /* The initial stack pointer.  */
  void *sp;
  /* The stack length.  */
  size_t len;
  /* A signal mask, put here so that the thread can use it without
     needing stack space.  */
  sigset_t mask;
  /* Non-zero if we should not block signals.  This is a reversed flag
     so that the default zero value is the safe value.  The type is
     uintptr_type because it replaced one of the void * pointers in
     extra.  */
  uintptr_type dont_block_signals;
  /* Some extra space for later extensibility.  */
  void *extra[4];
};
199 1.1 mrg
/* A list of memory blocks allocated by dynamic stack allocation.
   This is used for code that calls alloca or uses variably sized
   arrays.  */

struct dynamic_allocation_blocks
{
  /* The next block in the list.  */
  struct dynamic_allocation_blocks *next;
  /* The size of the allocated memory (the payload, not this node).  */
  size_t size;
  /* The allocated memory, obtained from malloc.  */
  void *block;
};
213 1.1 mrg
/* These thread local global variables must be shared by all split
   stack code across shared library boundaries.  Therefore, they have
   default visibility.  They have extensibility fields if needed for
   new versions.  If more radical changes are needed, new code can be
   written using new variable names, while still using the existing
   variables in a backward compatible manner.  Symbol versioning is
   also used, although, since these variables are only referenced by
   code in this file and generic-morestack-thread.c, it is likely that
   simply using new names will suffice.  */

/* The first stack segment allocated for this thread.  */

__thread struct stack_segment *__morestack_segments
  __attribute__ ((visibility ("default")));

/* The stack segment that we think we are currently using.  This will
   be correct in normal usage, but will be incorrect if an exception
   unwinds into a different stack segment or if longjmp jumps to a
   different stack segment.  */

__thread struct stack_segment *__morestack_current_segment
  __attribute__ ((visibility ("default")));

/* The initial stack pointer and size for this thread.  */

__thread struct initial_sp __morestack_initial_sp
  __attribute__ ((visibility ("default")));

/* A static signal mask, to avoid taking up stack space.  Filled in
   by __generic_morestack_set_initial_sp.  */

static sigset_t __morestack_fullmask;

/* Page size, as returned from getpagesize().  Set on startup by
   __morestack_load_mmap.  */
static unsigned int static_pagesize;

/* Set on startup to a non-zero value if the SPLIT_STACK_GUARD
   environment variable is set.  */
static int use_guard_page;
251 1.1.1.7 mrg
/* Format VAL as a decimal string in the tail end of BUF (which is
   BUFLEN bytes long).  We use this instead of sprintf because
   sprintf would require too much stack space.  Sets *PRINT_LEN to
   the number of characters produced and returns a pointer to the
   first character of the result inside BUF.  */

static char *
print_int (int val, char *buf, int buflen, size_t *print_len)
{
  unsigned int rest;
  int negative;
  int pos;

  /* Work in unsigned arithmetic so that INT_MIN negates safely.  */
  negative = val < 0;
  rest = (unsigned int) val;
  if (negative)
    rest = - rest;

  /* Emit digits from least significant to most, filling BUF from the
     end backward.  */
  pos = buflen;
  do
    {
      --pos;
      buf[pos] = '0' + (rest % 10);
      rest /= 10;
    }
  while (rest != 0 && pos > 0);

  if (negative)
    {
      if (pos > 0)
	--pos;
      buf[pos] = '-';
    }

  *print_len = (size_t) (buflen - pos);
  return buf + pos;
}
292 1.1 mrg
/* Print the string MSG/LEN, the errno number ERR, and a newline on
   stderr.  Then crash.  */

void
__morestack_fail (const char *, size_t, int) __attribute__ ((noreturn));

void
__morestack_fail (const char *msg, size_t len, int err)
{
  static const char newline[] = "\n";
  char numbuf[24];
  struct iovec vec[3];
  union { char *p; const char *cp; } deconst;

  /* writev's iov_base is not const-qualified, so launder the
     const pointers through a union instead of casting.  */
  deconst.cp = msg;
  vec[0].iov_base = deconst.p;
  vec[0].iov_len = len;

  /* We can't call strerror, because it may try to translate the error
     message, and that would use too much stack space.  Print the raw
     errno value instead.  */
  vec[1].iov_base = print_int (err, numbuf, sizeof numbuf, &vec[1].iov_len);

  deconst.cp = &newline[0];
  vec[2].iov_base = deconst.p;
  vec[2].iov_len = sizeof newline - 1;

  /* FIXME: On systems without writev we need to issue three write
     calls, or punt on printing errno.  For now this is irrelevant
     since stack splitting only works on GNU/Linux anyhow.  */
  writev (2, vec, 3);
  abort ();
}
322 1.1 mrg
/* Allocate a new stack segment.  FRAME_SIZE is the required frame
   size.  Returns a pointer to the segment header; the usable stack
   space follows the header.  Crashes via __morestack_fail if the
   mmap fails.  */

static struct stack_segment *
allocate_segment (size_t frame_size)
{
  unsigned int pagesize;
  unsigned int overhead;
  unsigned int allocate;
  void *space;
  struct stack_segment *pss;

  /* static_pagesize is initialized at startup by
     __morestack_load_mmap.  */
  pagesize = static_pagesize;
  overhead = sizeof (struct stack_segment);

  /* Allocate at least one page, enough for MINSIGSTKSZ, and enough
     for the requested frame plus the header, rounding each candidate
     up to a whole number of pages.  */
  allocate = pagesize;
  if (allocate < MINSIGSTKSZ)
    allocate = ((MINSIGSTKSZ + overhead + pagesize - 1)
		& ~ (pagesize - 1));
  if (allocate < frame_size)
    allocate = ((frame_size + overhead + pagesize - 1)
		& ~ (pagesize - 1));

  /* Reserve one extra page for an inaccessible guard page when the
     SPLIT_STACK_GUARD environment variable was set at startup.  */
  if (use_guard_page)
    allocate += pagesize;

  /* FIXME: If this binary requires an executable stack, then we need
     to set PROT_EXEC.  Unfortunately figuring that out is complicated
     and target dependent.  We would need to use dl_iterate_phdr to
     see if there is any object which does not have a PT_GNU_STACK
     phdr, though only for architectures which use that mechanism.  */
  space = mmap (NULL, allocate, PROT_READ | PROT_WRITE,
		MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  if (space == MAP_FAILED)
    {
      static const char msg[] =
	"unable to allocate additional stack space: errno ";
      __morestack_fail (msg, sizeof msg - 1, errno);
    }

  if (use_guard_page)
    {
      void *guard;

      /* Put the guard page at the end toward which the stack grows.  */
#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
      guard = space;
      space = (char *) space + pagesize;
#else
      /* Arithmetic on void * is a GNU extension.  */
      guard = space + allocate - pagesize;
#endif

      mprotect (guard, pagesize, PROT_NONE);
      allocate -= pagesize;
    }

  pss = (struct stack_segment *) space;

  /* The header occupies the start of the mapping; SIZE records only
     the usable stack space after the header.  */
  pss->prev = NULL;
  pss->next = NULL;
  pss->size = allocate - overhead;
  pss->dynamic_allocation = NULL;
  pss->free_dynamic_allocation = NULL;
  pss->extra = NULL;

  return pss;
}
389 1.1 mrg
390 1.1 mrg /* Free a list of dynamic blocks. */
391 1.1 mrg
392 1.1 mrg static void
393 1.1 mrg free_dynamic_blocks (struct dynamic_allocation_blocks *p)
394 1.1 mrg {
395 1.1 mrg while (p != NULL)
396 1.1 mrg {
397 1.1 mrg struct dynamic_allocation_blocks *next;
398 1.1 mrg
399 1.1 mrg next = p->next;
400 1.1 mrg free (p->block);
401 1.1 mrg free (p);
402 1.1 mrg p = next;
403 1.1 mrg }
404 1.1 mrg }
405 1.1 mrg
406 1.1 mrg /* Merge two lists of dynamic blocks. */
407 1.1 mrg
408 1.1 mrg static struct dynamic_allocation_blocks *
409 1.1 mrg merge_dynamic_blocks (struct dynamic_allocation_blocks *a,
410 1.1 mrg struct dynamic_allocation_blocks *b)
411 1.1 mrg {
412 1.1 mrg struct dynamic_allocation_blocks **pp;
413 1.1 mrg
414 1.1 mrg if (a == NULL)
415 1.1 mrg return b;
416 1.1 mrg if (b == NULL)
417 1.1 mrg return a;
418 1.1 mrg for (pp = &a->next; *pp != NULL; pp = &(*pp)->next)
419 1.1 mrg ;
420 1.1 mrg *pp = b;
421 1.1 mrg return a;
422 1.1 mrg }
423 1.1 mrg
/* Release stack segments.  If FREE_DYNAMIC is non-zero, we also free
   any dynamic blocks.  Otherwise we return them, merged into a single
   list, so the caller can reattach them without calling free.  On
   return *PP is NULL.  Crashes via __morestack_fail if munmap
   fails.  */

struct dynamic_allocation_blocks *
__morestack_release_segments (struct stack_segment **pp, int free_dynamic)
{
  struct dynamic_allocation_blocks *ret;
  struct stack_segment *pss;

  ret = NULL;
  pss = *pp;
  while (pss != NULL)
    {
      struct stack_segment *next;
      unsigned int allocate;

      /* Save the link before unmapping the segment that holds it.  */
      next = pss->next;

      if (pss->dynamic_allocation != NULL
	  || pss->free_dynamic_allocation != NULL)
	{
	  if (free_dynamic)
	    {
	      free_dynamic_blocks (pss->dynamic_allocation);
	      free_dynamic_blocks (pss->free_dynamic_allocation);
	    }
	  else
	    {
	      /* Accumulate both lists onto RET for the caller.  */
	      ret = merge_dynamic_blocks (pss->dynamic_allocation, ret);
	      ret = merge_dynamic_blocks (pss->free_dynamic_allocation, ret);
	    }
	}

      /* The mapping covered the header plus the usable size (see
	 allocate_segment), so unmap both.  */
      allocate = pss->size + sizeof (struct stack_segment);
      if (munmap (pss, allocate) < 0)
	{
	  static const char msg[] = "munmap of stack space failed: errno ";
	  __morestack_fail (msg, sizeof msg - 1, errno);
	}

      pss = next;
    }
  *pp = NULL;

  return ret;
}
470 1.1 mrg
/* This function is called by a processor specific function to set the
   initial stack pointer for a thread.  The operating system will
   always create a stack for a thread.  Here we record a stack pointer
   near the base of that stack.  The size argument lets the processor
   specific code estimate how much stack space is available on this
   initial stack.  */

void
__generic_morestack_set_initial_sp (void *sp, size_t len)
{
  /* The stack pointer most likely starts on a page boundary.  Adjust
     to the nearest 512 byte boundary.  It's not essential that we be
     precise here; getting it wrong will just leave some stack space
     unused.  */
#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
  sp = (void *) ((((__UINTPTR_TYPE__) sp + 511U) / 512U) * 512U);
#else
  sp = (void *) ((((__UINTPTR_TYPE__) sp - 511U) / 512U) * 512U);
#endif

  __morestack_initial_sp.sp = sp;
  __morestack_initial_sp.len = len;
  sigemptyset (&__morestack_initial_sp.mask);

  /* Precompute the block-everything mask used while splitting, so no
     stack space is needed to build it later.  */
  sigfillset (&__morestack_fullmask);
#if defined(__GLIBC__) && defined(__linux__)
  /* In glibc, the first two real time signals are used by the NPTL
     threading library.  By taking them out of the set of signals, we
     avoid copying the signal mask in pthread_sigmask.  More
     importantly, pthread_sigmask uses less stack space on x86_64.  */
  sigdelset (&__morestack_fullmask, __SIGRTMIN);
  sigdelset (&__morestack_fullmask, __SIGRTMIN + 1);
#endif
}
505 1.1 mrg
506 1.1 mrg /* This function is called by a processor specific function which is
507 1.1 mrg run in the prologue when more stack is needed. The processor
508 1.1 mrg specific function handles the details of saving registers and
509 1.1 mrg frobbing the actual stack pointer. This function is responsible
510 1.1 mrg for allocating a new stack segment and for copying a parameter
511 1.1 mrg block from the old stack to the new one. On function entry
512 1.1 mrg *PFRAME_SIZE is the size of the required stack frame--the returned
513 1.1 mrg stack must be at least this large. On function exit *PFRAME_SIZE
514 1.1 mrg is the amount of space remaining on the allocated stack. OLD_STACK
515 1.1 mrg points at the parameters the old stack (really the current one
516 1.1 mrg while this function is running). OLD_STACK is saved so that it can
517 1.1 mrg be returned by a later call to __generic_releasestack. PARAM_SIZE
518 1.1 mrg is the size in bytes of parameters to copy to the new stack. This
519 1.1 mrg function returns a pointer to the new stack segment, pointing to
520 1.1 mrg the memory after the parameters have been copied. The returned
521 1.1 mrg value minus the returned *PFRAME_SIZE (or plus if the stack grows
522 1.1 mrg upward) is the first address on the stack which should not be used.
523 1.1 mrg
524 1.1 mrg This function is running on the old stack and has only a limited
525 1.1 mrg amount of stack space available. */
526 1.1 mrg
527 1.1 mrg void *
528 1.1 mrg __generic_morestack (size_t *pframe_size, void *old_stack, size_t param_size)
529 1.1 mrg {
530 1.1 mrg size_t frame_size = *pframe_size;
531 1.1 mrg struct stack_segment *current;
532 1.1 mrg struct stack_segment **pp;
533 1.1 mrg struct dynamic_allocation_blocks *dynamic;
534 1.1 mrg char *from;
535 1.1 mrg char *to;
536 1.1 mrg void *ret;
537 1.1 mrg size_t i;
538 1.1 mrg size_t aligned;
539 1.1 mrg
540 1.1 mrg current = __morestack_current_segment;
541 1.1 mrg
542 1.1 mrg pp = current != NULL ? ¤t->next : &__morestack_segments;
543 1.1 mrg if (*pp != NULL && (*pp)->size < frame_size)
544 1.1 mrg dynamic = __morestack_release_segments (pp, 0);
545 1.1 mrg else
546 1.1 mrg dynamic = NULL;
547 1.1 mrg current = *pp;
548 1.1 mrg
549 1.1 mrg if (current == NULL)
550 1.1 mrg {
551 1.1 mrg current = allocate_segment (frame_size + param_size);
552 1.1 mrg current->prev = __morestack_current_segment;
553 1.1 mrg *pp = current;
554 1.1 mrg }
555 1.1 mrg
556 1.1 mrg current->old_stack = old_stack;
557 1.1 mrg
558 1.1 mrg __morestack_current_segment = current;
559 1.1 mrg
560 1.1 mrg if (dynamic != NULL)
561 1.1 mrg {
562 1.1 mrg /* Move the free blocks onto our list. We don't want to call
563 1.1 mrg free here, as we are short on stack space. */
564 1.1 mrg current->free_dynamic_allocation =
565 1.1 mrg merge_dynamic_blocks (dynamic, current->free_dynamic_allocation);
566 1.1 mrg }
567 1.1 mrg
568 1.1 mrg *pframe_size = current->size - param_size;
569 1.1 mrg
570 1.1 mrg /* Align the returned stack to a 32-byte boundary. */
571 1.1 mrg aligned = (param_size + 31) & ~ (size_t) 31;
572 1.1 mrg
573 1.1.1.2 mrg #ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
574 1.1 mrg {
575 1.1 mrg char *bottom = (char *) (current + 1) + current->size;
576 1.1 mrg to = bottom - aligned;
577 1.1 mrg ret = bottom - aligned;
578 1.1 mrg }
579 1.1 mrg #else
580 1.1 mrg to = current + 1;
581 1.1 mrg to += aligned - param_size;
582 1.1 mrg ret = (char *) (current + 1) + aligned;
583 1.1 mrg #endif
584 1.1 mrg
585 1.1 mrg /* We don't call memcpy to avoid worrying about the dynamic linker
586 1.1 mrg trying to resolve it. */
587 1.1 mrg from = (char *) old_stack;
588 1.1 mrg for (i = 0; i < param_size; i++)
589 1.1 mrg *to++ = *from++;
590 1.1 mrg
591 1.1 mrg return ret;
592 1.1 mrg }
593 1.1 mrg
/* This function is called by a processor specific function when it is
   ready to release a stack segment.  We don't actually release the
   stack segment, we just move back to the previous one.  The current
   stack segment will still be available if we need it in
   __generic_morestack.  This returns a pointer to the new stack
   segment to use, which is the one saved by a previous call to
   __generic_morestack.  The processor specific function is then
   responsible for actually updating the stack pointer.  This sets
   *PAVAILABLE to the amount of stack space now available.  */

void *
__generic_releasestack (size_t *pavailable)
{
  struct stack_segment *current;
  void *old_stack;

  /* NOTE(review): assumes __morestack_current_segment is non-NULL;
     callers only invoke this while running on a split segment --
     confirm against the processor specific callers.  */
  current = __morestack_current_segment;
  old_stack = current->old_stack;
  current = current->prev;
  __morestack_current_segment = current;

  if (current != NULL)
    {
      /* Popping back to an earlier segment: the available space is
	 the distance from the saved stack pointer to the segment
	 boundary in the direction of stack growth.  */
#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
      *pavailable = (char *) old_stack - (char *) (current + 1);
#else
      *pavailable = (char *) (current + 1) + current->size - (char *) old_stack;
#endif
    }
  else
    {
      size_t used;

      /* We have popped back to the original stack.  Estimate how much
	 of it is in use relative to the recorded initial stack
	 pointer, clamping at zero on either side.  */
#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
      if ((char *) old_stack >= (char *) __morestack_initial_sp.sp)
	used = 0;
      else
	used = (char *) __morestack_initial_sp.sp - (char *) old_stack;
#else
      if ((char *) old_stack <= (char *) __morestack_initial_sp.sp)
	used = 0;
      else
	used = (char *) old_stack - (char *) __morestack_initial_sp.sp;
#endif

      if (used > __morestack_initial_sp.len)
	*pavailable = 0;
      else
	*pavailable = __morestack_initial_sp.len - used;
    }

  return old_stack;
}
648 1.1 mrg
649 1.1 mrg /* Block signals while splitting the stack. This avoids trouble if we
650 1.1 mrg try to invoke a signal handler which itself wants to split the
651 1.1 mrg stack. */
652 1.1 mrg
653 1.1 mrg extern int pthread_sigmask (int, const sigset_t *, sigset_t *)
654 1.1 mrg __attribute__ ((weak));
655 1.1 mrg
656 1.1 mrg void
657 1.1 mrg __morestack_block_signals (void)
658 1.1 mrg {
659 1.1 mrg if (__morestack_initial_sp.dont_block_signals)
660 1.1 mrg ;
661 1.1 mrg else if (pthread_sigmask)
662 1.1 mrg pthread_sigmask (SIG_BLOCK, &__morestack_fullmask,
663 1.1 mrg &__morestack_initial_sp.mask);
664 1.1 mrg else
665 1.1 mrg sigprocmask (SIG_BLOCK, &__morestack_fullmask,
666 1.1 mrg &__morestack_initial_sp.mask);
667 1.1 mrg }
668 1.1 mrg
669 1.1 mrg /* Unblock signals while splitting the stack. */
670 1.1 mrg
671 1.1 mrg void
672 1.1 mrg __morestack_unblock_signals (void)
673 1.1 mrg {
674 1.1 mrg if (__morestack_initial_sp.dont_block_signals)
675 1.1 mrg ;
676 1.1 mrg else if (pthread_sigmask)
677 1.1 mrg pthread_sigmask (SIG_SETMASK, &__morestack_initial_sp.mask, NULL);
678 1.1 mrg else
679 1.1 mrg sigprocmask (SIG_SETMASK, &__morestack_initial_sp.mask, NULL);
680 1.1 mrg }
681 1.1 mrg
/* This function is called to allocate dynamic stack space, for alloca
   or a variably sized array.  This is a regular function with
   sufficient stack space, so we just use malloc to allocate the
   space.  We attach the allocated blocks to the current stack
   segment, so that they will eventually be reused or freed.  Aborts
   on malloc failure.  */

void *
__morestack_allocate_stack_space (size_t size)
{
  struct stack_segment *seg, *current;
  struct dynamic_allocation_blocks *p;

  /* We have to block signals to avoid getting confused if we get
     interrupted by a signal whose handler itself uses alloca or a
     variably sized array.  */
  __morestack_block_signals ();

  /* Since we don't want to call free while we are low on stack space,
     we may have a list of already allocated blocks waiting to be
     freed.  Release them all, unless we find one that is large
     enough.  We don't look at every block to see if one is large
     enough, just the first one, because we aren't trying to build a
     memory allocator here, we're just trying to speed up common
     cases.  */

  current = __morestack_current_segment;
  p = NULL;
  for (seg = __morestack_segments; seg != NULL; seg = seg->next)
    {
      p = seg->free_dynamic_allocation;
      if (p != NULL)
	{
	  if (p->size >= size)
	    {
	      /* The first free block of this segment is big enough;
		 detach it and reuse it.  */
	      seg->free_dynamic_allocation = p->next;
	      break;
	    }

	  /* Too small: release this segment's entire free list and
	     keep scanning the other segments.  */
	  free_dynamic_blocks (p);
	  seg->free_dynamic_allocation = NULL;
	  p = NULL;
	}
    }

  if (p == NULL)
    {
      /* We need to allocate additional memory.  */
      p = malloc (sizeof (*p));
      if (p == NULL)
	abort ();
      p->size = size;
      p->block = malloc (size);
      if (p->block == NULL)
	abort ();
    }

  /* If we are still on the initial stack, then we have a space leak.
     FIXME.  */
  if (current != NULL)
    {
      p->next = current->dynamic_allocation;
      current->dynamic_allocation = p;
    }

  __morestack_unblock_signals ();

  return p->block;
}
750 1.1 mrg
/* Find the stack segment for STACK and return the amount of space
   available.  This is used when unwinding the stack because of an
   exception, in order to reset the stack guard correctly.  */

size_t
__generic_findstack (void *stack)
{
  struct stack_segment *pss;
  size_t used;

  /* Walk back through the segments; if STACK lies within one, make
     it the current segment again and report the space remaining in
     the direction of stack growth.  */
  for (pss = __morestack_current_segment; pss != NULL; pss = pss->prev)
    {
      if ((char *) pss < (char *) stack
	  && (char *) pss + pss->size > (char *) stack)
	{
	  __morestack_current_segment = pss;
#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
	  return (char *) stack - (char *) (pss + 1);
#else
	  return (char *) (pss + 1) + pss->size - (char *) stack;
#endif
	}
    }

  /* We have popped back to the original stack.  */

  /* __morestack_initial_sp.sp is NULL until
     __generic_morestack_set_initial_sp has run; report no space.  */
  if (__morestack_initial_sp.sp == NULL)
    return 0;

  /* Estimate usage relative to the recorded initial stack pointer,
     clamping at zero.  */
#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
  if ((char *) stack >= (char *) __morestack_initial_sp.sp)
    used = 0;
  else
    used = (char *) __morestack_initial_sp.sp - (char *) stack;
#else
  if ((char *) stack <= (char *) __morestack_initial_sp.sp)
    used = 0;
  else
    used = (char *) stack - (char *) __morestack_initial_sp.sp;
#endif

  if (used > __morestack_initial_sp.len)
    return 0;
  else
    return __morestack_initial_sp.len - used;
}
797 1.1 mrg
/* This function is called at program startup time to make sure that
   mmap, munmap, and getpagesize are resolved if linking dynamically.
   We want to resolve them while we have enough stack for them, rather
   than calling into the dynamic linker while low on stack space.
   Similarly, invoke getenv here to check for split-stack related control
   variables, since doing so as part of the __morestack path can result
   in unwanted use of SSE/AVX registers (see GCC PR 86213).  */

void
__morestack_load_mmap (void)
{
  /* Call with bogus values to run faster.  We don't care if the call
     fails.  Pass __MORESTACK_CURRENT_SEGMENT to make sure that any
     TLS accessor function is resolved.  */
  mmap (__morestack_current_segment, 0, PROT_READ, MAP_ANONYMOUS, -1, 0);
  mprotect (NULL, 0, 0);
  /* static_pagesize is still zero on this first call; the length is
     irrelevant since the munmap exists only to force symbol
     resolution.  */
  munmap (0, static_pagesize);

  /* Initialize these values here, so as to avoid dynamic linker
     activity as part of a __morestack call.  */
  static_pagesize = getpagesize();
  use_guard_page = getenv ("SPLIT_STACK_GUARD") != 0;
}
821 1.1 mrg
/* This function may be used to iterate over the stack segments.
   This can be called like this.
     void *next_segment = NULL;
     void *next_sp = NULL;
     void *initial_sp = NULL;
     void *stack;
     size_t stack_size;
     while ((stack = __splitstack_find (next_segment, next_sp, &stack_size,
                                        &next_segment, &next_sp,
                                        &initial_sp)) != NULL)
       {
         // Stack segment starts at stack and is stack_size bytes long.
       }

   There is no way to iterate over the stack segments of a different
   thread.  However, what is permitted is for one thread to call this
   with the first two values NULL, to pass next_segment, next_sp, and
   initial_sp to a different thread, and then to suspend one way or
   another.  A different thread may run the subsequent
   __splitstack_find iterations.  Of course, this will only work if the
   first thread is suspended during the __splitstack_find iterations.
   If not, the second thread will be looking at the stack while it is
   changing, and anything could happen.

   FIXME: This should be declared in some header file, but where?  */

void *
__splitstack_find (void *segment_arg, void *sp, size_t *len,
		   void **next_segment, void **next_sp,
		   void **initial_sp)
{
  struct stack_segment *segment;
  void *ret;
  char *nsp;

  /* SEGMENT_ARG is either a real segment pointer, NULL (start a new
     iteration from the current thread state), or one of two sentinel
     codes: 1 means "report the initial, non-split stack next" and
     2 means "iteration finished".  */
  if (segment_arg == (void *) (uintptr_type) 1)
    {
      char *isp = (char *) *initial_sp;

      if (isp == NULL)
	return NULL;

      /* After the initial stack there is nothing left to report.  */
      *next_segment = (void *) (uintptr_type) 2;
      *next_sp = NULL;
#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
      if ((char *) sp >= isp)
	return NULL;
      *len = (char *) isp - (char *) sp;
      return sp;
#else
      if ((char *) sp <= (char *) isp)
	return NULL;
      *len = (char *) sp - (char *) isp;
      return (void *) isp;
#endif
    }
  else if (segment_arg == (void *) (uintptr_type) 2)
    return NULL;
  else if (segment_arg != NULL)
    segment = (struct stack_segment *) segment_arg;
  else
    {
      /* Starting a fresh iteration: record the initial stack pointer
	 and find the segment containing the current stack pointer
	 (approximated by the address of a local).  */
      *initial_sp = __morestack_initial_sp.sp;
      segment = __morestack_current_segment;
      sp = (void *) &segment;
      while (1)
	{
	  if (segment == NULL)
	    /* Not on any dynamic segment, so report the initial stack
	       via the sentinel-1 path above.  */
	    return __splitstack_find ((void *) (uintptr_type) 1, sp, len,
				      next_segment, next_sp, initial_sp);
	  if ((char *) sp >= (char *) (segment + 1)
	      && (char *) sp <= (char *) (segment + 1) + segment->size)
	    break;
	  segment = segment->prev;
	}
    }

  /* When this segment is the oldest one, the initial stack comes
     next.  */
  if (segment->prev == NULL)
    *next_segment = (void *) (uintptr_type) 1;
  else
    *next_segment = segment->prev;

  /* The old_stack value is the address of the function parameters of
     the function which called __morestack.  So if f1 called f2 which
     called __morestack, the stack looks like this:

         parameters       <- old_stack
         return in f1
         return in f2
         registers pushed by __morestack

     The registers pushed by __morestack may not be visible on any
     other stack, if we are being called by a signal handler
     immediately after the call to __morestack_unblock_signals.  We
     want to adjust our return value to include those registers.  This
     is target dependent.  */

  nsp = (char *) segment->old_stack;

  if (nsp == NULL)
    {
      /* We've reached the top of the stack.  */
      *next_segment = (void *) (uintptr_type) 2;
    }
  else
    {
      /* Back NSP up over the register save area pushed by
	 __morestack; the amount is target dependent (powerpc64 needs
	 no adjustment).  */
#if defined (__x86_64__)
      nsp -= 12 * sizeof (void *);
#elif defined (__i386__)
      nsp -= 6 * sizeof (void *);
#elif defined __powerpc64__
#elif defined __s390x__
      nsp -= 2 * 160;
#elif defined __s390__
      nsp -= 2 * 96;
#else
#error "unrecognized target"
#endif

      *next_sp = (void *) nsp;
    }

#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
  *len = (char *) (segment + 1) + segment->size - (char *) sp;
  ret = (void *) sp;
#else
  *len = (char *) sp - (char *) (segment + 1);
  ret = (void *) (segment + 1);
#endif

  return ret;
}
954 1.1 mrg
955 1.1 mrg /* Tell the split stack code whether it has to block signals while
956 1.1 mrg manipulating the stack. This is for programs in which some threads
957 1.1 mrg block all signals. If a thread already blocks signals, there is no
958 1.1 mrg need for the split stack code to block them as well. If NEW is not
959 1.1 mrg NULL, then if *NEW is non-zero signals will be blocked while
960 1.1 mrg splitting the stack, otherwise they will not. If OLD is not NULL,
961 1.1 mrg *OLD will be set to the old value. */
962 1.1 mrg
963 1.1 mrg void
964 1.1 mrg __splitstack_block_signals (int *new, int *old)
965 1.1 mrg {
966 1.1 mrg if (old != NULL)
967 1.1 mrg *old = __morestack_initial_sp.dont_block_signals ? 0 : 1;
968 1.1 mrg if (new != NULL)
969 1.1 mrg __morestack_initial_sp.dont_block_signals = *new ? 0 : 1;
970 1.1 mrg }
971 1.1 mrg
/* Indexes into the void* array handled by __splitstack_getcontext
   and __splitstack_setcontext.  NUMBER_OFFSETS deliberately exceeds
   the number of slots in use so new slots can be added without
   breaking the interface.  */

enum __splitstack_context_offsets
{
  MORESTACK_SEGMENTS,		/* 0: head of the segment list.  */
  CURRENT_SEGMENT,		/* 1: segment holding the stack pointer.  */
  CURRENT_STACK,		/* 2: approximate saved stack pointer.  */
  STACK_GUARD,			/* 3: value for __morestack_set_guard.  */
  INITIAL_SP,			/* 4: initial (non-split) stack pointer.  */
  INITIAL_SP_LEN,		/* 5: length of the initial stack.  */
  BLOCK_SIGNALS,		/* 6: dont_block_signals flag.  */

  NUMBER_OFFSETS = 10
};
987 1.1 mrg
988 1.1 mrg /* Get the current split stack context. This may be used for
989 1.1 mrg coroutine switching, similar to getcontext. The argument should
990 1.1 mrg have at least 10 void *pointers for extensibility, although we
991 1.1 mrg don't currently use all of them. This would normally be called
992 1.1 mrg immediately before a call to getcontext or swapcontext or
993 1.1 mrg setjmp. */
994 1.1 mrg
995 1.1 mrg void
996 1.1 mrg __splitstack_getcontext (void *context[NUMBER_OFFSETS])
997 1.1 mrg {
998 1.1 mrg memset (context, 0, NUMBER_OFFSETS * sizeof (void *));
999 1.1 mrg context[MORESTACK_SEGMENTS] = (void *) __morestack_segments;
1000 1.1 mrg context[CURRENT_SEGMENT] = (void *) __morestack_current_segment;
1001 1.1 mrg context[CURRENT_STACK] = (void *) &context;
1002 1.1 mrg context[STACK_GUARD] = __morestack_get_guard ();
1003 1.1 mrg context[INITIAL_SP] = (void *) __morestack_initial_sp.sp;
1004 1.1 mrg context[INITIAL_SP_LEN] = (void *) (uintptr_type) __morestack_initial_sp.len;
1005 1.1 mrg context[BLOCK_SIGNALS] = (void *) __morestack_initial_sp.dont_block_signals;
1006 1.1 mrg }
1007 1.1 mrg
/* Set the current split stack context.  The argument should be a
   context previously passed to __splitstack_getcontext.  This would
   normally be called immediately after a call to getcontext or
   swapcontext or setjmp if something jumped to it.  */

void
__splitstack_setcontext (void *context[NUMBER_OFFSETS])
{
  /* Restore the segment list and the segment that contains the saved
     stack pointer.  */
  __morestack_segments = (struct stack_segment *) context[MORESTACK_SEGMENTS];
  __morestack_current_segment =
    (struct stack_segment *) context[CURRENT_SEGMENT];
  /* Re-arm the stack guard so __morestack fires at the right point on
     the restored stack.  */
  __morestack_set_guard (context[STACK_GUARD]);
  /* Restore the record of the initial (non-split) stack and the
     signal-blocking preference saved by __splitstack_getcontext.  */
  __morestack_initial_sp.sp = context[INITIAL_SP];
  __morestack_initial_sp.len = (size_t) context[INITIAL_SP_LEN];
  __morestack_initial_sp.dont_block_signals =
    (uintptr_type) context[BLOCK_SIGNALS];
}
1025 1.1 mrg
1026 1.1 mrg /* Create a new split stack context. This will allocate a new stack
1027 1.1 mrg segment which may be used by a coroutine. STACK_SIZE is the
1028 1.1 mrg minimum size of the new stack. The caller is responsible for
1029 1.1 mrg actually setting the stack pointer. This would normally be called
1030 1.1 mrg before a call to makecontext, and the returned stack pointer and
1031 1.1 mrg size would be used to set the uc_stack field. A function called
1032 1.1 mrg via makecontext on a stack created by __splitstack_makecontext may
1033 1.1 mrg not return. Note that the returned pointer points to the lowest
1034 1.1 mrg address in the stack space, and thus may not be the value to which
1035 1.1 mrg to set the stack pointer. */
1036 1.1 mrg
1037 1.1 mrg void *
1038 1.1 mrg __splitstack_makecontext (size_t stack_size, void *context[NUMBER_OFFSETS],
1039 1.1 mrg size_t *size)
1040 1.1 mrg {
1041 1.1 mrg struct stack_segment *segment;
1042 1.1 mrg void *initial_sp;
1043 1.1 mrg
1044 1.1 mrg memset (context, 0, NUMBER_OFFSETS * sizeof (void *));
1045 1.1 mrg segment = allocate_segment (stack_size);
1046 1.1 mrg context[MORESTACK_SEGMENTS] = segment;
1047 1.1 mrg context[CURRENT_SEGMENT] = segment;
1048 1.1.1.2 mrg #ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
1049 1.1 mrg initial_sp = (void *) ((char *) (segment + 1) + segment->size);
1050 1.1 mrg #else
1051 1.1 mrg initial_sp = (void *) (segment + 1);
1052 1.1 mrg #endif
1053 1.1 mrg context[STACK_GUARD] = __morestack_make_guard (initial_sp, segment->size);
1054 1.1 mrg context[INITIAL_SP] = NULL;
1055 1.1 mrg context[INITIAL_SP_LEN] = 0;
1056 1.1 mrg *size = segment->size;
1057 1.1 mrg return (void *) (segment + 1);
1058 1.1 mrg }
1059 1.1 mrg
1060 1.1 mrg /* Given an existing split stack context, reset it back to the start
1061 1.1 mrg of the stack. Return the stack pointer and size, appropriate for
1062 1.1 mrg use with makecontext. This may be used if a coroutine exits, in
1063 1.1 mrg order to reuse the stack segments for a new coroutine. */
1064 1.1 mrg
1065 1.1 mrg void *
1066 1.1 mrg __splitstack_resetcontext (void *context[10], size_t *size)
1067 1.1 mrg {
1068 1.1 mrg struct stack_segment *segment;
1069 1.1 mrg void *initial_sp;
1070 1.1 mrg size_t initial_size;
1071 1.1 mrg void *ret;
1072 1.1 mrg
1073 1.1 mrg /* Reset the context assuming that MORESTACK_SEGMENTS, INITIAL_SP
1074 1.1 mrg and INITIAL_SP_LEN are correct. */
1075 1.1 mrg
1076 1.1 mrg segment = context[MORESTACK_SEGMENTS];
1077 1.1 mrg context[CURRENT_SEGMENT] = segment;
1078 1.1 mrg context[CURRENT_STACK] = NULL;
1079 1.1 mrg if (segment == NULL)
1080 1.1 mrg {
1081 1.1 mrg initial_sp = context[INITIAL_SP];
1082 1.1 mrg initial_size = (uintptr_type) context[INITIAL_SP_LEN];
1083 1.1 mrg ret = initial_sp;
1084 1.1.1.2 mrg #ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
1085 1.1 mrg ret = (void *) ((char *) ret - initial_size);
1086 1.1 mrg #endif
1087 1.1 mrg }
1088 1.1 mrg else
1089 1.1 mrg {
1090 1.1.1.2 mrg #ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
1091 1.1 mrg initial_sp = (void *) ((char *) (segment + 1) + segment->size);
1092 1.1 mrg #else
1093 1.1 mrg initial_sp = (void *) (segment + 1);
1094 1.1 mrg #endif
1095 1.1 mrg initial_size = segment->size;
1096 1.1 mrg ret = (void *) (segment + 1);
1097 1.1 mrg }
1098 1.1 mrg context[STACK_GUARD] = __morestack_make_guard (initial_sp, initial_size);
1099 1.1 mrg context[BLOCK_SIGNALS] = NULL;
1100 1.1 mrg *size = initial_size;
1101 1.1 mrg return ret;
1102 1.1 mrg }
1103 1.1 mrg
1104 1.1 mrg /* Release all the memory associated with a splitstack context. This
1105 1.1 mrg may be used if a coroutine exits and the associated stack should be
1106 1.1 mrg freed. */
1107 1.1 mrg
1108 1.1 mrg void
1109 1.1 mrg __splitstack_releasecontext (void *context[10])
1110 1.1 mrg {
1111 1.1 mrg __morestack_release_segments (((struct stack_segment **)
1112 1.1 mrg &context[MORESTACK_SEGMENTS]),
1113 1.1 mrg 1);
1114 1.1 mrg }
1115 1.1 mrg
1116 1.1 mrg /* Like __splitstack_block_signals, but operating on CONTEXT, rather
1117 1.1 mrg than on the current state. */
1118 1.1 mrg
1119 1.1 mrg void
1120 1.1 mrg __splitstack_block_signals_context (void *context[NUMBER_OFFSETS], int *new,
1121 1.1 mrg int *old)
1122 1.1 mrg {
1123 1.1 mrg if (old != NULL)
1124 1.1 mrg *old = ((uintptr_type) context[BLOCK_SIGNALS]) != 0 ? 0 : 1;
1125 1.1 mrg if (new != NULL)
1126 1.1 mrg context[BLOCK_SIGNALS] = (void *) (uintptr_type) (*new ? 0 : 1);
1127 1.1 mrg }
1128 1.1 mrg
1129 1.1 mrg /* Find the stack segments associated with a split stack context.
1130 1.1 mrg This will return the address of the first stack segment and set
1131 1.1 mrg *STACK_SIZE to its size. It will set next_segment, next_sp, and
1132 1.1 mrg initial_sp which may be passed to __splitstack_find to find the
1133 1.1 mrg remaining segments. */
1134 1.1 mrg
1135 1.1 mrg void *
1136 1.1 mrg __splitstack_find_context (void *context[NUMBER_OFFSETS], size_t *stack_size,
1137 1.1 mrg void **next_segment, void **next_sp,
1138 1.1 mrg void **initial_sp)
1139 1.1 mrg {
1140 1.1 mrg void *sp;
1141 1.1 mrg struct stack_segment *segment;
1142 1.1 mrg
1143 1.1 mrg *initial_sp = context[INITIAL_SP];
1144 1.1 mrg
1145 1.1 mrg sp = context[CURRENT_STACK];
1146 1.1 mrg if (sp == NULL)
1147 1.1 mrg {
1148 1.1 mrg /* Most likely this context was created but was never used. The
1149 1.1 mrg value 2 is a code used by __splitstack_find to mean that we
1150 1.1 mrg have reached the end of the list of stacks. */
1151 1.1 mrg *next_segment = (void *) (uintptr_type) 2;
1152 1.1 mrg *next_sp = NULL;
1153 1.1 mrg *initial_sp = NULL;
1154 1.1 mrg return NULL;
1155 1.1 mrg }
1156 1.1 mrg
1157 1.1 mrg segment = context[CURRENT_SEGMENT];
1158 1.1 mrg if (segment == NULL)
1159 1.1 mrg {
1160 1.1 mrg /* Most likely this context was saved by a thread which was not
1161 1.1 mrg created using __splistack_makecontext and which has never
1162 1.1 mrg split the stack. The value 1 is a code used by
1163 1.1 mrg __splitstack_find to look at the initial stack. */
1164 1.1 mrg segment = (struct stack_segment *) (uintptr_type) 1;
1165 1.1 mrg }
1166 1.1 mrg
1167 1.1 mrg return __splitstack_find (segment, sp, stack_size, next_segment, next_sp,
1168 1.1 mrg initial_sp);
1169 1.1 mrg }
1170 1.1 mrg
1171 1.1 mrg #endif /* !defined (inhibit_libc) */
1172 1.1.1.3 mrg #endif /* not powerpc 32-bit */
1173