/* Allocate a stack suitable to be used with xclone or xsigaltstack.
   Copyright (C) 2021-2022 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <support/check.h>
#include <support/support.h>
#include <support/xunistd.h>
#include <stdint.h>
#include <string.h>
#include <stackinfo.h>
#include <sys/mman.h>
#include <sys/param.h> /* roundup, MAX */

#ifndef MAP_NORESERVE
# define MAP_NORESERVE 0
#endif
#ifndef MAP_STACK
# define MAP_STACK 0
#endif

struct support_stack
support_stack_alloc (size_t size)
{
  size_t pagesize = sysconf (_SC_PAGESIZE);
  if (pagesize == -1)
    FAIL_EXIT1 ("sysconf (_SC_PAGESIZE): %m\n");

  /* Always supply at least sysconf (_SC_SIGSTKSZ) space; passing 0
     as size means only that much space.  No matter what the number is,
     round it up to a whole number of pages.  */
  size_t stacksize = roundup (size + sysconf (_SC_SIGSTKSZ),
                              pagesize);

  /* The guard bands need to be large enough to intercept offset
     accesses from a stack address that might otherwise hit another
     mapping.  Make them at least twice as big as the stack itself, to
     defend against an offset by the entire size of a large
     stack-allocated array.  The minimum is 1MiB, which is arbitrarily
     chosen to be larger than any "typical" wild pointer offset.
     Again, no matter what the number is, round it up to a whole
     number of pages.  */
  size_t guardsize = roundup (MAX (2 * stacksize, 1024 * 1024), pagesize);
  size_t alloc_size = guardsize + stacksize + guardsize;
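  /* As an illustration (the actual numbers depend on the system): with
     4 KiB pages, an 8 KiB sysconf (_SC_SIGSTKSZ) and a requested size
     of 64 KiB, stacksize is 72 KiB, guardsize is 1 MiB (twice the
     stack would only be 144 KiB, below the minimum), and alloc_size is
     2 MiB + 72 KiB.  */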
  /* Use MAP_NORESERVE so that RAM will not be wasted on the guard
     bands; touch all the pages of the actual stack before returning,
     so we know they are allocated.  */
  void *alloc_base = xmmap (0,
                            alloc_size,
                            PROT_NONE,
                            MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE|MAP_STACK,
                            -1);
  /* Some architectures still require an executable stack for the signal
     return trampoline, although PF_X could be overridden if PT_GNU_STACK
     is present.  However, since glibc does not export such information
     with a proper ABI, it uses the historical permissions.  */
  int prot = PROT_READ | PROT_WRITE
             | (DEFAULT_STACK_PERMS & PF_X ? PROT_EXEC : 0);
  xmprotect (alloc_base + guardsize, stacksize, prot);
  memset (alloc_base + guardsize, 0xA5, stacksize);
  return (struct support_stack) { alloc_base + guardsize, stacksize, guardsize };
}

void
support_stack_free (struct support_stack *stack)
{
  void *alloc_base = (void *) ((uintptr_t) stack->stack - stack->guardsize);
  size_t alloc_size = stack->size + 2 * stack->guardsize;
  xmunmap (alloc_base, alloc_size);
}
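
#if 0
/* Illustrative usage sketch, not compiled as part of this file: a test
   could pair support_stack_alloc with sigaltstack roughly like this.
   The function name and sizes here are assumptions for illustration;
   <signal.h> and the support headers included above are assumed.  */
static void
use_stack_for_signals (void)
{
  /* Request at least 64 KiB of usable stack; guard bands are added
     around it by support_stack_alloc.  */
  struct support_stack stack = support_stack_alloc (64 * 1024);

  /* Point the alternate signal stack at the writable region.  */
  stack_t ss =
    {
      .ss_sp = stack.stack,
      .ss_size = stack.size,
      .ss_flags = 0,
    };
  if (sigaltstack (&ss, NULL) != 0)
    FAIL_EXIT1 ("sigaltstack: %m");

  /* ... install a SA_ONSTACK signal handler and run the test ...  */

  /* Unmap the stack and both guard bands.  */
  support_stack_free (&stack);
}
#endif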