/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2001 Daniel Eischen <[email protected]>
 * Copyright (c) 2000-2001 Jason Evans <[email protected]>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/auxv.h>
#include <sys/mman.h>
#include <sys/queue.h>
#include <sys/resource.h>
#include <sys/sysctl.h>
#include <stdlib.h>
#include <pthread.h>
#include <link.h>

#include "thr_private.h"

/* Spare thread stack. */
struct stack {
	LIST_ENTRY(stack)	qe;		/* Stack queue linkage. */
	size_t			stacksize;	/* Stack size (rounded up). */
	size_t			guardsize;	/* Guard size. */
	void			*stackaddr;	/* Stack address. */
};
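
/*
 * Note: a cached stack's struct stack header is not allocated separately;
 * _thr_stack_free() carves it out of the topmost bytes of the stack
 * memory itself, so an idle cached stack consumes no extra storage.
 */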

/*
 * Default sized (stack and guard) spare stack queue.  Stacks are cached
 * to avoid additional complexity managing mmap()ed stack regions.  Spare
 * stacks are used in LIFO order to increase cache locality.
 */
static LIST_HEAD(, stack)	dstackq = LIST_HEAD_INITIALIZER(dstackq);

/*
 * Miscellaneous sized (non-default stack and/or guard) spare stack queue.
 * Stacks are cached to avoid additional complexity managing mmap()ed
 * stack regions.  This list is unordered, since ordering on both stack
 * size and guard size would be more trouble than it's worth.  Stacks are
 * allocated from this cache on a first size match basis.
 */
static LIST_HEAD(, stack)	mstackq = LIST_HEAD_INITIALIZER(mstackq);
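
/*
 * Illustrative example (not part of the implementation): with default
 * stack and guard sizes, a terminated thread's stack eventually lands on
 * dstackq via _thr_stack_free(), and the next default-size allocation
 * pops it straight back off:
 *
 *	pthread_create(&t1, NULL, fn, NULL);	// mmap()s a fresh stack
 *	pthread_join(t1, NULL);			// stack cached on dstackq
 *	pthread_create(&t2, NULL, fn, NULL);	// likely reuses t1's stack
 */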

/*
 * Base address of the last stack allocated (including its red zone, if
 * there is one).  Stacks are allocated contiguously, starting beyond the
 * top of the main stack.  When a new stack is created, a red zone is
 * typically created (actually, the red zone is mapped with PROT_NONE) above
 * the top of the stack, such that the stack will not be able to grow all
 * the way to the bottom of the next stack.  This isn't fool-proof.  It is
 * possible for a stack to grow by a large amount, such that it grows into
 * the next stack, and as long as the memory within the red zone is never
 * accessed, nothing will prevent one thread stack from trampling all over
 * the next.
 *
 * low memory
 *     . . . . . . . . . . . . . . . . . .
 *    |                                   |
 *    |             stack 3               | start of 3rd thread stack
 *    +-----------------------------------+
 *    |                                   |
 *    |       Red Zone (guard page)       | red zone for 2nd thread
 *    |                                   |
 *    +-----------------------------------+
 *    |  stack 2 - _thr_stack_default     | top of 2nd thread stack
 *    |                                   |
 *    |                                   |
 *    |                                   |
 *    |                                   |
 *    |             stack 2               |
 *    +-----------------------------------+ <-- start of 2nd thread stack
 *    |                                   |
 *    |       Red Zone                    | red zone for 1st thread
 *    |                                   |
 *    +-----------------------------------+
 *    |  stack 1 - _thr_stack_default     | top of 1st thread stack
 *    |                                   |
 *    |                                   |
 *    |                                   |
 *    |                                   |
 *    |             stack 1               |
 *    +-----------------------------------+ <-- start of 1st thread stack
 *    |                                   |   (initial value of last_stack)
 *    |       Red Zone                    |
 *    |                                   | red zone for main thread
 *    +-----------------------------------+
 *    |  USRSTACK - _thr_stack_initial    | top of main thread stack
 *    |                                   | ^
 *    |                                   | |
 *    |                                   | |
 *    |                                   | | stack growth
 *    |                                   |
 *    +-----------------------------------+ <-- start of main thread stack
 *                                              (USRSTACK)
 * high memory
 *
 */
static char *last_stack = NULL;
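
/*
 * Worked example (symbolic, assuming default sizes): the first call to
 * _thr_stack_alloc() initializes
 *
 *	last_stack = _usrstack - _thr_stack_initial - _thr_guard_default;
 *
 * i.e. the address just below the main thread's red zone.  A new stack is
 * then carved out beneath it:
 *
 *	stackaddr = last_stack - stacksize - guardsize;
 *
 * so [stackaddr, stackaddr + guardsize) becomes the PROT_NONE red zone
 * and [stackaddr + guardsize, last_stack) the usable stack.
 */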

/* Round size up to the nearest multiple of _thr_page_size. */
static inline size_t
round_up(size_t size)
{
	return (roundup2(size, _thr_page_size));
}
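
/*
 * For example, with a 4096-byte page size:
 *
 *	round_up(1)    == 4096
 *	round_up(4096) == 4096
 *	round_up(4097) == 8192
 *
 * (roundup2() requires the alignment to be a power of two, which page
 * sizes always are.)
 */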

void
_thr_stack_fix_protection(struct pthread *thrd)
{

	mprotect((char *)thrd->attr.stackaddr_attr +
	    round_up(thrd->attr.guardsize_attr),
	    round_up(thrd->attr.stacksize_attr),
	    _rtld_get_stack_prot());
}

static void
singlethread_map_stacks_exec(void)
{
	char *usrstack;
	size_t stacksz;

	if (!__thr_get_main_stack_base(&usrstack) ||
	    !__thr_get_main_stack_lim(&stacksz))
		return;
	mprotect(usrstack - stacksz, stacksz, _rtld_get_stack_prot());
}

void
__thr_map_stacks_exec(void)
{
	struct pthread *curthread, *thrd;
	struct stack *st;

	if (!_thr_is_inited()) {
		singlethread_map_stacks_exec();
		return;
	}
	curthread = _get_curthread();
	THREAD_LIST_RDLOCK(curthread);
	LIST_FOREACH(st, &mstackq, qe)
		mprotect((char *)st->stackaddr + st->guardsize, st->stacksize,
		    _rtld_get_stack_prot());
	LIST_FOREACH(st, &dstackq, qe)
		mprotect((char *)st->stackaddr + st->guardsize, st->stacksize,
		    _rtld_get_stack_prot());
	TAILQ_FOREACH(thrd, &_thread_gc_list, gcle)
		_thr_stack_fix_protection(thrd);
	TAILQ_FOREACH(thrd, &_thread_list, tle)
		_thr_stack_fix_protection(thrd);
	THREAD_LIST_UNLOCK(curthread);
}
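
/*
 * Note (assumed context, not stated above): __thr_map_stacks_exec() is
 * the hook the dynamic linker invokes when a newly loaded object requires
 * executable stacks.  It walks every cached and in-use stack and reapplies
 * the (possibly widened) protection reported by _rtld_get_stack_prot().
 */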

int
_thr_stack_alloc(struct pthread_attr *attr)
{
	struct pthread *curthread = _get_curthread();
	struct stack *spare_stack;
	size_t stacksize;
	size_t guardsize;
	char *stackaddr;

	/*
	 * Round up stack size to nearest multiple of _thr_page_size so
	 * that mmap() will work.  If the stack size is not an even
	 * multiple, we end up initializing things such that there is
	 * unused space above the beginning of the stack, so the stack
	 * sits snugly against its guard.
	 */
	stacksize = round_up(attr->stacksize_attr);
	guardsize = round_up(attr->guardsize_attr);

	attr->stackaddr_attr = NULL;
	attr->flags &= ~THR_STACK_USER;

	/*
	 * Use the garbage collector lock for synchronization of the
	 * spare stack lists and allocations from usrstack.
	 */
	THREAD_LIST_WRLOCK(curthread);
	/*
	 * If the stack and guard sizes are default, try to allocate a stack
	 * from the default-size stack cache:
	 */
	if ((stacksize == THR_STACK_DEFAULT) &&
	    (guardsize == _thr_guard_default)) {
		if ((spare_stack = LIST_FIRST(&dstackq)) != NULL) {
			/* Use the spare stack. */
			LIST_REMOVE(spare_stack, qe);
			attr->stackaddr_attr = spare_stack->stackaddr;
		}
	}
	/*
	 * The user specified a non-default stack and/or guard size, so try to
	 * allocate a stack from the non-default size stack cache, using the
	 * rounded up stack size (stacksize) in the search:
	 */
	else {
		LIST_FOREACH(spare_stack, &mstackq, qe) {
			if (spare_stack->stacksize == stacksize &&
			    spare_stack->guardsize == guardsize) {
				LIST_REMOVE(spare_stack, qe);
				attr->stackaddr_attr = spare_stack->stackaddr;
				break;
			}
		}
	}
	if (attr->stackaddr_attr != NULL) {
		/* A cached stack was found.  Release the lock. */
		THREAD_LIST_UNLOCK(curthread);
	}
	else {
		/*
		 * Allocate a stack from or below usrstack, depending
		 * on the LIBPTHREAD_BIGSTACK_MAIN env variable.
		 */
		if (last_stack == NULL)
			last_stack = _usrstack - _thr_stack_initial -
			    _thr_guard_default;

		/* Allocate a new stack. */
		stackaddr = last_stack - stacksize - guardsize;

		/*
		 * Even if stack allocation fails, we don't want to try to
		 * use this location again, so unconditionally decrement
		 * last_stack.  Under normal operating conditions, the most
		 * likely reason for an mmap() error is a stack overflow of
		 * the adjacent thread stack.
		 */
		last_stack -= (stacksize + guardsize);

		/* Release the lock before mmap'ing it. */
		THREAD_LIST_UNLOCK(curthread);

		/*
		 * Map the stack and guard page together, and split the
		 * guard page from the allocated space:
		 */
		if ((stackaddr = mmap(stackaddr, stacksize + guardsize,
		    _rtld_get_stack_prot(), MAP_STACK,
		    -1, 0)) != MAP_FAILED &&
		    (guardsize == 0 ||
		    mprotect(stackaddr, guardsize, PROT_NONE) == 0)) {
			stackaddr += guardsize;
		} else {
			if (stackaddr != MAP_FAILED)
				munmap(stackaddr, stacksize + guardsize);
			stackaddr = NULL;
		}
		attr->stackaddr_attr = stackaddr;
	}
	if (attr->stackaddr_attr != NULL)
		return (0);
	else
		return (-1);
}
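
/*
 * Illustrative caller sketch (hypothetical, for exposition only): the
 * thread-creation path is expected to use this pair roughly as follows.
 *
 *	struct pthread_attr attr = ...;	// non-user stack, sizes filled in
 *	if (_thr_stack_alloc(&attr) != 0)
 *		return (EAGAIN);	// cache miss and mmap() failed
 *	...				// run the thread on
 *					// attr.stackaddr_attr
 *	THREAD_LIST_WRLOCK(curthread);
 *	_thr_stack_free(&attr);		// caches the stack for reuse
 *	THREAD_LIST_UNLOCK(curthread);
 */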

/* This function must be called with _thread_list_lock held. */
void
_thr_stack_free(struct pthread_attr *attr)
{
	struct stack *spare_stack;

	if ((attr != NULL) && ((attr->flags & THR_STACK_USER) == 0)
	    && (attr->stackaddr_attr != NULL)) {
		spare_stack = (struct stack *)
		    ((char *)attr->stackaddr_attr +
		    attr->stacksize_attr - sizeof(struct stack));
		spare_stack->stacksize = round_up(attr->stacksize_attr);
		spare_stack->guardsize = round_up(attr->guardsize_attr);
		spare_stack->stackaddr = attr->stackaddr_attr;

		if (spare_stack->stacksize == THR_STACK_DEFAULT &&
		    spare_stack->guardsize == _thr_guard_default) {
			/* Default stack/guard size. */
			LIST_INSERT_HEAD(&dstackq, spare_stack, qe);
		} else {
			/* Non-default stack/guard size. */
			LIST_INSERT_HEAD(&mstackq, spare_stack, qe);
		}
		attr->stackaddr_attr = NULL;
	}
}
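
/*
 * Example layout (illustrative): for a cached 64 KiB stack whose usable
 * region starts at stackaddr, the struct stack header written above lives
 * in the stack's own topmost bytes:
 *
 *	stackaddr                      stackaddr + 64K
 *	|          ...stack...          | struct stack |
 *
 * This is safe because the owning thread has exited; the header is simply
 * overwritten once the stack is handed out again.
 */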