/*
 * Copyright (c) 2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

// #define STANDALONE

#ifndef STANDALONE
#include <darwintest.h>
#endif
#include <architecture/i386/table.h>
#include <i386/user_ldt.h>
#include <mach/i386/vm_param.h>
#include <mach/i386/thread_status.h>
#include <mach/mach.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/signal.h>
#include <sys/sysctl.h>
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
#include <unistd.h>
#include <ldt_mach_exc.h>

#ifndef STANDALONE
T_GLOBAL_META(
	T_META_NAMESPACE("xnu.intel"),
	T_META_RADAR_COMPONENT_NAME("xnu"),
	T_META_RADAR_COMPONENT_VERSION("intel"),
	T_META_OWNER("seth_goldberg"),
	T_META_CHECK_LEAKS(false)
	);
#endif

#define COMPAT_MODE_CS_SELECTOR 0x1f
#define SYSENTER_SELECTOR 0xb
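/*
 * Selector encoding reminder: a segment selector is (index << 3) | TI | RPL,
 * so 0x1f is LDT entry 3 with RPL 3 (TI bit set selects the LDT), and 0xb is
 * GDT entry 1 with RPL 3.
 */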
/* #define DEBUG 1 */
#define P2ROUNDUP(x, align)     (-(-((long)x) & -((long)align)))
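/*
 * P2ROUNDUP rounds x up to the next multiple of a power-of-two align via
 * two's complement arithmetic: -align is a mask of the high bits, so e.g.
 * P2ROUNDUP(5, 4) == -(-5 & -4) == 8.
 */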
#define MSG 2048

#define NORMAL_RUN_TIME  (10)
#define TIMEOUT_OVERHEAD (10)

/*
 * General theory of operation:
 * ----------------------------
 * (1) Ensure that all code and data to be accessed from compatibility mode is
 *     located in the low 4GiB of virtual address space.
 * (2) Allocate required segments via the i386_set_ldt() system call, making
 *     sure to set the descriptor type correctly (code vs. data).  Creating
 *     64-bit code segments is not allowed (just use the existing 0x2b selector.)
 * (3) Once you know which selector is associated with the desired code, use a
 *     trampoline (or thunk) to (a) switch to a stack that's located below 4GiB,
 *     (b) save ABI-mandated caller-saved state so that if it's trashed by
 *     compatibility-mode code, it can be restored before returning to 64-bit
 *     mode (if desired), and finally (c) long-jump or long-call (aka far call)
 *     to the segment and desired offset (this example uses an offset of 0 for
 *     simplicity.)
 * (4) Once in compatibility mode, if a framework call or system call is required,
 *     the code must trampoline back to 64-bit mode to do so.  System calls from
 *     compatibility-mode code are not supported and will result in invalid opcode
 *     exceptions.  This example includes a simple 64-bit trampoline (which must
 *     be located in the low 4GiB of virtual address space, since it's executed
 *     by compatibility-mode code.)  Note that since the 64-bit ABI mandates that
 *     the stack must be aligned to a 16-byte boundary, the sample trampoline
 *     performs that rounding, to simplify compatibility-mode code.  Additionally,
 *     since 64-bit native code makes use of thread-local storage, the user-mode
 *     GSbase must be restored.  This sample includes two ways to do that: (a) by
 *     calling into a C implementation that associates the thread-local storage
 *     pointer with a stack range (which will be unique for each thread), and
 *     (b) by storing the original GSbase in a block of memory installed into
 *     GSbase before calling into compatibility-mode code.  A special machdep
 *     system call restores GSbase as needed.  Note that the sample trampoline
 *     does not save and restore %gs (or most other register state), so that is an
 *     area that may be tailored to the application's requirements.
 * (5) Once running in compatibility mode, should synchronous or asynchronous
 *     exceptions occur, this sample shows how a Mach exception handler (running
 *     in a detached thread, handling exceptions for the entire task) can catch
 *     such exceptions and manipulate thread state to perform recovery (or not.)
 *     Other ways to handle exceptions include installing per-thread exception
 *     servers.  Alternatively, BSD signal handlers can be used.  Note that once a
 *     process installs a custom LDT, *ALL* future signal deliveries will include
 *     ucontext pointers to mcontext structures with enhanced thread state
 *     embedded (e.g. the %ds, %es, %ss, and GSbase registers) [This assumes
 *     that SA_SIGINFO is passed to sigaction(2) when registering handlers].
 *     The mcontext size (part of the ucontext) can be used to differentiate
 *     between mcontext flavors (e.g. those with/without full thread state plus
 *     x87 FP state, AVX state, or AVX2/3 state).
 */
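
/*
 * As a reading aid, the flow described above maps onto this file roughly as:
 * thread_32bit() -> call_compatmode() -> thunkit (the low-memory copy of
 * compat_mode_trampoline) -> far transfer into code_32 via
 * COMPAT_MODE_CS_SELECTOR -> thunk64 / long_mode_trampoline back to 64-bit
 * mode -> hello_from_32bit().
 */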

/*
 * This test exercises the custom LDT functionality exposed via the i386_{get,set}_ldt
 * system calls.
 *
 * Tests include:
 * (1a) Exception handling (due to an exception or another thread sending a signal) while
 *      running in compatibility mode;
 * (1b) Signal handling while running in compatibility mode;
 * (2)  Thunking back to 64-bit mode and executing a framework function (e.g. printf)
 * (3)  Ensuring that transitions to compatibility mode and back to 64-bit mode
 *      do not negatively impact system calls and framework calls in 64-bit mode
 * (4)  Use of thread_get_state / thread_set_state to configure a thread to
 *      execute in compatibility mode with the proper LDT code segment (this is
 *      effectively what the exception handler does when the passed-in new_state
 *      is changed (or what the BSD signal handler return handling does when the
 *      mcontext is modified).)
 * (5)  Ensure that compatibility mode code cannot make system calls via sysenter or
 *      old-style int {0x80..0x82}.
 * (6)  Negative testing to ensure errors are returned if the consumer tries
 *      to set a disallowed segment type / Long flag. [TBD]
 */

/*
 * Note that these addresses are not necessarily available due to ASLR, so
 * a robust implementation should determine the proper range to use via
 * another means.
 */
#ifndef STANDALONE
/* libdarwintest needs LOTs of stack */
#endif
#define FIXED_STACK_SIZE (PAGE_SIZE * 16)
#define FIXED_TRAMP_MAXLEN (PAGE_SIZE * 8)

#pragma pack(1)
typedef struct {
	uint64_t off;
	uint16_t seg;
} far_call_t;
#pragma pack()
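
/*
 * far_call_t is the in-memory far-pointer operand (offset first, then
 * selector) consumed by the trampoline's far transfer; #pragma pack(1)
 * ensures no padding is inserted between the 8-byte offset and the
 * 2-byte selector.
 */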

typedef struct {
	uint64_t stack_base;
	uint64_t stack_limit;
	uint64_t GSbase;
} stackaddr_to_gsbase_t;

typedef struct thread_arg {
	pthread_mutex_t         mutex;
	pthread_cond_t          condvar;
	volatile boolean_t      done;
	uint32_t                compat_stackaddr;       /* Compatibility mode stack address */
} thread_arg_t;

typedef struct custom_tsd {
	struct custom_tsd *     this_tsd_base;
	uint64_t                orig_tsd_base;
} custom_tsd_t;

typedef uint64_t (*compat_tramp_t)(far_call_t *fcp, void *lowmemstk, uint64_t arg_for_32bit,
    uint64_t callback, uint64_t absolute_addr_of_thunk64);

#define GS_RELATIVE volatile __attribute__((address_space(256)))
static custom_tsd_t GS_RELATIVE *mytsd = (custom_tsd_t GS_RELATIVE *)0;
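
/*
 * address_space(256) is the clang convention for %gs-relative addressing on
 * x86_64, so dereferencing mytsd (a zero pointer in that address space) reads
 * the custom_tsd_t installed at the current GSbase.
 */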

static far_call_t input_desc = { .seg = COMPAT_MODE_CS_SELECTOR, .off = 0 };
static uint64_t stackAddr = 0;
static compat_tramp_t thunkit = NULL;
static uint64_t thunk64_addr;
/* stack2gs[0] is initialized in map_lowmem_stack() */
static stackaddr_to_gsbase_t stack2gs[] = { { 0 } };

extern int compat_mode_trampoline(far_call_t *, void *, uint64_t);
extern void long_mode_trampoline(void);
extern boolean_t mach_exc_server(mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP);

extern void code_32(void);

kern_return_t catch_mach_exception_raise_state_identity(mach_port_t exception_port,
    mach_port_t thread,
    mach_port_t task,
    exception_type_t exception,
    mach_exception_data_t code,
    mach_msg_type_number_t code_count,
    int * flavor,
    thread_state_t old_state,
    mach_msg_type_number_t old_state_count,
    thread_state_t new_state,
    mach_msg_type_number_t * new_state_count);

kern_return_t
catch_mach_exception_raise_state(mach_port_t exception_port,
    exception_type_t exception,
    const mach_exception_data_t code,
    mach_msg_type_number_t codeCnt,
    int *flavor,
    const thread_state_t old_state,
    mach_msg_type_number_t old_stateCnt,
    thread_state_t new_state,
    mach_msg_type_number_t *new_stateCnt);

kern_return_t
catch_mach_exception_raise(mach_port_t exception_port,
    mach_port_t thread,
    mach_port_t task,
    exception_type_t exception,
    mach_exception_data_t code,
    mach_msg_type_number_t codeCnt,
    int *flavor,
    thread_state_t old_state,
    mach_msg_type_number_t old_stateCnt,
    thread_state_t new_state,
    mach_msg_type_number_t *new_stateCnt);

extern void _thread_set_tsd_base(uint64_t);
static uint64_t stack_range_to_GSbase(uint64_t stackptr, uint64_t GSbase);
void restore_gsbase(uint64_t stackptr);

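/*
 * Note: the thread_handle returned with THREAD_IDENTIFIER_INFO is the
 * thread's TSD base (what user-mode GSbase points at on Intel), which is
 * why it can later be handed back to _thread_set_tsd_base().
 */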
static uint64_t
get_gsbase(void)
{
	struct thread_identifier_info tiinfo;
	unsigned int info_count = THREAD_IDENTIFIER_INFO_COUNT;
	kern_return_t kr;

	if ((kr = thread_info(mach_thread_self(), THREAD_IDENTIFIER_INFO,
	    (thread_info_t) &tiinfo, &info_count)) != KERN_SUCCESS) {
		fprintf(stderr, "Could not get tsd base address.  This will not end well.\n");
		return 0;
	}

	return (uint64_t)tiinfo.thread_handle;
}

void
restore_gsbase(uint64_t stackptr)
{
	/* Restore GSbase so tsd is accessible in long mode */
	uint64_t orig_GSbase = stack_range_to_GSbase(stackptr, 0);

	assert(orig_GSbase != 0);
	_thread_set_tsd_base(orig_GSbase);
}

/*
 * Though we've directed all exceptions through the catch_mach_exception_raise_state_identity
 * entry point, we must still provide the two other entry points; otherwise a linker error
 * will occur.
 */
kern_return_t
catch_mach_exception_raise(mach_port_t exception_port,
    mach_port_t thread,
    mach_port_t task,
    exception_type_t exception,
    mach_exception_data_t code,
    mach_msg_type_number_t codeCnt,
    int *flavor,
    thread_state_t old_state,
    mach_msg_type_number_t old_stateCnt,
    thread_state_t new_state,
    mach_msg_type_number_t *new_stateCnt)
{
#pragma unused(exception_port, thread, task, exception, code, codeCnt, flavor, old_state, old_stateCnt, new_state, new_stateCnt)
	fprintf(stderr, "Unexpected exception handler called: %s\n", __func__);
	return KERN_FAILURE;
}

kern_return_t
catch_mach_exception_raise_state(mach_port_t exception_port,
    exception_type_t exception,
    const mach_exception_data_t code,
    mach_msg_type_number_t codeCnt,
    int *flavor,
    const thread_state_t old_state,
    mach_msg_type_number_t old_stateCnt,
    thread_state_t new_state,
    mach_msg_type_number_t *new_stateCnt)
{
#pragma unused(exception_port, exception, code, codeCnt, flavor, old_state, old_stateCnt, new_state, new_stateCnt)
	fprintf(stderr, "Unexpected exception handler called: %s\n", __func__);
	return KERN_FAILURE;
}

static void
handle_arithmetic_exception(_STRUCT_X86_THREAD_FULL_STATE64 *xtfs64, uint64_t *ip_skip_countp)
{
	fprintf(stderr, "Caught divide-error exception\n");
	fprintf(stderr, "cs=0x%x rip=0x%x gs=0x%x ss=0x%x rsp=0x%llx\n",
	    (unsigned)xtfs64->__ss64.__cs,
	    (unsigned)xtfs64->__ss64.__rip, (unsigned)xtfs64->__ss64.__gs,
	    (unsigned)xtfs64->__ss, xtfs64->__ss64.__rsp);
	*ip_skip_countp = 2;
}

static void
handle_badinsn_exception(_STRUCT_X86_THREAD_FULL_STATE64 *xtfs64, uint64_t __unused *ip_skip_countp)
{
	extern void first_invalid_opcode(void);
	extern void last_invalid_opcode(void);

	uint64_t start_addr = ((uintptr_t)first_invalid_opcode - (uintptr_t)code_32);
	uint64_t end_addr = ((uintptr_t)last_invalid_opcode - (uintptr_t)code_32);

	fprintf(stderr, "Caught invalid opcode exception\n");
	fprintf(stderr, "cs=0x%x rip=0x%x gs=0x%x ss=0x%x rsp=0x%llx | handling between 0x%llx and 0x%llx\n",
	    (unsigned)xtfs64->__ss64.__cs,
	    (unsigned)xtfs64->__ss64.__rip, (unsigned)xtfs64->__ss64.__gs,
	    (unsigned)xtfs64->__ss, xtfs64->__ss64.__rsp,
	    start_addr, end_addr);

	/*
	 * We expect to handle 4 invalid opcode exceptions:
	 * (1) sysenter
	 * (2) int $0x80
	 * (3) int $0x81
	 * (4) int $0x82
	 * (Note that due to the way the invalid opcode indication was implemented,
	 * %rip is already set to the next instruction.)
	 */
	if (xtfs64->__ss64.__rip >= start_addr && xtfs64->__ss64.__rip <= end_addr) {
		/*
		 * On return from the failed sysenter, %cs is changed to the
		 * sysenter code selector and %ss is set to 0x23, so switch them
		 * back to sane values.
		 */
		if ((unsigned)xtfs64->__ss64.__cs == SYSENTER_SELECTOR) {
			xtfs64->__ss64.__cs = COMPAT_MODE_CS_SELECTOR;
			xtfs64->__ss = 0x23; /* XXX */
		}
	}
}

kern_return_t
catch_mach_exception_raise_state_identity(mach_port_t exception_port,
    mach_port_t thread,
    mach_port_t task,
    exception_type_t exception,
    mach_exception_data_t code,
    mach_msg_type_number_t codeCnt,
    int * flavor,
    thread_state_t old_state,
    mach_msg_type_number_t old_stateCnt,
    thread_state_t new_state,
    mach_msg_type_number_t * new_stateCnt)
{
#pragma unused(exception_port, thread, task)

	_STRUCT_X86_THREAD_FULL_STATE64 *xtfs64 = (_STRUCT_X86_THREAD_FULL_STATE64 *)(void *)old_state;
	_STRUCT_X86_THREAD_FULL_STATE64 *new_xtfs64 = (_STRUCT_X86_THREAD_FULL_STATE64 *)(void *)new_state;
	uint64_t rip_skip_count = 0;

	/*
	 * Check the exception code and thread state.
	 * If we were executing 32-bit code (or 64-bit code on behalf of
	 * 32-bit code), we could update the thread state to effectively longjmp
	 * back to a safe location where the victim thread can recover.
	 * Then again, we could return KERN_NOT_SUPPORTED and allow the process
	 * to be nuked.
	 */

	switch (exception) {
	case EXC_ARITHMETIC:
		if (codeCnt >= 1 && code[0] == EXC_I386_DIV) {
			handle_arithmetic_exception(xtfs64, &rip_skip_count);
		}
		break;

	case EXC_BAD_INSTRUCTION:
	{
		if (codeCnt >= 1 && code[0] == EXC_I386_INVOP) {
			handle_badinsn_exception(xtfs64, &rip_skip_count);
		}
		break;
	}

	default:
		fprintf(stderr, "Unsupported catch_mach_exception_raise_state_identity: code 0x%llx sub 0x%llx\n",
		    code[0], codeCnt > 1 ? code[1] : 0LL);
		fprintf(stderr, "flavor=%d %%cs=0x%x %%rip=0x%llx\n", *flavor, (unsigned)xtfs64->__ss64.__cs,
		    xtfs64->__ss64.__rip);
	}

	/*
	 * If this exception happened in compatibility mode,
	 * assume it was the intentional division-by-zero and set the
	 * new state's %rip to just after the faulting instruction
	 * to enable the thread to resume.
	 */
	if ((unsigned)xtfs64->__ss64.__cs == COMPAT_MODE_CS_SELECTOR) {
		*new_stateCnt = old_stateCnt;
		*new_xtfs64 = *xtfs64;
		new_xtfs64->__ss64.__rip += rip_skip_count;
		fprintf(stderr, "new cs=0x%x rip=0x%llx\n", (unsigned)new_xtfs64->__ss64.__cs,
		    new_xtfs64->__ss64.__rip);
		return KERN_SUCCESS;
	} else {
		return KERN_NOT_SUPPORTED;
	}
}

static void *
handle_exceptions(void *arg)
{
	mach_port_t ePort = (mach_port_t)arg;
	kern_return_t kret;

	kret = mach_msg_server(mach_exc_server, MACH_MSG_SIZE_RELIABLE, ePort, 0);
	if (kret != KERN_SUCCESS) {
		fprintf(stderr, "mach_msg_server: %s (%d)\n", mach_error_string(kret), kret);
	}

	return NULL;
}

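/*
 * Requesting EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES with the
 * x86_THREAD_FULL_STATE64 flavor below is what causes the handler above to
 * receive (and be able to rewrite) the extended state, including %ds, %es,
 * %ss, and GSbase, of a compat-mode thread.
 */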
static void
init_task_exception_server(void)
{
	kern_return_t kr;
	task_t me = mach_task_self();
	pthread_t handler_thread;
	pthread_attr_t  attr;
	mach_port_t ePort;

	kr = mach_port_allocate(me, MACH_PORT_RIGHT_RECEIVE, &ePort);
	if (kr != KERN_SUCCESS) {
		fprintf(stderr, "allocate receive right: %d\n", kr);
		return;
	}

	kr = mach_port_insert_right(me, ePort, ePort, MACH_MSG_TYPE_MAKE_SEND);
	if (kr != KERN_SUCCESS) {
		fprintf(stderr, "insert right into port=[%d]: %d\n", ePort, kr);
		return;
	}

	kr = task_set_exception_ports(me, EXC_MASK_BAD_INSTRUCTION | EXC_MASK_ARITHMETIC, ePort,
	    (exception_behavior_t)(EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES), x86_THREAD_FULL_STATE64);
	if (kr != KERN_SUCCESS) {
		fprintf(stderr, "abort: error setting task exception ports on task=[%d], handler=[%d]: %d\n", me, ePort, kr);
		exit(1);
	}

	pthread_attr_init(&attr);
	pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

	if (pthread_create(&handler_thread, &attr, handle_exceptions, (void *)(uintptr_t)ePort) != 0) {
		perror("pthread create error");
		return;
	}

	pthread_attr_destroy(&attr);
}

static union ldt_entry *descs = 0;
static uint64_t idx;
static int saw_ud2 = 0;
static boolean_t ENV_set_ldt_in_sighandler = FALSE;

static void
signal_handler(int signo, siginfo_t *sinfop, void *ucontext)
{
	uint64_t rip_skip_count = 0;
	ucontext_t *uctxp = (ucontext_t *)ucontext;
	union {
		_STRUCT_MCONTEXT_AVX512_64 *avx512_basep;
		_STRUCT_MCONTEXT_AVX512_64_FULL *avx512_fullp;
		_STRUCT_MCONTEXT_AVX64 *avx64_basep;
		_STRUCT_MCONTEXT_AVX64_FULL *avx64_fullp;
		_STRUCT_MCONTEXT64 *fp_basep;
		_STRUCT_MCONTEXT64_FULL *fp_fullp;
	} mctx;

	mctx.fp_fullp = (_STRUCT_MCONTEXT64_FULL *)uctxp->uc_mcontext;

	/*
	 * Note that GSbase must be restored before calling into any frameworks
	 * that might access anything %gs-relative (e.g. TSD) if the signal
	 * handler was triggered while the thread was running with a non-default
	 * (system-established) GSbase.
	 */

	if ((signo != SIGFPE && signo != SIGILL) || sinfop->si_signo != signo) {
#ifndef STANDALONE
		T_ASSERT_FAIL("Unexpected signal %d\n", signo);
#else
		restore_gsbase(mctx.fp_fullp->__ss.__ss64.__rsp);
		fprintf(stderr, "Not handling signal %d\n", signo);
		abort();
#endif
	}

	if (uctxp->uc_mcsize == sizeof(_STRUCT_MCONTEXT_AVX512_64) ||
	    uctxp->uc_mcsize == sizeof(_STRUCT_MCONTEXT_AVX64) ||
	    uctxp->uc_mcsize == sizeof(_STRUCT_MCONTEXT64)) {
		_STRUCT_X86_THREAD_STATE64 *ss64 = &mctx.fp_basep->__ss;

		/*
		 * The following block is an illustration of what NOT to do.
		 * Configuring an LDT for the first time in a signal handler
		 * will likely cause the process to crash.
		 */
		if (ENV_set_ldt_in_sighandler == TRUE && !saw_ud2) {
			/* Set the LDT: */
			int cnt = i386_set_ldt((int)idx, &descs[idx], 1);
			if (cnt != (int)idx) {
#ifdef DEBUG
				fprintf(stderr, "i386_set_ldt unexpectedly returned %d (errno = %s)\n", cnt, strerror(errno));
#endif
#ifndef STANDALONE
				T_LOG("i386_set_ldt unexpectedly returned %d (errno: %s)\n", cnt, strerror(errno));
				T_ASSERT_FAIL("i386_set_ldt failure");
#else
				exit(1);
#endif
			}
#ifdef DEBUG
			printf("i386_set_ldt returned %d\n", cnt);
#endif
			ss64->__rip += 2;       /* ud2 is 2 bytes */

			saw_ud2 = 1;

			/*
			 * When we return here, the sigreturn processing code will try to copy a FULL
			 * thread context from the signal stack, which will likely cause the resumed
			 * thread to fault and be terminated.
			 */
			return;
		}

		restore_gsbase(ss64->__rsp);

		/*
		 * If we're in this block, either we are dispatching a signal received
		 * before we installed a custom LDT, or we are on a kernel that does not
		 * support sending full thread state with BSD signals.  It's likely the latter.
		 */
#ifndef STANDALONE
		T_ASSERT_FAIL("This system doesn't support BSD signals with full thread state.");
#else
		fprintf(stderr, "This system doesn't support BSD signals with full thread state.  Aborting.\n");
		abort();
#endif
	} else if (uctxp->uc_mcsize == sizeof(_STRUCT_MCONTEXT_AVX512_64_FULL) ||
	    uctxp->uc_mcsize == sizeof(_STRUCT_MCONTEXT_AVX64_FULL) ||
	    uctxp->uc_mcsize == sizeof(_STRUCT_MCONTEXT64_FULL)) {
		_STRUCT_X86_THREAD_FULL_STATE64 *ss64 = &mctx.fp_fullp->__ss;

		/*
		 * Since we're handling this signal on the same thread, we may need to
		 * restore GSbase.
		 */
		uint64_t orig_gsbase = stack_range_to_GSbase(ss64->__ss64.__rsp, 0);
		if (orig_gsbase != 0 && orig_gsbase != ss64->__gsbase) {
			restore_gsbase(ss64->__ss64.__rsp);
		}

		if (signo == SIGFPE) {
			handle_arithmetic_exception(ss64, &rip_skip_count);
		} else if (signo == SIGILL) {
			handle_badinsn_exception(ss64, &rip_skip_count);
		}

		/*
		 * If this exception happened in compatibility mode,
		 * assume it was the intentional division-by-zero and set the
		 * new state's %rip to just after the faulting instruction
		 * to enable the thread to resume.
		 */
		if ((unsigned)ss64->__ss64.__cs == COMPAT_MODE_CS_SELECTOR) {
			ss64->__ss64.__rip += rip_skip_count;
			fprintf(stderr, "new cs=0x%x rip=0x%llx\n", (unsigned)ss64->__ss64.__cs,
			    ss64->__ss64.__rip);
		}
	} else {
		_STRUCT_X86_THREAD_STATE64 *ss64 = &mctx.fp_basep->__ss;

		restore_gsbase(ss64->__rsp);
#ifndef STANDALONE
		T_ASSERT_FAIL("Unknown mcontext size %lu: Aborting.", uctxp->uc_mcsize);
#else
		fprintf(stderr, "Unknown mcontext size %lu: Aborting.\n", uctxp->uc_mcsize);
		abort();
#endif
	}
}

static void
setup_signal_handling(void)
{
	int rv;

	struct sigaction sa = {
		.__sigaction_u = { .__sa_sigaction = signal_handler },
		.sa_flags = SA_SIGINFO
	};

	sigfillset(&sa.sa_mask);

	rv = sigaction(SIGFPE, &sa, NULL);
	if (rv != 0) {
#ifndef STANDALONE
		T_ASSERT_FAIL("Failed to configure SIGFPE signal handler\n");
#else
		fprintf(stderr, "Failed to configure SIGFPE signal handler\n");
		abort();
#endif
	}

	rv = sigaction(SIGILL, &sa, NULL);
	if (rv != 0) {
#ifndef STANDALONE
		T_ASSERT_FAIL("Failed to configure SIGILL signal handler\n");
#else
		fprintf(stderr, "Failed to configure SIGILL signal handler\n");
		abort();
#endif
	}
}

static void
teardown_signal_handling(void)
{
	if (signal(SIGFPE, SIG_DFL) == SIG_ERR) {
#ifndef STANDALONE
		T_ASSERT_FAIL("Error resetting SIGFPE signal disposition\n");
#else
		fprintf(stderr, "Error resetting SIGFPE signal disposition\n");
		abort();
#endif
	}

	if (signal(SIGILL, SIG_DFL) == SIG_ERR) {
#ifndef STANDALONE
		T_ASSERT_FAIL("Error resetting SIGILL signal disposition\n");
#else
		fprintf(stderr, "Error resetting SIGILL signal disposition\n");
		abort();
#endif
	}
}

#ifdef DEBUG
static void
dump_desc(union ldt_entry *entp)
{
	printf("base %p lim %p type 0x%x dpl %x present %x opsz %x granular %x\n",
	    (void *)(uintptr_t)(entp->code.base00 + (entp->code.base16 << 16) + (entp->code.base24 << 24)),
	    (void *)(uintptr_t)(entp->code.limit00 + (entp->code.limit16 << 16)),
	    entp->code.type,
	    entp->code.dpl,
	    entp->code.present,
	    entp->code.opsz,
	    entp->code.granular);
}
#endif

static int
map_lowmem_stack(void **lowmemstk)
{
	void *addr;
	int err;

	if ((addr = mmap(0, FIXED_STACK_SIZE + PAGE_SIZE, PROT_READ | PROT_WRITE,
	    MAP_32BIT | MAP_PRIVATE | MAP_ANON, -1, 0)) == MAP_FAILED) {
		return errno;
	}

	if ((uintptr_t)addr > 0xFFFFF000ULL) {
		/* Error: This kernel does not support MAP_32BIT or there's a bug. */
#ifndef STANDALONE
		T_ASSERT_FAIL("%s: failed to map a 32-bit-accessible stack", __func__);
#else
		fprintf(stderr, "This kernel returned a virtual address > 4G (%p) despite MAP_32BIT.  Aborting.\n", addr);
		exit(1);
#endif
	}

	/* Enforce one page of redzone at the bottom of the stack */
	if (mprotect(addr, PAGE_SIZE, PROT_NONE) < 0) {
		err = errno;
		(void) munmap(addr, FIXED_STACK_SIZE + PAGE_SIZE);
		return err;
	}

	if (lowmemstk) {
		stack2gs[0].stack_base = (uintptr_t)addr + PAGE_SIZE;
		stack2gs[0].stack_limit = stack2gs[0].stack_base + FIXED_STACK_SIZE;
		*lowmemstk = (void *)((uintptr_t)addr + PAGE_SIZE);
	}

	return 0;
}

static int
map_32bit_code_impl(uint8_t *code_src, size_t code_len, void **codeptr,
    size_t szlimit)
{
	void *addr;
	size_t sz = (size_t)P2ROUNDUP(code_len, (unsigned)PAGE_SIZE);

	if (code_len > szlimit) {
		return E2BIG;
	}

#ifdef DEBUG
	printf("size = %lu, szlimit = %u\n", sz, (unsigned)szlimit);
#endif

	if ((addr = mmap(0, sz, PROT_READ | PROT_WRITE | PROT_EXEC,
	    MAP_32BIT | MAP_PRIVATE | MAP_ANON, -1, 0)) == MAP_FAILED) {
		return errno;
	}

	if ((uintptr_t)addr > 0xFFFFF000ULL) {
		/* Error: This kernel does not support MAP_32BIT or there's a bug. */
#ifndef STANDALONE
		T_ASSERT_FAIL("%s: failed to map a 32-bit-accessible trampoline", __func__);
#else
		fprintf(stderr, "This kernel returned a virtual address > 4G (%p) despite MAP_32BIT.  Aborting.\n", addr);
		exit(1);
#endif
	}

#ifdef DEBUG
	printf("Mapping code @%p..%p => %p..%p\n", (void *)code_src,
	    (void *)((uintptr_t)code_src + (unsigned)code_len),
	    addr, (void *)((uintptr_t)addr + (unsigned)code_len));
#endif

	bcopy(code_src, addr, code_len);

	/* Fill the rest of the page with NOPs */
	if ((sz - code_len) > 0) {
		memset((void *)((uintptr_t)addr + code_len), 0x90, sz - code_len);
	}

	if (codeptr) {
		*codeptr = addr;
	}

	return 0;
}

static int
map_32bit_trampoline(compat_tramp_t *lowmemtrampp)
{
	extern int compat_mode_trampoline_len;

	return map_32bit_code_impl((uint8_t *)&compat_mode_trampoline,
	           (size_t)compat_mode_trampoline_len, (void **)lowmemtrampp,
	           FIXED_TRAMP_MAXLEN);
}
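
/*
 * Note: the blob copied by map_32bit_trampoline() spans compat_mode_trampoline
 * through thunk64 and code_32, which is why ldt64_test_setup() computes their
 * low-memory addresses as offsets relative to compat_mode_trampoline.
 *
 * stack_range_to_GSbase() below associates (or looks up) the GSbase for the
 * stack range containing stackptr: a nonzero GSbase argument records a new
 * value, while GSbase == 0 performs a lookup only.  Callers rely on both uses.
 */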

static uint64_t
stack_range_to_GSbase(uint64_t stackptr, uint64_t GSbase)
{
	unsigned long i;

	for (i = 0; i < sizeof(stack2gs) / sizeof(stack2gs[0]); i++) {
		if (stackptr >= stack2gs[i].stack_base &&
		    stackptr < stack2gs[i].stack_limit) {
			if (GSbase != 0) {
#ifdef DEBUG
				fprintf(stderr, "Updated gsbase for stack at 0x%llx..0x%llx to 0x%llx\n",
				    stack2gs[i].stack_base, stack2gs[i].stack_limit, GSbase);
#endif
				stack2gs[i].GSbase = GSbase;
			}
			return stack2gs[i].GSbase;
		}
	}
	return 0;
}

static uint64_t
call_compatmode(uint32_t stackaddr, uint64_t compat_arg, uint64_t callback)
{
	uint64_t rv;

	/*
	 * Depending on how this is used, this allocation may need to be
	 * made with an allocator that returns virtual addresses below 4G.
	 */
	custom_tsd_t *new_GSbase = malloc(PAGE_SIZE);

	/*
	 * Change the GSbase (so things like printf will fail unless GSbase is
	 * restored)
	 */
	if (new_GSbase != NULL) {
#ifdef DEBUG
		fprintf(stderr, "Setting new GS base: %p\n", (void *)new_GSbase);
#endif
		new_GSbase->this_tsd_base = new_GSbase;
		new_GSbase->orig_tsd_base = get_gsbase();
		_thread_set_tsd_base((uintptr_t)new_GSbase);
	} else {
#ifndef STANDALONE
		T_ASSERT_FAIL("Failed to allocate a page for new GSbase");
#else
		fprintf(stderr, "Failed to allocate a page for new GSbase");
		abort();
#endif
	}

	rv = thunkit(&input_desc, (void *)(uintptr_t)stackaddr, compat_arg,
	    callback, thunk64_addr);

	restore_gsbase(stackaddr);

	free(new_GSbase);

	return rv;
}

static uint64_t
get_cursp(void)
{
	uint64_t curstk;
	__asm__ __volatile__ ("movq %%rsp, %0" : "=r" (curstk) :: "memory");
	return curstk;
}

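/*
 * Callback invoked (via the 64-bit thunk) while the custom GSbase is still
 * live: it must restore the original TSD base before touching libc (printf),
 * then reinstall the custom base so the trampoline's state stays consistent.
 */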
static void
hello_from_32bit(void)
{
	uint64_t cur_tsd_base = (uint64_t)(uintptr_t)mytsd->this_tsd_base;
	restore_gsbase(get_cursp());

	printf("Hello on behalf of 32-bit compatibility mode!\n");

	_thread_set_tsd_base(cur_tsd_base);
}

/*
 * Thread for executing 32-bit code
 */
static void *
thread_32bit(void *arg)
{
	thread_arg_t *targp = (thread_arg_t *)arg;
	uint64_t cthread_self = 0;

	/* Save the GSbase for context switch back to 64-bit mode */
	cthread_self = get_gsbase();

	/*
	 * Associate GSbase with the compat-mode stack (which will be used for long mode
	 * thunk calls as well.)
	 */
	(void)stack_range_to_GSbase(targp->compat_stackaddr, cthread_self);

#ifdef DEBUG
	printf("[thread %p] tsd base => %p\n", (void *)pthread_self(), (void *)cthread_self);
#endif

	pthread_mutex_lock(&targp->mutex);

	do {
		if (targp->done == FALSE) {
			pthread_cond_wait(&targp->condvar, &targp->mutex);
		}

		/* Finally, execute the test */
		if (call_compatmode(targp->compat_stackaddr, 0,
		    (uint64_t)&hello_from_32bit) == 1) {
			printf("32-bit code test passed\n");
		} else {
			printf("32-bit code test failed\n");
		}
	} while (targp->done == FALSE);

	pthread_mutex_unlock(&targp->mutex);

	return 0;
}

static void
join_32bit_thread(pthread_t *thridp, thread_arg_t *cmargp)
{
	(void)pthread_mutex_lock(&cmargp->mutex);
	cmargp->done = TRUE;
	(void)pthread_cond_signal(&cmargp->condvar);
	(void)pthread_mutex_unlock(&cmargp->mutex);
	(void)pthread_join(*thridp, NULL);
	*thridp = 0;
}

static int
create_worker_thread(thread_arg_t *cmargp, uint32_t stackaddr, pthread_t *cmthreadp)
{
	*cmargp = (thread_arg_t) { .mutex = PTHREAD_MUTEX_INITIALIZER,
		                   .condvar = PTHREAD_COND_INITIALIZER,
		                   .done = FALSE,
		                   .compat_stackaddr = stackaddr };

	return pthread_create(cmthreadp, NULL, thread_32bit, cmargp);
}

static void
ldt64_test_setup(pthread_t *cmthreadp, thread_arg_t *cmargp, boolean_t setldt_in_sighandler)
{
	extern void thunk64(void);
	extern void thunk64_movabs(void);
	int cnt = 0, err;
	void *addr;
	uintptr_t code_addr;
	uintptr_t thunk64_movabs_addr;

	descs = malloc(sizeof(union ldt_entry) * 256);
	if (descs == 0) {
#ifndef STANDALONE
		T_ASSERT_FAIL("Could not allocate descriptor storage");
#else
		fprintf(stderr, "Could not allocate descriptor storage\n");
		abort();
#endif
	}

#ifdef DEBUG
	printf("32-bit code is at %p\n", (void *)&code_32);
#endif

	if ((err = map_lowmem_stack(&addr)) != 0) {
#ifndef STANDALONE
		T_ASSERT_FAIL("failed to mmap lowmem stack: %s", strerror(err));
#else
		fprintf(stderr, "Failed to mmap lowmem stack: %s\n", strerror(err));
		exit(1);
#endif
	}

	stackAddr = (uintptr_t)addr + FIXED_STACK_SIZE - 16;
#ifdef DEBUG
	printf("lowstack addr = %p\n", (void *)stackAddr);
#endif

	if ((err = map_32bit_trampoline(&thunkit)) != 0) {
#ifndef STANDALONE
		T_LOG("Failed to map trampoline into lowmem: %s\n", strerror(err));
		T_ASSERT_FAIL("Failed to map trampoline into lowmem");
#else
		fprintf(stderr, "Failed to map trampoline into lowmem: %s\n", strerror(err));
		exit(1);
#endif
	}

	/*
	 * Store long_mode_trampoline's address into the constant part of the movabs
	 * instruction in thunk64
	 */
	thunk64_movabs_addr = (uintptr_t)thunkit + ((uintptr_t)thunk64_movabs - (uintptr_t)compat_mode_trampoline);
	*((uint64_t *)(thunk64_movabs_addr + 2)) = (uint64_t)&long_mode_trampoline;

	bzero(descs, sizeof(union ldt_entry) * 256);

	if ((cnt = i386_get_ldt(0, descs, 1)) <= 0) {
#ifndef STANDALONE
		T_LOG("i386_get_ldt unexpectedly returned %d (errno: %s)\n", cnt, strerror(errno));
		T_ASSERT_FAIL("i386_get_ldt failure");
#else
		fprintf(stderr, "i386_get_ldt unexpectedly returned %d (errno: %s)\n", cnt, strerror(errno));
		exit(1);
#endif
	}

#ifdef DEBUG
	printf("i386_get_ldt returned %d\n", cnt);
#endif

	idx = (unsigned)cnt;      /* Put the desired descriptor in the first available slot */

	/*
	 * code_32's address for the purposes of this descriptor is the base mapped address of
	 * the thunkit function + the offset of code_32 from compat_mode_trampoline.
	 */
	code_addr = (uintptr_t)thunkit + ((uintptr_t)code_32 - (uintptr_t)compat_mode_trampoline);
	thunk64_addr = (uintptr_t)thunkit + ((uintptr_t)thunk64 - (uintptr_t)compat_mode_trampoline);

	/* Initialize desired descriptor */
	descs[idx].code.limit00 = (unsigned short)(((code_addr >> 12) + 1) & 0xFFFF);
	descs[idx].code.limit16 = (unsigned char)((((code_addr >> 12) + 1) >> 16) & 0xF);
	descs[idx].code.base00 = (unsigned short)((code_addr) & 0xFFFF);
	descs[idx].code.base16 = (unsigned char)((code_addr >> 16) & 0xFF);
	descs[idx].code.base24 = (unsigned char)((code_addr >> 24) & 0xFF);
	descs[idx].code.type = DESC_CODE_READ;
	descs[idx].code.opsz = DESC_CODE_32B;
	descs[idx].code.granular = DESC_GRAN_PAGE;
	descs[idx].code.dpl = 3;
	descs[idx].code.present = 1;
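
	/*
	 * With DESC_GRAN_PAGE the limit is expressed in 4KiB units, so
	 * ((code_addr >> 12) + 1) is simply a value large enough to cover the
	 * copied code; the segment base is code_addr itself, so offset 0 in the
	 * new segment lands on code_32.
	 */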

	if (setldt_in_sighandler == FALSE) {
		/* Set the LDT: */
		cnt = i386_set_ldt((int)idx, &descs[idx], 1);
		if (cnt != (int)idx) {
#ifndef STANDALONE
			T_LOG("i386_set_ldt unexpectedly returned %d (errno: %s)\n", cnt, strerror(errno));
			T_ASSERT_FAIL("i386_set_ldt failure");
#else
			fprintf(stderr, "i386_set_ldt unexpectedly returned %d (errno: %s)\n", cnt, strerror(errno));
			exit(1);
#endif
		}
#ifdef DEBUG
		printf("i386_set_ldt returned %d\n", cnt);
#endif
	} else {
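		/*
		 * The ud2 below raises SIGILL and diverts through signal_handler(),
		 * which installs the LDT entry instead (the deliberately broken path
		 * exercised when LDT_SET_IN_SIGHANDLER is set).
		 */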
		__asm__ __volatile__ ("ud2" ::: "memory");
	}

	/* Read back the LDT to ensure it was set properly */
	if ((cnt = i386_get_ldt(0, descs, (int)idx)) > 0) {
#ifdef DEBUG
		for (int i = 0; i < cnt; i++) {
			dump_desc(&descs[i]);
		}
#endif
	} else {
#ifndef STANDALONE
		T_LOG("i386_get_ldt unexpectedly returned %d (errno: %s)\n", cnt, strerror(errno));
		T_ASSERT_FAIL("i386_get_ldt failure");
#else
		fprintf(stderr, "i386_get_ldt unexpectedly returned %d (errno: %s)\n", cnt, strerror(errno));
		exit(1);
#endif
	}

	free(descs);

	if ((err = create_worker_thread(cmargp, (uint32_t)stackAddr, cmthreadp)) != 0) {
#ifdef DEBUG
		fprintf(stderr, "Fatal: Could not create thread: %s\n", strerror(err));
#endif
#ifndef STANDALONE
		T_LOG("Fatal: Could not create thread: %s\n", strerror(err));
		T_ASSERT_FAIL("Thread creation failure");
#else
		exit(1);
#endif
	}
}

#ifdef STANDALONE
static void
test_ldt64_with_bsdsig(void)
#else
/*
 * Main test declarations
 */
T_DECL(ldt64_with_bsd_sighandling,
    "Ensures that a 64-bit process can create LDT entries and can execute code in "
    "compatibility mode with BSD signal handling",
    T_META_TIMEOUT(NORMAL_RUN_TIME + TIMEOUT_OVERHEAD))
#endif
{
	pthread_t cmthread;
	thread_arg_t cmarg;

	int translated = 0;
	size_t translated_size = sizeof(int);

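	/* "sysctl.proc_translated" reports whether this process is running translated (e.g. under Rosetta) */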
	sysctlbyname("sysctl.proc_translated", &translated, &translated_size, NULL, 0);

	if (translated) {
		T_SKIP("Skipping this test because the process is translated");
	}

	setup_signal_handling();

#ifndef STANDALONE
	T_SETUPBEGIN;
#endif
	ENV_set_ldt_in_sighandler = (getenv("LDT_SET_IN_SIGHANDLER") != NULL) ? TRUE : FALSE;
	ldt64_test_setup(&cmthread, &cmarg, ENV_set_ldt_in_sighandler);
#ifndef STANDALONE
	T_SETUPEND;
#endif

	join_32bit_thread(&cmthread, &cmarg);

	teardown_signal_handling();

#ifndef STANDALONE
	T_PASS("Successfully completed ldt64 test with BSD signal handling");
#else
	fprintf(stderr, "PASSED: ldt64_with_bsd_signal_handling\n");
#endif
}

#ifdef STANDALONE
static void
test_ldt64_with_machexc(void)
#else
T_DECL(ldt64_with_mach_exception_handling,
    "Ensures that a 64-bit process can create LDT entries and can execute code in "
    "compatibility mode with Mach exception handling",
    T_META_TIMEOUT(NORMAL_RUN_TIME + TIMEOUT_OVERHEAD))
#endif
{
	pthread_t cmthread;
	thread_arg_t cmarg;

	int translated = 0;
	size_t translated_size = sizeof(int);

	sysctlbyname("sysctl.proc_translated", &translated, &translated_size, NULL, 0);

	if (translated) {
		T_SKIP("Skipping this test because the process is translated");
	}

#ifndef STANDALONE
	T_SETUPBEGIN;
#endif
	ldt64_test_setup(&cmthread, &cmarg, FALSE);
#ifndef STANDALONE
	T_SETUPEND;
#endif

	/* Now repeat with Mach exception handling */
	init_task_exception_server();

	join_32bit_thread(&cmthread, &cmarg);

#ifndef STANDALONE
	T_PASS("Successfully completed ldt64 test with mach exception handling");
#else
	fprintf(stderr, "PASSED: ldt64_with_mach_exception_handling\n");
#endif
}

#ifdef STANDALONE
int
main(int __unused argc, char ** __unused argv)
{
	test_ldt64_with_bsdsig();
	test_ldt64_with_machexc();
}
#endif