/* xref: /f-stack/freebsd/i386/i386/exception.s (revision 22ce4aff) */
/*-
 * Copyright (c) 1989, 1990 William F. Jolitz.
 * Copyright (c) 1990 The Regents of the University of California.
 * Copyright (c) 2007, 2018 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by A. Joseph Koshy under
 * sponsorship from the FreeBSD Foundation and Google, Inc.
 * Portions of this software were developed by Konstantin Belousov
 * <kib@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/* Kernel build options consumed by the conditional blocks below. */
#include "opt_apic.h"
#include "opt_atpic.h"
#include "opt_hwpmc_hooks.h"

/* Auto-generated assembler offsets/constants (TF_*, PCB_*, PCPU, ...). */
#include "assym.inc"

#include <machine/psl.h>
#include <machine/asmacros.h>
#include <machine/trap.h>
#ifdef KDTRACE_HOOKS
	.bss
	/*
	 * Function pointer installed by the DTrace fbt provider; when
	 * non-zero, kernel-mode #UD faults are forwarded to it (see
	 * IDTVEC(ill) below).
	 */
	.globl	dtrace_invop_jump_addr
	.align	4
	.type	dtrace_invop_jump_addr, @object
	.size	dtrace_invop_jump_addr, 4
dtrace_invop_jump_addr:
	.zero	4
	/*
	 * Return address DTrace jumps back to when the fault was not
	 * one of its probes; set to norm_ill in IDTVEC(ill) below.
	 * NOTE(review): declared .size 4 but reserves 8 bytes; the
	 * extra word is padding — confirm against dtrace consumers.
	 */
	.globl	dtrace_invop_calltrap_addr
	.align	4
	.type	dtrace_invop_calltrap_addr, @object
	.size	dtrace_invop_calltrap_addr, 4
dtrace_invop_calltrap_addr:
	.zero	8
#endif
64	.text
65ENTRY(start_exceptions)
66	.globl	tramp_idleptd
67tramp_idleptd:	.long	0
68
69/*****************************************************************************/
70/* Trap handling                                                             */
71/*****************************************************************************/
72/*
73 * Trap and fault vector routines.
74 *
75 * All traps are 'interrupt gates', SDT_SYS386IGT.  Interrupts are disabled
76 * by hardware to not allow interrupts until code switched to the kernel
77 * address space and the kernel thread stack.
78 *
79 * The cpu will push a certain amount of state onto the kernel stack for
80 * the current process.  The amount of state depends on the type of trap
81 * and whether the trap crossed rings or not.  See i386/include/frame.h.
82 * At the very least the current EFLAGS (status register, which includes
83 * the interrupt disable state prior to the trap), the code segment register,
84 * and the return instruction pointer are pushed by the cpu.  The cpu
85 * will also push an 'error' code for certain traps.  We push a dummy
86 * error code for those traps where the cpu doesn't in order to maintain
87 * a consistent frame.  We also push a contrived 'trap number'.
88 *
89 * The cpu does not push the general registers, we must do that, and we
90 * must restore them prior to calling 'iret'.  The cpu adjusts the %cs and
91 * %ss segment registers, but does not mess with %ds, %es, or %fs.  Thus we
92 * must load them with appropriate values for supervisor mode operation.
93 *
94 * This code is not executed at the linked address, it is copied to the
95 * trampoline area.  As the consequence, all code there and in included files
96 * must be PIC.
97 */
98
99MCOUNT_LABEL(user)
100MCOUNT_LABEL(btrap)
101
/* Push the trap number and join the common trap path. */
#define	TRAP(a)		pushl $(a) ; jmp alltraps

/*
 * The 'pushl $0' before TRAP() is the dummy error code for vectors
 * where the CPU does not push one, keeping the trapframe layout uniform.
 */
IDTVEC(div)
	pushl $0; TRAP(T_DIVIDE)
IDTVEC(bpt)
	pushl $0; TRAP(T_BPTFLT)
IDTVEC(dtrace_ret)
	pushl $0; TRAP(T_DTRACE_RET)
IDTVEC(ofl)
	pushl $0; TRAP(T_OFLOW)
IDTVEC(bnd)
	pushl $0; TRAP(T_BOUND)
#ifndef KDTRACE_HOOKS
/* With DTrace hooks compiled in, a richer #UD handler is defined below. */
IDTVEC(ill)
	pushl $0; TRAP(T_PRIVINFLT)
#endif
IDTVEC(dna)
	pushl $0; TRAP(T_DNA)
IDTVEC(fpusegm)
	pushl $0; TRAP(T_FPOPFLT)
IDTVEC(tss)
	TRAP(T_TSSFLT)
/*
 * Segment faults can also be raised by the selector pops in doreti;
 * irettraps sorts those out before entering the normal trap path.
 */
IDTVEC(missing)
	pushl	$T_SEGNPFLT
	jmp	irettraps
IDTVEC(stk)
	pushl	$T_STKFLT
	jmp	irettraps
IDTVEC(prot)
	pushl	$T_PROTFLT
	jmp	irettraps
/*
 * Page fault.  Special-case a kernel-mode (non-vm86) fault whose %eip
 * lies in the trampoline range: reload %cr3 from %ebx and resume at the
 * recovery %eip supplied in %edx, discarding the error code (protocol
 * used by the fast copyin/copyout code — see copyout_fast.s).
 */
IDTVEC(page)
	testl	$PSL_VM, TF_EFLAGS-TF_ERR(%esp)
	jnz	1f
	testb	$SEL_RPL_MASK, TF_CS-TF_ERR(%esp)
	jnz	1f
	cmpl	$PMAP_TRM_MIN_ADDRESS, TF_EIP-TF_ERR(%esp)
	jb	1f
	movl	%ebx, %cr3
	movl	%edx, TF_EIP-TF_ERR(%esp)
	addl	$4, %esp		/* drop the CPU-pushed error code */
	iret
1:	pushl	$T_PAGEFLT
	jmp	alltraps
IDTVEC(rsvd_pti)
IDTVEC(rsvd)
	pushl $0; TRAP(T_RESERVED)
IDTVEC(fpu)
	pushl $0; TRAP(T_ARITHTRAP)
IDTVEC(align)
	TRAP(T_ALIGNFLT)
IDTVEC(xmm)
	pushl $0; TRAP(T_XMMFLT)
155
156	/*
157	 * All traps except ones for syscalls or invalid segment,
158	 * jump to alltraps.  If
159	 * interrupts were enabled when the trap occurred, then interrupts
160	 * are enabled now if the trap was through a trap gate, else
161	 * disabled if the trap was through an interrupt gate.  Note that
162	 * int0x80_syscall is a trap gate.   Interrupt gates are used by
163	 * page faults, non-maskable interrupts, debug and breakpoint
164	 * exceptions.
165	 */
166	SUPERALIGN_TEXT
167	.globl	alltraps
168	.type	alltraps,@function
169alltraps:
170	PUSH_FRAME2
171alltraps_with_regs_pushed:
172	SET_KERNEL_SREGS
173	cld
174	KENTER
175	FAKE_MCOUNT(TF_EIP(%esp))
176calltrap:
177	pushl	%esp
178	movl	$trap,%eax
179	call	*%eax
180	add	$4, %esp
181
182	/*
183	 * Return via doreti to handle ASTs.
184	 */
185	MEXITCOUNT
186	jmp	doreti
187
188	.globl	irettraps
189	.type	irettraps,@function
190irettraps:
191	testl	$PSL_VM, TF_EFLAGS-TF_TRAPNO(%esp)
192	jnz	alltraps
193	testb	$SEL_RPL_MASK, TF_CS-TF_TRAPNO(%esp)
194	jnz	alltraps
195
196	/*
197	 * Kernel mode.
198	 * The special case there is the kernel mode with user %cr3 and
199	 * trampoline stack. We need to copy both current frame and the
200	 * hardware portion of the frame we tried to return to, to the
201	 * normal stack.  This logic must follow the stack unwind order
202	 * in doreti.
203	 */
204	PUSH_FRAME2
205	SET_KERNEL_SREGS
206	cld
207	call	1f
2081:	popl	%ebx
209	leal	(doreti_iret - 1b)(%ebx), %edx
210	cmpl	%edx, TF_EIP(%esp)
211	jne	2f
212	movl	$(2 * TF_SZ - TF_EIP), %ecx
213	jmp	6f
2142:	leal	(doreti_popl_ds - 1b)(%ebx), %edx
215	cmpl	%edx, TF_EIP(%esp)
216	jne	3f
217	movl	$(2 * TF_SZ - TF_DS), %ecx
218	jmp	6f
2193:	leal	(doreti_popl_es - 1b)(%ebx), %edx
220	cmpl	%edx, TF_EIP(%esp)
221	jne	4f
222	movl	$(2 * TF_SZ - TF_ES), %ecx
223	jmp	6f
2244:	leal	(doreti_popl_fs - 1b)(%ebx), %edx
225	cmpl	%edx, TF_EIP(%esp)
226	jne	5f
227	movl	$(2 * TF_SZ - TF_FS), %ecx
228	jmp	6f
229	/* kernel mode, normal */
2305:	FAKE_MCOUNT(TF_EIP(%esp))
231	jmp	calltrap
2326:	cmpl	$PMAP_TRM_MIN_ADDRESS, %esp	/* trampoline stack ? */
233	jb	5b	/* if not, no need to change stacks */
234	movl	(tramp_idleptd - 1b)(%ebx), %eax
235	movl	%eax, %cr3
236	movl	PCPU(KESP0), %edx
237	subl	%ecx, %edx
238	movl	%edx, %edi
239	movl	%esp, %esi
240	rep; movsb
241	movl	%edx, %esp
242	FAKE_MCOUNT(TF_EIP(%esp))
243	jmp	calltrap
244
/*
 * Privileged instruction fault.
 */
#ifdef KDTRACE_HOOKS
	SUPERALIGN_TEXT
IDTVEC(ill)
	/*
	 * Check if this is a user fault.  If so, just handle it as a normal
	 * trap.
	 */
	testl	$PSL_VM, 8(%esp)	/* and vm86 mode. */
	jnz	norm_ill
	cmpl	$GSEL_KPL, 4(%esp)	/* Check the code segment */
	jne	norm_ill

	/*
	 * Check if a DTrace hook is registered.  The trampoline cannot
	 * be instrumented.
	 */
	cmpl	$0, dtrace_invop_jump_addr
	je	norm_ill

	/*
	 * This is a kernel instruction fault that might have been caused
	 * by a DTrace provider.
	 */
	pushal
	cld

	/*
	 * Set our jump address for the jump back in the event that
	 * the exception wasn't caused by DTrace at all.
	 */
	movl	$norm_ill, dtrace_invop_calltrap_addr

	/* Jump to the code hooked in by DTrace. */
	jmpl	*dtrace_invop_jump_addr

	/*
	 * Process the instruction fault in the normal way.
	 */
norm_ill:
	pushl	$0			/* dummy error code */
	pushl	$T_PRIVINFLT
	jmp	alltraps
#endif
291
/*
 * See comment in the handler for the kernel case T_TRCTRAP in trap.c.
 * The exception handler must be ready to execute with wrong %cr3.
 * We save original %cr3 in frame->tf_err, similarly to NMI and MCE
 * handlers.
 */
IDTVEC(dbg)
	pushl	$0
	pushl	$T_TRCTRAP
	PUSH_FRAME2
	SET_KERNEL_SREGS
	cld
	movl	%cr3, %eax		/* stash original %cr3 in tf_err */
	movl	%eax, TF_ERR(%esp)
	call	1f			/* PIC: get our run-time address */
1:	popl	%eax
	movl	(tramp_idleptd - 1b)(%eax), %eax
	movl	%eax, %cr3		/* switch to kernel page tables */
	FAKE_MCOUNT(TF_EIP(%esp))
	testl	$PSL_VM, TF_EFLAGS(%esp)
	jnz	dbg_user
	testb	$SEL_RPL_MASK,TF_CS(%esp)
	jz	calltrap		/* kernel mode: common trap path */
dbg_user:
	NMOVE_STACKS
	movl	$handle_ibrs_entry,%eax
	call	*%eax
	pushl	%esp			/* arg: pointer to the trapframe */
	movl	$trap,%eax
	call	*%eax
	add	$4, %esp
	/*
	 * Rewrite tf_trapno so doreti does not take the T_TRCTRAP
	 * %cr3-restoring exit (doreti_iret_nmi) for the user return.
	 */
	movl	$T_RESERVED, TF_TRAPNO(%esp)
	MEXITCOUNT
	jmp	doreti
326
IDTVEC(mchk)
	pushl	$0			/* dummy error code */
	pushl	$T_MCHK
	jmp	nmi_mchk_common

IDTVEC(nmi)
	pushl	$0			/* dummy error code */
	pushl	$T_NMI
nmi_mchk_common:
	PUSH_FRAME2
	SET_KERNEL_SREGS
	cld
	/*
	 * Save %cr3 into tf_err.  There is no good place to put it.
	 * Always reload %cr3, since we might have interrupted the
	 * kernel entry or exit.
	 * Do not switch to the thread kernel stack, otherwise we might
	 * obliterate the previous context partially copied from the
	 * trampoline stack.
	 * Do not re-enable IBRS, there is no good place to store
	 * previous state if we come from the kernel.
	 */
	movl	%cr3, %eax
	movl	%eax, TF_ERR(%esp)
	call	1f			/* PIC: get our run-time address */
1:	popl	%eax
	movl	(tramp_idleptd - 1b)(%eax), %eax
	movl	%eax, %cr3
	FAKE_MCOUNT(TF_EIP(%esp))
	jmp	calltrap
357
/*
 * Trap gate entry for syscalls (int 0x80).
 * This is used by FreeBSD ELF executables, "new" a.out executables, and all
 * Linux executables.
 *
 * Even though the name says 'int0x80', this is actually a trap gate, not an
 * interrupt gate.  Thus interrupts are enabled on entry just as they are for
 * a normal syscall.
 */
	SUPERALIGN_TEXT
IDTVEC(int0x80_syscall)
	pushl	$2			/* sizeof "int 0x80" */
	pushl	$0			/* tf_trapno */
	PUSH_FRAME2
	SET_KERNEL_SREGS
	cld
	MOVE_STACKS			/* to the thread kernel stack */
	movl	$handle_ibrs_entry,%eax
	call	*%eax
	sti
	FAKE_MCOUNT(TF_EIP(%esp))
	pushl	%esp			/* arg: pointer to the trapframe */
	movl	$syscall, %eax		/* indirect call for PIC */
	call	*%eax
	add	$4, %esp
	MEXITCOUNT
	jmp	doreti
385
/*
 * First code executed by a newly forked/created thread; calls
 * fork_exit(function, arg1, trapframe) and then exits via doreti.
 */
ENTRY(fork_trampoline)
	pushl	%esp			/* trapframe pointer */
	pushl	%ebx			/* arg1 */
	pushl	%esi			/* function */
	movl	$fork_exit, %eax	/* indirect call for PIC */
	call	*%eax
	addl	$12,%esp
	/* cut from syscall */

	/*
	 * Return via doreti to handle ASTs.
	 */
	MEXITCOUNT
	jmp	doreti
400
401
/*
 * To efficiently implement classification of trap and interrupt handlers
 * for profiling, there must be only trap handlers between the labels btrap
 * and bintr, and only interrupt handlers between the labels bintr and
 * eintr.  This is implemented (partly) by including files that contain
 * some of the handlers.  Before including the files, set up a normal asm
 * environment so that the included files don't need to know that they are
 * included.
 */

	.data
	.p2align 4
	.text
	SUPERALIGN_TEXT
MCOUNT_LABEL(bintr)

#ifdef DEV_ATPIC
#include <i386/i386/atpic_vector.s>
#endif

#if defined(DEV_APIC) && defined(DEV_ATPIC)
	.data
	.p2align 4
	.text
	SUPERALIGN_TEXT
#endif

#ifdef DEV_APIC
#include <i386/i386/apic_vector.s>
#endif

	.data
	.p2align 4
	.text
	SUPERALIGN_TEXT
#include <i386/i386/vm86bios.s>

	.text
MCOUNT_LABEL(eintr)

#include <i386/i386/copyout_fast.s>
443
/*
 * void doreti(struct trapframe)
 *
 * Handle return from interrupts, traps and syscalls.
 */
	.text
	SUPERALIGN_TEXT
	.type	doreti,@function
	.globl	doreti
doreti:
	FAKE_MCOUNT($bintr)		/* init "from" bintr -> doreti */
doreti_next:
	/*
	 * Check if ASTs can be handled now.  ASTs cannot be safely
	 * processed when returning from an NMI.
	 */
	cmpb	$T_NMI,TF_TRAPNO(%esp)
#ifdef HWPMC_HOOKS
	je	doreti_nmi
#else
	je	doreti_exit
#endif
	/*
	 * PSL_VM must be checked first since segment registers only
	 * have an RPL in non-VM86 mode.
	 * ASTs can not be handled now if we are in a vm86 call.
	 */
	testl	$PSL_VM,TF_EFLAGS(%esp)
	jz	doreti_notvm86
	movl	PCPU(CURPCB),%ecx
	testl	$PCB_VM86CALL,PCB_FLAGS(%ecx)
	jz	doreti_ast
	jmp	doreti_popl_fs		/* in-kernel vm86 call: pop and iret */

doreti_notvm86:
	testb	$SEL_RPL_MASK,TF_CS(%esp) /* are we returning to user mode? */
	jz	doreti_exit		/* can't handle ASTs now if not */

doreti_ast:
	/*
	 * Check for ASTs atomically with returning.  Disabling CPU
	 * interrupts provides sufficient locking even in the SMP case,
	 * since we will be informed of any new ASTs by an IPI.
	 */
	cli
	movl	PCPU(CURTHREAD),%eax
	testl	$TDF_ASTPENDING | TDF_NEEDRESCHED,TD_FLAGS(%eax)
	je	doreti_exit
	sti
	pushl	%esp			/* pass a pointer to the trapframe */
	movl	$ast, %eax		/* indirect call for PIC */
	call	*%eax
	add	$4,%esp
	jmp	doreti_ast		/* re-check; ast() may set new flags */
498
499	/*
500	 * doreti_exit:	pop registers, iret.
501	 *
502	 *	The segment register pop is a special case, since it may
503	 *	fault if (for example) a sigreturn specifies bad segment
504	 *	registers.  The fault is handled in trap.c.
505	 */
506doreti_exit:
507	MEXITCOUNT
508
509	cmpl	$T_NMI, TF_TRAPNO(%esp)
510	je	doreti_iret_nmi
511	cmpl	$T_MCHK, TF_TRAPNO(%esp)
512	je	doreti_iret_nmi
513	cmpl	$T_TRCTRAP, TF_TRAPNO(%esp)
514	je	doreti_iret_nmi
515	movl	$TF_SZ, %ecx
516	testl	$PSL_VM,TF_EFLAGS(%esp)
517	jz	1f			/* PCB_VM86CALL is not set */
518	addl	$VM86_STACK_SPACE, %ecx
519	jmp	2f
5201:	testl	$SEL_RPL_MASK, TF_CS(%esp)
521	jz	doreti_popl_fs
5222:	movl	$handle_ibrs_exit,%eax
523	pushl	%ecx			/* preserve enough call-used regs */
524	call	*%eax
525	movl	mds_handler,%eax
526	call	*%eax
527	popl	%ecx
528	movl	%esp, %esi
529	movl	PCPU(TRAMPSTK), %edx
530	subl	%ecx, %edx
531	movl	%edx, %edi
532	rep; movsb
533	movl	%edx, %esp
534	movl	PCPU(CURPCB),%eax
535	movl	PCB_CR3(%eax), %eax
536	movl	%eax, %cr3
537
538	.globl	doreti_popl_fs
539doreti_popl_fs:
540	popl	%fs
541	.globl	doreti_popl_es
542doreti_popl_es:
543	popl	%es
544	.globl	doreti_popl_ds
545doreti_popl_ds:
546	popl	%ds
547	popal
548	addl	$8,%esp
549	.globl	doreti_iret
550doreti_iret:
551	iret
552
553doreti_iret_nmi:
554	movl	TF_ERR(%esp), %eax
555	movl	%eax, %cr3
556	jmp	doreti_popl_fs
557
558	/*
559	 * doreti_iret_fault and friends.  Alternative return code for
560	 * the case where we get a fault in the doreti_exit code
561	 * above.  trap() (i386/i386/trap.c) catches this specific
562	 * case, and continues in the corresponding place in the code
563	 * below.
564	 *
565	 * If the fault occured during return to usermode, we recreate
566	 * the trap frame and call trap() to send a signal.  Otherwise
567	 * the kernel was tricked into fault by attempt to restore invalid
568	 * usermode segment selectors on return from nested fault or
569	 * interrupt, where interrupted kernel entry code not yet loaded
570	 * kernel selectors.  In the latter case, emulate iret and zero
571	 * the invalid selector.
572	 */
573	ALIGN_TEXT
574	.globl	doreti_iret_fault
575doreti_iret_fault:
576	pushl	$0	/* tf_err */
577	pushl	$0	/* tf_trapno XXXKIB: provide more useful value ? */
578	pushal
579	pushl	$0
580	movw	%ds,(%esp)
581	.globl	doreti_popl_ds_fault
582doreti_popl_ds_fault:
583	testb	$SEL_RPL_MASK,TF_CS-TF_DS(%esp)
584	jz	doreti_popl_ds_kfault
585	pushl	$0
586	movw	%es,(%esp)
587	.globl	doreti_popl_es_fault
588doreti_popl_es_fault:
589	testb	$SEL_RPL_MASK,TF_CS-TF_ES(%esp)
590	jz	doreti_popl_es_kfault
591	pushl	$0
592	movw	%fs,(%esp)
593	.globl	doreti_popl_fs_fault
594doreti_popl_fs_fault:
595	testb	$SEL_RPL_MASK,TF_CS-TF_FS(%esp)
596	jz	doreti_popl_fs_kfault
597	movl	$0,TF_ERR(%esp)	/* XXX should be the error code */
598	movl	$T_PROTFLT,TF_TRAPNO(%esp)
599	SET_KERNEL_SREGS
600	jmp	calltrap
601
602doreti_popl_ds_kfault:
603	movl	$0,(%esp)
604	jmp	doreti_popl_ds
605doreti_popl_es_kfault:
606	movl	$0,(%esp)
607	jmp	doreti_popl_es
608doreti_popl_fs_kfault:
609	movl	$0,(%esp)
610	jmp	doreti_popl_fs
611
#ifdef HWPMC_HOOKS
doreti_nmi:
	/*
	 * Since we are returning from an NMI, check if the current trap
	 * was from user mode and if so whether the current thread
	 * needs a user call chain capture.
	 */
	testl	$PSL_VM, TF_EFLAGS(%esp)
	jnz	doreti_exit
	testb	$SEL_RPL_MASK,TF_CS(%esp)
	jz	doreti_exit
	movl	PCPU(CURTHREAD),%eax	/* curthread present? */
	orl	%eax,%eax
	jz	doreti_exit
	testl	$TDP_CALLCHAIN,TD_PFLAGS(%eax) /* flagged for capture? */
	jz	doreti_exit
	/*
	 * Switch to thread stack.  Reset tf_trapno to not indicate NMI,
	 * to cause normal userspace exit.
	 */
	movl	$T_RESERVED, TF_TRAPNO(%esp)
	NMOVE_STACKS
	/*
	 * Take the processor out of NMI mode by executing a fake "iret".
	 */
	pushfl
	pushl	%cs
	call	1f			/* PIC: compute outofnmi address */
1:	popl	%eax
	leal	(outofnmi-1b)(%eax),%eax
	pushl	%eax
	iret
outofnmi:
	/*
	 * Call the callchain capture hook after turning interrupts back on.
	 */
	movl	pmc_hook,%ecx
	orl	%ecx,%ecx		/* hook may have been unregistered */
	jz	doreti_exit
	pushl	%esp			/* frame pointer */
	pushl	$PMC_FN_USER_CALLCHAIN	/* command */
	movl	PCPU(CURTHREAD),%eax
	pushl	%eax			/* curthread */
	sti
	call	*%ecx
	addl	$12,%esp
	jmp	doreti_ast
#endif

ENTRY(end_exceptions)
662