1 /*	$OpenBSD: trap.c,v 1.19 1998/09/30 12:40:41 pefo Exp $	*/
2 /* tracked to 1.23 */
3 /*-
4  * SPDX-License-Identifier: BSD-3-Clause
5  *
6  * Copyright (c) 1988 University of Utah.
7  * Copyright (c) 1992, 1993
8  *	The Regents of the University of California.  All rights reserved.
9  *
10  * This code is derived from software contributed to Berkeley by
11  * the Systems Programming Group of the University of Utah Computer
12  * Science Department and Ralph Campbell.
13  *
14  * Redistribution and use in source and binary forms, with or without
15  * modification, are permitted provided that the following conditions
16  * are met:
17  * 1. Redistributions of source code must retain the above copyright
18  *    notice, this list of conditions and the following disclaimer.
19  * 2. Redistributions in binary form must reproduce the above copyright
20  *    notice, this list of conditions and the following disclaimer in the
21  *    documentation and/or other materials provided with the distribution.
22  * 3. Neither the name of the University nor the names of its contributors
23  *    may be used to endorse or promote products derived from this software
24  *    without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  * from: Utah Hdr: trap.c 1.32 91/04/06
39  *
40  *	from: @(#)trap.c	8.5 (Berkeley) 1/11/94
41  *	JNPR: trap.c,v 1.13.2.2 2007/08/29 10:03:49 girish
42  */
43 #include <sys/cdefs.h>
44 __FBSDID("$FreeBSD$");
45 
46 #include "opt_ddb.h"
47 #include "opt_ktrace.h"
48 
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/sysent.h>
52 #include <sys/proc.h>
53 #include <sys/kernel.h>
54 #include <sys/ktr.h>
55 #include <sys/signalvar.h>
56 #include <sys/syscall.h>
57 #include <sys/lock.h>
58 #include <vm/vm.h>
59 #include <vm/vm_extern.h>
60 #include <vm/vm_kern.h>
61 #include <vm/vm_page.h>
62 #include <vm/vm_map.h>
63 #include <vm/vm_param.h>
64 #include <sys/vmmeter.h>
65 #include <sys/ptrace.h>
66 #include <sys/user.h>
67 #include <sys/buf.h>
68 #include <sys/vnode.h>
69 #include <sys/sysctl.h>
70 #include <sys/syslog.h>
71 #include <sys/bus.h>
72 #ifdef KTRACE
73 #include <sys/ktrace.h>
74 #endif
75 #include <net/netisr.h>
76 
77 #include <machine/trap.h>
78 #include <machine/cpu.h>
79 #include <machine/cpuinfo.h>
80 #include <machine/pte.h>
81 #include <machine/pmap.h>
82 #include <machine/md_var.h>
83 #include <machine/mips_opcode.h>
84 #include <machine/frame.h>
85 #include <machine/regnum.h>
86 #include <machine/tls.h>
87 
88 #ifdef DDB
89 #include <machine/db_machdep.h>
90 #include <ddb/db_sym.h>
91 #include <ddb/ddb.h>
92 #include <sys/kdb.h>
93 #endif
94 
95 #ifdef KDTRACE_HOOKS
96 #include <sys/dtrace_bsd.h>
97 #endif
98 
99 #ifdef TRAP_DEBUG
100 int trap_debug = 0;
101 SYSCTL_INT(_machdep, OID_AUTO, trap_debug, CTLFLAG_RW,
102     &trap_debug, 0, "Debug information on all traps");
103 #endif
104 
105 #define	lbu_macro(data, addr)						\
106 	__asm __volatile ("lbu %0, 0x0(%1)"				\
107 			: "=r" (data)	/* outputs */			\
108 			: "r" (addr));	/* inputs */
109 
110 #define	lb_macro(data, addr)						\
111 	__asm __volatile ("lb %0, 0x0(%1)"				\
112 			: "=r" (data)	/* outputs */			\
113 			: "r" (addr));	/* inputs */
114 
115 #define	lwl_macro(data, addr)						\
116 	__asm __volatile ("lwl %0, 0x0(%1)"				\
117 			: "+r" (data)	/* outputs */			\
118 			: "r" (addr));	/* inputs */
119 
120 #define	lwr_macro(data, addr)						\
121 	__asm __volatile ("lwr %0, 0x0(%1)"				\
122 			: "+r" (data)	/* outputs */			\
123 			: "r" (addr));	/* inputs */
124 
125 #define	ldl_macro(data, addr)						\
126 	__asm __volatile ("ldl %0, 0x0(%1)"				\
127 			: "+r" (data)	/* outputs */			\
128 			: "r" (addr));	/* inputs */
129 
130 #define	ldr_macro(data, addr)						\
131 	__asm __volatile ("ldr %0, 0x0(%1)"				\
132 			: "+r" (data)	/* outputs */			\
133 			: "r" (addr));	/* inputs */
134 
135 #define	sb_macro(data, addr)						\
136 	__asm __volatile ("sb %0, 0x0(%1)"				\
137 			:				/* outputs */	\
138 			: "r" (data), "r" (addr));	/* inputs */
139 
140 #define	swl_macro(data, addr)						\
141 	__asm __volatile ("swl %0, 0x0(%1)"				\
142 			: 				/* outputs */	\
143 			: "r" (data), "r" (addr));	/* inputs */
144 
145 #define	swr_macro(data, addr)						\
146 	__asm __volatile ("swr %0, 0x0(%1)"				\
147 			: 				/* outputs */	\
148 			: "r" (data), "r" (addr));	/* inputs */
149 
150 #define	sdl_macro(data, addr)						\
151 	__asm __volatile ("sdl %0, 0x0(%1)"				\
152 			: 				/* outputs */	\
153 			: "r" (data), "r" (addr));	/* inputs */
154 
155 #define	sdr_macro(data, addr)						\
156 	__asm __volatile ("sdr %0, 0x0(%1)"				\
157 			:				/* outputs */	\
158 			: "r" (data), "r" (addr));	/* inputs */
159 
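/*
 * Usage sketch for the macro pairs above (informal; big-endian byte order
 * assumed, as on most FreeBSD/mips targets).  An unaligned 32-bit load from
 * address A with (A & 3) == 1 is assembled from two partial loads:
 *
 *	lwl_macro(value, A);		// bytes A..A+2 -> bits 31..8
 *	lwr_macro(value, A + 3);	// byte  A+3    -> bits 7..0
 *
 * mips_unaligned_load_store() below uses exactly this addr / addr + size - 1
 * pairing for lwl/lwr, ldl/ldr, swl/swr and sdl/sdr.
 */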
160 static void log_illegal_instruction(const char *, struct trapframe *);
161 static void log_bad_page_fault(char *, struct trapframe *, int);
162 static void log_frame_dump(struct trapframe *frame);
163 static void get_mapping_info(vm_offset_t, pd_entry_t **, pt_entry_t **);
164 
165 int (*dtrace_invop_jump_addr)(struct trapframe *);
166 
167 #ifdef TRAP_DEBUG
168 static void trap_frame_dump(struct trapframe *frame);
169 #endif
170 
171 void (*machExceptionTable[]) (void)= {
172 /*
173  * The kernel exception handlers.
174  */
175 	MipsKernIntr,		/* external interrupt */
176 	MipsKernGenException,	/* TLB modification */
177 	MipsTLBInvalidException,/* TLB miss (load or instr. fetch) */
178 	MipsTLBInvalidException,/* TLB miss (store) */
179 	MipsKernGenException,	/* address error (load or I-fetch) */
180 	MipsKernGenException,	/* address error (store) */
181 	MipsKernGenException,	/* bus error (I-fetch) */
182 	MipsKernGenException,	/* bus error (load or store) */
183 	MipsKernGenException,	/* system call */
184 	MipsKernGenException,	/* breakpoint */
185 	MipsKernGenException,	/* reserved instruction */
186 	MipsKernGenException,	/* coprocessor unusable */
187 	MipsKernGenException,	/* arithmetic overflow */
188 	MipsKernGenException,	/* trap exception */
189 	MipsKernGenException,	/* virtual coherence exception inst */
190 	MipsKernGenException,	/* floating point exception */
191 	MipsKernGenException,	/* reserved */
192 	MipsKernGenException,	/* reserved */
193 	MipsKernGenException,	/* reserved */
194 	MipsKernGenException,	/* reserved */
195 	MipsKernGenException,	/* reserved */
196 	MipsKernGenException,	/* reserved */
197 	MipsKernGenException,	/* reserved */
198 	MipsKernGenException,	/* watch exception */
199 	MipsKernGenException,	/* reserved */
200 	MipsKernGenException,	/* reserved */
201 	MipsKernGenException,	/* reserved */
202 	MipsKernGenException,	/* reserved */
203 	MipsKernGenException,	/* reserved */
204 	MipsKernGenException,	/* reserved */
205 	MipsKernGenException,	/* reserved */
206 	MipsKernGenException,	/* virtual coherence exception data */
207 /*
208  * The user exception handlers.
209  */
210 	MipsUserIntr,		/* 0 */
211 	MipsUserGenException,	/* 1 */
212 	MipsTLBInvalidException,/* 2 */
213 	MipsTLBInvalidException,/* 3 */
214 	MipsUserGenException,	/* 4 */
215 	MipsUserGenException,	/* 5 */
216 	MipsUserGenException,	/* 6 */
217 	MipsUserGenException,	/* 7 */
218 	MipsUserGenException,	/* 8 */
219 	MipsUserGenException,	/* 9 */
220 	MipsUserGenException,	/* 10 */
221 	MipsUserGenException,	/* 11 */
222 	MipsUserGenException,	/* 12 */
223 	MipsUserGenException,	/* 13 */
224 	MipsUserGenException,	/* 14 */
225 	MipsUserGenException,	/* 15 */
226 	MipsUserGenException,	/* 16 */
227 	MipsUserGenException,	/* 17 */
228 	MipsUserGenException,	/* 18 */
229 	MipsUserGenException,	/* 19 */
230 	MipsUserGenException,	/* 20 */
231 	MipsUserGenException,	/* 21 */
232 	MipsUserGenException,	/* 22 */
233 	MipsUserGenException,	/* 23 */
234 	MipsUserGenException,	/* 24 */
235 	MipsUserGenException,	/* 25 */
236 	MipsUserGenException,	/* 26 */
237 	MipsUserGenException,	/* 27 */
238 	MipsUserGenException,	/* 28 */
239 	MipsUserGenException,	/* 29 */
240 	MipsUserGenException,	/* 30 */
241 	MipsUserGenException,	/* 31 */
242 };
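/*
 * Indexing note (informal): the low-level exception vectors dispatch through
 * this table using the ExcCode field of the CP0 Cause register, with faults
 * taken from user mode offset by T_USER (0x20), so entries 0-31 are the
 * kernel handlers and entries 32-63 the user handlers.  trap() below recovers
 * the same encoding by or'ing T_USER into "type" for user-mode traps.
 */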
243 
244 char *trap_type[] = {
245 	"external interrupt",
246 	"TLB modification",
247 	"TLB miss (load or instr. fetch)",
248 	"TLB miss (store)",
249 	"address error (load or I-fetch)",
250 	"address error (store)",
251 	"bus error (I-fetch)",
252 	"bus error (load or store)",
253 	"system call",
254 	"breakpoint",
255 	"reserved instruction",
256 	"coprocessor unusable",
257 	"arithmetic overflow",
258 	"trap",
259 	"virtual coherency instruction",
260 	"floating point",
261 	"reserved 16",
262 	"reserved 17",
263 	"reserved 18",
264 	"reserved 19",
265 	"reserved 20",
266 	"reserved 21",
267 	"reserved 22",
268 	"watch",
269 	"reserved 24",
270 	"reserved 25",
271 	"reserved 26",
272 	"reserved 27",
273 	"reserved 28",
274 	"reserved 29",
275 	"reserved 30",
276 	"virtual coherency data",
277 };
278 
279 #if !defined(SMP) && (defined(DDB) || defined(DEBUG))
280 struct trapdebug trapdebug[TRAPSIZE], *trp = trapdebug;
281 #endif
282 
283 #define	KERNLAND(x)	((vm_offset_t)(x) >= VM_MIN_KERNEL_ADDRESS && (vm_offset_t)(x) < VM_MAX_KERNEL_ADDRESS)
284 #define	DELAYBRANCH(x)	((x) & MIPS_CR_BR_DELAY)
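/*
 * Note on DELAYBRANCH() (informal): when the faulting instruction sits in a
 * branch delay slot, the BD bit of CP0 Cause is set and EPC (trapframe->pc)
 * points at the branch, not at the faulting instruction.  The handlers below
 * therefore use the pattern
 *
 *	va = trapframe->pc;
 *	if (DELAYBRANCH(trapframe->cause))
 *		va += sizeof(int);	// the instruction that actually faulted
 *
 * and recompute the continuation PC with MipsEmulateBranch() instead of
 * simply adding 4 when an instruction has to be skipped or restarted.
 */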
285 
286 /*
287  * MIPS load/store access type
288  */
289 enum {
290 	MIPS_LHU_ACCESS = 1,
291 	MIPS_LH_ACCESS,
292 	MIPS_LWU_ACCESS,
293 	MIPS_LW_ACCESS,
294 	MIPS_LD_ACCESS,
295 	MIPS_SH_ACCESS,
296 	MIPS_SW_ACCESS,
297 	MIPS_SD_ACCESS
298 };
299 
300 char *access_name[] = {
301 	"Load Halfword Unsigned",
302 	"Load Halfword",
303 	"Load Word Unsigned",
304 	"Load Word",
305 	"Load Doubleword",
306 	"Store Halfword",
307 	"Store Word",
308 	"Store Doubleword"
309 };
310 
311 #ifdef	CPU_CNMIPS
312 #include <machine/octeon_cop2.h>
313 #endif
314 
315 static int allow_unaligned_acc = 1;
316 
317 SYSCTL_INT(_vm, OID_AUTO, allow_unaligned_acc, CTLFLAG_RW,
318     &allow_unaligned_acc, 0, "Allow unaligned accesses");
319 
320 /*
321  * FP emulation is assumed to work on O32, but the code is outdated and crufty
322  * enough that it's a more sensible default to have it disabled when using
323  * other ABIs.  At the very least, it needs a lot of help in using
324  * type-semantic ABI-oblivious macros for everything it does.
325  */
326 #if defined(__mips_o32)
327 static int emulate_fp = 1;
328 #else
329 static int emulate_fp = 0;
330 #endif
331 SYSCTL_INT(_machdep, OID_AUTO, emulate_fp, CTLFLAG_RW,
332     &emulate_fp, 0, "Emulate unimplemented FPU instructions");
333 
334 static int emulate_unaligned_access(struct trapframe *frame, int mode);
335 
336 extern void fswintrberr(void); /* XXX */
337 
338 int
339 cpu_fetch_syscall_args(struct thread *td)
340 {
341 	struct trapframe *locr0;
342 	struct sysentvec *se;
343 	struct syscall_args *sa;
344 	int error, nsaved;
345 
346 	locr0 = td->td_frame;
347 	sa = &td->td_sa;
348 
349 	bzero(sa->args, sizeof(sa->args));
350 
351 	/* compute next PC after syscall instruction */
352 	td->td_pcb->pcb_tpc = locr0->pc; /* Remember in case we need to restart */
353 	if (DELAYBRANCH(locr0->cause))	 /* Check BD bit */
354 		locr0->pc = MipsEmulateBranch(locr0, locr0->pc, 0, 0);
355 	else
356 		locr0->pc += sizeof(int);
357 	sa->code = locr0->v0;
358 
359 	switch (sa->code) {
360 	case SYS___syscall:
361 	case SYS_syscall:
362 		/*
363 		 * This is an indirect syscall, in which the code is the first argument.
364 		 */
365 #if (!defined(__mips_n32) && !defined(__mips_n64)) || defined(COMPAT_FREEBSD32)
366 		if (sa->code == SYS___syscall && SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
367 			/*
368 			 * Like syscall, but code is a quad, so as to maintain alignment
369 			 * for the rest of the arguments.
370 			 */
371 			if (_QUAD_LOWWORD == 0)
372 				sa->code = locr0->a0;
373 			else
374 				sa->code = locr0->a1;
375 			sa->args[0] = locr0->a2;
376 			sa->args[1] = locr0->a3;
377 			nsaved = 2;
378 			break;
379 		}
380 #endif
381 		/*
382 		 * This is either not a quad syscall, or is a quad syscall with a
383 		 * new ABI in which quads fit in a single register.
384 		 */
385 		sa->code = locr0->a0;
386 		sa->args[0] = locr0->a1;
387 		sa->args[1] = locr0->a2;
388 		sa->args[2] = locr0->a3;
389 		nsaved = 3;
390 #if defined(__mips_n32) || defined(__mips_n64)
391 #ifdef COMPAT_FREEBSD32
392 		if (!SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
393 #endif
394 			/*
395 			 * Non-o32 ABIs support more arguments in registers.
396 			 */
397 			sa->args[3] = locr0->a4;
398 			sa->args[4] = locr0->a5;
399 			sa->args[5] = locr0->a6;
400 			sa->args[6] = locr0->a7;
401 			nsaved += 4;
402 #ifdef COMPAT_FREEBSD32
403 		}
404 #endif
405 #endif
406 		break;
407 	default:
408 		/*
409 		 * A direct syscall, arguments are just parameters to the syscall.
410 		 */
411 		sa->args[0] = locr0->a0;
412 		sa->args[1] = locr0->a1;
413 		sa->args[2] = locr0->a2;
414 		sa->args[3] = locr0->a3;
415 		nsaved = 4;
416 #if defined (__mips_n32) || defined(__mips_n64)
417 #ifdef COMPAT_FREEBSD32
418 		if (!SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
419 #endif
420 			/*
421 			 * Non-o32 ABIs support more arguments in registers.
422 			 */
423 			sa->args[4] = locr0->a4;
424 			sa->args[5] = locr0->a5;
425 			sa->args[6] = locr0->a6;
426 			sa->args[7] = locr0->a7;
427 			nsaved += 4;
428 #ifdef COMPAT_FREEBSD32
429 		}
430 #endif
431 #endif
432 		break;
433 	}
434 
435 #ifdef TRAP_DEBUG
436 	if (trap_debug)
437 		printf("SYSCALL #%d pid:%u\n", sa->code, td->td_proc->p_pid);
438 #endif
439 
440 	se = td->td_proc->p_sysent;
441 	/*
442 	 * XXX
443 	 * Shouldn't this go before switching on the code?
444 	 */
445 
446 	if (sa->code >= se->sv_size)
447 		sa->callp = &se->sv_table[0];
448 	else
449 		sa->callp = &se->sv_table[sa->code];
450 
451 	if (sa->callp->sy_narg > nsaved) {
452 #if defined(__mips_n32) || defined(__mips_n64)
453 		/*
454 		 * XXX
455 		 * Is this right for new ABIs?  I think the 4 there
456 		 * should be 8, since there are 8 registers to skip,
457 		 * not 4, but I'm not certain.
458 		 */
459 #ifdef COMPAT_FREEBSD32
460 		if (!SV_PROC_FLAG(td->td_proc, SV_ILP32))
461 #endif
462 			printf("SYSCALL #%u pid:%u, narg (%u) > nsaved (%u).\n",
463 			    sa->code, td->td_proc->p_pid, sa->callp->sy_narg, nsaved);
464 #endif
465 #if (defined(__mips_n32) || defined(__mips_n64)) && defined(COMPAT_FREEBSD32)
466 		if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
467 			unsigned i;
468 			int32_t arg;
469 
470 			error = 0; /* XXX GCC is awful.  */
471 			for (i = nsaved; i < sa->callp->sy_narg; i++) {
472 				error = copyin((caddr_t)(intptr_t)(locr0->sp +
473 				    (4 + (i - nsaved)) * sizeof(int32_t)),
474 				    (caddr_t)&arg, sizeof arg);
475 				if (error != 0)
476 					break;
477 				sa->args[i] = arg;
478 			}
479 		} else
480 #endif
481 		error = copyin((caddr_t)(intptr_t)(locr0->sp +
482 		    4 * sizeof(register_t)), (caddr_t)&sa->args[nsaved],
483 		   (u_int)(sa->callp->sy_narg - nsaved) * sizeof(register_t));
484 		if (error != 0) {
485 			locr0->v0 = error;
486 			locr0->a3 = 1;
487 		}
488 	} else
489 		error = 0;
490 
491 	if (error == 0) {
492 		td->td_retval[0] = 0;
493 		td->td_retval[1] = locr0->v1;
494 	}
495 
496 	return (error);
497 }
498 
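/*
 * Argument-fetch summary for cpu_fetch_syscall_args() above (informal; o32
 * shown, n32/n64 additionally pass arguments in a4..a7):
 *
 *	direct syscall:   v0 = code, arguments in a0..a3, remainder on stack
 *	SYS_syscall:      v0 = SYS_syscall, a0 = code, arguments in a1..a3
 *	SYS___syscall:    v0 = SYS___syscall, code passed as a 64-bit quantity
 *	                  in the a0/a1 pair (which half depends on endianness,
 *	                  hence the _QUAD_LOWWORD test), arguments from a2
 *
 * Stack arguments are copied in starting at sp + 4 register slots, i.e. past
 * the slots corresponding to the register arguments.
 */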
499 #undef __FBSDID
500 #define __FBSDID(x)
501 #include "../../kern/subr_syscall.c"
502 
503 /*
504  * Handle an exception.
505  * Called from MipsKernGenException() or MipsUserGenException()
506  * when a processor trap occurs.
507  * In the case of a kernel trap, we return the pc where to resume if
508  * p->p_addr->u_pcb.pcb_onfault is set, otherwise, return old pc.
509  */
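/*
 * The pcb_onfault convention mentioned above, roughly (informal sketch, not
 * the literal implementation): copyin()/copyout() and related primitives set
 *
 *	td->td_pcb->pcb_onfault = <address of an error-return stub>;
 *	... touch user memory ...
 *	td->td_pcb->pcb_onfault = NULL;
 *
 * so that when a kernel-mode fault cannot be resolved by vm_fault_trap(),
 * trap() returns the saved stub address and the copy routine fails with
 * EFAULT instead of panicking.
 */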
510 register_t
511 trap(struct trapframe *trapframe)
512 {
513 	int type, usermode;
514 	int i = 0;
515 	unsigned ucode = 0;
516 	struct thread *td = curthread;
517 	struct proc *p = curproc;
518 	vm_prot_t ftype;
519 	pmap_t pmap;
520 	int access_type;
521 	ksiginfo_t ksi;
522 	char *msg = NULL;
523 	intptr_t addr = 0;
524 	register_t pc;
525 	int cop, error;
526 	register_t *frame_regs;
527 #ifdef KDB
528 	bool handled;
529 #endif
530 
531 	trapdebug_enter(trapframe, 0);
532 #ifdef KDB
533 	if (kdb_active) {
534 		kdb_reenter();
535 		return (0);
536 	}
537 #endif
538 	type = (trapframe->cause & MIPS_CR_EXC_CODE) >> MIPS_CR_EXC_CODE_SHIFT;
539 	if (TRAPF_USERMODE(trapframe)) {
540 		type |= T_USER;
541 		usermode = 1;
542 	} else {
543 		usermode = 0;
544 	}
545 
546 	/*
547 	 * Enable hardware interrupts if they were on before the trap.  If they
548 	 * were off, disable them all so we don't accidentally enable them when
549 	 * returning to userland.
550 	 */
551 	if (trapframe->sr & MIPS_SR_INT_IE) {
552 		set_intr_mask(trapframe->sr & MIPS_SR_INT_MASK);
553 		intr_enable();
554 	} else {
555 		intr_disable();
556 	}
557 
558 #ifdef TRAP_DEBUG
559 	if (trap_debug) {
560 		static vm_offset_t last_badvaddr = 0;
561 		static vm_offset_t this_badvaddr = 0;
562 		static int count = 0;
563 		u_int32_t pid;
564 
565 		printf("trap type %x (%s - ", type,
566 		    trap_type[type & (~T_USER)]);
567 
568 		if (type & T_USER)
569 			printf("user mode)\n");
570 		else
571 			printf("kernel mode)\n");
572 
573 #ifdef SMP
574 		printf("cpuid = %d\n", PCPU_GET(cpuid));
575 #endif
576 		pid = mips_rd_entryhi() & TLBHI_ASID_MASK;
577 		printf("badaddr = %#jx, pc = %#jx, ra = %#jx, sp = %#jx, sr = %jx, pid = %d, ASID = %u\n",
578 		    (intmax_t)trapframe->badvaddr, (intmax_t)trapframe->pc, (intmax_t)trapframe->ra,
579 		    (intmax_t)trapframe->sp, (intmax_t)trapframe->sr,
580 		    (curproc ? curproc->p_pid : -1), pid);
581 
582 		switch (type & ~T_USER) {
583 		case T_TLB_MOD:
584 		case T_TLB_LD_MISS:
585 		case T_TLB_ST_MISS:
586 		case T_ADDR_ERR_LD:
587 		case T_ADDR_ERR_ST:
588 			this_badvaddr = trapframe->badvaddr;
589 			break;
590 		case T_SYSCALL:
591 			this_badvaddr = trapframe->ra;
592 			break;
593 		default:
594 			this_badvaddr = trapframe->pc;
595 			break;
596 		}
597 		if ((last_badvaddr == this_badvaddr) &&
598 		    ((type & ~T_USER) != T_SYSCALL) &&
599 		    ((type & ~T_USER) != T_COP_UNUSABLE)) {
600 			if (++count == 3) {
601 				trap_frame_dump(trapframe);
602 				panic("too many faults at %p\n", (void *)last_badvaddr);
603 			}
604 		} else {
605 			last_badvaddr = this_badvaddr;
606 			count = 0;
607 		}
608 	}
609 #endif
610 
611 #ifdef KDTRACE_HOOKS
612 	/*
613 	 * A trap can occur while DTrace executes a probe. Before
614 	 * executing the probe, DTrace blocks re-scheduling and sets
615 	 * a flag in its per-cpu flags to indicate that it doesn't
616 	 * want to fault. On returning from the probe, the no-fault
617 	 * flag is cleared and finally re-scheduling is enabled.
618 	 *
619 	 * If the DTrace kernel module has registered a trap handler,
620 	 * call it and if it returns non-zero, assume that it has
621 	 * handled the trap and modified the trap frame so that this
622 	 * function can return normally.
623 	 */
624 	/*
625 	 * XXXDTRACE: add pid probe handler here (if ever)
626 	 */
627 	if (!usermode) {
628 		if (dtrace_trap_func != NULL &&
629 		    (*dtrace_trap_func)(trapframe, type) != 0)
630 			return (trapframe->pc);
631 	}
632 #endif
633 
634 	switch (type) {
635 	case T_MCHECK:
636 #ifdef DDB
637 		kdb_trap(type, 0, trapframe);
638 #endif
639 		panic("MCHECK\n");
640 		break;
641 	case T_TLB_MOD:
642 		/* check for kernel address */
643 		if (KERNLAND(trapframe->badvaddr)) {
644 			if (pmap_emulate_modified(kernel_pmap,
645 			    trapframe->badvaddr) != 0) {
646 				ftype = VM_PROT_WRITE;
647 				goto kernel_fault;
648 			}
649 			return (trapframe->pc);
650 		}
651 		/* FALLTHROUGH */
652 
653 	case T_TLB_MOD + T_USER:
654 		pmap = &p->p_vmspace->vm_pmap;
655 		if (pmap_emulate_modified(pmap, trapframe->badvaddr) != 0) {
656 			ftype = VM_PROT_WRITE;
657 			goto dofault;
658 		}
659 		if (!usermode)
660 			return (trapframe->pc);
661 		goto out;
662 
663 	case T_TLB_LD_MISS:
664 	case T_TLB_ST_MISS:
665 		ftype = (type == T_TLB_ST_MISS) ? VM_PROT_WRITE : VM_PROT_READ;
666 		/* check for kernel address */
667 		if (KERNLAND(trapframe->badvaddr)) {
668 			vm_offset_t va;
669 			int rv;
670 
671 	kernel_fault:
672 			va = (vm_offset_t)trapframe->badvaddr;
673 			rv = vm_fault_trap(kernel_map, va, ftype,
674 			    VM_FAULT_NORMAL, NULL, NULL);
675 			if (rv == KERN_SUCCESS)
676 				return (trapframe->pc);
677 			if (td->td_pcb->pcb_onfault != NULL) {
678 				pc = (register_t)(intptr_t)td->td_pcb->pcb_onfault;
679 				td->td_pcb->pcb_onfault = NULL;
680 				return (pc);
681 			}
682 			goto err;
683 		}
684 
685 		/*
686 		 * It is an error for the kernel to access user space except
687 		 * through the copyin/copyout routines.
688 		 */
689 		if (td->td_pcb->pcb_onfault == NULL)
690 			goto err;
691 
692 		goto dofault;
693 
694 	case T_TLB_LD_MISS + T_USER:
695 		ftype = VM_PROT_READ;
696 		goto dofault;
697 
698 	case T_TLB_ST_MISS + T_USER:
699 		ftype = VM_PROT_WRITE;
700 dofault:
701 		{
702 			vm_offset_t va;
703 			struct vmspace *vm;
704 			vm_map_t map;
705 			int rv = 0;
706 
707 			vm = p->p_vmspace;
708 			map = &vm->vm_map;
709 			va = (vm_offset_t)trapframe->badvaddr;
710 			if (KERNLAND(trapframe->badvaddr)) {
711 				/*
712 				 * Don't allow user-mode faults in kernel
713 				 * address space.
714 				 */
715 				goto nogo;
716 			}
717 
718 			rv = vm_fault_trap(map, va, ftype, VM_FAULT_NORMAL,
719 			    &i, &ucode);
720 			/*
721 			 * XXXDTRACE: add dtrace_doubletrap_func here?
722 			 */
723 #ifdef VMFAULT_TRACE
724 			printf("vm_fault(%p (pmap %p), %p (%p), %x, %d) -> %x at pc %p\n",
725 			    map, &vm->vm_pmap, (void *)va, (void *)(intptr_t)trapframe->badvaddr,
726 			    ftype, VM_FAULT_NORMAL, rv, (void *)(intptr_t)trapframe->pc);
727 #endif
728 
729 			if (rv == KERN_SUCCESS) {
730 				if (!usermode) {
731 					return (trapframe->pc);
732 				}
733 				goto out;
734 			}
735 	nogo:
736 			if (!usermode) {
737 				if (td->td_pcb->pcb_onfault != NULL) {
738 					pc = (register_t)(intptr_t)td->td_pcb->pcb_onfault;
739 					td->td_pcb->pcb_onfault = NULL;
740 					return (pc);
741 				}
742 				goto err;
743 			}
744 			addr = trapframe->badvaddr;
745 
746 			msg = "BAD_PAGE_FAULT";
747 			log_bad_page_fault(msg, trapframe, type);
748 
749 			break;
750 		}
751 
752 	case T_ADDR_ERR_LD + T_USER:	/* misaligned or kseg access */
753 	case T_ADDR_ERR_ST + T_USER:	/* misaligned or kseg access */
754 		if (trapframe->badvaddr < 0 ||
755 		    trapframe->badvaddr >= VM_MAXUSER_ADDRESS) {
756 			msg = "ADDRESS_SPACE_ERR";
757 		} else if (allow_unaligned_acc) {
758 			int mode;
759 
760 			if (type == (T_ADDR_ERR_LD + T_USER))
761 				mode = VM_PROT_READ;
762 			else
763 				mode = VM_PROT_WRITE;
764 
765 			access_type = emulate_unaligned_access(trapframe, mode);
766 			if (access_type != 0)
767 				goto out;
768 			msg = "ALIGNMENT_FIX_ERR";
769 		} else {
770 			msg = "ADDRESS_ERR";
771 		}
772 
773 		/* FALL THROUGH */
774 
775 	case T_BUS_ERR_IFETCH + T_USER:	/* BERR asserted to cpu */
776 	case T_BUS_ERR_LD_ST + T_USER:	/* BERR asserted to cpu */
777 		ucode = 0;	/* XXX should be VM_PROT_something */
778 		i = SIGBUS;
779 		addr = trapframe->pc;
780 		if (!msg)
781 			msg = "BUS_ERR";
782 		log_bad_page_fault(msg, trapframe, type);
783 		break;
784 
785 	case T_SYSCALL + T_USER:
786 		{
787 			syscallenter(td);
788 
789 #if !defined(SMP) && (defined(DDB) || defined(DEBUG))
790 			if (trp == trapdebug)
791 				trapdebug[TRAPSIZE - 1].code = td->td_sa.code;
792 			else
793 				trp[-1].code = td->td_sa.code;
794 #endif
795 			trapdebug_enter(td->td_frame, -td->td_sa.code);
796 
797 			/*
798 			 * The sync'ing of I & D caches for SYS_ptrace() is
799 			 * done by procfs_domem() through procfs_rwmem()
800 			 * instead of being done here under a special check
801 			 * for SYS_ptrace().
802 			 */
803 			syscallret(td);
804 			return (trapframe->pc);
805 		}
806 
807 #if defined(KDTRACE_HOOKS) || defined(DDB)
808 	case T_BREAK:
809 #ifdef KDTRACE_HOOKS
810 		if (!usermode && dtrace_invop_jump_addr != NULL &&
811 		    dtrace_invop_jump_addr(trapframe) == 0)
812 			return (trapframe->pc);
813 #endif
814 #ifdef DDB
815 		kdb_trap(type, 0, trapframe);
816 		return (trapframe->pc);
817 #endif
818 #endif
819 
820 	case T_BREAK + T_USER:
821 		{
822 			intptr_t va;
823 			uint32_t instr;
824 
825 			i = SIGTRAP;
826 			ucode = TRAP_BRKPT;
827 
828 			/* compute address of break instruction */
829 			va = trapframe->pc;
830 			if (DELAYBRANCH(trapframe->cause))
831 				va += sizeof(int);
832 			addr = va;
833 
834 			if (td->td_md.md_ss_addr != va)
835 				break;
836 
837 			/* read break instruction */
838 			instr = fuword32((caddr_t)va);
839 
840 			if (instr != MIPS_BREAK_SSTEP)
841 				break;
842 
843 			CTR3(KTR_PTRACE,
844 			    "trap: tid %d, single step at %#lx: %#08x",
845 			    td->td_tid, va, instr);
846 			PROC_LOCK(p);
847 			_PHOLD(p);
848 			error = ptrace_clear_single_step(td);
849 			_PRELE(p);
850 			PROC_UNLOCK(p);
851 			if (error == 0)
852 				ucode = TRAP_TRACE;
853 			break;
854 		}
855 
856 	case T_IWATCH + T_USER:
857 	case T_DWATCH + T_USER:
858 		{
859 			intptr_t va;
860 
861 			/* compute address of trapped instruction */
862 			va = trapframe->pc;
863 			if (DELAYBRANCH(trapframe->cause))
864 				va += sizeof(int);
865 			printf("watch exception @ %p\n", (void *)va);
866 			i = SIGTRAP;
867 			ucode = TRAP_BRKPT;
868 			addr = va;
869 			break;
870 		}
871 
872 	case T_TRAP + T_USER:
873 		{
874 			intptr_t va;
875 			struct trapframe *locr0 = td->td_frame;
876 
877 			/* compute address of trap instruction */
878 			va = trapframe->pc;
879 			if (DELAYBRANCH(trapframe->cause))
880 				va += sizeof(int);
881 
882 			if (DELAYBRANCH(trapframe->cause)) {	/* Check BD bit */
883 				locr0->pc = MipsEmulateBranch(locr0, trapframe->pc, 0,
884 				    0);
885 			} else {
886 				locr0->pc += sizeof(int);
887 			}
888 			addr = va;
889 			i = SIGEMT;	/* Stuff it with something for now */
890 			break;
891 		}
892 
893 	case T_RES_INST + T_USER:
894 		{
895 			InstFmt inst;
896 			inst = *(InstFmt *)(intptr_t)trapframe->pc;
897 			switch (inst.RType.op) {
898 			case OP_SPECIAL3:
899 				switch (inst.RType.func) {
900 				case OP_RDHWR:
901 					/* Register 29 used for TLS */
902 					if (inst.RType.rd == 29) {
903 						frame_regs = &(trapframe->zero);
904 						frame_regs[inst.RType.rt] = (register_t)(intptr_t)td->td_md.md_tls;
905 						frame_regs[inst.RType.rt] += td->td_proc->p_md.md_tls_tcb_offset;
906 						trapframe->pc += sizeof(int);
907 						goto out;
908 					}
909 				break;
910 				}
911 			break;
912 			}
913 
914 			log_illegal_instruction("RES_INST", trapframe);
915 			i = SIGILL;
916 			addr = trapframe->pc;
917 		}
918 		break;
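		/*
		 * Background for the RDHWR emulation above (informal): on CPUs
		 * without a readable UserLocal hardware register, userland TLS
		 * access is typically compiled to "rdhwr $3, $29", which traps
		 * here as a reserved instruction.  The kernel emulates it by
		 * loading the thread's TLS base plus the TCB offset into the
		 * target register and advancing the PC past the instruction.
		 */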
919 	case T_C2E:
920 	case T_C2E + T_USER:
921 		goto err;
922 		break;
923 	case T_COP_UNUSABLE:
924 #ifdef	CPU_CNMIPS
925 		cop = (trapframe->cause & MIPS_CR_COP_ERR) >> MIPS_CR_COP_ERR_SHIFT;
926 		/* Handle only COP2 exception */
927 		if (cop != 2)
928 			goto err;
929 
930 		addr = trapframe->pc;
931 		/* save userland cop2 context if it has been touched */
932 		if ((td->td_md.md_flags & MDTD_COP2USED) &&
933 		    (td->td_md.md_cop2owner == COP2_OWNER_USERLAND)) {
934 			if (td->td_md.md_ucop2)
935 				octeon_cop2_save(td->td_md.md_ucop2);
936 			else
937 				panic("COP2 was used in user mode but md_ucop2 is NULL");
938 		}
939 
940 		if (td->td_md.md_cop2 == NULL) {
941 			td->td_md.md_cop2 = octeon_cop2_alloc_ctx();
942 			if (td->td_md.md_cop2 == NULL)
943 				panic("Failed to allocate COP2 context");
944 			memset(td->td_md.md_cop2, 0, sizeof(*td->td_md.md_cop2));
945 		}
946 
947 		octeon_cop2_restore(td->td_md.md_cop2);
948 
949 		/* Make userland re-request its context */
950 		td->td_frame->sr &= ~MIPS_SR_COP_2_BIT;
951 		td->td_md.md_flags |= MDTD_COP2USED;
952 		td->td_md.md_cop2owner = COP2_OWNER_KERNEL;
953 		/* Enable COP2, it will be disabled in cpu_switch */
954 		mips_wr_status(mips_rd_status() | MIPS_SR_COP_2_BIT);
955 		return (trapframe->pc);
956 #else
957 		goto err;
958 		break;
959 #endif
960 
961 	case T_COP_UNUSABLE + T_USER:
962 		cop = (trapframe->cause & MIPS_CR_COP_ERR) >> MIPS_CR_COP_ERR_SHIFT;
963 		if (cop == 1) {
964 			/* FP (COP1) instruction */
965 			if (cpuinfo.fpu_id == 0) {
966 				log_illegal_instruction("COP1_UNUSABLE",
967 				    trapframe);
968 				i = SIGILL;
969 				break;
970 			}
971 			addr = trapframe->pc;
972 			MipsSwitchFPState(PCPU_GET(fpcurthread), td->td_frame);
973 			PCPU_SET(fpcurthread, td);
974 #if defined(__mips_n32) || defined(__mips_n64)
975 			td->td_frame->sr |= MIPS_SR_COP_1_BIT | MIPS_SR_FR;
976 #else
977 			td->td_frame->sr |= MIPS_SR_COP_1_BIT;
978 #endif
979 			td->td_md.md_flags |= MDTD_FPUSED;
980 			goto out;
981 		}
982 #ifdef	CPU_CNMIPS
983 		else  if (cop == 2) {
984 			addr = trapframe->pc;
985 			if ((td->td_md.md_flags & MDTD_COP2USED) &&
986 			    (td->td_md.md_cop2owner == COP2_OWNER_KERNEL)) {
987 				if (td->td_md.md_cop2)
988 					octeon_cop2_save(td->td_md.md_cop2);
989 				else
990 					panic("COP2 was used in kernel mode but md_cop2 is NULL");
991 			}
992 
993 			if (td->td_md.md_ucop2 == NULL) {
994 				td->td_md.md_ucop2 = octeon_cop2_alloc_ctx();
995 				if (td->td_md.md_ucop2 == NULL)
996 					panic("Failed to allocate userland COP2 context");
997 				memset(td->td_md.md_ucop2, 0, sizeof(*td->td_md.md_ucop2));
998 			}
999 
1000 			octeon_cop2_restore(td->td_md.md_ucop2);
1001 
1002 			td->td_frame->sr |= MIPS_SR_COP_2_BIT;
1003 			td->td_md.md_flags |= MDTD_COP2USED;
1004 			td->td_md.md_cop2owner = COP2_OWNER_USERLAND;
1005 			goto out;
1006 		}
1007 #endif
1008 		else {
1009 			log_illegal_instruction("COPn_UNUSABLE", trapframe);
1010 			i = SIGILL;	/* only FPU instructions allowed */
1011 			break;
1012 		}
1013 
1014 	case T_FPE:
1015 #if !defined(SMP) && (defined(DDB) || defined(DEBUG))
1016 		trapDump("fpintr");
1017 #else
1018 		printf("FPU Trap: PC %#jx CR %x SR %x\n",
1019 		    (intmax_t)trapframe->pc, (unsigned)trapframe->cause, (unsigned)trapframe->sr);
1020 		goto err;
1021 #endif
1022 
1023 	case T_FPE + T_USER:
1024 		if (!emulate_fp) {
1025 			i = SIGFPE;
1026 			addr = trapframe->pc;
1027 			break;
1028 		}
1029 		MipsFPTrap(trapframe->sr, trapframe->cause, trapframe->pc);
1030 		goto out;
1031 
1032 	case T_OVFLOW + T_USER:
1033 		i = SIGFPE;
1034 		addr = trapframe->pc;
1035 		break;
1036 
1037 	case T_ADDR_ERR_LD:	/* misaligned access */
1038 	case T_ADDR_ERR_ST:	/* misaligned access */
1039 #ifdef TRAP_DEBUG
1040 		if (trap_debug) {
1041 			printf("+++ ADDR_ERR: type = %d, badvaddr = %#jx\n", type,
1042 			    (intmax_t)trapframe->badvaddr);
1043 		}
1044 #endif
1045 		/* Only allow emulation on a user address */
1046 		if (allow_unaligned_acc &&
1047 		    ((vm_offset_t)trapframe->badvaddr < VM_MAXUSER_ADDRESS)) {
1048 			int mode;
1049 
1050 			if (type == T_ADDR_ERR_LD)
1051 				mode = VM_PROT_READ;
1052 			else
1053 				mode = VM_PROT_WRITE;
1054 
1055 			access_type = emulate_unaligned_access(trapframe, mode);
1056 			if (access_type != 0)
1057 				return (trapframe->pc);
1058 		}
1059 		/* FALLTHROUGH */
1060 
1061 	case T_BUS_ERR_LD_ST:	/* BERR asserted to cpu */
1062 		if (td->td_pcb->pcb_onfault != NULL) {
1063 			pc = (register_t)(intptr_t)td->td_pcb->pcb_onfault;
1064 			td->td_pcb->pcb_onfault = NULL;
1065 			return (pc);
1066 		}
1067 
1068 		/* FALLTHROUGH */
1069 
1070 	default:
1071 err:
1072 
1073 #if !defined(SMP) && defined(DEBUG)
1074 		trapDump("trap");
1075 #endif
1076 #ifdef SMP
1077 		printf("cpu:%d-", PCPU_GET(cpuid));
1078 #endif
1079 		printf("Trap cause = %d (%s - ", type,
1080 		    trap_type[type & (~T_USER)]);
1081 
1082 		if (type & T_USER)
1083 			printf("user mode)\n");
1084 		else
1085 			printf("kernel mode)\n");
1086 
1087 #ifdef TRAP_DEBUG
1088 		if (trap_debug)
1089 			printf("badvaddr = %#jx, pc = %#jx, ra = %#jx, sr = %#jx\n",
1090 			       (intmax_t)trapframe->badvaddr, (intmax_t)trapframe->pc, (intmax_t)trapframe->ra,
1091 			       (intmax_t)trapframe->sr);
1092 #endif
1093 
1094 #ifdef KDB
1095 		if (debugger_on_trap) {
1096 			kdb_why = KDB_WHY_TRAP;
1097 			handled = kdb_trap(type, 0, trapframe);
1098 			kdb_why = KDB_WHY_UNSET;
1099 			if (handled)
1100 				return (trapframe->pc);
1101 		}
1102 #endif
1103 		panic("trap");
1104 	}
1105 	td->td_frame->pc = trapframe->pc;
1106 	td->td_frame->cause = trapframe->cause;
1107 	td->td_frame->badvaddr = trapframe->badvaddr;
1108 	ksiginfo_init_trap(&ksi);
1109 	ksi.ksi_signo = i;
1110 	ksi.ksi_code = ucode;
1111 	ksi.ksi_addr = (void *)addr;
1112 	ksi.ksi_trapno = type & ~T_USER;
1113 	trapsignal(td, &ksi);
1114 out:
1115 
1116 	/*
1117 	 * Note: we should only get here if returning to user mode.
1118 	 */
1119 	userret(td, trapframe);
1120 	return (trapframe->pc);
1121 }
1122 
1123 #if !defined(SMP) && (defined(DDB) || defined(DEBUG))
1124 void
1125 trapDump(char *msg)
1126 {
1127 	register_t s;
1128 	int i;
1129 
1130 	s = intr_disable();
1131 	printf("trapDump(%s)\n", msg);
1132 	for (i = 0; i < TRAPSIZE; i++) {
1133 		if (trp == trapdebug) {
1134 			trp = &trapdebug[TRAPSIZE - 1];
1135 		} else {
1136 			trp--;
1137 		}
1138 
1139 		if (trp->cause == 0)
1140 			break;
1141 
1142 		printf("%s: ADR %jx PC %jx CR %jx SR %jx\n",
1143 		    trap_type[(trp->cause & MIPS_CR_EXC_CODE) >>
1144 			MIPS_CR_EXC_CODE_SHIFT],
1145 		    (intmax_t)trp->vadr, (intmax_t)trp->pc,
1146 		    (intmax_t)trp->cause, (intmax_t)trp->status);
1147 
1148 		printf("   RA %jx SP %jx code %d\n", (intmax_t)trp->ra,
1149 		    (intmax_t)trp->sp, (int)trp->code);
1150 	}
1151 	intr_restore(s);
1152 }
1153 #endif
1154 
1155 /*
1156  * Return the resulting PC as if the branch was executed.
1157  */
1158 uintptr_t
1159 MipsEmulateBranch(struct trapframe *framePtr, uintptr_t instPC, int fpcCSR,
1160     uintptr_t instptr)
1161 {
1162 	InstFmt inst;
1163 	register_t *regsPtr = (register_t *) framePtr;
1164 	uintptr_t retAddr = 0;
1165 	int condition;
1166 
1167 #define	GetBranchDest(InstPtr, inst) \
1168 	(InstPtr + 4 + ((short)inst.IType.imm << 2))
1169 
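	/*
	 * Worked example for GetBranchDest() (informal): a taken conditional
	 * branch at InstPtr with a 16-bit immediate of 0x0005 continues at
	 * InstPtr + 4 + (5 << 2) = InstPtr + 24; the offset is counted in
	 * instructions relative to the delay-slot address.  A branch that is
	 * not taken simply resumes at InstPtr + 8 (branch plus delay slot).
	 */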
1170 	if (instptr) {
1171 		if (instptr < MIPS_KSEG0_START)
1172 			inst.word = fuword32((void *)instptr);
1173 		else
1174 			inst = *(InstFmt *) instptr;
1175 	} else {
1176 		if ((vm_offset_t)instPC < MIPS_KSEG0_START)
1177 			inst.word = fuword32((void *)instPC);
1178 		else
1179 			inst = *(InstFmt *) instPC;
1180 	}
1181 
1182 	switch ((int)inst.JType.op) {
1183 	case OP_SPECIAL:
1184 		switch ((int)inst.RType.func) {
1185 		case OP_JR:
1186 		case OP_JALR:
1187 			retAddr = regsPtr[inst.RType.rs];
1188 			break;
1189 
1190 		default:
1191 			retAddr = instPC + 4;
1192 			break;
1193 		}
1194 		break;
1195 
1196 	case OP_BCOND:
1197 		switch ((int)inst.IType.rt) {
1198 		case OP_BLTZ:
1199 		case OP_BLTZL:
1200 		case OP_BLTZAL:
1201 		case OP_BLTZALL:
1202 			if ((int)(regsPtr[inst.RType.rs]) < 0)
1203 				retAddr = GetBranchDest(instPC, inst);
1204 			else
1205 				retAddr = instPC + 8;
1206 			break;
1207 
1208 		case OP_BGEZ:
1209 		case OP_BGEZL:
1210 		case OP_BGEZAL:
1211 		case OP_BGEZALL:
1212 			if ((int)(regsPtr[inst.RType.rs]) >= 0)
1213 				retAddr = GetBranchDest(instPC, inst);
1214 			else
1215 				retAddr = instPC + 8;
1216 			break;
1217 
1218 		case OP_TGEI:
1219 		case OP_TGEIU:
1220 		case OP_TLTI:
1221 		case OP_TLTIU:
1222 		case OP_TEQI:
1223 		case OP_TNEI:
1224 			retAddr = instPC + 4;	/* Like syscall... */
1225 			break;
1226 
1227 		default:
1228 			panic("MipsEmulateBranch: Bad branch cond");
1229 		}
1230 		break;
1231 
1232 	case OP_J:
1233 	case OP_JAL:
1234 		retAddr = (inst.JType.target << 2) |
1235 		    ((unsigned)(instPC + 4) & 0xF0000000);
1236 		break;
1237 
1238 	case OP_BEQ:
1239 	case OP_BEQL:
1240 		if (regsPtr[inst.RType.rs] == regsPtr[inst.RType.rt])
1241 			retAddr = GetBranchDest(instPC, inst);
1242 		else
1243 			retAddr = instPC + 8;
1244 		break;
1245 
1246 	case OP_BNE:
1247 	case OP_BNEL:
1248 		if (regsPtr[inst.RType.rs] != regsPtr[inst.RType.rt])
1249 			retAddr = GetBranchDest(instPC, inst);
1250 		else
1251 			retAddr = instPC + 8;
1252 		break;
1253 
1254 	case OP_BLEZ:
1255 	case OP_BLEZL:
1256 		if ((int)(regsPtr[inst.RType.rs]) <= 0)
1257 			retAddr = GetBranchDest(instPC, inst);
1258 		else
1259 			retAddr = instPC + 8;
1260 		break;
1261 
1262 	case OP_BGTZ:
1263 	case OP_BGTZL:
1264 		if ((int)(regsPtr[inst.RType.rs]) > 0)
1265 			retAddr = GetBranchDest(instPC, inst);
1266 		else
1267 			retAddr = instPC + 8;
1268 		break;
1269 
1270 	case OP_COP1:
1271 		switch (inst.RType.rs) {
1272 		case OP_BCx:
1273 		case OP_BCy:
1274 			if ((inst.RType.rt & COPz_BC_TF_MASK) == COPz_BC_TRUE)
1275 				condition = fpcCSR & MIPS_FPU_COND_BIT;
1276 			else
1277 				condition = !(fpcCSR & MIPS_FPU_COND_BIT);
1278 			if (condition)
1279 				retAddr = GetBranchDest(instPC, inst);
1280 			else
1281 				retAddr = instPC + 8;
1282 			break;
1283 
1284 		default:
1285 			retAddr = instPC + 4;
1286 		}
1287 		break;
1288 
1289 	default:
1290 		retAddr = instPC + 4;
1291 	}
1292 	return (retAddr);
1293 }
1294 
1295 static void
1296 log_frame_dump(struct trapframe *frame)
1297 {
1298 	log(LOG_ERR, "Trapframe Register Dump:\n");
1299 	log(LOG_ERR, "\tzero: %#jx\tat: %#jx\tv0: %#jx\tv1: %#jx\n",
1300 	    (intmax_t)0, (intmax_t)frame->ast, (intmax_t)frame->v0, (intmax_t)frame->v1);
1301 
1302 	log(LOG_ERR, "\ta0: %#jx\ta1: %#jx\ta2: %#jx\ta3: %#jx\n",
1303 	    (intmax_t)frame->a0, (intmax_t)frame->a1, (intmax_t)frame->a2, (intmax_t)frame->a3);
1304 
1305 #if defined(__mips_n32) || defined(__mips_n64)
1306 	log(LOG_ERR, "\ta4: %#jx\ta5: %#jx\ta6: %#jx\ta7: %#jx\n",
1307 	    (intmax_t)frame->a4, (intmax_t)frame->a5, (intmax_t)frame->a6, (intmax_t)frame->a7);
1308 
1309 	log(LOG_ERR, "\tt0: %#jx\tt1: %#jx\tt2: %#jx\tt3: %#jx\n",
1310 	    (intmax_t)frame->t0, (intmax_t)frame->t1, (intmax_t)frame->t2, (intmax_t)frame->t3);
1311 #else
1312 	log(LOG_ERR, "\tt0: %#jx\tt1: %#jx\tt2: %#jx\tt3: %#jx\n",
1313 	    (intmax_t)frame->t0, (intmax_t)frame->t1, (intmax_t)frame->t2, (intmax_t)frame->t3);
1314 
1315 	log(LOG_ERR, "\tt4: %#jx\tt5: %#jx\tt6: %#jx\tt7: %#jx\n",
1316 	    (intmax_t)frame->t4, (intmax_t)frame->t5, (intmax_t)frame->t6, (intmax_t)frame->t7);
1317 #endif
1318 	log(LOG_ERR, "\tt8: %#jx\tt9: %#jx\ts0: %#jx\ts1: %#jx\n",
1319 	    (intmax_t)frame->t8, (intmax_t)frame->t9, (intmax_t)frame->s0, (intmax_t)frame->s1);
1320 
1321 	log(LOG_ERR, "\ts2: %#jx\ts3: %#jx\ts4: %#jx\ts5: %#jx\n",
1322 	    (intmax_t)frame->s2, (intmax_t)frame->s3, (intmax_t)frame->s4, (intmax_t)frame->s5);
1323 
1324 	log(LOG_ERR, "\ts6: %#jx\ts7: %#jx\tk0: %#jx\tk1: %#jx\n",
1325 	    (intmax_t)frame->s6, (intmax_t)frame->s7, (intmax_t)frame->k0, (intmax_t)frame->k1);
1326 
1327 	log(LOG_ERR, "\tgp: %#jx\tsp: %#jx\ts8: %#jx\tra: %#jx\n",
1328 	    (intmax_t)frame->gp, (intmax_t)frame->sp, (intmax_t)frame->s8, (intmax_t)frame->ra);
1329 
1330 	log(LOG_ERR, "\tsr: %#jx\tmullo: %#jx\tmulhi: %#jx\tbadvaddr: %#jx\n",
1331 	    (intmax_t)frame->sr, (intmax_t)frame->mullo, (intmax_t)frame->mulhi, (intmax_t)frame->badvaddr);
1332 
1333 	log(LOG_ERR, "\tcause: %#jx\tpc: %#jx\n",
1334 	    (intmax_t)frame->cause, (intmax_t)frame->pc);
1335 }
1336 
1337 #ifdef TRAP_DEBUG
1338 static void
1339 trap_frame_dump(struct trapframe *frame)
1340 {
1341 	printf("Trapframe Register Dump:\n");
1342 	printf("\tzero: %#jx\tat: %#jx\tv0: %#jx\tv1: %#jx\n",
1343 	    (intmax_t)0, (intmax_t)frame->ast, (intmax_t)frame->v0, (intmax_t)frame->v1);
1344 
1345 	printf("\ta0: %#jx\ta1: %#jx\ta2: %#jx\ta3: %#jx\n",
1346 	    (intmax_t)frame->a0, (intmax_t)frame->a1, (intmax_t)frame->a2, (intmax_t)frame->a3);
1347 #if defined(__mips_n32) || defined(__mips_n64)
1348 	printf("\ta4: %#jx\ta5: %#jx\ta6: %#jx\ta7: %#jx\n",
1349 	    (intmax_t)frame->a4, (intmax_t)frame->a5, (intmax_t)frame->a6, (intmax_t)frame->a7);
1350 
1351 	printf("\tt0: %#jx\tt1: %#jx\tt2: %#jx\tt3: %#jx\n",
1352 	    (intmax_t)frame->t0, (intmax_t)frame->t1, (intmax_t)frame->t2, (intmax_t)frame->t3);
1353 #else
1354 	printf("\tt0: %#jx\tt1: %#jx\tt2: %#jx\tt3: %#jx\n",
1355 	    (intmax_t)frame->t0, (intmax_t)frame->t1, (intmax_t)frame->t2, (intmax_t)frame->t3);
1356 
1357 	printf("\tt4: %#jx\tt5: %#jx\tt6: %#jx\tt7: %#jx\n",
1358 	    (intmax_t)frame->t4, (intmax_t)frame->t5, (intmax_t)frame->t6, (intmax_t)frame->t7);
1359 #endif
1360 	printf("\tt8: %#jx\tt9: %#jx\ts0: %#jx\ts1: %#jx\n",
1361 	    (intmax_t)frame->t8, (intmax_t)frame->t9, (intmax_t)frame->s0, (intmax_t)frame->s1);
1362 
1363 	printf("\ts2: %#jx\ts3: %#jx\ts4: %#jx\ts5: %#jx\n",
1364 	    (intmax_t)frame->s2, (intmax_t)frame->s3, (intmax_t)frame->s4, (intmax_t)frame->s5);
1365 
1366 	printf("\ts6: %#jx\ts7: %#jx\tk0: %#jx\tk1: %#jx\n",
1367 	    (intmax_t)frame->s6, (intmax_t)frame->s7, (intmax_t)frame->k0, (intmax_t)frame->k1);
1368 
1369 	printf("\tgp: %#jx\tsp: %#jx\ts8: %#jx\tra: %#jx\n",
1370 	    (intmax_t)frame->gp, (intmax_t)frame->sp, (intmax_t)frame->s8, (intmax_t)frame->ra);
1371 
1372 	printf("\tsr: %#jx\tmullo: %#jx\tmulhi: %#jx\tbadvaddr: %#jx\n",
1373 	    (intmax_t)frame->sr, (intmax_t)frame->mullo, (intmax_t)frame->mulhi, (intmax_t)frame->badvaddr);
1374 
1375 	printf("\tcause: %#jx\tpc: %#jx\n",
1376 	    (intmax_t)frame->cause, (intmax_t)frame->pc);
1377 }
1378 
1379 #endif
1380 
1381 static void
1382 get_mapping_info(vm_offset_t va, pd_entry_t **pdepp, pt_entry_t **ptepp)
1383 {
1384 	pt_entry_t *ptep;
1385 	pd_entry_t *pdep;
1386 	struct proc *p = curproc;
1387 
1388 	pdep = (&(p->p_vmspace->vm_pmap.pm_segtab[(va >> SEGSHIFT) & (NPDEPG - 1)]));
1389 	if (*pdep)
1390 		ptep = pmap_pte(&p->p_vmspace->vm_pmap, va);
1391 	else
1392 		ptep = (pt_entry_t *)0;
1393 
1394 	*pdepp = pdep;
1395 	*ptepp = ptep;
1396 }
1397 
1398 static void
1399 log_illegal_instruction(const char *msg, struct trapframe *frame)
1400 {
1401 	pt_entry_t *ptep;
1402 	pd_entry_t *pdep;
1403 	unsigned int *addr, instr[4];
1404 	struct thread *td;
1405 	struct proc *p;
1406 	register_t pc;
1407 
1408 	td = curthread;
1409 	p = td->td_proc;
1410 
1411 #ifdef SMP
1412 	printf("cpuid = %d\n", PCPU_GET(cpuid));
1413 #endif
1414 	pc = frame->pc + (DELAYBRANCH(frame->cause) ? 4 : 0);
1415 	log(LOG_ERR, "%s: pid %d tid %ld (%s), uid %d: pc %#jx ra %#jx\n",
1416 	    msg, p->p_pid, (long)td->td_tid, p->p_comm,
1417 	    p->p_ucred ? p->p_ucred->cr_uid : -1,
1418 	    (intmax_t)pc,
1419 	    (intmax_t)frame->ra);
1420 
1421 	/* log registers in trap frame */
1422 	log_frame_dump(frame);
1423 
1424 	get_mapping_info((vm_offset_t)pc, &pdep, &ptep);
1425 
1426 	/*
1427 	 * Dump a few words around the faulting instruction, if the address
1428 	 * is valid.
1429 	 */
1430 	addr = (unsigned int *)(intptr_t)pc;
1431 	if ((pc & 3) == 0 && copyin(addr, instr, sizeof(instr)) == 0) {
1432 		/* dump page table entry for faulting instruction */
1433 		log(LOG_ERR, "Page table info for pc address %#jx: pde = %p, pte = %#jx\n",
1434 		    (intmax_t)pc, (void *)(intptr_t)*pdep, (uintmax_t)(ptep ? *ptep : 0));
1435 
1436 		log(LOG_ERR, "Dumping 4 words starting at pc address %p: \n",
1437 		    addr);
1438 		log(LOG_ERR, "%08x %08x %08x %08x\n",
1439 		    instr[0], instr[1], instr[2], instr[3]);
1440 	} else {
1441 		log(LOG_ERR, "pc address %#jx is inaccessible, pde = %p, pte = %#jx\n",
1442 		    (intmax_t)pc, (void *)(intptr_t)*pdep, (uintmax_t)(ptep ? *ptep : 0));
1443 	}
1444 }
1445 
1446 static void
1447 log_bad_page_fault(char *msg, struct trapframe *frame, int trap_type)
1448 {
1449 	pt_entry_t *ptep;
1450 	pd_entry_t *pdep;
1451 	unsigned int *addr, instr[4];
1452 	struct thread *td;
1453 	struct proc *p;
1454 	char *read_or_write;
1455 	register_t pc;
1456 
1457 	trap_type &= ~T_USER;
1458 
1459 	td = curthread;
1460 	p = td->td_proc;
1461 
1462 #ifdef SMP
1463 	printf("cpuid = %d\n", PCPU_GET(cpuid));
1464 #endif
1465 	switch (trap_type) {
1466 	case T_TLB_MOD:
1467 	case T_TLB_ST_MISS:
1468 	case T_ADDR_ERR_ST:
1469 		read_or_write = "write";
1470 		break;
1471 	case T_TLB_LD_MISS:
1472 	case T_ADDR_ERR_LD:
1473 	case T_BUS_ERR_IFETCH:
1474 		read_or_write = "read";
1475 		break;
1476 	default:
1477 		read_or_write = "unknown";
1478 	}
1479 
1480 	pc = frame->pc + (DELAYBRANCH(frame->cause) ? 4 : 0);
1481 	log(LOG_ERR, "%s: pid %d tid %ld (%s), uid %d: pc %#jx got a %s fault "
1482 	    "(type %#x) at %#jx\n",
1483 	    msg, p->p_pid, (long)td->td_tid, p->p_comm,
1484 	    p->p_ucred ? p->p_ucred->cr_uid : -1,
1485 	    (intmax_t)pc,
1486 	    read_or_write,
1487 	    trap_type,
1488 	    (intmax_t)frame->badvaddr);
1489 
1490 	/* log registers in trap frame */
1491 	log_frame_dump(frame);
1492 
1493 	get_mapping_info((vm_offset_t)pc, &pdep, &ptep);
1494 
1495 	/*
1496 	 * Dump a few words around the faulting instruction, if the address
1497 	 * is valid.
1498 	 */
1499 	addr = (unsigned int *)(intptr_t)pc;
1500 	if ((pc & 3) == 0 && pc != frame->badvaddr &&
1501 	    trap_type != T_BUS_ERR_IFETCH &&
1502 	    copyin((caddr_t)(intptr_t)pc, instr, sizeof(instr)) == 0) {
1503 		/* dump page table entry for faulting instruction */
1504 		log(LOG_ERR, "Page table info for pc address %#jx: pde = %p, pte = %#jx\n",
1505 		    (intmax_t)pc, (void *)(intptr_t)*pdep, (uintmax_t)(ptep ? *ptep : 0));
1506 
1507 		log(LOG_ERR, "Dumping 4 words starting at pc address %p: \n",
1508 		    addr);
1509 		log(LOG_ERR, "%08x %08x %08x %08x\n",
1510 		    instr[0], instr[1], instr[2], instr[3]);
1511 	} else {
1512 		log(LOG_ERR, "pc address %#jx is inaccessible, pde = %p, pte = %#jx\n",
1513 		    (intmax_t)pc, (void *)(intptr_t)*pdep, (uintmax_t)(ptep ? *ptep : 0));
1514 	}
1515 
1516 	get_mapping_info((vm_offset_t)frame->badvaddr, &pdep, &ptep);
1517 	log(LOG_ERR, "Page table info for bad address %#jx: pde = %p, pte = %#jx\n",
1518 	    (intmax_t)frame->badvaddr, (void *)(intptr_t)*pdep, (uintmax_t)(ptep ? *ptep : 0));
1519 }
1520 
1521 /*
1522  * Unaligned load/store emulation
1523  */
1524 static int
1525 mips_unaligned_load_store(struct trapframe *frame, int mode, register_t addr, register_t pc)
1526 {
1527 	register_t *reg = (register_t *) frame;
1528 	u_int32_t inst = *((u_int32_t *)(intptr_t)pc);
1529 	register_t value_msb = 0, value = 0;
1530 	unsigned size;
1531 
1532 	/*
1533 	 * ADDR_ERR faults have higher priority than TLB
1534 	 * Miss faults.  Therefore, it is necessary to
1535 	 * verify that the faulting address is a valid
1536 	 * virtual address within the process' address space
1537 	 * before trying to emulate the unaligned access.
1538 	 */
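	/*
	 * Concretely (informal example): an unaligned LW that also misses in
	 * the TLB raises T_ADDR_ERR_LD rather than a TLB miss, so the lwl/lwr
	 * pair issued below from kernel mode could itself fault on an unmapped
	 * or protected page.  The useracc() check that follows verifies the
	 * whole size-aligned window of "size * 2" bytes the partial accesses
	 * may touch before any emulation is attempted.
	 */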
1539 	switch (MIPS_INST_OPCODE(inst)) {
1540 	case OP_LHU: case OP_LH:
1541 	case OP_SH:
1542 		size = 2;
1543 		break;
1544 	case OP_LWU: case OP_LW:
1545 	case OP_SW:
1546 		size = 4;
1547 		break;
1548 	case OP_LD:
1549 	case OP_SD:
1550 		size = 8;
1551 		break;
1552 	default:
1553 		printf("%s: unhandled opcode in address error: %#x\n", __func__, MIPS_INST_OPCODE(inst));
1554 		return (0);
1555 	}
1556 
1557 	if (!useracc((void *)rounddown2((vm_offset_t)addr, size), size * 2, mode))
1558 		return (0);
1559 
1560 	/*
1561 	 * XXX
1562 	 * Handle LL/SC LLD/SCD.
1563 	 */
1564 	switch (MIPS_INST_OPCODE(inst)) {
1565 	case OP_LHU:
1566 		KASSERT(mode == VM_PROT_READ, ("access mode must be read for load instruction."));
1567 		lbu_macro(value_msb, addr);
1568 		addr += 1;
1569 		lbu_macro(value, addr);
1570 		value |= value_msb << 8;
1571 		reg[MIPS_INST_RT(inst)] = value;
1572 		return (MIPS_LHU_ACCESS);
1573 
1574 	case OP_LH:
1575 		KASSERT(mode == VM_PROT_READ, ("access mode must be read for load instruction."));
1576 		lb_macro(value_msb, addr);
1577 		addr += 1;
1578 		lbu_macro(value, addr);
1579 		value |= value_msb << 8;
1580 		reg[MIPS_INST_RT(inst)] = value;
1581 		return (MIPS_LH_ACCESS);
1582 
1583 	case OP_LWU:
1584 		KASSERT(mode == VM_PROT_READ, ("access mode must be read for load instruction."));
1585 		lwl_macro(value, addr);
1586 		addr += 3;
1587 		lwr_macro(value, addr);
1588 		value &= 0xffffffff;
1589 		reg[MIPS_INST_RT(inst)] = value;
1590 		return (MIPS_LWU_ACCESS);
1591 
1592 	case OP_LW:
1593 		KASSERT(mode == VM_PROT_READ, ("access mode must be read for load instruction."));
1594 		lwl_macro(value, addr);
1595 		addr += 3;
1596 		lwr_macro(value, addr);
1597 		reg[MIPS_INST_RT(inst)] = value;
1598 		return (MIPS_LW_ACCESS);
1599 
1600 #if defined(__mips_n32) || defined(__mips_n64)
1601 	case OP_LD:
1602 		KASSERT(mode == VM_PROT_READ, ("access mode must be read for load instruction."));
1603 		ldl_macro(value, addr);
1604 		addr += 7;
1605 		ldr_macro(value, addr);
1606 		reg[MIPS_INST_RT(inst)] = value;
1607 		return (MIPS_LD_ACCESS);
1608 #endif
1609 
1610 	case OP_SH:
1611 		KASSERT(mode == VM_PROT_WRITE, ("access mode must be write for store instruction."));
1612 		value = reg[MIPS_INST_RT(inst)];
1613 		value_msb = value >> 8;
1614 		sb_macro(value_msb, addr);
1615 		addr += 1;
1616 		sb_macro(value, addr);
1617 		return (MIPS_SH_ACCESS);
1618 
1619 	case OP_SW:
1620 		KASSERT(mode == VM_PROT_WRITE, ("access mode must be write for store instruction."));
1621 		value = reg[MIPS_INST_RT(inst)];
1622 		swl_macro(value, addr);
1623 		addr += 3;
1624 		swr_macro(value, addr);
1625 		return (MIPS_SW_ACCESS);
1626 
1627 #if defined(__mips_n32) || defined(__mips_n64)
1628 	case OP_SD:
1629 		KASSERT(mode == VM_PROT_WRITE, ("access mode must be write for store instruction."));
1630 		value = reg[MIPS_INST_RT(inst)];
1631 		sdl_macro(value, addr);
1632 		addr += 7;
1633 		sdr_macro(value, addr);
1634 		return (MIPS_SD_ACCESS);
1635 #endif
1636 	}
1637 	panic("%s: should not be reached.", __func__);
1638 }
1639 
1640 /*
1641  * XXX TODO: SMP?
1642  */
1643 static struct timeval unaligned_lasterr;
1644 static int unaligned_curerr;
1645 
1646 static int unaligned_pps_log_limit = 4;
1647 
1648 SYSCTL_INT(_machdep, OID_AUTO, unaligned_log_pps_limit, CTLFLAG_RWTUN,
1649     &unaligned_pps_log_limit, 0,
1650     "limit number of userland unaligned log messages per second");
1651 
1652 static int
1653 emulate_unaligned_access(struct trapframe *frame, int mode)
1654 {
1655 	register_t pc;
1656 	int access_type = 0;
1657 	struct thread *td = curthread;
1658 	struct proc *p = curproc;
1659 
1660 	pc = frame->pc + (DELAYBRANCH(frame->cause) ? 4 : 0);
1661 
1662 	/*
1663 	 * Fall through if it's an instruction fetch exception.
1664 	 */
1665 	if (!((pc & 3) || (pc == frame->badvaddr))) {
1666 		/*
1667 		 * Handle unaligned load and store
1668 		 */
1669 
1670 		/*
1671 		 * Return access type if the instruction was emulated.
1672 		 * Otherwise restore pc and fall through.
1673 		 */
1674 		access_type = mips_unaligned_load_store(frame,
1675 		    mode, frame->badvaddr, pc);
1676 
1677 		if (access_type) {
1678 			if (DELAYBRANCH(frame->cause))
1679 				frame->pc = MipsEmulateBranch(frame, frame->pc,
1680 				    0, 0);
1681 			else
1682 				frame->pc += 4;
1683 
1684 			if (ppsratecheck(&unaligned_lasterr,
1685 			    &unaligned_curerr, unaligned_pps_log_limit)) {
1686 				/* XXX TODO: keep global/tid/pid counters? */
1687 				log(LOG_INFO,
1688 				    "Unaligned %s: pid=%ld (%s), tid=%ld, "
1689 				    "pc=%#jx, badvaddr=%#jx\n",
1690 				    access_name[access_type - 1],
1691 				    (long) p->p_pid,
1692 				    p->p_comm,
1693 				    (long) td->td_tid,
1694 				    (intmax_t)pc,
1695 				    (intmax_t)frame->badvaddr);
1696 			}
1697 		}
1698 	}
1699 	return (access_type);
1700 }
1701
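/*
 * Usage note (informal): the emulation above is gated by the
 * vm.allow_unaligned_acc sysctl defined earlier in this file, and successful
 * fix-ups are rate-limited in the log by machdep.unaligned_log_pps_limit.
 * For example, a userland load through a misaligned pointer such as
 *
 *	char buf[8];
 *	int v = *(int *)(buf + 1);	// may compile to a plain lw and trap
 *
 * is transparently emulated here (and possibly logged) instead of delivering
 * SIGBUS, as long as the sysctl is non-zero.
 */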