xref: /linux-6.15/arch/arc/kernel/unaligned.c (revision 52cd5c4b)
1d2912cb1SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
22e651ea1SVineet Gupta /*
32e651ea1SVineet Gupta  * Copyright (C) 2011-2012 Synopsys (www.synopsys.com)
42e651ea1SVineet Gupta  *
52e651ea1SVineet Gupta  * vineetg : May 2011
62e651ea1SVineet Gupta  *  -Adapted (from .26 to .35)
72e651ea1SVineet Gupta  *  -original contribution by [email protected]
82e651ea1SVineet Gupta  */
92e651ea1SVineet Gupta 
102e651ea1SVineet Gupta #include <linux/types.h>
11ceed97abSVineet Gupta #include <linux/perf_event.h>
122e651ea1SVineet Gupta #include <linux/ptrace.h>
132e651ea1SVineet Gupta #include <linux/uaccess.h>
142e651ea1SVineet Gupta #include <asm/disasm.h>
1500429083SAl Viro #include "unaligned.h"
162e651ea1SVineet Gupta 
/*
 * Byte-order plumbing for the byte-at-a-time emulation below:
 * on big-endian CPUs the value must be byte-swapped into store order
 * first (FIRST_BYTE_*), and loaded bytes are assembled with mirrored
 * shift amounts (selected via BE in the get*_unaligned_check macros).
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define BE		1
/* swap halfwords then swap bytes: puts a 16-bit value into store order */
#define FIRST_BYTE_16	"swap %1, %1\n swape %1, %1\n"
/* swape: byte-reverse the 32-bit word into store order */
#define FIRST_BYTE_32	"swape %1, %1\n"
#else
#define BE		0
#define FIRST_BYTE_16
#define FIRST_BYTE_32
#endif
267d669a19SNoam Camus 
/*
 * Load a single byte from user address 'addr' with exception-table
 * protection: 'val' receives the byte, 'addr' is post-incremented by 1
 * (ldb.ab), and on a faulting access the .fixup handler sets 'err' to 1
 * and resumes at label 2 instead of oopsing.  Wider accesses are built
 * from these single-byte loads, which can never be misaligned.
 */
#define __get8_unaligned_check(val, addr, err)		\
	__asm__(					\
	"1:	ldb.ab	%1, [%2, 1]\n"			\
	"2:\n"						\
	"	.section .fixup,\"ax\"\n"		\
	"	.align	4\n"				\
	"3:	mov	%0, 1\n"			\
	"	j	2b\n"				\
	"	.previous\n"				\
	"	.section __ex_table,\"a\"\n"		\
	"	.align	4\n"				\
	"	.long	1b, 3b\n"			\
	"	.previous\n"				\
	: "=r" (err), "=&r" (val), "=r" (addr)		\
	: "0" (err), "2" (addr))
422e651ea1SVineet Gupta 
/*
 * Read a 16-bit value from a possibly unaligned 'addr' one byte at a
 * time and assemble it in native byte order (BE selects the shifts).
 * Jumps to the enclosing function's 'fault' label on an access error.
 */
#define get16_unaligned_check(val, addr)		\
	do {						\
		unsigned int err = 0, v, a = addr;	\
		__get8_unaligned_check(v, a, err);	\
		val =  v << ((BE) ? 8 : 0);		\
		__get8_unaligned_check(v, a, err);	\
		val |= v << ((BE) ? 0 : 8);		\
		if (err)				\
			goto fault;			\
	} while (0)
532e651ea1SVineet Gupta 
/*
 * Read a 32-bit value from a possibly unaligned 'addr' one byte at a
 * time and assemble it in native byte order (BE selects the shifts).
 * Jumps to the enclosing function's 'fault' label on an access error.
 */
#define get32_unaligned_check(val, addr)		\
	do {						\
		unsigned int err = 0, v, a = addr;	\
		__get8_unaligned_check(v, a, err);	\
		val =  v << ((BE) ? 24 : 0);		\
		__get8_unaligned_check(v, a, err);	\
		val |= v << ((BE) ? 16 : 8);		\
		__get8_unaligned_check(v, a, err);	\
		val |= v << ((BE) ? 8 : 16);		\
		__get8_unaligned_check(v, a, err);	\
		val |= v << ((BE) ? 0 : 24);		\
		if (err)				\
			goto fault;			\
	} while (0)
682e651ea1SVineet Gupta 
/*
 * Store a 16-bit value to a possibly unaligned 'addr' as two single
 * byte stores (FIRST_BYTE_16 pre-swaps into store order on big-endian).
 * Either store may fault; the .fixup handler then sets err and resumes
 * after the sequence.  Jumps to the enclosing function's 'fault' label
 * on error.
 */
#define put16_unaligned_check(val, addr)		\
	do {						\
		unsigned int err = 0, v = val, a = addr;\
							\
		__asm__(				\
		FIRST_BYTE_16				\
		"1:	stb.ab	%1, [%2, 1]\n"		\
		"	lsr %1, %1, 8\n"		\
		"2:	stb	%1, [%2]\n"		\
		"3:\n"					\
		"	.section .fixup,\"ax\"\n"	\
		"	.align	4\n"			\
		"4:	mov	%0, 1\n"		\
		"	j	3b\n"			\
		"	.previous\n"			\
		"	.section __ex_table,\"a\"\n"	\
		"	.align	4\n"			\
		"	.long	1b, 4b\n"		\
		"	.long	2b, 4b\n"		\
		"	.previous\n"			\
		: "=r" (err), "=&r" (v), "=&r" (a)	\
		: "0" (err), "1" (v), "2" (a));		\
							\
		if (err)				\
			goto fault;			\
	} while (0)
952e651ea1SVineet Gupta 
/*
 * Store a 32-bit value to a possibly unaligned 'addr' as four single
 * byte stores (FIRST_BYTE_32 pre-swaps into store order on big-endian).
 * Any of the four stores may fault; the .fixup handler then sets err
 * and resumes after the sequence.  Jumps to the enclosing function's
 * 'fault' label on error.
 */
#define put32_unaligned_check(val, addr)		\
	do {						\
		unsigned int err = 0, v = val, a = addr;\
							\
		__asm__(				\
		FIRST_BYTE_32				\
		"1:	stb.ab	%1, [%2, 1]\n"		\
		"	lsr %1, %1, 8\n"		\
		"2:	stb.ab	%1, [%2, 1]\n"		\
		"	lsr %1, %1, 8\n"		\
		"3:	stb.ab	%1, [%2, 1]\n"		\
		"	lsr %1, %1, 8\n"		\
		"4:	stb	%1, [%2]\n"		\
		"5:\n"					\
		"	.section .fixup,\"ax\"\n"	\
		"	.align	4\n"			\
		"6:	mov	%0, 1\n"		\
		"	j	5b\n"			\
		"	.previous\n"			\
		"	.section __ex_table,\"a\"\n"	\
		"	.align	4\n"			\
		"	.long	1b, 6b\n"		\
		"	.long	2b, 6b\n"		\
		"	.long	3b, 6b\n"		\
		"	.long	4b, 6b\n"		\
		"	.previous\n"			\
		: "=r" (err), "=&r" (v), "=&r" (a)	\
		: "0" (err), "1" (v), "2" (a));		\
							\
		if (err)				\
			goto fault;			\
	} while (0)
1282e651ea1SVineet Gupta 
/*
 * sysctl hooks -- presumably exposed as /proc/sys/kernel/unaligned-trap
 * and /proc/sys/kernel/ignore-unaligned-usertrap (the latter name is
 * printed in the warning below); registration lives elsewhere - verify.
 */
int unaligned_enabled __read_mostly = 1;	/* Enabled by default */
int no_unaligned_warning __read_mostly = 1;	/* Only 1 warning by default */
1322e651ea1SVineet Gupta 
/*
 * Emulate an unaligned load described by @state: perform any address
 * write-back, fetch the 16/32-bit value byte-wise, sign-extend if the
 * instruction requested it, and write the result to the destination
 * register.  Sets state->fault on an access error.
 */
static void fixup_load(struct disasm_state *state, struct pt_regs *regs,
			struct callee_regs *cregs)
{
	int val;

	/* register write back (aa 1/2: presumably .aw/.ab modes - see disasm.h) */
	if ((state->aa == 1) || (state->aa == 2)) {
		set_reg(state->wb_reg, state->src1 + state->src2, regs, cregs);

		/* aa == 2: effective address is the base alone (offset used
		 * only for the write-back), so drop src2 for the load below */
		if (state->aa == 2)
			state->src2 = 0;
	}

	if (state->zz == 0) {
		get32_unaligned_check(val, state->src1 + state->src2);
	} else {
		get16_unaligned_check(val, state->src1 + state->src2);

		/* state->x: instruction asked for sign extension */
		if (state->x)
			val = (val << 16) >> 16;
	}

	/* prefetch encodings have no destination register to update */
	if (state->pref == 0)
		set_reg(state->dest, val, regs, cregs);

	return;

fault:	state->fault = 1;	/* target of goto in get*_unaligned_check */
}
1622e651ea1SVineet Gupta 
/*
 * Emulate an unaligned store described by @state: perform any address
 * write-back, then emit the 16/32-bit value as single byte stores.
 * Sets state->fault on an access error.
 */
static void fixup_store(struct disasm_state *state, struct pt_regs *regs,
			struct callee_regs *cregs)
{
	/* register write back */
	if ((state->aa == 1) || (state->aa == 2)) {
		set_reg(state->wb_reg, state->src2 + state->src3, regs, cregs);

		/*
		 * NOTE(review): this condition can never be true inside the
		 * (aa == 1 || aa == 2) branch, so src3 is never zeroed here.
		 * fixup_load() zeroes the offset for aa == 2 at the same
		 * point, so this may have been intended as "state->aa == 2";
		 * confirm against the ARC ST address-write-back semantics
		 * before changing.
		 */
		if (state->aa == 3)
			state->src3 = 0;
	} else if (state->aa == 3) {
		/* scaled write-back: offset shifted by access size (zz) */
		if (state->zz == 2) {
			set_reg(state->wb_reg, state->src2 + (state->src3 << 1),
				regs, cregs);
		} else if (!state->zz) {
			set_reg(state->wb_reg, state->src2 + (state->src3 << 2),
				regs, cregs);
		} else {
			/* zz == 1 (byte) can't be scaled-unaligned: bail */
			goto fault;
		}
	}

	/* write fix-up */
	if (!state->zz)
		put32_unaligned_check(state->src1, state->src2 + state->src3);
	else
		put16_unaligned_check(state->src1, state->src2 + state->src3);

	return;

fault:	state->fault = 1;	/* target of goto in put*_unaligned_check */
}
1942e651ea1SVineet Gupta 
1952e651ea1SVineet Gupta /*
1962e651ea1SVineet Gupta  * Handle an unaligned access
1972e651ea1SVineet Gupta  * Returns 0 if successfully handled, 1 if some error happened
1982e651ea1SVineet Gupta  */
misaligned_fixup(unsigned long address,struct pt_regs * regs,struct callee_regs * cregs)1992e651ea1SVineet Gupta int misaligned_fixup(unsigned long address, struct pt_regs *regs,
20038a9ff6dSVineet Gupta 		     struct callee_regs *cregs)
2012e651ea1SVineet Gupta {
2022e651ea1SVineet Gupta 	struct disasm_state state;
2032e651ea1SVineet Gupta 
2042e651ea1SVineet Gupta 	/* handle user mode only and only if enabled by sysadmin */
2052e651ea1SVineet Gupta 	if (!user_mode(regs) || !unaligned_enabled)
2062e651ea1SVineet Gupta 		return 1;
2072e651ea1SVineet Gupta 
2082e651ea1SVineet Gupta 	if (no_unaligned_warning) {
2092e651ea1SVineet Gupta 		pr_warn_once("%s(%d) made unaligned access which was emulated"
2102e651ea1SVineet Gupta 			     " by kernel assist\n. This can degrade application"
2112e651ea1SVineet Gupta 			     " performance significantly\n. To enable further"
2122e651ea1SVineet Gupta 			     " logging of such instances, please \n"
2132e651ea1SVineet Gupta 			     " echo 0 > /proc/sys/kernel/ignore-unaligned-usertrap\n",
214*52cd5c4bSYafang Shao 			     current->comm, task_pid_nr(current));
2152e651ea1SVineet Gupta 	} else {
2162e651ea1SVineet Gupta 		/* Add rate limiting if it gets down to it */
2172e651ea1SVineet Gupta 		pr_warn("%s(%d): unaligned access to/from 0x%lx by PC: 0x%lx\n",
218*52cd5c4bSYafang Shao 			current->comm, task_pid_nr(current),
2192e651ea1SVineet Gupta 			address, regs->ret);
2202e651ea1SVineet Gupta 
2212e651ea1SVineet Gupta 	}
2222e651ea1SVineet Gupta 
2232e651ea1SVineet Gupta 	disasm_instr(regs->ret, &state, 1, regs, cregs);
2242e651ea1SVineet Gupta 
2252e651ea1SVineet Gupta 	if (state.fault)
2262e651ea1SVineet Gupta 		goto fault;
2272e651ea1SVineet Gupta 
2282e651ea1SVineet Gupta 	/* ldb/stb should not have unaligned exception */
2292e651ea1SVineet Gupta 	if ((state.zz == 1) || (state.di))
2302e651ea1SVineet Gupta 		goto fault;
2312e651ea1SVineet Gupta 
2322e651ea1SVineet Gupta 	if (!state.write)
2332e651ea1SVineet Gupta 		fixup_load(&state, regs, cregs);
2342e651ea1SVineet Gupta 	else
2352e651ea1SVineet Gupta 		fixup_store(&state, regs, cregs);
2362e651ea1SVineet Gupta 
2372e651ea1SVineet Gupta 	if (state.fault)
2382e651ea1SVineet Gupta 		goto fault;
2392e651ea1SVineet Gupta 
240ecaa054fSJulia Lawall 	/* clear any remnants of delay slot */
2412e651ea1SVineet Gupta 	if (delay_mode(regs)) {
242a524c218SVineet Gupta 		regs->ret = regs->bta & ~1U;
2432e651ea1SVineet Gupta 		regs->status32 &= ~STATUS_DE_MASK;
2442e651ea1SVineet Gupta 	} else {
2452e651ea1SVineet Gupta 		regs->ret += state.instr_len;
246c11eb222SMischa Jonker 
247c11eb222SMischa Jonker 		/* handle zero-overhead-loop */
248c11eb222SMischa Jonker 		if ((regs->ret == regs->lp_end) && (regs->lp_count)) {
249c11eb222SMischa Jonker 			regs->ret = regs->lp_start;
250c11eb222SMischa Jonker 			regs->lp_count--;
251c11eb222SMischa Jonker 		}
2522e651ea1SVineet Gupta 	}
2532e651ea1SVineet Gupta 
254ceed97abSVineet Gupta 	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, address);
2552e651ea1SVineet Gupta 	return 0;
2562e651ea1SVineet Gupta 
2572e651ea1SVineet Gupta fault:
2582e651ea1SVineet Gupta 	pr_err("Alignment trap: fault in fix-up %08lx at [<%08lx>]\n",
2592e651ea1SVineet Gupta 		state.words[0], address);
2602e651ea1SVineet Gupta 
2612e651ea1SVineet Gupta 	return 1;
2622e651ea1SVineet Gupta }
263